Diffstat (limited to 'accel/tcg/user-exec.c')
| -rw-r--r-- | accel/tcg/user-exec.c | 24 |
1 file changed, 17 insertions, 7 deletions
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index dc8d6b5d40..8fbcbf9771 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -721,7 +721,7 @@ int page_unprotect(target_ulong address, uintptr_t pc)
     return current_tb_invalidated ? 2 : 1;
 }
 
-static int probe_access_internal(CPUArchState *env, target_ulong addr,
+static int probe_access_internal(CPUArchState *env, vaddr addr,
                                  int fault_size, MMUAccessType access_type,
                                  bool nonfault, uintptr_t ra)
 {
@@ -759,7 +759,7 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
     cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
 }
 
-int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
+int probe_access_flags(CPUArchState *env, vaddr addr, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool nonfault, void **phost, uintptr_t ra)
 {
@@ -771,7 +771,7 @@ int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
     return flags;
 }
 
-void *probe_access(CPUArchState *env, target_ulong addr, int size,
+void *probe_access(CPUArchState *env, vaddr addr, int size,
                    MMUAccessType access_type, int mmu_idx, uintptr_t ra)
 {
     int flags;
@@ -783,7 +783,7 @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
     return size ? g2h(env_cpu(env), addr) : NULL;
 }
 
-tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
+tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
                                         void **hostp)
 {
     int flags;
@@ -889,7 +889,7 @@ void page_reset_target_data(target_ulong start, target_ulong last) { }
 
 /* The softmmu versions of these helpers are in cputlb.c. */
 
-static void *cpu_mmu_lookup(CPUArchState *env, abi_ptr addr,
+static void *cpu_mmu_lookup(CPUArchState *env, vaddr addr,
                             MemOp mop, uintptr_t ra, MMUAccessType type)
 {
     int a_bits = get_alignment_bits(mop);
@@ -914,6 +914,7 @@ static uint8_t do_ld1_mmu(CPUArchState *env, abi_ptr addr,
     uint8_t ret;
 
     tcg_debug_assert((mop & MO_SIZE) == MO_8);
+    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
     haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
     ret = ldub_p(haddr);
     clear_helper_retaddr();
@@ -947,6 +948,7 @@ static uint16_t do_ld2_mmu(CPUArchState *env, abi_ptr addr,
     uint16_t ret;
 
     tcg_debug_assert((mop & MO_SIZE) == MO_16);
+    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
     haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
     ret = load_atom_2(env, ra, haddr, mop);
     clear_helper_retaddr();
@@ -984,6 +986,7 @@ static uint32_t do_ld4_mmu(CPUArchState *env, abi_ptr addr,
     uint32_t ret;
 
     tcg_debug_assert((mop & MO_SIZE) == MO_32);
+    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
     haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
     ret = load_atom_4(env, ra, haddr, mop);
     clear_helper_retaddr();
@@ -1021,6 +1024,7 @@ static uint64_t do_ld8_mmu(CPUArchState *env, abi_ptr addr,
     uint64_t ret;
 
     tcg_debug_assert((mop & MO_SIZE) == MO_64);
+    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
     haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
     ret = load_atom_8(env, ra, haddr, mop);
     clear_helper_retaddr();
@@ -1052,6 +1056,7 @@ static Int128 do_ld16_mmu(CPUArchState *env, abi_ptr addr,
     Int128 ret;
 
     tcg_debug_assert((mop & MO_SIZE) == MO_128);
+    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
     haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
     ret = load_atom_16(env, ra, haddr, mop);
     clear_helper_retaddr();
@@ -1087,6 +1092,7 @@ static void do_st1_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
     void *haddr;
 
     tcg_debug_assert((mop & MO_SIZE) == MO_8);
+    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
     haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
     stb_p(haddr, val);
     clear_helper_retaddr();
@@ -1111,6 +1117,7 @@ static void do_st2_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
     void *haddr;
 
     tcg_debug_assert((mop & MO_SIZE) == MO_16);
+    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
     haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
 
     if (mop & MO_BSWAP) {
@@ -1139,6 +1146,7 @@ static void do_st4_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
     void *haddr;
 
     tcg_debug_assert((mop & MO_SIZE) == MO_32);
+    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
     haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
 
     if (mop & MO_BSWAP) {
@@ -1167,6 +1175,7 @@ static void do_st8_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
     void *haddr;
 
     tcg_debug_assert((mop & MO_SIZE) == MO_64);
+    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
     haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
 
     if (mop & MO_BSWAP) {
@@ -1195,6 +1204,7 @@ static void do_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
     void *haddr;
 
     tcg_debug_assert((mop & MO_SIZE) == MO_128);
+    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
     haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
 
     if (mop & MO_BSWAP) {
@@ -1324,8 +1334,8 @@ uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
 /*
  * Do not allow unaligned operations to proceed. Return the host address.
  */
-static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
-                               MemOpIdx oi, int size, uintptr_t retaddr)
+static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
+                               int size, uintptr_t retaddr)
 {
     MemOp mop = get_memop(oi);
     int a_bits = get_alignment_bits(mop);
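For reference, a minimal sketch of a caller of the probe_access_flags() prototype changed above. The helper name and its arguments are hypothetical and not part of this patch; only the probe_access_flags() signature (taken from the hunk above), MMU_DATA_STORE, and the TLB_INVALID_MASK result bit are existing QEMU interfaces.

/*
 * Illustration only: a hypothetical target helper that probes a page
 * non-faulting before committing a store, using the vaddr-typed
 * probe_access_flags() shown in the diff.
 */
static bool page_is_writable(CPUArchState *env, vaddr addr,
                             int mmu_idx, uintptr_t ra)
{
    void *host;
    int flags = probe_access_flags(env, addr, 1, MMU_DATA_STORE,
                                   mmu_idx, true /* nonfault */, &host, ra);

    /* A nonfault probe reports an inaccessible page via TLB_INVALID_MASK. */
    return !(flags & TLB_INVALID_MASK);
}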