| author | Richard Henderson <richard.henderson@linaro.org> | 2022-11-07 19:48:14 +1100 |
|---|---|---|
| committer | Richard Henderson <richard.henderson@linaro.org> | 2023-02-04 06:19:42 -1000 |
| commit | cb48f3654e290ee5d7cbf1fb31888463fa2a180c (patch) | |
| tree | f528facdd87c95035ca0503406a20b4b86b7c3eb /accel/tcg/cputlb.c | |
| parent | 4771e71c28eb0cece2a17a2d891bbd724bdc158d (diff) | |
tcg: Add guest load/store primitives for TCGv_i128
These do not yet consider the atomicity of the 16-byte value; this is a direct replacement for the current target code, which uses a pair of 8-byte operations.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
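To make the "pair of 8-byte operations" concrete, here is a minimal standalone sketch (plain C, not QEMU code: `i128`, `ldq_be`, and `ld16_be` are hypothetical stand-ins for `Int128`, `helper_be_ldq_mmu`, and `cpu_ld16_be_mmu`) showing how the big-endian 16-byte load in the patch below composes its result from two 8-byte halves, high half at `addr` and low half at `addr + 8`:

```c
/*
 * Standalone sketch, not QEMU code: ldq_be, ld16_be and the i128
 * struct are hypothetical stand-ins for helper_be_ldq_mmu,
 * cpu_ld16_be_mmu and Int128.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t lo, hi; } i128;   /* stand-in for Int128 */

/* Big-endian 8-byte load: first byte is most significant. */
static uint64_t ldq_be(const uint8_t *p)
{
    uint64_t v = 0;
    for (int i = 0; i < 8; i++) {
        v = (v << 8) | p[i];
    }
    return v;
}

/* 16-byte big-endian load as two 8-byte loads, mirroring the patch:
   high half at addr, low half at addr + 8. */
static i128 ld16_be(const uint8_t *p)
{
    i128 r;
    r.hi = ldq_be(p);
    r.lo = ldq_be(p + 8);
    return r;
}

int main(void)
{
    uint8_t buf[16];
    for (int i = 0; i < 16; i++) {
        buf[i] = i + 1;                     /* bytes 0x01 .. 0x10 */
    }
    i128 v = ld16_be(buf);
    assert(v.hi == 0x0102030405060708ull);  /* first 8 bytes */
    assert(v.lo == 0x090a0b0c0d0e0f10ull);  /* last 8 bytes */
    printf("hi=%016llx lo=%016llx\n",
           (unsigned long long)v.hi, (unsigned long long)v.lo);
    return 0;
}
```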
Diffstat (limited to 'accel/tcg/cputlb.c')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | accel/tcg/cputlb.c | 112 |

1 file changed, 112 insertions(+), 0 deletions(-)
```diff
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 04e270742e..4812d83961 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -2192,6 +2192,64 @@ uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
     return cpu_load_helper(env, addr, oi, ra, helper_le_ldq_mmu);
 }
 
+Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr,
+                       MemOpIdx oi, uintptr_t ra)
+{
+    MemOp mop = get_memop(oi);
+    int mmu_idx = get_mmuidx(oi);
+    MemOpIdx new_oi;
+    unsigned a_bits;
+    uint64_t h, l;
+
+    tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_BE|MO_128));
+    a_bits = get_alignment_bits(mop);
+
+    /* Handle CPU specific unaligned behaviour */
+    if (addr & ((1 << a_bits) - 1)) {
+        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_LOAD,
+                             mmu_idx, ra);
+    }
+
+    /* Construct an unaligned 64-bit replacement MemOpIdx. */
+    mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN;
+    new_oi = make_memop_idx(mop, mmu_idx);
+
+    h = helper_be_ldq_mmu(env, addr, new_oi, ra);
+    l = helper_be_ldq_mmu(env, addr + 8, new_oi, ra);
+
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
+    return int128_make128(l, h);
+}
+
+Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr,
+                       MemOpIdx oi, uintptr_t ra)
+{
+    MemOp mop = get_memop(oi);
+    int mmu_idx = get_mmuidx(oi);
+    MemOpIdx new_oi;
+    unsigned a_bits;
+    uint64_t h, l;
+
+    tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_LE|MO_128));
+    a_bits = get_alignment_bits(mop);
+
+    /* Handle CPU specific unaligned behaviour */
+    if (addr & ((1 << a_bits) - 1)) {
+        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_LOAD,
+                             mmu_idx, ra);
+    }
+
+    /* Construct an unaligned 64-bit replacement MemOpIdx. */
+    mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN;
+    new_oi = make_memop_idx(mop, mmu_idx);
+
+    l = helper_le_ldq_mmu(env, addr, new_oi, ra);
+    h = helper_le_ldq_mmu(env, addr + 8, new_oi, ra);
+
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
+    return int128_make128(l, h);
+}
+
 /*
  * Store Helpers
  */
@@ -2546,6 +2604,60 @@ void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
     cpu_store_helper(env, addr, val, oi, retaddr, helper_le_stq_mmu);
 }
 
+void cpu_st16_be_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
+                     MemOpIdx oi, uintptr_t ra)
+{
+    MemOp mop = get_memop(oi);
+    int mmu_idx = get_mmuidx(oi);
+    MemOpIdx new_oi;
+    unsigned a_bits;
+
+    tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_BE|MO_128));
+    a_bits = get_alignment_bits(mop);
+
+    /* Handle CPU specific unaligned behaviour */
+    if (addr & ((1 << a_bits) - 1)) {
+        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
+                             mmu_idx, ra);
+    }
+
+    /* Construct an unaligned 64-bit replacement MemOpIdx. */
+    mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN;
+    new_oi = make_memop_idx(mop, mmu_idx);
+
+    helper_be_stq_mmu(env, addr, int128_gethi(val), new_oi, ra);
+    helper_be_stq_mmu(env, addr + 8, int128_getlo(val), new_oi, ra);
+
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
+}
+
+void cpu_st16_le_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
+                     MemOpIdx oi, uintptr_t ra)
+{
+    MemOp mop = get_memop(oi);
+    int mmu_idx = get_mmuidx(oi);
+    MemOpIdx new_oi;
+    unsigned a_bits;
+
+    tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_LE|MO_128));
+    a_bits = get_alignment_bits(mop);
+
+    /* Handle CPU specific unaligned behaviour */
+    if (addr & ((1 << a_bits) - 1)) {
+        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
+                             mmu_idx, ra);
+    }
+
+    /* Construct an unaligned 64-bit replacement MemOpIdx. */
+    mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN;
+    new_oi = make_memop_idx(mop, mmu_idx);
+
+    helper_le_stq_mmu(env, addr, int128_getlo(val), new_oi, ra);
+    helper_le_stq_mmu(env, addr + 8, int128_gethi(val), new_oi, ra);
+
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
+}
+
 #include "ldst_common.c.inc"
 
 /*
```
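Note the mirrored ordering in the two store variants above: the big-endian helper writes the high half at `addr`, while the little-endian helper writes the low half at `addr`, so guest memory receives the 16 bytes in the correct order either way. A minimal standalone sketch of the little-endian case (again not QEMU code: `i128`, `stq_le`, and `st16_le` are hypothetical stand-ins for `Int128`, `helper_le_stq_mmu`, and `cpu_st16_le_mmu`):

```c
/*
 * Standalone sketch, not QEMU code: stq_le and st16_le are
 * hypothetical stand-ins for helper_le_stq_mmu and cpu_st16_le_mmu.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t lo, hi; } i128;   /* stand-in for Int128 */

/* Little-endian 8-byte store: least significant byte first. */
static void stq_le(uint8_t *p, uint64_t v)
{
    for (int i = 0; i < 8; i++) {
        p[i] = v & 0xff;
        v >>= 8;
    }
}

/* 16-byte little-endian store as two 8-byte stores, mirroring the
   patch: low half at addr, high half at addr + 8. */
static void st16_le(uint8_t *p, i128 v)
{
    stq_le(p, v.lo);
    stq_le(p + 8, v.hi);
}

int main(void)
{
    uint8_t buf[16] = { 0 };
    i128 v = { .lo = 0x0807060504030201ull, .hi = 0x100f0e0d0c0b0a09ull };
    st16_le(buf, v);
    for (int i = 0; i < 16; i++) {
        assert(buf[i] == i + 1);            /* bytes 0x01 .. 0x10 in order */
    }
    printf("ok: 16 bytes stored in ascending order\n");
    return 0;
}
```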