Diffstat (limited to 'target')
 target/loongarch/csr_helper.c             |   1
 target/loongarch/iocsr_helper.c           |   1
 target/m68k/translate.c                   |   1
 target/mips/tcg/micromips_translate.c.inc |  24
 target/mips/tcg/mips16e_translate.c.inc   |  18
 target/mips/tcg/mxu_translate.c           |   3
 target/mips/tcg/nanomips_translate.c.inc  |  32
 target/nios2/translate.c                  |  10
 target/sh4/translate.c                    | 102
9 files changed, 123 insertions, 69 deletions
diff --git a/target/loongarch/csr_helper.c b/target/loongarch/csr_helper.c
index 7e02787895..6526367946 100644
--- a/target/loongarch/csr_helper.c
+++ b/target/loongarch/csr_helper.c
@@ -15,7 +15,6 @@
 #include "exec/cpu_ldst.h"
 #include "hw/irq.h"
 #include "cpu-csr.h"
-#include "tcg/tcg-ldst.h"
 
 target_ulong helper_csrrd_pgd(CPULoongArchState *env)
 {
diff --git a/target/loongarch/iocsr_helper.c b/target/loongarch/iocsr_helper.c
index 505853e17b..dda9845d6c 100644
--- a/target/loongarch/iocsr_helper.c
+++ b/target/loongarch/iocsr_helper.c
@@ -12,7 +12,6 @@
 #include "exec/helper-proto.h"
 #include "exec/exec-all.h"
 #include "exec/cpu_ldst.h"
-#include "tcg/tcg-ldst.h"
 
 #define GET_MEMTXATTRS(cas) \
     ((MemTxAttrs){.requester_id = env_cpu(cas)->cpu_index})
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
index 744eb3748b..44d852b106 100644
--- a/target/m68k/translate.c
+++ b/target/m68k/translate.c
@@ -959,6 +959,7 @@ static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
     switch (opsize) {
     case OS_BYTE:
     case OS_WORD:
+    case OS_LONG:
         tcg_gen_qemu_ld_tl(tmp, addr, index, opsize | MO_SIGN | MO_TE);
         gen_helper_exts32(cpu_env, fp, tmp);
         break;
diff --git a/target/mips/tcg/micromips_translate.c.inc b/target/mips/tcg/micromips_translate.c.inc
index e8b193aeda..211d102cf6 100644
--- a/target/mips/tcg/micromips_translate.c.inc
+++ b/target/mips/tcg/micromips_translate.c.inc
@@ -977,20 +977,24 @@ static void gen_ldst_pair(DisasContext *ctx, uint32_t opc, int rd,
             gen_reserved_instruction(ctx);
             return;
         }
-        tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL);
+        tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL |
+                           ctx->default_tcg_memop_mask);
         gen_store_gpr(t1, rd);
         tcg_gen_movi_tl(t1, 4);
         gen_op_addr_add(ctx, t0, t0, t1);
-        tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL);
+        tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL |
+                           ctx->default_tcg_memop_mask);
         gen_store_gpr(t1, rd + 1);
         break;
     case SWP:
         gen_load_gpr(t1, rd);
-        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);
+        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL |
+                           ctx->default_tcg_memop_mask);
         tcg_gen_movi_tl(t1, 4);
         gen_op_addr_add(ctx, t0, t0, t1);
         gen_load_gpr(t1, rd + 1);
-        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);
+        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL |
+                           ctx->default_tcg_memop_mask);
         break;
 #ifdef TARGET_MIPS64
     case LDP:
@@ -998,20 +1002,24 @@ static void gen_ldst_pair(DisasContext *ctx, uint32_t opc, int rd,
             gen_reserved_instruction(ctx);
             return;
         }
-        tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ);
+        tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
+                           ctx->default_tcg_memop_mask);
         gen_store_gpr(t1, rd);
         tcg_gen_movi_tl(t1, 8);
         gen_op_addr_add(ctx, t0, t0, t1);
-        tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ);
+        tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
+                           ctx->default_tcg_memop_mask);
         gen_store_gpr(t1, rd + 1);
         break;
     case SDP:
         gen_load_gpr(t1, rd);
-        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ);
+        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
+                           ctx->default_tcg_memop_mask);
        tcg_gen_movi_tl(t1, 8);
         gen_op_addr_add(ctx, t0, t0, t1);
         gen_load_gpr(t1, rd + 1);
-        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ);
+        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
+                           ctx->default_tcg_memop_mask);
         break;
 #endif
     }
diff --git a/target/mips/tcg/mips16e_translate.c.inc b/target/mips/tcg/mips16e_translate.c.inc
index 602f5f0c02..5cffe0e412 100644
--- a/target/mips/tcg/mips16e_translate.c.inc
+++ b/target/mips/tcg/mips16e_translate.c.inc
@@ -172,22 +172,26 @@ static void gen_mips16_save(DisasContext *ctx,
     case 4:
         gen_base_offset_addr(ctx, t0, 29, 12);
         gen_load_gpr(t1, 7);
-        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);
+        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL |
+                           ctx->default_tcg_memop_mask);
         /* Fall through */
     case 3:
         gen_base_offset_addr(ctx, t0, 29, 8);
         gen_load_gpr(t1, 6);
-        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);
+        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL |
+                           ctx->default_tcg_memop_mask);
         /* Fall through */
     case 2:
         gen_base_offset_addr(ctx, t0, 29, 4);
         gen_load_gpr(t1, 5);
-        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);
+        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL |
+                           ctx->default_tcg_memop_mask);
         /* Fall through */
     case 1:
         gen_base_offset_addr(ctx, t0, 29, 0);
         gen_load_gpr(t1, 4);
-        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);
+        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL |
+                           ctx->default_tcg_memop_mask);
     }
 
     gen_load_gpr(t0, 29);
@@ -196,7 +200,8 @@ static void gen_mips16_save(DisasContext *ctx,
         tcg_gen_movi_tl(t2, -4);                            \
         gen_op_addr_add(ctx, t0, t0, t2);                   \
         gen_load_gpr(t1, reg);                              \
-        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);  \
+        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL |  \
+                           ctx->default_tcg_memop_mask);    \
     } while (0)
 
     if (do_ra) {
@@ -298,7 +303,8 @@ static void gen_mips16_restore(DisasContext *ctx,
 #define DECR_AND_LOAD(reg) do {                             \
         tcg_gen_movi_tl(t2, -4);                            \
         gen_op_addr_add(ctx, t0, t0, t2);                   \
-        tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL);  \
+        tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL |  \
+                           ctx->default_tcg_memop_mask);    \
         gen_store_gpr(t1, reg);                             \
     } while (0)
 
diff --git a/target/mips/tcg/mxu_translate.c b/target/mips/tcg/mxu_translate.c
index bdd20709c0..be038b5f07 100644
--- a/target/mips/tcg/mxu_translate.c
+++ b/target/mips/tcg/mxu_translate.c
@@ -831,7 +831,8 @@ static void gen_mxu_s32ldd_s32lddr(DisasContext *ctx)
         tcg_gen_ori_tl(t1, t1, 0xFFFFF000);
     }
     tcg_gen_add_tl(t1, t0, t1);
-    tcg_gen_qemu_ld_tl(t1, t1, ctx->mem_idx, MO_TESL ^ (sel * MO_BSWAP));
+    tcg_gen_qemu_ld_tl(t1, t1, ctx->mem_idx, (MO_TESL ^ (sel * MO_BSWAP)) |
+                       ctx->default_tcg_memop_mask);
 
     gen_store_mxu_gpr(t1, XRa);
 }
diff --git a/target/mips/tcg/nanomips_translate.c.inc b/target/mips/tcg/nanomips_translate.c.inc
index 97b9572caa..a98dde0d2e 100644
--- a/target/mips/tcg/nanomips_translate.c.inc
+++ b/target/mips/tcg/nanomips_translate.c.inc
@@ -998,7 +998,7 @@ static void gen_llwp(DisasContext *ctx, uint32_t base, int16_t offset,
     TCGv tmp2 = tcg_temp_new();
 
     gen_base_offset_addr(ctx, taddr, base, offset);
-    tcg_gen_qemu_ld_i64(tval, taddr, ctx->mem_idx, MO_TEUQ);
+    tcg_gen_qemu_ld_i64(tval, taddr, ctx->mem_idx, MO_TEUQ | MO_ALIGN);
     if (cpu_is_bigendian(ctx)) {
         tcg_gen_extr_i64_tl(tmp2, tmp1, tval);
     } else {
@@ -1039,7 +1039,8 @@ static void gen_scwp(DisasContext *ctx, uint32_t base, int16_t offset,
 
     tcg_gen_ld_i64(llval, cpu_env, offsetof(CPUMIPSState, llval_wp));
     tcg_gen_atomic_cmpxchg_i64(val, taddr, llval, tval,
-                               eva ? MIPS_HFLAG_UM : ctx->mem_idx, MO_64);
+                               eva ? MIPS_HFLAG_UM : ctx->mem_idx,
+                               MO_64 | MO_ALIGN);
     if (reg1 != 0) {
         tcg_gen_movi_tl(cpu_gpr[reg1], 1);
     }
@@ -2640,52 +2641,49 @@ static void gen_p_lsx(DisasContext *ctx, int rd, int rs, int rt)
 
     switch (extract32(ctx->opcode, 7, 4)) {
     case NM_LBX:
-        tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx,
-                           MO_SB);
+        tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_SB);
         gen_store_gpr(t0, rd);
         break;
     case NM_LHX:
     /*case NM_LHXS:*/
         tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx,
-                           MO_TESW);
+                           MO_TESW | ctx->default_tcg_memop_mask);
         gen_store_gpr(t0, rd);
         break;
     case NM_LWX:
     /*case NM_LWXS:*/
         tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx,
-                           MO_TESL);
+                           MO_TESL | ctx->default_tcg_memop_mask);
         gen_store_gpr(t0, rd);
         break;
     case NM_LBUX:
-        tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx,
-                           MO_UB);
+        tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_UB);
         gen_store_gpr(t0, rd);
         break;
     case NM_LHUX:
     /*case NM_LHUXS:*/
         tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx,
-                           MO_TEUW);
+                           MO_TEUW | ctx->default_tcg_memop_mask);
         gen_store_gpr(t0, rd);
         break;
     case NM_SBX:
         check_nms(ctx);
         gen_load_gpr(t1, rd);
-        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx,
-                           MO_8);
+        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_8);
         break;
     case NM_SHX:
     /*case NM_SHXS:*/
         check_nms(ctx);
         gen_load_gpr(t1, rd);
         tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx,
-                           MO_TEUW);
+                           MO_TEUW | ctx->default_tcg_memop_mask);
         break;
     case NM_SWX:
     /*case NM_SWXS:*/
         check_nms(ctx);
         gen_load_gpr(t1, rd);
         tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx,
-                           MO_TEUL);
+                           MO_TEUL | ctx->default_tcg_memop_mask);
         break;
     case NM_LWC1X:
     /*case NM_LWC1XS:*/
@@ -3738,7 +3736,8 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx)
                                 addr_off);
 
                 tcg_gen_movi_tl(t0, addr);
-                tcg_gen_qemu_ld_tl(cpu_gpr[rt], t0, ctx->mem_idx, MO_TESL);
+                tcg_gen_qemu_ld_tl(cpu_gpr[rt], t0, ctx->mem_idx,
+                                   MO_TESL | ctx->default_tcg_memop_mask);
             }
             break;
         case NM_SWPC48:
@@ -3754,7 +3753,8 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx)
                 tcg_gen_movi_tl(t0, addr);
                 gen_load_gpr(t1, rt);
 
-                tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);
+                tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx,
+                                   MO_TEUL | ctx->default_tcg_memop_mask);
             }
             break;
         default:
@@ -4305,7 +4305,7 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx)
             TCGv va = tcg_temp_new();
             TCGv t1 = tcg_temp_new();
             MemOp memop = (extract32(ctx->opcode, 8, 3)) ==
-                      NM_P_LS_UAWM ? MO_UNALN : 0;
+                      NM_P_LS_UAWM ? MO_UNALN : MO_ALIGN;
 
             count = (count == 0) ? 8 : count;
             while (counter != count) {
diff --git a/target/nios2/translate.c b/target/nios2/translate.c
index 6610e22236..a548e16ed5 100644
--- a/target/nios2/translate.c
+++ b/target/nios2/translate.c
@@ -298,6 +298,11 @@ static void gen_ldx(DisasContext *dc, uint32_t code, uint32_t flags)
     TCGv data = dest_gpr(dc, instr.b);
 
     tcg_gen_addi_tl(addr, load_gpr(dc, instr.a), instr.imm16.s);
+#ifdef CONFIG_USER_ONLY
+    flags |= MO_UNALN;
+#else
+    flags |= MO_ALIGN;
+#endif
     tcg_gen_qemu_ld_tl(data, addr, dc->mem_idx, flags);
 }
 
@@ -309,6 +314,11 @@ static void gen_stx(DisasContext *dc, uint32_t code, uint32_t flags)
     TCGv addr = tcg_temp_new();
 
     tcg_gen_addi_tl(addr, load_gpr(dc, instr.a), instr.imm16.s);
+#ifdef CONFIG_USER_ONLY
+    flags |= MO_UNALN;
+#else
+    flags |= MO_ALIGN;
+#endif
     tcg_gen_qemu_st_tl(val, addr, dc->mem_idx, flags);
 }
 
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index 6e40d5dd6a..0dedbb8210 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -527,13 +527,15 @@ static void _decode_opc(DisasContext * ctx)
     case 0x9000:                /* mov.w @(disp,PC),Rn */
         {
             TCGv addr = tcg_constant_i32(ctx->base.pc_next + 4 + B7_0 * 2);
-            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
+            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
+                                MO_TESW | MO_ALIGN);
         }
         return;
     case 0xd000:                /* mov.l @(disp,PC),Rn */
         {
             TCGv addr = tcg_constant_i32((ctx->base.pc_next + 4 + B7_0 * 4) & ~3);
-            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
+            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
+                                MO_TESL | MO_ALIGN);
         }
         return;
     case 0x7000:                /* add #imm,Rn */
@@ -801,9 +803,11 @@ static void _decode_opc(DisasContext * ctx)
         {
             TCGv arg0, arg1;
             arg0 = tcg_temp_new();
-            tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
+            tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx,
+                                MO_TESL | MO_ALIGN);
             arg1 = tcg_temp_new();
-            tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
+            tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx,
+                                MO_TESL | MO_ALIGN);
             gen_helper_macl(cpu_env, arg0, arg1);
             tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
             tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
@@ -813,9 +817,11 @@ static void _decode_opc(DisasContext * ctx)
         {
             TCGv arg0, arg1;
             arg0 = tcg_temp_new();
-            tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
+            tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx,
+                                MO_TESL | MO_ALIGN);
             arg1 = tcg_temp_new();
-            tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
+            tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx,
+                                MO_TESL | MO_ALIGN);
             gen_helper_macw(cpu_env, arg0, arg1);
             tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
             tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
@@ -961,30 +967,36 @@ static void _decode_opc(DisasContext * ctx)
         if (ctx->tbflags & FPSCR_SZ) {
             TCGv_i64 fp = tcg_temp_new_i64();
             gen_load_fpr64(ctx, fp, XHACK(B7_4));
-            tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx, MO_TEUQ);
+            tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx,
+                                MO_TEUQ | MO_ALIGN);
         } else {
-            tcg_gen_qemu_st_i32(FREG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
+            tcg_gen_qemu_st_i32(FREG(B7_4), REG(B11_8), ctx->memidx,
+                                MO_TEUL | MO_ALIGN);
         }
         return;
     case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
         CHECK_FPU_ENABLED
         if (ctx->tbflags & FPSCR_SZ) {
             TCGv_i64 fp = tcg_temp_new_i64();
-            tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEUQ);
+            tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx,
+                                MO_TEUQ | MO_ALIGN);
             gen_store_fpr64(ctx, fp, XHACK(B11_8));
         } else {
-            tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL);
+            tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx,
+                                MO_TEUL | MO_ALIGN);
         }
         return;
     case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
         CHECK_FPU_ENABLED
         if (ctx->tbflags & FPSCR_SZ) {
             TCGv_i64 fp = tcg_temp_new_i64();
-            tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEUQ);
+            tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx,
+                                MO_TEUQ | MO_ALIGN);
             gen_store_fpr64(ctx, fp, XHACK(B11_8));
             tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
         } else {
-            tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL);
+            tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx,
+                                MO_TEUL | MO_ALIGN);
             tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
         }
         return;
@@ -996,10 +1008,12 @@ static void _decode_opc(DisasContext * ctx)
             TCGv_i64 fp = tcg_temp_new_i64();
             gen_load_fpr64(ctx, fp, XHACK(B7_4));
             tcg_gen_subi_i32(addr, REG(B11_8), 8);
-            tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEUQ);
+            tcg_gen_qemu_st_i64(fp, addr, ctx->memidx,
+                                MO_TEUQ | MO_ALIGN);
         } else {
             tcg_gen_subi_i32(addr, REG(B11_8), 4);
-            tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
+            tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx,
+                                MO_TEUL | MO_ALIGN);
         }
         tcg_gen_mov_i32(REG(B11_8), addr);
     }
@@ -1011,10 +1025,12 @@ static void _decode_opc(DisasContext * ctx)
         tcg_gen_add_i32(addr, REG(B7_4), REG(0));
         if (ctx->tbflags & FPSCR_SZ) {
             TCGv_i64 fp = tcg_temp_new_i64();
-            tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx, MO_TEUQ);
+            tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx,
+                                MO_TEUQ | MO_ALIGN);
             gen_store_fpr64(ctx, fp, XHACK(B11_8));
         } else {
-            tcg_gen_qemu_ld_i32(FREG(B11_8), addr, ctx->memidx, MO_TEUL);
+            tcg_gen_qemu_ld_i32(FREG(B11_8), addr, ctx->memidx,
+                                MO_TEUL | MO_ALIGN);
         }
     }
     return;
@@ -1026,9 +1042,11 @@ static void _decode_opc(DisasContext * ctx)
         if (ctx->tbflags & FPSCR_SZ) {
             TCGv_i64 fp = tcg_temp_new_i64();
             gen_load_fpr64(ctx, fp, XHACK(B7_4));
-            tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEUQ);
+            tcg_gen_qemu_st_i64(fp, addr, ctx->memidx,
+                                MO_TEUQ | MO_ALIGN);
         } else {
-            tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
+            tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx,
+                                MO_TEUL | MO_ALIGN);
         }
     }
     return;
@@ -1158,14 +1176,14 @@ static void _decode_opc(DisasContext * ctx)
         {
             TCGv addr = tcg_temp_new();
             tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
-            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
+            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW | MO_ALIGN);
         }
         return;
     case 0xc600:                /* mov.l @(disp,GBR),R0 */
         {
             TCGv addr = tcg_temp_new();
             tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
-            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL);
+            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL | MO_ALIGN);
         }
         return;
     case 0xc000:                /* mov.b R0,@(disp,GBR) */
@@ -1179,14 +1197,14 @@ static void _decode_opc(DisasContext * ctx)
         {
             TCGv addr = tcg_temp_new();
             tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
-            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
+            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW | MO_ALIGN);
         }
         return;
     case 0xc200:                /* mov.l R0,@(disp,GBR) */
         {
             TCGv addr = tcg_temp_new();
             tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
-            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL);
+            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL | MO_ALIGN);
         }
         return;
     case 0x8000:                /* mov.b R0,@(disp,Rn) */
@@ -1286,7 +1304,8 @@ static void _decode_opc(DisasContext * ctx)
         return;
     case 0x4087:                /* ldc.l @Rm+,Rn_BANK */
         CHECK_PRIVILEGED
-        tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx, MO_TESL);
+        tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx,
+                            MO_TESL | MO_ALIGN);
         tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
         return;
     case 0x0082:                /* stc Rm_BANK,Rn */
@@ -1298,7 +1317,8 @@ static void _decode_opc(DisasContext * ctx)
         {
             TCGv addr = tcg_temp_new();
             tcg_gen_subi_i32(addr, REG(B11_8), 4);
-            tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx, MO_TEUL);
+            tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx,
+                                MO_TEUL | MO_ALIGN);
             tcg_gen_mov_i32(REG(B11_8), addr);
         }
         return;
@@ -1354,7 +1374,8 @@ static void _decode_opc(DisasContext * ctx)
         CHECK_PRIVILEGED
         {
             TCGv val = tcg_temp_new();
-            tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TESL);
+            tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx,
+                                MO_TESL | MO_ALIGN);
             tcg_gen_andi_i32(val, val, 0x700083f3);
             gen_write_sr(val);
             tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
@@ -1372,7 +1393,7 @@ static void _decode_opc(DisasContext * ctx)
             TCGv val = tcg_temp_new();
             tcg_gen_subi_i32(addr, REG(B11_8), 4);
             gen_read_sr(val);
-            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
+            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL | MO_ALIGN);
             tcg_gen_mov_i32(REG(B11_8), addr);
         }
         return;
@@ -1383,7 +1404,8 @@ static void _decode_opc(DisasContext * ctx)
         return;                                                         \
     case ldpnum:                                                        \
         prechk                                                          \
-        tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
+        tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx,         \
+                            MO_TESL | MO_ALIGN);                        \
         tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);                    \
         return;
 #define ST(reg,stnum,stpnum,prechk)                                     \
@@ -1396,7 +1418,8 @@ static void _decode_opc(DisasContext * ctx)
         {                                                               \
             TCGv addr = tcg_temp_new();                                 \
             tcg_gen_subi_i32(addr, REG(B11_8), 4);                      \
-            tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \
+            tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx,           \
+                                MO_TEUL | MO_ALIGN);                    \
             tcg_gen_mov_i32(REG(B11_8), addr);                          \
         }                                                               \
         return;
@@ -1423,7 +1446,8 @@ static void _decode_opc(DisasContext * ctx)
         CHECK_FPU_ENABLED
         {
             TCGv addr = tcg_temp_new();
-            tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx, MO_TESL);
+            tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx,
+                                MO_TESL | MO_ALIGN);
             tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
             gen_helper_ld_fpscr(cpu_env, addr);
             ctx->base.is_jmp = DISAS_STOP;
@@ -1441,16 +1465,18 @@ static void _decode_opc(DisasContext * ctx)
             tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
             addr = tcg_temp_new();
             tcg_gen_subi_i32(addr, REG(B11_8), 4);
-            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
+            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL | MO_ALIGN);
             tcg_gen_mov_i32(REG(B11_8), addr);
         }
         return;
     case 0x00c3:                /* movca.l R0,@Rm */
         {
             TCGv val = tcg_temp_new();
-            tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TEUL);
+            tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx,
+                                MO_TEUL | MO_ALIGN);
             gen_helper_movcal(cpu_env, REG(B11_8), val);
-            tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
+            tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx,
+                                MO_TEUL | MO_ALIGN);
         }
         ctx->has_movcal = 1;
         return;
@@ -1492,11 +1518,13 @@ static void _decode_opc(DisasContext * ctx)
                                 cpu_lock_addr, fail);
             tmp = tcg_temp_new();
             tcg_gen_atomic_cmpxchg_i32(tmp, REG(B11_8), cpu_lock_value,
-                                       REG(0), ctx->memidx, MO_TEUL);
+                                       REG(0), ctx->memidx,
+                                       MO_TEUL | MO_ALIGN);
             tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, tmp, cpu_lock_value);
         } else {
             tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_lock_addr, -1, fail);
-            tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
+            tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx,
+                                MO_TEUL | MO_ALIGN);
             tcg_gen_movi_i32(cpu_sr_t, 1);
         }
         tcg_gen_br(done);
@@ -1521,11 +1549,13 @@ static void _decode_opc(DisasContext * ctx)
         if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
             TCGv tmp = tcg_temp_new();
             tcg_gen_mov_i32(tmp, REG(B11_8));
-            tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
+            tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
+                                MO_TESL | MO_ALIGN);
             tcg_gen_mov_i32(cpu_lock_value, REG(0));
             tcg_gen_mov_i32(cpu_lock_addr, tmp);
         } else {
-            tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
+            tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
+                                MO_TESL | MO_ALIGN);
             tcg_gen_movi_i32(cpu_lock_addr, 0);
         }
         return;