path: root/tcg/arm/tcg-target.c
author     Peter Maydell <peter.maydell@linaro.org>   2014-05-13 13:16:37 +0100
committer  Peter Maydell <peter.maydell@linaro.org>   2014-05-13 13:16:37 +0100
commit     cd2b9b86803e46a09cf239afc44413884efa53f4 (patch)
tree       df975535f574d6846a27242e3a85229a17c0cd34 /tcg/arm/tcg-target.c
parent     fccae3226da3238eb857dfdf8f997434b168372c (diff)
parent     96d0ee7f0950e725ef7a4f7516e0af6a08d303e0 (diff)
Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20140512' into staging
tcg updates for 20140512

# gpg: Signature made Tue 13 May 2014 00:19:56 BST using RSA key ID 4DD0279B
# gpg: Can't check signature: public key not found

* remotes/rth/tags/pull-tcg-20140512: (26 commits)
  tcg: Remove unreachable code in tcg_out_op and op_defs
  tcg: Use tcg_target_available_regs in tcg_reg_alloc_mov
  tcg: Make call address a constant parameter
  tci: Create tcg_out_call
  tcg-mips: Split out tcg_out_call
  tcg-sparc: Create tcg_out_call
  tcg-ppc64: Rename tcg_out_calli to tcg_out_call
  tcg-ppc: Split out tcg_out_call
  tcg-s390: Rename tgen_calli to tcg_out_call
  tcg-i386: Rename tcg_out_calli to tcg_out_call
  tcg: Require TCG_TARGET_INSN_UNIT_SIZE
  tci: Define TCG_TARGET_INSN_UNIT_SIZE
  tcg-mips: Define TCG_TARGET_INSN_UNIT_SIZE
  tcg-ia64: Define TCG_TARGET_INSN_UNIT_SIZE
  tcg-s390: Define TCG_TARGET_INSN_UNIT_SIZE
  tcg-aarch64: Define TCG_TARGET_INSN_UNIT_SIZE
  tcg-arm: Define TCG_TARGET_INSN_UNIT_SIZE
  tcg-sparc: Define TCG_TARGET_INSN_UNIT_SIZE
  tcg-ppc: Define TCG_TARGET_INSN_UNIT_SIZE
  tcg-ppc64: Define TCG_TARGET_INSN_UNIT_SIZE
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
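
The recurring theme of the series above is the switch from raw byte pointers to
tcg_insn_unit pointers sized by TCG_TARGET_INSN_UNIT_SIZE, which is what drives
the uint8_t * -> tcg_insn_unit * conversions in the ARM diff below. A minimal
sketch of the idea, assuming ARM's 4-byte unit; ptr_byte_diff, pcrel_diff and
current_code_size here are illustrative stand-ins for the tcg_ptr_byte_diff,
tcg_pcrel_diff and tcg_current_code_size helpers the diff relies on, not their
actual tcg.h definitions:

    /* Sketch only: the pointer arithmetic implied by the tcg_insn_unit change. */
    #include <stddef.h>
    #include <stdint.h>

    /* Every ARM instruction is one 32-bit word, so TCG_TARGET_INSN_UNIT_SIZE is 4
       and code pointers advance one instruction at a time. */
    typedef uint32_t tcg_insn_unit;

    typedef struct {
        tcg_insn_unit *code_buf;   /* start of the generated-code buffer */
        tcg_insn_unit *code_ptr;   /* current emit position */
    } TCGContextSketch;

    /* Byte distance between two code pointers (cf. tcg_ptr_byte_diff). */
    static inline ptrdiff_t ptr_byte_diff(const void *a, const void *b)
    {
        return (const char *)a - (const char *)b;
    }

    /* PC-relative displacement in bytes from the current emit position to a
       target (cf. tcg_pcrel_diff, used by tcg_out_goto and tcg_out_call below). */
    static inline ptrdiff_t pcrel_diff(const TCGContextSketch *s, const void *target)
    {
        return ptr_byte_diff(target, s->code_ptr);
    }

    /* Bytes emitted so far (cf. tcg_current_code_size), used for the
       tb_jmp_offset/tb_next_offset bookkeeping in tcg_out_op. */
    static inline size_t current_code_size(const TCGContextSketch *s)
    {
        return ptr_byte_diff(s->code_ptr, s->code_buf);
    }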
Diffstat (limited to 'tcg/arm/tcg-target.c')
-rw-r--r--  tcg/arm/tcg-target.c  174
1 file changed, 53 insertions, 121 deletions
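
For reference, the arithmetic behind the rewritten reloc_pc24() and
tcg_out_b_noaddr() in the hunks below: an ARM B/BL branches to
PC + 8 + (signed imm24 << 2), so the relocation stores
(target - insn_address - 8) >> 2 in the low 24 bits of the instruction word,
while the *_noaddr emitters rewrite only the top cond/opcode byte so that a
previously patched 24-bit target survives retranslation. A standalone sketch
under those assumptions (deposit32_sketch mirrors the contract of QEMU's
deposit32; everything carrying a _sketch suffix is hypothetical):

    #include <stddef.h>
    #include <stdint.h>

    typedef uint32_t tcg_insn_unit;   /* one 32-bit ARM instruction */

    /* Patch the 24-bit PC-relative field of a B/BL, keeping the cond/opcode bits. */
    static void reloc_pc24_sketch(tcg_insn_unit *code_ptr, const tcg_insn_unit *target)
    {
        ptrdiff_t byte_diff = (const char *)target - (const char *)code_ptr;
        ptrdiff_t offset = (byte_diff - 8) >> 2;   /* PC reads as insn address + 8 */

        *code_ptr = (*code_ptr & ~0xffffffu) | ((uint32_t)offset & 0xffffffu);
    }

    /* deposit32(value, pos, len, field): insert the low 'len' bits of 'field'
       into 'value' at bit 'pos' (the contract assumed for QEMU's bitops helper). */
    static inline uint32_t deposit32_sketch(uint32_t value, int pos, int len, uint32_t field)
    {
        uint32_t mask = ((1u << len) - 1) << pos;
        return (value & ~mask) | ((field << pos) & mask);
    }

    /* Mirrors tcg_out_b_noaddr: overwrite only bits [31:24] (cond:4 | 0x0a = B)
       of the word at code_ptr, leaving any existing 24-bit offset untouched. */
    static void emit_b_noaddr_sketch(tcg_insn_unit *code_ptr, unsigned cond)
    {
        *code_ptr = deposit32_sketch(*code_ptr, 24, 8, (cond << 4) | 0x0a);
    }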
diff --git a/tcg/arm/tcg-target.c b/tcg/arm/tcg-target.c
index 7535175f9c..538ca2aed0 100644
--- a/tcg/arm/tcg-target.c
+++ b/tcg/arm/tcg-target.c
@@ -115,36 +115,18 @@ static const int tcg_target_call_oarg_regs[2] = {
 
 #define TCG_REG_TMP  TCG_REG_R12
 
-static inline void reloc_abs32(void *code_ptr, intptr_t target)
+static inline void reloc_pc24(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
 {
-    *(uint32_t *) code_ptr = target;
+    ptrdiff_t offset = (tcg_ptr_byte_diff(target, code_ptr) - 8) >> 2;
+    *code_ptr = (*code_ptr & ~0xffffff) | (offset & 0xffffff);
 }
 
-static inline void reloc_pc24(void *code_ptr, intptr_t target)
-{
-    uint32_t offset = ((target - ((intptr_t)code_ptr + 8)) >> 2);
-
-    *(uint32_t *) code_ptr = ((*(uint32_t *) code_ptr) & ~0xffffff)
-                             | (offset & 0xffffff);
-}
-
-static void patch_reloc(uint8_t *code_ptr, int type,
+static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                         intptr_t value, intptr_t addend)
 {
-    switch (type) {
-    case R_ARM_ABS32:
-        reloc_abs32(code_ptr, value);
-        break;
-
-    case R_ARM_CALL:
-    case R_ARM_JUMP24:
-    default:
-        tcg_abort();
-
-    case R_ARM_PC24:
-        reloc_pc24(code_ptr, value);
-        break;
-    }
+    assert(type == R_ARM_PC24);
+    assert(addend == 0);
+    reloc_pc24(code_ptr, (tcg_insn_unit *)value);
 }
 
 #define TCG_CT_CONST_ARM  0x100
@@ -379,20 +361,18 @@ static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
 
 static inline void tcg_out_b_noaddr(TCGContext *s, int cond)
 {
-    /* We pay attention here to not modify the branch target by skipping
-       the corresponding bytes. This ensure that caches and memory are
+    /* We pay attention here to not modify the branch target by masking
+       the corresponding bytes.  This ensure that caches and memory are
        kept coherent during retranslation. */
-    s->code_ptr += 3;
-    tcg_out8(s, (cond << 4) | 0x0a);
+    tcg_out32(s, deposit32(*s->code_ptr, 24, 8, (cond << 4) | 0x0a));
 }
 
 static inline void tcg_out_bl_noaddr(TCGContext *s, int cond)
 {
-    /* We pay attention here to not modify the branch target by skipping
-       the corresponding bytes. This ensure that caches and memory are
+    /* We pay attention here to not modify the branch target by masking
+       the corresponding bytes.  This ensure that caches and memory are
        kept coherent during retranslation. */
-    s->code_ptr += 3;
-    tcg_out8(s, (cond << 4) | 0x0b);
+    tcg_out32(s, deposit32(*s->code_ptr, 24, 8, (cond << 4) | 0x0b));
 }
 
 static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
@@ -1010,20 +990,21 @@ static inline void tcg_out_st8(TCGContext *s, int cond,
  * with the code buffer limited to 16MB we wouldn't need the long case.
  * But we also use it for the tail-call to the qemu_ld/st helpers, which does.
  */
-static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
+static inline void tcg_out_goto(TCGContext *s, int cond, tcg_insn_unit *addr)
 {
-    int32_t disp = addr - (tcg_target_long) s->code_ptr;
+    intptr_t addri = (intptr_t)addr;
+    ptrdiff_t disp = tcg_pcrel_diff(s, addr);
 
-    if ((addr & 1) == 0 && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
+    if ((addri & 1) == 0 && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
         tcg_out_b(s, cond, disp);
         return;
     }
 
-    tcg_out_movi32(s, cond, TCG_REG_TMP, addr);
+    tcg_out_movi32(s, cond, TCG_REG_TMP, addri);
     if (use_armv5t_instructions) {
         tcg_out_bx(s, cond, TCG_REG_TMP);
     } else {
-        if (addr & 1) {
+        if (addri & 1) {
             tcg_abort();
         }
         tcg_out_mov_reg(s, cond, TCG_REG_PC, TCG_REG_TMP);
@@ -1032,39 +1013,28 @@ static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
 
 /* The call case is mostly used for helpers - so it's not unreasonable
  * for them to be beyond branch range */
-static inline void tcg_out_call(TCGContext *s, uint32_t addr)
+static void tcg_out_call(TCGContext *s, tcg_insn_unit *addr)
 {
-    int32_t val;
+    intptr_t addri = (intptr_t)addr;
+    ptrdiff_t disp = tcg_pcrel_diff(s, addr);
 
-    val = addr - (tcg_target_long) s->code_ptr;
-    if (val - 8 < 0x02000000 && val - 8 >= -0x02000000) {
-        if (addr & 1) {
+    if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) {
+        if (addri & 1) {
             /* Use BLX if the target is in Thumb mode */
             if (!use_armv5t_instructions) {
                 tcg_abort();
             }
-            tcg_out_blx_imm(s, val);
+            tcg_out_blx_imm(s, disp);
         } else {
-            tcg_out_bl(s, COND_AL, val);
+            tcg_out_bl(s, COND_AL, disp);
         }
     } else if (use_armv7_instructions) {
-        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addr);
+        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
         tcg_out_blx(s, COND_AL, TCG_REG_TMP);
     } else {
         tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 4);
         tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
-        tcg_out32(s, addr);
-    }
-}
-
-static inline void tcg_out_callr(TCGContext *s, int cond, int arg)
-{
-    if (use_armv5t_instructions) {
-        tcg_out_blx(s, cond, arg);
-    } else {
-        tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R14, 0,
-                        TCG_REG_PC, SHIFT_IMM_LSL(0));
-        tcg_out_bx(s, cond, arg);
+        tcg_out32(s, addri);
     }
 }
 
@@ -1073,9 +1043,9 @@ static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
     TCGLabel *l = &s->labels[label_index];
 
     if (l->has_value) {
-        tcg_out_goto(s, cond, l->u.value);
+        tcg_out_goto(s, cond, l->u.value_ptr);
     } else {
-        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 31337);
+        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 0);
         tcg_out_b_noaddr(s, cond);
     }
 }
@@ -1084,7 +1054,7 @@ static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
 /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
  *                                     int mmu_idx, uintptr_t ra)
  */
-static const void * const qemu_ld_helpers[16] = {
+static void * const qemu_ld_helpers[16] = {
     [MO_UB]   = helper_ret_ldub_mmu,
     [MO_SB]   = helper_ret_ldsb_mmu,
 
@@ -1104,7 +1074,7 @@ static const void * const qemu_ld_helpers[16] = {
 /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
  *                                     uintxx_t val, int mmu_idx, uintptr_t ra)
  */
-static const void * const qemu_st_helpers[16] = {
+static void * const qemu_st_helpers[16] = {
     [MO_UB]   = helper_ret_stb_mmu,
     [MO_LEUW] = helper_le_stw_mmu,
     [MO_LEUL] = helper_le_stl_mmu,
@@ -1256,7 +1226,7 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
 static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
                                 TCGReg datalo, TCGReg datahi, TCGReg addrlo,
                                 TCGReg addrhi, int mem_index,
-                                uint8_t *raddr, uint8_t *label_ptr)
+                                tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
 {
     TCGLabelQemuLdst *label = new_ldst_label(s);
 
@@ -1275,9 +1245,9 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
     TCGReg argreg, datalo, datahi;
     TCGMemOp opc = lb->opc;
-    uintptr_t func;
+    void *func;
 
-    reloc_pc24(lb->label_ptr[0], (tcg_target_long)s->code_ptr);
+    reloc_pc24(lb->label_ptr[0], s->code_ptr);
 
     argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0);
     if (TARGET_LONG_BITS == 64) {
@@ -1292,9 +1262,9 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
        icache usage.  For pre-armv6, use the signed helpers since we do
        not have a single insn sign-extend.  */
     if (use_armv6_instructions) {
-        func = (uintptr_t)qemu_ld_helpers[opc & ~MO_SIGN];
+        func = qemu_ld_helpers[opc & ~MO_SIGN];
     } else {
-        func = (uintptr_t)qemu_ld_helpers[opc];
+        func = qemu_ld_helpers[opc];
         if (opc & MO_SIGN) {
             opc = MO_UL;
         }
@@ -1328,7 +1298,7 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
         break;
     }
 
-    tcg_out_goto(s, COND_AL, (tcg_target_long)lb->raddr);
+    tcg_out_goto(s, COND_AL, lb->raddr);
 }
 
 static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
@@ -1336,7 +1306,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     TCGReg argreg, datalo, datahi;
     TCGMemOp opc = lb->opc;
 
-    reloc_pc24(lb->label_ptr[0], (tcg_target_long)s->code_ptr);
+    reloc_pc24(lb->label_ptr[0], s->code_ptr);
 
     argreg = TCG_REG_R0;
     argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
@@ -1368,7 +1338,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);
 
     /* Tail-call to the helper, which will return to the fast path.  */
-    tcg_out_goto(s, COND_AL, (uintptr_t)qemu_st_helpers[opc]);
+    tcg_out_goto(s, COND_AL, qemu_st_helpers[opc]);
 }
 #endif /* SOFTMMU */
 
@@ -1499,7 +1469,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
 #ifdef CONFIG_SOFTMMU
     int mem_index;
     TCGReg addend;
-    uint8_t *label_ptr;
+    tcg_insn_unit *label_ptr;
 #endif
 
     datalo = *args++;
@@ -1628,7 +1598,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
 #ifdef CONFIG_SOFTMMU
     int mem_index;
     TCGReg addend;
-    uint8_t *label_ptr;
+    tcg_insn_unit *label_ptr;
 #endif
 
     datalo = *args++;
@@ -1660,7 +1630,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
 #endif
 }
 
-static uint8_t *tb_ret_addr;
+static tcg_insn_unit *tb_ret_addr;
 
 static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                 const TCGArg *args, const int *const_args)
@@ -1670,51 +1640,21 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     switch (opc) {
     case INDEX_op_exit_tb:
-        if (use_armv7_instructions || check_fit_imm(args[0])) {
-            tcg_out_movi32(s, COND_AL, TCG_REG_R0, args[0]);
-            tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
-        } else {
-            uint8_t *ld_ptr = s->code_ptr;
-            tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
-            tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
-            *ld_ptr = (uint8_t) (s->code_ptr - ld_ptr) - 8;
-            tcg_out32(s, args[0]);
-        }
+        tcg_out_movi32(s, COND_AL, TCG_REG_R0, args[0]);
+        tcg_out_goto(s, COND_AL, tb_ret_addr);
         break;
     case INDEX_op_goto_tb:
         if (s->tb_jmp_offset) {
             /* Direct jump method */
-#if defined(USE_DIRECT_JUMP)
-            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
+            s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
             tcg_out_b_noaddr(s, COND_AL);
-#else
-            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
-            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
-            tcg_out32(s, 0);
-#endif
         } else {
             /* Indirect jump method */
-#if 1
-            c = (int) (s->tb_next + args[0]) - ((int) s->code_ptr + 8);
-            if (c > 0xfff || c < -0xfff) {
-                tcg_out_movi32(s, COND_AL, TCG_REG_R0,
-                                (tcg_target_long) (s->tb_next + args[0]));
-                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
-            } else
-                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, c);
-#else
-            tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
-            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
-            tcg_out32(s, (tcg_target_long) (s->tb_next + args[0]));
-#endif
+            intptr_t ptr = (intptr_t)(s->tb_next + args[0]);
+            tcg_out_movi32(s, COND_AL, TCG_REG_R0, ptr & ~0xfff);
+            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, ptr & 0xfff);
         }
-        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
-        break;
-    case INDEX_op_call:
-        if (const_args[0])
-            tcg_out_call(s, args[0]);
-        else
-            tcg_out_callr(s, COND_AL, args[0]);
+        s->tb_next_offset[args[0]] = tcg_current_code_size(s);
         break;
     case INDEX_op_br:
         tcg_out_goto_label(s, COND_AL, args[0]);
@@ -1745,13 +1685,6 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
         break;
 
-    case INDEX_op_mov_i32:
-        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
-                        args[0], 0, args[1], SHIFT_IMM_LSL(0));
-        break;
-    case INDEX_op_movi_i32:
-        tcg_out_movi32(s, COND_AL, args[0], args[1]);
-        break;
     case INDEX_op_movcond_i32:
         /* Constraints mean that v2 is always in the same register as dest,
          * so we only need to do "if condition passed, move v1 to dest".
@@ -1967,6 +1900,9 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]);
         break;
 
+    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
+    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
+    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
     default:
         tcg_abort();
     }
@@ -1975,12 +1911,8 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
 static const TCGTargetOpDef arm_op_defs[] = {
     { INDEX_op_exit_tb, { } },
     { INDEX_op_goto_tb, { } },
-    { INDEX_op_call, { "ri" } },
     { INDEX_op_br, { } },
 
-    { INDEX_op_mov_i32, { "r", "r" } },
-    { INDEX_op_movi_i32, { "r" } },
-
     { INDEX_op_ld8u_i32, { "r", "r" } },
     { INDEX_op_ld8s_i32, { "r", "r" } },
     { INDEX_op_ld16u_i32, { "r", "r" } },