Diffstat (limited to 'tcg')
 -rw-r--r--  tcg/optimize.c    |   2
 -rw-r--r--  tcg/tcg-op-gvec.c | 189
 -rw-r--r--  tcg/tcg-op.c      | 258
 -rw-r--r--  tcg/tcg.c         | 282
 4 files changed, 388 insertions(+), 343 deletions(-)
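Two allocation entry points are distinguished throughout this patch: tcg_temp_ebb_new_*() yields a TEMP_EBB temp whose value is only usable within one extended basic block, while, as the reworked tcg_temp_new_internal() below suggests, plain tcg_temp_new_*() yields a TEMP_TB temp that stays valid across labels until the end of the translation block. A minimal, illustrative sketch of the contract:

    /* Illustrative only; not part of the patch. */
    TCGv_i32 scratch = tcg_temp_ebb_new_i32(); /* dead once a label/branch ends the EBB */
    TCGv_i32 carried = tcg_temp_new_i32();     /* may carry a value across set_label */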
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 763bca9ea6..ce05989c39 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -190,7 +190,7 @@ static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts)
         } else if (i->kind > ts->kind) {
             if (i->kind == TEMP_GLOBAL) {
                 g = i;
-            } else if (i->kind == TEMP_LOCAL) {
+            } else if (i->kind == TEMP_TB) {
                 l = i;
             }
         }
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
index aacedd3e15..291a65c4bf 100644
--- a/tcg/tcg-op-gvec.c
+++ b/tcg/tcg-op-gvec.c
@@ -116,8 +116,8 @@ void tcg_gen_gvec_2_ool(uint32_t dofs, uint32_t aofs,
     TCGv_ptr a0, a1;
     TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
 
-    a0 = tcg_temp_new_ptr();
-    a1 = tcg_temp_new_ptr();
+    a0 = tcg_temp_ebb_new_ptr();
+    a1 = tcg_temp_ebb_new_ptr();
 
     tcg_gen_addi_ptr(a0, cpu_env, dofs);
     tcg_gen_addi_ptr(a1, cpu_env, aofs);
@@ -137,8 +137,8 @@ void tcg_gen_gvec_2i_ool(uint32_t dofs, uint32_t aofs, TCGv_i64 c,
     TCGv_ptr a0, a1;
     TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
 
-    a0 = tcg_temp_new_ptr();
-    a1 = tcg_temp_new_ptr();
+    a0 = tcg_temp_ebb_new_ptr();
+    a1 = tcg_temp_ebb_new_ptr();
 
     tcg_gen_addi_ptr(a0, cpu_env, dofs);
     tcg_gen_addi_ptr(a1, cpu_env, aofs);
@@ -157,9 +157,9 @@ void tcg_gen_gvec_3_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
     TCGv_ptr a0, a1, a2;
     TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
 
-    a0 = tcg_temp_new_ptr();
-    a1 = tcg_temp_new_ptr();
-    a2 = tcg_temp_new_ptr();
+    a0 = tcg_temp_ebb_new_ptr();
+    a1 = tcg_temp_ebb_new_ptr();
+    a2 = tcg_temp_ebb_new_ptr();
 
     tcg_gen_addi_ptr(a0, cpu_env, dofs);
     tcg_gen_addi_ptr(a1, cpu_env, aofs);
@@ -180,10 +180,10 @@ void tcg_gen_gvec_4_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
     TCGv_ptr a0, a1, a2, a3;
     TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
 
-    a0 = tcg_temp_new_ptr();
-    a1 = tcg_temp_new_ptr();
-    a2 = tcg_temp_new_ptr();
-    a3 = tcg_temp_new_ptr();
+    a0 = tcg_temp_ebb_new_ptr();
+    a1 = tcg_temp_ebb_new_ptr();
+    a2 = tcg_temp_ebb_new_ptr();
+    a3 = tcg_temp_ebb_new_ptr();
 
     tcg_gen_addi_ptr(a0, cpu_env, dofs);
     tcg_gen_addi_ptr(a1, cpu_env, aofs);
@@ -206,11 +206,11 @@ void tcg_gen_gvec_5_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
     TCGv_ptr a0, a1, a2, a3, a4;
     TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
 
-    a0 = tcg_temp_new_ptr();
-    a1 = tcg_temp_new_ptr();
-    a2 = tcg_temp_new_ptr();
-    a3 = tcg_temp_new_ptr();
-    a4 = tcg_temp_new_ptr();
+    a0 = tcg_temp_ebb_new_ptr();
+    a1 = tcg_temp_ebb_new_ptr();
+    a2 = tcg_temp_ebb_new_ptr();
+    a3 = tcg_temp_ebb_new_ptr();
+    a4 = tcg_temp_ebb_new_ptr();
 
     tcg_gen_addi_ptr(a0, cpu_env, dofs);
     tcg_gen_addi_ptr(a1, cpu_env, aofs);
@@ -236,8 +236,8 @@ void tcg_gen_gvec_2_ptr(uint32_t dofs, uint32_t aofs,
     TCGv_ptr a0, a1;
     TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
 
-    a0 = tcg_temp_new_ptr();
-    a1 = tcg_temp_new_ptr();
+    a0 = tcg_temp_ebb_new_ptr();
+    a1 = tcg_temp_ebb_new_ptr();
 
     tcg_gen_addi_ptr(a0, cpu_env, dofs);
     tcg_gen_addi_ptr(a1, cpu_env, aofs);
@@ -257,9 +257,9 @@ void tcg_gen_gvec_3_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
     TCGv_ptr a0, a1, a2;
     TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
 
-    a0 = tcg_temp_new_ptr();
-    a1 = tcg_temp_new_ptr();
-    a2 = tcg_temp_new_ptr();
+    a0 = tcg_temp_ebb_new_ptr();
+    a1 = tcg_temp_ebb_new_ptr();
+    a2 = tcg_temp_ebb_new_ptr();
 
     tcg_gen_addi_ptr(a0, cpu_env, dofs);
     tcg_gen_addi_ptr(a1, cpu_env, aofs);
@@ -282,10 +282,10 @@ void tcg_gen_gvec_4_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
     TCGv_ptr a0, a1, a2, a3;
     TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
 
-    a0 = tcg_temp_new_ptr();
-    a1 = tcg_temp_new_ptr();
-    a2 = tcg_temp_new_ptr();
-    a3 = tcg_temp_new_ptr();
+    a0 = tcg_temp_ebb_new_ptr();
+    a1 = tcg_temp_ebb_new_ptr();
+    a2 = tcg_temp_ebb_new_ptr();
+    a3 = tcg_temp_ebb_new_ptr();
 
     tcg_gen_addi_ptr(a0, cpu_env, dofs);
     tcg_gen_addi_ptr(a1, cpu_env, aofs);
@@ -310,11 +310,11 @@ void tcg_gen_gvec_5_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
     TCGv_ptr a0, a1, a2, a3, a4;
     TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));
 
-    a0 = tcg_temp_new_ptr();
-    a1 = tcg_temp_new_ptr();
-    a2 = tcg_temp_new_ptr();
-    a3 = tcg_temp_new_ptr();
-    a4 = tcg_temp_new_ptr();
+    a0 = tcg_temp_ebb_new_ptr();
+    a1 = tcg_temp_ebb_new_ptr();
+    a2 = tcg_temp_ebb_new_ptr();
+    a3 = tcg_temp_ebb_new_ptr();
+    a4 = tcg_temp_ebb_new_ptr();
 
     tcg_gen_addi_ptr(a0, cpu_env, dofs);
     tcg_gen_addi_ptr(a1, cpu_env, aofs);
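Every *_ool/*_ptr expander above follows the same shape: build env-relative pointers to the operand slots, encode oprsz/maxsz/data into a descriptor constant, call the helper, then free the pointer temps. A sketch of the pattern with a hypothetical two-operand helper ("gen_helper_foo" is a stand-in, not a real helper):

    static void expand_foo_ool(uint32_t dofs, uint32_t aofs,
                               uint32_t oprsz, uint32_t maxsz, int32_t data)
    {
        TCGv_ptr a0 = tcg_temp_ebb_new_ptr();
        TCGv_ptr a1 = tcg_temp_ebb_new_ptr();
        TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data));

        tcg_gen_addi_ptr(a0, cpu_env, dofs);    /* &env + destination offset */
        tcg_gen_addi_ptr(a1, cpu_env, aofs);    /* &env + source offset */
        gen_helper_foo(a0, a1, desc);           /* hypothetical ool helper */

        tcg_temp_free_ptr(a0);
        tcg_temp_free_ptr(a1);
    }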
@@ -575,16 +575,16 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
            be simple enough.  */
         if (TCG_TARGET_REG_BITS == 64
             && (vece != MO_32 || !check_size_impl(oprsz, 4))) {
-            t_64 = tcg_temp_new_i64();
+            t_64 = tcg_temp_ebb_new_i64();
             tcg_gen_extu_i32_i64(t_64, in_32);
             tcg_gen_dup_i64(vece, t_64, t_64);
         } else {
-            t_32 = tcg_temp_new_i32();
+            t_32 = tcg_temp_ebb_new_i32();
             tcg_gen_dup_i32(vece, t_32, in_32);
         }
     } else if (in_64) {
         /* We are given a 64-bit variable input. */
-        t_64 = tcg_temp_new_i64();
+        t_64 = tcg_temp_ebb_new_i64();
         tcg_gen_dup_i64(vece, t_64, in_64);
     } else {
         /* We are given a constant input. */
@@ -619,7 +619,7 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
     }
 
     /* Otherwise implement out of line. */
-    t_ptr = tcg_temp_new_ptr();
+    t_ptr = tcg_temp_ebb_new_ptr();
     tcg_gen_addi_ptr(t_ptr, cpu_env, dofs);
 
     /*
@@ -629,13 +629,13 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
      * stores through to memset.
      */
     if (oprsz == maxsz && vece == MO_8) {
-        TCGv_ptr t_size = tcg_const_ptr(oprsz);
+        TCGv_ptr t_size = tcg_constant_ptr(oprsz);
         TCGv_i32 t_val;
 
         if (in_32) {
             t_val = in_32;
         } else if (in_64) {
-            t_val = tcg_temp_new_i32();
+            t_val = tcg_temp_ebb_new_i32();
             tcg_gen_extrl_i64_i32(t_val, in_64);
         } else {
             t_val = tcg_constant_i32(in_c);
@@ -645,7 +645,6 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
         if (in_64) {
             tcg_temp_free_i32(t_val);
         }
-        tcg_temp_free_ptr(t_size);
         tcg_temp_free_ptr(t_ptr);
         return;
     }
@@ -670,7 +669,7 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
     if (in_32) {
         fns[vece](t_ptr, t_desc, in_32);
     } else if (in_64) {
-        t_32 = tcg_temp_new_i32();
+        t_32 = tcg_temp_ebb_new_i32();
         tcg_gen_extrl_i64_i32(t_32, in_64);
         fns[vece](t_ptr, t_desc, t_32);
         tcg_temp_free_i32(t_32);
@@ -1734,7 +1733,7 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
             do_dup_store(type, dofs, oprsz, maxsz, t_vec);
             tcg_temp_free_vec(t_vec);
         } else if (vece <= MO_32) {
-            TCGv_i32 in = tcg_temp_new_i32();
+            TCGv_i32 in = tcg_temp_ebb_new_i32();
             switch (vece) {
             case MO_8:
                 tcg_gen_ld8u_i32(in, cpu_env, aofs);
@@ -1749,7 +1748,7 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
             do_dup(vece, dofs, oprsz, maxsz, in, NULL, 0);
             tcg_temp_free_i32(in);
         } else {
-            TCGv_i64 in = tcg_temp_new_i64();
+            TCGv_i64 in = tcg_temp_ebb_new_i64();
             tcg_gen_ld_i64(in, cpu_env, aofs);
             do_dup(vece, dofs, oprsz, maxsz, NULL, in, 0);
             tcg_temp_free_i64(in);
@@ -1768,8 +1767,8 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
             }
             tcg_temp_free_vec(in);
         } else {
-            TCGv_i64 in0 = tcg_temp_new_i64();
-            TCGv_i64 in1 = tcg_temp_new_i64();
+            TCGv_i64 in0 = tcg_temp_ebb_new_i64();
+            TCGv_i64 in1 = tcg_temp_ebb_new_i64();
 
             tcg_gen_ld_i64(in0, cpu_env, aofs);
             tcg_gen_ld_i64(in1, cpu_env, aofs + 8);
@@ -1814,7 +1813,7 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
             int j;
 
             for (j = 0; j < 4; ++j) {
-                in[j] = tcg_temp_new_i64();
+                in[j] = tcg_temp_ebb_new_i64();
                 tcg_gen_ld_i64(in[j], cpu_env, aofs + j * 8);
             }
             for (i = (aofs == dofs) * 32; i < oprsz; i += 32) {
@@ -1859,9 +1858,9 @@ void tcg_gen_gvec_not(unsigned vece, uint32_t dofs, uint32_t aofs,
    the 64-bit operation.  */
 static void gen_addv_mask(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 m)
 {
-    TCGv_i64 t1 = tcg_temp_new_i64();
-    TCGv_i64 t2 = tcg_temp_new_i64();
-    TCGv_i64 t3 = tcg_temp_new_i64();
+    TCGv_i64 t1 = tcg_temp_ebb_new_i64();
+    TCGv_i64 t2 = tcg_temp_ebb_new_i64();
+    TCGv_i64 t3 = tcg_temp_ebb_new_i64();
 
     tcg_gen_andc_i64(t1, a, m);
     tcg_gen_andc_i64(t2, b, m);
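gen_addv_mask performs lane-wise addition inside one 64-bit register: each lane's most significant bit is cleared before the add so no carry can cross a lane boundary, and the MSBs are then patched back in with xor. The same computation in plain host C (illustrative only; m holds the MSB of every lane, e.g. dup_const(MO_8, 0x80)):

    static uint64_t addv_mask(uint64_t a, uint64_t b, uint64_t m)
    {
        uint64_t t1 = a & ~m;          /* a with lane MSBs cleared */
        uint64_t t2 = b & ~m;          /* b with lane MSBs cleared */
        uint64_t t3 = (a ^ b) & m;     /* per-lane MSB sum, modulo 2 */
        return (t1 + t2) ^ t3;         /* carries stop at each cleared MSB */
    }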
@@ -1884,9 +1883,9 @@ void tcg_gen_vec_add8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
 void tcg_gen_vec_add8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
 {
     TCGv_i32 m = tcg_constant_i32((int32_t)dup_const(MO_8, 0x80));
-    TCGv_i32 t1 = tcg_temp_new_i32();
-    TCGv_i32 t2 = tcg_temp_new_i32();
-    TCGv_i32 t3 = tcg_temp_new_i32();
+    TCGv_i32 t1 = tcg_temp_ebb_new_i32();
+    TCGv_i32 t2 = tcg_temp_ebb_new_i32();
+    TCGv_i32 t3 = tcg_temp_ebb_new_i32();
 
     tcg_gen_andc_i32(t1, a, m);
     tcg_gen_andc_i32(t2, b, m);
@@ -1908,8 +1907,8 @@ void tcg_gen_vec_add16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
 
 void tcg_gen_vec_add16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
 {
-    TCGv_i32 t1 = tcg_temp_new_i32();
-    TCGv_i32 t2 = tcg_temp_new_i32();
+    TCGv_i32 t1 = tcg_temp_ebb_new_i32();
+    TCGv_i32 t2 = tcg_temp_ebb_new_i32();
 
     tcg_gen_andi_i32(t1, a, ~0xffff);
     tcg_gen_add_i32(t2, a, b);
@@ -1922,8 +1921,8 @@ void tcg_gen_vec_add16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
 
 void tcg_gen_vec_add32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
 {
-    TCGv_i64 t1 = tcg_temp_new_i64();
-    TCGv_i64 t2 = tcg_temp_new_i64();
+    TCGv_i64 t1 = tcg_temp_ebb_new_i64();
+    TCGv_i64 t2 = tcg_temp_ebb_new_i64();
 
     tcg_gen_andi_i64(t1, a, ~0xffffffffull);
     tcg_gen_add_i64(t2, a, b);
@@ -2042,9 +2041,9 @@ void tcg_gen_gvec_subs(unsigned vece, uint32_t dofs, uint32_t aofs,
    Compare gen_addv_mask above.  */
 static void gen_subv_mask(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 m)
 {
-    TCGv_i64 t1 = tcg_temp_new_i64();
-    TCGv_i64 t2 = tcg_temp_new_i64();
-    TCGv_i64 t3 = tcg_temp_new_i64();
+    TCGv_i64 t1 = tcg_temp_ebb_new_i64();
+    TCGv_i64 t2 = tcg_temp_ebb_new_i64();
+    TCGv_i64 t3 = tcg_temp_ebb_new_i64();
 
     tcg_gen_or_i64(t1, a, m);
     tcg_gen_andc_i64(t2, b, m);
@@ -2067,9 +2066,9 @@ void tcg_gen_vec_sub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
 void tcg_gen_vec_sub8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
 {
     TCGv_i32 m = tcg_constant_i32((int32_t)dup_const(MO_8, 0x80));
-    TCGv_i32 t1 = tcg_temp_new_i32();
-    TCGv_i32 t2 = tcg_temp_new_i32();
-    TCGv_i32 t3 = tcg_temp_new_i32();
+    TCGv_i32 t1 = tcg_temp_ebb_new_i32();
+    TCGv_i32 t2 = tcg_temp_ebb_new_i32();
+    TCGv_i32 t3 = tcg_temp_ebb_new_i32();
 
     tcg_gen_or_i32(t1, a, m);
     tcg_gen_andc_i32(t2, b, m);
@@ -2091,8 +2090,8 @@ void tcg_gen_vec_sub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
 
 void tcg_gen_vec_sub16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
 {
-    TCGv_i32 t1 = tcg_temp_new_i32();
-    TCGv_i32 t2 = tcg_temp_new_i32();
+    TCGv_i32 t1 = tcg_temp_ebb_new_i32();
+    TCGv_i32 t2 = tcg_temp_ebb_new_i32();
 
     tcg_gen_andi_i32(t1, b, ~0xffff);
     tcg_gen_sub_i32(t2, a, b);
@@ -2105,8 +2104,8 @@ void tcg_gen_vec_sub16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
 
 void tcg_gen_vec_sub32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
 {
-    TCGv_i64 t1 = tcg_temp_new_i64();
-    TCGv_i64 t2 = tcg_temp_new_i64();
+    TCGv_i64 t1 = tcg_temp_ebb_new_i64();
+    TCGv_i64 t2 = tcg_temp_ebb_new_i64();
 
     tcg_gen_andi_i64(t1, b, ~0xffffffffull);
     tcg_gen_sub_i64(t2, a, b);
@@ -2467,8 +2466,8 @@ void tcg_gen_gvec_umax(unsigned vece, uint32_t dofs, uint32_t aofs,
    Compare gen_subv_mask above.  */
 static void gen_negv_mask(TCGv_i64 d, TCGv_i64 b, TCGv_i64 m)
 {
-    TCGv_i64 t2 = tcg_temp_new_i64();
-    TCGv_i64 t3 = tcg_temp_new_i64();
+    TCGv_i64 t2 = tcg_temp_ebb_new_i64();
+    TCGv_i64 t3 = tcg_temp_ebb_new_i64();
 
     tcg_gen_andc_i64(t3, m, b);
     tcg_gen_andc_i64(t2, b, m);
@@ -2493,8 +2492,8 @@ void tcg_gen_vec_neg16_i64(TCGv_i64 d, TCGv_i64 b)
 
 void tcg_gen_vec_neg32_i64(TCGv_i64 d, TCGv_i64 b)
 {
-    TCGv_i64 t1 = tcg_temp_new_i64();
-    TCGv_i64 t2 = tcg_temp_new_i64();
+    TCGv_i64 t1 = tcg_temp_ebb_new_i64();
+    TCGv_i64 t2 = tcg_temp_ebb_new_i64();
 
     tcg_gen_andi_i64(t1, b, ~0xffffffffull);
     tcg_gen_neg_i64(t2, b);
@@ -2539,7 +2538,7 @@ void tcg_gen_gvec_neg(unsigned vece, uint32_t dofs, uint32_t aofs,
 
 static void gen_absv_mask(TCGv_i64 d, TCGv_i64 b, unsigned vece)
 {
-    TCGv_i64 t = tcg_temp_new_i64();
+    TCGv_i64 t = tcg_temp_ebb_new_i64();
     int nbit = 8 << vece;
 
     /* Create -1 for each negative element. */
@@ -2748,7 +2747,7 @@ static const GVecGen2s gop_ands = {
 void tcg_gen_gvec_ands(unsigned vece, uint32_t dofs, uint32_t aofs,
                        TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
 {
-    TCGv_i64 tmp = tcg_temp_new_i64();
+    TCGv_i64 tmp = tcg_temp_ebb_new_i64();
     tcg_gen_dup_i64(vece, tmp, c);
     tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ands);
     tcg_temp_free_i64(tmp);
@@ -2772,7 +2771,7 @@ static const GVecGen2s gop_xors = {
 void tcg_gen_gvec_xors(unsigned vece, uint32_t dofs, uint32_t aofs,
                        TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
 {
-    TCGv_i64 tmp = tcg_temp_new_i64();
+    TCGv_i64 tmp = tcg_temp_ebb_new_i64();
     tcg_gen_dup_i64(vece, tmp, c);
     tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_xors);
     tcg_temp_free_i64(tmp);
@@ -2796,7 +2795,7 @@ static const GVecGen2s gop_ors = {
 void tcg_gen_gvec_ors(unsigned vece, uint32_t dofs, uint32_t aofs,
                       TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
 {
-    TCGv_i64 tmp = tcg_temp_new_i64();
+    TCGv_i64 tmp = tcg_temp_ebb_new_i64();
     tcg_gen_dup_i64(vece, tmp, c);
     tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ors);
     tcg_temp_free_i64(tmp);
@@ -2943,7 +2942,7 @@ void tcg_gen_vec_sar8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
 {
     uint64_t s_mask = dup_const(MO_8, 0x80 >> c);
     uint64_t c_mask = dup_const(MO_8, 0xff >> c);
-    TCGv_i64 s = tcg_temp_new_i64();
+    TCGv_i64 s = tcg_temp_ebb_new_i64();
 
     tcg_gen_shri_i64(d, a, c);
     tcg_gen_andi_i64(s, d, s_mask);  /* isolate (shifted) sign bit */
@@ -2957,7 +2956,7 @@ void tcg_gen_vec_sar16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
 {
     uint64_t s_mask = dup_const(MO_16, 0x8000 >> c);
     uint64_t c_mask = dup_const(MO_16, 0xffff >> c);
-    TCGv_i64 s = tcg_temp_new_i64();
+    TCGv_i64 s = tcg_temp_ebb_new_i64();
 
     tcg_gen_shri_i64(d, a, c);
     tcg_gen_andi_i64(s, d, s_mask);  /* isolate (shifted) sign bit */
@@ -2971,7 +2970,7 @@ void tcg_gen_vec_sar8i_i32(TCGv_i32 d, TCGv_i32 a, int32_t c)
 {
     uint32_t s_mask = dup_const(MO_8, 0x80 >> c);
     uint32_t c_mask = dup_const(MO_8, 0xff >> c);
-    TCGv_i32 s = tcg_temp_new_i32();
+    TCGv_i32 s = tcg_temp_ebb_new_i32();
 
     tcg_gen_shri_i32(d, a, c);
     tcg_gen_andi_i32(s, d, s_mask);  /* isolate (shifted) sign bit */
@@ -2985,7 +2984,7 @@ void tcg_gen_vec_sar16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t c)
 {
     uint32_t s_mask = dup_const(MO_16, 0x8000 >> c);
     uint32_t c_mask = dup_const(MO_16, 0xffff >> c);
-    TCGv_i32 s = tcg_temp_new_i32();
+    TCGv_i32 s = tcg_temp_ebb_new_i32();
 
     tcg_gen_shri_i32(d, a, c);
     tcg_gen_andi_i32(s, d, s_mask);  /* isolate (shifted) sign bit */
@@ -3179,7 +3178,7 @@ do_gvec_shifts(unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i32 shift,
         TCGv_vec v_shift = tcg_temp_new_vec(type);
 
         if (vece == MO_64) {
-            TCGv_i64 sh64 = tcg_temp_new_i64();
+            TCGv_i64 sh64 = tcg_temp_ebb_new_i64();
             tcg_gen_extu_i32_i64(sh64, shift);
             tcg_gen_dup_i64_vec(MO_64, v_shift, sh64);
             tcg_temp_free_i64(sh64);
@@ -3220,14 +3219,14 @@ do_gvec_shifts(unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i32 shift,
     if (vece == MO_32 && check_size_impl(oprsz, 4)) {
         expand_2s_i32(dofs, aofs, oprsz, shift, false, g->fni4);
     } else if (vece == MO_64 && check_size_impl(oprsz, 8)) {
-        TCGv_i64 sh64 = tcg_temp_new_i64();
+        TCGv_i64 sh64 = tcg_temp_ebb_new_i64();
         tcg_gen_extu_i32_i64(sh64, shift);
         expand_2s_i64(dofs, aofs, oprsz, sh64, false, g->fni8);
         tcg_temp_free_i64(sh64);
     } else {
-        TCGv_ptr a0 = tcg_temp_new_ptr();
-        TCGv_ptr a1 = tcg_temp_new_ptr();
-        TCGv_i32 desc = tcg_temp_new_i32();
+        TCGv_ptr a0 = tcg_temp_ebb_new_ptr();
+        TCGv_ptr a1 = tcg_temp_ebb_new_ptr();
+        TCGv_i32 desc = tcg_temp_ebb_new_i32();
 
         tcg_gen_shli_i32(desc, shift, SIMD_DATA_SHIFT);
         tcg_gen_ori_i32(desc, desc, simd_desc(oprsz, maxsz, 0));
@@ -3359,7 +3358,7 @@ static void tcg_gen_shlv_mod_vec(unsigned vece, TCGv_vec d,
 
 static void tcg_gen_shl_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
 {
-    TCGv_i32 t = tcg_temp_new_i32();
+    TCGv_i32 t = tcg_temp_ebb_new_i32();
 
     tcg_gen_andi_i32(t, b, 31);
     tcg_gen_shl_i32(d, a, t);
@@ -3368,7 +3367,7 @@ static void tcg_gen_shl_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
 
 static void tcg_gen_shl_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
 {
-    TCGv_i64 t = tcg_temp_new_i64();
+    TCGv_i64 t = tcg_temp_ebb_new_i64();
 
     tcg_gen_andi_i64(t, b, 63);
     tcg_gen_shl_i64(d, a, t);
@@ -3422,7 +3421,7 @@ static void tcg_gen_shrv_mod_vec(unsigned vece, TCGv_vec d,
 
 static void tcg_gen_shr_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
 {
-    TCGv_i32 t = tcg_temp_new_i32();
+    TCGv_i32 t = tcg_temp_ebb_new_i32();
 
     tcg_gen_andi_i32(t, b, 31);
     tcg_gen_shr_i32(d, a, t);
@@ -3431,7 +3430,7 @@ static void tcg_gen_shr_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
 
 static void tcg_gen_shr_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
 {
-    TCGv_i64 t = tcg_temp_new_i64();
+    TCGv_i64 t = tcg_temp_ebb_new_i64();
 
     tcg_gen_andi_i64(t, b, 63);
     tcg_gen_shr_i64(d, a, t);
@@ -3485,7 +3484,7 @@ static void tcg_gen_sarv_mod_vec(unsigned vece, TCGv_vec d,
 
 static void tcg_gen_sar_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
 {
-    TCGv_i32 t = tcg_temp_new_i32();
+    TCGv_i32 t = tcg_temp_ebb_new_i32();
 
     tcg_gen_andi_i32(t, b, 31);
     tcg_gen_sar_i32(d, a, t);
@@ -3494,7 +3493,7 @@ static void tcg_gen_sar_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
 
 static void tcg_gen_sar_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
 {
-    TCGv_i64 t = tcg_temp_new_i64();
+    TCGv_i64 t = tcg_temp_ebb_new_i64();
 
     tcg_gen_andi_i64(t, b, 63);
     tcg_gen_sar_i64(d, a, t);
@@ -3548,7 +3547,7 @@ static void tcg_gen_rotlv_mod_vec(unsigned vece, TCGv_vec d,
 
 static void tcg_gen_rotl_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
 {
-    TCGv_i32 t = tcg_temp_new_i32();
+    TCGv_i32 t = tcg_temp_ebb_new_i32();
 
     tcg_gen_andi_i32(t, b, 31);
     tcg_gen_rotl_i32(d, a, t);
@@ -3557,7 +3556,7 @@ static void tcg_gen_rotl_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
 
 static void tcg_gen_rotl_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
 {
-    TCGv_i64 t = tcg_temp_new_i64();
+    TCGv_i64 t = tcg_temp_ebb_new_i64();
 
     tcg_gen_andi_i64(t, b, 63);
     tcg_gen_rotl_i64(d, a, t);
@@ -3607,7 +3606,7 @@ static void tcg_gen_rotrv_mod_vec(unsigned vece, TCGv_vec d,
 
 static void tcg_gen_rotr_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
 {
-    TCGv_i32 t = tcg_temp_new_i32();
+    TCGv_i32 t = tcg_temp_ebb_new_i32();
 
     tcg_gen_andi_i32(t, b, 31);
     tcg_gen_rotr_i32(d, a, t);
@@ -3616,7 +3615,7 @@ static void tcg_gen_rotr_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
 
 static void tcg_gen_rotr_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
 {
-    TCGv_i64 t = tcg_temp_new_i64();
+    TCGv_i64 t = tcg_temp_ebb_new_i64();
 
     tcg_gen_andi_i64(t, b, 63);
     tcg_gen_rotr_i64(d, a, t);
@@ -3657,8 +3656,8 @@ void tcg_gen_gvec_rotrv(unsigned vece, uint32_t dofs, uint32_t aofs,
 static void expand_cmp_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                            uint32_t oprsz, TCGCond cond)
 {
-    TCGv_i32 t0 = tcg_temp_new_i32();
-    TCGv_i32 t1 = tcg_temp_new_i32();
+    TCGv_i32 t0 = tcg_temp_ebb_new_i32();
+    TCGv_i32 t1 = tcg_temp_ebb_new_i32();
     uint32_t i;
 
     for (i = 0; i < oprsz; i += 4) {
@@ -3675,8 +3674,8 @@ static void expand_cmp_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
 static void expand_cmp_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                            uint32_t oprsz, TCGCond cond)
 {
-    TCGv_i64 t0 = tcg_temp_new_i64();
-    TCGv_i64 t1 = tcg_temp_new_i64();
+    TCGv_i64 t0 = tcg_temp_ebb_new_i64();
+    TCGv_i64 t1 = tcg_temp_ebb_new_i64();
     uint32_t i;
 
     for (i = 0; i < oprsz; i += 8) {
@@ -3822,7 +3821,7 @@ void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
 
 static void tcg_gen_bitsel_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 c)
 {
-    TCGv_i64 t = tcg_temp_new_i64();
+    TCGv_i64 t = tcg_temp_ebb_new_i64();
 
     tcg_gen_and_i64(t, b, a);
     tcg_gen_andc_i64(d, c, a);
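The tcg_gen_bitsel_i64 fallback at the end of this file computes a bitwise select, d = (b & a) | (c & ~a): where the mask a has a bit set, the result takes that bit from b, elsewhere from c. The same thing as a plain-C sketch:

    static uint64_t bitsel64(uint64_t a, uint64_t b, uint64_t c)
    {
        return (b & a) | (c & ~a);   /* matches the and/andc/or sequence */
    }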
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index c581ae77c4..f2269a1b91 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -264,7 +264,7 @@ void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
     if (TCG_TARGET_HAS_div_i32) {
         tcg_gen_op3_i32(INDEX_op_div_i32, ret, arg1, arg2);
     } else if (TCG_TARGET_HAS_div2_i32) {
-        TCGv_i32 t0 = tcg_temp_new_i32();
+        TCGv_i32 t0 = tcg_temp_ebb_new_i32();
         tcg_gen_sari_i32(t0, arg1, 31);
         tcg_gen_op5_i32(INDEX_op_div2_i32, ret, t0, arg1, t0, arg2);
         tcg_temp_free_i32(t0);
@@ -278,13 +278,13 @@ void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
     if (TCG_TARGET_HAS_rem_i32) {
         tcg_gen_op3_i32(INDEX_op_rem_i32, ret, arg1, arg2);
     } else if (TCG_TARGET_HAS_div_i32) {
-        TCGv_i32 t0 = tcg_temp_new_i32();
+        TCGv_i32 t0 = tcg_temp_ebb_new_i32();
         tcg_gen_op3_i32(INDEX_op_div_i32, t0, arg1, arg2);
         tcg_gen_mul_i32(t0, t0, arg2);
         tcg_gen_sub_i32(ret, arg1, t0);
         tcg_temp_free_i32(t0);
     } else if (TCG_TARGET_HAS_div2_i32) {
-        TCGv_i32 t0 = tcg_temp_new_i32();
+        TCGv_i32 t0 = tcg_temp_ebb_new_i32();
         tcg_gen_sari_i32(t0, arg1, 31);
         tcg_gen_op5_i32(INDEX_op_div2_i32, t0, ret, arg1, t0, arg2);
         tcg_temp_free_i32(t0);
@@ -298,7 +298,7 @@ void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
     if (TCG_TARGET_HAS_div_i32) {
         tcg_gen_op3_i32(INDEX_op_divu_i32, ret, arg1, arg2);
     } else if (TCG_TARGET_HAS_div2_i32) {
-        TCGv_i32 t0 = tcg_temp_new_i32();
+        TCGv_i32 t0 = tcg_temp_ebb_new_i32();
         tcg_gen_movi_i32(t0, 0);
         tcg_gen_op5_i32(INDEX_op_divu2_i32, ret, t0, arg1, t0, arg2);
         tcg_temp_free_i32(t0);
@@ -312,13 +312,13 @@ void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
     if (TCG_TARGET_HAS_rem_i32) {
         tcg_gen_op3_i32(INDEX_op_remu_i32, ret, arg1, arg2);
     } else if (TCG_TARGET_HAS_div_i32) {
-        TCGv_i32 t0 = tcg_temp_new_i32();
+        TCGv_i32 t0 = tcg_temp_ebb_new_i32();
         tcg_gen_op3_i32(INDEX_op_divu_i32, t0, arg1, arg2);
         tcg_gen_mul_i32(t0, t0, arg2);
         tcg_gen_sub_i32(ret, arg1, t0);
         tcg_temp_free_i32(t0);
     } else if (TCG_TARGET_HAS_div2_i32) {
-        TCGv_i32 t0 = tcg_temp_new_i32();
+        TCGv_i32 t0 = tcg_temp_ebb_new_i32();
         tcg_gen_movi_i32(t0, 0);
         tcg_gen_op5_i32(INDEX_op_divu2_i32, t0, ret, arg1, t0, arg2);
         tcg_temp_free_i32(t0);
@@ -332,7 +332,7 @@ void tcg_gen_andc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
     if (TCG_TARGET_HAS_andc_i32) {
         tcg_gen_op3_i32(INDEX_op_andc_i32, ret, arg1, arg2);
     } else {
-        TCGv_i32 t0 = tcg_temp_new_i32();
+        TCGv_i32 t0 = tcg_temp_ebb_new_i32();
         tcg_gen_not_i32(t0, arg2);
         tcg_gen_and_i32(ret, arg1, t0);
         tcg_temp_free_i32(t0);
@@ -374,7 +374,7 @@ void tcg_gen_orc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
     if (TCG_TARGET_HAS_orc_i32) {
         tcg_gen_op3_i32(INDEX_op_orc_i32, ret, arg1, arg2);
     } else {
-        TCGv_i32 t0 = tcg_temp_new_i32();
+        TCGv_i32 t0 = tcg_temp_ebb_new_i32();
         tcg_gen_not_i32(t0, arg2);
         tcg_gen_or_i32(ret, arg1, t0);
         tcg_temp_free_i32(t0);
@@ -386,8 +386,8 @@ void tcg_gen_clz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
     if (TCG_TARGET_HAS_clz_i32) {
         tcg_gen_op3_i32(INDEX_op_clz_i32, ret, arg1, arg2);
     } else if (TCG_TARGET_HAS_clz_i64) {
-        TCGv_i64 t1 = tcg_temp_new_i64();
-        TCGv_i64 t2 = tcg_temp_new_i64();
+        TCGv_i64 t1 = tcg_temp_ebb_new_i64();
+        TCGv_i64 t2 = tcg_temp_ebb_new_i64();
         tcg_gen_extu_i32_i64(t1, arg1);
         tcg_gen_extu_i32_i64(t2, arg2);
         tcg_gen_addi_i64(t2, t2, 32);
@@ -411,8 +411,8 @@ void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
     if (TCG_TARGET_HAS_ctz_i32) {
         tcg_gen_op3_i32(INDEX_op_ctz_i32, ret, arg1, arg2);
     } else if (TCG_TARGET_HAS_ctz_i64) {
-        TCGv_i64 t1 = tcg_temp_new_i64();
-        TCGv_i64 t2 = tcg_temp_new_i64();
+        TCGv_i64 t1 = tcg_temp_ebb_new_i64();
+        TCGv_i64 t2 = tcg_temp_ebb_new_i64();
         tcg_gen_extu_i32_i64(t1, arg1);
         tcg_gen_extu_i32_i64(t2, arg2);
         tcg_gen_ctz_i64(t1, t1, t2);
@@ -423,7 +423,7 @@ void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
                || TCG_TARGET_HAS_ctpop_i64
                || TCG_TARGET_HAS_clz_i32
                || TCG_TARGET_HAS_clz_i64) {
-        TCGv_i32 z, t = tcg_temp_new_i32();
+        TCGv_i32 z, t = tcg_temp_ebb_new_i32();
 
         if (TCG_TARGET_HAS_ctpop_i32 || TCG_TARGET_HAS_ctpop_i64) {
             tcg_gen_subi_i32(t, arg1, 1);
@@ -448,7 +448,7 @@ void tcg_gen_ctzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
 {
     if (!TCG_TARGET_HAS_ctz_i32 && TCG_TARGET_HAS_ctpop_i32 && arg2 == 32) {
         /* This equivalence has the advantage of not requiring a fixup.  */
-        TCGv_i32 t = tcg_temp_new_i32();
+        TCGv_i32 t = tcg_temp_ebb_new_i32();
         tcg_gen_subi_i32(t, arg1, 1);
         tcg_gen_andc_i32(t, t, arg1);
         tcg_gen_ctpop_i32(ret, t);
@@ -461,7 +461,7 @@ void tcg_gen_ctzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
 void tcg_gen_clrsb_i32(TCGv_i32 ret, TCGv_i32 arg)
 {
     if (TCG_TARGET_HAS_clz_i32) {
-        TCGv_i32 t = tcg_temp_new_i32();
+        TCGv_i32 t = tcg_temp_ebb_new_i32();
         tcg_gen_sari_i32(t, arg, 31);
         tcg_gen_xor_i32(t, t, arg);
         tcg_gen_clzi_i32(t, t, 32);
@@ -477,7 +477,7 @@ void tcg_gen_ctpop_i32(TCGv_i32 ret, TCGv_i32 arg1)
     if (TCG_TARGET_HAS_ctpop_i32) {
         tcg_gen_op2_i32(INDEX_op_ctpop_i32, ret, arg1);
     } else if (TCG_TARGET_HAS_ctpop_i64) {
-        TCGv_i64 t = tcg_temp_new_i64();
+        TCGv_i64 t = tcg_temp_ebb_new_i64();
         tcg_gen_extu_i32_i64(t, arg1);
         tcg_gen_ctpop_i64(t, t);
         tcg_gen_extrl_i64_i32(ret, t);
@@ -494,8 +494,8 @@ void tcg_gen_rotl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
     } else {
         TCGv_i32 t0, t1;
 
-        t0 = tcg_temp_new_i32();
-        t1 = tcg_temp_new_i32();
+        t0 = tcg_temp_ebb_new_i32();
+        t1 = tcg_temp_ebb_new_i32();
         tcg_gen_shl_i32(t0, arg1, arg2);
         tcg_gen_subfi_i32(t1, 32, arg2);
         tcg_gen_shr_i32(t1, arg1, t1);
@@ -515,8 +515,8 @@ void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
         tcg_gen_rotl_i32(ret, arg1, tcg_constant_i32(arg2));
     } else {
         TCGv_i32 t0, t1;
-        t0 = tcg_temp_new_i32();
-        t1 = tcg_temp_new_i32();
+        t0 = tcg_temp_ebb_new_i32();
+        t1 = tcg_temp_ebb_new_i32();
         tcg_gen_shli_i32(t0, arg1, arg2);
         tcg_gen_shri_i32(t1, arg1, 32 - arg2);
         tcg_gen_or_i32(ret, t0, t1);
@@ -532,8 +532,8 @@ void tcg_gen_rotr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
     } else {
         TCGv_i32 t0, t1;
 
-        t0 = tcg_temp_new_i32();
-        t1 = tcg_temp_new_i32();
+        t0 = tcg_temp_ebb_new_i32();
+        t1 = tcg_temp_ebb_new_i32();
         tcg_gen_shr_i32(t0, arg1, arg2);
         tcg_gen_subfi_i32(t1, 32, arg2);
         tcg_gen_shl_i32(t1, arg1, t1);
@@ -574,7 +574,7 @@ void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2,
         return;
     }
 
-    t1 = tcg_temp_new_i32();
+    t1 = tcg_temp_ebb_new_i32();
 
     if (TCG_TARGET_HAS_extract2_i32) {
         if (ofs + len == 32) {
@@ -801,7 +801,7 @@ void tcg_gen_extract2_i32(TCGv_i32 ret, TCGv_i32 al, TCGv_i32 ah,
     } else if (TCG_TARGET_HAS_extract2_i32) {
         tcg_gen_op4i_i32(INDEX_op_extract2_i32, ret, al, ah, ofs);
     } else {
-        TCGv_i32 t0 = tcg_temp_new_i32();
+        TCGv_i32 t0 = tcg_temp_ebb_new_i32();
         tcg_gen_shri_i32(t0, al, ofs);
         tcg_gen_deposit_i32(ret, t0, ah, 32 - ofs, ofs);
         tcg_temp_free_i32(t0);
@@ -818,8 +818,8 @@ void tcg_gen_movcond_i32(TCGCond cond, TCGv_i32 ret, TCGv_i32 c1,
     } else if (TCG_TARGET_HAS_movcond_i32) {
         tcg_gen_op6i_i32(INDEX_op_movcond_i32, ret, c1, c2, v1, v2, cond);
     } else {
-        TCGv_i32 t0 = tcg_temp_new_i32();
-        TCGv_i32 t1 = tcg_temp_new_i32();
+        TCGv_i32 t0 = tcg_temp_ebb_new_i32();
+        TCGv_i32 t1 = tcg_temp_ebb_new_i32();
         tcg_gen_setcond_i32(cond, t0, c1, c2);
         tcg_gen_neg_i32(t0, t0);
         tcg_gen_and_i32(t1, v1, t0);
@@ -836,8 +836,8 @@ void tcg_gen_add2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
     if (TCG_TARGET_HAS_add2_i32) {
         tcg_gen_op6_i32(INDEX_op_add2_i32, rl, rh, al, ah, bl, bh);
     } else {
-        TCGv_i64 t0 = tcg_temp_new_i64();
-        TCGv_i64 t1 = tcg_temp_new_i64();
+        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
+        TCGv_i64 t1 = tcg_temp_ebb_new_i64();
         tcg_gen_concat_i32_i64(t0, al, ah);
         tcg_gen_concat_i32_i64(t1, bl, bh);
         tcg_gen_add_i64(t0, t0, t1);
@@ -853,8 +853,8 @@ void tcg_gen_sub2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
     if (TCG_TARGET_HAS_sub2_i32) {
         tcg_gen_op6_i32(INDEX_op_sub2_i32, rl, rh, al, ah, bl, bh);
     } else {
-        TCGv_i64 t0 = tcg_temp_new_i64();
-        TCGv_i64 t1 = tcg_temp_new_i64();
+        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
+        TCGv_i64 t1 = tcg_temp_ebb_new_i64();
         tcg_gen_concat_i32_i64(t0, al, ah);
         tcg_gen_concat_i32_i64(t1, bl, bh);
         tcg_gen_sub_i64(t0, t0, t1);
@@ -869,14 +869,14 @@ void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
     if (TCG_TARGET_HAS_mulu2_i32) {
         tcg_gen_op4_i32(INDEX_op_mulu2_i32, rl, rh, arg1, arg2);
     } else if (TCG_TARGET_HAS_muluh_i32) {
-        TCGv_i32 t = tcg_temp_new_i32();
+        TCGv_i32 t = tcg_temp_ebb_new_i32();
         tcg_gen_op3_i32(INDEX_op_mul_i32, t, arg1, arg2);
         tcg_gen_op3_i32(INDEX_op_muluh_i32, rh, arg1, arg2);
         tcg_gen_mov_i32(rl, t);
         tcg_temp_free_i32(t);
     } else if (TCG_TARGET_REG_BITS == 64) {
-        TCGv_i64 t0 = tcg_temp_new_i64();
-        TCGv_i64 t1 = tcg_temp_new_i64();
+        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
+        TCGv_i64 t1 = tcg_temp_ebb_new_i64();
         tcg_gen_extu_i32_i64(t0, arg1);
         tcg_gen_extu_i32_i64(t1, arg2);
         tcg_gen_mul_i64(t0, t0, t1);
@@ -893,16 +893,16 @@ void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
     if (TCG_TARGET_HAS_muls2_i32) {
         tcg_gen_op4_i32(INDEX_op_muls2_i32, rl, rh, arg1, arg2);
     } else if (TCG_TARGET_HAS_mulsh_i32) {
-        TCGv_i32 t = tcg_temp_new_i32();
+        TCGv_i32 t = tcg_temp_ebb_new_i32();
         tcg_gen_op3_i32(INDEX_op_mul_i32, t, arg1, arg2);
         tcg_gen_op3_i32(INDEX_op_mulsh_i32, rh, arg1, arg2);
         tcg_gen_mov_i32(rl, t);
         tcg_temp_free_i32(t);
     } else if (TCG_TARGET_REG_BITS == 32) {
-        TCGv_i32 t0 = tcg_temp_new_i32();
-        TCGv_i32 t1 = tcg_temp_new_i32();
-        TCGv_i32 t2 = tcg_temp_new_i32();
-        TCGv_i32 t3 = tcg_temp_new_i32();
+        TCGv_i32 t0 = tcg_temp_ebb_new_i32();
+        TCGv_i32 t1 = tcg_temp_ebb_new_i32();
+        TCGv_i32 t2 = tcg_temp_ebb_new_i32();
+        TCGv_i32 t3 = tcg_temp_ebb_new_i32();
         tcg_gen_mulu2_i32(t0, t1, arg1, arg2);
         /* Adjust for negative inputs. */
         tcg_gen_sari_i32(t2, arg1, 31);
@@ -917,8 +917,8 @@ void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
         tcg_temp_free_i32(t2);
         tcg_temp_free_i32(t3);
     } else {
-        TCGv_i64 t0 = tcg_temp_new_i64();
-        TCGv_i64 t1 = tcg_temp_new_i64();
+        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
+        TCGv_i64 t1 = tcg_temp_ebb_new_i64();
         tcg_gen_ext_i32_i64(t0, arg1);
         tcg_gen_ext_i32_i64(t1, arg2);
         tcg_gen_mul_i64(t0, t0, t1);
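When only an unsigned double-width multiply is available, the muls2/mulsu2 fallbacks derive the signed high half by correcting the unsigned product: subtract arg2 from the high word where arg1 is negative, and vice versa (the sari/and/sub sequence above). A worked 32-bit model in host C (illustrative only):

    #include <stdint.h>

    static int32_t mulsh32(int32_t a, int32_t b)
    {
        uint64_t p = (uint64_t)(uint32_t)a * (uint32_t)b;
        uint32_t hi = (uint32_t)(p >> 32);
        hi -= (a < 0) ? (uint32_t)b : 0;   /* t2 = (a >> 31) & b */
        hi -= (b < 0) ? (uint32_t)a : 0;   /* t3 = (b >> 31) & a */
        return (int32_t)hi;                /* signed high half */
    }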
@@ -931,9 +931,9 @@ void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
 void tcg_gen_mulsu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
 {
     if (TCG_TARGET_REG_BITS == 32) {
-        TCGv_i32 t0 = tcg_temp_new_i32();
-        TCGv_i32 t1 = tcg_temp_new_i32();
-        TCGv_i32 t2 = tcg_temp_new_i32();
+        TCGv_i32 t0 = tcg_temp_ebb_new_i32();
+        TCGv_i32 t1 = tcg_temp_ebb_new_i32();
+        TCGv_i32 t2 = tcg_temp_ebb_new_i32();
         tcg_gen_mulu2_i32(t0, t1, arg1, arg2);
         /* Adjust for negative input for the signed arg1.  */
         tcg_gen_sari_i32(t2, arg1, 31);
@@ -944,8 +944,8 @@ void tcg_gen_mulsu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
         tcg_temp_free_i32(t1);
         tcg_temp_free_i32(t2);
     } else {
-        TCGv_i64 t0 = tcg_temp_new_i64();
-        TCGv_i64 t1 = tcg_temp_new_i64();
+        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
+        TCGv_i64 t1 = tcg_temp_ebb_new_i64();
         tcg_gen_ext_i32_i64(t0, arg1);
         tcg_gen_extu_i32_i64(t1, arg2);
         tcg_gen_mul_i64(t0, t0, t1);
@@ -1001,8 +1001,8 @@ void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg, int flags)
     if (TCG_TARGET_HAS_bswap16_i32) {
         tcg_gen_op3i_i32(INDEX_op_bswap16_i32, ret, arg, flags);
     } else {
-        TCGv_i32 t0 = tcg_temp_new_i32();
-        TCGv_i32 t1 = tcg_temp_new_i32();
+        TCGv_i32 t0 = tcg_temp_ebb_new_i32();
+        TCGv_i32 t1 = tcg_temp_ebb_new_i32();
 
         tcg_gen_shri_i32(t0, arg, 8);
         if (!(flags & TCG_BSWAP_IZ)) {
@@ -1030,8 +1030,8 @@ void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg)
     if (TCG_TARGET_HAS_bswap32_i32) {
         tcg_gen_op3i_i32(INDEX_op_bswap32_i32, ret, arg, 0);
     } else {
-        TCGv_i32 t0 = tcg_temp_new_i32();
-        TCGv_i32 t1 = tcg_temp_new_i32();
+        TCGv_i32 t0 = tcg_temp_ebb_new_i32();
+        TCGv_i32 t1 = tcg_temp_ebb_new_i32();
         TCGv_i32 t2 = tcg_constant_i32(0x00ff00ff);
 
                                         /* arg = abcd */
@@ -1078,7 +1078,7 @@ void tcg_gen_umax_i32(TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b)
 
 void tcg_gen_abs_i32(TCGv_i32 ret, TCGv_i32 a)
 {
-    TCGv_i32 t = tcg_temp_new_i32();
+    TCGv_i32 t = tcg_temp_ebb_new_i32();
 
     tcg_gen_sari_i32(t, a, 31);
     tcg_gen_xor_i32(ret, a, t);
@@ -1241,8 +1241,8 @@ void tcg_gen_mul_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
     TCGv_i64 t0;
     TCGv_i32 t1;
 
-    t0 = tcg_temp_new_i64();
-    t1 = tcg_temp_new_i32();
+    t0 = tcg_temp_ebb_new_i64();
+    t1 = tcg_temp_ebb_new_i32();
 
     tcg_gen_mulu2_i32(TCGV_LOW(t0), TCGV_HIGH(t0),
                       TCGV_LOW(arg1), TCGV_LOW(arg2));
@@ -1423,7 +1423,7 @@ static inline void tcg_gen_shifti_i64(TCGv_i64 ret, TCGv_i64 arg1,
             tcg_gen_extract2_i32(TCGV_HIGH(ret),
                                  TCGV_LOW(arg1), TCGV_HIGH(arg1), 32 - c);
         } else {
-            TCGv_i32 t0 = tcg_temp_new_i32();
+            TCGv_i32 t0 = tcg_temp_ebb_new_i32();
             tcg_gen_shri_i32(t0, TCGV_LOW(arg1), 32 - c);
             tcg_gen_deposit_i32(TCGV_HIGH(ret), t0,
                                 TCGV_HIGH(arg1), c, 32 - c);
@@ -1557,7 +1557,7 @@ void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
     if (TCG_TARGET_HAS_div_i64) {
         tcg_gen_op3_i64(INDEX_op_div_i64, ret, arg1, arg2);
     } else if (TCG_TARGET_HAS_div2_i64) {
-        TCGv_i64 t0 = tcg_temp_new_i64();
+        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
         tcg_gen_sari_i64(t0, arg1, 63);
         tcg_gen_op5_i64(INDEX_op_div2_i64, ret, t0, arg1, t0, arg2);
         tcg_temp_free_i64(t0);
@@ -1571,13 +1571,13 @@ void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
     if (TCG_TARGET_HAS_rem_i64) {
         tcg_gen_op3_i64(INDEX_op_rem_i64, ret, arg1, arg2);
     } else if (TCG_TARGET_HAS_div_i64) {
-        TCGv_i64 t0 = tcg_temp_new_i64();
+        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
         tcg_gen_op3_i64(INDEX_op_div_i64, t0, arg1, arg2);
         tcg_gen_mul_i64(t0, t0, arg2);
         tcg_gen_sub_i64(ret, arg1, t0);
         tcg_temp_free_i64(t0);
     } else if (TCG_TARGET_HAS_div2_i64) {
-        TCGv_i64 t0 = tcg_temp_new_i64();
+        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
         tcg_gen_sari_i64(t0, arg1, 63);
         tcg_gen_op5_i64(INDEX_op_div2_i64, t0, ret, arg1, t0, arg2);
         tcg_temp_free_i64(t0);
@@ -1591,7 +1591,7 @@ void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
     if (TCG_TARGET_HAS_div_i64) {
         tcg_gen_op3_i64(INDEX_op_divu_i64, ret, arg1, arg2);
     } else if (TCG_TARGET_HAS_div2_i64) {
-        TCGv_i64 t0 = tcg_temp_new_i64();
+        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
         tcg_gen_movi_i64(t0, 0);
         tcg_gen_op5_i64(INDEX_op_divu2_i64, ret, t0, arg1, t0, arg2);
         tcg_temp_free_i64(t0);
@@ -1605,13 +1605,13 @@ void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
     if (TCG_TARGET_HAS_rem_i64) {
         tcg_gen_op3_i64(INDEX_op_remu_i64, ret, arg1, arg2);
     } else if (TCG_TARGET_HAS_div_i64) {
-        TCGv_i64 t0 = tcg_temp_new_i64();
+        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
         tcg_gen_op3_i64(INDEX_op_divu_i64, t0, arg1, arg2);
         tcg_gen_mul_i64(t0, t0, arg2);
         tcg_gen_sub_i64(ret, arg1, t0);
         tcg_temp_free_i64(t0);
     } else if (TCG_TARGET_HAS_div2_i64) {
-        TCGv_i64 t0 = tcg_temp_new_i64();
+        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
         tcg_gen_movi_i64(t0, 0);
         tcg_gen_op5_i64(INDEX_op_divu2_i64, t0, ret, arg1, t0, arg2);
         tcg_temp_free_i64(t0);
@@ -1710,8 +1710,8 @@ void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg, int flags)
     } else if (TCG_TARGET_HAS_bswap16_i64) {
         tcg_gen_op3i_i64(INDEX_op_bswap16_i64, ret, arg, flags);
     } else {
-        TCGv_i64 t0 = tcg_temp_new_i64();
-        TCGv_i64 t1 = tcg_temp_new_i64();
+        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
+        TCGv_i64 t1 = tcg_temp_ebb_new_i64();
 
         tcg_gen_shri_i64(t0, arg, 8);
         if (!(flags & TCG_BSWAP_IZ)) {
@@ -1749,8 +1749,8 @@ void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg, int flags)
     } else if (TCG_TARGET_HAS_bswap32_i64) {
         tcg_gen_op3i_i64(INDEX_op_bswap32_i64, ret, arg, flags);
     } else {
-        TCGv_i64 t0 = tcg_temp_new_i64();
-        TCGv_i64 t1 = tcg_temp_new_i64();
+        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
+        TCGv_i64 t1 = tcg_temp_ebb_new_i64();
         TCGv_i64 t2 = tcg_constant_i64(0x00ff00ff);
 
                                             /* arg = xxxxabcd */
@@ -1778,8 +1778,8 @@ void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg)
 {
     if (TCG_TARGET_REG_BITS == 32) {
         TCGv_i32 t0, t1;
-        t0 = tcg_temp_new_i32();
-        t1 = tcg_temp_new_i32();
+        t0 = tcg_temp_ebb_new_i32();
+        t1 = tcg_temp_ebb_new_i32();
 
         tcg_gen_bswap32_i32(t0, TCGV_LOW(arg));
         tcg_gen_bswap32_i32(t1, TCGV_HIGH(arg));
@@ -1790,9 +1790,9 @@ void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg)
     } else if (TCG_TARGET_HAS_bswap64_i64) {
         tcg_gen_op3i_i64(INDEX_op_bswap64_i64, ret, arg, 0);
     } else {
-        TCGv_i64 t0 = tcg_temp_new_i64();
-        TCGv_i64 t1 = tcg_temp_new_i64();
-        TCGv_i64 t2 = tcg_temp_new_i64();
+        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
+        TCGv_i64 t1 = tcg_temp_ebb_new_i64();
+        TCGv_i64 t2 = tcg_temp_ebb_new_i64();
 
                                         /* arg = abcdefgh */
         tcg_gen_movi_i64(t2, 0x00ff00ff00ff00ffull);
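The bswap64 fallback above uses the classic three-round shift-and-mask scheme: swap adjacent bytes, then adjacent 16-bit halfwords, then the two 32-bit halves. An equivalent host-C sketch (not part of the patch):

    static uint64_t bswap64_sketch(uint64_t x)
    {
        x = ((x & 0x00ff00ff00ff00ffull) << 8)
          | ((x >> 8) & 0x00ff00ff00ff00ffull);    /* swap bytes */
        x = ((x & 0x0000ffff0000ffffull) << 16)
          | ((x >> 16) & 0x0000ffff0000ffffull);   /* swap halfwords */
        return (x << 32) | (x >> 32);              /* swap words */
    }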
@@ -1822,8 +1822,8 @@ void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg)
 void tcg_gen_hswap_i64(TCGv_i64 ret, TCGv_i64 arg)
 {
     uint64_t m = 0x0000ffff0000ffffull;
-    TCGv_i64 t0 = tcg_temp_new_i64();
-    TCGv_i64 t1 = tcg_temp_new_i64();
+    TCGv_i64 t0 = tcg_temp_ebb_new_i64();
+    TCGv_i64 t1 = tcg_temp_ebb_new_i64();
 
     /* See include/qemu/bitops.h, hswap64.  */
     tcg_gen_rotli_i64(t1, arg, 32);
@@ -1863,7 +1863,7 @@ void tcg_gen_andc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
     } else if (TCG_TARGET_HAS_andc_i64) {
         tcg_gen_op3_i64(INDEX_op_andc_i64, ret, arg1, arg2);
     } else {
-        TCGv_i64 t0 = tcg_temp_new_i64();
+        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
         tcg_gen_not_i64(t0, arg2);
         tcg_gen_and_i64(ret, arg1, t0);
         tcg_temp_free_i64(t0);
@@ -1917,7 +1917,7 @@ void tcg_gen_orc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
     } else if (TCG_TARGET_HAS_orc_i64) {
         tcg_gen_op3_i64(INDEX_op_orc_i64, ret, arg1, arg2);
     } else {
-        TCGv_i64 t0 = tcg_temp_new_i64();
+        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
         tcg_gen_not_i64(t0, arg2);
         tcg_gen_or_i64(ret, arg1, t0);
         tcg_temp_free_i64(t0);
@@ -1938,7 +1938,7 @@ void tcg_gen_clzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
     if (TCG_TARGET_REG_BITS == 32
         && TCG_TARGET_HAS_clz_i32
         && arg2 <= 0xffffffffu) {
-        TCGv_i32 t = tcg_temp_new_i32();
+        TCGv_i32 t = tcg_temp_ebb_new_i32();
         tcg_gen_clzi_i32(t, TCGV_LOW(arg1), arg2 - 32);
         tcg_gen_addi_i32(t, t, 32);
         tcg_gen_clz_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), t);
@@ -1956,7 +1956,7 @@ void tcg_gen_ctz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
     if (TCG_TARGET_HAS_ctz_i64) {
         tcg_gen_op3_i64(INDEX_op_ctz_i64, ret, arg1, arg2);
     } else if (TCG_TARGET_HAS_ctpop_i64 || TCG_TARGET_HAS_clz_i64) {
-        TCGv_i64 z, t = tcg_temp_new_i64();
+        TCGv_i64 z, t = tcg_temp_ebb_new_i64();
 
         if (TCG_TARGET_HAS_ctpop_i64) {
             tcg_gen_subi_i64(t, arg1, 1);
@@ -1983,7 +1983,7 @@ void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
     if (TCG_TARGET_REG_BITS == 32
         && TCG_TARGET_HAS_ctz_i32
        && arg2 <= 0xffffffffu) {
-        TCGv_i32 t32 = tcg_temp_new_i32();
+        TCGv_i32 t32 = tcg_temp_ebb_new_i32();
         tcg_gen_ctzi_i32(t32, TCGV_HIGH(arg1), arg2 - 32);
         tcg_gen_addi_i32(t32, t32, 32);
         tcg_gen_ctz_i32(TCGV_LOW(ret), TCGV_LOW(arg1), t32);
@@ -1993,7 +1993,7 @@ void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
                && TCG_TARGET_HAS_ctpop_i64
                && arg2 == 64) {
         /* This equivalence has the advantage of not requiring a fixup.  */
-        TCGv_i64 t = tcg_temp_new_i64();
+        TCGv_i64 t = tcg_temp_ebb_new_i64();
         tcg_gen_subi_i64(t, arg1, 1);
         tcg_gen_andc_i64(t, t, arg1);
         tcg_gen_ctpop_i64(ret, t);
@@ -2008,7 +2008,7 @@ void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
 void tcg_gen_clrsb_i64(TCGv_i64 ret, TCGv_i64 arg)
 {
     if (TCG_TARGET_HAS_clz_i64 || TCG_TARGET_HAS_clz_i32) {
-        TCGv_i64 t = tcg_temp_new_i64();
+        TCGv_i64 t = tcg_temp_ebb_new_i64();
         tcg_gen_sari_i64(t, arg, 63);
         tcg_gen_xor_i64(t, t, arg);
         tcg_gen_clzi_i64(t, t, 64);
@@ -2039,8 +2039,8 @@ void tcg_gen_rotl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
         tcg_gen_op3_i64(INDEX_op_rotl_i64, ret, arg1, arg2);
     } else {
         TCGv_i64 t0, t1;
-        t0 = tcg_temp_new_i64();
-        t1 = tcg_temp_new_i64();
+        t0 = tcg_temp_ebb_new_i64();
+        t1 = tcg_temp_ebb_new_i64();
         tcg_gen_shl_i64(t0, arg1, arg2);
         tcg_gen_subfi_i64(t1, 64, arg2);
         tcg_gen_shr_i64(t1, arg1, t1);
@@ -2060,8 +2060,8 @@ void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
         tcg_gen_rotl_i64(ret, arg1, tcg_constant_i64(arg2));
     } else {
         TCGv_i64 t0, t1;
-        t0 = tcg_temp_new_i64();
-        t1 = tcg_temp_new_i64();
+        t0 = tcg_temp_ebb_new_i64();
+        t1 = tcg_temp_ebb_new_i64();
         tcg_gen_shli_i64(t0, arg1, arg2);
         tcg_gen_shri_i64(t1, arg1, 64 - arg2);
         tcg_gen_or_i64(ret, t0, t1);
@@ -2076,8 +2076,8 @@ void tcg_gen_rotr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
         tcg_gen_op3_i64(INDEX_op_rotr_i64, ret, arg1, arg2);
     } else {
         TCGv_i64 t0, t1;
-        t0 = tcg_temp_new_i64();
-        t1 = tcg_temp_new_i64();
+        t0 = tcg_temp_ebb_new_i64();
+        t1 = tcg_temp_ebb_new_i64();
         tcg_gen_shr_i64(t0, arg1, arg2);
         tcg_gen_subfi_i64(t1, 64, arg2);
         tcg_gen_shl_i64(t1, arg1, t1);
@@ -2133,7 +2133,7 @@ void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2,
         }
     }
 
-    t1 = tcg_temp_new_i64();
+    t1 = tcg_temp_ebb_new_i64();
 
     if (TCG_TARGET_HAS_extract2_i64) {
         if (ofs + len == 64) {
@@ -2365,7 +2365,7 @@ void tcg_gen_sextract_i64(TCGv_i64 ret, TCGv_i64 arg,
             tcg_gen_sextract_i32(TCGV_HIGH(ret), TCGV_HIGH(arg), 0, len - 32);
             return;
         } else if (len > 32) {
-            TCGv_i32 t = tcg_temp_new_i32();
+            TCGv_i32 t = tcg_temp_ebb_new_i32();
             /* Extract the bits for the high word normally.  */
             tcg_gen_sextract_i32(t, TCGV_HIGH(arg), ofs + 32, len - 32);
             /* Shift the field down for the low part.  */
@@ -2460,7 +2460,7 @@ void tcg_gen_extract2_i64(TCGv_i64 ret, TCGv_i64 al, TCGv_i64 ah,
     } else if (TCG_TARGET_HAS_extract2_i64) {
         tcg_gen_op4i_i64(INDEX_op_extract2_i64, ret, al, ah, ofs);
     } else {
-        TCGv_i64 t0 = tcg_temp_new_i64();
+        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
         tcg_gen_shri_i64(t0, al, ofs);
         tcg_gen_deposit_i64(ret, t0, ah, 64 - ofs, ofs);
         tcg_temp_free_i64(t0);
@@ -2475,8 +2475,8 @@ void tcg_gen_movcond_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 c1,
     } else if (cond == TCG_COND_NEVER) {
         tcg_gen_mov_i64(ret, v2);
     } else if (TCG_TARGET_REG_BITS == 32) {
-        TCGv_i32 t0 = tcg_temp_new_i32();
-        TCGv_i32 t1 = tcg_temp_new_i32();
+        TCGv_i32 t0 = tcg_temp_ebb_new_i32();
+        TCGv_i32 t1 = tcg_temp_ebb_new_i32();
         tcg_gen_op6i_i32(INDEX_op_setcond2_i32, t0,
                          TCGV_LOW(c1), TCGV_HIGH(c1),
                          TCGV_LOW(c2), TCGV_HIGH(c2), cond);
@@ -2503,8 +2503,8 @@ void tcg_gen_movcond_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 c1,
     } else if (TCG_TARGET_HAS_movcond_i64) {
         tcg_gen_op6i_i64(INDEX_op_movcond_i64, ret, c1, c2, v1, v2, cond);
     } else {
-        TCGv_i64 t0 = tcg_temp_new_i64();
-        TCGv_i64 t1 = tcg_temp_new_i64();
+        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
+        TCGv_i64 t1 = tcg_temp_ebb_new_i64();
         tcg_gen_setcond_i64(cond, t0, c1, c2);
         tcg_gen_neg_i64(t0, t0);
         tcg_gen_and_i64(t1, v1, t0);
@@ -2521,8 +2521,8 @@ void tcg_gen_add2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
     if (TCG_TARGET_HAS_add2_i64) {
         tcg_gen_op6_i64(INDEX_op_add2_i64, rl, rh, al, ah, bl, bh);
     } else {
-        TCGv_i64 t0 = tcg_temp_new_i64();
-        TCGv_i64 t1 = tcg_temp_new_i64();
+        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
+        TCGv_i64 t1 = tcg_temp_ebb_new_i64();
         tcg_gen_add_i64(t0, al, bl);
         tcg_gen_setcond_i64(TCG_COND_LTU, t1, t0, al);
         tcg_gen_add_i64(rh, ah, bh);
@@ -2539,8 +2539,8 @@ void tcg_gen_sub2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
     if (TCG_TARGET_HAS_sub2_i64) {
         tcg_gen_op6_i64(INDEX_op_sub2_i64, rl, rh, al, ah, bl, bh);
     } else {
-        TCGv_i64 t0 = tcg_temp_new_i64();
-        TCGv_i64 t1 = tcg_temp_new_i64();
+        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
+        TCGv_i64 t1 = tcg_temp_ebb_new_i64();
         tcg_gen_sub_i64(t0, al, bl);
         tcg_gen_setcond_i64(TCG_COND_LTU, t1, al, bl);
         tcg_gen_sub_i64(rh, ah, bh);
@@ -2556,13 +2556,13 @@ void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
     if (TCG_TARGET_HAS_mulu2_i64) {
         tcg_gen_op4_i64(INDEX_op_mulu2_i64, rl, rh, arg1, arg2);
     } else if (TCG_TARGET_HAS_muluh_i64) {
-        TCGv_i64 t = tcg_temp_new_i64();
+        TCGv_i64 t = tcg_temp_ebb_new_i64();
         tcg_gen_op3_i64(INDEX_op_mul_i64, t, arg1, arg2);
         tcg_gen_op3_i64(INDEX_op_muluh_i64, rh, arg1, arg2);
         tcg_gen_mov_i64(rl, t);
         tcg_temp_free_i64(t);
     } else {
-        TCGv_i64 t0 = tcg_temp_new_i64();
+        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
         tcg_gen_mul_i64(t0, arg1, arg2);
         gen_helper_muluh_i64(rh, arg1, arg2);
         tcg_gen_mov_i64(rl, t0);
@@ -2575,16 +2575,16 @@ void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
     if (TCG_TARGET_HAS_muls2_i64) {
         tcg_gen_op4_i64(INDEX_op_muls2_i64, rl, rh, arg1, arg2);
     } else if (TCG_TARGET_HAS_mulsh_i64) {
-        TCGv_i64 t = tcg_temp_new_i64();
+        TCGv_i64 t = tcg_temp_ebb_new_i64();
         tcg_gen_op3_i64(INDEX_op_mul_i64, t, arg1, arg2);
         tcg_gen_op3_i64(INDEX_op_mulsh_i64, rh, arg1, arg2);
         tcg_gen_mov_i64(rl, t);
         tcg_temp_free_i64(t);
     } else if (TCG_TARGET_HAS_mulu2_i64 || TCG_TARGET_HAS_muluh_i64) {
-        TCGv_i64 t0 = tcg_temp_new_i64();
-        TCGv_i64 t1 = tcg_temp_new_i64();
-        TCGv_i64 t2 = tcg_temp_new_i64();
-        TCGv_i64 t3 = tcg_temp_new_i64();
+        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
+        TCGv_i64 t1 = tcg_temp_ebb_new_i64();
+        TCGv_i64 t2 = tcg_temp_ebb_new_i64();
+        TCGv_i64 t3 = tcg_temp_ebb_new_i64();
         tcg_gen_mulu2_i64(t0, t1, arg1, arg2);
         /* Adjust for negative inputs. */
         tcg_gen_sari_i64(t2, arg1, 63);
@@ -2599,7 +2599,7 @@ void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
         tcg_temp_free_i64(t2);
         tcg_temp_free_i64(t3);
     } else {
-        TCGv_i64 t0 = tcg_temp_new_i64();
+        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
         tcg_gen_mul_i64(t0, arg1, arg2);
         gen_helper_mulsh_i64(rh, arg1, arg2);
         tcg_gen_mov_i64(rl, t0);
@@ -2609,9 +2609,9 @@ void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
 
 void tcg_gen_mulsu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
 {
-    TCGv_i64 t0 = tcg_temp_new_i64();
-    TCGv_i64 t1 = tcg_temp_new_i64();
-    TCGv_i64 t2 = tcg_temp_new_i64();
+    TCGv_i64 t0 = tcg_temp_ebb_new_i64();
+    TCGv_i64 t1 = tcg_temp_ebb_new_i64();
+    TCGv_i64 t2 = tcg_temp_ebb_new_i64();
     tcg_gen_mulu2_i64(t0, t1, arg1, arg2);
     /* Adjust for negative input for the signed arg1. */
     tcg_gen_sari_i64(t2, arg1, 63);
@@ -2645,7 +2645,7 @@ void tcg_gen_umax_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b)
 
 void tcg_gen_abs_i64(TCGv_i64 ret, TCGv_i64 a)
 {
-    TCGv_i64 t = tcg_temp_new_i64();
+    TCGv_i64 t = tcg_temp_ebb_new_i64();
 
     tcg_gen_sari_i64(t, a, 63);
     tcg_gen_xor_i64(ret, a, t);
@@ -2675,7 +2675,7 @@ void tcg_gen_extrh_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
         tcg_gen_op2(INDEX_op_extrh_i64_i32,
                     tcgv_i32_arg(ret), tcgv_i64_arg(arg));
     } else {
-        TCGv_i64 t = tcg_temp_new_i64();
+        TCGv_i64 t = tcg_temp_ebb_new_i64();
         tcg_gen_shri_i64(t, arg, 32);
         tcg_gen_mov_i32(ret, (TCGv_i32)t);
         tcg_temp_free_i64(t);
@@ -2714,7 +2714,7 @@ void tcg_gen_concat_i32_i64(TCGv_i64 dest, TCGv_i32 low, TCGv_i32 high)
         return;
     }
 
-    tmp = tcg_temp_new_i64();
+    tmp = tcg_temp_ebb_new_i64();
     /* These extensions are only needed for type correctness.
        We may be able to do better given target specific information.  */
     tcg_gen_extu_i32_i64(tmp, high);
@@ -2826,7 +2826,7 @@ void tcg_gen_lookup_and_goto_ptr(void)
     }
 
     plugin_gen_disable_mem_helpers();
-    ptr = tcg_temp_new_ptr();
+    ptr = tcg_temp_ebb_new_ptr();
     gen_helper_lookup_tb_ptr(ptr, cpu_env);
     tcg_gen_op1i(INDEX_op_goto_ptr, tcgv_ptr_arg(ptr));
     tcg_temp_free_ptr(ptr);
@@ -2987,7 +2987,7 @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
     oi = make_memop_idx(memop, idx);
 
     if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
-        swap = tcg_temp_new_i32();
+        swap = tcg_temp_ebb_new_i32();
         switch (memop & MO_SIZE) {
         case MO_16:
             tcg_gen_bswap16_i32(swap, val, 0);
@@ -3082,7 +3082,7 @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
     oi = make_memop_idx(memop, idx);
 
     if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
-        swap = tcg_temp_new_i64();
+        swap = tcg_temp_ebb_new_i64();
         switch (memop & MO_SIZE) {
         case MO_16:
             tcg_gen_bswap16_i64(swap, val, 0);
@@ -3224,7 +3224,7 @@ void tcg_gen_qemu_st_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop)
         addr_p8 = tcg_temp_new();
         if ((mop[0] ^ memop) & MO_BSWAP) {
-            TCGv_i64 t = tcg_temp_new_i64();
+            TCGv_i64 t = tcg_temp_ebb_new_i64();
 
             tcg_gen_bswap64_i64(t, x);
             gen_ldst_i64(INDEX_op_qemu_st_i64, t, addr, mop[0], idx);
@@ -3328,8 +3328,8 @@ static void * const table_cmpxchg[(MO_SIZE | MO_BSWAP) + 1] = {
 void tcg_gen_nonatomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv,
                                    TCGv_i32 newv, TCGArg idx, MemOp memop)
 {
-    TCGv_i32 t1 = tcg_temp_new_i32();
-    TCGv_i32 t2 = tcg_temp_new_i32();
+    TCGv_i32 t1 = tcg_temp_ebb_new_i32();
+    TCGv_i32 t2 = tcg_temp_ebb_new_i32();
 
     tcg_gen_ext_i32(t2, cmpv, memop & MO_SIZE);
@@ -3385,8 +3385,8 @@ void tcg_gen_nonatomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
         return;
     }
 
-    t1 = tcg_temp_new_i64();
-    t2 = tcg_temp_new_i64();
+    t1 = tcg_temp_ebb_new_i64();
+    t2 = tcg_temp_ebb_new_i64();
 
     tcg_gen_ext_i64(t2, cmpv, memop & MO_SIZE);
@@ -3442,9 +3442,9 @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
             tcg_gen_movi_i32(TCGV_HIGH(retv), 0);
         }
     } else {
-        TCGv_i32 c32 = tcg_temp_new_i32();
-        TCGv_i32 n32 = tcg_temp_new_i32();
-        TCGv_i32 r32 = tcg_temp_new_i32();
+        TCGv_i32 c32 = tcg_temp_ebb_new_i32();
+        TCGv_i32 n32 = tcg_temp_ebb_new_i32();
+        TCGv_i32 r32 = tcg_temp_ebb_new_i32();
 
         tcg_gen_extrl_i64_i32(c32, cmpv);
         tcg_gen_extrl_i64_i32(n32, newv);
@@ -3476,10 +3476,10 @@ void tcg_gen_nonatomic_cmpxchg_i128(TCGv_i128 retv, TCGv addr, TCGv_i128 cmpv,
 
         gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi));
     } else {
-        TCGv_i128 oldv = tcg_temp_new_i128();
-        TCGv_i128 tmpv = tcg_temp_new_i128();
-        TCGv_i64 t0 = tcg_temp_new_i64();
-        TCGv_i64 t1 = tcg_temp_new_i64();
+        TCGv_i128 oldv = tcg_temp_ebb_new_i128();
+        TCGv_i128 tmpv = tcg_temp_ebb_new_i128();
+        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
+        TCGv_i64 t1 = tcg_temp_ebb_new_i64();
         TCGv_i64 z = tcg_constant_i64(0);
 
         tcg_gen_qemu_ld_i128(oldv, addr, idx, memop);
@@ -3541,8 +3541,8 @@ static void do_nonatomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val,
                                 TCGArg idx, MemOp memop, bool new_val,
                                 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
 {
-    TCGv_i32 t1 = tcg_temp_new_i32();
-    TCGv_i32 t2 = tcg_temp_new_i32();
+    TCGv_i32 t1 = tcg_temp_ebb_new_i32();
+    TCGv_i32 t2 = tcg_temp_ebb_new_i32();
 
     memop = tcg_canonicalize_memop(memop, 0, 0);
@@ -3579,8 +3579,8 @@ static void do_nonatomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
                                 TCGArg idx, MemOp memop, bool new_val,
                                 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
 {
-    TCGv_i64 t1 = tcg_temp_new_i64();
-    TCGv_i64 t2 = tcg_temp_new_i64();
+    TCGv_i64 t1 = tcg_temp_ebb_new_i64();
+    TCGv_i64 t2 = tcg_temp_ebb_new_i64();
 
     memop = tcg_canonicalize_memop(memop, 1, 0);
@@ -3616,8 +3616,8 @@ static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
         tcg_gen_movi_i64(ret, 0);
 #endif /* CONFIG_ATOMIC64 */
     } else {
-        TCGv_i32 v32 = tcg_temp_new_i32();
-        TCGv_i32 r32 = tcg_temp_new_i32();
+        TCGv_i32 v32 = tcg_temp_ebb_new_i32();
+        TCGv_i32 r32 = tcg_temp_ebb_new_i32();
 
         tcg_gen_extrl_i64_i32(v32, val);
         do_atomic_op_i32(r32, addr, v32, idx, memop & ~MO_SIGN, table);
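The non-atomic cmpxchg expansions above implement compare-and-swap semantics with an ordinary load, movcond and store; they are only correct where no concurrent access is possible. What they compute, as a plain-C sketch:

    static uint32_t cmpxchg32_sketch(uint32_t *p, uint32_t cmp, uint32_t new)
    {
        uint32_t old = *p;               /* qemu_ld */
        *p = (old == cmp) ? new : old;   /* movcond + qemu_st */
        return old;                      /* retv receives the loaded value */
    }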
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 9822c65ea8..5cccc06ae3 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -1254,66 +1254,69 @@ TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
     return ts;
 }
 
-TCGTemp *tcg_temp_new_internal(TCGType type, bool temp_local)
+TCGTemp *tcg_temp_new_internal(TCGType type, TCGTempKind kind)
 {
     TCGContext *s = tcg_ctx;
-    TCGTempKind kind = temp_local ? TEMP_LOCAL : TEMP_NORMAL;
     TCGTemp *ts;
-    int idx, k;
-
-    k = type + (temp_local ? TCG_TYPE_COUNT : 0);
-    idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS);
-    if (idx < TCG_MAX_TEMPS) {
-        /* There is already an available temp with the right type.  */
-        clear_bit(idx, s->free_temps[k].l);
-
-        ts = &s->temps[idx];
-        ts->temp_allocated = 1;
-        tcg_debug_assert(ts->base_type == type);
-        tcg_debug_assert(ts->kind == kind);
-    } else {
-        int i, n;
+    int n;
 
-        switch (type) {
-        case TCG_TYPE_I32:
-        case TCG_TYPE_V64:
-        case TCG_TYPE_V128:
-        case TCG_TYPE_V256:
-            n = 1;
-            break;
-        case TCG_TYPE_I64:
-            n = 64 / TCG_TARGET_REG_BITS;
-            break;
-        case TCG_TYPE_I128:
-            n = 128 / TCG_TARGET_REG_BITS;
-            break;
-        default:
-            g_assert_not_reached();
+    if (kind == TEMP_EBB) {
+        int idx = find_first_bit(s->free_temps[type].l, TCG_MAX_TEMPS);
+
+        if (idx < TCG_MAX_TEMPS) {
+            /* There is already an available temp with the right type.  */
+            clear_bit(idx, s->free_temps[type].l);
+
+            ts = &s->temps[idx];
+            ts->temp_allocated = 1;
+            tcg_debug_assert(ts->base_type == type);
+            tcg_debug_assert(ts->kind == kind);
+            goto done;
         }
+    } else {
+        tcg_debug_assert(kind == TEMP_TB);
+    }
 
-        ts = tcg_temp_alloc(s);
-        ts->base_type = type;
-        ts->temp_allocated = 1;
-        ts->kind = kind;
+    switch (type) {
+    case TCG_TYPE_I32:
+    case TCG_TYPE_V64:
+    case TCG_TYPE_V128:
+    case TCG_TYPE_V256:
+        n = 1;
+        break;
+    case TCG_TYPE_I64:
+        n = 64 / TCG_TARGET_REG_BITS;
+        break;
+    case TCG_TYPE_I128:
+        n = 128 / TCG_TARGET_REG_BITS;
+        break;
+    default:
+        g_assert_not_reached();
+    }
 
-        if (n == 1) {
-            ts->type = type;
-        } else {
-            ts->type = TCG_TYPE_REG;
+    ts = tcg_temp_alloc(s);
+    ts->base_type = type;
+    ts->temp_allocated = 1;
+    ts->kind = kind;
+
+    if (n == 1) {
+        ts->type = type;
+    } else {
+        ts->type = TCG_TYPE_REG;
 
-            for (i = 1; i < n; ++i) {
-                TCGTemp *ts2 = tcg_temp_alloc(s);
+        for (int i = 1; i < n; ++i) {
+            TCGTemp *ts2 = tcg_temp_alloc(s);
 
-                tcg_debug_assert(ts2 == ts + i);
-                ts2->base_type = type;
-                ts2->type = TCG_TYPE_REG;
-                ts2->temp_allocated = 1;
-                ts2->temp_subindex = i;
-                ts2->kind = kind;
-            }
+            tcg_debug_assert(ts2 == ts + i);
+            ts2->base_type = type;
+            ts2->type = TCG_TYPE_REG;
+            ts2->temp_allocated = 1;
+            ts2->temp_subindex = i;
+            ts2->kind = kind;
         }
     }
+
+ done:
 #if defined(CONFIG_DEBUG_TCG)
     s->temps_in_use++;
 #endif
@@ -1340,7 +1343,7 @@ TCGv_vec tcg_temp_new_vec(TCGType type)
     }
 #endif
 
-    t = tcg_temp_new_internal(type, 0);
+    t = tcg_temp_new_internal(type, TEMP_EBB);
     return temp_tcgv_vec(t);
 }
 
@@ -1351,14 +1354,13 @@ TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match)
 
     tcg_debug_assert(t->temp_allocated != 0);
 
-    t = tcg_temp_new_internal(t->base_type, 0);
+    t = tcg_temp_new_internal(t->base_type, TEMP_EBB);
     return temp_tcgv_vec(t);
 }
 
 void tcg_temp_free_internal(TCGTemp *ts)
 {
     TCGContext *s = tcg_ctx;
-    int k, idx;
 
     switch (ts->kind) {
     case TEMP_CONST:
@@ -1367,26 +1369,25 @@ void tcg_temp_free_internal(TCGTemp *ts)
          * silently ignore free.
          */
         return;
-    case TEMP_NORMAL:
-    case TEMP_LOCAL:
+    case TEMP_EBB:
+    case TEMP_TB:
         break;
     default:
         g_assert_not_reached();
     }
 
+    tcg_debug_assert(ts->temp_allocated != 0);
+    ts->temp_allocated = 0;
+
 #if defined(CONFIG_DEBUG_TCG)
+    assert(s->temps_in_use > 0);
     s->temps_in_use--;
-    if (s->temps_in_use < 0) {
-        fprintf(stderr, "More temporaries freed than allocated!\n");
-    }
 #endif
 
-    tcg_debug_assert(ts->temp_allocated != 0);
-    ts->temp_allocated = 0;
-
-    idx = temp_idx(ts);
-    k = ts->base_type + (ts->kind == TEMP_NORMAL ? 0 : TCG_TYPE_COUNT);
-    set_bit(idx, s->free_temps[k].l);
+    if (ts->kind == TEMP_EBB) {
+        int idx = temp_idx(ts);
+        set_bit(idx, s->free_temps[ts->base_type].l);
+    }
 }
 
 TCGTemp *tcg_constant_internal(TCGType type, int64_t val)
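After this change only TEMP_EBB temps participate in the free_temps bitmap, so only they are recycled by tcg_temp_new_internal; freeing a TEMP_TB temp merely clears temp_allocated. The intended usage pattern, as an illustrative fragment:

    TCGv_i32 t = tcg_temp_ebb_new_i32();   /* may reuse a previously freed slot */
    tcg_gen_movi_i32(t, 0);
    /* ... use t within the current EBB ... */
    tcg_temp_free_i32(t);                  /* slot returns to free_temps[type] */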
@@ -1474,22 +1475,6 @@ TCGv_i64 tcg_const_i64(int64_t val)
     return t0;
 }
 
-TCGv_i32 tcg_const_local_i32(int32_t val)
-{
-    TCGv_i32 t0;
-    t0 = tcg_temp_local_new_i32();
-    tcg_gen_movi_i32(t0, val);
-    return t0;
-}
-
-TCGv_i64 tcg_const_local_i64(int64_t val)
-{
-    TCGv_i64 t0;
-    t0 = tcg_temp_local_new_i64();
-    tcg_gen_movi_i64(t0, val);
-    return t0;
-}
-
 #if defined(CONFIG_DEBUG_TCG)
 void tcg_clear_temp_count(void)
 {
@@ -1866,7 +1851,7 @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
         case TCG_CALL_ARG_EXTEND_U:
         case TCG_CALL_ARG_EXTEND_S:
             {
-                TCGv_i64 temp = tcg_temp_new_i64();
+                TCGv_i64 temp = tcg_temp_ebb_new_i64();
                 TCGv_i32 orig = temp_tcgv_i32(ts);
 
                 if (loc->kind == TCG_CALL_ARG_EXTEND_S) {
@@ -1912,11 +1897,10 @@ static void tcg_reg_alloc_start(TCGContext *s)
             break;
         case TEMP_GLOBAL:
             break;
-        case TEMP_NORMAL:
         case TEMP_EBB:
             val = TEMP_VAL_DEAD;
             /* fall through */
-        case TEMP_LOCAL:
+        case TEMP_TB:
             ts->mem_allocated = 0;
             break;
         default:
@@ -1938,13 +1922,10 @@ static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
     case TEMP_GLOBAL:
         pstrcpy(buf, buf_size, ts->name);
         break;
-    case TEMP_LOCAL:
+    case TEMP_TB:
         snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
         break;
     case TEMP_EBB:
-        snprintf(buf, buf_size, "ebb%d", idx - s->nb_globals);
-        break;
-    case TEMP_NORMAL:
         snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
         break;
     case TEMP_CONST:
@@ -2637,9 +2618,10 @@ TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op,
 }
 
 /* Reachable analysis : remove unreachable code.  */
-static void reachable_code_pass(TCGContext *s)
+static void __attribute__((noinline))
+reachable_code_pass(TCGContext *s)
 {
-    TCGOp *op, *op_next;
+    TCGOp *op, *op_next, *op_prev;
     bool dead = false;
 
     QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
@@ -2649,6 +2631,22 @@ static void reachable_code_pass(TCGContext *s)
         switch (op->opc) {
         case INDEX_op_set_label:
             label = arg_label(op->args[0]);
+
+            /*
+             * Optimization can fold conditional branches to unconditional.
+             * If we find a label which is preceded by an unconditional
+             * branch to next, remove the branch.  We couldn't do this when
+             * processing the branch because any dead code between the branch
+             * and label had not yet been removed.
+             */
+            op_prev = QTAILQ_PREV(op, link);
+            if (op_prev->opc == INDEX_op_br &&
+                label == arg_label(op_prev->args[0])) {
+                tcg_op_remove(s, op_prev);
+                /* Fall through means insns become live again.  */
+                dead = false;
+            }
+
             if (label->refs == 0) {
                 /*
                  * While there is an occasional backward branch, virtually
@@ -2662,21 +2660,6 @@ static void reachable_code_pass(TCGContext *s)
                 /* Once we see a label, insns become live again.  */
                 dead = false;
                 remove = false;
-
-                /*
-                 * Optimization can fold conditional branches to unconditional.
-                 * If we find a label with one reference which is preceded by
-                 * an unconditional branch to it, remove both.  This needed to
-                 * wait until the dead code in between them was removed.
-                 */
-                if (label->refs == 1) {
-                    TCGOp *op_prev = QTAILQ_PREV(op, link);
-                    if (op_prev->opc == INDEX_op_br &&
-                        label == arg_label(op_prev->args[0])) {
-                        tcg_op_remove(s, op_prev);
-                        remove = true;
-                    }
-                }
             }
             break;
 
@@ -2759,10 +2742,9 @@ static void la_bb_end(TCGContext *s, int ng, int nt)
         switch (ts->kind) {
         case TEMP_FIXED:
         case TEMP_GLOBAL:
-        case TEMP_LOCAL:
+        case TEMP_TB:
            state = TS_DEAD | TS_MEM;
             break;
-        case TEMP_NORMAL:
         case TEMP_EBB:
         case TEMP_CONST:
             state = TS_DEAD;
@@ -2804,16 +2786,13 @@ static void la_bb_sync(TCGContext *s, int ng, int nt)
         int state;
 
         switch (ts->kind) {
-        case TEMP_LOCAL:
+        case TEMP_TB:
             state = ts->state;
             ts->state = state | TS_MEM;
             if (state != TS_DEAD) {
                 continue;
             }
             break;
-        case TEMP_NORMAL:
-            s->temps[i].state = TS_DEAD;
-            break;
         case TEMP_EBB:
         case TEMP_CONST:
             continue;
@@ -2857,10 +2836,80 @@ static void la_cross_call(TCGContext *s, int nt)
     }
 }
 
+/*
+ * Liveness analysis: Verify the lifetime of TEMP_TB, and reduce
+ * to TEMP_EBB, if possible.
+ */
+static void __attribute__((noinline))
+liveness_pass_0(TCGContext *s)
+{
+    void * const multiple_ebb = (void *)(uintptr_t)-1;
+    int nb_temps = s->nb_temps;
+    TCGOp *op, *ebb;
+
+    for (int i = s->nb_globals; i < nb_temps; ++i) {
+        s->temps[i].state_ptr = NULL;
+    }
+
+    /*
+     * Represent each EBB by the op at which it begins.  In the case of
+     * the first EBB, this is the first op, otherwise it is a label.
+     * Collect the uses of each TEMP_TB: NULL for unused, EBB for use
+     * within a single EBB, else MULTIPLE_EBB.
+     */
+    ebb = QTAILQ_FIRST(&s->ops);
+    QTAILQ_FOREACH(op, &s->ops, link) {
+        const TCGOpDef *def;
+        int nb_oargs, nb_iargs;
+
+        switch (op->opc) {
+        case INDEX_op_set_label:
+            ebb = op;
+            continue;
+        case INDEX_op_discard:
+            continue;
+        case INDEX_op_call:
+            nb_oargs = TCGOP_CALLO(op);
+            nb_iargs = TCGOP_CALLI(op);
+            break;
+        default:
+            def = &tcg_op_defs[op->opc];
+            nb_oargs = def->nb_oargs;
+            nb_iargs = def->nb_iargs;
+            break;
+        }
+
+        for (int i = 0; i < nb_oargs + nb_iargs; ++i) {
+            TCGTemp *ts = arg_temp(op->args[i]);
+
+            if (ts->kind != TEMP_TB) {
+                continue;
+            }
+            if (ts->state_ptr == NULL) {
+                ts->state_ptr = ebb;
+            } else if (ts->state_ptr != ebb) {
+                ts->state_ptr = multiple_ebb;
+            }
+        }
+    }
+
+    /*
+     * For TEMP_TB that turned out not to be used beyond one EBB,
+     * reduce the liveness to TEMP_EBB.
+     */
+    for (int i = s->nb_globals; i < nb_temps; ++i) {
+        TCGTemp *ts = &s->temps[i];
+        if (ts->kind == TEMP_TB && ts->state_ptr != multiple_ebb) {
+            ts->kind = TEMP_EBB;
+        }
+    }
+}
+
 /* Liveness analysis : update the opc_arg_life array to tell if a
    given input arguments is dead. Instructions updating dead
    temporaries are removed. */
-static void liveness_pass_1(TCGContext *s)
+static void __attribute__((noinline))
+liveness_pass_1(TCGContext *s)
 {
     int nb_globals = s->nb_globals;
     int nb_temps = s->nb_temps;
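The new liveness_pass_0 walks the op stream once, tagging each TEMP_TB temp with the EBB (label op) in which it is used; a temp seen in two different EBBs is marked multiple_ebb and keeps its TB lifetime, while anything else is demoted to the cheaper TEMP_EBB. A hypothetical translator fragment (cond_reg and dest_reg are made-up names) showing a temp that must stay TEMP_TB:

    TCGv_i32 t = tcg_temp_new_i32();          /* TEMP_TB */
    TCGLabel *over = gen_new_label();

    tcg_gen_movi_i32(t, 1);                   /* use in the first EBB */
    tcg_gen_brcondi_i32(TCG_COND_EQ, cond_reg, 0, over);
    tcg_gen_movi_i32(t, 2);
    gen_set_label(over);                      /* a new EBB begins here */
    tcg_gen_mov_i32(dest_reg, t);             /* second EBB: marked multiple_ebb */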
@@ -3200,7 +3249,8 @@ static void liveness_pass_1(TCGContext *s)
 }
 
 /* Liveness analysis: Convert indirect regs to direct temporaries.  */
-static bool liveness_pass_2(TCGContext *s)
+static bool __attribute__((noinline))
+liveness_pass_2(TCGContext *s)
 {
     int nb_globals = s->nb_globals;
     int nb_temps, i;
@@ -3497,10 +3547,9 @@ static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
     case TEMP_FIXED:
         return;
     case TEMP_GLOBAL:
-    case TEMP_LOCAL:
+    case TEMP_TB:
         new_type = TEMP_VAL_MEM;
         break;
-    case TEMP_NORMAL:
     case TEMP_EBB:
         new_type = free_or_dead < 0 ? TEMP_VAL_MEM : TEMP_VAL_DEAD;
         break;
@@ -3785,10 +3834,9 @@ static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
         TCGTemp *ts = &s->temps[i];
 
         switch (ts->kind) {
-        case TEMP_LOCAL:
+        case TEMP_TB:
             temp_save(s, ts, allocated_regs);
             break;
-        case TEMP_NORMAL:
         case TEMP_EBB:
             /* The liveness analysis already ensures that temps are dead.
                Keep an tcg_debug_assert for safety. */
@@ -3822,12 +3870,9 @@ static void tcg_reg_alloc_cbranch(TCGContext *s, TCGRegSet allocated_regs)
          * Keep tcg_debug_asserts for safety.
          */
         switch (ts->kind) {
-        case TEMP_LOCAL:
+        case TEMP_TB:
             tcg_debug_assert(ts->val_type != TEMP_VAL_REG || ts->mem_coherent);
             break;
-        case TEMP_NORMAL:
-            tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
-            break;
         case TEMP_EBB:
         case TEMP_CONST:
             break;
@@ -4870,6 +4915,7 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start)
 #endif
 
     reachable_code_pass(s);
+    liveness_pass_0(s);
     liveness_pass_1(s);
 
     if (s->nb_indirects > 0) {