Diffstat (limited to 'tcg/aarch64/tcg-target.c.inc')
| -rw-r--r-- | tcg/aarch64/tcg-target.c.inc | 32 |
1 file changed, 11 insertions, 21 deletions
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index ab199b143f..23954ec7cf 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -857,14 +857,14 @@ static void tcg_out_logicali(TCGContext *s, AArch64Insn insn, TCGType ext,
     tcg_out_insn_3404(s, insn, ext, rd, rn, ext, r, c);
 }
 
-static void tcg_out_dupi_vec(TCGContext *s, TCGType type,
-                             TCGReg rd, tcg_target_long v64)
+static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
+                             TCGReg rd, int64_t v64)
 {
     bool q = type == TCG_TYPE_V128;
     int cmode, imm8, i;
 
     /* Test all bytes equal first. */
-    if (v64 == dup_const(MO_8, v64)) {
+    if (vece == MO_8) {
         imm8 = (uint8_t)v64;
         tcg_out_insn(s, 3606, MOVI, q, rd, 0, 0xe, imm8);
         return;
@@ -891,7 +891,7 @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type,
      * cannot find an expansion there's no point checking a larger
      * width because we already know by replication it cannot match.
      */
-    if (v64 == dup_const(MO_16, v64)) {
+    if (vece == MO_16) {
         uint16_t v16 = v64;
 
         if (is_shimm16(v16, &cmode, &imm8)) {
@@ -910,7 +910,7 @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type,
         tcg_out_insn(s, 3606, MOVI, q, rd, 0, 0x8, v16 & 0xff);
         tcg_out_insn(s, 3606, ORR, q, rd, 0, 0xa, v16 >> 8);
         return;
-    } else if (v64 == dup_const(MO_32, v64)) {
+    } else if (vece == MO_32) {
         uint32_t v32 = v64;
         uint32_t n32 = ~v32;
 
@@ -1011,13 +1011,6 @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
     case TCG_TYPE_I64:
         tcg_debug_assert(rd < 32);
         break;
-
-    case TCG_TYPE_V64:
-    case TCG_TYPE_V128:
-        tcg_debug_assert(rd >= 32);
-        tcg_out_dupi_vec(s, type, rd, value);
-        return;
-
     default:
         g_assert_not_reached();
     }
@@ -2264,8 +2257,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov. */
     case INDEX_op_mov_i64:
-    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
-    case INDEX_op_movi_i64:
     case INDEX_op_call:     /* Always emitted via tcg_out_call. */
     default:
         g_assert_not_reached();
@@ -2442,7 +2433,7 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
             tcg_out_insn_3617(s, insn, is_q, vece, a0, a1);
             break;
         }
-        tcg_out_dupi_vec(s, type, TCG_VEC_TMP, 0);
+        tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0);
         a2 = TCG_VEC_TMP;
     }
     insn = cmp_insn[cond];
@@ -2473,7 +2464,6 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
         break;
 
     case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov. */
-    case INDEX_op_dupi_vec: /* Always emitted via tcg_out_movi. */
     case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec. */
     default:
         g_assert_not_reached();
@@ -2526,7 +2516,7 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
                        TCGArg a0, ...)
 {
     va_list va;
-    TCGv_vec v0, v1, v2, t1, t2;
+    TCGv_vec v0, v1, v2, t1, t2, c1;
     TCGArg a2;
 
     va_start(va, a0);
@@ -2558,8 +2548,8 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
 
     case INDEX_op_rotlv_vec:
         t1 = tcg_temp_new_vec(type);
-        tcg_gen_dupi_vec(vece, t1, 8 << vece);
-        tcg_gen_sub_vec(vece, t1, v2, t1);
+        c1 = tcg_constant_vec(type, vece, 8 << vece);
+        tcg_gen_sub_vec(vece, t1, v2, c1);
         /* Right shifts are negative left shifts for AArch64. */
         vec_gen_3(INDEX_op_shlv_vec, type, vece, tcgv_vec_arg(t1),
                   tcgv_vec_arg(v1), tcgv_vec_arg(t1));
@@ -2572,9 +2562,9 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
     case INDEX_op_rotrv_vec:
         t1 = tcg_temp_new_vec(type);
         t2 = tcg_temp_new_vec(type);
+        c1 = tcg_constant_vec(type, vece, 8 << vece);
        tcg_gen_neg_vec(vece, t1, v2);
-        tcg_gen_dupi_vec(vece, t2, 8 << vece);
-        tcg_gen_add_vec(vece, t2, t1, t2);
+        tcg_gen_sub_vec(vece, t2, c1, v2);
         /* Right shifts are negative left shifts for AArch64. */
         vec_gen_3(INDEX_op_shlv_vec, type, vece, tcgv_vec_arg(t1),
                   tcgv_vec_arg(v1), tcgv_vec_arg(t1));
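The rotate-by-vector expansions in the last two hunks rely on the AArch64 convention noted in the code comments: SHL by a negative per-lane count is a right shift. rotl(x, n) is built as (x << n) | (x >> (width - n)), with the right-shift half expressed as a left shift by n - width, which is negative. A minimal scalar sketch (not part of the patch; shlv8 and rotlv8 are made-up names for illustration) checking that this expansion is a genuine rotate for MO_8 lanes:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Model of AArch64 SHL-by-vector on one 8-bit lane: a negative shift
 * count shifts right; counts of magnitude >= 8 shift everything out. */
static uint8_t shlv8(uint8_t x, int n)
{
    if (n >= 8 || n <= -8) {
        return 0;
    }
    return n >= 0 ? (uint8_t)(x << n) : (uint8_t)(x >> -n);
}

/* The rotlv_vec expansion above, scalarized: t1 = v2 - 8 is always
 * negative for v2 in 0..7, so the second shlv is a right shift and
 * the result is (v1 << v2) | (v1 >> (8 - v2)). */
static uint8_t rotlv8(uint8_t v1, uint8_t v2)
{
    int t1 = (int)v2 - 8;
    return (uint8_t)(shlv8(v1, v2) | shlv8(v1, t1));
}

int main(void)
{
    for (unsigned x = 0; x < 256; x++) {
        for (unsigned n = 0; n < 8; n++) {
            /* x is promoted to int, so x >> 8 is well-defined and 0. */
            uint8_t ref = (uint8_t)((x << n) | (x >> (8 - n)));
            assert(rotlv8((uint8_t)x, (uint8_t)n) == ref);
        }
    }
    puts("rotlv expansion matches a plain 8-bit rotate");
    return 0;
}

The rest of the patch is the mechanical side of the same change: once constants arrive via tcg_constant_vec, the element size vece is known at the call site, so tcg_out_dupi_vec can test vece == MO_8 directly instead of probing v64 == dup_const(MO_8, v64), and the INDEX_op_movi_* / INDEX_op_dupi_vec cases disappear from the opcode switches.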