author    Richard Henderson <richard.henderson@linaro.org>  2025-01-06 09:11:39 -0800
committer Richard Henderson <richard.henderson@linaro.org>  2025-04-28 13:40:15 -0700
commit    79602f632a20228fc963161cd53f7d5f6a3bd953 (patch)
tree      84cd79d9169a7297920061c95b708fbd23b4c6ad /tcg/optimize.c
parent    662cdbcf0073c4de8456f5e573523571e3d9da5b (diff)
tcg: Merge INDEX_op_add_{i32,i64}
Rely on TCGOP_TYPE instead of opcodes specific to each type.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
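
The merged opcode carries its operand width in the op's type rather than in the opcode name. Below is a minimal standalone sketch of that idea; all Model* names are invented for illustration, and only INDEX_op_add, TCGOP_TYPE, and TCG_TYPE_{I32,I64} come from the patch itself:

    #include <inttypes.h>
    #include <stdio.h>

    /*
     * Hypothetical model, not the QEMU source: a single type-agnostic
     * add opcode whose operand width lives in a per-op type field,
     * mirroring how TCGOP_TYPE(op) lets INDEX_op_add replace
     * INDEX_op_add_{i32,i64}.
     */
    typedef enum { MODEL_TYPE_I32, MODEL_TYPE_I64 } ModelType;
    typedef enum { MODEL_op_add } ModelOpcode;

    typedef struct {
        ModelOpcode opc;
        ModelType type;             /* stands in for TCGOP_TYPE(op) */
    } ModelOp;

    static uint64_t model_fold(const ModelOp *op, uint64_t x, uint64_t y)
    {
        switch (op->opc) {
        case MODEL_op_add:
            /* One case serves both widths; only the final truncation
               consults the op's type. */
            return op->type == MODEL_TYPE_I32 ? (uint32_t)(x + y) : x + y;
        }
        return 0;
    }

    int main(void)
    {
        ModelOp op32 = { MODEL_op_add, MODEL_TYPE_I32 };
        ModelOp op64 = { MODEL_op_add, MODEL_TYPE_I64 };
        printf("%" PRIx64 "\n", model_fold(&op32, 0xffffffffull, 1)); /* 0 */
        printf("%" PRIx64 "\n", model_fold(&op64, 0xffffffffull, 1)); /* 100000000 */
        return 0;
    }

This is why the hunks below can collapse the CASE_OP_32_64(add) pairs into a single case INDEX_op_add.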
Diffstat (limited to 'tcg/optimize.c')
-rw-r--r--  tcg/optimize.c | 13 +++++--------
1 file changed, 5 insertions(+), 8 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 8d5bad07aa..a53e4f4675 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -424,7 +424,7 @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
     uint64_t l64, h64;
 
     switch (op) {
-    CASE_OP_32_64(add):
+    case INDEX_op_add:
         return x + y;
 
     CASE_OP_32_64(sub):
@@ -2261,7 +2261,7 @@ static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
             break;
         }
         if (convert) {
-            TCGOpcode add_opc, xor_opc, neg_opc;
+            TCGOpcode xor_opc, neg_opc;
 
             if (!inv && !neg) {
                 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
@@ -2269,12 +2269,10 @@ static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
 
             switch (ctx->type) {
             case TCG_TYPE_I32:
-                add_opc = INDEX_op_add_i32;
                 neg_opc = INDEX_op_neg_i32;
                 xor_opc = INDEX_op_xor_i32;
                 break;
             case TCG_TYPE_I64:
-                add_opc = INDEX_op_add_i64;
                 neg_opc = INDEX_op_neg_i64;
                 xor_opc = INDEX_op_xor_i64;
                 break;
@@ -2285,7 +2283,7 @@ static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
             if (!inv) {
                 op->opc = neg_opc;
             } else if (neg) {
-                op->opc = add_opc;
+                op->opc = INDEX_op_add;
                 op->args[2] = arg_new_constant(ctx, -1);
             } else {
                 op->opc = xor_opc;
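
In the inv && neg case above, a setcond result v is known to be 0 or 1, and the negated inverse satisfies -(v ^ 1) == v + (-1) in two's complement, which is why a single INDEX_op_add with constant -1 suffices. A throwaway check of the identity (hypothetical test, not QEMU code):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* setcond produces v in {0, 1}; negating its inverse is the
           same as adding -1, the rewrite used for inv && neg. */
        for (uint64_t v = 0; v <= 1; v++) {
            assert((uint64_t)-(v ^ 1) == v + (uint64_t)-1);
        }
        return 0;
    }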
@@ -2650,8 +2648,7 @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
     if (arg_is_const(op->args[2])) {
         uint64_t val = arg_info(op->args[2])->val;
 
-        op->opc = (ctx->type == TCG_TYPE_I32
-                   ? INDEX_op_add_i32 : INDEX_op_add_i64);
+        op->opc = INDEX_op_add;
         op->args[2] = arg_new_constant(ctx, -val);
     }
     return finish_folding(ctx, op);
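
The fold_sub rewrite leans on the wrapping identity x - c == x + (-c) for uint64_t, so subtraction by a constant becomes the merged add opcode with the negated constant. A quick standalone check (hypothetical, not QEMU code):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* x - c == x + (-c) under wrapping uint64_t arithmetic; this
           is the arg_new_constant(ctx, -val) rewrite in fold_sub. */
        uint64_t x = 0x123456789abcdef0ull, c = 42;
        assert(x - c == x + (uint64_t)-c);
        return 0;
    }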
@@ -2842,7 +2839,7 @@ void tcg_optimize(TCGContext *s)
          * Sorted alphabetically by opcode as much as possible.
          */
         switch (opc) {
-        CASE_OP_32_64(add):
+        case INDEX_op_add:
             done = fold_add(&ctx, op);
             break;
         case INDEX_op_add_vec: