author     Richard Henderson <richard.henderson@linaro.org>    2025-01-21 21:47:16 -0800
committer  Richard Henderson <richard.henderson@linaro.org>    2025-04-28 13:40:17 -0700
commit     e996804d40c10572550a1d3ca936a5dfb29ca0fc (patch)
tree       9c46c615af55a3487c47b6523922f48ed32a61a2 /tcg/optimize.c
parent     0de5c9d1f56332554c48152f535b47a1a0c2af7b (diff)
tcg: Merge INDEX_op_ld*_{i32,i64}
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Diffstat (limited to 'tcg/optimize.c')
-rw-r--r--  tcg/optimize.c | 27 +++++++++++++--------------
1 file changed, 13 insertions(+), 14 deletions(-)
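For context on the folding logic this patch touches: as the comment in the first hunk below notes, the optimizer cannot fold a load away, but it can record which result bits are known, a sign mask for sign-extending loads and a zero mask for zero-extending ones. The following self-contained sketch (not QEMU code; the enum and function names are illustrative stand-ins, though MAKE_64BIT_MASK mirrors QEMU's macro of the same name) shows that width-dependent bookkeeping:

    /*
     * Illustrative sketch only -- not QEMU code.  The enum and function
     * below are hypothetical stand-ins; MAKE_64BIT_MASK mirrors the QEMU
     * helper of the same name.
     */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAKE_64BIT_MASK(shift, length) \
        (((~0ULL) >> (64 - (length))) << (shift))

    enum load_width { LD8S, LD8U, LD16S, LD16U, LD32S, LD32U };

    /* Record what a load is known to produce: s_mask marks bits that are
       copies of the sign bit, z_mask marks bits that may be non-zero. */
    static void record_load_bits(enum load_width w,
                                 int64_t *s_mask, uint64_t *z_mask)
    {
        *s_mask = 0;          /* default: no known sign bits */
        *z_mask = ~0ULL;      /* default: any bit may be set */

        switch (w) {
        case LD8S:  *s_mask = INT8_MIN;               break;
        case LD8U:  *z_mask = MAKE_64BIT_MASK(0, 8);  break;
        case LD16S: *s_mask = INT16_MIN;              break;
        case LD16U: *z_mask = MAKE_64BIT_MASK(0, 16); break;
        case LD32S: *s_mask = INT32_MIN;              break;
        case LD32U: *z_mask = MAKE_64BIT_MASK(0, 32); break;
        }
    }

    int main(void)
    {
        int64_t s;
        uint64_t z;

        record_load_bits(LD16U, &s, &z);
        printf("ld16u: z_mask = 0x%016" PRIx64 "\n", z);  /* 0x000000000000ffff */
        return 0;
    }

The patch itself does not change this bookkeeping; it only renames the cases. Because the merged load opcodes no longer encode a register-width suffix, the switch can drop the CASE_OP_32_64() pairs and the _i32/_i64 variants in favor of a single case per load width, as the diff shows.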
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 52e194aaa9..d928a38e14 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -2880,22 +2880,22 @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
 
     /* We can't do any folding with a load, but we can record bits. */
     switch (op->opc) {
-    CASE_OP_32_64(ld8s):
+    case INDEX_op_ld8s:
         s_mask = INT8_MIN;
         break;
-    CASE_OP_32_64(ld8u):
+    case INDEX_op_ld8u:
         z_mask = MAKE_64BIT_MASK(0, 8);
         break;
-    CASE_OP_32_64(ld16s):
+    case INDEX_op_ld16s:
         s_mask = INT16_MIN;
         break;
-    CASE_OP_32_64(ld16u):
+    case INDEX_op_ld16u:
         z_mask = MAKE_64BIT_MASK(0, 16);
         break;
-    case INDEX_op_ld32s_i64:
+    case INDEX_op_ld32s:
         s_mask = INT32_MIN;
         break;
-    case INDEX_op_ld32u_i64:
+    case INDEX_op_ld32u:
         z_mask = MAKE_64BIT_MASK(0, 32);
         break;
     default:
@@ -3126,16 +3126,15 @@ void tcg_optimize(TCGContext *s)
         case INDEX_op_extrh_i64_i32:
             done = fold_extu(&ctx, op);
             break;
-        CASE_OP_32_64(ld8s):
-        CASE_OP_32_64(ld8u):
-        CASE_OP_32_64(ld16s):
-        CASE_OP_32_64(ld16u):
-        case INDEX_op_ld32s_i64:
-        case INDEX_op_ld32u_i64:
+        case INDEX_op_ld8s:
+        case INDEX_op_ld8u:
+        case INDEX_op_ld16s:
+        case INDEX_op_ld16u:
+        case INDEX_op_ld32s:
+        case INDEX_op_ld32u:
             done = fold_tcg_ld(&ctx, op);
             break;
-        case INDEX_op_ld_i32:
-        case INDEX_op_ld_i64:
+        case INDEX_op_ld:
         case INDEX_op_ld_vec:
             done = fold_tcg_ld_memcopy(&ctx, op);
             break;