From b701f195d3ed39429fab5787df7c6b1c9377ab94 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Wed, 25 Oct 2023 21:14:04 -0700
Subject: tcg: Remove TCG_TARGET_HAS_neg_{i32,i64}

The neg opcode is now mandatory for backends to implement.

Signed-off-by: Richard Henderson
Message-Id: <20231026041404.1229328-7-richard.henderson@linaro.org>
---
 tcg/optimize.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'tcg/optimize.c')

diff --git a/tcg/optimize.c b/tcg/optimize.c
index 2db5177c32..6b072d4cdb 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -2001,11 +2001,11 @@ static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
     switch (ctx->type) {
     case TCG_TYPE_I32:
         neg_op = INDEX_op_neg_i32;
-        have_neg = TCG_TARGET_HAS_neg_i32;
+        have_neg = true;
         break;
     case TCG_TYPE_I64:
         neg_op = INDEX_op_neg_i64;
-        have_neg = TCG_TARGET_HAS_neg_i64;
+        have_neg = true;
         break;
     case TCG_TYPE_V64:
     case TCG_TYPE_V128:
--
cgit 1.4.1

From 986cac1d2a773f6b2d8f1051504a8af512688cbd Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Mon, 9 Jan 2023 13:59:35 -0800
Subject: tcg/optimize: Pipe OptContext into reset_ts
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Will be needed in the next patch.

Reviewed-by: Song Gao
Reviewed-by: Philippe Mathieu-Daudé
Signed-off-by: Richard Henderson
---
 tcg/optimize.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

(limited to 'tcg/optimize.c')

diff --git a/tcg/optimize.c b/tcg/optimize.c
index 6b072d4cdb..cbb095b241 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -123,7 +123,7 @@ static inline bool ts_is_copy(TCGTemp *ts)
 }
 
 /* Reset TEMP's state, possibly removing the temp for the list of copies. */
-static void reset_ts(TCGTemp *ts)
+static void reset_ts(OptContext *ctx, TCGTemp *ts)
 {
     TempOptInfo *ti = ts_info(ts);
     TempOptInfo *pi = ts_info(ti->prev_copy);
@@ -138,9 +138,9 @@ static void reset_ts(TCGTemp *ts)
     ti->s_mask = 0;
 }
 
-static void reset_temp(TCGArg arg)
+static void reset_temp(OptContext *ctx, TCGArg arg)
 {
-    reset_ts(arg_temp(arg));
+    reset_ts(ctx, arg_temp(arg));
 }
 
 /* Initialize and activate a temporary. */
@@ -239,7 +239,7 @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
         return true;
     }
 
-    reset_ts(dst_ts);
+    reset_ts(ctx, dst_ts);
     di = ts_info(dst_ts);
     si = ts_info(src_ts);
@@ -702,7 +702,7 @@ static void finish_folding(OptContext *ctx, TCGOp *op)
     nb_oargs = def->nb_oargs;
     for (i = 0; i < nb_oargs; i++) {
         TCGTemp *ts = arg_temp(op->args[i]);
-        reset_ts(ts);
+        reset_ts(ctx, ts);
         /*
          * Save the corresponding known-zero/sign bits mask for the
          * first output argument (only one supported so far).
@@ -1215,14 +1215,14 @@ static bool fold_call(OptContext *ctx, TCGOp *op)
 
         for (i = 0; i < nb_globals; i++) {
             if (test_bit(i, ctx->temps_used.l)) {
-                reset_ts(&ctx->tcg->temps[i]);
+                reset_ts(ctx, &ctx->tcg->temps[i]);
             }
         }
     }
 
     /* Reset temp data for outputs. */
     for (i = 0; i < nb_oargs; i++) {
-        reset_temp(op->args[i]);
+        reset_temp(ctx, op->args[i]);
     }
 
     /* Stop optimizing MB across calls. */
--
cgit 1.4.1

From 9f75e52828a967e6c95eb73e726ef1eff4c05496 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Thu, 2 Nov 2023 13:37:46 -0700
Subject: tcg/optimize: Split out cmp_better_copy

Compare two temps for "better", split out from finding the best from a
whole list.  Use TCGKind, which already gives the proper priority.
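(As a reading aid, not part of the commit text: the comparison added in the
diff below relies on the TCGTempKind enumeration order in tcg/tcg.h at this
point in history, TEMP_EBB < TEMP_TB < TEMP_GLOBAL < TEMP_FIXED < TEMP_CONST,
so "a->kind < b->kind ? b : a" selects the longer-lived copy.  A minimal
standalone sketch of that priority rule, with stand-in names:

    /* Stand-in for TCGTempKind; order assumed to match tcg/tcg.h here. */
    typedef enum { TEMP_EBB, TEMP_TB, TEMP_GLOBAL, TEMP_FIXED, TEMP_CONST } Kind;

    /* Same shape as cmp_better_copy: prefer the longer-lived temp. */
    static Kind better(Kind a, Kind b) { return a < b ? b : a; }

Under that ordering a constant copy beats a global, which beats a TB-local
temp, which beats an EBB temp.)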
Signed-off-by: Richard Henderson --- tcg/optimize.c | 29 +++++++++++------------------ 1 file changed, 11 insertions(+), 18 deletions(-) (limited to 'tcg/optimize.c') diff --git a/tcg/optimize.c b/tcg/optimize.c index cbb095b241..118561f56d 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -122,6 +122,11 @@ static inline bool ts_is_copy(TCGTemp *ts) return ts_info(ts)->next_copy != ts; } +static TCGTemp *cmp_better_copy(TCGTemp *a, TCGTemp *b) +{ + return a->kind < b->kind ? b : a; +} + /* Reset TEMP's state, possibly removing the temp for the list of copies. */ static void reset_ts(OptContext *ctx, TCGTemp *ts) { @@ -174,30 +179,20 @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts) } } -static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts) +static TCGTemp *find_better_copy(TCGTemp *ts) { - TCGTemp *i, *g, *l; + TCGTemp *i, *ret; /* If this is already readonly, we can't do better. */ if (temp_readonly(ts)) { return ts; } - g = l = NULL; + ret = ts; for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) { - if (temp_readonly(i)) { - return i; - } else if (i->kind > ts->kind) { - if (i->kind == TEMP_GLOBAL) { - g = i; - } else if (i->kind == TEMP_TB) { - l = i; - } - } + ret = cmp_better_copy(ret, i); } - - /* If we didn't find a better representation, return the same temp. */ - return g ? g : l ? l : ts; + return ret; } static bool ts_are_copies(TCGTemp *ts1, TCGTemp *ts2) @@ -672,12 +667,10 @@ static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args) static void copy_propagate(OptContext *ctx, TCGOp *op, int nb_oargs, int nb_iargs) { - TCGContext *s = ctx->tcg; - for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) { TCGTemp *ts = arg_temp(op->args[i]); if (ts_is_copy(ts)) { - op->args[i] = temp_arg(find_better_copy(s, ts)); + op->args[i] = temp_arg(find_better_copy(ts)); } } } -- cgit 1.4.1 From ab84dc398b3b702b0c692538b947ef65dbbdf52f Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Wed, 23 Aug 2023 23:04:24 -0700 Subject: tcg/optimize: Optimize env memory operations Propagate stores to loads, loads to loads. Reviewed-by: Song Gao Signed-off-by: Richard Henderson --- tcg/optimize.c | 264 ++++++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 243 insertions(+), 21 deletions(-) (limited to 'tcg/optimize.c') diff --git a/tcg/optimize.c b/tcg/optimize.c index 118561f56d..b32ef0be0f 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -25,6 +25,7 @@ #include "qemu/osdep.h" #include "qemu/int128.h" +#include "qemu/interval-tree.h" #include "tcg/tcg-op-common.h" #include "tcg-internal.h" @@ -37,10 +38,18 @@ glue(glue(case INDEX_op_, x), _i64): \ glue(glue(case INDEX_op_, x), _vec) +typedef struct MemCopyInfo { + IntervalTreeNode itree; + QSIMPLEQ_ENTRY (MemCopyInfo) next; + TCGTemp *ts; + TCGType type; +} MemCopyInfo; + typedef struct TempOptInfo { bool is_const; TCGTemp *prev_copy; TCGTemp *next_copy; + QSIMPLEQ_HEAD(, MemCopyInfo) mem_copy; uint64_t val; uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */ uint64_t s_mask; /* a left-aligned mask of clrsb(value) bits. */ @@ -51,6 +60,9 @@ typedef struct OptContext { TCGOp *prev_mb; TCGTempSet temps_used; + IntervalTreeRoot mem_copy; + QSIMPLEQ_HEAD(, MemCopyInfo) mem_free; + /* In flight values from optimization. */ uint64_t a_mask; /* mask bit is 0 iff value identical to first input */ uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */ @@ -127,27 +139,6 @@ static TCGTemp *cmp_better_copy(TCGTemp *a, TCGTemp *b) return a->kind < b->kind ? 
b : a; } -/* Reset TEMP's state, possibly removing the temp for the list of copies. */ -static void reset_ts(OptContext *ctx, TCGTemp *ts) -{ - TempOptInfo *ti = ts_info(ts); - TempOptInfo *pi = ts_info(ti->prev_copy); - TempOptInfo *ni = ts_info(ti->next_copy); - - ni->prev_copy = ti->prev_copy; - pi->next_copy = ti->next_copy; - ti->next_copy = ts; - ti->prev_copy = ts; - ti->is_const = false; - ti->z_mask = -1; - ti->s_mask = 0; -} - -static void reset_temp(OptContext *ctx, TCGArg arg) -{ - reset_ts(ctx, arg_temp(arg)); -} - /* Initialize and activate a temporary. */ static void init_ts_info(OptContext *ctx, TCGTemp *ts) { @@ -167,6 +158,7 @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts) ti->next_copy = ts; ti->prev_copy = ts; + QSIMPLEQ_INIT(&ti->mem_copy); if (ts->kind == TEMP_CONST) { ti->is_const = true; ti->val = ts->val; @@ -179,6 +171,45 @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts) } } +static MemCopyInfo *mem_copy_first(OptContext *ctx, intptr_t s, intptr_t l) +{ + IntervalTreeNode *r = interval_tree_iter_first(&ctx->mem_copy, s, l); + return r ? container_of(r, MemCopyInfo, itree) : NULL; +} + +static MemCopyInfo *mem_copy_next(MemCopyInfo *mem, intptr_t s, intptr_t l) +{ + IntervalTreeNode *r = interval_tree_iter_next(&mem->itree, s, l); + return r ? container_of(r, MemCopyInfo, itree) : NULL; +} + +static void remove_mem_copy(OptContext *ctx, MemCopyInfo *mc) +{ + TCGTemp *ts = mc->ts; + TempOptInfo *ti = ts_info(ts); + + interval_tree_remove(&mc->itree, &ctx->mem_copy); + QSIMPLEQ_REMOVE(&ti->mem_copy, mc, MemCopyInfo, next); + QSIMPLEQ_INSERT_TAIL(&ctx->mem_free, mc, next); +} + +static void remove_mem_copy_in(OptContext *ctx, intptr_t s, intptr_t l) +{ + while (true) { + MemCopyInfo *mc = mem_copy_first(ctx, s, l); + if (!mc) { + break; + } + remove_mem_copy(ctx, mc); + } +} + +static void remove_mem_copy_all(OptContext *ctx) +{ + remove_mem_copy_in(ctx, 0, -1); + tcg_debug_assert(interval_tree_is_empty(&ctx->mem_copy)); +} + static TCGTemp *find_better_copy(TCGTemp *ts) { TCGTemp *i, *ret; @@ -195,6 +226,80 @@ static TCGTemp *find_better_copy(TCGTemp *ts) return ret; } +static void move_mem_copies(TCGTemp *dst_ts, TCGTemp *src_ts) +{ + TempOptInfo *si = ts_info(src_ts); + TempOptInfo *di = ts_info(dst_ts); + MemCopyInfo *mc; + + QSIMPLEQ_FOREACH(mc, &si->mem_copy, next) { + tcg_debug_assert(mc->ts == src_ts); + mc->ts = dst_ts; + } + QSIMPLEQ_CONCAT(&di->mem_copy, &si->mem_copy); +} + +/* Reset TEMP's state, possibly removing the temp for the list of copies. */ +static void reset_ts(OptContext *ctx, TCGTemp *ts) +{ + TempOptInfo *ti = ts_info(ts); + TCGTemp *pts = ti->prev_copy; + TCGTemp *nts = ti->next_copy; + TempOptInfo *pi = ts_info(pts); + TempOptInfo *ni = ts_info(nts); + + ni->prev_copy = ti->prev_copy; + pi->next_copy = ti->next_copy; + ti->next_copy = ts; + ti->prev_copy = ts; + ti->is_const = false; + ti->z_mask = -1; + ti->s_mask = 0; + + if (!QSIMPLEQ_EMPTY(&ti->mem_copy)) { + if (ts == nts) { + /* Last temp copy being removed, the mem copies die. 
*/ + MemCopyInfo *mc; + QSIMPLEQ_FOREACH(mc, &ti->mem_copy, next) { + interval_tree_remove(&mc->itree, &ctx->mem_copy); + } + QSIMPLEQ_CONCAT(&ctx->mem_free, &ti->mem_copy); + } else { + move_mem_copies(find_better_copy(nts), ts); + } + } +} + +static void reset_temp(OptContext *ctx, TCGArg arg) +{ + reset_ts(ctx, arg_temp(arg)); +} + +static void record_mem_copy(OptContext *ctx, TCGType type, + TCGTemp *ts, intptr_t start, intptr_t last) +{ + MemCopyInfo *mc; + TempOptInfo *ti; + + mc = QSIMPLEQ_FIRST(&ctx->mem_free); + if (mc) { + QSIMPLEQ_REMOVE_HEAD(&ctx->mem_free, next); + } else { + mc = tcg_malloc(sizeof(*mc)); + } + + memset(mc, 0, sizeof(*mc)); + mc->itree.start = start; + mc->itree.last = last; + mc->type = type; + interval_tree_insert(&mc->itree, &ctx->mem_copy); + + ts = find_better_copy(ts); + ti = ts_info(ts); + mc->ts = ts; + QSIMPLEQ_INSERT_TAIL(&ti->mem_copy, mc, next); +} + static bool ts_are_copies(TCGTemp *ts1, TCGTemp *ts2) { TCGTemp *i; @@ -221,6 +326,18 @@ static bool args_are_copies(TCGArg arg1, TCGArg arg2) return ts_are_copies(arg_temp(arg1), arg_temp(arg2)); } +static TCGTemp *find_mem_copy_for(OptContext *ctx, TCGType type, intptr_t s) +{ + MemCopyInfo *mc; + + for (mc = mem_copy_first(ctx, s, s); mc; mc = mem_copy_next(mc, s, s)) { + if (mc->itree.start == s && mc->type == type) { + return find_better_copy(mc->ts); + } + } + return NULL; +} + static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src) { TCGTemp *dst_ts = arg_temp(dst); @@ -270,6 +387,11 @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src) si->next_copy = dst_ts; di->is_const = si->is_const; di->val = si->val; + + if (!QSIMPLEQ_EMPTY(&si->mem_copy) + && cmp_better_copy(src_ts, dst_ts) == dst_ts) { + move_mem_copies(dst_ts, src_ts); + } } return true; } @@ -688,6 +810,7 @@ static void finish_folding(OptContext *ctx, TCGOp *op) ctx->prev_mb = NULL; if (!(def->flags & TCG_OPF_COND_BRANCH)) { memset(&ctx->temps_used, 0, sizeof(ctx->temps_used)); + remove_mem_copy_all(ctx); } return; } @@ -1213,6 +1336,11 @@ static bool fold_call(OptContext *ctx, TCGOp *op) } } + /* If the function has side effects, reset mem data. */ + if (!(flags & TCG_CALL_NO_SIDE_EFFECTS)) { + remove_mem_copy_all(ctx); + } + /* Reset temp data for outputs. 
*/ for (i = 0; i < nb_oargs; i++) { reset_temp(ctx, op->args[i]); @@ -2070,6 +2198,83 @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op) return false; } +static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op) +{ + TCGTemp *dst, *src; + intptr_t ofs; + TCGType type; + + if (op->args[1] != tcgv_ptr_arg(tcg_env)) { + return false; + } + + type = ctx->type; + ofs = op->args[2]; + dst = arg_temp(op->args[0]); + src = find_mem_copy_for(ctx, type, ofs); + if (src && src->base_type == type) { + return tcg_opt_gen_mov(ctx, op, temp_arg(dst), temp_arg(src)); + } + + reset_ts(ctx, dst); + record_mem_copy(ctx, type, dst, ofs, ofs + tcg_type_size(type) - 1); + return true; +} + +static bool fold_tcg_st(OptContext *ctx, TCGOp *op) +{ + intptr_t ofs = op->args[2]; + intptr_t lm1; + + if (op->args[1] != tcgv_ptr_arg(tcg_env)) { + remove_mem_copy_all(ctx); + return false; + } + + switch (op->opc) { + CASE_OP_32_64(st8): + lm1 = 0; + break; + CASE_OP_32_64(st16): + lm1 = 1; + break; + case INDEX_op_st32_i64: + case INDEX_op_st_i32: + lm1 = 3; + break; + case INDEX_op_st_i64: + lm1 = 7; + break; + case INDEX_op_st_vec: + lm1 = tcg_type_size(ctx->type) - 1; + break; + default: + g_assert_not_reached(); + } + remove_mem_copy_in(ctx, ofs, ofs + lm1); + return false; +} + +static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op) +{ + TCGTemp *src; + intptr_t ofs, last; + TCGType type; + + if (op->args[1] != tcgv_ptr_arg(tcg_env)) { + fold_tcg_st(ctx, op); + return false; + } + + src = arg_temp(op->args[0]); + ofs = op->args[2]; + type = ctx->type; + last = ofs + tcg_type_size(type) - 1; + remove_mem_copy_in(ctx, ofs, last); + record_mem_copy(ctx, type, src, ofs, last); + return false; +} + static bool fold_xor(OptContext *ctx, TCGOp *op) { if (fold_const2_commutative(ctx, op) || @@ -2093,6 +2298,8 @@ void tcg_optimize(TCGContext *s) TCGOp *op, *op_next; OptContext ctx = { .tcg = s }; + QSIMPLEQ_INIT(&ctx.mem_free); + /* Array VALS has an element for each temp. If this temp holds a constant then its value is kept in VALS' element. If this temp is a copy of other ones then the other copies are @@ -2214,6 +2421,21 @@ void tcg_optimize(TCGContext *s) case INDEX_op_ld32u_i64: done = fold_tcg_ld(&ctx, op); break; + case INDEX_op_ld_i32: + case INDEX_op_ld_i64: + case INDEX_op_ld_vec: + done = fold_tcg_ld_memcopy(&ctx, op); + break; + CASE_OP_32_64(st8): + CASE_OP_32_64(st16): + case INDEX_op_st32_i64: + done = fold_tcg_st(&ctx, op); + break; + case INDEX_op_st_i32: + case INDEX_op_st_i64: + case INDEX_op_st_vec: + done = fold_tcg_st_memcopy(&ctx, op); + break; case INDEX_op_mb: done = fold_mb(&ctx, op); break; -- cgit 1.4.1 From 3eaadaeb4ee9cc6a1267882b3e31c893cd99bb9e Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Wed, 23 Aug 2023 23:13:06 -0700 Subject: tcg: Eliminate duplicate env store operations Notice when a constant is stored to the same location twice. Reviewed-by: Song Gao Signed-off-by: Richard Henderson --- tcg/optimize.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'tcg/optimize.c') diff --git a/tcg/optimize.c b/tcg/optimize.c index b32ef0be0f..a4fe9ee9bb 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -2269,6 +2269,19 @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op) src = arg_temp(op->args[0]); ofs = op->args[2]; type = ctx->type; + + /* + * Eliminate duplicate stores of a constant. + * This happens frequently when the target ISA zero-extends. 
+ */ + if (ts_is_const(src)) { + TCGTemp *prev = find_mem_copy_for(ctx, type, ofs); + if (src == prev) { + tcg_op_remove(ctx->tcg, op); + return true; + } + } + last = ofs + tcg_type_size(type) - 1; remove_mem_copy_in(ctx, ofs, last); record_mem_copy(ctx, type, src, ofs, last); -- cgit 1.4.1 From 26aac97c849f28e23bc818035bac38be34e62983 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 23 Oct 2023 12:31:57 -0700 Subject: tcg/optimize: Split out arg_new_constant MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes a bug wherein raw uses of tcg_constant_internal do not have their TempOptInfo initialized. Reviewed-by: Philippe Mathieu-Daudé Signed-off-by: Richard Henderson --- tcg/optimize.c | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) (limited to 'tcg/optimize.c') diff --git a/tcg/optimize.c b/tcg/optimize.c index a4fe9ee9bb..d8e437c826 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -338,6 +338,21 @@ static TCGTemp *find_mem_copy_for(OptContext *ctx, TCGType type, intptr_t s) return NULL; } +static TCGArg arg_new_constant(OptContext *ctx, uint64_t val) +{ + TCGType type = ctx->type; + TCGTemp *ts; + + if (type == TCG_TYPE_I32) { + val = (int32_t)val; + } + + ts = tcg_constant_internal(type, val); + init_ts_info(ctx, ts); + + return temp_arg(ts); +} + static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src) { TCGTemp *dst_ts = arg_temp(dst); @@ -399,16 +414,8 @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src) static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op, TCGArg dst, uint64_t val) { - TCGTemp *tv; - - if (ctx->type == TCG_TYPE_I32) { - val = (int32_t)val; - } - /* Convert movi to mov with constant temp. 
*/ - tv = tcg_constant_internal(ctx->type, val); - init_ts_info(ctx, tv); - return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv)); + return tcg_opt_gen_mov(ctx, op, dst, arg_new_constant(ctx, val)); } static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y) @@ -1431,7 +1438,7 @@ static bool fold_deposit(OptContext *ctx, TCGOp *op) op->opc = and_opc; op->args[1] = op->args[2]; - op->args[2] = temp_arg(tcg_constant_internal(ctx->type, mask)); + op->args[2] = arg_new_constant(ctx, mask); ctx->z_mask = mask & arg_info(op->args[1])->z_mask; return false; } @@ -1442,7 +1449,7 @@ static bool fold_deposit(OptContext *ctx, TCGOp *op) uint64_t mask = deposit64(-1, op->args[3], op->args[4], 0); op->opc = and_opc; - op->args[2] = temp_arg(tcg_constant_internal(ctx->type, mask)); + op->args[2] = arg_new_constant(ctx, mask); ctx->z_mask = mask & arg_info(op->args[1])->z_mask; return false; } -- cgit 1.4.1 From 6334a968eec3f2498a992a7b96c1bfcaf100066f Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Wed, 25 Oct 2023 18:39:43 -0700 Subject: tcg/optimize: Canonicalize subi to addi during optimization MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Richard Henderson Reviewed-by: Philippe Mathieu-Daudé Message-Id: <20231026013945.1152174-3-richard.henderson@linaro.org> --- tcg/optimize.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) (limited to 'tcg/optimize.c') diff --git a/tcg/optimize.c b/tcg/optimize.c index d8e437c826..468f827399 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -2166,7 +2166,19 @@ static bool fold_sub_vec(OptContext *ctx, TCGOp *op) static bool fold_sub(OptContext *ctx, TCGOp *op) { - return fold_const2(ctx, op) || fold_sub_vec(ctx, op); + if (fold_const2(ctx, op) || fold_sub_vec(ctx, op)) { + return true; + } + + /* Fold sub r,x,i to add r,x,-i */ + if (arg_is_const(op->args[2])) { + uint64_t val = arg_info(op->args[2])->val; + + op->opc = (ctx->type == TCG_TYPE_I32 + ? 
INDEX_op_add_i32 : INDEX_op_add_i64); + op->args[2] = arg_new_constant(ctx, -val); + } + return false; } static bool fold_sub2(OptContext *ctx, TCGOp *op) -- cgit 1.4.1 From f245757701ccd2d4f1c9ee0ed349e3a1d8049996 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Wed, 25 Oct 2023 18:39:44 -0700 Subject: tcg/optimize: Canonicalize sub2 with constants to add2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Richard Henderson Reviewed-by: Philippe Mathieu-Daudé Message-Id: <20231026013945.1152174-4-richard.henderson@linaro.org> --- tcg/optimize.c | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) (limited to 'tcg/optimize.c') diff --git a/tcg/optimize.c b/tcg/optimize.c index 468f827399..f2d01654c5 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -1044,8 +1044,10 @@ static bool fold_add_vec(OptContext *ctx, TCGOp *op) static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add) { - if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) && - arg_is_const(op->args[4]) && arg_is_const(op->args[5])) { + bool a_const = arg_is_const(op->args[2]) && arg_is_const(op->args[3]); + bool b_const = arg_is_const(op->args[4]) && arg_is_const(op->args[5]); + + if (a_const && b_const) { uint64_t al = arg_info(op->args[2])->val; uint64_t ah = arg_info(op->args[3])->val; uint64_t bl = arg_info(op->args[4])->val; @@ -1089,6 +1091,21 @@ static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add) tcg_opt_gen_movi(ctx, op2, rh, ah); return true; } + + /* Fold sub2 r,x,i to add2 r,x,-i */ + if (!add && b_const) { + uint64_t bl = arg_info(op->args[4])->val; + uint64_t bh = arg_info(op->args[5])->val; + + /* Negate the two parts without assembling and disassembling. */ + bl = -bl; + bh = ~bh + !bl; + + op->opc = (ctx->type == TCG_TYPE_I32 + ? INDEX_op_add2_i32 : INDEX_op_add2_i64); + op->args[4] = arg_new_constant(ctx, bl); + op->args[5] = arg_new_constant(ctx, bh); + } return false; } -- cgit 1.4.1
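The sub2-to-add2 fold above negates the double-word constant in two 64-bit
halves without assembling and disassembling it: bl = -bl produces the low
word, and bh = ~bh + !bl adds the carry, which is correct because -bl == 0
exactly when bl == 0.  Below is a small standalone check of that identity;
this is a sketch, not part of the series, and it assumes a compiler with
__int128 support (GCC or Clang on a 64-bit host).

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* The two-word negation used in fold_addsub2. */
static void neg2(uint64_t *bl, uint64_t *bh)
{
    *bl = -*bl;           /* low word of the negation */
    *bh = ~*bh + !*bl;    /* carry into the high word iff the low word was 0 */
}

int main(void)
{
    uint64_t tests[][2] = {
        { 0, 0 }, { 1, 0 }, { 0, 1 }, { UINT64_MAX, UINT64_MAX },
        { 0x123456789abcdef0ull, 0xfedcba9876543210ull },
    };

    for (size_t i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
        uint64_t bl = tests[i][0], bh = tests[i][1];
        unsigned __int128 x = ((unsigned __int128)bh << 64) | bl;
        unsigned __int128 r = -x;   /* reference: full-width negation */

        neg2(&bl, &bh);
        assert(bl == (uint64_t)r);
        assert(bh == (uint64_t)(r >> 64));
    }
    printf("ok\n");
    return 0;
}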