Commit 1e2edf85cc for qemu.org
commit 1e2edf85cc4755c844f9c98cbe52ac63d7d1fb18
Author: Richard Henderson <richard.henderson@linaro.org>
Date:   Mon Dec 9 16:48:36 2024 -0600

    tcg/optimize: Build and use o_bits in fold_and

    Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
    Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
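The change collapses the per-operand known-zero masks (z1, z2) into combined result masks and, now that known-one bits (o_mask) are tracked, computes the affected-bit mask a_mask = t1->z_mask & ~t2->o_mask without requiring arg2 to be constant. A minimal standalone sketch of that mask algebra for a bitwise AND follows, assuming the tcg/optimize.c conventions (a 0 bit in z_mask means the value bit is known to be 0, a 1 bit in o_mask means it is known to be 1); the struct and helper names are illustrative, not QEMU APIs.

/* Standalone sketch of the AND mask algebra; not QEMU code. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct masks {
    uint64_t z_mask;    /* a 0 bit here means the value bit is known 0 */
    uint64_t o_mask;    /* a 1 bit here means the value bit is known 1 */
};

/* Combine operand masks for "dst = arg1 & arg2". */
static struct masks and_masks(struct masks a1, struct masks a2)
{
    struct masks r;
    r.z_mask = a1.z_mask & a2.z_mask;   /* may be 1 only where both may be 1 */
    r.o_mask = a1.o_mask & a2.o_mask;   /* known 1 only where both are known 1 */
    return r;
}

/* Affected bits: not known zero in arg1, masked by those known one in arg2. */
static uint64_t and_affected(struct masks a1, struct masks a2)
{
    return a1.z_mask & ~a2.o_mask;
}

int main(void)
{
    /* arg1: only the low 16 bits may be nonzero; arg2: constant 0xffff. */
    struct masks a1 = { .z_mask = 0xffff, .o_mask = 0 };
    struct masks a2 = { .z_mask = 0xffff, .o_mask = 0xffff };
    struct masks r = and_masks(a1, a2);

    printf("z_mask=%#" PRIx64 " o_mask=%#" PRIx64 " a_mask=%#" PRIx64 "\n",
           r.z_mask, r.o_mask, and_affected(a1, a2));
    /* a_mask == 0: the AND cannot change arg1, so it may fold to a copy. */
    return 0;
}

Running it prints z_mask=0xffff o_mask=0 a_mask=0: no bit of arg1 is affected, so the AND may be folded to a copy of arg1.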
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 49ef039932..d9ccbb36e2 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1422,7 +1422,7 @@ static bool fold_addco(OptContext *ctx, TCGOp *op)
 
 static bool fold_and(OptContext *ctx, TCGOp *op)
 {
-    uint64_t z1, z2, z_mask, s_mask;
+    uint64_t z_mask, o_mask, s_mask, a_mask;
     TempOptInfo *t1, *t2;
 
     if (fold_const2_commutative(ctx, op) ||
@@ -1434,18 +1434,9 @@ static bool fold_and(OptContext *ctx, TCGOp *op)
 
     t1 = arg_info(op->args[1]);
     t2 = arg_info(op->args[2]);
-    z1 = t1->z_mask;
-    z2 = t2->z_mask;
 
-    /*
-     * Known-zeros does not imply known-ones. Therefore unless
-     * arg2 is constant, we can't infer affected bits from it.
-     */
-    if (ti_is_const(t2) && fold_affected_mask(ctx, op, z1 & ~z2)) {
-        return true;
-    }
-
-    z_mask = z1 & z2;
+    z_mask = t1->z_mask & t2->z_mask;
+    o_mask = t1->o_mask & t2->o_mask;
 
     /*
      * Sign repetitions are perforce all identical, whether they are 1 or 0.
@@ -1453,7 +1444,10 @@ static bool fold_and(OptContext *ctx, TCGOp *op)
      */
     s_mask = t1->s_mask & t2->s_mask;
 
-    return fold_masks_zs(ctx, op, z_mask, s_mask);
+    /* Affected bits are those not known zero, masked by those known one. */
+    a_mask = t1->z_mask & ~t2->o_mask;
+
+    return fold_masks_zosa(ctx, op, z_mask, o_mask, s_mask, a_mask);
 }
 
 static bool fold_andc(OptContext *ctx, TCGOp *op)
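For context, a hedged sketch of how the four masks passed to fold_masks_zosa could be consumed (the helper below is an illustrative stand-in, not QEMU's implementation): if z_mask == o_mask, every bit is either known 0 or known 1 and the result is the constant o_mask; if a_mask == 0, arg2 is known to be 1 wherever arg1 may be nonzero, so the AND cannot change arg1 and can become a copy. The example deliberately uses a non-constant arg2, the case the removed fold_affected_mask() path could not handle.

/* Illustrative stand-in, not QEMU's fold_masks_zosa(). */
#include <stdint.h>
#include <stdio.h>

static const char *classify(uint64_t z_mask, uint64_t o_mask, uint64_t a_mask)
{
    if (z_mask == o_mask) {
        /* Every bit is known 0 or known 1: the result is the constant o_mask. */
        return "replace with constant";
    }
    if (a_mask == 0) {
        /* arg2 is known 1 wherever arg1 may be nonzero: the AND is a no-op. */
        return "replace with copy of arg1";
    }
    return "keep the op, just record the masks";
}

int main(void)
{
    /* arg1: low nibble unknown, upper bits known 0.
     * arg2: not constant, but its low nibble is known 1
     * (e.g. it was produced by OR with 0xf). */
    uint64_t z1 = 0xf,    o1 = 0;
    uint64_t z2 = ~0ull,  o2 = 0xf;

    uint64_t z_mask = z1 & z2;      /* 0xf: result may be 1 only in the low nibble */
    uint64_t o_mask = o1 & o2;      /* 0:   no result bit is known to be 1         */
    uint64_t a_mask = z1 & ~o2;     /* 0:   arg2 keeps every live bit of arg1      */

    printf("%s\n", classify(z_mask, o_mask, a_mask));
    return 0;
}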