[InstCombine] fold `(a == c && b != c) || (a != c && b == c)` to `(a == c) == (b != c)` #94915
Conversation
@llvm/pr-subscribers-llvm-transforms

Author: Zain Jaffal (zjaffal)

Changes: resolves #92966

Full diff: https://github.com/llvm/llvm-project/pull/94915.diff

2 Files Affected:
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 8695e9e69df20..e873a86f3332f 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -3421,6 +3421,29 @@ Value *InstCombinerImpl::foldAndOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
return foldAndOrOfICmpsUsingRanges(LHS, RHS, IsAnd);
}
+Value *foldAorBZero(BinaryOperator &I, InstCombiner::BuilderTy &Builder) {
+ Value *Op0 = I.getOperand(0);
+ Value *Op1 = I.getOperand(1);
+ if (!Op0->hasOneUse() || !Op1->hasOneUse())
+ return nullptr;
+
+ // Match each operand of I against an 'and' of two icmps with zero.
+ Value *A, *B;
+ CmpInst::Predicate Pred = CmpInst::ICMP_EQ;
+ CmpInst::Predicate InPred = CmpInst::ICMP_EQ;
+ bool IsOp0 = match(Op0, m_c_And(m_Cmp(Pred, m_Value(A), m_ZeroInt()),
+ m_Cmp(InPred, m_Value(B), m_ZeroInt())));
+ bool IsOp1 = match(Op1, m_c_And(m_Cmp(InPred, m_Specific(A), m_ZeroInt()),
+ m_Cmp(Pred, m_Specific(B), m_ZeroInt())));
+ if (!IsOp0 || !IsOp1)
+ return nullptr;
+
+ Constant *Zero = ConstantInt::getNullValue(A->getType());
+ auto *LHS = Builder.CreateICmpEQ(A, Zero);
+ auto *RHS = Builder.CreateICmpEQ(B, Zero);
+ return Builder.CreateICmpNE(LHS, RHS);
+}
+
// FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
// here. We should standardize that construct where it is needed or choose some
// other way to ensure that commutated variants of patterns are not missed.
@@ -3450,6 +3473,10 @@ Instruction *InstCombinerImpl::visitOr(BinaryOperator &I) {
if (Instruction *X = foldComplexAndOrPatterns(I, Builder))
return X;
+ // (A == 0 & B != 0) | (A != 0 & B == 0) -> (A == 0) != (B == 0)
+ if (Value *V = foldAorBZero(I, Builder))
+ return replaceInstUsesWith(I, V);
+
// (A&B)|(A&C) -> A&(B|C) etc
if (Value *V = foldUsingDistributiveLaws(I))
return replaceInstUsesWith(I, V);
diff --git a/llvm/test/Transforms/InstCombine/fold-a-or-b-zero.ll b/llvm/test/Transforms/InstCombine/fold-a-or-b-zero.ll
new file mode 100644
index 0000000000000..5a68927e879a7
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/fold-a-or-b-zero.ll
@@ -0,0 +1,104 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -S -passes=instcombine | FileCheck %s
+
+declare void @use(i1)
+
+define void @a_or_b(i32 %a, i32 %b) {
+; CHECK-LABEL: define void @a_or_b(
+; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) {
+; CHECK-NEXT: [[A_EQ_ZERO:%.*]] = icmp eq i32 [[A]], 0
+; CHECK-NEXT: [[B_NE_ZERO:%.*]] = icmp ne i32 [[B]], 0
+; CHECK-NEXT: [[AND_1:%.*]] = and i1 [[A_EQ_ZERO]], [[B_NE_ZERO]]
+; CHECK-NEXT: [[A_NE_ZERO:%.*]] = icmp ne i32 [[A]], 0
+; CHECK-NEXT: [[B_EQ_ZERO:%.*]] = icmp eq i32 [[B]], 0
+; CHECK-NEXT: [[AND_2:%.*]] = and i1 [[A_NE_ZERO]], [[B_EQ_ZERO]]
+; CHECK-NEXT: [[OR:%.*]] = or i1 [[AND_1]], [[AND_2]]
+; CHECK-NEXT: call void @use(i1 [[OR]])
+; CHECK-NEXT: ret void
+;
+ %a_eq_zero = icmp eq i32 %a, 0
+ %b_ne_zero = icmp ne i32 %b, 0
+ %and.1 = and i1 %a_eq_zero, %b_ne_zero
+ %a_ne_zero = icmp ne i32 %a, 0
+ %b_eq_zero = icmp eq i32 %b, 0
+ %and.2 = and i1 %a_ne_zero, %b_eq_zero
+ %or = or i1 %and.1, %and.2
+ call void @use(i1 %or)
+ ret void
+}
+
+
+define void @a_or_b_zero(i32 %a, i32 %b) {
+; CHECK-LABEL: define void @a_or_b_zero(
+; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) {
+; CHECK-NEXT: [[A_EQ_ZERO:%.*]] = icmp eq i32 [[A]], 0
+; CHECK-NEXT: [[B_NE_ZERO:%.*]] = icmp ne i32 [[B]], 0
+; CHECK-NEXT: [[AND_1:%.*]] = and i1 [[A_EQ_ZERO]], [[B_NE_ZERO]]
+; CHECK-NEXT: [[A_NE_ZERO:%.*]] = icmp ne i32 [[A]], 0
+; CHECK-NEXT: [[B_EQ_ZERO:%.*]] = icmp eq i32 [[B]], 0
+; CHECK-NEXT: [[AND_2:%.*]] = and i1 [[A_NE_ZERO]], [[B_EQ_ZERO]]
+; CHECK-NEXT: [[OR:%.*]] = or i1 [[AND_1]], [[AND_2]]
+; CHECK-NEXT: call void @use(i1 [[OR]])
+; CHECK-NEXT: ret void
+;
+ %a_eq_zero = icmp eq i32 %a, 0
+ %b_ne_zero = icmp ne i32 %b, 0
+ %and.1 = and i1 %a_eq_zero, %b_ne_zero
+ %a_ne_zero = icmp ne i32 %a, 0
+ %b_eq_zero = icmp eq i32 %b, 0
+ %and.2 = and i1 %a_ne_zero, %b_eq_zero
+ %or = or i1 %and.1, %and.2
+ call void @use(i1 %or)
+ ret void
+}
+
+define void @a_or_b_multiple_uses(i32 %a, i32 %b) {
+; CHECK-LABEL: define void @a_or_b_multiple_uses(
+; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) {
+; CHECK-NEXT: [[A_EQ_ZERO:%.*]] = icmp eq i32 [[A]], 0
+; CHECK-NEXT: [[B_NE_ZERO:%.*]] = icmp ne i32 [[B]], 0
+; CHECK-NEXT: [[AND_1:%.*]] = and i1 [[A_EQ_ZERO]], [[B_NE_ZERO]]
+; CHECK-NEXT: [[A_NE_ZERO:%.*]] = icmp ne i32 [[A]], 0
+; CHECK-NEXT: [[B_EQ_ZERO:%.*]] = icmp eq i32 [[B]], 0
+; CHECK-NEXT: [[AND_2:%.*]] = and i1 [[A_NE_ZERO]], [[B_EQ_ZERO]]
+; CHECK-NEXT: [[OR:%.*]] = or i1 [[AND_1]], [[AND_2]]
+; CHECK-NEXT: call void @use(i1 [[OR]])
+; CHECK-NEXT: ret void
+;
+ %a_eq_zero = icmp eq i32 %a, 0
+ %b_ne_zero = icmp ne i32 %b, 0
+ %and.1 = and i1 %a_eq_zero, %b_ne_zero
+ %a_ne_zero = icmp ne i32 %a, 0
+ %b_eq_zero = icmp eq i32 %b, 0
+ %and.2 = and i1 %a_ne_zero, %b_eq_zero
+ call void @use(i1 %and.2)
+ %or = or i1 %and.1, %and.2
+ ret void
+}
+
+define void @a_or_b_multiple_uses_2(i32 %a, i32 %b) {
+; CHECK-LABEL: define void @a_or_b_multiple_uses_2(
+; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) {
+; CHECK-NEXT: [[A_EQ_ZERO:%.*]] = icmp eq i32 [[A]], 0
+; CHECK-NEXT: [[B_NE_ZERO:%.*]] = icmp ne i32 [[B]], 0
+; CHECK-NEXT: [[AND_1:%.*]] = and i1 [[A_EQ_ZERO]], [[B_NE_ZERO]]
+; CHECK-NEXT: [[A_NE_ZERO:%.*]] = icmp ne i32 [[A]], 0
+; CHECK-NEXT: [[B_EQ_ZERO:%.*]] = icmp eq i32 [[B]], 0
+; CHECK-NEXT: [[AND_2:%.*]] = and i1 [[A_NE_ZERO]], [[B_EQ_ZERO]]
+; CHECK-NEXT: [[OR:%.*]] = or i1 [[AND_1]], [[AND_2]]
+; CHECK-NEXT: call void @use(i1 [[OR]])
+; CHECK-NEXT: ret void
+;
+ %a_eq_zero = icmp eq i32 %a, 0
+ %b_ne_zero = icmp ne i32 %b, 0
+ call void @use(i1 %b_ne_zero)
+ %and.1 = and i1 %a_eq_zero, %b_ne_zero
+ %a_ne_zero = icmp ne i32 %a, 0
+ %b_eq_zero = icmp eq i32 %b, 0
+ %and.2 = and i1 %a_ne_zero, %b_eq_zero
+ call void @use(i1 %and.1)
+ %or = or i1 %and.1, %and.2
+ ret void
+}
+
+
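As a quick sanity check of the identity the patch implements, the source and folded forms can be compared exhaustively outside of LLVM. This is an illustrative standalone snippet, not part of the patch or the test file:

```cpp
#include <cassert>

// Verify: (a == c && b != c) || (a != c && b == c)
//     ==  ((a == c) == (b != c))
// over a small exhaustive range of inputs.
int main() {
  for (int a = 0; a < 3; ++a)
    for (int b = 0; b < 3; ++b)
      for (int c = 0; c < 3; ++c) {
        bool src = (a == c && b != c) || (a != c && b == c);
        bool tgt = (a == c) == (b != c);
        assert(src == tgt);
      }
  return 0;
}
```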
Hello, @dtcxzyw! I noticed that pattern during code review at my job and was curious whether it could be optimized. IIRC the check was about … I believe it can be generalized.
I am using `isKnownInversion` in the following PR, #94915; it is useful to have the method in a shared class so I can reuse it. I am not sure if `ValueTracking` is the correct place, but it looks like most of the methods with the `isKnownX` naming pattern belong there.
Force-pushed from 8ade0f6 to 371770c.
I used `isKnownInversion` to check both operands, and added tests that compare against a constant and tests for pointer compares.
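For context, here is a minimal sketch of what the fold looks like when written in terms of `isKnownInversion`. This is an illustrative reconstruction, not the exact committed code; it assumes the `ValueTracking` helper `bool isKnownInversion(const Value *X, const Value *Y)` discussed above and uses the `foldOrOfInversions` name suggested later in this review:

```cpp
// Sketch: fold (Cmp1 & Cmp2) | (Cmp3 & Cmp4) into an xor when the two
// and-operands pair up as inversions of each other.
static Value *foldOrOfInversions(BinaryOperator &I,
                                 InstCombiner::BuilderTy &Builder) {
  assert(I.getOpcode() == Instruction::Or &&
         "Simplification only supports 'or' instructions");

  Value *Cmp1, *Cmp2, *Cmp3, *Cmp4;
  if (!match(I.getOperand(0), m_And(m_Value(Cmp1), m_Value(Cmp2))) ||
      !match(I.getOperand(1), m_And(m_Value(Cmp3), m_Value(Cmp4))))
    return nullptr;

  // If Cmp3 == not(Cmp1) and Cmp4 == not(Cmp2):
  //   (A & B) | (~A & ~B) --> ~(A ^ B) --> A ^ ~B, i.e. Cmp1 ^ Cmp4.
  if (isKnownInversion(Cmp1, Cmp3) && isKnownInversion(Cmp2, Cmp4))
    return Builder.CreateXor(Cmp1, Cmp4);
  // Same reasoning with the roles of Cmp3 and Cmp4 swapped.
  if (isKnownInversion(Cmp1, Cmp4) && isKnownInversion(Cmp2, Cmp3))
    return Builder.CreateXor(Cmp1, Cmp3);
  return nullptr;
}
```

Because `isKnownInversion` works on arbitrary values rather than only icmps against zero, this covers the constant and pointer-compare cases the new tests exercise.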
Also there is a small typo in the PR title. Please also add an Alive2 proof to the PR description.

Current title:
(a == 1 && b != 0) || (a != 0 && b == 0)) to (a ==0) != (b == 0)

Suggested:
(a == c && b != c) || (a != c && b == c)) to (a == c) == (b != c)
Looks mostly good, a few more nits.
@@ -3421,6 +3421,25 @@ Value *InstCombinerImpl::foldAndOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
return foldAndOrOfICmpsUsingRanges(LHS, RHS, IsAnd);
}

static Value *foldAorBConst(BinaryOperator &I,
What does `Const` mean here?
Suggested change:
- static Value *foldAorBConst(BinaryOperator &I,
+ static Value *foldOrOfInversions(BinaryOperator &I,

Maybe?
LGTM. Thank you!
Please wait for additional approval from other reviewers :)
LGTM
Note that there is a conjugate pattern with `or` and `and` swapped: https://alive2.llvm.org/ce/z/mRhkK2. It would be good to handle it in a follow-up as well.
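The conjugate identity is easy to sanity-check on booleans: (A | B) & (~A | ~B) simplifies to A ^ B. A tiny standalone check (illustrative only; the Alive2 link above is the IR-level proof):

```cpp
#include <cassert>

// Exhaustively verify (A || B) && (!A || !B) == (A != B) over all bool inputs.
int main() {
  for (int a = 0; a <= 1; ++a)
    for (int b = 0; b <= 1; ++b) {
      bool A = a != 0, B = b != 0;
      bool swapped = (A || B) && (!A || !B); // or/and-swapped form
      assert(swapped == (A != B));           // i.e. A ^ B
    }
  return 0;
}
```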
!match(I.getOperand(1), m_And(m_Value(Cmp3), m_Value(Cmp4))))
return nullptr;

// Check if any two pairs of the and operations are invertions of each other.
Suggested change:
- // Check if any two pairs of the and operations are invertions of each other.
+ // Check if any two pairs of the and operations are inversions of each other.
resolves #92966

Alive2 proof: https://alive2.llvm.org/ce/z/bLAQBS