From faba1577fb716c39cac6257ea73fe9c41faff8d5 Mon Sep 17 00:00:00 2001
From: Roman Lebedev <lebedev.ri@gmail.com>
Date: Mon, 17 May 2021 14:53:53 +0300
Subject: [PATCH] [InstCombine] isFreeToInvert(): constant expressions aren't
 free to invert (PR50370)

This fixes https://bugs.llvm.org/show_bug.cgi?id=50370,
which reports yet another endless combine loop,
this one regressed from 554b1bced325a8d860ad00bd59020d66d01c95f8,
which fixed yet another endless combine loop (PR50308).

This code had fallen into the very typical pitfall of forgetting
that constant expressions exist, and they aren't free to invert,
because the `not` won't be absorbed by the "constant",
but will remain a (constant) expression...
---
 .../Transforms/InstCombine/InstCombiner.h |  4 +--
 test/Transforms/InstCombine/not-add.ll    | 32 +++++++++++++++++++
 2 files changed, 34 insertions(+), 2 deletions(-)

diff --git a/include/llvm/Transforms/InstCombine/InstCombiner.h b/include/llvm/Transforms/InstCombine/InstCombiner.h
index aae0694e4ca..ba0d41f9b74 100644
--- a/include/llvm/Transforms/InstCombine/InstCombiner.h
+++ b/include/llvm/Transforms/InstCombine/InstCombiner.h
@@ -249,8 +249,8 @@ public:
     if (BinaryOperator *BO = dyn_cast<BinaryOperator>(V))
       if (BO->getOpcode() == Instruction::Add ||
           BO->getOpcode() == Instruction::Sub)
-        if (isa<Constant>(BO->getOperand(0)) ||
-            isa<Constant>(BO->getOperand(1)))
+        if (match(BO, PatternMatch::m_c_BinOp(PatternMatch::m_Value(),
+                                              PatternMatch::m_ImmConstant())))
           return WillInvertAllUses;
 
     // Selects with invertible operands are freely invertible
diff --git a/test/Transforms/InstCombine/not-add.ll b/test/Transforms/InstCombine/not-add.ll
index d372e760372..5ece88dbef1 100644
--- a/test/Transforms/InstCombine/not-add.ll
+++ b/test/Transforms/InstCombine/not-add.ll
@@ -165,3 +165,35 @@ cond.end:
   %sub = sub nsw i32 %v3, %cond
   ret i32 %sub
 }
+
+@g = extern_weak global i32
+define void @pr50370(i32 %x) {
+; CHECK-LABEL: @pr50370(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[XOR:%.*]] = xor i32 [[X:%.*]], 1
+; CHECK-NEXT:    [[B15:%.*]] = srem i32 ashr (i32 65536, i32 or (i32 zext (i1 icmp eq (i32* @g, i32* null) to i32), i32 65537)), [[XOR]]
+; CHECK-NEXT:    [[B22:%.*]] = add nsw i32 [[B15]], sdiv (i32 or (i32 zext (i1 icmp eq (i32* @g, i32* null) to i32), i32 65537), i32 2147483647)
+; CHECK-NEXT:    [[B14:%.*]] = srem i32 ashr (i32 65536, i32 or (i32 zext (i1 icmp eq (i32* @g, i32* null) to i32), i32 65537)), [[B22]]
+; CHECK-NEXT:    [[B12:%.*]] = add nuw nsw i32 [[B15]], ashr (i32 65536, i32 or (i32 zext (i1 icmp eq (i32* @g, i32* null) to i32), i32 65537))
+; CHECK-NEXT:    [[B8:%.*]] = shl i32 sdiv (i32 or (i32 zext (i1 icmp eq (i32* @g, i32* null) to i32), i32 65537), i32 2147483647), [[B14]]
+; CHECK-NEXT:    [[B2:%.*]] = xor i32 [[B12]], [[B8]]
+; CHECK-NEXT:    [[B:%.*]] = xor i32 [[B2]], -1
+; CHECK-NEXT:    store i32 [[B]], i32* undef, align 4
+; CHECK-NEXT:    ret void
+;
+entry:
+  %xor = xor i32 %x, 1
+  %or4 = or i32 or (i32 zext (i1 icmp eq (i32* @g, i32* null) to i32), i32 1), 65536
+  %B6 = ashr i32 65536, %or4
+  %B15 = srem i32 %B6, %xor
+  %B20 = sdiv i32 %or4, 2147483647
+  %B22 = add i32 %B15, %B20
+  %B14 = srem i32 %B6, %B22
+  %B12 = add i32 %B15, %B6
+  %B8 = shl i32 %B20, %B14
+  %B2 = xor i32 %B12, %B8
+  %B3 = or i32 %B12, undef
+  %B = xor i32 %B2, %B3
+  store i32 %B, i32* undef, align 4
+  ret void
+}