diff --git a/lib/Transforms/InstCombine/InstCombineCompares.cpp b/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 226126b39ed..9bb65efbd61 100644
--- a/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -1198,11 +1198,16 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
       Type *AndTy = AndCST->getType();          // Type of the and.
 
       // We can fold this as long as we can't shift unknown bits
-      // into the mask. This can only happen with signed shift
-      // rights, as they sign-extend.
+      // into the mask. This can happen with signed shift
+      // rights, as they sign-extend. With logical shifts,
+      // we must still make sure the comparison is not signed
+      // because we are effectively changing the
+      // position of the sign bit (PR17827).
+      // TODO: We can relax these constraints a bit more.
       if (ShAmt) {
-        bool CanFold = Shift->isLogicalShift();
-        if (!CanFold) {
+        bool CanFold = false;
+        unsigned ShiftOpcode = Shift->getOpcode();
+        if (ShiftOpcode == Instruction::AShr) {
           // To test for the bad case of the signed shr, see if any
           // of the bits shifted in could be tested after the mask.
           uint32_t TyBits = Ty->getPrimitiveSizeInBits();
@@ -1212,6 +1217,9 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
           if ((APInt::getHighBitsSet(BitWidth, BitWidth-ShAmtVal) &
                AndCST->getValue()) == 0)
             CanFold = true;
+        } else if (ShiftOpcode == Instruction::Shl ||
+                   ShiftOpcode == Instruction::LShr) {
+          CanFold = !ICI.isSigned();
         }
 
         if (CanFold) {
diff --git a/test/Transforms/InstCombine/pr17827.ll b/test/Transforms/InstCombine/pr17827.ll
new file mode 100644
index 00000000000..a8b59263552
--- /dev/null
+++ b/test/Transforms/InstCombine/pr17827.ll
@@ -0,0 +1,74 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; With left shift, the comparison should not be modified.
+; CHECK-LABEL: @test_shift_and_cmp_not_changed1(
+; CHECK: icmp slt i8 %andp, 32
+define i1 @test_shift_and_cmp_not_changed1(i8 %p) #0 {
+entry:
+  %shlp = shl i8 %p, 5
+  %andp = and i8 %shlp, -64
+  %cmp = icmp slt i8 %andp, 32
+  ret i1 %cmp
+}
+
+; With arithmetic right shift, the comparison should not be modified.
+; CHECK-LABEL: @test_shift_and_cmp_not_changed2(
+; CHECK: icmp slt i8 %andp, 32
+define i1 @test_shift_and_cmp_not_changed2(i8 %p) #0 {
+entry:
+  %shlp = ashr i8 %p, 5
+  %andp = and i8 %shlp, -64
+  %cmp = icmp slt i8 %andp, 32
+  ret i1 %cmp
+}
+
+; This should simplify functionally to the left shift case.
+; The extra input parameter should be optimized away.
+; CHECK-LABEL: @test_shift_and_cmp_changed1(
+; CHECK: %andp = shl i8 %p, 5
+; CHECK-NEXT: %shl = and i8 %andp, -64
+; CHECK-NEXT: %cmp = icmp slt i8 %shl, 32
+define i1 @test_shift_and_cmp_changed1(i8 %p, i8 %q) #0 {
+entry:
+  %andp = and i8 %p, 6
+  %andq = and i8 %q, 8
+  %or = or i8 %andq, %andp
+  %shl = shl i8 %or, 5
+  %ashr = ashr i8 %shl, 5
+  %cmp = icmp slt i8 %ashr, 1
+  ret i1 %cmp
+}
+
+; Unsigned compare allows a transformation to compare against 0.
+; CHECK-LABEL: @test_shift_and_cmp_changed2(
+; CHECK: icmp eq i8 %andp, 0
+define i1 @test_shift_and_cmp_changed2(i8 %p) #0 {
+entry:
+  %shlp = shl i8 %p, 5
+  %andp = and i8 %shlp, -64
+  %cmp = icmp ult i8 %andp, 32
+  ret i1 %cmp
+}
+
+; nsw on the shift should not affect the comparison.
+; CHECK-LABEL: @test_shift_and_cmp_changed3(
+; CHECK: icmp slt i8 %andp, 32
+define i1 @test_shift_and_cmp_changed3(i8 %p) #0 {
+entry:
+  %shlp = shl nsw i8 %p, 5
+  %andp = and i8 %shlp, -64
+  %cmp = icmp slt i8 %andp, 32
+  ret i1 %cmp
+}
+
+; Logical shift right allows a fold to 'ret i1 true' because the lshr guarantees the bits tested by the 'and' are zero.
+; CHECK-LABEL: @test_shift_and_cmp_changed4(
+; CHECK: ret i1 true
+define i1 @test_shift_and_cmp_changed4(i8 %p) #0 {
+entry:
+  %shlp = lshr i8 %p, 5
+  %andp = and i8 %shlp, -64
+  %cmp = icmp slt i8 %andp, 32
+  ret i1 %cmp
+}
+
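For reference, here is a minimal standalone C++ sketch (not part of the patch) of why the old logical-shift path was unsound for signed compares. It brute-forces the i8 pattern from @test_shift_and_cmp_not_changed1 against a hand-written model of the pre-patch fold (mask and compare constant shifted right through the shl while the predicate stays signed). The helper names and the modeled fold are illustrative assumptions rather than actual InstCombine output; any value it reports (e.g. p = 4) is an input where the old fold flipped the result, which is the PR17827 miscompile the new !ICI.isSigned() check prevents.

// Sketch only: models the IR pattern and the assumed pre-patch fold in C++.
// Assumes two's-complement wrapping on the int8_t narrowing conversions
// (guaranteed since C++20, universal in practice), matching LLVM's i8.
#include <cstdint>
#include <cstdio>

// %shlp = shl i8 %p, 5 ; %andp = and i8 %shlp, -64 ; %cmp = icmp slt i8 %andp, 32
static bool originalForm(int8_t p) {
  int8_t shlp = static_cast<int8_t>(static_cast<uint8_t>(p) << 5); // shl i8 %p, 5
  int8_t andp = static_cast<int8_t>(shlp & -64);                   // and i8 %shlp, -64
  return andp < 32;                                                // icmp slt i8 %andp, 32
}

// Assumed pre-patch result: the shift is folded away by shifting the mask and
// the compare constant right by 5 while keeping the signed predicate:
//   %andp = and i8 %p, 6 ; %cmp = icmp slt i8 %andp, 1
static bool prePatchFoldedForm(int8_t p) {
  int8_t andp = static_cast<int8_t>(p & 6); // -64 lshr 5 == 6
  return andp < 1;                          // 32 lshr 5 == 1
}

int main() {
  // Exhaustively compare both forms over all i8 inputs and report mismatches.
  for (int v = -128; v <= 127; ++v) {
    int8_t p = static_cast<int8_t>(v);
    bool a = originalForm(p);
    bool b = prePatchFoldedForm(p);
    if (a != b)
      std::printf("p = %4d: original = %d, pre-patch fold = %d\n", v, a, b);
  }
  return 0;
}

With the patched check (CanFold = !ICI.isSigned() for shl/lshr), this signed-compare pattern is left untouched, which is what the first test above verifies.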