1
0
Mirror of https://github.com/RPCS3/llvm-mirror.git, synced 2024-11-23 19:23:23 +01:00.

[InstSimplify] analyze (optionally casted) icmps to eliminate obviously false logic (PR27869)

By moving this transform to InstSimplify from InstCombine, we sidestep the problem/question
raised by PR27869:
https://llvm.org/bugs/show_bug.cgi?id=27869
...where InstCombine turns an icmp+zext into a shift causing us to miss the fold.

Credit to David Majnemer for a draft patch of the changes to InstructionSimplify.cpp.

Differential Revision: http://reviews.llvm.org/D21512

llvm-svn: 273200
This commit is contained in:
Sanjay Patel 2016-06-20 20:59:59 +00:00
parent c92e81b80a
commit 1977275dcd
5 changed files with 129 additions and 55 deletions

View File

@ -1492,9 +1492,8 @@ static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp,
return nullptr;
}
/// Simplify (and (icmp ...) (icmp ...)) to true when we can tell that the range
/// of possible values cannot be satisfied.
static Value *SimplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
Type *ITy = Op0->getType();
ICmpInst::Predicate Pred0, Pred1;
ConstantInt *CI1, *CI2;
Value *V;
@ -1502,6 +1501,18 @@ static Value *SimplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true))
return X;
// Look for this pattern: (icmp V, C0) & (icmp V, C1)).
const APInt *C0, *C1;
if (match(Op0, m_ICmp(Pred0, m_Value(V), m_APInt(C0))) &&
match(Op1, m_ICmp(Pred1, m_Specific(V), m_APInt(C1)))) {
// Make a constant range that's the intersection of the two icmp ranges.
// If the intersection is empty, we know that the result is false.
auto Range0 = ConstantRange::makeAllowedICmpRegion(Pred0, *C0);
auto Range1 = ConstantRange::makeAllowedICmpRegion(Pred1, *C1);
if (Range0.intersectWith(Range1).isEmptySet())
return getFalse(ITy);
}
if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_ConstantInt(CI1)),
m_ConstantInt(CI2))))
return nullptr;
@ -1509,8 +1520,6 @@ static Value *SimplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Specific(CI1))))
return nullptr;
Type *ITy = Op0->getType();
auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
bool isNSW = AddInst->hasNoSignedWrap();
bool isNUW = AddInst->hasNoUnsignedWrap();
@ -1608,6 +1617,24 @@ static Value *SimplifyAndInst(Value *Op0, Value *Op1, const Query &Q,
}
}
// The compares may be hidden behind casts. Look through those and try the
// same folds as above.
auto *Cast0 = dyn_cast<CastInst>(Op0);
auto *Cast1 = dyn_cast<CastInst>(Op1);
if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() &&
Cast0->getSrcTy() == Cast1->getSrcTy()) {
auto *Cmp0 = dyn_cast<ICmpInst>(Cast0->getOperand(0));
auto *Cmp1 = dyn_cast<ICmpInst>(Cast1->getOperand(0));
if (Cmp0 && Cmp1) {
Instruction::CastOps CastOpc = Cast0->getOpcode();
Type *ResultType = Cast0->getType();
if (auto *V = dyn_cast_or_null<Constant>(SimplifyAndOfICmps(Cmp0, Cmp1)))
return ConstantExpr::getCast(CastOpc, V, ResultType);
if (auto *V = dyn_cast_or_null<Constant>(SimplifyAndOfICmps(Cmp1, Cmp0)))
return ConstantExpr::getCast(CastOpc, V, ResultType);
}
}
// Try some generic simplifications for associative operations.
if (Value *V = SimplifyAssociativeBinOp(Instruction::And, Op0, Op1, Q,
MaxRecurse))

View File

@ -968,16 +968,6 @@ Value *InstCombiner::FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
RHSCC == ICmpInst::ICMP_SGE || RHSCC == ICmpInst::ICMP_SLE)
return nullptr;
// Make a constant range that's the intersection of the two icmp ranges.
// If the intersection is empty, we know that the result is false.
ConstantRange LHSRange =
ConstantRange::makeAllowedICmpRegion(LHSCC, LHSCst->getValue());
ConstantRange RHSRange =
ConstantRange::makeAllowedICmpRegion(RHSCC, RHSCst->getValue());
if (LHSRange.intersectWith(RHSRange).isEmptySet())
return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
// We can't fold (ugt x, C) & (sgt x, C2).
if (!PredicatesFoldable(LHSCC, RHSCC))
return nullptr;

View File

@ -228,17 +228,6 @@ define i8 @test20(i8 %A) {
ret i8 %D
}
; Deleted by this commit (the fold now happens in InstSimplify; see the new
; @disjoint_cmps test): %A == 1 and %A sge 3 are disjoint ranges, so the
; 'and' of the two icmps is always false.
define i1 @test22(i32 %A) {
; CHECK-LABEL: @test22(
; CHECK-NEXT: ret i1 false
;
%B = icmp eq i32 %A, 1
%C = icmp sge i32 %A, 3
;; false
%D = and i1 %B, %C
ret i1 %D
}
define i1 @test23(i32 %A) {
; CHECK-LABEL: @test23(
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 %A, 2

View File

@ -33,16 +33,6 @@ define i32 @test3(i32 %X, i32 %Y) {
ret i32 %b
}
; Deleted by this commit (the fold now happens in InstSimplify; see the new
; @disjoint_cmps2 test): %X ult 31 restricts %X to [0,31), while %X slt 0
; requires the sign bit set — the ranges are disjoint, so the 'and' is false.
define i1 @test4(i32 %X) {
; CHECK-LABEL: @test4(
; CHECK-NEXT: ret i1 false
;
%a = icmp ult i32 %X, 31
%b = icmp slt i32 %X, 0
%c = and i1 %a, %b
ret i1 %c
}
; Make sure we don't go into an infinite loop with this test
define <4 x i32> @test5(<4 x i32> %A) {
; CHECK-LABEL: @test5(

View File

@ -231,37 +231,115 @@ define i1 @or_icmp3(i32 %x, i32 %y) {
ret i1 %3
}
; Moved here (InstSimplify) from the InstCombine test file by this commit:
; %A == 1 and %A sge 3 describe disjoint constant ranges, so the 'and' of
; the two icmps simplifies to false without creating any new instructions.
define i1 @disjoint_cmps(i32 %A) {
; CHECK-LABEL: @disjoint_cmps(
; CHECK-NEXT: ret i1 false
;
%B = icmp eq i32 %A, 1
%C = icmp sge i32 %A, 3
%D = and i1 %B, %C
ret i1 %D
}
; Mixed-signedness predicates: %X ult 31 puts %X in [0,31), while %X slt 0
; requires the sign bit set (unsigned value >= 2^31). The range intersection
; is empty, so the 'and' simplifies to false.
define i1 @disjoint_cmps2(i32 %X) {
; CHECK-LABEL: @disjoint_cmps2(
; CHECK-NEXT: ret i1 false
;
%a = icmp ult i32 %X, 31
%b = icmp slt i32 %X, 0
%c = and i1 %a, %b
ret i1 %c
}
; PR27869 - Look through casts to eliminate cmps and bitwise logic.
; NOTE(review): this diff hunk is rendered without +/- markers, so removed and
; added lines appear interleaved — the pre-patch FileCheck expectations and body
; (%cmp/%conv, %conv2) coexist with the post-patch ones (the 'ret i32 0' line
; and %cmp0/%conv0/%conv1), which is why %and is defined twice below. As written
; this is not valid IR; consult the commit's raw diff for the exact +/- split.
; Post-patch intent: %i == 0 and %i ugt 4 are disjoint, so the 'and' of the
; zext'd icmps simplifies to 0 even though the i1 compares are hidden by casts.
define i32 @and_of_zexted_icmps(i32 %i) {
; CHECK-LABEL: @and_of_zexted_icmps(
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 %i, 0
; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT: [[CMP1:%.*]] = icmp ugt i32 %i, 4
; CHECK-NEXT: [[CONV2:%.*]] = zext i1 [[CMP1]] to i32
; CHECK-NEXT: [[AND:%.*]] = and i32 [[CONV]], [[CONV2]]
; CHECK-NEXT: ret i32 [[AND]]
; CHECK-NEXT: ret i32 0
;
%cmp = icmp eq i32 %i, 0
%conv = zext i1 %cmp to i32
%cmp0 = icmp eq i32 %i, 0
%conv0 = zext i1 %cmp0 to i32
%cmp1 = icmp ugt i32 %i, 4
%conv2 = zext i1 %cmp1 to i32
%and = and i32 %conv, %conv2
%conv1 = zext i1 %cmp1 to i32
%and = and i32 %conv0, %conv1
ret i32 %and
}
; Make sure vectors work too.
; NOTE(review): like @and_of_zexted_icmps above, this hunk interleaves removed
; and added diff lines (duplicate FileCheck expectations and a doubly-defined
; %and), so it is not valid IR as rendered; see the commit's raw diff.
; Post-patch intent: per lane, %i == 0 and %i slt 0 cannot both hold, so the
; 'and' of the zext'd vector compares simplifies to zeroinitializer.
define <4 x i32> @and_of_zexted_icmps_vec(<4 x i32> %i) {
; CHECK-LABEL: @and_of_zexted_icmps_vec(
; CHECK-NEXT: [[CMP:%.*]] = icmp eq <4 x i32> %i, zeroinitializer
; CHECK-NEXT: [[CONV:%.*]] = zext <4 x i1> [[CMP]] to <4 x i32>
; CHECK-NEXT: [[CMP1:%.*]] = icmp slt <4 x i32> %i, zeroinitializer
; CHECK-NEXT: [[CONV2:%.*]] = zext <4 x i1> [[CMP1]] to <4 x i32>
; CHECK-NEXT: [[AND:%.*]] = and <4 x i32> [[CONV]], [[CONV2]]
; CHECK-NEXT: ret <4 x i32> [[AND]]
; CHECK-NEXT: ret <4 x i32> zeroinitializer
;
%cmp = icmp eq <4 x i32> %i, zeroinitializer
%conv = zext <4 x i1> %cmp to <4 x i32>
%cmp0 = icmp eq <4 x i32> %i, zeroinitializer
%conv0 = zext <4 x i1> %cmp0 to <4 x i32>
%cmp1 = icmp slt <4 x i32> %i, zeroinitializer
%conv2 = zext <4 x i1> %cmp1 to <4 x i32>
%and = and <4 x i32> %conv, %conv2
%conv1 = zext <4 x i1> %cmp1 to <4 x i32>
%and = and <4 x i32> %conv0, %conv1
ret <4 x i32> %and
}
; Try a different cast and weird types.
; The new fold looks through any common cast opcode, not just zext: here the
; i1 compares are widened with sext, and odd integer widths (i3 -> i5) are
; used. %i == 0 and %i ugt 1 are disjoint, so the result is i5 0.
define i5 @and_of_sexted_icmps(i3 %i) {
; CHECK-LABEL: @and_of_sexted_icmps(
; CHECK-NEXT: ret i5 0
;
%cmp0 = icmp eq i3 %i, 0
%conv0 = sext i1 %cmp0 to i5
%cmp1 = icmp ugt i3 %i, 1
%conv1 = sext i1 %cmp1 to i5
%and = and i5 %conv0, %conv1
ret i5 %and
}
; Try a different cast and weird vector types.
; The common cast here is a bitcast from a <3 x i1> mask to i3 (with weird
; i65 lanes). Per lane, %i sgt 0 and %i slt 0 are mutually exclusive, so the
; bitwise 'and' of the two bitcast masks is always i3 0.
define i3 @and_of_bitcast_icmps_vec(<3 x i65> %i) {
; CHECK-LABEL: @and_of_bitcast_icmps_vec(
; CHECK-NEXT: ret i3 0
;
%cmp0 = icmp sgt <3 x i65> %i, zeroinitializer
%conv0 = bitcast <3 x i1> %cmp0 to i3
%cmp1 = icmp slt <3 x i65> %i, zeroinitializer
%conv1 = bitcast <3 x i1> %cmp1 to i3
%and = and i3 %conv0, %conv1
ret i3 %and
}
; We can't do this if the casts are different.
; Negative test: one compare is widened with zext and the other with sext.
; The fold in SimplifyAndInst requires matching cast opcodes, so even though
; %i == 0 and %i == 1 are disjoint, the FileCheck lines verify that all of
; the instructions remain untouched.
define i16 @and_of_different_cast_icmps(i8 %i) {
; CHECK-LABEL: @and_of_different_cast_icmps(
; CHECK-NEXT: [[CMP0:%.*]] = icmp eq i8 %i, 0
; CHECK-NEXT: [[CONV0:%.*]] = zext i1 [[CMP0]] to i16
; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i8 %i, 1
; CHECK-NEXT: [[CONV1:%.*]] = sext i1 [[CMP1]] to i16
; CHECK-NEXT: [[AND:%.*]] = and i16 [[CONV0]], [[CONV1]]
; CHECK-NEXT: ret i16 [[AND]]
;
%cmp0 = icmp eq i8 %i, 0
%conv0 = zext i1 %cmp0 to i16
%cmp1 = icmp eq i8 %i, 1
%conv1 = sext i1 %cmp1 to i16
%and = and i16 %conv0, %conv1
ret i16 %and
}
; Negative test: both casts are zext, but their source types differ
; (<2 x i8> vs. <2 x i16>), and the compares use different operands (%i, %j).
; The fold requires Cast0->getSrcTy() == Cast1->getSrcTy(), so the FileCheck
; lines verify that no simplification happens.
define <2 x i3> @and_of_different_cast_icmps_vec(<2 x i8> %i, <2 x i16> %j) {
; CHECK-LABEL: @and_of_different_cast_icmps_vec(
; CHECK-NEXT: [[CMP0:%.*]] = icmp eq <2 x i8> %i, zeroinitializer
; CHECK-NEXT: [[CONV0:%.*]] = zext <2 x i1> [[CMP0]] to <2 x i3>
; CHECK-NEXT: [[CMP1:%.*]] = icmp ugt <2 x i16> %j, <i16 1, i16 1>
; CHECK-NEXT: [[CONV1:%.*]] = zext <2 x i1> [[CMP1]] to <2 x i3>
; CHECK-NEXT: [[AND:%.*]] = and <2 x i3> [[CONV0]], [[CONV1]]
; CHECK-NEXT: ret <2 x i3> [[AND]]
;
%cmp0 = icmp eq <2 x i8> %i, zeroinitializer
%conv0 = zext <2 x i1> %cmp0 to <2 x i3>
%cmp1 = icmp ugt <2 x i16> %j, <i16 1, i16 1>
%conv1 = zext <2 x i1> %cmp1 to <2 x i3>
%and = and <2 x i3> %conv0, %conv1
ret <2 x i3> %and
}