
[InstCombine] use m_APInt to allow icmp (shl X, Y), C folds for splat constant vectors, part 1

This is a partial enablement (move the ConstantInt guard down) because there are many
different folds here and one of the later ones will require reworking 'isSignBitCheck'.

llvm-svn: 279339
Sanjay Patel 2016-08-19 22:33:26 +00:00
parent 670a716948
commit 8dbd727c17
3 changed files with 36 additions and 33 deletions
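
For a concrete sense of what now folds, here is a minimal splat-vector example (a reduction of the icmp_shl_nuw_ne_vec test added below; the function name is illustrative). With this change, opt -instcombine folds the nuw shift out of the equality test entirely:

define <2 x i1> @shl_nuw_splat_example(<2 x i32> %x) {
  %shl = shl nuw <2 x i32> %x, <i32 7, i32 7>        ; nuw: only zero bits are shifted out
  %cmp = icmp ne <2 x i32> %shl, <i32 256, i32 256>  ; 256 == 2 << 7
  ret <2 x i1> %cmp
}
; after instcombine: %cmp = icmp ne <2 x i32> %x, <i32 2, i32 2>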


@@ -1990,20 +1990,14 @@ static Instruction *foldICmpShlOne(ICmpInst &Cmp, Instruction *Shl,
 /// Fold icmp (shl X, Y), C.
 Instruction *InstCombiner::foldICmpShlConstant(ICmpInst &Cmp, Instruction *Shl,
                                                const APInt *C) {
-  // FIXME: This should use m_APInt to allow splat vectors.
-  ConstantInt *ShAmt = dyn_cast<ConstantInt>(Shl->getOperand(1));
-  if (!ShAmt)
+  const APInt *ShiftAmt;
+  if (!match(Shl->getOperand(1), m_APInt(ShiftAmt)))
     return foldICmpShlOne(Cmp, Shl, C);
 
-  // FIXME: This check restricts all folds under here to scalar types.
-  ConstantInt *RHS = dyn_cast<ConstantInt>(Cmp.getOperand(1));
-  if (!RHS)
-    return nullptr;
-
   // Check that the shift amount is in range. If not, don't perform undefined
   // shifts. When the shift is visited it will be simplified.
   unsigned TypeBits = C->getBitWidth();
-  if (ShAmt->uge(TypeBits))
+  if (ShiftAmt->uge(TypeBits))
     return nullptr;
 
   ICmpInst::Predicate Pred = Cmp.getPredicate();
@@ -2011,25 +2005,30 @@ Instruction *InstCombiner::foldICmpShlConstant(ICmpInst &Cmp, Instruction *Shl,
   if (Cmp.isEquality()) {
     // If the shift is NUW, then it is just shifting out zeros, no need for an
     // AND.
+    Constant *LShrC = ConstantInt::get(Shl->getType(), C->lshr(*ShiftAmt));
     if (cast<BinaryOperator>(Shl)->hasNoUnsignedWrap())
-      return new ICmpInst(Pred, X, ConstantExpr::getLShr(RHS, ShAmt));
+      return new ICmpInst(Pred, X, LShrC);
 
     // If the shift is NSW and we compare to 0, then it is just shifting out
     // sign bits, no need for an AND either.
     if (cast<BinaryOperator>(Shl)->hasNoSignedWrap() && *C == 0)
-      return new ICmpInst(Pred, X, ConstantExpr::getLShr(RHS, ShAmt));
+      return new ICmpInst(Pred, X, LShrC);
 
     if (Shl->hasOneUse()) {
       // Otherwise strength reduce the shift into an and.
-      uint32_t ShAmtVal = (uint32_t)ShAmt->getLimitedValue(TypeBits);
-      Constant *Mask =
-          Builder->getInt(APInt::getLowBitsSet(TypeBits, TypeBits - ShAmtVal));
+      Constant *Mask = ConstantInt::get(Shl->getType(),
+          APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt->getZExtValue()));
 
       Value *And = Builder->CreateAnd(X, Mask, Shl->getName() + ".mask");
-      return new ICmpInst(Pred, And, ConstantExpr::getLShr(RHS, ShAmt));
+      return new ICmpInst(Pred, And, LShrC);
     }
   }
 
+  // FIXME: This check restricts all folds under here to scalar types.
+  ConstantInt *RHS = dyn_cast<ConstantInt>(Cmp.getOperand(1));
+  if (!RHS)
+    return nullptr;
+
   // If this is a signed comparison to 0 and the shift is sign preserving,
   // use the shift LHS operand instead; isSignTest may change 'Pred', so only
   // do that if we're sure to not continue on in this function.
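
When the shift has no nuw/nsw flags, an equality compare is instead strength-reduced into a mask of the bits that survive the shift; with the splat-aware Mask construction above, that now fires for vectors too. A sketch matching the icmp_shl_eq_vec test below (low 27 bits kept, 134217727 == 0x07ffffff):

  %mul = shl <2 x i32> %x, <i32 5, i32 5>
  %cmp = icmp eq <2 x i32> %mul, zeroinitializer
; becomes:
  %mul.mask = and <2 x i32> %x, <i32 134217727, i32 134217727>
  %cmp = icmp eq <2 x i32> %mul.mask, zeroinitializer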
@@ -2042,7 +2041,7 @@ Instruction *InstCombiner::foldICmpShlConstant(ICmpInst &Cmp, Instruction *Shl,
       // (X << 31) <s 0  --> (X&1) != 0
       Constant *Mask = ConstantInt::get(
           X->getType(),
-          APInt::getOneBitSet(TypeBits, TypeBits - ShAmt->getZExtValue() - 1));
+          APInt::getOneBitSet(TypeBits, TypeBits - ShiftAmt->getZExtValue() - 1));
       Value *And = Builder->CreateAnd(X, Mask, Shl->getName() + ".mask");
       return new ICmpInst(TrueIfSigned ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ,
                           And, Constant::getNullValue(And->getType()));
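
This sign-test fold sits below the relocated RHS guard, so it remains scalar-only for now (the "part 1" in the commit title). For scalars it reduces a sign check of a shifted value to a single-bit test, per the comment above:

  %shl = shl i32 %x, 31
  %cmp = icmp slt i32 %shl, 0
; becomes:
  %shl.mask = and i32 %x, 1
  %cmp = icmp ne i32 %shl.mask, 0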
@@ -2054,7 +2053,7 @@ Instruction *InstCombiner::foldICmpShlConstant(ICmpInst &Cmp, Instruction *Shl,
   // This enables to get rid of the shift in favor of a trunc which can be
   // free on the target. It has the additional benefit of comparing to a
   // smaller constant, which will be target friendly.
-  unsigned Amt = ShAmt->getLimitedValue(TypeBits - 1);
+  unsigned Amt = ShiftAmt->getLimitedValue(TypeBits - 1);
   if (Shl->hasOneUse() && Amt != 0 && C->countTrailingZeros() >= Amt) {
     Type *NTy = IntegerType::get(Cmp.getContext(), TypeBits - Amt);
     Constant *NCI = ConstantExpr::getTrunc(
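
This trunc fold is likewise still behind the scalar-only RHS check. A hand-worked sketch under the conditions shown (one use, nonzero shift amount, C with at least Amt trailing zeros; a non-equality predicate, since equality compares are handled earlier):

  %shl = shl i32 %x, 8
  %cmp = icmp ult i32 %shl, 512
; becomes a narrower compare against a smaller constant (512 lshr 8 == 2):
  %tr = trunc i32 %x to i24
  %cmp = icmp ult i24 %tr, 2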


@@ -175,10 +175,10 @@ define i1 @test13(i64 %X, %S* %P) {
 ; CHECK: %C = icmp eq i64 %X, -1
 }
 
+; This is a test of icmp + shl nuw in disguise - 4611... is 0x3fff...
 define <2 x i1> @test13_vector(<2 x i64> %X, <2 x %S*> %P) nounwind {
 ; CHECK-LABEL: @test13_vector(
-; CHECK-NEXT:    [[A_IDX:%.*]] = shl nuw <2 x i64> %X, <i64 2, i64 2>
-; CHECK-NEXT:    [[C:%.*]] = icmp eq <2 x i64> [[A_IDX]], <i64 -4, i64 -4>
+; CHECK-NEXT:    [[C:%.*]] = icmp eq <2 x i64> %X, <i64 4611686018427387903, i64 4611686018427387903>
 ; CHECK-NEXT:    ret <2 x i1> [[C]]
 ;
   %A = getelementptr inbounds %S, <2 x %S*> %P, <2 x i64> zeroinitializer, <2 x i32> <i32 1, i32 1>, <2 x i64> %X
@@ -197,10 +197,10 @@ define i1 @test13_as1(i16 %X, %S addrspace(1)* %P) {
   ret i1 %C
 }
 
+; This is a test of icmp + shl nuw in disguise - 16383 is 0x3fff.
 define <2 x i1> @test13_vector_as1(<2 x i16> %X, <2 x %S addrspace(1)*> %P) {
 ; CHECK-LABEL: @test13_vector_as1(
-; CHECK-NEXT:    [[A_IDX:%.*]] = shl nuw <2 x i16> %X, <i16 2, i16 2>
-; CHECK-NEXT:    [[C:%.*]] = icmp eq <2 x i16> [[A_IDX]], <i16 -4, i16 -4>
+; CHECK-NEXT:    [[C:%.*]] = icmp eq <2 x i16> %X, <i16 16383, i16 16383>
 ; CHECK-NEXT:    ret <2 x i1> [[C]]
 ;
   %A = getelementptr inbounds %S, <2 x %S addrspace(1)*> %P, <2 x i16> <i16 0, i16 0>, <2 x i32> <i32 1, i32 1>, <2 x i16> %X
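
Both tests are an icmp + shl nuw in disguise: the GEP scales %X by the element size (an shl nuw by 2), and the compare against -4 now folds to comparing %X against -4 lshr 2 (0x3fffffffffffffff == 4611686018427387903 for i64, 0x3fff == 16383 for i16). A standalone reduction of the i16 case (hypothetical test name):

define <2 x i1> @gep_icmp_in_disguise(<2 x i16> %x) {
  %idx = shl nuw <2 x i16> %x, <i16 2, i16 2>   ; the GEP's scaling by 4
  %c = icmp eq <2 x i16> %idx, <i16 -4, i16 -4>
  ret <2 x i1> %c
}
; after instcombine: %c = icmp eq <2 x i16> %x, <i16 16383, i16 16383>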


@@ -1183,11 +1183,9 @@ define i1 @icmp_shl_nsw_eq(i32 %x) {
   ret i1 %cmp
 }
 
-; FIXME: Vectors should fold the same way.
 define <2 x i1> @icmp_shl_nsw_eq_vec(<2 x i32> %x) {
 ; CHECK-LABEL: @icmp_shl_nsw_eq_vec(
-; CHECK-NEXT:    [[MUL:%.*]] = shl nsw <2 x i32> %x, <i32 5, i32 5>
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i32> [[MUL]], zeroinitializer
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i32> %x, zeroinitializer
 ; CHECK-NEXT:    ret <2 x i1> [[CMP]]
 ;
   %mul = shl nsw <2 x i32> %x, <i32 5, i32 5>
@@ -1206,11 +1204,10 @@ define i1 @icmp_shl_eq(i32 %x) {
   ret i1 %cmp
 }
 
-; FIXME: Vectors should fold the same way.
 define <2 x i1> @icmp_shl_eq_vec(<2 x i32> %x) {
 ; CHECK-LABEL: @icmp_shl_eq_vec(
-; CHECK-NEXT:    [[MUL:%.*]] = shl <2 x i32> %x, <i32 5, i32 5>
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i32> [[MUL]], zeroinitializer
+; CHECK-NEXT:    [[MUL_MASK:%.*]] = and <2 x i32> %x, <i32 134217727, i32 134217727>
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i32> [[MUL_MASK]], zeroinitializer
 ; CHECK-NEXT:    ret <2 x i1> [[CMP]]
 ;
   %mul = shl <2 x i32> %x, <i32 5, i32 5>
@@ -1228,11 +1225,9 @@ define i1 @icmp_shl_nsw_ne(i32 %x) {
   ret i1 %cmp
 }
 
-; FIXME: Vectors should fold the same way.
 define <2 x i1> @icmp_shl_nsw_ne_vec(<2 x i32> %x) {
 ; CHECK-LABEL: @icmp_shl_nsw_ne_vec(
-; CHECK-NEXT:    [[MUL:%.*]] = shl nsw <2 x i32> %x, <i32 7, i32 7>
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i32> [[MUL]], zeroinitializer
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i32> %x, zeroinitializer
 ; CHECK-NEXT:    ret <2 x i1> [[CMP]]
 ;
   %mul = shl nsw <2 x i32> %x, <i32 7, i32 7>
@@ -1251,11 +1246,10 @@ define i1 @icmp_shl_ne(i32 %x) {
   ret i1 %cmp
 }
 
-; FIXME: Vectors should fold the same way.
 define <2 x i1> @icmp_shl_ne_vec(<2 x i32> %x) {
 ; CHECK-LABEL: @icmp_shl_ne_vec(
-; CHECK-NEXT:    [[MUL:%.*]] = shl <2 x i32> %x, <i32 7, i32 7>
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i32> [[MUL]], zeroinitializer
+; CHECK-NEXT:    [[MUL_MASK:%.*]] = and <2 x i32> %x, <i32 33554431, i32 33554431>
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i32> [[MUL_MASK]], zeroinitializer
 ; CHECK-NEXT:    ret <2 x i1> [[CMP]]
 ;
   %mul = shl <2 x i32> %x, <i32 7, i32 7>
@@ -1263,6 +1257,16 @@ define <2 x i1> @icmp_shl_ne_vec(<2 x i32> %x) {
   ret <2 x i1> %cmp
 }
 
+define <2 x i1> @icmp_shl_nuw_ne_vec(<2 x i32> %x) {
+; CHECK-LABEL: @icmp_shl_nuw_ne_vec(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i32> %x, <i32 2, i32 2>
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %shl = shl nuw <2 x i32> %x, <i32 7, i32 7>
+  %cmp = icmp ne <2 x i32> %shl, <i32 256, i32 256>
+  ret <2 x i1> %cmp
+}
+
 ; If the (mul x, C) preserved the sign and this is sign test,
 ; compare the LHS operand instead
 define i1 @icmp_mul_nsw(i32 %x) {