
[AggressiveInstCombine] convert a chain of 'and-shift' bits into masked compare

This is a follow-up to D45986. As suggested there, we should match the "all-bits-set" 
pattern in addition to "any-bits-set".

This was a little more complicated than I thought it would be initially because the 
"and 1" instruction can be anywhere in the chain. Hopefully, the code comments make 
that logic understandable, but if you see a way to simplify or improve that, it's 
most appreciated.

This transforms patterns that emerge from bitfield tests as seen in PR37098:
https://bugs.llvm.org/show_bug.cgi?id=37098
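
For context, a minimal C++ sketch (hypothetical names, not taken from the bug
report) of the kind of bitfield test that produces these chains; once the
optimizer flattens the short-circuit logic, each field test becomes an
'lshr' + 'and 1', which is the input matched here:

    // Hypothetical reduction of the PR37098 pattern: testing several
    // single-bit bitfields lowers to a chain of shift/and-by-1 ops that
    // this fold collapses into one masked compare.
    struct Bits {
      unsigned A : 1, B : 1, C : 1, D : 1;
    };

    bool allSet(Bits X) {
      return X.A && X.B && X.C && X.D; // ideally: (bits & 15) == 15
    }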

I think it would also help reduce the large tests from D46336 and D46595,
but we need something to reassociate those cases into the forms we're
expecting here first.

Differential Revision: https://reviews.llvm.org/D46649

llvm-svn: 331937
Sanjay Patel, 2018-05-09 23:08:15 +00:00
commit 882473d8a9 (parent c2f809b52e)
3 changed files with 240 additions and 59 deletions

lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp

@@ -57,18 +57,45 @@ public:
 };
 } // namespace
 
-/// This is a recursive helper for 'and X, 1' that walks through a chain of 'or'
-/// instructions looking for shift ops of a common source value (first member of
-/// the pair). The second member of the pair is a mask constant for all of the
-/// bits that are being compared. So this:
-/// or (or (or X, (X >> 3)), (X >> 5)), (X >> 8)
-/// returns {X, 0x129} and those are the operands of an 'and' that is compared
-/// to zero.
-static bool matchMaskedCmpOp(Value *V, std::pair<Value *, APInt> &Result) {
-  // Recurse through a chain of 'or' operands.
+/// This is used by foldAnyOrAllBitsSet() to capture a source value (Root) and
+/// the bit indexes (Mask) needed by a masked compare. If we're matching a chain
+/// of 'and' ops, then we also need to capture the fact that we saw an
+/// "and X, 1", so that's an extra return value for that case.
+struct MaskOps {
+  Value *Root;
+  APInt Mask;
+  bool MatchAndChain;
+  bool FoundAnd1;
+
+  MaskOps(unsigned BitWidth, bool MatchAnds) :
+      Root(nullptr), Mask(APInt::getNullValue(BitWidth)),
+      MatchAndChain(MatchAnds), FoundAnd1(false) {}
+};
+
+/// This is a recursive helper for foldAnyOrAllBitsSet() that walks through a
+/// chain of 'and' or 'or' instructions looking for shift ops of a common source
+/// value. Examples:
+///   or (or (or X, (X >> 3)), (X >> 5)), (X >> 8)
+/// returns { X, 0x129 }
+///   and (and (X >> 1), 1), (X >> 4)
+/// returns { X, 0x12 }
+static bool matchAndOrChain(Value *V, MaskOps &MOps) {
   Value *Op0, *Op1;
-  if (match(V, m_Or(m_Value(Op0), m_Value(Op1))))
-    return matchMaskedCmpOp(Op0, Result) && matchMaskedCmpOp(Op1, Result);
+  if (MOps.MatchAndChain) {
+    // Recurse through a chain of 'and' operands. This requires an extra check
+    // vs. the 'or' matcher: we must find an "and X, 1" instruction somewhere
+    // in the chain to know that all of the high bits are cleared.
+    if (match(V, m_And(m_Value(Op0), m_One()))) {
+      MOps.FoundAnd1 = true;
+      return matchAndOrChain(Op0, MOps);
+    }
+    if (match(V, m_And(m_Value(Op0), m_Value(Op1))))
+      return matchAndOrChain(Op0, MOps) && matchAndOrChain(Op1, MOps);
+  } else {
+    // Recurse through a chain of 'or' operands.
+    if (match(V, m_Or(m_Value(Op0), m_Value(Op1))))
+      return matchAndOrChain(Op0, MOps) && matchAndOrChain(Op1, MOps);
+  }
 
   // We need a shift-right or a bare value representing a compare of bit 0 of
   // the original source operand.
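As a standalone sanity check on the mask values quoted in the new doc comment
(this snippet is illustrative, not part of the commit), each shift amount in
the chain contributes one bit index to the mask:

    #include <cassert>
    #include <cstdint>

    int main() {
      // 'or (or (or X, (X >> 3)), (X >> 5)), (X >> 8)' tests bits 0, 3, 5, 8:
      uint32_t OrMask = (1u << 0) | (1u << 3) | (1u << 5) | (1u << 8);
      assert(OrMask == 0x129);
      // 'and (and (X >> 1), 1), (X >> 4)' tests bits 1 and 4:
      uint32_t AndMask = (1u << 1) | (1u << 4);
      assert(AndMask == 0x12);
      return 0;
    }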
@@ -78,33 +105,50 @@ static bool matchMaskedCmpOp(Value *V, std::pair<Value *, APInt> &Result) {
   Candidate = V;
 
   // Initialize result source operand.
-  if (!Result.first)
-    Result.first = Candidate;
+  if (!MOps.Root)
+    MOps.Root = Candidate;
 
   // Fill in the mask bit derived from the shift constant.
-  Result.second.setBit(BitIndex);
-  return Result.first == Candidate;
+  MOps.Mask.setBit(BitIndex);
+  return MOps.Root == Candidate;
 }
 
-/// Match an 'and' of a chain of or-shifted bits from a common source value into
-/// a masked compare:
-/// and (or (lshr X, C), ...), 1 --> (X & C') != 0
-static bool foldToMaskedCmp(Instruction &I) {
-  // TODO: This is only looking for 'any-bits-set' and 'all-bits-clear'.
-  // We should also match 'all-bits-set' and 'any-bits-clear' by looking for a
-  // a chain of 'and'.
-  if (!match(&I, m_And(m_OneUse(m_Or(m_Value(), m_Value())), m_One())))
+/// Match patterns that correspond to "any-bits-set" and "all-bits-set".
+/// These will include a chain of 'or' or 'and'-shifted bits from a
+/// common source value:
+/// and (or  (lshr X, C), ...), 1 --> (X & CMask) != 0
+/// and (and (lshr X, C), ...), 1 --> (X & CMask) == CMask
+/// Note: "any-bits-clear" and "all-bits-clear" are variations of these patterns
+/// that differ only with a final 'not' of the result. We expect that final
+/// 'not' to be folded with the compare that we create here (invert predicate).
+static bool foldAnyOrAllBitsSet(Instruction &I) {
+  // The 'any-bits-set' ('or' chain) pattern is simpler to match because the
+  // final "and X, 1" instruction must be the final op in the sequence.
+  bool MatchAllBitsSet;
+  if (match(&I, m_c_And(m_OneUse(m_And(m_Value(), m_Value())), m_Value())))
+    MatchAllBitsSet = true;
+  else if (match(&I, m_And(m_OneUse(m_Or(m_Value(), m_Value())), m_One())))
+    MatchAllBitsSet = false;
+  else
     return false;
 
-  std::pair<Value *, APInt>
-      MaskOps(nullptr, APInt::getNullValue(I.getType()->getScalarSizeInBits()));
-  if (!matchMaskedCmpOp(cast<BinaryOperator>(&I)->getOperand(0), MaskOps))
-    return false;
+  MaskOps MOps(I.getType()->getScalarSizeInBits(), MatchAllBitsSet);
+  if (MatchAllBitsSet) {
+    if (!matchAndOrChain(cast<BinaryOperator>(&I), MOps) || !MOps.FoundAnd1)
+      return false;
+  } else {
+    if (!matchAndOrChain(cast<BinaryOperator>(&I)->getOperand(0), MOps))
+      return false;
+  }
 
   // The pattern was found. Create a masked compare that replaces all of the
   // shift and logic ops.
   IRBuilder<> Builder(&I);
-  Value *Mask = Builder.CreateAnd(MaskOps.first, MaskOps.second);
-  Value *CmpZero = Builder.CreateIsNotNull(Mask);
-  Value *Zext = Builder.CreateZExt(CmpZero, I.getType());
+  Constant *Mask = ConstantInt::get(I.getType(), MOps.Mask);
+  Value *And = Builder.CreateAnd(MOps.Root, Mask);
+  Value *Cmp = MatchAllBitsSet ? Builder.CreateICmpEQ(And, Mask) :
+                                 Builder.CreateIsNotNull(And);
+  Value *Zext = Builder.CreateZExt(Cmp, I.getType());
   I.replaceAllUsesWith(Zext);
   return true;
 }
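A brute-force equivalence check for both rewrites created above (a standalone
sketch over the uint8_t domain, not part of the commit; the 0x12 mask and
variable names are illustrative):

    #include <cassert>
    #include <cstdint>

    int main() {
      for (uint32_t X = 0; X <= 0xFF; ++X) {
        // any-bits-set: and (or (X >> 1), (X >> 4)), 1 --> (X & 0x12) != 0
        bool AnyBefore = ((X >> 1) | (X >> 4)) & 1;
        bool AnyAfter = (X & 0x12) != 0;
        assert(AnyBefore == AnyAfter);
        // all-bits-set: and (and (X >> 1), 1), (X >> 4) --> (X & 0x12) == 0x12
        bool AllBefore = ((X >> 1) & 1) & (X >> 4);
        bool AllAfter = (X & 0x12) == 0x12;
        assert(AllBefore == AllAfter);
      }
      return 0;
    }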
@@ -119,8 +163,13 @@ static bool foldUnusualPatterns(Function &F, DominatorTree &DT) {
     if (!DT.isReachableFromEntry(&BB))
       continue;
     // Do not delete instructions under here and invalidate the iterator.
-    for (Instruction &I : BB)
-      MadeChange |= foldToMaskedCmp(I);
+    // Walk the block backwards for efficiency. We're matching a chain of
+    // use->defs, so we're more likely to succeed by starting from the bottom.
+    // Also, we want to avoid matching partial patterns.
+    // TODO: It would be more efficient if we removed dead instructions
+    // iteratively in this loop rather than waiting until the end.
+    for (Instruction &I : make_range(BB.rbegin(), BB.rend()))
+      MadeChange |= foldAnyOrAllBitsSet(I);
   }
 
   // We're done with transforms, so remove dead instructions.

test/Transforms/AggressiveInstCombine/masked-cmp.ll

@@ -51,19 +51,27 @@ define i32 @anyset_three_bit_mask_all_shifted_bits(i32 %x) {
   ret i32 %r
 }
 
-; TODO: Recognize the 'and' sibling pattern. The 'and 1' may not be at the end.
+; Recognize the 'and' sibling pattern (all-bits-set). The 'and 1' may not be at the end.
 
+define i32 @allset_two_bit_mask(i32 %x) {
+; CHECK-LABEL: @allset_two_bit_mask(
+; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], 129
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 129
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i1 [[TMP2]] to i32
+; CHECK-NEXT:    ret i32 [[TMP3]]
+;
+  %s = lshr i32 %x, 7
+  %o = and i32 %s, %x
+  %r = and i32 %o, 1
+  ret i32 %r
+}
+
 define i64 @allset_four_bit_mask(i64 %x) {
 ; CHECK-LABEL: @allset_four_bit_mask(
-; CHECK-NEXT:    [[T1:%.*]] = lshr i64 [[X:%.*]], 1
-; CHECK-NEXT:    [[T2:%.*]] = lshr i64 [[X]], 2
-; CHECK-NEXT:    [[T3:%.*]] = lshr i64 [[X]], 3
-; CHECK-NEXT:    [[T4:%.*]] = lshr i64 [[X]], 4
-; CHECK-NEXT:    [[A1:%.*]] = and i64 [[T4]], 1
-; CHECK-NEXT:    [[A2:%.*]] = and i64 [[T2]], [[A1]]
-; CHECK-NEXT:    [[A3:%.*]] = and i64 [[A2]], [[T1]]
-; CHECK-NEXT:    [[R:%.*]] = and i64 [[A3]], [[T3]]
-; CHECK-NEXT:    ret i64 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = and i64 [[X:%.*]], 30
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i64 [[TMP1]], 30
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i1 [[TMP2]] to i64
+; CHECK-NEXT:    ret i64 [[TMP3]]
 ;
   %t1 = lshr i64 %x, 1
   %t2 = lshr i64 %x, 2
@@ -76,3 +84,136 @@ define i64 @allset_four_bit_mask(i64 %x) {
   ret i64 %r
 }
 
+declare void @use(i32)
+
+; negative test - extra use means the transform would increase instruction count
+
+define i32 @allset_two_bit_mask_multiuse(i32 %x) {
+; CHECK-LABEL: @allset_two_bit_mask_multiuse(
+; CHECK-NEXT:    [[S:%.*]] = lshr i32 [[X:%.*]], 7
+; CHECK-NEXT:    [[O:%.*]] = and i32 [[S]], [[X]]
+; CHECK-NEXT:    [[R:%.*]] = and i32 [[O]], 1
+; CHECK-NEXT:    call void @use(i32 [[O]])
+; CHECK-NEXT:    ret i32 [[R]]
+;
+  %s = lshr i32 %x, 7
+  %o = and i32 %s, %x
+  %r = and i32 %o, 1
+  call void @use(i32 %o)
+  ret i32 %r
+}
+
+; negative test - missing 'and 1' mask, so more than the low bit is used here
+
+define i8 @allset_three_bit_mask_no_and1(i8 %x) {
+; CHECK-LABEL: @allset_three_bit_mask_no_and1(
+; CHECK-NEXT:    [[T1:%.*]] = lshr i8 [[X:%.*]], 1
+; CHECK-NEXT:    [[T2:%.*]] = lshr i8 [[X]], 2
+; CHECK-NEXT:    [[T3:%.*]] = lshr i8 [[X]], 3
+; CHECK-NEXT:    [[A2:%.*]] = and i8 [[T1]], [[T2]]
+; CHECK-NEXT:    [[R:%.*]] = and i8 [[A2]], [[T3]]
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %t1 = lshr i8 %x, 1
+  %t2 = lshr i8 %x, 2
+  %t3 = lshr i8 %x, 3
+  %a2 = and i8 %t1, %t2
+  %r = and i8 %a2, %t3
+  ret i8 %r
+}
+
+; This test demonstrates that the transform can be large. If the implementation
+; is slow or explosive (stack overflow due to recursion), it should be made efficient.
+
+define i64 @allset_40_bit_mask(i64 %x) {
+; CHECK-LABEL: @allset_40_bit_mask(
+; CHECK-NEXT:    [[TMP1:%.*]] = and i64 [[X:%.*]], 2199023255550
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i64 [[TMP1]], 2199023255550
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i1 [[TMP2]] to i64
+; CHECK-NEXT:    ret i64 [[TMP3]]
+;
+  %t1 = lshr i64 %x, 1
+  %t2 = lshr i64 %x, 2
+  %t3 = lshr i64 %x, 3
+  %t4 = lshr i64 %x, 4
+  %t5 = lshr i64 %x, 5
+  %t6 = lshr i64 %x, 6
+  %t7 = lshr i64 %x, 7
+  %t8 = lshr i64 %x, 8
+  %t9 = lshr i64 %x, 9
+  %t10 = lshr i64 %x, 10
+  %t11 = lshr i64 %x, 11
+  %t12 = lshr i64 %x, 12
+  %t13 = lshr i64 %x, 13
+  %t14 = lshr i64 %x, 14
+  %t15 = lshr i64 %x, 15
+  %t16 = lshr i64 %x, 16
+  %t17 = lshr i64 %x, 17
+  %t18 = lshr i64 %x, 18
+  %t19 = lshr i64 %x, 19
+  %t20 = lshr i64 %x, 20
+  %t21 = lshr i64 %x, 21
+  %t22 = lshr i64 %x, 22
+  %t23 = lshr i64 %x, 23
+  %t24 = lshr i64 %x, 24
+  %t25 = lshr i64 %x, 25
+  %t26 = lshr i64 %x, 26
+  %t27 = lshr i64 %x, 27
+  %t28 = lshr i64 %x, 28
+  %t29 = lshr i64 %x, 29
+  %t30 = lshr i64 %x, 30
+  %t31 = lshr i64 %x, 31
+  %t32 = lshr i64 %x, 32
+  %t33 = lshr i64 %x, 33
+  %t34 = lshr i64 %x, 34
+  %t35 = lshr i64 %x, 35
+  %t36 = lshr i64 %x, 36
+  %t37 = lshr i64 %x, 37
+  %t38 = lshr i64 %x, 38
+  %t39 = lshr i64 %x, 39
+  %t40 = lshr i64 %x, 40
+  %a1 = and i64 %t1, 1
+  %a2 = and i64 %t2, %a1
+  %a3 = and i64 %t3, %a2
+  %a4 = and i64 %t4, %a3
+  %a5 = and i64 %t5, %a4
+  %a6 = and i64 %t6, %a5
+  %a7 = and i64 %t7, %a6
+  %a8 = and i64 %t8, %a7
+  %a9 = and i64 %t9, %a8
+  %a10 = and i64 %t10, %a9
+  %a11 = and i64 %t11, %a10
+  %a12 = and i64 %t12, %a11
+  %a13 = and i64 %t13, %a12
+  %a14 = and i64 %t14, %a13
+  %a15 = and i64 %t15, %a14
+  %a16 = and i64 %t16, %a15
+  %a17 = and i64 %t17, %a16
+  %a18 = and i64 %t18, %a17
+  %a19 = and i64 %t19, %a18
+  %a20 = and i64 %t20, %a19
+  %a21 = and i64 %t21, %a20
+  %a22 = and i64 %t22, %a21
+  %a23 = and i64 %t23, %a22
+  %a24 = and i64 %t24, %a23
+  %a25 = and i64 %t25, %a24
+  %a26 = and i64 %t26, %a25
+  %a27 = and i64 %t27, %a26
+  %a28 = and i64 %t28, %a27
+  %a29 = and i64 %t29, %a28
+  %a30 = and i64 %t30, %a29
+  %a31 = and i64 %t31, %a30
+  %a32 = and i64 %t32, %a31
+  %a33 = and i64 %t33, %a32
+  %a34 = and i64 %t34, %a33
+  %a35 = and i64 %t35, %a34
+  %a36 = and i64 %t36, %a35
+  %a37 = and i64 %t37, %a36
+  %a38 = and i64 %t38, %a37
+  %a39 = and i64 %t39, %a38
+  %a40 = and i64 %t40, %a39
+  ret i64 %a40
+}

test/Transforms/PhaseOrdering/bitfield-bittests.ll

@@ -76,14 +76,10 @@ define i32 @anyset(i32 %a) {
 
 define i32 @allset(i32 %a) {
 ; CHECK-LABEL: @allset(
-; CHECK-NEXT:    [[BF_LSHR:%.*]] = lshr i32 [[A:%.*]], 1
-; CHECK-NEXT:    [[BF_LSHR5:%.*]] = lshr i32 [[A]], 2
-; CHECK-NEXT:    [[BF_LSHR10:%.*]] = lshr i32 [[A]], 3
-; CHECK-NEXT:    [[BF_CLEAR2:%.*]] = and i32 [[A]], 1
-; CHECK-NEXT:    [[AND:%.*]] = and i32 [[BF_CLEAR2]], [[BF_LSHR]]
-; CHECK-NEXT:    [[AND8:%.*]] = and i32 [[AND]], [[BF_LSHR5]]
-; CHECK-NEXT:    [[AND13:%.*]] = and i32 [[AND8]], [[BF_LSHR10]]
-; CHECK-NEXT:    ret i32 [[AND13]]
+; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[A:%.*]], 15
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 15
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i1 [[TMP2]] to i32
+; CHECK-NEXT:    ret i32 [[TMP3]]
 ;
   %a.sroa.0.0.trunc = trunc i32 %a to i8
   %a.sroa.5.0.shift = lshr i32 %a, 8
@@ -110,15 +106,10 @@ define i32 @allset(i32 %a) {
 
 define i32 @anyclear(i32 %a) {
 ; CHECK-LABEL: @anyclear(
-; CHECK-NEXT:    [[BF_LSHR:%.*]] = lshr i32 [[A:%.*]], 1
-; CHECK-NEXT:    [[BF_LSHR5:%.*]] = lshr i32 [[A]], 2
-; CHECK-NEXT:    [[BF_LSHR10:%.*]] = lshr i32 [[A]], 3
-; CHECK-NEXT:    [[BF_CLEAR2:%.*]] = and i32 [[A]], 1
-; CHECK-NEXT:    [[AND:%.*]] = and i32 [[BF_CLEAR2]], [[BF_LSHR]]
-; CHECK-NEXT:    [[AND8:%.*]] = and i32 [[AND]], [[BF_LSHR5]]
-; CHECK-NEXT:    [[AND13:%.*]] = and i32 [[AND8]], [[BF_LSHR10]]
-; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[AND13]], 1
-; CHECK-NEXT:    ret i32 [[TMP1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[A:%.*]], 15
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 15
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i1 [[TMP2]] to i32
+; CHECK-NEXT:    ret i32 [[TMP3]]
 ;
   %a.sroa.0.0.trunc = trunc i32 %a to i8
   %a.sroa.5.0.shift = lshr i32 %a, 8