
[InstCombine] Fold (X >>? C1) << C2 patterns to shift+bitmask (PR37872)

This essentially finalizes a revert of rL155136,
because the situation has improved since then: SCEV can now model
all these patterns well, and we canonicalize rotate-like patterns
into funnel shift intrinsics in InstCombine.
So this should not cause any pessimization.

I've verified the canonicalize-{a,l}shr-shl-to-masking.ll transforms
with Alive, which confirms that we can freely preserve exactness
and no-wrap flags.

Proofs:
* base: https://rise4fun.com/Alive/gPQ
* exactness preservation: https://rise4fun.com/Alive/izi
* nuw preservation: https://rise4fun.com/Alive/DmD
* nsw preservation: https://rise4fun.com/Alive/SLN6N
* nuw nsw preservation: https://rise4fun.com/Alive/Qp7

Refs. https://reviews.llvm.org/D46760
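
As a quick cross-check of those proofs, the base identities can also be verified
exhaustively at a small bit width. The following minimal C++ sketch is for
illustration only and is not part of this commit; it covers the lshr case (the
ashr case differs only in using an arithmetic right shift), and the
flag-preservation claims are what the Alive links above establish.

// Exhaustive 8-bit check of the folds this commit performs:
//   C1 < C2:  (X >>u C1) << C2  ==  (X << (C2 - C1)) & (-1 << C2)
//   C1 > C2:  (X >>u C1) << C2  ==  (X >>u (C1 - C2)) & (-1 << C2)
//   C1 == C2: (X >>u C1) << C2  ==  X & (-1 << C2)
#include <cassert>
#include <cstdint>

int main() {
  for (unsigned X = 0; X < 256; ++X) {
    for (unsigned C1 = 0; C1 < 8; ++C1) {
      for (unsigned C2 = 0; C2 < 8; ++C2) {
        uint8_t LHS = (uint8_t)((X >> C1) << C2);   // original (X >>u C1) << C2
        uint8_t Mask = (uint8_t)(0xFFu << C2);      // -1 << C2, truncated to i8
        uint8_t RHS;
        if (C1 < C2)
          RHS = (uint8_t)((X << (C2 - C1)) & Mask); // shift left by the difference
        else if (C1 > C2)
          RHS = (uint8_t)((X >> (C1 - C2)) & Mask); // shift right by the difference
        else
          RHS = (uint8_t)(X & Mask);                // same shift amount: just mask
        assert(LHS == RHS);
      }
    }
  }
  return 0;
}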
Roman Lebedev 2020-10-27 12:17:53 +03:00
parent 27b5264858
commit 49368a63f8
10 changed files with 120 additions and 103 deletions


@@ -922,8 +922,6 @@ Instruction *InstCombinerImpl::visitShl(BinaryOperator &I) {
      return BinaryOperator::CreateAnd(X, ConstantInt::get(Ty, Mask));
    }
    // FIXME: we do not yet transform non-exact shr's. The backend (DAGCombine)
    // needs a few fixes for the rotate pattern recognition first.
    const APInt *ShOp1;
    if (match(Op0, m_Exact(m_Shr(m_Value(X), m_APInt(ShOp1))))) {
      unsigned ShrAmt = ShOp1->getZExtValue();
@@ -945,6 +943,31 @@ Instruction *InstCombinerImpl::visitShl(BinaryOperator &I) {
      }
    }
    if (match(Op0, m_OneUse(m_Shr(m_Value(X), m_APInt(ShOp1))))) {
      unsigned ShrAmt = ShOp1->getZExtValue();
      if (ShrAmt < ShAmt) {
        // If C1 < C2: (X >>? C1) << C2 --> X << (C2 - C1) & (-1 << C2)
        Constant *ShiftDiff = ConstantInt::get(Ty, ShAmt - ShrAmt);
        auto *NewShl = BinaryOperator::CreateShl(X, ShiftDiff);
        NewShl->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
        NewShl->setHasNoSignedWrap(I.hasNoSignedWrap());
        Builder.Insert(NewShl);
        APInt Mask(APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt));
        return BinaryOperator::CreateAnd(NewShl, ConstantInt::get(Ty, Mask));
      }
      if (ShrAmt > ShAmt) {
        // If C1 > C2: (X >>? C1) << C2 --> X >>? (C1 - C2) & (-1 << C2)
        Constant *ShiftDiff = ConstantInt::get(Ty, ShrAmt - ShAmt);
        auto *OldShr = cast<BinaryOperator>(Op0);
        auto *NewShr =
            BinaryOperator::Create(OldShr->getOpcode(), X, ShiftDiff);
        NewShr->setIsExact(OldShr->isExact());
        Builder.Insert(NewShr);
        APInt Mask(APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt));
        return BinaryOperator::CreateAnd(NewShr, ConstantInt::get(Ty, Mask));
      }
    }
    if (match(Op0, m_Shl(m_Value(X), m_APInt(ShOp1)))) {
      unsigned AmtSum = ShAmt + ShOp1->getZExtValue();
      // Oversized shifts are simplified to zero in InstSimplify.
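
One detail of the new code above that may not be obvious: the mask built with
APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt) is the "(-1 << C2)" constant
from the comments, i.e. it clears exactly the low ShAmt bits. A small
standalone C++ sketch of that equivalence (illustration only, plain integers
rather than APInt):

// Check that "high (BitWidth - ShAmt) bits set" equals "-1 << ShAmt", and that
// and-ing with it drops exactly the low ShAmt bits (the effect of >>u then <<).
#include <cassert>
#include <cstdint>

int main() {
  const unsigned BitWidth = 8;
  for (unsigned ShAmt = 0; ShAmt < BitWidth; ++ShAmt) {
    // High (BitWidth - ShAmt) bits set, built without a left shift.
    uint8_t HighBits = (uint8_t)~(0xFFu >> (BitWidth - ShAmt));
    // The "-1 << C2" form from the comments, truncated to 8 bits.
    uint8_t AllOnesShl = (uint8_t)(0xFFu << ShAmt);
    assert(HighBits == AllOnesShl);
    for (unsigned X = 0; X < 256; ++X)
      assert((X & HighBits) == (((X >> ShAmt) << ShAmt) & 0xFFu));
  }
  return 0;
}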


@@ -36,8 +36,8 @@ define i8 @foo(i8 %arg, i8 %arg1) {
; CHECK-NEXT: [[T7:%.*]] = or i8 [[T4]], [[T6]]
; CHECK-NEXT: [[T8:%.*]] = xor i8 [[T]], [[T3]]
; CHECK-NEXT: [[T9:%.*]] = or i8 [[T7]], [[T8]]
; CHECK-NEXT: [[T10:%.*]] = lshr i8 [[T8]], 7
; CHECK-NEXT: [[T11:%.*]] = shl nuw nsw i8 [[T10]], 5
; CHECK-NEXT: [[TMP1:%.*]] = lshr i8 [[T8]], 2
; CHECK-NEXT: [[T11:%.*]] = and i8 [[TMP1]], 32
; CHECK-NEXT: [[T12:%.*]] = xor i8 [[T11]], [[T9]]
; CHECK-NEXT: ret i8 [[T12]]
;


@@ -213,9 +213,8 @@ define <2 x i7> @shl_lshr_splat_vec(<2 x i7> %X) {
define i23 @test11(i23 %x) {
; CHECK-LABEL: @test11(
; CHECK-NEXT: [[A:%.*]] = mul i23 [[X:%.*]], 3
; CHECK-NEXT: [[B:%.*]] = lshr i23 [[A]], 11
; CHECK-NEXT: [[C:%.*]] = shl i23 [[B]], 12
; CHECK-NEXT: [[TMP1:%.*]] = mul i23 [[X:%.*]], 6
; CHECK-NEXT: [[C:%.*]] = and i23 [[TMP1]], -4096
; CHECK-NEXT: ret i23 [[C]]
;
%a = mul i23 %x, 3
@@ -250,9 +249,8 @@ define <2 x i47> @test12_splat_vec(<2 x i47> %X) {
define i18 @test13(i18 %x) {
; CHECK-LABEL: @test13(
; CHECK-NEXT: [[A:%.*]] = mul i18 [[X:%.*]], 3
; CHECK-NEXT: [[TMP1:%.*]] = lshr i18 [[A]], 8
; CHECK-NEXT: [[C:%.*]] = shl i18 [[TMP1]], 9
; CHECK-NEXT: [[TMP1:%.*]] = mul i18 [[X:%.*]], 6
; CHECK-NEXT: [[C:%.*]] = and i18 [[TMP1]], -512
; CHECK-NEXT: ret i18 [[C]]
;
%a = mul i18 %x, 3


@@ -36,8 +36,8 @@ define i8 @positive_sameconst(i8 %x) {
define i8 @positive_biggerashr(i8 %x) {
; CHECK-LABEL: @positive_biggerashr(
; CHECK-NEXT: [[TMP0:%.*]] = ashr i8 [[X:%.*]], 6
; CHECK-NEXT: [[RET:%.*]] = shl nsw i8 [[TMP0]], 3
; CHECK-NEXT: [[TMP1:%.*]] = ashr i8 [[X:%.*]], 3
; CHECK-NEXT: [[RET:%.*]] = and i8 [[TMP1]], -8
; CHECK-NEXT: ret i8 [[RET]]
;
%tmp0 = ashr i8 %x, 6
@@ -47,8 +47,8 @@ define i8 @positive_biggerashr(i8 %x) {
define i8 @positive_biggershl(i8 %x) {
; CHECK-LABEL: @positive_biggershl(
; CHECK-NEXT: [[TMP1:%.*]] = lshr i8 [[X:%.*]], 3
; CHECK-NEXT: [[RET:%.*]] = shl i8 [[TMP1]], 6
; CHECK-NEXT: [[TMP1:%.*]] = shl i8 [[X:%.*]], 3
; CHECK-NEXT: [[RET:%.*]] = and i8 [[TMP1]], -64
; CHECK-NEXT: ret i8 [[RET]]
;
%tmp0 = ashr i8 %x, 3
@@ -83,8 +83,8 @@ define i8 @positive_sameconst_shlnuw(i8 %x) {
define i8 @positive_biggerashr_shlnuw(i8 %x) {
; CHECK-LABEL: @positive_biggerashr_shlnuw(
; CHECK-NEXT: [[TMP0:%.*]] = ashr i8 [[X:%.*]], 6
; CHECK-NEXT: [[RET:%.*]] = shl nuw nsw i8 [[TMP0]], 3
; CHECK-NEXT: [[TMP1:%.*]] = ashr i8 [[X:%.*]], 3
; CHECK-NEXT: [[RET:%.*]] = and i8 [[TMP1]], -8
; CHECK-NEXT: ret i8 [[RET]]
;
%tmp0 = ashr i8 %x, 6
@@ -94,8 +94,8 @@ define i8 @positive_biggerashr_shlnuw(i8 %x) {
define i8 @positive_biggershl_shlnuw(i8 %x) {
; CHECK-LABEL: @positive_biggershl_shlnuw(
; CHECK-NEXT: [[TMP0:%.*]] = ashr i8 [[X:%.*]], 3
; CHECK-NEXT: [[RET:%.*]] = shl nuw i8 [[TMP0]], 6
; CHECK-NEXT: [[TMP1:%.*]] = shl nuw i8 [[X:%.*]], 3
; CHECK-NEXT: [[RET:%.*]] = and i8 [[TMP1]], -64
; CHECK-NEXT: ret i8 [[RET]]
;
%tmp0 = ashr i8 %x, 3
@@ -130,8 +130,8 @@ define i8 @positive_sameconst_shlnsw(i8 %x) {
define i8 @positive_biggerashr_shlnsw(i8 %x) {
; CHECK-LABEL: @positive_biggerashr_shlnsw(
; CHECK-NEXT: [[TMP0:%.*]] = ashr i8 [[X:%.*]], 6
; CHECK-NEXT: [[RET:%.*]] = shl nsw i8 [[TMP0]], 3
; CHECK-NEXT: [[TMP1:%.*]] = ashr i8 [[X:%.*]], 3
; CHECK-NEXT: [[RET:%.*]] = and i8 [[TMP1]], -8
; CHECK-NEXT: ret i8 [[RET]]
;
%tmp0 = ashr i8 %x, 6
@@ -141,8 +141,8 @@ define i8 @positive_biggerashr_shlnsw(i8 %x) {
define i8 @positive_biggershl_shlnsw(i8 %x) {
; CHECK-LABEL: @positive_biggershl_shlnsw(
; CHECK-NEXT: [[TMP0:%.*]] = ashr i8 [[X:%.*]], 3
; CHECK-NEXT: [[RET:%.*]] = shl nsw i8 [[TMP0]], 6
; CHECK-NEXT: [[TMP1:%.*]] = shl nsw i8 [[X:%.*]], 3
; CHECK-NEXT: [[RET:%.*]] = and i8 [[TMP1]], -64
; CHECK-NEXT: ret i8 [[RET]]
;
%tmp0 = ashr i8 %x, 3
@@ -177,8 +177,8 @@ define i8 @positive_sameconst_shlnuwnsw(i8 %x) {
define i8 @positive_biggerashr_shlnuwnsw(i8 %x) {
; CHECK-LABEL: @positive_biggerashr_shlnuwnsw(
; CHECK-NEXT: [[TMP0:%.*]] = ashr i8 [[X:%.*]], 6
; CHECK-NEXT: [[RET:%.*]] = shl nuw nsw i8 [[TMP0]], 3
; CHECK-NEXT: [[TMP1:%.*]] = ashr i8 [[X:%.*]], 3
; CHECK-NEXT: [[RET:%.*]] = and i8 [[TMP1]], -8
; CHECK-NEXT: ret i8 [[RET]]
;
%tmp0 = ashr i8 %x, 6
@@ -188,8 +188,8 @@ define i8 @positive_biggerashr_shlnuwnsw(i8 %x) {
define i8 @positive_biggershl_shlnuwnsw(i8 %x) {
; CHECK-LABEL: @positive_biggershl_shlnuwnsw(
; CHECK-NEXT: [[TMP0:%.*]] = ashr i8 [[X:%.*]], 3
; CHECK-NEXT: [[RET:%.*]] = shl nuw nsw i8 [[TMP0]], 6
; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i8 [[X:%.*]], 3
; CHECK-NEXT: [[RET:%.*]] = and i8 [[TMP1]], -64
; CHECK-NEXT: ret i8 [[RET]]
;
%tmp0 = ashr i8 %x, 3
@@ -428,8 +428,8 @@ define <3 x i8> @positive_sameconst_vec_undef2(<3 x i8> %x) {
define <2 x i8> @positive_biggerashr_vec(<2 x i8> %x) {
; CHECK-LABEL: @positive_biggerashr_vec(
; CHECK-NEXT: [[TMP0:%.*]] = ashr <2 x i8> [[X:%.*]], <i8 6, i8 6>
; CHECK-NEXT: [[RET:%.*]] = shl nsw <2 x i8> [[TMP0]], <i8 3, i8 3>
; CHECK-NEXT: [[TMP1:%.*]] = ashr <2 x i8> [[X:%.*]], <i8 3, i8 3>
; CHECK-NEXT: [[RET:%.*]] = and <2 x i8> [[TMP1]], <i8 -8, i8 -8>
; CHECK-NEXT: ret <2 x i8> [[RET]]
;
%tmp0 = ashr <2 x i8> %x, <i8 6, i8 6>
@@ -472,8 +472,8 @@ define <3 x i8> @positive_biggerashr_vec_undef2(<3 x i8> %x) {
define <2 x i8> @positive_biggershl_vec(<2 x i8> %x) {
; CHECK-LABEL: @positive_biggershl_vec(
; CHECK-NEXT: [[TMP1:%.*]] = lshr <2 x i8> [[X:%.*]], <i8 3, i8 3>
; CHECK-NEXT: [[RET:%.*]] = shl <2 x i8> [[TMP1]], <i8 6, i8 6>
; CHECK-NEXT: [[TMP1:%.*]] = shl <2 x i8> [[X:%.*]], <i8 3, i8 3>
; CHECK-NEXT: [[RET:%.*]] = and <2 x i8> [[TMP1]], <i8 -64, i8 -64>
; CHECK-NEXT: ret <2 x i8> [[RET]]
;
%tmp0 = ashr <2 x i8> %x, <i8 3, i8 3>
@@ -518,15 +518,14 @@ define <3 x i8> @positive_biggershl_vec_undef2(<3 x i8> %x) {
; Positive multi-use tests with constant
; ============================================================================ ;
; FIXME: drop 'exact' once it is no longer needed.
define i8 @positive_sameconst_multiuse(i8 %x) {
; CHECK-LABEL: @positive_sameconst_multiuse(
; CHECK-NEXT: [[TMP0:%.*]] = ashr exact i8 [[X:%.*]], 3
; CHECK-NEXT: [[TMP0:%.*]] = ashr i8 [[X:%.*]], 3
; CHECK-NEXT: call void @use32(i8 [[TMP0]])
; CHECK-NEXT: ret i8 [[X]]
; CHECK-NEXT: [[RET:%.*]] = and i8 [[X]], -8
; CHECK-NEXT: ret i8 [[RET]]
;
%tmp0 = ashr exact i8 %x, 3
%tmp0 = ashr i8 %x, 3
call void @use32(i8 %tmp0)
%ret = shl i8 %tmp0, 3
ret i8 %ret
@@ -534,12 +533,12 @@ define i8 @positive_sameconst_multiuse(i8 %x) {
define i8 @positive_biggerashr_multiuse(i8 %x) {
; CHECK-LABEL: @positive_biggerashr_multiuse(
; CHECK-NEXT: [[TMP0:%.*]] = ashr exact i8 [[X:%.*]], 6
; CHECK-NEXT: [[TMP0:%.*]] = ashr i8 [[X:%.*]], 6
; CHECK-NEXT: call void @use32(i8 [[TMP0]])
; CHECK-NEXT: [[RET:%.*]] = ashr exact i8 [[X]], 3
; CHECK-NEXT: [[RET:%.*]] = shl nsw i8 [[TMP0]], 3
; CHECK-NEXT: ret i8 [[RET]]
;
%tmp0 = ashr exact i8 %x, 6
%tmp0 = ashr i8 %x, 6
call void @use32(i8 %tmp0)
%ret = shl i8 %tmp0, 3
ret i8 %ret
@@ -547,12 +546,12 @@ define i8 @positive_biggerashr_multiuse(i8 %x) {
define i8 @positive_biggershl_multiuse(i8 %x) {
; CHECK-LABEL: @positive_biggershl_multiuse(
; CHECK-NEXT: [[TMP0:%.*]] = ashr exact i8 [[X:%.*]], 3
; CHECK-NEXT: [[TMP0:%.*]] = ashr i8 [[X:%.*]], 3
; CHECK-NEXT: call void @use32(i8 [[TMP0]])
; CHECK-NEXT: [[RET:%.*]] = shl i8 [[X]], 3
; CHECK-NEXT: [[RET:%.*]] = shl i8 [[TMP0]], 6
; CHECK-NEXT: ret i8 [[RET]]
;
%tmp0 = ashr exact i8 %x, 3
%tmp0 = ashr i8 %x, 3
call void @use32(i8 %tmp0)
%ret = shl i8 %tmp0, 6
ret i8 %ret


@@ -36,8 +36,8 @@ define i8 @positive_sameconst(i8 %x) {
define i8 @positive_biggerlshr(i8 %x) {
; CHECK-LABEL: @positive_biggerlshr(
; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 [[X:%.*]], 6
; CHECK-NEXT: [[RET:%.*]] = shl nuw nsw i8 [[TMP0]], 3
; CHECK-NEXT: [[TMP1:%.*]] = lshr i8 [[X:%.*]], 3
; CHECK-NEXT: [[RET:%.*]] = and i8 [[TMP1]], 24
; CHECK-NEXT: ret i8 [[RET]]
;
%tmp0 = lshr i8 %x, 6
@@ -47,8 +47,8 @@ define i8 @positive_biggerlshr(i8 %x) {
define i8 @positive_biggershl(i8 %x) {
; CHECK-LABEL: @positive_biggershl(
; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 [[X:%.*]], 3
; CHECK-NEXT: [[RET:%.*]] = shl i8 [[TMP0]], 6
; CHECK-NEXT: [[TMP1:%.*]] = shl i8 [[X:%.*]], 3
; CHECK-NEXT: [[RET:%.*]] = and i8 [[TMP1]], -64
; CHECK-NEXT: ret i8 [[RET]]
;
%tmp0 = lshr i8 %x, 3
@@ -83,8 +83,8 @@ define i8 @positive_sameconst_shlnuw(i8 %x) {
define i8 @positive_biggerlshr_shlnuw(i8 %x) {
; CHECK-LABEL: @positive_biggerlshr_shlnuw(
; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 [[X:%.*]], 6
; CHECK-NEXT: [[RET:%.*]] = shl nuw nsw i8 [[TMP0]], 3
; CHECK-NEXT: [[TMP1:%.*]] = lshr i8 [[X:%.*]], 3
; CHECK-NEXT: [[RET:%.*]] = and i8 [[TMP1]], 24
; CHECK-NEXT: ret i8 [[RET]]
;
%tmp0 = lshr i8 %x, 6
@@ -94,8 +94,8 @@ define i8 @positive_biggerlshr_shlnuw(i8 %x) {
define i8 @positive_biggershl_shlnuw(i8 %x) {
; CHECK-LABEL: @positive_biggershl_shlnuw(
; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 [[X:%.*]], 3
; CHECK-NEXT: [[RET:%.*]] = shl nuw i8 [[TMP0]], 6
; CHECK-NEXT: [[TMP1:%.*]] = shl nuw i8 [[X:%.*]], 3
; CHECK-NEXT: [[RET:%.*]] = and i8 [[TMP1]], -64
; CHECK-NEXT: ret i8 [[RET]]
;
%tmp0 = lshr i8 %x, 3
@@ -130,8 +130,8 @@ define i8 @positive_sameconst_shlnsw(i8 %x) {
define i8 @positive_biggerlshr_shlnsw(i8 %x) {
; CHECK-LABEL: @positive_biggerlshr_shlnsw(
; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 [[X:%.*]], 6
; CHECK-NEXT: [[RET:%.*]] = shl nuw nsw i8 [[TMP0]], 3
; CHECK-NEXT: [[TMP1:%.*]] = lshr i8 [[X:%.*]], 3
; CHECK-NEXT: [[RET:%.*]] = and i8 [[TMP1]], 24
; CHECK-NEXT: ret i8 [[RET]]
;
%tmp0 = lshr i8 %x, 6
@@ -141,8 +141,8 @@ define i8 @positive_biggerlshr_shlnsw(i8 %x) {
define i8 @positive_biggershl_shlnsw(i8 %x) {
; CHECK-LABEL: @positive_biggershl_shlnsw(
; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 [[X:%.*]], 3
; CHECK-NEXT: [[RET:%.*]] = shl nsw i8 [[TMP0]], 6
; CHECK-NEXT: [[TMP1:%.*]] = shl nsw i8 [[X:%.*]], 3
; CHECK-NEXT: [[RET:%.*]] = and i8 [[TMP1]], -64
; CHECK-NEXT: ret i8 [[RET]]
;
%tmp0 = lshr i8 %x, 3
@@ -177,8 +177,8 @@ define i8 @positive_sameconst_shlnuwnsw(i8 %x) {
define i8 @positive_biggerlshr_shlnuwnsw(i8 %x) {
; CHECK-LABEL: @positive_biggerlshr_shlnuwnsw(
; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 [[X:%.*]], 6
; CHECK-NEXT: [[RET:%.*]] = shl nuw nsw i8 [[TMP0]], 3
; CHECK-NEXT: [[TMP1:%.*]] = lshr i8 [[X:%.*]], 3
; CHECK-NEXT: [[RET:%.*]] = and i8 [[TMP1]], 24
; CHECK-NEXT: ret i8 [[RET]]
;
%tmp0 = lshr i8 %x, 6
@@ -188,8 +188,8 @@ define i8 @positive_biggerlshr_shlnuwnsw(i8 %x) {
define i8 @positive_biggershl_shlnuwnsw(i8 %x) {
; CHECK-LABEL: @positive_biggershl_shlnuwnsw(
; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 [[X:%.*]], 3
; CHECK-NEXT: [[RET:%.*]] = shl nuw nsw i8 [[TMP0]], 6
; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i8 [[X:%.*]], 3
; CHECK-NEXT: [[RET:%.*]] = and i8 [[TMP1]], -64
; CHECK-NEXT: ret i8 [[RET]]
;
%tmp0 = lshr i8 %x, 3
@@ -428,8 +428,8 @@ define <3 x i8> @positive_sameconst_vec_undef2(<3 x i8> %x) {
define <2 x i8> @positive_biggerlshr_vec(<2 x i8> %x) {
; CHECK-LABEL: @positive_biggerlshr_vec(
; CHECK-NEXT: [[TMP0:%.*]] = lshr <2 x i8> [[X:%.*]], <i8 6, i8 6>
; CHECK-NEXT: [[RET:%.*]] = shl nuw nsw <2 x i8> [[TMP0]], <i8 3, i8 3>
; CHECK-NEXT: [[TMP1:%.*]] = lshr <2 x i8> [[X:%.*]], <i8 3, i8 3>
; CHECK-NEXT: [[RET:%.*]] = and <2 x i8> [[TMP1]], <i8 24, i8 24>
; CHECK-NEXT: ret <2 x i8> [[RET]]
;
%tmp0 = lshr <2 x i8> %x, <i8 6, i8 6>
@@ -472,8 +472,8 @@ define <3 x i8> @positive_biggerlshr_vec_undef2(<3 x i8> %x) {
define <2 x i8> @positive_biggershl_vec(<2 x i8> %x) {
; CHECK-LABEL: @positive_biggershl_vec(
; CHECK-NEXT: [[TMP0:%.*]] = lshr <2 x i8> [[X:%.*]], <i8 3, i8 3>
; CHECK-NEXT: [[RET:%.*]] = shl <2 x i8> [[TMP0]], <i8 6, i8 6>
; CHECK-NEXT: [[TMP1:%.*]] = shl <2 x i8> [[X:%.*]], <i8 3, i8 3>
; CHECK-NEXT: [[RET:%.*]] = and <2 x i8> [[TMP1]], <i8 -64, i8 -64>
; CHECK-NEXT: ret <2 x i8> [[RET]]
;
%tmp0 = lshr <2 x i8> %x, <i8 3, i8 3>
@@ -518,15 +518,14 @@ define <3 x i8> @positive_biggershl_vec_undef2(<3 x i8> %x) {
; Positive multi-use tests with constant
; ============================================================================ ;
; FIXME: drop 'exact' once it is no longer needed.
define i8 @positive_sameconst_multiuse(i8 %x) {
; CHECK-LABEL: @positive_sameconst_multiuse(
; CHECK-NEXT: [[TMP0:%.*]] = lshr exact i8 [[X:%.*]], 3
; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 [[X:%.*]], 3
; CHECK-NEXT: call void @use32(i8 [[TMP0]])
; CHECK-NEXT: ret i8 [[X]]
; CHECK-NEXT: [[RET:%.*]] = and i8 [[X]], -8
; CHECK-NEXT: ret i8 [[RET]]
;
%tmp0 = lshr exact i8 %x, 3
%tmp0 = lshr i8 %x, 3
call void @use32(i8 %tmp0)
%ret = shl i8 %tmp0, 3
ret i8 %ret
@@ -534,12 +533,12 @@ define i8 @positive_sameconst_multiuse(i8 %x) {
define i8 @positive_biggerlshr_multiuse(i8 %x) {
; CHECK-LABEL: @positive_biggerlshr_multiuse(
; CHECK-NEXT: [[TMP0:%.*]] = lshr exact i8 [[X:%.*]], 6
; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 [[X:%.*]], 6
; CHECK-NEXT: call void @use32(i8 [[TMP0]])
; CHECK-NEXT: [[RET:%.*]] = lshr exact i8 [[X]], 3
; CHECK-NEXT: [[RET:%.*]] = shl nuw nsw i8 [[TMP0]], 3
; CHECK-NEXT: ret i8 [[RET]]
;
%tmp0 = lshr exact i8 %x, 6
%tmp0 = lshr i8 %x, 6
call void @use32(i8 %tmp0)
%ret = shl i8 %tmp0, 3
ret i8 %ret
@@ -547,12 +546,12 @@ define i8 @positive_biggerlshr_multiuse(i8 %x) {
define i8 @positive_biggershl_multiuse(i8 %x) {
; CHECK-LABEL: @positive_biggershl_multiuse(
; CHECK-NEXT: [[TMP0:%.*]] = lshr exact i8 [[X:%.*]], 3
; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 [[X:%.*]], 3
; CHECK-NEXT: call void @use32(i8 [[TMP0]])
; CHECK-NEXT: [[RET:%.*]] = shl i8 [[X]], 3
; CHECK-NEXT: [[RET:%.*]] = shl i8 [[TMP0]], 6
; CHECK-NEXT: ret i8 [[RET]]
;
%tmp0 = lshr exact i8 %x, 3
%tmp0 = lshr i8 %x, 3
call void @use32(i8 %tmp0)
%ret = shl i8 %tmp0, 6
ret i8 %ret


@@ -684,10 +684,9 @@ define i64 @test49(i64 %A) {
define i64 @test50(i64 %x) {
; ALL-LABEL: @test50(
; ALL-NEXT: [[A:%.*]] = lshr i64 [[X:%.*]], 2
; ALL-NEXT: [[D:%.*]] = shl i64 [[A]], 32
; ALL-NEXT: [[SEXT:%.*]] = add i64 [[D]], -4294967296
; ALL-NEXT: [[E:%.*]] = ashr exact i64 [[SEXT]], 32
; ALL-NEXT: [[TMP1:%.*]] = shl i64 [[X:%.*]], 30
; ALL-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], -4294967296
; ALL-NEXT: [[E:%.*]] = ashr i64 [[TMP2]], 32
; ALL-NEXT: ret i64 [[E]]
;
%a = lshr i64 %x, 2
@@ -1318,8 +1317,8 @@ define double @test81(double *%p, float %f) {
define i64 @test82(i64 %A) {
; ALL-LABEL: @test82(
; ALL-NEXT: [[TMP1:%.*]] = shl i64 [[A:%.*]], 1
; ALL-NEXT: [[E:%.*]] = and i64 [[TMP1]], 4294966784
; ALL-NEXT: ret i64 [[E]]
; ALL-NEXT: [[D:%.*]] = and i64 [[TMP1]], 4294966784
; ALL-NEXT: ret i64 [[D]]
;
%B = trunc i64 %A to i32
%C = lshr i32 %B, 8


@@ -100,8 +100,8 @@ define void @PR37005(i8* %base, i8** %in) {
; CHECK-NEXT: [[E2:%.*]] = getelementptr inbounds i8*, i8** [[IN:%.*]], i64 undef
; CHECK-NEXT: [[E4:%.*]] = getelementptr inbounds i8*, i8** [[E2]], <2 x i64> <i64 0, i64 1>
; CHECK-NEXT: [[PI1:%.*]] = ptrtoint <2 x i8**> [[E4]] to <2 x i64>
; CHECK-NEXT: [[LR1:%.*]] = lshr <2 x i64> [[PI1]], <i64 21, i64 21>
; CHECK-NEXT: [[SL1:%.*]] = shl nuw nsw <2 x i64> [[LR1]], <i64 7, i64 7>
; CHECK-NEXT: [[TMP0:%.*]] = lshr <2 x i64> [[PI1]], <i64 14, i64 14>
; CHECK-NEXT: [[SL1:%.*]] = and <2 x i64> [[TMP0]], <i64 1125899906842496, i64 1125899906842496>
; CHECK-NEXT: [[E51:%.*]] = getelementptr inbounds i8, i8* [[BASE:%.*]], i64 80
; CHECK-NEXT: [[E6:%.*]] = getelementptr inbounds i8, i8* [[E51]], <2 x i64> [[SL1]]
; CHECK-NEXT: call void @blackhole(<2 x i8*> [[E6]])
@@ -132,8 +132,8 @@ define void @PR37005_2(i8* %base, i8** %in) {
; CHECK: loop:
; CHECK-NEXT: [[E2:%.*]] = getelementptr inbounds i8*, i8** [[IN:%.*]], i64 undef
; CHECK-NEXT: [[PI1:%.*]] = ptrtoint i8** [[E2]] to i64
; CHECK-NEXT: [[LR1:%.*]] = lshr i64 [[PI1]], 21
; CHECK-NEXT: [[SL1:%.*]] = shl nuw nsw i64 [[LR1]], 7
; CHECK-NEXT: [[TMP0:%.*]] = lshr i64 [[PI1]], 14
; CHECK-NEXT: [[SL1:%.*]] = and i64 [[TMP0]], 1125899906842496
; CHECK-NEXT: [[E51:%.*]] = getelementptr inbounds i8, i8* [[BASE:%.*]], <2 x i64> <i64 80, i64 60>
; CHECK-NEXT: [[E6:%.*]] = getelementptr inbounds i8, <2 x i8*> [[E51]], i64 [[SL1]]
; CHECK-NEXT: call void @blackhole(<2 x i8*> [[E6]])
@@ -162,8 +162,8 @@ define void @PR37005_3(<2 x i8*> %base, i8** %in) {
; CHECK-NEXT: [[E2:%.*]] = getelementptr inbounds i8*, i8** [[IN:%.*]], i64 undef
; CHECK-NEXT: [[E4:%.*]] = getelementptr inbounds i8*, i8** [[E2]], <2 x i64> <i64 0, i64 1>
; CHECK-NEXT: [[PI1:%.*]] = ptrtoint <2 x i8**> [[E4]] to <2 x i64>
; CHECK-NEXT: [[LR1:%.*]] = lshr <2 x i64> [[PI1]], <i64 21, i64 21>
; CHECK-NEXT: [[SL1:%.*]] = shl nuw nsw <2 x i64> [[LR1]], <i64 7, i64 7>
; CHECK-NEXT: [[TMP0:%.*]] = lshr <2 x i64> [[PI1]], <i64 14, i64 14>
; CHECK-NEXT: [[SL1:%.*]] = and <2 x i64> [[TMP0]], <i64 1125899906842496, i64 1125899906842496>
; CHECK-NEXT: [[E5:%.*]] = getelementptr inbounds i8, <2 x i8*> [[BASE:%.*]], i64 80
; CHECK-NEXT: [[E6:%.*]] = getelementptr inbounds i8, <2 x i8*> [[E5]], <2 x i64> [[SL1]]
; CHECK-NEXT: call void @blackhole(<2 x i8*> [[E6]])


@@ -5,7 +5,8 @@
; with nsw flag should also be non-negative
define i1 @test_shift_nonnegative(i32 %a) {
; CHECK-LABEL: @test_shift_nonnegative(
; CHECK-NEXT: ret i1 true
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[A:%.*]], -1
; CHECK-NEXT: ret i1 [[CMP]]
;
%b = lshr i32 %a, 2
%shift = shl nsw i32 %b, 3


@@ -87,9 +87,8 @@ define i8 @test10a(i8 %A) {
;; The shl may be valuable to scalar evolution.
define i8 @test11(i8 %x) {
; CHECK-LABEL: @test11(
; CHECK-NEXT: [[A:%.*]] = mul i8 [[X:%.*]], 3
; CHECK-NEXT: [[B:%.*]] = lshr i8 [[A]], 3
; CHECK-NEXT: [[C:%.*]] = shl i8 [[B]], 4
; CHECK-NEXT: [[TMP1:%.*]] = mul i8 [[X:%.*]], 6
; CHECK-NEXT: [[C:%.*]] = and i8 [[TMP1]], -16
; CHECK-NEXT: ret i8 [[C]]
;
%a = mul i8 %x, 3
@@ -143,9 +142,8 @@ define i8 @shishi(i8 %x) {
;; The shl may be valuable to scalar evolution.
define i8 @test13(i8 %x) {
; CHECK-LABEL: @test13(
; CHECK-NEXT: [[A:%.*]] = mul i8 [[X:%.*]], 3
; CHECK-NEXT: [[TMP1:%.*]] = lshr i8 [[A]], 3
; CHECK-NEXT: [[C:%.*]] = shl i8 [[TMP1]], 4
; CHECK-NEXT: [[TMP1:%.*]] = mul i8 [[X:%.*]], 6
; CHECK-NEXT: [[C:%.*]] = and i8 [[TMP1]], -16
; CHECK-NEXT: ret i8 [[C]]
;
%a = mul i8 %x, 3
@@ -1089,8 +1087,8 @@ define i32 @test55(i32 %x) {
define i32 @test56(i32 %x) {
; CHECK-LABEL: @test56(
; CHECK-NEXT: [[SHR2:%.*]] = lshr i32 [[X:%.*]], 1
; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[SHR2]], 4
; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[X:%.*]], 3
; CHECK-NEXT: [[SHL:%.*]] = and i32 [[TMP1]], -16
; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHL]], 7
; CHECK-NEXT: ret i32 [[OR]]
;
@@ -1102,8 +1100,8 @@ define i32 @test56(i32 %x) {
define i32 @test57(i32 %x) {
; CHECK-LABEL: @test57(
; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[X:%.*]], 1
; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[TMP1]], 4
; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[X:%.*]], 3
; CHECK-NEXT: [[SHL:%.*]] = and i32 [[TMP1]], -16
; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHL]], 7
; CHECK-NEXT: ret i32 [[OR]]
;
@@ -1139,8 +1137,8 @@ define <2 x i32> @test58_splat_vec(<2 x i32> %x) {
define i32 @test59(i32 %x) {
; CHECK-LABEL: @test59(
; CHECK-NEXT: [[SHR:%.*]] = ashr i32 [[X:%.*]], 4
; CHECK-NEXT: [[SHL:%.*]] = shl nsw i32 [[SHR]], 1
; CHECK-NEXT: [[TMP1:%.*]] = ashr i32 [[X:%.*]], 3
; CHECK-NEXT: [[SHL:%.*]] = and i32 [[TMP1]], -4
; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHL]], 2
; CHECK-NEXT: ret i32 [[OR]]
;


@@ -744,9 +744,9 @@ define void @trunc_shl_31_i32_i64_multi_use(i64 %val, i32 addrspace(1)* %ptr0, i
define i32 @trunc_shl_lshr_infloop(i64 %arg) {
; CHECK-LABEL: @trunc_shl_lshr_infloop(
; CHECK-NEXT: [[A:%.*]] = lshr i64 [[ARG:%.*]], 1
; CHECK-NEXT: [[B:%.*]] = shl i64 [[A]], 2
; CHECK-NEXT: [[C:%.*]] = trunc i64 [[B]] to i32
; CHECK-NEXT: [[ARG_TR:%.*]] = trunc i64 [[ARG:%.*]] to i32
; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[ARG_TR]], 1
; CHECK-NEXT: [[C:%.*]] = and i32 [[TMP1]], -4
; CHECK-NEXT: ret i32 [[C]]
;
%A = lshr i64 %arg, 1
@@ -801,9 +801,9 @@ define <2 x i32> @trunc_shl_v2i32_v2i64_outofrange(<2 x i64> %val) {
define i32 @trunc_shl_ashr_infloop(i64 %arg) {
; CHECK-LABEL: @trunc_shl_ashr_infloop(
; CHECK-NEXT: [[A:%.*]] = ashr i64 [[ARG:%.*]], 3
; CHECK-NEXT: [[B:%.*]] = shl nsw i64 [[A]], 2
; CHECK-NEXT: [[C:%.*]] = trunc i64 [[B]] to i32
; CHECK-NEXT: [[TMP1:%.*]] = lshr i64 [[ARG:%.*]], 1
; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32
; CHECK-NEXT: [[C:%.*]] = and i32 [[TMP2]], -4
; CHECK-NEXT: ret i32 [[C]]
;
%A = ashr i64 %arg, 3