
[SCEV] Simplify zext/trunc idiom that appears when handling bitmasks.

Summary:
Specifically, we transform

  zext(2^K * (trunc X to iN)) to iM ->
  2^K * (zext(trunc X to i{N-K}) to iM)<nuw>

This is helpful because pulling the 2^K out of the zext allows further
optimizations.

Reviewers: sanjoy

Subscribers: hiraditya, llvm-commits, timshen

Differential Revision: https://reviews.llvm.org/D48158

llvm-svn: 334737
Justin Lebar 2018-06-14 17:13:48 +00:00
parent 4a161bf875
commit aff7184247
3 changed files with 42 additions and 2 deletions
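As a quick sanity check of the identity in the summary (a standalone sketch, not part of this commit), the rewrite can be verified exhaustively for small widths with plain C++ integers. The widths K = 3 and N = 5 below are arbitrary examples, the zext target width M = 32 is modeled directly by uint32_t, and explicit masking stands in for the fixed-width types.

// Verifies  zext(2^K * (trunc X to iN)) to iM  ==  2^K * (zext(trunc X to i{N-K}) to iM)
// for every 8-bit X, using masks to model the iN and i{N-K} types.
#include <cassert>
#include <cstdint>

int main() {
  const unsigned K = 3, N = 5;
  const uint32_t MaskN = (1u << N) - 1;        // models iN
  const uint32_t MaskNK = (1u << (N - K)) - 1; // models i{N-K}

  for (uint32_t X = 0; X < 256; ++X) {
    // LHS: multiply the iN truncation by 2^K (wrapping at N bits), then zext to iM.
    uint32_t LHS = ((X & MaskN) << K) & MaskN;
    // RHS: truncate to i{N-K} first, zext to iM, then multiply by 2^K (cannot wrap).
    uint32_t RHS = (X & MaskNK) << K;
    assert(LHS == RHS);
  }
  return 0;
}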


@@ -1778,6 +1778,32 @@ ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
         Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
       return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1);
     }
+
+    // zext(2^K * (trunc X to iN)) to iM ->
+    //   2^K * (zext(trunc X to i{N-K}) to iM)<nuw>
+    //
+    // Proof:
+    //
+    //   zext(2^K * (trunc X to iN)) to iM
+    // = zext((trunc X to iN) << K) to iM
+    // = zext((trunc X to i{N-K}) << K)<nuw> to iM
+    //   (because shl removes the top K bits)
+    // = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM
+    // = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>.
+    //
+    if (SA->getNumOperands() == 2)
+      if (auto *MulLHS = dyn_cast<SCEVConstant>(SA->getOperand(0)))
+        if (MulLHS->getAPInt().isPowerOf2())
+          if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SA->getOperand(1))) {
+            int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) -
+                               MulLHS->getAPInt().logBase2();
+            Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits);
+            return getMulExpr(
+                getZeroExtendExpr(MulLHS, Ty),
+                getZeroExtendExpr(
+                    getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty),
+                SCEV::FlagNUW, Depth + 1);
+          }
   }

   // The cast wasn't folded; create an explicit cast node.
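As a worked instance of the proof comment (again a standalone sketch, not part of the patch), each line of the proof can be evaluated for concrete widths. K = 1 and N = 5 below match the shape exercised by the new ScalarEvolution test later in this commit, the zext to iM is modeled by uint32_t, and the last two proof lines produce the same value, so they share one step.

// Step1..Step4 correspond to successive lines of the proof; all must agree.
#include <cassert>
#include <cstdint>

int main() {
  const unsigned K = 1, N = 5;
  const uint32_t MaskN = (1u << N) - 1;        // iN
  const uint32_t MaskNK = (1u << (N - K)) - 1; // i{N-K}

  for (uint32_t X = 0; X < (1u << 16); ++X) {
    // zext(2^K * (trunc X to iN)) to iM
    uint32_t Step1 = ((X & MaskN) * (1u << K)) & MaskN;
    // = zext((trunc X to iN) << K) to iM
    uint32_t Step2 = ((X & MaskN) << K) & MaskN;
    // = zext((trunc X to i{N-K}) << K)<nuw> to iM
    //   (the shift discards the top K bits of the iN value anyway)
    uint32_t Step3 = (X & MaskNK) << K;
    // = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>
    uint32_t Step4 = (1u << K) * (X & MaskNK);
    assert(Step1 == Step2 && Step2 == Step3 && Step3 == Step4);
  }
  return 0;
}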


@@ -122,7 +122,7 @@ for.end: ; preds = %for.body
 ; LAA: Memory dependences are safe{{$}}
 ; LAA: SCEV assumptions:
 ; LAA-NEXT: {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> Added Flags: <nusw>
-; LAA-NEXT: {((2 * (zext i32 (2 * (trunc i64 %N to i32)) to i64))<nuw> + %a),+,-4}<%for.body> Added Flags: <nusw>
+; LAA-NEXT: {((4 * (zext i31 (trunc i64 %N to i31) to i64)) + %a),+,-4}<%for.body> Added Flags: <nusw>
 ; The expression for %mul_ext as analyzed by SCEV is
 ; (zext i32 {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> to i64)
@@ -131,7 +131,7 @@ for.end: ; preds = %for.body
 ; LAA: [PSE] %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext:
 ; LAA-NEXT: ((2 * (zext i32 {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> to i64))<nuw> + %a)
-; LAA-NEXT: --> {((2 * (zext i32 (2 * (trunc i64 %N to i32)) to i64))<nuw> + %a),+,-4}<%for.body>
+; LAA-NEXT: --> {((4 * (zext i31 (trunc i64 %N to i31) to i64)) + %a),+,-4}<%for.body>
 ; LV-LABEL: f2
 ; LV-LABEL: for.body.lver.check


@@ -25,3 +25,17 @@ define i64 @test2(i64 %x) {
   %z = xor i64 %t, 8
   ret i64 %z
 }
+
+; Check that we transform the naive lowering of the sequence below,
+;   (4 * (zext i5 (2 * (trunc i32 %x to i5)) to i32)),
+; to
+;   (8 * (zext i4 (trunc i32 %x to i4) to i32))
+;
+; CHECK-LABEL: @test3
+define i32 @test3(i32 %x) {
+  %a = mul i32 %x, 8
+  ; CHECK: %b
+  ; CHECK-NEXT: --> (8 * (zext i4 (trunc i32 %x to i4) to i32))
+  %b = and i32 %a, 124
+  ret i32 %b
+}
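For reference, the IR in @test3 and the expected SCEV compute the same value: 124 masks bits 2..6, and since %x * 8 has its low three bits clear, the and keeps bits 0..3 of %x shifted left by 3, which is exactly 8 * (zext i4 (trunc i32 %x to i4) to i32). A standalone check (not part of the test) that samples a prefix of i32 rather than enumerating all of it:

// (x * 8) & 124  ==  8 * (x & 0xF)  ==  8 * (zext i4 (trunc i32 x to i4) to i32)
#include <cassert>
#include <cstdint>

int main() {
  // The identity holds for every i32 value; checking a prefix keeps this quick.
  for (uint32_t X = 0; X < (1u << 20); ++X) {
    uint32_t B = (X * 8u) & 124u;        // %a = mul i32 %x, 8 ; %b = and i32 %a, 124
    uint32_t SCEVForm = 8u * (X & 0xFu); // 8 * (zext i4 (trunc i32 %x to i4) to i32)
    assert(B == SCEVForm);
  }
  return 0;
}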