mirror of
https://github.com/RPCS3/llvm-mirror.git
synced 2025-02-01 05:01:59 +01:00
[ScalarEvolution] Apply Depth limit to getMulExpr
This is a fix for PR33292 that shows a case of extremely long compilation of a single .c file with clang, with most time spent within SCEV. We have a mechanism of limiting recursion depth for getAddExpr to avoid long analysis in SCEV. However, there are calls from getAddExpr to getMulExpr and back that do not propagate the info about depth. As a result of this, a chain getAddExpr -> ... -> getAddExpr -> getMulExpr -> getAddExpr -> ... -> getAddExpr can be extremely long, with every segment of getAddExpr's being up to max depth long. This leads either to long compilation or to a crash by stack overflow. We faced this situation while analyzing big SCEVs in the test of PR33292. This patch applies the same limit on max expression depth to getAddExpr and getMulExpr. Differential Revision: https://reviews.llvm.org/D33984 llvm-svn: 305463
This commit is contained in:
parent
743dbc42d8
commit
0107d0a6ae
@ -1214,26 +1214,31 @@ public:
|
||||
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
|
||||
unsigned Depth = 0);
|
||||
const SCEV *getAddExpr(const SCEV *LHS, const SCEV *RHS,
|
||||
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
|
||||
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
|
||||
unsigned Depth = 0) {
|
||||
SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
|
||||
return getAddExpr(Ops, Flags);
|
||||
return getAddExpr(Ops, Flags, Depth);
|
||||
}
|
||||
const SCEV *getAddExpr(const SCEV *Op0, const SCEV *Op1, const SCEV *Op2,
|
||||
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
|
||||
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
|
||||
unsigned Depth = 0) {
|
||||
SmallVector<const SCEV *, 3> Ops = {Op0, Op1, Op2};
|
||||
return getAddExpr(Ops, Flags);
|
||||
return getAddExpr(Ops, Flags, Depth);
|
||||
}
|
||||
const SCEV *getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
|
||||
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
|
||||
unsigned Depth = 0);
|
||||
const SCEV *getMulExpr(const SCEV *LHS, const SCEV *RHS,
|
||||
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
|
||||
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
|
||||
unsigned Depth = 0) {
|
||||
SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
|
||||
return getMulExpr(Ops, Flags);
|
||||
return getMulExpr(Ops, Flags, Depth);
|
||||
}
|
||||
const SCEV *getMulExpr(const SCEV *Op0, const SCEV *Op1, const SCEV *Op2,
|
||||
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
|
||||
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
|
||||
unsigned Depth = 0) {
|
||||
SmallVector<const SCEV *, 3> Ops = {Op0, Op1, Op2};
|
||||
return getMulExpr(Ops, Flags);
|
||||
return getMulExpr(Ops, Flags, Depth);
|
||||
}
|
||||
const SCEV *getUDivExpr(const SCEV *LHS, const SCEV *RHS);
|
||||
const SCEV *getUDivExactExpr(const SCEV *LHS, const SCEV *RHS);
|
||||
@ -1287,7 +1292,8 @@ public:
|
||||
|
||||
/// Return LHS-RHS. Minus is represented in SCEV as A+B*-1.
|
||||
const SCEV *getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
|
||||
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
|
||||
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
|
||||
unsigned Depth = 0);
|
||||
|
||||
/// Return a SCEV corresponding to a conversion of the input value to the
|
||||
/// specified type. If the type must be extended, it is zero extended.
|
||||
@ -1693,10 +1699,14 @@ private:
|
||||
bool doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride, bool IsSigned,
|
||||
bool NoWrap);
|
||||
|
||||
/// Get add expr already created or create a new one
|
||||
/// Get add expr already created or create a new one.
|
||||
const SCEV *getOrCreateAddExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||
SCEV::NoWrapFlags Flags);
|
||||
|
||||
/// Get mul expr already created or create a new one.
|
||||
const SCEV *getOrCreateMulExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||
SCEV::NoWrapFlags Flags);
|
||||
|
||||
private:
|
||||
FoldingSet<SCEV> UniqueSCEVs;
|
||||
FoldingSet<SCEVPredicate> UniquePreds;
|
||||
|
@ -149,9 +149,9 @@ static cl::opt<unsigned> MaxValueCompareDepth(
|
||||
cl::init(2));
|
||||
|
||||
static cl::opt<unsigned>
|
||||
MaxAddExprDepth("scalar-evolution-max-addexpr-depth", cl::Hidden,
|
||||
cl::desc("Maximum depth of recursive AddExpr"),
|
||||
cl::init(32));
|
||||
MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
|
||||
cl::desc("Maximum depth of recursive arithmetics"),
|
||||
cl::init(32));
|
||||
|
||||
static cl::opt<unsigned> MaxConstantEvolvingDepth(
|
||||
"scalar-evolution-max-constant-evolving-depth", cl::Hidden,
|
||||
@ -2276,8 +2276,8 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||
if (Ops.size() == 1) return Ops[0];
|
||||
}
|
||||
|
||||
// Limit recursion calls depth
|
||||
if (Depth > MaxAddExprDepth)
|
||||
// Limit recursion calls depth.
|
||||
if (Depth > MaxArithDepth)
|
||||
return getOrCreateAddExpr(Ops, Flags);
|
||||
|
||||
// Okay, check to see if the same value occurs in the operand list more than
|
||||
@ -2293,7 +2293,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||
++Count;
|
||||
// Merge the values into a multiply.
|
||||
const SCEV *Scale = getConstant(Ty, Count);
|
||||
const SCEV *Mul = getMulExpr(Scale, Ops[i]);
|
||||
const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
|
||||
if (Ops.size() == Count)
|
||||
return Mul;
|
||||
Ops[i] = Mul;
|
||||
@ -2343,7 +2343,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||
}
|
||||
}
|
||||
if (Ok)
|
||||
LargeOps.push_back(getMulExpr(LargeMulOps));
|
||||
LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1));
|
||||
} else {
|
||||
Ok = false;
|
||||
break;
|
||||
@ -2417,7 +2417,8 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||
if (MulOp.first != 0)
|
||||
Ops.push_back(getMulExpr(
|
||||
getConstant(MulOp.first),
|
||||
getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1)));
|
||||
getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1),
|
||||
SCEV::FlagAnyWrap, Depth + 1));
|
||||
if (Ops.empty())
|
||||
return getZero(Ty);
|
||||
if (Ops.size() == 1)
|
||||
@ -2445,11 +2446,12 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||
SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
|
||||
Mul->op_begin()+MulOp);
|
||||
MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
|
||||
InnerMul = getMulExpr(MulOps);
|
||||
InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
|
||||
}
|
||||
SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul};
|
||||
const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
|
||||
const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV);
|
||||
const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV,
|
||||
SCEV::FlagAnyWrap, Depth + 1);
|
||||
if (Ops.size() == 2) return OuterMul;
|
||||
if (AddOp < Idx) {
|
||||
Ops.erase(Ops.begin()+AddOp);
|
||||
@ -2478,19 +2480,20 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||
SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
|
||||
Mul->op_begin()+MulOp);
|
||||
MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
|
||||
InnerMul1 = getMulExpr(MulOps);
|
||||
InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
|
||||
}
|
||||
const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
|
||||
if (OtherMul->getNumOperands() != 2) {
|
||||
SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
|
||||
OtherMul->op_begin()+OMulOp);
|
||||
MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
|
||||
InnerMul2 = getMulExpr(MulOps);
|
||||
InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
|
||||
}
|
||||
SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2};
|
||||
const SCEV *InnerMulSum =
|
||||
getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
|
||||
const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
|
||||
const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum,
|
||||
SCEV::FlagAnyWrap, Depth + 1);
|
||||
if (Ops.size() == 2) return OuterMul;
|
||||
Ops.erase(Ops.begin()+Idx);
|
||||
Ops.erase(Ops.begin()+OtherMulIdx-1);
|
||||
@ -2621,6 +2624,27 @@ ScalarEvolution::getOrCreateAddExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||
return S;
|
||||
}
|
||||
|
||||
const SCEV *
|
||||
ScalarEvolution::getOrCreateMulExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||
SCEV::NoWrapFlags Flags) {
|
||||
FoldingSetNodeID ID;
|
||||
ID.AddInteger(scMulExpr);
|
||||
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
|
||||
ID.AddPointer(Ops[i]);
|
||||
void *IP = nullptr;
|
||||
SCEVMulExpr *S =
|
||||
static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
|
||||
if (!S) {
|
||||
const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
|
||||
std::uninitialized_copy(Ops.begin(), Ops.end(), O);
|
||||
S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
|
||||
O, Ops.size());
|
||||
UniqueSCEVs.InsertNode(S, IP);
|
||||
}
|
||||
S->setNoWrapFlags(Flags);
|
||||
return S;
|
||||
}
|
||||
|
||||
static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
|
||||
uint64_t k = i*j;
|
||||
if (j > 1 && k / j != i) Overflow = true;
|
||||
@ -2673,7 +2697,8 @@ static bool containsConstantSomewhere(const SCEV *StartExpr) {
|
||||
|
||||
/// Get a canonical multiply expression, or something simpler if possible.
|
||||
const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||
SCEV::NoWrapFlags Flags) {
|
||||
SCEV::NoWrapFlags Flags,
|
||||
unsigned Depth) {
|
||||
assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) &&
|
||||
"only nuw or nsw allowed");
|
||||
assert(!Ops.empty() && "Cannot get empty mul!");
|
||||
@ -2690,6 +2715,10 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||
|
||||
Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags);
|
||||
|
||||
// Limit recursion calls depth.
|
||||
if (Depth > MaxArithDepth)
|
||||
return getOrCreateMulExpr(Ops, Flags);
|
||||
|
||||
// If there are any constants, fold them together.
|
||||
unsigned Idx = 0;
|
||||
if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
|
||||
@ -2701,8 +2730,11 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||
// apply this transformation as well.
|
||||
if (Add->getNumOperands() == 2)
|
||||
if (containsConstantSomewhere(Add))
|
||||
return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
|
||||
getMulExpr(LHSC, Add->getOperand(1)));
|
||||
return getAddExpr(getMulExpr(LHSC, Add->getOperand(0),
|
||||
SCEV::FlagAnyWrap, Depth + 1),
|
||||
getMulExpr(LHSC, Add->getOperand(1),
|
||||
SCEV::FlagAnyWrap, Depth + 1),
|
||||
SCEV::FlagAnyWrap, Depth + 1);
|
||||
|
||||
++Idx;
|
||||
while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
|
||||
@ -2730,17 +2762,19 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||
SmallVector<const SCEV *, 4> NewOps;
|
||||
bool AnyFolded = false;
|
||||
for (const SCEV *AddOp : Add->operands()) {
|
||||
const SCEV *Mul = getMulExpr(Ops[0], AddOp);
|
||||
const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
|
||||
Depth + 1);
|
||||
if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
|
||||
NewOps.push_back(Mul);
|
||||
}
|
||||
if (AnyFolded)
|
||||
return getAddExpr(NewOps);
|
||||
return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
|
||||
} else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
|
||||
// Negation preserves a recurrence's no self-wrap property.
|
||||
SmallVector<const SCEV *, 4> Operands;
|
||||
for (const SCEV *AddRecOp : AddRec->operands())
|
||||
Operands.push_back(getMulExpr(Ops[0], AddRecOp));
|
||||
Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
|
||||
Depth + 1));
|
||||
|
||||
return getAddRecExpr(Operands, AddRec->getLoop(),
|
||||
AddRec->getNoWrapFlags(SCEV::FlagNW));
|
||||
@ -2762,18 +2796,18 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||
while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
|
||||
if (Ops.size() > MulOpsInlineThreshold)
|
||||
break;
|
||||
// If we have an mul, expand the mul operands onto the end of the operands
|
||||
// list.
|
||||
// If we have an mul, expand the mul operands onto the end of the
|
||||
// operands list.
|
||||
Ops.erase(Ops.begin()+Idx);
|
||||
Ops.append(Mul->op_begin(), Mul->op_end());
|
||||
DeletedMul = true;
|
||||
}
|
||||
|
||||
// If we deleted at least one mul, we added operands to the end of the list,
|
||||
// and they are not necessarily sorted. Recurse to resort and resimplify
|
||||
// any operands we just acquired.
|
||||
// If we deleted at least one mul, we added operands to the end of the
|
||||
// list, and they are not necessarily sorted. Recurse to resort and
|
||||
// resimplify any operands we just acquired.
|
||||
if (DeletedMul)
|
||||
return getMulExpr(Ops);
|
||||
return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
|
||||
}
|
||||
|
||||
// If there are any add recurrences in the operands list, see if any other
|
||||
@ -2784,8 +2818,8 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||
|
||||
// Scan over all recurrences, trying to fold loop invariants into them.
|
||||
for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
|
||||
// Scan all of the other operands to this mul and add them to the vector if
|
||||
// they are loop invariant w.r.t. the recurrence.
|
||||
// Scan all of the other operands to this mul and add them to the vector
|
||||
// if they are loop invariant w.r.t. the recurrence.
|
||||
SmallVector<const SCEV *, 8> LIOps;
|
||||
const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
|
||||
const Loop *AddRecLoop = AddRec->getLoop();
|
||||
@ -2801,9 +2835,10 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||
// NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step}
|
||||
SmallVector<const SCEV *, 4> NewOps;
|
||||
NewOps.reserve(AddRec->getNumOperands());
|
||||
const SCEV *Scale = getMulExpr(LIOps);
|
||||
const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1);
|
||||
for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
|
||||
NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
|
||||
NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i),
|
||||
SCEV::FlagAnyWrap, Depth + 1));
|
||||
|
||||
// Build the new addrec. Propagate the NUW and NSW flags if both the
|
||||
// outer mul and the inner addrec are guaranteed to have no overflow.
|
||||
@ -2822,12 +2857,12 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||
Ops[i] = NewRec;
|
||||
break;
|
||||
}
|
||||
return getMulExpr(Ops);
|
||||
return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
|
||||
}
|
||||
|
||||
// Okay, if there weren't any loop invariants to be folded, check to see if
|
||||
// there are multiple AddRec's with the same loop induction variable being
|
||||
// multiplied together. If so, we can fold them.
|
||||
// Okay, if there weren't any loop invariants to be folded, check to see
|
||||
// if there are multiple AddRec's with the same loop induction variable
|
||||
// being multiplied together. If so, we can fold them.
|
||||
|
||||
// {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
|
||||
// = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
|
||||
@ -2869,7 +2904,9 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||
const SCEV *CoeffTerm = getConstant(Ty, Coeff);
|
||||
const SCEV *Term1 = AddRec->getOperand(y-z);
|
||||
const SCEV *Term2 = OtherAddRec->getOperand(z);
|
||||
Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1,Term2));
|
||||
Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1, Term2,
|
||||
SCEV::FlagAnyWrap, Depth + 1),
|
||||
SCEV::FlagAnyWrap, Depth + 1);
|
||||
}
|
||||
}
|
||||
AddRecOps.push_back(Term);
|
||||
@ -2887,7 +2924,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||
}
|
||||
}
|
||||
if (OpsModified)
|
||||
return getMulExpr(Ops);
|
||||
return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
|
||||
|
||||
// Otherwise couldn't fold anything into this recurrence. Move onto the
|
||||
// next one.
|
||||
@ -2895,22 +2932,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||
|
||||
// Okay, it looks like we really DO need an mul expr. Check to see if we
|
||||
// already have one, otherwise create a new one.
|
||||
FoldingSetNodeID ID;
|
||||
ID.AddInteger(scMulExpr);
|
||||
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
|
||||
ID.AddPointer(Ops[i]);
|
||||
void *IP = nullptr;
|
||||
SCEVMulExpr *S =
|
||||
static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
|
||||
if (!S) {
|
||||
const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
|
||||
std::uninitialized_copy(Ops.begin(), Ops.end(), O);
|
||||
S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
|
||||
O, Ops.size());
|
||||
UniqueSCEVs.InsertNode(S, IP);
|
||||
}
|
||||
S->setNoWrapFlags(Flags);
|
||||
return S;
|
||||
return getOrCreateMulExpr(Ops, Flags);
|
||||
}
|
||||
|
||||
/// Get a canonical unsigned division expression, or something simpler if
|
||||
@ -3713,7 +3735,8 @@ const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
|
||||
}
|
||||
|
||||
const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
|
||||
SCEV::NoWrapFlags Flags) {
|
||||
SCEV::NoWrapFlags Flags,
|
||||
unsigned Depth) {
|
||||
// Fast path: X - X --> 0.
|
||||
if (LHS == RHS)
|
||||
return getZero(LHS->getType());
|
||||
@ -3747,7 +3770,7 @@ const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
|
||||
// larger scope than intended.
|
||||
auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
|
||||
|
||||
return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags);
|
||||
return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
|
||||
}
|
||||
|
||||
const SCEV *
|
||||
|
@ -352,7 +352,7 @@ bool SimplifyIndvar::eliminateOverflowIntrinsic(CallInst *CI) {
|
||||
return false;
|
||||
|
||||
typedef const SCEV *(ScalarEvolution::*OperationFunctionTy)(
|
||||
const SCEV *, const SCEV *, SCEV::NoWrapFlags);
|
||||
const SCEV *, const SCEV *, SCEV::NoWrapFlags, unsigned);
|
||||
typedef const SCEV *(ScalarEvolution::*ExtensionFunctionTy)(
|
||||
const SCEV *, Type *);
|
||||
|
||||
@ -406,10 +406,11 @@ bool SimplifyIndvar::eliminateOverflowIntrinsic(CallInst *CI) {
|
||||
IntegerType::get(NarrowTy->getContext(), NarrowTy->getBitWidth() * 2);
|
||||
|
||||
const SCEV *A =
|
||||
(SE->*Extension)((SE->*Operation)(LHS, RHS, SCEV::FlagAnyWrap), WideTy);
|
||||
(SE->*Extension)((SE->*Operation)(LHS, RHS, SCEV::FlagAnyWrap, 0u),
|
||||
WideTy);
|
||||
const SCEV *B =
|
||||
(SE->*Operation)((SE->*Extension)(LHS, WideTy),
|
||||
(SE->*Extension)(RHS, WideTy), SCEV::FlagAnyWrap);
|
||||
(SE->*Extension)(RHS, WideTy), SCEV::FlagAnyWrap, 0u);
|
||||
|
||||
if (A != B)
|
||||
return false;
|
||||
@ -530,8 +531,7 @@ bool SimplifyIndvar::strengthenOverflowingOperation(BinaryOperator *BO,
|
||||
return false;
|
||||
|
||||
const SCEV *(ScalarEvolution::*GetExprForBO)(const SCEV *, const SCEV *,
|
||||
SCEV::NoWrapFlags);
|
||||
|
||||
SCEV::NoWrapFlags, unsigned);
|
||||
switch (BO->getOpcode()) {
|
||||
default:
|
||||
return false;
|
||||
@ -560,7 +560,7 @@ bool SimplifyIndvar::strengthenOverflowingOperation(BinaryOperator *BO,
|
||||
const SCEV *ExtendAfterOp = SE->getZeroExtendExpr(SE->getSCEV(BO), WideTy);
|
||||
const SCEV *OpAfterExtend = (SE->*GetExprForBO)(
|
||||
SE->getZeroExtendExpr(LHS, WideTy), SE->getZeroExtendExpr(RHS, WideTy),
|
||||
SCEV::FlagAnyWrap);
|
||||
SCEV::FlagAnyWrap, 0u);
|
||||
if (ExtendAfterOp == OpAfterExtend) {
|
||||
BO->setHasNoUnsignedWrap();
|
||||
SE->forgetValue(BO);
|
||||
@ -572,7 +572,7 @@ bool SimplifyIndvar::strengthenOverflowingOperation(BinaryOperator *BO,
|
||||
const SCEV *ExtendAfterOp = SE->getSignExtendExpr(SE->getSCEV(BO), WideTy);
|
||||
const SCEV *OpAfterExtend = (SE->*GetExprForBO)(
|
||||
SE->getSignExtendExpr(LHS, WideTy), SE->getSignExtendExpr(RHS, WideTy),
|
||||
SCEV::FlagAnyWrap);
|
||||
SCEV::FlagAnyWrap, 0u);
|
||||
if (ExtendAfterOp == OpAfterExtend) {
|
||||
BO->setHasNoSignedWrap();
|
||||
SE->forgetValue(BO);
|
||||
|
44
test/Analysis/ScalarEvolution/limit-depth.ll
Normal file
44
test/Analysis/ScalarEvolution/limit-depth.ll
Normal file
@ -0,0 +1,44 @@
|
||||
; RUN: opt -scalar-evolution-max-arith-depth=0 -analyze -scalar-evolution < %s | FileCheck %s
|
||||
|
||||
; Check that depth set to 0 prevents getAddExpr and getMulExpr from making
|
||||
; transformations in SCEV. We expect the result to be very straightforward.
|
||||
|
||||
define void @test_add(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f) {
|
||||
; CHECK-LABEL: @test_add
|
||||
; CHECK: %s2 = add i32 %s1, %p3
|
||||
; CHECK-NEXT: --> (%a + %a + %b + %b + %c + %c + %d + %d + %e + %e + %f + %f)
|
||||
%tmp0 = add i32 %a, %b
|
||||
%tmp1 = add i32 %b, %c
|
||||
%tmp2 = add i32 %c, %d
|
||||
%tmp3 = add i32 %d, %e
|
||||
%tmp4 = add i32 %e, %f
|
||||
%tmp5 = add i32 %f, %a
|
||||
|
||||
%p1 = add i32 %tmp0, %tmp3
|
||||
%p2 = add i32 %tmp1, %tmp4
|
||||
%p3 = add i32 %tmp2, %tmp5
|
||||
|
||||
%s1 = add i32 %p1, %p2
|
||||
%s2 = add i32 %s1, %p3
|
||||
ret void
|
||||
}
|
||||
|
||||
define void @test_mul(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f) {
|
||||
; CHECK-LABEL: @test_mul
|
||||
; CHECK: %s2 = mul i32 %s1, %p3
|
||||
; CHECK-NEXT: --> (2 * 3 * 4 * 5 * 6 * 7 * %a * %b * %c * %d * %e * %f)
|
||||
%tmp0 = mul i32 %a, 2
|
||||
%tmp1 = mul i32 %b, 3
|
||||
%tmp2 = mul i32 %c, 4
|
||||
%tmp3 = mul i32 %d, 5
|
||||
%tmp4 = mul i32 %e, 6
|
||||
%tmp5 = mul i32 %f, 7
|
||||
|
||||
%p1 = mul i32 %tmp0, %tmp3
|
||||
%p2 = mul i32 %tmp1, %tmp4
|
||||
%p3 = mul i32 %tmp2, %tmp5
|
||||
|
||||
%s1 = mul i32 %p1, %p2
|
||||
%s2 = mul i32 %s1, %p3
|
||||
ret void
|
||||
}
|
Loading…
x
Reference in New Issue
Block a user