[Unrolling] Refactor the start and step offsets to simplify overflow
checking and make the cache faster and smaller.

I had thought that using an APInt here would be useful, but I think I was
just wrong. Notably, we don't have to do any fancy overflow checking, we can
just bound the values as quite small and do the math in a higher precision
integer. I've switched to a signed integer so that UBSan will even point out
if we ever have integer overflow. I've added various asserts to try to catch
things as well and hoisted the overflow checks so that we just leave the
too-large offsets out of the SCEV-GEP cache. This makes the value in the
cache quite a bit smaller which is probably worthwhile.

No functionality changed here (for trip counts under 1 billion).

llvm-svn: 237209
commit e7ef149711
parent 4abd8e053b
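To make the approach in the message concrete, here is a minimal standalone sketch of the bound-then-widen pattern it describes. The names (SmallGEPDescriptor, computeByteOffset) and the exact bound are illustrative stand-ins, not code from this patch:

#include <cassert>
#include <climits>
#include <cstdint>

// Offsets known to fit in 32 bits are cached as plain unsigned values, and the
// per-iteration offset is computed in a wider signed integer so the arithmetic
// cannot overflow (and UBSan would flag it if the bounds were ever violated).
struct SmallGEPDescriptor {
  unsigned Start; // byte offset at iteration 0, bounded to 32 bits
  unsigned Step;  // byte stride per iteration, bounded to 32 bits
};

int64_t computeByteOffset(const SmallGEPDescriptor &D, int Iteration) {
  // Mirrors the assert the patch adds: keep the iteration count small enough
  // that the widened multiply below stays far away from INT64_MAX.
  assert(Iteration >= 0 && Iteration < INT_MAX / 2 && "iteration count too large");
  return (int64_t)D.Start + (int64_t)D.Step * (int64_t)Iteration;
}

Keeping the cached offsets as plain 32-bit values is what lets the 64-bit multiply stay well inside int64_t for any iteration count the analyzer is willing to look at.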
@@ -340,8 +340,8 @@ class UnrollAnalyzer : public InstVisitor<UnrollAnalyzer, bool> {
 
   struct SCEVGEPDescriptor {
     Value *BaseAddr;
-    APInt Start;
-    APInt Step;
+    unsigned Start;
+    unsigned Step;
  };
 
   /// \brief The loop we're going to analyze.
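For a rough idea of why this shrinks each cache entry, here is an illustrative size comparison. FakeAPInt is only a stand-in for llvm::APInt's bit-width-plus-word layout, and the byte counts assume a typical LP64 target, so treat this as a sketch rather than a measurement of the real types:

#include <cstdint>

// Stand-in with roughly APInt's footprint: a bit width plus one word that is
// either the inline value or a pointer to heap-allocated words.
struct FakeAPInt {
  unsigned BitWidth;
  uint64_t ValueOrWords;
};

struct OldDescriptor { void *BaseAddr; FakeAPInt Start, Step; }; // ~40 bytes on LP64
struct NewDescriptor { void *BaseAddr; unsigned Start, Step; };  // ~16 bytes on LP64

static_assert(sizeof(NewDescriptor) < sizeof(OldDescriptor),
              "the refactored cache entry is the smaller one");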
@@ -435,12 +435,12 @@ class UnrollAnalyzer : public InstVisitor<UnrollAnalyzer, bool> {
     if (!CDS)
       return false;
 
-    // Check possible overflow.
-    if (GEPDesc.Start.getActiveBits() > 32 || GEPDesc.Step.getActiveBits() > 32)
-      return false;
-    unsigned ElemSize = CDS->getElementType()->getPrimitiveSizeInBits() / 8U;
-    uint64_t Index = (GEPDesc.Start.getLimitedValue() +
-                      GEPDesc.Step.getLimitedValue() * Iteration) /
+    // This calculation should never overflow because we bound Iteration quite
+    // low and both the start and step are 32-bit integers. We use signed
+    // integers so that UBSan will catch if a bug sneaks into the code.
+    int ElemSize = CDS->getElementType()->getPrimitiveSizeInBits() / 8U;
+    int64_t Index = ((int64_t)GEPDesc.Start +
+                     (int64_t)GEPDesc.Step * (int64_t)Iteration) /
                     ElemSize;
     if (Index >= CDS->getNumElements()) {
       // FIXME: For now we conservatively ignore out of bound accesses, but
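One detail worth noting in the new arithmetic: the casts are applied to the operands, so the multiply itself happens in 64 bits. A hedged sketch of the pitfall that ordering avoids (the function name and example values are made up for illustration):

#include <cstdint>

// ElemSize is assumed nonzero, as in the patch where it is a real element size.
int64_t widenedIndex(unsigned Start, unsigned Step, unsigned Iteration,
                     int ElemSize) {
  // Wrong order: Step * Iteration runs in 32-bit unsigned arithmetic and can
  // wrap before being widened, e.g. Step = 1 << 20 with Iteration = 1 << 13.
  // int64_t Bad = (int64_t)(Step * Iteration) + Start;

  // The patch's order: widen each operand first, then multiply and add in 64 bits.
  return ((int64_t)Start + (int64_t)Step * (int64_t)Iteration) / ElemSize;
}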
@@ -495,8 +495,17 @@ class UnrollAnalyzer : public InstVisitor<UnrollAnalyzer, bool> {
         if (!StepSE || !StartSE)
           continue;
 
-        SCEVCache[V] = {Visitor.BaseAddress, StartSE->getValue()->getValue(),
-                        StepSE->getValue()->getValue()};
+        // Check and skip caching if doing so would require lots of bits to
+        // avoid overflow.
+        APInt Start = StartSE->getValue()->getValue();
+        APInt Step = StepSE->getValue()->getValue();
+        if (Start.getActiveBits() > 32 || Step.getActiveBits() > 32)
+          continue;
+
+        // We found a cacheable SCEV model for the GEP.
+        SCEVCache[V] = {Visitor.BaseAddress,
+                        (unsigned)Start.getLimitedValue(),
+                        (unsigned)Step.getLimitedValue()};
       }
     }
   }
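The guard above uses APInt::getActiveBits(), which reports how many bits the value actually needs. A small illustration of the caching decision, assuming LLVM's ADT headers are available and using made-up values:

#include "llvm/ADT/APInt.h"
#include <cassert>
#include <cstdint>

void illustrateCachingGuard() {
  llvm::APInt Small(64, 12345);             // needs 14 active bits
  llvm::APInt Large(64, (uint64_t)1 << 40); // needs 41 active bits

  // Small offsets pass the guard and are narrowed into the unsigned slots.
  assert(Small.getActiveBits() <= 32);
  unsigned Cached = (unsigned)Small.getLimitedValue();
  (void)Cached;

  // Anything needing more than 32 bits is simply left out of the cache.
  assert(Large.getActiveBits() > 32);
}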
@@ -529,6 +538,13 @@ public:
   bool analyzeLoop() {
     SmallSetVector<BasicBlock *, 16> BBWorklist;
 
+    // We want to be able to scale offsets by the trip count and add more
+    // offsets to them without checking for overflows, and we already don't want
+    // to analyze *massive* trip counts, so we force the max to be reasonably
+    // small.
+    assert(UnrollMaxIterationsCountToAnalyze < (INT_MAX / 2) &&
+           "The unroll iterations max is too large!");
+
     // Don't simulate loops with a big or unknown tripcount
     if (!UnrollMaxIterationsCountToAnalyze || !TripCount ||
         TripCount > UnrollMaxIterationsCountToAnalyze)
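As a sanity check on this comment and the "under 1 billion" caveat in the message: INT_MAX / 2 is 1073741823, so with start and step limited to 32 bits the widened product stays around 2^62, comfortably below INT64_MAX. A small illustration of that arithmetic (not part of the patch):

#include <cassert>
#include <climits>
#include <cstdint>

int main() {
  // Largest value that passes the getActiveBits() <= 32 guard.
  const int64_t MaxOffset = (int64_t(1) << 32) - 1;
  // The new assert's bound on the analyzed iteration count (about 1.07e9).
  const int64_t MaxIteration = INT_MAX / 2;
  // Worst case is roughly 2^62, well under INT64_MAX (2^63 - 1).
  int64_t WorstCase = MaxOffset * MaxIteration + MaxOffset;
  assert(WorstCase > 0 && WorstCase < INT64_MAX);
  return 0;
}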