
[Transforms] Change std::sort to llvm::sort in response to r327219

Summary:
r327219 added wrappers around std::sort which randomly shuffle the
container before sorting. This helps uncover non-determinism caused by
the unspecified relative order of objects having the same key.
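
For illustration, here is a minimal sketch of such a wrapper, assuming
only the standard <algorithm> and <random> headers (in the real tree,
llvm::sort is declared in llvm/ADT/STLExtras.h and the shuffle is
compiled in only for expensive-checks builds):

  #include <algorithm>
  #include <random>

  namespace sketch { // hypothetical namespace, for illustration only

  // Shuffle-before-sort wrapper in the spirit of llvm::sort. Randomizing
  // the input first means elements that compare equal end up in a
  // different relative order on every run, so callers that silently
  // depend on that order produce visibly non-deterministic output.
  template <typename IteratorTy, typename Compare>
  void sort(IteratorTy Start, IteratorTy End, Compare Comp) {
    std::mt19937 Generator(std::random_device{}());
    std::shuffle(Start, End, Generator);
    std::sort(Start, End, Comp);
  }

  template <typename IteratorTy>
  void sort(IteratorTy Start, IteratorTy End) {
    std::mt19937 Generator(std::random_device{}());
    std::shuffle(Start, End, Generator);
    std::sort(Start, End);
  }

  } // namespace sketch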

To make use of that infrastructure we need to invoke llvm::sort instead of std::sort.
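
The call-site change is mechanical: same iterators and comparator,
different entry point. The hypothetical comparator below shows the kind
of latent bug the shuffling exposes: elements with equal keys may land
in either order, so any downstream output that depends on that order
varies between runs unless a tie-breaker (or a stable sort) is added.

  #include "llvm/ADT/STLExtras.h"
  #include <vector>

  struct Candidate { // hypothetical example type
    unsigned Key;
    const char *Name;
  };

  void sortCandidates(std::vector<Candidate> &CC) {
    // Before this patch: std::sort(CC.begin(), CC.end(), ...);
    llvm::sort(CC.begin(), CC.end(),
               [](const Candidate &L, const Candidate &R) {
                 // Equal keys: the relative order is unspecified, and it
                 // is exactly what the pre-sort shuffle perturbs.
                 return L.Key < R.Key;
               });
  }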

Note: This patch is one of a series of patches to replace *all* std::sort calls with llvm::sort.
Refer to the comments section in D44363 for a list of all the required patches.

Reviewers: kcc, pcc, danielcdh, jmolloy, sanjoy, dberlin, ruiu

Reviewed By: ruiu

Subscribers: ruiu, llvm-commits

Differential Revision: https://reviews.llvm.org/D45142

llvm-svn: 330059
Mandeep Singh Grang 2018-04-13 19:47:57 +00:00
parent 2cdc47a3e2
commit 4e39759fa1
23 changed files with 89 additions and 84 deletions

View File

@@ -48,7 +48,7 @@ public:
BlockToIndexMapping(Function &F) {
for (BasicBlock &BB : F)
V.push_back(&BB);
-std::sort(V.begin(), V.end());
+llvm::sort(V.begin(), V.end());
}
size_t blockToIndex(BasicBlock *BB) const {

View File

@@ -1869,11 +1869,11 @@ bool LowerTypeTestsModule::lower() {
}
Sets.emplace_back(I, MaxIndex);
}
-std::sort(Sets.begin(), Sets.end(),
-[](const std::pair<GlobalClassesTy::iterator, unsigned> &S1,
-const std::pair<GlobalClassesTy::iterator, unsigned> &S2) {
-return S1.second < S2.second;
-});
+llvm::sort(Sets.begin(), Sets.end(),
+[](const std::pair<GlobalClassesTy::iterator, unsigned> &S1,
+const std::pair<GlobalClassesTy::iterator, unsigned> &S2) {
+return S1.second < S2.second;
+});
// For each disjoint set we found...
for (const auto &S : Sets) {
@@ -1894,7 +1894,7 @@ bool LowerTypeTestsModule::lower() {
// Order type identifiers by global index for determinism. This ordering is
// stable as there is a one-to-one mapping between metadata and indices.
-std::sort(TypeIds.begin(), TypeIds.end(), [&](Metadata *M1, Metadata *M2) {
+llvm::sort(TypeIds.begin(), TypeIds.end(), [&](Metadata *M1, Metadata *M2) {
return TypeIdInfo[M1].Index < TypeIdInfo[M2].Index;
});

View File

@@ -679,10 +679,10 @@ SampleProfileLoader::findIndirectCallFunctionSamples(
Sum += NameFS.second.getEntrySamples();
R.push_back(&NameFS.second);
}
-std::sort(R.begin(), R.end(),
-[](const FunctionSamples *L, const FunctionSamples *R) {
-return L->getEntrySamples() > R->getEntrySamples();
-});
+llvm::sort(R.begin(), R.end(),
+[](const FunctionSamples *L, const FunctionSamples *R) {
+return L->getEntrySamples() > R->getEntrySamples();
+});
}
return R;
}
@@ -1170,13 +1170,13 @@ static SmallVector<InstrProfValueData, 2> SortCallTargets(
SmallVector<InstrProfValueData, 2> R;
for (auto I = M.begin(); I != M.end(); ++I)
R.push_back({Function::getGUID(I->getKey()), I->getValue()});
-std::sort(R.begin(), R.end(),
-[](const InstrProfValueData &L, const InstrProfValueData &R) {
-if (L.Count == R.Count)
-return L.Value > R.Value;
-else
-return L.Count > R.Count;
-});
+llvm::sort(R.begin(), R.end(),
+[](const InstrProfValueData &L, const InstrProfValueData &R) {
+if (L.Count == R.Count)
+return L.Value > R.Value;
+else
+return L.Count > R.Count;
+});
return R;
}

View File

@@ -272,7 +272,7 @@ namespace {
write(Len);
write(Number);
-std::sort(
+llvm::sort(
SortedLinesByFile.begin(), SortedLinesByFile.end(),
[](StringMapEntry<GCOVLines> *LHS, StringMapEntry<GCOVLines> *RHS) {
return LHS->getKey() < RHS->getKey();

View File

@@ -659,11 +659,11 @@ void SanitizerCoverageModule::InjectTraceForSwitch(
C = ConstantExpr::getCast(CastInst::ZExt, It.getCaseValue(), Int64Ty);
Initializers.push_back(C);
}
-std::sort(Initializers.begin() + 2, Initializers.end(),
-[](const Constant *A, const Constant *B) {
-return cast<ConstantInt>(A)->getLimitedValue() <
-cast<ConstantInt>(B)->getLimitedValue();
-});
+llvm::sort(Initializers.begin() + 2, Initializers.end(),
+[](const Constant *A, const Constant *B) {
+return cast<ConstantInt>(A)->getLimitedValue() <
+cast<ConstantInt>(B)->getLimitedValue();
+});
ArrayType *ArrayOfInt64Ty = ArrayType::get(Int64Ty, Initializers.size());
GlobalVariable *GV = new GlobalVariable(
*CurModule, ArrayOfInt64Ty, false, GlobalVariable::InternalLinkage,

View File

@@ -571,8 +571,8 @@ void ConstantHoistingPass::findAndMakeBaseConstant(
/// rematerialized with an add from a common base constant.
void ConstantHoistingPass::findBaseConstants() {
// Sort the constants by value and type. This invalidates the mapping!
-std::sort(ConstCandVec.begin(), ConstCandVec.end(),
-[](const ConstantCandidate &LHS, const ConstantCandidate &RHS) {
+llvm::sort(ConstCandVec.begin(), ConstCandVec.end(),
+[](const ConstantCandidate &LHS, const ConstantCandidate &RHS) {
if (LHS.ConstInt->getType() != RHS.ConstInt->getType())
return LHS.ConstInt->getType()->getBitWidth() <
RHS.ConstInt->getType()->getBitWidth();

View File

@@ -748,11 +748,11 @@ private:
// TODO: Remove fully-redundant expressions.
// Get instruction from the Map, assume that all the Instructions
// with same VNs have same rank (this is an approximation).
-std::sort(Ranks.begin(), Ranks.end(),
-[this, &Map](const VNType &r1, const VNType &r2) {
-return (rank(*Map.lookup(r1).begin()) <
-rank(*Map.lookup(r2).begin()));
-});
+llvm::sort(Ranks.begin(), Ranks.end(),
+[this, &Map](const VNType &r1, const VNType &r2) {
+return (rank(*Map.lookup(r1).begin()) <
+rank(*Map.lookup(r2).begin()));
+});
// - Sort VNs according to their rank, and start with lowest ranked VN
// - Take a VN and for each instruction with same VN

View File

@@ -239,7 +239,7 @@ public:
SmallVector<std::pair<BasicBlock *, Value *>, 4> Ops;
for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I)
Ops.push_back({PN->getIncomingBlock(I), PN->getIncomingValue(I)});
-std::sort(Ops.begin(), Ops.end());
+llvm::sort(Ops.begin(), Ops.end());
for (auto &P : Ops) {
Blocks.push_back(P.first);
Values.push_back(P.second);
@@ -361,7 +361,7 @@ public:
for (auto &U : I->uses())
op_push_back(U.getUser());
-std::sort(op_begin(), op_end());
+llvm::sort(op_begin(), op_end());
}
void setMemoryUseOrder(unsigned MUO) { MemoryUseOrder = MUO; }
@@ -761,7 +761,7 @@ unsigned GVNSink::sinkBB(BasicBlock *BBEnd) {
}
if (Preds.size() < 2)
return 0;
-std::sort(Preds.begin(), Preds.end());
+llvm::sort(Preds.begin(), Preds.end());
unsigned NumOrigPreds = Preds.size();
// We can only sink instructions through unconditional branches.

View File

@@ -555,9 +555,9 @@ bool GuardWideningImpl::combineRangeChecks(
// CurrentChecks.size() will typically be 3 here, but so far there has been
// no need to hard-code that fact.
-std::sort(CurrentChecks.begin(), CurrentChecks.end(),
-[&](const GuardWideningImpl::RangeCheck &LHS,
-const GuardWideningImpl::RangeCheck &RHS) {
+llvm::sort(CurrentChecks.begin(), CurrentChecks.end(),
+[&](const GuardWideningImpl::RangeCheck &LHS,
+const GuardWideningImpl::RangeCheck &RHS) {
return LHS.getOffsetValue().slt(RHS.getOffsetValue());
});

View File

@@ -200,10 +200,10 @@ static bool sinkInstruction(Loop &L, Instruction &I,
SmallVector<BasicBlock *, 2> SortedBBsToSinkInto;
SortedBBsToSinkInto.insert(SortedBBsToSinkInto.begin(), BBsToSinkInto.begin(),
BBsToSinkInto.end());
-std::sort(SortedBBsToSinkInto.begin(), SortedBBsToSinkInto.end(),
-[&](BasicBlock *A, BasicBlock *B) {
-return *LoopBlockNumber.find(A) < *LoopBlockNumber.find(B);
-});
+llvm::sort(SortedBBsToSinkInto.begin(), SortedBBsToSinkInto.end(),
+[&](BasicBlock *A, BasicBlock *B) {
+return *LoopBlockNumber.find(A) < *LoopBlockNumber.find(B);
+});
BasicBlock *MoveBB = *SortedBBsToSinkInto.begin();
// FIXME: Optimize the efficiency for cloned value replacement. The current

View File

@@ -1479,7 +1479,7 @@ bool LSRUse::HasFormulaWithSameRegs(const Formula &F) const {
SmallVector<const SCEV *, 4> Key = F.BaseRegs;
if (F.ScaledReg) Key.push_back(F.ScaledReg);
// Unstable sort by host order ok, because this is only used for uniquifying.
-std::sort(Key.begin(), Key.end());
+llvm::sort(Key.begin(), Key.end());
return Uniquifier.count(Key);
}
@@ -1503,7 +1503,7 @@ bool LSRUse::InsertFormula(const Formula &F, const Loop &L) {
SmallVector<const SCEV *, 4> Key = F.BaseRegs;
if (F.ScaledReg) Key.push_back(F.ScaledReg);
// Unstable sort by host order ok, because this is only used for uniquifying.
-std::sort(Key.begin(), Key.end());
+llvm::sort(Key.begin(), Key.end());
if (!Uniquifier.insert(Key).second)
return false;
@@ -4220,7 +4220,7 @@ void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
Key.push_back(F.ScaledReg);
// Unstable sort by host order ok, because this is only used for
// uniquifying.
-std::sort(Key.begin(), Key.end());
+llvm::sort(Key.begin(), Key.end());
std::pair<BestFormulaeTy::const_iterator, bool> P =
BestFormulae.insert(std::make_pair(Key, FIdx));

View File

@@ -439,10 +439,10 @@ BCECmpChain::BCECmpChain(const std::vector<BasicBlock *> &Blocks, PHINode &Phi)
#endif // MERGEICMPS_DOT_ON
// Reorder blocks by LHS. We can do that without changing the
// semantics because we are only accessing dereferencable memory.
-std::sort(Comparisons_.begin(), Comparisons_.end(),
-[](const BCECmpBlock &a, const BCECmpBlock &b) {
-return a.Lhs() < b.Lhs();
-});
+llvm::sort(Comparisons_.begin(), Comparisons_.end(),
+[](const BCECmpBlock &a, const BCECmpBlock &b) {
+return a.Lhs() < b.Lhs();
+});
#ifdef MERGEICMPS_DOT_ON
errs() << "AFTER REORDERING:\n\n";
dump();

View File

@@ -958,7 +958,8 @@ static bool isCopyOfAPHI(const Value *V) {
// order. The BlockInstRange numbers are generated in an RPO walk of the basic
// blocks.
void NewGVN::sortPHIOps(MutableArrayRef<ValPair> Ops) const {
-std::sort(Ops.begin(), Ops.end(), [&](const ValPair &P1, const ValPair &P2) {
+llvm::sort(Ops.begin(), Ops.end(),
+[&](const ValPair &P1, const ValPair &P2) {
return BlockInstRange.lookup(P1.second).first <
BlockInstRange.lookup(P2.second).first;
});
@@ -3423,10 +3424,10 @@ bool NewGVN::runGVN() {
for (auto &B : RPOT) {
auto *Node = DT->getNode(B);
if (Node->getChildren().size() > 1)
-std::sort(Node->begin(), Node->end(),
-[&](const DomTreeNode *A, const DomTreeNode *B) {
-return RPOOrdering[A] < RPOOrdering[B];
-});
+llvm::sort(Node->begin(), Node->end(),
+[&](const DomTreeNode *A, const DomTreeNode *B) {
+return RPOOrdering[A] < RPOOrdering[B];
+});
}
// Now a standard depth first ordering of the domtree is equivalent to RPO.
@@ -3948,7 +3949,7 @@ bool NewGVN::eliminateInstructions(Function &F) {
convertClassToDFSOrdered(*CC, DFSOrderedSet, UseCounts, ProbablyDead);
// Sort the whole thing.
-std::sort(DFSOrderedSet.begin(), DFSOrderedSet.end());
+llvm::sort(DFSOrderedSet.begin(), DFSOrderedSet.end());
for (auto &VD : DFSOrderedSet) {
int MemberDFSIn = VD.DFSIn;
int MemberDFSOut = VD.DFSOut;
@@ -4110,7 +4111,7 @@ bool NewGVN::eliminateInstructions(Function &F) {
// If we have possible dead stores to look at, try to eliminate them.
if (CC->getStoreCount() > 0) {
convertClassToLoadsAndStores(*CC, PossibleDeadStores);
-std::sort(PossibleDeadStores.begin(), PossibleDeadStores.end());
+llvm::sort(PossibleDeadStores.begin(), PossibleDeadStores.end());
ValueDFSStack EliminationStack;
for (auto &VD : PossibleDeadStores) {
int MemberDFSIn = VD.DFSIn;

View File

@@ -522,7 +522,7 @@ bool PlaceSafepoints::runOnFunction(Function &F) {
};
// We need the order of list to be stable so that naming ends up stable
// when we split edges. This makes test cases much easier to write.
-std::sort(PollLocations.begin(), PollLocations.end(), OrderByBBName);
+llvm::sort(PollLocations.begin(), PollLocations.end(), OrderByBBName);
// We can sometimes end up with duplicate poll locations. This happens if
// a single loop is visited more than once. The fact this happens seems

View File

@@ -1823,7 +1823,7 @@ static void relocationViaAlloca(
}
}
-std::sort(Uses.begin(), Uses.end());
+llvm::sort(Uses.begin(), Uses.end());
auto Last = std::unique(Uses.begin(), Uses.end());
Uses.erase(Last, Uses.end());

View File

@@ -273,7 +273,7 @@ public:
int OldSize = Slices.size();
Slices.append(NewSlices.begin(), NewSlices.end());
auto SliceI = Slices.begin() + OldSize;
-std::sort(SliceI, Slices.end());
+llvm::sort(SliceI, Slices.end());
std::inplace_merge(Slices.begin(), SliceI, Slices.end());
}
@@ -1057,7 +1057,7 @@ AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI)
// Sort the uses. This arranges for the offsets to be in ascending order,
// and the sizes to be in descending order.
-std::sort(Slices.begin(), Slices.end());
+llvm::sort(Slices.begin(), Slices.end());
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
@@ -1891,7 +1891,7 @@ static VectorType *isVectorPromotionViable(Partition &P, const DataLayout &DL) {
"All non-integer types eliminated!");
return RHSTy->getNumElements() < LHSTy->getNumElements();
};
-std::sort(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes);
+llvm::sort(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes);
CandidateTys.erase(
std::unique(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes),
CandidateTys.end());
@@ -4152,7 +4152,7 @@ bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) {
}
if (!IsSorted)
-std::sort(AS.begin(), AS.end());
+llvm::sort(AS.begin(), AS.end());
/// Describes the allocas introduced by rewritePartition in order to migrate
/// the debug info.

View File

@@ -1128,11 +1128,12 @@ static Loop *buildClonedLoops(Loop &OrigL, ArrayRef<BasicBlock *> ExitBlocks,
// matter as we're just trying to build up the map from inside-out; we use
// the map in a more stably ordered way below.
auto OrderedClonedExitsInLoops = ClonedExitsInLoops;
-std::sort(OrderedClonedExitsInLoops.begin(), OrderedClonedExitsInLoops.end(),
-[&](BasicBlock *LHS, BasicBlock *RHS) {
-return ExitLoopMap.lookup(LHS)->getLoopDepth() <
-ExitLoopMap.lookup(RHS)->getLoopDepth();
-});
+llvm::sort(OrderedClonedExitsInLoops.begin(),
+OrderedClonedExitsInLoops.end(),
+[&](BasicBlock *LHS, BasicBlock *RHS) {
+return ExitLoopMap.lookup(LHS)->getLoopDepth() <
+ExitLoopMap.lookup(RHS)->getLoopDepth();
+});
// Populate the existing ExitLoopMap with everything reachable from each
// exit, starting from the inner most exit.

View File

@@ -161,7 +161,7 @@ void ImportedFunctionsInliningStatistics::dump(const bool Verbose) {
void ImportedFunctionsInliningStatistics::calculateRealInlines() {
// Removing duplicated Callers.
-std::sort(NonImportedCallers.begin(), NonImportedCallers.end());
+llvm::sort(NonImportedCallers.begin(), NonImportedCallers.end());
NonImportedCallers.erase(
std::unique(NonImportedCallers.begin(), NonImportedCallers.end()),
NonImportedCallers.end());
@@ -190,13 +190,14 @@ ImportedFunctionsInliningStatistics::getSortedNodes() {
for (const NodesMapTy::value_type& Node : NodesMap)
SortedNodes.push_back(&Node);
-std::sort(
+llvm::sort(
SortedNodes.begin(), SortedNodes.end(),
[&](const SortedNodesTy::value_type &Lhs,
const SortedNodesTy::value_type &Rhs) {
if (Lhs->second->NumberOfInlines != Rhs->second->NumberOfInlines)
return Lhs->second->NumberOfInlines > Rhs->second->NumberOfInlines;
-if (Lhs->second->NumberOfRealInlines != Rhs->second->NumberOfRealInlines)
+if (Lhs->second->NumberOfRealInlines !=
+Rhs->second->NumberOfRealInlines)
return Lhs->second->NumberOfRealInlines >
Rhs->second->NumberOfRealInlines;
return Lhs->first() < Rhs->first();

View File

@@ -382,7 +382,7 @@ unsigned LowerSwitch::Clusterify(CaseVector& Cases, SwitchInst *SI) {
Cases.push_back(CaseRange(Case.getCaseValue(), Case.getCaseValue(),
Case.getCaseSuccessor()));
-std::sort(Cases.begin(), Cases.end(), CaseCmp());
+llvm::sort(Cases.begin(), Cases.end(), CaseCmp());
// Merge case into clusters
if (Cases.size() >= 2) {

View File

@@ -553,7 +553,7 @@ void PredicateInfo::renameUses(SmallPtrSetImpl<Value *> &OpSet) {
auto Comparator = [&](const Value *A, const Value *B) {
return valueComesBefore(OI, A, B);
};
-std::sort(OpsToRename.begin(), OpsToRename.end(), Comparator);
+llvm::sort(OpsToRename.begin(), OpsToRename.end(), Comparator);
ValueDFS_Compare Compare(OI);
// Compute liveness, and rename in O(uses) per Op.
for (auto *Op : OpsToRename) {

View File

@@ -475,7 +475,7 @@ static bool promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info,
// Sort the stores by their index, making it efficient to do a lookup with a
// binary search.
-std::sort(StoresByIndex.begin(), StoresByIndex.end(), less_first());
+llvm::sort(StoresByIndex.begin(), StoresByIndex.end(), less_first());
// Walk all of the loads from this alloca, replacing them with the nearest
// store above them, if any.
@@ -631,10 +631,10 @@ void PromoteMem2Reg::run() {
SmallVector<BasicBlock *, 32> PHIBlocks;
IDF.calculate(PHIBlocks);
if (PHIBlocks.size() > 1)
-std::sort(PHIBlocks.begin(), PHIBlocks.end(),
-[this](BasicBlock *A, BasicBlock *B) {
-return BBNumbers.lookup(A) < BBNumbers.lookup(B);
-});
+llvm::sort(PHIBlocks.begin(), PHIBlocks.end(),
+[this](BasicBlock *A, BasicBlock *B) {
+return BBNumbers.lookup(A) < BBNumbers.lookup(B);
+});
unsigned CurrentVersion = 0;
for (BasicBlock *BB : PHIBlocks)
@@ -740,7 +740,7 @@ void PromoteMem2Reg::run() {
// Ok, now we know that all of the PHI nodes are missing entries for some
// basic blocks. Start by sorting the incoming predecessors for efficient
// access.
-std::sort(Preds.begin(), Preds.end());
+llvm::sort(Preds.begin(), Preds.end());
// Now we loop through all BB's which have entries in SomePHI and remove
// them from the Preds list.

View File

@@ -5519,7 +5519,7 @@ static bool ReduceSwitchRange(SwitchInst *SI, IRBuilder<> &Builder,
SmallVector<int64_t,4> Values;
for (auto &C : SI->cases())
Values.push_back(C.getCaseValue()->getValue().getSExtValue());
-std::sort(Values.begin(), Values.end());
+llvm::sort(Values.begin(), Values.end());
// If the switch is already dense, there's nothing useful to do here.
if (isSwitchDense(Values))

View File

@@ -180,12 +180,14 @@ static void findPartitions(Module *M, ClusterIDMapType &ClusterIDMap,
std::make_pair(std::distance(GVtoClusterMap.member_begin(I),
GVtoClusterMap.member_end()), I));
-std::sort(Sets.begin(), Sets.end(), [](const SortType &a, const SortType &b) {
-if (a.first == b.first)
-return a.second->getData()->getName() > b.second->getData()->getName();
-else
-return a.first > b.first;
-});
+llvm::sort(Sets.begin(), Sets.end(),
+[](const SortType &a, const SortType &b) {
+if (a.first == b.first)
+return a.second->getData()->getName() >
+b.second->getData()->getName();
+else
+return a.first > b.first;
+});
for (auto &I : Sets) {
unsigned CurrentClusterID = BalancinQueue.top().first;