mirror of
https://github.com/RPCS3/llvm-mirror.git
synced 2024-11-23 03:02:36 +01:00
[CodeGen] Change std::sort to llvm::sort in response to r327219
Summary: r327219 added wrappers to std::sort which randomly shuffle the container before sorting. This will help in uncovering non-determinism caused due to undefined sorting order of objects having the same key. To make use of that infrastructure we need to invoke llvm::sort instead of std::sort. Note: This patch is one of a series of patches to replace *all* std::sort to llvm::sort. Refer the comments section in D44363 for a list of all the required patches. Reviewers: bogner, rnk, MatzeB, RKSimon Reviewed By: rnk Subscribers: JDevlieghere, javed.absar, llvm-commits Differential Revision: https://reviews.llvm.org/D45133 llvm-svn: 329435
This commit is contained in:
parent
971a8e292d
commit
47c8a6f761
@ -2271,10 +2271,10 @@ void CodeViewDebug::emitLocalVariableList(ArrayRef<LocalVariable> Locals) {
|
||||
for (const LocalVariable &L : Locals)
|
||||
if (L.DIVar->isParameter())
|
||||
Params.push_back(&L);
|
||||
std::sort(Params.begin(), Params.end(),
|
||||
[](const LocalVariable *L, const LocalVariable *R) {
|
||||
return L->DIVar->getArg() < R->DIVar->getArg();
|
||||
});
|
||||
llvm::sort(Params.begin(), Params.end(),
|
||||
[](const LocalVariable *L, const LocalVariable *R) {
|
||||
return L->DIVar->getArg() < R->DIVar->getArg();
|
||||
});
|
||||
for (const LocalVariable *L : Params)
|
||||
emitLocalVariable(*L);
|
||||
|
||||
|
@ -138,7 +138,7 @@ public:
|
||||
// \brief Sort the pieces by offset.
|
||||
// Remove any duplicate entries by dropping all but the first.
|
||||
void sortUniqueValues() {
|
||||
std::sort(Values.begin(), Values.end());
|
||||
llvm::sort(Values.begin(), Values.end());
|
||||
Values.erase(
|
||||
std::unique(
|
||||
Values.begin(), Values.end(), [](const Value &A, const Value &B) {
|
||||
|
@ -241,11 +241,11 @@ ArrayRef<DbgVariable::FrameIndexExpr> DbgVariable::getFrameIndexExprs() const {
|
||||
return A.Expr->isFragment();
|
||||
}) &&
|
||||
"multiple FI expressions without DW_OP_LLVM_fragment");
|
||||
std::sort(FrameIndexExprs.begin(), FrameIndexExprs.end(),
|
||||
[](const FrameIndexExpr &A, const FrameIndexExpr &B) -> bool {
|
||||
return A.Expr->getFragmentInfo()->OffsetInBits <
|
||||
B.Expr->getFragmentInfo()->OffsetInBits;
|
||||
});
|
||||
llvm::sort(FrameIndexExprs.begin(), FrameIndexExprs.end(),
|
||||
[](const FrameIndexExpr &A, const FrameIndexExpr &B) -> bool {
|
||||
return A.Expr->getFragmentInfo()->OffsetInBits <
|
||||
B.Expr->getFragmentInfo()->OffsetInBits;
|
||||
});
|
||||
|
||||
return FrameIndexExprs;
|
||||
}
|
||||
@ -577,21 +577,22 @@ void DwarfDebug::constructAndAddImportedEntityDIE(DwarfCompileUnit &TheCU,
|
||||
/// Sort and unique GVEs by comparing their fragment offset.
|
||||
static SmallVectorImpl<DwarfCompileUnit::GlobalExpr> &
|
||||
sortGlobalExprs(SmallVectorImpl<DwarfCompileUnit::GlobalExpr> &GVEs) {
|
||||
std::sort(GVEs.begin(), GVEs.end(),
|
||||
[](DwarfCompileUnit::GlobalExpr A, DwarfCompileUnit::GlobalExpr B) {
|
||||
// Sort order: first null exprs, then exprs without fragment
|
||||
// info, then sort by fragment offset in bits.
|
||||
// FIXME: Come up with a more comprehensive comparator so
|
||||
// the sorting isn't non-deterministic, and so the following
|
||||
// std::unique call works correctly.
|
||||
if (!A.Expr || !B.Expr)
|
||||
return !!B.Expr;
|
||||
auto FragmentA = A.Expr->getFragmentInfo();
|
||||
auto FragmentB = B.Expr->getFragmentInfo();
|
||||
if (!FragmentA || !FragmentB)
|
||||
return !!FragmentB;
|
||||
return FragmentA->OffsetInBits < FragmentB->OffsetInBits;
|
||||
});
|
||||
llvm::sort(GVEs.begin(), GVEs.end(),
|
||||
[](DwarfCompileUnit::GlobalExpr A,
|
||||
DwarfCompileUnit::GlobalExpr B) {
|
||||
// Sort order: first null exprs, then exprs without fragment
|
||||
// info, then sort by fragment offset in bits.
|
||||
// FIXME: Come up with a more comprehensive comparator so
|
||||
// the sorting isn't non-deterministic, and so the following
|
||||
// std::unique call works correctly.
|
||||
if (!A.Expr || !B.Expr)
|
||||
return !!B.Expr;
|
||||
auto FragmentA = A.Expr->getFragmentInfo();
|
||||
auto FragmentB = B.Expr->getFragmentInfo();
|
||||
if (!FragmentA || !FragmentB)
|
||||
return !!FragmentB;
|
||||
return FragmentA->OffsetInBits < FragmentB->OffsetInBits;
|
||||
});
|
||||
GVEs.erase(std::unique(GVEs.begin(), GVEs.end(),
|
||||
[](DwarfCompileUnit::GlobalExpr A,
|
||||
DwarfCompileUnit::GlobalExpr B) {
|
||||
@ -1878,10 +1879,10 @@ void DwarfDebug::emitDebugARanges() {
|
||||
}
|
||||
|
||||
// Sort the CU list (again, to ensure consistent output order).
|
||||
std::sort(CUs.begin(), CUs.end(),
|
||||
[](const DwarfCompileUnit *A, const DwarfCompileUnit *B) {
|
||||
return A->getUniqueID() < B->getUniqueID();
|
||||
});
|
||||
llvm::sort(CUs.begin(), CUs.end(),
|
||||
[](const DwarfCompileUnit *A, const DwarfCompileUnit *B) {
|
||||
return A->getUniqueID() < B->getUniqueID();
|
||||
});
|
||||
|
||||
// Emit an arange table for each CU we used.
|
||||
for (DwarfCompileUnit *CU : CUs) {
|
||||
|
@ -359,9 +359,9 @@ void EHStreamer::emitExceptionTable() {
|
||||
LandingPads.push_back(&PadInfos[i]);
|
||||
|
||||
// Order landing pads lexicographically by type id.
|
||||
std::sort(LandingPads.begin(), LandingPads.end(),
|
||||
[](const LandingPadInfo *L,
|
||||
const LandingPadInfo *R) { return L->TypeIds < R->TypeIds; });
|
||||
llvm::sort(LandingPads.begin(), LandingPads.end(),
|
||||
[](const LandingPadInfo *L,
|
||||
const LandingPadInfo *R) { return L->TypeIds < R->TypeIds; });
|
||||
|
||||
// Compute the actions table and gather the first action index for each
|
||||
// landing pad site.
|
||||
|
@ -148,15 +148,16 @@ void LegalizerInfo::computeTables() {
|
||||
if (TypeIdx < ScalarSizeChangeStrategies[OpcodeIdx].size() &&
|
||||
ScalarSizeChangeStrategies[OpcodeIdx][TypeIdx] != nullptr)
|
||||
S = ScalarSizeChangeStrategies[OpcodeIdx][TypeIdx];
|
||||
std::sort(ScalarSpecifiedActions.begin(), ScalarSpecifiedActions.end());
|
||||
llvm::sort(ScalarSpecifiedActions.begin(),
|
||||
ScalarSpecifiedActions.end());
|
||||
checkPartialSizeAndActionsVector(ScalarSpecifiedActions);
|
||||
setScalarAction(Opcode, TypeIdx, S(ScalarSpecifiedActions));
|
||||
}
|
||||
|
||||
// 2. Handle pointer types
|
||||
for (auto PointerSpecifiedActions : AddressSpace2SpecifiedActions) {
|
||||
std::sort(PointerSpecifiedActions.second.begin(),
|
||||
PointerSpecifiedActions.second.end());
|
||||
llvm::sort(PointerSpecifiedActions.second.begin(),
|
||||
PointerSpecifiedActions.second.end());
|
||||
checkPartialSizeAndActionsVector(PointerSpecifiedActions.second);
|
||||
// For pointer types, we assume that there isn't a meaningful way
|
||||
// to change the number of bits used in the pointer.
|
||||
@ -168,8 +169,8 @@ void LegalizerInfo::computeTables() {
|
||||
// 3. Handle vector types
|
||||
SizeAndActionsVec ElementSizesSeen;
|
||||
for (auto VectorSpecifiedActions : ElemSize2SpecifiedActions) {
|
||||
std::sort(VectorSpecifiedActions.second.begin(),
|
||||
VectorSpecifiedActions.second.end());
|
||||
llvm::sort(VectorSpecifiedActions.second.begin(),
|
||||
VectorSpecifiedActions.second.end());
|
||||
const uint16_t ElementSize = VectorSpecifiedActions.first;
|
||||
ElementSizesSeen.push_back({ElementSize, Legal});
|
||||
checkPartialSizeAndActionsVector(VectorSpecifiedActions.second);
|
||||
@ -187,7 +188,7 @@ void LegalizerInfo::computeTables() {
|
||||
Opcode, TypeIdx, ElementSize,
|
||||
moreToWiderTypesAndLessToWidest(NumElementsActions));
|
||||
}
|
||||
std::sort(ElementSizesSeen.begin(), ElementSizesSeen.end());
|
||||
llvm::sort(ElementSizesSeen.begin(), ElementSizesSeen.end());
|
||||
SizeChangeStrategy VectorElementSizeChangeStrategy =
|
||||
&unsupportedForDifferentSizes;
|
||||
if (TypeIdx < VectorElementSizeChangeStrategies[OpcodeIdx].size() &&
|
||||
|
@ -335,7 +335,7 @@ bool LocalStackSlotPass::insertFrameReferenceRegisters(MachineFunction &Fn) {
|
||||
|
||||
// Sort the frame references by local offset.
|
||||
// Use frame index as a tie-breaker in case MI's have the same offset.
|
||||
std::sort(FrameReferenceInsns.begin(), FrameReferenceInsns.end());
|
||||
llvm::sort(FrameReferenceInsns.begin(), FrameReferenceInsns.end());
|
||||
|
||||
MachineBasicBlock *Entry = &Fn.front();
|
||||
|
||||
|
@ -456,10 +456,10 @@ bool MachineBasicBlock::isLiveIn(MCPhysReg Reg, LaneBitmask LaneMask) const {
|
||||
}
|
||||
|
||||
void MachineBasicBlock::sortUniqueLiveIns() {
|
||||
std::sort(LiveIns.begin(), LiveIns.end(),
|
||||
[](const RegisterMaskPair &LI0, const RegisterMaskPair &LI1) {
|
||||
return LI0.PhysReg < LI1.PhysReg;
|
||||
});
|
||||
llvm::sort(LiveIns.begin(), LiveIns.end(),
|
||||
[](const RegisterMaskPair &LI0, const RegisterMaskPair &LI1) {
|
||||
return LI0.PhysReg < LI1.PhysReg;
|
||||
});
|
||||
// Liveins are sorted by physreg now we can merge their lanemasks.
|
||||
LiveInVector::const_iterator I = LiveIns.begin();
|
||||
LiveInVector::const_iterator J;
|
||||
|
@ -931,7 +931,7 @@ void SwingSchedulerDAG::schedule() {
|
||||
}
|
||||
});
|
||||
|
||||
std::sort(NodeSets.begin(), NodeSets.end(), std::greater<NodeSet>());
|
||||
llvm::sort(NodeSets.begin(), NodeSets.end(), std::greater<NodeSet>());
|
||||
|
||||
groupRemainingNodes(NodeSets);
|
||||
|
||||
@ -1863,7 +1863,8 @@ void SwingSchedulerDAG::registerPressureFilter(NodeSetType &NodeSets) {
|
||||
RecRPTracker.closeBottom();
|
||||
|
||||
std::vector<SUnit *> SUnits(NS.begin(), NS.end());
|
||||
std::sort(SUnits.begin(), SUnits.end(), [](const SUnit *A, const SUnit *B) {
|
||||
llvm::sort(SUnits.begin(), SUnits.end(),
|
||||
[](const SUnit *A, const SUnit *B) {
|
||||
return A->NodeNum > B->NodeNum;
|
||||
});
|
||||
|
||||
@ -3980,7 +3981,7 @@ void SwingSchedulerDAG::checkValidNodeOrder(const NodeSetType &Circuits) const {
|
||||
};
|
||||
|
||||
// sort, so that we can perform a binary search
|
||||
std::sort(Indices.begin(), Indices.end(), CompareKey);
|
||||
llvm::sort(Indices.begin(), Indices.end(), CompareKey);
|
||||
|
||||
bool Valid = true;
|
||||
(void)Valid;
|
||||
|
@ -1562,7 +1562,7 @@ void BaseMemOpClusterMutation::clusterNeighboringMemOps(
|
||||
if (MemOpRecords.size() < 2)
|
||||
return;
|
||||
|
||||
std::sort(MemOpRecords.begin(), MemOpRecords.end());
|
||||
llvm::sort(MemOpRecords.begin(), MemOpRecords.end());
|
||||
unsigned ClusterLength = 1;
|
||||
for (unsigned Idx = 0, End = MemOpRecords.size(); Idx < (End - 1); ++Idx) {
|
||||
SUnit *SUa = MemOpRecords[Idx].SU;
|
||||
|
@ -157,7 +157,7 @@ bool ReachingDefAnalysis::runOnMachineFunction(MachineFunction &mf) {
|
||||
// Sorting all reaching defs found for a certain reg unit in a given BB.
|
||||
for (MBBDefsInfo &MBBDefs : MBBReachingDefs) {
|
||||
for (MBBRegUnitDefs &RegUnitDefs : MBBDefs)
|
||||
std::sort(RegUnitDefs.begin(), RegUnitDefs.end());
|
||||
llvm::sort(RegUnitDefs.begin(), RegUnitDefs.end());
|
||||
}
|
||||
|
||||
return false;
|
||||
|
@ -83,7 +83,7 @@ void PhysicalRegisterUsageInfo::print(raw_ostream &OS, const Module *M) const {
|
||||
FPRMPairVector.push_back(&RegMask);
|
||||
|
||||
// sort the vector to print analysis in alphabetic order of function name.
|
||||
std::sort(
|
||||
llvm::sort(
|
||||
FPRMPairVector.begin(), FPRMPairVector.end(),
|
||||
[](const FuncPtrRegMaskPair *A, const FuncPtrRegMaskPair *B) -> bool {
|
||||
return A->first->getName() < B->first->getName();
|
||||
|
@ -992,7 +992,7 @@ void ScheduleDAGInstrs::reduceHugeMemNodeMaps(Value2SUsMap &stores,
|
||||
for (auto &I : loads)
|
||||
for (auto *SU : I.second)
|
||||
NodeNums.push_back(SU->NodeNum);
|
||||
std::sort(NodeNums.begin(), NodeNums.end());
|
||||
llvm::sort(NodeNums.begin(), NodeNums.end());
|
||||
|
||||
// The N last elements in NodeNums will be removed, and the SU with
|
||||
// the lowest NodeNum of them will become the new BarrierChain to
|
||||
|
@ -12232,8 +12232,8 @@ static void adjustCostForPairing(SmallVectorImpl<LoadedSlice> &LoadedSlices,
|
||||
|
||||
// Sort the slices so that elements that are likely to be next to each
|
||||
// other in memory are next to each other in the list.
|
||||
std::sort(LoadedSlices.begin(), LoadedSlices.end(),
|
||||
[](const LoadedSlice &LHS, const LoadedSlice &RHS) {
|
||||
llvm::sort(LoadedSlices.begin(), LoadedSlices.end(),
|
||||
[](const LoadedSlice &LHS, const LoadedSlice &RHS) {
|
||||
assert(LHS.Origin == RHS.Origin && "Different bases not implemented.");
|
||||
return LHS.getOffsetFromBase() < RHS.getOffsetFromBase();
|
||||
});
|
||||
@ -13184,10 +13184,10 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) {
|
||||
|
||||
// Sort the memory operands according to their distance from the
|
||||
// base pointer.
|
||||
std::sort(StoreNodes.begin(), StoreNodes.end(),
|
||||
[](MemOpLink LHS, MemOpLink RHS) {
|
||||
return LHS.OffsetFromBase < RHS.OffsetFromBase;
|
||||
});
|
||||
llvm::sort(StoreNodes.begin(), StoreNodes.end(),
|
||||
[](MemOpLink LHS, MemOpLink RHS) {
|
||||
return LHS.OffsetFromBase < RHS.OffsetFromBase;
|
||||
});
|
||||
|
||||
// Store Merge attempts to merge the lowest stores. This generally
|
||||
// works out as if successful, as the remaining stores are checked
|
||||
|
@ -243,7 +243,7 @@ void ScheduleDAGSDNodes::ClusterNeighboringLoads(SDNode *Node) {
|
||||
return;
|
||||
|
||||
// Sort them in increasing order.
|
||||
std::sort(Offsets.begin(), Offsets.end());
|
||||
llvm::sort(Offsets.begin(), Offsets.end());
|
||||
|
||||
// Check if the loads are close enough.
|
||||
SmallVector<SDNode*, 4> Loads;
|
||||
|
@ -7639,7 +7639,7 @@ void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
|
||||
}
|
||||
|
||||
// Sort the uses, so that all the uses from a given User are together.
|
||||
std::sort(Uses.begin(), Uses.end());
|
||||
llvm::sort(Uses.begin(), Uses.end());
|
||||
|
||||
for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
|
||||
UseIndex != UseIndexEnd; ) {
|
||||
|
@ -2522,8 +2522,8 @@ void SelectionDAGBuilder::sortAndRangeify(CaseClusterVector &Clusters) {
|
||||
assert(CC.Low == CC.High && "Input clusters must be single-case");
|
||||
#endif
|
||||
|
||||
std::sort(Clusters.begin(), Clusters.end(),
|
||||
[](const CaseCluster &a, const CaseCluster &b) {
|
||||
llvm::sort(Clusters.begin(), Clusters.end(),
|
||||
[](const CaseCluster &a, const CaseCluster &b) {
|
||||
return a.Low->getValue().slt(b.Low->getValue());
|
||||
});
|
||||
|
||||
@ -6122,10 +6122,10 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
|
||||
GA->getGlobal(), getCurSDLoc(),
|
||||
Val.getValueType(), GA->getOffset())});
|
||||
}
|
||||
std::sort(Targets.begin(), Targets.end(),
|
||||
[](const BranchFunnelTarget &T1, const BranchFunnelTarget &T2) {
|
||||
return T1.Offset < T2.Offset;
|
||||
});
|
||||
llvm::sort(Targets.begin(), Targets.end(),
|
||||
[](const BranchFunnelTarget &T1, const BranchFunnelTarget &T2) {
|
||||
return T1.Offset < T2.Offset;
|
||||
});
|
||||
|
||||
for (auto &T : Targets) {
|
||||
Ops.push_back(DAG.getTargetConstant(T.Offset, getCurSDLoc(), MVT::i32));
|
||||
@ -9470,7 +9470,7 @@ bool SelectionDAGBuilder::buildBitTests(CaseClusterVector &Clusters,
|
||||
}
|
||||
|
||||
BitTestInfo BTI;
|
||||
std::sort(CBV.begin(), CBV.end(), [](const CaseBits &a, const CaseBits &b) {
|
||||
llvm::sort(CBV.begin(), CBV.end(), [](const CaseBits &a, const CaseBits &b) {
|
||||
// Sort by probability first, number of bits second, bit mask third.
|
||||
if (a.ExtraProb != b.ExtraProb)
|
||||
return a.ExtraProb > b.ExtraProb;
|
||||
@ -9669,8 +9669,8 @@ void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
|
||||
// checked first. However, two clusters can have the same probability in
|
||||
// which case their relative ordering is non-deterministic. So we use Low
|
||||
// as a tie-breaker as clusters are guaranteed to never overlap.
|
||||
std::sort(W.FirstCluster, W.LastCluster + 1,
|
||||
[](const CaseCluster &a, const CaseCluster &b) {
|
||||
llvm::sort(W.FirstCluster, W.LastCluster + 1,
|
||||
[](const CaseCluster &a, const CaseCluster &b) {
|
||||
return a.Prob != b.Prob ?
|
||||
a.Prob > b.Prob :
|
||||
a.Low->getValue().slt(b.Low->getValue());
|
||||
|
@ -94,7 +94,7 @@ bool SlotIndexes::runOnMachineFunction(MachineFunction &fn) {
|
||||
}
|
||||
|
||||
// Sort the Idx2MBBMap
|
||||
std::sort(idx2MBBMap.begin(), idx2MBBMap.end(), Idx2MBBCompare());
|
||||
llvm::sort(idx2MBBMap.begin(), idx2MBBMap.end(), Idx2MBBCompare());
|
||||
|
||||
DEBUG(mf->print(dbgs(), this));
|
||||
|
||||
|
@ -1224,7 +1224,7 @@ bool StackColoring::runOnMachineFunction(MachineFunction &Func) {
|
||||
});
|
||||
|
||||
for (auto &s : LiveStarts)
|
||||
std::sort(s.begin(), s.end());
|
||||
llvm::sort(s.begin(), s.end());
|
||||
|
||||
bool Changed = true;
|
||||
while (Changed) {
|
||||
|
@ -268,11 +268,11 @@ StackMaps::parseRegisterLiveOutMask(const uint32_t *Mask) const {
|
||||
// in the list. Merge entries that refer to the same dwarf register and use
|
||||
// the maximum size that needs to be spilled.
|
||||
|
||||
std::sort(LiveOuts.begin(), LiveOuts.end(),
|
||||
[](const LiveOutReg &LHS, const LiveOutReg &RHS) {
|
||||
// Only sort by the dwarf register number.
|
||||
return LHS.DwarfRegNum < RHS.DwarfRegNum;
|
||||
});
|
||||
llvm::sort(LiveOuts.begin(), LiveOuts.end(),
|
||||
[](const LiveOutReg &LHS, const LiveOutReg &RHS) {
|
||||
// Only sort by the dwarf register number.
|
||||
return LHS.DwarfRegNum < RHS.DwarfRegNum;
|
||||
});
|
||||
|
||||
for (auto I = LiveOuts.begin(), E = LiveOuts.end(); I != E; ++I) {
|
||||
for (auto II = std::next(I); II != E; ++II) {
|
||||
|
@ -209,8 +209,8 @@ void StackSlotColoring::InitializeSlots() {
|
||||
Intervals.reserve(LS->getNumIntervals());
|
||||
for (auto &I : *LS)
|
||||
Intervals.push_back(&I);
|
||||
std::sort(Intervals.begin(), Intervals.end(),
|
||||
[](Pair *LHS, Pair *RHS) { return LHS->first < RHS->first; });
|
||||
llvm::sort(Intervals.begin(), Intervals.end(),
|
||||
[](Pair *LHS, Pair *RHS) { return LHS->first < RHS->first; });
|
||||
|
||||
// Gather all spill slots into a list.
|
||||
DEBUG(dbgs() << "Spill slot intervals:\n");
|
||||
|
Loading…
Reference in New Issue
Block a user