[ADT/STLExtras.h] - Add llvm::is_sorted wrapper and update callers.
It can be used to avoid passing the begin and end of a range. This makes the code shorter and is consistent with the other wrappers we already have.

Differential revision: https://reviews.llvm.org/D78016
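For illustration only (not part of the commit), the caller-side change applied throughout the patch follows this pattern; the container name Vec is hypothetical:

#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cassert>
#include <vector>

static void checkSorted(const std::vector<int> &Vec) {
  // Old style: pass the iterator pair explicitly.
  assert(std::is_sorted(Vec.begin(), Vec.end()) && "Vec must be sorted");
  // New style: pass the range itself via the llvm::is_sorted wrapper.
  assert(llvm::is_sorted(Vec) && "Vec must be sorted");
}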
parent 373d0c84dc
commit 180c8196c1
@@ -384,10 +384,10 @@ private:
     for (IntervalMapOverlaps<MapT, MapT> I(Intervals, Other.Intervals);
          I.valid(); ++I)
       Overlaps.emplace_back(I.start(), I.stop());
-    assert(std::is_sorted(Overlaps.begin(), Overlaps.end(),
-                          [](IntervalT LHS, IntervalT RHS) {
-                            return LHS.second < RHS.first;
-                          }) &&
+    assert(llvm::is_sorted(Overlaps,
+                           [](IntervalT LHS, IntervalT RHS) {
+                             return LHS.second < RHS.first;
+                           }) &&
            "Overlaps must be sorted");
     return !Overlaps.empty();
   }
@@ -1257,6 +1257,18 @@ bool is_contained(R &&Range, const E &Element) {
   return std::find(adl_begin(Range), adl_end(Range), Element) != adl_end(Range);
 }
 
+/// Wrapper function around std::is_sorted to check if elements in a range \p R
+/// are sorted with respect to a comparator \p C.
+template <typename R, typename Compare> bool is_sorted(R &&Range, Compare C) {
+  return std::is_sorted(adl_begin(Range), adl_end(Range), C);
+}
+
+/// Wrapper function around std::is_sorted to check if elements in a range \p R
+/// are sorted in non-descending order.
+template <typename R> bool is_sorted(R &&Range) {
+  return std::is_sorted(adl_begin(Range), adl_end(Range));
+}
+
 /// Wrapper function around std::count to count the number of times an element
 /// \p Element occurs in the given range \p Range.
 template <typename R, typename E> auto count(R &&Range, const E &Element) {
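A minimal sketch of how the two new overloads behave, assuming the header above is available; the values and the comparator are invented for the example:

#include "llvm/ADT/STLExtras.h"
#include <cassert>
#include <vector>

int main() {
  std::vector<int> Ascending = {1, 2, 3};
  int Descending[] = {9, 5, 1};

  // Overload without a comparator: checks non-descending order.
  assert(llvm::is_sorted(Ascending));

  // Overload with a comparator: here the range is sorted in descending order.
  // Works for any range adl_begin/adl_end accept, including plain arrays.
  assert(llvm::is_sorted(Descending, [](int L, int R) { return L > R; }));
  return 0;
}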
@@ -617,7 +617,7 @@ namespace llvm {
     /// subranges). Returns true if found at least one index.
     template <typename Range, typename OutputIt>
     bool findIndexesLiveAt(Range &&R, OutputIt O) const {
-      assert(std::is_sorted(R.begin(), R.end()));
+      assert(llvm::is_sorted(R));
       auto Idx = R.begin(), EndIdx = R.end();
       auto Seg = segments.begin(), EndSeg = segments.end();
       bool Found = false;
@@ -552,7 +552,7 @@ bool CFLAndersAAResult::FunctionInfo::mayAlias(
     return std::less<const Value *>()(LHS.Val, RHS.Val);
   };
 #ifdef EXPENSIVE_CHECKS
-  assert(std::is_sorted(Itr->second.begin(), Itr->second.end(), Comparator));
+  assert(llvm::is_sorted(Itr->second, Comparator));
 #endif
   auto RangePair = std::equal_range(Itr->second.begin(), Itr->second.end(),
                                     OffsetValue{RHS, 0}, Comparator);
@@ -64,10 +64,10 @@ static Loop *getInnerMostLoop(const LoopVectorTy &Loops) {
       return LastLoop;
   }
 
-  return (std::is_sorted(Loops.begin(), Loops.end(),
-                         [](const Loop *L1, const Loop *L2) {
-                           return L1->getLoopDepth() < L2->getLoopDepth();
-                         }))
+  return (llvm::is_sorted(Loops,
+                          [](const Loop *L1, const Loop *L2) {
+                            return L1->getLoopDepth() < L2->getLoopDepth();
+                          }))
              ? LastLoop
              : nullptr;
 }
@@ -69,11 +69,10 @@ static bool hasBcmp(const Triple &TT) {
 static void initialize(TargetLibraryInfoImpl &TLI, const Triple &T,
                        ArrayRef<StringLiteral> StandardNames) {
   // Verify that the StandardNames array is in alphabetical order.
-  assert(std::is_sorted(StandardNames.begin(), StandardNames.end(),
-                        [](StringRef LHS, StringRef RHS) {
-                          return LHS < RHS;
-                        }) &&
-         "TargetLibraryInfoImpl function names must be sorted");
+  assert(
+      llvm::is_sorted(StandardNames,
+                      [](StringRef LHS, StringRef RHS) { return LHS < RHS; }) &&
+      "TargetLibraryInfoImpl function names must be sorted");
 
   // Set IO unlocked variants as unavailable
   // Set them as available per system below
@@ -231,9 +231,9 @@ static void predictValueUseListOrderImpl(const Value *V, const Function *F,
     return LU->getOperandNo() > RU->getOperandNo();
   });
 
-  if (std::is_sorted(
-          List.begin(), List.end(),
-          [](const Entry &L, const Entry &R) { return L.second < R.second; }))
+  if (llvm::is_sorted(List, [](const Entry &L, const Entry &R) {
+        return L.second < R.second;
+      }))
     // Order is already correct.
     return;
 
@@ -2424,8 +2424,7 @@ void DebugLocEntry::finalize(const AsmPrinter &AP,
     assert(llvm::all_of(Values, [](DbgValueLoc P) {
              return P.isFragment();
            }) && "all values are expected to be fragments");
-    assert(std::is_sorted(Values.begin(), Values.end()) &&
-           "fragments are expected to be sorted");
+    assert(llvm::is_sorted(Values) && "fragments are expected to be sorted");
 
     for (auto Fragment : Values)
       DwarfDebug::emitDebugLocValue(AP, BT, Fragment, DwarfExpr);
@@ -513,7 +513,7 @@ void MachineIRBuilder::buildSequence(Register Res, ArrayRef<Register> Ops,
 #ifndef NDEBUG
   assert(Ops.size() == Indices.size() && "incompatible args");
   assert(!Ops.empty() && "invalid trivial sequence");
-  assert(std::is_sorted(Indices.begin(), Indices.end()) &&
+  assert(llvm::is_sorted(Indices) &&
          "sequence offsets must be in ascending order");
 
   assert(getMRI()->getType(Res).isValid() && "invalid operand type");
@@ -91,8 +91,8 @@ OMPContext::OMPContext(bool IsDeviceCompilation, Triple TargetTriple) {
 /// expected to be sorted.
 template <typename T> static bool isSubset(ArrayRef<T> C0, ArrayRef<T> C1) {
 #ifdef EXPENSIVE_CHECKS
-  assert(std::is_sorted(C0.begin(), C0.end()) &&
-         std::is_sorted(C1.begin(), C1.end()) && "Expected sorted arrays!");
+  assert(llvm::is_sorted(C0) && llvm::is_sorted(C1) &&
+         "Expected sorted arrays!");
 #endif
   if (C0.size() > C1.size())
     return false;
@@ -228,9 +228,9 @@ static void predictValueUseListOrderImpl(const Value *V, const Function *F,
     return LU->getOperandNo() > RU->getOperandNo();
   });
 
-  if (std::is_sorted(
-          List.begin(), List.end(),
-          [](const Entry &L, const Entry &R) { return L.second < R.second; }))
+  if (llvm::is_sorted(List, [](const Entry &L, const Entry &R) {
+        return L.second < R.second;
+      }))
     // Order is already correct.
     return;
 
@@ -1019,11 +1019,12 @@ AttributeList::get(LLVMContext &C,
   if (Attrs.empty())
     return {};
 
-  assert(std::is_sorted(Attrs.begin(), Attrs.end(),
-                        [](const std::pair<unsigned, Attribute> &LHS,
-                           const std::pair<unsigned, Attribute> &RHS) {
-                          return LHS.first < RHS.first;
-                        }) && "Misordered Attributes list!");
+  assert(llvm::is_sorted(Attrs,
+                         [](const std::pair<unsigned, Attribute> &LHS,
+                            const std::pair<unsigned, Attribute> &RHS) {
+                           return LHS.first < RHS.first;
+                         }) &&
+         "Misordered Attributes list!");
   assert(llvm::none_of(Attrs,
                        [](const std::pair<unsigned, Attribute> &Pair) {
                          return Pair.second.hasAttribute(Attribute::None);
@@ -1055,11 +1056,11 @@ AttributeList::get(LLVMContext &C,
   if (Attrs.empty())
     return {};
 
-  assert(std::is_sorted(Attrs.begin(), Attrs.end(),
-                        [](const std::pair<unsigned, AttributeSet> &LHS,
-                           const std::pair<unsigned, AttributeSet> &RHS) {
-                          return LHS.first < RHS.first;
-                        }) &&
+  assert(llvm::is_sorted(Attrs,
+                         [](const std::pair<unsigned, AttributeSet> &LHS,
+                            const std::pair<unsigned, AttributeSet> &RHS) {
+                           return LHS.first < RHS.first;
+                         }) &&
          "Misordered Attributes list!");
   assert(llvm::none_of(Attrs,
                        [](const std::pair<unsigned, AttributeSet> &Pair) {
@@ -1228,7 +1229,7 @@ AttributeList AttributeList::addAttributes(LLVMContext &C, unsigned Index,
 AttributeList AttributeList::addParamAttribute(LLVMContext &C,
                                                ArrayRef<unsigned> ArgNos,
                                                Attribute A) const {
-  assert(std::is_sorted(ArgNos.begin(), ArgNos.end()));
+  assert(llvm::is_sorted(ArgNos));
 
   SmallVector<AttributeSet, 4> AttrSets(this->begin(), this->end());
   unsigned MaxIndex = attrIdxToArrayIdx(ArgNos.back() + FirstArgIndex);
@@ -155,10 +155,8 @@ static FeatureBitset getFeatures(StringRef CPU, StringRef FS,
   if (ProcDesc.empty() || ProcFeatures.empty())
     return FeatureBitset();
 
-  assert(std::is_sorted(std::begin(ProcDesc), std::end(ProcDesc)) &&
-         "CPU table is not sorted");
-  assert(std::is_sorted(std::begin(ProcFeatures), std::end(ProcFeatures)) &&
-         "CPU features table is not sorted");
+  assert(llvm::is_sorted(ProcDesc) && "CPU table is not sorted");
+  assert(llvm::is_sorted(ProcFeatures) && "CPU features table is not sorted");
   // Resulting bits
   FeatureBitset Bits;
 
@@ -290,7 +288,7 @@ bool MCSubtargetInfo::checkFeatures(StringRef FS) const {
 }
 
 const MCSchedModel &MCSubtargetInfo::getSchedModelForCPU(StringRef CPU) const {
-  assert(std::is_sorted(ProcDesc.begin(), ProcDesc.end()) &&
+  assert(llvm::is_sorted(ProcDesc) &&
          "Processor machine model table is not sorted");
 
   // Find entry
@@ -417,8 +417,7 @@ static const NEONLdStTableEntry *LookupNEONLdSt(unsigned Opcode) {
   // Make sure the table is sorted.
   static std::atomic<bool> TableChecked(false);
   if (!TableChecked.load(std::memory_order_relaxed)) {
-    assert(std::is_sorted(std::begin(NEONLdStTable), std::end(NEONLdStTable)) &&
-           "NEONLdStTable is not sorted!");
+    assert(llvm::is_sorted(NEONLdStTable) && "NEONLdStTable is not sorted!");
     TableChecked.store(true, std::memory_order_relaxed);
   }
 #endif
@@ -3619,8 +3619,7 @@ public:
     if (Kind == k_RegisterList && Regs.back().second == ARM::APSR)
       Kind = k_RegisterListWithAPSR;
 
-    assert(std::is_sorted(Regs.begin(), Regs.end()) &&
-           "Register list must be sorted by encoding");
+    assert(llvm::is_sorted(Regs) && "Register list must be sorted by encoding");
 
     auto Op = std::make_unique<ARMOperand>(Kind);
     for (const auto &P : Regs)
@@ -30,9 +30,9 @@ static bool isF128SoftLibCall(const char *CallSym) {
 
   // Check that LibCalls is sorted alphabetically.
   auto Comp = [](const char *S1, const char *S2) { return strcmp(S1, S2) < 0; };
-  assert(std::is_sorted(std::begin(LibCalls), std::end(LibCalls), Comp));
-  return std::binary_search(std::begin(LibCalls), std::end(LibCalls),
-                            CallSym, Comp);
+  assert(llvm::is_sorted(LibCalls, Comp));
+  return std::binary_search(std::begin(LibCalls), std::end(LibCalls), CallSym,
+                            Comp);
 }
 
 /// This function returns true if Ty is fp128, {f128} or i128 which was
@@ -237,11 +237,9 @@ bool EvexToVexInstPass::CompressEvexToVexImpl(MachineInstr &MI) const {
   // Make sure the tables are sorted.
   static std::atomic<bool> TableChecked(false);
   if (!TableChecked.load(std::memory_order_relaxed)) {
-    assert(std::is_sorted(std::begin(X86EvexToVex128CompressTable),
-                          std::end(X86EvexToVex128CompressTable)) &&
+    assert(llvm::is_sorted(X86EvexToVex128CompressTable) &&
            "X86EvexToVex128CompressTable is not sorted!");
-    assert(std::is_sorted(std::begin(X86EvexToVex256CompressTable),
-                          std::end(X86EvexToVex256CompressTable)) &&
+    assert(llvm::is_sorted(X86EvexToVex256CompressTable) &&
            "X86EvexToVex256CompressTable is not sorted!");
     TableChecked.store(true, std::memory_order_relaxed);
   }
@@ -3383,10 +3383,10 @@ static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
 
 #ifndef NDEBUG
 static bool isSortedByValueNo(ArrayRef<CCValAssign> ArgLocs) {
-  return std::is_sorted(ArgLocs.begin(), ArgLocs.end(),
-                        [](const CCValAssign &A, const CCValAssign &B) -> bool {
-                          return A.getValNo() < B.getValNo();
-                        });
+  return llvm::is_sorted(
+      ArgLocs, [](const CCValAssign &A, const CCValAssign &B) -> bool {
+        return A.getValNo() < B.getValNo();
+      });
 }
 #endif
 
@@ -116,11 +116,8 @@ static void verifyTables() {
 #ifndef NDEBUG
   static std::atomic<bool> TableChecked(false);
   if (!TableChecked.load(std::memory_order_relaxed)) {
-    assert(std::is_sorted(std::begin(Groups), std::end(Groups)) &&
-           std::is_sorted(std::begin(RoundGroups), std::end(RoundGroups)) &&
-           std::is_sorted(std::begin(BroadcastGroups),
-                          std::end(BroadcastGroups)) &&
-           "FMA3 tables not sorted!");
+    assert(llvm::is_sorted(Groups) && llvm::is_sorted(RoundGroups) &&
+           llvm::is_sorted(BroadcastGroups) && "FMA3 tables not sorted!");
     TableChecked.store(true, std::memory_order_relaxed);
   }
 #endif
@@ -5529,53 +5529,45 @@ lookupFoldTableImpl(ArrayRef<X86MemoryFoldTableEntry> Table, unsigned RegOp) {
   // Make sure the tables are sorted.
   static std::atomic<bool> FoldTablesChecked(false);
   if (!FoldTablesChecked.load(std::memory_order_relaxed)) {
-    assert(std::is_sorted(std::begin(MemoryFoldTable2Addr),
-                          std::end(MemoryFoldTable2Addr)) &&
+    assert(llvm::is_sorted(MemoryFoldTable2Addr) &&
            std::adjacent_find(std::begin(MemoryFoldTable2Addr),
                               std::end(MemoryFoldTable2Addr)) ==
-           std::end(MemoryFoldTable2Addr) &&
+               std::end(MemoryFoldTable2Addr) &&
           "MemoryFoldTable2Addr is not sorted and unique!");
-    assert(std::is_sorted(std::begin(MemoryFoldTable0),
-                          std::end(MemoryFoldTable0)) &&
+    assert(llvm::is_sorted(MemoryFoldTable0) &&
           std::adjacent_find(std::begin(MemoryFoldTable0),
                              std::end(MemoryFoldTable0)) ==
-          std::end(MemoryFoldTable0) &&
+              std::end(MemoryFoldTable0) &&
           "MemoryFoldTable0 is not sorted and unique!");
-    assert(std::is_sorted(std::begin(MemoryFoldTable1),
-                          std::end(MemoryFoldTable1)) &&
+    assert(llvm::is_sorted(MemoryFoldTable1) &&
           std::adjacent_find(std::begin(MemoryFoldTable1),
                              std::end(MemoryFoldTable1)) ==
-          std::end(MemoryFoldTable1) &&
+              std::end(MemoryFoldTable1) &&
          "MemoryFoldTable1 is not sorted and unique!");
-    assert(std::is_sorted(std::begin(MemoryFoldTable2),
-                          std::end(MemoryFoldTable2)) &&
+    assert(llvm::is_sorted(MemoryFoldTable2) &&
           std::adjacent_find(std::begin(MemoryFoldTable2),
                              std::end(MemoryFoldTable2)) ==
-          std::end(MemoryFoldTable2) &&
+              std::end(MemoryFoldTable2) &&
          "MemoryFoldTable2 is not sorted and unique!");
-    assert(std::is_sorted(std::begin(MemoryFoldTable3),
-                          std::end(MemoryFoldTable3)) &&
+    assert(llvm::is_sorted(MemoryFoldTable3) &&
           std::adjacent_find(std::begin(MemoryFoldTable3),
                              std::end(MemoryFoldTable3)) ==
-          std::end(MemoryFoldTable3) &&
+              std::end(MemoryFoldTable3) &&
          "MemoryFoldTable3 is not sorted and unique!");
-    assert(std::is_sorted(std::begin(MemoryFoldTable4),
-                          std::end(MemoryFoldTable4)) &&
+    assert(llvm::is_sorted(MemoryFoldTable4) &&
           std::adjacent_find(std::begin(MemoryFoldTable4),
                              std::end(MemoryFoldTable4)) ==
-          std::end(MemoryFoldTable4) &&
+              std::end(MemoryFoldTable4) &&
          "MemoryFoldTable4 is not sorted and unique!");
-    assert(std::is_sorted(std::begin(BroadcastFoldTable2),
-                          std::end(BroadcastFoldTable2)) &&
+    assert(llvm::is_sorted(BroadcastFoldTable2) &&
           std::adjacent_find(std::begin(BroadcastFoldTable2),
                              std::end(BroadcastFoldTable2)) ==
-          std::end(BroadcastFoldTable2) &&
+              std::end(BroadcastFoldTable2) &&
          "BroadcastFoldTable2 is not sorted and unique!");
-    assert(std::is_sorted(std::begin(BroadcastFoldTable3),
-                          std::end(BroadcastFoldTable3)) &&
+    assert(llvm::is_sorted(BroadcastFoldTable3) &&
           std::adjacent_find(std::begin(BroadcastFoldTable3),
                              std::end(BroadcastFoldTable3)) ==
-          std::end(BroadcastFoldTable3) &&
+              std::end(BroadcastFoldTable3) &&
          "BroadcastFoldTable3 is not sorted and unique!");
     FoldTablesChecked.store(true, std::memory_order_relaxed);
   }
@@ -1155,10 +1155,8 @@ static const IntrinsicData* getIntrinsicWithoutChain(unsigned IntNo) {
 }
 
 static void verifyIntrinsicTables() {
-  assert(std::is_sorted(std::begin(IntrinsicsWithoutChain),
-                        std::end(IntrinsicsWithoutChain)) &&
-         std::is_sorted(std::begin(IntrinsicsWithChain),
-                        std::end(IntrinsicsWithChain)) &&
+  assert(llvm::is_sorted(IntrinsicsWithoutChain) &&
+         llvm::is_sorted(IntrinsicsWithChain) &&
          "Intrinsic data tables should be sorted by Intrinsic ID");
   assert((std::adjacent_find(std::begin(IntrinsicsWithoutChain),
                              std::end(IntrinsicsWithoutChain)) ==
@@ -72,8 +72,7 @@ public:
   CVPLatticeVal(CVPLatticeStateTy LatticeState) : LatticeState(LatticeState) {}
   CVPLatticeVal(std::vector<Function *> &&Functions)
       : LatticeState(FunctionSet), Functions(std::move(Functions)) {
-    assert(std::is_sorted(this->Functions.begin(), this->Functions.end(),
-                          Compare()));
+    assert(llvm::is_sorted(this->Functions, Compare()));
   }
 
   /// Get a reference to the functions held by this lattice value. The number
@@ -107,8 +107,8 @@ const PfmCountersInfo PfmCountersInfo::Default = {nullptr, nullptr, nullptr,
                                                   0u};
 
 const PfmCountersInfo &ExegesisTarget::getPfmCounters(StringRef CpuName) const {
-  assert(std::is_sorted(
-             CpuPfmCounters.begin(), CpuPfmCounters.end(),
+  assert(llvm::is_sorted(
+             CpuPfmCounters,
              [](const CpuAndPfmCounters &LHS, const CpuAndPfmCounters &RHS) {
                return strcmp(LHS.CpuName, RHS.CpuName) < 0;
              }) &&
@@ -1902,8 +1902,7 @@ static void orderSegments(std::vector<Segment *> &Segments) {
 // returns an Offset one past the end of the last segment.
 static uint64_t layoutSegments(std::vector<Segment *> &Segments,
                                uint64_t Offset) {
-  assert(std::is_sorted(std::begin(Segments), std::end(Segments),
-                        compareSegmentsByOffset));
+  assert(llvm::is_sorted(Segments, compareSegmentsByOffset));
   // The only way a segment should move is if a section was between two
   // segments and that section was removed. If that section isn't in a segment
   // then it's acceptable, but not ideal, to simply move it to after the
@@ -61,15 +61,16 @@ void MachOLayoutBuilder::updateDySymTab(MachO::macho_load_command &MLC) {
   assert(MLC.load_command_data.cmd == MachO::LC_DYSYMTAB);
   // Make sure that nlist entries in the symbol table are sorted by the those
   // types. The order is: local < defined external < undefined external.
-  assert(std::is_sorted(O.SymTable.Symbols.begin(), O.SymTable.Symbols.end(),
-                        [](const std::unique_ptr<SymbolEntry> &A,
-                           const std::unique_ptr<SymbolEntry> &B) {
-                          bool AL = A->isLocalSymbol(), BL = B->isLocalSymbol();
-                          if (AL != BL)
-                            return AL;
-                          return !AL && !A->isUndefinedSymbol() &&
-                                 B->isUndefinedSymbol();
-                        }) &&
+  assert(llvm::is_sorted(O.SymTable.Symbols,
+                         [](const std::unique_ptr<SymbolEntry> &A,
+                            const std::unique_ptr<SymbolEntry> &B) {
+                           bool AL = A->isLocalSymbol(),
+                                BL = B->isLocalSymbol();
+                           if (AL != BL)
+                             return AL;
+                           return !AL && !A->isUndefinedSymbol() &&
+                                  B->isUndefinedSymbol();
+                         }) &&
          "Symbols are not sorted by their types.");
 
   uint32_t NumLocalSymbols = 0;
@@ -436,8 +436,8 @@ TEST(SimpleIListTest, merge) {
   // Check setup.
   EXPECT_EQ(4u, L1.size());
   EXPECT_EQ(6u, L2.size());
-  EXPECT_TRUE(std::is_sorted(L1.begin(), L1.end()));
-  EXPECT_TRUE(std::is_sorted(L2.begin(), L2.end()));
+  EXPECT_TRUE(llvm::is_sorted(L1));
+  EXPECT_TRUE(llvm::is_sorted(L2));
 
   // Merge.
   auto &LHS = IsL1LHS ? L1 : L2;
@@ -445,7 +445,7 @@ TEST(SimpleIListTest, merge) {
   LHS.merge(RHS);
   EXPECT_TRUE(RHS.empty());
   EXPECT_FALSE(LHS.empty());
-  EXPECT_TRUE(std::is_sorted(LHS.begin(), LHS.end()));
+  EXPECT_TRUE(llvm::is_sorted(LHS));
   auto I = LHS.begin();
   for (Node &N : Ns)
     EXPECT_EQ(&N, &*I++);
@@ -473,8 +473,8 @@ TEST(SimpleIListTest, mergeIsStable) {
     // Check setup.
     EXPECT_EQ(3u, L1.size());
    EXPECT_EQ(2u, L2.size());
-    EXPECT_TRUE(std::is_sorted(L1.begin(), L1.end(), makeFalse));
-    EXPECT_TRUE(std::is_sorted(L2.begin(), L2.end(), makeFalse));
+    EXPECT_TRUE(llvm::is_sorted(L1, makeFalse));
+    EXPECT_TRUE(llvm::is_sorted(L2, makeFalse));
   };
 
   // Merge. Should be stable.
@@ -482,7 +482,7 @@ TEST(SimpleIListTest, mergeIsStable) {
   L1.merge(L2, makeFalse);
   EXPECT_TRUE(L2.empty());
   EXPECT_FALSE(L1.empty());
-  EXPECT_TRUE(std::is_sorted(L1.begin(), L1.end(), makeFalse));
+  EXPECT_TRUE(llvm::is_sorted(L1, makeFalse));
   auto I = L1.begin();
   EXPECT_EQ(&Ns[0], &*I++);
   EXPECT_EQ(&Ns[3], &*I++);
@@ -497,7 +497,7 @@ TEST(SimpleIListTest, mergeIsStable) {
   L2.merge(L1, makeFalse);
   EXPECT_TRUE(L1.empty());
   EXPECT_FALSE(L2.empty());
-  EXPECT_TRUE(std::is_sorted(L2.begin(), L2.end(), makeFalse));
+  EXPECT_TRUE(llvm::is_sorted(L2, makeFalse));
   I = L2.begin();
   EXPECT_EQ(&Ns[1], &*I++);
   EXPECT_EQ(&Ns[2], &*I++);
@@ -521,7 +521,7 @@ TEST(SimpleIListTest, mergeEmpty) {
   // Check setup.
   EXPECT_EQ(4u, L1.size());
   EXPECT_TRUE(L2.empty());
-  EXPECT_TRUE(std::is_sorted(L1.begin(), L1.end()));
+  EXPECT_TRUE(llvm::is_sorted(L1));
 
   // Merge.
   auto &LHS = IsL1LHS ? L1 : L2;
|
||||
LHS.merge(RHS);
|
||||
EXPECT_TRUE(RHS.empty());
|
||||
EXPECT_FALSE(LHS.empty());
|
||||
EXPECT_TRUE(std::is_sorted(LHS.begin(), LHS.end()));
|
||||
EXPECT_TRUE(llvm::is_sorted(LHS));
|
||||
auto I = LHS.begin();
|
||||
for (Node &N : Ns)
|
||||
EXPECT_EQ(&N, &*I++);
|
||||
@@ -554,11 +554,11 @@ TEST(SimpleIListTest, sort) {
 
   // Check setup.
   EXPECT_EQ(10u, L.size());
-  EXPECT_FALSE(std::is_sorted(L.begin(), L.end()));
+  EXPECT_FALSE(llvm::is_sorted(L));
 
   // Sort.
   L.sort();
-  EXPECT_TRUE(std::is_sorted(L.begin(), L.end()));
+  EXPECT_TRUE(llvm::is_sorted(L));
   auto I = L.begin();
   for (Node &N : Ns)
     EXPECT_EQ(&N, &*I++);
@@ -581,11 +581,11 @@ TEST(SimpleIListTest, sortIsStable) {
 
   // Check setup.
   EXPECT_EQ(10u, L.size());
-  EXPECT_FALSE(std::is_sorted(L.begin(), L.end(), compare));
+  EXPECT_FALSE(llvm::is_sorted(L, compare));
 
   // Sort.
   L.sort(compare);
-  EXPECT_TRUE(std::is_sorted(L.begin(), L.end(), compare));
+  EXPECT_TRUE(llvm::is_sorted(L, compare));
   auto I = L.begin();
   for (int O : {3, 4, 1, 2, 0})
     EXPECT_EQ(&Ns[O], &*I++);
@@ -31,7 +31,7 @@ TEST(Parallel, sort) {
    i = dist(randEngine);
 
   sort(parallel::par, std::begin(array), std::end(array));
-  ASSERT_TRUE(std::is_sorted(std::begin(array), std::end(array)));
+  ASSERT_TRUE(llvm::is_sorted(array));
 }
 
 TEST(Parallel, parallel_for) {