
Simplify std::lower_bound with llvm::{bsearch,lower_bound}. NFC

llvm-svn: 364006
commit 8219c5373b (parent 075e9b97a2)
Author: Fangrui Song
Date:   2019-06-21 05:40:31 +00:00

26 changed files with 64 additions and 101 deletions
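
Nearly every hunk below applies the same mechanical rewrite: an iterator-pair std::lower_bound call, usually with an ad-hoc "element < key" comparator, becomes either the range-based llvm::lower_bound or llvm::bsearch with a single "key <= element" predicate (one hunk similarly turns std::any_of into llvm::any_of). The sketch below is not part of the commit; it uses plain STL and made-up names (Entry, DS, Percentile) to show why the two binary-search forms land on the same element, assuming llvm::bsearch behaves like a partition-point search over the range:

// Standalone illustration only -- Entry, DS and Percentile are stand-ins,
// not LLVM code. Any C++11 compiler will build this.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

struct Entry {
  uint64_t Cutoff;
};

int main() {
  std::vector<Entry> DS = {{100}, {200}, {300}};
  uint64_t Percentile = 150;

  // Old form: iterator pair plus a separate "element < key" comparator.
  auto Compare = [](const Entry &E, uint64_t P) { return E.Cutoff < P; };
  auto Old = std::lower_bound(DS.begin(), DS.end(), Percentile, Compare);

  // New form: one "key <= element" predicate over the whole range.
  // llvm::bsearch(Range, Pred) returns the first element for which Pred is
  // true, i.e. the partition point of !Pred.
  auto Pred = [&](const Entry &E) { return Percentile <= E.Cutoff; };
  auto New = std::partition_point(DS.begin(), DS.end(),
                                  [&](const Entry &E) { return !Pred(E); });

  // Both forms find the first entry whose Cutoff is >= Percentile.
  assert(Old == New && Old->Cutoff == 200);
  return 0;
}

The llvm::lower_bound cases are the same idea with no comparator rewrite at all: the range helper simply forwards to std::lower_bound over the range's begin/end.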


@@ -60,10 +60,9 @@ static cl::opt<int> ProfileSummaryColdCount(
 // Find the summary entry for a desired percentile of counts.
 static const ProfileSummaryEntry &getEntryForPercentile(SummaryEntryVector &DS,
                                                         uint64_t Percentile) {
-  auto Compare = [](const ProfileSummaryEntry &Entry, uint64_t Percentile) {
-    return Entry.Cutoff < Percentile;
-  };
-  auto It = std::lower_bound(DS.begin(), DS.end(), Percentile, Compare);
+  auto It = llvm::bsearch(DS, [=](const ProfileSummaryEntry &Entry) {
+    return Percentile <= Entry.Cutoff;
+  });
   // The required percentile has to be <= one of the percentiles in the
   // detailed summary.
   if (It == DS.end())


@@ -900,8 +900,7 @@ bool LiveIntervals::checkRegMaskInterference(LiveInterval &LI,
   // We are going to enumerate all the register mask slots contained in LI.
   // Start with a binary search of RegMaskSlots to find a starting point.
-  ArrayRef<SlotIndex>::iterator SlotI =
-      std::lower_bound(Slots.begin(), Slots.end(), LiveI->start);
+  ArrayRef<SlotIndex>::iterator SlotI = llvm::lower_bound(Slots, LiveI->start);
   ArrayRef<SlotIndex>::iterator SlotE = Slots.end();

   // No slots in range, LI begins after the last call.
@@ -1370,8 +1369,7 @@ private:
   void updateRegMaskSlots() {
     SmallVectorImpl<SlotIndex>::iterator RI =
-        std::lower_bound(LIS.RegMaskSlots.begin(), LIS.RegMaskSlots.end(),
-                         OldIdx);
+        llvm::lower_bound(LIS.RegMaskSlots, OldIdx);
     assert(RI != LIS.RegMaskSlots.end() && *RI == OldIdx.getRegSlot() &&
            "No RegMask at OldIdx.");
     *RI = NewIdx.getRegSlot();


@@ -3726,9 +3726,8 @@ void SwingSchedulerDAG::checkValidNodeOrder(const NodeSetType &Circuits) const {
     for (SDep &PredEdge : SU->Preds) {
       SUnit *PredSU = PredEdge.getSUnit();
-      unsigned PredIndex =
-          std::get<1>(*std::lower_bound(Indices.begin(), Indices.end(),
-                                        std::make_pair(PredSU, 0), CompareKey));
+      unsigned PredIndex = std::get<1>(
+          *llvm::lower_bound(Indices, std::make_pair(PredSU, 0), CompareKey));
       if (!PredSU->getInstr()->isPHI() && PredIndex < Index) {
         PredBefore = true;
         Pred = PredSU;
@@ -3743,9 +3742,8 @@ void SwingSchedulerDAG::checkValidNodeOrder(const NodeSetType &Circuits) const {
       // return Indices.end().
       if (SuccSU->isBoundaryNode())
         continue;
-      unsigned SuccIndex =
-          std::get<1>(*std::lower_bound(Indices.begin(), Indices.end(),
-                                        std::make_pair(SuccSU, 0), CompareKey));
+      unsigned SuccIndex = std::get<1>(
+          *llvm::lower_bound(Indices, std::make_pair(SuccSU, 0), CompareKey));
       if (!SuccSU->getInstr()->isPHI() && SuccIndex < Index) {
         SuccBefore = true;
         Succ = SuccSU;
@@ -3756,9 +3754,8 @@ void SwingSchedulerDAG::checkValidNodeOrder(const NodeSetType &Circuits) const {
     if (PredBefore && SuccBefore && !SU->getInstr()->isPHI()) {
       // instructions in circuits are allowed to be scheduled
       // after both a successor and predecessor.
-      bool InCircuit = std::any_of(
-          Circuits.begin(), Circuits.end(),
-          [SU](const NodeSet &Circuit) { return Circuit.count(SU); });
+      bool InCircuit = llvm::any_of(
+          Circuits, [SU](const NodeSet &Circuit) { return Circuit.count(SU); });
       if (InCircuit)
         LLVM_DEBUG(dbgs() << "In a circuit, predecessor ";);
       else {


@@ -532,10 +532,9 @@ void DWARFDebugFrame::parse(DWARFDataExtractor Data) {
 }

 FrameEntry *DWARFDebugFrame::getEntryAtOffset(uint64_t Offset) const {
-  auto It =
-      std::lower_bound(Entries.begin(), Entries.end(), Offset,
-                       [](const std::unique_ptr<FrameEntry> &E,
-                          uint64_t Offset) { return E->getOffset() < Offset; });
+  auto It = llvm::bsearch(Entries, [=](const std::unique_ptr<FrameEntry> &E) {
+    return Offset <= E->getOffset();
+  });
   if (It != Entries.end() && (*It)->getOffset() == Offset)
     return It->get();
   return nullptr;


@@ -463,12 +463,9 @@ DataLayout::AlignmentsTy::iterator
 DataLayout::findAlignmentLowerBound(AlignTypeEnum AlignType,
                                     uint32_t BitWidth) {
   auto Pair = std::make_pair((unsigned)AlignType, BitWidth);
-  return std::lower_bound(Alignments.begin(), Alignments.end(), Pair,
-                          [](const LayoutAlignElem &LHS,
-                             const std::pair<unsigned, uint32_t> &RHS) {
-                            return std::tie(LHS.AlignType, LHS.TypeBitWidth) <
-                                   std::tie(RHS.first, RHS.second);
-                          });
+  return llvm::bsearch(Alignments, [=](const LayoutAlignElem &E) {
+    return Pair <= std::make_pair(E.AlignType, E.TypeBitWidth);
+  });
 }

 void


@@ -533,9 +533,9 @@ static ArrayRef<const char *> findTargetSubtable(StringRef Name) {
   // Drop "llvm." and take the first dotted component. That will be the target
   // if this is target specific.
   StringRef Target = Name.drop_front(5).split('.').first;
-  auto It = std::lower_bound(Targets.begin(), Targets.end(), Target,
-                             [](const IntrinsicTargetInfo &TI,
-                                StringRef Target) { return TI.Name < Target; });
+  auto It = llvm::bsearch(Targets, [=](const IntrinsicTargetInfo &TI) {
+    return Target <= TI.Name;
+  });
   // We've either found the target or just fall back to the generic set, which
   // is always first.
   const auto &TI = It != Targets.end() && It->Name == Target ? *It : Targets[0];


@@ -24,7 +24,7 @@ using namespace llvm;
 template <typename T>
 static const T *Find(StringRef S, ArrayRef<T> A) {
   // Binary search the array
-  auto F = std::lower_bound(A.begin(), A.end(), S);
+  auto F = llvm::lower_bound(A, S);
   // If not found then return NULL
   if (F == A.end() || StringRef(F->Key) != S) return nullptr;
   // Return the found array item


@@ -364,9 +364,9 @@ Error InstrProfSymtab::create(Module &M, bool InLTO) {
 uint64_t InstrProfSymtab::getFunctionHashFromAddress(uint64_t Address) {
   finalizeSymtab();
   auto Result =
-      std::lower_bound(AddrToMD5Map.begin(), AddrToMD5Map.end(), Address,
-                       [](const std::pair<uint64_t, uint64_t> &LHS,
-                          uint64_t RHS) { return LHS.first < RHS; });
+      llvm::bsearch(AddrToMD5Map, [=](std::pair<uint64_t, uint64_t> A) {
+        return Address <= A.first;
+      });
   // Raw function pointer collected by value profiler may be from
   // external functions that are not instrumented. They won't have
   // mapping data to be used by the deserializer. Force the value to


@@ -95,14 +95,9 @@ unsigned SourceMgr::SrcBuffer::getLineNumber(const char *Ptr) const {
   assert(PtrDiff >= 0 && static_cast<size_t>(PtrDiff) <= std::numeric_limits<T>::max());
   T PtrOffset = static_cast<T>(PtrDiff);

-  // std::lower_bound returns the first EOL offset that's not-less-than
-  // PtrOffset, meaning the EOL that _ends the line_ that PtrOffset is on
-  // (including if PtrOffset refers to the EOL itself). If there's no such
-  // EOL, returns end().
-  auto EOL = std::lower_bound(Offsets->begin(), Offsets->end(), PtrOffset);
-
-  // Lines count from 1, so add 1 to the distance from the 0th line.
-  return (1 + (EOL - Offsets->begin()));
+  // llvm::lower_bound gives the number of EOL before PtrOffset. Add 1 to get
+  // the line number.
+  return llvm::lower_bound(*Offsets, PtrOffset) - Offsets->begin() + 1;
 }

 SourceMgr::SrcBuffer::SrcBuffer(SourceMgr::SrcBuffer &&Other)


@@ -875,9 +875,7 @@ void ARMConstantIslands::updateForInsertedWaterBlock(MachineBasicBlock *NewBB) {
   // Next, update WaterList. Specifically, we need to add NewMBB as having
   // available water after it.
-  water_iterator IP =
-      std::lower_bound(WaterList.begin(), WaterList.end(), NewBB,
-                       CompareMBBNumbers);
+  water_iterator IP = llvm::lower_bound(WaterList, NewBB, CompareMBBNumbers);
   WaterList.insert(IP, NewBB);
 }
@@ -928,9 +926,7 @@ MachineBasicBlock *ARMConstantIslands::splitBlockBeforeInstr(MachineInstr *MI) {
   // available water after it (but not if it's already there, which happens
   // when splitting before a conditional branch that is followed by an
   // unconditional branch - in that case we want to insert NewBB).
-  water_iterator IP =
-      std::lower_bound(WaterList.begin(), WaterList.end(), OrigBB,
-                       CompareMBBNumbers);
+  water_iterator IP = llvm::lower_bound(WaterList, OrigBB, CompareMBBNumbers);
   MachineBasicBlock* WaterBB = *IP;
   if (WaterBB == OrigBB)
     WaterList.insert(std::next(IP), NewBB);


@@ -423,8 +423,7 @@ static const NEONLdStTableEntry *LookupNEONLdSt(unsigned Opcode) {
   }
 #endif

-  auto I = std::lower_bound(std::begin(NEONLdStTable),
-                            std::end(NEONLdStTable), Opcode);
+  auto I = llvm::lower_bound(NEONLdStTable, Opcode);
   if (I != std::end(NEONLdStTable) && I->PseudoOpc == Opcode)
     return I;
   return nullptr;


@@ -436,7 +436,7 @@ namespace {
 } // end anonymous namespace

 void OrderedRegisterList::insert(unsigned VR) {
-  iterator L = std::lower_bound(Seq.begin(), Seq.end(), VR, Ord);
+  iterator L = llvm::lower_bound(Seq, VR, Ord);
   if (L == Seq.end())
     Seq.push_back(VR);
   else
@@ -449,7 +449,7 @@ void OrderedRegisterList::insert(unsigned VR) {
 }

 void OrderedRegisterList::remove(unsigned VR) {
-  iterator L = std::lower_bound(Seq.begin(), Seq.end(), VR, Ord);
+  iterator L = llvm::lower_bound(Seq, VR, Ord);
   if (L != Seq.end())
     Seq.erase(L);
 }


@@ -459,8 +459,7 @@ getOpndList(SmallVectorImpl<SDValue> &Ops,
     }
     // one more look at list of intrinsics
     const Mips16IntrinsicHelperType *Helper =
-        std::lower_bound(std::begin(Mips16IntrinsicHelper),
-                         std::end(Mips16IntrinsicHelper), IntrinsicFind);
+        llvm::lower_bound(Mips16IntrinsicHelper, IntrinsicFind);
     if (Helper != std::end(Mips16IntrinsicHelper) &&
         *Helper == IntrinsicFind) {
       Mips16HelperFunction = Helper->Helper;


@@ -841,9 +841,7 @@ void MipsConstantIslands::updateForInsertedWaterBlock
   // Next, update WaterList. Specifically, we need to add NewMBB as having
   // available water after it.
-  water_iterator IP =
-      std::lower_bound(WaterList.begin(), WaterList.end(), NewBB,
-                       CompareMBBNumbers);
+  water_iterator IP = llvm::lower_bound(WaterList, NewBB, CompareMBBNumbers);
   WaterList.insert(IP, NewBB);
 }
@@ -893,9 +891,7 @@ MipsConstantIslands::splitBlockBeforeInstr(MachineInstr &MI) {
   // available water after it (but not if it's already there, which happens
   // when splitting before a conditional branch that is followed by an
   // unconditional branch - in that case we want to insert NewBB).
-  water_iterator IP =
-      std::lower_bound(WaterList.begin(), WaterList.end(), OrigBB,
-                       CompareMBBNumbers);
+  water_iterator IP = llvm::lower_bound(WaterList, OrigBB, CompareMBBNumbers);
   MachineBasicBlock* WaterBB = *IP;
   if (WaterBB == OrigBB)
     WaterList.insert(std::next(IP), NewBB);


@@ -252,7 +252,7 @@ bool EvexToVexInstPass::CompressEvexToVexImpl(MachineInstr &MI) const {
       (Desc.TSFlags & X86II::VEX_L) ? makeArrayRef(X86EvexToVex256CompressTable)
                                     : makeArrayRef(X86EvexToVex128CompressTable);

-  auto I = std::lower_bound(Table.begin(), Table.end(), MI.getOpcode());
+  auto I = llvm::lower_bound(Table, MI.getOpcode());
   if (I == Table.end() || I->EvexOpcode != MI.getOpcode())
     return false;


@@ -596,7 +596,7 @@ namespace {
 }

 static int Lookup(ArrayRef<TableEntry> Table, unsigned Opcode) {
-  const TableEntry *I = std::lower_bound(Table.begin(), Table.end(), Opcode);
+  const TableEntry *I = llvm::lower_bound(Table, Opcode);
   if (I != Table.end() && I->from == Opcode)
     return I->to;
   return -1;


@@ -13099,11 +13099,9 @@ static SDValue lowerV8I16GeneralSingleInputShuffle(
   copy_if(HiMask, std::back_inserter(HiInputs), [](int M) { return M >= 0; });
   array_pod_sort(HiInputs.begin(), HiInputs.end());
   HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
-  int NumLToL =
-      std::lower_bound(LoInputs.begin(), LoInputs.end(), 4) - LoInputs.begin();
+  int NumLToL = llvm::lower_bound(LoInputs, 4) - LoInputs.begin();
   int NumHToL = LoInputs.size() - NumLToL;
-  int NumLToH =
-      std::lower_bound(HiInputs.begin(), HiInputs.end(), 4) - HiInputs.begin();
+  int NumLToH = llvm::lower_bound(HiInputs, 4) - HiInputs.begin();
   int NumHToH = HiInputs.size() - NumLToH;
   MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
   MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);


@@ -158,11 +158,9 @@ const X86InstrFMA3Group *llvm::getFMA3Group(unsigned Opcode, uint64_t TSFlags) {
   // FMA 231 instructions have an opcode of 0xB6-0xBF
   unsigned FormIndex = ((BaseOpcode - 0x90) >> 4) & 0x3;

-  auto I = std::lower_bound(Table.begin(), Table.end(), Opcode,
-                            [FormIndex](const X86InstrFMA3Group &Group,
-                                        unsigned Opcode) {
-                              return Group.Opcodes[FormIndex] < Opcode;
-                            });
+  auto I = llvm::bsearch(Table, [=](const X86InstrFMA3Group &Group) {
+    return Opcode <= Group.Opcodes[FormIndex];
+  });
   assert(I != Table.end() && I->Opcodes[FormIndex] == Opcode &&
          "Couldn't find FMA3 opcode!");
   return I;


@@ -5288,9 +5288,7 @@ lookupFoldTableImpl(ArrayRef<X86MemoryFoldTableEntry> Table, unsigned RegOp) {
   }
 #endif

-  const X86MemoryFoldTableEntry *Data = std::lower_bound(Table.begin(),
-                                                         Table.end(),
-                                                         RegOp);
+  const X86MemoryFoldTableEntry *Data = llvm::lower_bound(Table, RegOp);
   if (Data != Table.end() && Data->KeyOp == RegOp &&
       !(Data->Flags & TB_NO_FORWARD))
     return Data;
@@ -5377,7 +5375,7 @@ static ManagedStatic<X86MemUnfoldTable> MemUnfoldTable;
 const X86MemoryFoldTableEntry *
 llvm::lookupUnfoldTable(unsigned MemOp) {
   auto &Table = MemUnfoldTable->Table;
-  auto I = std::lower_bound(Table.begin(), Table.end(), MemOp);
+  auto I = llvm::lower_bound(Table, MemOp);
   if (I != Table.end() && I->KeyOp == MemOp)
     return &*I;
   return nullptr;


@@ -21,11 +21,9 @@ namespace MachO {
 namespace detail {
 template <typename C>
 typename C::iterator addEntry(C &Container, StringRef InstallName) {
-  auto I =
-      std::lower_bound(std::begin(Container), std::end(Container), InstallName,
-                       [](const InterfaceFileRef &LHS, const StringRef &RHS) {
-                         return LHS.getInstallName() < RHS;
-                       });
+  auto I = llvm::bsearch(Container, [=](const InterfaceFileRef &O) {
+    return InstallName <= O.getInstallName();
+  });
   if ((I != std::end(Container)) && !(InstallName < I->getInstallName()))
     return I;
@@ -46,11 +44,12 @@ void InterfaceFile::addReexportedLibrary(StringRef InstallName,
 }

 void InterfaceFile::addUUID(Architecture Arch, StringRef UUID) {
-  auto I = std::lower_bound(UUIDs.begin(), UUIDs.end(), Arch,
-                            [](const std::pair<Architecture, std::string> &LHS,
-                               Architecture RHS) { return LHS.first < RHS; });
-  if ((I != UUIDs.end()) && !(Arch < I->first)) {
+  auto I =
+      llvm::bsearch(UUIDs, [=](const std::pair<Architecture, std::string> &O) {
+        return Arch <= O.first;
+      });
+  if (I != UUIDs.end() && Arch == I->first) {
     I->second = UUID;
     return;
   }


@@ -52,7 +52,7 @@ public:
   }

   size_t blockToIndex(BasicBlock *BB) const {
-    auto *I = std::lower_bound(V.begin(), V.end(), BB);
+    auto *I = llvm::lower_bound(V, BB);
     assert(I != V.end() && *I == BB && "BasicBlockNumberng: Unknown block");
     return I - V.begin();
   }


@@ -1480,8 +1480,7 @@ bool JumpThreadingPass::SimplifyPartiallyRedundantLoad(LoadInst *LoadI) {
   for (pred_iterator PI = PB; PI != PE; ++PI) {
     BasicBlock *P = *PI;
     AvailablePredsTy::iterator I =
-        std::lower_bound(AvailablePreds.begin(), AvailablePreds.end(),
-                         std::make_pair(P, (Value*)nullptr));
+        llvm::lower_bound(AvailablePreds, std::make_pair(P, (Value *)nullptr));

     assert(I != AvailablePreds.end() && I->first == P &&
            "Didn't find entry for predecessor!");


@@ -278,8 +278,8 @@ void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
                             unsigned Alignment, Instruction *Inst) {
   int64_t End = Start+Size;

-  range_iterator I = std::lower_bound(Ranges.begin(), Ranges.end(), Start,
-    [](const MemsetRange &LHS, int64_t RHS) { return LHS.End < RHS; });
+  range_iterator I = llvm::bsearch(
+      Ranges, [=](const MemsetRange &O) { return Start <= O.End; });

   // We now know that I == E, in which case we didn't find anything to merge
   // with, or that Start <= I->End. If End < I->Start or I == E, then we need


@@ -1826,7 +1826,7 @@ Value *ReassociatePass::OptimizeMul(BinaryOperator *I,
     return V;

   ValueEntry NewEntry = ValueEntry(getRank(V), V);
-  Ops.insert(std::lower_bound(Ops.begin(), Ops.end(), NewEntry), NewEntry);
+  Ops.insert(llvm::lower_bound(Ops, NewEntry), NewEntry);
   return nullptr;
 }


@@ -1766,18 +1766,16 @@ static void insertLineSequence(std::vector<DWARFDebugLine::Row> &Seq,
     return;
   }

-  auto InsertPoint = std::lower_bound(
-      Rows.begin(), Rows.end(), Seq.front(),
-      [](const DWARFDebugLine::Row &LHS, const DWARFDebugLine::Row &RHS) {
-        return LHS.Address < RHS.Address;
-      });
+  object::SectionedAddress Front = Seq.front().Address;
+  auto InsertPoint = llvm::bsearch(
+      Rows, [=](const DWARFDebugLine::Row &O) { return !(O.Address < Front); });

   // FIXME: this only removes the unneeded end_sequence if the
   // sequences have been inserted in order. Using a global sort like
   // described in patchLineTableForUnit() and delaying the end_sequene
   // elimination to emitLineTableForUnit() we can get rid of all of them.
-  if (InsertPoint != Rows.end() &&
-      InsertPoint->Address == Seq.front().Address && InsertPoint->EndSequence) {
+  if (InsertPoint != Rows.end() && InsertPoint->Address == Front &&
+      InsertPoint->EndSequence) {
     *InsertPoint = Seq.front();
     Rows.insert(InsertPoint + 1, Seq.begin() + 1, Seq.end());
   } else {


@@ -633,10 +633,8 @@ public:
                          Top->ExtraData.TerminalDurations.end(), 0uLL);
       {
         auto E = std::make_pair(Top, TopSum);
-        TopStacksBySum.insert(std::lower_bound(TopStacksBySum.begin(),
-                                               TopStacksBySum.end(), E,
-                                               greater_second),
-                              E);
+        TopStacksBySum.insert(
+            llvm::lower_bound(TopStacksBySum, E, greater_second), E);
         if (TopStacksBySum.size() == 11)
           TopStacksBySum.pop_back();
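
A few of the hunks above (the WaterList, OrderedRegisterList, and TopStacksBySum changes) keep their custom comparator and only drop the explicit begin()/end() pair. Below is a standalone sketch of that range-with-comparator form, again using plain STL and made-up data rather than the LLVM helpers; it assumes llvm::lower_bound(Range, Value, Comp) is a thin wrapper over the iterator-pair call shown here.

// Standalone illustration only; greater_second and the pair vector are
// stand-ins, not the llvm-xray data structures.
#include <algorithm>
#include <cassert>
#include <utility>
#include <vector>

int main() {
  // Sorted by the second element, descending, like TopStacksBySum above.
  auto greater_second = [](const std::pair<int, int> &A,
                           const std::pair<int, int> &B) {
    return A.second > B.second;
  };
  std::vector<std::pair<int, int>> TopStacksBySum = {{1, 90}, {2, 50}, {3, 10}};
  auto E = std::make_pair(4, 70);

  // The range form llvm::lower_bound(TopStacksBySum, E, greater_second)
  // expands to the iterator-pair call below; the comparator is unchanged.
  auto I = std::lower_bound(TopStacksBySum.begin(), TopStacksBySum.end(), E,
                            greater_second);
  TopStacksBySum.insert(I, E);

  assert(TopStacksBySum[1] == E); // Inserted between 90 and 50.
  return 0;
}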
} }