diff --git a/lib/Bitcode/Writer/ValueEnumerator.cpp b/lib/Bitcode/Writer/ValueEnumerator.cpp
index a1641043b22..9e7b12a93dc 100644
--- a/lib/Bitcode/Writer/ValueEnumerator.cpp
+++ b/lib/Bitcode/Writer/ValueEnumerator.cpp
@@ -173,29 +173,19 @@ void ValueEnumerator::print(raw_ostream &OS, const ValueMapType &Map,
   }
 }
 
-// Optimize constant ordering.
-namespace {
-  struct CstSortPredicate {
-    ValueEnumerator &VE;
-    explicit CstSortPredicate(ValueEnumerator &ve) : VE(ve) {}
-    bool operator()(const std::pair<const Value*, unsigned> &LHS,
-                    const std::pair<const Value*, unsigned> &RHS) {
-      // Sort by plane.
-      if (LHS.first->getType() != RHS.first->getType())
-        return VE.getTypeID(LHS.first->getType()) <
-               VE.getTypeID(RHS.first->getType());
-      // Then by frequency.
-      return LHS.second > RHS.second;
-    }
-  };
-}
-
 /// OptimizeConstants - Reorder constant pool for denser encoding.
 void ValueEnumerator::OptimizeConstants(unsigned CstStart, unsigned CstEnd) {
   if (CstStart == CstEnd || CstStart+1 == CstEnd) return;
-  CstSortPredicate P(*this);
-  std::stable_sort(Values.begin()+CstStart, Values.begin()+CstEnd, P);
+  std::stable_sort(Values.begin() + CstStart, Values.begin() + CstEnd,
+                   [this](const std::pair<const Value *, unsigned> &LHS,
+                          const std::pair<const Value *, unsigned> &RHS) {
+    // Sort by plane.
+    if (LHS.first->getType() != RHS.first->getType())
+      return getTypeID(LHS.first->getType()) < getTypeID(RHS.first->getType());
+    // Then by frequency.
+    return LHS.second > RHS.second;
+  });
 
   // Ensure that integer and vector of integer constants are at the start of the
   // constant pool.  This is important so that GEP structure indices come before
diff --git a/lib/CodeGen/MachineBlockPlacement.cpp b/lib/CodeGen/MachineBlockPlacement.cpp
index 760033fff13..10fbd1ab123 100644
--- a/lib/CodeGen/MachineBlockPlacement.cpp
+++ b/lib/CodeGen/MachineBlockPlacement.cpp
@@ -409,23 +409,6 @@ MachineBasicBlock *MachineBlockPlacement::selectBestSuccessor(
   return BestSucc;
 }
 
-namespace {
-/// \brief Predicate struct to detect blocks already placed.
-class IsBlockPlaced {
-  const BlockChain &PlacedChain;
-  const BlockToChainMapType &BlockToChain;
-
-public:
-  IsBlockPlaced(const BlockChain &PlacedChain,
-                const BlockToChainMapType &BlockToChain)
-      : PlacedChain(PlacedChain), BlockToChain(BlockToChain) {}
-
-  bool operator()(MachineBasicBlock *BB) const {
-    return BlockToChain.lookup(BB) == &PlacedChain;
-  }
-};
-}
-
 /// \brief Select the best block from a worklist.
 ///
 /// This looks through the provided worklist as a list of candidate basic
@@ -444,7 +427,9 @@ MachineBasicBlock *MachineBlockPlacement::selectBestCandidateBlock(
   // FIXME: If this shows up on profiles, it could be folded (at the cost of
   // some code complexity) into the loop below.
   WorkList.erase(std::remove_if(WorkList.begin(), WorkList.end(),
-                                IsBlockPlaced(Chain, BlockToChain)),
+                                [&](MachineBasicBlock *BB) {
+                                  return BlockToChain.lookup(BB) == &Chain;
+                                }),
                  WorkList.end());
 
   MachineBasicBlock *BestBlock = 0;
diff --git a/lib/CodeGen/MachineSink.cpp b/lib/CodeGen/MachineSink.cpp
index 105d7c2cde5..df38dfcbce9 100644
--- a/lib/CodeGen/MachineSink.cpp
+++ b/lib/CodeGen/MachineSink.cpp
@@ -98,16 +98,6 @@ namespace {
     bool PerformTrivialForwardCoalescing(MachineInstr *MI,
                                          MachineBasicBlock *MBB);
   };
-
-  // SuccessorSorter - Sort Successors according to their loop depth.
-  struct SuccessorSorter {
-    SuccessorSorter(MachineLoopInfo *LoopInfo) : LI(LoopInfo) {}
-    bool operator()(const MachineBasicBlock *LHS,
-                    const MachineBasicBlock *RHS) const {
-      return LI->getLoopDepth(LHS) < LI->getLoopDepth(RHS);
-    }
-    MachineLoopInfo *LI;
-  };
 } // end anonymous namespace
 
 char MachineSinking::ID = 0;
@@ -553,7 +543,12 @@ MachineBasicBlock *MachineSinking::FindSuccToSinkTo(MachineInstr *MI,
   // we should sink to.
   // We give successors with smaller loop depth higher priority.
   SmallVector<MachineBasicBlock *, 4> Succs(MBB->succ_begin(), MBB->succ_end());
-  std::stable_sort(Succs.begin(), Succs.end(), SuccessorSorter(LI));
+  // Sort Successors according to their loop depth.
+  std::stable_sort(
+      Succs.begin(), Succs.end(),
+      [this](const MachineBasicBlock *LHS, const MachineBasicBlock *RHS) {
+        return LI->getLoopDepth(LHS) < LI->getLoopDepth(RHS);
+      });
   for (SmallVectorImpl<MachineBasicBlock *>::iterator SI = Succs.begin(),
          E = Succs.end(); SI != E; ++SI) {
     MachineBasicBlock *SuccBlock = *SI;
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 50578502389..4f67da66542 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -8197,14 +8197,6 @@ struct LoadedSlice {
 };
 }
 
-/// \brief Sorts LoadedSlice according to their offset.
-struct LoadedSliceSorter {
-  bool operator()(const LoadedSlice &LHS, const LoadedSlice &RHS) {
-    assert(LHS.Origin == RHS.Origin && "Different bases not implemented.");
-    return LHS.getOffsetFromBase() < RHS.getOffsetFromBase();
-  }
-};
-
 /// \brief Check that all bits set in \p UsedBits form a dense region, i.e.,
 /// \p UsedBits looks like 0..0 1..1 0..0.
 static bool areUsedBitsDense(const APInt &UsedBits) {
@@ -8248,7 +8240,11 @@ static void adjustCostForPairing(SmallVectorImpl<LoadedSlice> &LoadedSlices,
 
   // Sort the slices so that elements that are likely to be next to each
   // other in memory are next to each other in the list.
-  std::sort(LoadedSlices.begin(), LoadedSlices.end(), LoadedSliceSorter());
+  std::sort(LoadedSlices.begin(), LoadedSlices.end(),
+            [](const LoadedSlice &LHS, const LoadedSlice &RHS) {
+    assert(LHS.Origin == RHS.Origin && "Different bases not implemented.");
+    return LHS.getOffsetFromBase() < RHS.getOffsetFromBase();
+  });
   const TargetLowering &TLI = LoadedSlices[0].DAG->getTargetLoweringInfo();
   // First (resp. Second) is the first (resp. Second) potentially candidate
   // to be placed in a paired load.
@@ -8852,17 +8848,6 @@ struct MemOpLink {
   unsigned SequenceNum;
 };
 
-/// Sorts store nodes in a link according to their offset from a shared
-// base ptr.
-struct ConsecutiveMemoryChainSorter {
-  bool operator()(MemOpLink LHS, MemOpLink RHS) {
-    return
-        LHS.OffsetFromBase < RHS.OffsetFromBase ||
-        (LHS.OffsetFromBase == RHS.OffsetFromBase &&
-         LHS.SequenceNum > RHS.SequenceNum);
-  }
-};
-
 bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
   EVT MemVT = St->getMemoryVT();
   int64_t ElementSizeBytes = MemVT.getSizeInBits()/8;
@@ -8981,7 +8966,11 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
 
   // Sort the memory operands according to their distance from the base pointer.
   std::sort(StoreNodes.begin(), StoreNodes.end(),
-            ConsecutiveMemoryChainSorter());
+            [](MemOpLink LHS, MemOpLink RHS) {
+    return LHS.OffsetFromBase < RHS.OffsetFromBase ||
+           (LHS.OffsetFromBase == RHS.OffsetFromBase &&
+            LHS.SequenceNum > RHS.SequenceNum);
+  });
 
   // Scan the memory operations on the chain and find the first non-consecutive
   // store memory address.
diff --git a/lib/CodeGen/StackColoring.cpp b/lib/CodeGen/StackColoring.cpp
index fc38f9cc7b8..2e8070f4c1b 100644
--- a/lib/CodeGen/StackColoring.cpp
+++ b/lib/CodeGen/StackColoring.cpp
@@ -125,20 +125,6 @@ class StackColoring : public MachineFunctionPass {
   /// once the coloring is done.
   SmallVector<MachineInstr*, 8> Markers;
 
-  /// SlotSizeSorter - A Sort utility for arranging stack slots according
-  /// to their size.
-  struct SlotSizeSorter {
-    MachineFrameInfo *MFI;
-    SlotSizeSorter(MachineFrameInfo *mfi) : MFI(mfi) { }
-    bool operator()(int LHS, int RHS) {
-      // We use -1 to denote a uninteresting slot. Place these slots at the end.
-      if (LHS == -1) return false;
-      if (RHS == -1) return true;
-      // Sort according to size.
-      return MFI->getObjectSize(LHS) > MFI->getObjectSize(RHS);
-    }
-  };
-
 public:
   static char ID;
   StackColoring() : MachineFunctionPass(ID) {
@@ -767,7 +753,13 @@ bool StackColoring::runOnMachineFunction(MachineFunction &Func) {
   // Sort the slots according to their size. Place unused slots at the end.
   // Use stable sort to guarantee deterministic code generation.
   std::stable_sort(SortedSlots.begin(), SortedSlots.end(),
-                   SlotSizeSorter(MFI));
+                   [this](int LHS, int RHS) {
+    // We use -1 to denote a uninteresting slot. Place these slots at the end.
+    if (LHS == -1) return false;
+    if (RHS == -1) return true;
+    // Sort according to size.
+    return MFI->getObjectSize(LHS) > MFI->getObjectSize(RHS);
+  });
 
   bool Changed = true;
   while (Changed) {
diff --git a/lib/Support/CommandLine.cpp b/lib/Support/CommandLine.cpp
index 9e807265878..e94206ee3d7 100644
--- a/lib/Support/CommandLine.cpp
+++ b/lib/Support/CommandLine.cpp
@@ -125,20 +125,12 @@ static ManagedStatic<OptionCatSet> RegisteredOptionCategories;
 // Initialise the general option category.
 OptionCategory llvm::cl::GeneralCategory("General options");
 
-struct HasName {
-  HasName(StringRef Name) : Name(Name) {}
-  bool operator()(const OptionCategory *Category) const {
-    return Name == Category->getName();
-  }
-  StringRef Name;
-};
-
-void OptionCategory::registerCategory()
-{
+void OptionCategory::registerCategory() {
   assert(std::count_if(RegisteredOptionCategories->begin(),
                        RegisteredOptionCategories->end(),
-                       HasName(getName())) == 0 &&
-         "Duplicate option categories");
+                       [this](const OptionCategory *Category) {
+                         return getName() == Category->getName();
+                       }) == 0 &&
+         "Duplicate option categories");
 
   RegisteredOptionCategories->insert(this);
 }
diff --git a/lib/Support/Statistic.cpp b/lib/Support/Statistic.cpp
index 9c28176b730..56c3b0f5659 100644
--- a/lib/Support/Statistic.cpp
+++ b/lib/Support/Statistic.cpp
@@ -84,20 +84,6 @@ void Statistic::RegisterStatistic() {
   }
 }
 
-namespace {
-
-struct NameCompare {
-  bool operator()(const Statistic *LHS, const Statistic *RHS) const {
-    int Cmp = std::strcmp(LHS->getName(), RHS->getName());
-    if (Cmp != 0) return Cmp < 0;
-
-    // Secondary key is the description.
-    return std::strcmp(LHS->getDesc(), RHS->getDesc()) < 0;
-  }
-};
-
-}
-
 // Print information when destroyed, iff command line option is specified.
 StatisticInfo::~StatisticInfo() {
   llvm::PrintStatistics();
@@ -124,7 +110,14 @@ void llvm::PrintStatistics(raw_ostream &OS) {
   }
 
   // Sort the fields by name.
-  std::stable_sort(Stats.Stats.begin(), Stats.Stats.end(), NameCompare());
+  std::stable_sort(Stats.Stats.begin(), Stats.Stats.end(),
+                   [](const Statistic *LHS, const Statistic *RHS) {
+    if (int Cmp = std::strcmp(LHS->getName(), RHS->getName()))
+      return Cmp < 0;
+
+    // Secondary key is the description.
+    return std::strcmp(LHS->getDesc(), RHS->getDesc()) < 0;
+  });
 
   // Print out the statistics header...
   OS << "===" << std::string(73, '-') << "===\n"
diff --git a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index 61596d54b69..5edce8ceea8 100644
--- a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -1724,17 +1724,6 @@ ARMPreAllocLoadStoreOpt::CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1,
   return true;
 }
 
-namespace {
-  struct OffsetCompare {
-    bool operator()(const MachineInstr *LHS, const MachineInstr *RHS) const {
-      int LOffset = getMemoryOpOffset(LHS);
-      int ROffset = getMemoryOpOffset(RHS);
-      assert(LHS == RHS || LOffset != ROffset);
-      return LOffset > ROffset;
-    }
-  };
-}
-
 bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
                                             SmallVectorImpl<MachineInstr *> &Ops,
                                             unsigned Base, bool isLd,
@@ -1742,7 +1731,13 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
   bool RetVal = false;
 
   // Sort by offset (in reverse order).
-  std::sort(Ops.begin(), Ops.end(), OffsetCompare());
+  std::sort(Ops.begin(), Ops.end(),
+            [](const MachineInstr *LHS, const MachineInstr *RHS) {
+    int LOffset = getMemoryOpOffset(LHS);
+    int ROffset = getMemoryOpOffset(RHS);
+    assert(LHS == RHS || LOffset != ROffset);
+    return LOffset > ROffset;
+  });
 
   // The loads / stores of the same base are in order. Scan them from first to
   // last and check for the following:
diff --git a/lib/Target/Mips/MipsDelaySlotFiller.cpp b/lib/Target/Mips/MipsDelaySlotFiller.cpp
index ffbd83b1bbb..6c5ea4c810a 100644
--- a/lib/Target/Mips/MipsDelaySlotFiller.cpp
+++ b/lib/Target/Mips/MipsDelaySlotFiller.cpp
@@ -65,20 +65,6 @@ namespace {
   typedef MachineBasicBlock::reverse_iterator ReverseIter;
   typedef SmallDenseMap<MachineBasicBlock*, MachineInstr*, 2> BB2BrMap;
 
-  /// \brief A functor comparing edge weight of two blocks.
-  struct CmpWeight {
-    CmpWeight(const MachineBasicBlock &S,
-              const MachineBranchProbabilityInfo &P) : Src(S), Prob(P) {}
-
-    bool operator()(const MachineBasicBlock *Dst0,
-                    const MachineBasicBlock *Dst1) const {
-      return Prob.getEdgeWeight(&Src, Dst0) < Prob.getEdgeWeight(&Src, Dst1);
-    }
-
-    const MachineBasicBlock &Src;
-    const MachineBranchProbabilityInfo &Prob;
-  };
-
   class RegDefsUses {
   public:
     RegDefsUses(TargetMachine &TM);
@@ -640,8 +626,12 @@ MachineBasicBlock *Filler::selectSuccBB(MachineBasicBlock &B) const {
     return NULL;
 
   // Select the successor with the larget edge weight.
-  CmpWeight Cmp(B, getAnalysis<MachineBranchProbabilityInfo>());
-  MachineBasicBlock *S = *std::max_element(B.succ_begin(), B.succ_end(), Cmp);
+  auto &Prob = getAnalysis<MachineBranchProbabilityInfo>();
+  MachineBasicBlock *S = *std::max_element(B.succ_begin(), B.succ_end(),
+                                           [&](const MachineBasicBlock *Dst0,
+                                               const MachineBasicBlock *Dst1) {
+    return Prob.getEdgeWeight(&B, Dst0) < Prob.getEdgeWeight(&B, Dst1);
+  });
   return S->isLandingPad() ? NULL : S;
 }
diff --git a/lib/Transforms/Instrumentation/GCOVProfiling.cpp b/lib/Transforms/Instrumentation/GCOVProfiling.cpp
index 8a3d5ea2db9..557ef66dfdf 100644
--- a/lib/Transforms/Instrumentation/GCOVProfiling.cpp
+++ b/lib/Transforms/Instrumentation/GCOVProfiling.cpp
@@ -239,14 +239,6 @@ namespace {
   };
 
-  // Sorting function for deterministic behaviour in GCOVBlock::writeOut.
-  struct StringKeySort {
-    bool operator()(StringMapEntry<GCOVLines> *LHS,
-                    StringMapEntry<GCOVLines> *RHS) const {
-      return LHS->getKey() < RHS->getKey();
-    }
-  };
-
   // Represent a basic block in GCOV. Each block has a unique number in the
   // function, number of lines belonging to each block, and a set of edges to
   // other blocks.
@@ -277,8 +269,11 @@ namespace {
       write(Len);
       write(Number);
 
-      StringKeySort Sorter;
-      std::sort(SortedLinesByFile.begin(), SortedLinesByFile.end(), Sorter);
+      std::sort(SortedLinesByFile.begin(), SortedLinesByFile.end(),
+                [](StringMapEntry<GCOVLines> *LHS,
+                   StringMapEntry<GCOVLines> *RHS) {
+        return LHS->getKey() < RHS->getKey();
+      });
       for (SmallVectorImpl<StringMapEntry<GCOVLines> *>::iterator
                I = SortedLinesByFile.begin(), E = SortedLinesByFile.end();
            I != E; ++I)
diff --git a/lib/Transforms/Scalar/DeadStoreElimination.cpp b/lib/Transforms/Scalar/DeadStoreElimination.cpp
index ece278f868c..57bd53f3bbd 100644
--- a/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -702,22 +702,6 @@ bool DSE::HandleFree(CallInst *F) {
   return MadeChange;
 }
 
-namespace {
-  struct CouldRef {
-    typedef Value *argument_type;
-    const CallSite CS;
-    AliasAnalysis *AA;
-
-    bool operator()(Value *I) {
-      // See if the call site touches the value.
-      AliasAnalysis::ModRefResult A =
-          AA->getModRefInfo(CS, I, getPointerSize(I, *AA));
-
-      return A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref;
-    }
-  };
-}
-
 /// handleEndBlock - Remove dead stores to stack-allocated locations in the
 /// function end block.  Ex:
 /// %A = alloca i32
@@ -819,7 +803,13 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
 
       // If the call might load from any of our allocas, then any store above
      // the call is live.
-      CouldRef Pred = { CS, AA };
+      std::function<bool(Value *)> Pred = [&](Value *I) {
+        // See if the call site touches the value.
+        AliasAnalysis::ModRefResult A =
+            AA->getModRefInfo(CS, I, getPointerSize(I, *AA));
+
+        return A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref;
+      };
       DeadStackObjects.remove_if(Pred);
 
       // If all of the allocas were clobbered by the call then we're not going
@@ -863,20 +853,6 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
   return MadeChange;
 }
 
-namespace {
-  struct CouldAlias {
-    typedef Value *argument_type;
-    const AliasAnalysis::Location &LoadedLoc;
-    AliasAnalysis *AA;
-
-    bool operator()(Value *I) {
-      // See if the loaded location could alias the stack location.
-      AliasAnalysis::Location StackLoc(I, getPointerSize(I, *AA));
-      return !AA->isNoAlias(StackLoc, LoadedLoc);
-    }
-  };
-}
-
 /// RemoveAccessedObjects - Check to see if the specified location may alias any
 /// of the stack objects in the DeadStackObjects set. If so, they become live
 /// because the location is being loaded.
@@ -896,6 +872,10 @@ void DSE::RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
   }
 
   // Remove objects that could alias LoadedLoc.
-  CouldAlias Pred = { LoadedLoc, AA };
+  std::function<bool(Value *)> Pred = [&](Value *I) {
+    // See if the loaded location could alias the stack location.
+    AliasAnalysis::Location StackLoc(I, getPointerSize(I, *AA));
+    return !AA->isNoAlias(StackLoc, LoadedLoc);
+  };
   DeadStackObjects.remove_if(Pred);
 }
diff --git a/lib/Transforms/Scalar/GlobalMerge.cpp b/lib/Transforms/Scalar/GlobalMerge.cpp
index 161e35a648b..6928c281b81 100644
--- a/lib/Transforms/Scalar/GlobalMerge.cpp
+++ b/lib/Transforms/Scalar/GlobalMerge.cpp
@@ -124,19 +124,6 @@ namespace {
       AU.setPreservesCFG();
       FunctionPass::getAnalysisUsage(AU);
     }
-
-    struct GlobalCmp {
-      const DataLayout *DL;
-
-      GlobalCmp(const DataLayout *DL) : DL(DL) { }
-
-      bool operator()(const GlobalVariable *GV1, const GlobalVariable *GV2) {
-        Type *Ty1 = cast<PointerType>(GV1->getType())->getElementType();
-        Type *Ty2 = cast<PointerType>(GV2->getType())->getElementType();
-
-        return (DL->getTypeAllocSize(Ty1) < DL->getTypeAllocSize(Ty2));
-      }
-    };
   };
 } // end anonymous namespace
 
@@ -156,7 +143,13 @@ bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
   unsigned MaxOffset = TLI->getMaximalGlobalOffset();
 
   // FIXME: Find better heuristics
-  std::stable_sort(Globals.begin(), Globals.end(), GlobalCmp(DL));
+  std::stable_sort(Globals.begin(), Globals.end(),
+                   [DL](const GlobalVariable *GV1, const GlobalVariable *GV2) {
+    Type *Ty1 = cast<PointerType>(GV1->getType())->getElementType();
+    Type *Ty2 = cast<PointerType>(GV2->getType())->getElementType();
+
+    return (DL->getTypeAllocSize(Ty1) < DL->getTypeAllocSize(Ty2));
+  });
 
   Type *Int32Ty = Type::getInt32Ty(M.getContext());
diff --git a/lib/Transforms/Scalar/Reassociate.cpp b/lib/Transforms/Scalar/Reassociate.cpp
index e6d7abe3eb3..b6535a735f0 100644
--- a/lib/Transforms/Scalar/Reassociate.cpp
+++ b/lib/Transforms/Scalar/Reassociate.cpp
@@ -1547,19 +1547,6 @@ Value *Reassociate::OptimizeAdd(Instruction *I,
   return 0;
 }
 
-namespace {
-  /// \brief Predicate tests whether a ValueEntry's op is in a map.
-  struct IsValueInMap {
-    const DenseMap<Value *, unsigned> &Map;
-
-    IsValueInMap(const DenseMap<Value *, unsigned> &Map) : Map(Map) {}
-
-    bool operator()(const ValueEntry &Entry) {
-      return Map.find(Entry.Op) != Map.end();
-    }
-  };
-}
-
 /// \brief Build up a vector of value/power pairs factoring a product.
 ///
 /// Given a series of multiplication operands, build a vector of factors and
diff --git a/lib/Transforms/Scalar/SROA.cpp b/lib/Transforms/Scalar/SROA.cpp
index a229107703c..3a0fcc80d2c 100644
--- a/lib/Transforms/Scalar/SROA.cpp
+++ b/lib/Transforms/Scalar/SROA.cpp
@@ -3255,18 +3255,6 @@ bool SROA::rewritePartition(AllocaInst &AI, AllocaSlices &S,
   return true;
 }
 
-namespace {
-struct IsSliceEndLessOrEqualTo {
-  uint64_t UpperBound;
-
-  IsSliceEndLessOrEqualTo(uint64_t UpperBound) : UpperBound(UpperBound) {}
-
-  bool operator()(const AllocaSlices::iterator &I) {
-    return I->endOffset() <= UpperBound;
-  }
-};
-}
-
 static void
 removeFinishedSplitUses(SmallVectorImpl<AllocaSlices::iterator> &SplitUses,
                         uint64_t &MaxSplitUseEndOffset, uint64_t Offset) {
@@ -3278,7 +3266,9 @@ removeFinishedSplitUses(SmallVectorImpl<AllocaSlices::iterator> &SplitUses,
   size_t SplitUsesOldSize = SplitUses.size();
   SplitUses.erase(std::remove_if(SplitUses.begin(), SplitUses.end(),
-                                 IsSliceEndLessOrEqualTo(Offset)),
+                                 [Offset](const AllocaSlices::iterator &I) {
+                                   return I->endOffset() <= Offset;
+                                 }),
                   SplitUses.end());
   if (SplitUsesOldSize == SplitUses.size())
     return;
@@ -3616,20 +3606,6 @@ bool SROA::promoteAllocas(Function &F) {
   return true;
 }
 
-namespace {
-  /// \brief A predicate to test whether an alloca belongs to a set.
-  class IsAllocaInSet {
-    typedef SmallPtrSet<AllocaInst *, 4> SetType;
-    const SetType &Set;
-
-  public:
-    typedef AllocaInst *argument_type;
-
-    IsAllocaInSet(const SetType &Set) : Set(Set) {}
-    bool operator()(AllocaInst *AI) const { return Set.count(AI); }
-  };
-}
-
 bool SROA::runOnFunction(Function &F) {
   if (skipOptnoneFunction(F))
     return false;
@@ -3665,11 +3641,14 @@ bool SROA::runOnFunction(Function &F) {
     // Remove the deleted allocas from various lists so that we don't try to
     // continue processing them.
     if (!DeletedAllocas.empty()) {
-      Worklist.remove_if(IsAllocaInSet(DeletedAllocas));
-      PostPromotionWorklist.remove_if(IsAllocaInSet(DeletedAllocas));
+      std::function<bool(AllocaInst *)> IsInSet = [&](AllocaInst *AI) {
+        return DeletedAllocas.count(AI);
+      };
+      Worklist.remove_if(IsInSet);
+      PostPromotionWorklist.remove_if(IsInSet);
       PromotableAllocas.erase(std::remove_if(PromotableAllocas.begin(),
                                              PromotableAllocas.end(),
-                                             IsAllocaInSet(DeletedAllocas)),
+                                             IsInSet),
                               PromotableAllocas.end());
       DeletedAllocas.clear();
     }
diff --git a/lib/Transforms/Vectorize/SLPVectorizer.cpp b/lib/Transforms/Vectorize/SLPVectorizer.cpp
index e8997a47049..951f6ca218d 100644
--- a/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -1682,16 +1682,6 @@ Value *BoUpSLP::vectorizeTree() {
   return VectorizableTree[0].VectorizedValue;
 }
 
-class DTCmp {
-  const DominatorTree *DT;
-
-public:
-  DTCmp(const DominatorTree *DT) : DT(DT) {}
-  bool operator()(const BasicBlock *A, const BasicBlock *B) const {
-    return DT->properlyDominates(A, B);
-  }
-};
-
 void BoUpSLP::optimizeGatherSequence() {
   DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size()
         << " gather sequences instructions.\n");
@@ -1730,7 +1720,10 @@ void BoUpSLP::optimizeGatherSequence() {
   // Sort blocks by domination. This ensures we visit a block after all blocks
   // dominating it are visited.
   SmallVector<BasicBlock *, 8> CSEWorkList(CSEBlocks.begin(), CSEBlocks.end());
-  std::stable_sort(CSEWorkList.begin(), CSEWorkList.end(), DTCmp(DT));
+  std::stable_sort(CSEWorkList.begin(), CSEWorkList.end(),
+                   [this](const BasicBlock *A, const BasicBlock *B) {
+    return DT->properlyDominates(A, B);
+  });
 
   // Perform O(N^2) search over the gather sequences and merge identical
   // instructions. TODO: We can further optimize this scan if we split the
diff --git a/tools/llvm-ar/llvm-ar.cpp b/tools/llvm-ar/llvm-ar.cpp
index 7db4b8308ea..f30eb082085 100644
--- a/tools/llvm-ar/llvm-ar.cpp
+++ b/tools/llvm-ar/llvm-ar.cpp
@@ -475,16 +475,6 @@ void addMember(std::vector<NewArchiveIterator> &Members, T I, StringRef Name,
   Members[Pos] = NI;
 }
 
-namespace {
-class HasName {
-  StringRef Name;
-
-public:
-  HasName(StringRef Name) : Name(Name) {}
-  bool operator()(StringRef Path) { return Name == sys::path::filename(Path); }
-};
-}
-
 enum InsertAction {
   IA_AddOldMember,
   IA_AddNewMeber,
@@ -500,8 +490,9 @@ computeInsertAction(ArchiveOperation Operation,
   if (Operation == QuickAppend || Members.empty())
     return IA_AddOldMember;
 
-  std::vector<StringRef>::iterator MI =
-      std::find_if(Members.begin(), Members.end(), HasName(Name));
+  std::vector<StringRef>::iterator MI = std::find_if(
+      Members.begin(), Members.end(),
+      [Name](StringRef Path) { return Name == sys::path::filename(Path); });
 
   if (MI == Members.end())
     return IA_AddOldMember;
diff --git a/utils/TableGen/AsmMatcherEmitter.cpp b/utils/TableGen/AsmMatcherEmitter.cpp
index de24cde4ba7..cf4580f659a 100644
--- a/utils/TableGen/AsmMatcherEmitter.cpp
+++ b/utils/TableGen/AsmMatcherEmitter.cpp
@@ -288,15 +288,6 @@ public:
   }
 };
 
-namespace {
-/// Sort ClassInfo pointers independently of pointer value.
-struct LessClassInfoPtr {
-  bool operator()(const ClassInfo *LHS, const ClassInfo *RHS) const {
-    return *LHS < *RHS;
-  }
-};
-}
-
 /// MatchableInfo - Helper class for storing the necessary information for an
 /// instruction or alias which is capable of being matched.
 struct MatchableInfo {
@@ -1288,7 +1279,7 @@ void AsmMatcherInfo::buildOperandMatchInfo() {
 
   /// Map containing a mask with all operands indices that can be found for
   /// that class inside a instruction.
-  typedef std::map<ClassInfo *, unsigned, LessClassInfoPtr> OpClassMaskTy;
+  typedef std::map<ClassInfo *, unsigned, less_ptr<ClassInfo>> OpClassMaskTy;
   OpClassMaskTy OpClassMask;
 
   for (std::vector<MatchableInfo*>::const_iterator it =
diff --git a/utils/TableGen/CodeGenRegisters.cpp b/utils/TableGen/CodeGenRegisters.cpp
index f491d57626d..34f5ca01834 100644
--- a/utils/TableGen/CodeGenRegisters.cpp
+++ b/utils/TableGen/CodeGenRegisters.cpp
@@ -1705,16 +1705,6 @@ void CodeGenRegBank::computeRegUnitSets() {
   }
 }
 
-struct LessUnits {
-  const CodeGenRegBank &RegBank;
-  LessUnits(const CodeGenRegBank &RB): RegBank(RB) {}
-
-  bool operator()(unsigned ID1, unsigned ID2) {
-    return RegBank.getRegPressureSet(ID1).Units.size()
-             < RegBank.getRegPressureSet(ID2).Units.size();
-  }
-};
-
 void CodeGenRegBank::computeDerivedInfo() {
   computeComposites();
   computeSubRegIndexLaneMasks();
@@ -1737,7 +1727,10 @@ void CodeGenRegBank::computeDerivedInfo() {
     RegUnitSetOrder.push_back(Idx);
 
   std::stable_sort(RegUnitSetOrder.begin(), RegUnitSetOrder.end(),
-                   LessUnits(*this));
+                   [this](unsigned ID1, unsigned ID2) {
+    return getRegPressureSet(ID1).Units.size() <
+           getRegPressureSet(ID2).Units.size();
+  });
   for (unsigned Idx = 0, EndIdx = RegUnitSets.size(); Idx != EndIdx; ++Idx) {
     RegUnitSets[RegUnitSetOrder[Idx]].Order = Idx;
   }
diff --git a/utils/TableGen/CodeGenTarget.cpp b/utils/TableGen/CodeGenTarget.cpp
index 3ba49c70b4d..92b3f05a28e 100644
--- a/utils/TableGen/CodeGenTarget.cpp
+++ b/utils/TableGen/CodeGenTarget.cpp
@@ -289,17 +289,6 @@ GetInstByName(const char *Name,
   return I->second;
 }
 
-namespace {
-/// SortInstByName - Sorting predicate to sort instructions by name.
-/// -struct SortInstByName { - bool operator()(const CodeGenInstruction *Rec1, - const CodeGenInstruction *Rec2) const { - return Rec1->TheDef->getName() < Rec2->TheDef->getName(); - } -}; -} - /// \brief Return all of the instructions defined by the target, ordered by /// their enum value. void CodeGenTarget::ComputeInstrsByEnum() const { @@ -346,8 +335,10 @@ void CodeGenTarget::ComputeInstrsByEnum() const { // All of the instructions are now in random order based on the map iteration. // Sort them by name. - std::sort(InstrsByEnum.begin()+EndOfPredefines, InstrsByEnum.end(), - SortInstByName()); + std::sort(InstrsByEnum.begin() + EndOfPredefines, InstrsByEnum.end(), + [](const CodeGenInstruction *Rec1, const CodeGenInstruction *Rec2) { + return Rec1->TheDef->getName() < Rec2->TheDef->getName(); + }); } diff --git a/utils/TableGen/IntrinsicEmitter.cpp b/utils/TableGen/IntrinsicEmitter.cpp index d366861992d..cf6934cb169 100644 --- a/utils/TableGen/IntrinsicEmitter.cpp +++ b/utils/TableGen/IntrinsicEmitter.cpp @@ -131,20 +131,6 @@ void IntrinsicEmitter::EmitEnumInfo(const std::vector &Ints, OS << "#endif\n\n"; } -struct IntrinsicNameSorter { - IntrinsicNameSorter(const std::vector &I) - : Ints(I) {} - - // Sort in reverse order of intrinsic name so "abc.def" appears after - // "abd.def.ghi" in the overridden name matcher - bool operator()(unsigned i, unsigned j) { - return Ints[i].Name > Ints[j].Name; - } - -private: - const std::vector &Ints; -}; - void IntrinsicEmitter:: EmitFnNameRecognizer(const std::vector &Ints, raw_ostream &OS) { @@ -158,15 +144,17 @@ EmitFnNameRecognizer(const std::vector &Ints, OS << " StringRef NameR(Name+6, Len-6); // Skip over 'llvm.'\n"; OS << " switch (Name[5]) { // Dispatch on first letter.\n"; OS << " default: break;\n"; - IntrinsicNameSorter Sorter(Ints); // Emit the intrinsic matching stuff by first letter. for (std::map >::iterator I = IntMapping.begin(), E = IntMapping.end(); I != E; ++I) { OS << " case '" << I->first << "':\n"; std::vector &IntList = I->second; - // Sort intrinsics in reverse order of their names - std::sort(IntList.begin(), IntList.end(), Sorter); + // Sort in reverse order of intrinsic name so "abc.def" appears after + // "abd.def.ghi" in the overridden name matcher + std::sort(IntList.begin(), IntList.end(), [&](unsigned i, unsigned j) { + return Ints[i].Name > Ints[j].Name; + }); // Emit all the overloaded intrinsics first, build a table of the // non-overloaded ones.