diff --git a/include/llvm/ADT/MapVector.h b/include/llvm/ADT/MapVector.h
index bcf273628aa..7fd1570cbf1 100644
--- a/include/llvm/ADT/MapVector.h
+++ b/include/llvm/ADT/MapVector.h
@@ -17,9 +17,7 @@
 #ifndef LLVM_ADT_MAPVECTOR_H
 #define LLVM_ADT_MAPVECTOR_H
 
-#include "llvm/ADT/ArrayRef.h"
 #include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/STLExtras.h"
 #include <vector>
 
 namespace llvm {
@@ -97,7 +95,7 @@ public:
     if (Result.second) {
       Vector.push_back(std::make_pair(KV.first, KV.second));
       I = Vector.size() - 1;
-      return std::make_pair(llvm::prior(end()), true);
+      return std::make_pair(std::prev(end()), true);
     }
     return std::make_pair(begin() + I, false);
   }
diff --git a/include/llvm/ADT/STLExtras.h b/include/llvm/ADT/STLExtras.h
index 3aa81833532..e41f18d2d3a 100644
--- a/include/llvm/ADT/STLExtras.h
+++ b/include/llvm/ADT/STLExtras.h
@@ -141,41 +141,6 @@
 inline mapped_iterator<ItTy, FuncTy> map_iterator(const ItTy &I, FuncTy F) {
   return mapped_iterator<ItTy, FuncTy>(I, F);
 }
-
-// next/prior - These functions unlike std::advance do not modify the
-// passed iterator but return a copy.
-//
-// next(myIt) returns copy of myIt incremented once
-// next(myIt, n) returns copy of myIt incremented n times
-// prior(myIt) returns copy of myIt decremented once
-// prior(myIt, n) returns copy of myIt decremented n times
-
-template <typename ItTy, typename Dist>
-inline ItTy next(ItTy it, Dist n)
-{
-  std::advance(it, n);
-  return it;
-}
-
-template <typename ItTy>
-inline ItTy next(ItTy it)
-{
-  return ++it;
-}
-
-template <typename ItTy, typename Dist>
-inline ItTy prior(ItTy it, Dist n)
-{
-  std::advance(it, -n);
-  return it;
-}
-
-template <typename ItTy>
-inline ItTy prior(ItTy it)
-{
-  return --it;
-}
-
 //===----------------------------------------------------------------------===//
 //     Extra additions to
 //===----------------------------------------------------------------------===//
diff --git a/include/llvm/ADT/TinyPtrVector.h b/include/llvm/ADT/TinyPtrVector.h
index 5b5b1b547fe..1df8d661b4f 100644
--- a/include/llvm/ADT/TinyPtrVector.h
+++ b/include/llvm/ADT/TinyPtrVector.h
@@ -12,9 +12,7 @@
 
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/ADT/PointerUnion.h"
-#include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/Compiler.h"
 
 namespace llvm {
 
@@ -248,7 +246,7 @@ public:
     assert(I <= this->end() && "Inserting past the end of the vector.");
     if (I == end()) {
       push_back(Elt);
-      return llvm::prior(end());
+      return std::prev(end());
     }
     assert(!Val.isNull() && "Null value with non-end insert iterator.");
     if (EltTy V = Val.template dyn_cast<EltTy>()) {
@@ -271,7 +269,7 @@ public:
     // If we have a single value, convert to a vector.
     ptrdiff_t Offset = I - begin();
     if (Val.isNull()) {
-      if (llvm::next(From) == To) {
+      if (std::next(From) == To) {
         Val = *From;
         return begin();
       }
diff --git a/include/llvm/CodeGen/MachineBasicBlock.h b/include/llvm/CodeGen/MachineBasicBlock.h
index b2dac195fda..5e86e756d5a 100644
--- a/include/llvm/CodeGen/MachineBasicBlock.h
+++ b/include/llvm/CodeGen/MachineBasicBlock.h
@@ -503,7 +503,7 @@ public:
   ///
   /// If I points to a bundle of instructions, they are all erased.
   iterator erase(iterator I) {
-    return erase(I, llvm::next(I));
+    return erase(I, std::next(I));
   }
 
   /// Remove an instruction from the instruction list and delete it.
@@ -542,7 +542,7 @@ public:
   void splice(iterator Where, MachineBasicBlock *Other, iterator From) {
     // The range splice() doesn't allow noop moves, but this one does.
     if (Where != From)
-      splice(Where, Other, From, llvm::next(From));
+      splice(Where, Other, From, std::next(From));
   }
 
   /// Take a block of instructions from MBB 'Other' in the range [From, To),
@@ -750,11 +750,11 @@ public:
   MachineInstrSpan(MachineBasicBlock::iterator I)
     : MBB(*I->getParent()),
       I(I),
-      B(I == MBB.begin() ? MBB.end() : llvm::prior(I)),
-      E(llvm::next(I)) {}
+      B(I == MBB.begin() ? MBB.end() : std::prev(I)),
+      E(std::next(I)) {}
 
   MachineBasicBlock::iterator begin() {
-    return B == MBB.end() ? MBB.begin() : llvm::next(B);
+    return B == MBB.end() ? MBB.begin() : std::next(B);
   }
   MachineBasicBlock::iterator end() { return E; }
   bool empty() { return begin() == end(); }
diff --git a/include/llvm/CodeGen/SelectionDAGNodes.h b/include/llvm/CodeGen/SelectionDAGNodes.h
index aae304df2a2..1150aa933ab 100644
--- a/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -408,7 +408,7 @@ public:
   /// hasOneUse - Return true if there is exactly one use of this node.
   ///
   bool hasOneUse() const {
-    return !use_empty() && llvm::next(use_begin()) == use_end();
+    return !use_empty() && std::next(use_begin()) == use_end();
   }
 
   /// use_size - Return the number of uses of this node. This method takes
diff --git a/include/llvm/CodeGen/SlotIndexes.h b/include/llvm/CodeGen/SlotIndexes.h
index 984796af864..2b79cfbf3f0 100644
--- a/include/llvm/CodeGen/SlotIndexes.h
+++ b/include/llvm/CodeGen/SlotIndexes.h
@@ -545,7 +545,7 @@ namespace llvm {
         std::lower_bound(idx2MBBMap.begin(), idx2MBBMap.end(), start);
 
       if (itr == idx2MBBMap.end()) {
-        itr = prior(itr);
+        itr = std::prev(itr);
         return itr->second;
       }
@@ -553,7 +553,7 @@ namespace llvm {
       if (itr->first < end)
         return 0;
 
-      itr = prior(itr);
+      itr = std::prev(itr);
 
       if (itr->first <= start)
        return itr->second;
@@ -581,11 +581,11 @@ namespace llvm {
       if (Late) {
         // Insert mi's index immediately before the following instruction.
         nextItr = getIndexAfter(mi).listEntry();
-        prevItr = prior(nextItr);
+        prevItr = std::prev(nextItr);
       } else {
         // Insert mi's index immediately after the preceding instruction.
         prevItr = getIndexBefore(mi).listEntry();
-        nextItr = llvm::next(prevItr);
+        nextItr = std::next(prevItr);
       }
 
       // Get a number for the new instr, or 0 if there's no room currently.
@@ -638,7 +638,7 @@ namespace llvm {
     /// Add the given MachineBasicBlock into the maps.
     void insertMBBInMaps(MachineBasicBlock *mbb) {
       MachineFunction::iterator nextMBB =
-        llvm::next(MachineFunction::iterator(mbb));
+        std::next(MachineFunction::iterator(mbb));
       IndexListEntry *startEntry = 0;
       IndexListEntry *endEntry = 0;
diff --git a/lib/Analysis/Lint.cpp b/lib/Analysis/Lint.cpp
index 1823370ce0b..40c81a28542 100644
--- a/lib/Analysis/Lint.cpp
+++ b/lib/Analysis/Lint.cpp
@@ -603,7 +603,7 @@ void Lint::visitInsertElementInst(InsertElementInst &I) {
 void Lint::visitUnreachableInst(UnreachableInst &I) {
   // This isn't undefined behavior, it's merely suspicious.
   Assert1(&I == I.getParent()->begin() ||
-          prior(BasicBlock::iterator(&I))->mayHaveSideEffects(),
+          std::prev(BasicBlock::iterator(&I))->mayHaveSideEffects(),
           "Unusual: unreachable immediately preceded by instruction without "
           "side effects", &I);
 }
diff --git a/lib/Analysis/LoopInfo.cpp b/lib/Analysis/LoopInfo.cpp
index c6c1f922f7d..aae4d640339 100644
--- a/lib/Analysis/LoopInfo.cpp
+++ b/lib/Analysis/LoopInfo.cpp
@@ -527,8 +527,8 @@ void UnloopUpdater::removeBlocksFromAncestors() {
 /// nested within unloop.
 void UnloopUpdater::updateSubloopParents() {
   while (!Unloop->empty()) {
-    Loop *Subloop = *llvm::prior(Unloop->end());
-    Unloop->removeChildLoop(llvm::prior(Unloop->end()));
+    Loop *Subloop = *std::prev(Unloop->end());
+    Unloop->removeChildLoop(std::prev(Unloop->end()));
 
     assert(SubloopParents.count(Subloop) && "DFS failed to visit subloop");
     if (Loop *Parent = SubloopParents[Subloop])
@@ -652,7 +652,7 @@ void LoopInfo::updateUnloop(Loop *Unloop) {
 
     // Move all of the subloops to the top-level.
     while (!Unloop->empty())
-      LI.addTopLevelLoop(Unloop->removeChildLoop(llvm::prior(Unloop->end())));
+      LI.addTopLevelLoop(Unloop->removeChildLoop(std::prev(Unloop->end())));
 
     return;
   }
diff --git a/lib/Analysis/MemoryDependenceAnalysis.cpp b/lib/Analysis/MemoryDependenceAnalysis.cpp
index 32207b2f4a6..5c481fb446f 100644
--- a/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -693,7 +693,7 @@ MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
     NonLocalDepInfo::iterator Entry =
       std::upper_bound(Cache.begin(), Cache.begin()+NumSortedEntries,
                        NonLocalDepEntry(DirtyBB));
-    if (Entry != Cache.begin() && prior(Entry)->getBB() == DirtyBB)
+    if (Entry != Cache.begin() && std::prev(Entry)->getBB() == DirtyBB)
       --Entry;
 
     NonLocalDepEntry *ExistingResult = 0;
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index a822b0d461f..f5739d4eab9 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -193,7 +193,7 @@ void SCEV::print(raw_ostream &OS) const {
     for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
          I != E; ++I) {
       OS << **I;
-      if (llvm::next(I) != E)
+      if (std::next(I) != E)
         OS << OpStr;
     }
     OS << ")";
@@ -3258,7 +3258,7 @@ const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
   const SCEV *TotalOffset = getConstant(IntPtrTy, 0);
   gep_type_iterator GTI = gep_type_begin(GEP);
-  for (GetElementPtrInst::op_iterator I = llvm::next(GEP->op_begin()),
+  for (GetElementPtrInst::op_iterator I = std::next(GEP->op_begin()),
        E = GEP->op_end(); I != E; ++I) {
     Value *Index = *I;
diff --git a/lib/Analysis/ScalarEvolutionExpander.cpp b/lib/Analysis/ScalarEvolutionExpander.cpp
index b778c6e3467..ed345ab115f 100644
--- a/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -1391,7 +1391,7 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
     Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
                                        S->getNoWrapFlags(SCEV::FlagNW)));
     BasicBlock::iterator NewInsertPt =
-      llvm::next(BasicBlock::iterator(cast(V)));
+      std::next(BasicBlock::iterator(cast(V)));
     BuilderType::InsertPointGuard Guard(Builder);
     while (isa(NewInsertPt) || isa(NewInsertPt) ||
            isa(NewInsertPt))
@@ -1619,7 +1619,7 @@ Value *SCEVExpander::expand(const SCEV *S) {
       while (InsertPt != Builder.GetInsertPoint() &&
              (isInsertedInstruction(InsertPt) ||
               isa(InsertPt))) {
-        InsertPt = llvm::next(BasicBlock::iterator(InsertPt));
+        InsertPt = std::next(BasicBlock::iterator(InsertPt));
       }
       break;
     }
diff --git a/lib/CodeGen/Analysis.cpp b/lib/CodeGen/Analysis.cpp
index 1600c67a7c6..6ac5de2c617 100644
--- a/lib/CodeGen/Analysis.cpp
+++ b/lib/CodeGen/Analysis.cpp
@@ -498,8 +498,7 @@ bool llvm::isInTailCallPosition(ImmutableCallSite CS,
   // chain interposes between I and the return.
   if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
       !isSafeToSpeculativelyExecute(I))
-    for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
-         --BBI) {
+    for (BasicBlock::const_iterator BBI = std::prev(ExitBB->end(), 2);; --BBI) {
       if (&*BBI == I)
         break;
       // Debug info intrinsics do not get in the way of tail call optimization.
diff --git a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
index aeb2e526220..57e5cd6be92 100644
--- a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
+++ b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
@@ -1592,7 +1592,7 @@ void DwarfDebug::beginFunction(const MachineFunction *MF) {
         // Terminate old register assignments that don't reach MI;
         MachineFunction::const_iterator PrevMBB = Prev->getParent();
-        if (PrevMBB != I && (!AtBlockEntry || llvm::next(PrevMBB) != I) &&
+        if (PrevMBB != I && (!AtBlockEntry || std::next(PrevMBB) != I) &&
             isDbgValueInDefinedReg(Prev)) {
           // Previous register assignment needs to terminate at the end of
           // its basic block.
@@ -1603,7 +1603,7 @@ void DwarfDebug::beginFunction(const MachineFunction *MF) {
             DEBUG(dbgs() << "Dropping DBG_VALUE for empty range:\n"
                          << "\t" << *Prev << "\n");
             History.pop_back();
-          } else if (llvm::next(PrevMBB) != PrevMBB->getParent()->end())
+          } else if (std::next(PrevMBB) != PrevMBB->getParent()->end())
             // Terminate after LastMI.
             History.push_back(LastMI);
         }
diff --git a/lib/CodeGen/BranchFolding.cpp b/lib/CodeGen/BranchFolding.cpp
index a4a3712de8b..f02e5531780 100644
--- a/lib/CodeGen/BranchFolding.cpp
+++ b/lib/CodeGen/BranchFolding.cpp
@@ -383,7 +383,7 @@ void BranchFolder::MaintainLiveIns(MachineBasicBlock *CurMBB,
   if (RS) {
     RS->enterBasicBlock(CurMBB);
     if (!CurMBB->empty())
-      RS->forward(prior(CurMBB->end()));
+      RS->forward(std::prev(CurMBB->end()));
     BitVector RegsLiveAtExit(TRI->getNumRegs());
     RS->getRegsUsed(RegsLiveAtExit, false);
     for (unsigned int i = 0, e = TRI->getNumRegs(); i != e; i++)
@@ -462,7 +462,7 @@ static unsigned EstimateRuntime(MachineBasicBlock::iterator I,
 static void FixTail(MachineBasicBlock *CurMBB, MachineBasicBlock *SuccBB,
                     const TargetInstrInfo *TII) {
   MachineFunction *MF = CurMBB->getParent();
-  MachineFunction::iterator I = llvm::next(MachineFunction::iterator(CurMBB));
+  MachineFunction::iterator I = std::next(MachineFunction::iterator(CurMBB));
   MachineBasicBlock *TBB = 0, *FBB = 0;
   SmallVector Cond;
   DebugLoc dl;  // FIXME: this is nowhere
@@ -600,12 +600,11 @@ unsigned BranchFolder::ComputeSameTails(unsigned CurHash,
   unsigned maxCommonTailLength = 0U;
   SameTails.clear();
   MachineBasicBlock::iterator TrialBBI1, TrialBBI2;
-  MPIterator HighestMPIter = prior(MergePotentials.end());
-  for (MPIterator CurMPIter = prior(MergePotentials.end()),
+  MPIterator HighestMPIter = std::prev(MergePotentials.end());
+  for (MPIterator CurMPIter = std::prev(MergePotentials.end()),
        B = MergePotentials.begin();
-       CurMPIter != B && CurMPIter->getHash() == CurHash;
-       --CurMPIter) {
-    for (MPIterator I = prior(CurMPIter); I->getHash() == CurHash ; --I) {
+       CurMPIter != B && CurMPIter->getHash() == CurHash; --CurMPIter) {
+    for (MPIterator I = std::prev(CurMPIter); I->getHash() == CurHash; --I) {
       unsigned CommonTailLen;
       if (ProfitableToMerge(CurMPIter->getBlock(), I->getBlock(),
                             minCommonTailLength,
@@ -634,9 +633,9 @@ void BranchFolder::RemoveBlocksWithHash(unsigned CurHash,
                                         MachineBasicBlock *SuccBB,
                                         MachineBasicBlock *PredBB) {
   MPIterator CurMPIter, B;
-  for (CurMPIter = prior(MergePotentials.end()), B = MergePotentials.begin();
-       CurMPIter->getHash() == CurHash;
-       --CurMPIter) {
+  for (CurMPIter = std::prev(MergePotentials.end()),
+       B = MergePotentials.begin();
+       CurMPIter->getHash() == CurHash; --CurMPIter) {
     // Put the unconditional branch back, if we need one.
     MachineBasicBlock *CurMBB = CurMPIter->getBlock();
     if (SuccBB && CurMBB != PredBB)
@@ -868,12 +867,12 @@ bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
   // a compile-time infinite loop repeatedly doing and undoing the same
   // transformations.)
 
-  for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
+  for (MachineFunction::iterator I = std::next(MF.begin()), E = MF.end();
        I != E; ++I) {
     if (I->pred_size() < 2) continue;
     SmallPtrSet UniquePreds;
     MachineBasicBlock *IBB = I;
-    MachineBasicBlock *PredBB = prior(I);
+    MachineBasicBlock *PredBB = std::prev(I);
     MergePotentials.clear();
     for (MachineBasicBlock::pred_iterator P = I->pred_begin(),
                                           E2 = I->pred_end();
@@ -905,7 +904,7 @@ bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
           continue;
         // This is the QBB case described above
         if (!FBB)
-          FBB = llvm::next(MachineFunction::iterator(PBB));
+          FBB = std::next(MachineFunction::iterator(PBB));
       }
 
       // Failing case: the only way IBB can be reached from PBB is via
@@ -955,7 +954,7 @@ bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
     // Reinsert an unconditional branch if needed. The 1 below can occur as a
    // result of removing blocks in TryTailMergeBlocks.
-    PredBB = prior(I);      // this may have been changed in TryTailMergeBlocks
+    PredBB = std::prev(I);  // this may have been changed in TryTailMergeBlocks
     if (MergePotentials.size() == 1 &&
         MergePotentials.begin()->getBlock() != PredBB)
       FixTail(MergePotentials.begin()->getBlock(), IBB, TII);
@@ -974,7 +973,7 @@ bool BranchFolder::OptimizeBranches(MachineFunction &MF) {
   // Make sure blocks are numbered in order
   MF.RenumberBlocks();
 
-  for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
+  for (MachineFunction::iterator I = std::next(MF.begin()), E = MF.end();
        I != E; ) {
     MachineBasicBlock *MBB = I++;
     MadeChange |= OptimizeBlock(MBB);
@@ -1095,7 +1094,7 @@ ReoptimizeBlock:
   // Check to see if we can simplify the terminator of the block before this
   // one.
-  MachineBasicBlock &PrevBB = *prior(MachineFunction::iterator(MBB));
+  MachineBasicBlock &PrevBB = *std::prev(MachineFunction::iterator(MBB));
 
   MachineBasicBlock *PriorTBB = 0, *PriorFBB = 0;
   SmallVector PriorCond;
@@ -1394,7 +1393,8 @@ ReoptimizeBlock:
     // B  elsewhere
     // next:
     if (CurFallsThru) {
-      MachineBasicBlock *NextBB = llvm::next(MachineFunction::iterator(MBB));
+      MachineBasicBlock *NextBB =
+          std::next(MachineFunction::iterator(MBB));
       CurCond.clear();
       TII->InsertBranch(*MBB, NextBB, 0, CurCond, DebugLoc());
     }
diff --git a/lib/CodeGen/CodeGenPrepare.cpp b/lib/CodeGen/CodeGenPrepare.cpp
index bac9e0eeef2..3dafd6cde2b 100644
--- a/lib/CodeGen/CodeGenPrepare.cpp
+++ b/lib/CodeGen/CodeGenPrepare.cpp
@@ -253,7 +253,7 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
 bool CodeGenPrepare::EliminateFallThrough(Function &F) {
   bool Changed = false;
   // Scan all of the blocks in the function, except for the entry block.
-  for (Function::iterator I = llvm::next(F.begin()), E = F.end(); I != E; ) {
+  for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
     BasicBlock *BB = I++;
     // If the destination block has a single pred, then this is a trivial
     // edge, just collapse it.
@@ -289,7 +289,7 @@ bool CodeGenPrepare::EliminateFallThrough(Function &F) { bool CodeGenPrepare::EliminateMostlyEmptyBlocks(Function &F) { bool MadeChange = false; // Note that this intentionally skips the entry block. - for (Function::iterator I = llvm::next(F.begin()), E = F.end(); I != E; ) { + for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) { BasicBlock *BB = I++; // If this block doesn't end with an uncond branch, ignore it. diff --git a/lib/CodeGen/DeadMachineInstructionElim.cpp b/lib/CodeGen/DeadMachineInstructionElim.cpp index 5efe1ffe820..36069ac79b7 100644 --- a/lib/CodeGen/DeadMachineInstructionElim.cpp +++ b/lib/CodeGen/DeadMachineInstructionElim.cpp @@ -130,7 +130,7 @@ bool DeadMachineInstructionElim::runOnMachineFunction(MachineFunction &MF) { MachineRegisterInfo::use_iterator nextI; for (MachineRegisterInfo::use_iterator I = MRI->use_begin(Reg), E = MRI->use_end(); I!=E; I=nextI) { - nextI = llvm::next(I); // I is invalidated by the setReg + nextI = std::next(I); // I is invalidated by the setReg MachineOperand& Use = I.getOperand(); MachineInstr *UseMI = Use.getParent(); if (UseMI==MI) diff --git a/lib/CodeGen/EarlyIfConversion.cpp b/lib/CodeGen/EarlyIfConversion.cpp index 5447df09cbb..84dd5b98f1f 100644 --- a/lib/CodeGen/EarlyIfConversion.cpp +++ b/lib/CodeGen/EarlyIfConversion.cpp @@ -461,7 +461,7 @@ void SSAIfConv::replacePHIInstrs() { DEBUG(dbgs() << "If-converting " << *PI.PHI); unsigned DstReg = PI.PHI->getOperand(0).getReg(); TII->insertSelect(*Head, FirstTerm, HeadDL, DstReg, Cond, PI.TReg, PI.FReg); - DEBUG(dbgs() << " --> " << *llvm::prior(FirstTerm)); + DEBUG(dbgs() << " --> " << *std::prev(FirstTerm)); PI.PHI->eraseFromParent(); PI.PHI = 0; } @@ -482,7 +482,7 @@ void SSAIfConv::rewritePHIOperands() { unsigned PHIDst = PI.PHI->getOperand(0).getReg(); unsigned DstReg = MRI->createVirtualRegister(MRI->getRegClass(PHIDst)); TII->insertSelect(*Head, FirstTerm, HeadDL, DstReg, Cond, PI.TReg, PI.FReg); - DEBUG(dbgs() << " --> " << *llvm::prior(FirstTerm)); + DEBUG(dbgs() << " --> " << *std::prev(FirstTerm)); // Rewrite PHI operands TPred -> (DstReg, Head), remove FPred. for (unsigned i = PI.PHI->getNumOperands(); i != 1; i -= 2) { diff --git a/lib/CodeGen/IfConversion.cpp b/lib/CodeGen/IfConversion.cpp index b664b3b1918..e5b4b23c101 100644 --- a/lib/CodeGen/IfConversion.cpp +++ b/lib/CodeGen/IfConversion.cpp @@ -921,7 +921,7 @@ void IfConverter::AnalyzeBlocks(MachineFunction &MF, /// next block). 
static bool canFallThroughTo(MachineBasicBlock *BB, MachineBasicBlock *ToBB) { MachineFunction::iterator PI = BB; - MachineFunction::iterator I = llvm::next(PI); + MachineFunction::iterator I = std::next(PI); MachineFunction::iterator TI = ToBB; MachineFunction::iterator E = BB->getParent()->end(); while (I != TI) { diff --git a/lib/CodeGen/InlineSpiller.cpp b/lib/CodeGen/InlineSpiller.cpp index 8487ee1f272..645267065e6 100644 --- a/lib/CodeGen/InlineSpiller.cpp +++ b/lib/CodeGen/InlineSpiller.cpp @@ -1014,7 +1014,7 @@ static void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B, char NextLine = '\n'; char SlotIndent = '\t'; - if (llvm::next(B) == E) { + if (std::next(B) == E) { NextLine = ' '; SlotIndent = ' '; } @@ -1171,12 +1171,12 @@ void InlineSpiller::insertSpill(unsigned NewVReg, bool isKill, MachineBasicBlock &MBB = *MI->getParent(); MachineInstrSpan MIS(MI); - TII.storeRegToStackSlot(MBB, llvm::next(MI), NewVReg, isKill, StackSlot, + TII.storeRegToStackSlot(MBB, std::next(MI), NewVReg, isKill, StackSlot, MRI.getRegClass(NewVReg), &TRI); - LIS.InsertMachineInstrRangeInMaps(llvm::next(MI), MIS.end()); + LIS.InsertMachineInstrRangeInMaps(std::next(MI), MIS.end()); - DEBUG(dumpMachineInstrRangeWithSlotIndex(llvm::next(MI), MIS.end(), LIS, + DEBUG(dumpMachineInstrRangeWithSlotIndex(std::next(MI), MIS.end(), LIS, "spill")); ++NumSpills; } diff --git a/lib/CodeGen/LatencyPriorityQueue.cpp b/lib/CodeGen/LatencyPriorityQueue.cpp index deab05a412c..e88d537d318 100644 --- a/lib/CodeGen/LatencyPriorityQueue.cpp +++ b/lib/CodeGen/LatencyPriorityQueue.cpp @@ -119,12 +119,12 @@ void LatencyPriorityQueue::AdjustPriorityOfUnscheduledPreds(SUnit *SU) { SUnit *LatencyPriorityQueue::pop() { if (empty()) return NULL; std::vector::iterator Best = Queue.begin(); - for (std::vector::iterator I = llvm::next(Queue.begin()), + for (std::vector::iterator I = std::next(Queue.begin()), E = Queue.end(); I != E; ++I) if (Picker(*Best, *I)) Best = I; SUnit *V = *Best; - if (Best != prior(Queue.end())) + if (Best != std::prev(Queue.end())) std::swap(*Best, Queue.back()); Queue.pop_back(); return V; @@ -133,7 +133,7 @@ SUnit *LatencyPriorityQueue::pop() { void LatencyPriorityQueue::remove(SUnit *SU) { assert(!Queue.empty() && "Queue is empty!"); std::vector::iterator I = std::find(Queue.begin(), Queue.end(), SU); - if (I != prior(Queue.end())) + if (I != std::prev(Queue.end())) std::swap(*I, Queue.back()); Queue.pop_back(); } diff --git a/lib/CodeGen/LiveDebugVariables.cpp b/lib/CodeGen/LiveDebugVariables.cpp index 52b7ee0f2a6..fd4c91abd72 100644 --- a/lib/CodeGen/LiveDebugVariables.cpp +++ b/lib/CodeGen/LiveDebugVariables.cpp @@ -480,7 +480,7 @@ bool LDVImpl::collectDebugValues(MachineFunction &mf) { // DBG_VALUE has no slot index, use the previous instruction instead. SlotIndex Idx = MBBI == MBB->begin() ? LIS->getMBBStartIdx(MBB) : - LIS->getInstructionIndex(llvm::prior(MBBI)).getRegSlot(); + LIS->getInstructionIndex(std::prev(MBBI)).getRegSlot(); // Handle consecutive DBG_VALUE instructions with the same slot index. do { if (handleDebugValue(MBBI, Idx)) { @@ -914,7 +914,7 @@ findInsertLocation(MachineBasicBlock *MBB, SlotIndex Idx, // Don't insert anything after the first terminator, though. return MI->isTerminator() ? 
MBB->getFirstTerminator() : - llvm::next(MachineBasicBlock::iterator(MI)); + std::next(MachineBasicBlock::iterator(MI)); } DebugLoc UserValue::findDebugLoc() { diff --git a/lib/CodeGen/LiveInterval.cpp b/lib/CodeGen/LiveInterval.cpp index 2b8feb8c3b4..36b34520457 100644 --- a/lib/CodeGen/LiveInterval.cpp +++ b/lib/CodeGen/LiveInterval.cpp @@ -222,13 +222,13 @@ void LiveRange::extendSegmentEndTo(iterator I, SlotIndex NewEnd) { VNInfo *ValNo = I->valno; // Search for the first segment that we can't merge with. - iterator MergeTo = llvm::next(I); + iterator MergeTo = std::next(I); for (; MergeTo != end() && NewEnd >= MergeTo->end; ++MergeTo) { assert(MergeTo->valno == ValNo && "Cannot merge with differing values!"); } // If NewEnd was in the middle of a segment, make sure to get its endpoint. - I->end = std::max(NewEnd, prior(MergeTo)->end); + I->end = std::max(NewEnd, std::prev(MergeTo)->end); // If the newly formed segment now touches the segment after it and if they // have the same value number, merge the two segments into one segment. @@ -239,7 +239,7 @@ void LiveRange::extendSegmentEndTo(iterator I, SlotIndex NewEnd) { } // Erase any dead segments. - segments.erase(llvm::next(I), MergeTo); + segments.erase(std::next(I), MergeTo); } @@ -274,7 +274,7 @@ LiveRange::extendSegmentStartTo(iterator I, SlotIndex NewStart) { MergeTo->end = I->end; } - segments.erase(llvm::next(MergeTo), llvm::next(I)); + segments.erase(std::next(MergeTo), std::next(I)); return MergeTo; } @@ -285,7 +285,7 @@ LiveRange::iterator LiveRange::addSegmentFrom(Segment S, iterator From) { // If the inserted segment starts in the middle or right at the end of // another segment, just extend that segment to contain the segment of S. if (it != begin()) { - iterator B = prior(it); + iterator B = std::prev(it); if (S.valno == B->valno) { if (B->start <= Start && B->end >= Start) { extendSegmentEndTo(B, End); @@ -389,7 +389,7 @@ void LiveRange::removeSegment(SlotIndex Start, SlotIndex End, I->end = Start; // Trim the old segment. // Insert the new one. - segments.insert(llvm::next(I), Segment(End, OldEnd, ValNo)); + segments.insert(std::next(I), Segment(End, OldEnd, ValNo)); } /// removeValNo - Remove all the segments defined by the specified value#. @@ -433,7 +433,7 @@ void LiveRange::join(LiveRange &Other, iterator OutIt = begin(); OutIt->valno = NewVNInfo[LHSValNoAssignments[OutIt->valno->id]]; - for (iterator I = llvm::next(OutIt), E = end(); I != E; ++I) { + for (iterator I = std::next(OutIt), E = end(); I != E; ++I) { VNInfo* nextValNo = NewVNInfo[LHSValNoAssignments[I->valno->id]]; assert(nextValNo != 0 && "Huh?"); @@ -641,10 +641,10 @@ void LiveRange::verify() const { assert(I->valno != 0); assert(I->valno->id < valnos.size()); assert(I->valno == valnos[I->valno->id]); - if (llvm::next(I) != E) { - assert(I->end <= llvm::next(I)->start); - if (I->end == llvm::next(I)->start) - assert(I->valno != llvm::next(I)->valno); + if (std::next(I) != E) { + assert(I->end <= std::next(I)->start); + if (I->end == std::next(I)->start) + assert(I->valno != std::next(I)->valno); } } } diff --git a/lib/CodeGen/LiveIntervalAnalysis.cpp b/lib/CodeGen/LiveIntervalAnalysis.cpp index 4329ffc015b..83d62078cef 100644 --- a/lib/CodeGen/LiveIntervalAnalysis.cpp +++ b/lib/CodeGen/LiveIntervalAnalysis.cpp @@ -874,8 +874,8 @@ private: // values. The new range should be placed immediately before NewI, move any // intermediate ranges up. 
assert(NewI != I && "Inconsistent iterators"); - std::copy(llvm::next(I), NewI, I); - *llvm::prior(NewI) + std::copy(std::next(I), NewI, I); + *std::prev(NewI) = LiveRange::Segment(DefVNI->def, NewIdx.getDeadSlot(), DefVNI); } @@ -920,7 +920,7 @@ private: if (I == E || !SlotIndex::isSameInstr(I->start, OldIdx)) { // No def, search for the new kill. // This can never be an early clobber kill since there is no def. - llvm::prior(I)->end = findLastUseBefore(Reg).getRegSlot(); + std::prev(I)->end = findLastUseBefore(Reg).getRegSlot(); return; } } @@ -956,7 +956,7 @@ private: // DefVNI is a dead def. It may have been moved across other values in LR, // so move I up to NewI. Slide [NewI;I) down one position. - std::copy_backward(NewI, I, llvm::next(I)); + std::copy_backward(NewI, I, std::next(I)); *NewI = LiveRange::Segment(DefVNI->def, NewIdx.getDeadSlot(), DefVNI); } @@ -968,11 +968,11 @@ private: "No RegMask at OldIdx."); *RI = NewIdx.getRegSlot(); assert((RI == LIS.RegMaskSlots.begin() || - SlotIndex::isEarlierInstr(*llvm::prior(RI), *RI)) && - "Cannot move regmask instruction above another call"); - assert((llvm::next(RI) == LIS.RegMaskSlots.end() || - SlotIndex::isEarlierInstr(*RI, *llvm::next(RI))) && - "Cannot move regmask instruction below another call"); + SlotIndex::isEarlierInstr(*std::prev(RI), *RI)) && + "Cannot move regmask instruction above another call"); + assert((std::next(RI) == LIS.RegMaskSlots.end() || + SlotIndex::isEarlierInstr(*RI, *std::next(RI))) && + "Cannot move regmask instruction below another call"); } // Return the last use of reg between NewIdx and OldIdx. @@ -1125,7 +1125,7 @@ LiveIntervals::repairIntervalsInRange(MachineBasicBlock *MBB, if (LII->end.isDead()) { SlotIndex prevStart; if (LII != LI.begin()) - prevStart = llvm::prior(LII)->start; + prevStart = std::prev(LII)->start; // FIXME: This could be more efficient if there was a // removeSegment method that returned an iterator. diff --git a/lib/CodeGen/MachineBasicBlock.cpp b/lib/CodeGen/MachineBasicBlock.cpp index d2df4e5cb8b..769ba2a255e 100644 --- a/lib/CodeGen/MachineBasicBlock.cpp +++ b/lib/CodeGen/MachineBasicBlock.cpp @@ -626,7 +626,7 @@ bool MachineBasicBlock::isSuccessor(const MachineBasicBlock *MBB) const { bool MachineBasicBlock::isLayoutSuccessor(const MachineBasicBlock *MBB) const { MachineFunction::const_iterator I(this); - return llvm::next(I) == MachineFunction::const_iterator(MBB); + return std::next(I) == MachineFunction::const_iterator(MBB); } bool MachineBasicBlock::canFallThrough() { @@ -705,7 +705,7 @@ MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ, Pass *P) { } MachineBasicBlock *NMBB = MF->CreateMachineBasicBlock(); - MF->insert(llvm::next(MachineFunction::iterator(this)), NMBB); + MF->insert(std::next(MachineFunction::iterator(this)), NMBB); DEBUG(dbgs() << "Splitting critical edge:" " BB#" << getNumber() << " -- BB#" << NMBB->getNumber() @@ -848,7 +848,7 @@ MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ, Pass *P) { // extend to the end of the new split block. 
bool isLastMBB = - llvm::next(MachineFunction::iterator(NMBB)) == getParent()->end(); + std::next(MachineFunction::iterator(NMBB)) == getParent()->end(); SlotIndex StartIndex = Indexes->getMBBEndIdx(this); SlotIndex PrevIndex = StartIndex.getPrevSlot(); @@ -1063,7 +1063,7 @@ bool MachineBasicBlock::CorrectExtraCFGEdges(MachineBasicBlock *DestA, bool Changed = false; MachineFunction::iterator FallThru = - llvm::next(MachineFunction::iterator(this)); + std::next(MachineFunction::iterator(this)); if (DestA == 0 && DestB == 0) { // Block falls through to successor. diff --git a/lib/CodeGen/MachineBlockPlacement.cpp b/lib/CodeGen/MachineBlockPlacement.cpp index 10fbd1ab123..1cb87bb5286 100644 --- a/lib/CodeGen/MachineBlockPlacement.cpp +++ b/lib/CodeGen/MachineBlockPlacement.cpp @@ -494,11 +494,11 @@ void MachineBlockPlacement::buildChain( MachineBasicBlock *LoopHeaderBB = BB; markChainSuccessors(Chain, LoopHeaderBB, BlockWorkList, BlockFilter); - BB = *llvm::prior(Chain.end()); + BB = *std::prev(Chain.end()); for (;;) { assert(BB); assert(BlockToChain[BB] == &Chain); - assert(*llvm::prior(Chain.end()) == BB); + assert(*std::prev(Chain.end()) == BB); // Look for the best viable successor if there is one to place immediately // after this block. @@ -529,7 +529,7 @@ void MachineBlockPlacement::buildChain( << " to " << getBlockNum(BestSucc) << "\n"); markChainSuccessors(SuccChain, LoopHeaderBB, BlockWorkList, BlockFilter); Chain.merge(BestSucc, &SuccChain); - BB = *llvm::prior(Chain.end()); + BB = *std::prev(Chain.end()); } DEBUG(dbgs() << "Finished forming chain for header block " @@ -634,7 +634,7 @@ MachineBlockPlacement::findBestLoopExit(MachineFunction &F, BlockChain &Chain = *BlockToChain[*I]; // Ensure that this block is at the end of a chain; otherwise it could be // mid-way through an inner loop or a successor of an analyzable branch. - if (*I != *llvm::prior(Chain.end())) + if (*I != *std::prev(Chain.end())) continue; // Now walk the successors. We need to establish whether this has a viable @@ -741,7 +741,7 @@ void MachineBlockPlacement::rotateLoop(BlockChain &LoopChain, PI != PE; ++PI) { BlockChain *PredChain = BlockToChain[*PI]; if (!LoopBlockSet.count(*PI) && - (!PredChain || *PI == *llvm::prior(PredChain->end()))) { + (!PredChain || *PI == *std::prev(PredChain->end()))) { ViableTopFallthrough = true; break; } @@ -751,7 +751,7 @@ void MachineBlockPlacement::rotateLoop(BlockChain &LoopChain, // bottom is a viable exiting block. If so, bail out as rotating will // introduce an unnecessary branch. if (ViableTopFallthrough) { - MachineBasicBlock *Bottom = *llvm::prior(LoopChain.end()); + MachineBasicBlock *Bottom = *std::prev(LoopChain.end()); for (MachineBasicBlock::succ_iterator SI = Bottom->succ_begin(), SE = Bottom->succ_end(); SI != SE; ++SI) { @@ -767,7 +767,7 @@ void MachineBlockPlacement::rotateLoop(BlockChain &LoopChain, if (ExitIt == LoopChain.end()) return; - std::rotate(LoopChain.begin(), llvm::next(ExitIt), LoopChain.end()); + std::rotate(LoopChain.begin(), std::next(ExitIt), LoopChain.end()); } /// \brief Forms basic block chains from the natural loop structures. @@ -887,7 +887,7 @@ void MachineBlockPlacement::buildCFGChains(MachineFunction &F) { if (!TII->AnalyzeBranch(*BB, TBB, FBB, Cond) || !FI->canFallThrough()) break; - MachineFunction::iterator NextFI(llvm::next(FI)); + MachineFunction::iterator NextFI(std::next(FI)); MachineBasicBlock *NextBB = NextFI; // Ensure that the layout successor is a viable block, as we know that // fallthrough is a possibility. 
@@ -981,7 +981,7 @@ void MachineBlockPlacement::buildCFGChains(MachineFunction &F) { // Update the terminator of the previous block. if (BI == FunctionChain.begin()) continue; - MachineBasicBlock *PrevBB = llvm::prior(MachineFunction::iterator(*BI)); + MachineBasicBlock *PrevBB = std::prev(MachineFunction::iterator(*BI)); // FIXME: It would be awesome of updateTerminator would just return rather // than assert when the branch cannot be analyzed in order to remove this @@ -1053,7 +1053,7 @@ void MachineBlockPlacement::buildCFGChains(MachineFunction &F) { const BranchProbability ColdProb(1, 5); // 20% BlockFrequency EntryFreq = MBFI->getBlockFreq(F.begin()); BlockFrequency WeightedEntryFreq = EntryFreq * ColdProb; - for (BlockChain::iterator BI = llvm::next(FunctionChain.begin()), + for (BlockChain::iterator BI = std::next(FunctionChain.begin()), BE = FunctionChain.end(); BI != BE; ++BI) { // Don't align non-looping basic blocks. These are unlikely to execute @@ -1079,7 +1079,7 @@ void MachineBlockPlacement::buildCFGChains(MachineFunction &F) { // Check for the existence of a non-layout predecessor which would benefit // from aligning this block. - MachineBasicBlock *LayoutPred = *llvm::prior(BI); + MachineBasicBlock *LayoutPred = *std::prev(BI); // Force alignment if all the predecessors are jumps. We already checked // that the block isn't cold above. @@ -1101,7 +1101,7 @@ void MachineBlockPlacement::buildCFGChains(MachineFunction &F) { bool MachineBlockPlacement::runOnMachineFunction(MachineFunction &F) { // Check for single-block functions and skip them. - if (llvm::next(F.begin()) == F.end()) + if (std::next(F.begin()) == F.end()) return false; MBPI = &getAnalysis(); @@ -1169,7 +1169,7 @@ INITIALIZE_PASS_END(MachineBlockPlacementStats, "block-placement-stats", bool MachineBlockPlacementStats::runOnMachineFunction(MachineFunction &F) { // Check for single-block functions and skip them. - if (llvm::next(F.begin()) == F.end()) + if (std::next(F.begin()) == F.end()) return false; MBPI = &getAnalysis(); diff --git a/lib/CodeGen/MachineCSE.cpp b/lib/CodeGen/MachineCSE.cpp index 35ba7ff35e6..c6f0133da5f 100644 --- a/lib/CodeGen/MachineCSE.cpp +++ b/lib/CodeGen/MachineCSE.cpp @@ -229,7 +229,7 @@ bool MachineCSE::hasLivePhysRegDefUses(const MachineInstr *MI, // Next, collect all defs into PhysDefs. If any is already in PhysRefs // (which currently contains only uses), set the PhysUseDef flag. PhysUseDef = false; - MachineBasicBlock::const_iterator I = MI; I = llvm::next(I); + MachineBasicBlock::const_iterator I = MI; I = std::next(I); for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { const MachineOperand &MO = MI->getOperand(i); if (!MO.isReg() || !MO.isDef()) @@ -280,7 +280,7 @@ bool MachineCSE::PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI, } CrossMBB = true; } - MachineBasicBlock::const_iterator I = CSMI; I = llvm::next(I); + MachineBasicBlock::const_iterator I = CSMI; I = std::next(I); MachineBasicBlock::const_iterator E = MI; MachineBasicBlock::const_iterator EE = CSMBB->end(); unsigned LookAheadLeft = LookAheadLimit; diff --git a/lib/CodeGen/MachineFunction.cpp b/lib/CodeGen/MachineFunction.cpp index d9b0cca9636..26cfc6230a6 100644 --- a/lib/CodeGen/MachineFunction.cpp +++ b/lib/CodeGen/MachineFunction.cpp @@ -139,7 +139,7 @@ void MachineFunction::RenumberBlocks(MachineBasicBlock *MBB) { // Figure out the block number this should have. 
unsigned BlockNo = 0; if (MBBI != begin()) - BlockNo = prior(MBBI)->getNumber()+1; + BlockNo = std::prev(MBBI)->getNumber() + 1; for (; MBBI != E; ++MBBI, ++BlockNo) { if (MBBI->getNumber() != (int)BlockNo) { @@ -346,7 +346,7 @@ void MachineFunction::print(raw_ostream &OS, SlotIndexes *Indexes) const { OS << PrintReg(I->first, TRI); if (I->second) OS << " in " << PrintReg(I->second, TRI); - if (llvm::next(I) != E) + if (std::next(I) != E) OS << ", "; } OS << '\n'; diff --git a/lib/CodeGen/MachineInstr.cpp b/lib/CodeGen/MachineInstr.cpp index ddb8595e046..6e5a89aaa29 100644 --- a/lib/CodeGen/MachineInstr.cpp +++ b/lib/CodeGen/MachineInstr.cpp @@ -1643,7 +1643,7 @@ void MachineInstr::print(raw_ostream &OS, const TargetMachine *TM, for (mmo_iterator i = memoperands_begin(), e = memoperands_end(); i != e; ++i) { OS << **i; - if (llvm::next(i) != e) + if (std::next(i) != e) OS << " "; } } diff --git a/lib/CodeGen/MachineInstrBundle.cpp b/lib/CodeGen/MachineInstrBundle.cpp index 77bcd1d7c8e..66ed63ebd56 100644 --- a/lib/CodeGen/MachineInstrBundle.cpp +++ b/lib/CodeGen/MachineInstrBundle.cpp @@ -211,7 +211,7 @@ MachineBasicBlock::instr_iterator llvm::finalizeBundle(MachineBasicBlock &MBB, MachineBasicBlock::instr_iterator FirstMI) { MachineBasicBlock::instr_iterator E = MBB.instr_end(); - MachineBasicBlock::instr_iterator LastMI = llvm::next(FirstMI); + MachineBasicBlock::instr_iterator LastMI = std::next(FirstMI); while (LastMI != E && LastMI->isInsideBundle()) ++LastMI; finalizeBundle(MBB, FirstMI, LastMI); @@ -235,7 +235,7 @@ bool llvm::finalizeBundles(MachineFunction &MF) { if (!MII->isInsideBundle()) ++MII; else { - MII = finalizeBundle(MBB, llvm::prior(MII)); + MII = finalizeBundle(MBB, std::prev(MII)); Changed = true; } } diff --git a/lib/CodeGen/MachineLoopInfo.cpp b/lib/CodeGen/MachineLoopInfo.cpp index 4e2cfdc4e56..89054d4bd52 100644 --- a/lib/CodeGen/MachineLoopInfo.cpp +++ b/lib/CodeGen/MachineLoopInfo.cpp @@ -50,11 +50,11 @@ MachineBasicBlock *MachineLoop::getTopBlock() { MachineBasicBlock *TopMBB = getHeader(); MachineFunction::iterator Begin = TopMBB->getParent()->begin(); if (TopMBB != Begin) { - MachineBasicBlock *PriorMBB = prior(MachineFunction::iterator(TopMBB)); + MachineBasicBlock *PriorMBB = std::prev(MachineFunction::iterator(TopMBB)); while (contains(PriorMBB)) { TopMBB = PriorMBB; if (TopMBB == Begin) break; - PriorMBB = prior(MachineFunction::iterator(TopMBB)); + PriorMBB = std::prev(MachineFunction::iterator(TopMBB)); } } return TopMBB; @@ -63,12 +63,12 @@ MachineBasicBlock *MachineLoop::getTopBlock() { MachineBasicBlock *MachineLoop::getBottomBlock() { MachineBasicBlock *BotMBB = getHeader(); MachineFunction::iterator End = BotMBB->getParent()->end(); - if (BotMBB != prior(End)) { - MachineBasicBlock *NextMBB = llvm::next(MachineFunction::iterator(BotMBB)); + if (BotMBB != std::prev(End)) { + MachineBasicBlock *NextMBB = std::next(MachineFunction::iterator(BotMBB)); while (contains(NextMBB)) { BotMBB = NextMBB; - if (BotMBB == llvm::next(MachineFunction::iterator(BotMBB))) break; - NextMBB = llvm::next(MachineFunction::iterator(BotMBB)); + if (BotMBB == std::next(MachineFunction::iterator(BotMBB))) break; + NextMBB = std::next(MachineFunction::iterator(BotMBB)); } } return BotMBB; diff --git a/lib/CodeGen/MachineRegisterInfo.cpp b/lib/CodeGen/MachineRegisterInfo.cpp index 9b41390e73e..4029f4a2bbc 100644 --- a/lib/CodeGen/MachineRegisterInfo.cpp +++ b/lib/CodeGen/MachineRegisterInfo.cpp @@ -300,7 +300,7 @@ void MachineRegisterInfo::replaceRegWith(unsigned FromReg, 
unsigned ToReg) { MachineInstr *MachineRegisterInfo::getVRegDef(unsigned Reg) const { // Since we are in SSA form, we can use the first definition. def_iterator I = def_begin(Reg); - assert((I.atEnd() || llvm::next(I) == def_end()) && + assert((I.atEnd() || std::next(I) == def_end()) && "getVRegDef assumes a single definition or no definition"); return !I.atEnd() ? &*I : 0; } @@ -311,7 +311,7 @@ MachineInstr *MachineRegisterInfo::getVRegDef(unsigned Reg) const { MachineInstr *MachineRegisterInfo::getUniqueVRegDef(unsigned Reg) const { if (def_empty(Reg)) return 0; def_iterator I = def_begin(Reg); - if (llvm::next(I) != def_end()) + if (std::next(I) != def_end()) return 0; return &*I; } diff --git a/lib/CodeGen/MachineScheduler.cpp b/lib/CodeGen/MachineScheduler.cpp index f10a471d66f..e8b8713d1e4 100644 --- a/lib/CodeGen/MachineScheduler.cpp +++ b/lib/CodeGen/MachineScheduler.cpp @@ -408,8 +408,8 @@ void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler) { RegionEnd != MBB->begin(); RegionEnd = Scheduler.begin()) { // Avoid decrementing RegionEnd for blocks with no terminator. - if (RegionEnd != MBB->end() - || isSchedBoundary(llvm::prior(RegionEnd), MBB, MF, TII, IsPostRA)) { + if (RegionEnd != MBB->end() || + isSchedBoundary(std::prev(RegionEnd), MBB, MF, TII, IsPostRA)) { --RegionEnd; // Count the boundary instruction. --RemainingInstrs; @@ -420,7 +420,7 @@ void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler) { unsigned NumRegionInstrs = 0; MachineBasicBlock::iterator I = RegionEnd; for(;I != MBB->begin(); --I, --RemainingInstrs, ++NumRegionInstrs) { - if (isSchedBoundary(llvm::prior(I), MBB, MF, TII, IsPostRA)) + if (isSchedBoundary(std::prev(I), MBB, MF, TII, IsPostRA)) break; } // Notify the scheduler of the region, even if we may skip scheduling @@ -428,7 +428,7 @@ void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler) { Scheduler.enterRegion(MBB, I, RegionEnd, NumRegionInstrs); // Skip empty scheduling regions (0 or 1 schedulable instructions). - if (I == RegionEnd || I == llvm::prior(RegionEnd)) { + if (I == RegionEnd || I == std::prev(RegionEnd)) { // Close the current region. Bundle the terminator if needed. // This invalidates 'RegionEnd' and 'I'. Scheduler.exitRegion(); @@ -770,13 +770,13 @@ void ScheduleDAGMI::placeDebugValues() { for (std::vector >::iterator DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) { - std::pair P = *prior(DI); + std::pair P = *std::prev(DI); MachineInstr *DbgValue = P.first; MachineBasicBlock::iterator OrigPrevMI = P.second; if (&*RegionBegin == DbgValue) ++RegionBegin; BB->splice(++OrigPrevMI, BB, DbgValue); - if (OrigPrevMI == llvm::prior(RegionEnd)) + if (OrigPrevMI == std::prev(RegionEnd)) RegionEnd = DbgValue; } DbgValues.clear(); @@ -816,8 +816,7 @@ void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb, ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs); // For convenience remember the end of the liveness region. - LiveRegionEnd = - (RegionEnd == bb->end()) ? RegionEnd : llvm::next(RegionEnd); + LiveRegionEnd = (RegionEnd == bb->end()) ? RegionEnd : std::next(RegionEnd); SUPressureDiffs.clear(); @@ -1446,19 +1445,19 @@ void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) { // Check if GlobalLI contains a hole in the vicinity of LocalLI. if (GlobalSegment != GlobalLI->begin()) { // Two address defs have no hole. 
- if (SlotIndex::isSameInstr(llvm::prior(GlobalSegment)->end, + if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->end, GlobalSegment->start)) { return; } // If the prior global segment may be defined by the same two-address // instruction that also defines LocalLI, then can't make a hole here. - if (SlotIndex::isSameInstr(llvm::prior(GlobalSegment)->start, + if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->start, LocalLI->beginIndex())) { return; } // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise // it would be a disconnected component in the live range. - assert(llvm::prior(GlobalSegment)->start < LocalLI->beginIndex() && + assert(std::prev(GlobalSegment)->start < LocalLI->beginIndex() && "Disconnected LRG within the scheduling region."); } MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start); diff --git a/lib/CodeGen/MachineVerifier.cpp b/lib/CodeGen/MachineVerifier.cpp index aa89b83e727..738b1e6025a 100644 --- a/lib/CodeGen/MachineVerifier.cpp +++ b/lib/CodeGen/MachineVerifier.cpp @@ -1076,7 +1076,7 @@ void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) { // Verify SSA form. if (MRI->isSSA() && TargetRegisterInfo::isVirtualRegister(Reg) && - llvm::next(MRI->def_begin(Reg)) != MRI->def_end()) + std::next(MRI->def_begin(Reg)) != MRI->def_end()) report("Multiple virtual register defs in SSA form", MO, MONum); // Check LiveInts for a live segment, but only for virtual registers. diff --git a/lib/CodeGen/PHIElimination.cpp b/lib/CodeGen/PHIElimination.cpp index bece8948d3d..b104eb45908 100644 --- a/lib/CodeGen/PHIElimination.cpp +++ b/lib/CodeGen/PHIElimination.cpp @@ -186,7 +186,7 @@ bool PHIElimination::EliminatePHINodes(MachineFunction &MF, // Get an iterator to the first instruction after the last PHI node (this may // also be the end of the basic block). MachineBasicBlock::iterator LastPHIIt = - prior(MBB.SkipPHIsAndLabels(MBB.begin())); + std::prev(MBB.SkipPHIsAndLabels(MBB.begin())); while (MBB.front().isPHI()) LowerPHINode(MBB, LastPHIIt); @@ -222,7 +222,7 @@ void PHIElimination::LowerPHINode(MachineBasicBlock &MBB, MachineBasicBlock::iterator LastPHIIt) { ++NumLowered; - MachineBasicBlock::iterator AfterPHIsIt = llvm::next(LastPHIIt); + MachineBasicBlock::iterator AfterPHIsIt = std::next(LastPHIIt); // Unlink the PHI node from the basic block, but don't delete the PHI yet. MachineInstr *MPhi = MBB.remove(MBB.begin()); @@ -267,7 +267,7 @@ void PHIElimination::LowerPHINode(MachineBasicBlock &MBB, // Update live variable information if there is any. if (LV) { - MachineInstr *PHICopy = prior(AfterPHIsIt); + MachineInstr *PHICopy = std::prev(AfterPHIsIt); if (IncomingReg) { LiveVariables::VarInfo &VI = LV->getVarInfo(IncomingReg); @@ -306,7 +306,7 @@ void PHIElimination::LowerPHINode(MachineBasicBlock &MBB, // Update LiveIntervals for the new copy or implicit def. if (LIS) { - MachineInstr *NewInstr = prior(AfterPHIsIt); + MachineInstr *NewInstr = std::prev(AfterPHIsIt); SlotIndex DestCopyIndex = LIS->InsertMachineInstrInMaps(NewInstr); SlotIndex MBBStartIndex = LIS->getMBBStartIdx(&MBB); @@ -444,7 +444,7 @@ void PHIElimination::LowerPHINode(MachineBasicBlock &MBB, } } else { // We just inserted this copy. - KillInst = prior(InsertPos); + KillInst = std::prev(InsertPos); } } assert(KillInst->readsRegister(SrcReg) && "Cannot find kill instruction"); @@ -504,7 +504,7 @@ void PHIElimination::LowerPHINode(MachineBasicBlock &MBB, } } else { // We just inserted this copy. 
- KillInst = prior(InsertPos); + KillInst = std::prev(InsertPos); } } assert(KillInst->readsRegister(SrcReg) && diff --git a/lib/CodeGen/PostRASchedulerList.cpp b/lib/CodeGen/PostRASchedulerList.cpp index 859643f9b8e..bc886971350 100644 --- a/lib/CodeGen/PostRASchedulerList.cpp +++ b/lib/CodeGen/PostRASchedulerList.cpp @@ -306,7 +306,7 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) { MachineBasicBlock::iterator Current = MBB->end(); unsigned Count = MBB->size(), CurrentCount = Count; for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) { - MachineInstr *MI = llvm::prior(I); + MachineInstr *MI = std::prev(I); --Count; // Calls are not scheduling boundaries before register allocation, but // post-ra we don't gain anything by scheduling across calls since we @@ -648,13 +648,13 @@ void SchedulePostRATDList::EmitSchedule() { // Update the Begin iterator, as the first instruction in the block // may have been scheduled later. if (i == 0) - RegionBegin = prior(RegionEnd); + RegionBegin = std::prev(RegionEnd); } // Reinsert any remaining debug_values. for (std::vector >::iterator DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) { - std::pair P = *prior(DI); + std::pair P = *std::prev(DI); MachineInstr *DbgValue = P.first; MachineBasicBlock::iterator OrigPrivMI = P.second; BB->splice(++OrigPrivMI, BB, DbgValue); diff --git a/lib/CodeGen/PrologEpilogInserter.cpp b/lib/CodeGen/PrologEpilogInserter.cpp index f93195625b8..136b1ed4db5 100644 --- a/lib/CodeGen/PrologEpilogInserter.cpp +++ b/lib/CodeGen/PrologEpilogInserter.cpp @@ -759,14 +759,14 @@ void PEI::replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &Fn, SPAdj += Size; MachineBasicBlock::iterator PrevI = BB->end(); - if (I != BB->begin()) PrevI = prior(I); + if (I != BB->begin()) PrevI = std::prev(I); TFI->eliminateCallFramePseudoInstr(Fn, *BB, I); // Visit the instructions created by eliminateCallFramePseudoInstr(). if (PrevI == BB->end()) I = BB->begin(); // The replaced instr was the first in the block. else - I = llvm::next(PrevI); + I = std::next(PrevI); continue; } @@ -849,9 +849,9 @@ void PEI::scavengeFrameVirtualRegs(MachineFunction &Fn) { I = BB->begin(); MachineInstr *MI = I; - MachineBasicBlock::iterator J = llvm::next(I); - MachineBasicBlock::iterator P = I == BB->begin() ? - MachineBasicBlock::iterator(NULL) : llvm::prior(I); + MachineBasicBlock::iterator J = std::next(I); + MachineBasicBlock::iterator P = + I == BB->begin() ? MachineBasicBlock::iterator(NULL) : std::prev(I); // RS should process this instruction before we might scavenge at this // location. This is because we might be replacing a virtual register @@ -894,7 +894,7 @@ void PEI::scavengeFrameVirtualRegs(MachineFunction &Fn) { // spill code will have been inserted in between I and J. This is a // problem because we need the spill code before I: Move I to just // prior to J. - if (I != llvm::prior(J)) { + if (I != std::prev(J)) { BB->splice(J, BB, I); // Before we move I, we need to prepare the RS to visit I again. 
diff --git a/lib/CodeGen/RegAllocPBQP.cpp b/lib/CodeGen/RegAllocPBQP.cpp index 347329591b5..3d2451e7960 100644 --- a/lib/CodeGen/RegAllocPBQP.cpp +++ b/lib/CodeGen/RegAllocPBQP.cpp @@ -264,8 +264,8 @@ PBQPRAProblem *PBQPBuilder::build(MachineFunction *mf, const LiveIntervals *lis, const LiveInterval &l1 = lis->getInterval(vr1); const PBQPRAProblem::AllowedSet &vr1Allowed = p->getAllowedSet(vr1); - for (RegSet::const_iterator vr2Itr = llvm::next(vr1Itr); - vr2Itr != vrEnd; ++vr2Itr) { + for (RegSet::const_iterator vr2Itr = std::next(vr1Itr); vr2Itr != vrEnd; + ++vr2Itr) { unsigned vr2 = *vr2Itr; const LiveInterval &l2 = lis->getInterval(vr2); const PBQPRAProblem::AllowedSet &vr2Allowed = p->getAllowedSet(vr2); diff --git a/lib/CodeGen/RegisterCoalescer.cpp b/lib/CodeGen/RegisterCoalescer.cpp index 6fc4bffdcf9..ff4631afc7a 100644 --- a/lib/CodeGen/RegisterCoalescer.cpp +++ b/lib/CodeGen/RegisterCoalescer.cpp @@ -801,9 +801,9 @@ bool RegisterCoalescer::reMaterializeTrivialDef(CoalescerPair &CP, MachineBasicBlock *MBB = CopyMI->getParent(); MachineBasicBlock::iterator MII = - llvm::next(MachineBasicBlock::iterator(CopyMI)); + std::next(MachineBasicBlock::iterator(CopyMI)); TII->reMaterialize(*MBB, MII, DstReg, SrcIdx, DefMI, *TRI); - MachineInstr *NewMI = prior(MII); + MachineInstr *NewMI = std::prev(MII); LIS->ReplaceMachineInstrInMaps(CopyMI, NewMI); CopyMI->eraseFromParent(); diff --git a/lib/CodeGen/RegisterScavenging.cpp b/lib/CodeGen/RegisterScavenging.cpp index 75ebdaa357f..bfd26dc8732 100644 --- a/lib/CodeGen/RegisterScavenging.cpp +++ b/lib/CodeGen/RegisterScavenging.cpp @@ -175,7 +175,7 @@ void RegScavenger::forward() { Tracking = true; } else { assert(MBBI != MBB->end() && "Already past the end of the basic block!"); - MBBI = llvm::next(MBBI); + MBBI = std::next(MBBI); } assert(MBBI != MBB->end() && "Already at the end of the basic block!"); @@ -415,7 +415,7 @@ unsigned RegScavenger::scavengeRegister(const TargetRegisterClass *RC, "Cannot scavenge register without an emergency spill slot!"); TII->storeRegToStackSlot(*MBB, I, SReg, true, Scavenged[SI].FrameIndex, RC, TRI); - MachineBasicBlock::iterator II = prior(I); + MachineBasicBlock::iterator II = std::prev(I); unsigned FIOperandNum = getFrameIndexOperandNum(II); TRI->eliminateFrameIndex(II, SPAdj, FIOperandNum, this); @@ -423,13 +423,13 @@ unsigned RegScavenger::scavengeRegister(const TargetRegisterClass *RC, // Restore the scavenged register before its use (or first terminator). TII->loadRegFromStackSlot(*MBB, UseMI, SReg, Scavenged[SI].FrameIndex, RC, TRI); - II = prior(UseMI); + II = std::prev(UseMI); FIOperandNum = getFrameIndexOperandNum(II); TRI->eliminateFrameIndex(II, SPAdj, FIOperandNum, this); } - Scavenged[SI].Restore = prior(UseMI); + Scavenged[SI].Restore = std::prev(UseMI); // Doing this here leads to infinite regress. 
// Scavenged[SI].Reg = SReg; diff --git a/lib/CodeGen/ScheduleDAG.cpp b/lib/CodeGen/ScheduleDAG.cpp index bd4c0e209de..d08eb6536e9 100644 --- a/lib/CodeGen/ScheduleDAG.cpp +++ b/lib/CodeGen/ScheduleDAG.cpp @@ -301,8 +301,8 @@ void SUnit::biasCriticalPath() { SUnit::pred_iterator BestI = Preds.begin(); unsigned MaxDepth = BestI->getSUnit()->getDepth(); - for (SUnit::pred_iterator - I = llvm::next(BestI), E = Preds.end(); I != E; ++I) { + for (SUnit::pred_iterator I = std::next(BestI), E = Preds.end(); I != E; + ++I) { if (I->getKind() == SDep::Data && I->getSUnit()->getDepth() > MaxDepth) BestI = I; } diff --git a/lib/CodeGen/ScheduleDAGInstrs.cpp b/lib/CodeGen/ScheduleDAGInstrs.cpp index 3bd75c0aa4d..2ba03933913 100644 --- a/lib/CodeGen/ScheduleDAGInstrs.cpp +++ b/lib/CodeGen/ScheduleDAGInstrs.cpp @@ -784,7 +784,7 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA, MachineInstr *DbgMI = NULL; for (MachineBasicBlock::iterator MII = RegionEnd, MIE = RegionBegin; MII != MIE; --MII) { - MachineInstr *MI = prior(MII); + MachineInstr *MI = std::prev(MII); if (MI && DbgMI) { DbgValues.push_back(std::make_pair(DbgMI, MI)); DbgMI = NULL; @@ -800,7 +800,8 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA, if (RPTracker) { PressureDiff *PDiff = PDiffs ? &(*PDiffs)[SU->NodeNum] : 0; RPTracker->recede(/*LiveUses=*/0, PDiff); - assert(RPTracker->getPos() == prior(MII) && "RPTracker can't find MI"); + assert(RPTracker->getPos() == std::prev(MII) && + "RPTracker can't find MI"); } assert((CanHandleTerminators || (!MI->isTerminator() && !MI->isLabel())) && @@ -1427,7 +1428,7 @@ public: const SDep *backtrack() { DFSStack.pop_back(); - return DFSStack.empty() ? 0 : llvm::prior(DFSStack.back().second); + return DFSStack.empty() ? 0 : std::prev(DFSStack.back().second); } const SUnit *getCurr() const { return DFSStack.back().first; } diff --git a/lib/CodeGen/SelectionDAG/FastISel.cpp b/lib/CodeGen/SelectionDAG/FastISel.cpp index bcea6ad2185..329af7eaf12 100644 --- a/lib/CodeGen/SelectionDAG/FastISel.cpp +++ b/lib/CodeGen/SelectionDAG/FastISel.cpp @@ -344,7 +344,7 @@ FastISel::SavePoint FastISel::enterLocalValueArea() { void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) { if (FuncInfo.InsertPt != FuncInfo.MBB->begin()) - LastLocalValue = llvm::prior(FuncInfo.InsertPt); + LastLocalValue = std::prev(FuncInfo.InsertPt); // Restore the previous insert position. FuncInfo.InsertPt = OldInsertPt.InsertPt; diff --git a/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp index 3fb2d9bdf55..8f039817dd4 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp @@ -88,7 +88,7 @@ bool VectorLegalizer::Run() { // Before we start legalizing vector nodes, check if there are any vectors. bool HasVectors = false; for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(), - E = prior(DAG.allnodes_end()); I != llvm::next(E); ++I) { + E = std::prev(DAG.allnodes_end()); I != std::next(E); ++I) { // Check if the values of the nodes contain vectors. We don't need to check // the operands because we are going to check their values at some point. for (SDNode::value_iterator J = I->value_begin(), E = I->value_end(); @@ -112,7 +112,7 @@ bool VectorLegalizer::Run() { // node is only legalized after all of its operands are legalized. 
DAG.AssignTopologicalOrder(); for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(), - E = prior(DAG.allnodes_end()); I != llvm::next(E); ++I) + E = std::prev(DAG.allnodes_end()); I != std::next(E); ++I) LegalizeOp(SDValue(I, 0)); // Finally, it's possible the root changed. Get the new root. diff --git a/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp b/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp index 1dd2128b8b6..3b3424dfe09 100644 --- a/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp +++ b/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp @@ -603,7 +603,7 @@ SUnit *ResourcePriorityQueue::pop() { std::vector::iterator Best = Queue.begin(); if (!DisableDFASched) { signed BestCost = SUSchedulingCost(*Best); - for (std::vector::iterator I = llvm::next(Queue.begin()), + for (std::vector::iterator I = std::next(Queue.begin()), E = Queue.end(); I != E; ++I) { if (SUSchedulingCost(*I) > BestCost) { @@ -614,14 +614,14 @@ SUnit *ResourcePriorityQueue::pop() { } // Use default TD scheduling mechanism. else { - for (std::vector::iterator I = llvm::next(Queue.begin()), + for (std::vector::iterator I = std::next(Queue.begin()), E = Queue.end(); I != E; ++I) if (Picker(*Best, *I)) Best = I; } SUnit *V = *Best; - if (Best != prior(Queue.end())) + if (Best != std::prev(Queue.end())) std::swap(*Best, Queue.back()); Queue.pop_back(); @@ -633,7 +633,7 @@ SUnit *ResourcePriorityQueue::pop() { void ResourcePriorityQueue::remove(SUnit *SU) { assert(!Queue.empty() && "Queue is empty!"); std::vector::iterator I = std::find(Queue.begin(), Queue.end(), SU); - if (I != prior(Queue.end())) + if (I != std::prev(Queue.end())) std::swap(*I, Queue.back()); Queue.pop_back(); diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp index 1a562d74b41..c49810935d8 100644 --- a/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp +++ b/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp @@ -1708,7 +1708,7 @@ public: assert(SU->NodeQueueId != 0 && "Not in queue!"); std::vector::iterator I = std::find(Queue.begin(), Queue.end(), SU); - if (I != prior(Queue.end())) + if (I != std::prev(Queue.end())) std::swap(*I, Queue.back()); Queue.pop_back(); SU->NodeQueueId = 0; @@ -1738,12 +1738,12 @@ protected: template static SUnit *popFromQueueImpl(std::vector &Q, SF &Picker) { std::vector::iterator Best = Q.begin(); - for (std::vector::iterator I = llvm::next(Q.begin()), + for (std::vector::iterator I = std::next(Q.begin()), E = Q.end(); I != E; ++I) if (Picker(*Best, *I)) Best = I; SUnit *V = *Best; - if (Best != prior(Q.end())) + if (Best != std::prev(Q.end())) std::swap(*Best, Q.back()); Q.pop_back(); return V; diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp index 054e3dd840b..5639894d096 100644 --- a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp +++ b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp @@ -738,13 +738,13 @@ ProcessSourceNode(SDNode *N, SelectionDAG *DAG, InstrEmitter &Emitter, if (Emitter.getInsertPos() == BB->begin() || BB->back().isPHI() || // Fast-isel may have inserted some instructions, in which case the // BB->back().isPHI() test will not fire when we want it to. - prior(Emitter.getInsertPos())->isPHI()) { + std::prev(Emitter.getInsertPos())->isPHI()) { // Did not insert any instruction. 
Orders.push_back(std::make_pair(Order, (MachineInstr*)0)); return; } - Orders.push_back(std::make_pair(Order, prior(Emitter.getInsertPos()))); + Orders.push_back(std::make_pair(Order, std::prev(Emitter.getInsertPos()))); ProcessSDDbgValues(N, DAG, Emitter, Orders, VRBaseMap, Order); } diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index 0ff93a129e4..ed66b8205f5 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -2650,7 +2650,7 @@ size_t SelectionDAGBuilder::Clusterify(CaseVector& Cases, if (Cases.size() >= 2) // Must recompute end() each iteration because it may be // invalidated by erase if we hold on to it - for (CaseItr I = Cases.begin(), J = llvm::next(Cases.begin()); + for (CaseItr I = Cases.begin(), J = std::next(Cases.begin()); J != Cases.end(); ) { const APInt& nextValue = cast(J->Low)->getValue(); const APInt& currentValue = cast(I->High)->getValue(); diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp index 6f980884bea..251e33109bb 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp @@ -357,7 +357,7 @@ void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const { for (MachineSDNode::mmo_iterator i = MN->memoperands_begin(), e = MN->memoperands_end(); i != e; ++i) { OS << **i; - if (llvm::next(i) != e) + if (std::next(i) != e) OS << " "; } OS << ">"; diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp index 5534354ab00..01c343818bd 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp @@ -457,7 +457,7 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) { if (Def) { MachineBasicBlock::iterator InsertPos = Def; // FIXME: VR def may not be in entry block. - Def->getParent()->insert(llvm::next(InsertPos), MI); + Def->getParent()->insert(std::next(InsertPos), MI); } else DEBUG(dbgs() << "Dropping debug info for dead vreg" << TargetRegisterInfo::virtReg2Index(Reg) << "\n"); @@ -1067,7 +1067,7 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) { // where they are, so we can be sure to emit subsequent instructions // after them. if (FuncInfo->InsertPt != FuncInfo->MBB->begin()) - FastIS->setLastLocalValue(llvm::prior(FuncInfo->InsertPt)); + FastIS->setLastLocalValue(std::prev(FuncInfo->InsertPt)); else FastIS->setLastLocalValue(0); } @@ -1075,7 +1075,7 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) { unsigned NumFastIselRemaining = std::distance(Begin, End); // Do FastISel on as many instructions as possible. for (; BI != Begin; --BI) { - const Instruction *Inst = llvm::prior(BI); + const Instruction *Inst = std::prev(BI); // If we no longer require this instruction, skip it. if (isFoldedOrDeadInstruction(Inst, FuncInfo)) { @@ -1096,7 +1096,7 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) { // Try to fold the load if so. 
const Instruction *BeforeInst = Inst; while (BeforeInst != Begin) { - BeforeInst = llvm::prior(BasicBlock::const_iterator(BeforeInst)); + BeforeInst = std::prev(BasicBlock::const_iterator(BeforeInst)); if (!isFoldedOrDeadInstruction(BeforeInst, FuncInfo)) break; } @@ -1104,7 +1104,7 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) { BeforeInst->hasOneUse() && FastIS->tryToFoldLoad(cast(BeforeInst), Inst)) { // If we succeeded, don't re-select the load. - BI = llvm::next(BasicBlock::const_iterator(BeforeInst)); + BI = std::next(BasicBlock::const_iterator(BeforeInst)); --NumFastIselRemaining; ++NumFastIselSuccess; } diff --git a/lib/CodeGen/SjLjEHPrepare.cpp b/lib/CodeGen/SjLjEHPrepare.cpp index 690e8b1aa24..eaab230655b 100644 --- a/lib/CodeGen/SjLjEHPrepare.cpp +++ b/lib/CodeGen/SjLjEHPrepare.cpp @@ -173,7 +173,7 @@ void SjLjEHPrepare::substituteLPadValues(LandingPadInst *LPI, Value *ExnVal, Type *LPadType = LPI->getType(); Value *LPadVal = UndefValue::get(LPadType); IRBuilder<> Builder( - llvm::next(BasicBlock::iterator(cast(SelVal)))); + std::next(BasicBlock::iterator(cast(SelVal)))); LPadVal = Builder.CreateInsertValue(LPadVal, ExnVal, 0, "lpad.val"); LPadVal = Builder.CreateInsertValue(LPadVal, SelVal, 1, "lpad.val"); diff --git a/lib/CodeGen/SlotIndexes.cpp b/lib/CodeGen/SlotIndexes.cpp index 20049a89d15..a6c62617efa 100644 --- a/lib/CodeGen/SlotIndexes.cpp +++ b/lib/CodeGen/SlotIndexes.cpp @@ -129,7 +129,7 @@ void SlotIndexes::renumberIndexes(IndexList::iterator curItr) { const unsigned Space = SlotIndex::InstrDist/2; assert((Space & 3) == 0 && "InstrDist must be a multiple of 2*NUM"); - IndexList::iterator startItr = prior(curItr); + IndexList::iterator startItr = std::prev(curItr); unsigned index = startItr->getIndex(); do { curItr->setIndex(index += Space); diff --git a/lib/CodeGen/SpillPlacement.cpp b/lib/CodeGen/SpillPlacement.cpp index fb5b927d499..5f734697067 100644 --- a/lib/CodeGen/SpillPlacement.cpp +++ b/lib/CodeGen/SpillPlacement.cpp @@ -327,7 +327,7 @@ void SpillPlacement::iterate() { // iteration is not zero, the last node was just updated. bool Changed = false; for (SmallVectorImpl::const_reverse_iterator I = - iteration == 0 ? Linked.rbegin() : llvm::next(Linked.rbegin()), + iteration == 0 ? Linked.rbegin() : std::next(Linked.rbegin()), E = Linked.rend(); I != E; ++I) { unsigned n = *I; if (nodes[n].update(nodes)) { @@ -342,7 +342,7 @@ void SpillPlacement::iterate() { // Scan forwards, skipping the first node which was just updated. 
Changed = false; for (SmallVectorImpl::const_iterator I = - llvm::next(Linked.begin()), E = Linked.end(); I != E; ++I) { + std::next(Linked.begin()), E = Linked.end(); I != E; ++I) { unsigned n = *I; if (nodes[n].update(nodes)) { Changed = true; diff --git a/lib/CodeGen/Spiller.cpp b/lib/CodeGen/Spiller.cpp index d5b3a4a9400..560b040a40d 100644 --- a/lib/CodeGen/Spiller.cpp +++ b/lib/CodeGen/Spiller.cpp @@ -143,9 +143,9 @@ protected: if (hasDef) { MachineInstrSpan MIS(miItr); - tii->storeRegToStackSlot(*mi->getParent(), llvm::next(miItr), NewVReg, + tii->storeRegToStackSlot(*mi->getParent(), std::next(miItr), NewVReg, true, ss, trc, tri); - lis->InsertMachineInstrRangeInMaps(llvm::next(miItr), MIS.end()); + lis->InsertMachineInstrRangeInMaps(std::next(miItr), MIS.end()); } } } diff --git a/lib/CodeGen/SplitKit.cpp b/lib/CodeGen/SplitKit.cpp index 68a15f7fab3..d50c5ed2ecb 100644 --- a/lib/CodeGen/SplitKit.cpp +++ b/lib/CodeGen/SplitKit.cpp @@ -509,7 +509,7 @@ SlotIndex SplitEditor::enterIntvAfter(SlotIndex Idx) { assert(MI && "enterIntvAfter called with invalid index"); VNInfo *VNI = defFromParent(OpenIdx, ParentVNI, Idx, *MI->getParent(), - llvm::next(MachineBasicBlock::iterator(MI))); + std::next(MachineBasicBlock::iterator(MI))); return VNI->def; } @@ -570,7 +570,7 @@ SlotIndex SplitEditor::leaveIntvAfter(SlotIndex Idx) { } VNInfo *VNI = defFromParent(0, ParentVNI, Boundary, *MI->getParent(), - llvm::next(MachineBasicBlock::iterator(MI))); + std::next(MachineBasicBlock::iterator(MI))); return VNI->def; } diff --git a/lib/CodeGen/StackMaps.cpp b/lib/CodeGen/StackMaps.cpp index 5d077932df3..be3253c6f82 100644 --- a/lib/CodeGen/StackMaps.cpp +++ b/lib/CodeGen/StackMaps.cpp @@ -161,7 +161,7 @@ StackMaps::parseRegisterLiveOutMask(const uint32_t *Mask) const { std::sort(LiveOuts.begin(), LiveOuts.end()); for (LiveOutVec::iterator I = LiveOuts.begin(), E = LiveOuts.end(); I != E; ++I) { - for (LiveOutVec::iterator II = next(I); II != E; ++II) { + for (LiveOutVec::iterator II = std::next(I); II != E; ++II) { if (I->RegNo != II->RegNo) { // Skip all the now invalid entries. 
I = --II; @@ -192,7 +192,7 @@ void StackMaps::recordStackMapOpers(const MachineInstr &MI, uint64_t ID, if (recordResult) { assert(PatchPointOpers(&MI).hasDef() && "Stackmap has no return value."); - parseOperand(MI.operands_begin(), llvm::next(MI.operands_begin()), + parseOperand(MI.operands_begin(), std::next(MI.operands_begin()), Locations, LiveOuts); } @@ -232,7 +232,7 @@ void StackMaps::recordStackMap(const MachineInstr &MI) { assert(MI.getOpcode() == TargetOpcode::STACKMAP && "expected stackmap"); int64_t ID = MI.getOperand(0).getImm(); - recordStackMapOpers(MI, ID, llvm::next(MI.operands_begin(), 2), + recordStackMapOpers(MI, ID, std::next(MI.operands_begin(), 2), MI.operands_end()); } @@ -243,7 +243,7 @@ void StackMaps::recordPatchPoint(const MachineInstr &MI) { int64_t ID = opers.getMetaOper(PatchPointOpers::IDPos).getImm(); MachineInstr::const_mop_iterator MOI = - llvm::next(MI.operands_begin(), opers.getStackMapStartIdx()); + std::next(MI.operands_begin(), opers.getStackMapStartIdx()); recordStackMapOpers(MI, ID, MOI, MI.operands_end(), opers.isAnyReg() && opers.hasDef()); diff --git a/lib/CodeGen/StackProtector.cpp b/lib/CodeGen/StackProtector.cpp index 349b733418e..4a3487c6e0c 100644 --- a/lib/CodeGen/StackProtector.cpp +++ b/lib/CodeGen/StackProtector.cpp @@ -284,8 +284,7 @@ static CallInst *FindPotentialTailCall(BasicBlock *BB, ReturnInst *RI, const unsigned MaxSearch = 4; bool NoInterposingChain = true; - for (BasicBlock::reverse_iterator I = llvm::next(BB->rbegin()), - E = BB->rend(); + for (BasicBlock::reverse_iterator I = std::next(BB->rbegin()), E = BB->rend(); I != E && SearchCounter < MaxSearch; ++I) { Instruction *Inst = &*I; diff --git a/lib/CodeGen/StackSlotColoring.cpp b/lib/CodeGen/StackSlotColoring.cpp index 77c7047cde5..b95047e9ac7 100644 --- a/lib/CodeGen/StackSlotColoring.cpp +++ b/lib/CodeGen/StackSlotColoring.cpp @@ -385,8 +385,8 @@ bool StackSlotColoring::RemoveDeadStores(MachineBasicBlock* MBB) { toErase.push_back(I); continue; } - - MachineBasicBlock::iterator NextMI = llvm::next(I); + + MachineBasicBlock::iterator NextMI = std::next(I); if (NextMI == MBB->end()) continue; unsigned LoadReg = 0; diff --git a/lib/CodeGen/TailDuplication.cpp b/lib/CodeGen/TailDuplication.cpp index 170a8e19ed4..c493171a8cc 100644 --- a/lib/CodeGen/TailDuplication.cpp +++ b/lib/CodeGen/TailDuplication.cpp @@ -697,7 +697,7 @@ TailDuplicatePass::duplicateSimpleBB(MachineBasicBlock *TailBB, << "From simple Succ: " << *TailBB); MachineBasicBlock *NewTarget = *TailBB->succ_begin(); - MachineBasicBlock *NextBB = llvm::next(MachineFunction::iterator(PredBB)); + MachineBasicBlock *NextBB = std::next(MachineFunction::iterator(PredBB)); // Make PredFBB explicit. if (PredCond.empty()) @@ -798,7 +798,7 @@ TailDuplicatePass::TailDuplicate(MachineBasicBlock *TailBB, // Update PredBB livein. RS->enterBasicBlock(PredBB); if (!PredBB->empty()) - RS->forward(prior(PredBB->end())); + RS->forward(std::prev(PredBB->end())); BitVector RegsLiveAtExit(TRI->getNumRegs()); RS->getRegsUsed(RegsLiveAtExit, false); for (MachineBasicBlock::livein_iterator I = TailBB->livein_begin(), @@ -857,7 +857,7 @@ TailDuplicatePass::TailDuplicate(MachineBasicBlock *TailBB, // If TailBB was duplicated into all its predecessors except for the prior // block, which falls through unconditionally, move the contents of this // block into the prior block. 
- MachineBasicBlock *PrevBB = prior(MachineFunction::iterator(TailBB)); + MachineBasicBlock *PrevBB = std::prev(MachineFunction::iterator(TailBB)); MachineBasicBlock *PriorTBB = 0, *PriorFBB = 0; SmallVector PriorCond; // This has to check PrevBB->succ_size() because EH edges are ignored by diff --git a/lib/CodeGen/TwoAddressInstructionPass.cpp b/lib/CodeGen/TwoAddressInstructionPass.cpp index 15105d4b3f7..cb4f8cbe866 100644 --- a/lib/CodeGen/TwoAddressInstructionPass.cpp +++ b/lib/CodeGen/TwoAddressInstructionPass.cpp @@ -255,7 +255,7 @@ sink3AddrInstruction(MachineInstr *MI, unsigned SavedReg, ++KillPos; unsigned NumVisited = 0; - for (MachineBasicBlock::iterator I = llvm::next(OldPos); I != KillPos; ++I) { + for (MachineBasicBlock::iterator I = std::next(OldPos); I != KillPos; ++I) { MachineInstr *OtherMI = I; // DBG_VALUE cannot be counted against the limit. if (OtherMI->isDebugValue()) @@ -417,7 +417,7 @@ static bool isKilled(MachineInstr &MI, unsigned Reg, MachineRegisterInfo::def_iterator Begin = MRI->def_begin(Reg); // If there are multiple defs, we can't do a simple analysis, so just // go with what the kill flag says. - if (llvm::next(Begin) != MRI->def_end()) + if (std::next(Begin) != MRI->def_end()) return true; DefMI = &*Begin; bool IsSrcPhys, IsDstPhys; @@ -647,7 +647,7 @@ TwoAddressInstructionPass::convertInstTo3Addr(MachineBasicBlock::iterator &mi, if (!Sunk) { DistanceMap.insert(std::make_pair(NewMI, Dist)); mi = NewMI; - nmi = llvm::next(mi); + nmi = std::next(mi); } // Update source and destination register maps. @@ -816,7 +816,7 @@ rescheduleMIBelowKill(MachineBasicBlock::iterator &mi, // Move the copies connected to MI down as well. MachineBasicBlock::iterator Begin = MI; - MachineBasicBlock::iterator AfterMI = llvm::next(Begin); + MachineBasicBlock::iterator AfterMI = std::next(Begin); MachineBasicBlock::iterator End = AfterMI; while (End->isCopy() && Defs.count(End->getOperand(1).getReg())) { @@ -876,7 +876,7 @@ rescheduleMIBelowKill(MachineBasicBlock::iterator &mi, } // Move debug info as well. - while (Begin != MBB->begin() && llvm::prior(Begin)->isDebugValue()) + while (Begin != MBB->begin() && std::prev(Begin)->isDebugValue()) --Begin; nmi = End; @@ -891,7 +891,7 @@ rescheduleMIBelowKill(MachineBasicBlock::iterator &mi, LIS->handleMove(CopyMI); InsertPos = CopyMI; } - End = llvm::next(MachineBasicBlock::iterator(MI)); + End = std::next(MachineBasicBlock::iterator(MI)); } // Copies following MI may have been moved as well. @@ -1060,15 +1060,15 @@ rescheduleKillAboveMI(MachineBasicBlock::iterator &mi, // Move the old kill above MI, don't forget to move debug info as well. MachineBasicBlock::iterator InsertPos = mi; - while (InsertPos != MBB->begin() && llvm::prior(InsertPos)->isDebugValue()) + while (InsertPos != MBB->begin() && std::prev(InsertPos)->isDebugValue()) --InsertPos; MachineBasicBlock::iterator From = KillMI; - MachineBasicBlock::iterator To = llvm::next(From); - while (llvm::prior(From)->isDebugValue()) + MachineBasicBlock::iterator To = std::next(From); + while (std::prev(From)->isDebugValue()) --From; MBB->splice(InsertPos, MBB, From, To); - nmi = llvm::prior(InsertPos); // Backtrack so we process the moved instr. + nmi = std::prev(InsertPos); // Backtrack so we process the moved instr. 
DistanceMap.erase(DI); // Update live variables @@ -1534,7 +1534,7 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &Func) { Processed.clear(); for (MachineBasicBlock::iterator mi = MBB->begin(), me = MBB->end(); mi != me; ) { - MachineBasicBlock::iterator nmi = llvm::next(mi); + MachineBasicBlock::iterator nmi = std::next(mi); if (mi->isDebugValue()) { mi = nmi; continue; @@ -1689,7 +1689,7 @@ eliminateRegSequence(MachineBasicBlock::iterator &MBBI) { } MachineBasicBlock::iterator EndMBBI = - llvm::next(MachineBasicBlock::iterator(MI)); + std::next(MachineBasicBlock::iterator(MI)); if (!DefEmitted) { DEBUG(dbgs() << "Turned: " << *MI << " into an IMPLICIT_DEF"); diff --git a/lib/IR/BasicBlock.cpp b/lib/IR/BasicBlock.cpp index 1d7db917915..125b20615bd 100644 --- a/lib/IR/BasicBlock.cpp +++ b/lib/IR/BasicBlock.cpp @@ -304,7 +304,7 @@ BasicBlock *BasicBlock::splitBasicBlock(iterator I, const Twine &BBName) { assert(I != InstList.end() && "Trying to get me to create degenerate basic block!"); - BasicBlock *InsertBefore = llvm::next(Function::iterator(this)) + BasicBlock *InsertBefore = std::next(Function::iterator(this)) .getNodePtrUnchecked(); BasicBlock *New = BasicBlock::Create(getContext(), BBName, getParent(), InsertBefore); diff --git a/lib/IR/Constants.cpp b/lib/IR/Constants.cpp index ecd2cfd9fd1..79be619e065 100644 --- a/lib/IR/Constants.cpp +++ b/lib/IR/Constants.cpp @@ -1047,7 +1047,7 @@ bool ConstantExpr::isGEPWithNoNotionalOverIndexing() const { if (getOpcode() != Instruction::GetElementPtr) return false; gep_type_iterator GEPI = gep_type_begin(this), E = gep_type_end(this); - User::const_op_iterator OI = llvm::next(this->op_begin()); + User::const_op_iterator OI = std::next(this->op_begin()); // Skip the first index, as it has no static limit. ++GEPI; diff --git a/lib/IR/PassManager.cpp b/lib/IR/PassManager.cpp index d6699f44f8a..8185e55a5e5 100644 --- a/lib/IR/PassManager.cpp +++ b/lib/IR/PassManager.cpp @@ -123,7 +123,7 @@ FunctionAnalysisManager::getResultImpl(void *PassID, Function *F) { if (Inserted) { FunctionAnalysisResultListT &ResultList = FunctionAnalysisResultLists[F]; ResultList.push_back(std::make_pair(PassID, lookupPass(PassID).run(F, this))); - RI->second = llvm::prior(ResultList.end()); + RI->second = std::prev(ResultList.end()); } return *RI->second->second; diff --git a/lib/MC/MCObjectStreamer.cpp b/lib/MC/MCObjectStreamer.cpp index bfa3c8cfa34..119df0a083d 100644 --- a/lib/MC/MCObjectStreamer.cpp +++ b/lib/MC/MCObjectStreamer.cpp @@ -53,7 +53,7 @@ MCFragment *MCObjectStreamer::getCurrentFragment() const { assert(getCurrentSectionData() && "No current section!"); if (CurInsertionPoint != getCurrentSectionData()->getFragmentList().begin()) - return prior(CurInsertionPoint); + return std::prev(CurInsertionPoint); return 0; } diff --git a/lib/Target/AArch64/AArch64BranchFixupPass.cpp b/lib/Target/AArch64/AArch64BranchFixupPass.cpp index 124687a06ce..c03cdde4bc8 100644 --- a/lib/Target/AArch64/AArch64BranchFixupPass.cpp +++ b/lib/Target/AArch64/AArch64BranchFixupPass.cpp @@ -238,10 +238,10 @@ static bool BBHasFallthrough(MachineBasicBlock *MBB) { // Get the next machine basic block in the function. MachineFunction::iterator MBBI = MBB; // Can't fall off end of function. 
- if (llvm::next(MBBI) == MBB->getParent()->end()) + if (std::next(MBBI) == MBB->getParent()->end()) return false; - MachineBasicBlock *NextBB = llvm::next(MBBI); + MachineBasicBlock *NextBB = std::next(MBBI); for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(), E = MBB->succ_end(); I != E; ++I) if (*I == NextBB) @@ -528,7 +528,7 @@ AArch64BranchFixup::fixupConditionalBr(ImmBranch &Br) { ++NumCBrFixed; if (BMI != MI) { - if (llvm::next(MachineBasicBlock::iterator(MI)) == prior(MBB->end()) && + if (std::next(MachineBasicBlock::iterator(MI)) == std::prev(MBB->end()) && BMI->getOpcode() == AArch64::Bimm) { // Last MI in the BB is an unconditional branch. We can swap destinations: // b.eq L1 (temporarily b.ne L1 after first change) @@ -575,7 +575,7 @@ AArch64BranchFixup::fixupConditionalBr(ImmBranch &Br) { // b L1 // splitbb/fallthroughbb: // [old b L2/real continuation] - MachineBasicBlock *NextBB = llvm::next(MachineFunction::iterator(MBB)); + MachineBasicBlock *NextBB = std::next(MachineFunction::iterator(MBB)); DEBUG(dbgs() << " Insert B to BB#" << MI->getOperand(CondBrMBBOperand).getMBB()->getNumber() diff --git a/lib/Target/AArch64/AArch64FrameLowering.cpp b/lib/Target/AArch64/AArch64FrameLowering.cpp index 90bfbf88908..410295e3396 100644 --- a/lib/Target/AArch64/AArch64FrameLowering.cpp +++ b/lib/Target/AArch64/AArch64FrameLowering.cpp @@ -237,7 +237,7 @@ AArch64FrameLowering::emitEpilogue(MachineFunction &MF, // Delete the pseudo instruction TC_RETURN. - MachineInstr *NewMI = prior(MBBI); + MachineInstr *NewMI = std::prev(MBBI); MBB.erase(MBBI); MBBI = NewMI; diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp index b78ed1c5e1e..447f5005e55 100644 --- a/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -629,8 +629,7 @@ AArch64TargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, // Transfer the remainder of BB and its successor edges to exitMBB. exitMBB->splice(exitMBB->begin(), BB, - llvm::next(MachineBasicBlock::iterator(MI)), - BB->end()); + std::next(MachineBasicBlock::iterator(MI)), BB->end()); exitMBB->transferSuccessorsAndUpdatePHIs(BB); const TargetRegisterClass *TRC @@ -724,8 +723,7 @@ AArch64TargetLowering::emitAtomicBinaryMinMax(MachineInstr *MI, // Transfer the remainder of BB and its successor edges to exitMBB. exitMBB->splice(exitMBB->begin(), BB, - llvm::next(MachineBasicBlock::iterator(MI)), - BB->end()); + std::next(MachineBasicBlock::iterator(MI)), BB->end()); exitMBB->transferSuccessorsAndUpdatePHIs(BB); unsigned scratch = MRI.createVirtualRegister(TRC); @@ -808,8 +806,7 @@ AArch64TargetLowering::emitAtomicCmpSwap(MachineInstr *MI, // Transfer the remainder of BB and its successor edges to exitMBB. 
exitMBB->splice(exitMBB->begin(), BB, - llvm::next(MachineBasicBlock::iterator(MI)), - BB->end()); + std::next(MachineBasicBlock::iterator(MI)), BB->end()); exitMBB->transferSuccessorsAndUpdatePHIs(BB); // thisMBB: @@ -900,8 +897,7 @@ AArch64TargetLowering::EmitF128CSEL(MachineInstr *MI, MF->insert(It, EndBB); // Transfer rest of current basic-block to EndBB - EndBB->splice(EndBB->begin(), MBB, - llvm::next(MachineBasicBlock::iterator(MI)), + EndBB->splice(EndBB->begin(), MBB, std::next(MachineBasicBlock::iterator(MI)), MBB->end()); EndBB->transferSuccessorsAndUpdatePHIs(MBB); diff --git a/lib/Target/ARM/ARMBaseInstrInfo.cpp b/lib/Target/ARM/ARMBaseInstrInfo.cpp index 320f54a327e..a4b8953ab21 100644 --- a/lib/Target/ARM/ARMBaseInstrInfo.cpp +++ b/lib/Target/ARM/ARMBaseInstrInfo.cpp @@ -336,7 +336,7 @@ ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB, // If we can modify the function, delete everything below this // unconditional branch. if (AllowModify) { - MachineBasicBlock::iterator DI = llvm::next(I); + MachineBasicBlock::iterator DI = std::next(I); while (DI != MBB.end()) { MachineInstr *InstToDelete = DI; ++DI; @@ -2176,7 +2176,7 @@ static bool isSuitableForMask(MachineInstr *&MI, unsigned SrcReg, // Walk down one instruction which is potentially an 'and'. const MachineInstr &Copy = *MI; MachineBasicBlock::iterator AND( - llvm::next(MachineBasicBlock::iterator(MI))); + std::next(MachineBasicBlock::iterator(MI))); if (AND == MI->getParent()->end()) return false; MI = AND; return isSuitableForMask(MI, Copy.getOperand(0).getReg(), @@ -3253,8 +3253,7 @@ static const MachineInstr *getBundledDefMI(const TargetRegisterInfo *TRI, Dist = 0; MachineBasicBlock::const_iterator I = MI; ++I; - MachineBasicBlock::const_instr_iterator II = - llvm::prior(I.getInstrIterator()); + MachineBasicBlock::const_instr_iterator II = std::prev(I.getInstrIterator()); assert(II->isInsideBundle() && "Empty bundle?"); int Idx = -1; diff --git a/lib/Target/ARM/ARMConstantIslandPass.cpp b/lib/Target/ARM/ARMConstantIslandPass.cpp index cff5ce27bca..a50ddcdd182 100644 --- a/lib/Target/ARM/ARMConstantIslandPass.cpp +++ b/lib/Target/ARM/ARMConstantIslandPass.cpp @@ -569,10 +569,10 @@ static bool BBHasFallthrough(MachineBasicBlock *MBB) { // Get the next machine basic block in the function. MachineFunction::iterator MBBI = MBB; // Can't fall off end of function. 
- if (llvm::next(MBBI) == MBB->getParent()->end()) + if (std::next(MBBI) == MBB->getParent()->end()) return false; - MachineBasicBlock *NextBB = llvm::next(MBBI); + MachineBasicBlock *NextBB = std::next(MBBI); for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(), E = MBB->succ_end(); I != E; ++I) if (*I == NextBB) @@ -917,7 +917,7 @@ MachineBasicBlock *ARMConstantIslands::splitBlockBeforeInstr(MachineInstr *MI) { CompareMBBNumbers); MachineBasicBlock* WaterBB = *IP; if (WaterBB == OrigBB) - WaterList.insert(llvm::next(IP), NewBB); + WaterList.insert(std::next(IP), NewBB); else WaterList.insert(IP, OrigBB); NewWaterList.insert(OrigBB); @@ -1188,7 +1188,7 @@ bool ARMConstantIslands::findAvailableWater(CPUser &U, unsigned UserOffset, return false; unsigned BestGrowth = ~0u; - for (water_iterator IP = prior(WaterList.end()), B = WaterList.begin();; + for (water_iterator IP = std::prev(WaterList.end()), B = WaterList.begin();; --IP) { MachineBasicBlock* WaterBB = *IP; // Check if water is in range and is either at a lower address than the @@ -1249,7 +1249,7 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex, if (isOffsetInRange(UserOffset, CPEOffset, U)) { DEBUG(dbgs() << "Split at end of BB#" << UserMBB->getNumber() << format(", expected CPE offset %#x\n", CPEOffset)); - NewMBB = llvm::next(MachineFunction::iterator(UserMBB)); + NewMBB = std::next(MachineFunction::iterator(UserMBB)); // Add an unconditional branch from UserMBB to fallthrough block. Record // it for branch lengthening; this new branch will not get out of range, // but if the preceding conditional branch is out of range, the targets @@ -1320,8 +1320,7 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex, MachineInstr *LastIT = 0; for (unsigned Offset = UserOffset+TII->GetInstSizeInBytes(UserMI); Offset < BaseInsertOffset; - Offset += TII->GetInstSizeInBytes(MI), - MI = llvm::next(MI)) { + Offset += TII->GetInstSizeInBytes(MI), MI = std::next(MI)) { assert(MI != UserMBB->end() && "Fell off end of block"); if (CPUIndex < NumCPUsers && CPUsers[CPUIndex].MI == MI) { CPUser &U = CPUsers[CPUIndex]; @@ -1393,7 +1392,7 @@ bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) { NewWaterList.insert(NewIsland); // The new CPE goes before the following block (NewMBB). - NewMBB = llvm::next(MachineFunction::iterator(WaterBB)); + NewMBB = std::next(MachineFunction::iterator(WaterBB)); } else { // No water found. @@ -1405,7 +1404,7 @@ bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) { // next iteration for constant pools, but in this context, we don't want // it. Check for this so it will be removed from the WaterList. // Also remove any entry from NewWaterList. - MachineBasicBlock *WaterBB = prior(MachineFunction::iterator(NewMBB)); + MachineBasicBlock *WaterBB = std::prev(MachineFunction::iterator(NewMBB)); IP = std::find(WaterList.begin(), WaterList.end(), WaterBB); if (IP != WaterList.end()) NewWaterList.erase(WaterBB); @@ -1443,7 +1442,7 @@ bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) { // Increase the size of the island block to account for the new entry. BBInfo[NewIsland->getNumber()].Size += Size; - adjustBBOffsetsAfter(llvm::prior(MachineFunction::iterator(NewIsland))); + adjustBBOffsetsAfter(std::prev(MachineFunction::iterator(NewIsland))); // Finally, change the CPI in the instruction operand to be ID. 
for (unsigned i = 0, e = UserMI->getNumOperands(); i != e; ++i) @@ -1592,7 +1591,7 @@ ARMConstantIslands::fixupConditionalBr(ImmBranch &Br) { ++NumCBrFixed; if (BMI != MI) { - if (llvm::next(MachineBasicBlock::iterator(MI)) == prior(MBB->end()) && + if (std::next(MachineBasicBlock::iterator(MI)) == std::prev(MBB->end()) && BMI->getOpcode() == Br.UncondBr) { // Last MI in the BB is an unconditional branch. Can we simply invert the // condition and swap destinations: @@ -1622,7 +1621,7 @@ ARMConstantIslands::fixupConditionalBr(ImmBranch &Br) { MBB->back().eraseFromParent(); // BBInfo[SplitBB].Offset is wrong temporarily, fixed below } - MachineBasicBlock *NextBB = llvm::next(MachineFunction::iterator(MBB)); + MachineBasicBlock *NextBB = std::next(MachineFunction::iterator(MBB)); DEBUG(dbgs() << " Insert B to BB#" << DestBB->getNumber() << " also invert condition and change dest. to BB#" @@ -2017,7 +2016,7 @@ adjustJTTargetBlockForward(MachineBasicBlock *BB, MachineBasicBlock *JTBB) { SmallVector Cond; SmallVector CondPrior; MachineFunction::iterator BBi = BB; - MachineFunction::iterator OldPrior = prior(BBi); + MachineFunction::iterator OldPrior = std::prev(BBi); // If the block terminator isn't analyzable, don't try to move the block bool B = TII->AnalyzeBranch(*BB, TBB, FBB, Cond); diff --git a/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/lib/Target/ARM/ARMExpandPseudoInsts.cpp index 62f9e10252e..c6b706ca142 100644 --- a/lib/Target/ARM/ARMExpandPseudoInsts.cpp +++ b/lib/Target/ARM/ARMExpandPseudoInsts.cpp @@ -1273,7 +1273,7 @@ bool ARMExpandPseudo::ExpandMBB(MachineBasicBlock &MBB) { MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end(); while (MBBI != E) { - MachineBasicBlock::iterator NMBBI = llvm::next(MBBI); + MachineBasicBlock::iterator NMBBI = std::next(MBBI); Modified |= ExpandMI(MBB, MBBI); MBBI = NMBBI; } diff --git a/lib/Target/ARM/ARMFrameLowering.cpp b/lib/Target/ARM/ARMFrameLowering.cpp index 916417882ba..340f49ffead 100644 --- a/lib/Target/ARM/ARMFrameLowering.cpp +++ b/lib/Target/ARM/ARMFrameLowering.cpp @@ -632,7 +632,7 @@ void ARMFrameLowering::emitEpilogue(MachineFunction &MF, addReg(JumpTarget.getReg(), RegState::Kill); } - MachineInstr *NewMI = prior(MBBI); + MachineInstr *NewMI = std::prev(MBBI); for (unsigned i = 1, e = MBBI->getNumOperands(); i != e; ++i) NewMI->addOperand(MBBI->getOperand(i)); @@ -1017,7 +1017,7 @@ static void emitAlignedDPRCS2Spills(MachineBasicBlock &MBB, } // The last spill instruction inserted should kill the scratch register r4. - llvm::prior(MI)->addRegisterKilled(ARM::R4, TRI); + std::prev(MI)->addRegisterKilled(ARM::R4, TRI); } /// Skip past the code inserted by emitAlignedDPRCS2Spills, and return an @@ -1127,7 +1127,7 @@ static void emitAlignedDPRCS2Restores(MachineBasicBlock &MBB, .addReg(ARM::R4).addImm(2*(NextReg-R4BaseReg))); // Last store kills r4. 
- llvm::prior(MI)->addRegisterKilled(ARM::R4, TRI); + std::prev(MI)->addRegisterKilled(ARM::R4, TRI); } bool ARMFrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB, diff --git a/lib/Target/ARM/ARMHazardRecognizer.cpp b/lib/Target/ARM/ARMHazardRecognizer.cpp index c69d313fd9c..61d4e12cb09 100644 --- a/lib/Target/ARM/ARMHazardRecognizer.cpp +++ b/lib/Target/ARM/ARMHazardRecognizer.cpp @@ -57,7 +57,7 @@ ARMHazardRecognizer::getHazardType(SUnit *SU, int Stalls) { (LastMCID.TSFlags & ARMII::DomainMask) == ARMII::DomainGeneral) { MachineBasicBlock::iterator I = LastMI; if (I != LastMI->getParent()->begin()) { - I = llvm::prior(I); + I = std::prev(I); DefMI = &*I; } } diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp index abf229b3c40..ebcc2aa9dd5 100644 --- a/lib/Target/ARM/ARMISelLowering.cpp +++ b/lib/Target/ARM/ARMISelLowering.cpp @@ -6199,8 +6199,7 @@ ARMTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI, // Transfer the remainder of BB and its successor edges to exitMBB. exitMBB->splice(exitMBB->begin(), BB, - llvm::next(MachineBasicBlock::iterator(MI)), - BB->end()); + std::next(MachineBasicBlock::iterator(MI)), BB->end()); exitMBB->transferSuccessorsAndUpdatePHIs(BB); // thisMBB: @@ -6284,8 +6283,7 @@ ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, // Transfer the remainder of BB and its successor edges to exitMBB. exitMBB->splice(exitMBB->begin(), BB, - llvm::next(MachineBasicBlock::iterator(MI)), - BB->end()); + std::next(MachineBasicBlock::iterator(MI)), BB->end()); exitMBB->transferSuccessorsAndUpdatePHIs(BB); const TargetRegisterClass *TRC = isThumb2 ? @@ -6392,8 +6390,7 @@ ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI, // Transfer the remainder of BB and its successor edges to exitMBB. exitMBB->splice(exitMBB->begin(), BB, - llvm::next(MachineBasicBlock::iterator(MI)), - BB->end()); + std::next(MachineBasicBlock::iterator(MI)), BB->end()); exitMBB->transferSuccessorsAndUpdatePHIs(BB); const TargetRegisterClass *TRC = isThumb2 ? @@ -6512,8 +6509,7 @@ ARMTargetLowering::EmitAtomicBinary64(MachineInstr *MI, MachineBasicBlock *BB, // Transfer the remainder of BB and its successor edges to exitMBB. exitMBB->splice(exitMBB->begin(), BB, - llvm::next(MachineBasicBlock::iterator(MI)), - BB->end()); + std::next(MachineBasicBlock::iterator(MI)), BB->end()); exitMBB->transferSuccessorsAndUpdatePHIs(BB); const TargetRegisterClass *TRC = isThumb2 ? @@ -7444,8 +7440,7 @@ ARMTargetLowering::EmitStructByval(MachineInstr *MI, // Transfer the remainder of BB and its successor edges to exitMBB. exitMBB->splice(exitMBB->begin(), BB, - llvm::next(MachineBasicBlock::iterator(MI)), - BB->end()); + std::next(MachineBasicBlock::iterator(MI)), BB->end()); exitMBB->transferSuccessorsAndUpdatePHIs(BB); // Load an immediate to varEnd. @@ -7771,8 +7766,7 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, // Transfer the remainder of BB and its successor edges to sinkMBB. sinkMBB->splice(sinkMBB->begin(), BB, - llvm::next(MachineBasicBlock::iterator(MI)), - BB->end()); + std::next(MachineBasicBlock::iterator(MI)), BB->end()); sinkMBB->transferSuccessorsAndUpdatePHIs(BB); BB->addSuccessor(copy0MBB); @@ -7805,7 +7799,7 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, case ARM::BCCi64: case ARM::BCCZi64: { // If there is an unconditional branch to the other successor, remove it. 
- BB->erase(llvm::next(MachineBasicBlock::iterator(MI)), BB->end()); + BB->erase(std::next(MachineBasicBlock::iterator(MI)), BB->end()); // Compare both parts that make up the double comparison separately for // equality. @@ -7890,8 +7884,7 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, // Transfer the remainder of BB and its successor edges to sinkMBB. SinkBB->splice(SinkBB->begin(), BB, - llvm::next(MachineBasicBlock::iterator(MI)), - BB->end()); + std::next(MachineBasicBlock::iterator(MI)), BB->end()); SinkBB->transferSuccessorsAndUpdatePHIs(BB); BB->addSuccessor(RSBBB); diff --git a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp index 5edce8ceea8..28c32cb0b29 100644 --- a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp +++ b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp @@ -484,7 +484,7 @@ void ARMLoadStoreOpt::MergeOpsUpdate(MachineBasicBlock &MBB, return; // Merge succeeded, update records. - Merges.push_back(prior(Loc)); + Merges.push_back(std::prev(Loc)); // In gathering loads together, we may have moved the imp-def of a register // past one of its uses. This is OK, since we know better than the rest of @@ -812,7 +812,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLSMultiple(MachineBasicBlock &MBB, // Try merging with the previous instruction. MachineBasicBlock::iterator BeginMBBI = MBB.begin(); if (MBBI != BeginMBBI) { - MachineBasicBlock::iterator PrevMBBI = prior(MBBI); + MachineBasicBlock::iterator PrevMBBI = std::prev(MBBI); while (PrevMBBI != BeginMBBI && PrevMBBI->isDebugValue()) --PrevMBBI; if (Mode == ARM_AM::ia && @@ -831,7 +831,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLSMultiple(MachineBasicBlock &MBB, // Try merging with the next instruction. MachineBasicBlock::iterator EndMBBI = MBB.end(); if (!DoMerge && MBBI != EndMBBI) { - MachineBasicBlock::iterator NextMBBI = llvm::next(MBBI); + MachineBasicBlock::iterator NextMBBI = std::next(MBBI); while (NextMBBI != EndMBBI && NextMBBI->isDebugValue()) ++NextMBBI; if ((Mode == ARM_AM::ia || Mode == ARM_AM::ib) && @@ -959,7 +959,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLoadStore(MachineBasicBlock &MBB, // Try merging with the previous instruction. MachineBasicBlock::iterator BeginMBBI = MBB.begin(); if (MBBI != BeginMBBI) { - MachineBasicBlock::iterator PrevMBBI = prior(MBBI); + MachineBasicBlock::iterator PrevMBBI = std::prev(MBBI); while (PrevMBBI != BeginMBBI && PrevMBBI->isDebugValue()) --PrevMBBI; if (isMatchingDecrement(PrevMBBI, Base, Bytes, Limit, Pred, PredReg)) { @@ -978,7 +978,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLoadStore(MachineBasicBlock &MBB, // Try merging with the next instruction. MachineBasicBlock::iterator EndMBBI = MBB.end(); if (!DoMerge && MBBI != EndMBBI) { - MachineBasicBlock::iterator NextMBBI = llvm::next(MBBI); + MachineBasicBlock::iterator NextMBBI = std::next(MBBI); while (NextMBBI != EndMBBI && NextMBBI->isDebugValue()) ++NextMBBI; if (!isAM5 && @@ -1122,7 +1122,7 @@ void ARMLoadStoreOpt::AdvanceRS(MachineBasicBlock &MBB, MemOpQueue &MemOps) { } if (Loc != MBB.begin()) - RS->forward(prior(Loc)); + RS->forward(std::prev(Loc)); } static int getMemoryOpOffset(const MachineInstr *MI) { @@ -1232,7 +1232,7 @@ bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB, getKillRegState(OddDeadKill) | getUndefRegState(OddUndef)); ++NumSTRD2STM; } - NewBBI = llvm::prior(MBBI); + NewBBI = std::prev(MBBI); } else { // Split into two instructions. 
unsigned NewOpc = (isLd) @@ -1254,7 +1254,7 @@ bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB, OddReg, OddDeadKill, false, BaseReg, false, BaseUndef, false, OffUndef, Pred, PredReg, TII, isT2); - NewBBI = llvm::prior(MBBI); + NewBBI = std::prev(MBBI); InsertLDR_STR(MBB, MBBI, OffImm, isLd, dl, NewOpc, EvenReg, EvenDeadKill, false, BaseReg, BaseKill, BaseUndef, OffKill, OffUndef, @@ -1274,7 +1274,7 @@ bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB, EvenReg, EvenDeadKill, EvenUndef, BaseReg, false, BaseUndef, false, OffUndef, Pred, PredReg, TII, isT2); - NewBBI = llvm::prior(MBBI); + NewBBI = std::prev(MBBI); InsertLDR_STR(MBB, MBBI, OffImm+4, isLd, dl, NewOpc2, OddReg, OddDeadKill, OddUndef, BaseReg, BaseKill, BaseUndef, OffKill, OffUndef, @@ -1419,7 +1419,7 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) { // Find a scratch register. unsigned Scratch = RS->FindUnusedReg(&ARM::GPRRegClass); // Process the load / store instructions. - RS->forward(prior(MBBI)); + RS->forward(std::prev(MBBI)); // Merge ops. Merges.clear(); @@ -1441,13 +1441,13 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) { ++NumMerges; // RS may be pointing to an instruction that's deleted. - RS->skipTo(prior(MBBI)); + RS->skipTo(std::prev(MBBI)); } else if (NumMemOps == 1) { // Try folding preceding/trailing base inc/dec into the single // load/store. if (MergeBaseUpdateLoadStore(MBB, MemOps[0].MBBI, TII, Advance, MBBI)) { ++NumMerges; - RS->forward(prior(MBBI)); + RS->forward(std::prev(MBBI)); } } @@ -1490,7 +1490,7 @@ bool ARMLoadStoreOpt::MergeReturnIntoLDM(MachineBasicBlock &MBB) { (MBBI->getOpcode() == ARM::BX_RET || MBBI->getOpcode() == ARM::tBX_RET || MBBI->getOpcode() == ARM::MOVPCLR)) { - MachineInstr *PrevMI = prior(MBBI); + MachineInstr *PrevMI = std::prev(MBBI); unsigned Opcode = PrevMI->getOpcode(); if (Opcode == ARM::LDMIA_UPD || Opcode == ARM::LDMDA_UPD || Opcode == ARM::LDMDB_UPD || Opcode == ARM::LDMIB_UPD || diff --git a/lib/Target/ARM/MLxExpansionPass.cpp b/lib/Target/ARM/MLxExpansionPass.cpp index 2e266c2e962..fe59dc011e9 100644 --- a/lib/Target/ARM/MLxExpansionPass.cpp +++ b/lib/Target/ARM/MLxExpansionPass.cpp @@ -312,9 +312,9 @@ MLxExpansion::ExpandFPMLxInstruction(MachineBasicBlock &MBB, MachineInstr *MI, dbgs() << "Expanding: " << *MI; dbgs() << " to:\n"; MachineBasicBlock::iterator MII = MI; - MII = llvm::prior(MII); + MII = std::prev(MII); MachineInstr &MI2 = *MII; - MII = llvm::prior(MII); + MII = std::prev(MII); MachineInstr &MI1 = *MII; dbgs() << " " << MI1; dbgs() << " " << MI2; diff --git a/lib/Target/ARM/Thumb1FrameLowering.cpp b/lib/Target/ARM/Thumb1FrameLowering.cpp index c2da0b68a2d..996428dbeb1 100644 --- a/lib/Target/ARM/Thumb1FrameLowering.cpp +++ b/lib/Target/ARM/Thumb1FrameLowering.cpp @@ -182,7 +182,7 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF) const { int FramePtrOffsetInBlock = 0; unsigned adjustedGPRCS1Size = GPRCS1Size; - if (tryFoldSPUpdateIntoPushPop(STI, MF, prior(MBBI), NumBytes)) { + if (tryFoldSPUpdateIntoPushPop(STI, MF, std::prev(MBBI), NumBytes)) { FramePtrOffsetInBlock = NumBytes; adjustedGPRCS1Size += NumBytes; NumBytes = 0; @@ -365,8 +365,8 @@ void Thumb1FrameLowering::emitEpilogue(MachineFunction &MF, } else { if (MBBI->getOpcode() == ARM::tBX_RET && &MBB.front() != MBBI && - prior(MBBI)->getOpcode() == ARM::tPOP) { - MachineBasicBlock::iterator PMBBI = prior(MBBI); + std::prev(MBBI)->getOpcode() == ARM::tPOP) { + MachineBasicBlock::iterator PMBBI = std::prev(MBBI); 
if (!tryFoldSPUpdateIntoPushPop(STI, MF, PMBBI, NumBytes)) emitSPUpdate(MBB, PMBBI, TII, dl, *RegInfo, NumBytes); } else if (!tryFoldSPUpdateIntoPushPop(STI, MF, MBBI, NumBytes)) diff --git a/lib/Target/ARM/Thumb1RegisterInfo.cpp b/lib/Target/ARM/Thumb1RegisterInfo.cpp index 65a7221d5db..42ac177147a 100644 --- a/lib/Target/ARM/Thumb1RegisterInfo.cpp +++ b/lib/Target/ARM/Thumb1RegisterInfo.cpp @@ -421,7 +421,7 @@ rewriteFrameIndex(MachineBasicBlock::iterator II, unsigned FrameRegIdx, MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Mask); } Offset = (Offset - Mask * Scale); - MachineBasicBlock::iterator NII = llvm::next(II); + MachineBasicBlock::iterator NII = std::next(II); emitThumbRegPlusImmediate(MBB, NII, dl, DestReg, DestReg, Offset, TII, *this); } else { diff --git a/lib/Target/ARM/Thumb2ITBlockPass.cpp b/lib/Target/ARM/Thumb2ITBlockPass.cpp index 0b7d3bb7754..d3e2e0386c0 100644 --- a/lib/Target/ARM/Thumb2ITBlockPass.cpp +++ b/lib/Target/ARM/Thumb2ITBlockPass.cpp @@ -242,7 +242,7 @@ bool Thumb2ITBlockPass::InsertITInstructions(MachineBasicBlock &MBB) { // Finalize the bundle. MachineBasicBlock::instr_iterator LI = LastITMI; - finalizeBundle(MBB, InsertPos.getInstrIterator(), llvm::next(LI)); + finalizeBundle(MBB, InsertPos.getInstrIterator(), std::next(LI)); Modified = true; ++NumITs; diff --git a/lib/Target/ARM/Thumb2SizeReduction.cpp b/lib/Target/ARM/Thumb2SizeReduction.cpp index 83b1a608917..f4ee00003ab 100644 --- a/lib/Target/ARM/Thumb2SizeReduction.cpp +++ b/lib/Target/ARM/Thumb2SizeReduction.cpp @@ -945,7 +945,7 @@ bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) { MachineBasicBlock::instr_iterator MII = MBB.instr_begin(),E = MBB.instr_end(); MachineBasicBlock::instr_iterator NextMII; for (; MII != E; MII = NextMII) { - NextMII = llvm::next(MII); + NextMII = std::next(MII); MachineInstr *MI = &*MII; if (MI->isBundle()) { @@ -962,7 +962,7 @@ bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) { if (ReduceMI(MBB, MI, LiveCPSR, IsSelfLoop)) { Modified = true; - MachineBasicBlock::instr_iterator I = prior(NextMII); + MachineBasicBlock::instr_iterator I = std::prev(NextMII); MI = &*I; // Removing and reinserting the first instruction in a bundle will break // up the bundle. Fix the bundling if it was broken. diff --git a/lib/Target/Hexagon/HexagonCopyToCombine.cpp b/lib/Target/Hexagon/HexagonCopyToCombine.cpp index 3ab796d01c4..60c933be307 100644 --- a/lib/Target/Hexagon/HexagonCopyToCombine.cpp +++ b/lib/Target/Hexagon/HexagonCopyToCombine.cpp @@ -300,7 +300,7 @@ bool HexagonCopyToCombine::isSafeToMoveTogether(MachineInstr *I1, MachineBasicBlock::iterator I(I1), End(I2); // At O3 we got better results (dhrystone) by being more conservative here. if (!ShouldCombineAggressively) - End = llvm::next(MachineBasicBlock::iterator(I2)); + End = std::next(MachineBasicBlock::iterator(I2)); IsImmUseReg = I1->getOperand(1).isImm() || I1->getOperand(1).isGlobal(); unsigned I1UseReg = IsImmUseReg ? 0 : I1->getOperand(1).getReg(); // Track killed operands. If we move across an instruction that kills our @@ -464,7 +464,7 @@ bool HexagonCopyToCombine::runOnMachineFunction(MachineFunction &MF) { /// false if the combine must be inserted at the returned instruction. 
MachineInstr *HexagonCopyToCombine::findPairable(MachineInstr *I1, bool &DoInsertAtI1) { - MachineBasicBlock::iterator I2 = llvm::next(MachineBasicBlock::iterator(I1)); + MachineBasicBlock::iterator I2 = std::next(MachineBasicBlock::iterator(I1)); unsigned I1DestReg = I1->getOperand(0).getReg(); for (MachineBasicBlock::iterator End = I1->getParent()->end(); I2 != End; diff --git a/lib/Target/Hexagon/HexagonFrameLowering.cpp b/lib/Target/Hexagon/HexagonFrameLowering.cpp index 2b04f25dd67..0ea13d4c80e 100644 --- a/lib/Target/Hexagon/HexagonFrameLowering.cpp +++ b/lib/Target/Hexagon/HexagonFrameLowering.cpp @@ -144,14 +144,14 @@ bool HexagonFrameLowering::hasTailCall(MachineBasicBlock &MBB) const { void HexagonFrameLowering::emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const { - MachineBasicBlock::iterator MBBI = prior(MBB.end()); + MachineBasicBlock::iterator MBBI = std::prev(MBB.end()); DebugLoc dl = MBBI->getDebugLoc(); // // Only insert deallocframe if we need to. Also at -O0. See comment // in emitPrologue above. // if (hasFP(MF) || MF.getTarget().getOptLevel() == CodeGenOpt::None) { - MachineBasicBlock::iterator MBBI = prior(MBB.end()); + MachineBasicBlock::iterator MBBI = std::prev(MBB.end()); MachineBasicBlock::iterator MBBI_end = MBB.end(); const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo(); @@ -170,7 +170,7 @@ void HexagonFrameLowering::emitEpilogue(MachineFunction &MF, // Check for RESTORE_DEALLOC_RET_JMP_V4 call. Don't emit an extra DEALLOC // instruction if we encounter it. MachineBasicBlock::iterator BeforeJMPR = - MBB.begin() == MBBI ? MBBI : prior(MBBI); + MBB.begin() == MBBI ? MBBI : std::prev(MBBI); if (BeforeJMPR != MBBI && BeforeJMPR->getOpcode() == Hexagon::RESTORE_DEALLOC_RET_JMP_V4) { // Remove the JMPR node. @@ -190,7 +190,7 @@ void HexagonFrameLowering::emitEpilogue(MachineFunction &MF, // DEALLOCFRAME instruction after it. MachineBasicBlock::iterator Term = MBB.getFirstTerminator(); MachineBasicBlock::iterator I = - Term == MBB.begin() ? MBB.end() : prior(Term); + Term == MBB.begin() ? MBB.end() : std::prev(Term); if (I != MBB.end() && I->getOpcode() == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4) return; diff --git a/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/lib/Target/Hexagon/HexagonHardwareLoops.cpp index fccbcb3d703..c94be7ec573 100644 --- a/lib/Target/Hexagon/HexagonHardwareLoops.cpp +++ b/lib/Target/Hexagon/HexagonHardwareLoops.cpp @@ -907,7 +907,7 @@ bool HexagonHardwareLoops::isDead(const MachineInstr *MI, // this instruction is dead: both it (and the phi node) can be removed. 
use_nodbg_iterator I = MRI->use_nodbg_begin(Reg); use_nodbg_iterator End = MRI->use_nodbg_end(); - if (llvm::next(I) != End || !I.getOperand().getParent()->isPHI()) + if (std::next(I) != End || !I.getOperand().getParent()->isPHI()) return false; MachineInstr *OnePhi = I.getOperand().getParent(); @@ -920,7 +920,7 @@ bool HexagonHardwareLoops::isDead(const MachineInstr *MI, use_nodbg_iterator nextJ; for (use_nodbg_iterator J = MRI->use_nodbg_begin(OPReg); J != End; J = nextJ) { - nextJ = llvm::next(J); + nextJ = std::next(J); MachineOperand &Use = J.getOperand(); MachineInstr *UseMI = Use.getParent(); @@ -954,7 +954,7 @@ void HexagonHardwareLoops::removeIfDead(MachineInstr *MI) { MachineRegisterInfo::use_iterator nextI; for (MachineRegisterInfo::use_iterator I = MRI->use_begin(Reg), E = MRI->use_end(); I != E; I = nextI) { - nextI = llvm::next(I); // I is invalidated by the setReg + nextI = std::next(I); // I is invalidated by the setReg MachineOperand &Use = I.getOperand(); MachineInstr *UseMI = Use.getParent(); if (UseMI == MI) @@ -1162,7 +1162,7 @@ bool HexagonHardwareLoops::orderBumpCompare(MachineInstr *BumpI, // Out of order. unsigned PredR = CmpI->getOperand(0).getReg(); bool FoundBump = false; - instr_iterator CmpIt = CmpI, NextIt = llvm::next(CmpIt); + instr_iterator CmpIt = CmpI, NextIt = std::next(CmpIt); for (instr_iterator I = NextIt, E = BB->instr_end(); I != E; ++I) { MachineInstr *In = &*I; for (unsigned i = 0, n = In->getNumOperands(); i < n; ++i) { @@ -1176,7 +1176,7 @@ bool HexagonHardwareLoops::orderBumpCompare(MachineInstr *BumpI, if (In == BumpI) { instr_iterator After = BumpI; instr_iterator From = CmpI; - BB->splice(llvm::next(After), BB, From); + BB->splice(std::next(After), BB, From); FoundBump = true; break; } diff --git a/lib/Target/Hexagon/HexagonInstrInfo.cpp b/lib/Target/Hexagon/HexagonInstrInfo.cpp index fff51dda679..8d4a9680b88 100644 --- a/lib/Target/Hexagon/HexagonInstrInfo.cpp +++ b/lib/Target/Hexagon/HexagonInstrInfo.cpp @@ -147,7 +147,7 @@ HexagonInstrInfo::InsertBranch(MachineBasicBlock &MBB,MachineBasicBlock *TBB, if (isPredicated(Term) && !AnalyzeBranch(MBB, NewTBB, NewFBB, Cond, false)) { MachineBasicBlock *NextBB = - llvm::next(MachineFunction::iterator(&MBB)); + std::next(MachineFunction::iterator(&MBB)); if (NewTBB == NextBB) { ReverseBranchCondition(Cond); RemoveBranch(MBB); diff --git a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp index e9d42f3b490..976ff2b0f5a 100644 --- a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp +++ b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp @@ -237,20 +237,20 @@ bool HexagonPacketizer::runOnMachineFunction(MachineFunction &Fn) { // instruction stream until we find the nearest boundary. MachineBasicBlock::iterator I = RegionEnd; for(;I != MBB->begin(); --I, --RemainingCount) { - if (TII->isSchedulingBoundary(llvm::prior(I), MBB, Fn)) + if (TII->isSchedulingBoundary(std::prev(I), MBB, Fn)) break; } I = MBB->begin(); // Skip empty scheduling regions. if (I == RegionEnd) { - RegionEnd = llvm::prior(RegionEnd); + RegionEnd = std::prev(RegionEnd); --RemainingCount; continue; } // Skip regions with one instruction. 
- if (I == llvm::prior(RegionEnd)) { - RegionEnd = llvm::prior(RegionEnd); + if (I == std::prev(RegionEnd)) { + RegionEnd = std::prev(RegionEnd); continue; } diff --git a/lib/Target/MSP430/MSP430FrameLowering.cpp b/lib/Target/MSP430/MSP430FrameLowering.cpp index e504011dfdc..ce078a30db4 100644 --- a/lib/Target/MSP430/MSP430FrameLowering.cpp +++ b/lib/Target/MSP430/MSP430FrameLowering.cpp @@ -71,7 +71,7 @@ void MSP430FrameLowering::emitPrologue(MachineFunction &MF) const { .addReg(MSP430::SPW); // Mark the FramePtr as live-in in every block except the entry. - for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end(); + for (MachineFunction::iterator I = std::next(MF.begin()), E = MF.end(); I != E; ++I) I->addLiveIn(MSP430::FPW); @@ -138,7 +138,7 @@ void MSP430FrameLowering::emitEpilogue(MachineFunction &MF, // Skip the callee-saved pop instructions. while (MBBI != MBB.begin()) { - MachineBasicBlock::iterator PI = prior(MBBI); + MachineBasicBlock::iterator PI = std::prev(MBBI); unsigned Opc = PI->getOpcode(); if (Opc != MSP430::POP16r && !PI->isTerminator()) break; diff --git a/lib/Target/MSP430/MSP430ISelLowering.cpp b/lib/Target/MSP430/MSP430ISelLowering.cpp index be8e5d86a04..fe163d44c0f 100644 --- a/lib/Target/MSP430/MSP430ISelLowering.cpp +++ b/lib/Target/MSP430/MSP430ISelLowering.cpp @@ -1248,8 +1248,7 @@ MSP430TargetLowering::EmitShiftInstr(MachineInstr *MI, // Update machine-CFG edges by transferring all successors of the current // block to the block containing instructions after shift. - RemBB->splice(RemBB->begin(), BB, - llvm::next(MachineBasicBlock::iterator(MI)), + RemBB->splice(RemBB->begin(), BB, std::next(MachineBasicBlock::iterator(MI)), BB->end()); RemBB->transferSuccessorsAndUpdatePHIs(BB); @@ -1344,8 +1343,7 @@ MSP430TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, // Update machine-CFG edges by transferring all successors of the current // block to the new block which will contain the Phi node for the select. copy1MBB->splice(copy1MBB->begin(), BB, - llvm::next(MachineBasicBlock::iterator(MI)), - BB->end()); + std::next(MachineBasicBlock::iterator(MI)), BB->end()); copy1MBB->transferSuccessorsAndUpdatePHIs(BB); // Next, add the true and fallthrough blocks as its successors. BB->addSuccessor(copy0MBB); diff --git a/lib/Target/MSP430/MSP430InstrInfo.cpp b/lib/Target/MSP430/MSP430InstrInfo.cpp index 7a0b00ae366..b865734d553 100644 --- a/lib/Target/MSP430/MSP430InstrInfo.cpp +++ b/lib/Target/MSP430/MSP430InstrInfo.cpp @@ -205,8 +205,8 @@ bool MSP430InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, } // If the block has any instructions after a JMP, delete them. - while (llvm::next(I) != MBB.end()) - llvm::next(I)->eraseFromParent(); + while (std::next(I) != MBB.end()) + std::next(I)->eraseFromParent(); Cond.clear(); FBB = 0; diff --git a/lib/Target/MSP430/MSP430RegisterInfo.cpp b/lib/Target/MSP430/MSP430RegisterInfo.cpp index 1a5e31240ef..578443167c0 100644 --- a/lib/Target/MSP430/MSP430RegisterInfo.cpp +++ b/lib/Target/MSP430/MSP430RegisterInfo.cpp @@ -142,10 +142,10 @@ MSP430RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, // We need to materialize the offset via add instruction. 
unsigned DstReg = MI.getOperand(0).getReg(); if (Offset < 0) - BuildMI(MBB, llvm::next(II), dl, TII.get(MSP430::SUB16ri), DstReg) + BuildMI(MBB, std::next(II), dl, TII.get(MSP430::SUB16ri), DstReg) .addReg(DstReg).addImm(-Offset); else - BuildMI(MBB, llvm::next(II), dl, TII.get(MSP430::ADD16ri), DstReg) + BuildMI(MBB, std::next(II), dl, TII.get(MSP430::ADD16ri), DstReg) .addReg(DstReg).addImm(Offset); return; diff --git a/lib/Target/Mips/Mips16ISelLowering.cpp b/lib/Target/Mips/Mips16ISelLowering.cpp index 5769efcd14f..bb280b720ef 100644 --- a/lib/Target/Mips/Mips16ISelLowering.cpp +++ b/lib/Target/Mips/Mips16ISelLowering.cpp @@ -549,8 +549,7 @@ emitSel16(unsigned Opc, MachineInstr *MI, MachineBasicBlock *BB) const { // Transfer the remainder of BB and its successor edges to sinkMBB. sinkMBB->splice(sinkMBB->begin(), BB, - llvm::next(MachineBasicBlock::iterator(MI)), - BB->end()); + std::next(MachineBasicBlock::iterator(MI)), BB->end()); sinkMBB->transferSuccessorsAndUpdatePHIs(BB); // Next, add the true and fallthrough blocks as its successors. @@ -612,8 +611,7 @@ MachineBasicBlock *Mips16TargetLowering::emitSelT16 // Transfer the remainder of BB and its successor edges to sinkMBB. sinkMBB->splice(sinkMBB->begin(), BB, - llvm::next(MachineBasicBlock::iterator(MI)), - BB->end()); + std::next(MachineBasicBlock::iterator(MI)), BB->end()); sinkMBB->transferSuccessorsAndUpdatePHIs(BB); // Next, add the true and fallthrough blocks as its successors. @@ -677,8 +675,7 @@ MachineBasicBlock *Mips16TargetLowering::emitSeliT16 // Transfer the remainder of BB and its successor edges to sinkMBB. sinkMBB->splice(sinkMBB->begin(), BB, - llvm::next(MachineBasicBlock::iterator(MI)), - BB->end()); + std::next(MachineBasicBlock::iterator(MI)), BB->end()); sinkMBB->transferSuccessorsAndUpdatePHIs(BB); // Next, add the true and fallthrough blocks as its successors. diff --git a/lib/Target/Mips/Mips16InstrInfo.cpp b/lib/Target/Mips/Mips16InstrInfo.cpp index 1b6b8998518..43c2fbdac68 100644 --- a/lib/Target/Mips/Mips16InstrInfo.cpp +++ b/lib/Target/Mips/Mips16InstrInfo.cpp @@ -411,7 +411,7 @@ Mips16InstrInfo::loadImmediate(unsigned FrameReg, BuildMI(MBB, II, DL, get(Mips:: AdduRxRyRz16), Reg).addReg(FrameReg) .addReg(Reg, RegState::Kill); if (FirstRegSaved || SecondRegSaved) { - II = llvm::next(II); + II = std::next(II); if (FirstRegSaved) copyPhysReg(MBB, II, DL, FirstRegSaved, FirstRegSavedTo, true); if (SecondRegSaved) diff --git a/lib/Target/Mips/MipsConstantIslandPass.cpp b/lib/Target/Mips/MipsConstantIslandPass.cpp index 8f607b057d2..0d70c3b0406 100644 --- a/lib/Target/Mips/MipsConstantIslandPass.cpp +++ b/lib/Target/Mips/MipsConstantIslandPass.cpp @@ -616,10 +616,10 @@ static bool BBHasFallthrough(MachineBasicBlock *MBB) { // Get the next machine basic block in the function. MachineFunction::iterator MBBI = MBB; // Can't fall off end of function. 
- if (llvm::next(MBBI) == MBB->getParent()->end()) + if (std::next(MBBI) == MBB->getParent()->end()) return false; - MachineBasicBlock *NextBB = llvm::next(MBBI); + MachineBasicBlock *NextBB = std::next(MBBI); for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(), E = MBB->succ_end(); I != E; ++I) if (*I == NextBB) @@ -932,7 +932,7 @@ MachineBasicBlock *MipsConstantIslands::splitBlockBeforeInstr CompareMBBNumbers); MachineBasicBlock* WaterBB = *IP; if (WaterBB == OrigBB) - WaterList.insert(llvm::next(IP), NewBB); + WaterList.insert(std::next(IP), NewBB); else WaterList.insert(IP, OrigBB); NewWaterList.insert(OrigBB); @@ -1218,7 +1218,7 @@ bool MipsConstantIslands::findAvailableWater(CPUser &U, unsigned UserOffset, return false; unsigned BestGrowth = ~0u; - for (water_iterator IP = prior(WaterList.end()), B = WaterList.begin();; + for (water_iterator IP = std::prev(WaterList.end()), B = WaterList.begin();; --IP) { MachineBasicBlock* WaterBB = *IP; // Check if water is in range and is either at a lower address than the @@ -1277,7 +1277,7 @@ void MipsConstantIslands::createNewWater(unsigned CPUserIndex, if (isOffsetInRange(UserOffset, CPEOffset, U)) { DEBUG(dbgs() << "Split at end of BB#" << UserMBB->getNumber() << format(", expected CPE offset %#x\n", CPEOffset)); - NewMBB = llvm::next(MachineFunction::iterator(UserMBB)); + NewMBB = std::next(MachineFunction::iterator(UserMBB)); // Add an unconditional branch from UserMBB to fallthrough block. Record // it for branch lengthening; this new branch will not get out of range, // but if the preceding conditional branch is out of range, the targets @@ -1330,8 +1330,7 @@ void MipsConstantIslands::createNewWater(unsigned CPUserIndex, //MachineInstr *LastIT = 0; for (unsigned Offset = UserOffset+TII->GetInstSizeInBytes(UserMI); Offset < BaseInsertOffset; - Offset += TII->GetInstSizeInBytes(MI), - MI = llvm::next(MI)) { + Offset += TII->GetInstSizeInBytes(MI), MI = std::next(MI)) { assert(MI != UserMBB->end() && "Fell off end of block"); if (CPUIndex < NumCPUsers && CPUsers[CPUIndex].MI == MI) { CPUser &U = CPUsers[CPUIndex]; @@ -1388,7 +1387,7 @@ bool MipsConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) { NewWaterList.insert(NewIsland); // The new CPE goes before the following block (NewMBB). - NewMBB = llvm::next(MachineFunction::iterator(WaterBB)); + NewMBB = std::next(MachineFunction::iterator(WaterBB)); } else { // No water found. @@ -1406,7 +1405,7 @@ bool MipsConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) { // next iteration for constant pools, but in this context, we don't want // it. Check for this so it will be removed from the WaterList. // Also remove any entry from NewWaterList. - MachineBasicBlock *WaterBB = prior(MachineFunction::iterator(NewMBB)); + MachineBasicBlock *WaterBB = std::prev(MachineFunction::iterator(NewMBB)); IP = std::find(WaterList.begin(), WaterList.end(), WaterBB); if (IP != WaterList.end()) NewWaterList.erase(WaterBB); @@ -1448,7 +1447,7 @@ bool MipsConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) { // Increase the size of the island block to account for the new entry. 
BBInfo[NewIsland->getNumber()].Size += Size; - adjustBBOffsetsAfter(llvm::prior(MachineFunction::iterator(NewIsland))); + adjustBBOffsetsAfter(std::prev(MachineFunction::iterator(NewIsland))); @@ -1629,7 +1628,7 @@ MipsConstantIslands::fixupConditionalBr(ImmBranch &Br) { ++NumCBrFixed; if (BMI != MI) { - if (llvm::next(MachineBasicBlock::iterator(MI)) == prior(MBB->end()) && + if (std::next(MachineBasicBlock::iterator(MI)) == std::prev(MBB->end()) && isUnconditionalBranch(BMI->getOpcode())) { // Last MI in the BB is an unconditional branch. Can we simply invert the // condition and swap destinations: @@ -1662,7 +1661,7 @@ MipsConstantIslands::fixupConditionalBr(ImmBranch &Br) { MBB->back().eraseFromParent(); // BBInfo[SplitBB].Offset is wrong temporarily, fixed below } - MachineBasicBlock *NextBB = llvm::next(MachineFunction::iterator(MBB)); + MachineBasicBlock *NextBB = std::next(MachineFunction::iterator(MBB)); DEBUG(dbgs() << " Insert B to BB#" << DestBB->getNumber() << " also invert condition and change dest. to BB#" diff --git a/lib/Target/Mips/MipsDelaySlotFiller.cpp b/lib/Target/Mips/MipsDelaySlotFiller.cpp index 6c5ea4c810a..bf76fab7968 100644 --- a/lib/Target/Mips/MipsDelaySlotFiller.cpp +++ b/lib/Target/Mips/MipsDelaySlotFiller.cpp @@ -500,8 +500,8 @@ bool Filler::runOnMachineBasicBlock(MachineBasicBlock &MBB) { // Bundle the NOP to the instruction with the delay slot. const MipsInstrInfo *TII = static_cast(TM.getInstrInfo()); - BuildMI(MBB, llvm::next(I), I->getDebugLoc(), TII->get(Mips::NOP)); - MIBundleBuilder(MBB, I, llvm::next(llvm::next(I))); + BuildMI(MBB, std::next(I), I->getDebugLoc(), TII->get(Mips::NOP)); + MIBundleBuilder(MBB, I, std::next(I, 2)); } return Changed; @@ -551,8 +551,8 @@ bool Filler::searchBackward(MachineBasicBlock &MBB, Iter Slot) const { if (!searchRange(MBB, ReverseIter(Slot), MBB.rend(), RegDU, MemDU, Filler)) return false; - MBB.splice(llvm::next(Slot), &MBB, llvm::next(Filler).base()); - MIBundleBuilder(MBB, Slot, llvm::next(llvm::next(Slot))); + MBB.splice(std::next(Slot), &MBB, std::next(Filler).base()); + MIBundleBuilder(MBB, Slot, std::next(Slot, 2)); ++UsefulSlots; return true; } @@ -568,11 +568,11 @@ bool Filler::searchForward(MachineBasicBlock &MBB, Iter Slot) const { RegDU.setCallerSaved(*Slot); - if (!searchRange(MBB, llvm::next(Slot), MBB.end(), RegDU, NM, Filler)) + if (!searchRange(MBB, std::next(Slot), MBB.end(), RegDU, NM, Filler)) return false; - MBB.splice(llvm::next(Slot), &MBB, Filler); - MIBundleBuilder(MBB, Slot, llvm::next(llvm::next(Slot))); + MBB.splice(std::next(Slot), &MBB, Filler); + MIBundleBuilder(MBB, Slot, std::next(Slot, 2)); ++UsefulSlots; return true; } diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp index abb28f78a16..bee47006256 100644 --- a/lib/Target/Mips/MipsISelLowering.cpp +++ b/lib/Target/Mips/MipsISelLowering.cpp @@ -818,7 +818,7 @@ static MachineBasicBlock *expandPseudoDIV(MachineInstr *MI, MachineBasicBlock::iterator I(MI); MachineInstrBuilder MIB; MachineOperand &Divisor = MI->getOperand(2); - MIB = BuildMI(MBB, llvm::next(I), MI->getDebugLoc(), TII.get(Mips::TEQ)) + MIB = BuildMI(MBB, std::next(I), MI->getDebugLoc(), TII.get(Mips::TEQ)) .addReg(Divisor.getReg(), getKillRegState(Divisor.isKill())) .addReg(Mips::ZERO).addImm(7); @@ -968,7 +968,7 @@ MipsTargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, // Transfer the remainder of BB and its successor edges to exitMBB. 
exitMBB->splice(exitMBB->begin(), BB, - llvm::next(MachineBasicBlock::iterator(MI)), BB->end()); + std::next(MachineBasicBlock::iterator(MI)), BB->end()); exitMBB->transferSuccessorsAndUpdatePHIs(BB); // thisMBB: @@ -1054,7 +1054,7 @@ MipsTargetLowering::emitAtomicBinaryPartword(MachineInstr *MI, // Transfer the remainder of BB and its successor edges to exitMBB. exitMBB->splice(exitMBB->begin(), BB, - llvm::next(MachineBasicBlock::iterator(MI)), BB->end()); + std::next(MachineBasicBlock::iterator(MI)), BB->end()); exitMBB->transferSuccessorsAndUpdatePHIs(BB); BB->addSuccessor(loopMBB); @@ -1209,7 +1209,7 @@ MachineBasicBlock * MipsTargetLowering::emitAtomicCmpSwap(MachineInstr *MI, // Transfer the remainder of BB and its successor edges to exitMBB. exitMBB->splice(exitMBB->begin(), BB, - llvm::next(MachineBasicBlock::iterator(MI)), BB->end()); + std::next(MachineBasicBlock::iterator(MI)), BB->end()); exitMBB->transferSuccessorsAndUpdatePHIs(BB); // thisMBB: @@ -1295,7 +1295,7 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI, // Transfer the remainder of BB and its successor edges to exitMBB. exitMBB->splice(exitMBB->begin(), BB, - llvm::next(MachineBasicBlock::iterator(MI)), BB->end()); + std::next(MachineBasicBlock::iterator(MI)), BB->end()); exitMBB->transferSuccessorsAndUpdatePHIs(BB); BB->addSuccessor(loop1MBB); diff --git a/lib/Target/Mips/MipsLongBranch.cpp b/lib/Target/Mips/MipsLongBranch.cpp index 2efe57847ad..2b6a8744cd6 100644 --- a/lib/Target/Mips/MipsLongBranch.cpp +++ b/lib/Target/Mips/MipsLongBranch.cpp @@ -134,7 +134,7 @@ void MipsLongBranch::splitMBB(MachineBasicBlock *MBB) { (!LastBr->isConditionalBranch() && !LastBr->isUnconditionalBranch())) return; - ReverseIter FirstBr = getNonDebugInstr(llvm::next(LastBr), End); + ReverseIter FirstBr = getNonDebugInstr(std::next(LastBr), End); // MBB has only one branch instruction if FirstBr is not a branch // instruction. @@ -154,7 +154,7 @@ void MipsLongBranch::splitMBB(MachineBasicBlock *MBB) { NewMBB->removeSuccessor(Tgt); MBB->addSuccessor(NewMBB); MBB->addSuccessor(Tgt); - MF->insert(llvm::next(MachineFunction::iterator(MBB)), NewMBB); + MF->insert(std::next(MachineFunction::iterator(MBB)), NewMBB); NewMBB->splice(NewMBB->end(), MBB, (++LastBr).base(), MBB->end()); } diff --git a/lib/Target/Mips/MipsSEISelLowering.cpp b/lib/Target/Mips/MipsSEISelLowering.cpp index 3d3ce0f5721..516262f6506 100644 --- a/lib/Target/Mips/MipsSEISelLowering.cpp +++ b/lib/Target/Mips/MipsSEISelLowering.cpp @@ -2621,7 +2621,7 @@ emitBPOSGE32(MachineInstr *MI, MachineBasicBlock *BB) const{ const TargetRegisterClass *RC = &Mips::GPR32RegClass; DebugLoc DL = MI->getDebugLoc(); const BasicBlock *LLVM_BB = BB->getBasicBlock(); - MachineFunction::iterator It = llvm::next(MachineFunction::iterator(BB)); + MachineFunction::iterator It = std::next(MachineFunction::iterator(BB)); MachineFunction *F = BB->getParent(); MachineBasicBlock *FBB = F->CreateMachineBasicBlock(LLVM_BB); MachineBasicBlock *TBB = F->CreateMachineBasicBlock(LLVM_BB); @@ -2631,7 +2631,7 @@ emitBPOSGE32(MachineInstr *MI, MachineBasicBlock *BB) const{ F->insert(It, Sink); // Transfer the remainder of BB and its successor edges to Sink. 
- Sink->splice(Sink->begin(), BB, llvm::next(MachineBasicBlock::iterator(MI)), + Sink->splice(Sink->begin(), BB, std::next(MachineBasicBlock::iterator(MI)), BB->end()); Sink->transferSuccessorsAndUpdatePHIs(BB); @@ -2686,7 +2686,7 @@ emitMSACBranchPseudo(MachineInstr *MI, MachineBasicBlock *BB, const TargetRegisterClass *RC = &Mips::GPR32RegClass; DebugLoc DL = MI->getDebugLoc(); const BasicBlock *LLVM_BB = BB->getBasicBlock(); - MachineFunction::iterator It = llvm::next(MachineFunction::iterator(BB)); + MachineFunction::iterator It = std::next(MachineFunction::iterator(BB)); MachineFunction *F = BB->getParent(); MachineBasicBlock *FBB = F->CreateMachineBasicBlock(LLVM_BB); MachineBasicBlock *TBB = F->CreateMachineBasicBlock(LLVM_BB); @@ -2696,7 +2696,7 @@ emitMSACBranchPseudo(MachineInstr *MI, MachineBasicBlock *BB, F->insert(It, Sink); // Transfer the remainder of BB and its successor edges to Sink. - Sink->splice(Sink->begin(), BB, llvm::next(MachineBasicBlock::iterator(MI)), + Sink->splice(Sink->begin(), BB, std::next(MachineBasicBlock::iterator(MI)), BB->end()); Sink->transferSuccessorsAndUpdatePHIs(BB); diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp index 7dc47be0d51..193ee3097c4 100644 --- a/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/lib/Target/PowerPC/PPCISelLowering.cpp @@ -6031,8 +6031,7 @@ PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, F->insert(It, loopMBB); F->insert(It, exitMBB); exitMBB->splice(exitMBB->begin(), BB, - llvm::next(MachineBasicBlock::iterator(MI)), - BB->end()); + std::next(MachineBasicBlock::iterator(MI)), BB->end()); exitMBB->transferSuccessorsAndUpdatePHIs(BB); MachineRegisterInfo &RegInfo = F->getRegInfo(); @@ -6100,8 +6099,7 @@ PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI, F->insert(It, loopMBB); F->insert(It, exitMBB); exitMBB->splice(exitMBB->begin(), BB, - llvm::next(MachineBasicBlock::iterator(MI)), - BB->end()); + std::next(MachineBasicBlock::iterator(MI)), BB->end()); exitMBB->transferSuccessorsAndUpdatePHIs(BB); MachineRegisterInfo &RegInfo = F->getRegInfo(); @@ -6253,7 +6251,7 @@ PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr *MI, // Transfer the remainder of BB and its successor edges to sinkMBB. sinkMBB->splice(sinkMBB->begin(), MBB, - llvm::next(MachineBasicBlock::iterator(MI)), MBB->end()); + std::next(MachineBasicBlock::iterator(MI)), MBB->end()); sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); // Note that the structure of the jmp_buf used here is not compatible @@ -6518,8 +6516,7 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, // Transfer the remainder of BB and its successor edges to sinkMBB. sinkMBB->splice(sinkMBB->begin(), BB, - llvm::next(MachineBasicBlock::iterator(MI)), - BB->end()); + std::next(MachineBasicBlock::iterator(MI)), BB->end()); sinkMBB->transferSuccessorsAndUpdatePHIs(BB); // Next, add the true and fallthrough blocks as its successors. 
@@ -6639,8 +6636,7 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, F->insert(It, midMBB); F->insert(It, exitMBB); exitMBB->splice(exitMBB->begin(), BB, - llvm::next(MachineBasicBlock::iterator(MI)), - BB->end()); + std::next(MachineBasicBlock::iterator(MI)), BB->end()); exitMBB->transferSuccessorsAndUpdatePHIs(BB); // thisMBB: @@ -6710,8 +6706,7 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, F->insert(It, midMBB); F->insert(It, exitMBB); exitMBB->splice(exitMBB->begin(), BB, - llvm::next(MachineBasicBlock::iterator(MI)), - BB->end()); + std::next(MachineBasicBlock::iterator(MI)), BB->end()); exitMBB->transferSuccessorsAndUpdatePHIs(BB); MachineRegisterInfo &RegInfo = F->getRegInfo(); diff --git a/lib/Target/PowerPC/PPCInstrInfo.cpp b/lib/Target/PowerPC/PPCInstrInfo.cpp index 18357c30d4a..d9907a0baf3 100644 --- a/lib/Target/PowerPC/PPCInstrInfo.cpp +++ b/lib/Target/PowerPC/PPCInstrInfo.cpp @@ -1428,7 +1428,7 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr *CmpInstr, CmpInstr->eraseFromParent(); MachineBasicBlock::iterator MII = MI; - BuildMI(*MI->getParent(), llvm::next(MII), MI->getDebugLoc(), + BuildMI(*MI->getParent(), std::next(MII), MI->getDebugLoc(), get(TargetOpcode::COPY), CRReg) .addReg(PPC::CR0, MIOpC != NewOpC ? RegState::Kill : 0); diff --git a/lib/Target/R600/AMDILCFGStructurizer.cpp b/lib/Target/R600/AMDILCFGStructurizer.cpp index 90d541eedad..3f6a95d2f77 100644 --- a/lib/Target/R600/AMDILCFGStructurizer.cpp +++ b/lib/Target/R600/AMDILCFGStructurizer.cpp @@ -1238,7 +1238,7 @@ int AMDGPUCFGStructurizer::handleJumpintoIfImp(MachineBasicBlock *HeadMBB, numClonedBlock += Num; Num += serialPatternMatch(*HeadMBB->succ_begin()); - Num += serialPatternMatch(*llvm::next(HeadMBB->succ_begin())); + Num += serialPatternMatch(*std::next(HeadMBB->succ_begin())); Num += ifPatternMatch(HeadMBB); assert(Num > 0); @@ -1767,7 +1767,7 @@ void AMDGPUCFGStructurizer::removeRedundantConditionalBranch( if (MBB->succ_size() != 2) return; MachineBasicBlock *MBB1 = *MBB->succ_begin(); - MachineBasicBlock *MBB2 = *llvm::next(MBB->succ_begin()); + MachineBasicBlock *MBB2 = *std::next(MBB->succ_begin()); if (MBB1 != MBB2) return; diff --git a/lib/Target/R600/R600ExpandSpecialInstrs.cpp b/lib/Target/R600/R600ExpandSpecialInstrs.cpp index 0be491c3049..ca1189dac95 100644 --- a/lib/Target/R600/R600ExpandSpecialInstrs.cpp +++ b/lib/Target/R600/R600ExpandSpecialInstrs.cpp @@ -75,7 +75,7 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) { MachineBasicBlock::iterator I = MBB.begin(); while (I != MBB.end()) { MachineInstr &MI = *I; - I = llvm::next(I); + I = std::next(I); // Expand LDS_*_RET instructions if (TII->isLDSRetInstr(MI.getOpcode())) { diff --git a/lib/Target/R600/R600ISelLowering.cpp b/lib/Target/R600/R600ISelLowering.cpp index b9b242a6e89..8c737125c85 100644 --- a/lib/Target/R600/R600ISelLowering.cpp +++ b/lib/Target/R600/R600ISelLowering.cpp @@ -207,7 +207,7 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter( case AMDGPU::RAT_WRITE_CACHELESS_32_eg: case AMDGPU::RAT_WRITE_CACHELESS_64_eg: case AMDGPU::RAT_WRITE_CACHELESS_128_eg: { - unsigned EOP = (llvm::next(I)->getOpcode() == AMDGPU::RETURN) ? 1 : 0; + unsigned EOP = (std::next(I)->getOpcode() == AMDGPU::RETURN) ? 
1 : 0; BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI->getOpcode())) .addOperand(MI->getOperand(0)) @@ -457,9 +457,9 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter( // Instruction is left unmodified if its not the last one of its type bool isLastInstructionOfItsType = true; unsigned InstExportType = MI->getOperand(1).getImm(); - for (MachineBasicBlock::iterator NextExportInst = llvm::next(I), + for (MachineBasicBlock::iterator NextExportInst = std::next(I), EndBlock = BB->end(); NextExportInst != EndBlock; - NextExportInst = llvm::next(NextExportInst)) { + NextExportInst = std::next(NextExportInst)) { if (NextExportInst->getOpcode() == AMDGPU::EG_ExportSwz || NextExportInst->getOpcode() == AMDGPU::R600_ExportSwz) { unsigned CurrentInstExportType = NextExportInst->getOperand(1) @@ -470,7 +470,7 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter( } } } - bool EOP = (llvm::next(I)->getOpcode() == AMDGPU::RETURN)? 1 : 0; + bool EOP = (std::next(I)->getOpcode() == AMDGPU::RETURN) ? 1 : 0; if (!EOP && !isLastInstructionOfItsType) return BB; unsigned CfInst = (MI->getOpcode() == AMDGPU::EG_ExportSwz)? 84 : 40; diff --git a/lib/Target/R600/R600InstrInfo.cpp b/lib/Target/R600/R600InstrInfo.cpp index 2eca6cf4327..0281dd0cbdb 100644 --- a/lib/Target/R600/R600InstrInfo.cpp +++ b/lib/Target/R600/R600InstrInfo.cpp @@ -717,8 +717,8 @@ R600InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, } // Remove successive JUMP - while (I != MBB.begin() && llvm::prior(I)->getOpcode() == AMDGPU::JUMP) { - MachineBasicBlock::iterator PriorI = llvm::prior(I); + while (I != MBB.begin() && std::prev(I)->getOpcode() == AMDGPU::JUMP) { + MachineBasicBlock::iterator PriorI = std::prev(I); if (AllowModify) I->removeFromParent(); I = PriorI; @@ -784,7 +784,7 @@ MachineBasicBlock::iterator FindLastAluClause(MachineBasicBlock &MBB) { It != E; ++It) { if (It->getOpcode() == AMDGPU::CF_ALU || It->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE) - return llvm::prior(It.base()); + return std::prev(It.base()); } return MBB.end(); } diff --git a/lib/Target/R600/R600Packetizer.cpp b/lib/Target/R600/R600Packetizer.cpp index 9dd4978fb5b..b7b7610b360 100644 --- a/lib/Target/R600/R600Packetizer.cpp +++ b/lib/Target/R600/R600Packetizer.cpp @@ -311,7 +311,7 @@ public: substitutePV(MI, PV); MachineBasicBlock::iterator It = VLIWPacketizerList::addToPacket(MI); if (isTransSlot) { - endPacket(llvm::next(It)->getParent(), llvm::next(It)); + endPacket(std::next(It)->getParent(), std::next(It)); } return It; } @@ -371,20 +371,20 @@ bool R600Packetizer::runOnMachineFunction(MachineFunction &Fn) { // instruction stream until we find the nearest boundary. MachineBasicBlock::iterator I = RegionEnd; for(;I != MBB->begin(); --I, --RemainingCount) { - if (TII->isSchedulingBoundary(llvm::prior(I), MBB, Fn)) + if (TII->isSchedulingBoundary(std::prev(I), MBB, Fn)) break; } I = MBB->begin(); // Skip empty scheduling regions. if (I == RegionEnd) { - RegionEnd = llvm::prior(RegionEnd); + RegionEnd = std::prev(RegionEnd); --RemainingCount; continue; } // Skip regions with one instruction. 
- if (I == llvm::prior(RegionEnd)) { - RegionEnd = llvm::prior(RegionEnd); + if (I == std::prev(RegionEnd)) { + RegionEnd = std::prev(RegionEnd); continue; } diff --git a/lib/Target/R600/SILowerControlFlow.cpp b/lib/Target/R600/SILowerControlFlow.cpp index 50dcf4e70b3..5ec49308668 100644 --- a/lib/Target/R600/SILowerControlFlow.cpp +++ b/lib/Target/R600/SILowerControlFlow.cpp @@ -438,10 +438,10 @@ bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) { BI != BE; ++BI) { MachineBasicBlock &MBB = *BI; - for (MachineBasicBlock::iterator I = MBB.begin(), Next = llvm::next(I); + for (MachineBasicBlock::iterator I = MBB.begin(), Next = std::next(I); I != MBB.end(); I = Next) { - Next = llvm::next(I); + Next = std::next(I); MachineInstr &MI = *I; if (TII->isDS(MI.getOpcode())) { NeedM0 = true; diff --git a/lib/Target/Sparc/DelaySlotFiller.cpp b/lib/Target/Sparc/DelaySlotFiller.cpp index f23ddc24eab..80df99abf2a 100644 --- a/lib/Target/Sparc/DelaySlotFiller.cpp +++ b/lib/Target/Sparc/DelaySlotFiller.cpp @@ -479,7 +479,7 @@ bool Filler::tryCombineRestoreWithPrevInst(MachineBasicBlock &MBB, && MBBI->getOperand(1).getReg() == SP::G0 && MBBI->getOperand(2).getReg() == SP::G0); - MachineBasicBlock::iterator PrevInst = llvm::prior(MBBI); + MachineBasicBlock::iterator PrevInst = std::prev(MBBI); // It cannot be combined with a bundled instruction. if (PrevInst->isBundledWithSucc()) diff --git a/lib/Target/Sparc/SparcISelLowering.cpp b/lib/Target/Sparc/SparcISelLowering.cpp index d52cf26de60..8e720ee546f 100644 --- a/lib/Target/Sparc/SparcISelLowering.cpp +++ b/lib/Target/Sparc/SparcISelLowering.cpp @@ -2940,7 +2940,7 @@ SparcTargetLowering::expandSelectCC(MachineInstr *MI, // Transfer the remainder of BB and its successor edges to sinkMBB. sinkMBB->splice(sinkMBB->begin(), BB, - llvm::next(MachineBasicBlock::iterator(MI)), + std::next(MachineBasicBlock::iterator(MI)), BB->end()); sinkMBB->transferSuccessorsAndUpdatePHIs(BB); diff --git a/lib/Target/Sparc/SparcInstrInfo.cpp b/lib/Target/Sparc/SparcInstrInfo.cpp index b0aa25b255f..abf6c178455 100644 --- a/lib/Target/Sparc/SparcInstrInfo.cpp +++ b/lib/Target/Sparc/SparcInstrInfo.cpp @@ -158,8 +158,8 @@ bool SparcInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, continue; } - while (llvm::next(I) != MBB.end()) - llvm::next(I)->eraseFromParent(); + while (std::next(I) != MBB.end()) + std::next(I)->eraseFromParent(); Cond.clear(); FBB = 0; diff --git a/lib/Target/SystemZ/SystemZFrameLowering.cpp b/lib/Target/SystemZ/SystemZFrameLowering.cpp index acfb491b953..e75cff3c08b 100644 --- a/lib/Target/SystemZ/SystemZFrameLowering.cpp +++ b/lib/Target/SystemZ/SystemZFrameLowering.cpp @@ -378,8 +378,8 @@ void SystemZFrameLowering::emitPrologue(MachineFunction &MF) const { // Mark the FramePtr as live at the beginning of every block except // the entry block. (We'll have marked R11 as live on entry when // saving the GPRs.) 
- for (MachineFunction::iterator - I = llvm::next(MF.begin()), E = MF.end(); I != E; ++I) + for (MachineFunction::iterator I = std::next(MF.begin()), E = MF.end(); + I != E; ++I) I->addLiveIn(SystemZ::R11D); } diff --git a/lib/Target/SystemZ/SystemZISelLowering.cpp b/lib/Target/SystemZ/SystemZISelLowering.cpp index 9d0faf0e6c0..981b594fef9 100644 --- a/lib/Target/SystemZ/SystemZISelLowering.cpp +++ b/lib/Target/SystemZ/SystemZISelLowering.cpp @@ -2562,7 +2562,7 @@ const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const { static MachineBasicBlock *emitBlockAfter(MachineBasicBlock *MBB) { MachineFunction &MF = *MBB->getParent(); MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock()); - MF.insert(llvm::next(MachineFunction::iterator(MBB)), NewMBB); + MF.insert(std::next(MachineFunction::iterator(MBB)), NewMBB); return NewMBB; } @@ -2572,8 +2572,7 @@ static MachineBasicBlock *splitBlockAfter(MachineInstr *MI, MachineBasicBlock *MBB) { MachineBasicBlock *NewMBB = emitBlockAfter(MBB); NewMBB->splice(NewMBB->begin(), MBB, - llvm::next(MachineBasicBlock::iterator(MI)), - MBB->end()); + std::next(MachineBasicBlock::iterator(MI)), MBB->end()); NewMBB->transferSuccessorsAndUpdatePHIs(MBB); return NewMBB; } diff --git a/lib/Target/SystemZ/SystemZInstrInfo.cpp b/lib/Target/SystemZ/SystemZInstrInfo.cpp index 55192f9d4e4..1dd7f4fc6fa 100644 --- a/lib/Target/SystemZ/SystemZInstrInfo.cpp +++ b/lib/Target/SystemZ/SystemZInstrInfo.cpp @@ -280,8 +280,8 @@ bool SystemZInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, } // If the block has any instructions after a JMP, delete them. - while (llvm::next(I) != MBB.end()) - llvm::next(I)->eraseFromParent(); + while (std::next(I) != MBB.end()) + std::next(I)->eraseFromParent(); Cond.clear(); FBB = 0; diff --git a/lib/Target/X86/X86FloatingPoint.cpp b/lib/Target/X86/X86FloatingPoint.cpp index c7f8514106f..bfc7840398e 100644 --- a/lib/Target/X86/X86FloatingPoint.cpp +++ b/lib/Target/X86/X86FloatingPoint.cpp @@ -432,7 +432,7 @@ bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) { MachineInstr *PrevMI = 0; if (I != BB.begin()) - PrevMI = prior(I); + PrevMI = std::prev(I); ++NumFP; // Keep track of # of pseudo instrs DEBUG(dbgs() << "\nFPInst:\t" << *MI); @@ -475,10 +475,10 @@ bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) { } else { MachineBasicBlock::iterator Start = I; // Rewind to first instruction newly inserted. - while (Start != BB.begin() && prior(Start) != PrevI) --Start; + while (Start != BB.begin() && std::prev(Start) != PrevI) --Start; dbgs() << "Inserted instructions:\n\t"; Start->print(dbgs(), &MF.getTarget()); - while (++Start != llvm::next(I)) {} + while (++Start != std::next(I)) {} } dumpStack(); ); @@ -905,7 +905,7 @@ void FPS::adjustLiveRegs(unsigned Mask, MachineBasicBlock::iterator I) { // Kill registers by popping. 
if (Kills && I != MBB->begin()) { - MachineBasicBlock::iterator I2 = llvm::prior(I); + MachineBasicBlock::iterator I2 = std::prev(I); while (StackTop) { unsigned KReg = getStackEntry(0); if (!(Kills & (1 << KReg))) diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp index fe0ba9520ba..898074ca0f1 100644 --- a/lib/Target/X86/X86FrameLowering.cpp +++ b/lib/Target/X86/X86FrameLowering.cpp @@ -207,7 +207,7 @@ void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, unsigned StackPtr, uint64_t *NumBytes = NULL) { if (MBBI == MBB.begin()) return; - MachineBasicBlock::iterator PI = prior(MBBI); + MachineBasicBlock::iterator PI = std::prev(MBBI); unsigned Opc = PI->getOpcode(); if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 || Opc == X86::ADD32ri || Opc == X86::ADD32ri8 || @@ -235,7 +235,7 @@ void mergeSPUpdatesDown(MachineBasicBlock &MBB, if (MBBI == MBB.end()) return; - MachineBasicBlock::iterator NI = llvm::next(MBBI); + MachineBasicBlock::iterator NI = std::next(MBBI); if (NI == MBB.end()) return; unsigned Opc = NI->getOpcode(); @@ -268,8 +268,8 @@ static int mergeSPUpdates(MachineBasicBlock &MBB, (!doMergeWithPrevious && MBBI == MBB.end())) return 0; - MachineBasicBlock::iterator PI = doMergeWithPrevious ? prior(MBBI) : MBBI; - MachineBasicBlock::iterator NI = doMergeWithPrevious ? 0 : llvm::next(MBBI); + MachineBasicBlock::iterator PI = doMergeWithPrevious ? std::prev(MBBI) : MBBI; + MachineBasicBlock::iterator NI = doMergeWithPrevious ? 0 : std::next(MBBI); unsigned Opc = PI->getOpcode(); int Offset = 0; @@ -537,7 +537,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const { } // Mark the FramePtr as live-in in every block except the entry. - for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end(); + for (MachineFunction::iterator I = std::next(MF.begin()), E = MF.end(); I != E; ++I) I->addLiveIn(FramePtr); } else { @@ -783,7 +783,7 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF, // Skip the callee-saved pop instructions. while (MBBI != MBB.begin()) { - MachineBasicBlock::iterator PI = prior(MBBI); + MachineBasicBlock::iterator PI = std::prev(MBBI); unsigned Opc = PI->getOpcode(); if (Opc != X86::POP32r && Opc != X86::POP64r && Opc != X86::DBG_VALUE && @@ -885,7 +885,7 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF, addReg(JumpTarget.getReg(), RegState::Kill); } - MachineInstr *NewMI = prior(MBBI); + MachineInstr *NewMI = std::prev(MBBI); NewMI->copyImplicitOps(MF, MBBI); // Delete the pseudo instruction TCRETURN. @@ -1555,7 +1555,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, // sure we restore the stack pointer immediately after the call, there may // be spill code inserted between the CALL and ADJCALLSTACKUP instructions. MachineBasicBlock::iterator B = MBB.begin(); - while (I != B && !llvm::prior(I)->isCall()) + while (I != B && !std::prev(I)->isCall()) --I; MBB.insert(I, New); } diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 3ae4147a794..49532b8bc1b 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -14445,7 +14445,7 @@ static MachineBasicBlock *EmitXBegin(MachineInstr *MI, MachineBasicBlock *MBB, // Transfer the remainder of BB and its successor edges to sinkMBB. 
sinkMBB->splice(sinkMBB->begin(), MBB, - llvm::next(MachineBasicBlock::iterator(MI)), MBB->end()); + std::next(MachineBasicBlock::iterator(MI)), MBB->end()); sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); // thisMBB: @@ -14679,7 +14679,7 @@ X86TargetLowering::EmitAtomicLoadArith(MachineInstr *MI, // Transfer the remainder of BB and its successor edges to sinkMBB. sinkMBB->splice(sinkMBB->begin(), MBB, - llvm::next(MachineBasicBlock::iterator(MI)), MBB->end()); + std::next(MachineBasicBlock::iterator(MI)), MBB->end()); sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); // thisMBB: @@ -14965,7 +14965,7 @@ X86TargetLowering::EmitAtomicLoadArith6432(MachineInstr *MI, // Transfer the remainder of BB and its successor edges to sinkMBB. sinkMBB->splice(sinkMBB->begin(), MBB, - llvm::next(MachineBasicBlock::iterator(MI)), MBB->end()); + std::next(MachineBasicBlock::iterator(MI)), MBB->end()); sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); // thisMBB: @@ -15361,8 +15361,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter( // Transfer the remainder of MBB and its successor edges to endMBB. endMBB->splice(endMBB->begin(), thisMBB, - llvm::next(MachineBasicBlock::iterator(MI)), - thisMBB->end()); + std::next(MachineBasicBlock::iterator(MI)), thisMBB->end()); endMBB->transferSuccessorsAndUpdatePHIs(thisMBB); // Make offsetMBB and overflowMBB successors of thisMBB @@ -15532,8 +15531,7 @@ X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter( // Transfer the remainder of MBB and its successor edges to EndMBB. EndMBB->splice(EndMBB->begin(), MBB, - llvm::next(MachineBasicBlock::iterator(MI)), - MBB->end()); + std::next(MachineBasicBlock::iterator(MI)), MBB->end()); EndMBB->transferSuccessorsAndUpdatePHIs(MBB); // The original block will now fall through to the XMM save block. @@ -15595,7 +15593,7 @@ static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr, MachineBasicBlock* BB, const TargetRegisterInfo* TRI) { // Scan forward through BB for a use/def of EFLAGS. - MachineBasicBlock::iterator miI(llvm::next(SelectItr)); + MachineBasicBlock::iterator miI(std::next(SelectItr)); for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) { const MachineInstr& mi = *miI; if (mi.readsRegister(X86::EFLAGS)) @@ -15660,8 +15658,7 @@ X86TargetLowering::EmitLoweredSelect(MachineInstr *MI, // Transfer the remainder of BB and its successor edges to sinkMBB. sinkMBB->splice(sinkMBB->begin(), BB, - llvm::next(MachineBasicBlock::iterator(MI)), - BB->end()); + std::next(MachineBasicBlock::iterator(MI)), BB->end()); sinkMBB->transferSuccessorsAndUpdatePHIs(BB); // Add the true and fallthrough blocks as its successors. @@ -15741,8 +15738,8 @@ X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI, MachineBasicBlock *BB, MF->insert(MBBIter, mallocMBB); MF->insert(MBBIter, continueMBB); - continueMBB->splice(continueMBB->begin(), BB, llvm::next - (MachineBasicBlock::iterator(MI)), BB->end()); + continueMBB->splice(continueMBB->begin(), BB, + std::next(MachineBasicBlock::iterator(MI)), BB->end()); continueMBB->transferSuccessorsAndUpdatePHIs(BB); // Add code to the main basic block to check if the stack limit has been hit, @@ -15983,7 +15980,7 @@ X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI, // Transfer the remainder of BB and its successor edges to sinkMBB. 
sinkMBB->splice(sinkMBB->begin(), MBB, - llvm::next(MachineBasicBlock::iterator(MI)), MBB->end()); + std::next(MachineBasicBlock::iterator(MI)), MBB->end()); sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); // thisMBB: diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp index 816804656e8..1be723829fe 100644 --- a/lib/Target/X86/X86InstrInfo.cpp +++ b/lib/Target/X86/X86InstrInfo.cpp @@ -1807,7 +1807,7 @@ void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB, MBB.insert(I, MI); } - MachineInstr *NewMI = prior(I); + MachineInstr *NewMI = std::prev(I); NewMI->substituteRegister(Orig->getOperand(0).getReg(), DestReg, SubIdx, TRI); } @@ -2736,8 +2736,8 @@ bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, } // If the block has any instructions after a JMP, delete them. - while (llvm::next(I) != MBB.end()) - llvm::next(I)->eraseFromParent(); + while (std::next(I) != MBB.end()) + std::next(I)->eraseFromParent(); Cond.clear(); FBB = 0; diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp index 67dc19b0aa5..079e886457b 100644 --- a/lib/Target/XCore/XCoreISelLowering.cpp +++ b/lib/Target/XCore/XCoreISelLowering.cpp @@ -1597,8 +1597,7 @@ XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, // Transfer the remainder of BB and its successor edges to sinkMBB. sinkMBB->splice(sinkMBB->begin(), BB, - llvm::next(MachineBasicBlock::iterator(MI)), - BB->end()); + std::next(MachineBasicBlock::iterator(MI)), BB->end()); sinkMBB->transferSuccessorsAndUpdatePHIs(BB); // Next, add the true and fallthrough blocks as its successors. diff --git a/lib/Transforms/IPO/GlobalOpt.cpp b/lib/Transforms/IPO/GlobalOpt.cpp index 3ceb8b91874..f73dce444e2 100644 --- a/lib/Transforms/IPO/GlobalOpt.cpp +++ b/lib/Transforms/IPO/GlobalOpt.cpp @@ -2188,7 +2188,7 @@ static bool isSimpleEnoughPointerToCommit(Constant *C) { return false; // The first index must be zero. - ConstantInt *CI = dyn_cast<ConstantInt>(*llvm::next(CE->op_begin())); + ConstantInt *CI = dyn_cast<ConstantInt>(*std::next(CE->op_begin())); if (!CI || !CI->isZero()) return false; // The remaining indices must be compile-time known integers within the diff --git a/lib/Transforms/Instrumentation/GCOVProfiling.cpp b/lib/Transforms/Instrumentation/GCOVProfiling.cpp index 557ef66dfdf..95d1fb47225 100644 --- a/lib/Transforms/Instrumentation/GCOVProfiling.cpp +++ b/lib/Transforms/Instrumentation/GCOVProfiling.cpp @@ -883,7 +883,7 @@ void GCOVProfiler::insertIndirectCounterIncrement() { // uint64_t *counter = counters[pred]; // if (!counter) return; Value *ZExtPred = Builder.CreateZExt(Pred, Builder.getInt64Ty()); - Arg = llvm::next(Fn->arg_begin()); + Arg = std::next(Fn->arg_begin()); Arg->setName("counters"); Value *GEP = Builder.CreateGEP(Arg, ZExtPred); Value *Counter = Builder.CreateLoad(GEP, "counter"); diff --git a/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp b/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp index 00d9864953d..5a86ba31b2d 100644 --- a/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp +++ b/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp @@ -165,7 +165,7 @@ bool ObjCARCAPElim::runOnModule(Module &M) { if (F->isDeclaration()) continue; // Only look at functions with one basic block. - if (llvm::next(F->begin()) != F->end()) + if (std::next(F->begin()) != F->end()) continue; // Ok, a single-block constructor function definition. Try to optimize it.
Changed |= OptimizeBB(F->begin()); diff --git a/lib/Transforms/ObjCARC/ObjCARCOpts.cpp b/lib/Transforms/ObjCARC/ObjCARCOpts.cpp index 8e2c362ad81..95f01802e86 100644 --- a/lib/Transforms/ObjCARC/ObjCARCOpts.cpp +++ b/lib/Transforms/ObjCARC/ObjCARCOpts.cpp @@ -958,7 +958,7 @@ static void GenerateARCBBTerminatorAnnotation(const char *Name, BasicBlock *BB, /*isVarArg=*/false); Constant *Callee = M->getOrInsertFunction(Name, FTy); - IRBuilder<> Builder(BB, llvm::prior(BB->end())); + IRBuilder<> Builder(BB, std::prev(BB->end())); Value *PtrName; StringRef Tmp = Ptr->getName(); @@ -1874,7 +1874,7 @@ ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst, if (isa<InvokeInst>(Inst)) S.InsertReverseInsertPt(BB->getFirstInsertionPt()); else - S.InsertReverseInsertPt(llvm::next(BasicBlock::iterator(Inst))); + S.InsertReverseInsertPt(std::next(BasicBlock::iterator(Inst))); S.SetSeq(S_Use); ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S_Use); } else if (Seq == S_Release && IsUser(Class)) { @@ -1888,7 +1888,7 @@ ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst, if (isa<InvokeInst>(Inst)) S.InsertReverseInsertPt(BB->getFirstInsertionPt()); else - S.InsertReverseInsertPt(llvm::next(BasicBlock::iterator(Inst))); + S.InsertReverseInsertPt(std::next(BasicBlock::iterator(Inst))); } break; case S_Stop: @@ -1945,7 +1945,7 @@ ObjCARCOpt::VisitBottomUp(BasicBlock *BB, // Visit all the instructions, bottom-up. for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; --I) { - Instruction *Inst = llvm::prior(I); + Instruction *Inst = std::prev(I); // Invoke instructions are visited as part of their successors (below). if (isa<InvokeInst>(Inst)) @@ -2691,12 +2691,12 @@ ObjCARCOpt::OptimizeWeakCalls(Function &F) { // within the same block. Theoretically, we could do memdep-style non-local // analysis too, but that would want caching. A better approach would be to // use the technique that EarlyCSE uses. - inst_iterator Current = llvm::prior(I); + inst_iterator Current = std::prev(I); BasicBlock *CurrentBB = Current.getBasicBlockIterator(); for (BasicBlock::iterator B = CurrentBB->begin(), J = Current.getInstructionIterator(); J != B; --J) { - Instruction *EarlierInst = &*llvm::prior(J); + Instruction *EarlierInst = &*std::prev(J); InstructionClass EarlierClass = GetInstructionClass(EarlierInst); switch (EarlierClass) { case IC_LoadWeak: diff --git a/lib/Transforms/Scalar/ConstantHoisting.cpp b/lib/Transforms/Scalar/ConstantHoisting.cpp index 661d19cdbe4..73ec3fe4efb 100644 --- a/lib/Transforms/Scalar/ConstantHoisting.cpp +++ b/lib/Transforms/Scalar/ConstantHoisting.cpp @@ -244,7 +244,7 @@ void ConstantHoisting::FindBaseConstants() { // Simple linear scan through the sorted constant map for viable merge // candidates. ConstantMapType::iterator MinValItr = ConstantMap.begin(); - for (ConstantMapType::iterator I = llvm::next(ConstantMap.begin()), + for (ConstantMapType::iterator I = std::next(ConstantMap.begin()), E = ConstantMap.end(); I != E; ++I) { if (MinValItr->first->getType() == I->first->getType()) { // Check if the constant is in range of an add with immediate.
@@ -315,7 +315,7 @@ FindConstantInsertionPoint(Function &F, const ConstantInfo &CI) const { while (BBs.size() >= 2) { BasicBlock *BB, *BB1, *BB2; BB1 = *BBs.begin(); - BB2 = *llvm::next(BBs.begin()); + BB2 = *std::next(BBs.begin()); BB = DT->findNearestCommonDominator(BB1, BB2); if (BB == Entry) return getMatInsertPt(&Entry->front(), DT); diff --git a/lib/Transforms/Scalar/DeadStoreElimination.cpp b/lib/Transforms/Scalar/DeadStoreElimination.cpp index 57bd53f3bbd..4bc9dd5aeec 100644 --- a/lib/Transforms/Scalar/DeadStoreElimination.cpp +++ b/lib/Transforms/Scalar/DeadStoreElimination.cpp @@ -680,7 +680,7 @@ bool DSE::HandleFree(CallInst *F) { if (!AA->isMustAlias(F->getArgOperand(0), DepPointer)) break; - Instruction *Next = llvm::next(BasicBlock::iterator(Dependency)); + Instruction *Next = std::next(BasicBlock::iterator(Dependency)); // DCE instructions only used to calculate that store DeleteDeadInstruction(Dependency, *MD, TLI); @@ -761,7 +761,7 @@ bool DSE::handleEndBlock(BasicBlock &BB) { for (SmallVectorImpl::iterator I = Pointers.begin(), E = Pointers.end(); I != E; ++I) { dbgs() << **I; - if (llvm::next(I) != E) + if (std::next(I) != E) dbgs() << ", "; } dbgs() << '\n'); diff --git a/lib/Transforms/Scalar/LoopRerollPass.cpp b/lib/Transforms/Scalar/LoopRerollPass.cpp index 8ade4ae6626..7c739a5aab4 100644 --- a/lib/Transforms/Scalar/LoopRerollPass.cpp +++ b/lib/Transforms/Scalar/LoopRerollPass.cpp @@ -190,12 +190,12 @@ protected: iterator begin() { assert(Valid && "Using invalid reduction"); - return llvm::next(Instructions.begin()); + return std::next(Instructions.begin()); } const_iterator begin() const { assert(Valid && "Using invalid reduction"); - return llvm::next(Instructions.begin()); + return std::next(Instructions.begin()); } iterator end() { return Instructions.end(); } @@ -561,7 +561,7 @@ bool LoopReroll::findScaleFromMul(Instruction *RealIV, uint64_t &Scale, return false; const SCEVAddRecExpr *RealIVSCEV = cast(SE->getSCEV(RealIV)); Instruction *User1 = cast(*RealIV->use_begin()), - *User2 = cast(*llvm::next(RealIV->use_begin())); + *User2 = cast(*std::next(RealIV->use_begin())); if (!SE->isSCEVable(User1->getType()) || !SE->isSCEVable(User2->getType())) return false; const SCEVAddRecExpr *User1SCEV = diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp index 093352ba572..2e881e3d53a 100644 --- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp +++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp @@ -1294,7 +1294,7 @@ void LSRUse::print(raw_ostream &OS) const { for (SmallVectorImpl::const_iterator I = Offsets.begin(), E = Offsets.end(); I != E; ++I) { OS << *I; - if (llvm::next(I) != E) + if (std::next(I) != E) OS << ','; } OS << '}'; @@ -1561,7 +1561,7 @@ struct IVChain { // begin - return the first increment in the chain. 
const_iterator begin() const { assert(!Incs.empty()); - return llvm::next(Incs.begin()); + return std::next(Incs.begin()); } const_iterator end() const { return Incs.end(); @@ -2337,7 +2337,7 @@ void LSRInstance::CollectInterestingTypesAndFactors() { for (SmallSetVector::const_iterator I = Strides.begin(), E = Strides.end(); I != E; ++I) for (SmallSetVector::const_iterator NewStrideIter = - llvm::next(I); NewStrideIter != E; ++NewStrideIter) { + std::next(I); NewStrideIter != E; ++NewStrideIter) { const SCEV *OldStride = *I; const SCEV *NewStride = *NewStrideIter; @@ -2737,7 +2737,7 @@ void LSRInstance::CollectChains() { Instruction *IVOpInst = cast(*IVOpIter); if (UniqueOperands.insert(IVOpInst)) ChainInstruction(I, IVOpInst, ChainUsersVec); - IVOpIter = findIVOperand(llvm::next(IVOpIter), IVOpEnd, L, SE); + IVOpIter = findIVOperand(std::next(IVOpIter), IVOpEnd, L, SE); } } // Continue walking down the instructions. } // Continue walking down the domtree. @@ -2828,7 +2828,7 @@ void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter, || SE.getSCEV(IVSrc) == Head.IncExpr) { break; } - IVOpIter = findIVOperand(llvm::next(IVOpIter), IVOpEnd, L, SE); + IVOpIter = findIVOperand(std::next(IVOpIter), IVOpEnd, L, SE); } if (IVOpIter == IVOpEnd) { // Gracefully give up on this chain. @@ -3220,10 +3220,10 @@ void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx, continue; // Collect all operands except *J. - SmallVector InnerAddOps - (((const SmallVector &)AddOps).begin(), J); - InnerAddOps.append - (llvm::next(J), ((const SmallVector &)AddOps).end()); + SmallVector InnerAddOps( + ((const SmallVector &)AddOps).begin(), J); + InnerAddOps.append(std::next(J), + ((const SmallVector &)AddOps).end()); // Don't leave just a constant behind in a register if the constant could // be folded into an immediate field. @@ -3625,8 +3625,9 @@ void LSRInstance::GenerateCrossUseConstantOffsets() { // Conservatively examine offsets between this orig reg a few selected // other orig regs. ImmMapTy::const_iterator OtherImms[] = { - Imms.begin(), prior(Imms.end()), - Imms.lower_bound((Imms.begin()->first + prior(Imms.end())->first) / 2) + Imms.begin(), std::prev(Imms.end()), + Imms.lower_bound((Imms.begin()->first + std::prev(Imms.end())->first) / + 2) }; for (size_t i = 0, e = array_lengthof(OtherImms); i != e; ++i) { ImmMapTy::const_iterator M = OtherImms[i]; @@ -4292,7 +4293,7 @@ LSRInstance::HoistInsertPosition(BasicBlock::iterator IP, // instead of at the end, so that it can be used for other expansions. if (IDom == Inst->getParent() && (!BetterPos || !DT.dominates(Inst, BetterPos))) - BetterPos = llvm::next(BasicBlock::iterator(Inst)); + BetterPos = std::next(BasicBlock::iterator(Inst)); } if (!AllDominate) break; diff --git a/lib/Transforms/Scalar/SROA.cpp b/lib/Transforms/Scalar/SROA.cpp index 3a0fcc80d2c..e8a0e159ef5 100644 --- a/lib/Transforms/Scalar/SROA.cpp +++ b/lib/Transforms/Scalar/SROA.cpp @@ -2232,7 +2232,7 @@ private: DL.getTypeStoreSizeInBits(LI.getType()) && "Non-byte-multiple bit width"); // Move the insertion point just past the load so that we can refer to it. - IRB.SetInsertPoint(llvm::next(BasicBlock::iterator(&LI))); + IRB.SetInsertPoint(std::next(BasicBlock::iterator(&LI))); // Create a placeholder value with the same type as LI to use as the // basis for the new value. 
This allows us to replace the uses of LI with // the computed value, and then replace the placeholder with LI, leaving @@ -3295,7 +3295,7 @@ bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &S) { uint64_t BeginOffset = S.begin()->beginOffset(); - for (AllocaSlices::iterator SI = S.begin(), SJ = llvm::next(SI), SE = S.end(); + for (AllocaSlices::iterator SI = S.begin(), SJ = std::next(SI), SE = S.end(); SI != SE; SI = SJ) { uint64_t MaxEndOffset = SI->endOffset(); @@ -3623,7 +3623,7 @@ bool SROA::runOnFunction(Function &F) { DT = DTWP ? &DTWP->getDomTree() : 0; BasicBlock &EntryBB = F.getEntryBlock(); - for (BasicBlock::iterator I = EntryBB.begin(), E = llvm::prior(EntryBB.end()); + for (BasicBlock::iterator I = EntryBB.begin(), E = std::prev(EntryBB.end()); I != E; ++I) if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) Worklist.insert(AI); diff --git a/lib/Transforms/Scalar/Scalarizer.cpp b/lib/Transforms/Scalar/Scalarizer.cpp index 0e607109cbf..f21a23303cd 100644 --- a/lib/Transforms/Scalar/Scalarizer.cpp +++ b/lib/Transforms/Scalar/Scalarizer.cpp @@ -269,7 +269,7 @@ Scatterer Scalarizer::scatter(Instruction *Point, Value *V) { // Put the scattered form of an instruction directly after the // instruction. BasicBlock *BB = VOp->getParent(); - return Scatterer(BB, llvm::next(BasicBlock::iterator(VOp)), + return Scatterer(BB, std::next(BasicBlock::iterator(VOp)), V, &Scattered[V]); } // In the fallback case, just put the scattered before Point and diff --git a/lib/Transforms/Scalar/TailRecursionElimination.cpp b/lib/Transforms/Scalar/TailRecursionElimination.cpp index d8ccdda789b..299b767eaa8 100644 --- a/lib/Transforms/Scalar/TailRecursionElimination.cpp +++ b/lib/Transforms/Scalar/TailRecursionElimination.cpp @@ -429,7 +429,7 @@ TailCallElim::FindTRECandidate(Instruction *TI, // lower the call to fabs into inline code. if (BB == &F->getEntryBlock() && FirstNonDbg(BB->front()) == CI && - FirstNonDbg(llvm::next(BB->begin())) == TI && + FirstNonDbg(std::next(BB->begin())) == TI && CI->getCalledFunction() && !TTI->isLoweredToCall(CI->getCalledFunction())) { // A single-block function with just a call and a return.
Check that diff --git a/lib/Transforms/Utils/CodeExtractor.cpp b/lib/Transforms/Utils/CodeExtractor.cpp index 41ced406661..c96c0304285 100644 --- a/lib/Transforms/Utils/CodeExtractor.cpp +++ b/lib/Transforms/Utils/CodeExtractor.cpp @@ -86,7 +86,7 @@ static SetVector<BasicBlock *> buildExtractionBlockSet(IteratorT BBBegin, } #ifndef NDEBUG - for (SetVector<BasicBlock *>::iterator I = llvm::next(Result.begin()), + for (SetVector<BasicBlock *>::iterator I = std::next(Result.begin()), E = Result.end(); I != E; ++I) for (pred_iterator PI = pred_begin(*I), PE = pred_end(*I); diff --git a/lib/Transforms/Utils/LowerSwitch.cpp b/lib/Transforms/Utils/LowerSwitch.cpp index 2d2a8a54a0f..03d456082cc 100644 --- a/lib/Transforms/Utils/LowerSwitch.cpp +++ b/lib/Transforms/Utils/LowerSwitch.cpp @@ -245,7 +245,8 @@ unsigned LowerSwitch::Clusterify(CaseVector& Cases, SwitchInst *SI) { // Merge case into clusters if (Cases.size()>=2) - for (CaseItr I=Cases.begin(), J=llvm::next(Cases.begin()); J!=Cases.end(); ) { + for (CaseItr I = Cases.begin(), J = std::next(Cases.begin()); + J != Cases.end();) { int64_t nextValue = cast<ConstantInt>(J->Low)->getSExtValue(); int64_t currentValue = cast<ConstantInt>(I->High)->getSExtValue(); BasicBlock* nextBB = J->BB; diff --git a/lib/Transforms/Utils/PromoteMemoryToRegister.cpp b/lib/Transforms/Utils/PromoteMemoryToRegister.cpp index b1ac7e2f737..8ed3b16c872 100644 --- a/lib/Transforms/Utils/PromoteMemoryToRegister.cpp +++ b/lib/Transforms/Utils/PromoteMemoryToRegister.cpp @@ -485,7 +485,7 @@ static void promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info, LI->replaceAllUsesWith(UndefValue::get(LI->getType())); else // Otherwise, there was a store before this load, the load takes its value. - LI->replaceAllUsesWith(llvm::prior(I)->second->getOperand(0)); + LI->replaceAllUsesWith(std::prev(I)->second->getOperand(0)); if (AST && LI->getType()->isPointerTy()) AST->deleteValue(LI); diff --git a/lib/Transforms/Utils/SimplifyCFG.cpp b/lib/Transforms/Utils/SimplifyCFG.cpp index e24acfffaef..a70cfadccab 100644 --- a/lib/Transforms/Utils/SimplifyCFG.cpp +++ b/lib/Transforms/Utils/SimplifyCFG.cpp @@ -1422,7 +1422,7 @@ static bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *ThenBB) { Value *SpeculatedStoreValue = 0; StoreInst *SpeculatedStore = 0; for (BasicBlock::iterator BBI = ThenBB->begin(), - BBE = llvm::prior(ThenBB->end()); + BBE = std::prev(ThenBB->end()); BBI != BBE; ++BBI) { Instruction *I = BBI; // Skip debug info. @@ -1532,7 +1532,7 @@ static bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *ThenBB) { // Hoist the instructions. BB->getInstList().splice(BI, ThenBB->getInstList(), ThenBB->begin(), - llvm::prior(ThenBB->end())); + std::prev(ThenBB->end())); // Insert selects and rewrite the PHI operands. IRBuilder Builder(BI); diff --git a/lib/Transforms/Utils/SimplifyIndVar.cpp b/lib/Transforms/Utils/SimplifyIndVar.cpp index 179e6b5ca5d..f6b9ab7e40c 100644 --- a/lib/Transforms/Utils/SimplifyIndVar.cpp +++ b/lib/Transforms/Utils/SimplifyIndVar.cpp @@ -300,7 +300,7 @@ Instruction *SimplifyIndvar::splitOverflowIntrinsic(Instruction *IVUser, return IVUser; BasicBlock *ContinueBB = Branch->getSuccessor(1); - if (llvm::next(pred_begin(ContinueBB)) != pred_end(ContinueBB)) + if (std::next(pred_begin(ContinueBB)) != pred_end(ContinueBB)) return IVUser; // Check if all users of the add are provably NSW.
diff --git a/lib/Transforms/Vectorize/BBVectorize.cpp b/lib/Transforms/Vectorize/BBVectorize.cpp index b2caba30028..4855d4e027f 100644 --- a/lib/Transforms/Vectorize/BBVectorize.cpp +++ b/lib/Transforms/Vectorize/BBVectorize.cpp @@ -1231,7 +1231,7 @@ namespace { if (I->mayWriteToMemory()) WriteSet.add(I); bool JAfterStart = IAfterStart; - BasicBlock::iterator J = llvm::next(I); + BasicBlock::iterator J = std::next(I); for (unsigned ss = 0; J != E && ss <= Config.SearchLimit; ++J, ++ss) { if (J == Start) JAfterStart = true; @@ -1276,7 +1276,7 @@ namespace { // The next call to this function must start after the last instruction // selected during this invocation. if (JAfterStart) { - Start = llvm::next(J); + Start = std::next(J); IAfterStart = JAfterStart = false; } @@ -1453,7 +1453,7 @@ namespace { AliasSetTracker WriteSet(*AA); if (I->mayWriteToMemory()) WriteSet.add(I); - for (BasicBlock::iterator J = llvm::next(I); J != E; ++J) { + for (BasicBlock::iterator J = std::next(I); J != E; ++J) { (void) trackUsesOfI(Users, WriteSet, I, J); if (J == EL) @@ -2841,7 +2841,7 @@ namespace { DenseSet &LoadMoveSetPairs, Instruction *I, Instruction *J) { // Skip to the first instruction past I. - BasicBlock::iterator L = llvm::next(BasicBlock::iterator(I)); + BasicBlock::iterator L = std::next(BasicBlock::iterator(I)); DenseSet Users; AliasSetTracker WriteSet(*AA); @@ -2863,7 +2863,7 @@ namespace { Instruction *&InsertionPt, Instruction *I, Instruction *J) { // Skip to the first instruction past I. - BasicBlock::iterator L = llvm::next(BasicBlock::iterator(I)); + BasicBlock::iterator L = std::next(BasicBlock::iterator(I)); DenseSet Users; AliasSetTracker WriteSet(*AA); @@ -2894,7 +2894,7 @@ namespace { DenseSet &LoadMoveSetPairs, Instruction *I) { // Skip to the first instruction past I. - BasicBlock::iterator L = llvm::next(BasicBlock::iterator(I)); + BasicBlock::iterator L = std::next(BasicBlock::iterator(I)); DenseSet Users; AliasSetTracker WriteSet(*AA); @@ -3165,7 +3165,7 @@ namespace { } // Before removing I, set the iterator to the next instruction. - PI = llvm::next(BasicBlock::iterator(I)); + PI = std::next(BasicBlock::iterator(I)); if (cast(PI) == J) ++PI; diff --git a/lib/Transforms/Vectorize/LoopVectorize.cpp b/lib/Transforms/Vectorize/LoopVectorize.cpp index bd29d7d359d..1ee92fde9b2 100644 --- a/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -4339,7 +4339,7 @@ bool MemoryDepChecker::areDepsSafe(AccessAnalysis::DepCandidates &AccessSets, // Check every access pair. while (AI != AE) { CheckDeps.erase(*AI); - EquivalenceClasses::member_iterator OI = llvm::next(AI); + EquivalenceClasses::member_iterator OI = std::next(AI); while (OI != AE) { // Check every accessing instruction pair in program order. 
for (std::vector::iterator I1 = Accesses[*AI].begin(), diff --git a/lib/Transforms/Vectorize/SLPVectorizer.cpp b/lib/Transforms/Vectorize/SLPVectorizer.cpp index 951f6ca218d..10bdb2ff8f5 100644 --- a/lib/Transforms/Vectorize/SLPVectorizer.cpp +++ b/lib/Transforms/Vectorize/SLPVectorizer.cpp @@ -1732,7 +1732,7 @@ void BoUpSLP::optimizeGatherSequence() { for (SmallVectorImpl::iterator I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) { - assert((I == CSEWorkList.begin() || !DT->dominates(*I, *llvm::prior(I))) && + assert((I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) && "Worklist not sorted properly!"); BasicBlock *BB = *I; // For all instructions in blocks containing gather sequences: diff --git a/unittests/ADT/TinyPtrVectorTest.cpp b/unittests/ADT/TinyPtrVectorTest.cpp index 3ae1ae3deb1..d633f967221 100644 --- a/unittests/ADT/TinyPtrVectorTest.cpp +++ b/unittests/ADT/TinyPtrVectorTest.cpp @@ -72,9 +72,9 @@ protected: EXPECT_EQ(Values.size(), V.size()); for (size_t i = 0, e = Values.size(); i != e; ++i) { EXPECT_EQ(Values[i], V[i]); - EXPECT_EQ(Values[i], *llvm::next(V.begin(), i)); + EXPECT_EQ(Values[i], *std::next(V.begin(), i)); } - EXPECT_EQ(V.end(), llvm::next(V.begin(), Values.size())); + EXPECT_EQ(V.end(), std::next(V.begin(), Values.size())); } }; @@ -300,17 +300,17 @@ TYPED_TEST(TinyPtrVectorTest, EraseTest) { this->V.erase(this->V.begin()); this->TestPtrs.erase(this->TestPtrs.begin()); this->expectValues(this->V, this->testArray(41)); - this->V.erase(llvm::next(this->V.begin(), 1)); - this->TestPtrs.erase(llvm::next(this->TestPtrs.begin(), 1)); + this->V.erase(std::next(this->V.begin(), 1)); + this->TestPtrs.erase(std::next(this->TestPtrs.begin(), 1)); this->expectValues(this->V, this->testArray(40)); - this->V.erase(llvm::next(this->V.begin(), 2)); - this->TestPtrs.erase(llvm::next(this->TestPtrs.begin(), 2)); + this->V.erase(std::next(this->V.begin(), 2)); + this->TestPtrs.erase(std::next(this->TestPtrs.begin(), 2)); this->expectValues(this->V, this->testArray(39)); - this->V.erase(llvm::next(this->V.begin(), 5)); - this->TestPtrs.erase(llvm::next(this->TestPtrs.begin(), 5)); + this->V.erase(std::next(this->V.begin(), 5)); + this->TestPtrs.erase(std::next(this->TestPtrs.begin(), 5)); this->expectValues(this->V, this->testArray(38)); - this->V.erase(llvm::next(this->V.begin(), 13)); - this->TestPtrs.erase(llvm::next(this->TestPtrs.begin(), 13)); + this->V.erase(std::next(this->V.begin(), 13)); + this->TestPtrs.erase(std::next(this->TestPtrs.begin(), 13)); this->expectValues(this->V, this->testArray(37)); typename TypeParam::iterator I = this->V.begin(); @@ -332,27 +332,27 @@ TYPED_TEST(TinyPtrVectorTest, EraseRangeTest) { this->appendValues(this->V, this->testArray(42)); this->expectValues(this->V, this->testArray(42)); - this->V.erase(this->V.begin(), llvm::next(this->V.begin(), 1)); + this->V.erase(this->V.begin(), std::next(this->V.begin(), 1)); this->TestPtrs.erase(this->TestPtrs.begin(), - llvm::next(this->TestPtrs.begin(), 1)); + std::next(this->TestPtrs.begin(), 1)); this->expectValues(this->V, this->testArray(41)); - this->V.erase(llvm::next(this->V.begin(), 1), llvm::next(this->V.begin(), 2)); - this->TestPtrs.erase(llvm::next(this->TestPtrs.begin(), 1), - llvm::next(this->TestPtrs.begin(), 2)); + this->V.erase(std::next(this->V.begin(), 1), std::next(this->V.begin(), 2)); + this->TestPtrs.erase(std::next(this->TestPtrs.begin(), 1), + std::next(this->TestPtrs.begin(), 2)); this->expectValues(this->V, this->testArray(40)); - 
this->V.erase(llvm::next(this->V.begin(), 2), llvm::next(this->V.begin(), 4)); - this->TestPtrs.erase(llvm::next(this->TestPtrs.begin(), 2), - llvm::next(this->TestPtrs.begin(), 4)); + this->V.erase(std::next(this->V.begin(), 2), std::next(this->V.begin(), 4)); + this->TestPtrs.erase(std::next(this->TestPtrs.begin(), 2), + std::next(this->TestPtrs.begin(), 4)); this->expectValues(this->V, this->testArray(38)); - this->V.erase(llvm::next(this->V.begin(), 5), llvm::next(this->V.begin(), 10)); - this->TestPtrs.erase(llvm::next(this->TestPtrs.begin(), 5), - llvm::next(this->TestPtrs.begin(), 10)); + this->V.erase(std::next(this->V.begin(), 5), std::next(this->V.begin(), 10)); + this->TestPtrs.erase(std::next(this->TestPtrs.begin(), 5), + std::next(this->TestPtrs.begin(), 10)); this->expectValues(this->V, this->testArray(33)); - this->V.erase(llvm::next(this->V.begin(), 13), llvm::next(this->V.begin(), 26)); - this->TestPtrs.erase(llvm::next(this->TestPtrs.begin(), 13), - llvm::next(this->TestPtrs.begin(), 26)); + this->V.erase(std::next(this->V.begin(), 13), std::next(this->V.begin(), 26)); + this->TestPtrs.erase(std::next(this->TestPtrs.begin(), 13), + std::next(this->TestPtrs.begin(), 26)); this->expectValues(this->V, this->testArray(20)); - this->V.erase(llvm::next(this->V.begin(), 7), this->V.end()); + this->V.erase(std::next(this->V.begin(), 7), this->V.end()); this->expectValues(this->V, this->testArray(7)); this->V.erase(this->V.begin(), this->V.end()); this->expectValues(this->V, this->testArray(0)); @@ -369,8 +369,8 @@ TYPED_TEST(TinyPtrVectorTest, Insert) { this->V.insert(this->V.begin(), this->TestPtrs[42]); this->TestPtrs.insert(this->TestPtrs.begin(), this->TestPtrs[42]); this->expectValues(this->V, this->testArray(6)); - this->V.insert(llvm::next(this->V.begin(), 3), this->TestPtrs[43]); - this->TestPtrs.insert(llvm::next(this->TestPtrs.begin(), 3), + this->V.insert(std::next(this->V.begin(), 3), this->TestPtrs[43]); + this->TestPtrs.insert(std::next(this->TestPtrs.begin(), 3), this->TestPtrs[43]); this->expectValues(this->V, this->testArray(7)); } @@ -384,30 +384,30 @@ TYPED_TEST(TinyPtrVectorTest, InsertRange) { this->V.insert(this->V.end(), this->TestPtrs.end(), this->TestPtrs.end()); this->expectValues(this->V, this->testArray(0)); this->V.insert(this->V.end(), this->TestPtrs.begin(), - llvm::next(this->TestPtrs.begin())); + std::next(this->TestPtrs.begin())); this->expectValues(this->V, this->testArray(1)); this->V.clear(); this->V.insert(this->V.end(), this->TestPtrs.begin(), - llvm::next(this->TestPtrs.begin(), 2)); + std::next(this->TestPtrs.begin(), 2)); this->expectValues(this->V, this->testArray(2)); this->V.clear(); this->V.insert(this->V.end(), this->TestPtrs.begin(), - llvm::next(this->TestPtrs.begin(), 42)); + std::next(this->TestPtrs.begin(), 42)); this->expectValues(this->V, this->testArray(42)); this->V.clear(); this->V.insert(this->V.end(), - llvm::next(this->TestPtrs.begin(), 5), - llvm::next(this->TestPtrs.begin(), 13)); + std::next(this->TestPtrs.begin(), 5), + std::next(this->TestPtrs.begin(), 13)); this->V.insert(this->V.begin(), - llvm::next(this->TestPtrs.begin(), 0), - llvm::next(this->TestPtrs.begin(), 3)); - this->V.insert(llvm::next(this->V.begin(), 2), - llvm::next(this->TestPtrs.begin(), 2), - llvm::next(this->TestPtrs.begin(), 4)); - this->V.erase(llvm::next(this->V.begin(), 4)); - this->V.insert(llvm::next(this->V.begin(), 4), - llvm::next(this->TestPtrs.begin(), 4), - llvm::next(this->TestPtrs.begin(), 5)); + std::next(this->TestPtrs.begin(), 
0), + std::next(this->TestPtrs.begin(), 3)); + this->V.insert(std::next(this->V.begin(), 2), + std::next(this->TestPtrs.begin(), 2), + std::next(this->TestPtrs.begin(), 4)); + this->V.erase(std::next(this->V.begin(), 4)); + this->V.insert(std::next(this->V.begin(), 4), + std::next(this->TestPtrs.begin(), 4), + std::next(this->TestPtrs.begin(), 5)); this->expectValues(this->V, this->testArray(13)); } diff --git a/unittests/ADT/ilistTest.cpp b/unittests/ADT/ilistTest.cpp index 0c0cd0fd56f..134607c2fd4 100644 --- a/unittests/ADT/ilistTest.cpp +++ b/unittests/ADT/ilistTest.cpp @@ -51,15 +51,15 @@ TEST(ilistTest, SpliceOne) { List.splice(List.begin(), List, List.begin()); EXPECT_EQ(1u, List.size()); EXPECT_EQ(1, List.front().Value); - EXPECT_TRUE(llvm::next(List.begin()) == List.end()); + EXPECT_TRUE(std::next(List.begin()) == List.end()); // Altenative noop. Move the first element behind itself. List.push_back(2); List.push_back(3); - List.splice(llvm::next(List.begin()), List, List.begin()); + List.splice(std::next(List.begin()), List, List.begin()); EXPECT_EQ(3u, List.size()); EXPECT_EQ(1, List.front().Value); - EXPECT_EQ(2, llvm::next(List.begin())->Value); + EXPECT_EQ(2, std::next(List.begin())->Value); EXPECT_EQ(3, List.back().Value); } diff --git a/unittests/IR/InstructionsTest.cpp b/unittests/IR/InstructionsTest.cpp index cbdf18c6468..962c07eea14 100644 --- a/unittests/IR/InstructionsTest.cpp +++ b/unittests/IR/InstructionsTest.cpp @@ -65,9 +65,9 @@ TEST(InstructionsTest, BranchInst) { EXPECT_EQ(1U, b0->getNumOperands()); EXPECT_NE(b0->op_begin(), b0->op_end()); - EXPECT_EQ(b0->op_end(), llvm::next(b0->op_begin())); + EXPECT_EQ(b0->op_end(), std::next(b0->op_begin())); - EXPECT_EQ(b0->op_end(), llvm::next(b0->op_begin())); + EXPECT_EQ(b0->op_end(), std::next(b0->op_begin())); IntegerType* Int1 = IntegerType::get(C, 1); Constant* One = ConstantInt::get(Int1, 1, true); diff --git a/utils/TableGen/CodeGenRegisters.cpp b/utils/TableGen/CodeGenRegisters.cpp index 34f5ca01834..6d8aecc3e5e 100644 --- a/utils/TableGen/CodeGenRegisters.cpp +++ b/utils/TableGen/CodeGenRegisters.cpp @@ -1271,7 +1271,7 @@ static void computeUberSets(std::vector &UberSets, assert(USetID && "register number 0 is invalid"); AllocatableRegs.insert((*Regs.begin())->EnumValue); - for (CodeGenRegister::Set::const_iterator I = llvm::next(Regs.begin()), + for (CodeGenRegister::Set::const_iterator I = std::next(Regs.begin()), E = Regs.end(); I != E; ++I) { AllocatableRegs.insert((*I)->EnumValue); UberSetIDs.join(USetID, (*I)->EnumValue); @@ -1311,7 +1311,7 @@ static void computeUberSets(std::vector &UberSets, static void computeUberWeights(std::vector &UberSets, CodeGenRegBank &RegBank) { // Skip the first unallocatable set. - for (std::vector::iterator I = llvm::next(UberSets.begin()), + for (std::vector::iterator I = std::next(UberSets.begin()), E = UberSets.end(); I != E; ++I) { // Initialize all unit weights in this set, and remember the max units/reg. @@ -1552,7 +1552,7 @@ void CodeGenRegBank::computeRegUnitSets() { // Find an existing RegUnitSet. std::vector::const_iterator SetI = findRegUnitSet(RegUnitSets, RegUnitSets.back()); - if (SetI != llvm::prior(RegUnitSets.end())) + if (SetI != std::prev(RegUnitSets.end())) RegUnitSets.pop_back(); } @@ -1617,7 +1617,7 @@ void CodeGenRegBank::computeRegUnitSets() { // Find an existing RegUnitSet, or add the union to the unique sets. 
std::vector<RegUnitSet>::const_iterator SetI = findRegUnitSet(RegUnitSets, RegUnitSets.back()); - if (SetI != llvm::prior(RegUnitSets.end())) + if (SetI != std::prev(RegUnitSets.end())) RegUnitSets.pop_back(); else { DEBUG(dbgs() << "UnitSet " << RegUnitSets.size()-1