Mirror of https://github.com/RPCS3/llvm-mirror.git, synced 2024-11-21 18:22:53 +01:00

[C++11] Replace llvm::next and llvm::prior with std::next and std::prev.

Remove the old functions.

llvm-svn: 202636

parent d74da4b3ff
commit e4eb1b495f
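For context, a minimal sketch of the mechanical substitution applied throughout the diff below (this snippet is not part of the patch; the container and variable names are invented): std::next and std::prev from <iterator> return an advanced or retreated copy of an iterator without modifying the original, exactly like the removed helpers.

#include <cassert>
#include <iterator>
#include <list>

int main() {
  std::list<int> L = {10, 20, 30, 40};
  std::list<int>::iterator I = std::next(L.begin());  // was llvm::next(L.begin())
  std::list<int>::iterator J = std::prev(L.end());    // was llvm::prior(L.end())
  assert(*I == 20 && *J == 40);
  return 0;
}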
@@ -17,9 +17,7 @@
#ifndef LLVM_ADT_MAPVECTOR_H
#define LLVM_ADT_MAPVECTOR_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include <vector>
namespace llvm {
@@ -97,7 +95,7 @@ public:
if (Result.second) {
Vector.push_back(std::make_pair(KV.first, KV.second));
I = Vector.size() - 1;
return std::make_pair(llvm::prior(end()), true);
return std::make_pair(std::prev(end()), true);
}
return std::make_pair(begin() + I, false);
}
@@ -141,41 +141,6 @@ inline mapped_iterator<ItTy, FuncTy> map_iterator(const ItTy &I, FuncTy F) {
return mapped_iterator<ItTy, FuncTy>(I, F);
}
// next/prior - These functions unlike std::advance do not modify the
// passed iterator but return a copy.
//
// next(myIt) returns copy of myIt incremented once
// next(myIt, n) returns copy of myIt incremented n times
// prior(myIt) returns copy of myIt decremented once
// prior(myIt, n) returns copy of myIt decremented n times
template <typename ItTy, typename Dist>
inline ItTy next(ItTy it, Dist n)
{
std::advance(it, n);
return it;
}
template <typename ItTy>
inline ItTy next(ItTy it)
{
return ++it;
}
template <typename ItTy, typename Dist>
inline ItTy prior(ItTy it, Dist n)
{
std::advance(it, -n);
return it;
}
template <typename ItTy>
inline ItTy prior(ItTy it)
{
return --it;
}
//===----------------------------------------------------------------------===//
// Extra additions to <utility>
//===----------------------------------------------------------------------===//
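The helpers removed above also had counted forms, next(it, n) and prior(it, n). A small sketch (illustrative only, names invented) of how the counted std::next/std::prev overloads cover those uses:

#include <cassert>
#include <iterator>
#include <vector>

int main() {
  std::vector<int> V = {1, 2, 3, 4, 5};
  // next(it, n) -> std::next(it, n): copy advanced n times.
  assert(*std::next(V.begin(), 2) == 3);
  // prior(it, n) -> std::prev(it, n): copy decremented n times.
  assert(*std::prev(V.end(), 2) == 4);
  return 0;
}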
@@ -12,9 +12,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
namespace llvm {
@@ -248,7 +246,7 @@ public:
assert(I <= this->end() && "Inserting past the end of the vector.");
if (I == end()) {
push_back(Elt);
return llvm::prior(end());
return std::prev(end());
}
assert(!Val.isNull() && "Null value with non-end insert iterator.");
if (EltTy V = Val.template dyn_cast<EltTy>()) {
@@ -271,7 +269,7 @@ public:
// If we have a single value, convert to a vector.
ptrdiff_t Offset = I - begin();
if (Val.isNull()) {
if (llvm::next(From) == To) {
if (std::next(From) == To) {
Val = *From;
return begin();
}
@@ -503,7 +503,7 @@ public:
///
/// If I points to a bundle of instructions, they are all erased.
iterator erase(iterator I) {
return erase(I, llvm::next(I));
return erase(I, std::next(I));
}
/// Remove an instruction from the instruction list and delete it.
@@ -542,7 +542,7 @@ public:
void splice(iterator Where, MachineBasicBlock *Other, iterator From) {
// The range splice() doesn't allow noop moves, but this one does.
if (Where != From)
splice(Where, Other, From, llvm::next(From));
splice(Where, Other, From, std::next(From));
}
/// Take a block of instructions from MBB 'Other' in the range [From, To),
@@ -750,11 +750,11 @@ public:
MachineInstrSpan(MachineBasicBlock::iterator I)
: MBB(*I->getParent()),
I(I),
B(I == MBB.begin() ? MBB.end() : llvm::prior(I)),
E(llvm::next(I)) {}
B(I == MBB.begin() ? MBB.end() : std::prev(I)),
E(std::next(I)) {}
MachineBasicBlock::iterator begin() {
return B == MBB.end() ? MBB.begin() : llvm::next(B);
return B == MBB.end() ? MBB.begin() : std::next(B);
}
MachineBasicBlock::iterator end() { return E; }
bool empty() { return begin() == end(); }
@@ -408,7 +408,7 @@ public:
/// hasOneUse - Return true if there is exactly one use of this node.
///
bool hasOneUse() const {
return !use_empty() && llvm::next(use_begin()) == use_end();
return !use_empty() && std::next(use_begin()) == use_end();
}
/// use_size - Return the number of uses of this node. This method takes
@@ -545,7 +545,7 @@ namespace llvm {
std::lower_bound(idx2MBBMap.begin(), idx2MBBMap.end(), start);
if (itr == idx2MBBMap.end()) {
itr = prior(itr);
itr = std::prev(itr);
return itr->second;
}
@@ -553,7 +553,7 @@ namespace llvm {
if (itr->first < end)
return 0;
itr = prior(itr);
itr = std::prev(itr);
if (itr->first <= start)
return itr->second;
@@ -581,11 +581,11 @@ namespace llvm {
if (Late) {
// Insert mi's index immediately before the following instruction.
nextItr = getIndexAfter(mi).listEntry();
prevItr = prior(nextItr);
prevItr = std::prev(nextItr);
} else {
// Insert mi's index immediately after the preceding instruction.
prevItr = getIndexBefore(mi).listEntry();
nextItr = llvm::next(prevItr);
nextItr = std::next(prevItr);
}
// Get a number for the new instr, or 0 if there's no room currently.
@@ -638,7 +638,7 @@ namespace llvm {
/// Add the given MachineBasicBlock into the maps.
void insertMBBInMaps(MachineBasicBlock *mbb) {
MachineFunction::iterator nextMBB =
llvm::next(MachineFunction::iterator(mbb));
std::next(MachineFunction::iterator(mbb));
IndexListEntry *startEntry = 0;
IndexListEntry *endEntry = 0;
@@ -603,7 +603,7 @@ void Lint::visitInsertElementInst(InsertElementInst &I) {
void Lint::visitUnreachableInst(UnreachableInst &I) {
// This isn't undefined behavior, it's merely suspicious.
Assert1(&I == I.getParent()->begin() ||
prior(BasicBlock::iterator(&I))->mayHaveSideEffects(),
std::prev(BasicBlock::iterator(&I))->mayHaveSideEffects(),
"Unusual: unreachable immediately preceded by instruction without "
"side effects", &I);
}
@@ -527,8 +527,8 @@ void UnloopUpdater::removeBlocksFromAncestors() {
/// nested within unloop.
void UnloopUpdater::updateSubloopParents() {
while (!Unloop->empty()) {
Loop *Subloop = *llvm::prior(Unloop->end());
Unloop->removeChildLoop(llvm::prior(Unloop->end()));
Loop *Subloop = *std::prev(Unloop->end());
Unloop->removeChildLoop(std::prev(Unloop->end()));
assert(SubloopParents.count(Subloop) && "DFS failed to visit subloop");
if (Loop *Parent = SubloopParents[Subloop])
@@ -652,7 +652,7 @@ void LoopInfo::updateUnloop(Loop *Unloop) {
// Move all of the subloops to the top-level.
while (!Unloop->empty())
LI.addTopLevelLoop(Unloop->removeChildLoop(llvm::prior(Unloop->end())));
LI.addTopLevelLoop(Unloop->removeChildLoop(std::prev(Unloop->end())));
return;
}
@@ -693,7 +693,7 @@ MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
NonLocalDepInfo::iterator Entry =
std::upper_bound(Cache.begin(), Cache.begin()+NumSortedEntries,
NonLocalDepEntry(DirtyBB));
if (Entry != Cache.begin() && prior(Entry)->getBB() == DirtyBB)
if (Entry != Cache.begin() && std::prev(Entry)->getBB() == DirtyBB)
--Entry;
NonLocalDepEntry *ExistingResult = 0;
@@ -193,7 +193,7 @@ void SCEV::print(raw_ostream &OS) const {
for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
I != E; ++I) {
OS << **I;
if (llvm::next(I) != E)
if (std::next(I) != E)
OS << OpStr;
}
OS << ")";
@@ -3258,7 +3258,7 @@ const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
const SCEV *TotalOffset = getConstant(IntPtrTy, 0);
gep_type_iterator GTI = gep_type_begin(GEP);
for (GetElementPtrInst::op_iterator I = llvm::next(GEP->op_begin()),
for (GetElementPtrInst::op_iterator I = std::next(GEP->op_begin()),
E = GEP->op_end();
I != E; ++I) {
Value *Index = *I;
@@ -1391,7 +1391,7 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
S->getNoWrapFlags(SCEV::FlagNW)));
BasicBlock::iterator NewInsertPt =
llvm::next(BasicBlock::iterator(cast<Instruction>(V)));
std::next(BasicBlock::iterator(cast<Instruction>(V)));
BuilderType::InsertPointGuard Guard(Builder);
while (isa<PHINode>(NewInsertPt) || isa<DbgInfoIntrinsic>(NewInsertPt) ||
isa<LandingPadInst>(NewInsertPt))
@@ -1619,7 +1619,7 @@ Value *SCEVExpander::expand(const SCEV *S) {
while (InsertPt != Builder.GetInsertPoint()
&& (isInsertedInstruction(InsertPt)
|| isa<DbgInfoIntrinsic>(InsertPt))) {
InsertPt = llvm::next(BasicBlock::iterator(InsertPt));
InsertPt = std::next(BasicBlock::iterator(InsertPt));
}
break;
}
@@ -498,8 +498,7 @@ bool llvm::isInTailCallPosition(ImmutableCallSite CS,
// chain interposes between I and the return.
if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
!isSafeToSpeculativelyExecute(I))
for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
--BBI) {
for (BasicBlock::const_iterator BBI = std::prev(ExitBB->end(), 2);; --BBI) {
if (&*BBI == I)
break;
// Debug info intrinsics do not get in the way of tail call optimization.
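One hunk above is not a one-for-one rename: the nested prior(prior(ExitBB->end())) collapses into the counted form std::prev(ExitBB->end(), 2). A tiny sketch (hypothetical container, not LLVM code) showing the two spellings are equivalent:

#include <cassert>
#include <iterator>
#include <list>

int main() {
  std::list<int> BB = {1, 2, 3, 4};
  // Stepping back twice, one call at a time...
  std::list<int>::iterator A = std::prev(std::prev(BB.end()));
  // ...is the same as a single call with a count of 2.
  std::list<int>::iterator B = std::prev(BB.end(), 2);
  assert(A == B && *A == 3);
  return 0;
}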
@@ -1592,7 +1592,7 @@ void DwarfDebug::beginFunction(const MachineFunction *MF) {
// Terminate old register assignments that don't reach MI;
MachineFunction::const_iterator PrevMBB = Prev->getParent();
if (PrevMBB != I && (!AtBlockEntry || llvm::next(PrevMBB) != I) &&
if (PrevMBB != I && (!AtBlockEntry || std::next(PrevMBB) != I) &&
isDbgValueInDefinedReg(Prev)) {
// Previous register assignment needs to terminate at the end of
// its basic block.
@@ -1603,7 +1603,7 @@ void DwarfDebug::beginFunction(const MachineFunction *MF) {
DEBUG(dbgs() << "Dropping DBG_VALUE for empty range:\n"
<< "\t" << *Prev << "\n");
History.pop_back();
} else if (llvm::next(PrevMBB) != PrevMBB->getParent()->end())
} else if (std::next(PrevMBB) != PrevMBB->getParent()->end())
// Terminate after LastMI.
History.push_back(LastMI);
}
@@ -383,7 +383,7 @@ void BranchFolder::MaintainLiveIns(MachineBasicBlock *CurMBB,
if (RS) {
RS->enterBasicBlock(CurMBB);
if (!CurMBB->empty())
RS->forward(prior(CurMBB->end()));
RS->forward(std::prev(CurMBB->end()));
BitVector RegsLiveAtExit(TRI->getNumRegs());
RS->getRegsUsed(RegsLiveAtExit, false);
for (unsigned int i = 0, e = TRI->getNumRegs(); i != e; i++)
@@ -462,7 +462,7 @@ static unsigned EstimateRuntime(MachineBasicBlock::iterator I,
static void FixTail(MachineBasicBlock *CurMBB, MachineBasicBlock *SuccBB,
const TargetInstrInfo *TII) {
MachineFunction *MF = CurMBB->getParent();
MachineFunction::iterator I = llvm::next(MachineFunction::iterator(CurMBB));
MachineFunction::iterator I = std::next(MachineFunction::iterator(CurMBB));
MachineBasicBlock *TBB = 0, *FBB = 0;
SmallVector<MachineOperand, 4> Cond;
DebugLoc dl; // FIXME: this is nowhere
@@ -600,12 +600,11 @@ unsigned BranchFolder::ComputeSameTails(unsigned CurHash,
unsigned maxCommonTailLength = 0U;
SameTails.clear();
MachineBasicBlock::iterator TrialBBI1, TrialBBI2;
MPIterator HighestMPIter = prior(MergePotentials.end());
for (MPIterator CurMPIter = prior(MergePotentials.end()),
MPIterator HighestMPIter = std::prev(MergePotentials.end());
for (MPIterator CurMPIter = std::prev(MergePotentials.end()),
B = MergePotentials.begin();
CurMPIter != B && CurMPIter->getHash() == CurHash;
--CurMPIter) {
for (MPIterator I = prior(CurMPIter); I->getHash() == CurHash ; --I) {
CurMPIter != B && CurMPIter->getHash() == CurHash; --CurMPIter) {
for (MPIterator I = std::prev(CurMPIter); I->getHash() == CurHash; --I) {
unsigned CommonTailLen;
if (ProfitableToMerge(CurMPIter->getBlock(), I->getBlock(),
minCommonTailLength,
@@ -634,9 +633,9 @@ void BranchFolder::RemoveBlocksWithHash(unsigned CurHash,
MachineBasicBlock *SuccBB,
MachineBasicBlock *PredBB) {
MPIterator CurMPIter, B;
for (CurMPIter = prior(MergePotentials.end()), B = MergePotentials.begin();
CurMPIter->getHash() == CurHash;
--CurMPIter) {
for (CurMPIter = std::prev(MergePotentials.end()),
B = MergePotentials.begin();
CurMPIter->getHash() == CurHash; --CurMPIter) {
// Put the unconditional branch back, if we need one.
MachineBasicBlock *CurMBB = CurMPIter->getBlock();
if (SuccBB && CurMBB != PredBB)
@@ -868,12 +867,12 @@ bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
// a compile-time infinite loop repeatedly doing and undoing the same
// transformations.)
for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
for (MachineFunction::iterator I = std::next(MF.begin()), E = MF.end();
I != E; ++I) {
if (I->pred_size() < 2) continue;
SmallPtrSet<MachineBasicBlock *, 8> UniquePreds;
MachineBasicBlock *IBB = I;
MachineBasicBlock *PredBB = prior(I);
MachineBasicBlock *PredBB = std::prev(I);
MergePotentials.clear();
for (MachineBasicBlock::pred_iterator P = I->pred_begin(),
E2 = I->pred_end();
@@ -905,7 +904,7 @@ bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
continue;
// This is the QBB case described above
if (!FBB)
FBB = llvm::next(MachineFunction::iterator(PBB));
FBB = std::next(MachineFunction::iterator(PBB));
}
// Failing case: the only way IBB can be reached from PBB is via
@@ -955,7 +954,7 @@ bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
// Reinsert an unconditional branch if needed. The 1 below can occur as a
// result of removing blocks in TryTailMergeBlocks.
PredBB = prior(I); // this may have been changed in TryTailMergeBlocks
PredBB = std::prev(I); // this may have been changed in TryTailMergeBlocks
if (MergePotentials.size() == 1 &&
MergePotentials.begin()->getBlock() != PredBB)
FixTail(MergePotentials.begin()->getBlock(), IBB, TII);
@@ -974,7 +973,7 @@ bool BranchFolder::OptimizeBranches(MachineFunction &MF) {
// Make sure blocks are numbered in order
MF.RenumberBlocks();
for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
for (MachineFunction::iterator I = std::next(MF.begin()), E = MF.end();
I != E; ) {
MachineBasicBlock *MBB = I++;
MadeChange |= OptimizeBlock(MBB);
@@ -1095,7 +1094,7 @@ ReoptimizeBlock:
// Check to see if we can simplify the terminator of the block before this
// one.
MachineBasicBlock &PrevBB = *prior(MachineFunction::iterator(MBB));
MachineBasicBlock &PrevBB = *std::prev(MachineFunction::iterator(MBB));
MachineBasicBlock *PriorTBB = 0, *PriorFBB = 0;
SmallVector<MachineOperand, 4> PriorCond;
@@ -1394,7 +1393,8 @@ ReoptimizeBlock:
// B elsewhere
// next:
if (CurFallsThru) {
MachineBasicBlock *NextBB = llvm::next(MachineFunction::iterator(MBB));
MachineBasicBlock *NextBB =
std::next(MachineFunction::iterator(MBB));
CurCond.clear();
TII->InsertBranch(*MBB, NextBB, 0, CurCond, DebugLoc());
}
@@ -253,7 +253,7 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
bool CodeGenPrepare::EliminateFallThrough(Function &F) {
bool Changed = false;
// Scan all of the blocks in the function, except for the entry block.
for (Function::iterator I = llvm::next(F.begin()), E = F.end(); I != E; ) {
for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
BasicBlock *BB = I++;
// If the destination block has a single pred, then this is a trivial
// edge, just collapse it.
@@ -289,7 +289,7 @@ bool CodeGenPrepare::EliminateFallThrough(Function &F) {
bool CodeGenPrepare::EliminateMostlyEmptyBlocks(Function &F) {
bool MadeChange = false;
// Note that this intentionally skips the entry block.
for (Function::iterator I = llvm::next(F.begin()), E = F.end(); I != E; ) {
for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
BasicBlock *BB = I++;
// If this block doesn't end with an uncond branch, ignore it.
@@ -130,7 +130,7 @@ bool DeadMachineInstructionElim::runOnMachineFunction(MachineFunction &MF) {
MachineRegisterInfo::use_iterator nextI;
for (MachineRegisterInfo::use_iterator I = MRI->use_begin(Reg),
E = MRI->use_end(); I!=E; I=nextI) {
nextI = llvm::next(I); // I is invalidated by the setReg
nextI = std::next(I); // I is invalidated by the setReg
MachineOperand& Use = I.getOperand();
MachineInstr *UseMI = Use.getParent();
if (UseMI==MI)
@@ -461,7 +461,7 @@ void SSAIfConv::replacePHIInstrs() {
DEBUG(dbgs() << "If-converting " << *PI.PHI);
unsigned DstReg = PI.PHI->getOperand(0).getReg();
TII->insertSelect(*Head, FirstTerm, HeadDL, DstReg, Cond, PI.TReg, PI.FReg);
DEBUG(dbgs() << " --> " << *llvm::prior(FirstTerm));
DEBUG(dbgs() << " --> " << *std::prev(FirstTerm));
PI.PHI->eraseFromParent();
PI.PHI = 0;
}
@@ -482,7 +482,7 @@ void SSAIfConv::rewritePHIOperands() {
unsigned PHIDst = PI.PHI->getOperand(0).getReg();
unsigned DstReg = MRI->createVirtualRegister(MRI->getRegClass(PHIDst));
TII->insertSelect(*Head, FirstTerm, HeadDL, DstReg, Cond, PI.TReg, PI.FReg);
DEBUG(dbgs() << " --> " << *llvm::prior(FirstTerm));
DEBUG(dbgs() << " --> " << *std::prev(FirstTerm));
// Rewrite PHI operands TPred -> (DstReg, Head), remove FPred.
for (unsigned i = PI.PHI->getNumOperands(); i != 1; i -= 2) {
@@ -921,7 +921,7 @@ void IfConverter::AnalyzeBlocks(MachineFunction &MF,
/// next block).
static bool canFallThroughTo(MachineBasicBlock *BB, MachineBasicBlock *ToBB) {
MachineFunction::iterator PI = BB;
MachineFunction::iterator I = llvm::next(PI);
MachineFunction::iterator I = std::next(PI);
MachineFunction::iterator TI = ToBB;
MachineFunction::iterator E = BB->getParent()->end();
while (I != TI) {
@@ -1014,7 +1014,7 @@ static void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B,
char NextLine = '\n';
char SlotIndent = '\t';
if (llvm::next(B) == E) {
if (std::next(B) == E) {
NextLine = ' ';
SlotIndent = ' ';
}
@@ -1171,12 +1171,12 @@ void InlineSpiller::insertSpill(unsigned NewVReg, bool isKill,
MachineBasicBlock &MBB = *MI->getParent();
MachineInstrSpan MIS(MI);
TII.storeRegToStackSlot(MBB, llvm::next(MI), NewVReg, isKill, StackSlot,
TII.storeRegToStackSlot(MBB, std::next(MI), NewVReg, isKill, StackSlot,
MRI.getRegClass(NewVReg), &TRI);
LIS.InsertMachineInstrRangeInMaps(llvm::next(MI), MIS.end());
LIS.InsertMachineInstrRangeInMaps(std::next(MI), MIS.end());
DEBUG(dumpMachineInstrRangeWithSlotIndex(llvm::next(MI), MIS.end(), LIS,
DEBUG(dumpMachineInstrRangeWithSlotIndex(std::next(MI), MIS.end(), LIS,
"spill"));
++NumSpills;
}
@@ -119,12 +119,12 @@ void LatencyPriorityQueue::AdjustPriorityOfUnscheduledPreds(SUnit *SU) {
SUnit *LatencyPriorityQueue::pop() {
if (empty()) return NULL;
std::vector<SUnit *>::iterator Best = Queue.begin();
for (std::vector<SUnit *>::iterator I = llvm::next(Queue.begin()),
for (std::vector<SUnit *>::iterator I = std::next(Queue.begin()),
E = Queue.end(); I != E; ++I)
if (Picker(*Best, *I))
Best = I;
SUnit *V = *Best;
if (Best != prior(Queue.end()))
if (Best != std::prev(Queue.end()))
std::swap(*Best, Queue.back());
Queue.pop_back();
return V;
@@ -133,7 +133,7 @@ SUnit *LatencyPriorityQueue::pop() {
void LatencyPriorityQueue::remove(SUnit *SU) {
assert(!Queue.empty() && "Queue is empty!");
std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(), SU);
if (I != prior(Queue.end()))
if (I != std::prev(Queue.end()))
std::swap(*I, Queue.back());
Queue.pop_back();
}
@@ -480,7 +480,7 @@ bool LDVImpl::collectDebugValues(MachineFunction &mf) {
// DBG_VALUE has no slot index, use the previous instruction instead.
SlotIndex Idx = MBBI == MBB->begin() ?
LIS->getMBBStartIdx(MBB) :
LIS->getInstructionIndex(llvm::prior(MBBI)).getRegSlot();
LIS->getInstructionIndex(std::prev(MBBI)).getRegSlot();
// Handle consecutive DBG_VALUE instructions with the same slot index.
do {
if (handleDebugValue(MBBI, Idx)) {
@@ -914,7 +914,7 @@ findInsertLocation(MachineBasicBlock *MBB, SlotIndex Idx,
// Don't insert anything after the first terminator, though.
return MI->isTerminator() ? MBB->getFirstTerminator() :
llvm::next(MachineBasicBlock::iterator(MI));
std::next(MachineBasicBlock::iterator(MI));
}
DebugLoc UserValue::findDebugLoc() {
@@ -222,13 +222,13 @@ void LiveRange::extendSegmentEndTo(iterator I, SlotIndex NewEnd) {
VNInfo *ValNo = I->valno;
// Search for the first segment that we can't merge with.
iterator MergeTo = llvm::next(I);
iterator MergeTo = std::next(I);
for (; MergeTo != end() && NewEnd >= MergeTo->end; ++MergeTo) {
assert(MergeTo->valno == ValNo && "Cannot merge with differing values!");
}
// If NewEnd was in the middle of a segment, make sure to get its endpoint.
I->end = std::max(NewEnd, prior(MergeTo)->end);
I->end = std::max(NewEnd, std::prev(MergeTo)->end);
// If the newly formed segment now touches the segment after it and if they
// have the same value number, merge the two segments into one segment.
@@ -239,7 +239,7 @@ void LiveRange::extendSegmentEndTo(iterator I, SlotIndex NewEnd) {
}
// Erase any dead segments.
segments.erase(llvm::next(I), MergeTo);
segments.erase(std::next(I), MergeTo);
}
@@ -274,7 +274,7 @@ LiveRange::extendSegmentStartTo(iterator I, SlotIndex NewStart) {
MergeTo->end = I->end;
}
segments.erase(llvm::next(MergeTo), llvm::next(I));
segments.erase(std::next(MergeTo), std::next(I));
return MergeTo;
}
@@ -285,7 +285,7 @@ LiveRange::iterator LiveRange::addSegmentFrom(Segment S, iterator From) {
// If the inserted segment starts in the middle or right at the end of
// another segment, just extend that segment to contain the segment of S.
if (it != begin()) {
iterator B = prior(it);
iterator B = std::prev(it);
if (S.valno == B->valno) {
if (B->start <= Start && B->end >= Start) {
extendSegmentEndTo(B, End);
@@ -389,7 +389,7 @@ void LiveRange::removeSegment(SlotIndex Start, SlotIndex End,
I->end = Start; // Trim the old segment.
// Insert the new one.
segments.insert(llvm::next(I), Segment(End, OldEnd, ValNo));
segments.insert(std::next(I), Segment(End, OldEnd, ValNo));
}
/// removeValNo - Remove all the segments defined by the specified value#.
@@ -433,7 +433,7 @@ void LiveRange::join(LiveRange &Other,
iterator OutIt = begin();
OutIt->valno = NewVNInfo[LHSValNoAssignments[OutIt->valno->id]];
for (iterator I = llvm::next(OutIt), E = end(); I != E; ++I) {
for (iterator I = std::next(OutIt), E = end(); I != E; ++I) {
VNInfo* nextValNo = NewVNInfo[LHSValNoAssignments[I->valno->id]];
assert(nextValNo != 0 && "Huh?");
@@ -641,10 +641,10 @@ void LiveRange::verify() const {
assert(I->valno != 0);
assert(I->valno->id < valnos.size());
assert(I->valno == valnos[I->valno->id]);
if (llvm::next(I) != E) {
assert(I->end <= llvm::next(I)->start);
if (I->end == llvm::next(I)->start)
assert(I->valno != llvm::next(I)->valno);
if (std::next(I) != E) {
assert(I->end <= std::next(I)->start);
if (I->end == std::next(I)->start)
assert(I->valno != std::next(I)->valno);
}
}
}
@@ -874,8 +874,8 @@ private:
// values. The new range should be placed immediately before NewI, move any
// intermediate ranges up.
assert(NewI != I && "Inconsistent iterators");
std::copy(llvm::next(I), NewI, I);
*llvm::prior(NewI)
std::copy(std::next(I), NewI, I);
*std::prev(NewI)
= LiveRange::Segment(DefVNI->def, NewIdx.getDeadSlot(), DefVNI);
}
@@ -920,7 +920,7 @@ private:
if (I == E || !SlotIndex::isSameInstr(I->start, OldIdx)) {
// No def, search for the new kill.
// This can never be an early clobber kill since there is no def.
llvm::prior(I)->end = findLastUseBefore(Reg).getRegSlot();
std::prev(I)->end = findLastUseBefore(Reg).getRegSlot();
return;
}
}
@@ -956,7 +956,7 @@ private:
// DefVNI is a dead def. It may have been moved across other values in LR,
// so move I up to NewI. Slide [NewI;I) down one position.
std::copy_backward(NewI, I, llvm::next(I));
std::copy_backward(NewI, I, std::next(I));
*NewI = LiveRange::Segment(DefVNI->def, NewIdx.getDeadSlot(), DefVNI);
}
@@ -968,11 +968,11 @@ private:
"No RegMask at OldIdx.");
*RI = NewIdx.getRegSlot();
assert((RI == LIS.RegMaskSlots.begin() ||
SlotIndex::isEarlierInstr(*llvm::prior(RI), *RI)) &&
"Cannot move regmask instruction above another call");
assert((llvm::next(RI) == LIS.RegMaskSlots.end() ||
SlotIndex::isEarlierInstr(*RI, *llvm::next(RI))) &&
"Cannot move regmask instruction below another call");
SlotIndex::isEarlierInstr(*std::prev(RI), *RI)) &&
"Cannot move regmask instruction above another call");
assert((std::next(RI) == LIS.RegMaskSlots.end() ||
SlotIndex::isEarlierInstr(*RI, *std::next(RI))) &&
"Cannot move regmask instruction below another call");
}
// Return the last use of reg between NewIdx and OldIdx.
@@ -1125,7 +1125,7 @@ LiveIntervals::repairIntervalsInRange(MachineBasicBlock *MBB,
if (LII->end.isDead()) {
SlotIndex prevStart;
if (LII != LI.begin())
prevStart = llvm::prior(LII)->start;
prevStart = std::prev(LII)->start;
// FIXME: This could be more efficient if there was a
// removeSegment method that returned an iterator.
@@ -626,7 +626,7 @@ bool MachineBasicBlock::isSuccessor(const MachineBasicBlock *MBB) const {
bool MachineBasicBlock::isLayoutSuccessor(const MachineBasicBlock *MBB) const {
MachineFunction::const_iterator I(this);
return llvm::next(I) == MachineFunction::const_iterator(MBB);
return std::next(I) == MachineFunction::const_iterator(MBB);
}
bool MachineBasicBlock::canFallThrough() {
@@ -705,7 +705,7 @@ MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ, Pass *P) {
}
MachineBasicBlock *NMBB = MF->CreateMachineBasicBlock();
MF->insert(llvm::next(MachineFunction::iterator(this)), NMBB);
MF->insert(std::next(MachineFunction::iterator(this)), NMBB);
DEBUG(dbgs() << "Splitting critical edge:"
" BB#" << getNumber()
<< " -- BB#" << NMBB->getNumber()
@@ -848,7 +848,7 @@ MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ, Pass *P) {
// extend to the end of the new split block.
bool isLastMBB =
llvm::next(MachineFunction::iterator(NMBB)) == getParent()->end();
std::next(MachineFunction::iterator(NMBB)) == getParent()->end();
SlotIndex StartIndex = Indexes->getMBBEndIdx(this);
SlotIndex PrevIndex = StartIndex.getPrevSlot();
@@ -1063,7 +1063,7 @@ bool MachineBasicBlock::CorrectExtraCFGEdges(MachineBasicBlock *DestA,
bool Changed = false;
MachineFunction::iterator FallThru =
llvm::next(MachineFunction::iterator(this));
std::next(MachineFunction::iterator(this));
if (DestA == 0 && DestB == 0) {
// Block falls through to successor.
@@ -494,11 +494,11 @@ void MachineBlockPlacement::buildChain(
MachineBasicBlock *LoopHeaderBB = BB;
markChainSuccessors(Chain, LoopHeaderBB, BlockWorkList, BlockFilter);
BB = *llvm::prior(Chain.end());
BB = *std::prev(Chain.end());
for (;;) {
assert(BB);
assert(BlockToChain[BB] == &Chain);
assert(*llvm::prior(Chain.end()) == BB);
assert(*std::prev(Chain.end()) == BB);
// Look for the best viable successor if there is one to place immediately
// after this block.
@@ -529,7 +529,7 @@ void MachineBlockPlacement::buildChain(
<< " to " << getBlockNum(BestSucc) << "\n");
markChainSuccessors(SuccChain, LoopHeaderBB, BlockWorkList, BlockFilter);
Chain.merge(BestSucc, &SuccChain);
BB = *llvm::prior(Chain.end());
BB = *std::prev(Chain.end());
}
DEBUG(dbgs() << "Finished forming chain for header block "
@@ -634,7 +634,7 @@ MachineBlockPlacement::findBestLoopExit(MachineFunction &F,
BlockChain &Chain = *BlockToChain[*I];
// Ensure that this block is at the end of a chain; otherwise it could be
// mid-way through an inner loop or a successor of an analyzable branch.
if (*I != *llvm::prior(Chain.end()))
if (*I != *std::prev(Chain.end()))
continue;
// Now walk the successors. We need to establish whether this has a viable
@@ -741,7 +741,7 @@ void MachineBlockPlacement::rotateLoop(BlockChain &LoopChain,
PI != PE; ++PI) {
BlockChain *PredChain = BlockToChain[*PI];
if (!LoopBlockSet.count(*PI) &&
(!PredChain || *PI == *llvm::prior(PredChain->end()))) {
(!PredChain || *PI == *std::prev(PredChain->end()))) {
ViableTopFallthrough = true;
break;
}
@@ -751,7 +751,7 @@ void MachineBlockPlacement::rotateLoop(BlockChain &LoopChain,
// bottom is a viable exiting block. If so, bail out as rotating will
// introduce an unnecessary branch.
if (ViableTopFallthrough) {
MachineBasicBlock *Bottom = *llvm::prior(LoopChain.end());
MachineBasicBlock *Bottom = *std::prev(LoopChain.end());
for (MachineBasicBlock::succ_iterator SI = Bottom->succ_begin(),
SE = Bottom->succ_end();
SI != SE; ++SI) {
@@ -767,7 +767,7 @@ void MachineBlockPlacement::rotateLoop(BlockChain &LoopChain,
if (ExitIt == LoopChain.end())
return;
std::rotate(LoopChain.begin(), llvm::next(ExitIt), LoopChain.end());
std::rotate(LoopChain.begin(), std::next(ExitIt), LoopChain.end());
}
/// \brief Forms basic block chains from the natural loop structures.
@@ -887,7 +887,7 @@ void MachineBlockPlacement::buildCFGChains(MachineFunction &F) {
if (!TII->AnalyzeBranch(*BB, TBB, FBB, Cond) || !FI->canFallThrough())
break;
MachineFunction::iterator NextFI(llvm::next(FI));
MachineFunction::iterator NextFI(std::next(FI));
MachineBasicBlock *NextBB = NextFI;
// Ensure that the layout successor is a viable block, as we know that
// fallthrough is a possibility.
@@ -981,7 +981,7 @@ void MachineBlockPlacement::buildCFGChains(MachineFunction &F) {
// Update the terminator of the previous block.
if (BI == FunctionChain.begin())
continue;
MachineBasicBlock *PrevBB = llvm::prior(MachineFunction::iterator(*BI));
MachineBasicBlock *PrevBB = std::prev(MachineFunction::iterator(*BI));
// FIXME: It would be awesome of updateTerminator would just return rather
// than assert when the branch cannot be analyzed in order to remove this
@@ -1053,7 +1053,7 @@ void MachineBlockPlacement::buildCFGChains(MachineFunction &F) {
const BranchProbability ColdProb(1, 5); // 20%
BlockFrequency EntryFreq = MBFI->getBlockFreq(F.begin());
BlockFrequency WeightedEntryFreq = EntryFreq * ColdProb;
for (BlockChain::iterator BI = llvm::next(FunctionChain.begin()),
for (BlockChain::iterator BI = std::next(FunctionChain.begin()),
BE = FunctionChain.end();
BI != BE; ++BI) {
// Don't align non-looping basic blocks. These are unlikely to execute
@@ -1079,7 +1079,7 @@ void MachineBlockPlacement::buildCFGChains(MachineFunction &F) {
// Check for the existence of a non-layout predecessor which would benefit
// from aligning this block.
MachineBasicBlock *LayoutPred = *llvm::prior(BI);
MachineBasicBlock *LayoutPred = *std::prev(BI);
// Force alignment if all the predecessors are jumps. We already checked
// that the block isn't cold above.
@@ -1101,7 +1101,7 @@ void MachineBlockPlacement::buildCFGChains(MachineFunction &F) {
bool MachineBlockPlacement::runOnMachineFunction(MachineFunction &F) {
// Check for single-block functions and skip them.
if (llvm::next(F.begin()) == F.end())
if (std::next(F.begin()) == F.end())
return false;
MBPI = &getAnalysis<MachineBranchProbabilityInfo>();
@@ -1169,7 +1169,7 @@ INITIALIZE_PASS_END(MachineBlockPlacementStats, "block-placement-stats",
bool MachineBlockPlacementStats::runOnMachineFunction(MachineFunction &F) {
// Check for single-block functions and skip them.
if (llvm::next(F.begin()) == F.end())
if (std::next(F.begin()) == F.end())
return false;
MBPI = &getAnalysis<MachineBranchProbabilityInfo>();
@@ -229,7 +229,7 @@ bool MachineCSE::hasLivePhysRegDefUses(const MachineInstr *MI,
// Next, collect all defs into PhysDefs. If any is already in PhysRefs
// (which currently contains only uses), set the PhysUseDef flag.
PhysUseDef = false;
MachineBasicBlock::const_iterator I = MI; I = llvm::next(I);
MachineBasicBlock::const_iterator I = MI; I = std::next(I);
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || !MO.isDef())
@@ -280,7 +280,7 @@ bool MachineCSE::PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI,
}
CrossMBB = true;
}
MachineBasicBlock::const_iterator I = CSMI; I = llvm::next(I);
MachineBasicBlock::const_iterator I = CSMI; I = std::next(I);
MachineBasicBlock::const_iterator E = MI;
MachineBasicBlock::const_iterator EE = CSMBB->end();
unsigned LookAheadLeft = LookAheadLimit;
@@ -139,7 +139,7 @@ void MachineFunction::RenumberBlocks(MachineBasicBlock *MBB) {
// Figure out the block number this should have.
unsigned BlockNo = 0;
if (MBBI != begin())
BlockNo = prior(MBBI)->getNumber()+1;
BlockNo = std::prev(MBBI)->getNumber() + 1;
for (; MBBI != E; ++MBBI, ++BlockNo) {
if (MBBI->getNumber() != (int)BlockNo) {
@@ -346,7 +346,7 @@ void MachineFunction::print(raw_ostream &OS, SlotIndexes *Indexes) const {
OS << PrintReg(I->first, TRI);
if (I->second)
OS << " in " << PrintReg(I->second, TRI);
if (llvm::next(I) != E)
if (std::next(I) != E)
OS << ", ";
}
OS << '\n';
@@ -1643,7 +1643,7 @@ void MachineInstr::print(raw_ostream &OS, const TargetMachine *TM,
for (mmo_iterator i = memoperands_begin(), e = memoperands_end();
i != e; ++i) {
OS << **i;
if (llvm::next(i) != e)
if (std::next(i) != e)
OS << " ";
}
}
@@ -211,7 +211,7 @@ MachineBasicBlock::instr_iterator
llvm::finalizeBundle(MachineBasicBlock &MBB,
MachineBasicBlock::instr_iterator FirstMI) {
MachineBasicBlock::instr_iterator E = MBB.instr_end();
MachineBasicBlock::instr_iterator LastMI = llvm::next(FirstMI);
MachineBasicBlock::instr_iterator LastMI = std::next(FirstMI);
while (LastMI != E && LastMI->isInsideBundle())
++LastMI;
finalizeBundle(MBB, FirstMI, LastMI);
@@ -235,7 +235,7 @@ bool llvm::finalizeBundles(MachineFunction &MF) {
if (!MII->isInsideBundle())
++MII;
else {
MII = finalizeBundle(MBB, llvm::prior(MII));
MII = finalizeBundle(MBB, std::prev(MII));
Changed = true;
}
}
@@ -50,11 +50,11 @@ MachineBasicBlock *MachineLoop::getTopBlock() {
MachineBasicBlock *TopMBB = getHeader();
MachineFunction::iterator Begin = TopMBB->getParent()->begin();
if (TopMBB != Begin) {
MachineBasicBlock *PriorMBB = prior(MachineFunction::iterator(TopMBB));
MachineBasicBlock *PriorMBB = std::prev(MachineFunction::iterator(TopMBB));
while (contains(PriorMBB)) {
TopMBB = PriorMBB;
if (TopMBB == Begin) break;
PriorMBB = prior(MachineFunction::iterator(TopMBB));
PriorMBB = std::prev(MachineFunction::iterator(TopMBB));
}
}
return TopMBB;
@@ -63,12 +63,12 @@ MachineBasicBlock *MachineLoop::getTopBlock() {
MachineBasicBlock *MachineLoop::getBottomBlock() {
MachineBasicBlock *BotMBB = getHeader();
MachineFunction::iterator End = BotMBB->getParent()->end();
if (BotMBB != prior(End)) {
MachineBasicBlock *NextMBB = llvm::next(MachineFunction::iterator(BotMBB));
if (BotMBB != std::prev(End)) {
MachineBasicBlock *NextMBB = std::next(MachineFunction::iterator(BotMBB));
while (contains(NextMBB)) {
BotMBB = NextMBB;
if (BotMBB == llvm::next(MachineFunction::iterator(BotMBB))) break;
NextMBB = llvm::next(MachineFunction::iterator(BotMBB));
if (BotMBB == std::next(MachineFunction::iterator(BotMBB))) break;
NextMBB = std::next(MachineFunction::iterator(BotMBB));
}
}
return BotMBB;
@@ -300,7 +300,7 @@ void MachineRegisterInfo::replaceRegWith(unsigned FromReg, unsigned ToReg) {
MachineInstr *MachineRegisterInfo::getVRegDef(unsigned Reg) const {
// Since we are in SSA form, we can use the first definition.
def_iterator I = def_begin(Reg);
assert((I.atEnd() || llvm::next(I) == def_end()) &&
assert((I.atEnd() || std::next(I) == def_end()) &&
"getVRegDef assumes a single definition or no definition");
return !I.atEnd() ? &*I : 0;
}
@@ -311,7 +311,7 @@ MachineInstr *MachineRegisterInfo::getVRegDef(unsigned Reg) const {
MachineInstr *MachineRegisterInfo::getUniqueVRegDef(unsigned Reg) const {
if (def_empty(Reg)) return 0;
def_iterator I = def_begin(Reg);
if (llvm::next(I) != def_end())
if (std::next(I) != def_end())
return 0;
return &*I;
}
|
@ -408,8 +408,8 @@ void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler) {
|
||||
RegionEnd != MBB->begin(); RegionEnd = Scheduler.begin()) {
|
||||
|
||||
// Avoid decrementing RegionEnd for blocks with no terminator.
|
||||
if (RegionEnd != MBB->end()
|
||||
|| isSchedBoundary(llvm::prior(RegionEnd), MBB, MF, TII, IsPostRA)) {
|
||||
if (RegionEnd != MBB->end() ||
|
||||
isSchedBoundary(std::prev(RegionEnd), MBB, MF, TII, IsPostRA)) {
|
||||
--RegionEnd;
|
||||
// Count the boundary instruction.
|
||||
--RemainingInstrs;
|
||||
@ -420,7 +420,7 @@ void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler) {
|
||||
unsigned NumRegionInstrs = 0;
|
||||
MachineBasicBlock::iterator I = RegionEnd;
|
||||
for(;I != MBB->begin(); --I, --RemainingInstrs, ++NumRegionInstrs) {
|
||||
if (isSchedBoundary(llvm::prior(I), MBB, MF, TII, IsPostRA))
|
||||
if (isSchedBoundary(std::prev(I), MBB, MF, TII, IsPostRA))
|
||||
break;
|
||||
}
|
||||
// Notify the scheduler of the region, even if we may skip scheduling
|
||||
@ -428,7 +428,7 @@ void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler) {
|
||||
Scheduler.enterRegion(MBB, I, RegionEnd, NumRegionInstrs);
|
||||
|
||||
// Skip empty scheduling regions (0 or 1 schedulable instructions).
|
||||
if (I == RegionEnd || I == llvm::prior(RegionEnd)) {
|
||||
if (I == RegionEnd || I == std::prev(RegionEnd)) {
|
||||
// Close the current region. Bundle the terminator if needed.
|
||||
// This invalidates 'RegionEnd' and 'I'.
|
||||
Scheduler.exitRegion();
|
||||
@ -770,13 +770,13 @@ void ScheduleDAGMI::placeDebugValues() {
|
||||
|
||||
for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
|
||||
DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
|
||||
std::pair<MachineInstr *, MachineInstr *> P = *prior(DI);
|
||||
std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
|
||||
MachineInstr *DbgValue = P.first;
|
||||
MachineBasicBlock::iterator OrigPrevMI = P.second;
|
||||
if (&*RegionBegin == DbgValue)
|
||||
++RegionBegin;
|
||||
BB->splice(++OrigPrevMI, BB, DbgValue);
|
||||
if (OrigPrevMI == llvm::prior(RegionEnd))
|
||||
if (OrigPrevMI == std::prev(RegionEnd))
|
||||
RegionEnd = DbgValue;
|
||||
}
|
||||
DbgValues.clear();
|
||||
@ -816,8 +816,7 @@ void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb,
|
||||
ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs);
|
||||
|
||||
// For convenience remember the end of the liveness region.
|
||||
LiveRegionEnd =
|
||||
(RegionEnd == bb->end()) ? RegionEnd : llvm::next(RegionEnd);
|
||||
LiveRegionEnd = (RegionEnd == bb->end()) ? RegionEnd : std::next(RegionEnd);
|
||||
|
||||
SUPressureDiffs.clear();
|
||||
|
||||
@ -1446,19 +1445,19 @@ void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
|
||||
// Check if GlobalLI contains a hole in the vicinity of LocalLI.
|
||||
if (GlobalSegment != GlobalLI->begin()) {
|
||||
// Two address defs have no hole.
|
||||
if (SlotIndex::isSameInstr(llvm::prior(GlobalSegment)->end,
|
||||
if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->end,
|
||||
GlobalSegment->start)) {
|
||||
return;
|
||||
}
|
||||
// If the prior global segment may be defined by the same two-address
|
||||
// instruction that also defines LocalLI, then can't make a hole here.
|
||||
if (SlotIndex::isSameInstr(llvm::prior(GlobalSegment)->start,
|
||||
if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->start,
|
||||
LocalLI->beginIndex())) {
|
||||
return;
|
||||
}
|
||||
// If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
|
||||
// it would be a disconnected component in the live range.
|
||||
assert(llvm::prior(GlobalSegment)->start < LocalLI->beginIndex() &&
|
||||
assert(std::prev(GlobalSegment)->start < LocalLI->beginIndex() &&
|
||||
"Disconnected LRG within the scheduling region.");
|
||||
}
|
||||
MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
|
||||
|
@ -1076,7 +1076,7 @@ void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
|
||||
|
||||
// Verify SSA form.
|
||||
if (MRI->isSSA() && TargetRegisterInfo::isVirtualRegister(Reg) &&
|
||||
llvm::next(MRI->def_begin(Reg)) != MRI->def_end())
|
||||
std::next(MRI->def_begin(Reg)) != MRI->def_end())
|
||||
report("Multiple virtual register defs in SSA form", MO, MONum);
|
||||
|
||||
// Check LiveInts for a live segment, but only for virtual registers.
|
||||
|
@ -186,7 +186,7 @@ bool PHIElimination::EliminatePHINodes(MachineFunction &MF,
|
||||
// Get an iterator to the first instruction after the last PHI node (this may
|
||||
// also be the end of the basic block).
|
||||
MachineBasicBlock::iterator LastPHIIt =
|
||||
prior(MBB.SkipPHIsAndLabels(MBB.begin()));
|
||||
std::prev(MBB.SkipPHIsAndLabels(MBB.begin()));
|
||||
|
||||
while (MBB.front().isPHI())
|
||||
LowerPHINode(MBB, LastPHIIt);
|
||||
@ -222,7 +222,7 @@ void PHIElimination::LowerPHINode(MachineBasicBlock &MBB,
|
||||
MachineBasicBlock::iterator LastPHIIt) {
|
||||
++NumLowered;
|
||||
|
||||
MachineBasicBlock::iterator AfterPHIsIt = llvm::next(LastPHIIt);
|
||||
MachineBasicBlock::iterator AfterPHIsIt = std::next(LastPHIIt);
|
||||
|
||||
// Unlink the PHI node from the basic block, but don't delete the PHI yet.
|
||||
MachineInstr *MPhi = MBB.remove(MBB.begin());
|
||||
@ -267,7 +267,7 @@ void PHIElimination::LowerPHINode(MachineBasicBlock &MBB,
|
||||
|
||||
// Update live variable information if there is any.
|
||||
if (LV) {
|
||||
MachineInstr *PHICopy = prior(AfterPHIsIt);
|
||||
MachineInstr *PHICopy = std::prev(AfterPHIsIt);
|
||||
|
||||
if (IncomingReg) {
|
||||
LiveVariables::VarInfo &VI = LV->getVarInfo(IncomingReg);
|
||||
@ -306,7 +306,7 @@ void PHIElimination::LowerPHINode(MachineBasicBlock &MBB,
|
||||
|
||||
// Update LiveIntervals for the new copy or implicit def.
|
||||
if (LIS) {
|
||||
MachineInstr *NewInstr = prior(AfterPHIsIt);
|
||||
MachineInstr *NewInstr = std::prev(AfterPHIsIt);
|
||||
SlotIndex DestCopyIndex = LIS->InsertMachineInstrInMaps(NewInstr);
|
||||
|
||||
SlotIndex MBBStartIndex = LIS->getMBBStartIdx(&MBB);
|
||||
@ -444,7 +444,7 @@ void PHIElimination::LowerPHINode(MachineBasicBlock &MBB,
|
||||
}
|
||||
} else {
|
||||
// We just inserted this copy.
|
||||
KillInst = prior(InsertPos);
|
||||
KillInst = std::prev(InsertPos);
|
||||
}
|
||||
}
|
||||
assert(KillInst->readsRegister(SrcReg) && "Cannot find kill instruction");
|
||||
@ -504,7 +504,7 @@ void PHIElimination::LowerPHINode(MachineBasicBlock &MBB,
|
||||
}
|
||||
} else {
|
||||
// We just inserted this copy.
|
||||
KillInst = prior(InsertPos);
|
||||
KillInst = std::prev(InsertPos);
|
||||
}
|
||||
}
|
||||
assert(KillInst->readsRegister(SrcReg) &&
|
||||
|
@ -306,7 +306,7 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
|
||||
MachineBasicBlock::iterator Current = MBB->end();
|
||||
unsigned Count = MBB->size(), CurrentCount = Count;
|
||||
for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
|
||||
MachineInstr *MI = llvm::prior(I);
|
||||
MachineInstr *MI = std::prev(I);
|
||||
--Count;
|
||||
// Calls are not scheduling boundaries before register allocation, but
|
||||
// post-ra we don't gain anything by scheduling across calls since we
|
||||
@ -648,13 +648,13 @@ void SchedulePostRATDList::EmitSchedule() {
|
||||
// Update the Begin iterator, as the first instruction in the block
|
||||
// may have been scheduled later.
|
||||
if (i == 0)
|
||||
RegionBegin = prior(RegionEnd);
|
||||
RegionBegin = std::prev(RegionEnd);
|
||||
}
|
||||
|
||||
// Reinsert any remaining debug_values.
|
||||
for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
|
||||
DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
|
||||
std::pair<MachineInstr *, MachineInstr *> P = *prior(DI);
|
||||
std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
|
||||
MachineInstr *DbgValue = P.first;
|
||||
MachineBasicBlock::iterator OrigPrivMI = P.second;
|
||||
BB->splice(++OrigPrivMI, BB, DbgValue);
|
||||
|
@ -759,14 +759,14 @@ void PEI::replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &Fn,
|
||||
SPAdj += Size;
|
||||
|
||||
MachineBasicBlock::iterator PrevI = BB->end();
|
||||
if (I != BB->begin()) PrevI = prior(I);
|
||||
if (I != BB->begin()) PrevI = std::prev(I);
|
||||
TFI->eliminateCallFramePseudoInstr(Fn, *BB, I);
|
||||
|
||||
// Visit the instructions created by eliminateCallFramePseudoInstr().
|
||||
if (PrevI == BB->end())
|
||||
I = BB->begin(); // The replaced instr was the first in the block.
|
||||
else
|
||||
I = llvm::next(PrevI);
|
||||
I = std::next(PrevI);
|
||||
continue;
|
||||
}
|
||||
|
||||
@ -849,9 +849,9 @@ void PEI::scavengeFrameVirtualRegs(MachineFunction &Fn) {
|
||||
I = BB->begin();
|
||||
|
||||
MachineInstr *MI = I;
|
||||
MachineBasicBlock::iterator J = llvm::next(I);
|
||||
MachineBasicBlock::iterator P = I == BB->begin() ?
|
||||
MachineBasicBlock::iterator(NULL) : llvm::prior(I);
|
||||
MachineBasicBlock::iterator J = std::next(I);
|
||||
MachineBasicBlock::iterator P =
|
||||
I == BB->begin() ? MachineBasicBlock::iterator(NULL) : std::prev(I);
|
||||
|
||||
// RS should process this instruction before we might scavenge at this
|
||||
// location. This is because we might be replacing a virtual register
|
||||
@ -894,7 +894,7 @@ void PEI::scavengeFrameVirtualRegs(MachineFunction &Fn) {
|
||||
// spill code will have been inserted in between I and J. This is a
|
||||
// problem because we need the spill code before I: Move I to just
|
||||
// prior to J.
|
||||
if (I != llvm::prior(J)) {
|
||||
if (I != std::prev(J)) {
|
||||
BB->splice(J, BB, I);
|
||||
|
||||
// Before we move I, we need to prepare the RS to visit I again.
|
||||
|
@ -264,8 +264,8 @@ PBQPRAProblem *PBQPBuilder::build(MachineFunction *mf, const LiveIntervals *lis,
|
||||
const LiveInterval &l1 = lis->getInterval(vr1);
|
||||
const PBQPRAProblem::AllowedSet &vr1Allowed = p->getAllowedSet(vr1);
|
||||
|
||||
for (RegSet::const_iterator vr2Itr = llvm::next(vr1Itr);
|
||||
vr2Itr != vrEnd; ++vr2Itr) {
|
||||
for (RegSet::const_iterator vr2Itr = std::next(vr1Itr); vr2Itr != vrEnd;
|
||||
++vr2Itr) {
|
||||
unsigned vr2 = *vr2Itr;
|
||||
const LiveInterval &l2 = lis->getInterval(vr2);
|
||||
const PBQPRAProblem::AllowedSet &vr2Allowed = p->getAllowedSet(vr2);
|
||||
|
@ -801,9 +801,9 @@ bool RegisterCoalescer::reMaterializeTrivialDef(CoalescerPair &CP,
|
||||
|
||||
MachineBasicBlock *MBB = CopyMI->getParent();
|
||||
MachineBasicBlock::iterator MII =
|
||||
llvm::next(MachineBasicBlock::iterator(CopyMI));
|
||||
std::next(MachineBasicBlock::iterator(CopyMI));
|
||||
TII->reMaterialize(*MBB, MII, DstReg, SrcIdx, DefMI, *TRI);
|
||||
MachineInstr *NewMI = prior(MII);
|
||||
MachineInstr *NewMI = std::prev(MII);
|
||||
|
||||
LIS->ReplaceMachineInstrInMaps(CopyMI, NewMI);
|
||||
CopyMI->eraseFromParent();
|
||||
|
@ -175,7 +175,7 @@ void RegScavenger::forward() {
|
||||
Tracking = true;
|
||||
} else {
|
||||
assert(MBBI != MBB->end() && "Already past the end of the basic block!");
|
||||
MBBI = llvm::next(MBBI);
|
||||
MBBI = std::next(MBBI);
|
||||
}
|
||||
assert(MBBI != MBB->end() && "Already at the end of the basic block!");
|
||||
|
||||
@ -415,7 +415,7 @@ unsigned RegScavenger::scavengeRegister(const TargetRegisterClass *RC,
|
||||
"Cannot scavenge register without an emergency spill slot!");
|
||||
TII->storeRegToStackSlot(*MBB, I, SReg, true, Scavenged[SI].FrameIndex,
|
||||
RC, TRI);
|
||||
MachineBasicBlock::iterator II = prior(I);
|
||||
MachineBasicBlock::iterator II = std::prev(I);
|
||||
|
||||
unsigned FIOperandNum = getFrameIndexOperandNum(II);
|
||||
TRI->eliminateFrameIndex(II, SPAdj, FIOperandNum, this);
|
||||
@ -423,13 +423,13 @@ unsigned RegScavenger::scavengeRegister(const TargetRegisterClass *RC,
|
||||
// Restore the scavenged register before its use (or first terminator).
|
||||
TII->loadRegFromStackSlot(*MBB, UseMI, SReg, Scavenged[SI].FrameIndex,
|
||||
RC, TRI);
|
||||
II = prior(UseMI);
|
||||
II = std::prev(UseMI);
|
||||
|
||||
FIOperandNum = getFrameIndexOperandNum(II);
|
||||
TRI->eliminateFrameIndex(II, SPAdj, FIOperandNum, this);
|
||||
}
|
||||
|
||||
Scavenged[SI].Restore = prior(UseMI);
|
||||
Scavenged[SI].Restore = std::prev(UseMI);
|
||||
|
||||
// Doing this here leads to infinite regress.
|
||||
// Scavenged[SI].Reg = SReg;
|
||||
|
@ -301,8 +301,8 @@ void SUnit::biasCriticalPath() {
|
||||
|
||||
SUnit::pred_iterator BestI = Preds.begin();
|
||||
unsigned MaxDepth = BestI->getSUnit()->getDepth();
|
||||
for (SUnit::pred_iterator
|
||||
I = llvm::next(BestI), E = Preds.end(); I != E; ++I) {
|
||||
for (SUnit::pred_iterator I = std::next(BestI), E = Preds.end(); I != E;
|
||||
++I) {
|
||||
if (I->getKind() == SDep::Data && I->getSUnit()->getDepth() > MaxDepth)
|
||||
BestI = I;
|
||||
}
|
||||
|
@ -784,7 +784,7 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
|
||||
MachineInstr *DbgMI = NULL;
|
||||
for (MachineBasicBlock::iterator MII = RegionEnd, MIE = RegionBegin;
|
||||
MII != MIE; --MII) {
|
||||
MachineInstr *MI = prior(MII);
|
||||
MachineInstr *MI = std::prev(MII);
|
||||
if (MI && DbgMI) {
|
||||
DbgValues.push_back(std::make_pair(DbgMI, MI));
|
||||
DbgMI = NULL;
|
||||
@ -800,7 +800,8 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
|
||||
if (RPTracker) {
|
||||
PressureDiff *PDiff = PDiffs ? &(*PDiffs)[SU->NodeNum] : 0;
|
||||
RPTracker->recede(/*LiveUses=*/0, PDiff);
|
||||
assert(RPTracker->getPos() == prior(MII) && "RPTracker can't find MI");
|
||||
assert(RPTracker->getPos() == std::prev(MII) &&
|
||||
"RPTracker can't find MI");
|
||||
}
|
||||
|
||||
assert((CanHandleTerminators || (!MI->isTerminator() && !MI->isLabel())) &&
|
||||
@ -1427,7 +1428,7 @@ public:
|
||||
|
||||
const SDep *backtrack() {
|
||||
DFSStack.pop_back();
|
||||
return DFSStack.empty() ? 0 : llvm::prior(DFSStack.back().second);
|
||||
return DFSStack.empty() ? 0 : std::prev(DFSStack.back().second);
|
||||
}
|
||||
|
||||
const SUnit *getCurr() const { return DFSStack.back().first; }
|
||||
|
@ -344,7 +344,7 @@ FastISel::SavePoint FastISel::enterLocalValueArea() {
|
||||
|
||||
void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
|
||||
if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
|
||||
LastLocalValue = llvm::prior(FuncInfo.InsertPt);
|
||||
LastLocalValue = std::prev(FuncInfo.InsertPt);
|
||||
|
||||
// Restore the previous insert position.
|
||||
FuncInfo.InsertPt = OldInsertPt.InsertPt;
|
||||
|
@ -88,7 +88,7 @@ bool VectorLegalizer::Run() {
|
||||
// Before we start legalizing vector nodes, check if there are any vectors.
|
||||
bool HasVectors = false;
|
||||
for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
|
||||
E = prior(DAG.allnodes_end()); I != llvm::next(E); ++I) {
|
||||
E = std::prev(DAG.allnodes_end()); I != std::next(E); ++I) {
|
||||
// Check if the values of the nodes contain vectors. We don't need to check
|
||||
// the operands because we are going to check their values at some point.
|
||||
for (SDNode::value_iterator J = I->value_begin(), E = I->value_end();
|
||||
@ -112,7 +112,7 @@ bool VectorLegalizer::Run() {
|
||||
// node is only legalized after all of its operands are legalized.
|
||||
DAG.AssignTopologicalOrder();
|
||||
for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
|
||||
E = prior(DAG.allnodes_end()); I != llvm::next(E); ++I)
|
||||
E = std::prev(DAG.allnodes_end()); I != std::next(E); ++I)
|
||||
LegalizeOp(SDValue(I, 0));
|
||||
|
||||
// Finally, it's possible the root changed. Get the new root.
|
||||
|
@ -603,7 +603,7 @@ SUnit *ResourcePriorityQueue::pop() {
std::vector<SUnit *>::iterator Best = Queue.begin();
if (!DisableDFASched) {
signed BestCost = SUSchedulingCost(*Best);
for (std::vector<SUnit *>::iterator I = llvm::next(Queue.begin()),
for (std::vector<SUnit *>::iterator I = std::next(Queue.begin()),
E = Queue.end(); I != E; ++I) {
if (SUSchedulingCost(*I) > BestCost) {
@ -614,14 +614,14 @@ SUnit *ResourcePriorityQueue::pop() {
}
// Use default TD scheduling mechanism.
else {
for (std::vector<SUnit *>::iterator I = llvm::next(Queue.begin()),
for (std::vector<SUnit *>::iterator I = std::next(Queue.begin()),
E = Queue.end(); I != E; ++I)
if (Picker(*Best, *I))
Best = I;
}
SUnit *V = *Best;
if (Best != prior(Queue.end()))
if (Best != std::prev(Queue.end()))
std::swap(*Best, Queue.back());
Queue.pop_back();
@ -633,7 +633,7 @@ SUnit *ResourcePriorityQueue::pop() {
void ResourcePriorityQueue::remove(SUnit *SU) {
assert(!Queue.empty() && "Queue is empty!");
std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(), SU);
if (I != prior(Queue.end()))
if (I != std::prev(Queue.end()))
std::swap(*I, Queue.back());
Queue.pop_back();
@ -1708,7 +1708,7 @@ public:
assert(SU->NodeQueueId != 0 && "Not in queue!");
std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(),
SU);
if (I != prior(Queue.end()))
if (I != std::prev(Queue.end()))
std::swap(*I, Queue.back());
Queue.pop_back();
SU->NodeQueueId = 0;
@ -1738,12 +1738,12 @@ protected:
template<class SF>
static SUnit *popFromQueueImpl(std::vector<SUnit*> &Q, SF &Picker) {
std::vector<SUnit *>::iterator Best = Q.begin();
for (std::vector<SUnit *>::iterator I = llvm::next(Q.begin()),
for (std::vector<SUnit *>::iterator I = std::next(Q.begin()),
E = Q.end(); I != E; ++I)
if (Picker(*Best, *I))
Best = I;
SUnit *V = *Best;
if (Best != prior(Q.end()))
if (Best != std::prev(Q.end()))
std::swap(*Best, Q.back());
Q.pop_back();
return V;
@ -738,13 +738,13 @@ ProcessSourceNode(SDNode *N, SelectionDAG *DAG, InstrEmitter &Emitter,
if (Emitter.getInsertPos() == BB->begin() || BB->back().isPHI() ||
// Fast-isel may have inserted some instructions, in which case the
// BB->back().isPHI() test will not fire when we want it to.
prior(Emitter.getInsertPos())->isPHI()) {
std::prev(Emitter.getInsertPos())->isPHI()) {
// Did not insert any instruction.
Orders.push_back(std::make_pair(Order, (MachineInstr*)0));
return;
}
Orders.push_back(std::make_pair(Order, prior(Emitter.getInsertPos())));
Orders.push_back(std::make_pair(Order, std::prev(Emitter.getInsertPos())));
ProcessSDDbgValues(N, DAG, Emitter, Orders, VRBaseMap, Order);
}
@ -2650,7 +2650,7 @@ size_t SelectionDAGBuilder::Clusterify(CaseVector& Cases,
if (Cases.size() >= 2)
// Must recompute end() each iteration because it may be
// invalidated by erase if we hold on to it
for (CaseItr I = Cases.begin(), J = llvm::next(Cases.begin());
for (CaseItr I = Cases.begin(), J = std::next(Cases.begin());
J != Cases.end(); ) {
const APInt& nextValue = cast<ConstantInt>(J->Low)->getValue();
const APInt& currentValue = cast<ConstantInt>(I->High)->getValue();
@ -357,7 +357,7 @@ void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const {
for (MachineSDNode::mmo_iterator i = MN->memoperands_begin(),
e = MN->memoperands_end(); i != e; ++i) {
OS << **i;
if (llvm::next(i) != e)
if (std::next(i) != e)
OS << " ";
}
OS << ">";
@ -457,7 +457,7 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
if (Def) {
MachineBasicBlock::iterator InsertPos = Def;
// FIXME: VR def may not be in entry block.
Def->getParent()->insert(llvm::next(InsertPos), MI);
Def->getParent()->insert(std::next(InsertPos), MI);
} else
DEBUG(dbgs() << "Dropping debug info for dead vreg"
<< TargetRegisterInfo::virtReg2Index(Reg) << "\n");
@ -1067,7 +1067,7 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
// where they are, so we can be sure to emit subsequent instructions
// after them.
if (FuncInfo->InsertPt != FuncInfo->MBB->begin())
FastIS->setLastLocalValue(llvm::prior(FuncInfo->InsertPt));
FastIS->setLastLocalValue(std::prev(FuncInfo->InsertPt));
else
FastIS->setLastLocalValue(0);
}
@ -1075,7 +1075,7 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
unsigned NumFastIselRemaining = std::distance(Begin, End);
// Do FastISel on as many instructions as possible.
for (; BI != Begin; --BI) {
const Instruction *Inst = llvm::prior(BI);
const Instruction *Inst = std::prev(BI);
// If we no longer require this instruction, skip it.
if (isFoldedOrDeadInstruction(Inst, FuncInfo)) {
@ -1096,7 +1096,7 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
// Try to fold the load if so.
const Instruction *BeforeInst = Inst;
while (BeforeInst != Begin) {
BeforeInst = llvm::prior(BasicBlock::const_iterator(BeforeInst));
BeforeInst = std::prev(BasicBlock::const_iterator(BeforeInst));
if (!isFoldedOrDeadInstruction(BeforeInst, FuncInfo))
break;
}
@ -1104,7 +1104,7 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
BeforeInst->hasOneUse() &&
FastIS->tryToFoldLoad(cast<LoadInst>(BeforeInst), Inst)) {
// If we succeeded, don't re-select the load.
BI = llvm::next(BasicBlock::const_iterator(BeforeInst));
BI = std::next(BasicBlock::const_iterator(BeforeInst));
--NumFastIselRemaining;
++NumFastIselSuccess;
}
@ -173,7 +173,7 @@ void SjLjEHPrepare::substituteLPadValues(LandingPadInst *LPI, Value *ExnVal,
Type *LPadType = LPI->getType();
Value *LPadVal = UndefValue::get(LPadType);
IRBuilder<> Builder(
llvm::next(BasicBlock::iterator(cast<Instruction>(SelVal))));
std::next(BasicBlock::iterator(cast<Instruction>(SelVal))));
LPadVal = Builder.CreateInsertValue(LPadVal, ExnVal, 0, "lpad.val");
LPadVal = Builder.CreateInsertValue(LPadVal, SelVal, 1, "lpad.val");
@ -129,7 +129,7 @@ void SlotIndexes::renumberIndexes(IndexList::iterator curItr) {
const unsigned Space = SlotIndex::InstrDist/2;
assert((Space & 3) == 0 && "InstrDist must be a multiple of 2*NUM");
IndexList::iterator startItr = prior(curItr);
IndexList::iterator startItr = std::prev(curItr);
unsigned index = startItr->getIndex();
do {
curItr->setIndex(index += Space);
@ -327,7 +327,7 @@ void SpillPlacement::iterate() {
// iteration is not zero, the last node was just updated.
bool Changed = false;
for (SmallVectorImpl<unsigned>::const_reverse_iterator I =
iteration == 0 ? Linked.rbegin() : llvm::next(Linked.rbegin()),
iteration == 0 ? Linked.rbegin() : std::next(Linked.rbegin()),
E = Linked.rend(); I != E; ++I) {
unsigned n = *I;
if (nodes[n].update(nodes)) {
@ -342,7 +342,7 @@ void SpillPlacement::iterate() {
// Scan forwards, skipping the first node which was just updated.
Changed = false;
for (SmallVectorImpl<unsigned>::const_iterator I =
llvm::next(Linked.begin()), E = Linked.end(); I != E; ++I) {
std::next(Linked.begin()), E = Linked.end(); I != E; ++I) {
unsigned n = *I;
if (nodes[n].update(nodes)) {
Changed = true;
@ -143,9 +143,9 @@ protected:
if (hasDef) {
MachineInstrSpan MIS(miItr);
tii->storeRegToStackSlot(*mi->getParent(), llvm::next(miItr), NewVReg,
tii->storeRegToStackSlot(*mi->getParent(), std::next(miItr), NewVReg,
true, ss, trc, tri);
lis->InsertMachineInstrRangeInMaps(llvm::next(miItr), MIS.end());
lis->InsertMachineInstrRangeInMaps(std::next(miItr), MIS.end());
}
}
}
@ -509,7 +509,7 @@ SlotIndex SplitEditor::enterIntvAfter(SlotIndex Idx) {
assert(MI && "enterIntvAfter called with invalid index");
VNInfo *VNI = defFromParent(OpenIdx, ParentVNI, Idx, *MI->getParent(),
llvm::next(MachineBasicBlock::iterator(MI)));
std::next(MachineBasicBlock::iterator(MI)));
return VNI->def;
}
@ -570,7 +570,7 @@ SlotIndex SplitEditor::leaveIntvAfter(SlotIndex Idx) {
}
VNInfo *VNI = defFromParent(0, ParentVNI, Boundary, *MI->getParent(),
llvm::next(MachineBasicBlock::iterator(MI)));
std::next(MachineBasicBlock::iterator(MI)));
return VNI->def;
}
@ -161,7 +161,7 @@ StackMaps::parseRegisterLiveOutMask(const uint32_t *Mask) const {
std::sort(LiveOuts.begin(), LiveOuts.end());
for (LiveOutVec::iterator I = LiveOuts.begin(), E = LiveOuts.end();
I != E; ++I) {
for (LiveOutVec::iterator II = next(I); II != E; ++II) {
for (LiveOutVec::iterator II = std::next(I); II != E; ++II) {
if (I->RegNo != II->RegNo) {
// Skip all the now invalid entries.
I = --II;
@ -192,7 +192,7 @@ void StackMaps::recordStackMapOpers(const MachineInstr &MI, uint64_t ID,
if (recordResult) {
assert(PatchPointOpers(&MI).hasDef() && "Stackmap has no return value.");
parseOperand(MI.operands_begin(), llvm::next(MI.operands_begin()),
parseOperand(MI.operands_begin(), std::next(MI.operands_begin()),
Locations, LiveOuts);
}
@ -232,7 +232,7 @@ void StackMaps::recordStackMap(const MachineInstr &MI) {
assert(MI.getOpcode() == TargetOpcode::STACKMAP && "expected stackmap");
int64_t ID = MI.getOperand(0).getImm();
recordStackMapOpers(MI, ID, llvm::next(MI.operands_begin(), 2),
recordStackMapOpers(MI, ID, std::next(MI.operands_begin(), 2),
MI.operands_end());
}
@ -243,7 +243,7 @@ void StackMaps::recordPatchPoint(const MachineInstr &MI) {
int64_t ID = opers.getMetaOper(PatchPointOpers::IDPos).getImm();
MachineInstr::const_mop_iterator MOI =
llvm::next(MI.operands_begin(), opers.getStackMapStartIdx());
std::next(MI.operands_begin(), opers.getStackMapStartIdx());
recordStackMapOpers(MI, ID, MOI, MI.operands_end(),
opers.isAnyReg() && opers.hasDef());
@ -284,8 +284,7 @@ static CallInst *FindPotentialTailCall(BasicBlock *BB, ReturnInst *RI,
const unsigned MaxSearch = 4;
bool NoInterposingChain = true;
for (BasicBlock::reverse_iterator I = llvm::next(BB->rbegin()),
E = BB->rend();
for (BasicBlock::reverse_iterator I = std::next(BB->rbegin()), E = BB->rend();
I != E && SearchCounter < MaxSearch; ++I) {
Instruction *Inst = &*I;
@ -385,8 +385,8 @@ bool StackSlotColoring::RemoveDeadStores(MachineBasicBlock* MBB) {
toErase.push_back(I);
continue;
}
MachineBasicBlock::iterator NextMI = llvm::next(I);
MachineBasicBlock::iterator NextMI = std::next(I);
if (NextMI == MBB->end()) continue;
unsigned LoadReg = 0;
@ -697,7 +697,7 @@ TailDuplicatePass::duplicateSimpleBB(MachineBasicBlock *TailBB,
<< "From simple Succ: " << *TailBB);
MachineBasicBlock *NewTarget = *TailBB->succ_begin();
MachineBasicBlock *NextBB = llvm::next(MachineFunction::iterator(PredBB));
MachineBasicBlock *NextBB = std::next(MachineFunction::iterator(PredBB));
// Make PredFBB explicit.
if (PredCond.empty())
@ -798,7 +798,7 @@ TailDuplicatePass::TailDuplicate(MachineBasicBlock *TailBB,
// Update PredBB livein.
RS->enterBasicBlock(PredBB);
if (!PredBB->empty())
RS->forward(prior(PredBB->end()));
RS->forward(std::prev(PredBB->end()));
BitVector RegsLiveAtExit(TRI->getNumRegs());
RS->getRegsUsed(RegsLiveAtExit, false);
for (MachineBasicBlock::livein_iterator I = TailBB->livein_begin(),
@ -857,7 +857,7 @@ TailDuplicatePass::TailDuplicate(MachineBasicBlock *TailBB,
// If TailBB was duplicated into all its predecessors except for the prior
// block, which falls through unconditionally, move the contents of this
// block into the prior block.
MachineBasicBlock *PrevBB = prior(MachineFunction::iterator(TailBB));
MachineBasicBlock *PrevBB = std::prev(MachineFunction::iterator(TailBB));
MachineBasicBlock *PriorTBB = 0, *PriorFBB = 0;
SmallVector<MachineOperand, 4> PriorCond;
// This has to check PrevBB->succ_size() because EH edges are ignored by
@ -255,7 +255,7 @@ sink3AddrInstruction(MachineInstr *MI, unsigned SavedReg,
++KillPos;
unsigned NumVisited = 0;
for (MachineBasicBlock::iterator I = llvm::next(OldPos); I != KillPos; ++I) {
for (MachineBasicBlock::iterator I = std::next(OldPos); I != KillPos; ++I) {
MachineInstr *OtherMI = I;
// DBG_VALUE cannot be counted against the limit.
if (OtherMI->isDebugValue())
@ -417,7 +417,7 @@ static bool isKilled(MachineInstr &MI, unsigned Reg,
MachineRegisterInfo::def_iterator Begin = MRI->def_begin(Reg);
// If there are multiple defs, we can't do a simple analysis, so just
// go with what the kill flag says.
if (llvm::next(Begin) != MRI->def_end())
if (std::next(Begin) != MRI->def_end())
return true;
DefMI = &*Begin;
bool IsSrcPhys, IsDstPhys;
@ -647,7 +647,7 @@ TwoAddressInstructionPass::convertInstTo3Addr(MachineBasicBlock::iterator &mi,
if (!Sunk) {
DistanceMap.insert(std::make_pair(NewMI, Dist));
mi = NewMI;
nmi = llvm::next(mi);
nmi = std::next(mi);
}
// Update source and destination register maps.
@ -816,7 +816,7 @@ rescheduleMIBelowKill(MachineBasicBlock::iterator &mi,
// Move the copies connected to MI down as well.
MachineBasicBlock::iterator Begin = MI;
MachineBasicBlock::iterator AfterMI = llvm::next(Begin);
MachineBasicBlock::iterator AfterMI = std::next(Begin);
MachineBasicBlock::iterator End = AfterMI;
while (End->isCopy() && Defs.count(End->getOperand(1).getReg())) {
@ -876,7 +876,7 @@ rescheduleMIBelowKill(MachineBasicBlock::iterator &mi,
}
// Move debug info as well.
while (Begin != MBB->begin() && llvm::prior(Begin)->isDebugValue())
while (Begin != MBB->begin() && std::prev(Begin)->isDebugValue())
--Begin;
nmi = End;
@ -891,7 +891,7 @@ rescheduleMIBelowKill(MachineBasicBlock::iterator &mi,
LIS->handleMove(CopyMI);
InsertPos = CopyMI;
}
End = llvm::next(MachineBasicBlock::iterator(MI));
End = std::next(MachineBasicBlock::iterator(MI));
}
// Copies following MI may have been moved as well.
@ -1060,15 +1060,15 @@ rescheduleKillAboveMI(MachineBasicBlock::iterator &mi,
// Move the old kill above MI, don't forget to move debug info as well.
MachineBasicBlock::iterator InsertPos = mi;
while (InsertPos != MBB->begin() && llvm::prior(InsertPos)->isDebugValue())
while (InsertPos != MBB->begin() && std::prev(InsertPos)->isDebugValue())
--InsertPos;
MachineBasicBlock::iterator From = KillMI;
MachineBasicBlock::iterator To = llvm::next(From);
while (llvm::prior(From)->isDebugValue())
MachineBasicBlock::iterator To = std::next(From);
while (std::prev(From)->isDebugValue())
--From;
MBB->splice(InsertPos, MBB, From, To);
nmi = llvm::prior(InsertPos); // Backtrack so we process the moved instr.
nmi = std::prev(InsertPos); // Backtrack so we process the moved instr.
DistanceMap.erase(DI);
// Update live variables
@ -1534,7 +1534,7 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &Func) {
Processed.clear();
for (MachineBasicBlock::iterator mi = MBB->begin(), me = MBB->end();
mi != me; ) {
MachineBasicBlock::iterator nmi = llvm::next(mi);
MachineBasicBlock::iterator nmi = std::next(mi);
if (mi->isDebugValue()) {
mi = nmi;
continue;
@ -1689,7 +1689,7 @@ eliminateRegSequence(MachineBasicBlock::iterator &MBBI) {
}
MachineBasicBlock::iterator EndMBBI =
llvm::next(MachineBasicBlock::iterator(MI));
std::next(MachineBasicBlock::iterator(MI));
if (!DefEmitted) {
DEBUG(dbgs() << "Turned: " << *MI << " into an IMPLICIT_DEF");
@ -304,7 +304,7 @@ BasicBlock *BasicBlock::splitBasicBlock(iterator I, const Twine &BBName) {
assert(I != InstList.end() &&
"Trying to get me to create degenerate basic block!");
BasicBlock *InsertBefore = llvm::next(Function::iterator(this))
BasicBlock *InsertBefore = std::next(Function::iterator(this))
.getNodePtrUnchecked();
BasicBlock *New = BasicBlock::Create(getContext(), BBName,
getParent(), InsertBefore);
@ -1047,7 +1047,7 @@ bool ConstantExpr::isGEPWithNoNotionalOverIndexing() const {
if (getOpcode() != Instruction::GetElementPtr) return false;
gep_type_iterator GEPI = gep_type_begin(this), E = gep_type_end(this);
User::const_op_iterator OI = llvm::next(this->op_begin());
User::const_op_iterator OI = std::next(this->op_begin());
// Skip the first index, as it has no static limit.
++GEPI;
@ -123,7 +123,7 @@ FunctionAnalysisManager::getResultImpl(void *PassID, Function *F) {
if (Inserted) {
FunctionAnalysisResultListT &ResultList = FunctionAnalysisResultLists[F];
ResultList.push_back(std::make_pair(PassID, lookupPass(PassID).run(F, this)));
RI->second = llvm::prior(ResultList.end());
RI->second = std::prev(ResultList.end());
}
return *RI->second->second;
@ -53,7 +53,7 @@ MCFragment *MCObjectStreamer::getCurrentFragment() const {
assert(getCurrentSectionData() && "No current section!");
if (CurInsertionPoint != getCurrentSectionData()->getFragmentList().begin())
return prior(CurInsertionPoint);
return std::prev(CurInsertionPoint);
return 0;
}
@ -238,10 +238,10 @@ static bool BBHasFallthrough(MachineBasicBlock *MBB) {
// Get the next machine basic block in the function.
MachineFunction::iterator MBBI = MBB;
// Can't fall off end of function.
if (llvm::next(MBBI) == MBB->getParent()->end())
if (std::next(MBBI) == MBB->getParent()->end())
return false;
MachineBasicBlock *NextBB = llvm::next(MBBI);
MachineBasicBlock *NextBB = std::next(MBBI);
for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
E = MBB->succ_end(); I != E; ++I)
if (*I == NextBB)
@ -528,7 +528,7 @@ AArch64BranchFixup::fixupConditionalBr(ImmBranch &Br) {
++NumCBrFixed;
if (BMI != MI) {
if (llvm::next(MachineBasicBlock::iterator(MI)) == prior(MBB->end()) &&
if (std::next(MachineBasicBlock::iterator(MI)) == std::prev(MBB->end()) &&
BMI->getOpcode() == AArch64::Bimm) {
// Last MI in the BB is an unconditional branch. We can swap destinations:
// b.eq L1 (temporarily b.ne L1 after first change)
@ -575,7 +575,7 @@ AArch64BranchFixup::fixupConditionalBr(ImmBranch &Br) {
// b L1
// splitbb/fallthroughbb:
// [old b L2/real continuation]
MachineBasicBlock *NextBB = llvm::next(MachineFunction::iterator(MBB));
MachineBasicBlock *NextBB = std::next(MachineFunction::iterator(MBB));
DEBUG(dbgs() << " Insert B to BB#"
<< MI->getOperand(CondBrMBBOperand).getMBB()->getNumber()
@ -237,7 +237,7 @@ AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
// Delete the pseudo instruction TC_RETURN.
MachineInstr *NewMI = prior(MBBI);
MachineInstr *NewMI = std::prev(MBBI);
MBB.erase(MBBI);
MBBI = NewMI;
@ -629,8 +629,7 @@ AArch64TargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
// Transfer the remainder of BB and its successor edges to exitMBB.
exitMBB->splice(exitMBB->begin(), BB,
llvm::next(MachineBasicBlock::iterator(MI)),
BB->end());
std::next(MachineBasicBlock::iterator(MI)), BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
const TargetRegisterClass *TRC
@ -724,8 +723,7 @@ AArch64TargetLowering::emitAtomicBinaryMinMax(MachineInstr *MI,
// Transfer the remainder of BB and its successor edges to exitMBB.
exitMBB->splice(exitMBB->begin(), BB,
llvm::next(MachineBasicBlock::iterator(MI)),
BB->end());
std::next(MachineBasicBlock::iterator(MI)), BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
unsigned scratch = MRI.createVirtualRegister(TRC);
@ -808,8 +806,7 @@ AArch64TargetLowering::emitAtomicCmpSwap(MachineInstr *MI,
// Transfer the remainder of BB and its successor edges to exitMBB.
exitMBB->splice(exitMBB->begin(), BB,
llvm::next(MachineBasicBlock::iterator(MI)),
BB->end());
std::next(MachineBasicBlock::iterator(MI)), BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
// thisMBB:
@ -900,8 +897,7 @@ AArch64TargetLowering::EmitF128CSEL(MachineInstr *MI,
MF->insert(It, EndBB);
// Transfer rest of current basic-block to EndBB
EndBB->splice(EndBB->begin(), MBB,
llvm::next(MachineBasicBlock::iterator(MI)),
EndBB->splice(EndBB->begin(), MBB, std::next(MachineBasicBlock::iterator(MI)),
MBB->end());
EndBB->transferSuccessorsAndUpdatePHIs(MBB);
@ -336,7 +336,7 @@ ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
// If we can modify the function, delete everything below this
// unconditional branch.
if (AllowModify) {
MachineBasicBlock::iterator DI = llvm::next(I);
MachineBasicBlock::iterator DI = std::next(I);
while (DI != MBB.end()) {
MachineInstr *InstToDelete = DI;
++DI;
@ -2176,7 +2176,7 @@ static bool isSuitableForMask(MachineInstr *&MI, unsigned SrcReg,
// Walk down one instruction which is potentially an 'and'.
const MachineInstr &Copy = *MI;
MachineBasicBlock::iterator AND(
llvm::next(MachineBasicBlock::iterator(MI)));
std::next(MachineBasicBlock::iterator(MI)));
if (AND == MI->getParent()->end()) return false;
MI = AND;
return isSuitableForMask(MI, Copy.getOperand(0).getReg(),
@ -3253,8 +3253,7 @@ static const MachineInstr *getBundledDefMI(const TargetRegisterInfo *TRI,
Dist = 0;
MachineBasicBlock::const_iterator I = MI; ++I;
MachineBasicBlock::const_instr_iterator II =
llvm::prior(I.getInstrIterator());
MachineBasicBlock::const_instr_iterator II = std::prev(I.getInstrIterator());
assert(II->isInsideBundle() && "Empty bundle?");
int Idx = -1;
@ -569,10 +569,10 @@ static bool BBHasFallthrough(MachineBasicBlock *MBB) {
// Get the next machine basic block in the function.
MachineFunction::iterator MBBI = MBB;
// Can't fall off end of function.
if (llvm::next(MBBI) == MBB->getParent()->end())
if (std::next(MBBI) == MBB->getParent()->end())
return false;
MachineBasicBlock *NextBB = llvm::next(MBBI);
MachineBasicBlock *NextBB = std::next(MBBI);
for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
E = MBB->succ_end(); I != E; ++I)
if (*I == NextBB)
@ -917,7 +917,7 @@ MachineBasicBlock *ARMConstantIslands::splitBlockBeforeInstr(MachineInstr *MI) {
CompareMBBNumbers);
MachineBasicBlock* WaterBB = *IP;
if (WaterBB == OrigBB)
WaterList.insert(llvm::next(IP), NewBB);
WaterList.insert(std::next(IP), NewBB);
else
WaterList.insert(IP, OrigBB);
NewWaterList.insert(OrigBB);
@ -1188,7 +1188,7 @@ bool ARMConstantIslands::findAvailableWater(CPUser &U, unsigned UserOffset,
return false;
unsigned BestGrowth = ~0u;
for (water_iterator IP = prior(WaterList.end()), B = WaterList.begin();;
for (water_iterator IP = std::prev(WaterList.end()), B = WaterList.begin();;
--IP) {
MachineBasicBlock* WaterBB = *IP;
// Check if water is in range and is either at a lower address than the
@ -1249,7 +1249,7 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
if (isOffsetInRange(UserOffset, CPEOffset, U)) {
DEBUG(dbgs() << "Split at end of BB#" << UserMBB->getNumber()
<< format(", expected CPE offset %#x\n", CPEOffset));
NewMBB = llvm::next(MachineFunction::iterator(UserMBB));
NewMBB = std::next(MachineFunction::iterator(UserMBB));
// Add an unconditional branch from UserMBB to fallthrough block. Record
// it for branch lengthening; this new branch will not get out of range,
// but if the preceding conditional branch is out of range, the targets
@ -1320,8 +1320,7 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
MachineInstr *LastIT = 0;
for (unsigned Offset = UserOffset+TII->GetInstSizeInBytes(UserMI);
Offset < BaseInsertOffset;
Offset += TII->GetInstSizeInBytes(MI),
MI = llvm::next(MI)) {
Offset += TII->GetInstSizeInBytes(MI), MI = std::next(MI)) {
assert(MI != UserMBB->end() && "Fell off end of block");
if (CPUIndex < NumCPUsers && CPUsers[CPUIndex].MI == MI) {
CPUser &U = CPUsers[CPUIndex];
@ -1393,7 +1392,7 @@ bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) {
NewWaterList.insert(NewIsland);
// The new CPE goes before the following block (NewMBB).
NewMBB = llvm::next(MachineFunction::iterator(WaterBB));
NewMBB = std::next(MachineFunction::iterator(WaterBB));
} else {
// No water found.
@ -1405,7 +1404,7 @@ bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) {
// next iteration for constant pools, but in this context, we don't want
// it. Check for this so it will be removed from the WaterList.
// Also remove any entry from NewWaterList.
MachineBasicBlock *WaterBB = prior(MachineFunction::iterator(NewMBB));
MachineBasicBlock *WaterBB = std::prev(MachineFunction::iterator(NewMBB));
IP = std::find(WaterList.begin(), WaterList.end(), WaterBB);
if (IP != WaterList.end())
NewWaterList.erase(WaterBB);
@ -1443,7 +1442,7 @@ bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) {
// Increase the size of the island block to account for the new entry.
BBInfo[NewIsland->getNumber()].Size += Size;
adjustBBOffsetsAfter(llvm::prior(MachineFunction::iterator(NewIsland)));
adjustBBOffsetsAfter(std::prev(MachineFunction::iterator(NewIsland)));
// Finally, change the CPI in the instruction operand to be ID.
for (unsigned i = 0, e = UserMI->getNumOperands(); i != e; ++i)
@ -1592,7 +1591,7 @@ ARMConstantIslands::fixupConditionalBr(ImmBranch &Br) {
++NumCBrFixed;
if (BMI != MI) {
if (llvm::next(MachineBasicBlock::iterator(MI)) == prior(MBB->end()) &&
if (std::next(MachineBasicBlock::iterator(MI)) == std::prev(MBB->end()) &&
BMI->getOpcode() == Br.UncondBr) {
// Last MI in the BB is an unconditional branch. Can we simply invert the
// condition and swap destinations:
@ -1622,7 +1621,7 @@ ARMConstantIslands::fixupConditionalBr(ImmBranch &Br) {
MBB->back().eraseFromParent();
// BBInfo[SplitBB].Offset is wrong temporarily, fixed below
}
MachineBasicBlock *NextBB = llvm::next(MachineFunction::iterator(MBB));
MachineBasicBlock *NextBB = std::next(MachineFunction::iterator(MBB));
DEBUG(dbgs() << " Insert B to BB#" << DestBB->getNumber()
<< " also invert condition and change dest. to BB#"
@ -2017,7 +2016,7 @@ adjustJTTargetBlockForward(MachineBasicBlock *BB, MachineBasicBlock *JTBB) {
SmallVector<MachineOperand, 4> Cond;
SmallVector<MachineOperand, 4> CondPrior;
MachineFunction::iterator BBi = BB;
MachineFunction::iterator OldPrior = prior(BBi);
MachineFunction::iterator OldPrior = std::prev(BBi);
// If the block terminator isn't analyzable, don't try to move the block
bool B = TII->AnalyzeBranch(*BB, TBB, FBB, Cond);
@ -1273,7 +1273,7 @@ bool ARMExpandPseudo::ExpandMBB(MachineBasicBlock &MBB) {
MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
while (MBBI != E) {
MachineBasicBlock::iterator NMBBI = llvm::next(MBBI);
MachineBasicBlock::iterator NMBBI = std::next(MBBI);
Modified |= ExpandMI(MBB, MBBI);
MBBI = NMBBI;
}
@ -632,7 +632,7 @@ void ARMFrameLowering::emitEpilogue(MachineFunction &MF,
addReg(JumpTarget.getReg(), RegState::Kill);
}
MachineInstr *NewMI = prior(MBBI);
MachineInstr *NewMI = std::prev(MBBI);
for (unsigned i = 1, e = MBBI->getNumOperands(); i != e; ++i)
NewMI->addOperand(MBBI->getOperand(i));
@ -1017,7 +1017,7 @@ static void emitAlignedDPRCS2Spills(MachineBasicBlock &MBB,
}
// The last spill instruction inserted should kill the scratch register r4.
llvm::prior(MI)->addRegisterKilled(ARM::R4, TRI);
std::prev(MI)->addRegisterKilled(ARM::R4, TRI);
}
/// Skip past the code inserted by emitAlignedDPRCS2Spills, and return an
@ -1127,7 +1127,7 @@ static void emitAlignedDPRCS2Restores(MachineBasicBlock &MBB,
.addReg(ARM::R4).addImm(2*(NextReg-R4BaseReg)));
// Last store kills r4.
llvm::prior(MI)->addRegisterKilled(ARM::R4, TRI);
std::prev(MI)->addRegisterKilled(ARM::R4, TRI);
}
bool ARMFrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
@ -57,7 +57,7 @@ ARMHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
(LastMCID.TSFlags & ARMII::DomainMask) == ARMII::DomainGeneral) {
MachineBasicBlock::iterator I = LastMI;
if (I != LastMI->getParent()->begin()) {
I = llvm::prior(I);
I = std::prev(I);
DefMI = &*I;
}
}
@ -6199,8 +6199,7 @@ ARMTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI,
// Transfer the remainder of BB and its successor edges to exitMBB.
exitMBB->splice(exitMBB->begin(), BB,
llvm::next(MachineBasicBlock::iterator(MI)),
BB->end());
std::next(MachineBasicBlock::iterator(MI)), BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
// thisMBB:
@ -6284,8 +6283,7 @@ ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
// Transfer the remainder of BB and its successor edges to exitMBB.
exitMBB->splice(exitMBB->begin(), BB,
llvm::next(MachineBasicBlock::iterator(MI)),
BB->end());
std::next(MachineBasicBlock::iterator(MI)), BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
const TargetRegisterClass *TRC = isThumb2 ?
@ -6392,8 +6390,7 @@ ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI,
// Transfer the remainder of BB and its successor edges to exitMBB.
exitMBB->splice(exitMBB->begin(), BB,
llvm::next(MachineBasicBlock::iterator(MI)),
BB->end());
std::next(MachineBasicBlock::iterator(MI)), BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
const TargetRegisterClass *TRC = isThumb2 ?
@ -6512,8 +6509,7 @@ ARMTargetLowering::EmitAtomicBinary64(MachineInstr *MI, MachineBasicBlock *BB,
// Transfer the remainder of BB and its successor edges to exitMBB.
exitMBB->splice(exitMBB->begin(), BB,
llvm::next(MachineBasicBlock::iterator(MI)),
BB->end());
std::next(MachineBasicBlock::iterator(MI)), BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
const TargetRegisterClass *TRC = isThumb2 ?
@ -7444,8 +7440,7 @@ ARMTargetLowering::EmitStructByval(MachineInstr *MI,
// Transfer the remainder of BB and its successor edges to exitMBB.
exitMBB->splice(exitMBB->begin(), BB,
llvm::next(MachineBasicBlock::iterator(MI)),
BB->end());
std::next(MachineBasicBlock::iterator(MI)), BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
// Load an immediate to varEnd.
@ -7771,8 +7766,7 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// Transfer the remainder of BB and its successor edges to sinkMBB.
sinkMBB->splice(sinkMBB->begin(), BB,
llvm::next(MachineBasicBlock::iterator(MI)),
BB->end());
std::next(MachineBasicBlock::iterator(MI)), BB->end());
sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
BB->addSuccessor(copy0MBB);
@ -7805,7 +7799,7 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case ARM::BCCi64:
case ARM::BCCZi64: {
// If there is an unconditional branch to the other successor, remove it.
BB->erase(llvm::next(MachineBasicBlock::iterator(MI)), BB->end());
BB->erase(std::next(MachineBasicBlock::iterator(MI)), BB->end());
// Compare both parts that make up the double comparison separately for
// equality.
@ -7890,8 +7884,7 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// Transfer the remainder of BB and its successor edges to sinkMBB.
SinkBB->splice(SinkBB->begin(), BB,
llvm::next(MachineBasicBlock::iterator(MI)),
BB->end());
std::next(MachineBasicBlock::iterator(MI)), BB->end());
SinkBB->transferSuccessorsAndUpdatePHIs(BB);
BB->addSuccessor(RSBBB);
@ -484,7 +484,7 @@ void ARMLoadStoreOpt::MergeOpsUpdate(MachineBasicBlock &MBB,
return;
// Merge succeeded, update records.
Merges.push_back(prior(Loc));
Merges.push_back(std::prev(Loc));
// In gathering loads together, we may have moved the imp-def of a register
// past one of its uses. This is OK, since we know better than the rest of
@ -812,7 +812,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLSMultiple(MachineBasicBlock &MBB,
// Try merging with the previous instruction.
MachineBasicBlock::iterator BeginMBBI = MBB.begin();
if (MBBI != BeginMBBI) {
MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
MachineBasicBlock::iterator PrevMBBI = std::prev(MBBI);
while (PrevMBBI != BeginMBBI && PrevMBBI->isDebugValue())
--PrevMBBI;
if (Mode == ARM_AM::ia &&
@ -831,7 +831,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLSMultiple(MachineBasicBlock &MBB,
// Try merging with the next instruction.
MachineBasicBlock::iterator EndMBBI = MBB.end();
if (!DoMerge && MBBI != EndMBBI) {
MachineBasicBlock::iterator NextMBBI = llvm::next(MBBI);
MachineBasicBlock::iterator NextMBBI = std::next(MBBI);
while (NextMBBI != EndMBBI && NextMBBI->isDebugValue())
++NextMBBI;
if ((Mode == ARM_AM::ia || Mode == ARM_AM::ib) &&
@ -959,7 +959,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLoadStore(MachineBasicBlock &MBB,
// Try merging with the previous instruction.
MachineBasicBlock::iterator BeginMBBI = MBB.begin();
if (MBBI != BeginMBBI) {
MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
MachineBasicBlock::iterator PrevMBBI = std::prev(MBBI);
while (PrevMBBI != BeginMBBI && PrevMBBI->isDebugValue())
--PrevMBBI;
if (isMatchingDecrement(PrevMBBI, Base, Bytes, Limit, Pred, PredReg)) {
@ -978,7 +978,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLoadStore(MachineBasicBlock &MBB,
// Try merging with the next instruction.
MachineBasicBlock::iterator EndMBBI = MBB.end();
if (!DoMerge && MBBI != EndMBBI) {
MachineBasicBlock::iterator NextMBBI = llvm::next(MBBI);
MachineBasicBlock::iterator NextMBBI = std::next(MBBI);
while (NextMBBI != EndMBBI && NextMBBI->isDebugValue())
++NextMBBI;
if (!isAM5 &&
@ -1122,7 +1122,7 @@ void ARMLoadStoreOpt::AdvanceRS(MachineBasicBlock &MBB, MemOpQueue &MemOps) {
}
if (Loc != MBB.begin())
RS->forward(prior(Loc));
RS->forward(std::prev(Loc));
}
static int getMemoryOpOffset(const MachineInstr *MI) {
@ -1232,7 +1232,7 @@ bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
getKillRegState(OddDeadKill) | getUndefRegState(OddUndef));
++NumSTRD2STM;
}
NewBBI = llvm::prior(MBBI);
NewBBI = std::prev(MBBI);
} else {
// Split into two instructions.
unsigned NewOpc = (isLd)
@ -1254,7 +1254,7 @@ bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
OddReg, OddDeadKill, false,
BaseReg, false, BaseUndef, false, OffUndef,
Pred, PredReg, TII, isT2);
NewBBI = llvm::prior(MBBI);
NewBBI = std::prev(MBBI);
InsertLDR_STR(MBB, MBBI, OffImm, isLd, dl, NewOpc,
EvenReg, EvenDeadKill, false,
BaseReg, BaseKill, BaseUndef, OffKill, OffUndef,
@ -1274,7 +1274,7 @@ bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
EvenReg, EvenDeadKill, EvenUndef,
BaseReg, false, BaseUndef, false, OffUndef,
Pred, PredReg, TII, isT2);
NewBBI = llvm::prior(MBBI);
NewBBI = std::prev(MBBI);
InsertLDR_STR(MBB, MBBI, OffImm+4, isLd, dl, NewOpc2,
OddReg, OddDeadKill, OddUndef,
BaseReg, BaseKill, BaseUndef, OffKill, OffUndef,
@ -1419,7 +1419,7 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
// Find a scratch register.
unsigned Scratch = RS->FindUnusedReg(&ARM::GPRRegClass);
// Process the load / store instructions.
RS->forward(prior(MBBI));
RS->forward(std::prev(MBBI));
// Merge ops.
Merges.clear();
@ -1441,13 +1441,13 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
++NumMerges;
// RS may be pointing to an instruction that's deleted.
RS->skipTo(prior(MBBI));
RS->skipTo(std::prev(MBBI));
} else if (NumMemOps == 1) {
// Try folding preceding/trailing base inc/dec into the single
// load/store.
if (MergeBaseUpdateLoadStore(MBB, MemOps[0].MBBI, TII, Advance, MBBI)) {
++NumMerges;
RS->forward(prior(MBBI));
RS->forward(std::prev(MBBI));
}
}
@ -1490,7 +1490,7 @@ bool ARMLoadStoreOpt::MergeReturnIntoLDM(MachineBasicBlock &MBB) {
(MBBI->getOpcode() == ARM::BX_RET ||
MBBI->getOpcode() == ARM::tBX_RET ||
MBBI->getOpcode() == ARM::MOVPCLR)) {
MachineInstr *PrevMI = prior(MBBI);
MachineInstr *PrevMI = std::prev(MBBI);
unsigned Opcode = PrevMI->getOpcode();
if (Opcode == ARM::LDMIA_UPD || Opcode == ARM::LDMDA_UPD ||
Opcode == ARM::LDMDB_UPD || Opcode == ARM::LDMIB_UPD ||
@ -312,9 +312,9 @@ MLxExpansion::ExpandFPMLxInstruction(MachineBasicBlock &MBB, MachineInstr *MI,
dbgs() << "Expanding: " << *MI;
dbgs() << " to:\n";
MachineBasicBlock::iterator MII = MI;
MII = llvm::prior(MII);
MII = std::prev(MII);
MachineInstr &MI2 = *MII;
MII = llvm::prior(MII);
MII = std::prev(MII);
MachineInstr &MI1 = *MII;
dbgs() << " " << MI1;
dbgs() << " " << MI2;
@ -182,7 +182,7 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF) const {
int FramePtrOffsetInBlock = 0;
unsigned adjustedGPRCS1Size = GPRCS1Size;
if (tryFoldSPUpdateIntoPushPop(STI, MF, prior(MBBI), NumBytes)) {
if (tryFoldSPUpdateIntoPushPop(STI, MF, std::prev(MBBI), NumBytes)) {
FramePtrOffsetInBlock = NumBytes;
adjustedGPRCS1Size += NumBytes;
NumBytes = 0;
@ -365,8 +365,8 @@ void Thumb1FrameLowering::emitEpilogue(MachineFunction &MF,
} else {
if (MBBI->getOpcode() == ARM::tBX_RET &&
&MBB.front() != MBBI &&
prior(MBBI)->getOpcode() == ARM::tPOP) {
MachineBasicBlock::iterator PMBBI = prior(MBBI);
std::prev(MBBI)->getOpcode() == ARM::tPOP) {
MachineBasicBlock::iterator PMBBI = std::prev(MBBI);
if (!tryFoldSPUpdateIntoPushPop(STI, MF, PMBBI, NumBytes))
emitSPUpdate(MBB, PMBBI, TII, dl, *RegInfo, NumBytes);
} else if (!tryFoldSPUpdateIntoPushPop(STI, MF, MBBI, NumBytes))
@ -421,7 +421,7 @@ rewriteFrameIndex(MachineBasicBlock::iterator II, unsigned FrameRegIdx,
MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Mask);
}
Offset = (Offset - Mask * Scale);
MachineBasicBlock::iterator NII = llvm::next(II);
MachineBasicBlock::iterator NII = std::next(II);
emitThumbRegPlusImmediate(MBB, NII, dl, DestReg, DestReg, Offset, TII,
*this);
} else {
@ -242,7 +242,7 @@ bool Thumb2ITBlockPass::InsertITInstructions(MachineBasicBlock &MBB) {
// Finalize the bundle.
MachineBasicBlock::instr_iterator LI = LastITMI;
finalizeBundle(MBB, InsertPos.getInstrIterator(), llvm::next(LI));
finalizeBundle(MBB, InsertPos.getInstrIterator(), std::next(LI));
Modified = true;
++NumITs;
@ -945,7 +945,7 @@ bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) {
MachineBasicBlock::instr_iterator MII = MBB.instr_begin(),E = MBB.instr_end();
MachineBasicBlock::instr_iterator NextMII;
for (; MII != E; MII = NextMII) {
NextMII = llvm::next(MII);
NextMII = std::next(MII);
MachineInstr *MI = &*MII;
if (MI->isBundle()) {
@ -962,7 +962,7 @@ bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) {
if (ReduceMI(MBB, MI, LiveCPSR, IsSelfLoop)) {
Modified = true;
MachineBasicBlock::instr_iterator I = prior(NextMII);
MachineBasicBlock::instr_iterator I = std::prev(NextMII);
MI = &*I;
// Removing and reinserting the first instruction in a bundle will break
// up the bundle. Fix the bundling if it was broken.
@ -300,7 +300,7 @@ bool HexagonCopyToCombine::isSafeToMoveTogether(MachineInstr *I1,
MachineBasicBlock::iterator I(I1), End(I2);
// At O3 we got better results (dhrystone) by being more conservative here.
if (!ShouldCombineAggressively)
End = llvm::next(MachineBasicBlock::iterator(I2));
End = std::next(MachineBasicBlock::iterator(I2));
IsImmUseReg = I1->getOperand(1).isImm() || I1->getOperand(1).isGlobal();
unsigned I1UseReg = IsImmUseReg ? 0 : I1->getOperand(1).getReg();
// Track killed operands. If we move across an instruction that kills our
@ -464,7 +464,7 @@ bool HexagonCopyToCombine::runOnMachineFunction(MachineFunction &MF) {
/// false if the combine must be inserted at the returned instruction.
MachineInstr *HexagonCopyToCombine::findPairable(MachineInstr *I1,
bool &DoInsertAtI1) {
MachineBasicBlock::iterator I2 = llvm::next(MachineBasicBlock::iterator(I1));
MachineBasicBlock::iterator I2 = std::next(MachineBasicBlock::iterator(I1));
unsigned I1DestReg = I1->getOperand(0).getReg();
for (MachineBasicBlock::iterator End = I1->getParent()->end(); I2 != End;
@ -144,14 +144,14 @@ bool HexagonFrameLowering::hasTailCall(MachineBasicBlock &MBB) const {
|
||||
|
||||
void HexagonFrameLowering::emitEpilogue(MachineFunction &MF,
|
||||
MachineBasicBlock &MBB) const {
|
||||
MachineBasicBlock::iterator MBBI = prior(MBB.end());
|
||||
MachineBasicBlock::iterator MBBI = std::prev(MBB.end());
|
||||
DebugLoc dl = MBBI->getDebugLoc();
|
||||
//
|
||||
// Only insert deallocframe if we need to. Also at -O0. See comment
|
||||
// in emitPrologue above.
|
||||
//
|
||||
if (hasFP(MF) || MF.getTarget().getOptLevel() == CodeGenOpt::None) {
|
||||
MachineBasicBlock::iterator MBBI = prior(MBB.end());
|
||||
MachineBasicBlock::iterator MBBI = std::prev(MBB.end());
|
||||
MachineBasicBlock::iterator MBBI_end = MBB.end();
|
||||
|
||||
const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
|
||||
@ -170,7 +170,7 @@ void HexagonFrameLowering::emitEpilogue(MachineFunction &MF,
|
||||
// Check for RESTORE_DEALLOC_RET_JMP_V4 call. Don't emit an extra DEALLOC
|
||||
// instruction if we encounter it.
|
||||
MachineBasicBlock::iterator BeforeJMPR =
|
||||
MBB.begin() == MBBI ? MBBI : prior(MBBI);
|
||||
MBB.begin() == MBBI ? MBBI : std::prev(MBBI);
|
||||
if (BeforeJMPR != MBBI &&
|
||||
BeforeJMPR->getOpcode() == Hexagon::RESTORE_DEALLOC_RET_JMP_V4) {
|
||||
// Remove the JMPR node.
|
||||
@ -190,7 +190,7 @@ void HexagonFrameLowering::emitEpilogue(MachineFunction &MF,
|
||||
// DEALLOCFRAME instruction after it.
|
||||
MachineBasicBlock::iterator Term = MBB.getFirstTerminator();
|
||||
MachineBasicBlock::iterator I =
|
||||
Term == MBB.begin() ? MBB.end() : prior(Term);
|
||||
Term == MBB.begin() ? MBB.end() : std::prev(Term);
|
||||
if (I != MBB.end() &&
|
||||
I->getOpcode() == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4)
|
||||
return;
|
||||
|
@ -907,7 +907,7 @@ bool HexagonHardwareLoops::isDead(const MachineInstr *MI,
|
||||
// this instruction is dead: both it (and the phi node) can be removed.
|
||||
use_nodbg_iterator I = MRI->use_nodbg_begin(Reg);
|
||||
use_nodbg_iterator End = MRI->use_nodbg_end();
|
||||
if (llvm::next(I) != End || !I.getOperand().getParent()->isPHI())
|
||||
if (std::next(I) != End || !I.getOperand().getParent()->isPHI())
|
||||
return false;
|
||||
|
||||
MachineInstr *OnePhi = I.getOperand().getParent();
|
||||
@ -920,7 +920,7 @@ bool HexagonHardwareLoops::isDead(const MachineInstr *MI,
|
||||
use_nodbg_iterator nextJ;
|
||||
for (use_nodbg_iterator J = MRI->use_nodbg_begin(OPReg);
|
||||
J != End; J = nextJ) {
|
||||
nextJ = llvm::next(J);
|
||||
nextJ = std::next(J);
|
||||
MachineOperand &Use = J.getOperand();
|
||||
MachineInstr *UseMI = Use.getParent();
|
||||
|
||||
@ -954,7 +954,7 @@ void HexagonHardwareLoops::removeIfDead(MachineInstr *MI) {
|
||||
MachineRegisterInfo::use_iterator nextI;
|
||||
for (MachineRegisterInfo::use_iterator I = MRI->use_begin(Reg),
|
||||
E = MRI->use_end(); I != E; I = nextI) {
|
||||
nextI = llvm::next(I); // I is invalidated by the setReg
|
||||
nextI = std::next(I); // I is invalidated by the setReg
|
||||
MachineOperand &Use = I.getOperand();
|
||||
MachineInstr *UseMI = Use.getParent();
|
||||
if (UseMI == MI)
|
||||
@ -1162,7 +1162,7 @@ bool HexagonHardwareLoops::orderBumpCompare(MachineInstr *BumpI,
|
||||
// Out of order.
|
||||
unsigned PredR = CmpI->getOperand(0).getReg();
|
||||
bool FoundBump = false;
|
||||
instr_iterator CmpIt = CmpI, NextIt = llvm::next(CmpIt);
|
||||
instr_iterator CmpIt = CmpI, NextIt = std::next(CmpIt);
|
||||
for (instr_iterator I = NextIt, E = BB->instr_end(); I != E; ++I) {
|
||||
MachineInstr *In = &*I;
|
||||
for (unsigned i = 0, n = In->getNumOperands(); i < n; ++i) {
|
||||
@ -1176,7 +1176,7 @@ bool HexagonHardwareLoops::orderBumpCompare(MachineInstr *BumpI,
|
||||
if (In == BumpI) {
|
||||
instr_iterator After = BumpI;
|
||||
instr_iterator From = CmpI;
|
||||
BB->splice(llvm::next(After), BB, From);
|
||||
BB->splice(std::next(After), BB, From);
|
||||
FoundBump = true;
|
||||
break;
|
||||
}
|
||||
|
@ -147,7 +147,7 @@ HexagonInstrInfo::InsertBranch(MachineBasicBlock &MBB,MachineBasicBlock *TBB,
|
||||
if (isPredicated(Term) && !AnalyzeBranch(MBB, NewTBB, NewFBB, Cond,
|
||||
false)) {
|
||||
MachineBasicBlock *NextBB =
|
||||
llvm::next(MachineFunction::iterator(&MBB));
|
||||
std::next(MachineFunction::iterator(&MBB));
|
||||
if (NewTBB == NextBB) {
|
||||
ReverseBranchCondition(Cond);
|
||||
RemoveBranch(MBB);
|
||||
|
@ -237,20 +237,20 @@ bool HexagonPacketizer::runOnMachineFunction(MachineFunction &Fn) {
|
||||
// instruction stream until we find the nearest boundary.
|
||||
MachineBasicBlock::iterator I = RegionEnd;
|
||||
for(;I != MBB->begin(); --I, --RemainingCount) {
|
||||
if (TII->isSchedulingBoundary(llvm::prior(I), MBB, Fn))
|
||||
if (TII->isSchedulingBoundary(std::prev(I), MBB, Fn))
|
||||
break;
|
||||
}
|
||||
I = MBB->begin();
|
||||
|
||||
// Skip empty scheduling regions.
|
||||
if (I == RegionEnd) {
|
||||
RegionEnd = llvm::prior(RegionEnd);
|
||||
RegionEnd = std::prev(RegionEnd);
|
||||
--RemainingCount;
|
||||
continue;
|
||||
}
|
||||
// Skip regions with one instruction.
|
||||
if (I == llvm::prior(RegionEnd)) {
|
||||
RegionEnd = llvm::prior(RegionEnd);
|
||||
if (I == std::prev(RegionEnd)) {
|
||||
RegionEnd = std::prev(RegionEnd);
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -71,7 +71,7 @@ void MSP430FrameLowering::emitPrologue(MachineFunction &MF) const {
|
||||
.addReg(MSP430::SPW);
|
||||
|
||||
// Mark the FramePtr as live-in in every block except the entry.
|
||||
for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
|
||||
for (MachineFunction::iterator I = std::next(MF.begin()), E = MF.end();
|
||||
I != E; ++I)
|
||||
I->addLiveIn(MSP430::FPW);
|
||||
|
||||
@ -138,7 +138,7 @@ void MSP430FrameLowering::emitEpilogue(MachineFunction &MF,
|
||||
|
||||
// Skip the callee-saved pop instructions.
|
||||
while (MBBI != MBB.begin()) {
|
||||
MachineBasicBlock::iterator PI = prior(MBBI);
|
||||
MachineBasicBlock::iterator PI = std::prev(MBBI);
|
||||
unsigned Opc = PI->getOpcode();
|
||||
if (Opc != MSP430::POP16r && !PI->isTerminator())
|
||||
break;
|
||||
|
@ -1248,8 +1248,7 @@ MSP430TargetLowering::EmitShiftInstr(MachineInstr *MI,
|
||||
|
||||
// Update machine-CFG edges by transferring all successors of the current
|
||||
// block to the block containing instructions after shift.
|
||||
RemBB->splice(RemBB->begin(), BB,
|
||||
llvm::next(MachineBasicBlock::iterator(MI)),
|
||||
RemBB->splice(RemBB->begin(), BB, std::next(MachineBasicBlock::iterator(MI)),
|
||||
BB->end());
|
||||
RemBB->transferSuccessorsAndUpdatePHIs(BB);
|
||||
|
||||
@ -1344,8 +1343,7 @@ MSP430TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
|
||||
// Update machine-CFG edges by transferring all successors of the current
|
||||
// block to the new block which will contain the Phi node for the select.
|
||||
copy1MBB->splice(copy1MBB->begin(), BB,
|
||||
llvm::next(MachineBasicBlock::iterator(MI)),
|
||||
BB->end());
|
||||
std::next(MachineBasicBlock::iterator(MI)), BB->end());
|
||||
copy1MBB->transferSuccessorsAndUpdatePHIs(BB);
|
||||
// Next, add the true and fallthrough blocks as its successors.
|
||||
BB->addSuccessor(copy0MBB);
|
||||
|
@ -205,8 +205,8 @@ bool MSP430InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
|
||||
}
|
||||
|
||||
// If the block has any instructions after a JMP, delete them.
|
||||
while (llvm::next(I) != MBB.end())
|
||||
llvm::next(I)->eraseFromParent();
|
||||
while (std::next(I) != MBB.end())
|
||||
std::next(I)->eraseFromParent();
|
||||
Cond.clear();
|
||||
FBB = 0;
|
||||
|
||||
|
@ -142,10 +142,10 @@ MSP430RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
|
||||
// We need to materialize the offset via add instruction.
|
||||
unsigned DstReg = MI.getOperand(0).getReg();
|
||||
if (Offset < 0)
|
||||
BuildMI(MBB, llvm::next(II), dl, TII.get(MSP430::SUB16ri), DstReg)
|
||||
BuildMI(MBB, std::next(II), dl, TII.get(MSP430::SUB16ri), DstReg)
|
||||
.addReg(DstReg).addImm(-Offset);
|
||||
else
|
||||
BuildMI(MBB, llvm::next(II), dl, TII.get(MSP430::ADD16ri), DstReg)
|
||||
BuildMI(MBB, std::next(II), dl, TII.get(MSP430::ADD16ri), DstReg)
|
||||
.addReg(DstReg).addImm(Offset);
|
||||
|
||||
return;
|
||||
|
@ -549,8 +549,7 @@ emitSel16(unsigned Opc, MachineInstr *MI, MachineBasicBlock *BB) const {
|
||||
|
||||
// Transfer the remainder of BB and its successor edges to sinkMBB.
|
||||
sinkMBB->splice(sinkMBB->begin(), BB,
|
||||
llvm::next(MachineBasicBlock::iterator(MI)),
|
||||
BB->end());
|
||||
std::next(MachineBasicBlock::iterator(MI)), BB->end());
|
||||
sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
|
||||
|
||||
// Next, add the true and fallthrough blocks as its successors.
|
||||
@ -612,8 +611,7 @@ MachineBasicBlock *Mips16TargetLowering::emitSelT16
|
||||
|
||||
// Transfer the remainder of BB and its successor edges to sinkMBB.
|
||||
sinkMBB->splice(sinkMBB->begin(), BB,
|
||||
llvm::next(MachineBasicBlock::iterator(MI)),
|
||||
BB->end());
|
||||
std::next(MachineBasicBlock::iterator(MI)), BB->end());
|
||||
sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
|
||||
|
||||
// Next, add the true and fallthrough blocks as its successors.
|
||||
@ -677,8 +675,7 @@ MachineBasicBlock *Mips16TargetLowering::emitSeliT16
|
||||
|
||||
// Transfer the remainder of BB and its successor edges to sinkMBB.
|
||||
sinkMBB->splice(sinkMBB->begin(), BB,
|
||||
llvm::next(MachineBasicBlock::iterator(MI)),
|
||||
BB->end());
|
||||
std::next(MachineBasicBlock::iterator(MI)), BB->end());
|
||||
sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
|
||||
|
||||
// Next, add the true and fallthrough blocks as its successors.
|
||||
|
@ -411,7 +411,7 @@ Mips16InstrInfo::loadImmediate(unsigned FrameReg,
|
||||
BuildMI(MBB, II, DL, get(Mips:: AdduRxRyRz16), Reg).addReg(FrameReg)
|
||||
.addReg(Reg, RegState::Kill);
|
||||
if (FirstRegSaved || SecondRegSaved) {
|
||||
II = llvm::next(II);
|
||||
II = std::next(II);
|
||||
if (FirstRegSaved)
|
||||
copyPhysReg(MBB, II, DL, FirstRegSaved, FirstRegSavedTo, true);
|
||||
if (SecondRegSaved)
|
||||
|
@ -616,10 +616,10 @@ static bool BBHasFallthrough(MachineBasicBlock *MBB) {
// Get the next machine basic block in the function.
MachineFunction::iterator MBBI = MBB;
// Can't fall off end of function.
if (llvm::next(MBBI) == MBB->getParent()->end())
if (std::next(MBBI) == MBB->getParent()->end())
return false;

MachineBasicBlock *NextBB = llvm::next(MBBI);
MachineBasicBlock *NextBB = std::next(MBBI);
for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
E = MBB->succ_end(); I != E; ++I)
if (*I == NextBB)

@ -932,7 +932,7 @@ MachineBasicBlock *MipsConstantIslands::splitBlockBeforeInstr
CompareMBBNumbers);
MachineBasicBlock* WaterBB = *IP;
if (WaterBB == OrigBB)
WaterList.insert(llvm::next(IP), NewBB);
WaterList.insert(std::next(IP), NewBB);
else
WaterList.insert(IP, OrigBB);
NewWaterList.insert(OrigBB);

@ -1218,7 +1218,7 @@ bool MipsConstantIslands::findAvailableWater(CPUser &U, unsigned UserOffset,
return false;

unsigned BestGrowth = ~0u;
for (water_iterator IP = prior(WaterList.end()), B = WaterList.begin();;
for (water_iterator IP = std::prev(WaterList.end()), B = WaterList.begin();;
--IP) {
MachineBasicBlock* WaterBB = *IP;
// Check if water is in range and is either at a lower address than the

@ -1277,7 +1277,7 @@ void MipsConstantIslands::createNewWater(unsigned CPUserIndex,
if (isOffsetInRange(UserOffset, CPEOffset, U)) {
DEBUG(dbgs() << "Split at end of BB#" << UserMBB->getNumber()
<< format(", expected CPE offset %#x\n", CPEOffset));
NewMBB = llvm::next(MachineFunction::iterator(UserMBB));
NewMBB = std::next(MachineFunction::iterator(UserMBB));
// Add an unconditional branch from UserMBB to fallthrough block. Record
// it for branch lengthening; this new branch will not get out of range,
// but if the preceding conditional branch is out of range, the targets

@ -1330,8 +1330,7 @@ void MipsConstantIslands::createNewWater(unsigned CPUserIndex,
//MachineInstr *LastIT = 0;
for (unsigned Offset = UserOffset+TII->GetInstSizeInBytes(UserMI);
Offset < BaseInsertOffset;
Offset += TII->GetInstSizeInBytes(MI),
MI = llvm::next(MI)) {
Offset += TII->GetInstSizeInBytes(MI), MI = std::next(MI)) {
assert(MI != UserMBB->end() && "Fell off end of block");
if (CPUIndex < NumCPUsers && CPUsers[CPUIndex].MI == MI) {
CPUser &U = CPUsers[CPUIndex];

@ -1388,7 +1387,7 @@ bool MipsConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) {
NewWaterList.insert(NewIsland);

// The new CPE goes before the following block (NewMBB).
NewMBB = llvm::next(MachineFunction::iterator(WaterBB));
NewMBB = std::next(MachineFunction::iterator(WaterBB));

} else {
// No water found.

@ -1406,7 +1405,7 @@ bool MipsConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) {
// next iteration for constant pools, but in this context, we don't want
// it. Check for this so it will be removed from the WaterList.
// Also remove any entry from NewWaterList.
MachineBasicBlock *WaterBB = prior(MachineFunction::iterator(NewMBB));
MachineBasicBlock *WaterBB = std::prev(MachineFunction::iterator(NewMBB));
IP = std::find(WaterList.begin(), WaterList.end(), WaterBB);
if (IP != WaterList.end())
NewWaterList.erase(WaterBB);

@ -1448,7 +1447,7 @@ bool MipsConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) {

// Increase the size of the island block to account for the new entry.
BBInfo[NewIsland->getNumber()].Size += Size;
adjustBBOffsetsAfter(llvm::prior(MachineFunction::iterator(NewIsland)));
adjustBBOffsetsAfter(std::prev(MachineFunction::iterator(NewIsland)));

@ -1629,7 +1628,7 @@ MipsConstantIslands::fixupConditionalBr(ImmBranch &Br) {

++NumCBrFixed;
if (BMI != MI) {
if (llvm::next(MachineBasicBlock::iterator(MI)) == prior(MBB->end()) &&
if (std::next(MachineBasicBlock::iterator(MI)) == std::prev(MBB->end()) &&
isUnconditionalBranch(BMI->getOpcode())) {
// Last MI in the BB is an unconditional branch. Can we simply invert the
// condition and swap destinations:

@ -1662,7 +1661,7 @@ MipsConstantIslands::fixupConditionalBr(ImmBranch &Br) {
MBB->back().eraseFromParent();
// BBInfo[SplitBB].Offset is wrong temporarily, fixed below
}
MachineBasicBlock *NextBB = llvm::next(MachineFunction::iterator(MBB));
MachineBasicBlock *NextBB = std::next(MachineFunction::iterator(MBB));

DEBUG(dbgs() << " Insert B to BB#" << DestBB->getNumber()
<< " also invert condition and change dest. to BB#"
@ -500,8 +500,8 @@ bool Filler::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
// Bundle the NOP to the instruction with the delay slot.
const MipsInstrInfo *TII =
static_cast<const MipsInstrInfo*>(TM.getInstrInfo());
BuildMI(MBB, llvm::next(I), I->getDebugLoc(), TII->get(Mips::NOP));
MIBundleBuilder(MBB, I, llvm::next(llvm::next(I)));
BuildMI(MBB, std::next(I), I->getDebugLoc(), TII->get(Mips::NOP));
MIBundleBuilder(MBB, I, std::next(I, 2));
}

return Changed;

@ -551,8 +551,8 @@ bool Filler::searchBackward(MachineBasicBlock &MBB, Iter Slot) const {
if (!searchRange(MBB, ReverseIter(Slot), MBB.rend(), RegDU, MemDU, Filler))
return false;

MBB.splice(llvm::next(Slot), &MBB, llvm::next(Filler).base());
MIBundleBuilder(MBB, Slot, llvm::next(llvm::next(Slot)));
MBB.splice(std::next(Slot), &MBB, std::next(Filler).base());
MIBundleBuilder(MBB, Slot, std::next(Slot, 2));
++UsefulSlots;
return true;
}

@ -568,11 +568,11 @@ bool Filler::searchForward(MachineBasicBlock &MBB, Iter Slot) const {

RegDU.setCallerSaved(*Slot);

if (!searchRange(MBB, llvm::next(Slot), MBB.end(), RegDU, NM, Filler))
if (!searchRange(MBB, std::next(Slot), MBB.end(), RegDU, NM, Filler))
return false;

MBB.splice(llvm::next(Slot), &MBB, Filler);
MIBundleBuilder(MBB, Slot, llvm::next(llvm::next(Slot)));
MBB.splice(std::next(Slot), &MBB, Filler);
MIBundleBuilder(MBB, Slot, std::next(Slot, 2));
++UsefulSlots;
return true;
}
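The Filler hunks above also collapse a doubly nested single-step advance into one call with an explicit count. A standalone sketch (not taken from the patch; the list and values are invented) showing that the two forms land on the same element:

#include <cassert>
#include <iterator>
#include <list>

int main() {
  std::list<int> L = {10, 20, 30, 40};

  std::list<int>::iterator I = L.begin();
  std::list<int>::iterator A = std::next(std::next(I)); // advance twice, one step at a time
  std::list<int>::iterator B = std::next(I, 2);         // advance once, by two steps

  assert(A == B && *B == 30);
  return 0;
}

For non-random-access iterators such as the instruction-list iterators here, the counted form simply advances one step at a time internally, so the rewrite changes readability only.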
@ -818,7 +818,7 @@ static MachineBasicBlock *expandPseudoDIV(MachineInstr *MI,
MachineBasicBlock::iterator I(MI);
MachineInstrBuilder MIB;
MachineOperand &Divisor = MI->getOperand(2);
MIB = BuildMI(MBB, llvm::next(I), MI->getDebugLoc(), TII.get(Mips::TEQ))
MIB = BuildMI(MBB, std::next(I), MI->getDebugLoc(), TII.get(Mips::TEQ))
.addReg(Divisor.getReg(), getKillRegState(Divisor.isKill()))
.addReg(Mips::ZERO).addImm(7);

@ -968,7 +968,7 @@ MipsTargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,

// Transfer the remainder of BB and its successor edges to exitMBB.
exitMBB->splice(exitMBB->begin(), BB,
llvm::next(MachineBasicBlock::iterator(MI)), BB->end());
std::next(MachineBasicBlock::iterator(MI)), BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);

// thisMBB:

@ -1054,7 +1054,7 @@ MipsTargetLowering::emitAtomicBinaryPartword(MachineInstr *MI,

// Transfer the remainder of BB and its successor edges to exitMBB.
exitMBB->splice(exitMBB->begin(), BB,
llvm::next(MachineBasicBlock::iterator(MI)), BB->end());
std::next(MachineBasicBlock::iterator(MI)), BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);

BB->addSuccessor(loopMBB);

@ -1209,7 +1209,7 @@ MachineBasicBlock * MipsTargetLowering::emitAtomicCmpSwap(MachineInstr *MI,

// Transfer the remainder of BB and its successor edges to exitMBB.
exitMBB->splice(exitMBB->begin(), BB,
llvm::next(MachineBasicBlock::iterator(MI)), BB->end());
std::next(MachineBasicBlock::iterator(MI)), BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);

// thisMBB:

@ -1295,7 +1295,7 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,

// Transfer the remainder of BB and its successor edges to exitMBB.
exitMBB->splice(exitMBB->begin(), BB,
llvm::next(MachineBasicBlock::iterator(MI)), BB->end());
std::next(MachineBasicBlock::iterator(MI)), BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);

BB->addSuccessor(loop1MBB);
@ -134,7 +134,7 @@ void MipsLongBranch::splitMBB(MachineBasicBlock *MBB) {
(!LastBr->isConditionalBranch() && !LastBr->isUnconditionalBranch()))
return;

ReverseIter FirstBr = getNonDebugInstr(llvm::next(LastBr), End);
ReverseIter FirstBr = getNonDebugInstr(std::next(LastBr), End);

// MBB has only one branch instruction if FirstBr is not a branch
// instruction.

@ -154,7 +154,7 @@ void MipsLongBranch::splitMBB(MachineBasicBlock *MBB) {
NewMBB->removeSuccessor(Tgt);
MBB->addSuccessor(NewMBB);
MBB->addSuccessor(Tgt);
MF->insert(llvm::next(MachineFunction::iterator(MBB)), NewMBB);
MF->insert(std::next(MachineFunction::iterator(MBB)), NewMBB);

NewMBB->splice(NewMBB->end(), MBB, (++LastBr).base(), MBB->end());
}
@ -2621,7 +2621,7 @@ emitBPOSGE32(MachineInstr *MI, MachineBasicBlock *BB) const{
const TargetRegisterClass *RC = &Mips::GPR32RegClass;
DebugLoc DL = MI->getDebugLoc();
const BasicBlock *LLVM_BB = BB->getBasicBlock();
MachineFunction::iterator It = llvm::next(MachineFunction::iterator(BB));
MachineFunction::iterator It = std::next(MachineFunction::iterator(BB));
MachineFunction *F = BB->getParent();
MachineBasicBlock *FBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *TBB = F->CreateMachineBasicBlock(LLVM_BB);

@ -2631,7 +2631,7 @@ emitBPOSGE32(MachineInstr *MI, MachineBasicBlock *BB) const{
F->insert(It, Sink);

// Transfer the remainder of BB and its successor edges to Sink.
Sink->splice(Sink->begin(), BB, llvm::next(MachineBasicBlock::iterator(MI)),
Sink->splice(Sink->begin(), BB, std::next(MachineBasicBlock::iterator(MI)),
BB->end());
Sink->transferSuccessorsAndUpdatePHIs(BB);

@ -2686,7 +2686,7 @@ emitMSACBranchPseudo(MachineInstr *MI, MachineBasicBlock *BB,
const TargetRegisterClass *RC = &Mips::GPR32RegClass;
DebugLoc DL = MI->getDebugLoc();
const BasicBlock *LLVM_BB = BB->getBasicBlock();
MachineFunction::iterator It = llvm::next(MachineFunction::iterator(BB));
MachineFunction::iterator It = std::next(MachineFunction::iterator(BB));
MachineFunction *F = BB->getParent();
MachineBasicBlock *FBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *TBB = F->CreateMachineBasicBlock(LLVM_BB);

@ -2696,7 +2696,7 @@ emitMSACBranchPseudo(MachineInstr *MI, MachineBasicBlock *BB,
F->insert(It, Sink);

// Transfer the remainder of BB and its successor edges to Sink.
Sink->splice(Sink->begin(), BB, llvm::next(MachineBasicBlock::iterator(MI)),
Sink->splice(Sink->begin(), BB, std::next(MachineBasicBlock::iterator(MI)),
BB->end());
Sink->transferSuccessorsAndUpdatePHIs(BB);
@ -6031,8 +6031,7 @@ PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
F->insert(It, loopMBB);
F->insert(It, exitMBB);
exitMBB->splice(exitMBB->begin(), BB,
llvm::next(MachineBasicBlock::iterator(MI)),
BB->end());
std::next(MachineBasicBlock::iterator(MI)), BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);

MachineRegisterInfo &RegInfo = F->getRegInfo();

@ -6100,8 +6099,7 @@ PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI,
F->insert(It, loopMBB);
F->insert(It, exitMBB);
exitMBB->splice(exitMBB->begin(), BB,
llvm::next(MachineBasicBlock::iterator(MI)),
BB->end());
std::next(MachineBasicBlock::iterator(MI)), BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);

MachineRegisterInfo &RegInfo = F->getRegInfo();

@ -6253,7 +6251,7 @@ PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,

// Transfer the remainder of BB and its successor edges to sinkMBB.
sinkMBB->splice(sinkMBB->begin(), MBB,
llvm::next(MachineBasicBlock::iterator(MI)), MBB->end());
std::next(MachineBasicBlock::iterator(MI)), MBB->end());
sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

// Note that the structure of the jmp_buf used here is not compatible

@ -6518,8 +6516,7 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,

// Transfer the remainder of BB and its successor edges to sinkMBB.
sinkMBB->splice(sinkMBB->begin(), BB,
llvm::next(MachineBasicBlock::iterator(MI)),
BB->end());
std::next(MachineBasicBlock::iterator(MI)), BB->end());
sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

// Next, add the true and fallthrough blocks as its successors.

@ -6639,8 +6636,7 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
F->insert(It, midMBB);
F->insert(It, exitMBB);
exitMBB->splice(exitMBB->begin(), BB,
llvm::next(MachineBasicBlock::iterator(MI)),
BB->end());
std::next(MachineBasicBlock::iterator(MI)), BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);

// thisMBB:

@ -6710,8 +6706,7 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
F->insert(It, midMBB);
F->insert(It, exitMBB);
exitMBB->splice(exitMBB->begin(), BB,
llvm::next(MachineBasicBlock::iterator(MI)),
BB->end());
std::next(MachineBasicBlock::iterator(MI)), BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);

MachineRegisterInfo &RegInfo = F->getRegInfo();
@ -1428,7 +1428,7 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr *CmpInstr,
CmpInstr->eraseFromParent();

MachineBasicBlock::iterator MII = MI;
BuildMI(*MI->getParent(), llvm::next(MII), MI->getDebugLoc(),
BuildMI(*MI->getParent(), std::next(MII), MI->getDebugLoc(),
get(TargetOpcode::COPY), CRReg)
.addReg(PPC::CR0, MIOpC != NewOpC ? RegState::Kill : 0);
@ -1238,7 +1238,7 @@ int AMDGPUCFGStructurizer::handleJumpintoIfImp(MachineBasicBlock *HeadMBB,

numClonedBlock += Num;
Num += serialPatternMatch(*HeadMBB->succ_begin());
Num += serialPatternMatch(*llvm::next(HeadMBB->succ_begin()));
Num += serialPatternMatch(*std::next(HeadMBB->succ_begin()));
Num += ifPatternMatch(HeadMBB);
assert(Num > 0);

@ -1767,7 +1767,7 @@ void AMDGPUCFGStructurizer::removeRedundantConditionalBranch(
if (MBB->succ_size() != 2)
return;
MachineBasicBlock *MBB1 = *MBB->succ_begin();
MachineBasicBlock *MBB2 = *llvm::next(MBB->succ_begin());
MachineBasicBlock *MBB2 = *std::next(MBB->succ_begin());
if (MBB1 != MBB2)
return;

@ -75,7 +75,7 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
MachineBasicBlock::iterator I = MBB.begin();
while (I != MBB.end()) {
MachineInstr &MI = *I;
I = llvm::next(I);
I = std::next(I);

// Expand LDS_*_RET instructions
if (TII->isLDSRetInstr(MI.getOpcode())) {

@ -207,7 +207,7 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
case AMDGPU::RAT_WRITE_CACHELESS_32_eg:
case AMDGPU::RAT_WRITE_CACHELESS_64_eg:
case AMDGPU::RAT_WRITE_CACHELESS_128_eg: {
unsigned EOP = (llvm::next(I)->getOpcode() == AMDGPU::RETURN) ? 1 : 0;
unsigned EOP = (std::next(I)->getOpcode() == AMDGPU::RETURN) ? 1 : 0;

BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI->getOpcode()))
.addOperand(MI->getOperand(0))

@ -457,9 +457,9 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
// Instruction is left unmodified if its not the last one of its type
bool isLastInstructionOfItsType = true;
unsigned InstExportType = MI->getOperand(1).getImm();
for (MachineBasicBlock::iterator NextExportInst = llvm::next(I),
for (MachineBasicBlock::iterator NextExportInst = std::next(I),
EndBlock = BB->end(); NextExportInst != EndBlock;
NextExportInst = llvm::next(NextExportInst)) {
NextExportInst = std::next(NextExportInst)) {
if (NextExportInst->getOpcode() == AMDGPU::EG_ExportSwz ||
NextExportInst->getOpcode() == AMDGPU::R600_ExportSwz) {
unsigned CurrentInstExportType = NextExportInst->getOperand(1)

@ -470,7 +470,7 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
}
}
}
bool EOP = (llvm::next(I)->getOpcode() == AMDGPU::RETURN)? 1 : 0;
bool EOP = (std::next(I)->getOpcode() == AMDGPU::RETURN) ? 1 : 0;
if (!EOP && !isLastInstructionOfItsType)
return BB;
unsigned CfInst = (MI->getOpcode() == AMDGPU::EG_ExportSwz)? 84 : 40;

@ -717,8 +717,8 @@ R600InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
}

// Remove successive JUMP
while (I != MBB.begin() && llvm::prior(I)->getOpcode() == AMDGPU::JUMP) {
MachineBasicBlock::iterator PriorI = llvm::prior(I);
while (I != MBB.begin() && std::prev(I)->getOpcode() == AMDGPU::JUMP) {
MachineBasicBlock::iterator PriorI = std::prev(I);
if (AllowModify)
I->removeFromParent();
I = PriorI;

@ -784,7 +784,7 @@ MachineBasicBlock::iterator FindLastAluClause(MachineBasicBlock &MBB) {
It != E; ++It) {
if (It->getOpcode() == AMDGPU::CF_ALU ||
It->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE)
return llvm::prior(It.base());
return std::prev(It.base());
}
return MBB.end();
}
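The final hunk, in FindLastAluClause, feeds a reverse iterator's base() into std::prev. A standalone sketch (not from the patch; the container is invented) of why that yields a forward iterator to the very element the reverse iterator refers to:

#include <cassert>
#include <iterator>
#include <list>

int main() {
  std::list<int> L = {1, 2, 3};

  std::list<int>::reverse_iterator RI = L.rbegin();    // refers to 3
  std::list<int>::iterator Fwd = std::prev(RI.base()); // base() points one past 3

  assert(&*Fwd == &*RI && *Fwd == 3);
  return 0;
}

The related identity, that std::next(RI).base() is a forward iterator to the element RI refers to, is what the searchBackward hunk relies on when it splices the chosen filler instruction.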