From 671bdb1c9cdaca25833cf378a4d884e91328a4c3 Mon Sep 17 00:00:00 2001
From: David Majnemer
Date: Tue, 12 Jul 2016 20:31:46 +0000
Subject: [PATCH] [LoopAccessAnalysis] Some minor cleanups

Use range-based for loops. Use auto when appropriate.

No functional change is intended.

llvm-svn: 275213
---
 lib/Analysis/LoopAccessAnalysis.cpp | 36 ++++++++++++++++--------------------
 1 file changed, 16 insertions(+), 20 deletions(-)

diff --git a/lib/Analysis/LoopAccessAnalysis.cpp b/lib/Analysis/LoopAccessAnalysis.cpp
index a6a9304e5f3..e889e2d2c1f 100644
--- a/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/lib/Analysis/LoopAccessAnalysis.cpp
@@ -105,7 +105,7 @@ void LoopAccessReport::emitAnalysis(const LoopAccessReport &Message,
 }
 
 Value *llvm::stripIntegerCast(Value *V) {
-  if (CastInst *CI = dyn_cast<CastInst>(V))
+  if (auto *CI = dyn_cast<CastInst>(V))
     if (CI->getOperand(0)->getType()->isIntegerTy())
       return CI->getOperand(0);
   return V;
@@ -172,7 +172,7 @@ void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, bool WritePtr,
 
     // For expressions with negative step, the upper bound is ScStart and the
     // lower bound is ScEnd.
-    if (const SCEVConstant *CStep = dyn_cast<const SCEVConstant>(Step)) {
+    if (const auto *CStep = dyn_cast<const SCEVConstant>(Step)) {
       if (CStep->getValue()->isNegative())
         std::swap(ScStart, ScEnd);
     } else {
@@ -839,11 +839,11 @@ static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
 
   // Make sure there is only one non-const index and analyze that.
   Value *NonConstIndex = nullptr;
-  for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
-    if (!isa<ConstantInt>(*Index)) {
+  for (Value *Index : make_range(GEP->idx_begin(), GEP->idx_end()))
+    if (!isa<ConstantInt>(Index)) {
       if (NonConstIndex)
         return false;
-      NonConstIndex = *Index;
+      NonConstIndex = Index;
     }
   if (!NonConstIndex)
     // The recurrence is on the pointer, ignore for now.
@@ -976,9 +976,9 @@ int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr,
 /// Take the pointer operand from the Load/Store instruction.
 /// Returns NULL if this is not a valid Load/Store instruction.
 static Value *getPointerOperand(Value *I) {
-  if (LoadInst *LI = dyn_cast<LoadInst>(I))
+  if (auto *LI = dyn_cast<LoadInst>(I))
     return LI->getPointerOperand();
-  if (StoreInst *SI = dyn_cast<StoreInst>(I))
+  if (auto *SI = dyn_cast<StoreInst>(I))
     return SI->getPointerOperand();
   return nullptr;
 }
@@ -1522,21 +1522,17 @@ void LoopAccessInfo::analyzeLoop() {
   const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
 
   // For each block.
-  for (Loop::block_iterator bb = TheLoop->block_begin(),
-       be = TheLoop->block_end(); bb != be; ++bb) {
-
+  for (BasicBlock *BB : TheLoop->blocks()) {
     // Scan the BB and collect legal loads and stores.
-    for (BasicBlock::iterator it = (*bb)->begin(), e = (*bb)->end(); it != e;
-         ++it) {
-
+    for (Instruction &I : *BB) {
       // If this is a load, save it. If this instruction can read from memory
       // but is not a load, then we quit. Notice that we don't handle function
       // calls that read or write.
-      if (it->mayReadFromMemory()) {
+      if (I.mayReadFromMemory()) {
         // Many math library functions read the rounding mode. We will only
         // vectorize a loop if it contains known function calls that don't set
         // the flag. Therefore, it is safe to ignore this read from memory.
-        CallInst *Call = dyn_cast<CallInst>(it);
+        auto *Call = dyn_cast<CallInst>(&I);
         if (Call && getVectorIntrinsicIDForCall(Call, TLI))
           continue;
 
@@ -1546,7 +1542,7 @@ void LoopAccessInfo::analyzeLoop() {
             TLI->isFunctionVectorizable(Call->getCalledFunction()->getName()))
           continue;
 
-        LoadInst *Ld = dyn_cast<LoadInst>(it);
+        auto *Ld = dyn_cast<LoadInst>(&I);
         if (!Ld || (!Ld->isSimple() && !IsAnnotatedParallel)) {
           emitAnalysis(LoopAccessReport(Ld) <<
                        "read with atomic ordering or volatile read");
@@ -1563,11 +1559,11 @@ void LoopAccessInfo::analyzeLoop() {
       }
 
       // Save 'store' instructions. Abort if other instructions write to memory.
-      if (it->mayWriteToMemory()) {
-        StoreInst *St = dyn_cast<StoreInst>(it);
+      if (I.mayWriteToMemory()) {
+        auto *St = dyn_cast<StoreInst>(&I);
         if (!St) {
-          emitAnalysis(LoopAccessReport(&*it) <<
-                       "instruction cannot be vectorized");
+          emitAnalysis(LoopAccessReport(St)
+                       << "instruction cannot be vectorized");
          CanVecMem = false;
          return;
        }
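
Note: for readers less familiar with the idiom, the sketch below is a minimal, self-contained C++ example, not LLVM code; the Node/Leaf types and sumLeaves functions are hypothetical and exist only to illustrate the before/after shape this patch applies: an explicit iterator loop with a spelled-out pointer type becomes a range-based for loop with auto * at the cast, with no change in behavior.

#include <iostream>
#include <memory>
#include <vector>

struct Node { virtual ~Node() = default; };
struct Leaf : Node { int Value = 42; };

// Before: explicit iterators and an explicitly named pointer type.
int sumLeavesOld(const std::vector<std::unique_ptr<Node>> &Nodes) {
  int Sum = 0;
  for (auto It = Nodes.begin(); It != Nodes.end(); ++It)
    if (Leaf *L = dynamic_cast<Leaf *>(It->get()))
      Sum += L->Value;
  return Sum;
}

// After: range-based for and auto *, the same cleanup pattern as the patch.
int sumLeavesNew(const std::vector<std::unique_ptr<Node>> &Nodes) {
  int Sum = 0;
  for (const std::unique_ptr<Node> &N : Nodes)
    if (auto *L = dynamic_cast<Leaf *>(N.get()))
      Sum += L->Value;
  return Sum;
}

int main() {
  std::vector<std::unique_ptr<Node>> Nodes;
  Nodes.push_back(std::make_unique<Leaf>());
  Nodes.push_back(std::make_unique<Node>());
  // Both versions compute the same result; only the loop style differs.
  std::cout << sumLeavesOld(Nodes) << " == " << sumLeavesNew(Nodes) << "\n";
}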