diff --git a/include/llvm/Analysis/AliasSetTracker.h b/include/llvm/Analysis/AliasSetTracker.h
index eac97501c75..d150f8910d0 100644
--- a/include/llvm/Analysis/AliasSetTracker.h
+++ b/include/llvm/Analysis/AliasSetTracker.h
@@ -121,10 +121,10 @@ class AliasSet : public ilist_node<AliasSet> {
   AliasSet *Forward;

   /// All instructions without a specific address in this alias set.
-  /// In rare cases this vector can have a null'ed out WeakVH
+  /// In rare cases this vector can have null'ed out WeakTrackingVH
   /// instances (can happen if some other loop pass deletes an
   /// instruction in this list).
-  std::vector<WeakVH> UnknownInsts;
+  std::vector<WeakTrackingVH> UnknownInsts;

   /// Number of nodes pointing to this AliasSet plus the number of AliasSets
   /// forwarding to it.
diff --git a/include/llvm/Analysis/AssumptionCache.h b/include/llvm/Analysis/AssumptionCache.h
index f833f417c7d..04c6fd70e07 100644
--- a/include/llvm/Analysis/AssumptionCache.h
+++ b/include/llvm/Analysis/AssumptionCache.h
@@ -43,7 +43,7 @@ class AssumptionCache {

   /// \brief Vector of weak value handles to calls of the @llvm.assume
   /// intrinsic.
-  SmallVector<WeakVH, 4> AssumeHandles;
+  SmallVector<WeakTrackingVH, 4> AssumeHandles;

   class AffectedValueCallbackVH final : public CallbackVH {
     AssumptionCache *AC;
@@ -62,12 +62,12 @@ class AssumptionCache {
   /// \brief A map of values about which an assumption might be providing
   /// information to the relevant set of assumptions.
   using AffectedValuesMap =
-      DenseMap<AffectedValueCallbackVH, SmallVector<WeakVH, 1>,
-               AffectedValueCallbackVH::DMI>;
+      DenseMap<AffectedValueCallbackVH, SmallVector<WeakTrackingVH, 1>,
+               AffectedValueCallbackVH::DMI>;
   AffectedValuesMap AffectedValues;

   /// Get the vector of assumptions which affect a value from the cache.
-  SmallVector<WeakVH, 1> &getOrInsertAffectedValues(Value *V);
+  SmallVector<WeakTrackingVH, 1> &getOrInsertAffectedValues(Value *V);

   /// Copy affected values in the cache for OV to be affected values for NV.
   void copyAffectedValuesInCache(Value *OV, Value *NV);
@@ -120,20 +120,20 @@ public:
   /// FIXME: We should replace this with pointee_iterator<filter_iterator<...>>
   /// when we can write that to filter out the null values. Then caller code
   /// will become simpler.
-  MutableArrayRef<WeakVH> assumptions() {
+  MutableArrayRef<WeakTrackingVH> assumptions() {
     if (!Scanned)
       scanFunction();
     return AssumeHandles;
   }

   /// \brief Access the list of assumptions which affect this value.
-  MutableArrayRef<WeakVH> assumptionsFor(const Value *V) {
+  MutableArrayRef<WeakTrackingVH> assumptionsFor(const Value *V) {
     if (!Scanned)
       scanFunction();

     auto AVI = AffectedValues.find_as(const_cast<Value *>(V));
     if (AVI == AffectedValues.end())
-      return MutableArrayRef<WeakVH>();
+      return MutableArrayRef<WeakTrackingVH>();

     return AVI->second;
   }
diff --git a/include/llvm/Analysis/CGSCCPassManager.h b/include/llvm/Analysis/CGSCCPassManager.h
index 398bbfb0c41..a15a9e18c81 100644
--- a/include/llvm/Analysis/CGSCCPassManager.h
+++ b/include/llvm/Analysis/CGSCCPassManager.h
@@ -646,7 +646,7 @@ public:
     LazyCallGraph::SCC *C = &InitialC;

     // Collect value handles for all of the indirect call sites.
-    SmallVector<WeakVH, 16> CallHandles;
+    SmallVector<WeakTrackingVH, 16> CallHandles;

     // Struct to track the counts of direct and indirect calls in each function
     // of the SCC.
@@ -658,7 +658,7 @@ public:
    // Put value handles on all of the indirect calls and return the number of
    // direct calls for each function in the SCC.
    auto ScanSCC = [](LazyCallGraph::SCC &C,
-                      SmallVectorImpl<WeakVH> &CallHandles) {
+                      SmallVectorImpl<WeakTrackingVH> &CallHandles) {
      assert(CallHandles.empty() && "Must start with a clear set of handles.");

      SmallVector<CallCount, 4> CallCounts;
@@ -671,7 +671,7 @@ public:
            ++Count.Direct;
          } else {
            ++Count.Indirect;
-            CallHandles.push_back(WeakVH(&I));
+            CallHandles.push_back(WeakTrackingVH(&I));
          }
        }
      }
@@ -699,7 +699,7 @@ public:
             "Cannot have changed the size of the SCC!");

      // Check whether any of the handles were devirtualized.
-      auto IsDevirtualizedHandle = [&](WeakVH &CallH) {
+      auto IsDevirtualizedHandle = [&](WeakTrackingVH &CallH) {
        if (!CallH)
          return false;
        auto CS = CallSite(CallH);
diff --git a/include/llvm/Analysis/CallGraph.h b/include/llvm/Analysis/CallGraph.h
index ea85436ee58..cc4788d3eda 100644
--- a/include/llvm/Analysis/CallGraph.h
+++ b/include/llvm/Analysis/CallGraph.h
@@ -172,7 +172,7 @@ class CallGraphNode {
 public:
   /// \brief A pair of the calling instruction (a call or invoke)
   /// and the call graph node being called.
-  typedef std::pair<WeakVH, CallGraphNode *> CallRecord;
+  typedef std::pair<WeakTrackingVH, CallGraphNode *> CallRecord;

 public:
   typedef std::vector<CallRecord> CalledFunctionsVector;
diff --git a/include/llvm/Analysis/IVUsers.h b/include/llvm/Analysis/IVUsers.h
index bb572dd5603..035b974c5c1 100644
--- a/include/llvm/Analysis/IVUsers.h
+++ b/include/llvm/Analysis/IVUsers.h
@@ -80,7 +80,7 @@ private:

   /// OperandValToReplace - The Value of the operand in the user instruction
   /// that this IVStrideUse is representing.
-  WeakVH OperandValToReplace;
+  WeakTrackingVH OperandValToReplace;

   /// PostIncLoops - The set of loops for which Expr has been adjusted to
   /// use post-inc mode. This corresponds with SCEVExpander's post-inc concept.
diff --git a/include/llvm/Analysis/MemoryBuiltins.h b/include/llvm/Analysis/MemoryBuiltins.h
index 743faf2b67d..60dafccd84b 100644
--- a/include/llvm/Analysis/MemoryBuiltins.h
+++ b/include/llvm/Analysis/MemoryBuiltins.h
@@ -235,7 +235,7 @@ class ObjectSizeOffsetEvaluator
   : public InstVisitor<ObjectSizeOffsetEvaluator, SizeOffsetEvalType> {

   typedef IRBuilder<TargetFolder> BuilderTy;
-  typedef std::pair<WeakVH, WeakVH> WeakEvalType;
+  typedef std::pair<WeakTrackingVH, WeakTrackingVH> WeakEvalType;
   typedef DenseMap<const Value*, WeakEvalType> CacheMapTy;
   typedef SmallPtrSet<const Value*, 8> PtrSetTy;
diff --git a/include/llvm/Analysis/ScalarEvolutionExpander.h b/include/llvm/Analysis/ScalarEvolutionExpander.h
index 517592a3d04..7d16f34e54c 100644
--- a/include/llvm/Analysis/ScalarEvolutionExpander.h
+++ b/include/llvm/Analysis/ScalarEvolutionExpander.h
@@ -189,7 +189,7 @@ namespace llvm {
     /// replace congruent phis with their most canonical representative. Return
     /// the number of phis eliminated.
     unsigned replaceCongruentIVs(Loop *L, const DominatorTree *DT,
-                                 SmallVectorImpl<WeakVH> &DeadInsts,
+                                 SmallVectorImpl<WeakTrackingVH> &DeadInsts,
                                  const TargetTransformInfo *TTI = nullptr);

     /// Insert code to directly compute the specified SCEV expression into the
diff --git a/include/llvm/IR/ValueHandle.h b/include/llvm/IR/ValueHandle.h
index 17b65c6511c..9435e9404a4 100644
--- a/include/llvm/IR/ValueHandle.h
+++ b/include/llvm/IR/ValueHandle.h
@@ -34,11 +34,7 @@ protected:
   ///
   /// This is to avoid having a vtable for the light-weight handle pointers. The
   /// fully general Callback version does have a vtable.
-  enum HandleBaseKind {
-    Assert,
-    Callback,
-    Weak
-  };
+  enum HandleBaseKind { Assert, Callback, WeakTracking };

   ValueHandleBase(const ValueHandleBase &RHS)
       : ValueHandleBase(RHS.PrevPair.getInt(), RHS) {}
@@ -145,14 +141,14 @@ private:
 /// is useful for advisory sorts of information, but should not be used as the
 /// key of a map (since the map would have to rearrange itself when the pointer
 /// changes).
-class WeakVH : public ValueHandleBase {
+class WeakTrackingVH : public ValueHandleBase {
 public:
-  WeakVH() : ValueHandleBase(Weak) {}
-  WeakVH(Value *P) : ValueHandleBase(Weak, P) {}
-  WeakVH(const WeakVH &RHS)
-      : ValueHandleBase(Weak, RHS) {}
+  WeakTrackingVH() : ValueHandleBase(WeakTracking) {}
+  WeakTrackingVH(Value *P) : ValueHandleBase(WeakTracking, P) {}
+  WeakTrackingVH(const WeakTrackingVH &RHS)
+      : ValueHandleBase(WeakTracking, RHS) {}

-  WeakVH &operator=(const WeakVH &RHS) = default;
+  WeakTrackingVH &operator=(const WeakTrackingVH &RHS) = default;

   Value *operator=(Value *RHS) {
     return ValueHandleBase::operator=(RHS);
@@ -170,15 +166,17 @@ public:
   }
 };

-// Specialize simplify_type to allow WeakVH to participate in
+// Specialize simplify_type to allow WeakTrackingVH to participate in
 // dyn_cast, isa, etc.
-template <> struct simplify_type<WeakVH> {
+template <> struct simplify_type<WeakTrackingVH> {
   typedef Value *SimpleType;
-  static SimpleType getSimplifiedValue(WeakVH &WVH) { return WVH; }
+  static SimpleType getSimplifiedValue(WeakTrackingVH &WVH) { return WVH; }
 };
-template <> struct simplify_type<const WeakVH> {
+template <> struct simplify_type<const WeakTrackingVH> {
   typedef Value *SimpleType;
-  static SimpleType getSimplifiedValue(const WeakVH &WVH) { return WVH; }
+  static SimpleType getSimplifiedValue(const WeakTrackingVH &WVH) {
+    return WVH;
+  }
 };

 /// \brief Value handle that asserts if the Value is deleted.
@@ -294,7 +292,7 @@ struct isPodLike<AssertingVH<T> > {
 /// Assigning a value to a TrackingVH is always allowed, even if said TrackingVH
 /// no longer points to a valid value.
 template <typename ValueTy> class TrackingVH {
-  WeakVH InnerHandle;
+  WeakTrackingVH InnerHandle;

 public:
   ValueTy *getValPtr() const {
@@ -368,7 +366,8 @@ public:
   ///
   /// Called when this->getValPtr() is destroyed, inside ~Value(), so you
   /// may call any non-virtual Value method on getValPtr(), but no subclass
-  /// methods. If WeakVH were implemented as a CallbackVH, it would use this
+  /// methods. If WeakTrackingVH were implemented as a CallbackVH, it would use
+  /// this
   /// method to call setValPtr(NULL). AssertingVH would use this method to
   /// cause an assertion failure.
   ///
@@ -379,7 +378,8 @@ public:
   /// \brief Callback for Value RAUW.
   ///
   /// Called when this->getValPtr()->replaceAllUsesWith(new_value) is called,
-  /// _before_ any of the uses have actually been replaced. If WeakVH were
+  /// _before_ any of the uses have actually been replaced. If WeakTrackingVH
+  /// were
   /// implemented as a CallbackVH, it would use this method to call
   /// setValPtr(new_value). AssertingVH would do nothing in this method.
   virtual void allUsesReplacedWith(Value *) {}
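
A note on semantics, to read alongside the rename above: WeakTrackingVH behaves exactly as WeakVH did; only the name changes. The following is a minimal sketch (not part of the patch; OldI and NewV are illustrative names) of the two behaviors the class now advertises in its name:

#include <cassert>
#include "llvm/IR/Instruction.h"
#include "llvm/IR/ValueHandle.h"
using namespace llvm;

// A weak handle that *tracks* its value: it follows RAUW to the replacement
// value, and nulls itself out if the tracked value is deleted.
void weakTrackingSketch(Instruction *OldI, Value *NewV) {
  WeakTrackingVH VH(OldI);        // VH points at OldI.
  OldI->replaceAllUsesWith(NewV); // ValueIsRAUWd fires: VH now tracks NewV.
  assert(VH == NewV && "the handle followed the RAUW");
  OldI->eraseFromParent();        // Erasing OldI no longer affects VH.
  // If the value VH currently tracks is itself deleted, ValueIsDeleted nulls
  // VH out instead of leaving it dangling.
}
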
diff --git a/include/llvm/Transforms/Scalar/NaryReassociate.h b/include/llvm/Transforms/Scalar/NaryReassociate.h
index a74bb6cc419..f35707eeb3f 100644
--- a/include/llvm/Transforms/Scalar/NaryReassociate.h
+++ b/include/llvm/Transforms/Scalar/NaryReassociate.h
@@ -167,7 +167,7 @@ private:
   //     foo(a + b);
   //     if (p2)
   //       bar(a + b);
-  DenseMap<const SCEV *, SmallVector<WeakVH, 2>> SeenExprs;
+  DenseMap<const SCEV *, SmallVector<WeakTrackingVH, 2>> SeenExprs;
 };
 } // namespace llvm
diff --git a/include/llvm/Transforms/Utils/Cloning.h b/include/llvm/Transforms/Utils/Cloning.h
index 337305a0a82..0a8903a6ed7 100644
--- a/include/llvm/Transforms/Utils/Cloning.h
+++ b/include/llvm/Transforms/Utils/Cloning.h
@@ -74,7 +74,7 @@ struct ClonedCodeInfo {
   /// All cloned call sites that have operand bundles attached are appended to
   /// this vector. This vector may contain nulls or undefs if some of the
   /// originally inserted callsites were DCE'ed after they were cloned.
-  std::vector<WeakVH> OperandBundleCallSites;
+  std::vector<WeakTrackingVH> OperandBundleCallSites;

   ClonedCodeInfo() = default;
 };
@@ -192,7 +192,7 @@ public:

   /// InlinedCalls - InlineFunction fills this in with callsites that were
   /// inlined from the callee. This is only filled in if CG is non-null.
-  SmallVector<WeakVH, 8> InlinedCalls;
+  SmallVector<WeakTrackingVH, 8> InlinedCalls;

   /// All of the new call sites inlined into the caller.
   ///
diff --git a/include/llvm/Transforms/Utils/SimplifyIndVar.h b/include/llvm/Transforms/Utils/SimplifyIndVar.h
index 6cdeeeb60a6..8d50aeb10d6 100644
--- a/include/llvm/Transforms/Utils/SimplifyIndVar.h
+++ b/include/llvm/Transforms/Utils/SimplifyIndVar.h
@@ -46,13 +46,13 @@ public:
 /// simplifyUsersOfIV - Simplify instructions that use this induction variable
 /// by using ScalarEvolution to analyze the IV's recurrence.
 bool simplifyUsersOfIV(PHINode *CurrIV, ScalarEvolution *SE, DominatorTree *DT,
-                       LoopInfo *LI, SmallVectorImpl<WeakVH> &Dead,
+                       LoopInfo *LI, SmallVectorImpl<WeakTrackingVH> &Dead,
                        IVVisitor *V = nullptr);

 /// SimplifyLoopIVs - Simplify users of induction variables within this
 /// loop. This does not actually change or add IVs.
 bool simplifyLoopIVs(Loop *L, ScalarEvolution *SE, DominatorTree *DT,
-                     LoopInfo *LI, SmallVectorImpl<WeakVH> &Dead);
+                     LoopInfo *LI, SmallVectorImpl<WeakTrackingVH> &Dead);

 } // end namespace llvm
diff --git a/include/llvm/Transforms/Utils/ValueMapper.h b/include/llvm/Transforms/Utils/ValueMapper.h
index 950ad92afcd..e44dc437342 100644
--- a/include/llvm/Transforms/Utils/ValueMapper.h
+++ b/include/llvm/Transforms/Utils/ValueMapper.h
@@ -23,7 +23,7 @@ namespace llvm {
 class Value;
 class Instruction;

-typedef ValueMap<const Value *, WeakVH> ValueToValueMapTy;
+typedef ValueMap<const Value *, WeakTrackingVH> ValueToValueMapTy;

 /// This is a class that can be implemented by clients to remap types when
 /// cloning constants and instructions.
diff --git a/include/llvm/Transforms/Vectorize/SLPVectorizer.h b/include/llvm/Transforms/Vectorize/SLPVectorizer.h
index d669a8e5b61..10338f7937e 100644
--- a/include/llvm/Transforms/Vectorize/SLPVectorizer.h
+++ b/include/llvm/Transforms/Vectorize/SLPVectorizer.h
@@ -40,8 +40,8 @@ class BoUpSLP;
 struct SLPVectorizerPass : public PassInfoMixin<SLPVectorizerPass> {
   typedef SmallVector<StoreInst *, 8> StoreList;
   typedef MapVector<Value *, StoreList> StoreListMap;
-  typedef SmallVector<WeakVH, 8> WeakVHList;
-  typedef MapVector<Value *, WeakVHList> WeakVHListMap;
+  typedef SmallVector<WeakTrackingVH, 8> WeakTrackingVHList;
+  typedef MapVector<Value *, WeakTrackingVHList> WeakTrackingVHListMap;

   ScalarEvolution *SE = nullptr;
   TargetTransformInfo *TTI = nullptr;
@@ -111,7 +111,7 @@ private:
   StoreListMap Stores;

   /// The getelementptr instructions in a basic block organized by base pointer.
-  WeakVHListMap GEPs;
+  WeakTrackingVHListMap GEPs;
 };
 }
diff --git a/lib/Analysis/AssumptionCache.cpp b/lib/Analysis/AssumptionCache.cpp
index 1fae9472448..0468c794e81 100644
--- a/lib/Analysis/AssumptionCache.cpp
+++ b/lib/Analysis/AssumptionCache.cpp
@@ -29,15 +29,16 @@ static cl::opt<bool>
                           cl::desc("Enable verification of assumption cache"),
                           cl::init(false));

-SmallVector<WeakVH, 1> &AssumptionCache::getOrInsertAffectedValues(Value *V) {
+SmallVector<WeakTrackingVH, 1> &
+AssumptionCache::getOrInsertAffectedValues(Value *V) {
   // Try using find_as first to avoid creating extra value handles just for the
   // purpose of doing the lookup.
  auto AVI = AffectedValues.find_as(V);
  if (AVI != AffectedValues.end())
    return AVI->second;

-  auto AVIP = AffectedValues.insert({
-      AffectedValueCallbackVH(V, this), SmallVector<WeakVH, 1>()});
+  auto AVIP = AffectedValues.insert(
+      {AffectedValueCallbackVH(V, this), SmallVector<WeakTrackingVH, 1>()});
  return AVIP.first->second;
 }
diff --git a/lib/Analysis/CallGraphSCCPass.cpp b/lib/Analysis/CallGraphSCCPass.cpp
index ea70f5752c6..8058e5b1935 100644
--- a/lib/Analysis/CallGraphSCCPass.cpp
+++ b/lib/Analysis/CallGraphSCCPass.cpp
@@ -204,7 +204,7 @@ bool CGPassManager::RefreshCallGraph(const CallGraphSCC &CurSCC, CallGraph &CG,
      // Get the set of call sites currently in the function.
      for (CallGraphNode::iterator I = CGN->begin(), E = CGN->end(); I != E; ) {
        // If this call site is null, then the function pass deleted the call
-        // entirely and the WeakVH nulled it out.
+        // entirely and the WeakTrackingVH nulled it out.
        if (!I->first ||
            // If we've already seen this call site, then the FunctionPass RAUW'd
            // one call with another, which resulted in two "uses" in the edge
@@ -347,7 +347,8 @@ bool CGPassManager::RefreshCallGraph(const CallGraphSCC &CurSCC, CallGraph &CG,
        DevirtualizedCall = true;

  // After scanning this function, if we still have entries in callsites, then
-  // they are dangling pointers. WeakVH should save us for this, so abort if
+  // they are dangling pointers. WeakTrackingVH should save us from this, so
+  // abort if
  // this happens.
  assert(CallSites.empty() && "Dangling pointers found in call sites map");
diff --git a/lib/Analysis/ScalarEvolutionExpander.cpp b/lib/Analysis/ScalarEvolutionExpander.cpp
index 6bc3de7833f..86cbd79aa84 100644
--- a/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -1772,9 +1772,10 @@ SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
 ///
 /// This does not depend on any SCEVExpander state but should be used in
 /// the same context that SCEVExpander is used.
-unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
-                                           SmallVectorImpl<WeakVH> &DeadInsts,
-                                           const TargetTransformInfo *TTI) {
+unsigned
+SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
+                                  SmallVectorImpl<WeakTrackingVH> &DeadInsts,
+                                  const TargetTransformInfo *TTI) {
  // Find integer phis in order of increasing width.
  SmallVector<PHINode *, 8> Phis;
  for (auto &I : *L->getHeader()) {
diff --git a/lib/Bitcode/Reader/ValueList.cpp b/lib/Bitcode/Reader/ValueList.cpp
index 7152a51cea6..d1a2a11bbfa 100644
--- a/lib/Bitcode/Reader/ValueList.cpp
+++ b/lib/Bitcode/Reader/ValueList.cpp
@@ -58,7 +58,7 @@ void BitcodeReaderValueList::assignValue(Value *V, unsigned Idx) {
  if (Idx >= size())
    resize(Idx + 1);

-  WeakVH &OldV = ValuePtrs[Idx];
+  WeakTrackingVH &OldV = ValuePtrs[Idx];
  if (!OldV) {
    OldV = V;
    return;
diff --git a/lib/Bitcode/Reader/ValueList.h b/lib/Bitcode/Reader/ValueList.h
index 3119d7735e2..72775a3cf3b 100644
--- a/lib/Bitcode/Reader/ValueList.h
+++ b/lib/Bitcode/Reader/ValueList.h
@@ -20,7 +20,7 @@ namespace llvm {
 class Constant;

 class BitcodeReaderValueList {
-  std::vector<WeakVH> ValuePtrs;
+  std::vector<WeakTrackingVH> ValuePtrs;

  /// As we resolve forward-referenced constants, we add information about them
  /// to this vector. This allows us to resolve them in bulk instead of
diff --git a/lib/CodeGen/CodeGenPrepare.cpp b/lib/CodeGen/CodeGenPrepare.cpp
index 173b8cb1f2d..c6c93811a0f 100644
--- a/lib/CodeGen/CodeGenPrepare.cpp
+++ b/lib/CodeGen/CodeGenPrepare.cpp
@@ -2226,10 +2226,11 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool& ModifiedDT) {
        ConstantInt *RetVal =
            lowerObjectSizeCall(II, *DL, TLInfo, /*MustSucceed=*/true);
        // Substituting this can cause recursive simplifications, which can
-        // invalidate our iterator. Use a WeakVH to hold onto it in case this
+        // invalidate our iterator. Use a WeakTrackingVH to hold onto it in case
+        // this
        // happens.
        Value *CurValue = &*CurInstIterator;
-        WeakVH IterHandle(CurValue);
+        WeakTrackingVH IterHandle(CurValue);

        replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr);

@@ -4442,9 +4443,9 @@ bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
    // using it.
    if (Repl->use_empty()) {
      // This can cause recursive deletion, which can invalidate our iterator.
-      // Use a WeakVH to hold onto it in case this happens.
+      // Use a WeakTrackingVH to hold onto it in case this happens.
      Value *CurValue = &*CurInstIterator;
-      WeakVH IterHandle(CurValue);
+      WeakTrackingVH IterHandle(CurValue);
      BasicBlock *BB = CurInstIterator->getParent();

      RecursivelyDeleteTriviallyDeadInstructions(Repl, TLInfo);
diff --git a/lib/IR/Value.cpp b/lib/IR/Value.cpp
index 2ce0a2ecf2b..bde5b3a8ce0 100644
--- a/lib/IR/Value.cpp
+++ b/lib/IR/Value.cpp
@@ -820,8 +820,8 @@ void ValueHandleBase::ValueIsDeleted(Value *V) {
    switch (Entry->getKind()) {
    case Assert:
      break;
-    case Weak:
-      // Weak just goes to null, which will unlink it from the list.
+    case WeakTracking:
+      // WeakTracking just goes to null, which will unlink it from the list.
      Entry->operator=(nullptr);
      break;
    case Callback:
@@ -871,7 +871,7 @@ void ValueHandleBase::ValueIsRAUWd(Value *Old, Value *New) {
    case Assert:
      // Asserting handle does not follow RAUW implicitly.
      break;
-    case Weak:
+    case WeakTracking:
      // Weak goes to the new value, which will unlink it from Old's list.
      Entry->operator=(New);
      break;
@@ -888,12 +888,12 @@ void ValueHandleBase::ValueIsRAUWd(Value *Old, Value *New) {
  if (Old->HasValueHandle)
    for (Entry = pImpl->ValueHandles[Old]; Entry; Entry = Entry->Next)
      switch (Entry->getKind()) {
-      case Weak:
+      case WeakTracking:
        dbgs() << "After RAUW from " << *Old->getType() << " %"
               << Old->getName() << " to " << *New->getType() << " %"
               << New->getName() << "\n";
        llvm_unreachable(
-            "A weak value handle still pointed to the old value!\n");
+            "A weak tracking value handle still pointed to the old value!\n");
      default:
        break;
      }
diff --git a/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp b/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
index b7e62075244..d8cb98fe1b1 100644
--- a/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
+++ b/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
@@ -77,9 +77,10 @@ class SIAnnotateControlFlow : public FunctionPass {

  void insertElse(BranchInst *Term);

-  Value *handleLoopCondition(Value *Cond, PHINode *Broken,
-                             llvm::Loop *L, BranchInst *Term,
-                             SmallVectorImpl<WeakVH> &LoopPhiConditions);
+  Value *
+  handleLoopCondition(Value *Cond, PHINode *Broken, llvm::Loop *L,
+                      BranchInst *Term,
+                      SmallVectorImpl<WeakTrackingVH> &LoopPhiConditions);

  void handleLoop(BranchInst *Term);

@@ -212,9 +213,8 @@ void SIAnnotateControlFlow::insertElse(BranchInst *Term) {

 /// \brief Recursively handle the condition leading to a loop
 Value *SIAnnotateControlFlow::handleLoopCondition(
-    Value *Cond, PHINode *Broken,
-    llvm::Loop *L, BranchInst *Term,
-    SmallVectorImpl<WeakVH> &LoopPhiConditions) {
+    Value *Cond, PHINode *Broken, llvm::Loop *L, BranchInst *Term,
+    SmallVectorImpl<WeakTrackingVH> &LoopPhiConditions) {

  // Only search through PHI nodes which are inside the loop. If we try this
  // with PHI nodes that are outside of the loop, we end up inserting new PHI
@@ -281,7 +281,7 @@ Value *SIAnnotateControlFlow::handleLoopCondition(
      NewPhi->setIncomingValue(i, PhiArg);
    }

-    LoopPhiConditions.push_back(WeakVH(Phi));
+    LoopPhiConditions.push_back(WeakTrackingVH(Phi));
    return Ret;
  }

@@ -323,7 +323,7 @@ void SIAnnotateControlFlow::handleLoop(BranchInst *Term) {
  BasicBlock *Target = Term->getSuccessor(1);
  PHINode *Broken = PHINode::Create(Int64, 0, "phi.broken", &Target->front());

-  SmallVector<WeakVH, 8> LoopPhiConditions;
+  SmallVector<WeakTrackingVH, 8> LoopPhiConditions;
  Value *Cond = Term->getCondition();
  Term->setCondition(BoolTrue);
  Value *Arg = handleLoopCondition(Cond, Broken, L, Term, LoopPhiConditions);
@@ -333,7 +333,7 @@ void SIAnnotateControlFlow::handleLoop(BranchInst *Term) {

  Term->setCondition(CallInst::Create(Loop, Arg, "", Term));

-  for (WeakVH Val : reverse(LoopPhiConditions)) {
+  for (WeakTrackingVH Val : reverse(LoopPhiConditions)) {
    if (PHINode *Cond = cast_or_null<PHINode>(Val))
      eraseIfUnused(Cond);
  }
diff --git a/lib/Target/XCore/XCoreLowerThreadLocal.cpp b/lib/Target/XCore/XCoreLowerThreadLocal.cpp
index 5cc51cd7a99..87532d11ede 100644
--- a/lib/Target/XCore/XCoreLowerThreadLocal.cpp
+++ b/lib/Target/XCore/XCoreLowerThreadLocal.cpp
@@ -128,11 +128,11 @@ createReplacementInstr(ConstantExpr *CE, Instruction *Instr) {

 static bool replaceConstantExprOp(ConstantExpr *CE, Pass *P) {
  do {
-    SmallVector<WeakVH, 8> WUsers(CE->user_begin(), CE->user_end());
+    SmallVector<WeakTrackingVH, 8> WUsers(CE->user_begin(), CE->user_end());
    std::sort(WUsers.begin(), WUsers.end());
    WUsers.erase(std::unique(WUsers.begin(), WUsers.end()), WUsers.end());
    while (!WUsers.empty())
-      if (WeakVH WU = WUsers.pop_back_val()) {
+      if (WeakTrackingVH WU = WUsers.pop_back_val()) {
        if (PHINode *PN = dyn_cast<PHINode>(WU)) {
          for (int I = 0, E = PN->getNumIncomingValues(); I < E; ++I)
            if (PN->getIncomingValue(I) == CE) {
@@ -159,12 +159,12 @@ static bool replaceConstantExprOp(ConstantExpr *CE, Pass *P) {
 }

 static bool rewriteNonInstructionUses(GlobalVariable *GV, Pass *P) {
-  SmallVector<WeakVH, 8> WUsers;
+  SmallVector<WeakTrackingVH, 8> WUsers;
  for (User *U : GV->users())
    if (!isa<Instruction>(U))
-      WUsers.push_back(WeakVH(U));
+      WUsers.push_back(WeakTrackingVH(U));
  while (!WUsers.empty())
-    if (WeakVH WU = WUsers.pop_back_val()) {
+    if (WeakTrackingVH WU = WUsers.pop_back_val()) {
      ConstantExpr *CE = dyn_cast<ConstantExpr>(WU);
      if (!CE || !replaceConstantExprOp(CE, P))
        return false;
diff --git a/lib/Transforms/IPO/GlobalOpt.cpp b/lib/Transforms/IPO/GlobalOpt.cpp
index bb2ebb46a4b..f277a51ae65 100644
--- a/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/lib/Transforms/IPO/GlobalOpt.cpp
@@ -239,7 +239,7 @@ static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
  // we delete a constant array, we may also be holding pointer to one of its
  // elements (or an element of one of its elements if we're dealing with an
  // array of arrays) in the worklist.
-  SmallVector<WeakVH, 8> WorkList(V->user_begin(), V->user_end());
+  SmallVector<WeakTrackingVH, 8> WorkList(V->user_begin(), V->user_end());
  while (!WorkList.empty()) {
    Value *UV = WorkList.pop_back_val();
    if (!UV)
diff --git a/lib/Transforms/IPO/MergeFunctions.cpp b/lib/Transforms/IPO/MergeFunctions.cpp
index 21bf149e7bc..0e478ba607b 100644
--- a/lib/Transforms/IPO/MergeFunctions.cpp
+++ b/lib/Transforms/IPO/MergeFunctions.cpp
@@ -207,12 +207,12 @@ private:

  /// A work queue of functions that may have been modified and should be
  /// analyzed again.
-  std::vector<WeakVH> Deferred;
+  std::vector<WeakTrackingVH> Deferred;

  /// Checks the rules of order relation introduced among functions set.
  /// Returns true, if sanity check has been passed, and false if failed.
#ifndef NDEBUG
-  bool doSanityCheck(std::vector<WeakVH> &Worklist);
+  bool doSanityCheck(std::vector<WeakTrackingVH> &Worklist);
#endif

  /// Insert a ComparableFunction into the FnTree, or merge it away if it's
@@ -286,7 +286,7 @@ ModulePass *llvm::createMergeFunctionsPass() {
 }

#ifndef NDEBUG
-bool MergeFunctions::doSanityCheck(std::vector<WeakVH> &Worklist) {
+bool MergeFunctions::doSanityCheck(std::vector<WeakTrackingVH> &Worklist) {
  if (const unsigned Max = NumFunctionsForSanityCheck) {
    unsigned TripleNumber = 0;
    bool Valid = true;
@@ -294,10 +294,12 @@ bool MergeFunctions::doSanityCheck(std::vector<WeakVH> &Worklist) {
    dbgs() << "MERGEFUNC-SANITY: Started for first " << Max
           << " functions.\n";

    unsigned i = 0;
-    for (std::vector<WeakVH>::iterator I = Worklist.begin(), E = Worklist.end();
+    for (std::vector<WeakTrackingVH>::iterator I = Worklist.begin(),
+                                               E = Worklist.end();
         I != E && i < Max; ++I, ++i) {
      unsigned j = i;
-      for (std::vector<WeakVH>::iterator J = I; J != E && j < Max; ++J, ++j) {
+      for (std::vector<WeakTrackingVH>::iterator J = I; J != E && j < Max;
+           ++J, ++j) {
        Function *F1 = cast<Function>(*I);
        Function *F2 = cast<Function>(*J);
        int Res1 = FunctionComparator(F1, F2, &GlobalNumbers).compare();
@@ -315,7 +317,7 @@ bool MergeFunctions::doSanityCheck(std::vector<WeakVH> &Worklist) {
          continue;

        unsigned k = j;
-        for (std::vector<WeakVH>::iterator K = J; K != E && k < Max;
+        for (std::vector<WeakTrackingVH>::iterator K = J; K != E && k < Max;
             ++k, ++K, ++TripleNumber) {
          if (K == J)
            continue;
@@ -385,12 +387,12 @@ bool MergeFunctions::runOnModule(Module &M) {
    // consider merging it. Otherwise it is dropped and never considered again.
    if ((I != S && std::prev(I)->first == I->first) ||
        (std::next(I) != IE && std::next(I)->first == I->first) ) {
-      Deferred.push_back(WeakVH(I->second));
+      Deferred.push_back(WeakTrackingVH(I->second));
    }
  }

  do {
-    std::vector<WeakVH> Worklist;
+    std::vector<WeakTrackingVH> Worklist;
    Deferred.swap(Worklist);

    DEBUG(doSanityCheck(Worklist));
@@ -399,7 +401,7 @@ bool MergeFunctions::runOnModule(Module &M) {
    DEBUG(dbgs() << "size of worklist: " << Worklist.size() << '\n');

    // Insert functions and merge them.
-    for (WeakVH &I : Worklist) {
+    for (WeakTrackingVH &I : Worklist) {
      if (!I)
        continue;
      Function *F = cast<Function>(I);
diff --git a/lib/Transforms/InstCombine/InstructionCombining.cpp b/lib/Transforms/InstCombine/InstructionCombining.cpp
index 0f664463625..1eb98b18bfb 100644
--- a/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -1948,9 +1948,9 @@ static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo *TLI,
  return isAllocLikeFn(V, TLI) && V != AI;
 }

-static bool
-isAllocSiteRemovable(Instruction *AI, SmallVectorImpl<WeakVH> &Users,
-                     const TargetLibraryInfo *TLI) {
+static bool isAllocSiteRemovable(Instruction *AI,
+                                 SmallVectorImpl<WeakTrackingVH> &Users,
+                                 const TargetLibraryInfo *TLI) {
  SmallVector<Instruction*, 4> Worklist;
  Worklist.push_back(AI);

@@ -2034,7 +2034,7 @@ Instruction *InstCombiner::visitAllocSite(Instruction &MI) {
  // If we have a malloc call which is only used in any amount of comparisons
  // to null and free calls, delete the calls and replace the comparisons with
  // true or false as appropriate.
-  SmallVector<WeakVH, 64> Users;
+  SmallVector<WeakTrackingVH, 64> Users;
  if (isAllocSiteRemovable(&MI, Users, &TLI)) {
    for (unsigned i = 0, e = Users.size(); i != e; ++i) {
      // Lowering all @llvm.objectsize calls first because they may
diff --git a/lib/Transforms/Scalar/IndVarSimplify.cpp b/lib/Transforms/Scalar/IndVarSimplify.cpp
index dcb2a4a0c6e..3953198fe60 100644
--- a/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -97,7 +97,7 @@ class IndVarSimplify {
  TargetLibraryInfo *TLI;
  const TargetTransformInfo *TTI;

-  SmallVector<WeakVH, 16> DeadInsts;
+  SmallVector<WeakTrackingVH, 16> DeadInsts;
  bool Changed = false;

  bool isValidRewrite(Value *FromVal, Value *ToVal);
@@ -415,8 +415,8 @@ void IndVarSimplify::handleFloatingPointIV(Loop *L, PHINode *PN) {
                                Compare->getName());

  // In the following deletions, PN may become dead and may be deleted.
-  // Use a WeakVH to observe whether this happens.
-  WeakVH WeakPH = PN;
+  // Use a WeakTrackingVH to observe whether this happens.
+  WeakTrackingVH WeakPH = PN;

  // Delete the old floating point exit comparison. The branch starts using the
  // new comparison.
@@ -451,7 +451,7 @@ void IndVarSimplify::rewriteNonIntegerIVs(Loop *L) {
  //
  BasicBlock *Header = L->getHeader();

-  SmallVector<WeakVH, 8> PHIs;
+  SmallVector<WeakTrackingVH, 8> PHIs;
  for (BasicBlock::iterator I = Header->begin();
       PHINode *PN = dyn_cast<PHINode>(I); ++I)
    PHIs.push_back(PN);
@@ -901,7 +901,7 @@ class WidenIV {
  PHINode *WidePhi;
  Instruction *WideInc;
  const SCEV *WideIncExpr;
-  SmallVectorImpl<WeakVH> &DeadInsts;
+  SmallVectorImpl<WeakTrackingVH> &DeadInsts;

  SmallPtrSet<Instruction *, 16> Widened;
  SmallVector<NarrowIVDefUse, 8> NarrowIVUsers;
@@ -941,20 +941,13 @@ class WidenIV {
  }

 public:
-  WidenIV(const WideIVInfo &WI, LoopInfo *LInfo,
-          ScalarEvolution *SEv, DominatorTree *DTree,
-          SmallVectorImpl<WeakVH> &DI, bool HasGuards) :
-    OrigPhi(WI.NarrowIV),
-    WideType(WI.WidestNativeType),
-    LI(LInfo),
-    L(LI->getLoopFor(OrigPhi->getParent())),
-    SE(SEv),
-    DT(DTree),
-    HasGuards(HasGuards),
-    WidePhi(nullptr),
-    WideInc(nullptr),
-    WideIncExpr(nullptr),
-    DeadInsts(DI) {
+  WidenIV(const WideIVInfo &WI, LoopInfo *LInfo, ScalarEvolution *SEv,
+          DominatorTree *DTree, SmallVectorImpl<WeakTrackingVH> &DI,
+          bool HasGuards)
+      : OrigPhi(WI.NarrowIV), WideType(WI.WidestNativeType), LI(LInfo),
+        L(LI->getLoopFor(OrigPhi->getParent())), SE(SEv), DT(DTree),
+        HasGuards(HasGuards), WidePhi(nullptr), WideInc(nullptr),
+        WideIncExpr(nullptr), DeadInsts(DI) {
    assert(L->getHeader() == OrigPhi->getParent() && "Phi must be an IV");
    ExtendKindMap[OrigPhi] = WI.IsSigned ? SignExtended : ZeroExtended;
  }
diff --git a/lib/Transforms/Scalar/InferAddressSpaces.cpp b/lib/Transforms/Scalar/InferAddressSpaces.cpp
index e6994896d3a..5e116ef2fe7 100644
--- a/lib/Transforms/Scalar/InferAddressSpaces.cpp
+++ b/lib/Transforms/Scalar/InferAddressSpaces.cpp
@@ -138,7 +138,7 @@ private:

  // Tries to infer the specific address space of each address expression in
  // Postorder.
-  void inferAddressSpaces(ArrayRef<WeakVH> Postorder,
+  void inferAddressSpaces(ArrayRef<WeakTrackingVH> Postorder,
                          ValueToAddrSpaceMapTy *InferredAddrSpace) const;

  bool isSafeToCastConstAddrSpace(Constant *C, unsigned NewAS) const;
@@ -147,7 +147,7 @@ private:
  // address spaces if InferredAddrSpace says so. Postorder is the postorder of
  // all flat expressions in the use-def graph of function F.
  bool
-  rewriteWithNewAddressSpaces(ArrayRef<WeakVH> Postorder,
+  rewriteWithNewAddressSpaces(ArrayRef<WeakTrackingVH> Postorder,
                              const ValueToAddrSpaceMapTy &InferredAddrSpace,
                              Function *F) const;

@@ -162,7 +162,7 @@ private:
      std::vector<std::pair<Value *, bool>> &PostorderStack,
      DenseSet<Value *> &Visited) const;

-  std::vector<WeakVH> collectFlatAddressExpressions(Function &F) const;
+  std::vector<WeakTrackingVH> collectFlatAddressExpressions(Function &F) const;

  Value *cloneValueWithNewAddressSpace(
      Value *V, unsigned NewAddrSpace,
@@ -303,7 +303,7 @@ void InferAddressSpaces::appendsFlatAddressExpressionToPostorderStack(

 // Returns all flat address expressions in function F. The elements are ordered
 // ordered in postorder.
-std::vector<WeakVH>
+std::vector<WeakTrackingVH>
 InferAddressSpaces::collectFlatAddressExpressions(Function &F) const {
  // This function implements a non-recursive postorder traversal of a partial
  // use-def graph of function F.
@@ -352,7 +352,7 @@ InferAddressSpaces::collectFlatAddressExpressions(Function &F) const {
    }
  }

-  std::vector<WeakVH> Postorder; // The resultant postorder.
+  std::vector<WeakTrackingVH> Postorder; // The resultant postorder.
  while (!PostorderStack.empty()) {
    Value *TopVal = PostorderStack.back().first;
    // If the operands of the expression on the top are already explored,
@@ -583,7 +583,7 @@ bool InferAddressSpaces::runOnFunction(Function &F) {
    return false;

  // Collects all flat address expressions in postorder.
-  std::vector<WeakVH> Postorder = collectFlatAddressExpressions(F);
+  std::vector<WeakTrackingVH> Postorder = collectFlatAddressExpressions(F);

  // Runs a data-flow analysis to refine the address spaces of every expression
  // in Postorder.
@@ -596,9 +596,9 @@ bool InferAddressSpaces::runOnFunction(Function &F) {
 }

 // Constants need to be tracked through RAUW to handle cases with nested
-// constant expressions, so wrap values in WeakVH.
+// constant expressions, so wrap values in WeakTrackingVH.
 void InferAddressSpaces::inferAddressSpaces(
-    ArrayRef<WeakVH> Postorder,
+    ArrayRef<WeakTrackingVH> Postorder,
    ValueToAddrSpaceMapTy *InferredAddrSpace) const {
  SetVector<Value *> Worklist(Postorder.begin(), Postorder.end());
  // Initially, all expressions are in the uninitialized address space.
@@ -810,8 +810,8 @@ static Value::use_iterator skipToNextUser(Value::use_iterator I,
 }

 bool InferAddressSpaces::rewriteWithNewAddressSpaces(
-    ArrayRef<WeakVH> Postorder,
-    const ValueToAddrSpaceMapTy &InferredAddrSpace, Function *F) const {
+    ArrayRef<WeakTrackingVH> Postorder,
+    const ValueToAddrSpaceMapTy &InferredAddrSpace, Function *F) const {
  // For each address expression to be modified, creates a clone of it with its
  // pointer operands converted to the new address space. Since the pointer
  // operands are converted, the clone is naturally in the new address space by
@@ -841,7 +841,7 @@ bool InferAddressSpaces::rewriteWithNewAddressSpaces(
  SmallVector<Instruction *, 16> DeadInstructions;

  // Replaces the uses of the old address expressions with the new ones.
-  for (const WeakVH &WVH : Postorder) {
+  for (const WeakTrackingVH &WVH : Postorder) {
    assert(WVH && "value was unexpectedly deleted");
    Value *V = WVH;
    Value *NewV = ValueWithNewAddrSpace.lookup(V);
diff --git a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index 5042fc18d7c..0f4a1b436ea 100644
--- a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -499,7 +499,7 @@ bool LoopIdiomRecognize::runOnLoopBlock(
    Instruction *Inst = &*I++;
    // Look for memset instructions, which may be optimized to a larger memset.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst)) {
-      WeakVH InstPtr(&*I);
+      WeakTrackingVH InstPtr(&*I);
      if (!processLoopMemSet(MSI, BECount))
        continue;
      MadeChange = true;
diff --git a/lib/Transforms/Scalar/LoopSimplifyCFG.cpp b/lib/Transforms/Scalar/LoopSimplifyCFG.cpp
index a5a81c33a8e..35c05e84fd6 100644
--- a/lib/Transforms/Scalar/LoopSimplifyCFG.cpp
+++ b/lib/Transforms/Scalar/LoopSimplifyCFG.cpp
@@ -40,7 +40,7 @@ static bool simplifyLoopCFG(Loop &L, DominatorTree &DT, LoopInfo &LI) {
  bool Changed = false;
  // Copy blocks into a temporary array to avoid iterator invalidation issues
  // as we remove them.
-  SmallVector<WeakVH, 16> Blocks(L.blocks());
+  SmallVector<WeakTrackingVH, 16> Blocks(L.blocks());

  for (auto &Block : Blocks) {
    // Attempt to merge blocks in the trivial case. Don't modify blocks which
diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index af137f6faa6..ccedb98d7fa 100644
--- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -900,7 +900,7 @@ static bool isHighCostExpansion(const SCEV *S,

 /// If any of the instructions is the specified set are trivially dead, delete
 /// them and see if this makes any of their operands subsequently dead.
 static bool
-DeleteTriviallyDeadInstructions(SmallVectorImpl<WeakVH> &DeadInsts) {
+DeleteTriviallyDeadInstructions(SmallVectorImpl<WeakTrackingVH> &DeadInsts) {
  bool Changed = false;

  while (!DeadInsts.empty()) {
@@ -1845,7 +1845,7 @@ class LSRInstance {
  void FinalizeChain(IVChain &Chain);
  void CollectChains();
  void GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
-                       SmallVectorImpl<WeakVH> &DeadInsts);
+                       SmallVectorImpl<WeakTrackingVH> &DeadInsts);

  void CollectInterestingTypesAndFactors();
  void CollectFixupsAndInitialFormulae();
@@ -1920,19 +1920,15 @@ class LSRInstance {
                                      const LSRUse &LU,
                                      SCEVExpander &Rewriter) const;

-  Value *Expand(const LSRUse &LU, const LSRFixup &LF,
-                const Formula &F,
-                BasicBlock::iterator IP,
-                SCEVExpander &Rewriter,
-                SmallVectorImpl<WeakVH> &DeadInsts) const;
+  Value *Expand(const LSRUse &LU, const LSRFixup &LF, const Formula &F,
+                BasicBlock::iterator IP, SCEVExpander &Rewriter,
+                SmallVectorImpl<WeakTrackingVH> &DeadInsts) const;
  void RewriteForPHI(PHINode *PN, const LSRUse &LU, const LSRFixup &LF,
-                     const Formula &F,
-                     SCEVExpander &Rewriter,
-                     SmallVectorImpl<WeakVH> &DeadInsts) const;
-  void Rewrite(const LSRUse &LU, const LSRFixup &LF,
-               const Formula &F,
+                     const Formula &F, SCEVExpander &Rewriter,
+                     SmallVectorImpl<WeakTrackingVH> &DeadInsts) const;
+  void Rewrite(const LSRUse &LU, const LSRFixup &LF, const Formula &F,
               SCEVExpander &Rewriter,
-               SmallVectorImpl<WeakVH> &DeadInsts) const;
+               SmallVectorImpl<WeakTrackingVH> &DeadInsts) const;

  void ImplementSolution(const SmallVectorImpl<const Formula *> &Solution);

 public:
@@ -3014,7 +3010,7 @@ static bool canFoldIVIncExpr(const SCEV *IncExpr, Instruction *UserInst,
 /// Generate an add or subtract for each IVInc in a chain to materialize the IV
 /// user's operand from the previous IV user's operand.
 void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
-                                  SmallVectorImpl<WeakVH> &DeadInsts) {
+                                  SmallVectorImpl<WeakTrackingVH> &DeadInsts) {
  // Find the new IVOperand for the head of the chain. It may have been replaced
  // by LSR.
  const IVInc &Head = Chain.Incs[0];
@@ -4759,12 +4755,10 @@ LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator LowestIP,

 /// Emit instructions for the leading candidate expression for this LSRUse (this
 /// is called "expanding").
-Value *LSRInstance::Expand(const LSRUse &LU,
-                           const LSRFixup &LF,
-                           const Formula &F,
-                           BasicBlock::iterator IP,
+Value *LSRInstance::Expand(const LSRUse &LU, const LSRFixup &LF,
+                           const Formula &F, BasicBlock::iterator IP,
                           SCEVExpander &Rewriter,
-                           SmallVectorImpl<WeakVH> &DeadInsts) const {
+                           SmallVectorImpl<WeakTrackingVH> &DeadInsts) const {
  if (LU.RigidFormula)
    return LF.OperandValToReplace;

@@ -4939,12 +4933,9 @@ Value *LSRInstance::Expand(const LSRUse &LU,
 /// Helper for Rewrite. PHI nodes are special because the use of their operands
 /// effectively happens in their predecessor blocks, so the expression may need
 /// to be expanded in multiple places.
-void LSRInstance::RewriteForPHI(PHINode *PN,
-                                const LSRUse &LU,
-                                const LSRFixup &LF,
-                                const Formula &F,
-                                SCEVExpander &Rewriter,
-                                SmallVectorImpl<WeakVH> &DeadInsts) const {
+void LSRInstance::RewriteForPHI(
+    PHINode *PN, const LSRUse &LU, const LSRFixup &LF, const Formula &F,
+    SCEVExpander &Rewriter, SmallVectorImpl<WeakTrackingVH> &DeadInsts) const {
  DenseMap<BasicBlock *, Value *> Inserted;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
    if (PN->getIncomingValue(i) == LF.OperandValToReplace) {
@@ -5016,11 +5007,9 @@ void LSRInstance::RewriteForPHI(PHINode *PN,
 /// Emit instructions for the leading candidate expression for this LSRUse (this
 /// is called "expanding"), and update the UserInst to reference the newly
 /// expanded value.
-void LSRInstance::Rewrite(const LSRUse &LU,
-                          const LSRFixup &LF,
-                          const Formula &F,
-                          SCEVExpander &Rewriter,
-                          SmallVectorImpl<WeakVH> &DeadInsts) const {
+void LSRInstance::Rewrite(const LSRUse &LU, const LSRFixup &LF,
+                          const Formula &F, SCEVExpander &Rewriter,
+                          SmallVectorImpl<WeakTrackingVH> &DeadInsts) const {
  // First, find an insertion point that dominates UserInst. For PHI nodes,
  // find the nearest block which dominates all the relevant uses.
  if (PHINode *PN = dyn_cast<PHINode>(LF.UserInst)) {
@@ -5058,7 +5047,7 @@ void LSRInstance::ImplementSolution(
    const SmallVectorImpl<const Formula *> &Solution) {
  // Keep track of instructions we may have made dead, so that
  // we can remove them after we are done working.
-  SmallVector<WeakVH, 16> DeadInsts;
+  SmallVector<WeakTrackingVH, 16> DeadInsts;

  SCEVExpander Rewriter(SE, L->getHeader()->getModule()->getDataLayout(),
                        "lsr");
@@ -5308,7 +5297,7 @@ static bool ReduceLoopStrength(Loop *L, IVUsers &IU, ScalarEvolution &SE,
  // Remove any extra phis created by processing inner loops.
  Changed |= DeleteDeadPHIs(L->getHeader());
  if (EnablePhiElim && L->isLoopSimplifyForm()) {
-    SmallVector<WeakVH, 16> DeadInsts;
+    SmallVector<WeakTrackingVH, 16> DeadInsts;
    const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
    SCEVExpander Rewriter(SE, DL, "lsr");
#ifndef NDEBUG
diff --git a/lib/Transforms/Scalar/LoopUnswitch.cpp b/lib/Transforms/Scalar/LoopUnswitch.cpp
index 39289340e03..6ef1464e933 100644
--- a/lib/Transforms/Scalar/LoopUnswitch.cpp
+++ b/lib/Transforms/Scalar/LoopUnswitch.cpp
@@ -1231,11 +1231,12 @@ void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,
  LoopProcessWorklist.push_back(NewLoop);
  redoLoop = true;

-  // Keep a WeakVH holding onto LIC. If the first call to RewriteLoopBody
+  // Keep a WeakTrackingVH holding onto LIC. If the first call to
+  // RewriteLoopBody
  // deletes the instruction (for example by simplifying a PHI that feeds into
  // the condition that we're unswitching on), we don't rewrite the second
  // iteration.
-  WeakVH LICHandle(LIC);
+  WeakTrackingVH LICHandle(LIC);

  // Now we rewrite the original code to know that the condition is true and the
  // new code to know that the condition is false.
diff --git a/lib/Transforms/Scalar/NaryReassociate.cpp b/lib/Transforms/Scalar/NaryReassociate.cpp
index c5bf2f28d18..d0bfe360389 100644
--- a/lib/Transforms/Scalar/NaryReassociate.cpp
+++ b/lib/Transforms/Scalar/NaryReassociate.cpp
@@ -211,7 +211,8 @@ bool NaryReassociatePass::doOneIteration(Function &F) {
        Changed = true;
        SE->forgetValue(&*I);
        I->replaceAllUsesWith(NewI);
-        // If SeenExprs constains I's WeakVH, that entry will be replaced with
+        // If SeenExprs contains I's WeakTrackingVH, that entry will be
+        // replaced with
        // nullptr.
        RecursivelyDeleteTriviallyDeadInstructions(&*I, TLI);
        I = NewI->getIterator();
@@ -219,7 +220,7 @@ bool NaryReassociatePass::doOneIteration(Function &F) {
      // Add the rewritten instruction to SeenExprs; the original instruction
      // is deleted.
      const SCEV *NewSCEV = SE->getSCEV(&*I);
-      SeenExprs[NewSCEV].push_back(WeakVH(&*I));
+      SeenExprs[NewSCEV].push_back(WeakTrackingVH(&*I));
      // Ideally, NewSCEV should equal OldSCEV because tryReassociate(I)
      // is equivalent to I. However, ScalarEvolution::getSCEV may
      // weaken nsw causing NewSCEV not to equal OldSCEV. For example, suppose
@@ -239,7 +240,7 @@ bool NaryReassociatePass::doOneIteration(Function &F) {
      //
      // This improvement is exercised in @reassociate_gep_nsw in nary-gep.ll.
      if (NewSCEV != OldSCEV)
-        SeenExprs[OldSCEV].push_back(WeakVH(&*I));
+        SeenExprs[OldSCEV].push_back(WeakTrackingVH(&*I));
    }
  }
 }
@@ -494,7 +495,8 @@ NaryReassociatePass::findClosestMatchingDominator(const SCEV *CandidateExpr,
  // future instruction either. Therefore, we pop it out of the stack. This
  // optimization makes the algorithm O(n).
  while (!Candidates.empty()) {
-    // Candidates stores WeakVHs, so a candidate can be nullptr if it's removed
+    // Candidates stores WeakTrackingVHs, so a candidate can be nullptr if it's
+    // removed
    // during rewriting.
    if (Value *Candidate = Candidates.back()) {
      Instruction *CandidateInstruction = cast<Instruction>(Candidate);
diff --git a/lib/Transforms/Scalar/Reassociate.cpp b/lib/Transforms/Scalar/Reassociate.cpp
index 3dcab609078..ef29d414160 100644
--- a/lib/Transforms/Scalar/Reassociate.cpp
+++ b/lib/Transforms/Scalar/Reassociate.cpp
@@ -982,7 +982,7 @@ static unsigned FindInOperandList(SmallVectorImpl<ValueEntry> &Ops, unsigned i,
 /// Emit a tree of add instructions, summing Ops together
 /// and returning the result. Insert the tree before I.
 static Value *EmitAddTreeOfValues(Instruction *I,
-                                  SmallVectorImpl<WeakVH> &Ops){
+                                  SmallVectorImpl<WeakTrackingVH> &Ops) {
  if (Ops.size() == 1) return Ops.back();

  Value *V1 = Ops.back();
@@ -1559,7 +1559,7 @@ Value *ReassociatePass::OptimizeAdd(Instruction *I,
                       ? BinaryOperator::CreateAdd(MaxOccVal, MaxOccVal)
                       : BinaryOperator::CreateFAdd(MaxOccVal, MaxOccVal);

-    SmallVector<WeakVH, 4> NewMulOps;
+    SmallVector<WeakTrackingVH, 4> NewMulOps;
    for (unsigned i = 0; i != Ops.size(); ++i) {
      // Only try to remove factors from expressions we're allowed to.
      BinaryOperator *BOp =
diff --git a/lib/Transforms/Utils/BasicBlockUtils.cpp b/lib/Transforms/Utils/BasicBlockUtils.cpp
index 22af21d55c0..3d5cbfc93f2 100644
--- a/lib/Transforms/Utils/BasicBlockUtils.cpp
+++ b/lib/Transforms/Utils/BasicBlockUtils.cpp
@@ -78,8 +78,8 @@ void llvm::FoldSingleEntryPHINodes(BasicBlock *BB,

 bool llvm::DeleteDeadPHIs(BasicBlock *BB, const TargetLibraryInfo *TLI) {
  // Recursively deleting a PHI may cause multiple PHIs to be deleted
-  // or RAUW'd undef, so use an array of WeakVH for the PHIs to delete.
-  SmallVector<WeakVH, 8> PHIs;
+  // or RAUW'd undef, so use an array of WeakTrackingVH for the PHIs to delete.
+  SmallVector<WeakTrackingVH, 8> PHIs;
  for (BasicBlock::iterator I = BB->begin();
       PHINode *PN = dyn_cast<PHINode>(I); ++I)
    PHIs.push_back(PN);
diff --git a/lib/Transforms/Utils/CloneFunction.cpp b/lib/Transforms/Utils/CloneFunction.cpp
index 385c12302e0..d5124ac8901 100644
--- a/lib/Transforms/Utils/CloneFunction.cpp
+++ b/lib/Transforms/Utils/CloneFunction.cpp
@@ -245,7 +245,7 @@ namespace {
 void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
                                       BasicBlock::const_iterator StartingInst,
                                       std::vector<const BasicBlock*> &ToClone){
-  WeakVH &BBEntry = VMap[BB];
+  WeakTrackingVH &BBEntry = VMap[BB];

  // Have we already cloned this block?
  if (BBEntry) return;
@@ -547,7 +547,7 @@ void llvm::CloneAndPruneIntoFromInst(Function *NewFunc, const Function *OldFunc,
  // Make a second pass over the PHINodes now that all of them have been
  // remapped into the new function, simplifying the PHINode and performing any
  // recursive simplifications exposed. This will transparently update the
-  // WeakVH in the VMap. Notably, we rely on that so that if we coalesce
+  // WeakTrackingVH in the VMap. Notably, we rely on that so that if we coalesce
  // two PHINodes, the iteration over the old PHIs remains valid, and the
  // mapping will just map us to the new node (which may not even be a PHI
  // node).
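
The CloneFunction.cpp comment just above is the canonical use of this tracking behavior. Here is a hypothetical sketch (Orig, Clone, and Simplified are illustrative names, not from the patch) of why a ValueToValueMapTy entry stays valid across later simplification:

#include "llvm/IR/Instruction.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
using namespace llvm;

// ValueToValueMapTy stores WeakTrackingVH values, so a mapping transparently
// follows a clone that later gets replaced and erased.
void vmapSketch(const Value *Orig, Instruction *Clone, Value *Simplified) {
  ValueToValueMapTy VMap;
  VMap[Orig] = Clone;                    // map the original to its clone
  Clone->replaceAllUsesWith(Simplified); // a later pass simplifies the clone
  Clone->eraseFromParent();
  Value *Mapped = VMap[Orig]; // yields Simplified, not a dangling pointer
  (void)Mapped;
}
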
diff --git a/lib/Transforms/Utils/Local.cpp b/lib/Transforms/Utils/Local.cpp
index 364b33d3c4e..ce6b703f352 100644
--- a/lib/Transforms/Utils/Local.cpp
+++ b/lib/Transforms/Utils/Local.cpp
@@ -562,7 +562,7 @@ void llvm::RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred) {
  // that can be removed.
  BB->removePredecessor(Pred, true);

-  WeakVH PhiIt = &BB->front();
+  WeakTrackingVH PhiIt = &BB->front();
  while (PHINode *PN = dyn_cast<PHINode>(PhiIt)) {
    PhiIt = &*++BasicBlock::iterator(cast<Instruction>(PhiIt));
    Value *OldPhiIt = PhiIt;
@@ -1476,7 +1476,7 @@ BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI,
  II->setAttributes(CI->getAttributes());

  // Make sure that anything using the call now uses the invoke! This also
-  // updates the CallGraph if present, because it uses a WeakVH.
+  // updates the CallGraph if present, because it uses a WeakTrackingVH.
  CI->replaceAllUsesWith(II);

  // Delete the original call
diff --git a/lib/Transforms/Utils/LoopUnroll.cpp b/lib/Transforms/Utils/LoopUnroll.cpp
index 85684a0a994..4ab4d7949d2 100644
--- a/lib/Transforms/Utils/LoopUnroll.cpp
+++ b/lib/Transforms/Utils/LoopUnroll.cpp
@@ -757,7 +757,7 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount, bool Force,

  // Simplify any new induction variables in the partially unrolled loop.
  if (SE && !CompletelyUnroll && Count > 1) {
-    SmallVector<WeakVH, 16> DeadInsts;
+    SmallVector<WeakTrackingVH, 16> DeadInsts;
    simplifyLoopIVs(L, SE, DT, LI, DeadInsts);

    // Aggressively clean up dead instructions that simplifyLoopIVs already
diff --git a/lib/Transforms/Utils/SimplifyIndVar.cpp b/lib/Transforms/Utils/SimplifyIndVar.cpp
index a4cc6a031ad..02a5d3dbead 100644
--- a/lib/Transforms/Utils/SimplifyIndVar.cpp
+++ b/lib/Transforms/Utils/SimplifyIndVar.cpp
@@ -51,13 +51,13 @@ namespace {
    ScalarEvolution *SE;
    DominatorTree *DT;

-    SmallVectorImpl<WeakVH> &DeadInsts;
+    SmallVectorImpl<WeakTrackingVH> &DeadInsts;

    bool Changed;

  public:
    SimplifyIndvar(Loop *Loop, ScalarEvolution *SE, DominatorTree *DT,
-                   LoopInfo *LI,SmallVectorImpl<WeakVH> &Dead)
+                   LoopInfo *LI, SmallVectorImpl<WeakTrackingVH> &Dead)
      : L(Loop), LI(LI), SE(SE), DT(DT), DeadInsts(Dead), Changed(false) {
      assert(LI && "IV simplification requires LoopInfo");
    }
@@ -701,7 +701,7 @@ void IVVisitor::anchor() { }
 /// Simplify instructions that use this induction variable
 /// by using ScalarEvolution to analyze the IV's recurrence.
 bool simplifyUsersOfIV(PHINode *CurrIV, ScalarEvolution *SE, DominatorTree *DT,
-                       LoopInfo *LI, SmallVectorImpl<WeakVH> &Dead,
+                       LoopInfo *LI, SmallVectorImpl<WeakTrackingVH> &Dead,
                       IVVisitor *V) {
  SimplifyIndvar SIV(LI->getLoopFor(CurrIV->getParent()), SE, DT, LI, Dead);
  SIV.simplifyUsers(CurrIV, V);
@@ -711,7 +711,7 @@ bool simplifyUsersOfIV(PHINode *CurrIV, ScalarEvolution *SE, DominatorTree *DT,
 /// Simplify users of induction variables within this
 /// loop. This does not actually change or add IVs.
 bool simplifyLoopIVs(Loop *L, ScalarEvolution *SE, DominatorTree *DT,
-                     LoopInfo *LI, SmallVectorImpl<WeakVH> &Dead) {
+                     LoopInfo *LI, SmallVectorImpl<WeakTrackingVH> &Dead) {
  bool Changed = false;
  for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I) {
    Changed |= simplifyUsersOfIV(cast<PHINode>(I), SE, DT, LI, Dead);
diff --git a/lib/Transforms/Vectorize/SLPVectorizer.cpp b/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 55494440470..f112c555205 100644
--- a/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -3899,11 +3899,13 @@ bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_,
 }

 /// \brief Check that the Values in the slice in VL array are still existent in
-/// the WeakVH array.
+/// the WeakTrackingVH array.
 /// Vectorization of part of the VL array may cause later values in the VL array
-/// to become invalid. We track when this has happened in the WeakVH array.
-static bool hasValueBeenRAUWed(ArrayRef<Value *> VL, ArrayRef<WeakVH> VH,
-                               unsigned SliceBegin, unsigned SliceSize) {
+/// to become invalid. We track when this has happened in the WeakTrackingVH
+/// array.
+static bool hasValueBeenRAUWed(ArrayRef<Value *> VL,
+                               ArrayRef<WeakTrackingVH> VH, unsigned SliceBegin,
+                               unsigned SliceSize) {
  VL = VL.slice(SliceBegin, SliceSize);
  VH = VH.slice(SliceBegin, SliceSize);
  return !std::equal(VL.begin(), VL.end(), VH.begin());
@@ -3921,7 +3923,7 @@ bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R,
    return false;

  // Keep track of values that were deleted by vectorizing in the loop below.
-  SmallVector<WeakVH, 8> TrackValues(Chain.begin(), Chain.end());
+  SmallVector<WeakTrackingVH, 8> TrackValues(Chain.begin(), Chain.end());

  bool Changed = false;
  // Look for profitable vectorizable trees at all offsets, starting at zero.
@@ -4107,7 +4109,7 @@ bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
  bool Changed = false;

  // Keep track of values that were deleted by vectorizing in the loop below.
-  SmallVector<WeakVH, 8> TrackValues(VL.begin(), VL.end());
+  SmallVector<WeakTrackingVH, 8> TrackValues(VL.begin(), VL.end());

  unsigned NextInst = 0, MaxInst = VL.size();
  for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF;
@@ -4734,7 +4736,7 @@ static Value *getReductionValue(const DominatorTree *DT, PHINode *P,

 namespace {
 /// Tracks instructons and its children.
-class WeakVHWithLevel final : public CallbackVH {
+class WeakTrackingVHWithLevel final : public CallbackVH {
  /// Operand index of the instruction currently beeing analized.
  unsigned Level = 0;
  /// Is this the instruction that should be vectorized, or are we now
@@ -4743,8 +4745,8 @@ class WeakVHWithLevel final : public CallbackVH {
  bool IsInitial = true;

 public:
-  explicit WeakVHWithLevel() = default;
-  WeakVHWithLevel(Value *V) : CallbackVH(V){};
+  explicit WeakTrackingVHWithLevel() = default;
+  WeakTrackingVHWithLevel(Value *V) : CallbackVH(V){};
  /// Restart children analysis each time it is repaced by the new instruction.
  void allUsesReplacedWith(Value *New) override {
    setValPtr(New);
@@ -4771,7 +4773,7 @@ public:
           cast<Instruction>(getValPtr())->getNumOperands() > Level);
    return cast<Instruction>(getValPtr())->getOperand(Level++);
  }
-  virtual ~WeakVHWithLevel() = default;
+  virtual ~WeakTrackingVHWithLevel() = default;
 };
 } // namespace

@@ -4793,7 +4795,7 @@ static bool canBeVectorized(
  if (Root->getParent() != BB)
    return false;
-  SmallVector<WeakVHWithLevel, 8> Stack(1, Root);
+  SmallVector<WeakTrackingVHWithLevel, 8> Stack(1, Root);
  SmallSet<Value *, 8> VisitedInstrs;
  bool Res = false;
  while (!Stack.empty()) {
@@ -5069,7 +5071,8 @@ bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
    SetVector<Value *> Candidates(GEPList.begin(), GEPList.end());

    // Some of the candidates may have already been vectorized after we
-    // initially collected them. If so, the WeakVHs will have nullified the
+    // initially collected them. If so, the WeakTrackingVHs will have
+    // nullified the
    // values, so remove them from the set of candidates.
    Candidates.remove(nullptr);
diff --git a/unittests/IR/ValueHandleTest.cpp b/unittests/IR/ValueHandleTest.cpp
index e19b3a639d3..96331288d07 100644
--- a/unittests/IR/ValueHandleTest.cpp
+++ b/unittests/IR/ValueHandleTest.cpp
@@ -34,8 +34,8 @@ public:
  ConcreteCallbackVH(Value *V) : CallbackVH(V) {}
 };

-TEST_F(ValueHandle, WeakVH_BasicOperation) {
-  WeakVH WVH(BitcastV.get());
+TEST_F(ValueHandle, WeakTrackingVH_BasicOperation) {
+  WeakTrackingVH WVH(BitcastV.get());
  EXPECT_EQ(BitcastV.get(), WVH);
  WVH = ConstantV;
  EXPECT_EQ(ConstantV, WVH);
@@ -46,9 +46,9 @@
  EXPECT_EQ(Type::getInt32Ty(Context), (*WVH).getType());
 }

-TEST_F(ValueHandle, WeakVH_Comparisons) {
-  WeakVH BitcastWVH(BitcastV.get());
-  WeakVH ConstantWVH(ConstantV);
+TEST_F(ValueHandle, WeakTrackingVH_Comparisons) {
+  WeakTrackingVH BitcastWVH(BitcastV.get());
+  WeakTrackingVH ConstantWVH(ConstantV);

  EXPECT_TRUE(BitcastWVH == BitcastWVH);
  EXPECT_TRUE(BitcastV.get() == BitcastWVH);
@@ -79,20 +79,20 @@
  EXPECT_EQ(BV >= CV, BitcastWVH >= ConstantV);
 }

-TEST_F(ValueHandle, WeakVH_FollowsRAUW) {
-  WeakVH WVH(BitcastV.get());
-  WeakVH WVH_Copy(WVH);
-  WeakVH WVH_Recreated(BitcastV.get());
+TEST_F(ValueHandle, WeakTrackingVH_FollowsRAUW) {
+  WeakTrackingVH WVH(BitcastV.get());
+  WeakTrackingVH WVH_Copy(WVH);
+  WeakTrackingVH WVH_Recreated(BitcastV.get());
  BitcastV->replaceAllUsesWith(ConstantV);
  EXPECT_EQ(ConstantV, WVH);
  EXPECT_EQ(ConstantV, WVH_Copy);
  EXPECT_EQ(ConstantV, WVH_Recreated);
 }

-TEST_F(ValueHandle, WeakVH_NullOnDeletion) {
-  WeakVH WVH(BitcastV.get());
-  WeakVH WVH_Copy(WVH);
-  WeakVH WVH_Recreated(BitcastV.get());
+TEST_F(ValueHandle, WeakTrackingVH_NullOnDeletion) {
+  WeakTrackingVH WVH(BitcastV.get());
+  WeakTrackingVH WVH_Copy(WVH);
+  WeakTrackingVH WVH_Recreated(BitcastV.get());
  BitcastV.reset();
  Value *null_value = nullptr;
  EXPECT_EQ(null_value, WVH);
@@ -343,11 +343,11 @@ TEST_F(ValueHandle, DestroyingOtherVHOnSameValueDoesntBreakIteration) {

  class DestroyingVH final : public CallbackVH {
  public:
-    std::unique_ptr<WeakVH> ToClear[2];
+    std::unique_ptr<WeakTrackingVH> ToClear[2];
    DestroyingVH(Value *V) {
-      ToClear[0].reset(new WeakVH(V));
+      ToClear[0].reset(new WeakTrackingVH(V));
      setValPtr(V);
-      ToClear[1].reset(new WeakVH(V));
+      ToClear[1].reset(new WeakTrackingVH(V));
    }
    void deleted() override {
      ToClear[0].reset();
@@ -361,9 +361,9 @@ TEST_F(ValueHandle, DestroyingOtherVHOnSameValueDoesntBreakIteration) {
  };

  {
-    WeakVH ShouldBeVisited1(BitcastV.get());
+    WeakTrackingVH ShouldBeVisited1(BitcastV.get());
    DestroyingVH C(BitcastV.get());
-    WeakVH ShouldBeVisited2(BitcastV.get());
+    WeakTrackingVH ShouldBeVisited2(BitcastV.get());

    BitcastV->replaceAllUsesWith(ConstantV);
    EXPECT_EQ(ConstantV, static_cast<Value *>(ShouldBeVisited1));
@@ -371,9 +371,9 @@ TEST_F(ValueHandle, DestroyingOtherVHOnSameValueDoesntBreakIteration) {
  }

  {
-    WeakVH ShouldBeVisited1(BitcastV.get());
+    WeakTrackingVH ShouldBeVisited1(BitcastV.get());
    DestroyingVH C(BitcastV.get());
-    WeakVH ShouldBeVisited2(BitcastV.get());
+    WeakTrackingVH ShouldBeVisited2(BitcastV.get());

    BitcastV.reset();
    EXPECT_EQ(nullptr, static_cast<Value *>(ShouldBeVisited1));
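
The tests above pin down the contract that motivated the new name: a handle that follows RAUW and goes null on deletion. The common consumer pattern across the passes touched in this patch looks like the sketch below (drainWorklist and the commented-out process call are illustrative, not from the patch):

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/ValueHandle.h"
using namespace llvm;

// Collect handles up front, then tolerate entries that went null because a
// later transformation deleted the underlying instruction.
static void drainWorklist(SmallVectorImpl<WeakTrackingVH> &Worklist) {
  while (!Worklist.empty()) {
    Value *V = Worklist.pop_back_val();
    if (!V)
      continue; // the instruction was deleted; its handle nulled itself
    // process(cast<Instruction>(V)); // pass-specific work goes here
  }
}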