//===- LoopInfo.cpp - Natural Loop Calculator -----------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the LoopInfo class that is used to identify natural loops
// and determine the loop depth of various nodes of the CFG. Note that the
// loops identified may actually be several natural loops that share the same
// header node... not just a single natural loop.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/LoopInfoImpl.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
using namespace llvm;
// Explicitly instantiate methods in LoopInfoImpl.h for IR-level Loops.
template class llvm::LoopBase<BasicBlock, Loop>;
template class llvm::LoopInfoBase<BasicBlock, Loop>;
// Always verify loopinfo if expensive checking is enabled.
#ifdef XDEBUG
static bool VerifyLoopInfo = true;
#else
static bool VerifyLoopInfo = false;
#endif
static cl::opt<bool,true>
VerifyLoopInfoX("verify-loop-info", cl::location(VerifyLoopInfo),
cl::desc("Verify loop info (time consuming)"));
// Loop identifier metadata name.
static const char *const LoopMDName = "llvm.loop";
//===----------------------------------------------------------------------===//
// Loop implementation
//
bool Loop::isLoopInvariant(const Value *V) const {
if (const Instruction *I = dyn_cast<Instruction>(V))
return !contains(I);
return true; // All non-instructions are loop invariant
}
bool Loop::hasLoopInvariantOperands(const Instruction *I) const {
return all_of(I->operands(), [this](Value *V) { return isLoopInvariant(V); });
}
bool Loop::makeLoopInvariant(Value *V, bool &Changed,
Instruction *InsertPt) const {
if (Instruction *I = dyn_cast<Instruction>(V))
return makeLoopInvariant(I, Changed, InsertPt);
return true; // All non-instructions are loop-invariant.
}
bool Loop::makeLoopInvariant(Instruction *I, bool &Changed,
Instruction *InsertPt) const {
// Test if the value is already loop-invariant.
if (isLoopInvariant(I))
return true;
if (!isSafeToSpeculativelyExecute(I))
return false;
if (I->mayReadFromMemory())
return false;
// EH block instructions are immobile.
if (I->isEHPad())
return false;
// Determine the insertion point, unless one was given.
if (!InsertPt) {
BasicBlock *Preheader = getLoopPreheader();
// Without a preheader, hoisting is not feasible.
if (!Preheader)
return false;
InsertPt = Preheader->getTerminator();
}
// Don't hoist instructions with loop-variant operands.
for (Value *Operand : I->operands())
if (!makeLoopInvariant(Operand, Changed, InsertPt))
return false;
// Hoist.
I->moveBefore(InsertPt);
// There is a possibility of hoisting this instruction above some arbitrary
// condition. Any metadata defined on it can be control dependent on this
// condition. Conservatively strip it here so that we don't give any wrong
// information to the optimizer.
I->dropUnknownNonDebugMetadata();
Changed = true;
return true;
}
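// Usage sketch (illustrative only, not part of the original file): a pass
// holding a Loop *L and an Instruction *I could try to hoist I with:
//
//   bool Changed = false;
//   if (L->makeLoopInvariant(I, Changed) && Changed)
//     ... // I now lives in L's preheader; the IR was modified.
//
// The return value reports whether I ended up loop-invariant; Changed records
// whether any instruction was actually moved.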
PHINode *Loop::getCanonicalInductionVariable() const {
BasicBlock *H = getHeader();
BasicBlock *Incoming = nullptr, *Backedge = nullptr;
pred_iterator PI = pred_begin(H);
assert(PI != pred_end(H) &&
"Loop must have at least one backedge!");
Backedge = *PI++;
if (PI == pred_end(H)) return nullptr; // dead loop
Incoming = *PI++;
if (PI != pred_end(H)) return nullptr; // multiple backedges?
if (contains(Incoming)) {
if (contains(Backedge))
return nullptr;
std::swap(Incoming, Backedge);
} else if (!contains(Backedge))
return nullptr;
// Loop over all of the PHI nodes, looking for a canonical indvar.
for (BasicBlock::iterator I = H->begin(); isa<PHINode>(I); ++I) {
PHINode *PN = cast<PHINode>(I);
if (ConstantInt *CI =
dyn_cast<ConstantInt>(PN->getIncomingValueForBlock(Incoming)))
if (CI->isNullValue())
if (Instruction *Inc =
dyn_cast<Instruction>(PN->getIncomingValueForBlock(Backedge)))
if (Inc->getOpcode() == Instruction::Add &&
Inc->getOperand(0) == PN)
if (ConstantInt *CI = dyn_cast<ConstantInt>(Inc->getOperand(1)))
if (CI->equalsInt(1))
return PN;
}
return nullptr;
}
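// For reference, the pattern matched above corresponds to IR of the form
// (block and value names are illustrative):
//
//   header:
//     %iv = phi i32 [ 0, %preheader ], [ %iv.next, %backedge ]
//     ...
//     %iv.next = add i32 %iv, 1
//
// i.e. a header PHI that starts at zero on the incoming edge and is
// incremented by one along the backedge.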
bool Loop::isLCSSAForm(DominatorTree &DT) const {
for (BasicBlock *BB : this->blocks()) {
for (Instruction &I : *BB) {
// Tokens can't be used in PHI nodes and live-out tokens prevent loop
// optimizations, so for the purposes of checking LCSSA form, we can
// ignore them.
if (I.getType()->isTokenTy())
continue;
for (Use &U : I.uses()) {
Instruction *UI = cast<Instruction>(U.getUser());
BasicBlock *UserBB = UI->getParent();
if (PHINode *P = dyn_cast<PHINode>(UI))
UserBB = P->getIncomingBlock(U);
// Check the current block, as a fast-path, before checking whether
// the use is anywhere in the loop. Most values are used in the same
// block they are defined in. Also, blocks not reachable from the
// entry are special; uses in them don't need to go through PHIs.
if (UserBB != BB &&
!contains(UserBB) &&
DT.isReachableFromEntry(UserBB))
return false;
}
}
}
return true;
}
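// For illustration, LCSSA form means a value defined in the loop and used
// outside of it is only reachable through a PHI in an exit block, e.g.
// (names illustrative):
//
//   loop:
//     %x = add i32 %a, %b
//     br i1 %cond, label %loop, label %exit
//   exit:
//     %x.lcssa = phi i32 [ %x, %loop ]
//
// Code outside the loop then uses %x.lcssa instead of %x directly, which is
// exactly the property the walk above checks.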
bool Loop::isRecursivelyLCSSAForm(DominatorTree &DT) const {
if (!isLCSSAForm(DT))
return false;
return std::all_of(begin(), end(), [&](const Loop *L) {
return L->isRecursivelyLCSSAForm(DT);
});
}
bool Loop::isLoopSimplifyForm() const {
// Normal-form loops have a preheader, a single backedge, and all of their
// exits have all their predecessors inside the loop.
return getLoopPreheader() && getLoopLatch() && hasDedicatedExits();
}
// Routines that reform the loop CFG and split edges often fail on indirectbr.
bool Loop::isSafeToClone() const {
// Return false if any loop blocks contain indirectbrs, or there are any calls
// to noduplicate functions.
for (BasicBlock *BB : this->blocks()) {
if (isa<IndirectBrInst>(BB->getTerminator()))
return false;
if (const InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
if (II->cannotDuplicate())
return false;
// Return false if any loop blocks contain invokes to EH-pads other than
// landingpads; we don't know how to split those edges yet.
auto *FirstNonPHI = II->getUnwindDest()->getFirstNonPHI();
if (FirstNonPHI->isEHPad() && !isa<LandingPadInst>(FirstNonPHI))
return false;
}
for (Instruction &I : *BB) {
if (const CallInst *CI = dyn_cast<CallInst>(&I)) {
if (CI->cannotDuplicate())
return false;
}
if (I.getType()->isTokenTy() && I.isUsedOutsideOfBlock(BB))
return false;
}
}
return true;
}
MDNode *Loop::getLoopID() const {
MDNode *LoopID = nullptr;
if (isLoopSimplifyForm()) {
LoopID = getLoopLatch()->getTerminator()->getMetadata(LoopMDName);
} else {
// Go through each predecessor of the loop header and check the
// terminator for the metadata.
BasicBlock *H = getHeader();
for (BasicBlock *BB : this->blocks()) {
TerminatorInst *TI = BB->getTerminator();
MDNode *MD = nullptr;
// Check if this terminator branches to the loop header.
for (BasicBlock *Successor : TI->successors()) {
if (Successor == H) {
MD = TI->getMetadata(LoopMDName);
break;
}
}
if (!MD)
return nullptr;
if (!LoopID)
LoopID = MD;
else if (MD != LoopID)
return nullptr;
}
}
if (!LoopID || LoopID->getNumOperands() == 0 ||
LoopID->getOperand(0) != LoopID)
return nullptr;
return LoopID;
}
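// A well-formed loop ID is a self-referential metadata node attached under
// the "llvm.loop" kind to the branch(es) into the header, e.g. (illustrative):
//
//   br i1 %cond, label %header, label %exit, !llvm.loop !0
//   ...
//   !0 = distinct !{!0, !1}
//   !1 = !{!"llvm.loop.unroll.disable"}
//
// The first operand of !0 is !0 itself, which is what the check above
// verifies before returning the node.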
void Loop::setLoopID(MDNode *LoopID) const {
assert(LoopID && "Loop ID should not be null");
assert(LoopID->getNumOperands() > 0 && "Loop ID needs at least one operand");
assert(LoopID->getOperand(0) == LoopID && "Loop ID should refer to itself");
if (isLoopSimplifyForm()) {
getLoopLatch()->getTerminator()->setMetadata(LoopMDName, LoopID);
return;
}
BasicBlock *H = getHeader();
for (BasicBlock *BB : this->blocks()) {
TerminatorInst *TI = BB->getTerminator();
for (BasicBlock *Successor : TI->successors()) {
if (Successor == H)
TI->setMetadata(LoopMDName, LoopID);
}
}
}
bool Loop::isAnnotatedParallel() const {
MDNode *DesiredLoopIdMetadata = getLoopID();
if (!DesiredLoopIdMetadata)
return false;
// The loop branch contains the parallel loop metadata. In order to ensure
// that any parallel-loop-unaware optimization pass hasn't added loop-carried
// dependencies (thus converted the loop back to a sequential loop), check
// that all the memory instructions in the loop contain parallelism metadata
// that point to the same unique "loop id metadata" the loop branch does.
for (BasicBlock *BB : this->blocks()) {
for (Instruction &I : *BB) {
if (!I.mayReadOrWriteMemory())
continue;
// The memory instruction can refer to the loop identifier metadata
// directly or indirectly through another list metadata (in case of
// nested parallel loops). The loop identifier metadata refers to
// itself so we can check both cases with the same routine.
MDNode *LoopIdMD =
I.getMetadata(LLVMContext::MD_mem_parallel_loop_access);
if (!LoopIdMD)
return false;
bool LoopIdMDFound = false;
for (const MDOperand &MDOp : LoopIdMD->operands()) {
if (MDOp == DesiredLoopIdMetadata) {
LoopIdMDFound = true;
break;
}
}
if (!LoopIdMDFound)
return false;
}
}
return true;
}
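// For illustration, a loop annotated as parallel looks roughly like this
// (names illustrative):
//
//   store i32 %v, i32* %p, !llvm.mem.parallel_loop_access !0
//   br i1 %cond, label %header, label %exit, !llvm.loop !0
//   !0 = distinct !{!0}
//
// Every memory access inside the loop must carry parallel-access metadata
// that refers, directly or through a list, to the loop's own ID; that is the
// invariant the walk above enforces.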
bool Loop::hasDedicatedExits() const {
// Each predecessor of each exit block of a normal loop is contained
// within the loop.
SmallVector<BasicBlock *, 4> ExitBlocks;
getExitBlocks(ExitBlocks);
for (BasicBlock *BB : ExitBlocks)
for (BasicBlock *Predecessor : predecessors(BB))
if (!contains(Predecessor))
return false;
// All the requirements are met.
return true;
}
void
Loop::getUniqueExitBlocks(SmallVectorImpl<BasicBlock *> &ExitBlocks) const {
assert(hasDedicatedExits() &&
"getUniqueExitBlocks assumes the loop has canonical form exits!");
SmallVector<BasicBlock *, 32> SwitchExitBlocks;
for (BasicBlock *BB : this->blocks()) {
SwitchExitBlocks.clear();
for (BasicBlock *Successor : successors(BB)) {
// If block is inside the loop then it is not an exit block.
if (contains(Successor))
continue;
pred_iterator PI = pred_begin(Successor);
BasicBlock *FirstPred = *PI;
// Only insert the exit block into the output ExitBlocks vector if the
// current basic block is this exit block's first predecessor. This ensures
// that the same exit block is not inserted twice into the ExitBlocks
// vector.
if (BB != FirstPred)
continue;
// If a terminator has more than two successors, for example a SwitchInst,
// then it is possible that there are multiple edges from the current block
// to one exit block.
if (std::distance(succ_begin(BB), succ_end(BB)) <= 2) {
ExitBlocks.push_back(Successor);
continue;
}
// In case of multiple edges from the current block to the exit block, collect
// only one edge in ExitBlocks. Use SwitchExitBlocks to keep track of
// duplicate edges.
if (std::find(SwitchExitBlocks.begin(), SwitchExitBlocks.end(), Successor)
== SwitchExitBlocks.end()) {
SwitchExitBlocks.push_back(Successor);
ExitBlocks.push_back(Successor);
}
}
}
}
BasicBlock *Loop::getUniqueExitBlock() const {
SmallVector<BasicBlock *, 8> UniqueExitBlocks;
getUniqueExitBlocks(UniqueExitBlocks);
if (UniqueExitBlocks.size() == 1)
return UniqueExitBlocks[0];
return nullptr;
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void Loop::dump() const {
print(dbgs());
}
#endif
//===----------------------------------------------------------------------===//
// UnloopUpdater implementation
//
namespace {
/// Find the new parent loop for all blocks within the "unloop" whose last
/// backedge has just been removed.
class UnloopUpdater {
Loop *Unloop;
LoopInfo *LI;
LoopBlocksDFS DFS;
// Map unloop's immediate subloops to their nearest reachable parents. Nested
// loops within these subloops will not change parents. However, an immediate
// subloop's new parent will be the nearest loop reachable from either its own
// exits *or* any of its nested loops' exits.
DenseMap<Loop*, Loop*> SubloopParents;
// Flag the presence of an irreducible backedge whose destination is a block
// directly contained by the original unloop.
bool FoundIB;
public:
UnloopUpdater(Loop *UL, LoopInfo *LInfo) :
Unloop(UL), LI(LInfo), DFS(UL), FoundIB(false) {}
void updateBlockParents();
void removeBlocksFromAncestors();
void updateSubloopParents();
protected:
Loop *getNearestLoop(BasicBlock *BB, Loop *BBLoop);
};
} // end anonymous namespace
/// Update the parent loop for all blocks that are directly contained within the
/// original "unloop".
void UnloopUpdater::updateBlockParents() {
if (Unloop->getNumBlocks()) {
// Perform a post order CFG traversal of all blocks within this loop,
// propagating the nearest loop from successors to predecessors.
LoopBlocksTraversal Traversal(DFS, LI);
for (LoopBlocksTraversal::POTIterator POI = Traversal.begin(),
POE = Traversal.end(); POI != POE; ++POI) {
Loop *L = LI->getLoopFor(*POI);
Loop *NL = getNearestLoop(*POI, L);
if (NL != L) {
// For reducible loops, NL is now an ancestor of Unloop.
assert((NL != Unloop && (!NL || NL->contains(Unloop))) &&
"uninitialized successor");
LI->changeLoopFor(*POI, NL);
}
else {
// Or the current block is part of a subloop, in which case its parent
// is unchanged.
assert((FoundIB || Unloop->contains(L)) && "uninitialized successor");
}
}
}
// Each irreducible loop within the unloop induces a round of iteration using
// the DFS result cached by Traversal.
bool Changed = FoundIB;
for (unsigned NIters = 0; Changed; ++NIters) {
assert(NIters < Unloop->getNumBlocks() && "runaway iterative algorithm");
// Iterate over the postorder list of blocks, propagating the nearest loop
// from successors to predecessors as before.
Changed = false;
for (LoopBlocksDFS::POIterator POI = DFS.beginPostorder(),
POE = DFS.endPostorder(); POI != POE; ++POI) {
Loop *L = LI->getLoopFor(*POI);
Loop *NL = getNearestLoop(*POI, L);
if (NL != L) {
assert(NL != Unloop && (!NL || NL->contains(Unloop)) &&
"uninitialized successor");
LI->changeLoopFor(*POI, NL);
Changed = true;
}
}
}
}
/// Remove unloop's blocks from all ancestors below their new parents.
void UnloopUpdater::removeBlocksFromAncestors() {
// Remove all unloop's blocks (including those in nested subloops) from
// ancestors below the new parent loop.
for (Loop::block_iterator BI = Unloop->block_begin(),
BE = Unloop->block_end(); BI != BE; ++BI) {
Loop *OuterParent = LI->getLoopFor(*BI);
if (Unloop->contains(OuterParent)) {
while (OuterParent->getParentLoop() != Unloop)
OuterParent = OuterParent->getParentLoop();
OuterParent = SubloopParents[OuterParent];
}
// Remove blocks from former ancestors, except Unloop itself, which will be
// deleted.
for (Loop *OldParent = Unloop->getParentLoop(); OldParent != OuterParent;
OldParent = OldParent->getParentLoop()) {
assert(OldParent && "new loop is not an ancestor of the original");
OldParent->removeBlockFromLoop(*BI);
}
}
}
/// Update the parent loop for all subloops directly nested within unloop.
void UnloopUpdater::updateSubloopParents() {
while (!Unloop->empty()) {
Loop *Subloop = *std::prev(Unloop->end());
Unloop->removeChildLoop(std::prev(Unloop->end()));
assert(SubloopParents.count(Subloop) && "DFS failed to visit subloop");
if (Loop *Parent = SubloopParents[Subloop])
Parent->addChildLoop(Subloop);
else
LI->addTopLevelLoop(Subloop);
}
}
/// Return the nearest parent loop among this block's successors. If a successor
/// is a subloop header, consider its parent to be the nearest parent of the
/// subloop's exits.
///
/// For subloop blocks, simply update SubloopParents and return NULL.
Loop *UnloopUpdater::getNearestLoop(BasicBlock *BB, Loop *BBLoop) {
// Initially for blocks directly contained by Unloop, NearLoop == Unloop and
// is considered uninitialized.
Loop *NearLoop = BBLoop;
Loop *Subloop = nullptr;
if (NearLoop != Unloop && Unloop->contains(NearLoop)) {
Subloop = NearLoop;
// Find the subloop ancestor that is directly contained within Unloop.
while (Subloop->getParentLoop() != Unloop) {
Subloop = Subloop->getParentLoop();
assert(Subloop && "subloop is not an ancestor of the original loop");
}
// Get the current nearest parent of the Subloop exits, initially Unloop.
NearLoop =
SubloopParents.insert(std::make_pair(Subloop, Unloop)).first->second;
}
succ_iterator I = succ_begin(BB), E = succ_end(BB);
if (I == E) {
assert(!Subloop && "subloop blocks must have a successor");
NearLoop = nullptr; // unloop blocks may now exit the function.
}
for (; I != E; ++I) {
if (*I == BB)
continue; // self loops are uninteresting
Loop *L = LI->getLoopFor(*I);
if (L == Unloop) {
// This successor has not been processed. This path must lead to an
// irreducible backedge.
assert((FoundIB || !DFS.hasPostorder(*I)) && "should have seen IB");
FoundIB = true;
}
if (L != Unloop && Unloop->contains(L)) {
// Successor is in a subloop.
if (Subloop)
continue; // Branching within subloops. Ignore it.
// BB branches from the original into a subloop header.
assert(L->getParentLoop() == Unloop && "cannot skip into nested loops");
// Get the current nearest parent of the Subloop's exits.
L = SubloopParents[L];
// L could be Unloop if the only exit was an irreducible backedge.
}
if (L == Unloop) {
continue;
}
// Handle critical edges from Unloop into a sibling loop.
if (L && !L->contains(Unloop)) {
L = L->getParentLoop();
}
// Remember the nearest parent loop among successors or subloop exits.
if (NearLoop == Unloop || !NearLoop || NearLoop->contains(L))
NearLoop = L;
}
if (Subloop) {
SubloopParents[Subloop] = NearLoop;
return BBLoop;
}
return NearLoop;
}
LoopInfo::LoopInfo(const DominatorTreeBase<BasicBlock> &DomTree) {
analyze(DomTree);
}
void LoopInfo::markAsRemoved(Loop *Unloop) {
assert(!Unloop->isInvalid() && "Loop has already been removed");
Unloop->invalidate();
RemovedLoops.push_back(Unloop);
// First handle the special case of no parent loop to simplify the algorithm.
if (!Unloop->getParentLoop()) {
// Since Unloop had no parent, its blocks are no longer in a loop.
for (Loop::block_iterator I = Unloop->block_begin(),
E = Unloop->block_end();
I != E; ++I) {
// Don't reparent blocks in subloops.
if (getLoopFor(*I) != Unloop)
continue;
// Blocks no longer have a parent but are still referenced by Unloop until
// the Unloop object is deleted.
changeLoopFor(*I, nullptr);
}
// Remove the loop from the top-level LoopInfo object.
for (iterator I = begin();; ++I) {
assert(I != end() && "Couldn't find loop");
if (*I == Unloop) {
removeLoop(I);
break;
}
}
// Move all of the subloops to the top-level.
while (!Unloop->empty())
addTopLevelLoop(Unloop->removeChildLoop(std::prev(Unloop->end())));
return;
}
// Update the parent loop for all blocks within the loop. Blocks within
// subloops will not change parents.
UnloopUpdater Updater(Unloop, this);
Updater.updateBlockParents();
// Remove blocks from former ancestor loops.
Updater.removeBlocksFromAncestors();
// Add direct subloops as children in their new parent loop.
Updater.updateSubloopParents();
// Remove unloop from its parent loop.
Loop *ParentLoop = Unloop->getParentLoop();
for (Loop::iterator I = ParentLoop->begin();; ++I) {
assert(I != ParentLoop->end() && "Couldn't find loop");
if (*I == Unloop) {
ParentLoop->removeChildLoop(I);
break;
}
}
}
LoopInfo LoopAnalysis::run(Function &F, AnalysisManager<Function> *AM) {
// FIXME: Currently we create a LoopInfo from scratch for every function.
// This may prove to be too wasteful due to deallocating and re-allocating
// memory each time for the underlying map and vector data structures. At some
// point it may prove worthwhile to use a freelist and recycle LoopInfo
// objects. I don't want to add that kind of complexity until the scope of
// the problem is better understood.
LoopInfo LI;
LI.analyze(AM->getResult<DominatorTreeAnalysis>(F));
return LI;
}
PreservedAnalyses LoopPrinterPass::run(Function &F,
AnalysisManager<Function> *AM) {
AM->getResult<LoopAnalysis>(F).print(OS);
return PreservedAnalyses::all();
}
PrintLoopPass::PrintLoopPass() : OS(dbgs()) {}
PrintLoopPass::PrintLoopPass(raw_ostream &OS, const std::string &Banner)
: OS(OS), Banner(Banner) {}
PreservedAnalyses PrintLoopPass::run(Loop &L) {
OS << Banner;
for (auto *Block : L.blocks())
if (Block)
Block->print(OS);
else
OS << "Printing <null> block";
return PreservedAnalyses::all();
}
//===----------------------------------------------------------------------===//
// LoopInfo implementation
//
char LoopInfoWrapperPass::ID = 0;
INITIALIZE_PASS_BEGIN(LoopInfoWrapperPass, "loops", "Natural Loop Information",
true, true)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(LoopInfoWrapperPass, "loops", "Natural Loop Information",
true, true)
bool LoopInfoWrapperPass::runOnFunction(Function &) {
releaseMemory();
LI.analyze(getAnalysis<DominatorTreeWrapperPass>().getDomTree());
return false;
}
void LoopInfoWrapperPass::verifyAnalysis() const {
// LoopInfoWrapperPass is a FunctionPass, but verifying every loop in the
// function each time verifyAnalysis is called is very expensive. The
// -verify-loop-info option can enable this. In order to perform some
// checking by default, LoopPass has been taught to call verifyLoop manually
// during loop pass sequences.
if (VerifyLoopInfo)
LI.verify();
}
void LoopInfoWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
AU.addRequired<DominatorTreeWrapperPass>();
}
void LoopInfoWrapperPass::print(raw_ostream &OS, const Module *) const {
LI.print(OS);
}
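// Usage sketch for legacy pass manager clients (illustrative; MyPass is a
// hypothetical FunctionPass):
//
//   void MyPass::getAnalysisUsage(AnalysisUsage &AU) const {
//     AU.addRequired<LoopInfoWrapperPass>();
//   }
//
//   bool MyPass::runOnFunction(Function &F) {
//     LoopInfo &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
//     for (Loop *TopLevel : LI)
//       ...; // iterates top-level loops; recurse via Loop::begin()/end().
//     return false;
//   }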
//===----------------------------------------------------------------------===//
// LoopBlocksDFS implementation
//
/// Traverse the loop blocks and store the DFS result.
/// Useful for clients that just want the final DFS result and don't need to
/// visit blocks during the initial traversal.
void LoopBlocksDFS::perform(LoopInfo *LI) {
LoopBlocksTraversal Traversal(*this, LI);
for (LoopBlocksTraversal::POTIterator POI = Traversal.begin(),
POE = Traversal.end(); POI != POE; ++POI) ;
}