
[MemorySSA] Teach LoopSimplify to preserve MemorySSA.

Summary:
Preserve MemorySSA in LoopSimplify under the old pass manager, if the analysis is available.
Do not preserve it under the new pass manager.
Update tests.
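
A condensed sketch of the old-pass-manager wiring, taken from the LoopSimplify.cpp hunks below (EnableMSSALoopDependency is the pre-existing flag gating MemorySSA use in loop passes):

// getAnalysisUsage: declare that MemorySSA survives this pass.
AU.addPreserved<MemorySSAWrapperPass>();

// runOnFunction: build an updater only if MemorySSA was already computed.
MemorySSA *MSSA = nullptr;
std::unique_ptr<MemorySSAUpdater> MSSAU;
if (EnableMSSALoopDependency)
  if (auto *MSSAAnalysis = getAnalysisIfAvailable<MemorySSAWrapperPass>()) {
    MSSA = &MSSAAnalysis->getMSSA();
    MSSAU = make_unique<MemorySSAUpdater>(MSSA);
  }
// A null MSSAU in the utilities below means "no MemorySSA to maintain".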

Subscribers: nemanjai, jlebar, javed.absar, Prazek, kbarton, zzheng, jsji, llvm-commits, george.burgess.iv, chandlerc

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D60833

llvm-svn: 360270
Alina Sbirlea 2019-05-08 17:05:36 +00:00
parent 3a3c43bf2f
commit 8f619d9fc5
18 changed files with 228 additions and 60 deletions

View File

@ -55,6 +55,7 @@ class DominatorTree;
class LoopInfo;
class Loop;
class MDNode;
class MemorySSAUpdater;
class PHINode;
class raw_ostream;
template <class N, bool IsPostDom> class DominatorTreeBase;
@ -498,7 +499,8 @@ public:
/// If InsertPt is specified, it is the point to hoist instructions to.
/// If null, the terminator of the loop preheader is used.
bool makeLoopInvariant(Value *V, bool &Changed,
Instruction *InsertPt = nullptr) const;
Instruction *InsertPt = nullptr,
MemorySSAUpdater *MSSAU = nullptr) const;
/// If the given instruction is inside of the loop and it can be hoisted, do
/// so to make it trivially loop-invariant.
@ -510,7 +512,8 @@ public:
/// If null, the terminator of the loop preheader is used.
///
bool makeLoopInvariant(Instruction *I, bool &Changed,
Instruction *InsertPt = nullptr) const;
Instruction *InsertPt = nullptr,
MemorySSAUpdater *MSSAU = nullptr) const;
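
A hypothetical call site for the widened overload, mirroring how LoopSimplify uses it further down (the names Inst, Changed and MSSAU stand for state the caller already has; MSSAU may be null):

bool Changed = false;
BasicBlock *Preheader = L->getLoopPreheader();
// Hoist Inst (and, recursively, its operands) to the preheader terminator.
// With a non-null MSSAU, the corresponding MemoryAccess is moved as well.
bool Hoisted = L->makeLoopInvariant(
    Inst, Changed, Preheader ? Preheader->getTerminator() : nullptr, MSSAU);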
/// Check to see if the loop has a canonical induction variable: an integer
/// recurrence that starts at 0 and increments by one each time through the

View File

@ -105,7 +105,12 @@ public:
/// Update the MemoryPhi in `To` to have a single incoming edge from `From`,
/// following a CFG change that replaced multiple edges (switch) with a direct
/// branch.
void removeDuplicatePhiEdgesBetween(BasicBlock *From, BasicBlock *To);
void removeDuplicatePhiEdgesBetween(const BasicBlock *From,
const BasicBlock *To);
/// Update MemorySSA when inserting a unique backedge block for a loop.
void updatePhisWhenInsertingUniqueBackedgeBlock(BasicBlock *LoopHeader,
BasicBlock *LoopPreheader,
BasicBlock *BackedgeBlock);
/// Update MemorySSA after a loop was cloned, given the blocks in RPO order,
/// the exit blocks and a 1:1 mapping of all blocks and instructions
/// cloned. This involves duplicating all defs and uses in the cloned blocks
@ -240,6 +245,16 @@ public:
/// deleted after this call.
void removeBlocks(const SmallPtrSetImpl<BasicBlock *> &DeadBlocks);
/// Instruction I will be changed to an unreachable. Remove all accesses in
/// I's block that follow I (inclusive), and update the Phis in the block's
/// successors.
void changeToUnreachable(const Instruction *I);
/// Conditional branch BI is changed or replaced with an unconditional branch
/// to `To`. Update Phis in BI's successors to remove BI's BB.
void changeCondBranchToUnconditionalTo(const BranchInst *BI,
const BasicBlock *To);
/// Get handle on MemorySSA.
MemorySSA* getMemorySSA() const { return MSSA; }
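
Rough usage sketch for the three new updater entry points; the real call sites are in the LoopSimplify.cpp, Local.cpp and SimplifyCFG.cpp hunks below (all guards assume MSSAU may be null):

// After giving loop Header a single backedge through BEBlock:
if (MSSAU)
  MSSAU->updatePhisWhenInsertingUniqueBackedgeBlock(Header, Preheader, BEBlock);

// Before replacing instruction I and everything after it with unreachable:
if (MSSAU)
  MSSAU->changeToUnreachable(I);

// Before rewriting conditional branch BI into an unconditional branch to Dest:
if (MSSAU)
  MSSAU->changeCondBranchToUnconditionalTo(BI, Dest);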

View File

@ -231,7 +231,8 @@ bool FlattenCFG(BasicBlock *BB, AliasAnalysis *AA = nullptr);
/// If this basic block is ONLY a setcc and a branch, and if a predecessor
/// branches to us and one of our successors, fold the setcc into the
/// predecessor and use logical operations to pick the right destination.
bool FoldBranchToCommonDest(BranchInst *BI, unsigned BonusInstThreshold = 1);
bool FoldBranchToCommonDest(BranchInst *BI, MemorySSAUpdater *MSSAU = nullptr,
unsigned BonusInstThreshold = 1);
/// This function takes a virtual register computed by an Instruction and
/// replaces it with a slot in the stack frame, allocated via alloca.
@ -381,7 +382,8 @@ unsigned removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB);
/// instruction, making it and the rest of the code in the block dead.
unsigned changeToUnreachable(Instruction *I, bool UseLLVMTrap,
bool PreserveLCSSA = false,
DomTreeUpdater *DTU = nullptr);
DomTreeUpdater *DTU = nullptr,
MemorySSAUpdater *MSSAU = nullptr);
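
A minimal call-site sketch for the widened utility signatures, assuming the caller already holds (possibly null) DTU/MSSAU updaters:

// Turn TI into unreachable; MemorySSA accesses from TI onward are dropped.
changeToUnreachable(TI, /*UseLLVMTrap=*/false, /*PreserveLCSSA=*/true,
                    /*DTU=*/nullptr, MSSAU);

// Fold a compare-and-branch block into its predecessor, keeping MemoryPhis
// in the successors consistent.
if (FoldBranchToCommonDest(BI, MSSAU))
  Changed = true;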
/// Convert the CallInst to InvokeInst with the specified unwind edge basic
/// block. This also splits the basic block where CI is located, because

View File

@ -45,6 +45,8 @@
namespace llvm {
class MemorySSAUpdater;
/// This pass is responsible for loop canonicalization.
class LoopSimplifyPass : public PassInfoMixin<LoopSimplifyPass> {
public:
@ -55,9 +57,11 @@ public:
///
/// This takes a potentially un-simplified loop L (and its children) and turns
/// it into a simplified loop nest with preheaders and single backedges. It will
/// update \c AliasAnalysis and \c ScalarEvolution analyses if they're non-null.
/// update \c DominatorTree, \c LoopInfo, \c ScalarEvolution and \c MemorySSA
/// analyses if they're non-null, and LCSSA if \c PreserveLCSSA is true.
bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, ScalarEvolution *SE,
AssumptionCache *AC, bool PreserveLCSSA);
AssumptionCache *AC, MemorySSAUpdater *MSSAU,
bool PreserveLCSSA);
} // end namespace llvm
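
Hypothetical caller-side use of the new parameter (a transform that maintains MemorySSA passes its updater through; nullptr opts out):

// Simplify L and its children, keeping DT/LI/SE/MemorySSA in sync.
Changed |= simplifyLoop(L, &DT, &LI, &SE, &AC, MSSAU.get(),
                        /*PreserveLCSSA=*/false);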

View File

@ -51,7 +51,7 @@ class TargetLibraryInfo;
class TargetTransformInfo;
BasicBlock *InsertPreheaderForLoop(Loop *L, DominatorTree *DT, LoopInfo *LI,
bool PreserveLCSSA);
MemorySSAUpdater *MSSAU, bool PreserveLCSSA);
/// Ensure that all exit blocks of the loop are dedicated exits.
///

View File

@ -19,6 +19,8 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/LoopInfoImpl.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/CFG.h"
@ -64,15 +66,16 @@ bool Loop::hasLoopInvariantOperands(const Instruction *I) const {
return all_of(I->operands(), [this](Value *V) { return isLoopInvariant(V); });
}
bool Loop::makeLoopInvariant(Value *V, bool &Changed,
Instruction *InsertPt) const {
bool Loop::makeLoopInvariant(Value *V, bool &Changed, Instruction *InsertPt,
MemorySSAUpdater *MSSAU) const {
if (Instruction *I = dyn_cast<Instruction>(V))
return makeLoopInvariant(I, Changed, InsertPt);
return makeLoopInvariant(I, Changed, InsertPt, MSSAU);
return true; // All non-instructions are loop-invariant.
}
bool Loop::makeLoopInvariant(Instruction *I, bool &Changed,
Instruction *InsertPt) const {
Instruction *InsertPt,
MemorySSAUpdater *MSSAU) const {
// Test if the value is already loop-invariant.
if (isLoopInvariant(I))
return true;
@ -93,11 +96,14 @@ bool Loop::makeLoopInvariant(Instruction *I, bool &Changed,
}
// Don't hoist instructions with loop-variant operands.
for (Value *Operand : I->operands())
if (!makeLoopInvariant(Operand, Changed, InsertPt))
if (!makeLoopInvariant(Operand, Changed, InsertPt, MSSAU))
return false;
// Hoist.
I->moveBefore(InsertPt);
if (MSSAU)
if (auto *MUD = MSSAU->getMemorySSA()->getMemoryAccess(I))
MSSAU->moveToPlace(MUD, InsertPt->getParent(), MemorySSA::End);
// There is a possibility of hoisting this instruction above some arbitrary
// condition. Any metadata defined on it can be control dependent on this

View File

@ -463,8 +463,8 @@ void MemorySSAUpdater::removeEdge(BasicBlock *From, BasicBlock *To) {
}
}
void MemorySSAUpdater::removeDuplicatePhiEdgesBetween(BasicBlock *From,
BasicBlock *To) {
void MemorySSAUpdater::removeDuplicatePhiEdgesBetween(const BasicBlock *From,
const BasicBlock *To) {
if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
bool Found = false;
MPhi->unorderedDeleteIncomingIf([&](const MemoryAccess *, BasicBlock *B) {
@ -522,6 +522,46 @@ void MemorySSAUpdater::cloneUsesAndDefs(BasicBlock *BB, BasicBlock *NewBB,
}
}
void MemorySSAUpdater::updatePhisWhenInsertingUniqueBackedgeBlock(
BasicBlock *Header, BasicBlock *Preheader, BasicBlock *BEBlock) {
auto *MPhi = MSSA->getMemoryAccess(Header);
if (!MPhi)
return;
// Create phi node in the backedge block and populate it with the same
// incoming values as MPhi. Skip incoming values coming from Preheader.
auto *NewMPhi = MSSA->createMemoryPhi(BEBlock);
bool HasUniqueIncomingValue = true;
MemoryAccess *UniqueValue = nullptr;
for (unsigned I = 0, E = MPhi->getNumIncomingValues(); I != E; ++I) {
BasicBlock *IBB = MPhi->getIncomingBlock(I);
MemoryAccess *IV = MPhi->getIncomingValue(I);
if (IBB != Preheader) {
NewMPhi->addIncoming(IV, IBB);
if (HasUniqueIncomingValue) {
if (!UniqueValue)
UniqueValue = IV;
else if (UniqueValue != IV)
HasUniqueIncomingValue = false;
}
}
}
// Update incoming edges into MPhi. Remove all but the incoming edge from
// Preheader. Add an edge from NewMPhi.
auto *AccFromPreheader = MPhi->getIncomingValueForBlock(Preheader);
MPhi->setIncomingValue(0, AccFromPreheader);
MPhi->setIncomingBlock(0, Preheader);
for (unsigned I = MPhi->getNumIncomingValues() - 1; I >= 1; --I)
MPhi->unorderedDeleteIncoming(I);
MPhi->addIncoming(NewMPhi, BEBlock);
// If NewMPhi is a trivial phi, remove it. Its use in the header MPhi will be
// replaced with the unique value.
if (HasUniqueIncomingValue)
removeMemoryAccess(NewMPhi);
}
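
As an illustration of the update above (made-up names), for a header H with preheader P and two latches L1/L2:

// Before: H's MemoryPhi merges the preheader and both backedges.
//   H:  MPhi    = MemoryPhi({P, entryDef}, {L1, def1}, {L2, def2})
// After inserting the unique backedge block BE:
//   BE: NewMPhi = MemoryPhi({L1, def1}, {L2, def2})
//   H:  MPhi    = MemoryPhi({P, entryDef}, {BE, NewMPhi})
// If def1 == def2, NewMPhi is trivial and is removed again; its unique value
// then feeds H's MemoryPhi directly on the BE edge.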
void MemorySSAUpdater::updateForClonedLoop(const LoopBlocksRPO &LoopBlocks,
ArrayRef<BasicBlock *> ExitBlocks,
const ValueToValueMapTy &VMap,
@ -1223,6 +1263,43 @@ void MemorySSAUpdater::tryRemoveTrivialPhis(ArrayRef<WeakVH> UpdatedPHIs) {
}
}
void MemorySSAUpdater::changeToUnreachable(const Instruction *I) {
const BasicBlock *BB = I->getParent();
// Remove memory accesses in BB for I and all following instructions.
auto BBI = I->getIterator(), BBE = BB->end();
// FIXME: If this becomes too expensive, iterate until the first instruction
// with a memory access, then iterate over MemoryAccesses.
while (BBI != BBE)
removeMemoryAccess(&*(BBI++));
// Update phis in BB's successors to remove BB.
SmallVector<WeakVH, 16> UpdatedPHIs;
for (const BasicBlock *Successor : successors(BB)) {
removeDuplicatePhiEdgesBetween(BB, Successor);
if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Successor)) {
MPhi->unorderedDeleteIncomingBlock(BB);
UpdatedPHIs.push_back(MPhi);
}
}
// Optimize trivial phis.
tryRemoveTrivialPhis(UpdatedPHIs);
}
void MemorySSAUpdater::changeCondBranchToUnconditionalTo(const BranchInst *BI,
const BasicBlock *To) {
const BasicBlock *BB = BI->getParent();
SmallVector<WeakVH, 16> UpdatedPHIs;
for (const BasicBlock *Succ : successors(BB)) {
removeDuplicatePhiEdgesBetween(BB, Succ);
if (Succ != To)
if (auto *MPhi = MSSA->getMemoryAccess(Succ)) {
MPhi->unorderedDeleteIncomingBlock(BB);
UpdatedPHIs.push_back(MPhi);
}
}
// Optimize trivial phis.
tryRemoveTrivialPhis(UpdatedPHIs);
}
MemoryAccess *MemorySSAUpdater::createMemoryAccessInBB(
Instruction *I, MemoryAccess *Definition, const BasicBlock *BB,
MemorySSA::InsertionPlace Point) {

View File

@ -631,7 +631,7 @@ bool PPCCTRLoops::convertToCTRLoop(Loop *L) {
// the CTR register because some such uses might be reordered by the
// selection DAG after the mtctr instruction).
if (!Preheader || mightUseCTR(Preheader))
Preheader = InsertPreheaderForLoop(L, DT, LI, PreserveLCSSA);
Preheader = InsertPreheaderForLoop(L, DT, LI, nullptr, PreserveLCSSA);
if (!Preheader)
return MadeChange;

View File

@ -331,7 +331,7 @@ bool PPCLoopPreIncPrep::runOnLoop(Loop *L) {
// iteration space), insert a new preheader for the loop.
if (!LoopPredecessor ||
!LoopPredecessor->getTerminator()->getType()->isVoidTy()) {
LoopPredecessor = InsertPreheaderForLoop(L, DT, LI, PreserveLCSSA);
LoopPredecessor = InsertPreheaderForLoop(L, DT, LI, nullptr, PreserveLCSSA);
if (LoopPredecessor)
MadeChange = true;
}

View File

@ -1545,7 +1545,7 @@ bool LoopConstrainer::run() {
// This function canonicalizes the loop into Loop-Simplify and LCSSA forms.
auto CanonicalizeLoop = [&] (Loop *L, bool IsOriginalLoop) {
formLCSSARecursively(*L, DT, &LI, &SE);
simplifyLoop(L, &DT, &LI, &SE, nullptr, true);
simplifyLoop(L, &DT, &LI, &SE, nullptr, nullptr, true);
// Pre/post loops are slow paths, we do not need to perform any loop
// optimizations on them.
if (!IsOriginalLoop)

View File

@ -1362,9 +1362,11 @@ bool LoopInterchangeTransform::adjustLoopBranches() {
// preheaders do not satisfy those conditions.
if (isa<PHINode>(OuterLoopPreHeader->begin()) ||
!OuterLoopPreHeader->getUniquePredecessor())
OuterLoopPreHeader = InsertPreheaderForLoop(OuterLoop, DT, LI, true);
OuterLoopPreHeader =
InsertPreheaderForLoop(OuterLoop, DT, LI, nullptr, true);
if (InnerLoopPreHeader == OuterLoop->getHeader())
InnerLoopPreHeader = InsertPreheaderForLoop(InnerLoop, DT, LI, true);
InnerLoopPreHeader =
InsertPreheaderForLoop(InnerLoop, DT, LI, nullptr, true);
// Adjust the loop preheader
BasicBlock *InnerLoopHeader = InnerLoop->getHeader();

View File

@ -1393,7 +1393,8 @@ PreservedAnalyses LoopUnrollPass::run(Function &F,
// will simplify all loops, regardless of whether anything ends up being
// unrolled.
for (auto &L : LI) {
Changed |= simplifyLoop(L, &DT, &LI, &SE, &AC, false /* PreserveLCSSA */);
Changed |=
simplifyLoop(L, &DT, &LI, &SE, &AC, nullptr, false /* PreserveLCSSA */);
Changed |= formLCSSARecursively(*L, DT, &LI, &SE);
}

View File

@ -1894,10 +1894,14 @@ unsigned llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) {
}
unsigned llvm::changeToUnreachable(Instruction *I, bool UseLLVMTrap,
bool PreserveLCSSA, DomTreeUpdater *DTU) {
bool PreserveLCSSA, DomTreeUpdater *DTU,
MemorySSAUpdater *MSSAU) {
BasicBlock *BB = I->getParent();
std::vector <DominatorTree::UpdateType> Updates;
if (MSSAU)
MSSAU->changeToUnreachable(I);
// Loop over all of the successors, removing BB's entry from any PHI
// nodes.
if (DTU)

View File

@ -53,9 +53,10 @@
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
@ -70,6 +71,7 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
using namespace llvm;
@ -118,7 +120,8 @@ static void placeSplitBlockCarefully(BasicBlock *NewBB,
/// preheader insertion and analysis updating.
///
BasicBlock *llvm::InsertPreheaderForLoop(Loop *L, DominatorTree *DT,
LoopInfo *LI, bool PreserveLCSSA) {
LoopInfo *LI, MemorySSAUpdater *MSSAU,
bool PreserveLCSSA) {
BasicBlock *Header = L->getHeader();
// Compute the set of predecessors of the loop that are not in the loop.
@ -141,7 +144,7 @@ BasicBlock *llvm::InsertPreheaderForLoop(Loop *L, DominatorTree *DT,
// Split out the loop pre-header.
BasicBlock *PreheaderBB;
PreheaderBB = SplitBlockPredecessors(Header, OutsideBlocks, ".preheader", DT,
LI, nullptr, PreserveLCSSA);
LI, MSSAU, PreserveLCSSA);
if (!PreheaderBB)
return nullptr;
@ -221,7 +224,7 @@ static PHINode *findPHIToPartitionLoops(Loop *L, DominatorTree *DT,
static Loop *separateNestedLoop(Loop *L, BasicBlock *Preheader,
DominatorTree *DT, LoopInfo *LI,
ScalarEvolution *SE, bool PreserveLCSSA,
AssumptionCache *AC) {
AssumptionCache *AC, MemorySSAUpdater *MSSAU) {
// Don't try to separate loops without a preheader.
if (!Preheader)
return nullptr;
@ -255,7 +258,7 @@ static Loop *separateNestedLoop(Loop *L, BasicBlock *Preheader,
SE->forgetLoop(L);
BasicBlock *NewBB = SplitBlockPredecessors(Header, OuterLoopPreds, ".outer",
DT, LI, nullptr, PreserveLCSSA);
DT, LI, MSSAU, PreserveLCSSA);
// Make sure that NewBB is put someplace intelligent, which doesn't mess up
// code layout too horribly.
@ -318,7 +321,7 @@ static Loop *separateNestedLoop(Loop *L, BasicBlock *Preheader,
// Split edges to exit blocks from the inner loop, if they emerged in the
// process of separating the outer one.
formDedicatedExitBlocks(L, DT, LI, nullptr, PreserveLCSSA);
formDedicatedExitBlocks(L, DT, LI, MSSAU, PreserveLCSSA);
if (PreserveLCSSA) {
// Fix LCSSA form for L. Some values, which previously were only used inside
@ -343,7 +346,8 @@ static Loop *separateNestedLoop(Loop *L, BasicBlock *Preheader,
/// and have that block branch to the loop header. This ensures that loops
/// have exactly one backedge.
static BasicBlock *insertUniqueBackedgeBlock(Loop *L, BasicBlock *Preheader,
DominatorTree *DT, LoopInfo *LI) {
DominatorTree *DT, LoopInfo *LI,
MemorySSAUpdater *MSSAU) {
assert(L->getNumBackEdges() > 1 && "Must have > 1 backedge!");
// Get information about the loop
@ -456,6 +460,10 @@ static BasicBlock *insertUniqueBackedgeBlock(Loop *L, BasicBlock *Preheader,
// Update dominator information
DT->splitBlock(BEBlock);
if (MSSAU)
MSSAU->updatePhisWhenInsertingUniqueBackedgeBlock(Header, Preheader,
BEBlock);
return BEBlock;
}
@ -463,8 +471,11 @@ static BasicBlock *insertUniqueBackedgeBlock(Loop *L, BasicBlock *Preheader,
static bool simplifyOneLoop(Loop *L, SmallVectorImpl<Loop *> &Worklist,
DominatorTree *DT, LoopInfo *LI,
ScalarEvolution *SE, AssumptionCache *AC,
bool PreserveLCSSA) {
MemorySSAUpdater *MSSAU, bool PreserveLCSSA) {
bool Changed = false;
if (MSSAU && VerifyMemorySSA)
MSSAU->getMemorySSA()->verifyMemorySSA();
ReprocessLoop:
// Check to see that no blocks (other than the header) in this loop have
@ -491,11 +502,15 @@ ReprocessLoop:
// Zap the dead pred's terminator and replace it with unreachable.
Instruction *TI = P->getTerminator();
changeToUnreachable(TI, /*UseLLVMTrap=*/false, PreserveLCSSA);
changeToUnreachable(TI, /*UseLLVMTrap=*/false, PreserveLCSSA,
/*DTU=*/nullptr, MSSAU);
Changed = true;
}
}
if (MSSAU && VerifyMemorySSA)
MSSAU->getMemorySSA()->verifyMemorySSA();
// If there are exiting blocks with branches on undef, resolve the undef in
// the direction which will exit the loop. This will help simplify loop
// trip count computations.
@ -520,7 +535,7 @@ ReprocessLoop:
// Does the loop already have a preheader? If so, don't insert one.
BasicBlock *Preheader = L->getLoopPreheader();
if (!Preheader) {
Preheader = InsertPreheaderForLoop(L, DT, LI, PreserveLCSSA);
Preheader = InsertPreheaderForLoop(L, DT, LI, MSSAU, PreserveLCSSA);
if (Preheader)
Changed = true;
}
@ -529,9 +544,12 @@ ReprocessLoop:
// predecessors that are inside of the loop. This check guarantees that the
// loop preheader/header will dominate the exit blocks. If the exit block has
// predecessors from outside of the loop, split the edge now.
if (formDedicatedExitBlocks(L, DT, LI, nullptr, PreserveLCSSA))
if (formDedicatedExitBlocks(L, DT, LI, MSSAU, PreserveLCSSA))
Changed = true;
if (MSSAU && VerifyMemorySSA)
MSSAU->getMemorySSA()->verifyMemorySSA();
// If the header has more than two predecessors at this point (from the
// preheader and from multiple backedges), we must adjust the loop.
BasicBlock *LoopLatch = L->getLoopLatch();
@ -540,8 +558,8 @@ ReprocessLoop:
// this for loops with a giant number of backedges, just factor them into a
// common backedge instead.
if (L->getNumBackEdges() < 8) {
if (Loop *OuterL =
separateNestedLoop(L, Preheader, DT, LI, SE, PreserveLCSSA, AC)) {
if (Loop *OuterL = separateNestedLoop(L, Preheader, DT, LI, SE,
PreserveLCSSA, AC, MSSAU)) {
++NumNested;
// Enqueue the outer loop as it should be processed next in our
// depth-first nest walk.
@ -558,11 +576,14 @@ ReprocessLoop:
// If we either couldn't, or didn't want to, identify nesting of the loops,
// insert a new block that all backedges target, then make it jump to the
// loop header.
LoopLatch = insertUniqueBackedgeBlock(L, Preheader, DT, LI);
LoopLatch = insertUniqueBackedgeBlock(L, Preheader, DT, LI, MSSAU);
if (LoopLatch)
Changed = true;
}
if (MSSAU && VerifyMemorySSA)
MSSAU->getMemorySSA()->verifyMemorySSA();
const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
// Scan over the PHI nodes in the loop header. Since they now have only two
@ -620,9 +641,9 @@ ReprocessLoop:
Instruction *Inst = &*I++;
if (Inst == CI)
continue;
if (!L->makeLoopInvariant(Inst, AnyInvariant,
Preheader ? Preheader->getTerminator()
: nullptr)) {
if (!L->makeLoopInvariant(
Inst, AnyInvariant,
Preheader ? Preheader->getTerminator() : nullptr, MSSAU)) {
AllInvariant = false;
break;
}
@ -639,7 +660,7 @@ ReprocessLoop:
// The block has now been cleared of all instructions except for
// a comparison and a conditional branch. SimplifyCFG may be able
// to fold it now.
if (!FoldBranchToCommonDest(BI))
if (!FoldBranchToCommonDest(BI, MSSAU))
continue;
// Success. The block is now dead, so remove it from the loop,
@ -659,6 +680,10 @@ ReprocessLoop:
DT->changeImmediateDominator(Child, Node->getIDom());
}
DT->eraseNode(ExitingBlock);
if (MSSAU) {
SmallPtrSet<BasicBlock *, 1> ExitBlockSet{ExitingBlock};
MSSAU->removeBlocks(ExitBlockSet);
}
BI->getSuccessor(0)->removePredecessor(
ExitingBlock, /* KeepOneInputPHIs */ PreserveLCSSA);
@ -674,12 +699,15 @@ ReprocessLoop:
if (Changed && SE)
SE->forgetTopmostLoop(L);
if (MSSAU && VerifyMemorySSA)
MSSAU->getMemorySSA()->verifyMemorySSA();
return Changed;
}
bool llvm::simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI,
ScalarEvolution *SE, AssumptionCache *AC,
bool PreserveLCSSA) {
MemorySSAUpdater *MSSAU, bool PreserveLCSSA) {
bool Changed = false;
#ifndef NDEBUG
@ -707,7 +735,7 @@ bool llvm::simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI,
while (!Worklist.empty())
Changed |= simplifyOneLoop(Worklist.pop_back_val(), Worklist, DT, LI, SE,
AC, PreserveLCSSA);
AC, MSSAU, PreserveLCSSA);
return Changed;
}
@ -740,6 +768,7 @@ namespace {
AU.addPreserved<DependenceAnalysisWrapperPass>();
AU.addPreservedID(BreakCriticalEdgesID); // No critical edges added.
AU.addPreserved<BranchProbabilityInfoWrapperPass>();
AU.addPreserved<MemorySSAWrapperPass>();
}
/// verifyAnalysis() - Verify LoopSimplifyForm's guarantees.
@ -771,12 +800,21 @@ bool LoopSimplify::runOnFunction(Function &F) {
ScalarEvolution *SE = SEWP ? &SEWP->getSE() : nullptr;
AssumptionCache *AC =
&getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
MemorySSA *MSSA = nullptr;
std::unique_ptr<MemorySSAUpdater> MSSAU;
if (EnableMSSALoopDependency) {
auto *MSSAAnalysis = getAnalysisIfAvailable<MemorySSAWrapperPass>();
if (MSSAAnalysis) {
MSSA = &MSSAAnalysis->getMSSA();
MSSAU = make_unique<MemorySSAUpdater>(MSSA);
}
}
bool PreserveLCSSA = mustPreserveAnalysisID(LCSSAID);
// Simplify each loop nest in the function.
for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
Changed |= simplifyLoop(*I, DT, LI, SE, AC, PreserveLCSSA);
Changed |= simplifyLoop(*I, DT, LI, SE, AC, MSSAU.get(), PreserveLCSSA);
#ifndef NDEBUG
if (PreserveLCSSA) {
@ -797,9 +835,10 @@ PreservedAnalyses LoopSimplifyPass::run(Function &F,
AssumptionCache *AC = &AM.getResult<AssumptionAnalysis>(F);
// Note that we don't preserve LCSSA in the new PM, if you need it run LCSSA
// after simplifying the loops.
// after simplifying the loops. MemorySSA is not preserved either.
for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
Changed |= simplifyLoop(*I, DT, LI, SE, AC, /*PreserveLCSSA*/ false);
Changed |=
simplifyLoop(*I, DT, LI, SE, AC, nullptr, /*PreserveLCSSA*/ false);
if (!Changed)
return PreservedAnalyses::all();
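
Pipeline-side implication under the new pass manager (hypothetical snippet): since neither LCSSA nor MemorySSA survives, re-establish them afterwards if later passes need them:

FunctionPassManager FPM;
FPM.addPass(LoopSimplifyPass());
FPM.addPass(LCSSAPass()); // rebuild LCSSA; MemorySSA is recomputed on demand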

View File

@ -884,11 +884,11 @@ LoopUnrollResult llvm::UnrollLoop(Loop *L, UnrollLoopOptions ULO, LoopInfo *LI,
// TODO: That potentially might be compile-time expensive. We should try
// to fix the loop-simplified form incrementally.
simplifyLoop(OuterL, DT, LI, SE, AC, PreserveLCSSA);
simplifyLoop(OuterL, DT, LI, SE, AC, nullptr, PreserveLCSSA);
} else {
// Simplify loops for which we might've broken loop-simplify form.
for (Loop *SubLoop : LoopsToSimplify)
simplifyLoop(SubLoop, DT, LI, SE, AC, PreserveLCSSA);
simplifyLoop(SubLoop, DT, LI, SE, AC, nullptr, PreserveLCSSA);
}
}

View File

@ -671,7 +671,7 @@ bool llvm::peelLoop(Loop *L, unsigned PeelCount, LoopInfo *LI,
SE->forgetTopmostLoop(L);
// FIXME: Incrementally update loop-simplify
simplifyLoop(L, DT, LI, SE, AC, PreserveLCSSA);
simplifyLoop(L, DT, LI, SE, AC, nullptr, PreserveLCSSA);
NumPeeled++;

View File

@ -25,8 +25,9 @@
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
@ -65,6 +66,7 @@
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <algorithm>
#include <cassert>
@ -291,9 +293,13 @@ isProfitableToFoldUnconditional(BranchInst *SI1, BranchInst *SI2,
/// will be the same as those coming in from ExistPred, an existing predecessor
/// of Succ.
static void AddPredecessorToBlock(BasicBlock *Succ, BasicBlock *NewPred,
BasicBlock *ExistPred) {
BasicBlock *ExistPred,
MemorySSAUpdater *MSSAU = nullptr) {
for (PHINode &PN : Succ->phis())
PN.addIncoming(PN.getIncomingValueForBlock(ExistPred), NewPred);
if (MSSAU)
if (auto *MPhi = MSSAU->getMemorySSA()->getMemoryAccess(Succ))
MPhi->addIncoming(MPhi->getIncomingValueForBlock(ExistPred), NewPred);
}
/// Compute an abstract "cost" of speculating the given instruction,
@ -669,7 +675,8 @@ private:
} // end anonymous namespace
static void EraseTerminatorAndDCECond(Instruction *TI) {
static void EraseTerminatorAndDCECond(Instruction *TI,
MemorySSAUpdater *MSSAU = nullptr) {
Instruction *Cond = nullptr;
if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
Cond = dyn_cast<Instruction>(SI->getCondition());
@ -682,7 +689,7 @@ static void EraseTerminatorAndDCECond(Instruction *TI) {
TI->eraseFromParent();
if (Cond)
RecursivelyDeleteTriviallyDeadInstructions(Cond);
RecursivelyDeleteTriviallyDeadInstructions(Cond, nullptr, MSSAU);
}
/// Return true if the specified terminator checks
@ -2546,7 +2553,8 @@ static bool extractPredSuccWeights(BranchInst *PBI, BranchInst *BI,
/// If this basic block is simple enough, and if a predecessor branches to us
/// and one of our successors, fold the block into the predecessor and use
/// logical operations to pick the right destination.
bool llvm::FoldBranchToCommonDest(BranchInst *BI, unsigned BonusInstThreshold) {
bool llvm::FoldBranchToCommonDest(BranchInst *BI, MemorySSAUpdater *MSSAU,
unsigned BonusInstThreshold) {
BasicBlock *BB = BI->getParent();
const unsigned PredCount = pred_size(BB);
@ -2757,7 +2765,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI, unsigned BonusInstThreshold) {
(SuccFalseWeight + SuccTrueWeight) +
PredTrueWeight * SuccFalseWeight);
}
AddPredecessorToBlock(TrueDest, PredBlock, BB);
AddPredecessorToBlock(TrueDest, PredBlock, BB, MSSAU);
PBI->setSuccessor(0, TrueDest);
}
if (PBI->getSuccessor(1) == BB) {
@ -2772,7 +2780,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI, unsigned BonusInstThreshold) {
// FalseWeight is FalseWeight for PBI * FalseWeight for BI.
NewWeights.push_back(PredFalseWeight * SuccFalseWeight);
}
AddPredecessorToBlock(FalseDest, PredBlock, BB);
AddPredecessorToBlock(FalseDest, PredBlock, BB, MSSAU);
PBI->setSuccessor(1, FalseDest);
}
if (NewWeights.size() == 2) {
@ -2820,9 +2828,15 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI, unsigned BonusInstThreshold) {
PHIs[i]->setIncomingValue(PHIs[i]->getBasicBlockIndex(PBI->getParent()),
MergedCond);
}
// PBI is changed to branch to TrueDest below. Remove its block from the
// MemoryPhis of all other successors.
if (MSSAU)
MSSAU->changeCondBranchToUnconditionalTo(PBI, TrueDest);
// Change PBI from Conditional to Unconditional.
BranchInst *New_PBI = BranchInst::Create(TrueDest, PBI);
EraseTerminatorAndDCECond(PBI);
EraseTerminatorAndDCECond(PBI, MSSAU);
PBI = New_PBI;
}
@ -5805,7 +5819,7 @@ bool SimplifyCFGOpt::SimplifyUncondBranch(BranchInst *BI,
// branches to us and our successor, fold the comparison into the
// predecessor and use logical operations to update the incoming value
// for PHI nodes in common successor.
if (FoldBranchToCommonDest(BI, Options.BonusInstThreshold))
if (FoldBranchToCommonDest(BI, nullptr, Options.BonusInstThreshold))
return requestResimplify();
return false;
}
@ -5869,7 +5883,7 @@ bool SimplifyCFGOpt::SimplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) {
// If this basic block is ONLY a compare and a branch, and if a predecessor
// branches to us and one of our successors, fold the comparison into the
// predecessor and use logical operations to pick the right destination.
if (FoldBranchToCommonDest(BI, Options.BonusInstThreshold))
if (FoldBranchToCommonDest(BI, nullptr, Options.BonusInstThreshold))
return requestResimplify();
// We have a conditional branch to two blocks that are only reachable

View File

@ -7574,7 +7574,8 @@ bool LoopVectorizePass::runImpl(
// will simplify all loops, regardless of whether anything ends up being
// vectorized.
for (auto &L : *LI)
Changed |= simplifyLoop(L, DT, LI, SE, AC, false /* PreserveLCSSA */);
Changed |=
simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
// Build up a worklist of inner-loops to vectorize. This is necessary as
// the act of vectorizing or partially unrolling a loop creates new loops