Add the beginnings of an update API for preserving MemorySSA
Summary: This adds the beginning of an update API to preserve MemorySSA. In particular, this patch adds a way to remove MemorySSA accesses when instructions are deleted. It also adds relevant unit testing infrastructure for MemorySSA's API. (There is an actual user of this API; I will make that diff dependent on this one. In practice, a ton of opt passes remove memory instructions, so it's hopefully an obviously useful API :P)

Reviewers: hfinkel, reames, george.burgess.iv

Subscribers: llvm-commits

Differential Revision: http://reviews.llvm.org/D17157

llvm-svn: 262362
This commit is contained in: parent 1c312f2b01, commit 07e4e63d22
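The commit message above describes removeMemoryAccess as the entry point for keeping MemorySSA up to date when instructions are deleted. As a minimal sketch of how a client pass might use it (not part of this commit; the eraseDeadStore helper and the DeadStore name are illustrative assumptions):

#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/MemorySSA.h"
using namespace llvm;

// Hypothetical helper for a transform that already maintains MemorySSA.
// Remove the store's MemoryDef before erasing the instruction itself, so
// MemorySSA is never left holding a dangling access.
static void eraseDeadStore(MemorySSA &MSSA, StoreInst *DeadStore) {
  if (MemoryAccess *MA = MSSA.getMemoryAccess(DeadStore))
    MSSA.removeMemoryAccess(MA);
  DeadStore->eraseFromParent();
}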
include/llvm/Transforms/Utils/MemorySSA.h

@@ -516,6 +516,14 @@ public:
    return It == PerBlockAccesses.end() ? nullptr : It->second.get();
  }

  /// \brief Remove a MemoryAccess from MemorySSA, including updating all
  /// definitions and uses.
  /// This should be called when a memory instruction that has a MemoryAccess
  /// associated with it is erased from the program. For example, if a store or
  /// load is simply erased (not replaced), removeMemoryAccess should be called
  /// on the MemoryAccess for that store/load.
  void removeMemoryAccess(MemoryAccess *);

  enum InsertionPlace { Beginning, End };

  /// \brief Given two memory accesses in the same basic block, determine
@@ -545,6 +553,7 @@ private:
  bool dominatesUse(const MemoryAccess *, const MemoryAccess *) const;
  MemoryAccess *createNewAccess(Instruction *, bool ignoreNonMemory = false);
  MemoryAccess *findDominatingDef(BasicBlock *, enum InsertionPlace);
  void removeFromLookups(MemoryAccess *);

  MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *);
  void renamePass(DomTreeNode *, MemoryAccess *IncomingVal,
@@ -663,6 +672,12 @@ public:
  /// starting from the use side of the memory def.
  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                                  MemoryLocation &) = 0;
  /// \brief Given a memory access, invalidate anything this walker knows about
  /// that access.
  /// This API is used by walkers that store information to perform basic cache
  /// invalidation. This will be called by MemorySSA at appropriate times for
  /// the walker it uses or returns.
  virtual void invalidateInfo(MemoryAccess *) {}

protected:
  MemorySSA *MSSA;
@@ -720,6 +735,7 @@ public:
  MemoryAccess *getClobberingMemoryAccess(const Instruction *) override;
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                          MemoryLocation &) override;
  void invalidateInfo(MemoryAccess *) override;

protected:
  struct UpwardsMemoryQuery;
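The invalidateInfo hook above is what lets MemorySSA cooperate with walkers that keep their own caches. A rough sketch of a custom walker honoring that contract could look like the following (purely illustrative: the class name, the DenseMap cache layout, and the assumption that MemorySSAWalker's constructor takes a MemorySSA* are mine, not part of this patch):

#include "llvm/ADT/DenseMap.h"
#include "llvm/Transforms/Utils/MemorySSA.h"
using namespace llvm;

// Skeleton for illustration only: a walker with a private clobber cache.
// Only invalidateInfo is defined; the query methods are declared to satisfy
// the interface and their walking logic is omitted.
class ExampleCachingWalker final : public MemorySSAWalker {
  DenseMap<const MemoryAccess *, MemoryAccess *> ClobberCache;

public:
  ExampleCachingWalker(MemorySSA *M) : MemorySSAWalker(M) {}

  // When MemorySSA is about to remove MA, forget anything cached about it so
  // later queries cannot return a pointer to a destroyed access.
  void invalidateInfo(MemoryAccess *MA) override { ClobberCache.erase(MA); }

  MemoryAccess *getClobberingMemoryAccess(const Instruction *) override;
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                          MemoryLocation &) override;
};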
lib/Transforms/Utils/MemorySSA.cpp

@@ -427,6 +427,76 @@ bool MemorySSA::dominatesUse(const MemoryAccess *Replacer,
  return true;
}

/// \brief If all arguments of a MemoryPHI are defined by the same incoming
/// argument, return that argument.
static MemoryAccess *onlySingleValue(MemoryPhi *MP) {
  MemoryAccess *MA = nullptr;

  for (auto &Arg : MP->operands()) {
    if (!MA)
      MA = cast<MemoryAccess>(Arg);
    else if (MA != Arg)
      return nullptr;
  }
  return MA;
}

/// \brief Properly remove \p MA from all of MemorySSA's lookup tables.
///
/// Because of the way the intrusive list and use lists work, it is important to
/// do removal in the right order.
void MemorySSA::removeFromLookups(MemoryAccess *MA) {
  assert(MA->use_empty() &&
         "Trying to remove memory access that still has uses");
  if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(MA))
    MUD->setDefiningAccess(nullptr);
  // Invalidate our walker's cache if necessary
  if (!isa<MemoryUse>(MA))
    Walker->invalidateInfo(MA);
  // The call below to erase will destroy MA, so we can't change the order we
  // are doing things here
  Value *MemoryInst;
  if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
    MemoryInst = MUD->getMemoryInst();
  } else {
    MemoryInst = MA->getBlock();
  }
  ValueToMemoryAccess.erase(MemoryInst);

  auto &Accesses = PerBlockAccesses.find(MA->getBlock())->second;
  Accesses->erase(MA);
  if (Accesses->empty()) {
    PerBlockAccesses.erase(MA->getBlock());
  }
}

void MemorySSA::removeMemoryAccess(MemoryAccess *MA) {
  assert(!isLiveOnEntryDef(MA) && "Trying to remove the live on entry def");
  // We can only delete phi nodes if they have no uses, or we can replace all
  // uses with a single definition.
  MemoryAccess *NewDefTarget = nullptr;
  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(MA)) {
    // Note that it is sufficient to know that all edges of the phi node have
    // the same argument. If they do, by the definition of dominance frontiers
    // (which we used to place this phi), that argument must dominate this phi,
    // and thus, must dominate the phi's uses, and so we will not hit the assert
    // below.
    NewDefTarget = onlySingleValue(MP);
    assert((NewDefTarget || MP->use_empty()) &&
           "We can't delete this memory phi");
  } else {
    NewDefTarget = cast<MemoryUseOrDef>(MA)->getDefiningAccess();
  }

  // Re-point the uses at our defining access
  if (!MA->use_empty())
    MA->replaceAllUsesWith(NewDefTarget);

  // The call below to erase will destroy MA, so we can't change the order we
  // are doing things here
  removeFromLookups(MA);
}

void MemorySSA::print(raw_ostream &OS) const {
  MemorySSAAnnotatedWriter Writer(this);
  F.print(OS, &Writer);
@@ -712,6 +782,43 @@ struct CachingMemorySSAWalker::UpwardsMemoryQuery {
        OriginalAccess(nullptr), DL(nullptr) {}
};

void CachingMemorySSAWalker::invalidateInfo(MemoryAccess *MA) {

  // TODO: We can do much better cache invalidation with differently stored
  // caches. For now, for MemoryUses, we simply remove them
  // from the cache, and kill the entire call/non-call cache for everything
  // else. The problem is for phis or defs, currently we'd need to follow use
  // chains down and invalidate anything below us in the chain that currently
  // terminates at this access.

  // See if this is a MemoryUse, if so, just remove the cached info. MemoryUse
  // is by definition never a barrier, so nothing in the cache could point to
  // this use. In that case, we only need invalidate the info for the use
  // itself.

  if (MemoryUse *MU = dyn_cast<MemoryUse>(MA)) {
    UpwardsMemoryQuery Q;
    Instruction *I = MU->getMemoryInst();
    Q.IsCall = bool(ImmutableCallSite(I));
    Q.Inst = I;
    if (!Q.IsCall)
      Q.StartingLoc = MemoryLocation::get(I);
    doCacheRemove(MA, Q, Q.StartingLoc);
    return;
  }
  // If it is not a use, the best we can do right now is destroy the cache.
  bool IsCall = false;

  if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
    Instruction *I = MUD->getMemoryInst();
    IsCall = bool(ImmutableCallSite(I));
  }
  if (IsCall)
    CachedUpwardsClobberingCall.clear();
  else
    CachedUpwardsClobberingAccess.clear();
}

void CachingMemorySSAWalker::doCacheRemove(const MemoryAccess *M,
                                           const UpwardsMemoryQuery &Q,
                                           const MemoryLocation &Loc) {
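The TODO in invalidateInfo describes the finer-grained scheme this commit does not yet implement: for defs and phis, follow the use chains downward and invalidate only the accesses whose cached results could terminate at the removed access. A hedged sketch of what that traversal might look like (the helper name and the per-entry cache it presumes are hypothetical, not part of this patch):

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Transforms/Utils/MemorySSA.h"
using namespace llvm;

// Illustrative sketch of the TODO above: collect MA and every access reachable
// through its use chains. A finer-grained cache could then drop just these
// entries instead of clearing the whole call/non-call cache.
static void collectAccessesBelow(MemoryAccess *MA,
                                 SmallPtrSetImpl<MemoryAccess *> &Below) {
  SmallVector<MemoryAccess *, 8> Worklist(1, MA);
  while (!Worklist.empty()) {
    MemoryAccess *Cur = Worklist.pop_back_val();
    if (!Below.insert(Cur).second)
      continue;
    for (User *U : Cur->users())
      Worklist.push_back(cast<MemoryAccess>(U));
  }
}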
unittests/Transforms/Utils/CMakeLists.txt

@@ -9,5 +9,6 @@ add_llvm_unittest(UtilsTests
  Cloning.cpp
  IntegerDivision.cpp
  Local.cpp
  MemorySSA.cpp
  ValueMapperTest.cpp
  )
unittests/Transforms/Utils/MemorySSA.cpp (new file, 87 lines)
@@ -0,0 +1,87 @@
//===- MemorySSA.cpp - Unit tests for MemorySSA ---------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/IR/DataLayout.h"
#include "llvm/Transforms/Utils/MemorySSA.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "gtest/gtest.h"

using namespace llvm;

TEST(MemorySSA, RemoveMemoryAccess) {
  LLVMContext &C(getGlobalContext());
  IRBuilder<> B(C);
  DataLayout DL("e-i64:64-f80:128-n8:16:32:64-S128");
  TargetLibraryInfoImpl TLII;
  TargetLibraryInfo TLI(TLII);

  // We create a diamond where there is a store on one side, and then a load
  // after the merge point. This enables us to test a bunch of different
  // removal cases.
  std::unique_ptr<Function> F(Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F"));
  BasicBlock *Entry(BasicBlock::Create(C, "", F.get()));
  BasicBlock *Left(BasicBlock::Create(C, "", F.get()));
  BasicBlock *Right(BasicBlock::Create(C, "", F.get()));
  BasicBlock *Merge(BasicBlock::Create(C, "", F.get()));
  B.SetInsertPoint(Entry);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left);
  Argument *PointerArg = &*F->arg_begin();
  StoreInst *StoreInst = B.CreateStore(B.getInt8(16), PointerArg);
  BranchInst::Create(Merge, Left);
  BranchInst::Create(Merge, Right);
  B.SetInsertPoint(Merge);
  LoadInst *LoadInst = B.CreateLoad(PointerArg);

  std::unique_ptr<MemorySSA> MSSA(new MemorySSA(*F));
  std::unique_ptr<DominatorTree> DT(new DominatorTree(*F));
  std::unique_ptr<AssumptionCache> AC(new AssumptionCache(*F));
  AAResults *AA = new AAResults();
  BasicAAResult *BAA = new BasicAAResult(DL, TLI, *AC, &*DT);
  AA->addAAResult(*BAA);
  MemorySSAWalker *Walker = MSSA->buildMemorySSA(AA, &*DT);
  // Before, the load will be a use of a phi<store, liveonentry>. It should be
  // the same after.
  MemoryUse *LoadAccess = cast<MemoryUse>(MSSA->getMemoryAccess(LoadInst));
  MemoryDef *StoreAccess = cast<MemoryDef>(MSSA->getMemoryAccess(StoreInst));
  MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
  EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
  // The load is currently clobbered by one of the phi arguments, so the walker
  // should determine the clobbering access as the phi.
  EXPECT_EQ(DefiningAccess, Walker->getClobberingMemoryAccess(LoadInst));
  MSSA->removeMemoryAccess(StoreAccess);
  MSSA->verifyMemorySSA();
  // After the removeaccess, let's see if we got the right accesses
  // The load should still point to the phi ...
  EXPECT_EQ(DefiningAccess, LoadAccess->getDefiningAccess());
  // but we should now get live on entry for the clobbering definition of the
  // load, since it will walk past the phi node since every argument is the
  // same.
  EXPECT_TRUE(
      MSSA->isLiveOnEntryDef(Walker->getClobberingMemoryAccess(LoadInst)));

  // The phi should now be a two entry phi with two live on entry defs.
  for (const auto &Op : DefiningAccess->operands()) {
    MemoryAccess *Operand = cast<MemoryAccess>(&*Op);
    EXPECT_TRUE(MSSA->isLiveOnEntryDef(Operand));
  }

  // Now we try to remove the single valued phi
  MSSA->removeMemoryAccess(DefiningAccess);
  MSSA->verifyMemorySSA();
  // Now the load should be a load of live on entry.
  EXPECT_TRUE(MSSA->isLiveOnEntryDef(LoadAccess->getDefiningAccess()));
}
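The test covers removing a MemoryDef and a MemoryPhi. A natural follow-on case, sketched here as a hypothetical extension appended to the same TEST body (it reuses the MSSA and LoadInst variables above and is not part of this commit), is removing the load's MemoryUse before erasing the load itself:

  // Hypothetical extra coverage: drop the MemoryUse for the load, verify, and
  // only then erase the instruction, mirroring how a client would do it.
  MemoryAccess *LoadUseAccess = MSSA->getMemoryAccess(LoadInst);
  MSSA->removeMemoryAccess(LoadUseAccess);
  MSSA->verifyMemorySSA();
  // The lookup tables should no longer know about the load.
  EXPECT_TRUE(MSSA->getMemoryAccess(LoadInst) == nullptr);
  LoadInst->eraseFromParent();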