
Revert "GVN-hoist: fix store past load dependence analysis (PR30216, PR30499)"

This CL didn't actually address the test case in PR30499, and clang
still crashes.

Also revert dependent change "Memory-SSA cleanup of clobbers interface, NFC"

Reverts r283965 and r283967.

llvm-svn: 284093
Reid Kleckner 2016-10-13 00:18:26 +00:00
parent 233216e34f
commit 95149fb393
5 changed files with 90 additions and 190 deletions
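For context, the hazard the reverted patch had tried to guard against: when both branches store to the same location, GVN-hoist may hoist the common store above the branch, which is unsound if one branch first loads that location. A condensed sketch of the deleted regression test below (the function name @Example is illustrative):

@B = external global i8*

define i8* @Example() {
  br i1 undef, label %if.then, label %if.else
if.then:
  store i8* null, i8** @B       ; both branches store null to @B ...
  ret i8* null
if.else:
  %1 = load i8*, i8** @B        ; ... but this load must see the prior value,
  store i8* null, i8** @B       ; so the common store must not be hoisted
  ret i8* %1                    ; above the branch (and past this load)
}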

include/llvm/Transforms/Utils/MemorySSA.h

@@ -974,10 +974,6 @@ inline upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair) {
inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); }
// Return true when MD may alias MU, return false otherwise.
bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
AliasAnalysis &AA);
} // end namespace llvm
#endif // LLVM_TRANSFORMS_UTILS_MEMORYSSA_H

lib/Transforms/Scalar/GVNHoist.cpp

@@ -19,12 +19,12 @@
// 2. geps when corresponding load/store cannot be hoisted.
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/MemorySSA.h"
@@ -55,10 +55,10 @@ static cl::opt<int> MaxDepthInBB(
cl::desc("Hoist instructions from the beginning of the BB up to the "
"maximum specified depth (default = 100, unlimited = -1)"));
static cl::opt<int>
MaxChainLength("gvn-hoist-max-chain-length", cl::Hidden, cl::init(10),
cl::desc("Maximum length of dependent chains to hoist "
"(default = 10, unlimited = -1)"));
static cl::opt<int> MaxChainLength(
"gvn-hoist-max-chain-length", cl::Hidden, cl::init(10),
cl::desc("Maximum length of dependent chains to hoist "
"(default = 10, unlimited = -1)"));
namespace {
@@ -89,7 +89,7 @@ public:
ADFS = DFSNumber.lookup(BA);
BDFS = DFSNumber.lookup(BB);
}
assert(ADFS && BDFS);
assert (ADFS && BDFS);
return ADFS < BDFS;
}
};
@@ -213,7 +213,7 @@ public:
for (const BasicBlock *BB : depth_first(&F.getEntryBlock())) {
DFSNumber[BB] = ++BBI;
unsigned I = 0;
for (auto &Inst : *BB)
for (auto &Inst: *BB)
DFSNumber[&Inst] = ++I;
}
@@ -239,7 +239,6 @@ public:
return Res;
}
private:
GVN::ValueTable VN;
DominatorTree *DT;
@@ -323,42 +322,38 @@ private:
/* Return true when I1 appears before I2 in the instructions of BB. */
bool firstInBB(const Instruction *I1, const Instruction *I2) {
assert(I1->getParent() == I2->getParent());
assert (I1->getParent() == I2->getParent());
unsigned I1DFS = DFSNumber.lookup(I1);
unsigned I2DFS = DFSNumber.lookup(I2);
assert(I1DFS && I2DFS);
assert (I1DFS && I2DFS);
return I1DFS < I2DFS;
}
// Return true when there are memory uses of Def in BB.
bool hasMemoryUse(const Instruction *NewPt, MemoryDef *Def,
const BasicBlock *BB) {
const MemorySSA::AccessList *Acc = MSSA->getBlockAccesses(BB);
if (!Acc)
return false;
Instruction *OldPt = Def->getMemoryInst();
// Return true when there are users of Def in BB.
bool hasMemoryUseOnPath(MemoryAccess *Def, const BasicBlock *BB,
const Instruction *OldPt) {
const BasicBlock *DefBB = Def->getBlock();
const BasicBlock *OldBB = OldPt->getParent();
const BasicBlock *NewBB = NewPt->getParent();
bool ReachedNewPt = false;
for (const MemoryAccess &MA : *Acc)
if (const MemoryUse *MU = dyn_cast<MemoryUse>(&MA)) {
Instruction *Insn = MU->getMemoryInst();
for (User *U : Def->users())
if (auto *MU = dyn_cast<MemoryUse>(U)) {
// FIXME: MU->getBlock() does not get updated when we move the instruction.
BasicBlock *UBB = MU->getMemoryInst()->getParent();
// Only analyze uses in BB.
if (BB != UBB)
continue;
// Do not check whether MU aliases Def when MU occurs after OldPt.
if (BB == OldBB && firstInBB(OldPt, Insn))
break;
// Do not check whether MU aliases Def when MU occurs before NewPt.
if (BB == NewBB) {
if (!ReachedNewPt) {
if (firstInBB(Insn, NewPt))
continue;
ReachedNewPt = true;
}
// A use in the same block as the Def is on the path.
if (UBB == DefBB) {
assert(MSSA->locallyDominates(Def, MU) && "def not dominating use");
return true;
}
if (defClobbersUseOrDef(Def, MU, *AA))
if (UBB != OldBB)
return true;
// It is only harmful to hoist when the use is before OldPt.
if (firstInBB(MU->getMemoryInst(), OldPt))
return true;
}
@@ -366,18 +361,17 @@ private:
}
// Return true when there are exception handling or loads of memory Def
// between Def and NewPt. This function is only called for stores: Def is
// the MemoryDef of the store to be hoisted.
// between OldPt and NewPt.
// Decrement by 1 NBBsOnAllPaths for each block between HoistPt and BB, and
// return true when the counter NBBsOnAllPaths reaches 0, except when it is
// initialized to -1 which is unlimited.
bool hasEHOrLoadsOnPath(const Instruction *NewPt, MemoryDef *Def,
int &NBBsOnAllPaths) {
bool hasEHOrLoadsOnPath(const Instruction *NewPt, const Instruction *OldPt,
MemoryAccess *Def, int &NBBsOnAllPaths) {
const BasicBlock *NewBB = NewPt->getParent();
const BasicBlock *OldBB = Def->getBlock();
const BasicBlock *OldBB = OldPt->getParent();
assert(DT->dominates(NewBB, OldBB) && "invalid path");
assert(DT->dominates(Def->getDefiningAccess()->getBlock(), NewBB) &&
assert(DT->dominates(Def->getBlock(), NewBB) &&
"def does not dominate new hoisting point");
// Walk all basic blocks reachable in depth-first iteration on the inverse
@@ -396,7 +390,7 @@ private:
return true;
// Check that we do not move a store past loads.
if (hasMemoryUse(NewPt, Def, *I))
if (hasMemoryUseOnPath(Def, *I, OldPt))
return true;
// Stop walk once the limit is reached.
@@ -479,7 +473,7 @@ private:
// Check for unsafe hoistings due to side effects.
if (K == InsKind::Store) {
if (hasEHOrLoadsOnPath(NewPt, dyn_cast<MemoryDef>(U), NBBsOnAllPaths))
if (hasEHOrLoadsOnPath(NewPt, OldPt, D, NBBsOnAllPaths))
return false;
} else if (hasEHOnPath(NewBB, OldBB, NBBsOnAllPaths))
return false;
@@ -653,8 +647,7 @@ private:
for (const Use &Op : I->operands())
if (const auto *Inst = dyn_cast<Instruction>(&Op))
if (!DT->dominates(Inst->getParent(), HoistPt)) {
if (const GetElementPtrInst *GepOp =
dyn_cast<GetElementPtrInst>(Inst)) {
if (const GetElementPtrInst *GepOp = dyn_cast<GetElementPtrInst>(Inst)) {
if (!allGepOperandsAvailable(GepOp, HoistPt))
return false;
// Gep is available if all operands of GepOp are available.
@@ -671,8 +664,7 @@ private:
void makeGepsAvailable(Instruction *Repl, BasicBlock *HoistPt,
const SmallVecInsn &InstructionsToHoist,
Instruction *Gep) const {
assert(allGepOperandsAvailable(Gep, HoistPt) &&
"GEP operands not available");
assert(allGepOperandsAvailable(Gep, HoistPt) && "GEP operands not available");
Instruction *ClonedGep = Gep->clone();
for (unsigned i = 0, e = Gep->getNumOperands(); i != e; ++i)
@@ -976,7 +968,8 @@ public:
};
} // namespace
PreservedAnalyses GVNHoistPass::run(Function &F, FunctionAnalysisManager &AM) {
PreservedAnalyses GVNHoistPass::run(Function &F,
FunctionAnalysisManager &AM) {
DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
AliasAnalysis &AA = AM.getResult<AAManager>(F);
MemoryDependenceResults &MD = AM.getResult<MemoryDependenceAnalysis>(F);

lib/Transforms/Utils/MemorySSA.cpp

@@ -169,6 +169,44 @@ template <> struct DenseMapInfo<MemoryLocOrCall> {
return LHS == RHS;
}
};
}
namespace {
struct UpwardsMemoryQuery {
// True if our original query started off as a call
bool IsCall;
// The pointer location we started the query with. This will be empty if
// IsCall is true.
MemoryLocation StartingLoc;
// This is the instruction we were querying about.
const Instruction *Inst;
// The MemoryAccess we actually got called with, used to test local domination
const MemoryAccess *OriginalAccess;
UpwardsMemoryQuery()
: IsCall(false), Inst(nullptr), OriginalAccess(nullptr) {}
UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
: IsCall(ImmutableCallSite(Inst)), Inst(Inst), OriginalAccess(Access) {
if (!IsCall)
StartingLoc = MemoryLocation::get(Inst);
}
};
static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
AliasAnalysis &AA) {
Instruction *Inst = MD->getMemoryInst();
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
switch (II->getIntrinsicID()) {
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), Loc);
default:
return false;
}
}
return false;
}
enum class Reorderability { Always, IfNoAlias, Never };
@@ -210,6 +248,17 @@ static Reorderability getLoadReorderability(const LoadInst *Use,
return Result;
}
static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysis &AA,
const Instruction *I) {
// If the memory can't be changed, then loads of the memory can't be
// clobbered.
//
// FIXME: We should handle invariant groups, as well. It's a bit harder,
// because we need to pay close attention to invariant group barriers.
return isa<LoadInst>(I) && (I->getMetadata(LLVMContext::MD_invariant_load) ||
AA.pointsToConstantMemory(I));
}
static bool instructionClobbersQuery(MemoryDef *MD,
const MemoryLocation &UseLoc,
const Instruction *UseInst,
@@ -254,62 +303,6 @@ static bool instructionClobbersQuery(MemoryDef *MD,
return AA.getModRefInfo(DefInst, UseLoc) & MRI_Mod;
}
// Return true when MD may alias MU, return false otherwise.
bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
AliasAnalysis &AA) {
Instruction *Insn = MU->getMemoryInst();
return instructionClobbersQuery(MD, MemoryLocation::get(Insn), Insn, AA);
}
}
namespace {
struct UpwardsMemoryQuery {
// True if our original query started off as a call
bool IsCall;
// The pointer location we started the query with. This will be empty if
// IsCall is true.
MemoryLocation StartingLoc;
// This is the instruction we were querying about.
const Instruction *Inst;
// The MemoryAccess we actually got called with, used to test local domination
const MemoryAccess *OriginalAccess;
UpwardsMemoryQuery()
: IsCall(false), Inst(nullptr), OriginalAccess(nullptr) {}
UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
: IsCall(ImmutableCallSite(Inst)), Inst(Inst), OriginalAccess(Access) {
if (!IsCall)
StartingLoc = MemoryLocation::get(Inst);
}
};
static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
AliasAnalysis &AA) {
Instruction *Inst = MD->getMemoryInst();
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
switch (II->getIntrinsicID()) {
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), Loc);
default:
return false;
}
}
return false;
}
static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysis &AA,
const Instruction *I) {
// If the memory can't be changed, then loads of the memory can't be
// clobbered.
//
// FIXME: We should handle invariant groups, as well. It's a bit harder,
// because we need to pay close attention to invariant group barriers.
return isa<LoadInst>(I) && (I->getMetadata(LLVMContext::MD_invariant_load) ||
AA.pointsToConstantMemory(I));
}
static bool instructionClobbersQuery(MemoryDef *MD, MemoryUse *MU,
const MemoryLocOrCall &UseMLOC,
AliasAnalysis &AA) {

test/Transforms/GVNHoist/pr30216.ll (deleted)

@@ -1,52 +0,0 @@
; RUN: opt -S -gvn-hoist < %s | FileCheck %s
; Make sure the two stores @B do not get hoisted past the load @B.
; CHECK-LABEL: define i8* @Foo
; CHECK: store
; CHECK: store
; CHECK: load
; CHECK: store
@A = external global i8
@B = external global i8*
define i8* @Foo() {
store i8 0, i8* @A
br i1 undef, label %if.then, label %if.else
if.then:
store i8* null, i8** @B
ret i8* null
if.else:
%1 = load i8*, i8** @B
store i8* null, i8** @B
ret i8* %1
}
; Make sure the two stores @B do not get hoisted past the store @GlobalVar.
; CHECK-LABEL: define i8* @Fun
; CHECK: store
; CHECK: store
; CHECK: store
; CHECK: store
; CHECK: load
@GlobalVar = internal global i8 0
define i8* @Fun() {
store i8 0, i8* @A
br i1 undef, label %if.then, label %if.else
if.then:
store i8* null, i8** @B
ret i8* null
if.else:
store i8 0, i8* @GlobalVar
store i8* null, i8** @B
%1 = load i8*, i8** @B
ret i8* %1
}

test/Transforms/GVNHoist/pr30499.ll (deleted)

@@ -1,30 +0,0 @@
; RUN: opt -S -gvn-hoist < %s
define void @_Z3fn2v() #0 {
entry:
%a = alloca i8*, align 8
%b = alloca i32, align 4
%0 = load i8*, i8** %a, align 8
store i8 0, i8* %0, align 1
%1 = load i32, i32* %b, align 4
%tobool = icmp ne i32 %1, 0
br i1 %tobool, label %if.then, label %if.end
if.then: ; preds = %entry
%call = call i64 @_Z3fn1v() #2
%conv = trunc i64 %call to i32
store i32 %conv, i32* %b, align 4
br label %if.end
if.end: ; preds = %if.then, %entry
%2 = load i8*, i8** %a, align 8
store i8 0, i8* %2, align 1
ret void
}
; Function Attrs: nounwind readonly
declare i64 @_Z3fn1v() #1
attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind readonly "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #2 = { nounwind readonly }