[GVN] non-functional code movement
Summary: Move some code around, in preparation for later fixes to the
non-integral addrspace handling (D59661).

Patch By Jameson Nash <jameson@juliacomputing.com>

Reviewed By: reames, loladiro

Differential Revision: https://reviews.llvm.org/D59729

llvm-svn: 362853
parent f47f332c7d
commit 3460115c23
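For orientation before the diff: the main refactor hoists the DepInfo.getInst() query to the top of GVN::AnalyzeLoadAvailability, so the clobber path and the def path share a single DepInst local instead of each querying (or re-declaring) it. Below is a minimal sketch of that pattern, using hypothetical stand-in types rather than the real llvm::Instruction and llvm::MemDepResult:

// Stand-ins for illustration only; not the real LLVM classes.
struct Instruction { bool IsClobber = false; };

struct MemDepResult {
  Instruction *Inst = nullptr;
  Instruction *getInst() const { return Inst; }
  bool isClobber() const { return Inst && Inst->IsClobber; }
  bool isDef() const { return Inst && !Inst->IsClobber; }
};

// After the commit: query the dependence instruction once and reuse it
// on every path, instead of calling DepInfo.getInst() in each branch
// and re-declaring a local in the def-only code further down.
bool analyzeLoadAvailabilitySketch(const MemDepResult &DepInfo) {
  Instruction *DepInst = DepInfo.getInst(); // hoisted to the top
  if (DepInfo.isClobber()) {
    // clobber path: inspect DepInst as a store / load / mem-intrinsic ...
    return false;
  }
  // def path: previously began with its own
  // 'Instruction *DepInst = DepInfo.getInst();', now deleted.
  return DepInst != nullptr;
}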
--- a/lib/Transforms/Scalar/GVN.cpp
+++ b/lib/Transforms/Scalar/GVN.cpp
@@ -859,11 +859,12 @@ bool GVN::AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo,
   const DataLayout &DL = LI->getModule()->getDataLayout();
 
+  Instruction *DepInst = DepInfo.getInst();
   if (DepInfo.isClobber()) {
     // If the dependence is to a store that writes to a superset of the bits
     // read by the load, we can extract the bits we need for the load from the
     // stored value.
-    if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
+    if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
       // Can't forward from non-atomic to atomic without violating memory model.
       if (Address && LI->isAtomic() <= DepSI->isAtomic()) {
         int Offset =
@@ -879,7 +880,7 @@ bool GVN::AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo,
     // load i32* P
     // load i8* (P+1)
     // if we have this, replace the later with an extraction from the former.
-    if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInfo.getInst())) {
+    if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
       // If this is a clobber and L is the first instruction in its block, then
       // we have the first instruction in the entry block.
       // Can't forward from non-atomic to atomic without violating memory model.
@@ -896,7 +897,7 @@ bool GVN::AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo,
 
     // If the clobbering value is a memset/memcpy/memmove, see if we can
     // forward a value on from it.
-    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
+    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInst)) {
      if (Address && !LI->isAtomic()) {
        int Offset = analyzeLoadFromClobberingMemInst(LI->getType(), Address,
                                                      DepMI, DL);
@@ -910,8 +911,7 @@ bool GVN::AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo,
     LLVM_DEBUG(
         // fast print dep, using operator<< on instruction is too slow.
         dbgs() << "GVN: load "; LI->printAsOperand(dbgs());
-        Instruction *I = DepInfo.getInst();
-        dbgs() << " is clobbered by " << *I << '\n';);
+        dbgs() << " is clobbered by " << *DepInst << '\n';);
     if (ORE->allowExtraAnalysis(DEBUG_TYPE))
       reportMayClobberedLoad(LI, DepInfo, DT, ORE);
 
@@ -919,8 +919,6 @@ bool GVN::AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo,
   }
   assert(DepInfo.isDef() && "follows from above");
 
-  Instruction *DepInst = DepInfo.getInst();
-
   // Loading the allocation -> undef.
   if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI) ||
       // Loading immediately after lifetime begin -> undef.
@@ -939,9 +937,8 @@ bool GVN::AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo,
     // Reject loads and stores that are to the same address but are of
     // different types if we have to. If the stored value is larger or equal to
     // the loaded value, we can reuse it.
-    if (S->getValueOperand()->getType() != LI->getType() &&
-        !canCoerceMustAliasedValueToLoad(S->getValueOperand(),
-                                         LI->getType(), DL))
+    if (!canCoerceMustAliasedValueToLoad(S->getValueOperand(), LI->getType(),
+                                         DL))
       return false;
 
     // Can't forward from non-atomic to atomic without violating memory model.
@@ -956,8 +953,7 @@ bool GVN::AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo,
     // If the types mismatch and we can't handle it, reject reuse of the load.
     // If the stored value is larger or equal to the loaded value, we can reuse
     // it.
-    if (LD->getType() != LI->getType() &&
-        !canCoerceMustAliasedValueToLoad(LD, LI->getType(), DL))
+    if (!canCoerceMustAliasedValueToLoad(LD, LI->getType(), DL))
       return false;
 
     // Can't forward from non-atomic to atomic without violating memory model.
--- a/lib/Transforms/Utils/VNCoercion.cpp
+++ b/lib/Transforms/Utils/VNCoercion.cpp
@@ -14,13 +14,17 @@ namespace VNCoercion {
 /// Return true if coerceAvailableValueToLoadType will succeed.
 bool canCoerceMustAliasedValueToLoad(Value *StoredVal, Type *LoadTy,
                                      const DataLayout &DL) {
+  Type *StoredTy = StoredVal->getType();
+  if (StoredTy == LoadTy)
+    return true;
+
   // If the loaded or stored value is an first class array or struct, don't try
   // to transform them. We need to be able to bitcast to integer.
-  if (LoadTy->isStructTy() || LoadTy->isArrayTy() ||
-      StoredVal->getType()->isStructTy() || StoredVal->getType()->isArrayTy())
+  if (LoadTy->isStructTy() || LoadTy->isArrayTy() || StoredTy->isStructTy() ||
+      StoredTy->isArrayTy())
     return false;
 
-  uint64_t StoreSize = DL.getTypeSizeInBits(StoredVal->getType());
+  uint64_t StoreSize = DL.getTypeSizeInBits(StoredTy);
 
   // The store size must be byte-aligned to support future type casts.
   if (llvm::alignTo(StoreSize, 8) != StoreSize)
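The hunk above is what allows the two GVN.cpp call sites to drop their explicit type-equality guards: canCoerceMustAliasedValueToLoad now returns true immediately when the stored and loaded types are identical, so equality is handled inside the callee. A hedged sketch of that contract, with a toy enum standing in for the real llvm::Type:

enum class Ty { I8, I32, Struct }; // toy stand-in for llvm::Type

static bool isAggregate(Ty T) { return T == Ty::Struct; }

bool canCoerceSketch(Ty StoredTy, Ty LoadTy) {
  if (StoredTy == LoadTy)
    return true; // the new early exit: identical types always succeed
  if (isAggregate(StoredTy) || isAggregate(LoadTy))
    return false; // first-class aggregates can't be bitcast to integer
  // ... size and byte-alignment checks as in the real function ...
  return true;
}

// Call site before: if (StoredTy != LoadTy && !canCoerceSketch(StoredTy, LoadTy)) reject;
// Call site after:  if (!canCoerceSketch(StoredTy, LoadTy)) reject;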
@@ -306,7 +310,7 @@ int analyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
     return -1;
 
   GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src, DL));
-  if (!GV || !GV->isConstant())
+  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
     return -1;
 
   // See if the access is within the bounds of the transfer.
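One note on the final hunk: adding hasDefinitiveInitializer() tightens the guard beyond isConstant(), since a global can be declared constant yet carry no initializer visible in this module (for example, an external declaration, or an interposable definition that may change at link time), leaving no bytes to forward from. A hedged sketch of the distinction, with stand-in fields rather than the real llvm::GlobalVariable API:

struct GlobalVariableSketch {
  bool Constant = false;     // declared 'constant'
  bool HasInit = false;      // an initializer is present in this module
  bool Interposable = false; // the definition may be replaced at link time
  bool isConstant() const { return Constant; }
  bool hasDefinitiveInitializer() const { return HasInit && !Interposable; }
};

// Mirrors the tightened bail-out in analyzeLoadFromClobberingMemInst:
// forwarding a load from a memcpy source requires initializer bytes the
// compiler can actually read.
int forwardOffsetSketch(const GlobalVariableSketch *GV) {
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
    return -1; // can't fold the load; no readable constant bytes
  // ... otherwise compute the load's offset within the transfer ...
  return 0;
}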