
[MemorySSA] Be more conservative when traversing MemoryPhis.

I think we need to be even more conservative when traversing memory
phis, to make sure we catch any loop-carried dependences.

This approach updates fillInCurrentPair to use unknown sizes for
locations when we walk over a phi, unless the location is guaranteed to
be loop-invariant for any possible loop. Using an unknown size for
locations should ensure we catch all memory accesses to locations after
the given memory location, which includes loop-carried dependences.
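
For illustration only (this example is not from the patch or its tests; the function and variable names are made up), the kind of loop-carried dependence this is about looks like:

    // A store in one iteration clobbers the location loaded in a later
    // iteration. Within a single iteration p[i] and p[i + 1] do not overlap,
    // but across iterations they name the same memory.
    void loop_carried(int *p, int n) {
      for (int i = 0; i + 1 < n; ++i) {
        int use = p[i];   // load from the current element
        p[i + 1] = use;   // clobbers what the next iteration will load
      }
    }

Because the queried pointer changes from iteration to iteration, comparing only the fixed-size location of p[i] against the store can miss the dependence; widening the location to an unknown size after walking the phi keeps the store visible as a clobber.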

Reviewed By: asbirlea

Differential Revision: https://reviews.llvm.org/D87778
Florian Hahn 2020-09-17 22:09:53 +01:00
parent fd6a8d455b
commit 57f2631c85
2 changed files with 43 additions and 9 deletions


@@ -88,6 +88,7 @@
 #include "llvm/IR/DerivedUser.h"
 #include "llvm/IR/Dominators.h"
 #include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
 #include "llvm/IR/Type.h"
 #include "llvm/IR/Use.h"
 #include "llvm/IR/User.h"
@@ -1217,27 +1218,61 @@ public:
   BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); }

 private:
+  /// Returns true if \p Ptr is guaranteed to be loop invariant for any possible
+  /// loop. In particular, this guarantees that it only references a single
+  /// MemoryLocation during execution of the containing function.
+  bool IsGuaranteedLoopInvariant(Value *Ptr) const {
+    auto IsGuaranteedLoopInvariantBase = [](Value *Ptr) {
+      Ptr = Ptr->stripPointerCasts();
+      if (auto *I = dyn_cast<Instruction>(Ptr)) {
+        if (isa<AllocaInst>(Ptr))
+          return true;
+        return false;
+      }
+      return true;
+    };
+
+    Ptr = Ptr->stripPointerCasts();
+    if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
+      return IsGuaranteedLoopInvariantBase(GEP->getPointerOperand()) &&
+             GEP->hasAllConstantIndices();
+    }
+    return IsGuaranteedLoopInvariantBase(Ptr);
+  }
+
   void fillInCurrentPair() {
     CurrentPair.first = *DefIterator;
+    CurrentPair.second = Location;
     if (WalkingPhi && Location.Ptr) {
+      // Mark size as unknown, if the location is not guaranteed to be
+      // loop-invariant for any possible loop in the function. Setting the size
+      // to unknown guarantees that any memory accesses that access locations
+      // after the pointer are considered as clobbers, which is important to
+      // catch loop carried dependences.
+      if (Location.Ptr &&
+          !IsGuaranteedLoopInvariant(const_cast<Value *>(Location.Ptr)))
+        CurrentPair.second = Location.getWithNewSize(LocationSize::unknown());
       PHITransAddr Translator(
           const_cast<Value *>(Location.Ptr),
           OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr);
       if (!Translator.PHITranslateValue(OriginalAccess->getBlock(),
                                         DefIterator.getPhiArgBlock(), DT,
                                         true)) {
-        if (Translator.getAddr() != Location.Ptr) {
-          CurrentPair.second = Location.getWithNewPtr(Translator.getAddr());
+        Value *TransAddr = Translator.getAddr();
+        if (TransAddr != Location.Ptr) {
+          CurrentPair.second = CurrentPair.second.getWithNewPtr(TransAddr);
+
+          if (TransAddr &&
+              !IsGuaranteedLoopInvariant(const_cast<Value *>(TransAddr)))
+            CurrentPair.second =
+                CurrentPair.second.getWithNewSize(LocationSize::unknown());
+
           if (PerformedPhiTranslation)
             *PerformedPhiTranslation = true;
           return;
         }
       } else {
         CurrentPair.second = Location.getWithNewSize(LocationSize::unknown());
         return;
       }
     }
-    CurrentPair.second = Location;
   }

   MemoryAccessPair CurrentPair;
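
To make the new check concrete, here is a minimal standalone sketch of the classification it performs (assuming the usual LLVM headers; the helper name and the case-by-case comments are mine, not text from the patch):

    #include "llvm/IR/Instructions.h" // AllocaInst
    #include "llvm/IR/Operator.h"     // GEPOperator
    using namespace llvm;

    // Pointers treated as guaranteed loop invariant:
    //  - an alloca (always names the same stack slot),
    //  - a non-instruction base such as an argument, global or constant,
    //  - a GEP with all-constant indices over such a base.
    // Everything else (pointer phis, loaded pointers, variable GEPs, ...) is
    // not, so fillInCurrentPair widens its location to an unknown size.
    static bool sketchIsGuaranteedLoopInvariant(Value *Ptr) {
      auto IsInvariantBase = [](Value *Base) {
        Base = Base->stripPointerCasts();
        if (isa<Instruction>(Base))
          return isa<AllocaInst>(Base);
        return true;
      };
      Ptr = Ptr->stripPointerCasts();
      if (auto *GEP = dyn_cast<GEPOperator>(Ptr))
        return IsInvariantBase(GEP->getPointerOperand()) &&
               GEP->hasAllConstantIndices();
      return IsInvariantBase(Ptr);
    }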


@@ -481,8 +481,7 @@ define void @another_loop_clobber() {
 ; CHECK-NEXT: ; 4 = MemoryPhi({entry,1},{cond.read,3})
 ; CHECK-LABEL: cond.read:
-; NOLIMIT: ; MemoryUse(liveOnEntry)
-; LIMIT: ; MemoryUse(4)
+; CHECK: ; MemoryUse(4)
 ; CHECK-NEXT: %use = load i32, i32* %ptr.1, align 4
 ; CHECK-NEXT: ; 2 = MemoryDef(4)
 ; CHECK-NEXT: %c.2 = call i1 @cond(i32 %use)
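
For context, the CHECK lines above encode what a MemorySSA client now sees for this test: the load's defining access is the MemoryPhi (4) in both test configurations (the NOLIMIT and LIMIT runs), instead of liveOnEntry in the unlimited one. A rough sketch of querying this programmatically, assuming an already-built MemorySSA for the test function (the helper name is mine):

    #include "llvm/Analysis/MemorySSA.h"
    #include "llvm/IR/Instructions.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    // Ask the walker for the clobbering access of a load such as
    // `%use = load i32, i32* %ptr.1` from the test above.
    static void inspectUse(MemorySSA &MSSA, LoadInst &Use) {
      MemoryAccess *Clobber = MSSA.getWalker()->getClobberingMemoryAccess(&Use);
      if (MSSA.isLiveOnEntryDef(Clobber))
        errs() << "no clobber before the load\n";
      else
        Clobber->print(errs()); // prints e.g. the MemoryPhi definition
    }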