1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-11-22 18:54:02 +01:00

[MemorySSA] Pass DT to the upward iterator for proper PhiTranslation.

Summary:
A valid DominatorTree is needed to do PhiTranslation.
Before this patch, a MemoryUse could be optimized to an access outside a loop, while the address it loads from is modified in the loop.
This can lead to a miscompile.

Reviewers: george.burgess.iv

Subscribers: Prazek, hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D79068
This commit is contained in:
Alina Sbirlea 2020-04-28 22:57:19 -07:00
parent 832c6c8b84
commit dfbd550d47
3 changed files with 59 additions and 11 deletions

View File

@@ -723,6 +723,8 @@ public:
return cast_or_null<MemoryPhi>(ValueToMemoryAccess.lookup(cast<Value>(BB)));
}
DominatorTree &getDomTree() const { return *DT; }
void dump() const;
void print(raw_ostream &) const;
@@ -1179,9 +1181,9 @@ class upward_defs_iterator
using BaseT = upward_defs_iterator::iterator_facade_base;
public:
upward_defs_iterator(const MemoryAccessPair &Info)
upward_defs_iterator(const MemoryAccessPair &Info, DominatorTree *DT)
: DefIterator(Info.first), Location(Info.second),
OriginalAccess(Info.first) {
OriginalAccess(Info.first), DT(DT) {
CurrentPair.first = nullptr;
WalkingPhi = Info.first && isa<MemoryPhi>(Info.first);
@@ -1220,12 +1222,13 @@ private:
const_cast<Value *>(Location.Ptr),
OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr);
if (!Translator.PHITranslateValue(OriginalAccess->getBlock(),
DefIterator.getPhiArgBlock(), nullptr,
false))
DefIterator.getPhiArgBlock(), DT,
false)) {
if (Translator.getAddr() != Location.Ptr) {
CurrentPair.second = Location.getWithNewPtr(Translator.getAddr());
return;
}
}
}
CurrentPair.second = Location;
}
@@ -1235,17 +1238,19 @@ private:
MemoryLocation Location;
MemoryAccess *OriginalAccess = nullptr;
bool WalkingPhi = false;
DominatorTree *DT = nullptr;
};
inline upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair) {
return upward_defs_iterator(Pair);
inline upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair,
DominatorTree &DT) {
return upward_defs_iterator(Pair, &DT);
}
inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); }
inline iterator_range<upward_defs_iterator>
upward_defs(const MemoryAccessPair &Pair) {
return make_range(upward_defs_begin(Pair), upward_defs_end());
upward_defs(const MemoryAccessPair &Pair, DominatorTree &DT) {
return make_range(upward_defs_begin(Pair, DT), upward_defs_end());
}
/// Walks the defining accesses of MemoryDefs. Stops after we hit something that

View File

@@ -466,7 +466,8 @@ checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
assert(isa<MemoryPhi>(MA));
Worklist.append(
upward_defs_begin({const_cast<MemoryAccess *>(MA), MAP.second}),
upward_defs_begin({const_cast<MemoryAccess *>(MA), MAP.second},
MSSA.getDomTree()),
upward_defs_end());
}
}
@@ -595,8 +596,8 @@ template <class AliasAnalysisType> class ClobberWalker {
void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
ListIndex PriorNode) {
auto UpwardDefs = make_range(upward_defs_begin({Phi, Paths[PriorNode].Loc}),
upward_defs_end());
auto UpwardDefs = make_range(
upward_defs_begin({Phi, Paths[PriorNode].Loc}, DT), upward_defs_end());
for (const MemoryAccessPair &P : UpwardDefs) {
PausedSearches.push_back(Paths.size());
Paths.emplace_back(P.second, P.first, PriorNode);

View File

@@ -193,3 +193,45 @@ if.end:
br label %while.cond
}
; Regression test for PhiTranslation across a loop backedge: the loads below
; read through %arrayidx2/%arrayidx4, whose pointers depend on the loop
; induction variable, and %m_i_strides is stored to inside the loop (if.then).
; Their defining access must therefore be the loop-header MemoryPhi (4) —
; optimizing either MemoryUse past the backedge (e.g. to liveOnEntry) would
; be a miscompile.
; CHECK-LABEL: define i32 @use_not_optimized_due_to_backedge
define i32 @use_not_optimized_due_to_backedge(i32* nocapture %m_i_strides, i32* nocapture readonly %eval_left_dims) {
entry:
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: store i32 1, i32* %m_i_strides, align 4
store i32 1, i32* %m_i_strides, align 4
br label %for.body
for.cond.cleanup: ; preds = %for.inc
ret i32 %m_i_size.1
for.body: ; preds = %entry, %for.inc
; CHECK: 4 = MemoryPhi({entry,1},{for.inc,3})
; CHECK-NEXT: %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ]
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ]
%m_i_size.022 = phi i32 [ 1, %entry ], [ %m_i_size.1, %for.inc ]
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%cmp1 = icmp eq i64 %indvars.iv, 0
%arrayidx2 = getelementptr inbounds i32, i32* %m_i_strides, i64 %indvars.iv
; CHECK: MemoryUse(4) MayAlias
; CHECK-NEXT: %0 = load i32, i32* %arrayidx2, align 4
%0 = load i32, i32* %arrayidx2, align 4
%arrayidx4 = getelementptr inbounds i32, i32* %eval_left_dims, i64 %indvars.iv
; CHECK: MemoryUse(4) MayAlias
; CHECK-NEXT: %1 = load i32, i32* %arrayidx4, align 4
%1 = load i32, i32* %arrayidx4, align 4
%mul = mul nsw i32 %1, %0
br i1 %cmp1, label %if.then, label %for.inc
if.then: ; preds = %for.body
%arrayidx7 = getelementptr inbounds i32, i32* %m_i_strides, i64 %indvars.iv.next
; CHECK: 2 = MemoryDef(4)
; CHECK-NEXT: store i32 %mul, i32* %arrayidx7, align 4
store i32 %mul, i32* %arrayidx7, align 4
br label %for.inc
for.inc: ; preds = %for.body, %if.then
; CHECK: 3 = MemoryPhi({for.body,4},{if.then,2})
; CHECK-NEXT: %m_i_size.1 = phi i32 [ %m_i_size.022, %if.then ], [ %mul, %for.body ]
%m_i_size.1 = phi i32 [ %m_i_size.022, %if.then ], [ %mul, %for.body ]
br i1 %cmp1, label %for.body, label %for.cond.cleanup
}