1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-10-19 02:52:53 +02:00

[MemorySSA] Make the visitor more careful with calls.

Prior to this patch, the MemorySSA caching visitor would cache all
calls that it visited. When paired with phi optimization, this can be
problematic. Consider:

define void @foo() {
  ; 1 = MemoryDef(liveOnEntry)
  call void @clobberFunction()
  br i1 undef, label %if.end, label %if.then

if.then:
  ; MemoryUse(??)
  call void @readOnlyFunction()
  ; 2 = MemoryDef(1)
  call void @clobberFunction()
  br label %if.end

if.end:
  ; 3 = MemoryPhi(...)
  ; MemoryUse(?)
  call void @readOnlyFunction()
  ret void
}

When optimizing MemoryUse(?), we visit defs 1 and 2, so we note to
cache them later. We ultimately end up not being able to optimize
past the Phi, so we set MemoryUse(?) to point to the Phi. We then
cache the clobbering call for def 1 to be the Phi.

This commit changes this behavior so that we wipe out any calls
added to VisitedCalls while visiting the defs of a phi we couldn't
optimize.

Aside: With this patch, we now can bootstrap clang/LLVM without a
single MemorySSA verifier failure. Woohoo. :)

llvm-svn: 264820
This commit is contained in:
George Burgess IV 2016-03-30 03:12:08 +00:00
parent d7d8eed23b
commit 5b40ed6e26
2 changed files with 38 additions and 5 deletions

View File

@ -763,11 +763,11 @@ struct CachingMemorySSAWalker::UpwardsMemoryQuery {
const Instruction *Inst;
// Set of visited Instructions for this query.
DenseSet<MemoryAccessPair> Visited;
// Set of visited call accesses for this query. This is separated out because
// you can always cache and lookup the result of call queries (IE when IsCall
// == true) for every call in the chain. The calls have no AA location
// Vector of visited call accesses for this query. This is separated out
// because you can always cache and lookup the result of call queries (IE when
// IsCall == true) for every call in the chain. The calls have no AA location
// associated with them with them, and thus, no context dependence.
SmallPtrSet<const MemoryAccess *, 32> VisitedCalls;
SmallVector<const MemoryAccess *, 32> VisitedCalls;
// The MemoryAccess we actually got called with, used to test local domination
const MemoryAccess *OriginalAccess;
// The Datalayout for the module we started in
@ -862,7 +862,7 @@ bool CachingMemorySSAWalker::instructionClobbersQuery(
// If this is a call, mark it for caching
if (ImmutableCallSite(DefMemoryInst))
Q.VisitedCalls.insert(MD);
Q.VisitedCalls.push_back(MD);
ModRefInfo I = AA->getModRefInfo(DefMemoryInst, ImmutableCallSite(Q.Inst));
return I != MRI_NoModRef;
}
@ -918,6 +918,8 @@ MemoryAccessPair CachingMemorySSAWalker::UpwardsDFSWalk(
break;
}
std::size_t InitialVisitedCallSize = Q.VisitedCalls.size();
// Recurse on PHI nodes, since we need to change locations.
// TODO: Allow graphtraits on pairs, which would turn this whole function
// into a normal single depth first walk.
@ -947,6 +949,10 @@ MemoryAccessPair CachingMemorySSAWalker::UpwardsDFSWalk(
if (!ModifyingAccess) {
assert(FirstDef && "Found a Phi with no upward defs?");
ModifyingAccess = FirstDef;
} else {
// If we can't optimize this Phi, then we can't safely cache any of the
// calls we visited when trying to optimize it. Wipe them out now.
Q.VisitedCalls.resize(InitialVisitedCallSize);
}
break;
}

View File

@ -24,3 +24,30 @@ define i32 @foo() {
%3 = add i32 %2, %1
ret i32 %3
}
declare void @readEverything() readonly
declare void @clobberEverything()
; CHECK-LABEL: define void @bar
define void @bar() {
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: call void @clobberEverything()
call void @clobberEverything()
br i1 undef, label %if.end, label %if.then
if.then:
; CHECK: MemoryUse(1)
; CHECK-NEXT: call void @readEverything()
call void @readEverything()
; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: call void @clobberEverything()
call void @clobberEverything()
br label %if.end
if.end:
; CHECK: 3 = MemoryPhi({%0,1},{if.then,2})
; CHECK: MemoryUse(3)
; CHECK-NEXT: call void @readEverything()
call void @readEverything()
ret void
}