
[PM] Fix a nasty bug in the new PM where we failed to properly
invalidate analyses when merging SCCs.

While I've added a bunch of testing of this, it takes something much
more like the inliner to really trigger it, as you need to have
partially-analyzed SCCs with updates at just the right time. So I've
added a direct test for this using the inliner and verifying the
domtree. Without the changes here, this test ends up finding a stale
dominator tree.

However, to handle this properly, we need to invalidate analyses
*before* merging the SCCs. After talking this through with Philip and
Sanjoy, they convinced me this was the right approach. To do this, we need
a callback mechanism when merging SCCs so we can observe the cycle that
will be merged before the merge happens. This API update ended up being
surprisingly easy.
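
To make the shape of that API concrete, here is a minimal sketch of the
new call site (the signature and the RC, N, CallTarget, and AM names are
taken from the diff below; the invalidation shown is deliberately
simplified to PreservedAnalyses::none(), whereas the actual change is more
careful and keeps function-level results alive):

    bool FormedCycle = RC->switchInternalEdgeToCall(
        N, *CallTarget, [&](ArrayRef<SCC *> MergedSCCs) {
          // The callback runs *before* the merge, so each SCC here is still
          // intact and its cached analyses can still be looked up and dropped.
          for (SCC *MergedC : MergedSCCs)
            AM.invalidate(*MergedC, PreservedAnalyses::none());
        });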

With this commit, the new PM passes the test-suite again. It hadn't
passed since MemorySSA was enabled for EarlyCSE, as that also finds this
bug very quickly.

llvm-svn: 307498
Chandler Carruth 2017-07-09 13:45:11 +00:00
parent 267b806fd9
commit fda9b703c9
5 changed files with 156 additions and 43 deletions


@@ -652,17 +652,23 @@ public:
   /// Make an existing internal ref edge into a call edge.
   ///
   /// This may form a larger cycle and thus collapse SCCs into TargetN's SCC.
-  /// If that happens, the deleted SCC pointers are returned. These SCCs are
-  /// not in a valid state any longer but the pointers will remain valid
-  /// until destruction of the parent graph instance for the purpose of
-  /// clearing cached information.
+  /// If that happens, the optional callback \p MergeCB will be invoked (if
+  /// provided) on the SCCs being merged away prior to actually performing
+  /// the merge. Note that this will never include the target SCC as that
+  /// will be the SCC that functions are merged into to resolve the cycle.
+  /// Once this function returns, these merged SCCs are not in a valid state
+  /// but the pointers will remain valid until destruction of the parent
+  /// graph instance for the purpose of clearing cached information. As
+  /// a convenience, this function also returns 'true' if a cycle was formed
+  /// and some SCCs were merged away.
   ///
   /// After this operation, both SourceN's SCC and TargetN's SCC may move
   /// position within this RefSCC's postorder list. Any SCCs merged are
   /// merged into the TargetN's SCC in order to preserve reachability analyses
   /// which took place on that SCC.
-  SmallVector<SCC *, 1> switchInternalEdgeToCall(Node &SourceN,
-                                                 Node &TargetN);
+  bool switchInternalEdgeToCall(
+      Node &SourceN, Node &TargetN,
+      function_ref<void(ArrayRef<SCC *> MergedSCCs)> MergeCB = {});
 
   /// Make an existing internal call edge between separate SCCs into a ref
   /// edge.
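
For callers that don't care which SCCs are merged away, MergeCB defaults
to an empty function_ref and the boolean result alone answers the
question; a small sketch under that assumption:

    // Sketch only: SourceN and TargetN are assumed to be nodes of this RefSCC
    // connected by an internal ref edge.
    if (RC.switchInternalEdgeToCall(SourceN, TargetN)) {
      // A cycle formed: the merged SCCs now live in TargetN's SCC, and their
      // old pointers remain usable only for clearing cached information.
    }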


@@ -570,25 +570,48 @@ LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
   // Otherwise we are switching an internal ref edge to a call edge. This
   // may merge away some SCCs, and we add those to the UpdateResult. We also
   // need to make sure to update the worklist in the event SCCs have moved
   // before the current one in the post-order sequence.
+  bool HasFunctionAnalysisProxy = false;
   auto InitialSCCIndex = RC->find(*C) - RC->begin();
-  auto InvalidatedSCCs = RC->switchInternalEdgeToCall(N, *CallTarget);
-  if (!InvalidatedSCCs.empty()) {
+  bool FormedCycle = RC->switchInternalEdgeToCall(
+      N, *CallTarget, [&](ArrayRef<SCC *> MergedSCCs) {
+        for (SCC *MergedC : MergedSCCs) {
+          assert(MergedC != &TargetC && "Cannot merge away the target SCC!");
+
+          HasFunctionAnalysisProxy |=
+              AM.getCachedResult<FunctionAnalysisManagerCGSCCProxy>(
+                  *MergedC) != nullptr;
+
+          // Mark that this SCC will no longer be valid.
+          UR.InvalidatedSCCs.insert(MergedC);
+
+          // FIXME: We should really do a 'clear' here to forcibly release
+          // memory, but we don't have a good way of doing that and
+          // preserving the function analyses.
+          auto PA = PreservedAnalyses::allInSet<AllAnalysesOn<Function>>();
+          PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
+          AM.invalidate(*MergedC, PA);
+        }
+      });
+
+  // If we formed a cycle by creating this call, we need to update more data
+  // structures.
+  if (FormedCycle) {
     C = &TargetC;
     assert(G.lookupSCC(N) == C && "Failed to update current SCC!");
 
+    // If one of the invalidated SCCs had a cached proxy to a function
+    // analysis manager, we need to create a proxy in the new current SCC as
+    // the invalidated SCCs had their functions moved.
+    if (HasFunctionAnalysisProxy)
+      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(*C, G);
+
     // Any analyses cached for this SCC are no longer precise as the shape
-    // has changed by introducing this cycle.
-    AM.invalidate(*C, PreservedAnalyses::none());
-
-    for (SCC *InvalidatedC : InvalidatedSCCs) {
-      assert(InvalidatedC != C && "Cannot invalidate the current SCC!");
-      UR.InvalidatedSCCs.insert(InvalidatedC);
-
-      // Also clear any cached analyses for the SCCs that are dead. This
-      // isn't really necessary for correctness but can release memory.
-      AM.clear(*InvalidatedC);
-    }
+    // has changed by introducing this cycle. However, we have taken care to
+    // update the proxies so it remains valid.
+    auto PA = PreservedAnalyses::allInSet<AllAnalysesOn<Function>>();
+    PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
+    AM.invalidate(*C, PA);
   }
 
   auto NewSCCIndex = RC->find(*C) - RC->begin();
   if (InitialSCCIndex < NewSCCIndex) {
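
The same three lines appear twice in this hunk: invalidate everything
cached at the SCC level while preserving every function-level analysis
plus the proxy that owns them. Factored out as a sketch (the helper name
is hypothetical, not part of the commit):

    // Hypothetical helper: drop SCC-level analysis results for C while
    // keeping function-level results (and their proxy) alive.
    static void invalidateSCCLevelOnly(CGSCCAnalysisManager &AM,
                                       LazyCallGraph::SCC &C) {
      auto PA = PreservedAnalyses::allInSet<AllAnalysesOn<Function>>();
      PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
      AM.invalidate(C, PA);
    }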


@@ -456,8 +456,10 @@ updatePostorderSequenceForEdgeInsertion(
   return make_range(SCCs.begin() + SourceIdx, SCCs.begin() + TargetIdx);
 }
 
-SmallVector<LazyCallGraph::SCC *, 1>
-LazyCallGraph::RefSCC::switchInternalEdgeToCall(Node &SourceN, Node &TargetN) {
+bool
+LazyCallGraph::RefSCC::switchInternalEdgeToCall(
+    Node &SourceN, Node &TargetN,
+    function_ref<void(ArrayRef<SCC *> MergedSCCs)> MergeCB) {
   assert(!(*SourceN)[TargetN].isCall() && "Must start with a ref edge!");
 
   SmallVector<SCC *, 1> DeletedSCCs;

@@ -475,7 +477,7 @@ LazyCallGraph::RefSCC::switchInternalEdgeToCall(Node &SourceN, Node &TargetN) {
   // we've just added more connectivity.
   if (&SourceSCC == &TargetSCC) {
     SourceN->setEdgeKind(TargetN, Edge::Call);
-    return DeletedSCCs;
+    return false; // No new cycle.
   }
 
   // At this point we leverage the postorder list of SCCs to detect when the

@@ -488,7 +490,7 @@ LazyCallGraph::RefSCC::switchInternalEdgeToCall(Node &SourceN, Node &TargetN) {
   int TargetIdx = SCCIndices[&TargetSCC];
   if (TargetIdx < SourceIdx) {
     SourceN->setEdgeKind(TargetN, Edge::Call);
-    return DeletedSCCs;
+    return false; // No new cycle.
   }
 
   // Compute the SCCs which (transitively) reach the source.

@@ -555,12 +557,16 @@ LazyCallGraph::RefSCC::switchInternalEdgeToCall(Node &SourceN, Node &TargetN) {
       SourceSCC, TargetSCC, SCCs, SCCIndices, ComputeSourceConnectedSet,
       ComputeTargetConnectedSet);
 
+  // Run the user's callback on the merged SCCs before we actually merge them.
+  if (MergeCB)
+    MergeCB(makeArrayRef(MergeRange.begin(), MergeRange.end()));
+
   // If the merge range is empty, then adding the edge didn't actually form any
   // new cycles. We're done.
   if (MergeRange.begin() == MergeRange.end()) {
     // Now that the SCC structure is finalized, flip the kind to call.
     SourceN->setEdgeKind(TargetN, Edge::Call);
-    return DeletedSCCs;
+    return false; // No new cycle.
   }
 
 #ifndef NDEBUG

@@ -596,8 +602,8 @@ LazyCallGraph::RefSCC::switchInternalEdgeToCall(Node &SourceN, Node &TargetN) {
   // Now that the SCC structure is finalized, flip the kind to call.
   SourceN->setEdgeKind(TargetN, Edge::Call);
 
-  // And we're done!
-  return DeletedSCCs;
+  // And we're done, but we did form a new cycle.
+  return true;
 }
 
 void LazyCallGraph::RefSCC::switchTrivialInternalEdgeToRef(Node &SourceN,


@@ -127,3 +127,80 @@ entry:
   ret void
 ; CHECK: ret void
 }
+
+; The 'test2_' prefixed code works to carefully trigger forming an SCC with
+; a dominator tree for one of the functions but not the other and without even
+; a function analysis manager proxy for the SCC that things get merged into.
+; Without proper handling when updating the call graph this will find a stale
+; dominator tree.
+
+@test2_global = external global i32, align 4
+
+define void @test2_hoge(i1 (i32*)* %arg) {
+; CHECK-LABEL: define void @test2_hoge(
+bb:
+  %tmp2 = call zeroext i1 %arg(i32* @test2_global)
+; CHECK: call zeroext i1 %arg(
+  br label %bb3
+
+bb3:
+  %tmp5 = call zeroext i1 %arg(i32* @test2_global)
+; CHECK: call zeroext i1 %arg(
+  br i1 %tmp5, label %bb3, label %bb6
+
+bb6:
+  ret void
+}
+
+define zeroext i1 @test2_widget(i32* %arg) {
+; CHECK-LABEL: define zeroext i1 @test2_widget(
+bb:
+  %tmp1 = alloca i8, align 1
+  %tmp2 = alloca i32, align 4
+  call void @test2_quux()
+; CHECK-NOT: call
+;
+; CHECK: call zeroext i1 @test2_widget(i32* @test2_global)
+; CHECK-NEXT: br label %[[NEW_BB:.*]]
+;
+; CHECK: [[NEW_BB]]:
+; CHECK-NEXT: call zeroext i1 @test2_widget(i32* @test2_global)
+;
+; CHECK: {{.*}}:
+
+  call void @test2_hoge.1(i32* %arg)
+; CHECK-NEXT: call void @test2_hoge.1(
+
+  %tmp4 = call zeroext i1 @test2_barney(i32* %tmp2)
+  %tmp5 = zext i1 %tmp4 to i32
+  store i32 %tmp5, i32* %tmp2, align 4
+  %tmp6 = call zeroext i1 @test2_barney(i32* null)
+  call void @test2_ham(i8* %tmp1)
+; CHECK: call void @test2_ham(
+
+  call void @test2_quux()
+; CHECK-NOT: call
+;
+; CHECK: call zeroext i1 @test2_widget(i32* @test2_global)
+; CHECK-NEXT: br label %[[NEW_BB:.*]]
+;
+; CHECK: [[NEW_BB]]:
+; CHECK-NEXT: call zeroext i1 @test2_widget(i32* @test2_global)
+;
+; CHECK: {{.*}}:
+  ret i1 true
+; CHECK-NEXT: ret i1 true
+}
+
+define internal void @test2_quux() {
+; CHECK-NOT: @test2_quux
+bb:
+  call void @test2_hoge(i1 (i32*)* @test2_widget)
+  ret void
+}
+
+declare void @test2_hoge.1(i32*)
+
+declare zeroext i1 @test2_barney(i32*)
+
+declare void @test2_ham(i8*)


@@ -1277,9 +1277,10 @@ TEST(LazyCallGraphTest, InternalEdgeMutation) {
   // be invalidated.
   LazyCallGraph::SCC &AC = *CG.lookupSCC(A);
   LazyCallGraph::SCC &CC = *CG.lookupSCC(C);
-  auto InvalidatedSCCs = RC.switchInternalEdgeToCall(A, C);
-  ASSERT_EQ(1u, InvalidatedSCCs.size());
-  EXPECT_EQ(&AC, InvalidatedSCCs[0]);
+  EXPECT_TRUE(RC.switchInternalEdgeToCall(A, C, [&](ArrayRef<LazyCallGraph::SCC *> MergedCs) {
+    ASSERT_EQ(1u, MergedCs.size());
+    EXPECT_EQ(&AC, MergedCs[0]);
+  }));
   EXPECT_EQ(2, CC.size());
   EXPECT_EQ(&CC, CG.lookupSCC(A));
   EXPECT_EQ(&CC, CG.lookupSCC(C));

@@ -1586,8 +1587,7 @@ TEST(LazyCallGraphTest, InternalRefEdgeToCall) {
   // Switch the ref edge from A -> D to a call edge. This should have no
   // effect as it is already in postorder and no new cycles are formed.
-  auto MergedCs = RC.switchInternalEdgeToCall(A, D);
-  EXPECT_EQ(0u, MergedCs.size());
+  EXPECT_FALSE(RC.switchInternalEdgeToCall(A, D));
 
   ASSERT_EQ(4, RC.size());
   EXPECT_EQ(&DC, &RC[0]);
   EXPECT_EQ(&BC, &RC[1]);

@@ -1596,8 +1596,7 @@ TEST(LazyCallGraphTest, InternalRefEdgeToCall) {
   // Switch B -> C to a call edge. This doesn't form any new cycles but does
   // require reordering the SCCs.
-  MergedCs = RC.switchInternalEdgeToCall(B, C);
-  EXPECT_EQ(0u, MergedCs.size());
+  EXPECT_FALSE(RC.switchInternalEdgeToCall(B, C));
 
   ASSERT_EQ(4, RC.size());
   EXPECT_EQ(&DC, &RC[0]);
   EXPECT_EQ(&CC, &RC[1]);

@@ -1605,9 +1604,10 @@
   EXPECT_EQ(&AC, &RC[3]);
 
   // Switch C -> B to a call edge. This forms a cycle and forces merging SCCs.
-  MergedCs = RC.switchInternalEdgeToCall(C, B);
-  ASSERT_EQ(1u, MergedCs.size());
-  EXPECT_EQ(&CC, MergedCs[0]);
+  EXPECT_TRUE(RC.switchInternalEdgeToCall(C, B, [&](ArrayRef<LazyCallGraph::SCC *> MergedCs) {
+    ASSERT_EQ(1u, MergedCs.size());
+    EXPECT_EQ(&CC, MergedCs[0]);
+  }));
   ASSERT_EQ(3, RC.size());
   EXPECT_EQ(&DC, &RC[0]);
   EXPECT_EQ(&BC, &RC[1]);

@@ -1720,8 +1720,7 @@ TEST(LazyCallGraphTest, InternalRefEdgeToCallNoCycleInterleaved) {
   // Switch C3 -> B1 to a call edge. This doesn't form any new cycles but does
   // require reordering the SCCs in the face of tricky internal node
   // structures.
-  auto MergedCs = RC.switchInternalEdgeToCall(C3, B1);
-  EXPECT_EQ(0u, MergedCs.size());
+  EXPECT_FALSE(RC.switchInternalEdgeToCall(C3, B1));
   ASSERT_EQ(8, RC.size());
   EXPECT_EQ(&DC, &RC[0]);
   EXPECT_EQ(&B3C, &RC[1]);

@@ -1852,10 +1851,12 @@ TEST(LazyCallGraphTest, InternalRefEdgeToCallBothPartitionAndMerge) {
   // C F C | |
   // \ / \ / |
   // G G |
-  auto MergedCs = RC.switchInternalEdgeToCall(F, B);
-  ASSERT_EQ(2u, MergedCs.size());
-  EXPECT_EQ(&FC, MergedCs[0]);
-  EXPECT_EQ(&DC, MergedCs[1]);
+  EXPECT_TRUE(RC.switchInternalEdgeToCall(
+      F, B, [&](ArrayRef<LazyCallGraph::SCC *> MergedCs) {
+        ASSERT_EQ(2u, MergedCs.size());
+        EXPECT_EQ(&FC, MergedCs[0]);
+        EXPECT_EQ(&DC, MergedCs[1]);
+      }));
   EXPECT_EQ(3, BC.size());
 
   // And make sure the postorder was updated.