llvm-mirror/test/Transforms/Inline/devirtualize-3.ll
Arthur Eubanks f4ee2f482f Reland [CGSCC] Detect devirtualization in more cases
The devirtualization wrapper misses cases where, when it wraps a pass
manager, an individual pass in that manager devirtualizes an indirect
call created by a previous pass. For example, inlining may create a new
indirect call that instcombine then devirtualizes. The wrapper currently
does not see this because it only checks CGSCC edges at the very
beginning and end of the pass (manager) it wraps.
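
A minimal sketch of the missed pattern (hypothetical @target/@apply/@caller,
not part of the test below): once @apply is inlined into @caller, instcombine
can forward the store to the load, turning the inlined indirect call into a
direct call to @target.

  define internal void @target() {
  entry:
    ret void
  }

  define internal void @apply(void ()** %slot) {
  entry:
    %fp = load void ()*, void ()** %slot
    call void %fp()          ; indirect call; callee unknown here
    ret void
  }

  define void @caller() {
  entry:
    %slot = alloca void ()*
    store void ()* @target, void ()** %slot
    ; Inlining @apply here exposes the store/load pair; instcombine then
    ; rewrites the inlined "call void %fp()" into "call void @target()".
    call void @apply(void ()** %slot)
    ret void
  }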

This fixes some tests that test this exact behavior in the legacy PM.

Instead of checking the WeakTrackingVHs for devirtualized CallBases only
at the very beginning and end of the wrapped pass, perform the check
every time updateCGAndAnalysisManagerForPass() is called.

Running check-llvm and check-clang with
-abort-on-max-devirt-iterations-reached turned on by default shows no
failures outside of tests that specifically test it, so the change does
not needlessly rerun passes more than necessary. (By default, the NPM
-O2/-O3 pipelines run the inliner/function simplification pipeline under
a devirtualization repeater pass up to 4 times.)

http://llvm-compile-time-tracker.com/?config=O3&stat=instructions&remote=aeubanks
shows a ~1% compile-time regression on 7zip. I looked at it and saw that
devirtualization that was not previously caught was indeed happening, so
the CGSCC pipeline now reruns on some SCCs, which is working as intended.

The initial land assumed the CallBase WeakTrackingVHs would always hold
CallBases, but a tracked call can be RAUW'd with undef: WeakTrackingVH
follows RAUW, so a handle may end up holding an undef value rather than
a CallBase, and the cast has to account for that.

Reviewed By: asbirlea

Differential Revision: https://reviews.llvm.org/D89587
2020-11-23 21:28:59 -08:00

; RUN: opt -basic-aa -S -O2 < %s | FileCheck %s
; RUN: opt -aa-pipeline=basic-aa -S -passes='default<O2>' < %s | FileCheck %s
; PR5009
; CHECK: define i32 @main()
; CHECK-NEXT: entry:
; CHECK-NEXT: call void @exit(i32 38)
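; Fully inlining the continuation chain folds main() to a constant:
; @bar adds 5 to 14 and @foo2 multiplies by the stored 2, so 2 * (14 + 5) = 38.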

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-darwin10.0.0"

%struct.cont_t = type { void (i8*, i32)*, i8* }
%struct.foo_sf_t = type { %struct.cont_t*, i32 }

define i32 @main() nounwind ssp {
entry:
  %cont = alloca %struct.cont_t, align 8 ; <%struct.cont_t*> [#uses=4]
  %tmp = getelementptr inbounds %struct.cont_t, %struct.cont_t* %cont, i32 0, i32 0 ; <void (i8*, i32)**> [#uses=1]
  %tmp1 = getelementptr inbounds %struct.cont_t, %struct.cont_t* %cont, i32 0, i32 0 ; <void (i8*, i32)**> [#uses=2]
  store void (i8*, i32)* bitcast (void (%struct.cont_t*, i32)* @quit to void (i8*, i32)*), void (i8*, i32)** %tmp1
  %tmp2 = load void (i8*, i32)*, void (i8*, i32)** %tmp1 ; <void (i8*, i32)*> [#uses=1]
  store void (i8*, i32)* %tmp2, void (i8*, i32)** %tmp
  %tmp3 = getelementptr inbounds %struct.cont_t, %struct.cont_t* %cont, i32 0, i32 1 ; <i8**> [#uses=1]
  store i8* null, i8** %tmp3
  call void @foo(%struct.cont_t* %cont)
  ret i32 0
}

; @quit is the final continuation: it forwards its return code to exit().
define internal void @quit(%struct.cont_t* %cont, i32 %rcode) nounwind ssp {
entry:
  call void @exit(i32 %rcode) noreturn
  unreachable
}

; @foo packs its continuation %c and a multiplier (2) into a foo_sf_t frame,
; then calls @bar with @foo2 as the next continuation.
define internal void @foo(%struct.cont_t* %c) nounwind ssp {
entry:
  %sf = alloca %struct.foo_sf_t, align 8 ; <%struct.foo_sf_t*> [#uses=3]
  %next = alloca %struct.cont_t, align 8 ; <%struct.cont_t*> [#uses=3]
  %tmp = getelementptr inbounds %struct.foo_sf_t, %struct.foo_sf_t* %sf, i32 0, i32 0 ; <%struct.cont_t**> [#uses=1]
  store %struct.cont_t* %c, %struct.cont_t** %tmp
  %tmp2 = getelementptr inbounds %struct.foo_sf_t, %struct.foo_sf_t* %sf, i32 0, i32 1 ; <i32*> [#uses=1]
  store i32 2, i32* %tmp2
  %tmp4 = getelementptr inbounds %struct.cont_t, %struct.cont_t* %next, i32 0, i32 0 ; <void (i8*, i32)**> [#uses=1]
  store void (i8*, i32)* bitcast (void (%struct.foo_sf_t*, i32)* @foo2 to void (i8*, i32)*), void (i8*, i32)** %tmp4
  %tmp5 = getelementptr inbounds %struct.cont_t, %struct.cont_t* %next, i32 0, i32 1 ; <i8**> [#uses=1]
  %conv = bitcast %struct.foo_sf_t* %sf to i8* ; <i8*> [#uses=1]
  store i8* %conv, i8** %tmp5
  call void @bar(%struct.cont_t* %next, i32 14)
  ret void
}

; @foo2 multiplies the stored count (2) by %y and invokes the continuation
; recorded in the frame (here @quit) through an indirect call.
define internal void @foo2(%struct.foo_sf_t* %sf, i32 %y) nounwind ssp {
entry:
  %tmp1 = getelementptr inbounds %struct.foo_sf_t, %struct.foo_sf_t* %sf, i32 0, i32 0 ; <%struct.cont_t**> [#uses=1]
  %tmp2 = load %struct.cont_t*, %struct.cont_t** %tmp1 ; <%struct.cont_t*> [#uses=1]
  %tmp3 = getelementptr inbounds %struct.cont_t, %struct.cont_t* %tmp2, i32 0, i32 0 ; <void (i8*, i32)**> [#uses=1]
  %tmp4 = load void (i8*, i32)*, void (i8*, i32)** %tmp3 ; <void (i8*, i32)*> [#uses=1]
  %tmp6 = getelementptr inbounds %struct.foo_sf_t, %struct.foo_sf_t* %sf, i32 0, i32 0 ; <%struct.cont_t**> [#uses=1]
  %tmp7 = load %struct.cont_t*, %struct.cont_t** %tmp6 ; <%struct.cont_t*> [#uses=1]
  %conv = bitcast %struct.cont_t* %tmp7 to i8* ; <i8*> [#uses=1]
  %tmp9 = getelementptr inbounds %struct.foo_sf_t, %struct.foo_sf_t* %sf, i32 0, i32 1 ; <i32*> [#uses=1]
  %tmp10 = load i32, i32* %tmp9 ; <i32> [#uses=1]
  %mul = mul i32 %tmp10, %y ; <i32> [#uses=1]
  call void %tmp4(i8* %conv, i32 %mul)
  ret void
}

; @bar adds 5 to %y and calls the continuation stored in %c with the result.
define internal void @bar(%struct.cont_t* %c, i32 %y) nounwind ssp {
entry:
  %tmp1 = getelementptr inbounds %struct.cont_t, %struct.cont_t* %c, i32 0, i32 0 ; <void (i8*, i32)**> [#uses=1]
  %tmp2 = load void (i8*, i32)*, void (i8*, i32)** %tmp1 ; <void (i8*, i32)*> [#uses=1]
  %tmp4 = getelementptr inbounds %struct.cont_t, %struct.cont_t* %c, i32 0, i32 1 ; <i8**> [#uses=1]
  %tmp5 = load i8*, i8** %tmp4 ; <i8*> [#uses=1]
  %add = add nsw i32 %y, 5 ; <i32> [#uses=1]
  call void %tmp2(i8* %tmp5, i32 %add)
  ret void
}

declare void @exit(i32) noreturn