
Remove readnone from invariant.group.barrier

Summary:
The readnone attribute would allow CSE of two barriers with
the same argument, which is invalid, as this example shows:

    struct Base {
      virtual int foo() { return 42; }
    };

    struct Derived1 : Base {
      int foo() override { return 50; }
    };

    struct Derived2 : Base {
      int foo() override { return 100; }
    };

    void foo() {
      Base *x = new Base{};
      new (x) Derived1{};
      int a = std::launder(x)->foo();
      new (x) Derived2{};
      int b = std::launder(x)->foo();
    }

Here the two calls to std::launder each produce a call to
@llvm.invariant.group.barrier. If those barriers were CSE'd into one
call, devirtualization would resolve the second virtual call to
Derived1::foo() instead of Derived2::foo().
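
At the IR level the hazard looks roughly like this. The following is a
hand-written sketch (not actual Clang output); the plain i8 stores stand in
for the vptr stores performed by the placement news, and the two barrier
calls correspond to the two std::launder calls:

    define void @foo() {
      %x = alloca i8                                        ; simplified object storage
      store i8 1, i8* %x, !invariant.group !0               ; stands in for Derived1's vptr store
      %x1 = call i8* @llvm.invariant.group.barrier(i8* %x)
      ; virtual call through %x1 devirtualizes to Derived1::foo()
      store i8 2, i8* %x, !invariant.group !0               ; stands in for Derived2's vptr store
      %x2 = call i8* @llvm.invariant.group.barrier(i8* %x)
      ; virtual call through %x2 must see Derived2::foo(); if the intrinsic
      ; were readnone, CSE could fold %x2 into %x1 and lose that distinction
      ret void
    }

    declare i8* @llvm.invariant.group.barrier(i8*)

    !0 = !{}

With the intrinsic reading its pointer argument, the intervening store keeps
the two barrier calls distinct for CSE.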

Reviewers: chandlerc, dberlin, hfinkel

Subscribers: llvm-commits, rsmith, amharc

Differential Revision: https://reviews.llvm.org/D31531

llvm-svn: 300101
Piotr Padlewski 2017-04-12 20:45:12 +00:00
parent f54522eae6
commit cd98794503
3 changed files with 86 additions and 1 deletion

@@ -615,9 +615,16 @@ def int_invariant_end : Intrinsic<[],
                                      llvm_anyptr_ty],
                                     [IntrArgMemOnly, NoCapture<2>]>;

+// invariant.group.barrier can't be marked with 'readnone' (IntrNoMem),
+// because it would cause CSE of two barriers with the same argument.
+// Readonly and argmemonly says that barrier only reads its argument and
+// it can be CSE only if memory didn't change between 2 barriers call,
+// which is valid.
+// The argument also can't be marked with 'returned' attribute, because
+// it would remove barrier.
 def int_invariant_group_barrier : Intrinsic<[llvm_ptr_ty],
                                             [llvm_ptr_ty],
-                                            [IntrNoMem]>;
+                                            [IntrReadMem, IntrArgMemOnly]>;

 //===------------------------ Stackmap Intrinsics -------------------------===//
 //
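
After this change the intrinsic is declared readonly and argmemonly instead of
readnone. As a reference point, matching the Function Attrs CHECK lines in the
new regression test below, the IR declaration carries attributes along these
lines (shown here only for illustration):

    ; Barrier declaration as checked by the new test below; before this
    ; change the attribute group would have contained readnone instead.
    declare i8* @llvm.invariant.group.barrier(i8*) #0

    attributes #0 = { argmemonly nounwind readonly }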

@@ -16,6 +16,8 @@ define i32 @foo(i32* %a) {
   store i32 1, i32* @g, align 4
   %1 = bitcast i32* %a to i8*
+; CHECK: MemoryUse(2)
+; CHECK-NEXT: %a8 = call i8* @llvm.invariant.group.barrier(i8* %1)
   %a8 = call i8* @llvm.invariant.group.barrier(i8* %1)
   %a32 = bitcast i8* %a8 to i32*

@@ -33,6 +35,8 @@ define i32 @skipBarrier(i32* %a) {
   store i32 0, i32* %a, align 4, !invariant.group !0
   %1 = bitcast i32* %a to i8*
+; CHECK: MemoryUse(1)
+; CHECK-NEXT: %a8 = call i8* @llvm.invariant.group.barrier(i8* %1)
   %a8 = call i8* @llvm.invariant.group.barrier(i8* %1)
   %a32 = bitcast i8* %a8 to i32*

@@ -50,6 +54,8 @@ define i32 @skipBarrier2(i32* %a) {
   %v = load i32, i32* %a, align 4, !invariant.group !0
   %1 = bitcast i32* %a to i8*
+; CHECK: MemoryUse(liveOnEntry)
+; CHECK-NEXT: %a8 = call i8* @llvm.invariant.group.barrier(i8* %1)
   %a8 = call i8* @llvm.invariant.group.barrier(i8* %1)
   %a32 = bitcast i8* %a8 to i32*

@@ -79,6 +85,8 @@ define i32 @handleInvariantGroups(i32* %a) {
 ; CHECK-NEXT: store i32 1
   store i32 1, i32* @g, align 4
   %1 = bitcast i32* %a to i8*
+; CHECK: MemoryUse(2)
+; CHECK-NEXT: %a8 = call i8* @llvm.invariant.group.barrier(i8* %1)
   %a8 = call i8* @llvm.invariant.group.barrier(i8* %1)
   %a32 = bitcast i8* %a8 to i32*

@@ -135,6 +143,9 @@ entry:
 ; CHECK: 2 = MemoryDef(1)
 ; CHECK-NEXT: call void @clobber
   call void @clobber8(i8* %p)
+; CHECK: MemoryUse(2)
+; CHECK-NEXT: %after = call i8* @llvm.invariant.group.barrier(i8* %p)
   %after = call i8* @llvm.invariant.group.barrier(i8* %p)
   br i1 undef, label %Loop.Body, label %Loop.End

@@ -179,6 +190,9 @@ entry:
 ; CHECK: 2 = MemoryDef(1)
 ; CHECK-NEXT: call void @clobber
   call void @clobber8(i8* %p)
+; CHECK: MemoryUse(2)
+; CHECK-NEXT: %after = call i8* @llvm.invariant.group.barrier(i8* %p)
   %after = call i8* @llvm.invariant.group.barrier(i8* %p)
   br i1 undef, label %Loop.Body, label %Loop.End

@@ -238,6 +252,8 @@ entry:
 ; CHECK: 2 = MemoryDef(1)
 ; CHECK-NEXT: call void @clobber
   call void @clobber8(i8* %p)
+; CHECK: MemoryUse(2)
+; CHECK-NEXT: %after = call i8* @llvm.invariant.group.barrier(i8* %p)
   %after = call i8* @llvm.invariant.group.barrier(i8* %p)
   br i1 undef, label %Loop.Pre, label %Loop.End
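
The MemorySSA updates above follow from the new attributes: because the
barrier now reads its pointer argument, MemorySSA models each barrier call as
a MemoryUse of the memory state it may read from (or liveOnEntry when nothing
before it clobbers), rather than treating it as memory-transparent. A minimal
hand-annotated illustration, mirroring the CHECK lines above (the function
name @example is made up for this sketch):

    define void @example(i8* %p) {
      ; 1 = MemoryDef(liveOnEntry)
      store i8 7, i8* %p, !invariant.group !0
      ; MemoryUse(1) -- the barrier is tied to the store above, so it cannot
      ; be merged with a barrier that observes a different memory state
      %q = call i8* @llvm.invariant.group.barrier(i8* %p)
      ret void
    }

    declare i8* @llvm.invariant.group.barrier(i8*)

    !0 = !{}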

@@ -0,0 +1,62 @@
; RUN: opt -S -early-cse < %s | FileCheck %s
; RUN: opt -S -gvn < %s | FileCheck %s
; RUN: opt -S -newgvn < %s | FileCheck %s
; RUN: opt -S -O3 < %s | FileCheck %s

; These tests checks if passes with CSE functionality can do CSE on
; invariant.group.barrier, that is prohibited if there is a memory clobber
; between barriers call.

; CHECK-LABEL: define i8 @optimizable()
define i8 @optimizable() {
entry:
  %ptr = alloca i8
  store i8 42, i8* %ptr, !invariant.group !0
; CHECK: call i8* @llvm.invariant.group.barrier
  %ptr2 = call i8* @llvm.invariant.group.barrier(i8* %ptr)
; CHECK-NOT: call i8* @llvm.invariant.group.barrier
  %ptr3 = call i8* @llvm.invariant.group.barrier(i8* %ptr)
; CHECK: call void @clobber(i8* {{.*}}%ptr)
  call void @clobber(i8* %ptr)
; CHECK: call void @use(i8* {{.*}}%ptr2)
  call void @use(i8* %ptr2)
; CHECK: call void @use(i8* {{.*}}%ptr2)
  call void @use(i8* %ptr3)
; CHECK: load i8, i8* %ptr2, {{.*}}!invariant.group
  %v = load i8, i8* %ptr3, !invariant.group !0
  ret i8 %v
}

; CHECK-LABEL: define i8 @unoptimizable()
define i8 @unoptimizable() {
entry:
  %ptr = alloca i8
  store i8 42, i8* %ptr, !invariant.group !0
; CHECK: call i8* @llvm.invariant.group.barrier
  %ptr2 = call i8* @llvm.invariant.group.barrier(i8* %ptr)
  call void @clobber(i8* %ptr)
; CHECK: call i8* @llvm.invariant.group.barrier
  %ptr3 = call i8* @llvm.invariant.group.barrier(i8* %ptr)
; CHECK: call void @clobber(i8* {{.*}}%ptr)
  call void @clobber(i8* %ptr)
; CHECK: call void @use(i8* {{.*}}%ptr2)
  call void @use(i8* %ptr2)
; CHECK: call void @use(i8* {{.*}}%ptr3)
  call void @use(i8* %ptr3)
; CHECK: load i8, i8* %ptr3, {{.*}}!invariant.group
  %v = load i8, i8* %ptr3, !invariant.group !0
  ret i8 %v
}

declare void @use(i8* readonly)
declare void @clobber(i8*)

; CHECK: Function Attrs: argmemonly nounwind readonly
; CHECK-NEXT: declare i8* @llvm.invariant.group.barrier(i8*)
declare i8* @llvm.invariant.group.barrier(i8*)

!0 = !{}