
Revert "Revert "Temporarily do not drop volatile stores before unreachable""

This reverts commit 52aeacfbf5ce5f949efe0eae029e56db171ea1f7.
There isn't full agreement on a path forward yet, but there is agreement that
this shouldn't land as-is.  See discussion on https://reviews.llvm.org/D105338

Also reverts unreviewed "[clang] Improve `-Wnull-dereference` diag to be more in-line with reality"
This reverts commit f4877c78c0fc98be47b926439bbfe33d5e1d1b6d.

And all the related changes to tests:
This reverts commit 9a0152799f8e4a59e0483728c9f11c8a7805616f.
This reverts commit 3f7c9cc27422f7302cf5a683eeb3978e6cb84270.
This reverts commit 329f8197ef59f9bd23328b52d623ba768b51dbb2.
This reverts commit aa9f58cc2c48ca6cfc853a2467cd775dc7622746.
This reverts commit 2df37d5ddd38091aafbb7d338660e58836f4ac80.
This reverts commit a72a44181264fd83e05be958c2712cbd4560aba7.
Nico Weber 2021-07-09 11:39:40 -04:00
parent b2f9152456
commit 312c986138
10 changed files with 97 additions and 61 deletions

@@ -2888,6 +2888,14 @@ Instruction *InstCombinerImpl::visitUnreachableInst(UnreachableInst &I) {
// Otherwise, this instruction can be freely erased,
// even if it is not side-effect free.
// Temporarily disable removal of volatile stores preceding unreachable,
// pending a potential LangRef change permitting volatile stores to trap.
// TODO: Either remove this code, or properly integrate the check into
// isGuaranteedToTransferExecutionToSuccessor().
if (auto *SI = dyn_cast<StoreInst>(Prev))
if (SI->isVolatile())
return nullptr; // Can not drop this instruction. We're done here.
// A value may still have uses before we process it here (for example, in
// another unreachable block), so convert those to poison.
replaceInstUsesWith(*Prev, PoisonValue::get(Prev->getType()));
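For context, here is a condensed paraphrase of the walk-back loop this hunk sits in, as a self-contained sketch (the helper name dropInstsBeforeUnreachable is invented for illustration; the real logic lives inside InstCombinerImpl::visitUnreachableInst): instructions guaranteed to fall through into the unreachable are erased one at a time, and the restored early-out stops the walk at any volatile store.

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Erase the instructions that provably fall through into `unreachable`.
static void dropInstsBeforeUnreachable(UnreachableInst &UI) {
  while (Instruction *Prev = UI.getPrevNonDebugInstruction()) {
    if (!isGuaranteedToTransferExecutionToSuccessor(Prev))
      break; // Prev may trap or diverge; keep it and everything above it.
    if (auto *SI = dyn_cast<StoreInst>(Prev))
      if (SI->isVolatile())
        break; // The restored check: never drop a volatile store.
    // Any remaining uses (e.g. in another unreachable block) become poison.
    Prev->replaceAllUsesWith(PoisonValue::get(Prev->getType()));
    Prev->eraseFromParent();
  }
}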

@@ -2302,6 +2302,9 @@ static bool markAliveBlocks(Function &F,
// that they should be changed to unreachable by passes that can't
// modify the CFG.
// Don't touch volatile stores.
if (SI->isVolatile()) continue;
Value *Ptr = SI->getOperand(1);
if (isa<UndefValue>(Ptr) ||
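The hunk above is truncated mid-condition; as context, here is a paraphrased, self-contained sketch of what markAliveBlocks does with such stores (the helper name rewriteUBStoreToUnreachable is invented, and the UseLLVMTrap argument reflects the changeToUnreachable signature of this era, so treat both as assumptions): a non-volatile store through an undef pointer is immediate UB, so the block is cut off with unreachable at that point.

#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

// Replace everything from an UB store (pointer operand is undef) onward
// with `unreachable`; volatile stores are exempt again after this revert.
static bool rewriteUBStoreToUnreachable(BasicBlock &BB) {
  for (Instruction &I : BB) {
    auto *SI = dyn_cast<StoreInst>(&I);
    if (!SI)
      continue;
    if (SI->isVolatile())
      continue; // Restored guard: don't touch volatile stores.
    if (isa<UndefValue>(SI->getPointerOperand())) {
      changeToUnreachable(SI, /*UseLLVMTrap=*/false);
      return true;
    }
  }
  return false;
}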

@@ -4672,6 +4672,14 @@ bool SimplifyCFGOpt::simplifyUnreachable(UnreachableInst *UI) {
// Otherwise, this instruction can be freely erased,
// even if it is not side-effect free.
// Temporarily disable removal of volatile stores preceding unreachable,
// pending a potential LangRef change permitting volatile stores to trap.
// TODO: Either remove this code, or properly integrate the check into
// isGuaranteedToTransferExecutionToSuccessor().
if (auto *SI = dyn_cast<StoreInst>(&*BBI))
if (SI->isVolatile())
break; // Can not drop this instruction. We're done here.
// Note that deleting EH's here is in fact okay, although it involves a bit
// of subtle reasoning. If this inst is an EH, all the predecessors of this
// block will be the unwind edges of Invoke/CatchSwitch/CleanupReturn,
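The TODO in this hunk (and its twin in the InstCombine hunk above) suggests folding the bail-out into isGuaranteedToTransferExecutionToSuccessor() itself. Purely as a hypothetical sketch of that direction (the wrapper name is invented; this is not how LLVM resolved the TODO), it would amount to:

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// If volatile stores were allowed to trap, the generic query would answer
// "no" for them and both callers could drop their local special cases.
static bool transfersExecutionConsideringVolatile(const Instruction *I) {
  if (auto *SI = dyn_cast<StoreInst>(I))
    if (SI->isVolatile())
      return false; // Pessimistically assume a volatile store may trap.
  return isGuaranteedToTransferExecutionToSuccessor(I); // existing logic
}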

@@ -4,7 +4,7 @@
; Long branch is assumed because the block has a higher alignment
; requirement than the function.
define i32 @invert_bcc_block_align_higher_func(i32 %x, i32 %y, i32* %dst) align 4 #0 {
define i32 @invert_bcc_block_align_higher_func(i32 %x, i32 %y) align 4 #0 {
; CHECK-LABEL: invert_bcc_block_align_higher_func:
; CHECK: ; %bb.0: ; %common.ret
; CHECK-NEXT: cmp w0, w1
@@ -12,17 +12,17 @@ define i32 @invert_bcc_block_align_higher_func(i32 %x, i32 %y, i32* %dst) align
; CHECK-NEXT: mov w9, #42
; CHECK-NEXT: cset w0, ne
; CHECK-NEXT: csel w8, w9, w8, eq
; CHECK-NEXT: str w8, [x2]
; CHECK-NEXT: str w8, [x8]
; CHECK-NEXT: ret
%1 = icmp eq i32 %x, %y
br i1 %1, label %bb1, label %bb2
bb2:
store volatile i32 9, i32* %dst
store volatile i32 9, i32* undef
ret i32 1
bb1:
store volatile i32 42, i32* %dst
store volatile i32 42, i32* undef
ret i32 0
}

@@ -1,28 +1,27 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-apple-darwin -aarch64-bcc-offset-bits=3 < %s | FileCheck %s
define i32 @invert_bcc(float %x, float %y, i32* %dst0, i32* %dst1) #0 {
define i32 @invert_bcc(float %x, float %y) #0 {
; CHECK-LABEL: invert_bcc:
; CHECK: ; %bb.0:
; CHECK-NEXT: mov w0, wzr
; CHECK-NEXT: fcmp s0, s1
; CHECK-NEXT: b.ne LBB0_3
; CHECK-NEXT: mov w8, #42
; CHECK-NEXT: b.pl LBB0_3
; CHECK-NEXT: b LBB0_2
; CHECK-NEXT: LBB0_3:
; CHECK-NEXT: b.vc LBB0_1
; CHECK-NEXT: b LBB0_2
; CHECK-NEXT: LBB0_1: ; %bb2
; CHECK-NEXT: b.gt LBB0_2
; CHECK-NEXT: ; %bb.1: ; %common.ret
; CHECK-NEXT: str w8, [x8]
; CHECK-NEXT: ret
; CHECK-NEXT: LBB0_2: ; %bb2
; CHECK-NEXT: mov w0, #1
; CHECK-NEXT: mov w8, #9
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: nop
; CHECK-NEXT: nop
; CHECK-NEXT: ; InlineAsm End
; CHECK-NEXT: str w8, [x0]
; CHECK-NEXT: mov w0, #1
; CHECK-NEXT: ret
; CHECK-NEXT: LBB0_2: ; %bb1
; CHECK-NEXT: mov w0, wzr
; CHECK-NEXT: mov w8, #42
; CHECK-NEXT: str w8, [x1]
; CHECK-NEXT: str w8, [x8]
; CHECK-NEXT: ret
%1 = fcmp ueq float %x, %y
br i1 %1, label %bb1, label %bb2
@@ -32,11 +31,11 @@ bb2:
"nop
nop",
""() #0
store volatile i32 9, i32* %dst0
store volatile i32 9, i32* undef
ret i32 1
bb1:
store volatile i32 42, i32* %dst1
store volatile i32 42, i32* undef
ret i32 0
}

@@ -16,18 +16,18 @@ entry:
; CHECK: mul i32
; CHECK-NOT: call i32
define amdgpu_kernel void @caller(i32 %x, i32 addrspace(1)* %dst) {
define amdgpu_kernel void @caller(i32 %x) {
entry:
%res = call i32 @callee(i32 %x)
store volatile i32 %res, i32 addrspace(1)* %dst
store volatile i32 %res, i32 addrspace(1)* undef
ret void
}
; CHECK-LABEL: @alias_caller(
; CHECK-NOT: call
define amdgpu_kernel void @alias_caller(i32 %x, i32 addrspace(1)* %dst) {
define amdgpu_kernel void @alias_caller(i32 %x) {
entry:
%res = call i32 @c_alias(i32 %x)
store volatile i32 %res, i32 addrspace(1)* %dst
store volatile i32 %res, i32 addrspace(1)* undef
ret void
}

@@ -3,38 +3,51 @@
; NUM-COUNT-3: endbr64
; SJLJ-LABEL: main:
; SJLJ: # %bb.0: # %entry
; SJLJ-NEXT: endbr64
; SJLJ: callq _Unwind_SjLj_Register@PLT
; SJLJ-NEXT: .Ltmp0:
; SJLJ-NEXT: callq _Z3foov
; SJLJ-NEXT: .Ltmp1:
; SJLJ-NEXT: # %bb.1: # %invoke.cont
; SJLJ: .LBB0_6: # %return
; SJLJ: callq _Unwind_SjLj_Unregister@PLT
; SJLJ: retq
; SJLJ-NEXT: .LBB0_7:
; SJLJ-NEXT: endbr64
; SJLJ: jb .LBB0_8
; SJLJ-NEXT: # %bb.9:
; SJLJ-NEXT: ud2
; SJLJ-NEXT: .LBB0_8:
; SJLJ: jmpq *(%rcx,%rax,8)
; SJLJ-NEXT: .LBB0_2: # %lpad
; SJLJ-NEXT: .Ltmp2:
; SJLJ-NEXT: endbr64
; SJLJ: jne .LBB0_4
; SJLJ-NEXT: # %bb.3: # %catch3
; SJLJ: callq __cxa_begin_catch
; SJLJ: jmp .LBB0_5
; SJLJ-NEXT: .LBB0_4: # %catch
; SJLJ: callq __cxa_begin_catch
; SJLJ: cmpb $3, %al
; SJLJ-NEXT: .LBB0_5: # %return
; SJLJ-NEXT: setne %cl
; SJLJ: callq __cxa_end_catch
; SJLJ-NEXT: jmp .LBB0_6
;SJLJ: main: # @main
;SJLJ-NEXT: .Lfunc_begin0:
;SJLJ-NEXT: # %bb.0: # %entry
;SJLJ-NEXT: endbr64
;SJLJ-NEXT: pushq %rbp
;SJLJ: callq _Unwind_SjLj_Register
;SJLJ-NEXT: .Ltmp0:
;SJLJ-NEXT: callq _Z3foov
;SJLJ-NEXT: .Ltmp1:
;SJLJ-NEXT: # %bb.1: # %invoke.cont
;SJLJ-NEXT: movl
;SJLJ-NEXT: .LBB0_7: # %return
;SJLJ: callq _Unwind_SjLj_Unregister
;SJLJ: retq
;SJLJ-NEXT: .LBB0_9:
;SJLJ-NEXT: endbr64
;SJLJ-NEXT: movl
;SJLJ-NEXT: cmpl
;SJLJ-NEXT: jb .LBB0_10
;SJLJ-NEXT: # %bb.11:
;SJLJ-NEXT: ud2
;SJLJ-NEXT: .LBB0_10:
;SJLJ-NEXT: leaq .LJTI0_0(%rip), %rcx
;SJLJ-NEXT: jmpq *(%rcx,%rax,8)
;SJLJ-NEXT: .LBB0_2: # %lpad
;SJLJ-NEXT: .Ltmp2:
;SJLJ-NEXT: endbr64
;SJLJ: jne .LBB0_4
;SJLJ-NEXT: # %bb.3: # %catch3
;SJLJ: callq __cxa_begin_catch
;SJLJ: jmp .LBB0_6
;SJLJ-NEXT: .LBB0_4: # %catch.fallthrough
;SJLJ-NEXT: cmpl
;SJLJ-NEXT: jne .LBB0_8
;SJLJ-NEXT: # %bb.5: # %catch
;SJLJ: callq __cxa_begin_catch
;SJLJ: cmpb
;SJLJ-NEXT: .LBB0_6: # %return
;SJLJ: callq __cxa_end_catch
;SJLJ-NEXT: jmp .LBB0_7
;SJLJ-NEXT: .LBB0_8: # %eh.resume
;SJLJ-NEXT: movl
;SJLJ-NEXT: .Lfunc_end0:
;SJLJ: .LJTI0_0:
;SJLJ-NEXT: .quad .LBB0_2
@_ZTIi = external dso_local constant i8*
@_ZTIc = external dso_local constant i8*

@@ -25,6 +25,7 @@ define void @volatile_store_before_unreachable(i1 %c, i8* %p) {
; CHECK-LABEL: @volatile_store_before_unreachable(
; CHECK-NEXT: br i1 [[C:%.*]], label [[TRUE:%.*]], label [[FALSE:%.*]]
; CHECK: true:
; CHECK-NEXT: store volatile i8 0, i8* [[P:%.*]], align 1
; CHECK-NEXT: unreachable
; CHECK: false:
; CHECK-NEXT: ret void

@@ -76,8 +76,8 @@ entry:
define void @test3() nounwind {
; CHECK-LABEL: @test3(
; CHECK-NEXT: entry:
; CHECK-NEXT: call void @llvm.trap()
; CHECK-NEXT: unreachable
; CHECK-NEXT: store volatile i32 4, i32* null, align 4
; CHECK-NEXT: ret void
;
entry:
store volatile i32 4, i32* null
@@ -101,8 +101,11 @@ entry:
define void @test4(i1 %C, i32* %P) {
; CHECK-LABEL: @test4(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[C:%.*]], true
; CHECK-NEXT: call void @llvm.assume(i1 [[TMP0]])
; CHECK-NEXT: br i1 [[C:%.*]], label [[T:%.*]], label [[F:%.*]]
; CHECK: T:
; CHECK-NEXT: store volatile i32 0, i32* [[P:%.*]], align 4
; CHECK-NEXT: unreachable
; CHECK: F:
; CHECK-NEXT: ret void
;
entry:

@@ -4813,9 +4813,10 @@ void UnitTest::AddTestPartResult(
// with clang/gcc we can achieve the same effect on x86 by invoking int3
asm("int3");
#else
// While some debuggers don't correctly trap abort(), we can't perform
// volatile store to null since it will be removed by clang and not trap.
__builtin_trap();
// Dereference nullptr through a volatile pointer to prevent the compiler
// from removing. We use this rather than abort() or __builtin_trap() for
// portability: some debuggers don't correctly trap abort().
*static_cast<volatile int*>(nullptr) = 1;
#endif // GTEST_OS_WINDOWS
} else if (GTEST_FLAG(throw_on_failure)) {
#if GTEST_HAS_EXCEPTIONS
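As a standalone illustration (not part of the diff) of why this matters to googletest: the restored line faults through a volatile null pointer so that a debugger stops at the failure site, and the LLVM changes above are what keep such a store from being optimized away. A minimal sketch:

#include <cstdio>

// Fault on purpose so an attached debugger stops here. gtest prefers this
// over abort() or __builtin_trap() because some debuggers don't trap those
// correctly; the volatile qualifier is what stops the optimizer from
// deleting the store as provable UB.
static void break_into_debugger() {
  std::fputs("fatal failure; faulting so a debugger can stop here\n", stderr);
  *static_cast<volatile int *>(nullptr) = 1; // SIGSEGV / access violation
}

int main() {
  bool simulated_assertion_failure = true; // stand-in for a failed check
  if (simulated_assertion_failure)
    break_into_debugger();
  return 0;
}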