From 79c123d251f12d0b607c390d590923617827e874 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Tue, 27 Apr 2021 15:39:06 +0100
Subject: [PATCH] Revert rG9b7a0a50355d5 - Revert "[X86] Add support for
 reusing ZF etc. from locked XADD instructions (PR20841)"

Still causing some sanitizer buildbot failures.
---
 lib/Target/X86/X86InstrInfo.cpp         | 10 ++++------
 test/CodeGen/X86/atomic-eflags-reuse.ll |  1 +
 test/CodeGen/X86/atomic-flags.ll        |  7 +++++++
 3 files changed, 12 insertions(+), 6 deletions(-)

diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index beb343d06df..981c735c423 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -4020,12 +4020,10 @@ inline static bool isDefConvertible(const MachineInstr &MI, bool &NoSignFlag,
   case X86::SBB8ri:    case X86::SBB64rr:   case X86::SBB32rr:
   case X86::SBB16rr:   case X86::SBB8rr:    case X86::SBB64rm:
   case X86::SBB32rm:   case X86::SBB16rm:   case X86::SBB8rm:
-  case X86::NEG8r:     case X86::NEG16r:    case X86::NEG32r:  case X86::NEG64r:
-  case X86::LXADD64:   case X86::LXADD32:   case X86::LXADD16: case X86::LXADD8:
-  // TODO: Add additional LOCK/XADD instructions when we have test coverage.
-  case X86::SAR8r1:    case X86::SAR16r1:   case X86::SAR32r1: case X86::SAR64r1:
-  case X86::SHR8r1:    case X86::SHR16r1:   case X86::SHR32r1: case X86::SHR64r1:
-  case X86::SHL8r1:    case X86::SHL16r1:   case X86::SHL32r1: case X86::SHL64r1:
+  case X86::NEG8r:     case X86::NEG16r:   case X86::NEG32r: case X86::NEG64r:
+  case X86::SAR8r1:    case X86::SAR16r1:  case X86::SAR32r1:case X86::SAR64r1:
+  case X86::SHR8r1:    case X86::SHR16r1:  case X86::SHR32r1:case X86::SHR64r1:
+  case X86::SHL8r1:    case X86::SHL16r1:  case X86::SHL32r1:case X86::SHL64r1:
   case X86::LZCNT16rr: case X86::LZCNT16rm:
   case X86::LZCNT32rr: case X86::LZCNT32rm:
   case X86::LZCNT64rr: case X86::LZCNT64rm:
diff --git a/test/CodeGen/X86/atomic-eflags-reuse.ll b/test/CodeGen/X86/atomic-eflags-reuse.ll
index 200f55bd922..b5a27892ad2 100644
--- a/test/CodeGen/X86/atomic-eflags-reuse.ll
+++ b/test/CodeGen/X86/atomic-eflags-reuse.ll
@@ -228,6 +228,7 @@ define i8 @test_add_1_cmov_cmov(i64* %p, i8* %q) #0 {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movl $1, %eax
 ; CHECK-NEXT:    lock xaddq %rax, (%rdi)
+; CHECK-NEXT:    testq %rax, %rax
 ; CHECK-NEXT:    movl $12, %eax
 ; CHECK-NEXT:    movl $34, %ecx
 ; CHECK-NEXT:    cmovsl %eax, %ecx
diff --git a/test/CodeGen/X86/atomic-flags.ll b/test/CodeGen/X86/atomic-flags.ll
index 42cf28751b5..dfd916f8121 100644
--- a/test/CodeGen/X86/atomic-flags.ll
+++ b/test/CodeGen/X86/atomic-flags.ll
@@ -131,6 +131,7 @@ define zeroext i1 @xadd_cmp0_i64(i64* %x) nounwind {
 ; X64:       # %bb.0:
 ; X64-NEXT:    movl $1, %eax
 ; X64-NEXT:    lock xaddq %rax, (%rdi)
+; X64-NEXT:    testq %rax, %rax
 ; X64-NEXT:    sete %al
 ; X64-NEXT:    retq
 ;
@@ -166,6 +167,7 @@ define zeroext i1 @xadd_cmp0_i32(i32* %x) nounwind {
 ; X64:       # %bb.0:
 ; X64-NEXT:    movl $1, %eax
 ; X64-NEXT:    lock xaddl %eax, (%rdi)
+; X64-NEXT:    testl %eax, %eax
 ; X64-NEXT:    setne %al
 ; X64-NEXT:    retq
 ;
@@ -174,6 +176,7 @@ define zeroext i1 @xadd_cmp0_i32(i32* %x) nounwind {
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl $1, %ecx
 ; X86-NEXT:    lock xaddl %ecx, (%eax)
+; X86-NEXT:    testl %ecx, %ecx
 ; X86-NEXT:    setne %al
 ; X86-NEXT:    retl
   %add = atomicrmw add i32* %x, i32 1 seq_cst
@@ -186,6 +189,7 @@ define zeroext i1 @xadd_cmp0_i16(i16* %x) nounwind {
 ; X64:       # %bb.0:
 ; X64-NEXT:    movw $1, %ax
 ; X64-NEXT:    lock xaddw %ax, (%rdi)
+; X64-NEXT:    testw %ax, %ax
 ; X64-NEXT:    sete %al
 ; X64-NEXT:    retq
 ;
@@ -194,6 +198,7 @@ define zeroext i1 @xadd_cmp0_i16(i16* %x) nounwind {
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movw $1, %cx
 ; X86-NEXT:    lock xaddw %cx, (%eax)
+; X86-NEXT:    testw %cx, %cx
 ; X86-NEXT:    sete %al
 ; X86-NEXT:    retl
   %add = atomicrmw add i16* %x, i16 1 seq_cst
@@ -206,6 +211,7 @@ define zeroext i1 @xadd_cmp0_i8(i8* %x) nounwind {
 ; X64:       # %bb.0:
 ; X64-NEXT:    movb $1, %al
 ; X64-NEXT:    lock xaddb %al, (%rdi)
+; X64-NEXT:    testb %al, %al
 ; X64-NEXT:    setne %al
 ; X64-NEXT:    retq
 ;
@@ -214,6 +220,7 @@ define zeroext i1 @xadd_cmp0_i8(i8* %x) nounwind {
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movb $1, %cl
 ; X86-NEXT:    lock xaddb %cl, (%eax)
+; X86-NEXT:    testb %cl, %cl
 ; X86-NEXT:    setne %al
 ; X86-NEXT:    retl
   %add = atomicrmw add i8* %x, i8 1 seq_cst
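
For context on the reverted combine: listing the LXADD* opcodes in
isDefConvertible() allowed its caller, optimizeCompareInstr(), to drop a
TEST of the register written by a preceding `lock xadd` and reuse the
EFLAGS the XADD already set. The xadd_cmp0_* tests above exercise IR of
roughly the following shape; this is a sketch reconstructed from the
CHECK lines, and the exact `icmp` is an assumption inferred from the
"cmp0" name and the `sete` expectation, not taken from this patch:

; Illustrative only, not part of the patch.
define zeroext i1 @xadd_cmp0_i64(i64* %x) nounwind {
  %add = atomicrmw add i64* %x, i64 1 seq_cst ; lock xaddq %rax, (%rdi)
  %cmp = icmp eq i64 %add, 0                  ; testq %rax, %rax + sete %al
  ret i1 %cmp
}

The subtlety that makes this fold hard to get right (see PR20841): XADD
leaves the old value in the register but sets EFLAGS from the new sum, so
the XADD flags stand in for a TEST of the returned register only when the
comparison is adjusted to match. With this revert applied, llc
conservatively re-emits the explicit TEST after every locked XADD, as the
updated CHECK lines show.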