
[X86] Remove X32 check lines from a test that doesn't have an X32 FileCheck prefix. Regenerate the test using update_llc_test_checks. NFC

llvm-svn: 356535
Craig Topper 2019-03-20 03:13:28 +00:00
parent b9932c7b68
commit 8e62d2c40a
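
The test only ever runs FileCheck with --check-prefix X64 (see the RUN line below), so every "; X32:" assertion in the old file was dead: FileCheck never looked for it, which is why deleting those lines is NFC. A minimal sketch of that situation, using a hypothetical function @example rather than anything from this test:

; RUN: llc < %s -O0 -mtriple=x86_64-- -mcpu=corei7 -verify-machineinstrs | FileCheck %s --check-prefix X64
@sc64 = external global i64

define void @example() nounwind {
; X64-LABEL: example:
; X32-LABEL: example:
  %t1 = atomicrmw add i64* @sc64, i64 1 acquire
  ret void
}
; The X64-LABEL line is enforced because the RUN line passes --check-prefix X64.
; The X32-LABEL line is an ordinary comment to FileCheck: no RUN line enables the
; X32 prefix, so it can silently go stale. That is what this commit cleans up.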


@@ -1,249 +1,302 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -O0 -mtriple=x86_64-- -mcpu=corei7 -verify-machineinstrs | FileCheck %s --check-prefix X64
@sc64 = external global i64
@fsc64 = external global double
define void @atomic_fetch_add64() nounwind {
; X64-LABEL: atomic_fetch_add64:
; X64: # %bb.0: # %entry
; X64-NEXT: lock incq {{.*}}(%rip)
; X64-NEXT: lock addq $3, {{.*}}(%rip)
; X64-NEXT: movl $5, %eax
; X64-NEXT: lock xaddq %rax, {{.*}}(%rip)
; X64-NEXT: lock addq %rax, {{.*}}(%rip)
; X64-NEXT: retq
entry:
  %t1 = atomicrmw add i64* @sc64, i64 1 acquire
  %t2 = atomicrmw add i64* @sc64, i64 3 acquire
  %t3 = atomicrmw add i64* @sc64, i64 5 acquire
  %t4 = atomicrmw add i64* @sc64, i64 %t3 acquire
  ret void
}
define void @atomic_fetch_sub64() nounwind {
; X64-LABEL: atomic_fetch_sub64:
; X64: # %bb.0:
; X64-NEXT: lock decq {{.*}}(%rip)
; X64-NEXT: lock subq $3, {{.*}}(%rip)
; X64-NEXT: movq $-5, %rax
; X64-NEXT: lock xaddq %rax, {{.*}}(%rip)
; X64-NEXT: lock subq %rax, {{.*}}(%rip)
; X64-NEXT: retq
  %t1 = atomicrmw sub i64* @sc64, i64 1 acquire
  %t2 = atomicrmw sub i64* @sc64, i64 3 acquire
  %t3 = atomicrmw sub i64* @sc64, i64 5 acquire
  %t4 = atomicrmw sub i64* @sc64, i64 %t3 acquire
  ret void
}
define void @atomic_fetch_and64() nounwind {
; X64-LABEL: atomic_fetch_and64:
; X64: # %bb.0:
; X64-NEXT: lock andq $3, {{.*}}(%rip)
; X64-NEXT: movq sc64, %rax
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: .LBB2_1: # %atomicrmw.start
; X64-NEXT: # =>This Inner Loop Header: Depth=1
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; X64-NEXT: movl %eax, %ecx
; X64-NEXT: andl $5, %ecx
; X64-NEXT: movl %ecx, %edx
; X64-NEXT: lock cmpxchgq %rdx, {{.*}}(%rip)
; X64-NEXT: sete %sil
; X64-NEXT: testb $1, %sil
; X64-NEXT: movq %rax, %rdx
; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: jne .LBB2_2
; X64-NEXT: jmp .LBB2_1
; X64-NEXT: .LBB2_2: # %atomicrmw.end
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; X64-NEXT: lock andq %rax, {{.*}}(%rip)
; X64-NEXT: retq
  %t1 = atomicrmw and i64* @sc64, i64 3 acquire
  %t2 = atomicrmw and i64* @sc64, i64 5 acquire
  %t3 = atomicrmw and i64* @sc64, i64 %t2 acquire
  ret void
}
define void @atomic_fetch_or64() nounwind {
; X64-LABEL: atomic_fetch_or64:
; X64: # %bb.0:
; X64-NEXT: lock orq $3, {{.*}}(%rip)
; X64-NEXT: movq sc64, %rax
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: .LBB3_1: # %atomicrmw.start
; X64-NEXT: # =>This Inner Loop Header: Depth=1
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; X64-NEXT: movq %rax, %rcx
; X64-NEXT: orq $5, %rcx
; X64-NEXT: lock cmpxchgq %rcx, {{.*}}(%rip)
; X64-NEXT: sete %dl
; X64-NEXT: testb $1, %dl
; X64-NEXT: movq %rax, %rcx
; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: jne .LBB3_2
; X64-NEXT: jmp .LBB3_1
; X64-NEXT: .LBB3_2: # %atomicrmw.end
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; X64-NEXT: lock orq %rax, {{.*}}(%rip)
; X64-NEXT: retq
  %t1 = atomicrmw or i64* @sc64, i64 3 acquire
  %t2 = atomicrmw or i64* @sc64, i64 5 acquire
  %t3 = atomicrmw or i64* @sc64, i64 %t2 acquire
  ret void
}
define void @atomic_fetch_xor64() nounwind {
; X64-LABEL: atomic_fetch_xor64:
; X64: # %bb.0:
; X64-NEXT: lock xorq $3, {{.*}}(%rip)
; X64-NEXT: movq sc64, %rax
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: .LBB4_1: # %atomicrmw.start
; X64-NEXT: # =>This Inner Loop Header: Depth=1
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; X64-NEXT: movq %rax, %rcx
; X64-NEXT: xorq $5, %rcx
; X64-NEXT: lock cmpxchgq %rcx, {{.*}}(%rip)
; X64-NEXT: sete %dl
; X64-NEXT: testb $1, %dl
; X64-NEXT: movq %rax, %rcx
; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: jne .LBB4_2
; X64-NEXT: jmp .LBB4_1
; X64-NEXT: .LBB4_2: # %atomicrmw.end
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; X64-NEXT: lock xorq %rax, {{.*}}(%rip)
; X64-NEXT: retq
  %t1 = atomicrmw xor i64* @sc64, i64 3 acquire
  %t2 = atomicrmw xor i64* @sc64, i64 5 acquire
  %t3 = atomicrmw xor i64* @sc64, i64 %t2 acquire
  ret void
}
define void @atomic_fetch_nand64(i64 %x) nounwind {
; X64-LABEL: atomic_fetch_nand64:
; X64: # %bb.0:
; X64-NEXT: movq sc64, %rax
; X64-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: .LBB5_1: # %atomicrmw.start
; X64-NEXT: # =>This Inner Loop Header: Depth=1
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; X64-NEXT: movq %rax, %rcx
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; X64-NEXT: andq %rdx, %rcx
; X64-NEXT: notq %rcx
; X64-NEXT: lock cmpxchgq %rcx, {{.*}}(%rip)
; X64-NEXT: sete %sil
; X64-NEXT: testb $1, %sil
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: jne .LBB5_2
; X64-NEXT: jmp .LBB5_1
; X64-NEXT: .LBB5_2: # %atomicrmw.end
; X64-NEXT: retq
  %t1 = atomicrmw nand i64* @sc64, i64 %x acquire
  ret void
}
define void @atomic_fetch_max64(i64 %x) nounwind {
; X64-LABEL: atomic_fetch_max64:
; X64: # %bb.0:
; X64-NEXT: movq sc64, %rax
; X64-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: .LBB6_1: # %atomicrmw.start
; X64-NEXT: # =>This Inner Loop Header: Depth=1
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; X64-NEXT: movq %rax, %rcx
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; X64-NEXT: subq %rdx, %rcx
; X64-NEXT: cmovgeq %rax, %rdx
; X64-NEXT: lock cmpxchgq %rdx, {{.*}}(%rip)
; X64-NEXT: sete %sil
; X64-NEXT: testb $1, %sil
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: jne .LBB6_2
; X64-NEXT: jmp .LBB6_1
; X64-NEXT: .LBB6_2: # %atomicrmw.end
; X64-NEXT: retq
  %t1 = atomicrmw max i64* @sc64, i64 %x acquire
  ret void
}
define void @atomic_fetch_min64(i64 %x) nounwind {
; X64-LABEL: atomic_fetch_min64:
; X64: # %bb.0:
; X64-NEXT: movq sc64, %rax
; X64-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: .LBB7_1: # %atomicrmw.start
; X64-NEXT: # =>This Inner Loop Header: Depth=1
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; X64-NEXT: movq %rax, %rcx
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; X64-NEXT: subq %rdx, %rcx
; X64-NEXT: cmovleq %rax, %rdx
; X64-NEXT: lock cmpxchgq %rdx, {{.*}}(%rip)
; X64-NEXT: sete %sil
; X64-NEXT: testb $1, %sil
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: jne .LBB7_2
; X64-NEXT: jmp .LBB7_1
; X64-NEXT: .LBB7_2: # %atomicrmw.end
; X64-NEXT: retq
  %t1 = atomicrmw min i64* @sc64, i64 %x acquire
  ret void
}
define void @atomic_fetch_umax64(i64 %x) nounwind {
; X64-LABEL: atomic_fetch_umax64:
; X64: # %bb.0:
; X64-NEXT: movq sc64, %rax
; X64-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: .LBB8_1: # %atomicrmw.start
; X64-NEXT: # =>This Inner Loop Header: Depth=1
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; X64-NEXT: movq %rax, %rcx
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; X64-NEXT: subq %rdx, %rcx
; X64-NEXT: cmovaq %rax, %rdx
; X64-NEXT: lock cmpxchgq %rdx, {{.*}}(%rip)
; X64-NEXT: sete %sil
; X64-NEXT: testb $1, %sil
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: jne .LBB8_2
; X64-NEXT: jmp .LBB8_1
; X64-NEXT: .LBB8_2: # %atomicrmw.end
; X64-NEXT: retq
  %t1 = atomicrmw umax i64* @sc64, i64 %x acquire
  ret void
}
define void @atomic_fetch_umin64(i64 %x) nounwind {
; X64-LABEL: atomic_fetch_umin64:
; X64: # %bb.0:
; X64-NEXT: movq sc64, %rax
; X64-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: .LBB9_1: # %atomicrmw.start
; X64-NEXT: # =>This Inner Loop Header: Depth=1
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; X64-NEXT: movq %rax, %rcx
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; X64-NEXT: subq %rdx, %rcx
; X64-NEXT: cmovbeq %rax, %rdx
; X64-NEXT: lock cmpxchgq %rdx, {{.*}}(%rip)
; X64-NEXT: sete %sil
; X64-NEXT: testb $1, %sil
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: jne .LBB9_2
; X64-NEXT: jmp .LBB9_1
; X64-NEXT: .LBB9_2: # %atomicrmw.end
; X64-NEXT: retq
  %t1 = atomicrmw umin i64* @sc64, i64 %x acquire
  ret void
}
define void @atomic_fetch_cmpxchg64() nounwind {
; X64-LABEL: atomic_fetch_cmpxchg64:
; X64: # %bb.0:
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: # kill: def $rax killed $eax
; X64-NEXT: movl $1, %ecx
; X64-NEXT: lock cmpxchgq %rcx, {{.*}}(%rip)
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: retq
  %t1 = cmpxchg i64* @sc64, i64 0, i64 1 acquire acquire
  ret void
}
define void @atomic_fetch_store64(i64 %x) nounwind {
; X64-LABEL: atomic_fetch_store64:
; X64: # %bb.0:
; X64-NEXT: movq %rdi, {{.*}}(%rip)
; X64-NEXT: retq
  store atomic i64 %x, i64* @sc64 release, align 8
  ret void
}
define void @atomic_fetch_swap64(i64 %x) nounwind {
; X64-LABEL: atomic_fetch_swap64:
; X64: # %bb.0:
; X64-NEXT: xchgq %rdi, {{.*}}(%rip)
; X64-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: retq
  %t1 = atomicrmw xchg i64* @sc64, i64 %x acquire
  ret void
}
define void @atomic_fetch_swapf64(double %x) nounwind {
; X64-LABEL: atomic_fetch_swapf64:
; X64: # %bb.0:
; X64-NEXT: movq %xmm0, %rax
; X64-NEXT: xchgq %rax, {{.*}}(%rip)
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: retq
  %t1 = atomicrmw xchg double* @fsc64, double %x acquire
  ret void
}
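
If 32-bit coverage were ever wanted again (purely hypothetical, not part of this commit), the X32 prefix would first need its own RUN line, and the detailed checks would then be regenerated with utils/update_llc_test_checks.py rather than written by hand. On i686 there is no 64-bit lock add, so these atomicrmw operations are expanded to lock cmpxchg8b loops, which is what the stale X32 lines had been written against. A rough sketch of the kind of RUN line and loose checks that would apply:

; RUN: llc < %s -O0 -mtriple=i686-- -mcpu=corei7 -verify-machineinstrs | FileCheck %s --check-prefix X32
; X32-LABEL: atomic_fetch_add64:
; X32: lock
; X32: cmpxchg8b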