llvm-mirror/test/CodeGen/X86/atomic64.ll
Michael Liao 34658dca78 Re-work X86 code generation of atomic ops with spin-loop
- Rewrite/merge pseudo-atomic instruction emitters to address the
  following issue:
  * Reduce one unnecessary load in spin-loop

    Previously, the spin-loop looked like:

        thisMBB:
        newMBB:
          ld  t1 = [bitinstr.addr]
          op  t2 = t1, [bitinstr.val]
          not t3 = t2  (if Invert)
          mov EAX = t1
          lcs dest = [bitinstr.addr], t3  [EAX is implicit]
          bz  newMBB
          fallthrough --> nextMBB

    the 'ld' at the beginning of newMBB should be lifted out of the loop,
    since lcs (i.e. CMPXCHG on x86) already loads the current memory value
    into EAX. The loop is refined as follows (see the assembly sketch after
    this list):

        thisMBB:
          EAX = LOAD [MI.addr]
        mainMBB:
          t1 = OP [MI.val], EAX
          LCMPXCHG [MI.addr], t1, [EAX is implicitly used & defined]
          JNE mainMBB
        sinkMBB:

  * Remove immopc since, so far, all pseudo-atomic instructions have an
    all-register form only; there is no immediate operand.

  * Remove unnecessary attributes/modifiers in the pseudo-atomic
    instruction td definitions.

  * Fix issues in PR13458

- Add comprehensive tests of atomic ops on various data types.
  NOTE: Some of them are turned off due to missing functionality.

- Revise existing tests for the newly generated spin-loop.
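
  For concreteness, the refined spin-loop for a 64-bit 'nand' comes out
  roughly as the x86-64 assembly below. This is a sketch only: the operand
  is assumed to live in RDX, RCX serves as the temporary, and the label is
  illustrative; it is not verbatim compiler output.

        movq     sc64(%rip), %rax       # thisMBB: RAX = LOAD [addr]
  .Lmain:                               # mainMBB:
        movq     %rax, %rcx
        andq     %rdx, %rcx             # t1 = OP val, RAX
        notq     %rcx                   # nand also inverts the result
        lock cmpxchgq %rcx, sc64(%rip)  # RAX implicitly used & defined
        jne      .Lmain                 # RAX was stale: reloaded, retry
                                        # sinkMBB: old value in RAX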

llvm-svn: 164281
2012-09-20 03:06:15 +00:00

; RUN: llc < %s -O0 -march=x86-64 -mcpu=corei7 | FileCheck %s --check-prefix X64
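; Note: only the X64 prefix is exercised by the RUN line above; the X32
; check lines below are inert until an i386 RUN line is added.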
@sc64 = external global i64
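
; add/sub with an unused result fold into lock incq/decq (for a constant 1)
; or lock addq/subq; when the old value is live (%t3 feeds %t4 below),
; lock xaddq is used instead, since it returns the previous value.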
define void @atomic_fetch_add64() nounwind {
; X64: atomic_fetch_add64
entry:
  %t1 = atomicrmw add i64* @sc64, i64 1 acquire
; X64: lock
; X64: incq
  %t2 = atomicrmw add i64* @sc64, i64 3 acquire
; X64: lock
; X64: addq $3
  %t3 = atomicrmw add i64* @sc64, i64 5 acquire
; X64: lock
; X64: xaddq
  %t4 = atomicrmw add i64* @sc64, i64 %t3 acquire
; X64: lock
; X64: addq
  ret void
; X64: ret
}

define void @atomic_fetch_sub64() nounwind {
; X64: atomic_fetch_sub64
  %t1 = atomicrmw sub i64* @sc64, i64 1 acquire
; X64: lock
; X64: decq
  %t2 = atomicrmw sub i64* @sc64, i64 3 acquire
; X64: lock
; X64: subq $3
  %t3 = atomicrmw sub i64* @sc64, i64 5 acquire
; X64: lock
; X64: xaddq
  %t4 = atomicrmw sub i64* @sc64, i64 %t3 acquire
; X64: lock
; X64: subq
  ret void
; X64: ret
}

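; and/or/xor whose old value is needed have no single locked-RMW form on
; x86 (nothing analogous to xadd returns the old value), so they take the
; cmpxchgq spin loop; with the result unused, a direct lock andq/orq/xorq
; suffices.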
define void @atomic_fetch_and64() nounwind {
; X64: atomic_fetch_and64
  %t1 = atomicrmw and i64* @sc64, i64 3 acquire
; X64: lock
; X64: andq $3
  %t2 = atomicrmw and i64* @sc64, i64 5 acquire
; X64: andq
; X64: lock
; X64: cmpxchgq
  %t3 = atomicrmw and i64* @sc64, i64 %t2 acquire
; X64: lock
; X64: andq
  ret void
; X64: ret
}

define void @atomic_fetch_or64() nounwind {
; X64: atomic_fetch_or64
  %t1 = atomicrmw or i64* @sc64, i64 3 acquire
; X64: lock
; X64: orq $3
  %t2 = atomicrmw or i64* @sc64, i64 5 acquire
; X64: orq
; X64: lock
; X64: cmpxchgq
  %t3 = atomicrmw or i64* @sc64, i64 %t2 acquire
; X64: lock
; X64: orq
  ret void
; X64: ret
}

define void @atomic_fetch_xor64() nounwind {
; X64: atomic_fetch_xor64
  %t1 = atomicrmw xor i64* @sc64, i64 3 acquire
; X64: lock
; X64: xorq $3
  %t2 = atomicrmw xor i64* @sc64, i64 5 acquire
; X64: xorq
; X64: lock
; X64: cmpxchgq
  %t3 = atomicrmw xor i64* @sc64, i64 %t2 acquire
; X64: lock
; X64: xorq
  ret void
; X64: ret
}

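; nand has no x86 instruction, so it always takes the spin loop: and, not,
; then lock cmpxchg. On i386 the 64-bit value is split across two registers,
; hence the doubled andl/notl and the cmpxchg8b.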
define void @atomic_fetch_nand64(i64 %x) nounwind {
; X64: atomic_fetch_nand64
; X32: atomic_fetch_nand64
  %t1 = atomicrmw nand i64* @sc64, i64 %x acquire
; X64: andq
; X64: notq
; X64: lock
; X64: cmpxchgq
; X32: andl
; X32: andl
; X32: notl
; X32: notl
; X32: lock
; X32: cmpxchg8b
  ret void
; X64: ret
; X32: ret
}

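; Signed and unsigned min/max all lower to a compare plus cmov feeding the
; cmpxchg loop; the four variants differ only in the condition code, and
; i386 needs extra cmovs for the two halves of the split 64-bit value.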
define void @atomic_fetch_max64(i64 %x) nounwind {
  %t1 = atomicrmw max i64* @sc64, i64 %x acquire
; X64: cmpq
; X64: cmov
; X64: lock
; X64: cmpxchgq
; X32: cmpl
; X32: cmpl
; X32: cmov
; X32: cmov
; X32: cmov
; X32: lock
; X32: cmpxchg8b
  ret void
; X64: ret
; X32: ret
}

define void @atomic_fetch_min64(i64 %x) nounwind {
  %t1 = atomicrmw min i64* @sc64, i64 %x acquire
; X64: cmpq
; X64: cmov
; X64: lock
; X64: cmpxchgq
; X32: cmpl
; X32: cmpl
; X32: cmov
; X32: cmov
; X32: cmov
; X32: lock
; X32: cmpxchg8b
  ret void
; X64: ret
; X32: ret
}

define void @atomic_fetch_umax64(i64 %x) nounwind {
  %t1 = atomicrmw umax i64* @sc64, i64 %x acquire
; X64: cmpq
; X64: cmov
; X64: lock
; X64: cmpxchgq
; X32: cmpl
; X32: cmpl
; X32: cmov
; X32: cmov
; X32: cmov
; X32: lock
; X32: cmpxchg8b
  ret void
; X64: ret
; X32: ret
}

define void @atomic_fetch_umin64(i64 %x) nounwind {
  %t1 = atomicrmw umin i64* @sc64, i64 %x acquire
; X64: cmpq
; X64: cmov
; X64: lock
; X64: cmpxchgq
; X32: cmpl
; X32: cmpl
; X32: cmov
; X32: cmov
; X32: cmov
; X32: lock
; X32: cmpxchg8b
  ret void
; X64: ret
; X32: ret
}

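; The cmpxchg IR instruction maps directly onto lock cmpxchgq (x86-64) or
; lock cmpxchg8b (i386); no surrounding loop is needed.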
define void @atomic_fetch_cmpxchg64() nounwind {
  %t1 = cmpxchg i64* @sc64, i64 0, i64 1 acquire
; X64: lock
; X64: cmpxchgq
; X32: lock
; X32: cmpxchg8b
  ret void
; X64: ret
; X32: ret
}

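; On x86-64 a release store is a plain movq (every x86 store already has
; release semantics under the x86-TSO model), while i386 needs a cmpxchg8b
; loop to store 64 bits atomically.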
define void @atomic_fetch_store64(i64 %x) nounwind {
  store atomic i64 %x, i64* @sc64 release, align 8
; X64-NOT: lock
; X64: movq
; X32: lock
; X32: cmpxchg8b
  ret void
; X64: ret
; X32: ret
}

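; xchg with a memory operand is implicitly locked, so no explicit lock
; prefix is emitted (hence the X64-NOT check).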
define void @atomic_fetch_swap64(i64 %x) nounwind {
  %t1 = atomicrmw xchg i64* @sc64, i64 %x acquire
; X64-NOT: lock
; X64: xchgq
; X32: lock
; X32: cmpxchg8b
  ret void
; X64: ret
; X32: ret
}