llvm-mirror/test/CodeGen/SystemZ/cmpxchg-01.ll
Tim Northover b9ec29d7c5 IR: add "cmpxchg weak" variant to support permitted failure.
This commit adds a weak variant of the cmpxchg operation, as described
in C++11. A cmpxchg instruction with this modifier is permitted to
fail to store, even if the comparison indicated it should.

As a result, cmpxchg instructions must return a flag indicating
success in addition to the loaded iN value they originally returned.
Thus, for uniformity, *all* cmpxchg instructions now return
"{ iN, i1 }". The i1 member is 1 when the store succeeded.
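
For illustration only (not part of this commit; the function name and
types are hypothetical), a minimal weak cmpxchg in the new form,
extracting the success flag from the "{ i32, i1 }" result:

  define i1 @weak_update(i32 *%ptr, i32 %expected, i32 %new) {
    ; "weak" permits a spurious failure even when the comparison matches,
    ; so callers consult the i1 flag (and typically retry in a loop).
    %pair = cmpxchg weak i32 *%ptr, i32 %expected, i32 %new seq_cst seq_cst
    %success = extractvalue { i32, i1 } %pair, 1
    ret i1 %success
  }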

At the DAG level, a new ATOMIC_CMP_SWAP_WITH_SUCCESS node has been
added as the natural representation for the new cmpxchg instructions.
It is a strong cmpxchg.

By default this gets Expanded to the existing ATOMIC_CMP_SWAP during
Legalization, so existing backends should see no change in behaviour.
If they wish to deal with the enhanced node instead, they can call
setOperationAction on it. Beware: as a node with 2 results, it cannot
be selected from TableGen.

Currently, no use is made of the extra information provided in this
patch. Test updates are almost entirely adapting the input IR to the
new scheme.

Summary for out-of-tree users:
------------------------------

+ Legacy Bitcode files are upgraded during read.
+ Legacy assembly IR files will be invalid.
+ Front-ends must adapt to the new return type of "cmpxchg" (see the sketch below).
+ Backends should be unaffected by default.
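
As a hedged sketch of the front-end item above (illustrative names,
mirroring the test updates in the file below), a strong cmpxchg that
previously yielded the loaded value directly now extracts it from the
pair:

  define i32 @strong_update(i32 *%ptr, i32 %cmp, i32 %new) {
    ; Previously: %old = cmpxchg i32 *%ptr, i32 %cmp, i32 %new seq_cst seq_cst
    %pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %new seq_cst seq_cst
    %old = extractvalue { i32, i1 } %pair, 0
    ret i32 %old
  }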

llvm-svn: 210903
2014-06-13 14:24:07 +00:00


; Test 8-bit compare and swap.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-MAIN
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-SHIFT
; Check compare and swap with a variable.
; - CHECK-MAIN is for the main loop.
; - CHECK-SHIFT makes sure that the negated shift count used by the second
; RLL is set up correctly. The negation is independent of the NILL and L
; tested in CHECK-MAIN. CHECK-SHIFT also checks that %r3 is not modified before
; being used in the RISBG (in contrast to things like atomic addition,
; which shift %r3 left so that %b is at the high end of the word).
define i8 @f1(i8 %dummy, i8 *%src, i8 %cmp, i8 %swap) {
; CHECK-MAIN-LABEL: f1:
; CHECK-MAIN: sllg [[SHIFT:%r[1-9]+]], %r3, 3
; CHECK-MAIN: nill %r3, 65532
; CHECK-MAIN: l [[OLD:%r[0-9]+]], 0(%r3)
; CHECK-MAIN: [[LOOP:\.[^ ]*]]:
; CHECK-MAIN: rll %r2, [[OLD]], 8([[SHIFT]])
; CHECK-MAIN: risbg %r4, %r2, 32, 55, 0
; CHECK-MAIN: crjlh %r2, %r4, [[EXIT:\.[^ ]*]]
; CHECK-MAIN: risbg %r5, %r2, 32, 55, 0
; CHECK-MAIN: rll [[NEW:%r[0-9]+]], %r5, -8({{%r[1-9]+}})
; CHECK-MAIN: cs [[OLD]], [[NEW]], 0(%r3)
; CHECK-MAIN: jl [[LOOP]]
; CHECK-MAIN: [[EXIT]]:
; CHECK-MAIN-NOT: %r2
; CHECK-MAIN: br %r14
;
; CHECK-SHIFT-LABEL: f1:
; CHECK-SHIFT: sllg [[SHIFT:%r[1-9]+]], %r3, 3
; CHECK-SHIFT: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
; CHECK-SHIFT: rll
; CHECK-SHIFT: rll {{%r[0-9]+}}, %r5, -8([[NEGSHIFT]])
%pair = cmpxchg i8 *%src, i8 %cmp, i8 %swap seq_cst seq_cst
%res = extractvalue { i8, i1 } %pair, 0
ret i8 %res
}
; Check compare and swap with constants. We should force the constants into
; registers and use the sequence above.
define i8 @f2(i8 *%src) {
; CHECK-MAIN-LABEL: f2:
; CHECK-MAIN: lhi [[CMP:%r[0-9]+]], 42
; CHECK-MAIN: risbg [[CMP]], {{%r[0-9]+}}, 32, 55, 0
; CHECK-MAIN: risbg
; CHECK-MAIN: br %r14
;
; CHECK-SHIFT-LABEL: f2:
; CHECK-SHIFT: lhi [[SWAP:%r[0-9]+]], 88
; CHECK-SHIFT: risbg
; CHECK-SHIFT: risbg [[SWAP]], {{%r[0-9]+}}, 32, 55, 0
; CHECK-SHIFT: br %r14
%pair = cmpxchg i8 *%src, i8 42, i8 88 seq_cst seq_cst
%res = extractvalue { i8, i1 } %pair, 0
ret i8 %res
}