
[RISCV][NFC] Add CHECK lines for atomic operations on RV64I

As for RV32I, we include these for completeness. Committing now to make it
easier to review the RV64A patch.

llvm-svn: 350962
Author: Alex Bradbury
Date: 2019-01-11 19:46:48 +00:00
Parent: 896f341f88
Commit: fa2f6e25bc
4 changed files with 5770 additions and 0 deletions
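For context on the new CHECK lines: without the A extension, the RISC-V backends lower atomic operations to libatomic calls, passing the C/C++ ABI memory-order constants (relaxed/monotonic = 0, acquire = 2, release = 3, acq_rel = 4, seq_cst = 5) in argument registers. A minimal C sketch of source that produces exactly these libcalls when built for riscv64 without +a (the function names here are illustrative, not from the patch):

#include <stdbool.h>
#include <stdint.h>

/* The __ATOMIC_* constants are 0..5 and match the immediates loaded
   into a1/a2/a3/a4 in the CHECK lines below. */

uint8_t load_acquire(uint8_t *p) {
  return __atomic_load_n(p, __ATOMIC_ACQUIRE);   /* __atomic_load_1, a1 = 2 */
}

void store_seq_cst(uint8_t *p, uint8_t v) {
  __atomic_store_n(p, v, __ATOMIC_SEQ_CST);      /* __atomic_store_1, a2 = 5 */
}

bool cas_seq_cst(uint8_t *p, uint8_t expected, uint8_t desired) {
  /* __atomic_compare_exchange_1: a0 = p, a1 = &expected (a stack slot),
     a2 = desired, a3 = success order, a4 = failure order. */
  return __atomic_compare_exchange_n(p, &expected, desired, /*weak=*/false,
                                     __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

This is why, for example, the seq_cst/seq_cst cmpxchg tests materialize 5 in a3 and copy it to a4 (mv a4, a3), while the monotonic cases simply zero the order registers (mv a3, zero).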

test/CodeGen/RISCV/atomic-cmpxchg.ll

@@ -3,6 +3,8 @@
; RUN: | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv32 -mattr=+a -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32IA %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64I %s
define void @cmpxchg_i8_monotonic_monotonic(i8* %ptr, i8 %cmp, i8 %val) {
; RV32I-LABEL: cmpxchg_i8_monotonic_monotonic:
@@ -41,6 +43,19 @@ define void @cmpxchg_i8_monotonic_monotonic(i8* %ptr, i8 %cmp, i8 %val) {
; RV32IA-NEXT: bnez a5, .LBB0_1
; RV32IA-NEXT: .LBB0_3:
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i8_monotonic_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sb a1, 7(sp)
; RV64I-NEXT: addi a1, sp, 7
; RV64I-NEXT: mv a3, zero
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val monotonic monotonic
ret void
}
@@ -82,6 +97,19 @@ define void @cmpxchg_i8_acquire_monotonic(i8* %ptr, i8 %cmp, i8 %val) {
; RV32IA-NEXT: bnez a5, .LBB1_1
; RV32IA-NEXT: .LBB1_3:
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i8_acquire_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sb a1, 7(sp)
; RV64I-NEXT: addi a1, sp, 7
; RV64I-NEXT: addi a3, zero, 2
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acquire monotonic
ret void
}
@@ -123,6 +151,19 @@ define void @cmpxchg_i8_acquire_acquire(i8* %ptr, i8 %cmp, i8 %val) {
; RV32IA-NEXT: bnez a5, .LBB2_1
; RV32IA-NEXT: .LBB2_3:
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i8_acquire_acquire:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sb a1, 7(sp)
; RV64I-NEXT: addi a1, sp, 7
; RV64I-NEXT: addi a3, zero, 2
; RV64I-NEXT: mv a4, a3
; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acquire acquire
ret void
}
@@ -164,6 +205,19 @@ define void @cmpxchg_i8_release_monotonic(i8* %ptr, i8 %cmp, i8 %val) {
; RV32IA-NEXT: bnez a5, .LBB3_1
; RV32IA-NEXT: .LBB3_3:
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i8_release_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sb a1, 7(sp)
; RV64I-NEXT: addi a1, sp, 7
; RV64I-NEXT: addi a3, zero, 3
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val release monotonic
ret void
}
@@ -205,6 +259,19 @@ define void @cmpxchg_i8_release_acquire(i8* %ptr, i8 %cmp, i8 %val) {
; RV32IA-NEXT: bnez a5, .LBB4_1
; RV32IA-NEXT: .LBB4_3:
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i8_release_acquire:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sb a1, 7(sp)
; RV64I-NEXT: addi a1, sp, 7
; RV64I-NEXT: addi a3, zero, 3
; RV64I-NEXT: addi a4, zero, 2
; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val release acquire
ret void
}
@@ -246,6 +313,19 @@ define void @cmpxchg_i8_acq_rel_monotonic(i8* %ptr, i8 %cmp, i8 %val) {
; RV32IA-NEXT: bnez a5, .LBB5_1
; RV32IA-NEXT: .LBB5_3:
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i8_acq_rel_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sb a1, 7(sp)
; RV64I-NEXT: addi a1, sp, 7
; RV64I-NEXT: addi a3, zero, 4
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acq_rel monotonic
ret void
}
@@ -287,6 +367,19 @@ define void @cmpxchg_i8_acq_rel_acquire(i8* %ptr, i8 %cmp, i8 %val) {
; RV32IA-NEXT: bnez a5, .LBB6_1
; RV32IA-NEXT: .LBB6_3:
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i8_acq_rel_acquire:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sb a1, 7(sp)
; RV64I-NEXT: addi a1, sp, 7
; RV64I-NEXT: addi a3, zero, 4
; RV64I-NEXT: addi a4, zero, 2
; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acq_rel acquire
ret void
}
@@ -328,6 +421,19 @@ define void @cmpxchg_i8_seq_cst_monotonic(i8* %ptr, i8 %cmp, i8 %val) {
; RV32IA-NEXT: bnez a5, .LBB7_1
; RV32IA-NEXT: .LBB7_3:
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i8_seq_cst_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sb a1, 7(sp)
; RV64I-NEXT: addi a1, sp, 7
; RV64I-NEXT: addi a3, zero, 5
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst monotonic
ret void
}
@@ -369,6 +475,19 @@ define void @cmpxchg_i8_seq_cst_acquire(i8* %ptr, i8 %cmp, i8 %val) {
; RV32IA-NEXT: bnez a5, .LBB8_1
; RV32IA-NEXT: .LBB8_3:
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i8_seq_cst_acquire:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sb a1, 7(sp)
; RV64I-NEXT: addi a1, sp, 7
; RV64I-NEXT: addi a3, zero, 5
; RV64I-NEXT: addi a4, zero, 2
; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst acquire
ret void
}
@@ -410,6 +529,19 @@ define void @cmpxchg_i8_seq_cst_seq_cst(i8* %ptr, i8 %cmp, i8 %val) {
; RV32IA-NEXT: bnez a5, .LBB9_1
; RV32IA-NEXT: .LBB9_3:
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i8_seq_cst_seq_cst:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sb a1, 7(sp)
; RV64I-NEXT: addi a1, sp, 7
; RV64I-NEXT: addi a3, zero, 5
; RV64I-NEXT: mv a4, a3
; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst seq_cst
ret void
}
@@ -452,6 +584,19 @@ define void @cmpxchg_i16_monotonic_monotonic(i16* %ptr, i16 %cmp, i16 %val) {
; RV32IA-NEXT: bnez a5, .LBB10_1
; RV32IA-NEXT: .LBB10_3:
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i16_monotonic_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sh a1, 6(sp)
; RV64I-NEXT: addi a1, sp, 6
; RV64I-NEXT: mv a3, zero
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val monotonic monotonic
ret void
}
@@ -494,6 +639,19 @@ define void @cmpxchg_i16_acquire_monotonic(i16* %ptr, i16 %cmp, i16 %val) {
; RV32IA-NEXT: bnez a5, .LBB11_1
; RV32IA-NEXT: .LBB11_3:
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i16_acquire_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sh a1, 6(sp)
; RV64I-NEXT: addi a1, sp, 6
; RV64I-NEXT: addi a3, zero, 2
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acquire monotonic
ret void
}
@@ -536,6 +694,19 @@ define void @cmpxchg_i16_acquire_acquire(i16* %ptr, i16 %cmp, i16 %val) {
; RV32IA-NEXT: bnez a5, .LBB12_1
; RV32IA-NEXT: .LBB12_3:
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i16_acquire_acquire:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sh a1, 6(sp)
; RV64I-NEXT: addi a1, sp, 6
; RV64I-NEXT: addi a3, zero, 2
; RV64I-NEXT: mv a4, a3
; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acquire acquire
ret void
}
@@ -578,6 +749,19 @@ define void @cmpxchg_i16_release_monotonic(i16* %ptr, i16 %cmp, i16 %val) {
; RV32IA-NEXT: bnez a5, .LBB13_1
; RV32IA-NEXT: .LBB13_3:
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i16_release_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sh a1, 6(sp)
; RV64I-NEXT: addi a1, sp, 6
; RV64I-NEXT: addi a3, zero, 3
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val release monotonic
ret void
}
@@ -620,6 +804,19 @@ define void @cmpxchg_i16_release_acquire(i16* %ptr, i16 %cmp, i16 %val) {
; RV32IA-NEXT: bnez a5, .LBB14_1
; RV32IA-NEXT: .LBB14_3:
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i16_release_acquire:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sh a1, 6(sp)
; RV64I-NEXT: addi a1, sp, 6
; RV64I-NEXT: addi a3, zero, 3
; RV64I-NEXT: addi a4, zero, 2
; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val release acquire
ret void
}
@@ -662,6 +859,19 @@ define void @cmpxchg_i16_acq_rel_monotonic(i16* %ptr, i16 %cmp, i16 %val) {
; RV32IA-NEXT: bnez a5, .LBB15_1
; RV32IA-NEXT: .LBB15_3:
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i16_acq_rel_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sh a1, 6(sp)
; RV64I-NEXT: addi a1, sp, 6
; RV64I-NEXT: addi a3, zero, 4
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acq_rel monotonic
ret void
}
@@ -704,6 +914,19 @@ define void @cmpxchg_i16_acq_rel_acquire(i16* %ptr, i16 %cmp, i16 %val) {
; RV32IA-NEXT: bnez a5, .LBB16_1
; RV32IA-NEXT: .LBB16_3:
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i16_acq_rel_acquire:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sh a1, 6(sp)
; RV64I-NEXT: addi a1, sp, 6
; RV64I-NEXT: addi a3, zero, 4
; RV64I-NEXT: addi a4, zero, 2
; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acq_rel acquire
ret void
}
@@ -746,6 +969,19 @@ define void @cmpxchg_i16_seq_cst_monotonic(i16* %ptr, i16 %cmp, i16 %val) {
; RV32IA-NEXT: bnez a5, .LBB17_1
; RV32IA-NEXT: .LBB17_3:
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i16_seq_cst_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sh a1, 6(sp)
; RV64I-NEXT: addi a1, sp, 6
; RV64I-NEXT: addi a3, zero, 5
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst monotonic
ret void
}
@@ -788,6 +1024,19 @@ define void @cmpxchg_i16_seq_cst_acquire(i16* %ptr, i16 %cmp, i16 %val) {
; RV32IA-NEXT: bnez a5, .LBB18_1
; RV32IA-NEXT: .LBB18_3:
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i16_seq_cst_acquire:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sh a1, 6(sp)
; RV64I-NEXT: addi a1, sp, 6
; RV64I-NEXT: addi a3, zero, 5
; RV64I-NEXT: addi a4, zero, 2
; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst acquire
ret void
}
@@ -830,6 +1079,19 @@ define void @cmpxchg_i16_seq_cst_seq_cst(i16* %ptr, i16 %cmp, i16 %val) {
; RV32IA-NEXT: bnez a5, .LBB19_1
; RV32IA-NEXT: .LBB19_3:
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i16_seq_cst_seq_cst:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sh a1, 6(sp)
; RV64I-NEXT: addi a1, sp, 6
; RV64I-NEXT: addi a3, zero, 5
; RV64I-NEXT: mv a4, a3
; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst seq_cst
ret void
}
@@ -858,6 +1120,19 @@ define void @cmpxchg_i32_monotonic_monotonic(i32* %ptr, i32 %cmp, i32 %val) {
; RV32IA-NEXT: bnez a4, .LBB20_1
; RV32IA-NEXT: .LBB20_3:
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i32_monotonic_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sw a1, 4(sp)
; RV64I-NEXT: addi a1, sp, 4
; RV64I-NEXT: mv a3, zero
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val monotonic monotonic
ret void
}
@@ -886,6 +1161,19 @@ define void @cmpxchg_i32_acquire_monotonic(i32* %ptr, i32 %cmp, i32 %val) {
; RV32IA-NEXT: bnez a4, .LBB21_1
; RV32IA-NEXT: .LBB21_3:
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i32_acquire_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sw a1, 4(sp)
; RV64I-NEXT: addi a1, sp, 4
; RV64I-NEXT: addi a3, zero, 2
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acquire monotonic
ret void
}
@@ -914,6 +1202,19 @@ define void @cmpxchg_i32_acquire_acquire(i32* %ptr, i32 %cmp, i32 %val) {
; RV32IA-NEXT: bnez a4, .LBB22_1
; RV32IA-NEXT: .LBB22_3:
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i32_acquire_acquire:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sw a1, 4(sp)
; RV64I-NEXT: addi a1, sp, 4
; RV64I-NEXT: addi a3, zero, 2
; RV64I-NEXT: mv a4, a3
; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acquire acquire
ret void
}
@@ -942,6 +1243,19 @@ define void @cmpxchg_i32_release_monotonic(i32* %ptr, i32 %cmp, i32 %val) {
; RV32IA-NEXT: bnez a4, .LBB23_1
; RV32IA-NEXT: .LBB23_3:
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i32_release_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sw a1, 4(sp)
; RV64I-NEXT: addi a1, sp, 4
; RV64I-NEXT: addi a3, zero, 3
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val release monotonic
ret void
}
@@ -970,6 +1284,19 @@ define void @cmpxchg_i32_release_acquire(i32* %ptr, i32 %cmp, i32 %val) {
; RV32IA-NEXT: bnez a4, .LBB24_1
; RV32IA-NEXT: .LBB24_3:
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i32_release_acquire:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sw a1, 4(sp)
; RV64I-NEXT: addi a1, sp, 4
; RV64I-NEXT: addi a3, zero, 3
; RV64I-NEXT: addi a4, zero, 2
; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val release acquire
ret void
}
@@ -998,6 +1325,19 @@ define void @cmpxchg_i32_acq_rel_monotonic(i32* %ptr, i32 %cmp, i32 %val) {
; RV32IA-NEXT: bnez a4, .LBB25_1
; RV32IA-NEXT: .LBB25_3:
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i32_acq_rel_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sw a1, 4(sp)
; RV64I-NEXT: addi a1, sp, 4
; RV64I-NEXT: addi a3, zero, 4
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acq_rel monotonic
ret void
}
@@ -1026,6 +1366,19 @@ define void @cmpxchg_i32_acq_rel_acquire(i32* %ptr, i32 %cmp, i32 %val) {
; RV32IA-NEXT: bnez a4, .LBB26_1
; RV32IA-NEXT: .LBB26_3:
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i32_acq_rel_acquire:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sw a1, 4(sp)
; RV64I-NEXT: addi a1, sp, 4
; RV64I-NEXT: addi a3, zero, 4
; RV64I-NEXT: addi a4, zero, 2
; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acq_rel acquire
ret void
}
@@ -1054,6 +1407,19 @@ define void @cmpxchg_i32_seq_cst_monotonic(i32* %ptr, i32 %cmp, i32 %val) {
; RV32IA-NEXT: bnez a4, .LBB27_1
; RV32IA-NEXT: .LBB27_3:
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i32_seq_cst_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sw a1, 4(sp)
; RV64I-NEXT: addi a1, sp, 4
; RV64I-NEXT: addi a3, zero, 5
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst monotonic
ret void
}
@@ -1082,6 +1448,19 @@ define void @cmpxchg_i32_seq_cst_acquire(i32* %ptr, i32 %cmp, i32 %val) {
; RV32IA-NEXT: bnez a4, .LBB28_1
; RV32IA-NEXT: .LBB28_3:
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i32_seq_cst_acquire:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sw a1, 4(sp)
; RV64I-NEXT: addi a1, sp, 4
; RV64I-NEXT: addi a3, zero, 5
; RV64I-NEXT: addi a4, zero, 2
; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst acquire
ret void
}
@@ -1110,6 +1489,19 @@ define void @cmpxchg_i32_seq_cst_seq_cst(i32* %ptr, i32 %cmp, i32 %val) {
; RV32IA-NEXT: bnez a4, .LBB29_1
; RV32IA-NEXT: .LBB29_3:
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i32_seq_cst_seq_cst:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sw a1, 4(sp)
; RV64I-NEXT: addi a1, sp, 4
; RV64I-NEXT: addi a3, zero, 5
; RV64I-NEXT: mv a4, a3
; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst seq_cst
ret void
}
@@ -1146,6 +1538,19 @@ define void @cmpxchg_i64_monotonic_monotonic(i64* %ptr, i64 %cmp, i64 %val) {
; RV32IA-NEXT: lw ra, 12(sp)
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i64_monotonic_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sd a1, 0(sp)
; RV64I-NEXT: mv a1, sp
; RV64I-NEXT: mv a3, zero
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val monotonic monotonic
ret void
}
@@ -1184,6 +1589,19 @@ define void @cmpxchg_i64_acquire_monotonic(i64* %ptr, i64 %cmp, i64 %val) {
; RV32IA-NEXT: lw ra, 12(sp)
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i64_acquire_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sd a1, 0(sp)
; RV64I-NEXT: mv a1, sp
; RV64I-NEXT: addi a3, zero, 2
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acquire monotonic
ret void
}
@@ -1220,6 +1638,19 @@ define void @cmpxchg_i64_acquire_acquire(i64* %ptr, i64 %cmp, i64 %val) {
; RV32IA-NEXT: lw ra, 12(sp)
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i64_acquire_acquire:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sd a1, 0(sp)
; RV64I-NEXT: mv a1, sp
; RV64I-NEXT: addi a3, zero, 2
; RV64I-NEXT: mv a4, a3
; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acquire acquire
ret void
}
@@ -1258,6 +1689,19 @@ define void @cmpxchg_i64_release_monotonic(i64* %ptr, i64 %cmp, i64 %val) {
; RV32IA-NEXT: lw ra, 12(sp)
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i64_release_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sd a1, 0(sp)
; RV64I-NEXT: mv a1, sp
; RV64I-NEXT: addi a3, zero, 3
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val release monotonic
ret void
}
@@ -1296,6 +1740,19 @@ define void @cmpxchg_i64_release_acquire(i64* %ptr, i64 %cmp, i64 %val) {
; RV32IA-NEXT: lw ra, 12(sp)
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i64_release_acquire:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sd a1, 0(sp)
; RV64I-NEXT: mv a1, sp
; RV64I-NEXT: addi a3, zero, 3
; RV64I-NEXT: addi a4, zero, 2
; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val release acquire
ret void
}
@@ -1334,6 +1791,19 @@ define void @cmpxchg_i64_acq_rel_monotonic(i64* %ptr, i64 %cmp, i64 %val) {
; RV32IA-NEXT: lw ra, 12(sp)
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i64_acq_rel_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sd a1, 0(sp)
; RV64I-NEXT: mv a1, sp
; RV64I-NEXT: addi a3, zero, 4
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acq_rel monotonic
ret void
}
@@ -1372,6 +1842,19 @@ define void @cmpxchg_i64_acq_rel_acquire(i64* %ptr, i64 %cmp, i64 %val) {
; RV32IA-NEXT: lw ra, 12(sp)
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i64_acq_rel_acquire:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sd a1, 0(sp)
; RV64I-NEXT: mv a1, sp
; RV64I-NEXT: addi a3, zero, 4
; RV64I-NEXT: addi a4, zero, 2
; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acq_rel acquire
ret void
}
@@ -1410,6 +1893,19 @@ define void @cmpxchg_i64_seq_cst_monotonic(i64* %ptr, i64 %cmp, i64 %val) {
; RV32IA-NEXT: lw ra, 12(sp)
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i64_seq_cst_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sd a1, 0(sp)
; RV64I-NEXT: mv a1, sp
; RV64I-NEXT: addi a3, zero, 5
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst monotonic
ret void
}
@@ -1448,6 +1944,19 @@ define void @cmpxchg_i64_seq_cst_acquire(i64* %ptr, i64 %cmp, i64 %val) {
; RV32IA-NEXT: lw ra, 12(sp)
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i64_seq_cst_acquire:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sd a1, 0(sp)
; RV64I-NEXT: mv a1, sp
; RV64I-NEXT: addi a3, zero, 5
; RV64I-NEXT: addi a4, zero, 2
; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst acquire
ret void
}
@@ -1484,6 +1993,19 @@ define void @cmpxchg_i64_seq_cst_seq_cst(i64* %ptr, i64 %cmp, i64 %val) {
; RV32IA-NEXT: lw ra, 12(sp)
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: cmpxchg_i64_seq_cst_seq_cst:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: sd a1, 0(sp)
; RV64I-NEXT: mv a1, sp
; RV64I-NEXT: addi a3, zero, 5
; RV64I-NEXT: mv a4, a3
; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst seq_cst
ret void
}

test/CodeGen/RISCV/atomic-fence.ll

@@ -3,12 +3,21 @@
; RUN: | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv32 -mattr=+a -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+a -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64I %s
define void @fence_acquire() nounwind {
; RV32I-LABEL: fence_acquire:
; RV32I: # %bb.0:
; RV32I-NEXT: fence r, rw
; RV32I-NEXT: ret
;
; RV64I-LABEL: fence_acquire:
; RV64I: # %bb.0:
; RV64I-NEXT: fence r, rw
; RV64I-NEXT: ret
fence acquire
ret void
}
@@ -18,6 +27,11 @@ define void @fence_release() nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: fence rw, w
; RV32I-NEXT: ret
;
; RV64I-LABEL: fence_release:
; RV64I: # %bb.0:
; RV64I-NEXT: fence rw, w
; RV64I-NEXT: ret
fence release
ret void
}
@@ -27,6 +41,11 @@ define void @fence_acq_rel() nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: fence.tso
; RV32I-NEXT: ret
;
; RV64I-LABEL: fence_acq_rel:
; RV64I: # %bb.0:
; RV64I-NEXT: fence.tso
; RV64I-NEXT: ret
fence acq_rel
ret void
}
@@ -36,6 +55,11 @@ define void @fence_seq_cst() nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: fence rw, rw
; RV32I-NEXT: ret
;
; RV64I-LABEL: fence_seq_cst:
; RV64I: # %bb.0:
; RV64I-NEXT: fence rw, rw
; RV64I-NEXT: ret
fence seq_cst
ret void
}

test/CodeGen/RISCV/atomic-load-store.ll

@@ -3,6 +3,8 @@
; RUN: | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv32 -mattr=+a -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32IA %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64I %s
define i8 @atomic_load_i8_unordered(i8 *%a) nounwind {
; RV32I-LABEL: atomic_load_i8_unordered:
@@ -19,6 +21,16 @@ define i8 @atomic_load_i8_unordered(i8 *%a) nounwind {
; RV32IA: # %bb.0:
; RV32IA-NEXT: lb a0, 0(a0)
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_load_i8_unordered:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: mv a1, zero
; RV64I-NEXT: call __atomic_load_1
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = load atomic i8, i8* %a unordered, align 1
ret i8 %1
}
@@ -38,6 +50,16 @@ define i8 @atomic_load_i8_monotonic(i8 *%a) nounwind {
; RV32IA: # %bb.0:
; RV32IA-NEXT: lb a0, 0(a0)
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_load_i8_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: mv a1, zero
; RV64I-NEXT: call __atomic_load_1
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = load atomic i8, i8* %a monotonic, align 1
ret i8 %1
}
@@ -58,6 +80,16 @@ define i8 @atomic_load_i8_acquire(i8 *%a) nounwind {
; RV32IA-NEXT: lb a0, 0(a0)
; RV32IA-NEXT: fence r, rw
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_load_i8_acquire:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: addi a1, zero, 2
; RV64I-NEXT: call __atomic_load_1
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = load atomic i8, i8* %a acquire, align 1
ret i8 %1
}
@@ -79,6 +111,16 @@ define i8 @atomic_load_i8_seq_cst(i8 *%a) nounwind {
; RV32IA-NEXT: lb a0, 0(a0)
; RV32IA-NEXT: fence r, rw
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_load_i8_seq_cst:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: addi a1, zero, 5
; RV64I-NEXT: call __atomic_load_1
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = load atomic i8, i8* %a seq_cst, align 1
ret i8 %1
}
@@ -98,6 +140,16 @@ define i16 @atomic_load_i16_unordered(i16 *%a) nounwind {
; RV32IA: # %bb.0:
; RV32IA-NEXT: lh a0, 0(a0)
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_load_i16_unordered:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: mv a1, zero
; RV64I-NEXT: call __atomic_load_2
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = load atomic i16, i16* %a unordered, align 2
ret i16 %1
}
@@ -117,6 +169,16 @@ define i16 @atomic_load_i16_monotonic(i16 *%a) nounwind {
; RV32IA: # %bb.0:
; RV32IA-NEXT: lh a0, 0(a0)
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_load_i16_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: mv a1, zero
; RV64I-NEXT: call __atomic_load_2
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = load atomic i16, i16* %a monotonic, align 2
ret i16 %1
}
@@ -137,6 +199,16 @@ define i16 @atomic_load_i16_acquire(i16 *%a) nounwind {
; RV32IA-NEXT: lh a0, 0(a0)
; RV32IA-NEXT: fence r, rw
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_load_i16_acquire:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: addi a1, zero, 2
; RV64I-NEXT: call __atomic_load_2
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = load atomic i16, i16* %a acquire, align 2
ret i16 %1
}
@@ -158,6 +230,16 @@ define i16 @atomic_load_i16_seq_cst(i16 *%a) nounwind {
; RV32IA-NEXT: lh a0, 0(a0)
; RV32IA-NEXT: fence r, rw
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_load_i16_seq_cst:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: addi a1, zero, 5
; RV64I-NEXT: call __atomic_load_2
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = load atomic i16, i16* %a seq_cst, align 2
ret i16 %1
}
@@ -177,6 +259,16 @@ define i32 @atomic_load_i32_unordered(i32 *%a) nounwind {
; RV32IA: # %bb.0:
; RV32IA-NEXT: lw a0, 0(a0)
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_load_i32_unordered:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: mv a1, zero
; RV64I-NEXT: call __atomic_load_4
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = load atomic i32, i32* %a unordered, align 4
ret i32 %1
}
@@ -196,6 +288,16 @@ define i32 @atomic_load_i32_monotonic(i32 *%a) nounwind {
; RV32IA: # %bb.0:
; RV32IA-NEXT: lw a0, 0(a0)
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_load_i32_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: mv a1, zero
; RV64I-NEXT: call __atomic_load_4
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = load atomic i32, i32* %a monotonic, align 4
ret i32 %1
}
@@ -216,6 +318,16 @@ define i32 @atomic_load_i32_acquire(i32 *%a) nounwind {
; RV32IA-NEXT: lw a0, 0(a0)
; RV32IA-NEXT: fence r, rw
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_load_i32_acquire:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: addi a1, zero, 2
; RV64I-NEXT: call __atomic_load_4
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = load atomic i32, i32* %a acquire, align 4
ret i32 %1
}
@@ -237,6 +349,16 @@ define i32 @atomic_load_i32_seq_cst(i32 *%a) nounwind {
; RV32IA-NEXT: lw a0, 0(a0)
; RV32IA-NEXT: fence r, rw
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_load_i32_seq_cst:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: addi a1, zero, 5
; RV64I-NEXT: call __atomic_load_4
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = load atomic i32, i32* %a seq_cst, align 4
ret i32 %1
}
@@ -261,6 +383,16 @@ define i64 @atomic_load_i64_unordered(i64 *%a) nounwind {
; RV32IA-NEXT: lw ra, 12(sp)
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_load_i64_unordered:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: mv a1, zero
; RV64I-NEXT: call __atomic_load_8
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = load atomic i64, i64* %a unordered, align 8
ret i64 %1
}
@@ -285,6 +417,16 @@ define i64 @atomic_load_i64_monotonic(i64 *%a) nounwind {
; RV32IA-NEXT: lw ra, 12(sp)
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_load_i64_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: mv a1, zero
; RV64I-NEXT: call __atomic_load_8
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = load atomic i64, i64* %a monotonic, align 8
ret i64 %1
}
@@ -309,6 +451,16 @@ define i64 @atomic_load_i64_acquire(i64 *%a) nounwind {
; RV32IA-NEXT: lw ra, 12(sp)
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_load_i64_acquire:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: addi a1, zero, 2
; RV64I-NEXT: call __atomic_load_8
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = load atomic i64, i64* %a acquire, align 8
ret i64 %1
}
@@ -333,6 +485,16 @@ define i64 @atomic_load_i64_seq_cst(i64 *%a) nounwind {
; RV32IA-NEXT: lw ra, 12(sp)
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_load_i64_seq_cst:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: addi a1, zero, 5
; RV64I-NEXT: call __atomic_load_8
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = load atomic i64, i64* %a seq_cst, align 8
ret i64 %1
}
@@ -352,6 +514,16 @@ define void @atomic_store_i8_unordered(i8 *%a, i8 %b) nounwind {
; RV32IA: # %bb.0:
; RV32IA-NEXT: sb a1, 0(a0)
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_store_i8_unordered:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_store_1
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
store atomic i8 %b, i8* %a unordered, align 1
ret void
}
@@ -371,6 +543,16 @@ define void @atomic_store_i8_monotonic(i8 *%a, i8 %b) nounwind {
; RV32IA: # %bb.0:
; RV32IA-NEXT: sb a1, 0(a0)
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_store_i8_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_store_1
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
store atomic i8 %b, i8* %a monotonic, align 1
ret void
}
@@ -391,6 +573,16 @@ define void @atomic_store_i8_release(i8 *%a, i8 %b) nounwind {
; RV32IA-NEXT: fence rw, w
; RV32IA-NEXT: sb a1, 0(a0)
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_store_i8_release:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: addi a2, zero, 3
; RV64I-NEXT: call __atomic_store_1
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
store atomic i8 %b, i8* %a release, align 1
ret void
}
@@ -411,6 +603,16 @@ define void @atomic_store_i8_seq_cst(i8 *%a, i8 %b) nounwind {
; RV32IA-NEXT: fence rw, w
; RV32IA-NEXT: sb a1, 0(a0)
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_store_i8_seq_cst:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: addi a2, zero, 5
; RV64I-NEXT: call __atomic_store_1
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
store atomic i8 %b, i8* %a seq_cst, align 1
ret void
}
@@ -430,6 +632,16 @@ define void @atomic_store_i16_unordered(i16 *%a, i16 %b) nounwind {
; RV32IA: # %bb.0:
; RV32IA-NEXT: sh a1, 0(a0)
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_store_i16_unordered:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_store_2
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
store atomic i16 %b, i16* %a unordered, align 2
ret void
}
@@ -449,6 +661,16 @@ define void @atomic_store_i16_monotonic(i16 *%a, i16 %b) nounwind {
; RV32IA: # %bb.0:
; RV32IA-NEXT: sh a1, 0(a0)
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_store_i16_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_store_2
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
store atomic i16 %b, i16* %a monotonic, align 2
ret void
}
@@ -469,6 +691,16 @@ define void @atomic_store_i16_release(i16 *%a, i16 %b) nounwind {
; RV32IA-NEXT: fence rw, w
; RV32IA-NEXT: sh a1, 0(a0)
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_store_i16_release:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: addi a2, zero, 3
; RV64I-NEXT: call __atomic_store_2
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
store atomic i16 %b, i16* %a release, align 2
ret void
}
@@ -489,6 +721,16 @@ define void @atomic_store_i16_seq_cst(i16 *%a, i16 %b) nounwind {
; RV32IA-NEXT: fence rw, w
; RV32IA-NEXT: sh a1, 0(a0)
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_store_i16_seq_cst:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: addi a2, zero, 5
; RV64I-NEXT: call __atomic_store_2
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
store atomic i16 %b, i16* %a seq_cst, align 2
ret void
}
@@ -508,6 +750,16 @@ define void @atomic_store_i32_unordered(i32 *%a, i32 %b) nounwind {
; RV32IA: # %bb.0:
; RV32IA-NEXT: sw a1, 0(a0)
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_store_i32_unordered:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_store_4
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
store atomic i32 %b, i32* %a unordered, align 4
ret void
}
@@ -527,6 +779,16 @@ define void @atomic_store_i32_monotonic(i32 *%a, i32 %b) nounwind {
; RV32IA: # %bb.0:
; RV32IA-NEXT: sw a1, 0(a0)
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_store_i32_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_store_4
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
store atomic i32 %b, i32* %a monotonic, align 4
ret void
}
@@ -547,6 +809,16 @@ define void @atomic_store_i32_release(i32 *%a, i32 %b) nounwind {
; RV32IA-NEXT: fence rw, w
; RV32IA-NEXT: sw a1, 0(a0)
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_store_i32_release:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: addi a2, zero, 3
; RV64I-NEXT: call __atomic_store_4
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
store atomic i32 %b, i32* %a release, align 4
ret void
}
@@ -567,6 +839,16 @@ define void @atomic_store_i32_seq_cst(i32 *%a, i32 %b) nounwind {
; RV32IA-NEXT: fence rw, w
; RV32IA-NEXT: sw a1, 0(a0)
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_store_i32_seq_cst:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: addi a2, zero, 5
; RV64I-NEXT: call __atomic_store_4
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
store atomic i32 %b, i32* %a seq_cst, align 4
ret void
}
@@ -591,6 +873,16 @@ define void @atomic_store_i64_unordered(i64 *%a, i64 %b) nounwind {
; RV32IA-NEXT: lw ra, 12(sp)
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_store_i64_unordered:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_store_8
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
store atomic i64 %b, i64* %a unordered, align 8
ret void
}
@@ -615,6 +907,16 @@ define void @atomic_store_i64_monotonic(i64 *%a, i64 %b) nounwind {
; RV32IA-NEXT: lw ra, 12(sp)
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_store_i64_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_store_8
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
store atomic i64 %b, i64* %a monotonic, align 8
ret void
}
@@ -639,6 +941,16 @@ define void @atomic_store_i64_release(i64 *%a, i64 %b) nounwind {
; RV32IA-NEXT: lw ra, 12(sp)
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_store_i64_release:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: addi a2, zero, 3
; RV64I-NEXT: call __atomic_store_8
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
store atomic i64 %b, i64* %a release, align 8
ret void
}
@@ -663,6 +975,16 @@ define void @atomic_store_i64_seq_cst(i64 *%a, i64 %b) nounwind {
; RV32IA-NEXT: lw ra, 12(sp)
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_store_i64_seq_cst:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp)
; RV64I-NEXT: addi a2, zero, 5
; RV64I-NEXT: call __atomic_store_8
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
store atomic i64 %b, i64* %a seq_cst, align 8
ret void
}

test/CodeGen/RISCV/atomic-rmw.ll (diff suppressed because it is too large)