llvm-mirror/test/CodeGen/RISCV/atomic-signext.ll
Commit 8c42ad8897 by Jessica Clarke:
[SelectionDAG][Mips][PowerPC][RISCV][WebAssembly] Teach computeKnownBits/ComputeNumSignBits about atomics
Unlike normal loads these don't have an extension field, but we know
from TargetLowering whether these are sign-extending or zero-extending,
and so can optimise away unnecessary extensions.

This was noticed on RISC-V, where sign extensions in the calling
convention would result in unnecessary explicit extension instructions,
but this also fixes some Mips inefficiencies. PowerPC sees churn in the
tests as all the zero extensions are only for promoting 32-bit to
64-bit, but these zero extensions are still not optimised away as they
should be, likely due to i32 being a legal type.

This also simplifies the WebAssembly code somewhat, which currently
works around the lack of target-independent combines with some ugly
patterns that break once they're optimised away.

Re-landed with correct handling in ComputeNumSignBits for Tmp == VTBits,
where zero-extending atomics were incorrectly returning 0 rather than
the (slightly confusing) required return value of 1.

Re-landed again after D102819 fixed PowerPC to correctly zero-extend all
of its atomics as it claimed to do, since the combination of that bug
and this optimisation caused buildbot regressions.

Reviewed By: RKSimon, atanasyan

Differential Revision: https://reviews.llvm.org/D101342
2021-05-20 20:34:23 +01:00
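
The change itself is small: SelectionDAG already knows, through the existing
TargetLowering::getExtendForAtomicOps() hook, whether a target sign- or
zero-extends the results of sub-word atomic operations, so computeKnownBits
and ComputeNumSignBits can use that knowledge to prove later extensions
redundant. A rough sketch of the ComputeNumSignBits side, paraphrased rather
than the verbatim patch (see D101342 for the committed code):

    case ISD::ATOMIC_SWAP:
    case ISD::ATOMIC_LOAD_ADD:
    // ... the other ATOMIC_LOAD_* read-modify-write nodes ...
    case ISD::ATOMIC_LOAD: {
      Tmp = cast<AtomicSDNode>(Op)->getMemoryVT().getScalarSizeInBits();
      // Only result 0 (the loaded value) is extended; a cmpxchg's success
      // flag, for example, is not.
      if (Op.getResNo() == 0) {
        // A full-width atomic says nothing about the upper bits, but
        // ComputeNumSignBits must still return at least 1 -- the
        // Tmp == VTBits case the first re-land fixed.
        if (Tmp == VTBits)
          return 1;
        if (TLI->getExtendForAtomicOps() == ISD::SIGN_EXTEND)
          return VTBits - Tmp + 1; // memory sign bit replicated upwards
        if (TLI->getExtendForAtomicOps() == ISD::ZERO_EXTEND)
          return VTBits - Tmp;     // upper bits known zero
      }
      break;
    }

RISC-V overrides getExtendForAtomicOps() to return ISD::SIGN_EXTEND, so in
the test below the DAG can now prove the `signext` return value already
holds and drop the explicit slli/srai pairs and sext.w instructions.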

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv32 -mattr=+a -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32IA %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+a -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64IA %s
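
; The four RUN configurations cover RV32 and RV64, each with and without the
; A (atomics) extension. Without +a, atomic operations become __atomic_*
; libcalls; with +a they lower to native loads, AMOs or LR/SC loops. Every
; function returns `signext`, so any sign extension the backend cannot prove
; redundant appears as an explicit slli/srai pair (or sext.w) in the output.
; For the plain atomic loads below, lb/lh/lw already sign-extend, so the IA
; configurations need no extra instructions at all.
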
define signext i8 @atomic_load_i8_unordered(i8 *%a) nounwind {
; RV32I-LABEL: atomic_load_i8_unordered:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a1, zero
; RV32I-NEXT: call __atomic_load_1@plt
; RV32I-NEXT: slli a0, a0, 24
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomic_load_i8_unordered:
; RV32IA: # %bb.0:
; RV32IA-NEXT: lb a0, 0(a0)
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_load_i8_unordered:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a1, zero
; RV64I-NEXT: call __atomic_load_1@plt
; RV64I-NEXT: slli a0, a0, 56
; RV64I-NEXT: srai a0, a0, 56
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomic_load_i8_unordered:
; RV64IA: # %bb.0:
; RV64IA-NEXT: lb a0, 0(a0)
; RV64IA-NEXT: ret
%1 = load atomic i8, i8* %a unordered, align 1
ret i8 %1
}

define signext i16 @atomic_load_i16_unordered(i16 *%a) nounwind {
; RV32I-LABEL: atomic_load_i16_unordered:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a1, zero
; RV32I-NEXT: call __atomic_load_2@plt
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomic_load_i16_unordered:
; RV32IA: # %bb.0:
; RV32IA-NEXT: lh a0, 0(a0)
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_load_i16_unordered:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a1, zero
; RV64I-NEXT: call __atomic_load_2@plt
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srai a0, a0, 48
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomic_load_i16_unordered:
; RV64IA: # %bb.0:
; RV64IA-NEXT: lh a0, 0(a0)
; RV64IA-NEXT: ret
%1 = load atomic i16, i16* %a unordered, align 2
ret i16 %1
}

define signext i32 @atomic_load_i32_unordered(i32 *%a) nounwind {
; RV32I-LABEL: atomic_load_i32_unordered:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a1, zero
; RV32I-NEXT: call __atomic_load_4@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomic_load_i32_unordered:
; RV32IA: # %bb.0:
; RV32IA-NEXT: lw a0, 0(a0)
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_load_i32_unordered:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a1, zero
; RV64I-NEXT: call __atomic_load_4@plt
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomic_load_i32_unordered:
; RV64IA: # %bb.0:
; RV64IA-NEXT: lw a0, 0(a0)
; RV64IA-NEXT: ret
%1 = load atomic i32, i32* %a unordered, align 4
ret i32 %1
}
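
; Sub-word atomicrmw with +a but no matching sub-word AMO: align the address
; down to a word boundary (andi a2, a0, -4), turn the low address bits into a
; bit offset (slli a0, a0, 3), build a shifted field mask, then run an LR/SC
; loop that splices the updated bits into the loaded word with the
; xor/and/xor pattern (old ^ ((old ^ result) & mask)). The old field is
; shifted back down and sign-extended with slli/srai.
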
define signext i8 @atomicrmw_xchg_i8_monotonic(i8* %a, i8 %b) nounwind {
; RV32I-LABEL: atomicrmw_xchg_i8_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a2, zero
; RV32I-NEXT: call __atomic_exchange_1@plt
; RV32I-NEXT: slli a0, a0, 24
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_xchg_i8_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: addi a3, zero, 255
; RV32IA-NEXT: sll a3, a3, a0
; RV32IA-NEXT: andi a1, a1, 255
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1
; RV32IA-NEXT: lr.w a4, (a2)
; RV32IA-NEXT: mv a5, a1
; RV32IA-NEXT: xor a5, a4, a5
; RV32IA-NEXT: and a5, a5, a3
; RV32IA-NEXT: xor a5, a4, a5
; RV32IA-NEXT: sc.w a5, a5, (a2)
; RV32IA-NEXT: bnez a5, .LBB3_1
; RV32IA-NEXT: # %bb.2:
; RV32IA-NEXT: srl a0, a4, a0
; RV32IA-NEXT: slli a0, a0, 24
; RV32IA-NEXT: srai a0, a0, 24
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_xchg_i8_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_exchange_1@plt
; RV64I-NEXT: slli a0, a0, 56
; RV64I-NEXT: srai a0, a0, 56
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_xchg_i8_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: andi a2, a0, -4
; RV64IA-NEXT: slli a0, a0, 3
; RV64IA-NEXT: addi a3, zero, 255
; RV64IA-NEXT: sllw a3, a3, a0
; RV64IA-NEXT: andi a1, a1, 255
; RV64IA-NEXT: sllw a1, a1, a0
; RV64IA-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1
; RV64IA-NEXT: lr.w a4, (a2)
; RV64IA-NEXT: mv a5, a1
; RV64IA-NEXT: xor a5, a4, a5
; RV64IA-NEXT: and a5, a5, a3
; RV64IA-NEXT: xor a5, a4, a5
; RV64IA-NEXT: sc.w a5, a5, (a2)
; RV64IA-NEXT: bnez a5, .LBB3_1
; RV64IA-NEXT: # %bb.2:
; RV64IA-NEXT: srlw a0, a4, a0
; RV64IA-NEXT: slli a0, a0, 56
; RV64IA-NEXT: srai a0, a0, 56
; RV64IA-NEXT: ret
%1 = atomicrmw xchg i8* %a, i8 %b monotonic
ret i8 %1
}

define signext i8 @atomicrmw_add_i8_monotonic(i8 *%a, i8 %b) nounwind {
; RV32I-LABEL: atomicrmw_add_i8_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a2, zero
; RV32I-NEXT: call __atomic_fetch_add_1@plt
; RV32I-NEXT: slli a0, a0, 24
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_add_i8_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: addi a3, zero, 255
; RV32IA-NEXT: sll a3, a3, a0
; RV32IA-NEXT: andi a1, a1, 255
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: .LBB4_1: # =>This Inner Loop Header: Depth=1
; RV32IA-NEXT: lr.w a4, (a2)
; RV32IA-NEXT: add a5, a4, a1
; RV32IA-NEXT: xor a5, a4, a5
; RV32IA-NEXT: and a5, a5, a3
; RV32IA-NEXT: xor a5, a4, a5
; RV32IA-NEXT: sc.w a5, a5, (a2)
; RV32IA-NEXT: bnez a5, .LBB4_1
; RV32IA-NEXT: # %bb.2:
; RV32IA-NEXT: srl a0, a4, a0
; RV32IA-NEXT: slli a0, a0, 24
; RV32IA-NEXT: srai a0, a0, 24
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_add_i8_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_fetch_add_1@plt
; RV64I-NEXT: slli a0, a0, 56
; RV64I-NEXT: srai a0, a0, 56
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_add_i8_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: andi a2, a0, -4
; RV64IA-NEXT: slli a0, a0, 3
; RV64IA-NEXT: addi a3, zero, 255
; RV64IA-NEXT: sllw a3, a3, a0
; RV64IA-NEXT: andi a1, a1, 255
; RV64IA-NEXT: sllw a1, a1, a0
; RV64IA-NEXT: .LBB4_1: # =>This Inner Loop Header: Depth=1
; RV64IA-NEXT: lr.w a4, (a2)
; RV64IA-NEXT: add a5, a4, a1
; RV64IA-NEXT: xor a5, a4, a5
; RV64IA-NEXT: and a5, a5, a3
; RV64IA-NEXT: xor a5, a4, a5
; RV64IA-NEXT: sc.w a5, a5, (a2)
; RV64IA-NEXT: bnez a5, .LBB4_1
; RV64IA-NEXT: # %bb.2:
; RV64IA-NEXT: srlw a0, a4, a0
; RV64IA-NEXT: slli a0, a0, 56
; RV64IA-NEXT: srai a0, a0, 56
; RV64IA-NEXT: ret
%1 = atomicrmw add i8* %a, i8 %b monotonic
ret i8 %1
}

define signext i8 @atomicrmw_sub_i8_monotonic(i8* %a, i8 %b) nounwind {
; RV32I-LABEL: atomicrmw_sub_i8_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a2, zero
; RV32I-NEXT: call __atomic_fetch_sub_1@plt
; RV32I-NEXT: slli a0, a0, 24
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_sub_i8_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: addi a3, zero, 255
; RV32IA-NEXT: sll a3, a3, a0
; RV32IA-NEXT: andi a1, a1, 255
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1
; RV32IA-NEXT: lr.w a4, (a2)
; RV32IA-NEXT: sub a5, a4, a1
; RV32IA-NEXT: xor a5, a4, a5
; RV32IA-NEXT: and a5, a5, a3
; RV32IA-NEXT: xor a5, a4, a5
; RV32IA-NEXT: sc.w a5, a5, (a2)
; RV32IA-NEXT: bnez a5, .LBB5_1
; RV32IA-NEXT: # %bb.2:
; RV32IA-NEXT: srl a0, a4, a0
; RV32IA-NEXT: slli a0, a0, 24
; RV32IA-NEXT: srai a0, a0, 24
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_sub_i8_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_fetch_sub_1@plt
; RV64I-NEXT: slli a0, a0, 56
; RV64I-NEXT: srai a0, a0, 56
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_sub_i8_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: andi a2, a0, -4
; RV64IA-NEXT: slli a0, a0, 3
; RV64IA-NEXT: addi a3, zero, 255
; RV64IA-NEXT: sllw a3, a3, a0
; RV64IA-NEXT: andi a1, a1, 255
; RV64IA-NEXT: sllw a1, a1, a0
; RV64IA-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1
; RV64IA-NEXT: lr.w a4, (a2)
; RV64IA-NEXT: sub a5, a4, a1
; RV64IA-NEXT: xor a5, a4, a5
; RV64IA-NEXT: and a5, a5, a3
; RV64IA-NEXT: xor a5, a4, a5
; RV64IA-NEXT: sc.w a5, a5, (a2)
; RV64IA-NEXT: bnez a5, .LBB5_1
; RV64IA-NEXT: # %bb.2:
; RV64IA-NEXT: srlw a0, a4, a0
; RV64IA-NEXT: slli a0, a0, 56
; RV64IA-NEXT: srai a0, a0, 56
; RV64IA-NEXT: ret
%1 = atomicrmw sub i8* %a, i8 %b monotonic
ret i8 %1
}
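
; and/or/xor can use a single word-wide AMO instead of an LR/SC loop, because
; the operand can be padded so the bits outside the field stay unchanged: for
; amoand.w the inverted mask is OR-ed in, making the other bytes AND with
; all-ones, while for amoor.w and amoxor.w the zero padding is already
; harmless. nand has no AMO equivalent and keeps the masked LR/SC loop.
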
define signext i8 @atomicrmw_and_i8_monotonic(i8 *%a, i8 %b) nounwind {
; RV32I-LABEL: atomicrmw_and_i8_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a2, zero
; RV32I-NEXT: call __atomic_fetch_and_1@plt
; RV32I-NEXT: slli a0, a0, 24
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_and_i8_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: addi a3, zero, 255
; RV32IA-NEXT: sll a3, a3, a0
; RV32IA-NEXT: not a3, a3
; RV32IA-NEXT: andi a1, a1, 255
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: or a1, a3, a1
; RV32IA-NEXT: amoand.w a1, a1, (a2)
; RV32IA-NEXT: srl a0, a1, a0
; RV32IA-NEXT: slli a0, a0, 24
; RV32IA-NEXT: srai a0, a0, 24
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_and_i8_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_fetch_and_1@plt
; RV64I-NEXT: slli a0, a0, 56
; RV64I-NEXT: srai a0, a0, 56
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_and_i8_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: andi a2, a0, -4
; RV64IA-NEXT: slli a0, a0, 3
; RV64IA-NEXT: addi a3, zero, 255
; RV64IA-NEXT: sllw a3, a3, a0
; RV64IA-NEXT: not a3, a3
; RV64IA-NEXT: andi a1, a1, 255
; RV64IA-NEXT: sllw a1, a1, a0
; RV64IA-NEXT: or a1, a3, a1
; RV64IA-NEXT: amoand.w a1, a1, (a2)
; RV64IA-NEXT: srlw a0, a1, a0
; RV64IA-NEXT: slli a0, a0, 56
; RV64IA-NEXT: srai a0, a0, 56
; RV64IA-NEXT: ret
%1 = atomicrmw and i8* %a, i8 %b monotonic
ret i8 %1
}

define signext i8 @atomicrmw_nand_i8_monotonic(i8* %a, i8 %b) nounwind {
; RV32I-LABEL: atomicrmw_nand_i8_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a2, zero
; RV32I-NEXT: call __atomic_fetch_nand_1@plt
; RV32I-NEXT: slli a0, a0, 24
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_nand_i8_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: addi a3, zero, 255
; RV32IA-NEXT: sll a3, a3, a0
; RV32IA-NEXT: andi a1, a1, 255
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1
; RV32IA-NEXT: lr.w a4, (a2)
; RV32IA-NEXT: and a5, a4, a1
; RV32IA-NEXT: not a5, a5
; RV32IA-NEXT: xor a5, a4, a5
; RV32IA-NEXT: and a5, a5, a3
; RV32IA-NEXT: xor a5, a4, a5
; RV32IA-NEXT: sc.w a5, a5, (a2)
; RV32IA-NEXT: bnez a5, .LBB7_1
; RV32IA-NEXT: # %bb.2:
; RV32IA-NEXT: srl a0, a4, a0
; RV32IA-NEXT: slli a0, a0, 24
; RV32IA-NEXT: srai a0, a0, 24
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_nand_i8_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_fetch_nand_1@plt
; RV64I-NEXT: slli a0, a0, 56
; RV64I-NEXT: srai a0, a0, 56
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_nand_i8_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: andi a2, a0, -4
; RV64IA-NEXT: slli a0, a0, 3
; RV64IA-NEXT: addi a3, zero, 255
; RV64IA-NEXT: sllw a3, a3, a0
; RV64IA-NEXT: andi a1, a1, 255
; RV64IA-NEXT: sllw a1, a1, a0
; RV64IA-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1
; RV64IA-NEXT: lr.w a4, (a2)
; RV64IA-NEXT: and a5, a4, a1
; RV64IA-NEXT: not a5, a5
; RV64IA-NEXT: xor a5, a4, a5
; RV64IA-NEXT: and a5, a5, a3
; RV64IA-NEXT: xor a5, a4, a5
; RV64IA-NEXT: sc.w a5, a5, (a2)
; RV64IA-NEXT: bnez a5, .LBB7_1
; RV64IA-NEXT: # %bb.2:
; RV64IA-NEXT: srlw a0, a4, a0
; RV64IA-NEXT: slli a0, a0, 56
; RV64IA-NEXT: srai a0, a0, 56
; RV64IA-NEXT: ret
%1 = atomicrmw nand i8* %a, i8 %b monotonic
ret i8 %1
}

define signext i8 @atomicrmw_or_i8_monotonic(i8 *%a, i8 %b) nounwind {
; RV32I-LABEL: atomicrmw_or_i8_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a2, zero
; RV32I-NEXT: call __atomic_fetch_or_1@plt
; RV32I-NEXT: slli a0, a0, 24
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_or_i8_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: andi a1, a1, 255
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: amoor.w a1, a1, (a2)
; RV32IA-NEXT: srl a0, a1, a0
; RV32IA-NEXT: slli a0, a0, 24
; RV32IA-NEXT: srai a0, a0, 24
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_or_i8_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_fetch_or_1@plt
; RV64I-NEXT: slli a0, a0, 56
; RV64I-NEXT: srai a0, a0, 56
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_or_i8_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: andi a2, a0, -4
; RV64IA-NEXT: slli a0, a0, 3
; RV64IA-NEXT: andi a1, a1, 255
; RV64IA-NEXT: sllw a1, a1, a0
; RV64IA-NEXT: amoor.w a1, a1, (a2)
; RV64IA-NEXT: srlw a0, a1, a0
; RV64IA-NEXT: slli a0, a0, 56
; RV64IA-NEXT: srai a0, a0, 56
; RV64IA-NEXT: ret
%1 = atomicrmw or i8* %a, i8 %b monotonic
ret i8 %1
}

define signext i8 @atomicrmw_xor_i8_monotonic(i8 *%a, i8 %b) nounwind {
; RV32I-LABEL: atomicrmw_xor_i8_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a2, zero
; RV32I-NEXT: call __atomic_fetch_xor_1@plt
; RV32I-NEXT: slli a0, a0, 24
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_xor_i8_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: andi a1, a1, 255
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: amoxor.w a1, a1, (a2)
; RV32IA-NEXT: srl a0, a1, a0
; RV32IA-NEXT: slli a0, a0, 24
; RV32IA-NEXT: srai a0, a0, 24
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_xor_i8_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_fetch_xor_1@plt
; RV64I-NEXT: slli a0, a0, 56
; RV64I-NEXT: srai a0, a0, 56
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_xor_i8_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: andi a2, a0, -4
; RV64IA-NEXT: slli a0, a0, 3
; RV64IA-NEXT: andi a1, a1, 255
; RV64IA-NEXT: sllw a1, a1, a0
; RV64IA-NEXT: amoxor.w a1, a1, (a2)
; RV64IA-NEXT: srlw a0, a1, a0
; RV64IA-NEXT: slli a0, a0, 56
; RV64IA-NEXT: srai a0, a0, 56
; RV64IA-NEXT: ret
%1 = atomicrmw xor i8* %a, i8 %b monotonic
ret i8 %1
}
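
; Signed min/max: RISC-V has no sub-word compare, so inside the LR/SC loop
; the masked field is shifted up and arithmetically back down (by XLEN - 8
; minus the field's bit offset) to sign-extend it in place before the bge
; compare. min/max also have no __atomic_fetch_* libcall, so the I-only
; configurations loop around __atomic_compare_exchange_1 instead.
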
define signext i8 @atomicrmw_max_i8_monotonic(i8 *%a, i8 %b) nounwind {
; RV32I-LABEL: atomicrmw_max_i8_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lbu a3, 0(a0)
; RV32I-NEXT: mv s2, a1
; RV32I-NEXT: slli a0, a1, 24
; RV32I-NEXT: srai s1, a0, 24
; RV32I-NEXT: j .LBB10_2
; RV32I-NEXT: .LBB10_1: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB10_2 Depth=1
; RV32I-NEXT: sb a3, 15(sp)
; RV32I-NEXT: addi a1, sp, 15
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: mv a3, zero
; RV32I-NEXT: mv a4, zero
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
; RV32I-NEXT: lb a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB10_4
; RV32I-NEXT: .LBB10_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: slli a0, a3, 24
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: blt s1, a0, .LBB10_1
; RV32I-NEXT: # %bb.3: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB10_2 Depth=1
; RV32I-NEXT: mv a2, s2
; RV32I-NEXT: j .LBB10_1
; RV32I-NEXT: .LBB10_4: # %atomicrmw.end
; RV32I-NEXT: slli a0, a3, 24
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_max_i8_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: andi a6, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: andi a3, a0, 24
; RV32IA-NEXT: addi a4, zero, 255
; RV32IA-NEXT: sll a7, a4, a0
; RV32IA-NEXT: slli a1, a1, 24
; RV32IA-NEXT: srai a1, a1, 24
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: addi a5, zero, 24
; RV32IA-NEXT: sub a3, a5, a3
; RV32IA-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1
; RV32IA-NEXT: lr.w a5, (a6)
; RV32IA-NEXT: and a4, a5, a7
; RV32IA-NEXT: mv a2, a5
; RV32IA-NEXT: sll a4, a4, a3
; RV32IA-NEXT: sra a4, a4, a3
; RV32IA-NEXT: bge a4, a1, .LBB10_3
; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB10_1 Depth=1
; RV32IA-NEXT: xor a2, a5, a1
; RV32IA-NEXT: and a2, a2, a7
; RV32IA-NEXT: xor a2, a5, a2
; RV32IA-NEXT: .LBB10_3: # in Loop: Header=BB10_1 Depth=1
; RV32IA-NEXT: sc.w a2, a2, (a6)
; RV32IA-NEXT: bnez a2, .LBB10_1
; RV32IA-NEXT: # %bb.4:
; RV32IA-NEXT: srl a0, a5, a0
; RV32IA-NEXT: slli a0, a0, 24
; RV32IA-NEXT: srai a0, a0, 24
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_max_i8_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -48
; RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lbu a3, 0(a0)
; RV64I-NEXT: mv s2, a1
; RV64I-NEXT: slli a0, a1, 56
; RV64I-NEXT: srai s1, a0, 56
; RV64I-NEXT: j .LBB10_2
; RV64I-NEXT: .LBB10_1: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB10_2 Depth=1
; RV64I-NEXT: sb a3, 15(sp)
; RV64I-NEXT: addi a1, sp, 15
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a3, zero
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
; RV64I-NEXT: lb a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB10_4
; RV64I-NEXT: .LBB10_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: slli a0, a3, 56
; RV64I-NEXT: srai a0, a0, 56
; RV64I-NEXT: mv a2, a3
; RV64I-NEXT: blt s1, a0, .LBB10_1
; RV64I-NEXT: # %bb.3: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB10_2 Depth=1
; RV64I-NEXT: mv a2, s2
; RV64I-NEXT: j .LBB10_1
; RV64I-NEXT: .LBB10_4: # %atomicrmw.end
; RV64I-NEXT: slli a0, a3, 56
; RV64I-NEXT: srai a0, a0, 56
; RV64I-NEXT: ld s2, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_max_i8_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: andi a6, a0, -4
; RV64IA-NEXT: slli a0, a0, 3
; RV64IA-NEXT: andi a3, a0, 24
; RV64IA-NEXT: addi a4, zero, 255
; RV64IA-NEXT: sllw a7, a4, a0
; RV64IA-NEXT: slli a1, a1, 56
; RV64IA-NEXT: srai a1, a1, 56
; RV64IA-NEXT: sllw a1, a1, a0
; RV64IA-NEXT: addi a5, zero, 56
; RV64IA-NEXT: sub a3, a5, a3
; RV64IA-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1
; RV64IA-NEXT: lr.w a5, (a6)
; RV64IA-NEXT: and a4, a5, a7
; RV64IA-NEXT: mv a2, a5
; RV64IA-NEXT: sll a4, a4, a3
; RV64IA-NEXT: sra a4, a4, a3
; RV64IA-NEXT: bge a4, a1, .LBB10_3
; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB10_1 Depth=1
; RV64IA-NEXT: xor a2, a5, a1
; RV64IA-NEXT: and a2, a2, a7
; RV64IA-NEXT: xor a2, a5, a2
; RV64IA-NEXT: .LBB10_3: # in Loop: Header=BB10_1 Depth=1
; RV64IA-NEXT: sc.w a2, a2, (a6)
; RV64IA-NEXT: bnez a2, .LBB10_1
; RV64IA-NEXT: # %bb.4:
; RV64IA-NEXT: srlw a0, a5, a0
; RV64IA-NEXT: slli a0, a0, 56
; RV64IA-NEXT: srai a0, a0, 56
; RV64IA-NEXT: ret
%1 = atomicrmw max i8* %a, i8 %b monotonic
ret i8 %1
}
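
; min is max with the comparison flipped: the loop keeps the loaded value
; when `bge a1, a4` (operand >= loaded field) rather than `bge a4, a1`.
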
define signext i8 @atomicrmw_min_i8_monotonic(i8 *%a, i8 %b) nounwind {
; RV32I-LABEL: atomicrmw_min_i8_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lbu a3, 0(a0)
; RV32I-NEXT: mv s2, a1
; RV32I-NEXT: slli a0, a1, 24
; RV32I-NEXT: srai s1, a0, 24
; RV32I-NEXT: j .LBB11_2
; RV32I-NEXT: .LBB11_1: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB11_2 Depth=1
; RV32I-NEXT: sb a3, 15(sp)
; RV32I-NEXT: addi a1, sp, 15
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: mv a3, zero
; RV32I-NEXT: mv a4, zero
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
; RV32I-NEXT: lb a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB11_4
; RV32I-NEXT: .LBB11_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: slli a0, a3, 24
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: bge s1, a0, .LBB11_1
; RV32I-NEXT: # %bb.3: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB11_2 Depth=1
; RV32I-NEXT: mv a2, s2
; RV32I-NEXT: j .LBB11_1
; RV32I-NEXT: .LBB11_4: # %atomicrmw.end
; RV32I-NEXT: slli a0, a3, 24
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_min_i8_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: andi a6, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: andi a3, a0, 24
; RV32IA-NEXT: addi a4, zero, 255
; RV32IA-NEXT: sll a7, a4, a0
; RV32IA-NEXT: slli a1, a1, 24
; RV32IA-NEXT: srai a1, a1, 24
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: addi a5, zero, 24
; RV32IA-NEXT: sub a3, a5, a3
; RV32IA-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1
; RV32IA-NEXT: lr.w a5, (a6)
; RV32IA-NEXT: and a4, a5, a7
; RV32IA-NEXT: mv a2, a5
; RV32IA-NEXT: sll a4, a4, a3
; RV32IA-NEXT: sra a4, a4, a3
; RV32IA-NEXT: bge a1, a4, .LBB11_3
; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB11_1 Depth=1
; RV32IA-NEXT: xor a2, a5, a1
; RV32IA-NEXT: and a2, a2, a7
; RV32IA-NEXT: xor a2, a5, a2
; RV32IA-NEXT: .LBB11_3: # in Loop: Header=BB11_1 Depth=1
; RV32IA-NEXT: sc.w a2, a2, (a6)
; RV32IA-NEXT: bnez a2, .LBB11_1
; RV32IA-NEXT: # %bb.4:
; RV32IA-NEXT: srl a0, a5, a0
; RV32IA-NEXT: slli a0, a0, 24
; RV32IA-NEXT: srai a0, a0, 24
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_min_i8_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -48
; RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lbu a3, 0(a0)
; RV64I-NEXT: mv s2, a1
; RV64I-NEXT: slli a0, a1, 56
; RV64I-NEXT: srai s1, a0, 56
; RV64I-NEXT: j .LBB11_2
; RV64I-NEXT: .LBB11_1: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB11_2 Depth=1
; RV64I-NEXT: sb a3, 15(sp)
; RV64I-NEXT: addi a1, sp, 15
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a3, zero
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
; RV64I-NEXT: lb a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB11_4
; RV64I-NEXT: .LBB11_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: slli a0, a3, 56
; RV64I-NEXT: srai a0, a0, 56
; RV64I-NEXT: mv a2, a3
; RV64I-NEXT: bge s1, a0, .LBB11_1
; RV64I-NEXT: # %bb.3: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB11_2 Depth=1
; RV64I-NEXT: mv a2, s2
; RV64I-NEXT: j .LBB11_1
; RV64I-NEXT: .LBB11_4: # %atomicrmw.end
; RV64I-NEXT: slli a0, a3, 56
; RV64I-NEXT: srai a0, a0, 56
; RV64I-NEXT: ld s2, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_min_i8_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: andi a6, a0, -4
; RV64IA-NEXT: slli a0, a0, 3
; RV64IA-NEXT: andi a3, a0, 24
; RV64IA-NEXT: addi a4, zero, 255
; RV64IA-NEXT: sllw a7, a4, a0
; RV64IA-NEXT: slli a1, a1, 56
; RV64IA-NEXT: srai a1, a1, 56
; RV64IA-NEXT: sllw a1, a1, a0
; RV64IA-NEXT: addi a5, zero, 56
; RV64IA-NEXT: sub a3, a5, a3
; RV64IA-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1
; RV64IA-NEXT: lr.w a5, (a6)
; RV64IA-NEXT: and a4, a5, a7
; RV64IA-NEXT: mv a2, a5
; RV64IA-NEXT: sll a4, a4, a3
; RV64IA-NEXT: sra a4, a4, a3
; RV64IA-NEXT: bge a1, a4, .LBB11_3
; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB11_1 Depth=1
; RV64IA-NEXT: xor a2, a5, a1
; RV64IA-NEXT: and a2, a2, a7
; RV64IA-NEXT: xor a2, a5, a2
; RV64IA-NEXT: .LBB11_3: # in Loop: Header=BB11_1 Depth=1
; RV64IA-NEXT: sc.w a2, a2, (a6)
; RV64IA-NEXT: bnez a2, .LBB11_1
; RV64IA-NEXT: # %bb.4:
; RV64IA-NEXT: srlw a0, a5, a0
; RV64IA-NEXT: slli a0, a0, 56
; RV64IA-NEXT: srai a0, a0, 56
; RV64IA-NEXT: ret
%1 = atomicrmw min i8* %a, i8 %b monotonic
ret i8 %1
}
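
; Unsigned min/max need no sign extension inside the loop: the masked fields
; compare correctly with bgeu as they are. umin below flips the compare to
; `bgeu a1, a2`, mirroring the signed pair above.
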
define signext i8 @atomicrmw_umax_i8_monotonic(i8 *%a, i8 %b) nounwind {
; RV32I-LABEL: atomicrmw_umax_i8_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lbu a3, 0(a0)
; RV32I-NEXT: mv s2, a1
; RV32I-NEXT: andi s1, a1, 255
; RV32I-NEXT: j .LBB12_2
; RV32I-NEXT: .LBB12_1: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB12_2 Depth=1
; RV32I-NEXT: sb a3, 15(sp)
; RV32I-NEXT: addi a1, sp, 15
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: mv a3, zero
; RV32I-NEXT: mv a4, zero
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
; RV32I-NEXT: lb a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB12_4
; RV32I-NEXT: .LBB12_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: andi a0, a3, 255
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: bltu s1, a0, .LBB12_1
; RV32I-NEXT: # %bb.3: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB12_2 Depth=1
; RV32I-NEXT: mv a2, s2
; RV32I-NEXT: j .LBB12_1
; RV32I-NEXT: .LBB12_4: # %atomicrmw.end
; RV32I-NEXT: slli a0, a3, 24
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_umax_i8_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: andi a6, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: addi a3, zero, 255
; RV32IA-NEXT: sll a3, a3, a0
; RV32IA-NEXT: andi a1, a1, 255
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: .LBB12_1: # =>This Inner Loop Header: Depth=1
; RV32IA-NEXT: lr.w a4, (a6)
; RV32IA-NEXT: and a2, a4, a3
; RV32IA-NEXT: mv a5, a4
; RV32IA-NEXT: bgeu a2, a1, .LBB12_3
; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB12_1 Depth=1
; RV32IA-NEXT: xor a5, a4, a1
; RV32IA-NEXT: and a5, a5, a3
; RV32IA-NEXT: xor a5, a4, a5
; RV32IA-NEXT: .LBB12_3: # in Loop: Header=BB12_1 Depth=1
; RV32IA-NEXT: sc.w a5, a5, (a6)
; RV32IA-NEXT: bnez a5, .LBB12_1
; RV32IA-NEXT: # %bb.4:
; RV32IA-NEXT: srl a0, a4, a0
; RV32IA-NEXT: slli a0, a0, 24
; RV32IA-NEXT: srai a0, a0, 24
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_umax_i8_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -48
; RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lbu a3, 0(a0)
; RV64I-NEXT: mv s2, a1
; RV64I-NEXT: andi s1, a1, 255
; RV64I-NEXT: j .LBB12_2
; RV64I-NEXT: .LBB12_1: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB12_2 Depth=1
; RV64I-NEXT: sb a3, 15(sp)
; RV64I-NEXT: addi a1, sp, 15
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a3, zero
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
; RV64I-NEXT: lb a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB12_4
; RV64I-NEXT: .LBB12_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: andi a0, a3, 255
; RV64I-NEXT: mv a2, a3
; RV64I-NEXT: bltu s1, a0, .LBB12_1
; RV64I-NEXT: # %bb.3: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB12_2 Depth=1
; RV64I-NEXT: mv a2, s2
; RV64I-NEXT: j .LBB12_1
; RV64I-NEXT: .LBB12_4: # %atomicrmw.end
; RV64I-NEXT: slli a0, a3, 56
; RV64I-NEXT: srai a0, a0, 56
; RV64I-NEXT: ld s2, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_umax_i8_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: andi a6, a0, -4
; RV64IA-NEXT: slli a0, a0, 3
; RV64IA-NEXT: addi a3, zero, 255
; RV64IA-NEXT: sllw a3, a3, a0
; RV64IA-NEXT: andi a1, a1, 255
; RV64IA-NEXT: sllw a1, a1, a0
; RV64IA-NEXT: .LBB12_1: # =>This Inner Loop Header: Depth=1
; RV64IA-NEXT: lr.w a4, (a6)
; RV64IA-NEXT: and a2, a4, a3
; RV64IA-NEXT: mv a5, a4
; RV64IA-NEXT: bgeu a2, a1, .LBB12_3
; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB12_1 Depth=1
; RV64IA-NEXT: xor a5, a4, a1
; RV64IA-NEXT: and a5, a5, a3
; RV64IA-NEXT: xor a5, a4, a5
; RV64IA-NEXT: .LBB12_3: # in Loop: Header=BB12_1 Depth=1
; RV64IA-NEXT: sc.w a5, a5, (a6)
; RV64IA-NEXT: bnez a5, .LBB12_1
; RV64IA-NEXT: # %bb.4:
; RV64IA-NEXT: srlw a0, a4, a0
; RV64IA-NEXT: slli a0, a0, 56
; RV64IA-NEXT: srai a0, a0, 56
; RV64IA-NEXT: ret
%1 = atomicrmw umax i8* %a, i8 %b monotonic
ret i8 %1
}

define signext i8 @atomicrmw_umin_i8_monotonic(i8 *%a, i8 %b) nounwind {
; RV32I-LABEL: atomicrmw_umin_i8_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lbu a3, 0(a0)
; RV32I-NEXT: mv s2, a1
; RV32I-NEXT: andi s1, a1, 255
; RV32I-NEXT: j .LBB13_2
; RV32I-NEXT: .LBB13_1: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB13_2 Depth=1
; RV32I-NEXT: sb a3, 15(sp)
; RV32I-NEXT: addi a1, sp, 15
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: mv a3, zero
; RV32I-NEXT: mv a4, zero
; RV32I-NEXT: call __atomic_compare_exchange_1@plt
; RV32I-NEXT: lb a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB13_4
; RV32I-NEXT: .LBB13_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: andi a0, a3, 255
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: bgeu s1, a0, .LBB13_1
; RV32I-NEXT: # %bb.3: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB13_2 Depth=1
; RV32I-NEXT: mv a2, s2
; RV32I-NEXT: j .LBB13_1
; RV32I-NEXT: .LBB13_4: # %atomicrmw.end
; RV32I-NEXT: slli a0, a3, 24
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_umin_i8_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: andi a6, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: addi a3, zero, 255
; RV32IA-NEXT: sll a3, a3, a0
; RV32IA-NEXT: andi a1, a1, 255
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: .LBB13_1: # =>This Inner Loop Header: Depth=1
; RV32IA-NEXT: lr.w a4, (a6)
; RV32IA-NEXT: and a2, a4, a3
; RV32IA-NEXT: mv a5, a4
; RV32IA-NEXT: bgeu a1, a2, .LBB13_3
; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB13_1 Depth=1
; RV32IA-NEXT: xor a5, a4, a1
; RV32IA-NEXT: and a5, a5, a3
; RV32IA-NEXT: xor a5, a4, a5
; RV32IA-NEXT: .LBB13_3: # in Loop: Header=BB13_1 Depth=1
; RV32IA-NEXT: sc.w a5, a5, (a6)
; RV32IA-NEXT: bnez a5, .LBB13_1
; RV32IA-NEXT: # %bb.4:
; RV32IA-NEXT: srl a0, a4, a0
; RV32IA-NEXT: slli a0, a0, 24
; RV32IA-NEXT: srai a0, a0, 24
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_umin_i8_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -48
; RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lbu a3, 0(a0)
; RV64I-NEXT: mv s2, a1
; RV64I-NEXT: andi s1, a1, 255
; RV64I-NEXT: j .LBB13_2
; RV64I-NEXT: .LBB13_1: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB13_2 Depth=1
; RV64I-NEXT: sb a3, 15(sp)
; RV64I-NEXT: addi a1, sp, 15
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a3, zero
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_1@plt
; RV64I-NEXT: lb a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB13_4
; RV64I-NEXT: .LBB13_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: andi a0, a3, 255
; RV64I-NEXT: mv a2, a3
; RV64I-NEXT: bgeu s1, a0, .LBB13_1
; RV64I-NEXT: # %bb.3: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB13_2 Depth=1
; RV64I-NEXT: mv a2, s2
; RV64I-NEXT: j .LBB13_1
; RV64I-NEXT: .LBB13_4: # %atomicrmw.end
; RV64I-NEXT: slli a0, a3, 56
; RV64I-NEXT: srai a0, a0, 56
; RV64I-NEXT: ld s2, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_umin_i8_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: andi a6, a0, -4
; RV64IA-NEXT: slli a0, a0, 3
; RV64IA-NEXT: addi a3, zero, 255
; RV64IA-NEXT: sllw a3, a3, a0
; RV64IA-NEXT: andi a1, a1, 255
; RV64IA-NEXT: sllw a1, a1, a0
; RV64IA-NEXT: .LBB13_1: # =>This Inner Loop Header: Depth=1
; RV64IA-NEXT: lr.w a4, (a6)
; RV64IA-NEXT: and a2, a4, a3
; RV64IA-NEXT: mv a5, a4
; RV64IA-NEXT: bgeu a1, a2, .LBB13_3
; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB13_1 Depth=1
; RV64IA-NEXT: xor a5, a4, a1
; RV64IA-NEXT: and a5, a5, a3
; RV64IA-NEXT: xor a5, a4, a5
; RV64IA-NEXT: .LBB13_3: # in Loop: Header=BB13_1 Depth=1
; RV64IA-NEXT: sc.w a5, a5, (a6)
; RV64IA-NEXT: bnez a5, .LBB13_1
; RV64IA-NEXT: # %bb.4:
; RV64IA-NEXT: srlw a0, a4, a0
; RV64IA-NEXT: slli a0, a0, 56
; RV64IA-NEXT: srai a0, a0, 56
; RV64IA-NEXT: ret
%1 = atomicrmw umin i8* %a, i8 %b monotonic
ret i8 %1
}
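
; The i16 tests repeat the i8 patterns with a halfword field: the 0xffff
; mask is materialised with lui a3, 16 / addi(w) a3, a3, -1, and the final
; sign extension uses slli/srai by 16 on RV32 and by 48 on RV64.
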
define signext i16 @atomicrmw_xchg_i16_monotonic(i16* %a, i16 %b) nounwind {
; RV32I-LABEL: atomicrmw_xchg_i16_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a2, zero
; RV32I-NEXT: call __atomic_exchange_2@plt
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_xchg_i16_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: lui a3, 16
; RV32IA-NEXT: addi a3, a3, -1
; RV32IA-NEXT: sll a4, a3, a0
; RV32IA-NEXT: and a1, a1, a3
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: .LBB14_1: # =>This Inner Loop Header: Depth=1
; RV32IA-NEXT: lr.w a3, (a2)
; RV32IA-NEXT: mv a5, a1
; RV32IA-NEXT: xor a5, a3, a5
; RV32IA-NEXT: and a5, a5, a4
; RV32IA-NEXT: xor a5, a3, a5
; RV32IA-NEXT: sc.w a5, a5, (a2)
; RV32IA-NEXT: bnez a5, .LBB14_1
; RV32IA-NEXT: # %bb.2:
; RV32IA-NEXT: srl a0, a3, a0
; RV32IA-NEXT: slli a0, a0, 16
; RV32IA-NEXT: srai a0, a0, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_xchg_i16_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_exchange_2@plt
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srai a0, a0, 48
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_xchg_i16_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: andi a2, a0, -4
; RV64IA-NEXT: slli a0, a0, 3
; RV64IA-NEXT: lui a3, 16
; RV64IA-NEXT: addiw a3, a3, -1
; RV64IA-NEXT: sllw a4, a3, a0
; RV64IA-NEXT: and a1, a1, a3
; RV64IA-NEXT: sllw a1, a1, a0
; RV64IA-NEXT: .LBB14_1: # =>This Inner Loop Header: Depth=1
; RV64IA-NEXT: lr.w a3, (a2)
; RV64IA-NEXT: mv a5, a1
; RV64IA-NEXT: xor a5, a3, a5
; RV64IA-NEXT: and a5, a5, a4
; RV64IA-NEXT: xor a5, a3, a5
; RV64IA-NEXT: sc.w a5, a5, (a2)
; RV64IA-NEXT: bnez a5, .LBB14_1
; RV64IA-NEXT: # %bb.2:
; RV64IA-NEXT: srlw a0, a3, a0
; RV64IA-NEXT: slli a0, a0, 48
; RV64IA-NEXT: srai a0, a0, 48
; RV64IA-NEXT: ret
%1 = atomicrmw xchg i16* %a, i16 %b monotonic
ret i16 %1
}

define signext i16 @atomicrmw_add_i16_monotonic(i16 *%a, i16 %b) nounwind {
; RV32I-LABEL: atomicrmw_add_i16_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a2, zero
; RV32I-NEXT: call __atomic_fetch_add_2@plt
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_add_i16_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: lui a3, 16
; RV32IA-NEXT: addi a3, a3, -1
; RV32IA-NEXT: sll a4, a3, a0
; RV32IA-NEXT: and a1, a1, a3
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: .LBB15_1: # =>This Inner Loop Header: Depth=1
; RV32IA-NEXT: lr.w a3, (a2)
; RV32IA-NEXT: add a5, a3, a1
; RV32IA-NEXT: xor a5, a3, a5
; RV32IA-NEXT: and a5, a5, a4
; RV32IA-NEXT: xor a5, a3, a5
; RV32IA-NEXT: sc.w a5, a5, (a2)
; RV32IA-NEXT: bnez a5, .LBB15_1
; RV32IA-NEXT: # %bb.2:
; RV32IA-NEXT: srl a0, a3, a0
; RV32IA-NEXT: slli a0, a0, 16
; RV32IA-NEXT: srai a0, a0, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_add_i16_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_fetch_add_2@plt
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srai a0, a0, 48
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_add_i16_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: andi a2, a0, -4
; RV64IA-NEXT: slli a0, a0, 3
; RV64IA-NEXT: lui a3, 16
; RV64IA-NEXT: addiw a3, a3, -1
; RV64IA-NEXT: sllw a4, a3, a0
; RV64IA-NEXT: and a1, a1, a3
; RV64IA-NEXT: sllw a1, a1, a0
; RV64IA-NEXT: .LBB15_1: # =>This Inner Loop Header: Depth=1
; RV64IA-NEXT: lr.w a3, (a2)
; RV64IA-NEXT: add a5, a3, a1
; RV64IA-NEXT: xor a5, a3, a5
; RV64IA-NEXT: and a5, a5, a4
; RV64IA-NEXT: xor a5, a3, a5
; RV64IA-NEXT: sc.w a5, a5, (a2)
; RV64IA-NEXT: bnez a5, .LBB15_1
; RV64IA-NEXT: # %bb.2:
; RV64IA-NEXT: srlw a0, a3, a0
; RV64IA-NEXT: slli a0, a0, 48
; RV64IA-NEXT: srai a0, a0, 48
; RV64IA-NEXT: ret
%1 = atomicrmw add i16* %a, i16 %b monotonic
ret i16 %1
}

define signext i16 @atomicrmw_sub_i16_monotonic(i16* %a, i16 %b) nounwind {
; RV32I-LABEL: atomicrmw_sub_i16_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a2, zero
; RV32I-NEXT: call __atomic_fetch_sub_2@plt
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_sub_i16_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: lui a3, 16
; RV32IA-NEXT: addi a3, a3, -1
; RV32IA-NEXT: sll a4, a3, a0
; RV32IA-NEXT: and a1, a1, a3
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: .LBB16_1: # =>This Inner Loop Header: Depth=1
; RV32IA-NEXT: lr.w a3, (a2)
; RV32IA-NEXT: sub a5, a3, a1
; RV32IA-NEXT: xor a5, a3, a5
; RV32IA-NEXT: and a5, a5, a4
; RV32IA-NEXT: xor a5, a3, a5
; RV32IA-NEXT: sc.w a5, a5, (a2)
; RV32IA-NEXT: bnez a5, .LBB16_1
; RV32IA-NEXT: # %bb.2:
; RV32IA-NEXT: srl a0, a3, a0
; RV32IA-NEXT: slli a0, a0, 16
; RV32IA-NEXT: srai a0, a0, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_sub_i16_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_fetch_sub_2@plt
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srai a0, a0, 48
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_sub_i16_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: andi a2, a0, -4
; RV64IA-NEXT: slli a0, a0, 3
; RV64IA-NEXT: lui a3, 16
; RV64IA-NEXT: addiw a3, a3, -1
; RV64IA-NEXT: sllw a4, a3, a0
; RV64IA-NEXT: and a1, a1, a3
; RV64IA-NEXT: sllw a1, a1, a0
; RV64IA-NEXT: .LBB16_1: # =>This Inner Loop Header: Depth=1
; RV64IA-NEXT: lr.w a3, (a2)
; RV64IA-NEXT: sub a5, a3, a1
; RV64IA-NEXT: xor a5, a3, a5
; RV64IA-NEXT: and a5, a5, a4
; RV64IA-NEXT: xor a5, a3, a5
; RV64IA-NEXT: sc.w a5, a5, (a2)
; RV64IA-NEXT: bnez a5, .LBB16_1
; RV64IA-NEXT: # %bb.2:
; RV64IA-NEXT: srlw a0, a3, a0
; RV64IA-NEXT: slli a0, a0, 48
; RV64IA-NEXT: srai a0, a0, 48
; RV64IA-NEXT: ret
%1 = atomicrmw sub i16* %a, i16 %b monotonic
ret i16 %1
}

define signext i16 @atomicrmw_and_i16_monotonic(i16 *%a, i16 %b) nounwind {
; RV32I-LABEL: atomicrmw_and_i16_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a2, zero
; RV32I-NEXT: call __atomic_fetch_and_2@plt
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_and_i16_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: lui a3, 16
; RV32IA-NEXT: addi a3, a3, -1
; RV32IA-NEXT: sll a4, a3, a0
; RV32IA-NEXT: not a4, a4
; RV32IA-NEXT: and a1, a1, a3
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: or a1, a4, a1
; RV32IA-NEXT: amoand.w a1, a1, (a2)
; RV32IA-NEXT: srl a0, a1, a0
; RV32IA-NEXT: slli a0, a0, 16
; RV32IA-NEXT: srai a0, a0, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_and_i16_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_fetch_and_2@plt
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srai a0, a0, 48
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_and_i16_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: andi a2, a0, -4
; RV64IA-NEXT: slli a0, a0, 3
; RV64IA-NEXT: lui a3, 16
; RV64IA-NEXT: addiw a3, a3, -1
; RV64IA-NEXT: sllw a4, a3, a0
; RV64IA-NEXT: not a4, a4
; RV64IA-NEXT: and a1, a1, a3
; RV64IA-NEXT: sllw a1, a1, a0
; RV64IA-NEXT: or a1, a4, a1
; RV64IA-NEXT: amoand.w a1, a1, (a2)
; RV64IA-NEXT: srlw a0, a1, a0
; RV64IA-NEXT: slli a0, a0, 48
; RV64IA-NEXT: srai a0, a0, 48
; RV64IA-NEXT: ret
%1 = atomicrmw and i16* %a, i16 %b monotonic
ret i16 %1
}

define signext i16 @atomicrmw_nand_i16_monotonic(i16* %a, i16 %b) nounwind {
; RV32I-LABEL: atomicrmw_nand_i16_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a2, zero
; RV32I-NEXT: call __atomic_fetch_nand_2@plt
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_nand_i16_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: lui a3, 16
; RV32IA-NEXT: addi a3, a3, -1
; RV32IA-NEXT: sll a4, a3, a0
; RV32IA-NEXT: and a1, a1, a3
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: .LBB18_1: # =>This Inner Loop Header: Depth=1
; RV32IA-NEXT: lr.w a3, (a2)
; RV32IA-NEXT: and a5, a3, a1
; RV32IA-NEXT: not a5, a5
; RV32IA-NEXT: xor a5, a3, a5
; RV32IA-NEXT: and a5, a5, a4
; RV32IA-NEXT: xor a5, a3, a5
; RV32IA-NEXT: sc.w a5, a5, (a2)
; RV32IA-NEXT: bnez a5, .LBB18_1
; RV32IA-NEXT: # %bb.2:
; RV32IA-NEXT: srl a0, a3, a0
; RV32IA-NEXT: slli a0, a0, 16
; RV32IA-NEXT: srai a0, a0, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_nand_i16_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_fetch_nand_2@plt
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srai a0, a0, 48
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_nand_i16_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: andi a2, a0, -4
; RV64IA-NEXT: slli a0, a0, 3
; RV64IA-NEXT: lui a3, 16
; RV64IA-NEXT: addiw a3, a3, -1
; RV64IA-NEXT: sllw a4, a3, a0
; RV64IA-NEXT: and a1, a1, a3
; RV64IA-NEXT: sllw a1, a1, a0
; RV64IA-NEXT: .LBB18_1: # =>This Inner Loop Header: Depth=1
; RV64IA-NEXT: lr.w a3, (a2)
; RV64IA-NEXT: and a5, a3, a1
; RV64IA-NEXT: not a5, a5
; RV64IA-NEXT: xor a5, a3, a5
; RV64IA-NEXT: and a5, a5, a4
; RV64IA-NEXT: xor a5, a3, a5
; RV64IA-NEXT: sc.w a5, a5, (a2)
; RV64IA-NEXT: bnez a5, .LBB18_1
; RV64IA-NEXT: # %bb.2:
; RV64IA-NEXT: srlw a0, a3, a0
; RV64IA-NEXT: slli a0, a0, 48
; RV64IA-NEXT: srai a0, a0, 48
; RV64IA-NEXT: ret
%1 = atomicrmw nand i16* %a, i16 %b monotonic
ret i16 %1
}

define signext i16 @atomicrmw_or_i16_monotonic(i16 *%a, i16 %b) nounwind {
; RV32I-LABEL: atomicrmw_or_i16_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a2, zero
; RV32I-NEXT: call __atomic_fetch_or_2@plt
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_or_i16_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: lui a3, 16
; RV32IA-NEXT: addi a3, a3, -1
; RV32IA-NEXT: and a1, a1, a3
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: amoor.w a1, a1, (a2)
; RV32IA-NEXT: srl a0, a1, a0
; RV32IA-NEXT: slli a0, a0, 16
; RV32IA-NEXT: srai a0, a0, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_or_i16_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_fetch_or_2@plt
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srai a0, a0, 48
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_or_i16_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: andi a2, a0, -4
; RV64IA-NEXT: slli a0, a0, 3
; RV64IA-NEXT: lui a3, 16
; RV64IA-NEXT: addiw a3, a3, -1
; RV64IA-NEXT: and a1, a1, a3
; RV64IA-NEXT: sllw a1, a1, a0
; RV64IA-NEXT: amoor.w a1, a1, (a2)
; RV64IA-NEXT: srlw a0, a1, a0
; RV64IA-NEXT: slli a0, a0, 48
; RV64IA-NEXT: srai a0, a0, 48
; RV64IA-NEXT: ret
%1 = atomicrmw or i16* %a, i16 %b monotonic
ret i16 %1
}

define signext i16 @atomicrmw_xor_i16_monotonic(i16 *%a, i16 %b) nounwind {
; RV32I-LABEL: atomicrmw_xor_i16_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a2, zero
; RV32I-NEXT: call __atomic_fetch_xor_2@plt
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_xor_i16_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: lui a3, 16
; RV32IA-NEXT: addi a3, a3, -1
; RV32IA-NEXT: and a1, a1, a3
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: amoxor.w a1, a1, (a2)
; RV32IA-NEXT: srl a0, a1, a0
; RV32IA-NEXT: slli a0, a0, 16
; RV32IA-NEXT: srai a0, a0, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_xor_i16_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_fetch_xor_2@plt
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srai a0, a0, 48
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_xor_i16_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: andi a2, a0, -4
; RV64IA-NEXT: slli a0, a0, 3
; RV64IA-NEXT: lui a3, 16
; RV64IA-NEXT: addiw a3, a3, -1
; RV64IA-NEXT: and a1, a1, a3
; RV64IA-NEXT: sllw a1, a1, a0
; RV64IA-NEXT: amoxor.w a1, a1, (a2)
; RV64IA-NEXT: srlw a0, a1, a0
; RV64IA-NEXT: slli a0, a0, 48
; RV64IA-NEXT: srai a0, a0, 48
; RV64IA-NEXT: ret
%1 = atomicrmw xor i16* %a, i16 %b monotonic
ret i16 %1
}

define signext i16 @atomicrmw_max_i16_monotonic(i16 *%a, i16 %b) nounwind {
; RV32I-LABEL: atomicrmw_max_i16_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lhu a3, 0(a0)
; RV32I-NEXT: mv s2, a1
; RV32I-NEXT: slli a0, a1, 16
; RV32I-NEXT: srai s1, a0, 16
; RV32I-NEXT: j .LBB21_2
; RV32I-NEXT: .LBB21_1: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB21_2 Depth=1
; RV32I-NEXT: sh a3, 14(sp)
; RV32I-NEXT: addi a1, sp, 14
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: mv a3, zero
; RV32I-NEXT: mv a4, zero
; RV32I-NEXT: call __atomic_compare_exchange_2@plt
; RV32I-NEXT: lh a3, 14(sp)
; RV32I-NEXT: bnez a0, .LBB21_4
; RV32I-NEXT: .LBB21_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: slli a0, a3, 16
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: blt s1, a0, .LBB21_1
; RV32I-NEXT: # %bb.3: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB21_2 Depth=1
; RV32I-NEXT: mv a2, s2
; RV32I-NEXT: j .LBB21_1
; RV32I-NEXT: .LBB21_4: # %atomicrmw.end
; RV32I-NEXT: slli a0, a3, 16
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_max_i16_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: andi a6, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: andi a3, a0, 24
; RV32IA-NEXT: lui a4, 16
; RV32IA-NEXT: addi a4, a4, -1
; RV32IA-NEXT: sll a7, a4, a0
; RV32IA-NEXT: slli a1, a1, 16
; RV32IA-NEXT: srai a1, a1, 16
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: addi a5, zero, 16
; RV32IA-NEXT: sub a3, a5, a3
; RV32IA-NEXT: .LBB21_1: # =>This Inner Loop Header: Depth=1
; RV32IA-NEXT: lr.w a5, (a6)
; RV32IA-NEXT: and a4, a5, a7
; RV32IA-NEXT: mv a2, a5
; RV32IA-NEXT: sll a4, a4, a3
; RV32IA-NEXT: sra a4, a4, a3
; RV32IA-NEXT: bge a4, a1, .LBB21_3
; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB21_1 Depth=1
; RV32IA-NEXT: xor a2, a5, a1
; RV32IA-NEXT: and a2, a2, a7
; RV32IA-NEXT: xor a2, a5, a2
; RV32IA-NEXT: .LBB21_3: # in Loop: Header=BB21_1 Depth=1
; RV32IA-NEXT: sc.w a2, a2, (a6)
; RV32IA-NEXT: bnez a2, .LBB21_1
; RV32IA-NEXT: # %bb.4:
; RV32IA-NEXT: srl a0, a5, a0
; RV32IA-NEXT: slli a0, a0, 16
; RV32IA-NEXT: srai a0, a0, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_max_i16_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -48
; RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lhu a3, 0(a0)
; RV64I-NEXT: mv s2, a1
; RV64I-NEXT: slli a0, a1, 48
; RV64I-NEXT: srai s1, a0, 48
; RV64I-NEXT: j .LBB21_2
; RV64I-NEXT: .LBB21_1: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB21_2 Depth=1
; RV64I-NEXT: sh a3, 14(sp)
; RV64I-NEXT: addi a1, sp, 14
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a3, zero
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_2@plt
; RV64I-NEXT: lh a3, 14(sp)
; RV64I-NEXT: bnez a0, .LBB21_4
; RV64I-NEXT: .LBB21_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: slli a0, a3, 48
; RV64I-NEXT: srai a0, a0, 48
; RV64I-NEXT: mv a2, a3
; RV64I-NEXT: blt s1, a0, .LBB21_1
; RV64I-NEXT: # %bb.3: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB21_2 Depth=1
; RV64I-NEXT: mv a2, s2
; RV64I-NEXT: j .LBB21_1
; RV64I-NEXT: .LBB21_4: # %atomicrmw.end
; RV64I-NEXT: slli a0, a3, 48
; RV64I-NEXT: srai a0, a0, 48
; RV64I-NEXT: ld s2, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_max_i16_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: andi a6, a0, -4
; RV64IA-NEXT: slli a0, a0, 3
; RV64IA-NEXT: andi a3, a0, 24
; RV64IA-NEXT: lui a4, 16
; RV64IA-NEXT: addiw a4, a4, -1
; RV64IA-NEXT: sllw a7, a4, a0
; RV64IA-NEXT: slli a1, a1, 48
; RV64IA-NEXT: srai a1, a1, 48
; RV64IA-NEXT: sllw a1, a1, a0
; RV64IA-NEXT: addi a5, zero, 48
; RV64IA-NEXT: sub a3, a5, a3
; RV64IA-NEXT: .LBB21_1: # =>This Inner Loop Header: Depth=1
; RV64IA-NEXT: lr.w a5, (a6)
; RV64IA-NEXT: and a4, a5, a7
; RV64IA-NEXT: mv a2, a5
; RV64IA-NEXT: sll a4, a4, a3
; RV64IA-NEXT: sra a4, a4, a3
; RV64IA-NEXT: bge a4, a1, .LBB21_3
; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB21_1 Depth=1
; RV64IA-NEXT: xor a2, a5, a1
; RV64IA-NEXT: and a2, a2, a7
; RV64IA-NEXT: xor a2, a5, a2
; RV64IA-NEXT: .LBB21_3: # in Loop: Header=BB21_1 Depth=1
; RV64IA-NEXT: sc.w a2, a2, (a6)
; RV64IA-NEXT: bnez a2, .LBB21_1
; RV64IA-NEXT: # %bb.4:
; RV64IA-NEXT: srlw a0, a5, a0
; RV64IA-NEXT: slli a0, a0, 48
; RV64IA-NEXT: srai a0, a0, 48
; RV64IA-NEXT: ret
%1 = atomicrmw max i16* %a, i16 %b monotonic
ret i16 %1
}
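; Signed i16 min mirrors the max sequence above; only the comparison
; direction changes, so the slli/srai re-sign-extension of the loaded
; halfword is still required before each compare.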
define signext i16 @atomicrmw_min_i16_monotonic(i16 *%a, i16 %b) nounwind {
; RV32I-LABEL: atomicrmw_min_i16_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lhu a3, 0(a0)
; RV32I-NEXT: mv s2, a1
; RV32I-NEXT: slli a0, a1, 16
; RV32I-NEXT: srai s1, a0, 16
; RV32I-NEXT: j .LBB22_2
; RV32I-NEXT: .LBB22_1: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB22_2 Depth=1
; RV32I-NEXT: sh a3, 14(sp)
; RV32I-NEXT: addi a1, sp, 14
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: mv a3, zero
; RV32I-NEXT: mv a4, zero
; RV32I-NEXT: call __atomic_compare_exchange_2@plt
; RV32I-NEXT: lh a3, 14(sp)
; RV32I-NEXT: bnez a0, .LBB22_4
; RV32I-NEXT: .LBB22_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: slli a0, a3, 16
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: bge s1, a0, .LBB22_1
; RV32I-NEXT: # %bb.3: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB22_2 Depth=1
; RV32I-NEXT: mv a2, s2
; RV32I-NEXT: j .LBB22_1
; RV32I-NEXT: .LBB22_4: # %atomicrmw.end
; RV32I-NEXT: slli a0, a3, 16
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_min_i16_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: andi a6, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: andi a3, a0, 24
; RV32IA-NEXT: lui a4, 16
; RV32IA-NEXT: addi a4, a4, -1
; RV32IA-NEXT: sll a7, a4, a0
; RV32IA-NEXT: slli a1, a1, 16
; RV32IA-NEXT: srai a1, a1, 16
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: addi a5, zero, 16
; RV32IA-NEXT: sub a3, a5, a3
; RV32IA-NEXT: .LBB22_1: # =>This Inner Loop Header: Depth=1
; RV32IA-NEXT: lr.w a5, (a6)
; RV32IA-NEXT: and a4, a5, a7
; RV32IA-NEXT: mv a2, a5
; RV32IA-NEXT: sll a4, a4, a3
; RV32IA-NEXT: sra a4, a4, a3
; RV32IA-NEXT: bge a1, a4, .LBB22_3
; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB22_1 Depth=1
; RV32IA-NEXT: xor a2, a5, a1
; RV32IA-NEXT: and a2, a2, a7
; RV32IA-NEXT: xor a2, a5, a2
; RV32IA-NEXT: .LBB22_3: # in Loop: Header=BB22_1 Depth=1
; RV32IA-NEXT: sc.w a2, a2, (a6)
; RV32IA-NEXT: bnez a2, .LBB22_1
; RV32IA-NEXT: # %bb.4:
; RV32IA-NEXT: srl a0, a5, a0
; RV32IA-NEXT: slli a0, a0, 16
; RV32IA-NEXT: srai a0, a0, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_min_i16_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -48
; RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lhu a3, 0(a0)
; RV64I-NEXT: mv s2, a1
; RV64I-NEXT: slli a0, a1, 48
; RV64I-NEXT: srai s1, a0, 48
; RV64I-NEXT: j .LBB22_2
; RV64I-NEXT: .LBB22_1: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB22_2 Depth=1
; RV64I-NEXT: sh a3, 14(sp)
; RV64I-NEXT: addi a1, sp, 14
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a3, zero
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_2@plt
; RV64I-NEXT: lh a3, 14(sp)
; RV64I-NEXT: bnez a0, .LBB22_4
; RV64I-NEXT: .LBB22_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: slli a0, a3, 48
; RV64I-NEXT: srai a0, a0, 48
; RV64I-NEXT: mv a2, a3
; RV64I-NEXT: bge s1, a0, .LBB22_1
; RV64I-NEXT: # %bb.3: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB22_2 Depth=1
; RV64I-NEXT: mv a2, s2
; RV64I-NEXT: j .LBB22_1
; RV64I-NEXT: .LBB22_4: # %atomicrmw.end
; RV64I-NEXT: slli a0, a3, 48
; RV64I-NEXT: srai a0, a0, 48
; RV64I-NEXT: ld s2, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_min_i16_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: andi a6, a0, -4
; RV64IA-NEXT: slli a0, a0, 3
; RV64IA-NEXT: andi a3, a0, 24
; RV64IA-NEXT: lui a4, 16
; RV64IA-NEXT: addiw a4, a4, -1
; RV64IA-NEXT: sllw a7, a4, a0
; RV64IA-NEXT: slli a1, a1, 48
; RV64IA-NEXT: srai a1, a1, 48
; RV64IA-NEXT: sllw a1, a1, a0
; RV64IA-NEXT: addi a5, zero, 48
; RV64IA-NEXT: sub a3, a5, a3
; RV64IA-NEXT: .LBB22_1: # =>This Inner Loop Header: Depth=1
; RV64IA-NEXT: lr.w a5, (a6)
; RV64IA-NEXT: and a4, a5, a7
; RV64IA-NEXT: mv a2, a5
; RV64IA-NEXT: sll a4, a4, a3
; RV64IA-NEXT: sra a4, a4, a3
; RV64IA-NEXT: bge a1, a4, .LBB22_3
; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB22_1 Depth=1
; RV64IA-NEXT: xor a2, a5, a1
; RV64IA-NEXT: and a2, a2, a7
; RV64IA-NEXT: xor a2, a5, a2
; RV64IA-NEXT: .LBB22_3: # in Loop: Header=BB22_1 Depth=1
; RV64IA-NEXT: sc.w a2, a2, (a6)
; RV64IA-NEXT: bnez a2, .LBB22_1
; RV64IA-NEXT: # %bb.4:
; RV64IA-NEXT: srlw a0, a5, a0
; RV64IA-NEXT: slli a0, a0, 48
; RV64IA-NEXT: srai a0, a0, 48
; RV64IA-NEXT: ret
%1 = atomicrmw min i16* %a, i16 %b monotonic
ret i16 %1
}
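; For the unsigned i16 operations the loaded halfword is zero-extended by
; masking with 0xffff instead of a shift pair, so no slli/srai is needed
; inside the loop; only the final signext return value is sign-extended.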
define signext i16 @atomicrmw_umax_i16_monotonic(i16 *%a, i16 %b) nounwind {
; RV32I-LABEL: atomicrmw_umax_i16_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s2, a1
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: lhu a1, 0(a0)
; RV32I-NEXT: lui a0, 16
; RV32I-NEXT: addi s0, a0, -1
; RV32I-NEXT: and s1, s2, s0
; RV32I-NEXT: j .LBB23_2
; RV32I-NEXT: .LBB23_1: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB23_2 Depth=1
; RV32I-NEXT: sh a1, 10(sp)
; RV32I-NEXT: addi a1, sp, 10
; RV32I-NEXT: mv a0, s3
; RV32I-NEXT: mv a3, zero
; RV32I-NEXT: mv a4, zero
; RV32I-NEXT: call __atomic_compare_exchange_2@plt
; RV32I-NEXT: lh a1, 10(sp)
; RV32I-NEXT: bnez a0, .LBB23_4
; RV32I-NEXT: .LBB23_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: and a0, a1, s0
; RV32I-NEXT: mv a2, a1
; RV32I-NEXT: bltu s1, a0, .LBB23_1
; RV32I-NEXT: # %bb.3: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB23_2 Depth=1
; RV32I-NEXT: mv a2, s2
; RV32I-NEXT: j .LBB23_1
; RV32I-NEXT: .LBB23_4: # %atomicrmw.end
; RV32I-NEXT: slli a0, a1, 16
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_umax_i16_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: andi a6, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: lui a3, 16
; RV32IA-NEXT: addi a3, a3, -1
; RV32IA-NEXT: sll a4, a3, a0
; RV32IA-NEXT: and a1, a1, a3
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: .LBB23_1: # =>This Inner Loop Header: Depth=1
; RV32IA-NEXT: lr.w a3, (a6)
; RV32IA-NEXT: and a2, a3, a4
; RV32IA-NEXT: mv a5, a3
; RV32IA-NEXT: bgeu a2, a1, .LBB23_3
; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB23_1 Depth=1
; RV32IA-NEXT: xor a5, a3, a1
; RV32IA-NEXT: and a5, a5, a4
; RV32IA-NEXT: xor a5, a3, a5
; RV32IA-NEXT: .LBB23_3: # in Loop: Header=BB23_1 Depth=1
; RV32IA-NEXT: sc.w a5, a5, (a6)
; RV32IA-NEXT: bnez a5, .LBB23_1
; RV32IA-NEXT: # %bb.4:
; RV32IA-NEXT: srl a0, a3, a0
; RV32IA-NEXT: slli a0, a0, 16
; RV32IA-NEXT: srai a0, a0, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_umax_i16_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -48
; RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s2, a1
; RV64I-NEXT: mv s3, a0
; RV64I-NEXT: lhu a1, 0(a0)
; RV64I-NEXT: lui a0, 16
; RV64I-NEXT: addiw s0, a0, -1
; RV64I-NEXT: and s1, s2, s0
; RV64I-NEXT: j .LBB23_2
; RV64I-NEXT: .LBB23_1: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB23_2 Depth=1
; RV64I-NEXT: sh a1, 6(sp)
; RV64I-NEXT: addi a1, sp, 6
; RV64I-NEXT: mv a0, s3
; RV64I-NEXT: mv a3, zero
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_2@plt
; RV64I-NEXT: lh a1, 6(sp)
; RV64I-NEXT: bnez a0, .LBB23_4
; RV64I-NEXT: .LBB23_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: and a0, a1, s0
; RV64I-NEXT: mv a2, a1
; RV64I-NEXT: bltu s1, a0, .LBB23_1
; RV64I-NEXT: # %bb.3: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB23_2 Depth=1
; RV64I-NEXT: mv a2, s2
; RV64I-NEXT: j .LBB23_1
; RV64I-NEXT: .LBB23_4: # %atomicrmw.end
; RV64I-NEXT: slli a0, a1, 48
; RV64I-NEXT: srai a0, a0, 48
; RV64I-NEXT: ld s3, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s2, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_umax_i16_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: andi a6, a0, -4
; RV64IA-NEXT: slli a0, a0, 3
; RV64IA-NEXT: lui a3, 16
; RV64IA-NEXT: addiw a3, a3, -1
; RV64IA-NEXT: sllw a4, a3, a0
; RV64IA-NEXT: and a1, a1, a3
; RV64IA-NEXT: sllw a1, a1, a0
; RV64IA-NEXT: .LBB23_1: # =>This Inner Loop Header: Depth=1
; RV64IA-NEXT: lr.w a3, (a6)
; RV64IA-NEXT: and a2, a3, a4
; RV64IA-NEXT: mv a5, a3
; RV64IA-NEXT: bgeu a2, a1, .LBB23_3
; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB23_1 Depth=1
; RV64IA-NEXT: xor a5, a3, a1
; RV64IA-NEXT: and a5, a5, a4
; RV64IA-NEXT: xor a5, a3, a5
; RV64IA-NEXT: .LBB23_3: # in Loop: Header=BB23_1 Depth=1
; RV64IA-NEXT: sc.w a5, a5, (a6)
; RV64IA-NEXT: bnez a5, .LBB23_1
; RV64IA-NEXT: # %bb.4:
; RV64IA-NEXT: srlw a0, a3, a0
; RV64IA-NEXT: slli a0, a0, 48
; RV64IA-NEXT: srai a0, a0, 48
; RV64IA-NEXT: ret
%1 = atomicrmw umax i16* %a, i16 %b monotonic
ret i16 %1
}
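; umin is identical to umax except that the bgeu operands are swapped.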
define signext i16 @atomicrmw_umin_i16_monotonic(i16 *%a, i16 %b) nounwind {
; RV32I-LABEL: atomicrmw_umin_i16_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s2, a1
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: lhu a1, 0(a0)
; RV32I-NEXT: lui a0, 16
; RV32I-NEXT: addi s0, a0, -1
; RV32I-NEXT: and s1, s2, s0
; RV32I-NEXT: j .LBB24_2
; RV32I-NEXT: .LBB24_1: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB24_2 Depth=1
; RV32I-NEXT: sh a1, 10(sp)
; RV32I-NEXT: addi a1, sp, 10
; RV32I-NEXT: mv a0, s3
; RV32I-NEXT: mv a3, zero
; RV32I-NEXT: mv a4, zero
; RV32I-NEXT: call __atomic_compare_exchange_2@plt
; RV32I-NEXT: lh a1, 10(sp)
; RV32I-NEXT: bnez a0, .LBB24_4
; RV32I-NEXT: .LBB24_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: and a0, a1, s0
; RV32I-NEXT: mv a2, a1
; RV32I-NEXT: bgeu s1, a0, .LBB24_1
; RV32I-NEXT: # %bb.3: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB24_2 Depth=1
; RV32I-NEXT: mv a2, s2
; RV32I-NEXT: j .LBB24_1
; RV32I-NEXT: .LBB24_4: # %atomicrmw.end
; RV32I-NEXT: slli a0, a1, 16
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_umin_i16_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: andi a6, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: lui a3, 16
; RV32IA-NEXT: addi a3, a3, -1
; RV32IA-NEXT: sll a4, a3, a0
; RV32IA-NEXT: and a1, a1, a3
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: .LBB24_1: # =>This Inner Loop Header: Depth=1
; RV32IA-NEXT: lr.w a3, (a6)
; RV32IA-NEXT: and a2, a3, a4
; RV32IA-NEXT: mv a5, a3
; RV32IA-NEXT: bgeu a1, a2, .LBB24_3
; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB24_1 Depth=1
; RV32IA-NEXT: xor a5, a3, a1
; RV32IA-NEXT: and a5, a5, a4
; RV32IA-NEXT: xor a5, a3, a5
; RV32IA-NEXT: .LBB24_3: # in Loop: Header=BB24_1 Depth=1
; RV32IA-NEXT: sc.w a5, a5, (a6)
; RV32IA-NEXT: bnez a5, .LBB24_1
; RV32IA-NEXT: # %bb.4:
; RV32IA-NEXT: srl a0, a3, a0
; RV32IA-NEXT: slli a0, a0, 16
; RV32IA-NEXT: srai a0, a0, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_umin_i16_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -48
; RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s2, a1
; RV64I-NEXT: mv s3, a0
; RV64I-NEXT: lhu a1, 0(a0)
; RV64I-NEXT: lui a0, 16
; RV64I-NEXT: addiw s0, a0, -1
; RV64I-NEXT: and s1, s2, s0
; RV64I-NEXT: j .LBB24_2
; RV64I-NEXT: .LBB24_1: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB24_2 Depth=1
; RV64I-NEXT: sh a1, 6(sp)
; RV64I-NEXT: addi a1, sp, 6
; RV64I-NEXT: mv a0, s3
; RV64I-NEXT: mv a3, zero
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_2@plt
; RV64I-NEXT: lh a1, 6(sp)
; RV64I-NEXT: bnez a0, .LBB24_4
; RV64I-NEXT: .LBB24_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: and a0, a1, s0
; RV64I-NEXT: mv a2, a1
; RV64I-NEXT: bgeu s1, a0, .LBB24_1
; RV64I-NEXT: # %bb.3: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB24_2 Depth=1
; RV64I-NEXT: mv a2, s2
; RV64I-NEXT: j .LBB24_1
; RV64I-NEXT: .LBB24_4: # %atomicrmw.end
; RV64I-NEXT: slli a0, a1, 48
; RV64I-NEXT: srai a0, a0, 48
; RV64I-NEXT: ld s3, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s2, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_umin_i16_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: andi a6, a0, -4
; RV64IA-NEXT: slli a0, a0, 3
; RV64IA-NEXT: lui a3, 16
; RV64IA-NEXT: addiw a3, a3, -1
; RV64IA-NEXT: sllw a4, a3, a0
; RV64IA-NEXT: and a1, a1, a3
; RV64IA-NEXT: sllw a1, a1, a0
; RV64IA-NEXT: .LBB24_1: # =>This Inner Loop Header: Depth=1
; RV64IA-NEXT: lr.w a3, (a6)
; RV64IA-NEXT: and a2, a3, a4
; RV64IA-NEXT: mv a5, a3
; RV64IA-NEXT: bgeu a1, a2, .LBB24_3
; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB24_1 Depth=1
; RV64IA-NEXT: xor a5, a3, a1
; RV64IA-NEXT: and a5, a5, a4
; RV64IA-NEXT: xor a5, a3, a5
; RV64IA-NEXT: .LBB24_3: # in Loop: Header=BB24_1 Depth=1
; RV64IA-NEXT: sc.w a5, a5, (a6)
; RV64IA-NEXT: bnez a5, .LBB24_1
; RV64IA-NEXT: # %bb.4:
; RV64IA-NEXT: srlw a0, a3, a0
; RV64IA-NEXT: slli a0, a0, 48
; RV64IA-NEXT: srai a0, a0, 48
; RV64IA-NEXT: ret
%1 = atomicrmw umin i16* %a, i16 %b monotonic
ret i16 %1
}
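; i32 operations: with the A extension these lower to a single AMO. Word
; AMOs sign-extend their result on RV64, so the RV64IA output needs no
; explicit sext.w for the signext return value, unlike the RV64I libcalls.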
define signext i32 @atomicrmw_xchg_i32_monotonic(i32* %a, i32 %b) nounwind {
; RV32I-LABEL: atomicrmw_xchg_i32_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a2, zero
; RV32I-NEXT: call __atomic_exchange_4@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_xchg_i32_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: amoswap.w a0, a1, (a0)
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_xchg_i32_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_exchange_4@plt
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_xchg_i32_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amoswap.w a0, a1, (a0)
; RV64IA-NEXT: ret
%1 = atomicrmw xchg i32* %a, i32 %b monotonic
ret i32 %1
}
define signext i32 @atomicrmw_add_i32_monotonic(i32 *%a, i32 %b) nounwind {
; RV32I-LABEL: atomicrmw_add_i32_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a2, zero
; RV32I-NEXT: call __atomic_fetch_add_4@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_add_i32_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: amoadd.w a0, a1, (a0)
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_add_i32_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_fetch_add_4@plt
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_add_i32_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amoadd.w a0, a1, (a0)
; RV64IA-NEXT: ret
%1 = atomicrmw add i32* %a, i32 %b monotonic
ret i32 %1
}
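; There is no amosub, so sub negates the operand and reuses amoadd.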
define signext i32 @atomicrmw_sub_i32_monotonic(i32* %a, i32 %b) nounwind {
; RV32I-LABEL: atomicrmw_sub_i32_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a2, zero
; RV32I-NEXT: call __atomic_fetch_sub_4@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_sub_i32_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: neg a1, a1
; RV32IA-NEXT: amoadd.w a0, a1, (a0)
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_sub_i32_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_fetch_sub_4@plt
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_sub_i32_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: neg a1, a1
; RV64IA-NEXT: amoadd.w a0, a1, (a0)
; RV64IA-NEXT: ret
%1 = atomicrmw sub i32* %a, i32 %b monotonic
ret i32 %1
}
define signext i32 @atomicrmw_and_i32_monotonic(i32 *%a, i32 %b) nounwind {
; RV32I-LABEL: atomicrmw_and_i32_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a2, zero
; RV32I-NEXT: call __atomic_fetch_and_4@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_and_i32_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: amoand.w a0, a1, (a0)
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_and_i32_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_fetch_and_4@plt
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_and_i32_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amoand.w a0, a1, (a0)
; RV64IA-NEXT: ret
%1 = atomicrmw and i32* %a, i32 %b monotonic
ret i32 %1
}
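; nand has no AMO encoding, so RV32IA/RV64IA fall back to an LR/SC loop;
; lr.w also sign-extends on RV64, so the loaded value is returned as-is.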
define signext i32 @atomicrmw_nand_i32_monotonic(i32* %a, i32 %b) nounwind {
; RV32I-LABEL: atomicrmw_nand_i32_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a2, zero
; RV32I-NEXT: call __atomic_fetch_nand_4@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_nand_i32_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: .LBB29_1: # =>This Inner Loop Header: Depth=1
; RV32IA-NEXT: lr.w a2, (a0)
; RV32IA-NEXT: and a3, a2, a1
; RV32IA-NEXT: not a3, a3
; RV32IA-NEXT: sc.w a3, a3, (a0)
; RV32IA-NEXT: bnez a3, .LBB29_1
; RV32IA-NEXT: # %bb.2:
; RV32IA-NEXT: mv a0, a2
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_nand_i32_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_fetch_nand_4@plt
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_nand_i32_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: .LBB29_1: # =>This Inner Loop Header: Depth=1
; RV64IA-NEXT: lr.w a2, (a0)
; RV64IA-NEXT: and a3, a2, a1
; RV64IA-NEXT: not a3, a3
; RV64IA-NEXT: sc.w a3, a3, (a0)
; RV64IA-NEXT: bnez a3, .LBB29_1
; RV64IA-NEXT: # %bb.2:
; RV64IA-NEXT: mv a0, a2
; RV64IA-NEXT: ret
%1 = atomicrmw nand i32* %a, i32 %b monotonic
ret i32 %1
}
define signext i32 @atomicrmw_or_i32_monotonic(i32 *%a, i32 %b) nounwind {
; RV32I-LABEL: atomicrmw_or_i32_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a2, zero
; RV32I-NEXT: call __atomic_fetch_or_4@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_or_i32_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: amoor.w a0, a1, (a0)
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_or_i32_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_fetch_or_4@plt
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_or_i32_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amoor.w a0, a1, (a0)
; RV64IA-NEXT: ret
%1 = atomicrmw or i32* %a, i32 %b monotonic
ret i32 %1
}
define signext i32 @atomicrmw_xor_i32_monotonic(i32 *%a, i32 %b) nounwind {
; RV32I-LABEL: atomicrmw_xor_i32_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a2, zero
; RV32I-NEXT: call __atomic_fetch_xor_4@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_xor_i32_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: amoxor.w a0, a1, (a0)
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_xor_i32_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_fetch_xor_4@plt
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_xor_i32_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amoxor.w a0, a1, (a0)
; RV64IA-NEXT: ret
%1 = atomicrmw xor i32* %a, i32 %b monotonic
ret i32 %1
}
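; Signed and unsigned i32 min/max map directly to amomin.w, amomax.w,
; amominu.w and amomaxu.w under the A extension. Without it, a
; compare-exchange loop is emitted and RV64I must sext.w the word loaded
; with lwu before each comparison.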
define signext i32 @atomicrmw_max_i32_monotonic(i32 *%a, i32 %b) nounwind {
; RV32I-LABEL: atomicrmw_max_i32_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lw a3, 0(a0)
; RV32I-NEXT: mv s1, a1
; RV32I-NEXT: j .LBB32_2
; RV32I-NEXT: .LBB32_1: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB32_2 Depth=1
; RV32I-NEXT: sw a3, 0(sp)
; RV32I-NEXT: mv a1, sp
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: mv a3, zero
; RV32I-NEXT: mv a4, zero
; RV32I-NEXT: call __atomic_compare_exchange_4@plt
; RV32I-NEXT: lw a3, 0(sp)
; RV32I-NEXT: bnez a0, .LBB32_4
; RV32I-NEXT: .LBB32_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: blt s1, a3, .LBB32_1
; RV32I-NEXT: # %bb.3: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB32_2 Depth=1
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: j .LBB32_1
; RV32I-NEXT: .LBB32_4: # %atomicrmw.end
; RV32I-NEXT: mv a0, a3
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_max_i32_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: amomax.w a0, a1, (a0)
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_max_i32_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -48
; RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lwu a3, 0(a0)
; RV64I-NEXT: mv s2, a1
; RV64I-NEXT: sext.w s1, a1
; RV64I-NEXT: j .LBB32_2
; RV64I-NEXT: .LBB32_1: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB32_2 Depth=1
; RV64I-NEXT: sw a3, 12(sp)
; RV64I-NEXT: addi a1, sp, 12
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a3, zero
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_4@plt
; RV64I-NEXT: lw a3, 12(sp)
; RV64I-NEXT: bnez a0, .LBB32_4
; RV64I-NEXT: .LBB32_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: sext.w a0, a3
; RV64I-NEXT: mv a2, a3
; RV64I-NEXT: blt s1, a0, .LBB32_1
; RV64I-NEXT: # %bb.3: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB32_2 Depth=1
; RV64I-NEXT: mv a2, s2
; RV64I-NEXT: j .LBB32_1
; RV64I-NEXT: .LBB32_4: # %atomicrmw.end
; RV64I-NEXT: sext.w a0, a3
; RV64I-NEXT: ld s2, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_max_i32_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amomax.w a0, a1, (a0)
; RV64IA-NEXT: ret
%1 = atomicrmw max i32* %a, i32 %b monotonic
ret i32 %1
}
define signext i32 @atomicrmw_min_i32_monotonic(i32 *%a, i32 %b) nounwind {
; RV32I-LABEL: atomicrmw_min_i32_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lw a3, 0(a0)
; RV32I-NEXT: mv s1, a1
; RV32I-NEXT: j .LBB33_2
; RV32I-NEXT: .LBB33_1: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB33_2 Depth=1
; RV32I-NEXT: sw a3, 0(sp)
; RV32I-NEXT: mv a1, sp
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: mv a3, zero
; RV32I-NEXT: mv a4, zero
; RV32I-NEXT: call __atomic_compare_exchange_4@plt
; RV32I-NEXT: lw a3, 0(sp)
; RV32I-NEXT: bnez a0, .LBB33_4
; RV32I-NEXT: .LBB33_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: bge s1, a3, .LBB33_1
; RV32I-NEXT: # %bb.3: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB33_2 Depth=1
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: j .LBB33_1
; RV32I-NEXT: .LBB33_4: # %atomicrmw.end
; RV32I-NEXT: mv a0, a3
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_min_i32_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: amomin.w a0, a1, (a0)
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_min_i32_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -48
; RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lwu a3, 0(a0)
; RV64I-NEXT: mv s2, a1
; RV64I-NEXT: sext.w s1, a1
; RV64I-NEXT: j .LBB33_2
; RV64I-NEXT: .LBB33_1: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB33_2 Depth=1
; RV64I-NEXT: sw a3, 12(sp)
; RV64I-NEXT: addi a1, sp, 12
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a3, zero
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_4@plt
; RV64I-NEXT: lw a3, 12(sp)
; RV64I-NEXT: bnez a0, .LBB33_4
; RV64I-NEXT: .LBB33_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: sext.w a0, a3
; RV64I-NEXT: mv a2, a3
; RV64I-NEXT: bge s1, a0, .LBB33_1
; RV64I-NEXT: # %bb.3: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB33_2 Depth=1
; RV64I-NEXT: mv a2, s2
; RV64I-NEXT: j .LBB33_1
; RV64I-NEXT: .LBB33_4: # %atomicrmw.end
; RV64I-NEXT: sext.w a0, a3
; RV64I-NEXT: ld s2, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_min_i32_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amomin.w a0, a1, (a0)
; RV64IA-NEXT: ret
%1 = atomicrmw min i32* %a, i32 %b monotonic
ret i32 %1
}
define signext i32 @atomicrmw_umax_i32_monotonic(i32 *%a, i32 %b) nounwind {
; RV32I-LABEL: atomicrmw_umax_i32_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lw a3, 0(a0)
; RV32I-NEXT: mv s1, a1
; RV32I-NEXT: j .LBB34_2
; RV32I-NEXT: .LBB34_1: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB34_2 Depth=1
; RV32I-NEXT: sw a3, 0(sp)
; RV32I-NEXT: mv a1, sp
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: mv a3, zero
; RV32I-NEXT: mv a4, zero
; RV32I-NEXT: call __atomic_compare_exchange_4@plt
; RV32I-NEXT: lw a3, 0(sp)
; RV32I-NEXT: bnez a0, .LBB34_4
; RV32I-NEXT: .LBB34_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: bltu s1, a3, .LBB34_1
; RV32I-NEXT: # %bb.3: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB34_2 Depth=1
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: j .LBB34_1
; RV32I-NEXT: .LBB34_4: # %atomicrmw.end
; RV32I-NEXT: mv a0, a3
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_umax_i32_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: amomaxu.w a0, a1, (a0)
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_umax_i32_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -48
; RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lwu a3, 0(a0)
; RV64I-NEXT: mv s2, a1
; RV64I-NEXT: sext.w s1, a1
; RV64I-NEXT: j .LBB34_2
; RV64I-NEXT: .LBB34_1: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB34_2 Depth=1
; RV64I-NEXT: sw a3, 12(sp)
; RV64I-NEXT: addi a1, sp, 12
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a3, zero
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_4@plt
; RV64I-NEXT: lw a3, 12(sp)
; RV64I-NEXT: bnez a0, .LBB34_4
; RV64I-NEXT: .LBB34_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: sext.w a0, a3
; RV64I-NEXT: mv a2, a3
; RV64I-NEXT: bltu s1, a0, .LBB34_1
; RV64I-NEXT: # %bb.3: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB34_2 Depth=1
; RV64I-NEXT: mv a2, s2
; RV64I-NEXT: j .LBB34_1
; RV64I-NEXT: .LBB34_4: # %atomicrmw.end
; RV64I-NEXT: sext.w a0, a3
; RV64I-NEXT: ld s2, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_umax_i32_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amomaxu.w a0, a1, (a0)
; RV64IA-NEXT: ret
%1 = atomicrmw umax i32* %a, i32 %b monotonic
ret i32 %1
}
define signext i32 @atomicrmw_umin_i32_monotonic(i32 *%a, i32 %b) nounwind {
; RV32I-LABEL: atomicrmw_umin_i32_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lw a3, 0(a0)
; RV32I-NEXT: mv s1, a1
; RV32I-NEXT: j .LBB35_2
; RV32I-NEXT: .LBB35_1: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB35_2 Depth=1
; RV32I-NEXT: sw a3, 0(sp)
; RV32I-NEXT: mv a1, sp
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: mv a3, zero
; RV32I-NEXT: mv a4, zero
; RV32I-NEXT: call __atomic_compare_exchange_4@plt
; RV32I-NEXT: lw a3, 0(sp)
; RV32I-NEXT: bnez a0, .LBB35_4
; RV32I-NEXT: .LBB35_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: bgeu s1, a3, .LBB35_1
; RV32I-NEXT: # %bb.3: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB35_2 Depth=1
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: j .LBB35_1
; RV32I-NEXT: .LBB35_4: # %atomicrmw.end
; RV32I-NEXT: mv a0, a3
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_umin_i32_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: amominu.w a0, a1, (a0)
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_umin_i32_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -48
; RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lwu a3, 0(a0)
; RV64I-NEXT: mv s2, a1
; RV64I-NEXT: sext.w s1, a1
; RV64I-NEXT: j .LBB35_2
; RV64I-NEXT: .LBB35_1: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB35_2 Depth=1
; RV64I-NEXT: sw a3, 12(sp)
; RV64I-NEXT: addi a1, sp, 12
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a3, zero
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_4@plt
; RV64I-NEXT: lw a3, 12(sp)
; RV64I-NEXT: bnez a0, .LBB35_4
; RV64I-NEXT: .LBB35_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: sext.w a0, a3
; RV64I-NEXT: mv a2, a3
; RV64I-NEXT: bgeu s1, a0, .LBB35_1
; RV64I-NEXT: # %bb.3: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB35_2 Depth=1
; RV64I-NEXT: mv a2, s2
; RV64I-NEXT: j .LBB35_1
; RV64I-NEXT: .LBB35_4: # %atomicrmw.end
; RV64I-NEXT: sext.w a0, a3
; RV64I-NEXT: ld s2, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_umin_i32_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amominu.w a0, a1, (a0)
; RV64IA-NEXT: ret
%1 = atomicrmw umin i32* %a, i32 %b monotonic
ret i32 %1
}
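; i64 operations: RV32 has no 64-bit AMOs, so even RV32IA calls the
; __atomic_* libcalls; RV64IA uses the .d AMO forms directly.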
define signext i64 @atomicrmw_xchg_i64_monotonic(i64* %a, i64 %b) nounwind {
; RV32I-LABEL: atomicrmw_xchg_i64_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a3, zero
; RV32I-NEXT: call __atomic_exchange_8@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_xchg_i64_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: mv a3, zero
; RV32IA-NEXT: call __atomic_exchange_8@plt
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_xchg_i64_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_exchange_8@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_xchg_i64_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amoswap.d a0, a1, (a0)
; RV64IA-NEXT: ret
%1 = atomicrmw xchg i64* %a, i64 %b monotonic
ret i64 %1
}
define signext i64 @atomicrmw_add_i64_monotonic(i64 *%a, i64 %b) nounwind {
; RV32I-LABEL: atomicrmw_add_i64_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a3, zero
; RV32I-NEXT: call __atomic_fetch_add_8@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_add_i64_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: mv a3, zero
; RV32IA-NEXT: call __atomic_fetch_add_8@plt
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_add_i64_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_fetch_add_8@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_add_i64_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amoadd.d a0, a1, (a0)
; RV64IA-NEXT: ret
%1 = atomicrmw add i64* %a, i64 %b monotonic
ret i64 %1
}
define signext i64 @atomicrmw_sub_i64_monotonic(i64* %a, i64 %b) nounwind {
; RV32I-LABEL: atomicrmw_sub_i64_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a3, zero
; RV32I-NEXT: call __atomic_fetch_sub_8@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_sub_i64_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: mv a3, zero
; RV32IA-NEXT: call __atomic_fetch_sub_8@plt
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_sub_i64_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_fetch_sub_8@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_sub_i64_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: neg a1, a1
; RV64IA-NEXT: amoadd.d a0, a1, (a0)
; RV64IA-NEXT: ret
%1 = atomicrmw sub i64* %a, i64 %b monotonic
ret i64 %1
}
define signext i64 @atomicrmw_and_i64_monotonic(i64 *%a, i64 %b) nounwind {
; RV32I-LABEL: atomicrmw_and_i64_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a3, zero
; RV32I-NEXT: call __atomic_fetch_and_8@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_and_i64_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: mv a3, zero
; RV32IA-NEXT: call __atomic_fetch_and_8@plt
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_and_i64_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_fetch_and_8@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_and_i64_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amoand.d a0, a1, (a0)
; RV64IA-NEXT: ret
%1 = atomicrmw and i64* %a, i64 %b monotonic
ret i64 %1
}
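; As with i32, 64-bit nand has no AMO form; RV64IA uses an lr.d/sc.d loop.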
define signext i64 @atomicrmw_nand_i64_monotonic(i64* %a, i64 %b) nounwind {
; RV32I-LABEL: atomicrmw_nand_i64_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a3, zero
; RV32I-NEXT: call __atomic_fetch_nand_8@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_nand_i64_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: mv a3, zero
; RV32IA-NEXT: call __atomic_fetch_nand_8@plt
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_nand_i64_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_fetch_nand_8@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_nand_i64_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: .LBB40_1: # =>This Inner Loop Header: Depth=1
; RV64IA-NEXT: lr.d a2, (a0)
; RV64IA-NEXT: and a3, a2, a1
; RV64IA-NEXT: not a3, a3
; RV64IA-NEXT: sc.d a3, a3, (a0)
; RV64IA-NEXT: bnez a3, .LBB40_1
; RV64IA-NEXT: # %bb.2:
; RV64IA-NEXT: mv a0, a2
; RV64IA-NEXT: ret
%1 = atomicrmw nand i64* %a, i64 %b monotonic
ret i64 %1
}
define signext i64 @atomicrmw_or_i64_monotonic(i64 *%a, i64 %b) nounwind {
; RV32I-LABEL: atomicrmw_or_i64_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a3, zero
; RV32I-NEXT: call __atomic_fetch_or_8@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_or_i64_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: mv a3, zero
; RV32IA-NEXT: call __atomic_fetch_or_8@plt
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_or_i64_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_fetch_or_8@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_or_i64_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amoor.d a0, a1, (a0)
; RV64IA-NEXT: ret
%1 = atomicrmw or i64* %a, i64 %b monotonic
ret i64 %1
}
define signext i64 @atomicrmw_xor_i64_monotonic(i64 *%a, i64 %b) nounwind {
; RV32I-LABEL: atomicrmw_xor_i64_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a3, zero
; RV32I-NEXT: call __atomic_fetch_xor_8@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_xor_i64_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: mv a3, zero
; RV32IA-NEXT: call __atomic_fetch_xor_8@plt
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_xor_i64_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a2, zero
; RV64I-NEXT: call __atomic_fetch_xor_8@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_xor_i64_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amoxor.d a0, a1, (a0)
; RV64IA-NEXT: ret
%1 = atomicrmw xor i64* %a, i64 %b monotonic
ret i64 %1
}
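; 64-bit min/max on RV32 expand to a cmpxchg loop with a two-word compare:
; slt (sltu for the unsigned forms) on the high words, and sltu on the low
; words when the high words are equal.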
define signext i64 @atomicrmw_max_i64_monotonic(i64 *%a, i64 %b) nounwind {
; RV32I-LABEL: atomicrmw_max_i64_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lw a5, 4(a0)
; RV32I-NEXT: lw a4, 0(a0)
; RV32I-NEXT: mv s1, a2
; RV32I-NEXT: mv s2, a1
; RV32I-NEXT: j .LBB43_2
; RV32I-NEXT: .LBB43_1: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB43_2 Depth=1
; RV32I-NEXT: sw a4, 8(sp)
; RV32I-NEXT: sw a5, 12(sp)
; RV32I-NEXT: addi a1, sp, 8
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: mv a4, zero
; RV32I-NEXT: mv a5, zero
; RV32I-NEXT: call __atomic_compare_exchange_8@plt
; RV32I-NEXT: lw a5, 12(sp)
; RV32I-NEXT: lw a4, 8(sp)
; RV32I-NEXT: bnez a0, .LBB43_7
; RV32I-NEXT: .LBB43_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: beq a5, s1, .LBB43_4
; RV32I-NEXT: # %bb.3: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB43_2 Depth=1
; RV32I-NEXT: slt a0, s1, a5
; RV32I-NEXT: j .LBB43_5
; RV32I-NEXT: .LBB43_4: # in Loop: Header=BB43_2 Depth=1
; RV32I-NEXT: sltu a0, s2, a4
; RV32I-NEXT: .LBB43_5: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB43_2 Depth=1
; RV32I-NEXT: mv a2, a4
; RV32I-NEXT: mv a3, a5
; RV32I-NEXT: bnez a0, .LBB43_1
; RV32I-NEXT: # %bb.6: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB43_2 Depth=1
; RV32I-NEXT: mv a2, s2
; RV32I-NEXT: mv a3, s1
; RV32I-NEXT: j .LBB43_1
; RV32I-NEXT: .LBB43_7: # %atomicrmw.end
; RV32I-NEXT: mv a0, a4
; RV32I-NEXT: mv a1, a5
; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_max_i64_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -32
; RV32IA-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32IA-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32IA-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32IA-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32IA-NEXT: mv s0, a0
; RV32IA-NEXT: lw a5, 4(a0)
; RV32IA-NEXT: lw a4, 0(a0)
; RV32IA-NEXT: mv s1, a2
; RV32IA-NEXT: mv s2, a1
; RV32IA-NEXT: j .LBB43_2
; RV32IA-NEXT: .LBB43_1: # %atomicrmw.start
; RV32IA-NEXT: # in Loop: Header=BB43_2 Depth=1
; RV32IA-NEXT: sw a4, 8(sp)
; RV32IA-NEXT: sw a5, 12(sp)
; RV32IA-NEXT: addi a1, sp, 8
; RV32IA-NEXT: mv a0, s0
; RV32IA-NEXT: mv a4, zero
; RV32IA-NEXT: mv a5, zero
; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
; RV32IA-NEXT: lw a5, 12(sp)
; RV32IA-NEXT: lw a4, 8(sp)
; RV32IA-NEXT: bnez a0, .LBB43_7
; RV32IA-NEXT: .LBB43_2: # %atomicrmw.start
; RV32IA-NEXT: # =>This Inner Loop Header: Depth=1
; RV32IA-NEXT: beq a5, s1, .LBB43_4
; RV32IA-NEXT: # %bb.3: # %atomicrmw.start
; RV32IA-NEXT: # in Loop: Header=BB43_2 Depth=1
; RV32IA-NEXT: slt a0, s1, a5
; RV32IA-NEXT: j .LBB43_5
; RV32IA-NEXT: .LBB43_4: # in Loop: Header=BB43_2 Depth=1
; RV32IA-NEXT: sltu a0, s2, a4
; RV32IA-NEXT: .LBB43_5: # %atomicrmw.start
; RV32IA-NEXT: # in Loop: Header=BB43_2 Depth=1
; RV32IA-NEXT: mv a2, a4
; RV32IA-NEXT: mv a3, a5
; RV32IA-NEXT: bnez a0, .LBB43_1
; RV32IA-NEXT: # %bb.6: # %atomicrmw.start
; RV32IA-NEXT: # in Loop: Header=BB43_2 Depth=1
; RV32IA-NEXT: mv a2, s2
; RV32IA-NEXT: mv a3, s1
; RV32IA-NEXT: j .LBB43_1
; RV32IA-NEXT: .LBB43_7: # %atomicrmw.end
; RV32IA-NEXT: mv a0, a4
; RV32IA-NEXT: mv a1, a5
; RV32IA-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32IA-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32IA-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32IA-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 32
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_max_i64_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -32
; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: ld a3, 0(a0)
; RV64I-NEXT: mv s1, a1
; RV64I-NEXT: j .LBB43_2
; RV64I-NEXT: .LBB43_1: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB43_2 Depth=1
; RV64I-NEXT: sd a3, 0(sp)
; RV64I-NEXT: mv a1, sp
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a3, zero
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_8@plt
; RV64I-NEXT: ld a3, 0(sp)
; RV64I-NEXT: bnez a0, .LBB43_4
; RV64I-NEXT: .LBB43_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: mv a2, a3
; RV64I-NEXT: blt s1, a3, .LBB43_1
; RV64I-NEXT: # %bb.3: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB43_2 Depth=1
; RV64I-NEXT: mv a2, s1
; RV64I-NEXT: j .LBB43_1
; RV64I-NEXT: .LBB43_4: # %atomicrmw.end
; RV64I-NEXT: mv a0, a3
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_max_i64_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amomax.d a0, a1, (a0)
; RV64IA-NEXT: ret
%1 = atomicrmw max i64* %a, i64 %b monotonic
ret i64 %1
}
define signext i64 @atomicrmw_min_i64_monotonic(i64 *%a, i64 %b) nounwind {
; RV32I-LABEL: atomicrmw_min_i64_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lw a5, 4(a0)
; RV32I-NEXT: lw a4, 0(a0)
; RV32I-NEXT: mv s1, a2
; RV32I-NEXT: mv s2, a1
; RV32I-NEXT: j .LBB44_2
; RV32I-NEXT: .LBB44_1: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB44_2 Depth=1
; RV32I-NEXT: sw a4, 8(sp)
; RV32I-NEXT: sw a5, 12(sp)
; RV32I-NEXT: addi a1, sp, 8
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: mv a4, zero
; RV32I-NEXT: mv a5, zero
; RV32I-NEXT: call __atomic_compare_exchange_8@plt
; RV32I-NEXT: lw a5, 12(sp)
; RV32I-NEXT: lw a4, 8(sp)
; RV32I-NEXT: bnez a0, .LBB44_7
; RV32I-NEXT: .LBB44_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: beq a5, s1, .LBB44_4
; RV32I-NEXT: # %bb.3: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB44_2 Depth=1
; RV32I-NEXT: slt a0, s1, a5
; RV32I-NEXT: j .LBB44_5
; RV32I-NEXT: .LBB44_4: # in Loop: Header=BB44_2 Depth=1
; RV32I-NEXT: sltu a0, s2, a4
; RV32I-NEXT: .LBB44_5: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB44_2 Depth=1
; RV32I-NEXT: xori a0, a0, 1
; RV32I-NEXT: mv a2, a4
; RV32I-NEXT: mv a3, a5
; RV32I-NEXT: bnez a0, .LBB44_1
; RV32I-NEXT: # %bb.6: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB44_2 Depth=1
; RV32I-NEXT: mv a2, s2
; RV32I-NEXT: mv a3, s1
; RV32I-NEXT: j .LBB44_1
; RV32I-NEXT: .LBB44_7: # %atomicrmw.end
; RV32I-NEXT: mv a0, a4
; RV32I-NEXT: mv a1, a5
; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_min_i64_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -32
; RV32IA-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32IA-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32IA-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32IA-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32IA-NEXT: mv s0, a0
; RV32IA-NEXT: lw a5, 4(a0)
; RV32IA-NEXT: lw a4, 0(a0)
; RV32IA-NEXT: mv s1, a2
; RV32IA-NEXT: mv s2, a1
; RV32IA-NEXT: j .LBB44_2
; RV32IA-NEXT: .LBB44_1: # %atomicrmw.start
; RV32IA-NEXT: # in Loop: Header=BB44_2 Depth=1
; RV32IA-NEXT: sw a4, 8(sp)
; RV32IA-NEXT: sw a5, 12(sp)
; RV32IA-NEXT: addi a1, sp, 8
; RV32IA-NEXT: mv a0, s0
; RV32IA-NEXT: mv a4, zero
; RV32IA-NEXT: mv a5, zero
; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
; RV32IA-NEXT: lw a5, 12(sp)
; RV32IA-NEXT: lw a4, 8(sp)
; RV32IA-NEXT: bnez a0, .LBB44_7
; RV32IA-NEXT: .LBB44_2: # %atomicrmw.start
; RV32IA-NEXT: # =>This Inner Loop Header: Depth=1
; RV32IA-NEXT: beq a5, s1, .LBB44_4
; RV32IA-NEXT: # %bb.3: # %atomicrmw.start
; RV32IA-NEXT: # in Loop: Header=BB44_2 Depth=1
; RV32IA-NEXT: slt a0, s1, a5
; RV32IA-NEXT: j .LBB44_5
; RV32IA-NEXT: .LBB44_4: # in Loop: Header=BB44_2 Depth=1
; RV32IA-NEXT: sltu a0, s2, a4
; RV32IA-NEXT: .LBB44_5: # %atomicrmw.start
; RV32IA-NEXT: # in Loop: Header=BB44_2 Depth=1
; RV32IA-NEXT: xori a0, a0, 1
; RV32IA-NEXT: mv a2, a4
; RV32IA-NEXT: mv a3, a5
; RV32IA-NEXT: bnez a0, .LBB44_1
; RV32IA-NEXT: # %bb.6: # %atomicrmw.start
; RV32IA-NEXT: # in Loop: Header=BB44_2 Depth=1
; RV32IA-NEXT: mv a2, s2
; RV32IA-NEXT: mv a3, s1
; RV32IA-NEXT: j .LBB44_1
; RV32IA-NEXT: .LBB44_7: # %atomicrmw.end
; RV32IA-NEXT: mv a0, a4
; RV32IA-NEXT: mv a1, a5
; RV32IA-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32IA-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32IA-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32IA-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 32
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_min_i64_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -32
; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: ld a3, 0(a0)
; RV64I-NEXT: mv s1, a1
; RV64I-NEXT: j .LBB44_2
; RV64I-NEXT: .LBB44_1: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB44_2 Depth=1
; RV64I-NEXT: sd a3, 0(sp)
; RV64I-NEXT: mv a1, sp
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a3, zero
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_8@plt
; RV64I-NEXT: ld a3, 0(sp)
; RV64I-NEXT: bnez a0, .LBB44_4
; RV64I-NEXT: .LBB44_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: mv a2, a3
; RV64I-NEXT: bge s1, a3, .LBB44_1
; RV64I-NEXT: # %bb.3: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB44_2 Depth=1
; RV64I-NEXT: mv a2, s1
; RV64I-NEXT: j .LBB44_1
; RV64I-NEXT: .LBB44_4: # %atomicrmw.end
; RV64I-NEXT: mv a0, a3
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_min_i64_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amomin.d a0, a1, (a0)
; RV64IA-NEXT: ret
%1 = atomicrmw min i64* %a, i64 %b monotonic
ret i64 %1
}
define signext i64 @atomicrmw_umax_i64_monotonic(i64 *%a, i64 %b) nounwind {
; RV32I-LABEL: atomicrmw_umax_i64_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lw a5, 4(a0)
; RV32I-NEXT: lw a4, 0(a0)
; RV32I-NEXT: mv s1, a2
; RV32I-NEXT: mv s2, a1
; RV32I-NEXT: j .LBB45_2
; RV32I-NEXT: .LBB45_1: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB45_2 Depth=1
; RV32I-NEXT: sw a4, 8(sp)
; RV32I-NEXT: sw a5, 12(sp)
; RV32I-NEXT: addi a1, sp, 8
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: mv a4, zero
; RV32I-NEXT: mv a5, zero
; RV32I-NEXT: call __atomic_compare_exchange_8@plt
; RV32I-NEXT: lw a5, 12(sp)
; RV32I-NEXT: lw a4, 8(sp)
; RV32I-NEXT: bnez a0, .LBB45_7
; RV32I-NEXT: .LBB45_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: beq a5, s1, .LBB45_4
; RV32I-NEXT: # %bb.3: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB45_2 Depth=1
; RV32I-NEXT: sltu a0, s1, a5
; RV32I-NEXT: j .LBB45_5
; RV32I-NEXT: .LBB45_4: # in Loop: Header=BB45_2 Depth=1
; RV32I-NEXT: sltu a0, s2, a4
; RV32I-NEXT: .LBB45_5: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB45_2 Depth=1
; RV32I-NEXT: mv a2, a4
; RV32I-NEXT: mv a3, a5
; RV32I-NEXT: bnez a0, .LBB45_1
; RV32I-NEXT: # %bb.6: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB45_2 Depth=1
; RV32I-NEXT: mv a2, s2
; RV32I-NEXT: mv a3, s1
; RV32I-NEXT: j .LBB45_1
; RV32I-NEXT: .LBB45_7: # %atomicrmw.end
; RV32I-NEXT: mv a0, a4
; RV32I-NEXT: mv a1, a5
; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_umax_i64_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -32
; RV32IA-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32IA-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32IA-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32IA-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32IA-NEXT: mv s0, a0
; RV32IA-NEXT: lw a5, 4(a0)
; RV32IA-NEXT: lw a4, 0(a0)
; RV32IA-NEXT: mv s1, a2
; RV32IA-NEXT: mv s2, a1
; RV32IA-NEXT: j .LBB45_2
; RV32IA-NEXT: .LBB45_1: # %atomicrmw.start
; RV32IA-NEXT: # in Loop: Header=BB45_2 Depth=1
; RV32IA-NEXT: sw a4, 8(sp)
; RV32IA-NEXT: sw a5, 12(sp)
; RV32IA-NEXT: addi a1, sp, 8
; RV32IA-NEXT: mv a0, s0
; RV32IA-NEXT: mv a4, zero
; RV32IA-NEXT: mv a5, zero
; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
; RV32IA-NEXT: lw a5, 12(sp)
; RV32IA-NEXT: lw a4, 8(sp)
; RV32IA-NEXT: bnez a0, .LBB45_7
; RV32IA-NEXT: .LBB45_2: # %atomicrmw.start
; RV32IA-NEXT: # =>This Inner Loop Header: Depth=1
; RV32IA-NEXT: beq a5, s1, .LBB45_4
; RV32IA-NEXT: # %bb.3: # %atomicrmw.start
; RV32IA-NEXT: # in Loop: Header=BB45_2 Depth=1
; RV32IA-NEXT: sltu a0, s1, a5
; RV32IA-NEXT: j .LBB45_5
; RV32IA-NEXT: .LBB45_4: # in Loop: Header=BB45_2 Depth=1
; RV32IA-NEXT: sltu a0, s2, a4
; RV32IA-NEXT: .LBB45_5: # %atomicrmw.start
; RV32IA-NEXT: # in Loop: Header=BB45_2 Depth=1
; RV32IA-NEXT: mv a2, a4
; RV32IA-NEXT: mv a3, a5
; RV32IA-NEXT: bnez a0, .LBB45_1
; RV32IA-NEXT: # %bb.6: # %atomicrmw.start
; RV32IA-NEXT: # in Loop: Header=BB45_2 Depth=1
; RV32IA-NEXT: mv a2, s2
; RV32IA-NEXT: mv a3, s1
; RV32IA-NEXT: j .LBB45_1
; RV32IA-NEXT: .LBB45_7: # %atomicrmw.end
; RV32IA-NEXT: mv a0, a4
; RV32IA-NEXT: mv a1, a5
; RV32IA-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32IA-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32IA-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32IA-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 32
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_umax_i64_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -32
; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: ld a3, 0(a0)
; RV64I-NEXT: mv s1, a1
; RV64I-NEXT: j .LBB45_2
; RV64I-NEXT: .LBB45_1: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB45_2 Depth=1
; RV64I-NEXT: sd a3, 0(sp)
; RV64I-NEXT: mv a1, sp
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a3, zero
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_8@plt
; RV64I-NEXT: ld a3, 0(sp)
; RV64I-NEXT: bnez a0, .LBB45_4
; RV64I-NEXT: .LBB45_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: mv a2, a3
; RV64I-NEXT: bltu s1, a3, .LBB45_1
; RV64I-NEXT: # %bb.3: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB45_2 Depth=1
; RV64I-NEXT: mv a2, s1
; RV64I-NEXT: j .LBB45_1
; RV64I-NEXT: .LBB45_4: # %atomicrmw.end
; RV64I-NEXT: mv a0, a3
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_umax_i64_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amomaxu.d a0, a1, (a0)
; RV64IA-NEXT: ret
%1 = atomicrmw umax i64* %a, i64 %b monotonic
ret i64 %1
}
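
; umin mirrors umax: the same RV32 compare-exchange expansion with the select
; condition inverted (xori a0, a0, 1), while RV64IA selects a single
; amominu.d.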
define signext i64 @atomicrmw_umin_i64_monotonic(i64 *%a, i64 %b) nounwind {
; RV32I-LABEL: atomicrmw_umin_i64_monotonic:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lw a5, 4(a0)
; RV32I-NEXT: lw a4, 0(a0)
; RV32I-NEXT: mv s1, a2
; RV32I-NEXT: mv s2, a1
; RV32I-NEXT: j .LBB46_2
; RV32I-NEXT: .LBB46_1: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB46_2 Depth=1
; RV32I-NEXT: sw a4, 8(sp)
; RV32I-NEXT: sw a5, 12(sp)
; RV32I-NEXT: addi a1, sp, 8
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: mv a4, zero
; RV32I-NEXT: mv a5, zero
; RV32I-NEXT: call __atomic_compare_exchange_8@plt
; RV32I-NEXT: lw a5, 12(sp)
; RV32I-NEXT: lw a4, 8(sp)
; RV32I-NEXT: bnez a0, .LBB46_7
; RV32I-NEXT: .LBB46_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: beq a5, s1, .LBB46_4
; RV32I-NEXT: # %bb.3: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB46_2 Depth=1
; RV32I-NEXT: sltu a0, s1, a5
; RV32I-NEXT: j .LBB46_5
; RV32I-NEXT: .LBB46_4: # in Loop: Header=BB46_2 Depth=1
; RV32I-NEXT: sltu a0, s2, a4
; RV32I-NEXT: .LBB46_5: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB46_2 Depth=1
; RV32I-NEXT: xori a0, a0, 1
; RV32I-NEXT: mv a2, a4
; RV32I-NEXT: mv a3, a5
; RV32I-NEXT: bnez a0, .LBB46_1
; RV32I-NEXT: # %bb.6: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB46_2 Depth=1
; RV32I-NEXT: mv a2, s2
; RV32I-NEXT: mv a3, s1
; RV32I-NEXT: j .LBB46_1
; RV32I-NEXT: .LBB46_7: # %atomicrmw.end
; RV32I-NEXT: mv a0, a4
; RV32I-NEXT: mv a1, a5
; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV32IA-LABEL: atomicrmw_umin_i64_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -32
; RV32IA-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32IA-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32IA-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32IA-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32IA-NEXT: mv s0, a0
; RV32IA-NEXT: lw a5, 4(a0)
; RV32IA-NEXT: lw a4, 0(a0)
; RV32IA-NEXT: mv s1, a2
; RV32IA-NEXT: mv s2, a1
; RV32IA-NEXT: j .LBB46_2
; RV32IA-NEXT: .LBB46_1: # %atomicrmw.start
; RV32IA-NEXT: # in Loop: Header=BB46_2 Depth=1
; RV32IA-NEXT: sw a4, 8(sp)
; RV32IA-NEXT: sw a5, 12(sp)
; RV32IA-NEXT: addi a1, sp, 8
; RV32IA-NEXT: mv a0, s0
; RV32IA-NEXT: mv a4, zero
; RV32IA-NEXT: mv a5, zero
; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
; RV32IA-NEXT: lw a5, 12(sp)
; RV32IA-NEXT: lw a4, 8(sp)
; RV32IA-NEXT: bnez a0, .LBB46_7
; RV32IA-NEXT: .LBB46_2: # %atomicrmw.start
; RV32IA-NEXT: # =>This Inner Loop Header: Depth=1
; RV32IA-NEXT: beq a5, s1, .LBB46_4
; RV32IA-NEXT: # %bb.3: # %atomicrmw.start
; RV32IA-NEXT: # in Loop: Header=BB46_2 Depth=1
; RV32IA-NEXT: sltu a0, s1, a5
; RV32IA-NEXT: j .LBB46_5
; RV32IA-NEXT: .LBB46_4: # in Loop: Header=BB46_2 Depth=1
; RV32IA-NEXT: sltu a0, s2, a4
; RV32IA-NEXT: .LBB46_5: # %atomicrmw.start
; RV32IA-NEXT: # in Loop: Header=BB46_2 Depth=1
; RV32IA-NEXT: xori a0, a0, 1
; RV32IA-NEXT: mv a2, a4
; RV32IA-NEXT: mv a3, a5
; RV32IA-NEXT: bnez a0, .LBB46_1
; RV32IA-NEXT: # %bb.6: # %atomicrmw.start
; RV32IA-NEXT: # in Loop: Header=BB46_2 Depth=1
; RV32IA-NEXT: mv a2, s2
; RV32IA-NEXT: mv a3, s1
; RV32IA-NEXT: j .LBB46_1
; RV32IA-NEXT: .LBB46_7: # %atomicrmw.end
; RV32IA-NEXT: mv a0, a4
; RV32IA-NEXT: mv a1, a5
; RV32IA-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32IA-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32IA-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32IA-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 32
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomicrmw_umin_i64_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -32
; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: ld a3, 0(a0)
; RV64I-NEXT: mv s1, a1
; RV64I-NEXT: j .LBB46_2
; RV64I-NEXT: .LBB46_1: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB46_2 Depth=1
; RV64I-NEXT: sd a3, 0(sp)
; RV64I-NEXT: mv a1, sp
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a3, zero
; RV64I-NEXT: mv a4, zero
; RV64I-NEXT: call __atomic_compare_exchange_8@plt
; RV64I-NEXT: ld a3, 0(sp)
; RV64I-NEXT: bnez a0, .LBB46_4
; RV64I-NEXT: .LBB46_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: mv a2, a3
; RV64I-NEXT: bgeu s1, a3, .LBB46_1
; RV64I-NEXT: # %bb.3: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB46_2 Depth=1
; RV64I-NEXT: mv a2, s1
; RV64I-NEXT: j .LBB46_1
; RV64I-NEXT: .LBB46_4: # %atomicrmw.end
; RV64I-NEXT: mv a0, a3
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_umin_i64_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amominu.d a0, a1, (a0)
; RV64IA-NEXT: ret
%1 = atomicrmw umin i64* %a, i64 %b monotonic
ret i64 %1
}