; Scrape provenance (was web-UI metadata, converted to a comment so the file parses):
; mirror of https://github.com/RPCS3/llvm-mirror.git, synced 2024-11-22,
; commit dc20bc576c — "This is already how it is handled for global and flat
; atomics." Differential Revision: https://reviews.llvm.org/D102366
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -march=amdgcn -mcpu=gfx90a -verify-machineinstrs | FileCheck %s -check-prefix=GFX90A
; Declarations of the f64 atomic intrinsics exercised by the tests below.
declare double @llvm.amdgcn.buffer.atomic.fadd.f64(double, <4 x i32>, i32, i32, i1)
declare double @llvm.amdgcn.struct.buffer.atomic.fadd.f64(double, <4 x i32>, i32, i32, i32, i32 immarg)
declare double @llvm.amdgcn.raw.buffer.atomic.fadd.f64(double, <4 x i32>, i32, i32, i32 immarg)
declare double @llvm.amdgcn.struct.buffer.atomic.fmin.f64(double, <4 x i32>, i32, i32, i32, i32 immarg)
declare double @llvm.amdgcn.raw.buffer.atomic.fmin.f64(double, <4 x i32>, i32, i32, i32 immarg)
declare double @llvm.amdgcn.struct.buffer.atomic.fmax.f64(double, <4 x i32>, i32, i32, i32, i32 immarg)
declare double @llvm.amdgcn.raw.buffer.atomic.fmax.f64(double, <4 x i32>, i32, i32, i32 immarg)
declare double @llvm.amdgcn.global.atomic.fadd.f64.p1f64.f64(double addrspace(1)* %ptr, double %data)
declare double @llvm.amdgcn.global.atomic.fmin.f64.p1f64.f64(double addrspace(1)* %ptr, double %data)
declare double @llvm.amdgcn.global.atomic.fmax.f64.p1f64.f64(double addrspace(1)* %ptr, double %data)
declare double @llvm.amdgcn.flat.atomic.fadd.f64.p0f64.f64(double* %ptr, double %data)
declare double @llvm.amdgcn.flat.atomic.fmin.f64.p0f64.f64(double* %ptr, double %data)
declare double @llvm.amdgcn.flat.atomic.fmax.f64.p0f64.f64(double* %ptr, double %data)
declare double @llvm.amdgcn.ds.fadd.f64(double addrspace(3)* nocapture, double, i32, i32, i1)

; buffer fadd intrinsic, result unused: selects buffer_atomic_add_f64 without glc.
define amdgpu_kernel void @buffer_atomic_add_noret_f64(<4 x i32> inreg %rsrc, double %data, i32 %vindex) {
; GFX90A-LABEL: buffer_atomic_add_noret_f64:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX90A-NEXT: s_load_dword s8, s[0:1], 0x3c
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1]
; GFX90A-NEXT: v_mov_b32_e32 v2, s8
; GFX90A-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[4:7], 0 idxen
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = call double @llvm.amdgcn.buffer.atomic.fadd.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i1 0)
  ret void
}

; buffer fadd intrinsic, result stored: selects buffer_atomic_add_f64 with glc.
define amdgpu_ps void @buffer_atomic_add_rtn_f64(<4 x i32> inreg %rsrc, double %data, i32 %vindex) {
; GFX90A-LABEL: buffer_atomic_add_rtn_f64:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], 0 idxen glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = call double @llvm.amdgcn.buffer.atomic.fadd.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i1 0)
  store double %ret, double* undef
  ret void
}

; buffer fadd intrinsic with offset 4 and slc bit set.
define amdgpu_kernel void @buffer_atomic_add_rtn_f64_off4_slc(<4 x i32> inreg %rsrc, double %data, i32 %vindex, double addrspace(1)* %out) {
; GFX90A-LABEL: buffer_atomic_add_rtn_f64_off4_slc:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX90A-NEXT: s_load_dword s10, s[0:1], 0x3c
; GFX90A-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x44
; GFX90A-NEXT: v_mov_b32_e32 v2, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1]
; GFX90A-NEXT: v_mov_b32_e32 v3, s10
; GFX90A-NEXT: buffer_atomic_add_f64 v[0:1], v3, s[4:7], 0 idxen offset:4 glc slc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9]
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = call double @llvm.amdgcn.buffer.atomic.fadd.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 4, i1 1)
  store double %ret, double addrspace(1)* %out, align 8
  ret void
}

; raw-buffer fadd intrinsic, result unused (offen addressing, no glc).
define amdgpu_kernel void @raw_buffer_atomic_add_noret_f64(<4 x i32> inreg %rsrc, double %data, i32 %vindex) {
; GFX90A-LABEL: raw_buffer_atomic_add_noret_f64:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX90A-NEXT: s_load_dword s8, s[0:1], 0x3c
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1]
; GFX90A-NEXT: v_mov_b32_e32 v2, s8
; GFX90A-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[4:7], 0 offen
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = call double @llvm.amdgcn.raw.buffer.atomic.fadd.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0)
  ret void
}

; raw-buffer fadd intrinsic, result stored (offen addressing, glc set).
define amdgpu_ps void @raw_buffer_atomic_add_rtn_f64(<4 x i32> inreg %rsrc, double %data, i32 %vindex) {
; GFX90A-LABEL: raw_buffer_atomic_add_rtn_f64:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], 0 offen glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = call double @llvm.amdgcn.raw.buffer.atomic.fadd.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0)
  store double %ret, double* undef
  ret void
}

; raw-buffer fadd intrinsic: soffset 4 folds into the SGPR offset operand, slc set.
define amdgpu_kernel void @raw_buffer_atomic_add_rtn_f64_off4_slc(<4 x i32> inreg %rsrc, double %data, i32 %vindex, double addrspace(1)* %out) {
; GFX90A-LABEL: raw_buffer_atomic_add_rtn_f64_off4_slc:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX90A-NEXT: s_load_dword s10, s[0:1], 0x3c
; GFX90A-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x44
; GFX90A-NEXT: v_mov_b32_e32 v2, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1]
; GFX90A-NEXT: v_mov_b32_e32 v3, s10
; GFX90A-NEXT: buffer_atomic_add_f64 v[0:1], v3, s[4:7], 4 offen glc slc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9]
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = call double @llvm.amdgcn.raw.buffer.atomic.fadd.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 4, i32 2)
  store double %ret, double addrspace(1)* %out, align 8
  ret void
}

; struct-buffer fadd intrinsic, result unused (idxen addressing, no glc).
define amdgpu_kernel void @struct_buffer_atomic_add_noret_f64(<4 x i32> inreg %rsrc, double %data, i32 %vindex) {
; GFX90A-LABEL: struct_buffer_atomic_add_noret_f64:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX90A-NEXT: s_load_dword s8, s[0:1], 0x3c
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1]
; GFX90A-NEXT: v_mov_b32_e32 v2, s8
; GFX90A-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[4:7], 0 idxen
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = call double @llvm.amdgcn.struct.buffer.atomic.fadd.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0, i32 0)
  ret void
}

; struct-buffer fadd intrinsic, result stored (idxen addressing, glc set).
define amdgpu_ps void @struct_buffer_atomic_add_rtn_f64(<4 x i32> inreg %rsrc, double %data, i32 %vindex) {
; GFX90A-LABEL: struct_buffer_atomic_add_rtn_f64:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: buffer_atomic_add_f64 v[0:1], v2, s[0:3], 0 idxen glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = call double @llvm.amdgcn.struct.buffer.atomic.fadd.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0, i32 0)
  store double %ret, double* undef
  ret void
}

; struct-buffer fadd intrinsic: offset 4 becomes an immediate offset, slc set.
define amdgpu_kernel void @struct_buffer_atomic_add_rtn_f64_off4_slc(<4 x i32> inreg %rsrc, double %data, i32 %vindex, double addrspace(1)* %out) {
; GFX90A-LABEL: struct_buffer_atomic_add_rtn_f64_off4_slc:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX90A-NEXT: s_load_dword s10, s[0:1], 0x3c
; GFX90A-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x44
; GFX90A-NEXT: v_mov_b32_e32 v2, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1]
; GFX90A-NEXT: v_mov_b32_e32 v3, s10
; GFX90A-NEXT: buffer_atomic_add_f64 v[0:1], v3, s[4:7], 0 idxen offset:4 glc slc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9]
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = call double @llvm.amdgcn.struct.buffer.atomic.fadd.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 4, i32 0, i32 2)
  store double %ret, double addrspace(1)* %out, align 8
  ret void
}

; raw-buffer fmin intrinsic, result unused.
define amdgpu_kernel void @raw_buffer_atomic_min_noret_f64(<4 x i32> inreg %rsrc, double %data, i32 %vindex) {
; GFX90A-LABEL: raw_buffer_atomic_min_noret_f64:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX90A-NEXT: s_load_dword s8, s[0:1], 0x3c
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1]
; GFX90A-NEXT: v_mov_b32_e32 v2, s8
; GFX90A-NEXT: buffer_atomic_min_f64 v[0:1], v2, s[4:7], 0 offen
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = call double @llvm.amdgcn.raw.buffer.atomic.fmin.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0)
  ret void
}

; raw-buffer fmin intrinsic, result stored.
define amdgpu_ps void @raw_buffer_atomic_min_rtn_f64(<4 x i32> inreg %rsrc, double %data, i32 %vindex) {
; GFX90A-LABEL: raw_buffer_atomic_min_rtn_f64:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: buffer_atomic_min_f64 v[0:1], v2, s[0:3], 0 offen glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = call double @llvm.amdgcn.raw.buffer.atomic.fmin.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0)
  store double %ret, double* undef
  ret void
}

; raw-buffer fmin intrinsic with soffset 4 and slc.
define amdgpu_kernel void @raw_buffer_atomic_min_rtn_f64_off4_slc(<4 x i32> inreg %rsrc, double %data, i32 %vindex, double addrspace(1)* %out) {
; GFX90A-LABEL: raw_buffer_atomic_min_rtn_f64_off4_slc:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX90A-NEXT: s_load_dword s10, s[0:1], 0x3c
; GFX90A-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x44
; GFX90A-NEXT: v_mov_b32_e32 v2, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1]
; GFX90A-NEXT: v_mov_b32_e32 v3, s10
; GFX90A-NEXT: buffer_atomic_min_f64 v[0:1], v3, s[4:7], 4 offen glc slc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9]
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = call double @llvm.amdgcn.raw.buffer.atomic.fmin.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 4, i32 2)
  store double %ret, double addrspace(1)* %out, align 8
  ret void
}

; struct-buffer fmin intrinsic, result unused.
define amdgpu_kernel void @struct_buffer_atomic_min_noret_f64(<4 x i32> inreg %rsrc, double %data, i32 %vindex) {
; GFX90A-LABEL: struct_buffer_atomic_min_noret_f64:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX90A-NEXT: s_load_dword s8, s[0:1], 0x3c
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1]
; GFX90A-NEXT: v_mov_b32_e32 v2, s8
; GFX90A-NEXT: buffer_atomic_min_f64 v[0:1], v2, s[4:7], 0 idxen
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = call double @llvm.amdgcn.struct.buffer.atomic.fmin.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0, i32 0)
  ret void
}

; struct-buffer fmin intrinsic, result stored.
define amdgpu_ps void @struct_buffer_atomic_min_rtn_f64(<4 x i32> inreg %rsrc, double %data, i32 %vindex) {
; GFX90A-LABEL: struct_buffer_atomic_min_rtn_f64:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: buffer_atomic_min_f64 v[0:1], v2, s[0:3], 0 idxen glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = call double @llvm.amdgcn.struct.buffer.atomic.fmin.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0, i32 0)
  store double %ret, double* undef
  ret void
}

; struct-buffer fmin intrinsic with immediate offset 4 and slc.
define amdgpu_kernel void @struct_buffer_atomic_min_rtn_f64_off4_slc(<4 x i32> inreg %rsrc, double %data, i32 %vindex, double addrspace(1)* %out) {
; GFX90A-LABEL: struct_buffer_atomic_min_rtn_f64_off4_slc:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX90A-NEXT: s_load_dword s10, s[0:1], 0x3c
; GFX90A-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x44
; GFX90A-NEXT: v_mov_b32_e32 v2, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1]
; GFX90A-NEXT: v_mov_b32_e32 v3, s10
; GFX90A-NEXT: buffer_atomic_min_f64 v[0:1], v3, s[4:7], 0 idxen offset:4 glc slc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9]
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = call double @llvm.amdgcn.struct.buffer.atomic.fmin.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 4, i32 0, i32 2)
  store double %ret, double addrspace(1)* %out, align 8
  ret void
}

; raw-buffer fmax intrinsic, result unused.
define amdgpu_kernel void @raw_buffer_atomic_max_noret_f64(<4 x i32> inreg %rsrc, double %data, i32 %vindex) {
; GFX90A-LABEL: raw_buffer_atomic_max_noret_f64:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX90A-NEXT: s_load_dword s8, s[0:1], 0x3c
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1]
; GFX90A-NEXT: v_mov_b32_e32 v2, s8
; GFX90A-NEXT: buffer_atomic_max_f64 v[0:1], v2, s[4:7], 0 offen
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = call double @llvm.amdgcn.raw.buffer.atomic.fmax.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0)
  ret void
}

; raw-buffer fmax intrinsic, result stored.
define amdgpu_ps void @raw_buffer_atomic_max_rtn_f64(<4 x i32> inreg %rsrc, double %data, i32 %vindex) {
; GFX90A-LABEL: raw_buffer_atomic_max_rtn_f64:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: buffer_atomic_max_f64 v[0:1], v2, s[0:3], 0 offen glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = call double @llvm.amdgcn.raw.buffer.atomic.fmax.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0)
  store double %ret, double* undef
  ret void
}

; raw-buffer fmax intrinsic with soffset 4 and slc.
define amdgpu_kernel void @raw_buffer_atomic_max_rtn_f64_off4_slc(<4 x i32> inreg %rsrc, double %data, i32 %vindex, double addrspace(1)* %out) {
; GFX90A-LABEL: raw_buffer_atomic_max_rtn_f64_off4_slc:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX90A-NEXT: s_load_dword s10, s[0:1], 0x3c
; GFX90A-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x44
; GFX90A-NEXT: v_mov_b32_e32 v2, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1]
; GFX90A-NEXT: v_mov_b32_e32 v3, s10
; GFX90A-NEXT: buffer_atomic_max_f64 v[0:1], v3, s[4:7], 4 offen glc slc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9]
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = call double @llvm.amdgcn.raw.buffer.atomic.fmax.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 4, i32 2)
  store double %ret, double addrspace(1)* %out, align 8
  ret void
}

; struct-buffer fmax intrinsic, result unused.
define amdgpu_kernel void @struct_buffer_atomic_max_noret_f64(<4 x i32> inreg %rsrc, double %data, i32 %vindex) {
; GFX90A-LABEL: struct_buffer_atomic_max_noret_f64:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX90A-NEXT: s_load_dword s8, s[0:1], 0x3c
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1]
; GFX90A-NEXT: v_mov_b32_e32 v2, s8
; GFX90A-NEXT: buffer_atomic_max_f64 v[0:1], v2, s[4:7], 0 idxen
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = call double @llvm.amdgcn.struct.buffer.atomic.fmax.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0, i32 0)
  ret void
}

; struct-buffer fmax intrinsic, result stored.
define amdgpu_ps void @struct_buffer_atomic_max_rtn_f64(<4 x i32> inreg %rsrc, double %data, i32 %vindex) {
; GFX90A-LABEL: struct_buffer_atomic_max_rtn_f64:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: buffer_atomic_max_f64 v[0:1], v2, s[0:3], 0 idxen glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = call double @llvm.amdgcn.struct.buffer.atomic.fmax.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 0, i32 0)
  store double %ret, double* undef
  ret void
}

; struct-buffer fmax intrinsic with immediate offset 4 and slc.
define amdgpu_kernel void @struct_buffer_atomic_max_rtn_f64_off4_slc(<4 x i32> inreg %rsrc, double %data, i32 %vindex, double addrspace(1)* %out) {
; GFX90A-LABEL: struct_buffer_atomic_max_rtn_f64_off4_slc:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX90A-NEXT: s_load_dword s10, s[0:1], 0x3c
; GFX90A-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x44
; GFX90A-NEXT: v_mov_b32_e32 v2, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1]
; GFX90A-NEXT: v_mov_b32_e32 v3, s10
; GFX90A-NEXT: buffer_atomic_max_f64 v[0:1], v3, s[4:7], 0 idxen offset:4 glc slc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9]
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = call double @llvm.amdgcn.struct.buffer.atomic.fmax.f64(double %data, <4 x i32> %rsrc, i32 %vindex, i32 4, i32 0, i32 2)
  store double %ret, double addrspace(1)* %out, align 8
  ret void
}

; global fadd intrinsic, result unused: selects global_atomic_add_f64 without glc.
define amdgpu_kernel void @global_atomic_fadd_f64_noret(double addrspace(1)* %ptr, double %data) {
; GFX90A-LABEL: global_atomic_fadd_f64_noret:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX90A-NEXT: v_mov_b32_e32 v2, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v0, s2
; GFX90A-NEXT: v_mov_b32_e32 v1, s3
; GFX90A-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = call double @llvm.amdgcn.global.atomic.fadd.f64.p1f64.f64(double addrspace(1)* %ptr, double %data)
  ret void
}

; global fmin intrinsic, result unused.
define amdgpu_kernel void @global_atomic_fmin_f64_noret(double addrspace(1)* %ptr, double %data) {
; GFX90A-LABEL: global_atomic_fmin_f64_noret:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX90A-NEXT: v_mov_b32_e32 v2, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v0, s2
; GFX90A-NEXT: v_mov_b32_e32 v1, s3
; GFX90A-NEXT: global_atomic_min_f64 v2, v[0:1], s[0:1]
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = call double @llvm.amdgcn.global.atomic.fmin.f64.p1f64.f64(double addrspace(1)* %ptr, double %data)
  ret void
}

; global fmax intrinsic, result unused.
define amdgpu_kernel void @global_atomic_fmax_f64_noret(double addrspace(1)* %ptr, double %data) {
; GFX90A-LABEL: global_atomic_fmax_f64_noret:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX90A-NEXT: v_mov_b32_e32 v2, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v0, s2
; GFX90A-NEXT: v_mov_b32_e32 v1, s3
; GFX90A-NEXT: global_atomic_max_f64 v2, v[0:1], s[0:1]
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = call double @llvm.amdgcn.global.atomic.fmax.f64.p1f64.f64(double addrspace(1)* %ptr, double %data)
  ret void
}

; atomicrmw fadd, seq_cst (system scope): expands to a CAS loop.
define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat(double addrspace(1)* %ptr) #1 {
; GFX90A-LABEL: global_atomic_fadd_f64_noret_pat:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX90A-NEXT: s_mov_b64 s[2:3], 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[4:5], s[4:5] op_sel:[0,1]
; GFX90A-NEXT: BB24_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: v_mov_b32_e32 v4, 0
; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], 4.0
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1_vol
; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; GFX90A-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[0:1], v[0:1] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX90A-NEXT: s_cbranch_execnz BB24_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = atomicrmw fadd double addrspace(1)* %ptr, double 4.0 seq_cst
  ret void
}

; atomicrmw fadd at agent scope: selects the hardware global_atomic_add_f64.
define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_agent(double addrspace(1)* %ptr) #1 {
; GFX90A-LABEL: global_atomic_fadd_f64_noret_pat_agent:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX90A-NEXT: v_mov_b32_e32 v0, 0
; GFX90A-NEXT: v_mov_b32_e32 v2, 0
; GFX90A-NEXT: v_mov_b32_e32 v1, 0x40100000
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1_vol
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = atomicrmw fadd double addrspace(1)* %ptr, double 4.0 syncscope("agent") seq_cst
  ret void
}

; atomicrmw fadd at one-as scope: expands to a CAS loop.
define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_system(double addrspace(1)* %ptr) #1 {
; GFX90A-LABEL: global_atomic_fadd_f64_noret_pat_system:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX90A-NEXT: s_mov_b64 s[2:3], 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[4:5], s[4:5] op_sel:[0,1]
; GFX90A-NEXT: BB26_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: v_mov_b32_e32 v4, 0
; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], 4.0
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1_vol
; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; GFX90A-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[0:1], v[0:1] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX90A-NEXT: s_cbranch_execnz BB26_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = atomicrmw fadd double addrspace(1)* %ptr, double 4.0 syncscope("one-as") seq_cst
  ret void
}

; same as the agent-scope case but under attribute set #0.
define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_flush(double addrspace(1)* %ptr) #0 {
; GFX90A-LABEL: global_atomic_fadd_f64_noret_pat_flush:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX90A-NEXT: v_mov_b32_e32 v0, 0
; GFX90A-NEXT: v_mov_b32_e32 v2, 0
; GFX90A-NEXT: v_mov_b32_e32 v1, 0x40100000
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1_vol
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = atomicrmw fadd double addrspace(1)* %ptr, double 4.0 syncscope("agent") seq_cst
  ret void
}

; global fadd intrinsic, result returned: glc form.
define double @global_atomic_fadd_f64_rtn(double addrspace(1)* %ptr, double %data) {
; GFX90A-LABEL: global_atomic_fadd_f64_rtn:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: global_atomic_add_f64 v[0:1], v[0:1], v[2:3], off glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: s_setpc_b64 s[30:31]
main_body:
  %ret = call double @llvm.amdgcn.global.atomic.fadd.f64.p1f64.f64(double addrspace(1)* %ptr, double %data)
  ret double %ret
}

; atomicrmw fadd seq_cst with used result: CAS loop, old value returned.
define double @global_atomic_fadd_f64_rtn_pat(double addrspace(1)* %ptr, double %data) #1 {
; GFX90A-LABEL: global_atomic_fadd_f64_rtn_pat:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: global_load_dwordx2 v[2:3], v[0:1], off
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
; GFX90A-NEXT: BB29_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1]
; GFX90A-NEXT: v_add_f64 v[2:3], v[4:5], 4.0
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1_vol
; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz BB29_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX90A-NEXT: v_mov_b32_e32 v0, v2
; GFX90A-NEXT: v_mov_b32_e32 v1, v3
; GFX90A-NEXT: s_setpc_b64 s[30:31]
main_body:
  %ret = atomicrmw fadd double addrspace(1)* %ptr, double 4.0 seq_cst
  ret double %ret
}

; atomicrmw fadd at agent scope with used result: hardware instruction with glc.
define double @global_atomic_fadd_f64_rtn_pat_agent(double addrspace(1)* %ptr, double %data) #1 {
; GFX90A-LABEL: global_atomic_fadd_f64_rtn_pat_agent:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v2, 0
; GFX90A-NEXT: v_mov_b32_e32 v3, 0x40100000
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: global_atomic_add_f64 v[0:1], v[0:1], v[2:3], off glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1_vol
; GFX90A-NEXT: s_setpc_b64 s[30:31]
main_body:
  %ret = atomicrmw fadd double addrspace(1)* %ptr, double 4.0 syncscope("agent") seq_cst
  ret double %ret
}

; atomicrmw fadd at one-as scope with used result: CAS loop.
define double @global_atomic_fadd_f64_rtn_pat_system(double addrspace(1)* %ptr, double %data) #1 {
; GFX90A-LABEL: global_atomic_fadd_f64_rtn_pat_system:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: global_load_dwordx2 v[2:3], v[0:1], off
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
; GFX90A-NEXT: BB31_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1]
; GFX90A-NEXT: v_add_f64 v[2:3], v[4:5], 4.0
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1_vol
; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz BB31_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX90A-NEXT: v_mov_b32_e32 v0, v2
; GFX90A-NEXT: v_mov_b32_e32 v1, v3
; GFX90A-NEXT: s_setpc_b64 s[30:31]
main_body:
  %ret = atomicrmw fadd double addrspace(1)* %ptr, double 4.0 syncscope("one-as") seq_cst
  ret double %ret
}

; global fmax intrinsic, result returned.
define double @global_atomic_fmax_f64_rtn(double addrspace(1)* %ptr, double %data) {
; GFX90A-LABEL: global_atomic_fmax_f64_rtn:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: global_atomic_max_f64 v[0:1], v[0:1], v[2:3], off glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: s_setpc_b64 s[30:31]
main_body:
  %ret = call double @llvm.amdgcn.global.atomic.fmax.f64.p1f64.f64(double addrspace(1)* %ptr, double %data)
  ret double %ret
}

; global fmin intrinsic, result returned.
define double @global_atomic_fmin_f64_rtn(double addrspace(1)* %ptr, double %data) {
; GFX90A-LABEL: global_atomic_fmin_f64_rtn:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: global_atomic_min_f64 v[0:1], v[0:1], v[2:3], off glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: s_setpc_b64 s[30:31]
main_body:
  %ret = call double @llvm.amdgcn.global.atomic.fmin.f64.p1f64.f64(double addrspace(1)* %ptr, double %data)
  ret double %ret
}

; agent-scope atomicrmw fadd without the unsafe-atomics attributes: CAS loop.
define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_agent_safe(double addrspace(1)* %ptr) {
; GFX90A-LABEL: global_atomic_fadd_f64_noret_pat_agent_safe:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX90A-NEXT: s_mov_b64 s[2:3], 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[4:5], s[4:5] op_sel:[0,1]
; GFX90A-NEXT: BB34_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: v_mov_b32_e32 v4, 0
; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], 4.0
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1_vol
; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; GFX90A-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[0:1], v[0:1] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX90A-NEXT: s_cbranch_execnz BB34_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = atomicrmw fadd double addrspace(1)* %ptr, double 4.0 syncscope("agent") seq_cst
  ret void
}

; flat-address atomicrmw fadd, seq_cst: CAS loop using flat instructions.
define amdgpu_kernel void @flat_atomic_fadd_f64_noret_pat(double* %ptr) #1 {
; GFX90A-LABEL: flat_atomic_fadd_f64_noret_pat:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX90A-NEXT: s_mov_b64 s[2:3], 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[0:1], s[0:1] op_sel:[0,1]
; GFX90A-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
; GFX90A-NEXT: BB35_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], 4.0
; GFX90A-NEXT: v_pk_mov_b32 v[4:5], s[0:1], s[0:1] op_sel:[0,1]
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1_vol
; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; GFX90A-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[0:1], v[0:1] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX90A-NEXT: s_cbranch_execnz BB35_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = atomicrmw fadd double* %ptr, double 4.0 seq_cst
  ret void
}

; With "amdgpu-unsafe-fp-atomics" (#1) and agent scope, the fadd pattern is
; selected directly as flat_atomic_add_f64, with cache invalidation
; (buffer_wbinvl1_vol) for the seq_cst ordering.
define amdgpu_kernel void @flat_atomic_fadd_f64_noret_pat_agent(double* %ptr) #1 {
; GFX90A-LABEL: flat_atomic_fadd_f64_noret_pat_agent:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX90A-NEXT: v_mov_b32_e32 v2, 0
; GFX90A-NEXT: v_mov_b32_e32 v3, 0x40100000
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[0:1], s[0:1] op_sel:[0,1]
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: flat_atomic_add_f64 v[0:1], v[2:3]
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1_vol
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = atomicrmw fadd double* %ptr, double 4.0 syncscope("agent") seq_cst
  ret void
}
|
|
|
|
; "one-as" scope with unsafe-fp-atomics (#1): still expanded to a
; flat_atomic_cmpswap_x2 CAS loop.
define amdgpu_kernel void @flat_atomic_fadd_f64_noret_pat_system(double* %ptr) #1 {
; GFX90A-LABEL: flat_atomic_fadd_f64_noret_pat_system:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX90A-NEXT: s_mov_b64 s[2:3], 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[0:1], s[0:1] op_sel:[0,1]
; GFX90A-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
; GFX90A-NEXT: BB37_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], 4.0
; GFX90A-NEXT: v_pk_mov_b32 v[4:5], s[0:1], s[0:1] op_sel:[0,1]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1_vol
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; GFX90A-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[0:1], v[0:1] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX90A-NEXT: s_cbranch_execnz BB37_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = atomicrmw fadd double* %ptr, double 4.0 syncscope("one-as") seq_cst
  ret void
}
|
|
|
|
; Returning variant of the system-scope seq_cst flat fadd: expands to a CAS
; loop and moves the final swapped value into v[0:1] as the result.
define double @flat_atomic_fadd_f64_rtn_pat(double* %ptr) #1 {
; GFX90A-LABEL: flat_atomic_fadd_f64_rtn_pat:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
; GFX90A-NEXT: BB38_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1]
; GFX90A-NEXT: v_add_f64 v[2:3], v[4:5], 4.0
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1_vol
; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz BB38_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX90A-NEXT: v_mov_b32_e32 v0, v2
; GFX90A-NEXT: v_mov_b32_e32 v1, v3
; GFX90A-NEXT: s_setpc_b64 s[30:31]
main_body:
  %ret = atomicrmw fadd double* %ptr, double 4.0 seq_cst
  ret double %ret
}
|
|
|
|
; Agent scope + unsafe-fp-atomics (#1): the returning fadd selects
; flat_atomic_add_f64 ... glc directly.
define double @flat_atomic_fadd_f64_rtn_pat_agent(double* %ptr) #1 {
; GFX90A-LABEL: flat_atomic_fadd_f64_rtn_pat_agent:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v2, 0
; GFX90A-NEXT: v_mov_b32_e32 v3, 0x40100000
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: flat_atomic_add_f64 v[0:1], v[0:1], v[2:3] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1_vol
; GFX90A-NEXT: s_setpc_b64 s[30:31]
main_body:
  %ret = atomicrmw fadd double* %ptr, double 4.0 syncscope("agent") seq_cst
  ret double %ret
}
|
|
|
|
; "one-as" scope returning variant: expanded to a flat_atomic_cmpswap_x2 loop
; even with unsafe-fp-atomics (#1).
define double @flat_atomic_fadd_f64_rtn_pat_system(double* %ptr) #1 {
; GFX90A-LABEL: flat_atomic_fadd_f64_rtn_pat_system:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
; GFX90A-NEXT: BB40_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1]
; GFX90A-NEXT: v_add_f64 v[2:3], v[4:5], 4.0
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1_vol
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz BB40_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX90A-NEXT: v_mov_b32_e32 v0, v2
; GFX90A-NEXT: v_mov_b32_e32 v1, v3
; GFX90A-NEXT: s_setpc_b64 s[30:31]
main_body:
  %ret = atomicrmw fadd double* %ptr, double 4.0 syncscope("one-as") seq_cst
  ret double %ret
}
|
|
|
|
; The llvm.amdgcn.flat.atomic.fadd intrinsic always selects the hardware
; instruction directly (no attribute gating, no CAS expansion).
define amdgpu_kernel void @flat_atomic_fadd_f64_noret(double* %ptr, double %data) {
; GFX90A-LABEL: flat_atomic_fadd_f64_noret:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v0, s0
; GFX90A-NEXT: v_mov_b32_e32 v1, s1
; GFX90A-NEXT: v_mov_b32_e32 v2, s2
; GFX90A-NEXT: v_mov_b32_e32 v3, s3
; GFX90A-NEXT: flat_atomic_add_f64 v[0:1], v[2:3]
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = call double @llvm.amdgcn.flat.atomic.fadd.f64.p0f64.f64(double* %ptr, double %data)
  ret void
}
|
|
|
|
; Returning form of the flat fadd intrinsic: flat_atomic_add_f64 ... glc.
define double @flat_atomic_fadd_f64_rtn(double* %ptr, double %data) {
; GFX90A-LABEL: flat_atomic_fadd_f64_rtn:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: flat_atomic_add_f64 v[0:1], v[0:1], v[2:3] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: s_setpc_b64 s[30:31]
main_body:
  %ret = call double @llvm.amdgcn.flat.atomic.fadd.f64.p0f64.f64(double* %ptr, double %data)
  ret double %ret
}
|
|
|
|
; No "amdgpu-unsafe-fp-atomics" attribute: the agent-scope seq_cst flat fadd
; is NOT selected as flat_atomic_add_f64 (contrast with
; @flat_atomic_fadd_f64_noret_pat_agent) and expands to a CAS loop.
define amdgpu_kernel void @flat_atomic_fadd_f64_noret_pat_agent_safe(double* %ptr) {
; GFX90A-LABEL: flat_atomic_fadd_f64_noret_pat_agent_safe:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX90A-NEXT: s_mov_b64 s[2:3], 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[0:1], s[0:1] op_sel:[0,1]
; GFX90A-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
; GFX90A-NEXT: BB43_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], 4.0
; GFX90A-NEXT: v_pk_mov_b32 v[4:5], s[0:1], s[0:1] op_sel:[0,1]
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1_vol
; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; GFX90A-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[0:1], v[0:1] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX90A-NEXT: s_cbranch_execnz BB43_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = atomicrmw fadd double* %ptr, double 4.0 syncscope("agent") seq_cst
  ret void
}
|
|
|
|
; llvm.amdgcn.flat.atomic.fmin intrinsic selects flat_atomic_min_f64 directly.
define amdgpu_kernel void @flat_atomic_fmin_f64_noret(double* %ptr, double %data) {
; GFX90A-LABEL: flat_atomic_fmin_f64_noret:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v0, s0
; GFX90A-NEXT: v_mov_b32_e32 v1, s1
; GFX90A-NEXT: v_mov_b32_e32 v2, s2
; GFX90A-NEXT: v_mov_b32_e32 v3, s3
; GFX90A-NEXT: flat_atomic_min_f64 v[0:1], v[2:3]
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = call double @llvm.amdgcn.flat.atomic.fmin.f64.p0f64.f64(double* %ptr, double %data)
  ret void
}
|
|
|
|
; Returning form of the flat fmin intrinsic: flat_atomic_min_f64 ... glc.
define double @flat_atomic_fmin_f64_rtn(double* %ptr, double %data) {
; GFX90A-LABEL: flat_atomic_fmin_f64_rtn:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: flat_atomic_min_f64 v[0:1], v[0:1], v[2:3] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: s_setpc_b64 s[30:31]
main_body:
  %ret = call double @llvm.amdgcn.flat.atomic.fmin.f64.p0f64.f64(double* %ptr, double %data)
  ret double %ret
}
|
|
|
|
; llvm.amdgcn.flat.atomic.fmax intrinsic selects flat_atomic_max_f64 directly.
define amdgpu_kernel void @flat_atomic_fmax_f64_noret(double* %ptr, double %data) {
; GFX90A-LABEL: flat_atomic_fmax_f64_noret:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v0, s0
; GFX90A-NEXT: v_mov_b32_e32 v1, s1
; GFX90A-NEXT: v_mov_b32_e32 v2, s2
; GFX90A-NEXT: v_mov_b32_e32 v3, s3
; GFX90A-NEXT: flat_atomic_max_f64 v[0:1], v[2:3]
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = call double @llvm.amdgcn.flat.atomic.fmax.f64.p0f64.f64(double* %ptr, double %data)
  ret void
}
|
|
|
|
; Returning form of the flat fmax intrinsic: flat_atomic_max_f64 ... glc.
define double @flat_atomic_fmax_f64_rtn(double* %ptr, double %data) {
; GFX90A-LABEL: flat_atomic_fmax_f64_rtn:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: flat_atomic_max_f64 v[0:1], v[0:1], v[2:3] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: s_setpc_b64 s[30:31]
main_body:
  %ret = call double @llvm.amdgcn.flat.atomic.fmax.f64.p0f64.f64(double* %ptr, double %data)
  ret double %ret
}
|
|
|
|
; llvm.amdgcn.ds.fadd.f64 intrinsic on LDS selects ds_add_f64 directly.
define amdgpu_kernel void @local_atomic_fadd_f64_noret(double addrspace(3)* %ptr, double %data) {
; GFX90A-LABEL: local_atomic_fadd_f64_noret:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dword s4, s[0:1], 0x24
; GFX90A-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x2c
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v2, s4
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1]
; GFX90A-NEXT: ds_add_f64 v2, v[0:1]
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = call double @llvm.amdgcn.ds.fadd.f64(double addrspace(3)* %ptr, double %data, i32 0, i32 0, i1 0)
  ret void
}
|
|
|
|
; Returning form of the ds.fadd intrinsic: ds_add_rtn_f64.
define double @local_atomic_fadd_f64_rtn(double addrspace(3)* %ptr, double %data) {
; GFX90A-LABEL: local_atomic_fadd_f64_rtn:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v3, v2
; GFX90A-NEXT: v_mov_b32_e32 v2, v1
; GFX90A-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: s_setpc_b64 s[30:31]
main_body:
  %ret = call double @llvm.amdgcn.ds.fadd.f64(double addrspace(3)* %ptr, double %data, i32 0, i32 0, i1 0)
  ret double %ret
}
|
|
|
|
; atomicrmw fadd on LDS with "amdgpu-unsafe-fp-atomics" (#1) selects
; ds_add_f64 directly instead of a CAS loop.
define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat(double addrspace(3)* %ptr) #1 {
; GFX90A-LABEL: local_atomic_fadd_f64_noret_pat:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dword s0, s[0:1], 0x24
; GFX90A-NEXT: v_mov_b32_e32 v0, 0
; GFX90A-NEXT: v_mov_b32_e32 v1, 0x40100000
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v2, s0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: ds_add_f64 v2, v[0:1]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = atomicrmw fadd double addrspace(3)* %ptr, double 4.0 seq_cst
  ret void
}
|
|
|
|
; Same as @local_atomic_fadd_f64_noret_pat but with
; denormal-fp-math=preserve-sign (#0, which also sets unsafe-fp-atomics):
; still selects ds_add_f64.
define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush(double addrspace(3)* %ptr) #0 {
; GFX90A-LABEL: local_atomic_fadd_f64_noret_pat_flush:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dword s0, s[0:1], 0x24
; GFX90A-NEXT: v_mov_b32_e32 v0, 0
; GFX90A-NEXT: v_mov_b32_e32 v1, 0x40100000
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v2, s0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: ds_add_f64 v2, v[0:1]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = atomicrmw fadd double addrspace(3)* %ptr, double 4.0 seq_cst
  ret void
}
|
|
|
|
; Attribute #4 has denormal flushing but NOT "amdgpu-unsafe-fp-atomics": the
; LDS fadd is expanded to a ds_cmpst_rtn_b64 CAS loop, matching how global and
; flat atomics are handled in the safe case.
define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush_safe(double addrspace(3)* %ptr) #4 {
; GFX90A-LABEL: local_atomic_fadd_f64_noret_pat_flush_safe:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_load_dword s0, s[0:1], 0x24
; GFX90A-NEXT: s_mov_b64 s[2:3], 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v0, s0
; GFX90A-NEXT: ds_read_b64 v[0:1], v0
; GFX90A-NEXT: BB52_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_add_f64 v[2:3], v[0:1], 4.0
; GFX90A-NEXT: v_mov_b32_e32 v4, s0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: ds_cmpst_rtn_b64 v[2:3], v4, v[0:1], v[2:3]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[0:1]
; GFX90A-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[2:3] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX90A-NEXT: s_cbranch_execnz BB52_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX90A-NEXT: s_endpgm
main_body:
  %ret = atomicrmw fadd double addrspace(3)* %ptr, double 4.0 seq_cst
  ret void
}
|
|
|
|
; Returning atomicrmw fadd on LDS with unsafe-fp-atomics (#1): selects
; ds_add_rtn_f64 directly.
define double @local_atomic_fadd_f64_rtn_pat(double addrspace(3)* %ptr, double %data) #1 {
; GFX90A-LABEL: local_atomic_fadd_f64_rtn_pat:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v2, 0
; GFX90A-NEXT: v_mov_b32_e32 v3, 0x40100000
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: s_setpc_b64 s[30:31]
main_body:
  %ret = atomicrmw fadd double addrspace(3)* %ptr, double 4.0 seq_cst
  ret double %ret
}
|
|
|
|
; ds.fadd intrinsic with denormal-fp-math=ieee and unsafe-fp-atomics (#2):
; selects ds_add_rtn_f64.
define double @local_atomic_fadd_f64_rtn_ieee_unsafe(double addrspace(3)* %ptr, double %data) #2 {
; GFX90A-LABEL: local_atomic_fadd_f64_rtn_ieee_unsafe:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v3, v2
; GFX90A-NEXT: v_mov_b32_e32 v2, v1
; GFX90A-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: s_setpc_b64 s[30:31]
main_body:
  %ret = call double @llvm.amdgcn.ds.fadd.f64(double addrspace(3)* %ptr, double %data, i32 0, i32 0, i1 0)
  ret double %ret
}
|
|
|
|
; ds.fadd intrinsic with denormal-fp-math=ieee and WITHOUT unsafe-fp-atomics
; (#3): the explicit intrinsic still selects ds_add_rtn_f64 — codegen is
; identical to the unsafe variant above.
define double @local_atomic_fadd_f64_rtn_ieee_safe(double addrspace(3)* %ptr, double %data) #3 {
; GFX90A-LABEL: local_atomic_fadd_f64_rtn_ieee_safe:
; GFX90A: ; %bb.0: ; %main_body
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v3, v2
; GFX90A-NEXT: v_mov_b32_e32 v2, v1
; GFX90A-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: s_setpc_b64 s[30:31]
main_body:
  %ret = call double @llvm.amdgcn.ds.fadd.f64(double addrspace(3)* %ptr, double %data, i32 0, i32 0, i1 0)
  ret double %ret
}
|
|
|
|
attributes #0 = { "denormal-fp-math"="preserve-sign,preserve-sign" "amdgpu-unsafe-fp-atomics"="true" }
|
|
attributes #1 = { "amdgpu-unsafe-fp-atomics"="true" }
|
|
attributes #2 = { "denormal-fp-math"="ieee,ieee" "amdgpu-unsafe-fp-atomics"="true" }
|
|
attributes #3 = { "denormal-fp-math"="ieee,ieee" }
|
|
attributes #4 = { "denormal-fp-math"="preserve-sign,preserve-sign" }
|