mirror of
https://github.com/RPCS3/llvm-mirror.git
synced 2024-11-26 04:32:44 +01:00
914315597c
This commit adds a new IR level pass to the AMDGPU backend to perform atomic optimizations. It works by: - Running through a function and finding atomicrmw add/sub or uses of the atomic buffer intrinsics for add/sub. - If all arguments except the value to be added/subtracted are uniform, record the value to be optimized. - Run through the atomic operations we can optimize and, depending on whether the value is uniform/divergent use wavefront wide operations (DPP in the divergent case) to calculate the total amount to be atomically added/subtracted. - Then let only a single lane of each wavefront perform the atomic operation, reducing the total number of atomic operations in flight. - Lastly we recombine the result from the single lane to each lane of the wavefront, and calculate our individual lanes offset into the final result. Differential Revision: https://reviews.llvm.org/D51969 llvm-svn: 343973
52 lines
2.0 KiB
LLVM
; RUN: llc -march=amdgcn -amdgpu-atomic-optimizations=false -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SICIVI,FUNC %s
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -amdgpu-atomic-optimizations=false -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SICIVI,FUNC %s
; RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-atomic-optimizations=false -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX9,FUNC %s
; RUN: llc -march=r600 -mcpu=redwood -amdgpu-atomic-optimizations=false < %s | FileCheck -check-prefixes=R600,FUNC %s

; FUNC-LABEL: {{^}}atomic_add_local:
; SICIVI: s_mov_b32 m0
; GFX9-NOT: m0
; R600: LDS_ADD *
; GCN: ds_add_u32
; LDS atomic add whose result is unused: should select the non-returning
; ds_add_u32 form on GCN (LDS_ADD on R600). The scraped `|` artifact lines
; that split the original body have been removed.
define amdgpu_kernel void @atomic_add_local(i32 addrspace(3)* %local) {
  %unused = atomicrmw volatile add i32 addrspace(3)* %local, i32 5 seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_add_local_const_offset:
; SICIVI: s_mov_b32 m0
; GFX9-NOT: m0
; R600: LDS_ADD *
; GCN: ds_add_u32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
; LDS atomic add through a constant GEP (4 x i32 = 16 bytes): the offset
; should fold into the ds_add_u32 immediate (offset:16). Scrape-artifact
; `|` lines removed from the body.
define amdgpu_kernel void @atomic_add_local_const_offset(i32 addrspace(3)* %local) {
  %gep = getelementptr i32, i32 addrspace(3)* %local, i32 4
  %val = atomicrmw volatile add i32 addrspace(3)* %gep, i32 5 seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_add_ret_local:
; SICIVI: s_mov_b32 m0
; GFX9-NOT: m0
; R600: LDS_ADD_RET *
; GCN: ds_add_rtn_u32
; LDS atomic add whose result IS used (stored to global memory): should
; select the returning ds_add_rtn_u32 form (LDS_ADD_RET on R600).
; Scrape-artifact `|` lines removed from the body.
define amdgpu_kernel void @atomic_add_ret_local(i32 addrspace(1)* %out, i32 addrspace(3)* %local) {
  %val = atomicrmw volatile add i32 addrspace(3)* %local, i32 5 seq_cst
  store i32 %val, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}atomic_add_ret_local_const_offset:
; SICIVI: s_mov_b32 m0
; GFX9-NOT: m0
; R600: LDS_ADD_RET *
; GCN: ds_add_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:20
; Returning LDS atomic add through a constant GEP (5 x i32 = 20 bytes):
; offset should fold into ds_add_rtn_u32 (offset:20). Scrape-artifact
; `|` lines removed from the body.
define amdgpu_kernel void @atomic_add_ret_local_const_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %local) {
  %gep = getelementptr i32, i32 addrspace(3)* %local, i32 5
  %val = atomicrmw volatile add i32 addrspace(3)* %gep, i32 5 seq_cst
  store i32 %val, i32 addrspace(1)* %out
  ret void
}
|