Add the scratch wave offset to the scratch buffer descriptor (SRSrc) in
the entry function prologue. This allows us to remove the scratch wave
offset register from the calling convention ABI.

As part of this change, allow the use of an inline constant zero for the
SOffset of MUBUF instructions accessing the stack in entry functions
when a frame pointer is not requested/required.

Entry functions with calls still need to set up the calling convention
ABI stack pointer register, and reference it in order to address
arguments of called functions. The ABI stack pointer register remains
unswizzled, but is now wave-relative instead of queue-relative.

Non-entry functions also use an inline constant zero SOffset for
wave-relative scratch access, but continue to use the stack and frame
pointers as before. When the stack or frame pointer is converted to a
swizzled offset it is now scaled directly, as the scratch wave offset no
longer needs to be subtracted first.

Update llvm/docs/AMDGPUUsage.rst to reflect these changes to the calling
convention.

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D75138
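
The codegen effect exercised by the test below, as a minimal before/after
sketch (the SGPR holding the old scratch wave offset is illustrative, not
taken from the patch):

; before: the scratch wave offset register is passed in the SOffset operand
buffer_store_byte v0, off, s[4:7], s8 offset:8
; after: the wave offset is folded into the SRD base in the prologue, so
; SOffset can be the inline constant 0
buffer_store_byte v0, off, s[4:7], 0 offset:8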

; RUN: llc -march=amdgcn -mattr=+max-private-element-size-16 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SICIVI %s
; RUN: llc -march=amdgcn -mcpu=fiji -mattr=+max-private-element-size-16 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SICIVI %s
; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=+max-private-element-size-16 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9 %s

; Test addressing modes when the scratch base is not a frame index.
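;
; In the checks below, the MUBUF operands are, in order: the data register,
; the VGPR offset (off = none), the scratch resource descriptor (s[4:7]),
; the SOffset (the inline constant 0 this change enables), and the
; immediate byte offset.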

; GCN-LABEL: {{^}}store_private_offset_i8:
; GCN: buffer_store_byte v{{[0-9]+}}, off, s[4:7], 0 offset:8
define amdgpu_kernel void @store_private_offset_i8() #0 {
  store volatile i8 5, i8 addrspace(5)* inttoptr (i32 8 to i8 addrspace(5)*)
  ret void
}

; GCN-LABEL: {{^}}store_private_offset_i16:
; GCN: buffer_store_short v{{[0-9]+}}, off, s[4:7], 0 offset:8
define amdgpu_kernel void @store_private_offset_i16() #0 {
  store volatile i16 5, i16 addrspace(5)* inttoptr (i32 8 to i16 addrspace(5)*)
  ret void
}

; GCN-LABEL: {{^}}store_private_offset_i32:
; GCN: buffer_store_dword v{{[0-9]+}}, off, s[4:7], 0 offset:8
define amdgpu_kernel void @store_private_offset_i32() #0 {
  store volatile i32 5, i32 addrspace(5)* inttoptr (i32 8 to i32 addrspace(5)*)
  ret void
}

; GCN-LABEL: {{^}}store_private_offset_v2i32:
; GCN: buffer_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], 0 offset:8
define amdgpu_kernel void @store_private_offset_v2i32() #0 {
  store volatile <2 x i32> <i32 5, i32 10>, <2 x i32> addrspace(5)* inttoptr (i32 8 to <2 x i32> addrspace(5)*)
  ret void
}

; GCN-LABEL: {{^}}store_private_offset_v4i32:
; GCN: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], 0 offset:8
define amdgpu_kernel void @store_private_offset_v4i32() #0 {
  store volatile <4 x i32> <i32 5, i32 10, i32 15, i32 0>, <4 x i32> addrspace(5)* inttoptr (i32 8 to <4 x i32> addrspace(5)*)
  ret void
}

; GCN-LABEL: {{^}}load_private_offset_i8:
; GCN: buffer_load_ubyte v{{[0-9]+}}, off, s[4:7], 0 offset:8
define amdgpu_kernel void @load_private_offset_i8() #0 {
  %load = load volatile i8, i8 addrspace(5)* inttoptr (i32 8 to i8 addrspace(5)*)
  ret void
}

; GCN-LABEL: {{^}}sextload_private_offset_i8:
; GCN: buffer_load_sbyte v{{[0-9]+}}, off, s[4:7], 0 offset:8
define amdgpu_kernel void @sextload_private_offset_i8(i32 addrspace(1)* %out) #0 {
  %load = load volatile i8, i8 addrspace(5)* inttoptr (i32 8 to i8 addrspace(5)*)
  %sextload = sext i8 %load to i32
  store i32 %sextload, i32 addrspace(1)* undef
  ret void
}

; GCN-LABEL: {{^}}zextload_private_offset_i8:
; GCN: buffer_load_ubyte v{{[0-9]+}}, off, s[4:7], 0 offset:8
define amdgpu_kernel void @zextload_private_offset_i8(i32 addrspace(1)* %out) #0 {
  %load = load volatile i8, i8 addrspace(5)* inttoptr (i32 8 to i8 addrspace(5)*)
  %zextload = zext i8 %load to i32
  store i32 %zextload, i32 addrspace(1)* undef
  ret void
}

; GCN-LABEL: {{^}}load_private_offset_i16:
; GCN: buffer_load_ushort v{{[0-9]+}}, off, s[4:7], 0 offset:8
define amdgpu_kernel void @load_private_offset_i16() #0 {
  %load = load volatile i16, i16 addrspace(5)* inttoptr (i32 8 to i16 addrspace(5)*)
  ret void
}

; GCN-LABEL: {{^}}sextload_private_offset_i16:
; GCN: buffer_load_sshort v{{[0-9]+}}, off, s[4:7], 0 offset:8
define amdgpu_kernel void @sextload_private_offset_i16(i32 addrspace(1)* %out) #0 {
  %load = load volatile i16, i16 addrspace(5)* inttoptr (i32 8 to i16 addrspace(5)*)
  %sextload = sext i16 %load to i32
  store i32 %sextload, i32 addrspace(1)* undef
  ret void
}

; GCN-LABEL: {{^}}zextload_private_offset_i16:
; GCN: buffer_load_ushort v{{[0-9]+}}, off, s[4:7], 0 offset:8
define amdgpu_kernel void @zextload_private_offset_i16(i32 addrspace(1)* %out) #0 {
  %load = load volatile i16, i16 addrspace(5)* inttoptr (i32 8 to i16 addrspace(5)*)
  %zextload = zext i16 %load to i32
  store i32 %zextload, i32 addrspace(1)* undef
  ret void
}

; GCN-LABEL: {{^}}load_private_offset_i32:
; GCN: buffer_load_dword v{{[0-9]+}}, off, s[4:7], 0 offset:8
define amdgpu_kernel void @load_private_offset_i32() #0 {
  %load = load volatile i32, i32 addrspace(5)* inttoptr (i32 8 to i32 addrspace(5)*)
  ret void
}

; GCN-LABEL: {{^}}load_private_offset_v2i32:
; GCN: buffer_load_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], 0 offset:8
define amdgpu_kernel void @load_private_offset_v2i32() #0 {
  %load = load volatile <2 x i32>, <2 x i32> addrspace(5)* inttoptr (i32 8 to <2 x i32> addrspace(5)*)
  ret void
}

; GCN-LABEL: {{^}}load_private_offset_v4i32:
; GCN: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], 0 offset:8
define amdgpu_kernel void @load_private_offset_v4i32() #0 {
  %load = load volatile <4 x i32>, <4 x i32> addrspace(5)* inttoptr (i32 8 to <4 x i32> addrspace(5)*)
  ret void
}

; GCN-LABEL: {{^}}store_private_offset_i8_max_offset:
; GCN: buffer_store_byte v{{[0-9]+}}, off, s[4:7], 0 offset:4095
define amdgpu_kernel void @store_private_offset_i8_max_offset() #0 {
  store volatile i8 5, i8 addrspace(5)* inttoptr (i32 4095 to i8 addrspace(5)*)
  ret void
}
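
; The MUBUF immediate offset field is 12 bits, so 4095 (0xfff) above is the
; largest offset that can be encoded directly. For 4096 (0x1000) and 4097
; below, the compiler instead materializes 0x1000 in a VGPR and addresses
; with offen, leaving the remaining byte of 4097 in the immediate offset.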

; GCN-LABEL: {{^}}store_private_offset_i8_max_offset_plus1:
; GCN: v_mov_b32_e32 [[OFFSET:v[0-9]+]], 0x1000
; GCN: buffer_store_byte v{{[0-9]+}}, [[OFFSET]], s[4:7], 0 offen{{$}}
define amdgpu_kernel void @store_private_offset_i8_max_offset_plus1() #0 {
  store volatile i8 5, i8 addrspace(5)* inttoptr (i32 4096 to i8 addrspace(5)*)
  ret void
}

; GCN-LABEL: {{^}}store_private_offset_i8_max_offset_plus2:
; GCN: v_mov_b32_e32 [[OFFSET:v[0-9]+]], 0x1000
; GCN: buffer_store_byte v{{[0-9]+}}, [[OFFSET]], s[4:7], 0 offen offset:1{{$}}
define amdgpu_kernel void @store_private_offset_i8_max_offset_plus2() #0 {
  store volatile i8 5, i8 addrspace(5)* inttoptr (i32 4097 to i8 addrspace(5)*)
  ret void
}

; MUBUF used for stack access has bounds checking enabled before gfx9,
; so a possibly negative base index can't be used for the vgpr offset.

; GCN-LABEL: {{^}}store_private_unknown_bits_vaddr:
; SICIVI: v_add_{{i|u}}32_e32 [[ADDR0:v[0-9]+]], vcc, 4
; SICIVI: v_add_{{i|u}}32_e32 [[ADDR1:v[0-9]+]], vcc, 32, [[ADDR0]]
; SICIVI: buffer_store_dword v{{[0-9]+}}, [[ADDR1]], s{{\[[0-9]+:[0-9]+\]}}, 0 offen{{$}}

; GFX9: v_add_u32_e32 [[ADDR:v[0-9]+]], 4,
; GFX9: buffer_store_dword v{{[0-9]+}}, [[ADDR]], s{{\[[0-9]+:[0-9]+\]}}, 0 offen offset:32
define amdgpu_kernel void @store_private_unknown_bits_vaddr() #0 {
  %alloca = alloca [16 x i32], align 4, addrspace(5)
  %vaddr = load volatile i32, i32 addrspace(1)* undef
  %vaddr.off = add i32 %vaddr, 8
  %gep = getelementptr inbounds [16 x i32], [16 x i32] addrspace(5)* %alloca, i32 0, i32 %vaddr.off
  store volatile i32 9, i32 addrspace(5)* %gep
  ret void
}
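
; The address stored to above is %alloca + 4 * (%vaddr + 8): the scaled
; index plus a constant 32 bytes (and a +4 base offset for the object
; within the scratch frame, visible in the first add). gfx9 can keep the
; +32 in the immediate offset field; earlier targets must add it into the
; VGPR so the bounds check sees the full address.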

attributes #0 = { nounwind }