; test/CodeGen/AMDGPU/spill-wide-sgpr.ll
;
; [AMDGPU] Make SGPR spills exec mask agnostic
; Carl Ritson, commit 8b90fe296e, 2020-06-03
;
; Explicitly set the exec mask for SGPR spills and reloads. This fixes a
; bug where SGPR spills to memory could be incorrect if the exec mask was
; 0 (or differed between spill and reload).
;
; Additionally, pack scalar subregisters (up to 16/32 per VGPR) so that
; the majority of scalar types can be spilled or reloaded with a simple
; memory access. This should amortize some of the additional overhead of
; manipulating the exec mask.
;
; Reviewed By: arsenm
; Differential Revision: https://reviews.llvm.org/D80282

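; A rough sketch of the kind of sequence this scheme produces for a
; two-dword SGPR spill to scratch memory (the VMEM path checked below).
; Register numbers and the offset are illustrative only, and the
; save/restore of the temporary VGPR is omitted:
;
;   s_mov_b64 s[4:5], exec      ; save the current exec mask
;   s_mov_b64 exec, 3           ; force lanes 0-1 active for the store
;   v_writelane_b32 v0, s0, 0   ; pack both SGPRs into one VGPR
;   v_writelane_b32 v0, s1, 1
;   buffer_store_dword v0, off, s[96:99], 0 offset:4 ; one store, two dwords
;   s_mov_b64 exec, s[4:5]      ; restore exec
;
; The reload mirrors this: set exec, buffer_load_dword, s_waitcnt
; vmcnt(0), then v_readlane_b32 each SGPR back out, and restore exec.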

; RUN: llc -O0 -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=ALL -check-prefix=VGPR %s
; RUN: llc -O0 -march=amdgcn -mcpu=fiji -amdgpu-spill-sgpr-to-vgpr=0 -verify-machineinstrs < %s | FileCheck -check-prefix=ALL -check-prefix=VMEM %s
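
; Each test defines a wide SGPR tuple with inline asm and uses it on the
; far side of a branch; at -O0 this forces the tuple to be spilled before
; s_cbranch_scc1 and reloaded after it. The VGPR prefix checks the
; default path (SGPRs spilled to VGPR lanes); the VMEM prefix checks
; -amdgpu-spill-sgpr-to-vgpr=0 (SGPRs spilled through scratch memory).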
; ALL-LABEL: {{^}}spill_sgpr_x2:
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 0
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 1
; VGPR: s_cbranch_scc1
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 0
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 1
; VMEM: buffer_store_dword
; VMEM: s_cbranch_scc1
; VMEM: buffer_load_dword
define amdgpu_kernel void @spill_sgpr_x2(i32 addrspace(1)* %out, i32 %in) #0 {
%wide.sgpr = call <2 x i32> asm sideeffect "; def $0", "=s" () #0
%cmp = icmp eq i32 %in, 0
br i1 %cmp, label %bb0, label %ret
bb0:
call void asm sideeffect "; use $0", "s"(<2 x i32> %wide.sgpr) #0
br label %ret
ret:
ret void
}

; ALL-LABEL: {{^}}spill_sgpr_x3:
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 0
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 1
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 2
; VGPR: s_cbranch_scc1
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 0
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 1
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 2
; VMEM: buffer_store_dword
; VMEM: s_cbranch_scc1
; VMEM: buffer_load_dword
define amdgpu_kernel void @spill_sgpr_x3(i32 addrspace(1)* %out, i32 %in) #0 {
%wide.sgpr = call <3 x i32> asm sideeffect "; def $0", "=s" () #0
%cmp = icmp eq i32 %in, 0
br i1 %cmp, label %bb0, label %ret
bb0:
call void asm sideeffect "; use $0", "s"(<3 x i32> %wide.sgpr) #0
br label %ret
ret:
ret void
}

; ALL-LABEL: {{^}}spill_sgpr_x4:
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 0
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 1
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 2
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 3
; VGPR: s_cbranch_scc1
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 0
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 1
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 2
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 3
; VMEM: buffer_store_dword
; VMEM: s_cbranch_scc1
; VMEM: buffer_load_dword
define amdgpu_kernel void @spill_sgpr_x4(i32 addrspace(1)* %out, i32 %in) #0 {
%wide.sgpr = call <4 x i32> asm sideeffect "; def $0", "=s" () #0
%cmp = icmp eq i32 %in, 0
br i1 %cmp, label %bb0, label %ret
bb0:
call void asm sideeffect "; use $0", "s"(<4 x i32> %wide.sgpr) #0
br label %ret
ret:
ret void
}

; ALL-LABEL: {{^}}spill_sgpr_x5:
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 0
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 1
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 2
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 3
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 4
; VGPR: s_cbranch_scc1
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 0
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 1
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 2
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 3
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 4
; VMEM: buffer_store_dword
; VMEM: s_cbranch_scc1
; VMEM: buffer_load_dword
define amdgpu_kernel void @spill_sgpr_x5(i32 addrspace(1)* %out, i32 %in) #0 {
%wide.sgpr = call <5 x i32> asm sideeffect "; def $0", "=s" () #0
%cmp = icmp eq i32 %in, 0
br i1 %cmp, label %bb0, label %ret
bb0:
call void asm sideeffect "; use $0", "s"(<5 x i32> %wide.sgpr) #0
br label %ret
ret:
ret void
}

; ALL-LABEL: {{^}}spill_sgpr_x8:
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 0
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 1
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 2
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 3
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 4
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 5
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 6
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 7
; VGPR: s_cbranch_scc1
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 0
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 1
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 2
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 3
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 4
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 5
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 6
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 7
; VMEM: buffer_store_dword
; VMEM: s_cbranch_scc1
; VMEM: buffer_load_dword
define amdgpu_kernel void @spill_sgpr_x8(i32 addrspace(1)* %out, i32 %in) #0 {
%wide.sgpr = call <8 x i32> asm sideeffect "; def $0", "=s" () #0
%cmp = icmp eq i32 %in, 0
br i1 %cmp, label %bb0, label %ret
bb0:
call void asm sideeffect "; use $0", "s"(<8 x i32> %wide.sgpr) #0
br label %ret
ret:
ret void
}

; ALL-LABEL: {{^}}spill_sgpr_x16:
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 0
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 1
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 2
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 3
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 4
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 5
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 6
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 7
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 8
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 9
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 10
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 11
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 12
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 13
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 14
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 15
; VGPR: s_cbranch_scc1
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 0
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 1
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 2
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 3
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 4
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 5
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 6
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 7
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 8
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 9
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 10
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 11
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 12
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 13
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 14
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 15
; VMEM: buffer_store_dword
; VMEM: s_cbranch_scc1
; VMEM: buffer_load_dword
define amdgpu_kernel void @spill_sgpr_x16(i32 addrspace(1)* %out, i32 %in) #0 {
%wide.sgpr = call <16 x i32> asm sideeffect "; def $0", "=s" () #0
%cmp = icmp eq i32 %in, 0
br i1 %cmp, label %bb0, label %ret
bb0:
call void asm sideeffect "; use $0", "s"(<16 x i32> %wide.sgpr) #0
br label %ret
ret:
ret void
}

; ALL-LABEL: {{^}}spill_sgpr_x32:
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 0
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 1
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 2
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 3
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 4
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 5
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 6
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 7
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 8
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 9
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 10
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 11
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 12
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 13
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 14
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 15
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 16
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 17
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 18
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 19
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 20
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 21
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 22
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 23
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 24
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 25
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 26
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 27
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 28
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 29
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 30
; VGPR: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 31
; VGPR: s_cbranch_scc1
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 0
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 1
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 2
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 3
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 4
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 5
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 6
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 7
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 8
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 9
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 10
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 11
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 12
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 13
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 14
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 15
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 16
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 17
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 18
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 19
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 20
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 21
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 22
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 23
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 24
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 25
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 26
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 27
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 28
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 29
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 30
; VGPR: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 31
; VMEM: buffer_store_dword
; VMEM: s_cbranch_scc1
; VMEM: buffer_load_dword
define amdgpu_kernel void @spill_sgpr_x32(i32 addrspace(1)* %out, i32 %in) #0 {
%wide.sgpr = call <32 x i32> asm sideeffect "; def $0", "=s" () #0
%cmp = icmp eq i32 %in, 0
br i1 %cmp, label %bb0, label %ret
bb0:
call void asm sideeffect "; use $0", "s"(<32 x i32> %wide.sgpr) #0
br label %ret
ret:
ret void
}

attributes #0 = { nounwind }