Mirror of https://github.com/RPCS3/llvm-mirror.git, synced 2024-11-22 18:54:02 +01:00

Commit 74be1319be
AMDGPU normally spills SGPRs to VGPRs. Previously, since all register classes were handled at the same time, this was problematic: we don't know ahead of time how many registers will need to be reserved to handle the spilling. If no VGPRs were left for spilling, we would have to try to spill to memory. If the spilled SGPRs were required for exec mask manipulation, that is highly problematic, because the lanes active at the point of spill are not necessarily the same as at the restore point.

Avoid this problem by fully allocating SGPRs in a separate regalloc run from VGPRs. This way we know the exact number of VGPRs needed, and can reserve them for a second run. This fixes the most serious issues, but it is still possible to make all VGPRs unavailable using inline asm, so start erroring in the case where we would ever require memory for an SGPR spill.

This is implemented by giving each regalloc pass a callback which reports whether a register class should be handled or not. A few passes need small changes to deal with leftover virtual registers. In the AMDGPU implementation, a new pass is introduced to take the place of PrologEpilogInserter for SGPR spills emitted during the first run.

One disadvantage of this is that StackSlotColoring is currently no longer used for SGPR spills. It would need to be run again, which will require more work.

Error if the standard -regalloc option is used. Introduce new separate -sgpr-regalloc and -vgpr-regalloc flags, so the two runs can be controlled individually. PBQP is not currently supported, so this also prevents using the unhandled allocator.
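A rough sketch of the callback idea described above (illustrative only; the types and function names below are hypothetical stand-ins, not the actual LLVM interfaces): each allocation run is constructed with a predicate that reports whether it should handle a given register class, and any virtual register whose class is rejected is left as a leftover virtual register for a later run.

  #include <functional>
  #include <vector>

  // Hypothetical stand-ins for the real LLVM machinery.
  enum class RegClass { SGPR, VGPR };
  struct VirtReg { unsigned Id; RegClass RC; };

  // Each regalloc run is parameterized by a callback reporting whether a
  // register class should be handled in this run.
  using RegClassFilter = std::function<bool(RegClass)>;

  // Allocate only the virtual registers the filter accepts; the rest are
  // returned as leftovers for a later run.
  std::vector<VirtReg> runAllocation(const std::vector<VirtReg> &VRegs,
                                     const RegClassFilter &ShouldAllocate) {
    std::vector<VirtReg> Leftover;
    for (const VirtReg &VR : VRegs) {
      if (!ShouldAllocate(VR.RC)) {
        Leftover.push_back(VR); // deferred to the next allocator run
        continue;
      }
      // ... assign a physical register or a spill slot here ...
    }
    return Leftover;
  }

  int main() {
    std::vector<VirtReg> VRegs = {{0, RegClass::SGPR}, {1, RegClass::VGPR}};
    // First run: SGPRs only. Afterwards the number of VGPRs needed for
    // SGPR-to-VGPR spilling is known and can be reserved for the second run.
    std::vector<VirtReg> Remaining = runAllocation(
        VRegs, [](RegClass RC) { return RC == RegClass::SGPR; });
    // Second run: everything left over (the VGPRs).
    runAllocation(Remaining, [](RegClass) { return true; });
  }

With the split in place, the two runs can also be tuned separately from the command line, e.g. something like llc -sgpr-regalloc=greedy -vgpr-regalloc=fast; the flag names come from this change, while the specific allocator values are an assumption based on what the existing -regalloc option accepts (other than PBQP).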
235 lines
9.2 KiB
LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O0 -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

; The first 64 SGPR spills can go to a VGPR, but there isn't a second one
; available, so some spills must go to memory. The last 16-element spill runs
; out of lanes at the 15th element.
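; In the checked output below, the four 16-element defs fill all 64 lanes of
; the spill VGPR (v23), so the final two-element def has to be spilled to
; scratch memory instead.
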
define amdgpu_kernel void @partial_no_vgprs_last_sgpr_spill(i32 addrspace(1)* %out, i32 %in) #1 {
; GCN-LABEL: partial_no_vgprs_last_sgpr_spill:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_add_u32 s0, s0, s7
; GCN-NEXT:    s_addc_u32 s1, s1, 0
; GCN-NEXT:    s_load_dword s4, s[4:5], 0x2
; GCN-NEXT:    ;;#ASMSTART
; GCN-NEXT:    ;;#ASMEND
; GCN-NEXT:    ;;#ASMSTART
; GCN-NEXT:    ;;#ASMEND
; GCN-NEXT:    ;;#ASMSTART
; GCN-NEXT:    ;;#ASMEND
; GCN-NEXT:    ;;#ASMSTART
; GCN-NEXT:    ;;#ASMEND
; GCN-NEXT:    ;;#ASMSTART
; GCN-NEXT:    ;;#ASMEND
; GCN-NEXT:    ;;#ASMSTART
; GCN-NEXT:    ; def s[8:23]
; GCN-NEXT:    ;;#ASMEND
; GCN-NEXT:    v_writelane_b32 v23, s8, 0
; GCN-NEXT:    v_writelane_b32 v23, s9, 1
; GCN-NEXT:    v_writelane_b32 v23, s10, 2
; GCN-NEXT:    v_writelane_b32 v23, s11, 3
; GCN-NEXT:    v_writelane_b32 v23, s12, 4
; GCN-NEXT:    v_writelane_b32 v23, s13, 5
; GCN-NEXT:    v_writelane_b32 v23, s14, 6
; GCN-NEXT:    v_writelane_b32 v23, s15, 7
; GCN-NEXT:    v_writelane_b32 v23, s16, 8
; GCN-NEXT:    v_writelane_b32 v23, s17, 9
; GCN-NEXT:    v_writelane_b32 v23, s18, 10
; GCN-NEXT:    v_writelane_b32 v23, s19, 11
; GCN-NEXT:    v_writelane_b32 v23, s20, 12
; GCN-NEXT:    v_writelane_b32 v23, s21, 13
; GCN-NEXT:    v_writelane_b32 v23, s22, 14
; GCN-NEXT:    v_writelane_b32 v23, s23, 15
; GCN-NEXT:    ;;#ASMSTART
; GCN-NEXT:    ; def s[8:23]
; GCN-NEXT:    ;;#ASMEND
; GCN-NEXT:    v_writelane_b32 v23, s8, 16
; GCN-NEXT:    v_writelane_b32 v23, s9, 17
; GCN-NEXT:    v_writelane_b32 v23, s10, 18
; GCN-NEXT:    v_writelane_b32 v23, s11, 19
; GCN-NEXT:    v_writelane_b32 v23, s12, 20
; GCN-NEXT:    v_writelane_b32 v23, s13, 21
; GCN-NEXT:    v_writelane_b32 v23, s14, 22
; GCN-NEXT:    v_writelane_b32 v23, s15, 23
; GCN-NEXT:    v_writelane_b32 v23, s16, 24
; GCN-NEXT:    v_writelane_b32 v23, s17, 25
; GCN-NEXT:    v_writelane_b32 v23, s18, 26
; GCN-NEXT:    v_writelane_b32 v23, s19, 27
; GCN-NEXT:    v_writelane_b32 v23, s20, 28
; GCN-NEXT:    v_writelane_b32 v23, s21, 29
; GCN-NEXT:    v_writelane_b32 v23, s22, 30
; GCN-NEXT:    v_writelane_b32 v23, s23, 31
; GCN-NEXT:    ;;#ASMSTART
; GCN-NEXT:    ; def s[8:23]
; GCN-NEXT:    ;;#ASMEND
; GCN-NEXT:    v_writelane_b32 v23, s8, 32
; GCN-NEXT:    v_writelane_b32 v23, s9, 33
; GCN-NEXT:    v_writelane_b32 v23, s10, 34
; GCN-NEXT:    v_writelane_b32 v23, s11, 35
; GCN-NEXT:    v_writelane_b32 v23, s12, 36
; GCN-NEXT:    v_writelane_b32 v23, s13, 37
; GCN-NEXT:    v_writelane_b32 v23, s14, 38
; GCN-NEXT:    v_writelane_b32 v23, s15, 39
; GCN-NEXT:    v_writelane_b32 v23, s16, 40
; GCN-NEXT:    v_writelane_b32 v23, s17, 41
; GCN-NEXT:    v_writelane_b32 v23, s18, 42
; GCN-NEXT:    v_writelane_b32 v23, s19, 43
; GCN-NEXT:    v_writelane_b32 v23, s20, 44
; GCN-NEXT:    v_writelane_b32 v23, s21, 45
; GCN-NEXT:    v_writelane_b32 v23, s22, 46
; GCN-NEXT:    v_writelane_b32 v23, s23, 47
; GCN-NEXT:    ;;#ASMSTART
; GCN-NEXT:    ; def s[8:23]
; GCN-NEXT:    ;;#ASMEND
; GCN-NEXT:    v_writelane_b32 v23, s8, 48
; GCN-NEXT:    v_writelane_b32 v23, s9, 49
; GCN-NEXT:    v_writelane_b32 v23, s10, 50
; GCN-NEXT:    v_writelane_b32 v23, s11, 51
; GCN-NEXT:    v_writelane_b32 v23, s12, 52
; GCN-NEXT:    v_writelane_b32 v23, s13, 53
; GCN-NEXT:    v_writelane_b32 v23, s14, 54
; GCN-NEXT:    v_writelane_b32 v23, s15, 55
; GCN-NEXT:    v_writelane_b32 v23, s16, 56
; GCN-NEXT:    v_writelane_b32 v23, s17, 57
; GCN-NEXT:    v_writelane_b32 v23, s18, 58
; GCN-NEXT:    v_writelane_b32 v23, s19, 59
; GCN-NEXT:    v_writelane_b32 v23, s20, 60
; GCN-NEXT:    v_writelane_b32 v23, s21, 61
; GCN-NEXT:    v_writelane_b32 v23, s22, 62
; GCN-NEXT:    v_writelane_b32 v23, s23, 63
; GCN-NEXT:    ;;#ASMSTART
; GCN-NEXT:    ; def s[6:7]
; GCN-NEXT:    ;;#ASMEND
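; The remaining SGPR pair does not fit in v23, so it is spilled to scratch:
; save exec, enable only two lanes, preserve v0, write s6/s7 into v0 lanes,
; store v0 to the stack slot, then restore v0 and exec.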
; GCN-NEXT:    s_mov_b64 s[8:9], exec
; GCN-NEXT:    s_mov_b64 exec, 3
; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT:    v_writelane_b32 v0, s6, 0
; GCN-NEXT:    v_writelane_b32 v0, s7, 1
; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], 0 offset:4 ; 4-byte Folded Spill
; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], 0
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    s_mov_b64 exec, s[8:9]
; GCN-NEXT:    s_mov_b32 s5, 0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_cmp_lg_u32 s4, s5
; GCN-NEXT:    s_cbranch_scc1 BB0_2
; GCN-NEXT:  ; %bb.1: ; %bb0
; GCN-NEXT:    v_readlane_b32 s4, v23, 0
; GCN-NEXT:    v_readlane_b32 s5, v23, 1
; GCN-NEXT:    v_readlane_b32 s6, v23, 2
; GCN-NEXT:    v_readlane_b32 s7, v23, 3
; GCN-NEXT:    v_readlane_b32 s8, v23, 4
; GCN-NEXT:    v_readlane_b32 s9, v23, 5
; GCN-NEXT:    v_readlane_b32 s10, v23, 6
; GCN-NEXT:    v_readlane_b32 s11, v23, 7
; GCN-NEXT:    v_readlane_b32 s12, v23, 8
; GCN-NEXT:    v_readlane_b32 s13, v23, 9
; GCN-NEXT:    v_readlane_b32 s14, v23, 10
; GCN-NEXT:    v_readlane_b32 s15, v23, 11
; GCN-NEXT:    v_readlane_b32 s16, v23, 12
; GCN-NEXT:    v_readlane_b32 s17, v23, 13
; GCN-NEXT:    v_readlane_b32 s18, v23, 14
; GCN-NEXT:    v_readlane_b32 s19, v23, 15
; GCN-NEXT:    ;;#ASMSTART
; GCN-NEXT:    ; use s[4:19]
; GCN-NEXT:    ;;#ASMEND
; GCN-NEXT:    v_readlane_b32 s4, v23, 16
; GCN-NEXT:    v_readlane_b32 s5, v23, 17
; GCN-NEXT:    v_readlane_b32 s6, v23, 18
; GCN-NEXT:    v_readlane_b32 s7, v23, 19
; GCN-NEXT:    v_readlane_b32 s8, v23, 20
; GCN-NEXT:    v_readlane_b32 s9, v23, 21
; GCN-NEXT:    v_readlane_b32 s10, v23, 22
; GCN-NEXT:    v_readlane_b32 s11, v23, 23
; GCN-NEXT:    v_readlane_b32 s12, v23, 24
; GCN-NEXT:    v_readlane_b32 s13, v23, 25
; GCN-NEXT:    v_readlane_b32 s14, v23, 26
; GCN-NEXT:    v_readlane_b32 s15, v23, 27
; GCN-NEXT:    v_readlane_b32 s16, v23, 28
; GCN-NEXT:    v_readlane_b32 s17, v23, 29
; GCN-NEXT:    v_readlane_b32 s18, v23, 30
; GCN-NEXT:    v_readlane_b32 s19, v23, 31
; GCN-NEXT:    ;;#ASMSTART
; GCN-NEXT:    ; use s[4:19]
; GCN-NEXT:    ;;#ASMEND
; GCN-NEXT:    v_readlane_b32 s4, v23, 32
; GCN-NEXT:    v_readlane_b32 s5, v23, 33
; GCN-NEXT:    v_readlane_b32 s6, v23, 34
; GCN-NEXT:    v_readlane_b32 s7, v23, 35
; GCN-NEXT:    v_readlane_b32 s8, v23, 36
; GCN-NEXT:    v_readlane_b32 s9, v23, 37
; GCN-NEXT:    v_readlane_b32 s10, v23, 38
; GCN-NEXT:    v_readlane_b32 s11, v23, 39
; GCN-NEXT:    v_readlane_b32 s12, v23, 40
; GCN-NEXT:    v_readlane_b32 s13, v23, 41
; GCN-NEXT:    v_readlane_b32 s14, v23, 42
; GCN-NEXT:    v_readlane_b32 s15, v23, 43
; GCN-NEXT:    v_readlane_b32 s16, v23, 44
; GCN-NEXT:    v_readlane_b32 s17, v23, 45
; GCN-NEXT:    v_readlane_b32 s18, v23, 46
; GCN-NEXT:    v_readlane_b32 s19, v23, 47
; GCN-NEXT:    ;;#ASMSTART
; GCN-NEXT:    ; use s[4:19]
; GCN-NEXT:    ;;#ASMEND
; GCN-NEXT:    v_readlane_b32 s8, v23, 48
; GCN-NEXT:    v_readlane_b32 s9, v23, 49
; GCN-NEXT:    v_readlane_b32 s10, v23, 50
; GCN-NEXT:    v_readlane_b32 s11, v23, 51
; GCN-NEXT:    v_readlane_b32 s12, v23, 52
; GCN-NEXT:    v_readlane_b32 s13, v23, 53
; GCN-NEXT:    v_readlane_b32 s14, v23, 54
; GCN-NEXT:    v_readlane_b32 s15, v23, 55
; GCN-NEXT:    v_readlane_b32 s16, v23, 56
; GCN-NEXT:    v_readlane_b32 s17, v23, 57
; GCN-NEXT:    v_readlane_b32 s18, v23, 58
; GCN-NEXT:    v_readlane_b32 s19, v23, 59
; GCN-NEXT:    v_readlane_b32 s20, v23, 60
; GCN-NEXT:    v_readlane_b32 s21, v23, 61
; GCN-NEXT:    v_readlane_b32 s22, v23, 62
; GCN-NEXT:    v_readlane_b32 s23, v23, 63
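; Reload of the memory-spilled pair: save exec, enable two lanes, preserve v0,
; reload the spilled dword, read s4/s5 back out of v0, then restore v0 and exec.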
; GCN-NEXT:    s_mov_b64 s[6:7], exec
; GCN-NEXT:    s_mov_b64 exec, 3
; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], 0 offset:4 ; 4-byte Folded Reload
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    v_readlane_b32 s4, v0, 0
; GCN-NEXT:    v_readlane_b32 s5, v0, 1
; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], 0
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    s_mov_b64 exec, s[6:7]
; GCN-NEXT:    ;;#ASMSTART
; GCN-NEXT:    ; use s[8:23]
; GCN-NEXT:    ;;#ASMEND
; GCN-NEXT:    ;;#ASMSTART
; GCN-NEXT:    ; use s[4:5]
; GCN-NEXT:    ;;#ASMEND
; GCN-NEXT:  BB0_2: ; %ret
; GCN-NEXT:    s_endpgm
  call void asm sideeffect "", "~{v[0:7]}" () #0
  call void asm sideeffect "", "~{v[8:15]}" () #0
  call void asm sideeffect "", "~{v[16:19]}"() #0
  call void asm sideeffect "", "~{v[20:21]}"() #0
  call void asm sideeffect "", "~{v22}"() #0

  %wide.sgpr0 = call <16 x i32> asm sideeffect "; def $0", "=s" () #0
  %wide.sgpr1 = call <16 x i32> asm sideeffect "; def $0", "=s" () #0
  %wide.sgpr2 = call <16 x i32> asm sideeffect "; def $0", "=s" () #0
  %wide.sgpr3 = call <16 x i32> asm sideeffect "; def $0", "=s" () #0
  %wide.sgpr4 = call <2 x i32> asm sideeffect "; def $0", "=s" () #0
  %cmp = icmp eq i32 %in, 0
  br i1 %cmp, label %bb0, label %ret

bb0:
  call void asm sideeffect "; use $0", "s"(<16 x i32> %wide.sgpr0) #0
  call void asm sideeffect "; use $0", "s"(<16 x i32> %wide.sgpr1) #0
  call void asm sideeffect "; use $0", "s"(<16 x i32> %wide.sgpr2) #0
  call void asm sideeffect "; use $0", "s"(<16 x i32> %wide.sgpr3) #0
  call void asm sideeffect "; use $0", "s"(<2 x i32> %wide.sgpr4) #0
  br label %ret

ret:
  ret void
}

attributes #0 = { nounwind }
attributes #1 = { nounwind "amdgpu-waves-per-eu"="10,10" }