mirror of
https://github.com/RPCS3/llvm-mirror.git
synced 2024-11-26 04:32:44 +01:00
196e7f3138
Replace individual operands GLC, SLC, and DLC with a single cache_policy bitmask operand. This will reduce the number of operands in MIR and I hope the amount of code. These operands are mostly 0 anyway. Additional advantage that parser will accept these flags in any order unlike now. Differential Revision: https://reviews.llvm.org/D96469
122 lines
5.4 KiB
YAML
122 lines
5.4 KiB
YAML
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
|
|
# RUN: llc -march=amdgcn -verify-machineinstrs -run-pass=si-optimize-exec-masking-pre-ra %s -o - | FileCheck -check-prefix=GCN %s
|
|
|
|
--- |
|
|
define amdgpu_kernel void @call_no_explicit_exec_dependency () {
|
|
unreachable
|
|
}
|
|
|
|
declare void @func()
|
|
|
|
...
|
|
|
|
# Call should be assumed to read exec
|
|
---
|
|
name: call_no_explicit_exec_dependency
|
|
tracksRegLiveness: true
|
|
liveins:
|
|
- { reg: '$vgpr0', virtual-reg: '%0' }
|
|
- { reg: '$sgpr0_sgpr1', virtual-reg: '%1' }
|
|
machineFunctionInfo:
|
|
isEntryFunction: true
|
|
body: |
|
|
; GCN-LABEL: name: call_no_explicit_exec_dependency
|
|
; GCN: bb.0:
|
|
; GCN: successors: %bb.1(0x40000000), %bb.4(0x40000000)
|
|
; GCN: liveins: $vgpr0, $sgpr0_sgpr1
|
|
; GCN: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr0_sgpr1
|
|
; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
|
|
; GCN: [[V_CMP_LT_U32_e64_:%[0-9]+]]:sreg_64 = V_CMP_LT_U32_e64 1, [[COPY1]], implicit $exec
|
|
; GCN: [[COPY2:%[0-9]+]]:sreg_64 = COPY $exec, implicit-def $exec
|
|
; GCN: [[S_AND_B64_:%[0-9]+]]:sreg_64 = S_AND_B64 [[COPY2]], [[V_CMP_LT_U32_e64_]], implicit-def dead $scc
|
|
; GCN: $exec = S_MOV_B64_term [[S_AND_B64_]]
|
|
; GCN: S_CBRANCH_EXECZ %bb.4, implicit $exec
|
|
; GCN: S_BRANCH %bb.1
|
|
; GCN: bb.1:
|
|
; GCN: successors: %bb.2(0x40000000), %bb.3(0x40000000)
|
|
; GCN: undef %5.sub0_sub1:sgpr_128 = S_LOAD_DWORDX2_IMM [[COPY]], 9, 0 :: (dereferenceable invariant load 8, align 4, addrspace 4)
|
|
; GCN: undef %6.sub0:vreg_64 = V_LSHLREV_B32_e32 2, [[COPY1]], implicit $exec
|
|
; GCN: %6.sub1:vreg_64 = V_MOV_B32_e32 0, implicit $exec
|
|
; GCN: [[COPY3:%[0-9]+]]:vgpr_32 = COPY %5.sub1
|
|
; GCN: undef %8.sub0:vreg_64, %9:sreg_64_xexec = V_ADD_CO_U32_e64 %5.sub0, %6.sub0, 0, implicit $exec
|
|
; GCN: %8.sub1:vreg_64, dead %10:sreg_64_xexec = V_ADDC_U32_e64 0, [[COPY3]], %9, 0, implicit $exec
|
|
; GCN: %5.sub3:sgpr_128 = S_MOV_B32 61440
|
|
; GCN: %5.sub2:sgpr_128 = S_MOV_B32 0
|
|
; GCN: BUFFER_STORE_DWORD_ADDR64 %6.sub1, %6, %5, 0, 0, 0, 0, 0, implicit $exec :: (store 4, addrspace 1)
|
|
; GCN: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_64 = V_CMP_NE_U32_e64 2, [[COPY1]], implicit $exec
|
|
; GCN: [[COPY4:%[0-9]+]]:sreg_64 = COPY $exec, implicit-def $exec
|
|
; GCN: [[S_AND_B64_1:%[0-9]+]]:sreg_64 = S_AND_B64 [[COPY4]], [[V_CMP_NE_U32_e64_]], implicit-def dead $scc
|
|
; GCN: $exec = S_MOV_B64_term [[S_AND_B64_1]]
|
|
; GCN: S_CBRANCH_EXECZ %bb.3, implicit $exec
|
|
; GCN: S_BRANCH %bb.2
|
|
; GCN: bb.2:
|
|
; GCN: successors: %bb.3(0x80000000)
|
|
; GCN: %5.sub0:sgpr_128 = COPY %5.sub2
|
|
; GCN: %5.sub1:sgpr_128 = COPY %5.sub2
|
|
; GCN: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 1, implicit $exec
|
|
; GCN: BUFFER_STORE_DWORD_ADDR64 [[V_MOV_B32_e32_]], %8, %5, 0, 4, 0, 0, 0, implicit $exec :: (store 4, addrspace 1)
|
|
; GCN: bb.3:
|
|
; GCN: successors: %bb.4(0x80000000)
|
|
; GCN: $exec = S_OR_B64 $exec, [[COPY4]], implicit-def $scc
|
|
; GCN: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
|
|
; GCN: dead %16:sreg_64 = SI_CALL [[DEF]], @func, csr_amdgpu_highregs
|
|
; GCN: bb.4:
|
|
; GCN: $exec = S_OR_B64 $exec, [[COPY2]], implicit-def $scc
|
|
; GCN: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 3, implicit $exec
|
|
; GCN: [[V_MOV_B32_e32_2:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
|
|
; GCN: $m0 = S_MOV_B32 -1
|
|
; GCN: DS_WRITE_B32 [[V_MOV_B32_e32_2]], [[V_MOV_B32_e32_1]], 0, 0, implicit $m0, implicit $exec :: (store 4, addrspace 3)
|
|
; GCN: S_ENDPGM 0
|
|
bb.0:
|
|
successors: %bb.1, %bb.4
|
|
liveins: $vgpr0, $sgpr0_sgpr1
|
|
|
|
%1:sgpr_64 = COPY $sgpr0_sgpr1
|
|
%0:vgpr_32 = COPY $vgpr0
|
|
%2:sreg_64 = V_CMP_LT_U32_e64 1, %0, implicit $exec
|
|
%3:sreg_64 = COPY $exec, implicit-def $exec
|
|
%4:sreg_64 = S_AND_B64 %3, %2, implicit-def dead $scc
|
|
$exec = S_MOV_B64_term %4
|
|
S_CBRANCH_EXECZ %bb.4, implicit $exec
|
|
S_BRANCH %bb.1
|
|
|
|
bb.1:
|
|
successors: %bb.2, %bb.3
|
|
|
|
undef %5.sub0_sub1:sgpr_128 = S_LOAD_DWORDX2_IMM %1, 9, 0 :: (dereferenceable invariant load 8, align 4, addrspace 4)
|
|
undef %6.sub0:vreg_64 = V_LSHLREV_B32_e32 2, %0, implicit $exec
|
|
%6.sub1:vreg_64 = V_MOV_B32_e32 0, implicit $exec
|
|
%7:vgpr_32 = COPY %5.sub1
|
|
undef %8.sub0:vreg_64, %9:sreg_64_xexec = V_ADD_CO_U32_e64 %5.sub0, %6.sub0, 0, implicit $exec
|
|
%8.sub1:vreg_64, dead %10:sreg_64_xexec = V_ADDC_U32_e64 0, %7, %9, 0, implicit $exec
|
|
%5.sub3:sgpr_128 = S_MOV_B32 61440
|
|
%5.sub2:sgpr_128 = S_MOV_B32 0
|
|
BUFFER_STORE_DWORD_ADDR64 %6.sub1, %6, %5, 0, 0, 0, 0, 0, implicit $exec :: (store 4, addrspace 1)
|
|
%11:sreg_64 = V_CMP_NE_U32_e64 2, %0, implicit $exec
|
|
%12:sreg_64 = COPY $exec, implicit-def $exec
|
|
%13:sreg_64 = S_AND_B64 %12, %11, implicit-def dead $scc
|
|
$exec = S_MOV_B64_term %13
|
|
S_CBRANCH_EXECZ %bb.3, implicit $exec
|
|
S_BRANCH %bb.2
|
|
|
|
bb.2:
|
|
%5.sub0:sgpr_128 = COPY %5.sub2
|
|
%5.sub1:sgpr_128 = COPY %5.sub2
|
|
%14:vgpr_32 = V_MOV_B32_e32 1, implicit $exec
|
|
BUFFER_STORE_DWORD_ADDR64 %14, %8, %5, 0, 4, 0, 0, 0, implicit $exec :: (store 4, addrspace 1)
|
|
|
|
bb.3:
|
|
$exec = S_OR_B64 $exec, %12, implicit-def $scc
|
|
%20:sreg_64 = IMPLICIT_DEF
|
|
%21:sreg_64 = SI_CALL %20, @func, csr_amdgpu_highregs
|
|
|
|
bb.4:
|
|
$exec = S_OR_B64 $exec, %3, implicit-def $scc
|
|
%17:vgpr_32 = V_MOV_B32_e32 3, implicit $exec
|
|
%18:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
|
|
$m0 = S_MOV_B32 -1
|
|
DS_WRITE_B32 %18, %17, 0, 0, implicit $m0, implicit $exec :: (store 4, addrspace 3)
|
|
S_ENDPGM 0
|
|
|
|
...
|