mirror of https://github.com/RPCS3/llvm-mirror.git
synced 2024-11-23 03:02:36 +01:00
cd7c37e898
The function only checks that the instruction reads a super-register containing the requested physical register. However, a read of a sub-register is also a use of the super-register, so that check has been added. In particular, MI->readsRegister() is broken because of the missing check. The resulting check is essentially regsOverlap().

Differential Revision: https://reviews.llvm.org/D54128

llvm-svn: 346686
553 lines · 16 KiB · YAML
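The check described in the commit message amounts to a register-overlap test: an instruction reads a physical register if any of its uses shares at least one register unit with it, which covers both super- and sub-register uses. Below is a minimal, self-contained C++ sketch of that rule under stated assumptions; it is not LLVM's implementation, and the RegUnits table, ToyInstr type, and register names are hypothetical stand-ins used only to illustrate why a sub-register read must count as a read of the copied super-register.

// Sketch of the overlap rule from the commit message above. NOT LLVM's
// code: RegUnits, ToyInstr and readsRegister() are illustrative stand-ins.
#include <algorithm>
#include <cassert>
#include <string>
#include <unordered_map>
#include <vector>

// Each physical register maps to the set of register units it covers;
// sgpr0_sgpr1 is the super-register of sgpr0 and sgpr1, as in the test.
static const std::unordered_map<std::string, std::vector<int>> RegUnits = {
    {"sgpr0", {0}}, {"sgpr1", {1}}, {"sgpr0_sgpr1", {0, 1}}, {"exec", {2, 3}},
};

// Two registers overlap iff they share at least one register unit.
static bool regsOverlap(const std::string &A, const std::string &B) {
  for (int U : RegUnits.at(A))
    for (int V : RegUnits.at(B))
      if (U == V)
        return true;
  return false;
}

// A toy "instruction" reads Reg if any of its uses overlaps Reg. The broken
// variant only accepted an exact match or a super-register use, missing the
// sub-register case the commit fixes.
struct ToyInstr {
  std::vector<std::string> Uses;
  bool readsRegister(const std::string &Reg) const {
    return std::any_of(Uses.begin(), Uses.end(), [&](const std::string &U) {
      return regsOverlap(U, Reg);
    });
  }
};

int main() {
  // "$sgpr4 = S_MOV_B32 $sgpr1" reads sgpr1, a sub-register of the exec
  // copy sgpr0_sgpr1: with the fixed check this counts as a read of
  // sgpr0_sgpr1, which is what blocks the optimization in the final test.
  ToyInstr MovFromSub{{"sgpr1"}};
  assert(MovFromSub.readsRegister("sgpr0_sgpr1"));
  assert(!MovFromSub.readsRegister("exec"));
  return 0;
}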
# RUN: llc -march=amdgcn -verify-machineinstrs -run-pass si-optimize-exec-masking -o - %s | FileCheck %s

--- |
  define amdgpu_kernel void @optimize_if_and_saveexec_xor(i32 %z, i32 %v) {
  main_body:
    br i1 undef, label %if, label %end

  if: ; preds = %main_body
    %v.if = load volatile i32, i32 addrspace(1)* undef
    br label %end

  end: ; preds = %if, %main_body
    ret void
  }

  define amdgpu_kernel void @optimize_if_and_saveexec(i32 %z, i32 %v) {
  main_body:
    br i1 undef, label %if, label %end

  if:
    br label %end

  end:
    ret void
  }

  define amdgpu_kernel void @optimize_if_or_saveexec(i32 %z, i32 %v) {
  main_body:
    br i1 undef, label %if, label %end

  if:
    br label %end

  end:
    ret void
  }

  define amdgpu_kernel void @optimize_if_and_saveexec_xor_valu_middle(i32 %z, i32 %v) {
  main_body:
    br i1 undef, label %if, label %end

  if: ; preds = %main_body
    br label %end

  end: ; preds = %if, %main_body
    ret void
  }

  define amdgpu_kernel void @optimize_if_and_saveexec_xor_wrong_reg(i32 %z, i32 %v) {
  main_body:
    br i1 undef, label %if, label %end

  if:
    br label %end

  end:
    ret void
  }

  define amdgpu_kernel void @optimize_if_and_saveexec_xor_modify_copy_to_exec(i32 %z, i32 %v) {
  main_body:
    br i1 undef, label %if, label %end

  if:
    br label %end

  end:
    ret void
  }

  define amdgpu_kernel void @optimize_if_and_saveexec_xor_live_out_setexec(i32 %z, i32 %v) {
  main_body:
    br i1 undef, label %if, label %end

  if:
    br label %end

  end:
    ret void
  }

  define amdgpu_kernel void @optimize_if_unknown_saveexec(i32 %z, i32 %v) {
  main_body:
    br i1 undef, label %if, label %end

  if:
    br label %end

  end:
    ret void
  }

  define amdgpu_kernel void @optimize_if_andn2_saveexec(i32 %z, i32 %v) {
  main_body:
    br i1 undef, label %if, label %end

  if:
    br label %end

  end:
    ret void
  }

  define amdgpu_kernel void @optimize_if_andn2_saveexec_no_commute(i32 %z, i32 %v) {
  main_body:
    br i1 undef, label %if, label %end

  if:
    br label %end

  end:
    ret void
  }

  define amdgpu_kernel void @if_and_xor_read_exec_copy_subreg() {
  main_body:
    br i1 undef, label %if, label %end

  if: ; preds = %main_body
    br label %end

  end: ; preds = %if, %main_body
    ret void
  }

...
---
# CHECK-LABEL: name: optimize_if_and_saveexec_xor{{$}}
# CHECK: $sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec
# CHECK-NEXT: $sgpr0_sgpr1 = S_XOR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
# CHECK-NEXT: SI_MASK_BRANCH

name: optimize_if_and_saveexec_xor
liveins:
  - { reg: '$vgpr0' }
body: |
  bb.0.main_body:
    liveins: $vgpr0

    $sgpr0_sgpr1 = COPY $exec
    $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
    $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
    $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
    $exec = S_MOV_B64_term killed $sgpr2_sgpr3
    SI_MASK_BRANCH %bb.2, implicit $exec
    S_BRANCH %bb.1

  bb.1.if:
    liveins: $sgpr0_sgpr1

    $sgpr7 = S_MOV_B32 61440
    $sgpr6 = S_MOV_B32 -1
    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec

  bb.2.end:
    liveins: $vgpr0, $sgpr0_sgpr1

    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
    $sgpr3 = S_MOV_B32 61440
    $sgpr2 = S_MOV_B32 -1
    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec
    S_ENDPGM

...
---
# CHECK-LABEL: name: optimize_if_and_saveexec{{$}}
# CHECK: $sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec
# CHECK-NEXT: SI_MASK_BRANCH

name: optimize_if_and_saveexec
liveins:
  - { reg: '$vgpr0' }
body: |
  bb.0.main_body:
    liveins: $vgpr0

    $sgpr0_sgpr1 = COPY $exec
    $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
    $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
    $exec = S_MOV_B64_term killed $sgpr2_sgpr3
    SI_MASK_BRANCH %bb.2, implicit $exec
    S_BRANCH %bb.1

  bb.1.if:
    liveins: $sgpr0_sgpr1

    $sgpr7 = S_MOV_B32 61440
    $sgpr6 = S_MOV_B32 -1
    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec

  bb.2.end:
    liveins: $vgpr0, $sgpr0_sgpr1

    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
    $sgpr3 = S_MOV_B32 61440
    $sgpr2 = S_MOV_B32 -1
    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec
    S_ENDPGM

...
---
# CHECK-LABEL: name: optimize_if_or_saveexec{{$}}
# CHECK: $sgpr0_sgpr1 = S_OR_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec
# CHECK-NEXT: SI_MASK_BRANCH

name: optimize_if_or_saveexec
liveins:
  - { reg: '$vgpr0' }
body: |
  bb.0.main_body:
    liveins: $vgpr0

    $sgpr0_sgpr1 = COPY $exec
    $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
    $sgpr2_sgpr3 = S_OR_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
    $exec = S_MOV_B64_term killed $sgpr2_sgpr3
    SI_MASK_BRANCH %bb.2, implicit $exec
    S_BRANCH %bb.1

  bb.1.if:
    liveins: $sgpr0_sgpr1

    $sgpr7 = S_MOV_B32 61440
    $sgpr6 = S_MOV_B32 -1
    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec

  bb.2.end:
    liveins: $vgpr0, $sgpr0_sgpr1

    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
    $sgpr3 = S_MOV_B32 61440
    $sgpr2 = S_MOV_B32 -1
    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec
    S_ENDPGM

...
---
# CHECK-LABEL: name: optimize_if_and_saveexec_xor_valu_middle
# CHECK: $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
# CHECK-NEXT: BUFFER_STORE_DWORD_OFFSET $vgpr0, undef $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec
# CHECK-NEXT: $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
# CHECK-NEXT: $exec = COPY killed $sgpr2_sgpr3
# CHECK-NEXT: SI_MASK_BRANCH
name: optimize_if_and_saveexec_xor_valu_middle
liveins:
  - { reg: '$vgpr0' }
body: |
  bb.0.main_body:
    liveins: $vgpr0

    $sgpr0_sgpr1 = COPY $exec
    $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
    $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
    BUFFER_STORE_DWORD_OFFSET $vgpr0, undef $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec
    $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
    $exec = S_MOV_B64_term killed $sgpr2_sgpr3
    SI_MASK_BRANCH %bb.2, implicit $exec
    S_BRANCH %bb.1

  bb.1.if:
    liveins: $sgpr0_sgpr1

    $sgpr7 = S_MOV_B32 61440
    $sgpr6 = S_MOV_B32 -1
    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec

  bb.2.end:
    liveins: $vgpr0, $sgpr0_sgpr1

    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
    $sgpr3 = S_MOV_B32 61440
    $sgpr2 = S_MOV_B32 -1
    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec
    S_ENDPGM

...
---
# CHECK-LABEL: name: optimize_if_and_saveexec_xor_wrong_reg{{$}}
# CHECK: $sgpr0_sgpr1 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
# CHECK-NEXT: $sgpr0_sgpr1 = S_XOR_B64 undef $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
# CHECK-NEXT: $exec = COPY $sgpr0_sgpr1
# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit $exec
name: optimize_if_and_saveexec_xor_wrong_reg
liveins:
  - { reg: '$vgpr0' }
body: |
  bb.0.main_body:
    liveins: $vgpr0

    $sgpr6 = S_MOV_B32 -1
    $sgpr7 = S_MOV_B32 61440
    $sgpr0_sgpr1 = COPY $exec
    $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
    $sgpr0_sgpr1 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
    $sgpr0_sgpr1 = S_XOR_B64 undef $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
    $exec = S_MOV_B64_term $sgpr0_sgpr1
    SI_MASK_BRANCH %bb.2, implicit $exec
    S_BRANCH %bb.1

  bb.1.if:
    liveins: $sgpr0_sgpr1, $sgpr4_sgpr5_sgpr6_sgpr7
    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec

  bb.2.end:
    liveins: $vgpr0, $sgpr0_sgpr1, $sgpr4_sgpr5_sgpr6_sgpr7

    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
    $sgpr3 = S_MOV_B32 61440
    $sgpr2 = S_MOV_B32 -1
    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec
    S_ENDPGM

...
---
# CHECK-LABEL: name: optimize_if_and_saveexec_xor_modify_copy_to_exec{{$}}
# CHECK: $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
# CHECK-NEXT: $sgpr2_sgpr3 = S_OR_B64 killed $sgpr2_sgpr3, 1, implicit-def $scc
# CHECK-NEXT: $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
# CHECK-NEXT: $exec = COPY killed $sgpr2_sgpr3
# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit $exec

name: optimize_if_and_saveexec_xor_modify_copy_to_exec
liveins:
  - { reg: '$vgpr0' }
body: |
  bb.0.main_body:
    liveins: $vgpr0

    $sgpr0_sgpr1 = COPY $exec
    $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
    $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
    $sgpr2_sgpr3 = S_OR_B64 killed $sgpr2_sgpr3, 1, implicit-def $scc
    $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
    $exec = S_MOV_B64_term killed $sgpr2_sgpr3
    SI_MASK_BRANCH %bb.2, implicit $exec
    S_BRANCH %bb.1

  bb.1.if:
    liveins: $sgpr0_sgpr1

    $sgpr7 = S_MOV_B32 61440
    $sgpr6 = S_MOV_B32 -1
    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec

  bb.2.end:
    liveins: $vgpr0, $sgpr0_sgpr1

    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
    $sgpr0 = S_MOV_B32 0
    $sgpr1 = S_MOV_B32 1
    $sgpr2 = S_MOV_B32 -1
    $sgpr3 = S_MOV_B32 61440
    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec
    S_ENDPGM

...
---
# CHECK-LABEL: name: optimize_if_and_saveexec_xor_live_out_setexec{{$}}
# CHECK: $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
# CHECK-NEXT: $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
# CHECK-NEXT: $exec = COPY $sgpr2_sgpr3
# CHECK-NEXT: SI_MASK_BRANCH
name: optimize_if_and_saveexec_xor_live_out_setexec
liveins:
  - { reg: '$vgpr0' }
body: |
  bb.0.main_body:
    liveins: $vgpr0

    $sgpr0_sgpr1 = COPY $exec
    $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
    $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
    $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
    $exec = S_MOV_B64_term $sgpr2_sgpr3
    SI_MASK_BRANCH %bb.2, implicit $exec
    S_BRANCH %bb.1

  bb.1.if:
    liveins: $sgpr0_sgpr1, $sgpr2_sgpr3
    S_SLEEP 0, implicit $sgpr2_sgpr3
    $sgpr7 = S_MOV_B32 61440
    $sgpr6 = S_MOV_B32 -1
    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec

  bb.2.end:
    liveins: $vgpr0, $sgpr0_sgpr1

    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
    $sgpr3 = S_MOV_B32 61440
    $sgpr2 = S_MOV_B32 -1
    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec
    S_ENDPGM

...
---
# CHECK-LABEL: name: optimize_if_unknown_saveexec{{$}}
# CHECK: $sgpr0_sgpr1 = COPY $exec
# CHECK: $sgpr2_sgpr3 = S_LSHR_B64 $sgpr0_sgpr1, killed $vcc_lo, implicit-def $scc
# CHECK-NEXT: $exec = COPY killed $sgpr2_sgpr3
# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit $exec

name: optimize_if_unknown_saveexec
liveins:
  - { reg: '$vgpr0' }
body: |
  bb.0.main_body:
    liveins: $vgpr0

    $sgpr0_sgpr1 = COPY $exec
    $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
    $sgpr2_sgpr3 = S_LSHR_B64 $sgpr0_sgpr1, killed $vcc_lo, implicit-def $scc
    $exec = S_MOV_B64_term killed $sgpr2_sgpr3
    SI_MASK_BRANCH %bb.2, implicit $exec
    S_BRANCH %bb.1

  bb.1.if:
    liveins: $sgpr0_sgpr1

    $sgpr7 = S_MOV_B32 61440
    $sgpr6 = S_MOV_B32 -1
    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec

  bb.2.end:
    liveins: $vgpr0, $sgpr0_sgpr1

    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
    $sgpr3 = S_MOV_B32 61440
    $sgpr2 = S_MOV_B32 -1
    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec
    S_ENDPGM

...
---
# CHECK-LABEL: name: optimize_if_andn2_saveexec{{$}}
# CHECK: $sgpr0_sgpr1 = S_ANDN2_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec
# CHECK-NEXT: SI_MASK_BRANCH

name: optimize_if_andn2_saveexec
liveins:
  - { reg: '$vgpr0' }
body: |
  bb.0.main_body:
    liveins: $vgpr0

    $sgpr0_sgpr1 = COPY $exec
    $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
    $sgpr2_sgpr3 = S_ANDN2_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
    $exec = S_MOV_B64_term killed $sgpr2_sgpr3
    SI_MASK_BRANCH %bb.2, implicit $exec
    S_BRANCH %bb.1

  bb.1.if:
    liveins: $sgpr0_sgpr1

    $sgpr7 = S_MOV_B32 61440
    $sgpr6 = S_MOV_B32 -1
    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec

  bb.2.end:
    liveins: $vgpr0, $sgpr0_sgpr1

    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
    $sgpr3 = S_MOV_B32 61440
    $sgpr2 = S_MOV_B32 -1
    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec
    S_ENDPGM

...
---
# CHECK-LABEL: name: optimize_if_andn2_saveexec_no_commute{{$}}
# CHECK: $sgpr2_sgpr3 = S_ANDN2_B64 killed $vcc, $sgpr0_sgpr1, implicit-def $scc
# CHECK-NEXT: $exec = COPY killed $sgpr2_sgpr3
# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit $exec
name: optimize_if_andn2_saveexec_no_commute
liveins:
  - { reg: '$vgpr0' }
body: |
  bb.0.main_body:
    liveins: $vgpr0

    $sgpr0_sgpr1 = COPY $exec
    $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
    $sgpr2_sgpr3 = S_ANDN2_B64 killed $vcc, $sgpr0_sgpr1, implicit-def $scc
    $exec = S_MOV_B64_term killed $sgpr2_sgpr3
    SI_MASK_BRANCH %bb.2, implicit $exec
    S_BRANCH %bb.1

  bb.1.if:
    liveins: $sgpr0_sgpr1

    $sgpr7 = S_MOV_B32 61440
    $sgpr6 = S_MOV_B32 -1
    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec

  bb.2.end:
    liveins: $vgpr0, $sgpr0_sgpr1

    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
    $sgpr3 = S_MOV_B32 61440
    $sgpr2 = S_MOV_B32 -1
    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec
    S_ENDPGM

...
---
# A read of a sub-register of the exec copy prevents the optimization.
# CHECK-LABEL: name: if_and_xor_read_exec_copy_subreg{{$}}
# CHECK: $sgpr0_sgpr1 = COPY $exec
# CHECK-NEXT: $sgpr4 = S_MOV_B32 $sgpr1
name: if_and_xor_read_exec_copy_subreg
liveins:
  - { reg: '$vgpr0' }
body: |
  bb.0.main_body:
    liveins: $vgpr0

    $sgpr0_sgpr1 = COPY $exec
    $sgpr4 = S_MOV_B32 $sgpr1
    $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
    $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
    $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
    $exec = S_MOV_B64_term killed $sgpr2_sgpr3
    SI_MASK_BRANCH %bb.2, implicit $exec
    S_BRANCH %bb.1

  bb.1.if:
    liveins: $sgpr0_sgpr1

    $sgpr7 = S_MOV_B32 61440
    $sgpr6 = S_MOV_B32 -1
    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec

  bb.2.end:
    liveins: $vgpr0, $sgpr0_sgpr1

    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
    $sgpr3 = S_MOV_B32 61440
    $sgpr2 = S_MOV_B32 -1
    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec
    S_ENDPGM
...