mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-11-23 03:02:36 +01:00

[AMDGPU] Fix formatting in MIR tests

Jay Foad 2020-07-02 10:17:03 +01:00
parent 58e4673769
commit 079643288c
13 changed files with 55 additions and 55 deletions
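
Every change below is formatting-only, in line with the commit title: most hunks add the missing space between '=' and the opcode in a virtual-register assignment, and the remainder adjust only whitespace or indentation, which is why some removed and added lines read identically here. In the hunks that follow, '-' marks a line as it was and '+' its reformatted replacement. A representative pair, taken verbatim from the buffer-load tests further down:

- %2:sgpr_128 =COPY $sgpr96_sgpr97_sgpr98_sgpr99
+ %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99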


@ -207,7 +207,7 @@ body: |
; WAVE32: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
; WAVE32: S_ENDPGM 0, implicit [[COPY]]
%0:sreg_64_xexec = COPY $sgpr0_sgpr1
- %1:sreg_64_xexec = COPY %0
+ %1:sreg_64_xexec = COPY %0
S_ENDPGM 0, implicit %1
...


@ -13,7 +13,7 @@ body: |
; GCN-LABEL: name: trunc_sgpr_s32_to_s1
; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GCN: S_ENDPGM 0, implicit [[COPY]]
- %0:sgpr(s32) =COPY $sgpr0
+ %0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s1) = G_TRUNC %0
S_ENDPGM 0, implicit %1
...
@ -29,7 +29,7 @@ body: |
; GCN-LABEL: name: trunc_sgpr_s32_to_s16
; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GCN: S_ENDPGM 0, implicit [[COPY]]
- %0:sgpr(s32) =COPY $sgpr0
+ %0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s16) = G_TRUNC %0
S_ENDPGM 0, implicit %1
...
@ -198,7 +198,7 @@ body: |
; GCN-LABEL: name: trunc_vgpr_s32_to_s1
; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GCN: S_ENDPGM 0, implicit [[COPY]]
- %0:vgpr(s32) =COPY $vgpr0
+ %0:vgpr(s32) = COPY $vgpr0
%1:vgpr(s1) = G_TRUNC %0
S_ENDPGM 0, implicit %1
...
@ -214,7 +214,7 @@ body: |
; GCN-LABEL: name: trunc_vgpr_s32_to_s16
; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GCN: S_ENDPGM 0, implicit [[COPY]]
- %0:vgpr(s32) =COPY $vgpr0
+ %0:vgpr(s32) = COPY $vgpr0
%1:vgpr(s16) = G_TRUNC %0
S_ENDPGM 0, implicit %1
...
@ -381,8 +381,8 @@ regBankSelected: true
body: |
bb.0:
liveins: $sgpr0, $sgpr1
- %0:sgpr(s32) =COPY $sgpr0
- %1:sgpr(s32) =COPY $sgpr1
+ %0:sgpr(s32) = COPY $sgpr0
+ %1:sgpr(s32) = COPY $sgpr1
%2:sgpr(s1) = G_TRUNC %0
%3:sgpr(s32) = G_SELECT %2, %0, %1
S_ENDPGM 0, implicit %3


@ -30,7 +30,7 @@ body: |
; GFX8: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY1]], [[S_MOV_B32_]], implicit-def $scc
; GFX8: [[S_OR_B32_:%[0-9]+]]:sreg_32 = S_OR_B32 [[S_LSHL_B32_]], [[S_AND_B32_]], implicit-def $scc
; GFX8: S_ENDPGM 0, implicit [[S_OR_B32_]]
- %0:sgpr(<2 x s32>) =COPY $sgpr0_sgpr1
+ %0:sgpr(<2 x s32>) = COPY $sgpr0_sgpr1
%1:sgpr(<2 x s16>) = G_TRUNC %0
S_ENDPGM 0, implicit %1
...
@ -59,7 +59,7 @@ body: |
; GFX8: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub1
; GFX8: [[V_MOV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_MOV_B32_sdwa 0, [[COPY2]], 0, 5, 2, 4, implicit $exec, implicit [[COPY1]](tied-def 0)
; GFX8: S_ENDPGM 0, implicit [[V_MOV_B32_sdwa]]
- %0:vgpr(<2 x s32>) =COPY $vgpr0_vgpr1
+ %0:vgpr(<2 x s32>) = COPY $vgpr0_vgpr1
%1:vgpr(<2 x s16>) = G_TRUNC %0
S_ENDPGM 0, implicit %1
...


@ -1652,7 +1652,7 @@ body: |
%1:_(s32) = COPY $vgpr8
%2:_(s256) = G_ZEXT %1
%3:_(s256) = G_ASHR %0, %2
- $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %3
+ $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %3
...
---
@ -1784,5 +1784,5 @@ body: |
%0:_(<2 x s128>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
%1:_(<2 x s32>) = COPY $vgpr4_vgpr5
%2:_(<2 x s128>) = G_ASHR %0, %1
- $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %2
+ $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %2
...


@ -1488,7 +1488,7 @@ body: |
%1:_(s32) = COPY $vgpr8
%2:_(s256) = G_ZEXT %1
%3:_(s256) = G_LSHR %0, %2
- $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %3
+ $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %3
...
---
@ -1614,5 +1614,5 @@ body: |
%0:_(<2 x s128>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
%1:_(<2 x s32>) = COPY $vgpr4_vgpr5
%2:_(<2 x s128>) = G_LSHR %0, %1
- $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %2
+ $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %2
...


@ -1003,7 +1003,7 @@ body: |
%3:_(s32) = G_CONSTANT i32 0
%4:_(s1) = G_ICMP intpred(ne), %2, %3
%5:_(<2 x s128>) = G_SELECT %4, %0, %1
- $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %5
+ $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %5
...


@ -1149,5 +1149,5 @@ body: |
; GFX6: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<2 x s128>)
%0:_(<2 x s128>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
%1:_(<2 x s128>) = G_SEXT_INREG %0, 1
- $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %1
+ $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %1
...


@ -1550,7 +1550,7 @@ body: |
%1:_(s32) = COPY $vgpr8
%2:_(s256) = G_ZEXT %1
%3:_(s256) = G_SHL %0, %2
- $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %3
+ $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %3
...
---
@ -1676,5 +1676,5 @@ body: |
%0:_(<2 x s128>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
%1:_(<2 x s32>) = COPY $vgpr4_vgpr5
%2:_(<2 x s128>) = G_SHL %0, %1
- $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %2
+ $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %2
...


@ -32,13 +32,13 @@ name: no_extra_fold_on_same_opnd
tracksRegLiveness: true
body: |
bb.0:
- %0:vgpr_32 = IMPLICIT_DEF
- %1:vgpr_32 = IMPLICIT_DEF
- %2:vgpr_32 = IMPLICIT_DEF
- %3:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
- %4:vreg_64 = REG_SEQUENCE killed %0, %subreg.sub0, killed %3, %subreg.sub1
- %5:vgpr_32 = V_XOR_B32_e32 %1, %4.sub1, implicit $exec
- %6:vgpr_32 = V_XOR_B32_e32 %2, %4.sub0, implicit $exec
+ %0:vgpr_32 = IMPLICIT_DEF
+ %1:vgpr_32 = IMPLICIT_DEF
+ %2:vgpr_32 = IMPLICIT_DEF
+ %3:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ %4:vreg_64 = REG_SEQUENCE killed %0, %subreg.sub0, killed %3, %subreg.sub1
+ %5:vgpr_32 = V_XOR_B32_e32 %1, %4.sub1, implicit $exec
+ %6:vgpr_32 = V_XOR_B32_e32 %2, %4.sub0, implicit $exec
...
---


@ -58,7 +58,7 @@ body: |
; SI: $vgpr0 = V_MOV_B32_e32 0, implicit $exec
; SI: $m0 = S_MOV_B32 -1
; SI: DS_GWS_INIT $vgpr0, 0, 1, implicit $m0, implicit $exec
- $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ $vgpr0 = V_MOV_B32_e32 0, implicit $exec
$m0 = S_MOV_B32 -1
DS_GWS_INIT $vgpr0, 0, 1, implicit $m0, implicit $exec


@ -10,7 +10,7 @@ body: |
bb.0.entry:
%0:sgpr_64 = COPY $sgpr0_sgpr1
%1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
- %2:sgpr_128 =COPY $sgpr96_sgpr97_sgpr98_sgpr99
+ %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
%3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
%4:vgpr_32 = COPY %2.sub3
%5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
@ -28,7 +28,7 @@ body: |
bb.0.entry:
%0:sgpr_64 = COPY $sgpr0_sgpr1
%1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
- %2:sgpr_128 =COPY $sgpr96_sgpr97_sgpr98_sgpr99
+ %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
%3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
%4:vgpr_32 = COPY %2.sub3
%5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
@ -47,7 +47,7 @@ body: |
bb.0.entry:
%0:sgpr_64 = COPY $sgpr0_sgpr1
%1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
- %2:sgpr_128 =COPY $sgpr96_sgpr97_sgpr98_sgpr99
+ %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
%3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
%4:vgpr_32 = COPY %2.sub3
%5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
@ -66,7 +66,7 @@ body: |
bb.0.entry:
%0:sgpr_64 = COPY $sgpr0_sgpr1
%1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
- %2:sgpr_128 =COPY $sgpr96_sgpr97_sgpr98_sgpr99
+ %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
%3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
%4:vgpr_32 = COPY %2.sub3
%5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
@ -85,7 +85,7 @@ body: |
bb.0.entry:
%0:sgpr_64 = COPY $sgpr0_sgpr1
%1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
- %2:sgpr_128 =COPY $sgpr96_sgpr97_sgpr98_sgpr99
+ %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
%3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
%4:vgpr_32 = COPY %2.sub3
%5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
@ -104,7 +104,7 @@ body: |
bb.0.entry:
%0:sgpr_64 = COPY $sgpr0_sgpr1
%1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
- %2:sgpr_128 =COPY $sgpr96_sgpr97_sgpr98_sgpr99
+ %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
%3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
%4:vgpr_32 = COPY %2.sub3
%5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
@ -121,7 +121,7 @@ body: |
bb.0.entry:
%0:sgpr_64 = COPY $sgpr0_sgpr1
%1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
- %2:sgpr_128 =COPY $sgpr96_sgpr97_sgpr98_sgpr99
+ %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
%3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
%4:vgpr_32 = COPY %2.sub3
%5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
@ -143,7 +143,7 @@ body: |
bb.0.entry:
%0:sgpr_64 = COPY $sgpr0_sgpr1
%1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
- %2:sgpr_128 =COPY $sgpr96_sgpr97_sgpr98_sgpr99
+ %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
%3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
%4:vreg_128 = COPY %2
%5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
@ -162,7 +162,7 @@ body: |
bb.0.entry:
%0:sgpr_64 = COPY $sgpr0_sgpr1
%1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
- %2:sgpr_128 =COPY $sgpr96_sgpr97_sgpr98_sgpr99
+ %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
%3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
%4:vgpr_32 = COPY %2.sub3
%5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
@ -180,7 +180,7 @@ body: |
bb.0.entry:
%0:sgpr_64 = COPY $sgpr0_sgpr1
%1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
- %2:sgpr_128 =COPY $sgpr96_sgpr97_sgpr98_sgpr99
+ %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
%3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
%4:vgpr_32 = COPY %2.sub3
%5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
@ -198,7 +198,7 @@ body: |
bb.0.entry:
%0:sgpr_64 = COPY $sgpr0_sgpr1
%1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
- %2:sgpr_128 =COPY $sgpr96_sgpr97_sgpr98_sgpr99
+ %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
%3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
%4:vgpr_32 = COPY %2.sub3
%5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
@ -217,7 +217,7 @@ body: |
bb.0.entry:
%0:sgpr_64 = COPY $sgpr0_sgpr1
%1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
- %2:sgpr_128 =COPY $sgpr96_sgpr97_sgpr98_sgpr99
+ %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
%3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
%4:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
%5:vgpr_32 = COPY %2.sub3
@ -236,7 +236,7 @@ body: |
bb.0.entry:
%0:sgpr_64 = COPY $sgpr0_sgpr1
%1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
- %2:sgpr_128 =COPY $sgpr96_sgpr97_sgpr98_sgpr99
+ %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
%3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
%4:vgpr_32 = COPY %2.sub3
%5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
@ -254,7 +254,7 @@ body: |
bb.0.entry:
%0:sgpr_64 = COPY $sgpr0_sgpr1
%1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
- %2:sgpr_128 =COPY $sgpr96_sgpr97_sgpr98_sgpr99
+ %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
%3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
%4:vgpr_32 = COPY %2.sub3
%5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
@ -272,7 +272,7 @@ body: |
bb.0.entry:
%0:sgpr_64 = COPY $sgpr0_sgpr1
%1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
- %2:sgpr_128 =COPY $sgpr96_sgpr97_sgpr98_sgpr99
+ %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
%3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
%4:vgpr_32 = COPY %2.sub3
%5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
@ -290,7 +290,7 @@ body: |
bb.0.entry:
%0:sgpr_64 = COPY $sgpr0_sgpr1
%1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
- %2:sgpr_128 =COPY $sgpr96_sgpr97_sgpr98_sgpr99
+ %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
%3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
%4:vgpr_32 = COPY %2.sub3
%5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
@ -308,7 +308,7 @@ body: |
bb.0.entry:
%0:sgpr_64 = COPY $sgpr0_sgpr1
%1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
- %2:sgpr_128 =COPY $sgpr96_sgpr97_sgpr98_sgpr99
+ %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
%3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
%4:vgpr_32 = COPY %2.sub3
%5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
@ -326,7 +326,7 @@ body: |
bb.0.entry:
%0:sgpr_64 = COPY $sgpr0_sgpr1
%1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
- %2:sgpr_128 =COPY $sgpr96_sgpr97_sgpr98_sgpr99
+ %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
%3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
%4:vgpr_32 = COPY %2.sub3
%5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
@ -344,7 +344,7 @@ body: |
bb.0.entry:
%0:sgpr_64 = COPY $sgpr0_sgpr1
%1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
- %2:sgpr_128 =COPY $sgpr96_sgpr97_sgpr98_sgpr99
+ %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
%3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
%4:vgpr_32 = COPY %2.sub3
%5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
@ -362,7 +362,7 @@ body: |
bb.0.entry:
%0:sgpr_64 = COPY $sgpr0_sgpr1
%1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
- %2:sgpr_128 =COPY $sgpr96_sgpr97_sgpr98_sgpr99
+ %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
%3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
%4:vgpr_32 = COPY %2.sub3
%5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
@ -381,7 +381,7 @@ body: |
bb.0.entry:
%0:sgpr_64 = COPY $sgpr0_sgpr1
%1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
- %2:sgpr_128 =COPY $sgpr96_sgpr97_sgpr98_sgpr99
+ %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
%3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
%4:vgpr_32 = COPY %2.sub3
%5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
@ -402,7 +402,7 @@ body: |
bb.0.entry:
%0:sgpr_64 = COPY $sgpr0_sgpr1
%1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
- %2:sgpr_128 =COPY $sgpr96_sgpr97_sgpr98_sgpr99
+ %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
%3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
%4:vgpr_32 = COPY %2.sub3
%5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
@ -423,7 +423,7 @@ body: |
bb.0.entry:
%0:sgpr_64 = COPY $sgpr0_sgpr1
%1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
- %2:sgpr_128 =COPY $sgpr96_sgpr97_sgpr98_sgpr99
+ %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
%3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
%4:vgpr_32 = COPY %2.sub3
%5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
@ -442,7 +442,7 @@ body: |
bb.0.entry:
%0:sgpr_64 = COPY $sgpr0_sgpr1
%1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
- %2:sgpr_128 =COPY $sgpr96_sgpr97_sgpr98_sgpr99
+ %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
%3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
%4:vgpr_32 = COPY %2.sub3
%5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)
@ -461,7 +461,7 @@ body: |
bb.0.entry:
%0:sgpr_64 = COPY $sgpr0_sgpr1
%1:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0, 0
- %2:sgpr_128 =COPY $sgpr96_sgpr97_sgpr98_sgpr99
+ %2:sgpr_128 = COPY $sgpr96_sgpr97_sgpr98_sgpr99
%3:sgpr_256 = S_LOAD_DWORDX8_IMM %1, 208, 0, 0
%4:vgpr_32 = COPY %2.sub3
%5:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16)


@ -4,7 +4,7 @@
# CHECK: bb.0:
# CHECK: [[COND:%[0-9]+]]:sreg_64 = V_CMP_EQ_U32_e64
# CHECK: [[IF_SOURCE0:%[0-9]+]]:sreg_64 = SI_IF [[COND]], %bb.1, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
- # CHECK: [[IF_INPUT_REG:%[0-9]+]]:sreg_64 = S_MOV_B64_term killed [[IF_SOURCE0]], implicit $exec
+ # CHECK: [[IF_INPUT_REG:%[0-9]+]]:sreg_64 = S_MOV_B64_term killed [[IF_SOURCE0]], implicit $exec
# CHECK: bb.1:
# CHECK: [[END_CF_ARG:%[0-9]+]]:sreg_64 = COPY killed [[IF_INPUT_REG]]
@ -12,7 +12,7 @@
# CHECK: bb.2:
# CHECK: [[IF_SOURCE1:%[0-9]+]]:sreg_64 = SI_IF [[COND]], %bb.1, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
- # CHECK: [[IF_INPUT_REG]]:sreg_64 = S_MOV_B64_term killed [[IF_SOURCE1]], implicit $exec
+ # CHECK: [[IF_INPUT_REG]]:sreg_64 = S_MOV_B64_term killed [[IF_SOURCE1]], implicit $exec
...


@ -18,7 +18,7 @@ body: |
bb.1:
successors: %bb.2
- %2:sreg_32 = S_ADD_U32 %4, %5, implicit-def $scc
+ %2:sreg_32 = S_ADD_U32 %4, %5, implicit-def $scc
S_BRANCH %bb.2
bb.2:
@ -50,7 +50,7 @@ body: |
bb.1:
successors: %bb.2
- undef %2.sub0:sreg_64 = S_ADD_U32 %4, %5, implicit-def $scc
+ undef %2.sub0:sreg_64 = S_ADD_U32 %4, %5, implicit-def $scc
S_BRANCH %bb.2
bb.2:
@ -84,7 +84,7 @@ body: |
bb.1:
successors: %bb.2
- %2:sreg_32 = S_ADD_U32 %4, %5, implicit-def $scc
+ %2:sreg_32 = S_ADD_U32 %4, %5, implicit-def $scc
S_BRANCH %bb.2
bb.2:
successors: %bb.3