Shrink interval after moving copy in removePartialRedundancy

llvm-svn: 334963
Krzysztof Parzyszek 2018-06-18 17:16:39 +00:00
parent f6e62b21be
commit deaab0b105
2 changed files with 241 additions and 0 deletions

lib/CodeGen/RegisterCoalescer.cpp

@@ -1050,6 +1050,8 @@ bool RegisterCoalescer::removePartialRedundancy(const CoalescerPair &CP,
     BValNo->markUnused();
     LIS->extendToIndices(SR, EndPoints);
   }
+  // If any dead defs were extended, truncate them.
+  shrinkToUses(&IntB);
 
   // Finally, update the live-range of IntA.
   shrinkToUses(&IntA);
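
The two added lines mirror the cleanup already performed for IntA. removePartialRedundancy() hoists a copy B = A out of its block into a predecessor, and LIS->extendToIndices() then grows IntB's main range and its lane-masked subranges to cover the relocated copy; a def of IntB that was dead before the move can thereby end up with a segment reaching past its last use. Left in place, such a stale segment can later derail subrange joining (the "Couldn't join subrange" unreachable exercised by the new test below). A condensed sketch of the resulting order of operations, assuming the 2018-era LiveIntervals API; only the identifiers visible in the hunk come from the patch:

    // Condensed sketch, not the verbatim surrounding code.
    LIS->extendToIndices(IntB, EndPoints);    // cover the moved copy: main range
    for (LiveInterval::SubRange &SR : IntB.subranges())
      LIS->extendToIndices(SR, EndPoints);    // ...and each lane-masked subrange
    // If any dead defs were extended, truncate them.
    shrinkToUses(&IntB);
    // Finally, update the live-range of IntA.
    shrinkToUses(&IntA);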

test/CodeGen/AMDGPU/regcoal-subrange-join-seg.mir

@@ -0,0 +1,239 @@
# RUN: llc -march=amdgcn -run-pass simple-register-coalescing -o - %s | FileCheck --check-prefix=GCN %s
# REQUIRES: asserts
#
# This test will provoke a "Couldn't join subrange" unreachable without the
# fix for http://llvm.org/PR35373
#
# GCN: S_CBRANCH_SCC1 %bb.6, implicit undef $scc
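#
# The interesting part of the reproducer is the loop at bb.28-bb.31: it
# carries a vreg_128 value around the back edge through a chain of full and
# subregister copies (%62 -> %53 -> %49 -> %51 -> %63 -> %52 -> %62, with
# writes to %49.sub2 and %51.sub3), so coalescing the chain forces a join
# of lane-masked subranges.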
--- |
  define amdgpu_ps void @regcoal-subrange-join-seg() local_unnamed_addr #0 {
    ret void
  }
...
---
name: regcoal-subrange-join-seg
tracksRegLiveness: true
registers:
  - { id: 0, class: sreg_128 }
  - { id: 1, class: sreg_128 }
  - { id: 2, class: sreg_128 }
  - { id: 3, class: sreg_128 }
  - { id: 4, class: sreg_128 }
  - { id: 5, class: sreg_128 }
  - { id: 6, class: sreg_128 }
  - { id: 7, class: sreg_128 }
  - { id: 8, class: sreg_128 }
  - { id: 9, class: sreg_32_xm0 }
  - { id: 10, class: sreg_32_xm0 }
  - { id: 11, class: vgpr_32 }
  - { id: 12, class: vgpr_32 }
  - { id: 13, class: vgpr_32 }
  - { id: 14, class: sreg_32_xm0_xexec }
  - { id: 15, class: sreg_128 }
  - { id: 16, class: sreg_32 }
  - { id: 17, class: sreg_32_xm0 }
  - { id: 18, class: sreg_32_xm0 }
  - { id: 19, class: sreg_32_xm0 }
  - { id: 20, class: sreg_32_xm0 }
  - { id: 21, class: sreg_32_xm0_xexec }
  - { id: 22, class: sreg_128 }
  - { id: 23, class: sreg_32_xm0 }
  - { id: 24, class: vgpr_32 }
  - { id: 25, class: sreg_64_xexec }
  - { id: 26, class: vgpr_32 }
  - { id: 27, class: sreg_32_xm0 }
  - { id: 28, class: sreg_32 }
  - { id: 29, class: sreg_128 }
  - { id: 30, class: sreg_32_xm0 }
  - { id: 31, class: sreg_32_xm0 }
  - { id: 32, class: vgpr_32 }
  - { id: 33, class: vgpr_32 }
  - { id: 34, class: vgpr_32 }
  - { id: 35, class: vgpr_32 }
  - { id: 36, class: vgpr_32 }
  - { id: 37, class: vgpr_32 }
  - { id: 38, class: vgpr_32 }
  - { id: 39, class: vgpr_32 }
  - { id: 40, class: vgpr_32 }
  - { id: 41, class: vgpr_32 }
  - { id: 42, class: vgpr_32 }
  - { id: 43, class: vgpr_32 }
  - { id: 44, class: vgpr_32 }
  - { id: 45, class: vgpr_32 }
  - { id: 46, class: vgpr_32 }
  - { id: 47, class: vgpr_32 }
  - { id: 48, class: vgpr_32 }
  - { id: 49, class: vreg_128 }
  - { id: 50, class: vreg_128 }
  - { id: 51, class: vreg_128 }
  - { id: 52, class: vreg_128 }
  - { id: 53, class: vreg_128 }
  - { id: 54, class: vreg_128 }
  - { id: 55, class: vgpr_32 }
  - { id: 56, class: vreg_128 }
  - { id: 57, class: vreg_128 }
  - { id: 58, class: vreg_128 }
  - { id: 59, class: vreg_128 }
  - { id: 60, class: vreg_128 }
  - { id: 61, class: vreg_128 }
  - { id: 62, class: vreg_128 }
  - { id: 63, class: vreg_128 }
body: |
  bb.0:
    S_CBRANCH_SCC1 %bb.6, implicit undef $scc
    S_BRANCH %bb.1

  bb.1:
    S_CBRANCH_SCC1 %bb.4, implicit undef $scc
    S_BRANCH %bb.2

  bb.2:
    S_CBRANCH_SCC1 %bb.4, implicit undef $scc
    S_BRANCH %bb.3

  bb.3:

  bb.4:
    successors: %bb.5, %bb.6
    S_CBRANCH_SCC1 %bb.6, implicit undef $scc
    S_BRANCH %bb.5

  bb.5:

  bb.6:
    S_CBRANCH_SCC1 %bb.14, implicit undef $scc
    S_BRANCH %bb.7

  bb.7:
    S_CBRANCH_SCC1 %bb.9, implicit undef $scc
    S_BRANCH %bb.8

  bb.8:

  bb.9:
    successors: %bb.10, %bb.13
    S_CBRANCH_SCC1 %bb.13, implicit undef $scc
    S_BRANCH %bb.10

  bb.10:
    S_CBRANCH_SCC1 %bb.12, implicit undef $scc
    S_BRANCH %bb.11

  bb.11:

  bb.12:

  bb.13:

  bb.14:
    S_CBRANCH_SCC1 %bb.26, implicit undef $scc
    S_BRANCH %bb.15

  bb.15:
    S_CBRANCH_SCC1 %bb.20, implicit undef $scc
    S_BRANCH %bb.16

  bb.16:
    successors: %bb.17, %bb.19
    S_CBRANCH_SCC1 %bb.19, implicit undef $scc
    S_BRANCH %bb.17

  bb.17:
    successors: %bb.18, %bb.19
    S_CBRANCH_SCC1 %bb.19, implicit undef $scc
    S_BRANCH %bb.18

  bb.18:

  bb.19:

  bb.20:
    S_CBRANCH_SCC1 %bb.25, implicit undef $scc
    S_BRANCH %bb.21

  bb.21:
    successors: %bb.22, %bb.24
    S_CBRANCH_SCC1 %bb.24, implicit undef $scc
    S_BRANCH %bb.22

  bb.22:
    successors: %bb.23, %bb.24
    S_CBRANCH_SCC1 %bb.24, implicit undef $scc
    S_BRANCH %bb.23

  bb.23:

  bb.24:

  bb.25:

  bb.26:
    S_CBRANCH_SCC1 %bb.35, implicit undef $scc
    S_BRANCH %bb.27

  bb.27:
    S_CBRANCH_SCC1 %bb.32, implicit undef $scc
    S_BRANCH %bb.28

  bb.28:
    %9 = S_FF1_I32_B32 undef %10
    %13 = V_MAD_U32_U24 killed %9, 48, 32, 0, implicit $exec
    %45 = BUFFER_LOAD_DWORD_OFFEN killed %13, undef %15, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 4)
    %46 = V_AND_B32_e32 1, killed %45, implicit $exec
    %21 = S_BUFFER_LOAD_DWORD_SGPR undef %22, undef %23, 0 :: (dereferenceable invariant load 4)
    %25 = V_CMP_GE_F32_e64 0, 0, 0, killed %21, 0, implicit $exec
    %26 = V_CNDMASK_B32_e64 0, -1, killed %25, implicit $exec
    %62 = IMPLICIT_DEF

  bb.29:
    successors: %bb.30(0x30000000), %bb.36(0x50000000)
    %53 = COPY killed %62
    %47 = V_ADD_I32_e32 -1, %46, implicit-def dead $vcc, implicit $exec
    %48 = V_OR_B32_e32 killed %47, %26, implicit $exec
    %49 = COPY %53
    %49.sub2 = COPY undef %48
    %51 = COPY killed %49
    %51.sub3 = COPY undef %26
    V_CMP_NE_U32_e32 0, killed %48, implicit-def $vcc, implicit $exec
    $vcc = S_AND_B64 $exec, killed $vcc, implicit-def dead $scc
    S_CBRANCH_VCCZ %bb.30, implicit killed $vcc

  bb.36:
    %63 = COPY killed %51
    S_BRANCH %bb.31

  bb.30:
    %33 = V_MAD_F32 1, killed %53.sub0, 0, undef %34, 0, 0, 0, 0, implicit $exec
    %35 = V_MAC_F32_e32 killed %33, undef %36, undef %35, implicit $exec
    %38 = V_MAX_F32_e32 0, killed %35, implicit $exec
    %39 = V_LOG_F32_e32 killed %38, implicit $exec
    %40 = V_MUL_F32_e32 killed %39, undef %41, implicit $exec
    %42 = V_EXP_F32_e32 killed %40, implicit $exec
    dead %43 = V_MUL_F32_e32 killed %42, undef %44, implicit $exec
    %63 = COPY killed %51

  bb.31:
    %52 = COPY killed %63
    %62 = COPY killed %52
    S_BRANCH %bb.29

  bb.32:
    S_CBRANCH_SCC1 %bb.34, implicit undef $scc
    S_BRANCH %bb.33

  bb.33:

  bb.34:

  bb.35:
    S_ENDPGM
...