1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-10-20 19:42:54 +02:00

AMDGPU: Don't fold copies to physregs

In a future patch, this will help clean up m0 handling.

The register coalescer handles copies from a register that
materializes an immediate, but doesn't handle move immediates
itself. The virtual register uses will often be allocated to the same
register, so there often ends up being no real copy.

llvm-svn: 374257
This commit is contained in:
Matt Arsenault 2019-10-09 22:51:42 +00:00
parent 116a22fd06
commit 983c9e76c0
2 changed files with 10 additions and 6 deletions

View File

@ -581,13 +581,17 @@ void SIFoldOperands::foldOperand(
if (FoldingImmLike && UseMI->isCopy()) { if (FoldingImmLike && UseMI->isCopy()) {
Register DestReg = UseMI->getOperand(0).getReg(); Register DestReg = UseMI->getOperand(0).getReg();
const TargetRegisterClass *DestRC = Register::isVirtualRegister(DestReg)
? MRI->getRegClass(DestReg) // Don't fold into a copy to a physical register. Doing so would interfere
: TRI->getPhysRegClass(DestReg); // with the register coalescer's logic which would avoid redundant
// initializations.
if (DestReg.isPhysical())
return;
const TargetRegisterClass *DestRC = MRI->getRegClass(DestReg);
Register SrcReg = UseMI->getOperand(1).getReg(); Register SrcReg = UseMI->getOperand(1).getReg();
if (Register::isVirtualRegister(DestReg) && if (SrcReg.isVirtual()) { // XXX - This can be an assert?
Register::isVirtualRegister(SrcReg)) {
const TargetRegisterClass * SrcRC = MRI->getRegClass(SrcReg); const TargetRegisterClass * SrcRC = MRI->getRegClass(SrcReg);
if (TRI->isSGPRClass(SrcRC) && TRI->hasVectorRegisters(DestRC)) { if (TRI->isSGPRClass(SrcRC) && TRI->hasVectorRegisters(DestRC)) {
MachineRegisterInfo::use_iterator NextUse; MachineRegisterInfo::use_iterator NextUse;

View File

@ -14,8 +14,8 @@ define amdgpu_kernel void @kernel_background_evaluate(float addrspace(5)* %kg, <
; GCN-NEXT: s_mov_b64 s[0:1], s[36:37] ; GCN-NEXT: s_mov_b64 s[0:1], s[36:37]
; GCN-NEXT: v_mov_b32_e32 v1, 0x2000 ; GCN-NEXT: v_mov_b32_e32 v1, 0x2000
; GCN-NEXT: v_mov_b32_e32 v2, 0x4000 ; GCN-NEXT: v_mov_b32_e32 v2, 0x4000
; GCN-NEXT: s_mov_b64 s[2:3], s[38:39]
; GCN-NEXT: v_mov_b32_e32 v3, 0 ; GCN-NEXT: v_mov_b32_e32 v3, 0
; GCN-NEXT: s_mov_b64 s[2:3], s[38:39]
; GCN-NEXT: v_mov_b32_e32 v4, 0x400000 ; GCN-NEXT: v_mov_b32_e32 v4, 0x400000
; GCN-NEXT: s_add_u32 s32, s33, 0xc0000 ; GCN-NEXT: s_add_u32 s32, s33, 0xc0000
; GCN-NEXT: v_add_nc_u32_e64 v32, 4, 0x4000 ; GCN-NEXT: v_add_nc_u32_e64 v32, 4, 0x4000