[AMDGPU] Legalize VGPR Rsrc operands for MUBUF instructions

Emit a waterfall loop in the general case for a potentially-divergent Rsrc
operand. When practical, avoid this by using Addr64 instructions.
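
The emitted loop has roughly this shape (an illustrative sketch; the new
mubuf-legalize-operands tests below check the exact patterns):

  s_mov_b64 s[6:7], exec             ; save EXEC
BB0_1:                               ; one iteration per unique Rsrc value
  v_readfirstlane_b32 s8, v0
  v_readfirstlane_b32 s9, v1
  v_readfirstlane_b32 s10, v2
  v_readfirstlane_b32 s11, v3
  v_cmp_eq_u64_e32 vcc, s[8:9], v[0:1]
  v_cmp_eq_u64_e64 s[4:5], s[10:11], v[2:3]
  s_and_b64 s[4:5], vcc, s[4:5]      ; lanes whose Rsrc matches the scalar copy
  s_and_saveexec_b64 s[4:5], s[4:5]
  buffer_load_format_x v0, v4, s[8:11], 0 idxen  ; the wrapped instruction
  s_xor_b64 exec, exec, s[4:5]       ; retire the lanes just serviced
  s_cbranch_execnz BB0_1
  s_mov_b64 exec, s[6:7]             ; restore EXEC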

Recommits r341413 with changes to update the MachineDominatorTree when present.

Differential Revision: https://reviews.llvm.org/D51742

llvm-svn: 343992
Scott Linder 2018-10-08 18:47:01 +00:00
parent 4b773ffb52
commit a1e537617c
6 changed files with 762 additions and 108 deletions

lib/Target/AMDGPU/SIFixSGPRCopies.cpp

@@ -599,7 +599,7 @@ bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
if (isVGPRToSGPRCopy(SrcRC, DstRC, *TRI)) {
unsigned SrcReg = MI.getOperand(1).getReg();
if (!TargetRegisterInfo::isVirtualRegister(SrcReg)) {
TII->moveToVALU(MI);
TII->moveToVALU(MI, MDT);
break;
}
@@ -614,7 +614,7 @@ bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
MI.setDesc(TII->get(SMovOp));
break;
}
TII->moveToVALU(MI);
TII->moveToVALU(MI, MDT);
} else if (isSGPRToVGPRCopy(SrcRC, DstRC, *TRI)) {
tryChangeVGPRtoSGPRinCopy(MI, TRI, TII);
}
@@ -677,7 +677,7 @@ bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
SmallSet<unsigned, 8> Visited;
if (HasVGPROperand || !phiHasBreakDef(MI, MRI, Visited)) {
LLVM_DEBUG(dbgs() << "Fixing PHI: " << MI);
TII->moveToVALU(MI);
TII->moveToVALU(MI, MDT);
}
break;
}
@@ -690,7 +690,7 @@ bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
LLVM_DEBUG(dbgs() << "Fixing REG_SEQUENCE: " << MI);
TII->moveToVALU(MI);
TII->moveToVALU(MI, MDT);
break;
case AMDGPU::INSERT_SUBREG: {
const TargetRegisterClass *DstRC, *Src0RC, *Src1RC;
@@ -700,7 +700,7 @@ bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
if (TRI->isSGPRClass(DstRC) &&
(TRI->hasVGPRs(Src0RC) || TRI->hasVGPRs(Src1RC))) {
LLVM_DEBUG(dbgs() << " Fixing INSERT_SUBREG: " << MI);
TII->moveToVALU(MI);
TII->moveToVALU(MI, MDT);
}
break;
}

lib/Target/AMDGPU/SIInstrInfo.cpp

@@ -31,6 +31,7 @@
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
@@ -3612,7 +3613,191 @@ void SIInstrInfo::legalizeGenericOperand(MachineBasicBlock &InsertMBB,
FoldImmediate(*Copy, *Def, OpReg, &MRI);
}
void SIInstrInfo::legalizeOperands(MachineInstr &MI) const {
// Emit the actual waterfall loop, executing the wrapped instruction for each
// unique value of \p Rsrc across all lanes. In the best case we execute 1
// iteration, in the worst case we execute 64 (once per lane).
static void
emitLoadSRsrcFromVGPRLoop(const SIInstrInfo &TII, MachineRegisterInfo &MRI,
MachineBasicBlock &OrigBB, MachineBasicBlock &LoopBB,
const DebugLoc &DL, MachineOperand &Rsrc) {
MachineBasicBlock::iterator I = LoopBB.begin();
unsigned VRsrc = Rsrc.getReg();
unsigned VRsrcUndef = getUndefRegState(Rsrc.isUndef());
unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
unsigned CondReg0 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
unsigned CondReg1 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
unsigned AndCond = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
unsigned SRsrcSub0 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
unsigned SRsrcSub1 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
unsigned SRsrcSub2 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
unsigned SRsrcSub3 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
unsigned SRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
// Beginning of the loop, read the next Rsrc variant.
BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub0)
.addReg(VRsrc, VRsrcUndef, AMDGPU::sub0);
BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub1)
.addReg(VRsrc, VRsrcUndef, AMDGPU::sub1);
BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub2)
.addReg(VRsrc, VRsrcUndef, AMDGPU::sub2);
BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub3)
.addReg(VRsrc, VRsrcUndef, AMDGPU::sub3);
BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), SRsrc)
.addReg(SRsrcSub0)
.addImm(AMDGPU::sub0)
.addReg(SRsrcSub1)
.addImm(AMDGPU::sub1)
.addReg(SRsrcSub2)
.addImm(AMDGPU::sub2)
.addReg(SRsrcSub3)
.addImm(AMDGPU::sub3);
// Update Rsrc operand to use the SGPR Rsrc.
Rsrc.setReg(SRsrc);
Rsrc.setIsKill(true);
// Identify all lanes with identical Rsrc operands in their VGPRs.
BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64), CondReg0)
.addReg(SRsrc, 0, AMDGPU::sub0_sub1)
.addReg(VRsrc, 0, AMDGPU::sub0_sub1);
BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64), CondReg1)
.addReg(SRsrc, 0, AMDGPU::sub2_sub3)
.addReg(VRsrc, 0, AMDGPU::sub2_sub3);
BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_AND_B64), AndCond)
.addReg(CondReg0)
.addReg(CondReg1);
MRI.setSimpleHint(SaveExec, AndCond);
// Update EXEC to matching lanes, saving original to SaveExec.
BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_AND_SAVEEXEC_B64), SaveExec)
.addReg(AndCond, RegState::Kill);
// The original instruction is here; we insert the terminators after it.
I = LoopBB.end();
// Update EXEC, switch all done bits to 0 and all todo bits to 1.
BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_XOR_B64_term), AMDGPU::EXEC)
.addReg(AMDGPU::EXEC)
.addReg(SaveExec);
BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_CBRANCH_EXECNZ)).addMBB(&LoopBB);
}
// Build a waterfall loop around \p MI, replacing the VGPR \p Rsrc register
// with SGPRs by iterating over all unique values across all lanes.
static void loadSRsrcFromVGPR(const SIInstrInfo &TII, MachineInstr &MI,
MachineOperand &Rsrc, MachineDominatorTree *MDT) {
MachineBasicBlock &MBB = *MI.getParent();
MachineFunction &MF = *MBB.getParent();
MachineRegisterInfo &MRI = MF.getRegInfo();
MachineBasicBlock::iterator I(&MI);
const DebugLoc &DL = MI.getDebugLoc();
unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
// Save the EXEC mask
BuildMI(MBB, I, DL, TII.get(AMDGPU::S_MOV_B64), SaveExec)
.addReg(AMDGPU::EXEC);
// Killed uses in the instruction we are waterfalling around will be
// incorrect due to the added control-flow.
for (auto &MO : MI.uses()) {
if (MO.isReg() && MO.isUse()) {
MRI.clearKillFlags(MO.getReg());
}
}
// To insert the loop we need to split the block. Move everything after this
// point to a new block, and insert a new empty block between the two.
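// Resulting CFG (sketch):
//
//   MBB --> LoopBB --> RemainderBB --> (former successors of MBB)
//            ^    |
//            +----+  back-edge taken while EXEC is still non-zero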
MachineBasicBlock *LoopBB = MF.CreateMachineBasicBlock();
MachineBasicBlock *RemainderBB = MF.CreateMachineBasicBlock();
MachineFunction::iterator MBBI(MBB);
++MBBI;
MF.insert(MBBI, LoopBB);
MF.insert(MBBI, RemainderBB);
LoopBB->addSuccessor(LoopBB);
LoopBB->addSuccessor(RemainderBB);
// Move MI to the LoopBB, and the remainder of the block to RemainderBB.
MachineBasicBlock::iterator J = I++;
RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());
LoopBB->splice(LoopBB->begin(), &MBB, J);
MBB.addSuccessor(LoopBB);
// Update dominators. We know that MBB immediately dominates LoopBB, that
// LoopBB immediately dominates RemainderBB, and that RemainderBB immediately
// dominates all of the successors transferred to it from MBB that MBB used
// to dominate.
if (MDT) {
MDT->addNewBlock(LoopBB, &MBB);
MDT->addNewBlock(RemainderBB, LoopBB);
for (auto &Succ : RemainderBB->successors()) {
if (MDT->dominates(&MBB, Succ)) {
MDT->changeImmediateDominator(Succ, RemainderBB);
}
}
}
emitLoadSRsrcFromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, Rsrc);
// Restore the EXEC mask
MachineBasicBlock::iterator First = RemainderBB->begin();
BuildMI(*RemainderBB, First, DL, TII.get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
.addReg(SaveExec);
}
// Extract pointer from Rsrc and return a zero-value Rsrc replacement.
static std::tuple<unsigned, unsigned>
extractRsrcPtr(const SIInstrInfo &TII, MachineInstr &MI, MachineOperand &Rsrc) {
MachineBasicBlock &MBB = *MI.getParent();
MachineFunction &MF = *MBB.getParent();
MachineRegisterInfo &MRI = MF.getRegInfo();
// Extract the ptr from the resource descriptor.
unsigned RsrcPtr =
TII.buildExtractSubReg(MI, MRI, Rsrc, &AMDGPU::VReg_128RegClass,
AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass);
// Create an empty resource descriptor
unsigned Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
unsigned SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
unsigned SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
unsigned NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
uint64_t RsrcDataFormat = TII.getDefaultRsrcDataFormat();
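// For reference, on gfx700 the ADDR64 MIR test below expects this default
// format to expand to S_MOV_B32 0 (low word) and S_MOV_B32 61440, i.e.
// 0xf000 (high word).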
// Zero64 = 0
BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B64), Zero64)
.addImm(0);
// SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatLo)
.addImm(RsrcDataFormat & 0xFFFFFFFF);
// SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatHi)
.addImm(RsrcDataFormat >> 32);
// NewSRsrc = {Zero64, SRsrcFormat}
BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::REG_SEQUENCE), NewSRsrc)
.addReg(Zero64)
.addImm(AMDGPU::sub0_sub1)
.addReg(SRsrcFormatLo)
.addImm(AMDGPU::sub2)
.addReg(SRsrcFormatHi)
.addImm(AMDGPU::sub3);
return std::make_tuple(RsrcPtr, NewSRsrc);
}
void SIInstrInfo::legalizeOperands(MachineInstr &MI,
MachineDominatorTree *MDT) const {
MachineFunction &MF = *MI.getParent()->getParent();
MachineRegisterInfo &MRI = MF.getRegInfo();
@@ -3754,75 +3939,56 @@ void SIInstrInfo::legalizeOperands(MachineInstr &MI) const {
return;
}
// Legalize MUBUF* instructions by converting to addr64 form.
// FIXME: If we start using the non-addr64 instructions for compute, we
// may need to legalize them as above. This especially applies to the
// buffer_load_format_* variants and variants with idxen (or bothen).
int SRsrcIdx =
// Legalize MUBUF* instructions.
int RsrcIdx =
AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
if (SRsrcIdx != -1) {
if (RsrcIdx != -1) {
// We have an MUBUF instruction
MachineOperand *SRsrc = &MI.getOperand(SRsrcIdx);
unsigned SRsrcRC = get(MI.getOpcode()).OpInfo[SRsrcIdx].RegClass;
if (RI.getCommonSubClass(MRI.getRegClass(SRsrc->getReg()),
RI.getRegClass(SRsrcRC))) {
MachineOperand *Rsrc = &MI.getOperand(RsrcIdx);
unsigned RsrcRC = get(MI.getOpcode()).OpInfo[RsrcIdx].RegClass;
if (RI.getCommonSubClass(MRI.getRegClass(Rsrc->getReg()),
RI.getRegClass(RsrcRC))) {
// The operands are legal.
// FIXME: We may need to legalize operands besides srsrc.
return;
}
// Legalize a VGPR Rsrc.
//
// If the instruction is _ADDR64, we can avoid a waterfall by extracting
// the base pointer from the VGPR Rsrc, adding it to the VAddr, then using
// a zero-value SRsrc.
//
// If the instruction is _OFFSET (both idxen and offen disabled), and we
// support ADDR64 instructions, we can convert to ADDR64 and do the same as
// above.
//
// Otherwise we are on non-ADDR64 hardware, and/or we have
// idxen/offen/bothen and we fall back to a waterfall loop.
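// In outline (a sketch of the cases handled below):
//   if (VAddr && isAddr64(MI))       fold the Rsrc pointer into VAddr and
//                                    use a zero-value Rsrc
//   else if (!VAddr && hasAddr64())  rewrite the _OFFSET form as _ADDR64,
//                                    then handle it as above
//   else                             emit a waterfall loop
//                                    (loadSRsrcFromVGPR)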
MachineBasicBlock &MBB = *MI.getParent();
// Extract the ptr from the resource descriptor.
unsigned SRsrcPtr = buildExtractSubReg(MI, MRI, *SRsrc,
&AMDGPU::VReg_128RegClass, AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass);
// Create an empty resource descriptor
unsigned Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
unsigned SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
unsigned SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
unsigned NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
uint64_t RsrcDataFormat = getDefaultRsrcDataFormat();
// Zero64 = 0
BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B64), Zero64)
.addImm(0);
// SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B32), SRsrcFormatLo)
.addImm(RsrcDataFormat & 0xFFFFFFFF);
// SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B32), SRsrcFormatHi)
.addImm(RsrcDataFormat >> 32);
// NewSRsrc = {Zero64, SRsrcFormat}
BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewSRsrc)
.addReg(Zero64)
.addImm(AMDGPU::sub0_sub1)
.addReg(SRsrcFormatLo)
.addImm(AMDGPU::sub2)
.addReg(SRsrcFormatHi)
.addImm(AMDGPU::sub3);
MachineOperand *VAddr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
if (VAddr) {
if (VAddr && AMDGPU::getIfAddr64Inst(MI.getOpcode()) != -1) {
// This is already an ADDR64 instruction so we need to add the pointer
// extracted from the resource descriptor to the current value of VAddr.
unsigned NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
unsigned NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
// NewVaddrLo = SRsrcPtr:sub0 + VAddr:sub0
unsigned RsrcPtr, NewSRsrc;
std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);
// NewVaddrLo = RsrcPtr:sub0 + VAddr:sub0
DebugLoc DL = MI.getDebugLoc();
BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), NewVAddrLo)
.addReg(SRsrcPtr, 0, AMDGPU::sub0)
.addReg(VAddr->getReg(), 0, AMDGPU::sub0);
.addReg(RsrcPtr, 0, AMDGPU::sub0)
.addReg(VAddr->getReg(), 0, AMDGPU::sub0);
// NewVaddrHi = SRsrcPtr:sub1 + VAddr:sub1
// NewVaddrHi = RsrcPtr:sub1 + VAddr:sub1
BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e32), NewVAddrHi)
.addReg(SRsrcPtr, 0, AMDGPU::sub1)
.addReg(VAddr->getReg(), 0, AMDGPU::sub1);
.addReg(RsrcPtr, 0, AMDGPU::sub1)
.addReg(VAddr->getReg(), 0, AMDGPU::sub1);
// NewVaddr = {NewVaddrHi, NewVaddrLo}
BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
@@ -3830,13 +3996,20 @@ void SIInstrInfo::legalizeOperands(MachineInstr &MI) const {
.addImm(AMDGPU::sub0)
.addReg(NewVAddrHi)
.addImm(AMDGPU::sub1);
} else {
VAddr->setReg(NewVAddr);
Rsrc->setReg(NewSRsrc);
} else if (!VAddr && ST.hasAddr64()) {
// This instruction is the _OFFSET variant, so we need to convert it to
// ADDR64.
assert(MBB.getParent()->getSubtarget<GCNSubtarget>().getGeneration()
< AMDGPUSubtarget::VOLCANIC_ISLANDS &&
"FIXME: Need to emit flat atomics here");
unsigned RsrcPtr, NewSRsrc;
std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);
unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata);
MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset);
@@ -3852,10 +4025,8 @@ void SIInstrInfo::legalizeOperands(MachineInstr &MI) const {
MachineInstrBuilder MIB =
BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
.add(*VData)
.addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
// This will be replaced later
// with the new value of vaddr.
.add(*SRsrc)
.addReg(NewVAddr)
.addReg(NewSRsrc)
.add(*SOffset)
.add(*Offset);
@@ -3879,10 +4050,8 @@ void SIInstrInfo::legalizeOperands(MachineInstr &MI) const {
Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
.add(*VData)
.add(*VDataIn)
.addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
// This will be replaced later
// with the new value of vaddr.
.add(*SRsrc)
.addReg(NewVAddr)
.addReg(NewSRsrc)
.add(*SOffset)
.add(*Offset)
.addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc))
@@ -3894,23 +4063,20 @@ void SIInstrInfo::legalizeOperands(MachineInstr &MI) const {
// NewVaddr = {NewVaddrHi, NewVaddrLo}
BuildMI(MBB, Addr64, Addr64->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
NewVAddr)
.addReg(SRsrcPtr, 0, AMDGPU::sub0)
.addReg(RsrcPtr, 0, AMDGPU::sub0)
.addImm(AMDGPU::sub0)
.addReg(SRsrcPtr, 0, AMDGPU::sub1)
.addReg(RsrcPtr, 0, AMDGPU::sub1)
.addImm(AMDGPU::sub1);
VAddr = getNamedOperand(*Addr64, AMDGPU::OpName::vaddr);
SRsrc = getNamedOperand(*Addr64, AMDGPU::OpName::srsrc);
} else {
// This is another variant; legalize Rsrc with waterfall loop from VGPRs
// to SGPRs.
loadSRsrcFromVGPR(*this, MI, *Rsrc, MDT);
}
// Update the instruction to use NewVaddr
VAddr->setReg(NewVAddr);
// Update the instruction to use NewSRsrc
SRsrc->setReg(NewSRsrc);
}
}
void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
void SIInstrInfo::moveToVALU(MachineInstr &TopInst,
MachineDominatorTree *MDT) const {
SetVectorType Worklist;
Worklist.insert(&TopInst);
@@ -3928,29 +4094,29 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
break;
case AMDGPU::S_ADD_U64_PSEUDO:
case AMDGPU::S_SUB_U64_PSEUDO:
splitScalar64BitAddSub(Worklist, Inst);
splitScalar64BitAddSub(Worklist, Inst, MDT);
Inst.eraseFromParent();
continue;
case AMDGPU::S_ADD_I32:
case AMDGPU::S_SUB_I32:
// FIXME: The u32 versions currently selected use the carry.
if (moveScalarAddSub(Worklist, Inst))
if (moveScalarAddSub(Worklist, Inst, MDT))
continue;
// Default handling
break;
case AMDGPU::S_AND_B64:
splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_AND_B32_e64);
splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_AND_B32_e64, MDT);
Inst.eraseFromParent();
continue;
case AMDGPU::S_OR_B64:
splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_OR_B32_e64);
splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_OR_B32_e64, MDT);
Inst.eraseFromParent();
continue;
case AMDGPU::S_XOR_B64:
splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_XOR_B32_e64);
splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_XOR_B32_e64, MDT);
Inst.eraseFromParent();
continue;
@@ -4037,7 +4203,7 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
continue;
case AMDGPU::S_XNOR_B64:
splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XNOR_B32);
splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XNOR_B32, MDT);
Inst.eraseFromParent();
continue;
@@ -4146,7 +4312,7 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
// Legalize all operands other than the offset. Notably, convert the srsrc
// into SGPRs using v_readfirstlane if needed.
legalizeOperands(*NewInstr);
legalizeOperands(*NewInstr, MDT);
continue;
}
}
@@ -4154,7 +4320,7 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) {
// We cannot move this instruction to the VALU, so we should try to
// legalize its operands instead.
legalizeOperands(Inst);
legalizeOperands(Inst, MDT);
continue;
}
@@ -4243,7 +4409,7 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
}
// Legalize the operands
legalizeOperands(Inst);
legalizeOperands(Inst, MDT);
if (HasDst)
addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
@@ -4251,8 +4417,8 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
}
// Add/sub require special handling to deal with carry outs.
bool SIInstrInfo::moveScalarAddSub(SetVectorType &Worklist,
MachineInstr &Inst) const {
bool SIInstrInfo::moveScalarAddSub(SetVectorType &Worklist, MachineInstr &Inst,
MachineDominatorTree *MDT) const {
if (ST.hasAddNoCarry()) {
// Assume there is no user of scc since we don't select this in that case.
// Since scc isn't used, it doesn't really matter if the i32 or u32 variant
@@ -4276,7 +4442,7 @@ bool SIInstrInfo::moveScalarAddSub(SetVectorType &Worklist,
Inst.setDesc(get(NewOpc));
Inst.addImplicitDefUseOperands(*MBB.getParent());
MRI.replaceRegWith(OldDstReg, ResultReg);
legalizeOperands(Inst);
legalizeOperands(Inst, MDT);
addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
return true;
@@ -4396,8 +4562,9 @@ void SIInstrInfo::splitScalar64BitUnaryOp(
addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
}
void SIInstrInfo::splitScalar64BitAddSub(
SetVectorType &Worklist, MachineInstr &Inst) const {
void SIInstrInfo::splitScalar64BitAddSub(SetVectorType &Worklist,
MachineInstr &Inst,
MachineDominatorTree *MDT) const {
bool IsAdd = (Inst.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO);
MachineBasicBlock &MBB = *Inst.getParent();
@@ -4457,16 +4624,16 @@ void SIInstrInfo::splitScalar64BitAddSub(
// Try to legalize the operands in case we need to swap the order to keep it
// valid.
legalizeOperands(*LoHalf);
legalizeOperands(*HiHalf);
legalizeOperands(*LoHalf, MDT);
legalizeOperands(*HiHalf, MDT);
// Move all users of this moved value.
addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
}
void SIInstrInfo::splitScalar64BitBinaryOp(
SetVectorType &Worklist, MachineInstr &Inst,
unsigned Opcode) const {
void SIInstrInfo::splitScalar64BitBinaryOp(SetVectorType &Worklist,
MachineInstr &Inst, unsigned Opcode,
MachineDominatorTree *MDT) const {
MachineBasicBlock &MBB = *Inst.getParent();
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
@@ -4524,8 +4691,8 @@ void SIInstrInfo::splitScalar64BitBinaryOp(
// Try to legalize the operands in case we need to swap the order to keep it
// valid.
legalizeOperands(LoHalf);
legalizeOperands(HiHalf);
legalizeOperands(LoHalf, MDT);
legalizeOperands(HiHalf, MDT);
// Move all users of this moved value.
addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);

lib/Target/AMDGPU/SIInstrInfo.h

@@ -37,6 +37,7 @@
namespace llvm {
class APInt;
class MachineDominatorTree;
class MachineRegisterInfo;
class RegScavenger;
class GCNSubtarget;
@@ -79,8 +80,8 @@ public:
private:
void swapOperands(MachineInstr &Inst) const;
bool moveScalarAddSub(SetVectorType &Worklist,
MachineInstr &Inst) const;
bool moveScalarAddSub(SetVectorType &Worklist, MachineInstr &Inst,
MachineDominatorTree *MDT = nullptr) const;
void lowerScalarAbs(SetVectorType &Worklist,
MachineInstr &Inst) const;
@@ -91,11 +92,12 @@ private:
void splitScalar64BitUnaryOp(SetVectorType &Worklist,
MachineInstr &Inst, unsigned Opcode) const;
void splitScalar64BitAddSub(SetVectorType &Worklist,
MachineInstr &Inst) const;
void splitScalar64BitAddSub(SetVectorType &Worklist, MachineInstr &Inst,
MachineDominatorTree *MDT = nullptr) const;
void splitScalar64BitBinaryOp(SetVectorType &Worklist,
MachineInstr &Inst, unsigned Opcode) const;
void splitScalar64BitBinaryOp(SetVectorType &Worklist, MachineInstr &Inst,
unsigned Opcode,
MachineDominatorTree *MDT = nullptr) const;
void splitScalar64BitBCNT(SetVectorType &Worklist,
MachineInstr &Inst) const;
@@ -798,14 +800,16 @@ public:
MachineOperand &Op, MachineRegisterInfo &MRI,
const DebugLoc &DL) const;
/// Legalize all operands in this instruction. This function may
/// create new instructions and insert them before \p MI.
void legalizeOperands(MachineInstr &MI) const;
/// Legalize all operands in this instruction. This function may create new
/// instructions and control-flow around \p MI. If present, \p MDT is
/// updated.
void legalizeOperands(MachineInstr &MI,
MachineDominatorTree *MDT = nullptr) const;
/// Replace this instruction's opcode with the equivalent VALU
/// opcode. This function will also move the users of \p MI to the
/// VALU if necessary.
void moveToVALU(MachineInstr &MI) const;
/// VALU if necessary. If present, \p MDT is updated.
void moveToVALU(MachineInstr &MI, MachineDominatorTree *MDT = nullptr) const;
void insertWaitStates(MachineBasicBlock &MBB,MachineBasicBlock::iterator MI,
int Count) const;
@@ -932,6 +936,12 @@ namespace AMDGPU {
LLVM_READONLY
int getAddr64Inst(uint16_t Opcode);
/// Check if \p Opcode is an Addr64 opcode.
///
/// \returns \p Opcode if it is an Addr64 opcode, otherwise -1.
LLVM_READONLY
int getIfAddr64Inst(uint16_t Opcode);
LLVM_READONLY
int getMUBUFNoLdsInst(uint16_t Opcode);

lib/Target/AMDGPU/SIInstrInfo.td

@@ -1983,6 +1983,14 @@ def getAddr64Inst : InstrMapping {
let ValueCols = [["1"]];
}
def getIfAddr64Inst : InstrMapping {
let FilterClass = "MUBUFAddr64Table";
let RowFields = ["OpName"];
let ColFields = ["IsAddr64"];
let KeyCol = ["1"];
let ValueCols = [["1"]];
}
def getMUBUFNoLdsInst : InstrMapping {
let FilterClass = "MUBUFLdsTable";
let RowFields = ["OpName"];

test/CodeGen/AMDGPU/mubuf-legalize-operands.ll (new file)

@@ -0,0 +1,230 @@
; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -verify-machine-dom-info -o - %s | FileCheck %s
; RUN: llc -O0 -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -verify-machine-dom-info -o - %s | FileCheck %s --check-prefix=CHECK-O0
; Test that we correctly legalize VGPR Rsrc operands in MUBUF instructions.
; CHECK-LABEL: mubuf_vgpr
; CHECK: s_mov_b64 [[SAVEEXEC:s\[[0-9]+:[0-9]+\]]], exec
; CHECK: [[LOOPBB:BB[0-9]+_[0-9]+]]:
; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC0:[0-9]+]], v0
; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC1:[0-9]+]], v1
; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC2:[0-9]+]], v2
; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC3:[0-9]+]], v3
; CHECK: v_cmp_eq_u64_e32 vcc, s{{\[}}[[SRSRC0]]:[[SRSRC1]]{{\]}}, v[0:1]
; CHECK: v_cmp_eq_u64_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], s{{\[}}[[SRSRC2]]:[[SRSRC3]]{{\]}}, v[2:3]
; CHECK: s_and_b64 [[CMP:s\[[0-9]+:[0-9]+\]]], vcc, [[CMP0]]
; CHECK: s_and_saveexec_b64 [[CMP]], [[CMP]]
; CHECK: s_waitcnt vmcnt(0)
; CHECK: buffer_load_format_x [[RES:v[0-9]+]], v4, s{{\[}}[[SRSRC0]]:[[SRSRC3]]{{\]}}, 0 idxen
; CHECK: s_xor_b64 exec, exec, [[CMP]]
; CHECK: s_cbranch_execnz [[LOOPBB]]
; CHECK: s_mov_b64 exec, [[SAVEEXEC]]
; CHECK: v_mov_b32_e32 v0, [[RES]]
define float @mubuf_vgpr(<4 x i32> %i, i32 %c) #0 {
%call = call float @llvm.amdgcn.buffer.load.format.f32(<4 x i32> %i, i32 %c, i32 0, i1 zeroext false, i1 zeroext false) #1
ret float %call
}
; CHECK-LABEL: mubuf_vgpr_adjacent_in_block
; CHECK: s_mov_b64 [[SAVEEXEC:s\[[0-9]+:[0-9]+\]]], exec
; CHECK: [[LOOPBB0:BB[0-9]+_[0-9]+]]:
; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC0:[0-9]+]], v0
; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC1:[0-9]+]], v1
; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC2:[0-9]+]], v2
; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC3:[0-9]+]], v3
; CHECK: v_cmp_eq_u64_e32 vcc, s{{\[}}[[SRSRC0]]:[[SRSRC1]]{{\]}}, v[0:1]
; CHECK: v_cmp_eq_u64_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], s{{\[}}[[SRSRC2]]:[[SRSRC3]]{{\]}}, v[2:3]
; CHECK: s_and_b64 [[CMP:s\[[0-9]+:[0-9]+\]]], vcc, [[CMP0]]
; CHECK: s_and_saveexec_b64 [[CMP]], [[CMP]]
; CHECK: s_waitcnt vmcnt(0)
; CHECK: buffer_load_format_x [[RES0:v[0-9]+]], v8, s{{\[}}[[SRSRC0]]:[[SRSRC3]]{{\]}}, 0 idxen
; CHECK: s_xor_b64 exec, exec, [[CMP]]
; CHECK: s_cbranch_execnz [[LOOPBB0]]
; CHECK: s_mov_b64 exec, [[SAVEEXEC]]
; FIXME: redundant s_mov
; CHECK: s_mov_b64 [[SAVEEXEC:s\[[0-9]+:[0-9]+\]]], exec
; CHECK: [[LOOPBB1:BB[0-9]+_[0-9]+]]:
; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC0:[0-9]+]], v4
; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC1:[0-9]+]], v5
; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC2:[0-9]+]], v6
; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC3:[0-9]+]], v7
; CHECK: v_cmp_eq_u64_e32 vcc, s{{\[}}[[SRSRC0]]:[[SRSRC1]]{{\]}}, v[4:5]
; CHECK: v_cmp_eq_u64_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], s{{\[}}[[SRSRC2]]:[[SRSRC3]]{{\]}}, v[6:7]
; CHECK: s_and_b64 [[CMP:s\[[0-9]+:[0-9]+\]]], vcc, [[CMP0]]
; CHECK: s_and_saveexec_b64 [[CMP]], [[CMP]]
; CHECK: s_waitcnt vmcnt(0)
; CHECK: buffer_load_format_x [[RES1:v[0-9]+]], v8, s{{\[}}[[SRSRC0]]:[[SRSRC3]]{{\]}}, 0 idxen
; CHECK: s_xor_b64 exec, exec, [[CMP]]
; CHECK: s_cbranch_execnz [[LOOPBB1]]
; CHECK: s_mov_b64 exec, [[SAVEEXEC]]
; CHECK-DAG: global_store_dword v[9:10], [[RES0]], off
; CHECK-DAG: global_store_dword v[11:12], [[RES1]], off
define void @mubuf_vgpr_adjacent_in_block(<4 x i32> %i, <4 x i32> %j, i32 %c, float addrspace(1)* %out0, float addrspace(1)* %out1) #0 {
entry:
%val0 = call float @llvm.amdgcn.buffer.load.format.f32(<4 x i32> %i, i32 %c, i32 0, i1 zeroext false, i1 zeroext false) #1
%val1 = call float @llvm.amdgcn.buffer.load.format.f32(<4 x i32> %j, i32 %c, i32 0, i1 zeroext false, i1 zeroext false) #1
store volatile float %val0, float addrspace(1)* %out0
store volatile float %val1, float addrspace(1)* %out1
ret void
}
; CHECK-LABEL: mubuf_vgpr_outside_entry
; CHECK-DAG: v_mov_b32_e32 [[IDX:v[0-9]+]], s4
; CHECK-DAG: s_mov_b64 [[SAVEEXEC:s\[[0-9]+:[0-9]+\]]], exec
; CHECK: [[LOOPBB0:BB[0-9]+_[0-9]+]]:
; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC0:[0-9]+]], v0
; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC1:[0-9]+]], v1
; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC2:[0-9]+]], v2
; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC3:[0-9]+]], v3
; CHECK: v_cmp_eq_u64_e32 vcc, s{{\[}}[[SRSRC0]]:[[SRSRC1]]{{\]}}, v[0:1]
; CHECK: v_cmp_eq_u64_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], s{{\[}}[[SRSRC2]]:[[SRSRC3]]{{\]}}, v[2:3]
; CHECK: s_and_b64 [[CMP:s\[[0-9]+:[0-9]+\]]], vcc, [[CMP0]]
; CHECK: s_and_saveexec_b64 [[CMP]], [[CMP]]
; CHECK: s_waitcnt vmcnt(0)
; CHECK: buffer_load_format_x [[RES:v[0-9]+]], [[IDX]], s{{\[}}[[SRSRC0]]:[[SRSRC3]]{{\]}}, 0 idxen
; CHECK: s_xor_b64 exec, exec, [[CMP]]
; CHECK: s_cbranch_execnz [[LOOPBB0]]
; CHECK: s_mov_b64 exec, [[SAVEEXEC]]
; CHECK: s_cbranch_execz [[TERMBB:BB[0-9]+_[0-9]+]]
; CHECK: BB{{[0-9]+_[0-9]+}}:
; CHECK-DAG: v_mov_b32_e32 [[IDX:v[0-9]+]], s4
; CHECK-DAG: s_mov_b64 [[SAVEEXEC:s\[[0-9]+:[0-9]+\]]], exec
; CHECK: [[LOOPBB1:BB[0-9]+_[0-9]+]]:
; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC0:[0-9]+]], v4
; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC1:[0-9]+]], v5
; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC2:[0-9]+]], v6
; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC3:[0-9]+]], v7
; CHECK: v_cmp_eq_u64_e32 vcc, s{{\[}}[[SRSRC0]]:[[SRSRC1]]{{\]}}, v[4:5]
; CHECK: v_cmp_eq_u64_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], s{{\[}}[[SRSRC2]]:[[SRSRC3]]{{\]}}, v[6:7]
; CHECK: s_and_b64 [[CMP:s\[[0-9]+:[0-9]+\]]], vcc, [[CMP0]]
; CHECK: s_and_saveexec_b64 [[CMP]], [[CMP]]
; CHECK: s_waitcnt vmcnt(0)
; CHECK: buffer_load_format_x [[RES]], [[IDX]], s{{\[}}[[SRSRC0]]:[[SRSRC3]]{{\]}}, 0 idxen
; CHECK: s_xor_b64 exec, exec, [[CMP]]
; CHECK: s_cbranch_execnz [[LOOPBB1]]
; CHECK: s_mov_b64 exec, [[SAVEEXEC]]
; CHECK: [[TERMBB]]:
; CHECK: global_store_dword v[11:12], [[RES]], off
; Confirm spills do not occur between the XOR and branch that terminate the
; waterfall loop BBs.
; CHECK-O0-LABEL: mubuf_vgpr_outside_entry
; CHECK-O0-DAG: s_mov_b32 [[IDX_S:s[0-9]+]], s4
; CHECK-O0-DAG: v_mov_b32_e32 [[IDX_V:v[0-9]+]], [[IDX_S]]
; CHECK-O0-DAG: s_mov_b64 [[SAVEEXEC:s\[[0-9]+:[0-9]+\]]], exec
; CHECK-O0-DAG: buffer_store_dword [[IDX_V]], off, s[0:3], s5 offset:[[IDX_OFF:[0-9]+]] ; 4-byte Folded Spill
; CHECK-O0: [[LOOPBB0:BB[0-9]+_[0-9]+]]:
; CHECK-O0: buffer_load_dword v[[VRSRC0:[0-9]+]], {{.*}} ; 4-byte Folded Reload
; CHECK-O0: s_waitcnt vmcnt(0)
; CHECK-O0: buffer_load_dword v[[VRSRC1:[0-9]+]], {{.*}} ; 4-byte Folded Reload
; CHECK-O0: s_waitcnt vmcnt(0)
; CHECK-O0: buffer_load_dword v[[VRSRC2:[0-9]+]], {{.*}} ; 4-byte Folded Reload
; CHECK-O0: s_waitcnt vmcnt(0)
; CHECK-O0: buffer_load_dword v[[VRSRC3:[0-9]+]], {{.*}} ; 4-byte Folded Reload
; CHECK-O0: s_waitcnt vmcnt(0)
; CHECK-O0-DAG: v_readfirstlane_b32 s[[SRSRCTMP0:[0-9]+]], v[[VRSRC0]]
; CHECK-O0-DAG: v_readfirstlane_b32 s[[SRSRCTMP1:[0-9]+]], v[[VRSRC1]]
; CHECK-O0-DAG: v_readfirstlane_b32 s[[SRSRCTMP2:[0-9]+]], v[[VRSRC2]]
; CHECK-O0-DAG: v_readfirstlane_b32 s[[SRSRCTMP3:[0-9]+]], v[[VRSRC3]]
; CHECK-O0-DAG: s_mov_b32 s[[SRSRC0:[0-9]+]], s[[SRSRCTMP0]]
; CHECK-O0-DAG: s_mov_b32 s[[SRSRC1:[0-9]+]], s[[SRSRCTMP1]]
; CHECK-O0-DAG: s_mov_b32 s[[SRSRC2:[0-9]+]], s[[SRSRCTMP2]]
; CHECK-O0-DAG: s_mov_b32 s[[SRSRC3:[0-9]+]], s[[SRSRCTMP3]]
; CHECK-O0: v_cmp_eq_u64_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], s{{\[}}[[SRSRC0]]:[[SRSRC1]]{{\]}}, v{{\[}}[[VRSRC0]]:[[VRSRC1]]{{\]}}
; CHECK-O0: v_cmp_eq_u64_e64 [[CMP1:s\[[0-9]+:[0-9]+\]]], s{{\[}}[[SRSRC2]]:[[SRSRC3]]{{\]}}, v{{\[}}[[VRSRC2]]:[[VRSRC3]]{{\]}}
; CHECK-O0: s_and_b64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[CMP0]], [[CMP1]]
; CHECK-O0: s_and_saveexec_b64 [[CMP]], [[CMP]]
; CHECK-O0: buffer_load_dword [[IDX:v[0-9]+]], off, s[0:3], s5 offset:[[IDX_OFF]] ; 4-byte Folded Reload
; CHECK-O0: buffer_load_format_x [[RES:v[0-9]+]], [[IDX]], s{{\[}}[[SRSRC0]]:[[SRSRC3]]{{\]}}, {{.*}} idxen
; CHECK-O0: s_waitcnt vmcnt(0)
; CHECK-O0: buffer_store_dword [[RES]], off, s[0:3], s5 offset:[[RES_OFF_TMP:[0-9]+]] ; 4-byte Folded Spill
; CHECK-O0: s_xor_b64 exec, exec, [[CMP]]
; CHECK-O0-NEXT: s_cbranch_execnz [[LOOPBB0]]
; CHECK-O0: s_mov_b64 exec, [[SAVEEXEC]]
; CHECK-O0: buffer_load_dword [[RES:v[0-9]+]], off, s[0:3], s5 offset:[[RES_OFF_TMP]] ; 4-byte Folded Reload
; CHECK-O0: buffer_store_dword [[RES]], off, s[0:3], s5 offset:[[RES_OFF:[0-9]+]] ; 4-byte Folded Spill
; CHECK-O0: s_cbranch_execz [[TERMBB:BB[0-9]+_[0-9]+]]
; CHECK-O0: BB{{[0-9]+_[0-9]+}}:
; CHECK-O0-DAG: s_mov_b64 s{{\[}}[[SAVEEXEC0:[0-9]+]]:[[SAVEEXEC1:[0-9]+]]{{\]}}, exec
; CHECK-O0-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s5 offset:[[IDX_OFF:[0-9]+]] ; 4-byte Folded Spill
; CHECK-O0: v_writelane_b32 [[VSAVEEXEC:v[0-9]+]], s[[SAVEEXEC0]], [[SAVEEXEC_IDX0:[0-9]+]]
; CHECK-O0: v_writelane_b32 [[VSAVEEXEC:v[0-9]+]], s[[SAVEEXEC1]], [[SAVEEXEC_IDX1:[0-9]+]]
; CHECK-O0: [[LOOPBB1:BB[0-9]+_[0-9]+]]:
; CHECK-O0: buffer_load_dword v[[VRSRC0:[0-9]+]], {{.*}} ; 4-byte Folded Reload
; CHECK-O0: s_waitcnt vmcnt(0)
; CHECK-O0: buffer_load_dword v[[VRSRC1:[0-9]+]], {{.*}} ; 4-byte Folded Reload
; CHECK-O0: s_waitcnt vmcnt(0)
; CHECK-O0: buffer_load_dword v[[VRSRC2:[0-9]+]], {{.*}} ; 4-byte Folded Reload
; CHECK-O0: s_waitcnt vmcnt(0)
; CHECK-O0: buffer_load_dword v[[VRSRC3:[0-9]+]], {{.*}} ; 4-byte Folded Reload
; CHECK-O0: s_waitcnt vmcnt(0)
; CHECK-O0-DAG: v_readfirstlane_b32 s[[SRSRCTMP0:[0-9]+]], v[[VRSRC0]]
; CHECK-O0-DAG: v_readfirstlane_b32 s[[SRSRCTMP1:[0-9]+]], v[[VRSRC1]]
; CHECK-O0-DAG: v_readfirstlane_b32 s[[SRSRCTMP2:[0-9]+]], v[[VRSRC2]]
; CHECK-O0-DAG: v_readfirstlane_b32 s[[SRSRCTMP3:[0-9]+]], v[[VRSRC3]]
; CHECK-O0-DAG: s_mov_b32 s[[SRSRC0:[0-9]+]], s[[SRSRCTMP0]]
; CHECK-O0-DAG: s_mov_b32 s[[SRSRC1:[0-9]+]], s[[SRSRCTMP1]]
; CHECK-O0-DAG: s_mov_b32 s[[SRSRC2:[0-9]+]], s[[SRSRCTMP2]]
; CHECK-O0-DAG: s_mov_b32 s[[SRSRC3:[0-9]+]], s[[SRSRCTMP3]]
; CHECK-O0: v_cmp_eq_u64_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], s{{\[}}[[SRSRC0]]:[[SRSRC1]]{{\]}}, v{{\[}}[[VRSRC0]]:[[VRSRC1]]{{\]}}
; CHECK-O0: v_cmp_eq_u64_e64 [[CMP1:s\[[0-9]+:[0-9]+\]]], s{{\[}}[[SRSRC2]]:[[SRSRC3]]{{\]}}, v{{\[}}[[VRSRC2]]:[[VRSRC3]]{{\]}}
; CHECK-O0: s_and_b64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[CMP0]], [[CMP1]]
; CHECK-O0: s_and_saveexec_b64 [[CMP]], [[CMP]]
; CHECK-O0: buffer_load_dword [[IDX:v[0-9]+]], off, s[0:3], s5 offset:[[IDX_OFF]] ; 4-byte Folded Reload
; CHECK-O0: buffer_load_format_x [[RES:v[0-9]+]], [[IDX]], s{{\[}}[[SRSRC0]]:[[SRSRC3]]{{\]}}, {{.*}} idxen
; CHECK-O0: s_waitcnt vmcnt(0)
; CHECK-O0: buffer_store_dword [[RES]], off, s[0:3], s5 offset:[[RES_OFF_TMP:[0-9]+]] ; 4-byte Folded Spill
; CHECK-O0: s_xor_b64 exec, exec, [[CMP]]
; CHECK-O0-NEXT: s_cbranch_execnz [[LOOPBB1]]
; CHECK-O0: v_readlane_b32 s[[SAVEEXEC0:[0-9]+]], [[VSAVEEXEC]], [[SAVEEXEC_IDX0]]
; CHECK-O0: v_readlane_b32 s[[SAVEEXEC1:[0-9]+]], [[VSAVEEXEC]], [[SAVEEXEC_IDX1]]
; CHECK-O0: s_mov_b64 exec, s{{\[}}[[SAVEEXEC0]]:[[SAVEEXEC1]]{{\]}}
; CHECK-O0: buffer_load_dword [[RES:v[0-9]+]], off, s[0:3], s5 offset:[[RES_OFF_TMP]] ; 4-byte Folded Reload
; CHECK-O0: buffer_store_dword [[RES]], off, s[0:3], s5 offset:[[RES_OFF]] ; 4-byte Folded Spill
; CHECK-O0: [[TERMBB]]:
; CHECK-O0: buffer_load_dword [[RES:v[0-9]+]], off, s[0:3], s5 offset:[[RES_OFF]] ; 4-byte Folded Reload
; CHECK-O0: global_store_dword v[{{[0-9]+:[0-9]+}}], [[RES]], off
define void @mubuf_vgpr_outside_entry(<4 x i32> %i, <4 x i32> %j, i32 %c, float addrspace(1)* %in, float addrspace(1)* %out) #0 {
entry:
%live.out.reg = call i32 asm sideeffect "s_mov_b32 $0, 17", "={s4}" ()
%val0 = call float @llvm.amdgcn.buffer.load.format.f32(<4 x i32> %i, i32 %live.out.reg, i32 0, i1 zeroext false, i1 zeroext false) #1
%idx = call i32 @llvm.amdgcn.workitem.id.x() #1
%cmp = icmp eq i32 %idx, 0
br i1 %cmp, label %bb1, label %bb2
bb1:
%val1 = call float @llvm.amdgcn.buffer.load.format.f32(<4 x i32> %j, i32 %live.out.reg, i32 0, i1 zeroext false, i1 zeroext false) #1
br label %bb2
bb2:
%val = phi float [ %val0, %entry ], [ %val1, %bb1 ]
store volatile float %val, float addrspace(1)* %out
ret void
}
declare i32 @llvm.amdgcn.workitem.id.x() #1
declare float @llvm.amdgcn.buffer.load.format.f32(<4 x i32>, i32, i32, i1, i1) #1
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }

test/CodeGen/AMDGPU/mubuf-legalize-operands.mir (new file)

@@ -0,0 +1,239 @@
# RUN: llc -march=amdgcn -mcpu=gfx700 -verify-machineinstrs -verify-machine-dom-info --run-pass=si-fix-sgpr-copies -o - %s | FileCheck %s --check-prefixes=COMMON,ADDR64
# RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -verify-machine-dom-info --run-pass=si-fix-sgpr-copies -o - %s | FileCheck %s --check-prefixes=COMMON,NO-ADDR64
# Test that we correctly legalize VGPR Rsrc operands in MUBUF instructions.
#
# On ADDR64 hardware we optimize the _ADDR64 and _OFFSET cases to avoid
# needing a waterfall. For all other instruction variants, and when we are
# on non-ADDR64 hardware, we emit a waterfall loop.
# COMMON-LABEL: name: idxen
# COMMON-LABEL: bb.0:
# COMMON-NEXT: successors: %bb.1({{.*}})
# COMMON: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
# COMMON: [[SAVEEXEC:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 $exec
# COMMON-LABEL: bb.1:
# COMMON-NEXT: successors: %bb.1({{.*}}), %bb.2({{.*}})
# COMMON: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec
# COMMON: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec
# COMMON: [[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec
# COMMON: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec
# COMMON: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3
# COMMON: [[CMP0:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec
# COMMON: [[CMP1:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec
# COMMON: [[CMP:%[0-9]+]]:sreg_64 = S_AND_B64 [[CMP0]], [[CMP1]], implicit-def $scc
# COMMON: [[TMPEXEC:%[0-9]+]]:sreg_64 = S_AND_SAVEEXEC_B64 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec
# COMMON: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN %4, killed [[SRSRC]], 0, 0, 0, 0, 0, implicit $exec
# COMMON: $exec = S_XOR_B64_term $exec, [[TMPEXEC]], implicit-def $scc
# COMMON: S_CBRANCH_EXECNZ %bb.1, implicit $exec
# COMMON-LABEL: bb.2:
# COMMON: $exec = S_MOV_B64 [[SAVEEXEC]]
---
name: idxen
liveins:
- { reg: '$vgpr0', virtual-reg: '%0' }
- { reg: '$vgpr1', virtual-reg: '%1' }
- { reg: '$vgpr2', virtual-reg: '%2' }
- { reg: '$vgpr3', virtual-reg: '%3' }
- { reg: '$vgpr4', virtual-reg: '%4' }
- { reg: '$sgpr30_sgpr31', virtual-reg: '%5' }
body: |
bb.0:
liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $sgpr30_sgpr31
%5:sreg_64 = COPY $sgpr30_sgpr31
%4:vgpr_32 = COPY $vgpr4
%3:vgpr_32 = COPY $vgpr3
%2:vgpr_32 = COPY $vgpr2
%1:vgpr_32 = COPY $vgpr1
%0:vgpr_32 = COPY $vgpr0
%6:sreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
%7:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN %4, killed %6, 0, 0, 0, 0, 0, implicit $exec
$sgpr30_sgpr31 = COPY %5
$vgpr0 = COPY %7
S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
...
# COMMON-LABEL: name: offen
# COMMON-LABEL: bb.0:
# COMMON-NEXT: successors: %bb.1({{.*}})
# COMMON: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
# COMMON: [[SAVEEXEC:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 $exec
# COMMON-LABEL: bb.1:
# COMMON-NEXT: successors: %bb.1({{.*}}), %bb.2({{.*}})
# COMMON: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec
# COMMON: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec
# COMMON: [[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec
# COMMON: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec
# COMMON: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3
# COMMON: [[CMP0:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec
# COMMON: [[CMP1:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec
# COMMON: [[CMP:%[0-9]+]]:sreg_64 = S_AND_B64 [[CMP0]], [[CMP1]], implicit-def $scc
# COMMON: [[TMPEXEC:%[0-9]+]]:sreg_64 = S_AND_SAVEEXEC_B64 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec
# COMMON: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_OFFEN %4, killed [[SRSRC]], 0, 0, 0, 0, 0, implicit $exec
# COMMON: $exec = S_XOR_B64_term $exec, [[TMPEXEC]], implicit-def $scc
# COMMON: S_CBRANCH_EXECNZ %bb.1, implicit $exec
# COMMON-LABEL: bb.2:
# COMMON: $exec = S_MOV_B64 [[SAVEEXEC]]
---
name: offen
liveins:
- { reg: '$vgpr0', virtual-reg: '%0' }
- { reg: '$vgpr1', virtual-reg: '%1' }
- { reg: '$vgpr2', virtual-reg: '%2' }
- { reg: '$vgpr3', virtual-reg: '%3' }
- { reg: '$vgpr4', virtual-reg: '%4' }
- { reg: '$sgpr30_sgpr31', virtual-reg: '%5' }
body: |
bb.0:
liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $sgpr30_sgpr31
%5:sreg_64 = COPY $sgpr30_sgpr31
%4:vgpr_32 = COPY $vgpr4
%3:vgpr_32 = COPY $vgpr3
%2:vgpr_32 = COPY $vgpr2
%1:vgpr_32 = COPY $vgpr1
%0:vgpr_32 = COPY $vgpr0
%6:sreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
%7:vgpr_32 = BUFFER_LOAD_FORMAT_X_OFFEN %4, killed %6, 0, 0, 0, 0, 0, implicit $exec
$sgpr30_sgpr31 = COPY %5
$vgpr0 = COPY %7
S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
...
# COMMON-LABEL: name: bothen
# COMMON-LABEL: bb.0:
# COMMON-NEXT: successors: %bb.1({{.*}})
# COMMON: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
# COMMON: [[SAVEEXEC:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 $exec
# COMMON-LABEL: bb.1:
# COMMON-NEXT: successors: %bb.1({{.*}}), %bb.2({{.*}})
# COMMON: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec
# COMMON: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec
# COMMON: [[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec
# COMMON: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec
# COMMON: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3
# COMMON: [[CMP0:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec
# COMMON: [[CMP1:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec
# COMMON: [[CMP:%[0-9]+]]:sreg_64 = S_AND_B64 [[CMP0]], [[CMP1]], implicit-def $scc
# COMMON: [[TMPEXEC:%[0-9]+]]:sreg_64 = S_AND_SAVEEXEC_B64 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec
# COMMON: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_BOTHEN %4, killed [[SRSRC]], 0, 0, 0, 0, 0, implicit $exec
# COMMON: $exec = S_XOR_B64_term $exec, [[TMPEXEC]], implicit-def $scc
# COMMON: S_CBRANCH_EXECNZ %bb.1, implicit $exec
# COMMON-LABEL: bb.2:
# COMMON: $exec = S_MOV_B64 [[SAVEEXEC]]
---
name: bothen
liveins:
- { reg: '$vgpr0', virtual-reg: '%0' }
- { reg: '$vgpr1', virtual-reg: '%1' }
- { reg: '$vgpr2', virtual-reg: '%2' }
- { reg: '$vgpr3', virtual-reg: '%3' }
- { reg: '$vgpr4_vgpr5', virtual-reg: '%4' }
- { reg: '$sgpr30_sgpr31', virtual-reg: '%5' }
body: |
bb.0:
liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $sgpr30_sgpr31
%5:sreg_64 = COPY $sgpr30_sgpr31
%4:vreg_64 = COPY $vgpr4_vgpr5
%3:vgpr_32 = COPY $vgpr3
%2:vgpr_32 = COPY $vgpr2
%1:vgpr_32 = COPY $vgpr1
%0:vgpr_32 = COPY $vgpr0
%6:sreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
%7:vgpr_32 = BUFFER_LOAD_FORMAT_X_BOTHEN %4, killed %6, 0, 0, 0, 0, 0, implicit $exec
$sgpr30_sgpr31 = COPY %5
$vgpr0 = COPY %7
S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
...
# COMMON-LABEL: name: addr64
# COMMON-LABEL: bb.0:
# COMMON: %12:vreg_64 = COPY %8.sub0_sub1
# COMMON: %13:sreg_64 = S_MOV_B64 0
# COMMON: %14:sgpr_32 = S_MOV_B32 0
# COMMON: %15:sgpr_32 = S_MOV_B32 61440
# COMMON: %16:sreg_128 = REG_SEQUENCE %13, %subreg.sub0_sub1, %14, %subreg.sub2, %15, %subreg.sub3
# COMMON: %9:vgpr_32 = V_ADD_I32_e32 %12.sub0, %4.sub0, implicit-def $vcc, implicit $exec
# COMMON: %10:vgpr_32 = V_ADDC_U32_e32 %12.sub1, %4.sub1, implicit-def $vcc, implicit $vcc, implicit $exec
# COMMON: %11:vreg_64 = REG_SEQUENCE %9, %subreg.sub0, %10, %subreg.sub1
# COMMON: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_ADDR64 %11, killed %16, 0, 0, 0, 0, 0, implicit $exec
---
name: addr64
liveins:
- { reg: '$vgpr0', virtual-reg: '%0' }
- { reg: '$vgpr1', virtual-reg: '%1' }
- { reg: '$vgpr2', virtual-reg: '%2' }
- { reg: '$vgpr3', virtual-reg: '%3' }
- { reg: '$vgpr4_vgpr5', virtual-reg: '%4' }
- { reg: '$sgpr30_sgpr31', virtual-reg: '%5' }
body: |
bb.0:
liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $sgpr30_sgpr31
%5:sreg_64 = COPY $sgpr30_sgpr31
%4:vreg_64 = COPY $vgpr4_vgpr5
%3:vgpr_32 = COPY $vgpr3
%2:vgpr_32 = COPY $vgpr2
%1:vgpr_32 = COPY $vgpr1
%0:vgpr_32 = COPY $vgpr0
%6:sreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
%7:vgpr_32 = BUFFER_LOAD_FORMAT_X_ADDR64 %4, killed %6, 0, 0, 0, 0, 0, implicit $exec
$sgpr30_sgpr31 = COPY %5
$vgpr0 = COPY %7
S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
...
# COMMON-LABEL: name: offset
# COMMON-LABEL: bb.0:
# NO-ADDR64-NEXT: successors: %bb.1({{.*}})
# NO-ADDR64: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
# NO-ADDR64: [[SAVEEXEC:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 $exec
# NO-ADDR64-LABEL: bb.1:
# NO-ADDR64-NEXT: successors: %bb.1({{.*}}), %bb.2({{.*}})
# NO-ADDR64: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec
# NO-ADDR64: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec
# NO-ADDR64: [[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec
# NO-ADDR64: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec
# NO-ADDR64: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3
# NO-ADDR64: [[CMP0:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec
# NO-ADDR64: [[CMP1:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec
# NO-ADDR64: [[CMP:%[0-9]+]]:sreg_64 = S_AND_B64 [[CMP0]], [[CMP1]], implicit-def $scc
# NO-ADDR64: [[TMPEXEC:%[0-9]+]]:sreg_64 = S_AND_SAVEEXEC_B64 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec
# NO-ADDR64: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_OFFSET killed [[SRSRC]], 0, 0, 0, 0, 0, implicit $exec
# NO-ADDR64: $exec = S_XOR_B64_term $exec, [[TMPEXEC]], implicit-def $scc
# NO-ADDR64: S_CBRANCH_EXECNZ %bb.1, implicit $exec
# NO-ADDR64-LABEL: bb.2:
# NO-ADDR64: $exec = S_MOV_B64 [[SAVEEXEC]]
# ADDR64: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
# ADDR64: [[RSRCPTR:%[0-9]+]]:vreg_64 = COPY [[VRSRC]].sub0_sub1
# ADDR64: [[ZERO64:%[0-9]+]]:sreg_64 = S_MOV_B64 0
# ADDR64: [[RSRCFMTLO:%[0-9]+]]:sgpr_32 = S_MOV_B32 0
# ADDR64: [[RSRCFMTHI:%[0-9]+]]:sgpr_32 = S_MOV_B32 61440
# ADDR64: [[ZERORSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[ZERO64]], %subreg.sub0_sub1, [[RSRCFMTLO]], %subreg.sub2, [[RSRCFMTHI]], %subreg.sub3
# ADDR64: [[VADDR64:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[RSRCPTR]].sub0, %subreg.sub0, [[RSRCPTR]].sub1, %subreg.sub1
# ADDR64: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_ADDR64 [[VADDR64]], [[ZERORSRC]], 0, 0, 0, 0, 0, implicit $exec
---
name: offset
liveins:
- { reg: '$vgpr0', virtual-reg: '%0' }
- { reg: '$vgpr1', virtual-reg: '%1' }
- { reg: '$vgpr2', virtual-reg: '%2' }
- { reg: '$vgpr3', virtual-reg: '%3' }
- { reg: '$vgpr4_vgpr5', virtual-reg: '%4' }
- { reg: '$sgpr30_sgpr31', virtual-reg: '%5' }
body: |
bb.0:
liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $sgpr30_sgpr31
%5:sreg_64 = COPY $sgpr30_sgpr31
%4:vreg_64 = COPY $vgpr4_vgpr5
%3:vgpr_32 = COPY $vgpr3
%2:vgpr_32 = COPY $vgpr2
%1:vgpr_32 = COPY $vgpr1
%0:vgpr_32 = COPY $vgpr0
%6:sreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
%7:vgpr_32 = BUFFER_LOAD_FORMAT_X_OFFSET killed %6, 0, 0, 0, 0, 0, implicit $exec
$sgpr30_sgpr31 = COPY %5
$vgpr0 = COPY %7
S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
...