
AMDGPU: Remove v0 workaround for DS_GWS_* instructions

Any register should work for the src field since r366067, as the used
value is not pulled from the expected encoding field.

llvm-svn: 367598
Matt Arsenault 2019-08-01 18:41:32 +00:00
parent 5e1524bdd5
commit 55161e45ee
4 changed files with 25 additions and 55 deletions
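
The first file below drops the selector-side half of the workaround: the GWS data operand used to be pinned to the physical register v0 through a glued CopyToReg, and is now passed through unchanged. A condensed before/after sketch, reconstructed from the hunks that follow (N, CurDAG, SL, VSrc0, HasVSrc, and Ops belong to AMDGPUDAGToDAGISel::SelectDS_GWS and are not redeclared here):

    // Before: force the value into $vgpr0 and thread the copy's chain and
    // glue through the selected node.
    SDValue V0 = CurDAG->getRegister(AMDGPU::VGPR0, MVT::i32);
    SDValue CopyToV0 = CurDAG->getCopyToReg(
        N->getOperand(0), SL, V0, VSrc0,
        N->getOperand(N->getNumOperands() - 1));
    if (HasVSrc)
      Ops.push_back(V0);

    // After: pass the original operand directly. Since r366067 the value is
    // read from the operand itself rather than the encoded src field, so any
    // VGPR is acceptable.
    if (HasVSrc)
      Ops.push_back(N->getOperand(2));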

lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp

@@ -2189,22 +2189,7 @@ void AMDGPUDAGToDAGISel::SelectDS_GWS(SDNode *N, unsigned IntrID) {
     glueCopyToM0(N, SDValue(M0Base, 0));
   }
 
-  SDValue V0;
   SDValue Chain = N->getOperand(0);
-  SDValue Glue;
-  if (HasVSrc) {
-    SDValue VSrc0 = N->getOperand(2);
-
-    // The manual doesn't mention this, but it seems only v0 works.
-    V0 = CurDAG->getRegister(AMDGPU::VGPR0, MVT::i32);
-
-    SDValue CopyToV0 = CurDAG->getCopyToReg(
-        N->getOperand(0), SL, V0, VSrc0,
-        N->getOperand(N->getNumOperands() - 1));
-    Chain = CopyToV0;
-    Glue = CopyToV0.getValue(1);
-  }
-
   SDValue OffsetField = CurDAG->getTargetConstant(ImmOffset, SL, MVT::i32);
 
   // TODO: Can this just be removed from the instruction?
@@ -2213,14 +2198,11 @@ void AMDGPUDAGToDAGISel::SelectDS_GWS(SDNode *N, unsigned IntrID) {
   const unsigned Opc = gwsIntrinToOpcode(IntrID);
   SmallVector<SDValue, 5> Ops;
   if (HasVSrc)
-    Ops.push_back(V0);
+    Ops.push_back(N->getOperand(2));
   Ops.push_back(OffsetField);
   Ops.push_back(GDS);
   Ops.push_back(Chain);
-  if (HasVSrc)
-    Ops.push_back(Glue);
 
   SDNode *Selected = CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
   CurDAG->setNodeMemRefs(cast<MachineSDNode>(Selected), {MMO});
 }

lib/Target/AMDGPU/SIISelLowering.cpp

@@ -3096,12 +3096,13 @@ SITargetLowering::emitGWSMemViolTestLoop(MachineInstr &MI,
   MachineBasicBlock *RemainderBB;
   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
 
-  MachineBasicBlock::iterator Prev = std::prev(MI.getIterator());
+  // Apparently kill flags are only valid if the def is in the same block?
+  if (MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::data0))
+    Src->setIsKill(false);
 
   std::tie(LoopBB, RemainderBB) = splitBlockForLoop(MI, *BB, true);
 
   MachineBasicBlock::iterator I = LoopBB->end();
-  MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::data0);
 
   const unsigned EncodedReg = AMDGPU::Hwreg::encodeHwreg(
     AMDGPU::Hwreg::ID_TRAPSTS, AMDGPU::Hwreg::OFFSET_MEM_VIOL, 1);
@@ -3111,19 +3112,6 @@ SITargetLowering::emitGWSMemViolTestLoop(MachineInstr &MI,
     .addImm(0)
     .addImm(EncodedReg);
 
-  // This is a pain, but we're not allowed to have physical register live-ins
-  // yet. Insert a pair of copies if the VGPR0 hack is necessary.
-  if (Src && TargetRegisterInfo::isPhysicalRegister(Src->getReg())) {
-    unsigned Data0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
-    BuildMI(*BB, std::next(Prev), DL, TII->get(AMDGPU::COPY), Data0)
-      .add(*Src);
-
-    BuildMI(*LoopBB, LoopBB->begin(), DL, TII->get(AMDGPU::COPY), Src->getReg())
-      .addReg(Data0);
-
-    MRI.setSimpleHint(Data0, Src->getReg());
-  }
-
   bundleInstWithWaitcnt(MI);
 
   unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
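
The deleted block above was the machine-level half of the same workaround: because physical-register live-ins are not allowed at this point, the value in $vgpr0 had to be shuttled through a fresh virtual register around the splitBlockForLoop boundary. With a virtual-register data operand that dance is unnecessary, and only the kill flag must be cleared, since a kill flag is not valid once the def ends up in a different block than the use. A condensed before/after sketch (MI, TII, MRI, Src, and the BuildMI arguments are the function's own, taken from the hunks above):

    // Before: copy $vgpr0 into a virtual register before the split and back
    // into $vgpr0 at the top of the loop block, with a register hint to make
    // the copies coalescable.
    unsigned Data0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BuildMI(*BB, std::next(Prev), DL, TII->get(AMDGPU::COPY), Data0)
      .add(*Src);
    BuildMI(*LoopBB, LoopBB->begin(), DL, TII->get(AMDGPU::COPY), Src->getReg())
      .addReg(Data0);
    MRI.setSimpleHint(Data0, Src->getReg());

    // After: the operand is already a virtual register, so clearing its kill
    // flag across the new block boundary is sufficient.
    if (MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::data0))
      Src->setIsKill(false);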

test/CodeGen/AMDGPU/llvm.amdgcn.ds.gws.barrier.ll

@@ -26,8 +26,8 @@
 ; LOOP-NEXT: s_cbranch_scc1 [[LOOP]]
 
 ; MIR-LABEL: name: gws_barrier_offset0{{$}}
-; MIR: BUNDLE implicit{{( killed)?}} $vgpr0, implicit $m0, implicit $exec {
-; MIR-NEXT: DS_GWS_BARRIER $vgpr0, 0, -1, implicit $m0, implicit $exec :: (load 4 from custom GWSResource)
+; MIR: BUNDLE implicit{{( killed)?( renamable)?}} $vgpr0, implicit $m0, implicit $exec {
+; MIR-NEXT: DS_GWS_BARRIER renamable $vgpr0, 0, -1, implicit $m0, implicit $exec :: (load 4 from custom GWSResource)
 ; MIR-NEXT: S_WAITCNT 0
 ; MIR-NEXT: }
 define amdgpu_kernel void @gws_barrier_offset0(i32 %val) #0 {
@@ -53,8 +53,8 @@ define amdgpu_kernel void @gws_barrier_offset63(i32 %val) #0 {
 ; NOLOOP-DAG: s_load_dwordx2 s{{\[}}[[BAR_NUM:[0-9]+]]:[[OFFSET:[0-9]+]]{{\]}}
 ; NOLOOP-DAG: s_lshl_b32 [[SHL:s[0-9]+]], s[[OFFSET]], 16
 ; NOLOOP-DAG: s_mov_b32 m0, [[SHL]]{{$}}
-; NOLOOP-DAG: v_mov_b32_e32 v0, s[[BAR_NUM]]
-; NOLOOP: ds_gws_barrier v0 gds{{$}}
+; NOLOOP-DAG: v_mov_b32_e32 [[GWS_VAL:v[0-9]+]], s[[BAR_NUM]]
+; NOLOOP: ds_gws_barrier [[GWS_VAL]] gds{{$}}
 define amdgpu_kernel void @gws_barrier_sgpr_offset(i32 %val, i32 %offset) #0 {
   call void @llvm.amdgcn.ds.gws.barrier(i32 %val, i32 %offset)
   ret void
@@ -65,8 +65,8 @@ define amdgpu_kernel void @gws_barrier_sgpr_offset(i32 %val, i32 %offset) #0 {
 ; NOLOOP-DAG: s_load_dwordx2 s{{\[}}[[BAR_NUM:[0-9]+]]:[[OFFSET:[0-9]+]]{{\]}}
 ; NOLOOP-DAG: s_lshl_b32 [[SHL:s[0-9]+]], s[[OFFSET]], 16
 ; NOLOOP-DAG: s_mov_b32 m0, [[SHL]]{{$}}
-; NOLOOP-DAG: v_mov_b32_e32 v0, s[[BAR_NUM]]
-; NOLOOP: ds_gws_barrier v0 offset:1 gds{{$}}
+; NOLOOP-DAG: v_mov_b32_e32 [[GWS_VAL:v[0-9]+]], s[[BAR_NUM]]
+; NOLOOP: ds_gws_barrier [[GWS_VAL]] offset:1 gds{{$}}
 define amdgpu_kernel void @gws_barrier_sgpr_offset_add1(i32 %val, i32 %offset.base) #0 {
   %offset = add i32 %offset.base, 1
   call void @llvm.amdgcn.ds.gws.barrier(i32 %val, i32 %offset)
@@ -78,8 +78,8 @@ define amdgpu_kernel void @gws_barrier_sgpr_offset_add1(i32 %val, i32 %offset.base) #0 {
 ; NOLOOP-DAG: v_readfirstlane_b32 [[READLANE:s[0-9]+]], v0
 ; NOLOOP-DAG: s_lshl_b32 [[SHL:s[0-9]+]], [[READLANE]], 16
 ; NOLOOP-DAG: s_mov_b32 m0, [[SHL]]{{$}}
-; NOLOOP-DAG: v_mov_b32_e32 v0, [[BAR_NUM]]
-; NOLOOP: ds_gws_barrier v0 gds{{$}}
+; NOLOOP-DAG: v_mov_b32_e32 [[GWS_VAL:v[0-9]+]], [[BAR_NUM]]
+; NOLOOP: ds_gws_barrier [[GWS_VAL]] gds{{$}}
 define amdgpu_kernel void @gws_barrier_vgpr_offset(i32 %val) #0 {
   %vgpr.offset = call i32 @llvm.amdgcn.workitem.id.x()
   call void @llvm.amdgcn.ds.gws.barrier(i32 %val, i32 %vgpr.offset)
@@ -92,8 +92,8 @@ define amdgpu_kernel void @gws_barrier_vgpr_offset(i32 %val) #0 {
 ; NOLOOP-DAG: v_readfirstlane_b32 [[READLANE:s[0-9]+]], v0
 ; NOLOOP-DAG: s_lshl_b32 [[SHL:s[0-9]+]], [[READLANE]], 16
 ; NOLOOP-DAG: s_mov_b32 m0, [[SHL]]{{$}}
-; NOLOOP-DAG: v_mov_b32_e32 v0, [[BAR_NUM]]
-; NOLOOP: ds_gws_barrier v0 offset:3 gds{{$}}
+; NOLOOP-DAG: v_mov_b32_e32 [[GWS_VAL:v[0-9]+]], [[BAR_NUM]]
+; NOLOOP: ds_gws_barrier [[GWS_VAL]] offset:3 gds{{$}}
 define amdgpu_kernel void @gws_barrier_vgpr_offset_add(i32 %val) #0 {
   %vgpr.offset.base = call i32 @llvm.amdgcn.workitem.id.x()
   %vgpr.offset = add i32 %vgpr.offset.base, 3
@@ -106,13 +106,13 @@ define amdgpu_kernel void @gws_barrier_vgpr_offset_add(i32 %val) #0 {
 ; Check if m0 initialization is shared
 ; GCN-LABEL: {{^}}gws_barrier_save_m0_barrier_constant_offset:
 ; NOLOOP: s_mov_b32 m0, 0
-; NOLOOP: ds_gws_barrier v0 offset:10 gds
+; NOLOOP: ds_gws_barrier v{{[0-9]+}} offset:10 gds
 
 ; LOOP: s_mov_b32 m0, -1
 ; LOOP: ds_write_b32
 ; LOOP: s_mov_b32 m0, 0
 ; LOOP: s_setreg_imm32_b32
-; LOOP: ds_gws_barrier v0 offset:10 gds
+; LOOP: ds_gws_barrier v{{[0-9]+}} offset:10 gds
 ; LOOP: s_cbranch_scc1
 
 ; LOOP: s_mov_b32 m0, -1
@@ -147,7 +147,7 @@ define amdgpu_kernel void @gws_barrier_wait_before(i32 %val, i32 addrspace(1)* %ptr) #0 {
 
 ; GCN-LABEL: {{^}}gws_barrier_wait_after:
 ; NOLOOP: s_mov_b32 m0, 0{{$}}
-; NOLOOP: ds_gws_barrier v0 offset:7 gds
+; NOLOOP: ds_gws_barrier v{{[0-9]+}} offset:7 gds
 ; NOLOOP-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; NOLOOP-NEXT: load_dword
 define amdgpu_kernel void @gws_barrier_wait_after(i32 %val, i32 addrspace(1)* %ptr) #0 {
@@ -161,7 +161,7 @@ define amdgpu_kernel void @gws_barrier_wait_after(i32 %val, i32 addrspace(1)* %ptr) #0 {
 ; NOLOOP: s_mov_b32 m0, 0{{$}}
 ; NOLOOP: store_dword
 ; NOLOOP: s_waitcnt vmcnt(0) lgkmcnt(0)
-; NOLOOP: ds_gws_barrier v0 offset:7 gds
+; NOLOOP: ds_gws_barrier v{{[0-9]+}} offset:7 gds
 ; NOLOOP-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 define amdgpu_kernel void @gws_barrier_fence_before(i32 %val, i32 addrspace(1)* %ptr) #0 {
   store i32 0, i32 addrspace(1)* %ptr
@@ -173,7 +173,7 @@ define amdgpu_kernel void @gws_barrier_fence_before(i32 %val, i32 addrspace(1)* %ptr) #0 {
 ; FIXME: Extra waitcnt
 ; GCN-LABEL: {{^}}gws_barrier_fence_after:
 ; NOLOOP: s_mov_b32 m0, 0{{$}}
-; NOLOOP: ds_gws_barrier v0 offset:7 gds
+; NOLOOP: ds_gws_barrier v{{[0-9]+}} offset:7 gds
 ; NOLOOP-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; NOLOOP-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0

test/CodeGen/AMDGPU/llvm.amdgcn.ds.gws.init.ll

@@ -49,8 +49,8 @@ define amdgpu_kernel void @gws_init_offset63(i32 %val) #0 {
 ; NOLOOP-DAG: s_load_dwordx2 s{{\[}}[[BAR_NUM:[0-9]+]]:[[OFFSET:[0-9]+]]{{\]}}
 ; NOLOOP-DAG: s_lshl_b32 [[SHL:s[0-9]+]], s[[OFFSET]], 16
 ; NOLOOP-DAG: s_mov_b32 m0, [[SHL]]{{$}}
-; NOLOOP-DAG: v_mov_b32_e32 v0, s[[BAR_NUM]]
-; NOLOOP: ds_gws_init v0 gds{{$}}
+; NOLOOP-DAG: v_mov_b32_e32 [[GWS_VAL:v[0-9]+]], s[[BAR_NUM]]
+; NOLOOP: ds_gws_init [[GWS_VAL]] gds{{$}}
 define amdgpu_kernel void @gws_init_sgpr_offset(i32 %val, i32 %offset) #0 {
   call void @llvm.amdgcn.ds.gws.init(i32 %val, i32 %offset)
   ret void
@@ -61,8 +61,8 @@ define amdgpu_kernel void @gws_init_sgpr_offset(i32 %val, i32 %offset) #0 {
 ; NOLOOP-DAG: s_load_dwordx2 s{{\[}}[[BAR_NUM:[0-9]+]]:[[OFFSET:[0-9]+]]{{\]}}
 ; NOLOOP-DAG: s_lshl_b32 [[SHL:s[0-9]+]], s[[OFFSET]], 16
 ; NOLOOP-DAG: s_mov_b32 m0, [[SHL]]{{$}}
-; NOLOOP-DAG: v_mov_b32_e32 v0, s[[BAR_NUM]]
-; NOLOOP: ds_gws_init v0 offset:1 gds{{$}}
+; NOLOOP-DAG: v_mov_b32_e32 [[GWS_VAL:v[0-9]+]], s[[BAR_NUM]]
+; NOLOOP: ds_gws_init [[GWS_VAL]] offset:1 gds{{$}}
 define amdgpu_kernel void @gws_init_sgpr_offset_add1(i32 %val, i32 %offset.base) #0 {
   %offset = add i32 %offset.base, 1
   call void @llvm.amdgcn.ds.gws.init(i32 %val, i32 %offset)
@@ -102,13 +102,13 @@ define amdgpu_kernel void @gws_init_vgpr_offset_add(i32 %val) #0 {
 ; Check if m0 initialization is shared.
 ; GCN-LABEL: {{^}}gws_init_save_m0_init_constant_offset:
 ; NOLOOP: s_mov_b32 m0, 0
-; NOLOOP: ds_gws_init v0 offset:10 gds
+; NOLOOP: ds_gws_init v{{[0-9]+}} offset:10 gds
 
 ; LOOP: s_mov_b32 m0, -1
 ; LOOP: ds_write_b32
 ; LOOP: s_mov_b32 m0, 0
 ; LOOP: s_setreg_imm32_b32
-; LOOP: ds_gws_init v0 offset:10 gds
+; LOOP: ds_gws_init v{{[0-9]+}} offset:10 gds
 ; LOOP: s_cbranch_scc1
 
 ; LOOP: s_mov_b32 m0, -1