[AMDGPU] Order pos exports before param exports

Summary:
Modify the export clustering DAG mutation to move position exports
before other export types.

Reviewers: foad, arsenm, rampitec, nhaehnle

Reviewed By: foad

Subscribers: kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, hiraditya, kerbowa, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D79670
Carl Ritson 2020-05-12 23:02:05 +09:00
parent 0cc42d0b13
commit af9e638ad5
3 changed files with 112 additions and 32 deletions
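For context, export clustering is implemented as a ScheduleDAGMutation that the AMDGPU backend registers with the machine scheduler; the scheduler runs each mutation's apply() after the dependence DAG is built, which is where the ordering edges in this patch are added. Below is a minimal sketch of that registration pattern. It is simplified and not the verbatim AMDGPU code; the scheduler factory shown is hypothetical, while createAMDGPUExportClusteringDAGMutation() is the backend's real factory for the mutation modified here.

#include "llvm/CodeGen/MachineScheduler.h"

using namespace llvm;

// Sketch: build a scheduling DAG and attach the export clustering mutation.
// The scheduler calls each registered mutation's apply() once the DAG is
// constructed, before scheduling decisions are made.
static ScheduleDAGInstrs *
createExportClusteringScheduler(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
      new ScheduleDAGMILive(C, std::make_unique<GenericScheduler>(C));
  // Factory declared in the backend's AMDGPUExportClustering.h.
  DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
  return DAG;
}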


@@ -32,53 +32,87 @@ static bool isExport(const SUnit &SU) {
          MI->getOpcode() == AMDGPU::EXP_DONE;
 }
 
+static bool isPositionExport(const SIInstrInfo *TII, SUnit *SU) {
+  const MachineInstr *MI = SU->getInstr();
+  int Imm = TII->getNamedOperand(*MI, AMDGPU::OpName::tgt)->getImm();
+  return Imm >= 12 && Imm <= 15;
+}
+
+static void sortChain(const SIInstrInfo *TII, SmallVector<SUnit *, 8> &Chain,
+                      unsigned PosCount) {
+  if (!PosCount || PosCount == Chain.size())
+    return;
+
+  // Position exports should occur as soon as possible in the shader
+  // for optimal performance. This moves position exports before
+  // other exports while preserving the order within different export
+  // types (pos or other).
+  SmallVector<SUnit *, 8> Copy(Chain);
+  unsigned PosIdx = 0;
+  unsigned OtherIdx = PosCount;
+  for (SUnit *SU : Copy) {
+    if (isPositionExport(TII, SU))
+      Chain[PosIdx++] = SU;
+    else
+      Chain[OtherIdx++] = SU;
+  }
+}
+
 static void buildCluster(ArrayRef<SUnit *> Exports, ScheduleDAGInstrs *DAG) {
-  // Cluster a series of exports. Also copy all dependencies to the first
-  // export to avoid computation being inserted into the chain.
-  SUnit *ChainHead = Exports[0];
+  SUnit *ChainHead = Exports.front();
 
+  // Now construct cluster from chain by adding new edges.
   for (unsigned Idx = 0, End = Exports.size() - 1; Idx < End; ++Idx) {
     SUnit *SUa = Exports[Idx];
     SUnit *SUb = Exports[Idx + 1];
-    if (DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {
-      for (const SDep &Pred : SUb->Preds) {
-        SUnit *PredSU = Pred.getSUnit();
-        if (Pred.isWeak() || isExport(*PredSU))
-          continue;
-        DAG->addEdge(ChainHead, SDep(PredSU, SDep::Artificial));
-      }
+
+    // Copy all dependencies to the head of the chain to avoid any
+    // computation being inserted into the chain.
+    for (const SDep &Pred : SUb->Preds) {
+      SUnit *PredSU = Pred.getSUnit();
+      if (!isExport(*PredSU) && !Pred.isWeak())
+        DAG->addEdge(ChainHead, SDep(PredSU, SDep::Artificial));
     }
+
+    // New barrier edge ordering exports
+    DAG->addEdge(SUb, SDep(SUa, SDep::Barrier));
+    // Also add cluster edge
+    DAG->addEdge(SUb, SDep(SUa, SDep::Cluster));
   }
 }
 
 void ExportClustering::apply(ScheduleDAGInstrs *DAG) {
-  SmallVector<SmallVector<SUnit *, 8>, 4> ExportChains;
-  DenseMap<unsigned, unsigned> ChainMap;
+  const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(DAG->TII);
 
-  // Build chains of exports
+  SmallVector<SUnit *, 8> Chain;
+
+  // Pass through DAG gathering a list of exports and removing barrier edges
+  // creating dependencies on exports. Freeing exports of successor edges
+  // allows more scheduling freedom, and nothing should be order dependent
+  // on exports. Edges will be added later to order the exports.
+  unsigned PosCount = 0;
   for (SUnit &SU : DAG->SUnits) {
-    if (!isExport(SU))
-      continue;
-
-    unsigned ChainID = ExportChains.size();
-    for (const SDep &Pred : SU.Preds) {
-      const SUnit &PredSU = *Pred.getSUnit();
-      if (isExport(PredSU) && !Pred.isArtificial()) {
-        ChainID = ChainMap.lookup(PredSU.NodeNum);
-        break;
-      }
+    if (isExport(SU)) {
+      Chain.push_back(&SU);
+      if (isPositionExport(TII, &SU))
+        PosCount++;
     }
-    ChainMap[SU.NodeNum] = ChainID;
 
-    if (ChainID == ExportChains.size())
-      ExportChains.push_back(SmallVector<SUnit *, 8>());
-
-    auto &Chain = ExportChains[ChainID];
-    Chain.push_back(&SU);
+    SmallVector<SDep, 2> ToRemove;
+    for (const SDep &Pred : SU.Preds) {
+      SUnit *PredSU = Pred.getSUnit();
+      if (Pred.isBarrier() && isExport(*PredSU))
+        ToRemove.push_back(Pred);
+    }
+    for (SDep Pred : ToRemove)
+      SU.removePred(Pred);
   }
 
-  // Apply clustering
-  for (auto &Chain : ExportChains)
+  // Apply clustering if there are multiple exports
+  if (Chain.size() > 1) {
+    sortChain(TII, Chain, PosCount);
     buildCluster(Chain, DAG);
+  }
 }
 
 } // end namespace
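Aside (not part of the patch): sortChain above is a stable two-way partition, so position exports move to the front of the chain while the relative order inside each group is preserved. The same reordering shown standalone with std::stable_partition, using a hypothetical Export struct as a stand-in for SUnit:

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

// Hypothetical stand-in for an export SUnit; Tgt mirrors the EXP target
// immediate that isPositionExport() checks (12-15 are position exports).
struct Export {
  std::string Name;
  int Tgt;
};

int main() {
  std::vector<Export> Chain = {
      {"param0", 32}, {"pos0", 12}, {"param1", 33}, {"pos1", 13}};

  // Stable partition: pos exports move ahead of the others while the order
  // within each group is unchanged, matching sortChain's two write cursors.
  std::stable_partition(Chain.begin(), Chain.end(), [](const Export &E) {
    return E.Tgt >= 12 && E.Tgt <= 15;
  });

  for (const Export &E : Chain)
    std::cout << E.Name << '\n'; // prints: pos0 pos1 param0 param1
}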


@@ -3,6 +3,7 @@
 declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #1
 declare void @llvm.amdgcn.exp.i32(i32, i32, i32, i32, i32, i32, i1, i1) #1
+declare float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32>, i32, i32, i32) #2
 
 ; GCN-LABEL: {{^}}test_export_zeroes_f32:
 ; GCN: exp mrt0 off, off, off, off{{$}}
@@ -557,5 +558,50 @@ define amdgpu_kernel void @test_export_clustering(float %x, float %y) #0 {
   ret void
 }
 
+; GCN-LABEL: {{^}}test_export_pos_before_param:
+; GCN: exp pos0
+; GCN-NOT: s_waitcnt
+; GCN: exp param0
+define amdgpu_kernel void @test_export_pos_before_param(float %x, float %y) #0 {
+  %z0 = fadd float %x, %y
+  call void @llvm.amdgcn.exp.f32(i32 32, i32 15, float 1.0, float 1.0, float 1.0, float %z0, i1 false, i1 false)
+  %z1 = fsub float %y, %x
+  call void @llvm.amdgcn.exp.f32(i32 12, i32 15, float 0.0, float 0.0, float 0.0, float %z1, i1 true, i1 false)
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_export_pos_before_param_ordered:
+; GCN: exp pos0
+; GCN: exp pos1
+; GCN: exp pos2
+; GCN-NOT: s_waitcnt
+; GCN: exp param0
+; GCN: exp param1
+; GCN: exp param2
+define amdgpu_kernel void @test_export_pos_before_param_ordered(float %x, float %y) #0 {
+  %z0 = fadd float %x, %y
+  call void @llvm.amdgcn.exp.f32(i32 32, i32 15, float 1.0, float 1.0, float 1.0, float %z0, i1 false, i1 false)
+  call void @llvm.amdgcn.exp.f32(i32 33, i32 15, float 1.0, float 1.0, float 1.0, float %z0, i1 false, i1 false)
+  call void @llvm.amdgcn.exp.f32(i32 34, i32 15, float 1.0, float 1.0, float 1.0, float %z0, i1 false, i1 false)
+  %z1 = fsub float %y, %x
+  call void @llvm.amdgcn.exp.f32(i32 12, i32 15, float 0.0, float 0.0, float 0.0, float %z1, i1 false, i1 false)
+  call void @llvm.amdgcn.exp.f32(i32 13, i32 15, float 0.0, float 0.0, float 0.0, float %z1, i1 false, i1 false)
+  call void @llvm.amdgcn.exp.f32(i32 14, i32 15, float 0.0, float 0.0, float 0.0, float %z1, i1 true, i1 false)
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_export_pos_before_param_across_load:
+; GCN: exp pos0
+; GCN-NEXT: exp param0
+; GCN-NEXT: exp param1
+define amdgpu_kernel void @test_export_pos_before_param_across_load(i32 %idx) #0 {
+  call void @llvm.amdgcn.exp.f32(i32 32, i32 15, float 1.0, float 1.0, float 1.0, float 1.0, i1 false, i1 false)
+  call void @llvm.amdgcn.exp.f32(i32 33, i32 15, float 1.0, float 1.0, float 1.0, float 0.5, i1 false, i1 false)
+  %load = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> undef, i32 %idx, i32 0, i32 0)
+  call void @llvm.amdgcn.exp.f32(i32 12, i32 15, float 0.0, float 0.0, float 0.0, float %load, i1 true, i1 false)
+  ret void
+}
+
 attributes #0 = { nounwind }
 attributes #1 = { nounwind inaccessiblememonly }
+attributes #2 = { nounwind readnone }
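To read these tests: the first operand of llvm.amdgcn.exp.f32 is the export target, so i32 12 lowers to exp pos0 and i32 32 to exp param0. The relevant target values from the GCN EXP encoding, written out as an illustrative enum (the enum name and layout are hypothetical; the values match the ISA encoding):

// Export target immediates as used by the tests above.
enum GCNExportTarget {
  ET_MRT0 = 0,    // color exports mrt0..mrt7 occupy targets 0-7
  ET_MRTZ = 8,    // depth export
  ET_NULL = 9,    // null export
  ET_POS0 = 12,   // position exports pos0..pos3 occupy targets 12-15
  ET_POS3 = 15,
  ET_PARAM0 = 32  // parameter exports param0 onward start at target 32
};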


@@ -10,8 +10,8 @@
 ; DEFAULT: s_waitcnt lgkmcnt(0)
 ; DEFAULT: buffer_load_format_xyzw
 ; DEFAULT: buffer_load_format_xyzw
-; DEFAULT: s_waitcnt vmcnt(0)
-; DEFAULT: exp
+; DEFAULT-DAG: s_waitcnt vmcnt(0)
+; DEFAULT-DAG: exp
 ; DEFAULT: exp
 ; DEFAULT-NEXT: s_endpgm
 define amdgpu_vs void @main(<16 x i8> addrspace(4)* inreg %arg, <16 x i8> addrspace(4)* inreg %arg1, <32 x i8> addrspace(4)* inreg %arg2, <16 x i8> addrspace(4)* inreg %arg3, <16 x i8> addrspace(4)* inreg %arg4, i32 inreg %arg5, i32 %arg6, i32 %arg7, i32 %arg8, i32 %arg9, float addrspace(4)* inreg %constptr) #0 {