Mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2024-11-22 18:54:02 +01:00)
[CodeGen] Use ProcResGroup information in SchedBoundary
When a ProcResGroup has BufferSize=0:

1. If a subunit of the group appears in the scheduling class's list of write resources, do not attempt to schedule the ProcResGroup itself.
2. If no subunit of the group appears in the scheduling class's list of write resources, choose a subunit to use in place of the ProcResGroup.
3. Having both the ProcResGroup and any of its subunits among the resources implied by an InstRW is not supported.

This is used to model parallel uses from a pool of resources.

Differential Revision: https://reviews.llvm.org/D98976
This commit is contained in:
parent ee87f9f4e2
commit 8fe435e31e
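As an illustration of the policy above, here is a small standalone sketch. Everything in it (ProcResource, groupReadyCycle, the LoadPool example data) is a simplified stand-in invented for this example, not the MachineScheduler or TargetSchedModel API; it only mirrors rules 1 and 2 of the commit message.

// Toy sketch of rules 1 and 2 above; simplified stand-ins, not LLVM's real API.
#include <algorithm>
#include <cstdio>
#include <vector>

struct ProcResource {
  const char *Name;
  std::vector<unsigned> SubUnits; // non-empty => this resource is a group
  int BufferSize;                 // 0 => unbuffered, in-order reservation
};

// A group is handled specially only when it is unbuffered and has subunits.
static bool isUnbufferedGroup(const ProcResource &R) {
  return !R.SubUnits.empty() && R.BufferSize == 0;
}

// Rule 1: if the scheduling class also writes one of the group's subunits,
// report the group as free at cycle 0 so only the subunits drive hazarding.
// Rule 2: otherwise pick the earliest-available subunit in place of the group.
static unsigned groupReadyCycle(const ProcResource &Group,
                                const std::vector<unsigned> &WrittenResources,
                                const std::vector<unsigned> &NextFreeCycle) {
  for (unsigned Written : WrittenResources)
    if (std::find(Group.SubUnits.begin(), Group.SubUnits.end(), Written) !=
        Group.SubUnits.end())
      return 0;                                   // rule 1
  unsigned Best = ~0u;
  for (unsigned Sub : Group.SubUnits)             // rule 2
    Best = std::min(Best, NextFreeCycle[Sub]);
  return Best;
}

int main() {
  // Resource indices 0 and 1 are the subunits; index 2 is the unbuffered group.
  ProcResource Group{"LoadPool", {0, 1}, /*BufferSize=*/0};
  std::vector<unsigned> NextFreeCycle = {3, 1, 0}; // next free cycle per index

  if (!isUnbufferedGroup(Group))
    return 0; // buffered groups keep the ordinary scheduling path

  // A wide operation writes both subunits directly: the group entry is ignored.
  std::printf("wide op:   ready at cycle %u\n",
              groupReadyCycle(Group, {0, 1}, NextFreeCycle)); // prints 0
  // A narrow operation writes only the group: the best subunit stands in.
  std::printf("narrow op: ready at cycle %u\n",
              groupReadyCycle(Group, {2}, NextFreeCycle));    // prints 1
  return 0;
}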
llvm/include/llvm/CodeGen/MachineScheduler.h

@@ -74,6 +74,7 @@
 #ifndef LLVM_CODEGEN_MACHINESCHEDULER_H
 #define LLVM_CODEGEN_MACHINESCHEDULER_H
 
+#include "llvm/ADT/APInt.h"
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/ADT/BitVector.h"
 #include "llvm/ADT/STLExtras.h"
@@ -674,6 +675,9 @@ private:
   // it.
   SmallVector<unsigned, 16> ReservedCyclesIndex;
 
+  // For each PIdx, stores the resource group IDs of its subunits
+  SmallVector<APInt, 16> ResourceGroupSubUnitMasks;
+
 #ifndef NDEBUG
   // Remember the greatest possible stall as an upper bound on the number of
   // times we should retry the pending queue because of a hazard.
@@ -751,9 +755,15 @@ public:
   unsigned getNextResourceCycleByInstance(unsigned InstanceIndex,
                                           unsigned Cycles);
 
-  std::pair<unsigned, unsigned> getNextResourceCycle(unsigned PIdx,
+  std::pair<unsigned, unsigned> getNextResourceCycle(const MCSchedClassDesc *SC,
+                                                     unsigned PIdx,
                                                      unsigned Cycles);
 
+  bool isUnbufferedGroup(unsigned PIdx) const {
+    return SchedModel->getProcResource(PIdx)->SubUnitsIdxBegin &&
+           !SchedModel->getProcResource(PIdx)->BufferSize;
+  }
+
   bool checkHazard(SUnit *SU);
 
   unsigned findMaxLatency(ArrayRef<SUnit*> ReadySUs);
@@ -775,7 +785,8 @@ public:
 
   void incExecutedResources(unsigned PIdx, unsigned Count);
 
-  unsigned countResource(unsigned PIdx, unsigned Cycles, unsigned ReadyCycle);
+  unsigned countResource(const MCSchedClassDesc *SC, unsigned PIdx,
+                         unsigned Cycles, unsigned ReadyCycle);
 
   void bumpNode(SUnit *SU);
 
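The new ResourceGroupSubUnitMasks member caches, for each resource index, a bitmask of that group's subunits, so the membership test in getNextResourceCycle is a single bit lookup. Below is a rough standalone illustration of that caching step, using a plain uint64_t where the patch uses APInt; the tables and names are invented example data, not LLVM code.

// Standalone illustration of caching per-group subunit masks; std::uint64_t
// stands in for llvm::APInt and the tables are made-up example data.
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  // SubUnits[i] lists the subunit indices of resource i (empty if i is a unit).
  std::vector<std::vector<unsigned>> SubUnits = {{}, {}, {0, 1}};

  // Build one mask per resource, mirroring what SchedBoundary::init does with
  // ResourceGroupSubUnitMasks.
  std::vector<std::uint64_t> SubUnitMask(SubUnits.size(), 0);
  for (unsigned I = 0; I < SubUnits.size(); ++I)
    for (unsigned Sub : SubUnits[I])
      SubUnitMask[I] |= std::uint64_t(1) << Sub;

  // The membership test becomes a single bit probe, analogous to
  // ResourceGroupSubUnitMasks[PIdx][PE.ProcResourceIdx] in the patch.
  unsigned GroupIdx = 2, WrittenIdx = 1;
  bool UsesSubUnit = (SubUnitMask[GroupIdx] >> WrittenIdx) & 1;
  std::printf("resource %u is%s a subunit of group %u\n", WrittenIdx,
              UsesSubUnit ? "" : " not", GroupIdx);
  return 0;
}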
llvm/lib/CodeGen/MachineScheduler.cpp

@@ -2004,6 +2004,7 @@ void SchedBoundary::reset() {
   IsResourceLimited = false;
   ReservedCycles.clear();
   ReservedCyclesIndex.clear();
+  ResourceGroupSubUnitMasks.clear();
 #ifndef NDEBUG
   // Track the maximum number of stall cycles that could arise either from the
   // latency of a DAG edge or the number of cycles that a processor resource is
@@ -2045,11 +2046,18 @@ init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
   unsigned ResourceCount = SchedModel->getNumProcResourceKinds();
   ReservedCyclesIndex.resize(ResourceCount);
   ExecutedResCounts.resize(ResourceCount);
+  ResourceGroupSubUnitMasks.resize(ResourceCount, APInt(ResourceCount, 0));
   unsigned NumUnits = 0;
 
   for (unsigned i = 0; i < ResourceCount; ++i) {
     ReservedCyclesIndex[i] = NumUnits;
     NumUnits += SchedModel->getProcResource(i)->NumUnits;
+    if (isUnbufferedGroup(i)) {
+      auto SubUnits = SchedModel->getProcResource(i)->SubUnitsIdxBegin;
+      for (unsigned U = 0, UE = SchedModel->getProcResource(i)->NumUnits;
+           U != UE; ++U)
+        ResourceGroupSubUnitMasks[i].setBit(SubUnits[U]);
+    }
   }
 
   ReservedCycles.resize(NumUnits, InvalidCycle);
@@ -2091,7 +2099,9 @@ unsigned SchedBoundary::getNextResourceCycleByInstance(unsigned InstanceIdx,
 /// scheduled. Returns the next cycle and the index of the processor resource
 /// instance in the reserved cycles vector.
 std::pair<unsigned, unsigned>
-SchedBoundary::getNextResourceCycle(unsigned PIdx, unsigned Cycles) {
+SchedBoundary::getNextResourceCycle(const MCSchedClassDesc *SC, unsigned PIdx,
+                                    unsigned Cycles) {
+
   unsigned MinNextUnreserved = InvalidCycle;
   unsigned InstanceIdx = 0;
   unsigned StartIndex = ReservedCyclesIndex[PIdx];
@@ -2099,6 +2109,35 @@ SchedBoundary::getNextResourceCycle(unsigned PIdx, unsigned Cycles) {
   assert(NumberOfInstances > 0 &&
          "Cannot have zero instances of a ProcResource");
 
+  if (isUnbufferedGroup(PIdx)) {
+    // If any subunits are used by the instruction, report that the resource
+    // group is available at 0, effectively removing the group record from
+    // hazarding and basing the hazarding decisions on the subunit records.
+    // Otherwise, choose the first available instance from among the subunits.
+    // Specifications which assign cycles to both the subunits and the group or
+    // which use an unbuffered group with buffered subunits will appear to
+    // schedule strangely. In the first case, the additional cycles for the
+    // group will be ignored. In the second, the group will be ignored
+    // entirely.
+    for (const MCWriteProcResEntry &PE :
+         make_range(SchedModel->getWriteProcResBegin(SC),
+                    SchedModel->getWriteProcResEnd(SC)))
+      if (ResourceGroupSubUnitMasks[PIdx][PE.ProcResourceIdx])
+        return std::make_pair(0u, StartIndex);
+
+    auto SubUnits = SchedModel->getProcResource(PIdx)->SubUnitsIdxBegin;
+    for (unsigned I = 0, End = NumberOfInstances; I < End; ++I) {
+      unsigned NextUnreserved, NextInstanceIdx;
+      std::tie(NextUnreserved, NextInstanceIdx) =
+          getNextResourceCycle(SC, SubUnits[I], Cycles);
+      if (MinNextUnreserved > NextUnreserved) {
+        InstanceIdx = NextInstanceIdx;
+        MinNextUnreserved = NextUnreserved;
+      }
+    }
+    return std::make_pair(MinNextUnreserved, InstanceIdx);
+  }
+
   for (unsigned I = StartIndex, End = StartIndex + NumberOfInstances; I < End;
        ++I) {
     unsigned NextUnreserved = getNextResourceCycleByInstance(I, Cycles);
@@ -2152,7 +2191,7 @@ bool SchedBoundary::checkHazard(SUnit *SU) {
     unsigned ResIdx = PE.ProcResourceIdx;
     unsigned Cycles = PE.Cycles;
     unsigned NRCycle, InstanceIdx;
-    std::tie(NRCycle, InstanceIdx) = getNextResourceCycle(ResIdx, Cycles);
+    std::tie(NRCycle, InstanceIdx) = getNextResourceCycle(SC, ResIdx, Cycles);
    if (NRCycle > CurrCycle) {
 #ifndef NDEBUG
       MaxObservedStall = std::max(Cycles, MaxObservedStall);
@@ -2302,8 +2341,8 @@ void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) {
 ///
 /// \return the next cycle at which the instruction may execute without
 /// oversubscribing resources.
-unsigned SchedBoundary::
-countResource(unsigned PIdx, unsigned Cycles, unsigned NextCycle) {
+unsigned SchedBoundary::countResource(const MCSchedClassDesc *SC, unsigned PIdx,
+                                      unsigned Cycles, unsigned NextCycle) {
   unsigned Factor = SchedModel->getResourceFactor(PIdx);
   unsigned Count = Factor * Cycles;
   LLVM_DEBUG(dbgs() << "  " << SchedModel->getResourceName(PIdx) << " +"
@@ -2325,7 +2364,7 @@ countResource(unsigned PIdx, unsigned Cycles, unsigned NextCycle) {
   }
   // For reserved resources, record the highest cycle using the resource.
   unsigned NextAvailable, InstanceIdx;
-  std::tie(NextAvailable, InstanceIdx) = getNextResourceCycle(PIdx, Cycles);
+  std::tie(NextAvailable, InstanceIdx) = getNextResourceCycle(SC, PIdx, Cycles);
   if (NextAvailable > CurrCycle) {
     LLVM_DEBUG(dbgs() << "  Resource conflict: "
                       << SchedModel->getResourceName(PIdx)
@@ -2405,7 +2444,7 @@ void SchedBoundary::bumpNode(SUnit *SU) {
          PI = SchedModel->getWriteProcResBegin(SC),
          PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
       unsigned RCycle =
-          countResource(PI->ProcResourceIdx, PI->Cycles, NextCycle);
+          countResource(SC, PI->ProcResourceIdx, PI->Cycles, NextCycle);
       if (RCycle > NextCycle)
         NextCycle = RCycle;
     }
@@ -2420,7 +2459,8 @@ void SchedBoundary::bumpNode(SUnit *SU) {
       unsigned PIdx = PI->ProcResourceIdx;
       if (SchedModel->getProcResource(PIdx)->BufferSize == 0) {
         unsigned ReservedUntil, InstanceIdx;
-        std::tie(ReservedUntil, InstanceIdx) = getNextResourceCycle(PIdx, 0);
+        std::tie(ReservedUntil, InstanceIdx) =
+            getNextResourceCycle(SC, PIdx, 0);
         if (isTop()) {
           ReservedCycles[InstanceIdx] =
               std::max(ReservedUntil, NextCycle + PI->Cycles);
@@ -22,9 +22,9 @@ body: |
     ; CHECK-LABEL: name: test_groups
     ; CHECK: liveins: $d0, $r0, $r1, $r2, $r3, $r4
     ; CHECK: renamable $d0 = VADDD killed renamable $d0, renamable $d0, 14 /* CC::al */, $noreg
-    ; CHECK: renamable $r3 = t2ADDrr killed renamable $r3, renamable $r3, 14 /* CC::al */, $noreg, $noreg
     ; CHECK: renamable $s2 = VLDRS killed renamable $r0, 0, 14 /* CC::al */, $noreg
     ; CHECK: VSTRS killed renamable $s2, killed renamable $r1, 0, 14 /* CC::al */, $noreg
+    ; CHECK: renamable $r3 = t2ADDrr killed renamable $r3, renamable $r3, 14 /* CC::al */, $noreg, $noreg
     ; CHECK: t2STRi12 killed renamable $r3, killed renamable $r2, 0, 14 /* CC::al */, $noreg
     ; CHECK: renamable $r4 = t2ADDrr killed renamable $r4, renamable $r4, 14 /* CC::al */, $noreg, $noreg
     ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit killed $d0