
Factor out more code for computing register live-range information for
scheduling, and generalize it so that it preserves state across
scheduling regions. This fixes incorrect live-range information around
terminators and labels, which effectively act as region boundaries.

In place of looking for terminators to anchor inter-block dependencies,
introduce special entry and exit scheduling units for this purpose.

llvm-svn: 64254
Dan Gohman 2009-02-10 23:27:53 +00:00
parent 60571be0de
commit f2b9543ee5
8 changed files with 544 additions and 377 deletions
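At a high level, the patch turns post-RA scheduling into a per-block protocol: initialize liveness once per block, schedule each region, and keep the liveness state current across the boundaries in between. A condensed sketch of that driver pattern, simplified from the runOnMachineFunction loop shown below (iterator bookkeeping elided):

    // Per-block scheduling protocol introduced by this commit.
    for (MachineFunction::iterator MBB = Fn.begin(); MBB != Fn.end(); ++MBB) {
      Scheduler.StartBlock(MBB);           // seed live-range state for the block
      // For each region between scheduling boundaries (walking bottom-up):
      //   Scheduler.Run(0, MBB, RegionBegin, RegionEnd);
      //   Scheduler.EmitSchedule();
      // For each boundary instruction that is skipped rather than scheduled:
      //   Scheduler.Observe(BoundaryMI);   // keep liveness state current
      Scheduler.FinishBlock();             // tear down live-range state
    }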

View File

@@ -282,6 +282,16 @@ namespace llvm {
isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0),
CopyDstRC(NULL), CopySrcRC(NULL) {}
/// SUnit - Construct a placeholder SUnit.
SUnit()
: Node(0), Instr(0), OrigNode(0), NodeNum(~0u), NodeQueueId(0),
Latency(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0), NumSuccsLeft(0),
isTwoAddress(false), isCommutable(false), hasPhysRegDefs(false),
isPending(false), isAvailable(false), isScheduled(false),
isScheduleHigh(false), isCloned(false),
isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0),
CopyDstRC(NULL), CopySrcRC(NULL) {}
/// setNode - Assign the representative SDNode for this SUnit.
/// This may be used during pre-regalloc scheduling.
void setNode(SDNode *N) {
@@ -430,6 +440,8 @@ namespace llvm {
std::vector<SUnit*> Sequence; // The schedule. Null SUnit*'s
// represent noop instructions.
std::vector<SUnit> SUnits; // The scheduling units.
SUnit EntrySU; // Special node for the region entry.
SUnit ExitSU; // Special node for the region exit.
explicit ScheduleDAG(MachineFunction &mf);
@@ -446,6 +458,9 @@ namespace llvm {
MachineBasicBlock::iterator Begin,
MachineBasicBlock::iterator End);
/// EmitSchedule - Insert MachineInstrs into the MachineBasicBlock
/// according to the order specified in Sequence.
///
virtual MachineBasicBlock *EmitSchedule() = 0;
void dumpSchedule() const;

View File

@@ -94,6 +94,25 @@ namespace {
/// HazardRec - The hazard recognizer to use.
ScheduleHazardRecognizer *HazardRec;
/// Classes - For live regs that are only used in one register class in a
/// live range, the register class. If the register is not live, the
/// corresponding value is null. If the register is live but used in
/// multiple register classes, the corresponding value is -1 casted to a
/// pointer.
const TargetRegisterClass *
Classes[TargetRegisterInfo::FirstVirtualRegister];
/// RegRefs - Map registers to all their references within a live range.
std::multimap<unsigned, MachineOperand *> RegRefs;
/// The index of the most recent kill (proceeding bottom-up), or ~0u if
/// the register is not live.
unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];
/// The index of the most recent complete def (proceeding bottom-up), or ~0u
/// if the register is live.
unsigned DefIndices[TargetRegisterInfo::FirstVirtualRegister];
public:
SchedulePostRATDList(MachineFunction &MF,
const MachineLoopInfo &MLI,
@@ -107,10 +126,29 @@ namespace {
delete HazardRec;
}
/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void StartBlock(MachineBasicBlock *BB);
/// Schedule - Schedule the instruction range using list scheduling.
///
void Schedule();
/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void Observe(MachineInstr *MI);
/// FinishBlock - Clean up register live-range state.
///
void FinishBlock();
private:
void PrescanInstruction(MachineInstr *MI);
void ScanInstruction(MachineInstr *MI, unsigned Count);
void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
void ReleaseSuccessors(SUnit *SU);
void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
void ListScheduleTopDown();
bool BreakAntiDependencies();
@@ -173,6 +211,19 @@ namespace {
};
}
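The Classes/KillIndices/DefIndices encoding above admits a simple liveness test. A hypothetical helper, not part of the patch, just to make the invariant explicit (proceeding bottom-up, a register is live exactly when it has a recorded kill and no completed def below it):

    // Hypothetical helper (not in the patch): the two index arrays are kept
    // complementary, so either test alone identifies a live register.
    bool IsLive(unsigned Reg) const {
      return KillIndices[Reg] != ~0u && DefIndices[Reg] == ~0u;
    }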
/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
///
static bool isSchedulingBoundary(const MachineInstr *MI,
const MachineFunction &MF) {
// Terminators and labels can't be scheduled around.
if (MI->getDesc().isTerminator() || MI->isLabel())
return true;
return false;
}
bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
DOUT << "PostRAScheduler\n";
@@ -187,131 +238,51 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
// Loop over all of the basic blocks
for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
MBB != MBBe; ++MBB) {
// Initialize register live-range state for scheduling in this block.
Scheduler.StartBlock(MBB);
// Schedule each sequence of instructions not interrupted by a label
// or anything else that effectively needs to shut down scheduling.
MachineBasicBlock::iterator Current = MBB->end(), Top = MBB->begin();
for (MachineBasicBlock::iterator I = Current; I != Top; ) {
MachineInstr *MI = --I;
if (MI->getDesc().isTerminator() || MI->isLabel()) {
Scheduler.Run(0, MBB, next(I), Current);
Scheduler.EmitSchedule();
Current = I;
MachineBasicBlock::iterator Current = MBB->end();
for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
MachineInstr *MI = prior(I);
if (isSchedulingBoundary(MI, Fn)) {
if (I != Current) {
Scheduler.Run(0, MBB, I, Current);
Scheduler.EmitSchedule();
}
Scheduler.Observe(MI);
Current = MI;
}
I = MI;
}
Scheduler.Run(0, MBB, Top, Current);
Scheduler.Run(0, MBB, MBB->begin(), Current);
Scheduler.EmitSchedule();
// Clean up register live-range state.
Scheduler.FinishBlock();
}
return true;
}
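To make the backward walk concrete, here is a worked example with hypothetical block contents:

    //   %A = ...        <- region 2
    //   %B = ...        <- region 2
    //   EH_LABEL        <- boundary: Observe()d, never scheduled
    //   %C = ...        <- region 1
    //   %D = ...        <- region 1
    //   BRANCH          <- boundary: Observe()d, never scheduled
    //
    // Walking bottom-up, the loop first schedules region 1 ([%C, %D]),
    // observes the label, then schedules region 2 ([%A, %B]). StartBlock
    // and FinishBlock bracket both regions, so KillIndices/DefIndices
    // stay consistent across the label -- the bug this commit fixes.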
/// Schedule - Schedule the DAG using list scheduling.
void SchedulePostRATDList::Schedule() {
DOUT << "********** List Scheduling **********\n";
// Build the scheduling graph.
BuildSchedGraph();
if (EnableAntiDepBreaking) {
if (BreakAntiDependencies()) {
// We made changes. Update the dependency graph.
// Theoretically we could update the graph in place:
// When a live range is changed to use a different register, remove
// the def's anti-dependence *and* output-dependence edges due to
// that register, and add new anti-dependence and output-dependence
// edges based on the next live range of the register.
SUnits.clear();
BuildSchedGraph();
}
}
AvailableQueue.initNodes(SUnits);
ListScheduleTopDown();
AvailableQueue.releaseState();
}
/// getInstrOperandRegClass - Return register class of the operand of an
/// instruction of the specified TargetInstrDesc.
static const TargetRegisterClass*
getInstrOperandRegClass(const TargetRegisterInfo *TRI,
const TargetInstrDesc &II, unsigned Op) {
if (Op >= II.getNumOperands())
return NULL;
if (II.OpInfo[Op].isLookupPtrRegClass())
return TRI->getPointerRegClass();
return TRI->getRegClass(II.OpInfo[Op].RegClass);
}
/// CriticalPathStep - Return the next SUnit after SU on the bottom-up
/// critical path.
static SDep *CriticalPathStep(SUnit *SU) {
SDep *Next = 0;
unsigned NextDepth = 0;
// Find the predecessor edge with the greatest depth.
for (SUnit::pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
P != PE; ++P) {
SUnit *PredSU = P->getSUnit();
unsigned PredLatency = P->getLatency();
unsigned PredTotalLatency = PredSU->getDepth() + PredLatency;
// In the case of a latency tie, prefer an anti-dependency edge over
// other types of edges.
if (NextDepth < PredTotalLatency ||
(NextDepth == PredTotalLatency && P->getKind() == SDep::Anti)) {
NextDepth = PredTotalLatency;
Next = &*P;
}
}
return Next;
}
/// BreakAntiDependencies - Identify anti-dependencies along the critical path
/// of the ScheduleDAG and break them by renaming registers.
/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
bool SchedulePostRATDList::BreakAntiDependencies() {
// The code below assumes that there is at least one instruction,
// so just duck out immediately if the block is empty.
if (SUnits.empty()) return false;
void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
// Call the superclass.
ScheduleDAGInstrs::StartBlock(BB);
// Find the node at the bottom of the critical path.
SUnit *Max = 0;
for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
SUnit *SU = &SUnits[i];
if (!Max || SU->getDepth() + SU->Latency > Max->getDepth() + Max->Latency)
Max = SU;
}
// Clear out the register class data.
std::fill(Classes, array_endof(Classes),
static_cast<const TargetRegisterClass *>(0));
DOUT << "Critical path has total latency "
<< (Max->getDepth() + Max->Latency) << "\n";
// Track progress along the critical path through the SUnit graph as we walk
// the instructions.
SUnit *CriticalPathSU = Max;
MachineInstr *CriticalPathMI = CriticalPathSU->getInstr();
// For live regs that are only used in one register class in a live range,
// the register class. If the register is not live, the corresponding value
// is null. If the register is live but used in multiple register classes,
// the corresponding value is -1 casted to a pointer.
const TargetRegisterClass *
Classes[TargetRegisterInfo::FirstVirtualRegister] = {};
// Map registers to all their references within a live range.
std::multimap<unsigned, MachineOperand *> RegRefs;
// The index of the most recent kill (proceeding bottom-up), or ~0u if
// the register is not live.
unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];
// Initialize the indices to indicate that no registers are live.
std::fill(KillIndices, array_endof(KillIndices), ~0u);
// The index of the most recent complete def (proceeding bottom-up), or ~0u if
// the register is live.
unsigned DefIndices[TargetRegisterInfo::FirstVirtualRegister];
std::fill(DefIndices, array_endof(DefIndices), BB->size());
// Determine the live-out physregs for this block.
if (BB->back().getDesc().isReturn())
if (!BB->empty() && BB->back().getDesc().isReturn())
// In a return block, examine the function live-out regs.
for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
E = MRI.liveout_end(); I != E; ++I) {
@@ -368,6 +339,217 @@ bool SchedulePostRATDList::BreakAntiDependencies() {
DefIndices[AliasReg] = ~0u;
}
}
}
/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::Schedule() {
DOUT << "********** List Scheduling **********\n";
// Build the scheduling graph.
BuildSchedGraph();
if (EnableAntiDepBreaking) {
if (BreakAntiDependencies()) {
// We made changes. Update the dependency graph.
// Theoretically we could update the graph in place:
// When a live range is changed to use a different register, remove
// the def's anti-dependence *and* output-dependence edges due to
// that register, and add new anti-dependence and output-dependence
// edges based on the next live range of the register.
SUnits.clear();
EntrySU = SUnit();
ExitSU = SUnit();
BuildSchedGraph();
}
}
AvailableQueue.initNodes(SUnits);
ListScheduleTopDown();
AvailableQueue.releaseState();
}
/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI) {
PrescanInstruction(MI);
ScanInstruction(MI, 0);
}
/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::FinishBlock() {
RegRefs.clear();
// Call the superclass.
ScheduleDAGInstrs::FinishBlock();
}
/// getInstrOperandRegClass - Return register class of the operand of an
/// instruction of the specified TargetInstrDesc.
static const TargetRegisterClass*
getInstrOperandRegClass(const TargetRegisterInfo *TRI,
const TargetInstrDesc &II, unsigned Op) {
if (Op >= II.getNumOperands())
return NULL;
if (II.OpInfo[Op].isLookupPtrRegClass())
return TRI->getPointerRegClass();
return TRI->getRegClass(II.OpInfo[Op].RegClass);
}
/// CriticalPathStep - Return the next SUnit after SU on the bottom-up
/// critical path.
static SDep *CriticalPathStep(SUnit *SU) {
SDep *Next = 0;
unsigned NextDepth = 0;
// Find the predecessor edge with the greatest depth.
for (SUnit::pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
P != PE; ++P) {
SUnit *PredSU = P->getSUnit();
unsigned PredLatency = P->getLatency();
unsigned PredTotalLatency = PredSU->getDepth() + PredLatency;
// In the case of a latency tie, prefer an anti-dependency edge over
// other types of edges.
if (NextDepth < PredTotalLatency ||
(NextDepth == PredTotalLatency && P->getKind() == SDep::Anti)) {
NextDepth = PredTotalLatency;
Next = &*P;
}
}
return Next;
}
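BreakAntiDependencies (below) uses this helper to track its position on the critical path while walking instructions bottom-up. Condensed from the logic that appears later in that function:

    // Step from the bottom of the critical path toward the top; at each
    // instruction, the edge just stepped over (if it is an anti-dependency)
    // becomes a candidate for breaking.
    SUnit *CriticalPathSU = Max;
    while (SDep *Edge = CriticalPathStep(CriticalPathSU))
      CriticalPathSU = Edge->getSUnit();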
void SchedulePostRATDList::PrescanInstruction(MachineInstr *MI) {
// Scan the register operands for this instruction and update
// Classes and RegRefs.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
const TargetRegisterClass *NewRC =
getInstrOperandRegClass(TRI, MI->getDesc(), i);
// For now, only allow the register to be changed if its register
// class is consistent across all uses.
if (!Classes[Reg] && NewRC)
Classes[Reg] = NewRC;
else if (!NewRC || Classes[Reg] != NewRC)
Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
// Now check for aliases.
for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
// If an alias of the reg is used during the live range, give up.
// Note that this allows us to skip checking if AntiDepReg
// overlaps with any of the aliases, among other things.
unsigned AliasReg = *Alias;
if (Classes[AliasReg]) {
Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
}
}
// If we're still willing to consider this register, note the reference.
if (Classes[Reg] != reinterpret_cast<TargetRegisterClass *>(-1))
RegRefs.insert(std::make_pair(Reg, &MO));
}
}
void SchedulePostRATDList::ScanInstruction(MachineInstr *MI,
unsigned Count) {
// Update liveness.
// Proceeding upwards, registers that are def'd but not used in this
// instruction are now dead.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
if (!MO.isDef()) continue;
// Ignore two-addr defs.
if (MI->isRegReDefinedByTwoAddr(i)) continue;
DefIndices[Reg] = Count;
KillIndices[Reg] = ~0u;
Classes[Reg] = 0;
RegRefs.erase(Reg);
// Repeat, for all subregs.
for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
*Subreg; ++Subreg) {
unsigned SubregReg = *Subreg;
DefIndices[SubregReg] = Count;
KillIndices[SubregReg] = ~0u;
Classes[SubregReg] = 0;
RegRefs.erase(SubregReg);
}
// Conservatively mark super-registers as unusable.
for (const unsigned *Super = TRI->getSuperRegisters(Reg);
*Super; ++Super) {
unsigned SuperReg = *Super;
Classes[SuperReg] = reinterpret_cast<TargetRegisterClass *>(-1);
}
}
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
if (!MO.isUse()) continue;
const TargetRegisterClass *NewRC =
getInstrOperandRegClass(TRI, MI->getDesc(), i);
// For now, only allow the register to be changed if its register
// class is consistent across all uses.
if (!Classes[Reg] && NewRC)
Classes[Reg] = NewRC;
else if (!NewRC || Classes[Reg] != NewRC)
Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
RegRefs.insert(std::make_pair(Reg, &MO));
// It wasn't previously live, but now it is; this is a kill.
if (KillIndices[Reg] == ~0u) {
KillIndices[Reg] = Count;
DefIndices[Reg] = ~0u;
}
// Repeat, for all aliases.
for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
unsigned AliasReg = *Alias;
if (KillIndices[AliasReg] == ~0u) {
KillIndices[AliasReg] = Count;
DefIndices[AliasReg] = ~0u;
}
}
}
}
/// BreakAntiDependencies - Identify anti-dependencies along the critical path
/// of the ScheduleDAG and break them by renaming registers.
///
bool SchedulePostRATDList::BreakAntiDependencies() {
// The code below assumes that there is at least one instruction,
// so just duck out immediately if the block is empty.
if (SUnits.empty()) return false;
// Find the node at the bottom of the critical path.
SUnit *Max = 0;
for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
SUnit *SU = &SUnits[i];
if (!Max || SU->getDepth() + SU->Latency > Max->getDepth() + Max->Latency)
Max = SU;
}
DOUT << "Critical path has total latency "
<< (Max->getDepth() + Max->Latency) << "\n";
// Track progress along the critical path through the SUnit graph as we walk
// the instructions.
SUnit *CriticalPathSU = Max;
MachineInstr *CriticalPathMI = CriticalPathSU->getInstr();
// Consider this pattern:
// A = ...
@@ -481,43 +663,19 @@ bool SchedulePostRATDList::BreakAntiDependencies() {
}
}
// Scan the register operands for this instruction and update
// Classes and RegRefs.
PrescanInstruction(MI);
// If this instruction has a use of AntiDepReg, breaking it
// is invalid.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
const TargetRegisterClass *NewRC =
getInstrOperandRegClass(TRI, MI->getDesc(), i);
// If this instruction has a use of AntiDepReg, breaking it
// is invalid.
if (MO.isUse() && AntiDepReg == Reg)
if (MO.isUse() && AntiDepReg == Reg) {
AntiDepReg = 0;
// For now, only allow the register to be changed if its register
// class is consistent across all uses.
if (!Classes[Reg] && NewRC)
Classes[Reg] = NewRC;
else if (!NewRC || Classes[Reg] != NewRC)
Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
// Now check for aliases.
for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
// If an alias of the reg is used during the live range, give up.
// Note that this allows us to skip checking if AntiDepReg
// overlaps with any of the aliases, among other things.
unsigned AliasReg = *Alias;
if (Classes[AliasReg]) {
Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
}
break;
}
// If we're still willing to consider this register, note the reference.
if (Classes[Reg] != reinterpret_cast<TargetRegisterClass *>(-1))
RegRefs.insert(std::make_pair(Reg, &MO));
}
// Determine AntiDepReg's register class, if it is live and is
@@ -584,71 +742,7 @@ bool SchedulePostRATDList::BreakAntiDependencies() {
}
}
// Update liveness.
// Proceeding upwards, registers that are def'd but not used in this
// instruction are now dead.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
if (!MO.isDef()) continue;
// Ignore two-addr defs.
if (MI->isRegReDefinedByTwoAddr(i)) continue;
DefIndices[Reg] = Count;
KillIndices[Reg] = ~0u;
Classes[Reg] = 0;
RegRefs.erase(Reg);
// Repeat, for all subregs.
for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
*Subreg; ++Subreg) {
unsigned SubregReg = *Subreg;
DefIndices[SubregReg] = Count;
KillIndices[SubregReg] = ~0u;
Classes[SubregReg] = 0;
RegRefs.erase(SubregReg);
}
// Conservatively mark super-registers as unusable.
for (const unsigned *Super = TRI->getSuperRegisters(Reg);
*Super; ++Super) {
unsigned SuperReg = *Super;
Classes[SuperReg] = reinterpret_cast<TargetRegisterClass *>(-1);
}
}
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
if (!MO.isUse()) continue;
const TargetRegisterClass *NewRC =
getInstrOperandRegClass(TRI, MI->getDesc(), i);
// For now, only allow the register to be changed if its register
// class is consistent across all uses.
if (!Classes[Reg] && NewRC)
Classes[Reg] = NewRC;
else if (!NewRC || Classes[Reg] != NewRC)
Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
RegRefs.insert(std::make_pair(Reg, &MO));
// It wasn't previously live, but now it is; this is a kill.
if (KillIndices[Reg] == ~0u) {
KillIndices[Reg] = Count;
DefIndices[Reg] = ~0u;
}
// Repeat, for all aliases.
for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
unsigned AliasReg = *Alias;
if (KillIndices[AliasReg] == ~0u) {
KillIndices[AliasReg] = Count;
DefIndices[AliasReg] = ~0u;
}
}
}
ScanInstruction(MI, Count);
}
assert(Count == ~0u && "Count mismatch!");
@@ -679,9 +773,17 @@ void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
// their latencies.
SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());
if (SuccSU->NumPredsLeft == 0) {
// If all the node's predecessors are scheduled, this node is ready
// to be scheduled. Ignore the special ExitSU node.
if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
PendingQueue.push_back(SuccSU);
}
}
/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I)
ReleaseSucc(SU, &*I);
}
/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
@@ -695,11 +797,7 @@ void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
SU->setDepthToAtLeast(CurCycle);
// Top down: release successors.
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I)
ReleaseSucc(SU, &*I);
ReleaseSuccessors(SU);
SU->isScheduled = true;
AvailableQueue.ScheduledNode(SU);
}
@@ -709,6 +807,9 @@ void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
void SchedulePostRATDList::ListScheduleTopDown() {
unsigned CurCycle = 0;
// Release any successors of the special Entry node.
ReleaseSuccessors(&EntrySU);
// All leaves to Available queue.
for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
// It is available if it has no predecessors.
@@ -717,7 +818,7 @@ void SchedulePostRATDList::ListScheduleTopDown() {
SUnits[i].isAvailable = true;
}
}
// While Available queue is not empty, grab the node with the highest
// priority. If it is not ready put it back. Schedule the node.
std::vector<SUnit*> NotReady;

View File

@@ -28,7 +28,8 @@ ScheduleDAG::ScheduleDAG(MachineFunction &mf)
TRI(TM.getRegisterInfo()),
TLI(TM.getTargetLowering()),
MF(mf), MRI(mf.getRegInfo()),
ConstPool(MF.getConstantPool()) {
ConstPool(MF.getConstantPool()),
EntrySU(), ExitSU() {
}
ScheduleDAG::~ScheduleDAG() {}
@@ -58,6 +59,8 @@ void ScheduleDAG::Run(SelectionDAG *dag, MachineBasicBlock *bb,
BB = bb;
Begin = begin;
End = end;
EntrySU = SUnit();
ExitSU = SUnit();
Schedule();

View File

@@ -15,86 +15,22 @@
#define DEBUG_TYPE "sched-instrs"
#include "ScheduleDAGInstrs.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallSet.h"
#include <map>
using namespace llvm;
namespace {
class VISIBILITY_HIDDEN LoopDependencies {
const MachineLoopInfo &MLI;
const MachineDominatorTree &MDT;
public:
typedef std::map<unsigned, std::pair<const MachineOperand *, unsigned> >
LoopDeps;
LoopDeps Deps;
LoopDependencies(const MachineLoopInfo &mli,
const MachineDominatorTree &mdt) :
MLI(mli), MDT(mdt) {}
void VisitLoop(const MachineLoop *Loop) {
Deps.clear();
MachineBasicBlock *Header = Loop->getHeader();
SmallSet<unsigned, 8> LoopLiveIns;
for (MachineBasicBlock::livein_iterator LI = Header->livein_begin(),
LE = Header->livein_end(); LI != LE; ++LI)
LoopLiveIns.insert(*LI);
const MachineDomTreeNode *Node = MDT.getNode(Header);
const MachineBasicBlock *MBB = Node->getBlock();
assert(Loop->contains(MBB) &&
"Loop does not contain header!");
VisitRegion(Node, MBB, Loop, LoopLiveIns);
}
private:
void VisitRegion(const MachineDomTreeNode *Node,
const MachineBasicBlock *MBB,
const MachineLoop *Loop,
const SmallSet<unsigned, 8> &LoopLiveIns) {
unsigned Count = 0;
for (MachineBasicBlock::const_iterator I = MBB->begin(), E = MBB->end();
I != E; ++I, ++Count) {
const MachineInstr *MI = I;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || !MO.isUse())
continue;
unsigned MOReg = MO.getReg();
if (LoopLiveIns.count(MOReg))
Deps.insert(std::make_pair(MOReg, std::make_pair(&MO, Count)));
}
}
const std::vector<MachineDomTreeNode*> &Children = Node->getChildren();
for (std::vector<MachineDomTreeNode*>::const_iterator I =
Children.begin(), E = Children.end(); I != E; ++I) {
const MachineDomTreeNode *ChildNode = *I;
MachineBasicBlock *ChildBlock = ChildNode->getBlock();
if (Loop->contains(ChildBlock))
VisitRegion(ChildNode, ChildBlock, Loop, LoopLiveIns);
}
}
};
}
ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
const MachineLoopInfo &mli,
const MachineDominatorTree &mdt)
: ScheduleDAG(mf), MLI(mli), MDT(mdt) {}
: ScheduleDAG(mf), MLI(mli), MDT(mdt), LoopRegs(MLI, MDT) {}
/// getOpcode - If this is an Instruction or a ConstantExpr, return the
/// opcode value. Otherwise return UserOp1.
@@ -172,7 +108,20 @@ static const Value *getUnderlyingObjectForInstr(const MachineInstr *MI) {
return V;
}
void ScheduleDAGInstrs::StartBlock(MachineBasicBlock *BB) {
if (MachineLoop *ML = MLI.getLoopFor(BB))
if (BB == ML->getLoopLatch()) {
MachineBasicBlock *Header = ML->getHeader();
for (MachineBasicBlock::livein_iterator I = Header->livein_begin(),
E = Header->livein_end(); I != E; ++I)
LoopLiveInRegs.insert(*I);
LoopRegs.VisitLoop(ML);
}
}
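This hook is what enables back-edge-aware scheduling. A hypothetical single-block loop shows the payoff:

    //   loop:
    //     %v  = use %iv      ; %iv is live into the loop header
    //     %iv = %iv + 1      ; def that wraps around the back-edge
    //     bcc loop
    //
    // VisitLoop records %iv's first in-header use; BuildSchedGraph (below)
    // then adds an artificial edge from the %iv def to ExitSU whose latency
    // back-schedules the increment away from the next iteration's use.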
void ScheduleDAGInstrs::BuildSchedGraph() {
// We'll be allocating one SUnit for each instruction, plus one for
// the region exit node.
SUnits.reserve(BB->size());
// We build scheduling units by walking a block's instruction list from bottom
@@ -189,30 +138,6 @@ void ScheduleDAGInstrs::BuildSchedGraph() {
std::map<const Value *, SUnit *> MemDefs;
std::map<const Value *, std::vector<SUnit *> > MemUses;
// If we have an SUnit which is representing a terminator instruction, we
// can use it as a place-holder successor for inter-block dependencies.
SUnit *Terminator = 0;
// Terminators can perform control transfers, so we need to make sure that
// all the work of the block is done before the terminator. Labels can
// mark points of interest for various types of meta-data (eg. EH data),
// and we need to make sure nothing is scheduled around them.
SUnit *SchedulingBarrier = 0;
LoopDependencies LoopRegs(MLI, MDT);
// Track which regs are live into a loop, to help guide back-edge-aware
// scheduling.
SmallSet<unsigned, 8> LoopLiveInRegs;
if (MachineLoop *ML = MLI.getLoopFor(BB))
if (BB == ML->getLoopLatch()) {
MachineBasicBlock *Header = ML->getHeader();
for (MachineBasicBlock::livein_iterator I = Header->livein_begin(),
E = Header->livein_end(); I != E; ++I)
LoopLiveInRegs.insert(*I);
LoopRegs.VisitLoop(ML);
}
// Check to see if the scheduler cares about latencies.
bool UnitLatencies = ForceUnitLatencies();
@@ -220,10 +145,14 @@ void ScheduleDAGInstrs::BuildSchedGraph() {
unsigned SpecialAddressLatency =
TM.getSubtarget<TargetSubtarget>().getSpecialAddressLatency();
// Walk the list of instructions, from bottom moving up.
for (MachineBasicBlock::iterator MII = End, MIE = Begin;
MII != MIE; --MII) {
MachineInstr *MI = prior(MII);
const TargetInstrDesc &TID = MI->getDesc();
assert(!TID.isTerminator() && !MI->isLabel() &&
"Cannot schedule terminators or labels!");
// Create the SUnit for this MI.
SUnit *SU = NewSUnit(MI);
// Assign the Latency field of SU using target-provided information.
@@ -298,8 +227,7 @@ void ScheduleDAGInstrs::BuildSchedGraph() {
// If a def is going to wrap back around to the top of the loop,
// backschedule it.
// TODO: Blocks in loops without terminators can benefit too.
if (!UnitLatencies && Terminator && DefList.empty()) {
if (!UnitLatencies && DefList.empty()) {
LoopDependencies::LoopDeps::iterator I = LoopRegs.Deps.find(Reg);
if (I != LoopRegs.Deps.end()) {
const MachineOperand *UseMO = I->second.first;
@@ -323,10 +251,10 @@ void ScheduleDAGInstrs::BuildSchedGraph() {
// scheduling region.
Latency -= std::min(Latency, Count);
// Add the artificial edge.
Terminator->addPred(SDep(SU, SDep::Order, Latency,
/*Reg=*/0, /*isNormalMemory=*/false,
/*isMustAlias=*/false,
/*isArtificial=*/true));
ExitSU.addPred(SDep(SU, SDep::Order, Latency,
/*Reg=*/0, /*isNormalMemory=*/false,
/*isMustAlias=*/false,
/*isArtificial=*/true));
} else if (SpecialAddressLatency > 0 &&
UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass()) {
// The entire loop body is within the current scheduling region
@@ -355,7 +283,7 @@ void ScheduleDAGInstrs::BuildSchedGraph() {
// after stack slots are lowered to actual addresses.
// TODO: Use an AliasAnalysis and do real alias-analysis queries, and
// produce more precise dependence information.
if (TID.isCall() || TID.isTerminator() || TID.hasUnmodeledSideEffects()) {
if (TID.isCall() || TID.hasUnmodeledSideEffects()) {
new_chain:
// This is the conservative case. Add dependencies on all memory
// references.
@@ -379,7 +307,7 @@ void ScheduleDAGInstrs::BuildSchedGraph() {
// See if it is known to just have a single memory reference.
MachineInstr *ChainMI = Chain->getInstr();
const TargetInstrDesc &ChainTID = ChainMI->getDesc();
if (!ChainTID.isCall() && !ChainTID.isTerminator() &&
if (!ChainTID.isCall() &&
!ChainTID.hasUnmodeledSideEffects() &&
ChainMI->hasOneMemOperand() &&
!ChainMI->memoperands_begin()->isVolatile() &&
@@ -452,28 +380,6 @@ void ScheduleDAGInstrs::BuildSchedGraph() {
PendingLoads.push_back(SU);
}
}
// Add chain edges from terminators and labels to ensure that no
// instructions are scheduled past them.
if (SchedulingBarrier && SU->Succs.empty())
SchedulingBarrier->addPred(SDep(SU, SDep::Order, SU->Latency));
// If we encounter a mid-block label, we need to go back and add
// dependencies on SUnits we've already processed to prevent the
// label from moving downward.
if (MI->isLabel())
for (SUnit *I = SU; I != &SUnits[0]; --I) {
SUnit *SuccSU = SU-1;
SuccSU->addPred(SDep(SU, SDep::Order, SU->Latency));
MachineInstr *SuccMI = SuccSU->getInstr();
if (SuccMI->getDesc().isTerminator() || SuccMI->isLabel())
break;
}
// If this instruction obstructs all scheduling, remember it.
if (TID.isTerminator() || MI->isLabel())
SchedulingBarrier = SU;
// If this instruction is a terminator, remember it.
if (TID.isTerminator())
Terminator = SU;
}
for (int i = 0, e = TRI->getNumRegs(); i != e; ++i) {
@@ -483,6 +389,10 @@ void ScheduleDAGInstrs::BuildSchedGraph() {
PendingLoads.clear();
}
void ScheduleDAGInstrs::FinishBlock() {
// Nothing to do.
}
void ScheduleDAGInstrs::ComputeLatency(SUnit *SU) {
const InstrItineraryData &InstrItins = TM.getInstrItineraryData();
@@ -505,7 +415,12 @@ void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
std::string s;
raw_string_ostream oss(s);
SU->getInstr()->print(oss);
if (SU == &EntrySU)
oss << "<entry>";
else if (SU == &ExitSU)
oss << "<exit>";
else
SU->getInstr()->print(oss);
return oss.str();
}
@@ -531,5 +446,10 @@ MachineBasicBlock *ScheduleDAGInstrs::EmitSchedule() {
BB->insert(End, SU->getInstr());
}
// Update the Begin iterator, as the first instruction in the block
// may have been scheduled later.
if (!Sequence.empty())
Begin = Sequence[0]->getInstr();
return BB;
}

View File

@@ -15,14 +15,86 @@
#ifndef SCHEDULEDAGINSTRS_H
#define SCHEDULEDAGINSTRS_H
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <map>
namespace llvm {
class MachineLoopInfo;
class MachineDominatorTree;
class ScheduleDAGInstrs : public ScheduleDAG {
/// LoopDependencies - This class analyzes loop-oriented register
/// dependencies, which are used to guide scheduling decisions.
/// For example, loop induction variable increments should be
/// scheduled as soon as possible after the variable's last use.
///
class VISIBILITY_HIDDEN LoopDependencies {
const MachineLoopInfo &MLI;
const MachineDominatorTree &MDT;
public:
typedef std::map<unsigned, std::pair<const MachineOperand *, unsigned> >
LoopDeps;
LoopDeps Deps;
LoopDependencies(const MachineLoopInfo &mli,
const MachineDominatorTree &mdt) :
MLI(mli), MDT(mdt) {}
/// VisitLoop - Clear out any previous state and analyze the given loop.
///
void VisitLoop(const MachineLoop *Loop) {
Deps.clear();
MachineBasicBlock *Header = Loop->getHeader();
SmallSet<unsigned, 8> LoopLiveIns;
for (MachineBasicBlock::livein_iterator LI = Header->livein_begin(),
LE = Header->livein_end(); LI != LE; ++LI)
LoopLiveIns.insert(*LI);
const MachineDomTreeNode *Node = MDT.getNode(Header);
const MachineBasicBlock *MBB = Node->getBlock();
assert(Loop->contains(MBB) &&
"Loop does not contain header!");
VisitRegion(Node, MBB, Loop, LoopLiveIns);
}
private:
void VisitRegion(const MachineDomTreeNode *Node,
const MachineBasicBlock *MBB,
const MachineLoop *Loop,
const SmallSet<unsigned, 8> &LoopLiveIns) {
unsigned Count = 0;
for (MachineBasicBlock::const_iterator I = MBB->begin(), E = MBB->end();
I != E; ++I, ++Count) {
const MachineInstr *MI = I;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || !MO.isUse())
continue;
unsigned MOReg = MO.getReg();
if (LoopLiveIns.count(MOReg))
Deps.insert(std::make_pair(MOReg, std::make_pair(&MO, Count)));
}
}
const std::vector<MachineDomTreeNode*> &Children = Node->getChildren();
for (std::vector<MachineDomTreeNode*>::const_iterator I =
Children.begin(), E = Children.end(); I != E; ++I) {
const MachineDomTreeNode *ChildNode = *I;
MachineBasicBlock *ChildBlock = ChildNode->getBlock();
if (Loop->contains(ChildBlock))
VisitRegion(ChildNode, ChildBlock, Loop, LoopLiveIns);
}
}
};
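The Deps map records, for each register live into the loop header, the first in-loop use operand and that instruction's distance from the top of the loop. BuildSchedGraph consumes it roughly like this (condensed from the hunk earlier in this patch; the surrounding latency math is elided):

    LoopDependencies::LoopDeps::iterator I = LoopRegs.Deps.find(Reg);
    if (I != LoopRegs.Deps.end()) {
      const MachineOperand *UseMO = I->second.first;  // next-iteration use
      unsigned Count = I->second.second;              // instrs from loop top
      // ... derive a latency and add an artificial edge to ExitSU ...
    }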
/// ScheduleDAGInstrs - A ScheduleDAG subclass for scheduling lists of
/// MachineInstrs.
class VISIBILITY_HIDDEN ScheduleDAGInstrs : public ScheduleDAG {
const MachineLoopInfo &MLI;
const MachineDominatorTree &MDT;
@@ -38,6 +110,15 @@ namespace llvm {
/// to minimize construction/destruction.
std::vector<SUnit *> PendingLoads;
/// LoopRegs - Track which registers are used for loop-carried dependencies.
///
LoopDependencies LoopRegs;
/// LoopLiveInRegs - Track which regs are live into a loop, to help guide
/// back-edge-aware scheduling.
///
SmallSet<unsigned, 8> LoopLiveInRegs;
public:
explicit ScheduleDAGInstrs(MachineFunction &mf,
const MachineLoopInfo &mli,
@@ -68,11 +149,19 @@ namespace llvm {
virtual MachineBasicBlock *EmitSchedule();
/// StartBlock - Prepare to perform scheduling in the given block.
///
virtual void StartBlock(MachineBasicBlock *BB);
/// Schedule - Order nodes according to selected style, filling
/// in the Sequence member.
///
virtual void Schedule() = 0;
/// FinishBlock - Clean up after scheduling in the given block.
///
virtual void FinishBlock();
virtual void dumpNode(const SUnit *SU) const;
virtual std::string getGraphNodeLabel(const SUnit *SU) const;

View File

@@ -90,6 +90,7 @@ public:
private:
void ReleasePred(SUnit *SU, SDep *PredEdge);
void ReleasePredecessors(SUnit *SU, unsigned CurCycle);
void ScheduleNodeBottomUp(SUnit*, unsigned);
SUnit *CopyAndMoveSuccessors(SUnit*);
void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
@@ -142,23 +143,15 @@ void ScheduleDAGFast::ReleasePred(SUnit *SU, SDep *PredEdge) {
}
#endif
if (PredSU->NumSuccsLeft == 0) {
// If all the node's successors are scheduled, this node is ready
// to be scheduled. Ignore the special EntrySU node.
if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
PredSU->isAvailable = true;
AvailableQueue.push(PredSU);
}
}
/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGFast::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
DOUT << "*** Scheduling [" << CurCycle << "]: ";
DEBUG(SU->dump(this));
assert(CurCycle >= SU->getHeight() && "Node scheduled below its height!");
SU->setHeightToAtLeast(CurCycle);
Sequence.push_back(SU);
void ScheduleDAGFast::ReleasePredecessors(SUnit *SU, unsigned CurCycle) {
// Bottom up: release predecessors
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
@@ -175,6 +168,20 @@ void ScheduleDAGFast::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
}
}
}
}
/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGFast::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
DOUT << "*** Scheduling [" << CurCycle << "]: ";
DEBUG(SU->dump(this));
assert(CurCycle >= SU->getHeight() && "Node scheduled below its height!");
SU->setHeightToAtLeast(CurCycle);
Sequence.push_back(SU);
ReleasePredecessors(SU, CurCycle);
// Release all the implicit physical register defs that are live.
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
@@ -480,6 +487,10 @@ bool ScheduleDAGFast::DelayForLiveRegsBottomUp(SUnit *SU,
/// schedulers.
void ScheduleDAGFast::ListScheduleBottomUp() {
unsigned CurCycle = 0;
// Release any predecessors of the special Exit node.
ReleasePredecessors(&ExitSU, CurCycle);
// Add root to Available queue.
if (!SUnits.empty()) {
SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];

View File

@@ -78,6 +78,7 @@ public:
private:
void ReleaseSucc(SUnit *SU, const SDep &D);
void ReleaseSuccessors(SUnit *SU);
void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
void ListScheduleTopDown();
};
@@ -118,8 +119,20 @@ void ScheduleDAGList::ReleaseSucc(SUnit *SU, const SDep &D) {
SuccSU->setDepthToAtLeast(SU->getDepth() + D.getLatency());
if (SuccSU->NumPredsLeft == 0) {
// If all the node's predecessors are scheduled, this node is ready
// to be scheduled. Ignore the special ExitSU node.
if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
PendingQueue.push_back(SuccSU);
}
void ScheduleDAGList::ReleaseSuccessors(SUnit *SU) {
// Top down: release successors.
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
assert(!I->isAssignedRegDep() &&
"The list-td scheduler doesn't yet support physreg dependencies!");
ReleaseSucc(SU, *I);
}
}
@@ -134,15 +147,7 @@ void ScheduleDAGList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
SU->setDepthToAtLeast(CurCycle);
// Top down: release successors.
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
assert(!I->isAssignedRegDep() &&
"The list-td scheduler doesn't yet support physreg dependencies!");
ReleaseSucc(SU, *I);
}
ReleaseSuccessors(SU);
SU->isScheduled = true;
AvailableQueue->ScheduledNode(SU);
}
@@ -152,6 +157,9 @@ void ScheduleDAGList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
void ScheduleDAGList::ListScheduleTopDown() {
unsigned CurCycle = 0;
// Release any successors of the special Entry node.
ReleaseSuccessors(&EntrySU);
// All leaves to Available queue.
for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
// It is available if it has no predecessors.

View File

@@ -114,7 +114,9 @@ public:
private:
void ReleasePred(SUnit *SU, const SDep *PredEdge);
void ReleasePredecessors(SUnit *SU, unsigned CurCycle);
void ReleaseSucc(SUnit *SU, const SDep *SuccEdge);
void ReleaseSuccessors(SUnit *SU);
void CapturePred(SDep *PredEdge);
void ScheduleNodeBottomUp(SUnit*, unsigned);
void ScheduleNodeTopDown(SUnit*, unsigned);
@@ -204,23 +206,15 @@ void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
}
#endif
if (PredSU->NumSuccsLeft == 0) {
// If all the node's successors are scheduled, this node is ready
// to be scheduled. Ignore the special EntrySU node.
if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
PredSU->isAvailable = true;
AvailableQueue->push(PredSU);
}
}
/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
DOUT << "*** Scheduling [" << CurCycle << "]: ";
DEBUG(SU->dump(this));
assert(CurCycle >= SU->getHeight() && "Node scheduled below its height!");
SU->setHeightToAtLeast(CurCycle);
Sequence.push_back(SU);
void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU, unsigned CurCycle) {
// Bottom up: release predecessors
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
@@ -237,6 +231,20 @@ void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
}
}
}
}
/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
DOUT << "*** Scheduling [" << CurCycle << "]: ";
DEBUG(SU->dump(this));
assert(CurCycle >= SU->getHeight() && "Node scheduled below its height!");
SU->setHeightToAtLeast(CurCycle);
Sequence.push_back(SU);
ReleasePredecessors(SU, CurCycle);
// Release all the implicit physical register defs that are live.
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
@@ -627,6 +635,10 @@ bool ScheduleDAGRRList::DelayForLiveRegsBottomUp(SUnit *SU,
/// schedulers.
void ScheduleDAGRRList::ListScheduleBottomUp() {
unsigned CurCycle = 0;
// Release any predecessors of the special Exit node.
ReleasePredecessors(&ExitSU, CurCycle);
// Add root to Available queue.
if (!SUnits.empty()) {
SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
@@ -789,12 +801,25 @@ void ScheduleDAGRRList::ReleaseSucc(SUnit *SU, const SDep *SuccEdge) {
}
#endif
if (SuccSU->NumPredsLeft == 0) {
// If all the node's predecessors are scheduled, this node is ready
// to be scheduled. Ignore the special ExitSU node.
if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU) {
SuccSU->isAvailable = true;
AvailableQueue->push(SuccSU);
}
}
void ScheduleDAGRRList::ReleaseSuccessors(SUnit *SU) {
// Top down: release successors
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
assert(!I->isAssignedRegDep() &&
"The list-tdrr scheduler doesn't yet support physreg dependencies!");
ReleaseSucc(SU, &*I);
}
}
/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
@@ -806,15 +831,7 @@ void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
SU->setDepthToAtLeast(CurCycle);
Sequence.push_back(SU);
// Top down: release successors
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
assert(!I->isAssignedRegDep() &&
"The list-tdrr scheduler doesn't yet support physreg dependencies!");
ReleaseSucc(SU, &*I);
}
ReleaseSuccessors(SU);
SU->isScheduled = true;
AvailableQueue->ScheduledNode(SU);
}
@@ -824,6 +841,9 @@ void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
void ScheduleDAGRRList::ListScheduleTopDown() {
unsigned CurCycle = 0;
// Release any successors of the special Entry node.
ReleaseSuccessors(&EntrySU);
// All leaves to Available queue.
for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
// It is available if it has no predecessors.