
Reapply "CodeGen: Use references in MachineTraceMetrics::Trace, NFC"

This reverts commit r261510, effectively reapplying r261509.  The
original commit missed a caller in AArch64ConditionalCompares.

Original commit message:

Pass non-null arguments by reference in MachineTraceMetrics::Trace,
simplifying future work to remove implicit iterator => pointer
conversions.

llvm-svn: 261511
Duncan P. N. Exon Smith committed 2016-02-22 03:33:28 +00:00
parent 5d1b25325e
commit 3b54098f86
5 changed files with 35 additions and 36 deletions
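For context on the "implicit iterator => pointer conversions" mentioned above: LLVM's ilist iterators historically converted to instruction pointers implicitly, so a pointer-taking API could silently accept an iterator. A minimal standalone sketch (illustrative names and types, not LLVM's actual API) of why reference parameters force the conversion to be explicit:

// Standalone sketch: a reference parameter requires an explicit dereference
// at the call site, where a pointer parameter could hide an implicit
// iterator-to-pointer conversion.
#include <iostream>
#include <list>

struct Instr { int Opcode; };

// Old style: pointer parameter. With an implicit iterator => pointer
// conversion, callers could pass an iterator by accident.
int depthOf(const Instr *MI) { return MI->Opcode; }

// New style: reference parameter. Documents that null is not allowed and
// makes callers dereference explicitly.
int depthOfRef(const Instr &MI) { return MI.Opcode; }

int main() {
  std::list<Instr> Block{{1}, {2}, {3}};
  auto It = Block.begin();
  std::cout << depthOf(&*It) << '\n';   // pointer API: explicit &*
  std::cout << depthOfRef(*It) << '\n'; // reference API: explicit *
}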

include/llvm/CodeGen/MachineTraceMetrics.h

@@ -276,24 +276,24 @@ public:
     /// Return the depth and height of MI. The depth is only valid for
     /// instructions in or above the trace center block. The height is only
     /// valid for instructions in or below the trace center block.
-    InstrCycles getInstrCycles(const MachineInstr *MI) const {
-      return TE.Cycles.lookup(MI);
+    InstrCycles getInstrCycles(const MachineInstr &MI) const {
+      return TE.Cycles.lookup(&MI);
     }
 
     /// Return the slack of MI. This is the number of cycles MI can be delayed
     /// before the critical path becomes longer.
     /// MI must be an instruction in the trace center block.
-    unsigned getInstrSlack(const MachineInstr *MI) const;
+    unsigned getInstrSlack(const MachineInstr &MI) const;
 
     /// Return the Depth of a PHI instruction in a trace center block successor.
     /// The PHI does not have to be part of the trace.
-    unsigned getPHIDepth(const MachineInstr *PHI) const;
+    unsigned getPHIDepth(const MachineInstr &PHI) const;
 
     /// A dependence is useful if the basic block of the defining instruction
     /// is part of the trace of the user instruction. It is assumed that DefMI
     /// dominates UseMI (see also isUsefulDominator).
-    bool isDepInTrace(const MachineInstr *DefMI,
-                      const MachineInstr *UseMI) const;
+    bool isDepInTrace(const MachineInstr &DefMI,
+                      const MachineInstr &UseMI) const;
   };
 
   /// A trace ensemble is a collection of traces selected using the same

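A minimal sketch, with simplified stand-in types rather than the real llvm::MachineTraceMetrics classes, of the accessor change above: the cycle map stays keyed by pointer, so the reference-taking method takes the address internally, while callers can no longer pass null.

// Stand-in types only; illustrates lookup(&MI) behind a reference API.
#include <map>

struct MachineInstrStub {};
struct InstrCyclesStub { unsigned Depth = 0, Height = 0; };

struct TraceStub {
  // Still keyed by pointer, as TE.Cycles is in the real code.
  std::map<const MachineInstrStub *, InstrCyclesStub> Cycles;
  InstrCyclesStub getInstrCycles(const MachineInstrStub &MI) const {
    auto It = Cycles.find(&MI); // take the address, as lookup(&MI) does
    return It == Cycles.end() ? InstrCyclesStub() : It->second;
  }
};

int main() {
  TraceStub T;
  MachineInstrStub MI;
  T.Cycles[&MI] = {4, 3};             // depth 4, height 3
  return T.getInstrCycles(MI).Depth;  // returns 4
}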
lib/CodeGen/EarlyIfConversion.cpp

@@ -718,7 +718,7 @@ bool EarlyIfConverter::shouldConvertIf() {
   // TBB / FBB data dependencies may delay the select even more.
   MachineTraceMetrics::Trace HeadTrace = MinInstr->getTrace(IfConv.Head);
   unsigned BranchDepth =
-      HeadTrace.getInstrCycles(IfConv.Head->getFirstTerminator()).Depth;
+      HeadTrace.getInstrCycles(*IfConv.Head->getFirstTerminator()).Depth;
   DEBUG(dbgs() << "Branch depth: " << BranchDepth << '\n');
@@ -726,8 +726,8 @@ bool EarlyIfConverter::shouldConvertIf() {
   MachineTraceMetrics::Trace TailTrace = MinInstr->getTrace(IfConv.Tail);
   for (unsigned i = 0, e = IfConv.PHIs.size(); i != e; ++i) {
     SSAIfConv::PHIInfo &PI = IfConv.PHIs[i];
-    unsigned Slack = TailTrace.getInstrSlack(PI.PHI);
-    unsigned MaxDepth = Slack + TailTrace.getInstrCycles(PI.PHI).Depth;
+    unsigned Slack = TailTrace.getInstrSlack(*PI.PHI);
+    unsigned MaxDepth = Slack + TailTrace.getInstrCycles(*PI.PHI).Depth;
     DEBUG(dbgs() << "Slack " << Slack << ":\t" << *PI.PHI);
 
     // The condition is pulled into the critical path.
@@ -742,7 +742,7 @@ bool EarlyIfConverter::shouldConvertIf() {
     }
 
     // The TBB value is pulled into the critical path.
-    unsigned TDepth = adjCycles(TBBTrace.getPHIDepth(PI.PHI), PI.TCycles);
+    unsigned TDepth = adjCycles(TBBTrace.getPHIDepth(*PI.PHI), PI.TCycles);
     if (TDepth > MaxDepth) {
       unsigned Extra = TDepth - MaxDepth;
       DEBUG(dbgs() << "TBB data adds " << Extra << " cycles.\n");
@@ -753,7 +753,7 @@ bool EarlyIfConverter::shouldConvertIf() {
     }
 
     // The FBB value is pulled into the critical path.
-    unsigned FDepth = adjCycles(FBBTrace.getPHIDepth(PI.PHI), PI.FCycles);
+    unsigned FDepth = adjCycles(FBBTrace.getPHIDepth(*PI.PHI), PI.FCycles);
     if (FDepth > MaxDepth) {
       unsigned Extra = FDepth - MaxDepth;
       DEBUG(dbgs() << "FBB data adds " << Extra << " cycles.\n");

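A worked example, with invented cycle counts, of the check in the loop above: a PHI's slack plus its depth bounds how deep an incoming TBB/FBB value may be before it stretches the critical path.

// Invented numbers; mirrors the MaxDepth/TDepth comparison above.
#include <iostream>

int main() {
  unsigned Slack = 2, PHIDepth = 4;
  unsigned MaxDepth = Slack + PHIDepth; // deepest tolerable input: 6
  unsigned TDepth = 8;                  // depth of the TBB value
  if (TDepth > MaxDepth)                // 8 > 6: path grows by 2 cycles
    std::cout << "TBB data adds " << (TDepth - MaxDepth) << " cycles.\n";
}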
lib/CodeGen/MachineCombiner.cpp

@@ -156,7 +156,7 @@ MachineCombiner::getDepth(SmallVectorImpl<MachineInstr *> &InsInstrs,
     } else {
       MachineInstr *DefInstr = getOperandDef(MO);
       if (DefInstr) {
-        DepthOp = BlockTrace.getInstrCycles(DefInstr).Depth;
+        DepthOp = BlockTrace.getInstrCycles(*DefInstr).Depth;
         LatencyOp = TSchedModel.computeOperandLatency(
             DefInstr, DefInstr->findRegisterDefOperandIdx(MO.getReg()),
             InstrPtr, InstrPtr->findRegisterUseOperandIdx(MO.getReg()));
@@ -198,7 +198,7 @@ unsigned MachineCombiner::getLatency(MachineInstr *Root, MachineInstr *NewRoot,
       RI++;
       MachineInstr *UseMO = RI->getParent();
       unsigned LatencyOp = 0;
-      if (UseMO && BlockTrace.isDepInTrace(Root, UseMO)) {
+      if (UseMO && BlockTrace.isDepInTrace(*Root, *UseMO)) {
         LatencyOp = TSchedModel.computeOperandLatency(
             NewRoot, NewRoot->findRegisterDefOperandIdx(MO.getReg()), UseMO,
             UseMO->findRegisterUseOperandIdx(MO.getReg()));
@@ -250,7 +250,7 @@ bool MachineCombiner::improvesCriticalPathLen(
   // Get depth and latency of NewRoot and Root.
   unsigned NewRootDepth = getDepth(InsInstrs, InstrIdxForVirtReg, BlockTrace);
-  unsigned RootDepth = BlockTrace.getInstrCycles(Root).Depth;
+  unsigned RootDepth = BlockTrace.getInstrCycles(*Root).Depth;
 
   DEBUG(dbgs() << "DEPENDENCE DATA FOR " << Root << "\n";
         dbgs() << " NewRootDepth: " << NewRootDepth << "\n";
@@ -269,7 +269,7 @@ bool MachineCombiner::improvesCriticalPathLen(
   // even if the instruction depths (data dependency cycles) become worse.
   unsigned NewRootLatency = getLatency(Root, NewRoot, BlockTrace);
   unsigned RootLatency = TSchedModel.computeInstrLatency(Root);
-  unsigned RootSlack = BlockTrace.getInstrSlack(Root);
+  unsigned RootSlack = BlockTrace.getInstrSlack(*Root);
 
   DEBUG(dbgs() << " NewRootLatency: " << NewRootLatency << "\n";
         dbgs() << " RootLatency: " << RootLatency << "\n";

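Roughly, improvesCriticalPathLen() then weighs the candidate pattern against the original root plus its slack; a sketch with invented numbers (the comparison shown is a simplification of the actual heuristic, not the verbatim code):

// Invented numbers; simplified form of the depth+latency+slack comparison.
#include <iostream>

int main() {
  unsigned NewRootDepth = 3, NewRootLatency = 2; // candidate pattern
  unsigned RootDepth = 4, RootLatency = 3, RootSlack = 0;
  bool Improves =
      NewRootDepth + NewRootLatency <= RootDepth + RootLatency + RootSlack;
  std::cout << (Improves ? "combine\n" : "keep original\n"); // 5 <= 7: combine
}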
lib/CodeGen/MachineTraceMetrics.cpp

@@ -655,17 +655,17 @@ static bool getDataDeps(const MachineInstr *UseMI,
 // Get the input data dependencies of a PHI instruction, using Pred as the
 // preferred predecessor.
 // This will add at most one dependency to Deps.
-static void getPHIDeps(const MachineInstr *UseMI,
+static void getPHIDeps(const MachineInstr &UseMI,
                        SmallVectorImpl<DataDep> &Deps,
                        const MachineBasicBlock *Pred,
                        const MachineRegisterInfo *MRI) {
   // No predecessor at the beginning of a trace. Ignore dependencies.
   if (!Pred)
     return;
-  assert(UseMI->isPHI() && UseMI->getNumOperands() % 2 && "Bad PHI");
-  for (unsigned i = 1; i != UseMI->getNumOperands(); i += 2) {
-    if (UseMI->getOperand(i + 1).getMBB() == Pred) {
-      unsigned Reg = UseMI->getOperand(i).getReg();
+  assert(UseMI.isPHI() && UseMI.getNumOperands() % 2 && "Bad PHI");
+  for (unsigned i = 1; i != UseMI.getNumOperands(); i += 2) {
+    if (UseMI.getOperand(i + 1).getMBB() == Pred) {
+      unsigned Reg = UseMI.getOperand(i).getReg();
       Deps.push_back(DataDep(MRI, Reg, i));
       return;
     }
@@ -827,7 +827,7 @@ computeInstrDepths(const MachineBasicBlock *MBB) {
     // Collect all data dependencies.
     Deps.clear();
     if (UseMI.isPHI())
-      getPHIDeps(&UseMI, Deps, TBI.Pred, MTM.MRI);
+      getPHIDeps(UseMI, Deps, TBI.Pred, MTM.MRI);
     else if (getDataDeps(&UseMI, Deps, MTM.MRI))
       updatePhysDepsDownwards(&UseMI, Deps, RegUnits, MTM.TRI);
@@ -1052,7 +1052,7 @@ computeInstrHeights(const MachineBasicBlock *MBB) {
       if (!PHI.isPHI())
         break;
       Deps.clear();
-      getPHIDeps(&PHI, Deps, MBB, MTM.MRI);
+      getPHIDeps(PHI, Deps, MBB, MTM.MRI);
       if (!Deps.empty()) {
         // Loop header PHI heights are all 0.
         unsigned Height = TBI.Succ ? Cycles.lookup(&PHI).Height : 0;
@@ -1147,26 +1147,25 @@ MachineTraceMetrics::Ensemble::getTrace(const MachineBasicBlock *MBB) {
 }
 
 unsigned
-MachineTraceMetrics::Trace::getInstrSlack(const MachineInstr *MI) const {
-  assert(MI && "Not an instruction.");
-  assert(getBlockNum() == unsigned(MI->getParent()->getNumber()) &&
+MachineTraceMetrics::Trace::getInstrSlack(const MachineInstr &MI) const {
+  assert(getBlockNum() == unsigned(MI.getParent()->getNumber()) &&
          "MI must be in the trace center block");
   InstrCycles Cyc = getInstrCycles(MI);
   return getCriticalPath() - (Cyc.Depth + Cyc.Height);
 }
 
 unsigned
-MachineTraceMetrics::Trace::getPHIDepth(const MachineInstr *PHI) const {
+MachineTraceMetrics::Trace::getPHIDepth(const MachineInstr &PHI) const {
   const MachineBasicBlock *MBB = TE.MTM.MF->getBlockNumbered(getBlockNum());
   SmallVector<DataDep, 1> Deps;
   getPHIDeps(PHI, Deps, MBB, TE.MTM.MRI);
   assert(Deps.size() == 1 && "PHI doesn't have MBB as a predecessor");
   DataDep &Dep = Deps.front();
-  unsigned DepCycle = getInstrCycles(Dep.DefMI).Depth;
+  unsigned DepCycle = getInstrCycles(*Dep.DefMI).Depth;
   // Add latency if DefMI is a real instruction. Transients get latency 0.
   if (!Dep.DefMI->isTransient())
-    DepCycle += TE.MTM.SchedModel
-      .computeOperandLatency(Dep.DefMI, Dep.DefOp, PHI, Dep.UseOp);
+    DepCycle += TE.MTM.SchedModel.computeOperandLatency(Dep.DefMI, Dep.DefOp,
+                                                        &PHI, Dep.UseOp);
   return DepCycle;
 }
@@ -1252,13 +1251,13 @@ unsigned MachineTraceMetrics::Trace::getResourceLength(
   return std::max(Instrs, PRMax);
 }
 
-bool MachineTraceMetrics::Trace::isDepInTrace(const MachineInstr *DefMI,
-                                              const MachineInstr *UseMI) const {
-  if (DefMI->getParent() == UseMI->getParent())
+bool MachineTraceMetrics::Trace::isDepInTrace(const MachineInstr &DefMI,
+                                              const MachineInstr &UseMI) const {
+  if (DefMI.getParent() == UseMI.getParent())
     return true;
-  const TraceBlockInfo &DepTBI = TE.BlockInfo[DefMI->getParent()->getNumber()];
-  const TraceBlockInfo &TBI = TE.BlockInfo[UseMI->getParent()->getNumber()];
+  const TraceBlockInfo &DepTBI = TE.BlockInfo[DefMI.getParent()->getNumber()];
+  const TraceBlockInfo &TBI = TE.BlockInfo[UseMI.getParent()->getNumber()];
   return DepTBI.isUsefulDominator(TBI);
 }

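A worked example, with invented numbers, of the slack formula in getInstrSlack() above: depth and height measure the longest dependency chains into and out of the instruction, and whatever remains of the critical path is slack.

// Invented numbers; mirrors getCriticalPath() - (Depth + Height).
#include <iostream>

int main() {
  unsigned CriticalPath = 10, Depth = 3, Height = 4;
  unsigned Slack = CriticalPath - (Depth + Height); // 10 - 7
  std::cout << "Slack: " << Slack << " cycles\n";   // prints 3
}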
lib/Target/AArch64/AArch64ConditionalCompares.cpp

@@ -849,9 +849,9 @@ bool AArch64ConditionalCompares::shouldConvert() {
   // Instruction depths can be computed for all trace instructions above CmpBB.
   unsigned HeadDepth =
-      Trace.getInstrCycles(CmpConv.Head->getFirstTerminator()).Depth;
+      Trace.getInstrCycles(*CmpConv.Head->getFirstTerminator()).Depth;
   unsigned CmpBBDepth =
-      Trace.getInstrCycles(CmpConv.CmpBB->getFirstTerminator()).Depth;
+      Trace.getInstrCycles(*CmpConv.CmpBB->getFirstTerminator()).Depth;
   DEBUG(dbgs() << "Head depth: " << HeadDepth
                << "\nCmpBB depth: " << CmpBBDepth << '\n');
   if (CmpBBDepth > HeadDepth + DelayLimit) {