
ExecutionDepsFix refactoring:

- Move comments to the class definitions in the header file
- Convert comments to Doxygen style
- Rephrase the comment explaining the loop traversal order

This is one of several patches that fix bugzilla https://bugs.llvm.org/show_bug.cgi?id=33869.
Most of the patches refactor the existing code.

Additional relevant reviews:
https://reviews.llvm.org/D40330
https://reviews.llvm.org/D40332
https://reviews.llvm.org/D40333
https://reviews.llvm.org/D40334

Differential Revision: https://reviews.llvm.org/D40331

Change-Id: I9a12618db5b66128611fa71b54a233414f6012ac
llvm-svn: 323092
Author: Marina Yatsina, 2018-01-22 10:06:10 +00:00
parent 6d0ce3911c
commit 601fd0ed78
2 changed files with 133 additions and 94 deletions

Changed file 1 of 2:

@@ -52,29 +52,29 @@ class TargetInstrInfo;
 /// keep track of the fact that the register is now available in multiple
 /// domains.
 struct DomainValue {
-  // Basic reference counting.
+  /// Basic reference counting.
   unsigned Refs = 0;
-  // Bitmask of available domains. For an open DomainValue, it is the still
-  // possible domains for collapsing. For a collapsed DomainValue it is the
-  // domains where the register is available for free.
+  /// Bitmask of available domains. For an open DomainValue, it is the still
+  /// possible domains for collapsing. For a collapsed DomainValue it is the
+  /// domains where the register is available for free.
   unsigned AvailableDomains;
-  // Pointer to the next DomainValue in a chain. When two DomainValues are
-  // merged, Victim.Next is set to point to Victor, so old DomainValue
-  // references can be updated by following the chain.
+  /// Pointer to the next DomainValue in a chain. When two DomainValues are
+  /// merged, Victim.Next is set to point to Victor, so old DomainValue
+  /// references can be updated by following the chain.
   DomainValue *Next;
-  // Twiddleable instructions using or defining these registers.
+  /// Twiddleable instructions using or defining these registers.
   SmallVector<MachineInstr*, 8> Instrs;
   DomainValue() { clear(); }
-  // A collapsed DomainValue has no instructions to twiddle - it simply keeps
-  // track of the domains where the registers are already available.
+  /// A collapsed DomainValue has no instructions to twiddle - it simply keeps
+  /// track of the domains where the registers are already available.
   bool isCollapsed() const { return Instrs.empty(); }
-  // Is domain available?
+  /// Is domain available?
   bool hasDomain(unsigned domain) const {
     assert(domain <
               static_cast<unsigned>(std::numeric_limits<unsigned>::digits) &&
@@ -82,7 +82,7 @@ struct DomainValue {
     return AvailableDomains & (1u << domain);
   }
-  // Mark domain as available.
+  /// Mark domain as available.
   void addDomain(unsigned domain) {
     AvailableDomains |= 1u << domain;
   }
@@ -92,17 +92,17 @@ struct DomainValue {
     AvailableDomains = 1u << domain;
   }
-  // Return bitmask of domains that are available and in mask.
+  /// Return bitmask of domains that are available and in mask.
   unsigned getCommonDomains(unsigned mask) const {
     return AvailableDomains & mask;
   }
-  // First domain available.
+  /// First domain available.
   unsigned getFirstDomain() const {
     return countTrailingZeros(AvailableDomains);
   }
-  // Clear this DomainValue and point to next which has all its data.
+  /// Clear this DomainValue and point to next which has all its data.
   void clear() {
     AvailableDomains = 0;
     Next = nullptr;
@@ -114,30 +114,69 @@ struct DomainValue {
 /// ReachingDefAnalysis and ExecutionDomainFix.
 /// It identifies basic blocks that are part of loops and should to be visited twice
 /// and returns efficient traversal order for all the blocks.
+///
+/// We want to visit every instruction in every basic block in order to update
+/// it's execution domain or collect clearance information. However, for the
+/// clearance calculation, we need to know clearances from all predecessors
+/// (including any backedges), therfore we need to visit some blocks twice.
+/// As an example, consider the following loop.
+///
+///
+/// PH -> A -> B (xmm<Undef> -> xmm<Def>) -> C -> D -> EXIT
+///       ^                                  |
+///       +----------------------------------+
+///
+/// The iteration order this pass will return is as follows:
+/// Optimized: PH A B C A' B' C' D
+///
+/// The basic block order is constructed as follows:
+/// Once we finish processing some block, we update the counters in MBBInfos
+/// and re-process any successors that are now 'done'.
+/// We call a block that is ready for its final round of processing `done`
+/// (isBlockDone), e.g. when all predecessor information is known.
+///
+/// Note that a naive traversal order would be to do two complete passes over
+/// all basic blocks/instructions, the first for recording clearances, the
+/// second for updating clearance based on backedges.
+/// However, for functions without backedges, or functions with a lot of
+/// straight-line code, and a small loop, that would be a lot of unnecessary
+/// work (since only the BBs that are part of the loop require two passes).
+///
+/// E.g., the naive iteration order for the above exmple is as follows:
+/// Naive: PH A B C D A' B' C' D'
+///
+/// In the optimized approach we avoid processing D twice, because we
+/// can entirely process the predecessors before getting to D.
 class LoopTraversal {
 private:
   struct MBBInfo {
-    // Whether we have gotten to this block in primary processing yet.
+    /// Whether we have gotten to this block in primary processing yet.
     bool PrimaryCompleted = false;
-    // The number of predecessors for which primary processing has completed
+    /// The number of predecessors for which primary processing has completed
     unsigned IncomingProcessed = 0;
-    // The value of `IncomingProcessed` at the start of primary processing
+    /// The value of `IncomingProcessed` at the start of primary processing
     unsigned PrimaryIncoming = 0;
-    // The number of predecessors for which all processing steps are done.
+    /// The number of predecessors for which all processing steps are done.
     unsigned IncomingCompleted = 0;
     MBBInfo() = default;
   };
   using MBBInfoMap = SmallVector<MBBInfo, 4>;
+  /// Helps keep track if we proccessed this block and all its predecessors.
   MBBInfoMap MBBInfos;
 public:
   struct TraversedMBBInfo {
+    /// The basic block.
     MachineBasicBlock *MBB = nullptr;
+    /// True if this is the first time we process the basic block.
     bool PrimaryPass = true;
+    /// True if the block that is ready for its final round of processing.
     bool IsDone = true;
     TraversedMBBInfo(MachineBasicBlock *BB = nullptr, bool Primary = true,
@@ -146,10 +185,13 @@ public:
   };
   LoopTraversal() {}
+  /// \brief Identifies basic blocks that are part of loops and should to be
+  /// visited twise and returns efficient traversal order for all the blocks.
   typedef SmallVector<TraversedMBBInfo, 4> TraversalOrder;
   TraversalOrder traverse(MachineFunction &MF);
 private:
+  /// Returens true if the block is ready for its final round of processing.
   bool isBlockDone(MachineBasicBlock *MBB);
 };
@@ -168,9 +210,9 @@ private:
   using LiveRegsDefInfo = std::vector<int>;
   LiveRegsDefInfo LiveRegs;
-  // Keeps clearance information for all registers. Note that this
-  // is different from the usual definition notion of liveness. The CPU
-  // doesn't care whether or not we consider a register killed.
+  /// Keeps clearance information for all registers. Note that this
+  /// is different from the usual definition notion of liveness. The CPU
+  /// doesn't care whether or not we consider a register killed.
   using OutRegsInfoMap = SmallVector<LiveRegsDefInfo, 4>;
   OutRegsInfoMap MBBOutRegsInfos;
@@ -190,7 +232,7 @@ private:
   using MBBReachingDefsInfo = SmallVector<MBBDefsInfo, 4>;
   MBBReachingDefsInfo MBBReachingDefs;
-  // Default values are 'nothing happened a long time ago'.
+  /// Default values are 'nothing happened a long time ago'.
   const int ReachingDedDefaultVal = -(1 << 20);
 public:
@@ -216,14 +258,23 @@ public:
   /// Provides the instruction id of the closest reaching def instruction of
   /// PhysReg that reaches MI, relative to the begining of MI's basic block.
   int getReachingDef(MachineInstr *MI, int PhysReg);
   /// Provides the clearance - the number of instructions since the closest
   /// reaching def instuction of PhysReg that reaches MI.
   int getClearance(MachineInstr *MI, MCPhysReg PhysReg);
 private:
+  /// Set up LiveRegs by merging predecessor live-out values.
   void enterBasicBlock(const LoopTraversal::TraversedMBBInfo &TraversedMBB);
+  /// Update live-out values.
   void leaveBasicBlock(const LoopTraversal::TraversedMBBInfo &TraversedMBB);
+  /// Process he given basic block.
   void processBasicBlock(const LoopTraversal::TraversedMBBInfo &TraversedMBB);
+  /// Update def-ages for registers defined by MI.
+  /// Also break dependencies on partial defs and undef uses.
   void processDefs(MachineInstr *);
 };
@@ -241,9 +292,9 @@ class ExecutionDomainFix : public MachineFunctionPass {
   /// This counts as a DomainValue reference.
   using LiveRegsDVInfo = std::vector<DomainValue *>;
   LiveRegsDVInfo LiveRegs;
-  // Keeps domain information for all registers. Note that this
-  // is different from the usual definition notion of liveness. The CPU
-  // doesn't care whether or not we consider a register killed.
+  /// Keeps domain information for all registers. Note that this
+  /// is different from the usual definition notion of liveness. The CPU
+  /// doesn't care whether or not we consider a register killed.
   using OutRegsInfoMap = SmallVector<LiveRegsDVInfo, 4>;
   OutRegsInfoMap MBBOutRegsInfos;
@@ -267,30 +318,65 @@ public:
   }
 private:
+  /// Translate TRI register number to a list of indices into our smaller tables
+  /// of interesting registers.
   iterator_range<SmallVectorImpl<int>::const_iterator>
       regIndices(unsigned Reg) const;
-  // DomainValue allocation.
+  /// DomainValue allocation.
   DomainValue *alloc(int domain = -1);
+  /// Add reference to DV.
   DomainValue *retain(DomainValue *DV) {
     if (DV) ++DV->Refs;
     return DV;
   }
+  /// Release a reference to DV. When the last reference is released,
+  /// collapse if needed.
   void release(DomainValue*);
+  /// Follow the chain of dead DomainValues until a live DomainValue is reached.
+  /// Update the referenced pointer when necessary.
   DomainValue *resolve(DomainValue*&);
-  // LiveRegs manipulations.
+  /// Set LiveRegs[rx] = dv, updating reference counts.
   void setLiveReg(int rx, DomainValue *DV);
+  /// Kill register rx, recycle or collapse any DomainValue.
   void kill(int rx);
+  /// Force register rx into domain.
   void force(int rx, unsigned domain);
+  /// Collapse open DomainValue into given domain. If there are multiple
+  /// registers using dv, they each get a unique collapsed DomainValue.
   void collapse(DomainValue *dv, unsigned domain);
+  /// All instructions and registers in B are moved to A, and B is released.
   bool merge(DomainValue *A, DomainValue *B);
+  /// Set up LiveRegs by merging predecessor live-out values.
   void enterBasicBlock(const LoopTraversal::TraversedMBBInfo &TraversedMBB);
+  /// Update live-out values.
   void leaveBasicBlock(const LoopTraversal::TraversedMBBInfo &TraversedMBB);
+  /// Process he given basic block.
   void processBasicBlock(const LoopTraversal::TraversedMBBInfo &TraversedMBB);
+  /// Visit given insturcion.
   bool visitInstr(MachineInstr *);
+  /// Update def-ages for registers defined by MI.
+  /// If Kill is set, also kill off DomainValues clobbered by the defs.
   void processDefs(MachineInstr *, bool Kill);
+  /// A soft instruction can be changed to work in other domains given by mask.
   void visitSoftInstr(MachineInstr*, unsigned mask);
+  /// A hard instruction only works in one domain. All input registers will be
+  /// forced into that domain.
   void visitHardInstr(MachineInstr*, unsigned domain);
 };
@@ -330,11 +416,30 @@ public:
   }
 private:
+  /// Process he given basic block.
   void processBasicBlock(MachineBasicBlock *MBB);
+  /// Update def-ages for registers defined by MI.
+  /// Also break dependencies on partial defs and undef uses.
   void processDefs(MachineInstr *MI);
+  /// \brief Helps avoid false dependencies on undef registers by updating the
+  /// machine instructions' undef operand to use a register that the instruction
+  /// is truly dependent on, or use a register with clearance higher than Pref.
+  /// Returns true if it was able to find a true dependency, thus not requiring
+  /// a dependency breaking instruction regardless of clearance.
   bool pickBestRegisterForUndef(MachineInstr *MI, unsigned OpIdx,
                                 unsigned Pref);
+  /// \brief Return true to if it makes sense to break dependence on a partial def
+  /// or undef use.
   bool shouldBreakDependence(MachineInstr*, unsigned OpIdx, unsigned Pref);
+  /// \brief Break false dependencies on undefined register reads.
+  /// Walk the block backward computing precise liveness. This is expensive, so we
+  /// only do it on demand. Note that the occurrence of undefined register reads
+  /// that should be broken is very rare, but when they occur we may have many in
+  /// a single block.
   void processUndefReads(MachineBasicBlock*);
 };
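
Editor's note: the declarations above only describe the traversal interface. As a rough sketch (not code from this commit), a client pass is expected to consume the order returned by LoopTraversal::traverse along the following lines; the free function below is invented for illustration, and the real passes do the equivalent inside their own runOnMachineFunction using their processBasicBlock members.

// Illustrative sketch only: drive a per-block analysis over the order that
// LoopTraversal::traverse returns. Assumes the header shown above is included.
static void runOverTraversalOrder(llvm::MachineFunction &MF) {
  llvm::LoopTraversal Traversal;
  llvm::LoopTraversal::TraversalOrder Order = Traversal.traverse(MF);
  for (const llvm::LoopTraversal::TraversedMBBInfo &TraversedMBB : Order) {
    // Blocks inside loops are emitted twice: once as a primary pass
    // (PrimaryPass == true) before all backedge predecessors are known, and
    // once more when IsDone is true. Straight-line blocks appear only once.
    // A real client would call its own processBasicBlock(TraversedMBB) here,
    // which merges predecessor state, visits every instruction, and then
    // publishes live-out state for the successors.
    (void)TraversedMBB; // silence unused-variable warnings in this skeleton
  }
}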

Changed file 2 of 2:

@@ -27,8 +27,6 @@ INITIALIZE_PASS_DEPENDENCY(ReachingDefAnalysis)
 INITIALIZE_PASS_END(BreakFalseDeps, "break-false-deps", "BreakFalseDeps", false,
                     false)
-/// Translate TRI register number to a list of indices into our smaller tables
-/// of interesting registers.
 iterator_range<SmallVectorImpl<int>::const_iterator>
 ExecutionDomainFix::regIndices(unsigned Reg) const {
   assert(Reg < AliasMap.size() && "Invalid register");
@@ -47,8 +45,6 @@ DomainValue *ExecutionDomainFix::alloc(int domain) {
   return dv;
 }
-/// Release a reference to DV. When the last reference is released,
-/// collapse if needed.
 void ExecutionDomainFix::release(DomainValue *DV) {
   while (DV) {
     assert(DV->Refs && "Bad DomainValue");
@@ -67,8 +63,6 @@ void ExecutionDomainFix::release(DomainValue *DV) {
   }
 }
-/// Follow the chain of dead DomainValues until a live DomainValue is reached.
-/// Update the referenced pointer when necessary.
 DomainValue *ExecutionDomainFix::resolve(DomainValue *&DVRef) {
   DomainValue *DV = DVRef;
   if (!DV || !DV->Next)
@@ -85,7 +79,6 @@ DomainValue *ExecutionDomainFix::resolve(DomainValue *&DVRef) {
   return DV;
 }
-/// Set LiveRegs[rx] = dv, updating reference counts.
 void ExecutionDomainFix::setLiveReg(int rx, DomainValue *dv) {
   assert(unsigned(rx) < NumRegs && "Invalid index");
   assert(!LiveRegs.empty() && "Must enter basic block first.");
@@ -97,7 +90,6 @@ void ExecutionDomainFix::setLiveReg(int rx, DomainValue *dv) {
   LiveRegs[rx] = retain(dv);
 }
-// Kill register rx, recycle or collapse any DomainValue.
 void ExecutionDomainFix::kill(int rx) {
   assert(unsigned(rx) < NumRegs && "Invalid index");
   assert(!LiveRegs.empty() && "Must enter basic block first.");
@@ -108,7 +100,6 @@ void ExecutionDomainFix::kill(int rx) {
   LiveRegs[rx] = nullptr;
 }
-/// Force register rx into domain.
 void ExecutionDomainFix::force(int rx, unsigned domain) {
   assert(unsigned(rx) < NumRegs && "Invalid index");
   assert(!LiveRegs.empty() && "Must enter basic block first.");
@@ -130,8 +121,6 @@ void ExecutionDomainFix::force(int rx, unsigned domain) {
   }
 }
-/// Collapse open DomainValue into given domain. If there are multiple
-/// registers using dv, they each get a unique collapsed DomainValue.
 void ExecutionDomainFix::collapse(DomainValue *dv, unsigned domain) {
   assert(dv->hasDomain(domain) && "Cannot collapse");
@@ -147,7 +136,6 @@ void ExecutionDomainFix::collapse(DomainValue *dv, unsigned domain) {
       setLiveReg(rx, alloc(domain));
 }
-/// All instructions and registers in B are moved to A, and B is released.
 bool ExecutionDomainFix::merge(DomainValue *A, DomainValue *B) {
   assert(!A->isCollapsed() && "Cannot merge into collapsed");
   assert(!B->isCollapsed() && "Cannot merge from collapsed");
@@ -173,7 +161,6 @@ bool ExecutionDomainFix::merge(DomainValue *A, DomainValue *B) {
   return true;
 }
-/// Set up LiveRegs by merging predecessor live-out values.
 void ReachingDefAnalysis::enterBasicBlock(
     const LoopTraversal::TraversedMBBInfo &TraversedMBB) {
@@ -228,7 +215,6 @@ void ReachingDefAnalysis::enterBasicBlock(
             << (!TraversedMBB.IsDone ? ": incomplete\n" : ": all preds known\n"));
 }
-/// Set up LiveRegs by merging predecessor live-out values.
 void ExecutionDomainFix::enterBasicBlock(
     const LoopTraversal::TraversedMBBInfo &TraversedMBB) {
@@ -328,11 +314,6 @@ bool ExecutionDomainFix::visitInstr(MachineInstr *MI) {
   return !DomP.first;
 }
-/// \brief Helps avoid false dependencies on undef registers by updating the
-/// machine instructions' undef operand to use a register that the instruction
-/// is truly dependent on, or use a register with clearance higher than Pref.
-/// Returns true if it was able to find a true dependency, thus not requiring
-/// a dependency breaking instruction regardless of clearance.
 bool BreakFalseDeps::pickBestRegisterForUndef(MachineInstr *MI,
                                               unsigned OpIdx, unsigned Pref) {
   MachineOperand &MO = MI->getOperand(OpIdx);
@@ -389,8 +370,6 @@ bool BreakFalseDeps::pickBestRegisterForUndef(MachineInstr *MI,
   return false;
 }
-/// \brief Return true to if it makes sense to break dependence on a partial def
-/// or undef use.
 bool BreakFalseDeps::shouldBreakDependence(MachineInstr *MI, unsigned OpIdx,
                                            unsigned Pref) {
   unsigned reg = MI->getOperand(OpIdx).getReg();
@@ -405,10 +384,6 @@ bool BreakFalseDeps::shouldBreakDependence(MachineInstr *MI, unsigned OpIdx,
   return false;
 }
-// Update def-ages for registers defined by MI.
-// If Kill is set, also kill off DomainValues clobbered by the defs.
-//
-// Also break dependencies on partial defs and undef uses.
 void ExecutionDomainFix::processDefs(MachineInstr *MI, bool Kill) {
   assert(!MI->isDebugValue() && "Won't process debug values");
   const MCInstrDesc &MCID = MI->getDesc();
@@ -431,8 +406,6 @@ void ExecutionDomainFix::processDefs(MachineInstr *MI, bool Kill) {
   }
 }
-// Update def-ages for registers defined by MI.
-// Also break dependencies on partial defs and undef uses.
 void ReachingDefAnalysis::processDefs(MachineInstr *MI) {
   assert(!MI->isDebugValue() && "Won't process debug values");
@@ -461,8 +434,6 @@ void ReachingDefAnalysis::processDefs(MachineInstr *MI) {
   ++CurInstr;
 }
-// Update def-ages for registers defined by MI.
-// Also break dependencies on partial defs and undef uses.
 void BreakFalseDeps::processDefs(MachineInstr *MI) {
   assert(!MI->isDebugValue() && "Won't process debug values");
@@ -494,12 +465,6 @@ void BreakFalseDeps::processDefs(MachineInstr *MI) {
   }
 }
-/// \break Break false dependencies on undefined register reads.
-///
-/// Walk the block backward computing precise liveness. This is expensive, so we
-/// only do it on demand. Note that the occurrence of undefined register reads
-/// that should be broken is very rare, but when they occur we may have many in
-/// a single block.
 void BreakFalseDeps::processUndefReads(MachineBasicBlock *MBB) {
   if (UndefReads.empty())
     return;
@@ -531,8 +496,6 @@ void BreakFalseDeps::processUndefReads(MachineBasicBlock *MBB) {
   }
 }
-// A hard instruction only works in one domain. All input registers will be
-// forced into that domain.
 void ExecutionDomainFix::visitHardInstr(MachineInstr *mi, unsigned domain) {
   // Collapse all uses.
   for (unsigned i = mi->getDesc().getNumDefs(),
@@ -555,7 +518,6 @@ void ExecutionDomainFix::visitHardInstr(MachineInstr *mi, unsigned domain) {
   }
 }
-// A soft instruction can be changed to work in other domains given by mask.
 void ExecutionDomainFix::visitSoftInstr(MachineInstr *mi, unsigned mask) {
   // Bitmask of available domains for this instruction after taking collapsed
   // operands into account.
@@ -720,34 +682,6 @@ LoopTraversal::traverse(MachineFunction &MF) {
   // Initialize the MMBInfos
   MBBInfos.assign(MF.getNumBlockIDs(), MBBInfo());
-  /*
-   * We want to visit every instruction in every basic block in order to update
-   * it's execution domain or break any false dependencies. However, for the
-   * dependency breaking, we need to know clearances from all predecessors
-   * (including any backedges). One way to do so would be to do two complete
-   * passes over all basic blocks/instructions, the first for recording
-   * clearances, the second to break the dependencies. However, for functions
-   * without backedges, or functions with a lot of straight-line code, and
-   * a small loop, that would be a lot of unnecessary work (since only the
-   * BBs that are part of the loop require two passes). As an example,
-   * consider the following loop.
-   *
-   *
-   * PH -> A -> B (xmm<Undef> -> xmm<Def>) -> C -> D -> EXIT
-   *       ^                                  |
-   *       +----------------------------------+
-   *
-   * The iteration order is as follows:
-   * Naive: PH A B C D A' B' C' D'
-   * Optimized: PH A B C A' B' C' D
-   *
-   * Note that we avoid processing D twice, because we can entirely process
-   * the predecessors before getting to D. We call a block that is ready
-   * for its second round of processing `done` (isBlockDone). Once we finish
-   * processing some block, we update the counters in MBBInfos and re-process
-   * any successors that are now done.
-   */
   MachineBasicBlock *Entry = &*MF.begin();
   ReversePostOrderTraversal<MachineBasicBlock*> RPOT(Entry);
   SmallVector<MachineBasicBlock *, 4> Workqueue;
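
Editor's aside: the relocated comment describes the counter scheme only in prose. Below is a minimal sketch of the "done" test it implies, with field names mirroring LoopTraversal::MBBInfo; it is an illustration of the idea, not necessarily the exact code of this revision.

// Sketch only: per-block counters and the "done" test implied by the comment
// above. A block is ready for its final round of processing once its primary
// pass has run, every predecessor has finished primary processing, and as many
// predecessors have fully completed as had already been processed when this
// block's primary pass started.
struct BlockCountersSketch {
  bool PrimaryCompleted = false;  // primary processing has run for this block
  unsigned IncomingProcessed = 0; // preds that finished primary processing
  unsigned PrimaryIncoming = 0;   // IncomingProcessed at start of primary pass
  unsigned IncomingCompleted = 0; // preds for which all processing is done
};

static bool isBlockDoneSketch(const BlockCountersSketch &C, unsigned NumPreds) {
  return C.PrimaryCompleted &&
         C.IncomingCompleted == C.PrimaryIncoming &&
         C.IncomingProcessed == NumPreds;
}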