
RegAlloc: Start using Register

Matt Arsenault 2020-06-30 11:57:24 -04:00
parent 9854836d08
commit cfb54aad1c
12 changed files with 184 additions and 184 deletions
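The change is mechanical throughout: raw unsigned register numbers become the llvm::Register wrapper, so static helpers such as Register::isVirtualRegister(Reg) give way to member queries like Reg.isVirtual() (see the InlineSpiller::isSibling hunk below). A minimal standalone sketch of the wrapper's behavior (assuming the Register API at this point in the tree; this is not code taken from the commit):

#include "llvm/CodeGen/Register.h"
#include <cassert>

using namespace llvm;

// Illustrative only: shows why Register is a near drop-in replacement for
// unsigned register numbers in the allocators.
void registerWrapperSketch() {
  Register NoReg;                              // default-constructs to "no register" (0)
  assert(!NoReg.isValid());

  Register Virt = Register::index2VirtReg(0);  // first virtual register
  assert(Virt.isVirtual() && !Virt.isPhysical());

  // The implicit conversion back to unsigned keeps not-yet-migrated
  // interfaces compiling, which is what lets a change like this one land
  // incrementally ("Start using Register") rather than all at once.
  unsigned Raw = Virt;
  assert(Register::isVirtualRegister(Raw));
  (void)Raw;
}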

View File

@ -68,7 +68,7 @@ public:
private:
LiveInterval *Parent;
SmallVectorImpl<unsigned> &NewRegs;
SmallVectorImpl<Register> &NewRegs;
MachineRegisterInfo &MRI;
LiveIntervals &LIS;
VirtRegMap *VRM;
@ -121,7 +121,7 @@ private:
bool useIsKill(const LiveInterval &LI, const MachineOperand &MO) const;
/// Create a new empty interval based on OldReg.
LiveInterval &createEmptyIntervalFrom(unsigned OldReg, bool createSubRanges);
LiveInterval &createEmptyIntervalFrom(Register OldReg, bool createSubRanges);
public:
/// Create a LiveRangeEdit for breaking down parent into smaller pieces.
@ -135,7 +135,7 @@ public:
/// be done. This could be the case if called before Regalloc.
/// @param deadRemats The collection of all the instructions defining an
/// original reg and are dead after remat.
LiveRangeEdit(LiveInterval *parent, SmallVectorImpl<unsigned> &newRegs,
LiveRangeEdit(LiveInterval *parent, SmallVectorImpl<Register> &newRegs,
MachineFunction &MF, LiveIntervals &lis, VirtRegMap *vrm,
Delegate *delegate = nullptr,
SmallPtrSet<MachineInstr *, 32> *deadRemats = nullptr)
@ -152,15 +152,15 @@ public:
return *Parent;
}
unsigned getReg() const { return getParent().reg; }
Register getReg() const { return getParent().reg; }
/// Iterator for accessing the new registers added by this edit.
using iterator = SmallVectorImpl<unsigned>::const_iterator;
using iterator = SmallVectorImpl<Register>::const_iterator;
iterator begin() const { return NewRegs.begin() + FirstNew; }
iterator end() const { return NewRegs.end(); }
unsigned size() const { return NewRegs.size() - FirstNew; }
bool empty() const { return size() == 0; }
unsigned get(unsigned idx) const { return NewRegs[idx + FirstNew]; }
Register get(unsigned idx) const { return NewRegs[idx + FirstNew]; }
/// pop_back - It allows LiveRangeEdit users to drop new registers.
/// The context is when an original def instruction of a register is
@ -172,12 +172,12 @@ public:
/// we want to drop it from the NewRegs set.
void pop_back() { NewRegs.pop_back(); }
ArrayRef<unsigned> regs() const {
ArrayRef<Register> regs() const {
return makeArrayRef(NewRegs).slice(FirstNew);
}
/// createFrom - Create a new virtual register based on OldReg.
unsigned createFrom(unsigned OldReg);
Register createFrom(Register OldReg);
/// create - Create a new register with the same class and original slot as
/// parent.
@ -185,7 +185,7 @@ public:
return createEmptyIntervalFrom(getReg(), true);
}
unsigned create() { return createFrom(getReg()); }
Register create() { return createFrom(getReg()); }
/// anyRematerializable - Return true if any parent values may be
/// rematerializable.
@ -234,7 +234,7 @@ public:
/// eraseVirtReg - Notify the delegate that Reg is no longer in use, and try
/// to erase it from LIS.
void eraseVirtReg(unsigned Reg);
void eraseVirtReg(Register Reg);
/// eliminateDeadDefs - Try to delete machine instructions that are now dead
/// (allDefsAreDead returns true). This may cause live intervals to be trimmed
@ -243,7 +243,7 @@ public:
/// allocator. These registers should not be split into new intervals
/// as currently those new intervals are not guaranteed to spill.
void eliminateDeadDefs(SmallVectorImpl<MachineInstr *> &Dead,
ArrayRef<unsigned> RegsBeingSpilled = None,
ArrayRef<Register> RegsBeingSpilled = None,
AAResults *AA = nullptr);
/// calculateRegClassAndHint - Recompute register class and hint for each new

View File

@ -114,10 +114,10 @@ class HoistSpillHelper : private LiveRangeEdit::Delegate {
/// This is the map from original register to a set containing all its
/// siblings. To hoist a spill to another BB, we need to find out a live
/// sibling there and use it as the source of the new spill.
DenseMap<unsigned, SmallSetVector<unsigned, 16>> Virt2SiblingsMap;
DenseMap<Register, SmallSetVector<Register, 16>> Virt2SiblingsMap;
bool isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
MachineBasicBlock &BB, unsigned &LiveReg);
MachineBasicBlock &BB, Register &LiveReg);
void rmRedundantSpills(
SmallPtrSet<MachineInstr *, 16> &Spills,
@ -176,7 +176,7 @@ class InlineSpiller : public Spiller {
unsigned Original;
// All registers to spill to StackSlot, including the main register.
SmallVector<unsigned, 8> RegsToSpill;
SmallVector<Register, 8> RegsToSpill;
// All COPY instructions to/from snippets.
// They are ignored since both operands refer to the same stack slot.
@ -212,24 +212,24 @@ private:
bool isSnippet(const LiveInterval &SnipLI);
void collectRegsToSpill();
bool isRegToSpill(unsigned Reg) { return is_contained(RegsToSpill, Reg); }
bool isRegToSpill(Register Reg) { return is_contained(RegsToSpill, Reg); }
bool isSibling(unsigned Reg);
bool isSibling(Register Reg);
bool hoistSpillInsideBB(LiveInterval &SpillLI, MachineInstr &CopyMI);
void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);
void markValueUsed(LiveInterval*, VNInfo*);
bool canGuaranteeAssignmentAfterRemat(unsigned VReg, MachineInstr &MI);
bool canGuaranteeAssignmentAfterRemat(Register VReg, MachineInstr &MI);
bool reMaterializeFor(LiveInterval &, MachineInstr &MI);
void reMaterializeAll();
bool coalesceStackAccess(MachineInstr *MI, unsigned Reg);
bool coalesceStackAccess(MachineInstr *MI, Register Reg);
bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>>,
MachineInstr *LoadMI = nullptr);
void insertReload(unsigned VReg, SlotIndex, MachineBasicBlock::iterator MI);
void insertSpill(unsigned VReg, bool isKill, MachineBasicBlock::iterator MI);
void insertReload(Register VReg, SlotIndex, MachineBasicBlock::iterator MI);
void insertSpill(Register VReg, bool isKill, MachineBasicBlock::iterator MI);
void spillAroundUses(unsigned Reg);
void spillAroundUses(Register Reg);
void spillAll();
};
@ -259,21 +259,21 @@ Spiller *llvm::createInlineSpiller(MachineFunctionPass &pass,
/// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
/// otherwise return 0.
static unsigned isFullCopyOf(const MachineInstr &MI, unsigned Reg) {
static Register isFullCopyOf(const MachineInstr &MI, Register Reg) {
if (!MI.isFullCopy())
return 0;
return Register();
if (MI.getOperand(0).getReg() == Reg)
return MI.getOperand(1).getReg();
if (MI.getOperand(1).getReg() == Reg)
return MI.getOperand(0).getReg();
return 0;
return Register();
}
/// isSnippet - Identify if a live interval is a snippet that should be spilled.
/// It is assumed that SnipLI is a virtual register with the same original as
/// Edit->getReg().
bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
unsigned Reg = Edit->getReg();
Register Reg = Edit->getReg();
// A snippet is a tiny live range with only a single instruction using it
// besides copies to/from Reg or spills/fills. We accept:
@ -317,7 +317,7 @@ bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
/// collectRegsToSpill - Collect live range snippets that only have a single
/// real use.
void InlineSpiller::collectRegsToSpill() {
unsigned Reg = Edit->getReg();
Register Reg = Edit->getReg();
// Main register always spills.
RegsToSpill.assign(1, Reg);
@ -331,7 +331,7 @@ void InlineSpiller::collectRegsToSpill() {
for (MachineRegisterInfo::reg_instr_iterator
RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end(); RI != E; ) {
MachineInstr &MI = *RI++;
unsigned SnipReg = isFullCopyOf(MI, Reg);
Register SnipReg = isFullCopyOf(MI, Reg);
if (!isSibling(SnipReg))
continue;
LiveInterval &SnipLI = LIS.getInterval(SnipReg);
@ -346,8 +346,8 @@ void InlineSpiller::collectRegsToSpill() {
}
}
bool InlineSpiller::isSibling(unsigned Reg) {
return Register::isVirtualRegister(Reg) && VRM.getOriginal(Reg) == Original;
bool InlineSpiller::isSibling(Register Reg) {
return Reg.isVirtual() && VRM.getOriginal(Reg) == Original;
}
/// It is beneficial to spill to earlier place in the same BB in case
@ -432,7 +432,7 @@ void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
do {
LiveInterval *LI;
std::tie(LI, VNI) = WorkList.pop_back_val();
unsigned Reg = LI->reg;
Register Reg = LI->reg;
LLVM_DEBUG(dbgs() << "Checking redundant spills for " << VNI->id << '@'
<< VNI->def << " in " << *LI << '\n');
@ -456,7 +456,7 @@ void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
continue;
// Follow sibling copies down the dominator tree.
if (unsigned DstReg = isFullCopyOf(MI, Reg)) {
if (Register DstReg = isFullCopyOf(MI, Reg)) {
if (isSibling(DstReg)) {
LiveInterval &DstLI = LIS.getInterval(DstReg);
VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getRegSlot());
@ -518,7 +518,7 @@ void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
} while (!WorkList.empty());
}
bool InlineSpiller::canGuaranteeAssignmentAfterRemat(unsigned VReg,
bool InlineSpiller::canGuaranteeAssignmentAfterRemat(Register VReg,
MachineInstr &MI) {
if (!RestrictStatepointRemat)
return true;
@ -615,7 +615,7 @@ bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
}
// Allocate a new register for the remat.
unsigned NewVReg = Edit->createFrom(Original);
Register NewVReg = Edit->createFrom(Original);
// Finally we can rematerialize OrigMI before MI.
SlotIndex DefIdx =
@ -654,7 +654,7 @@ void InlineSpiller::reMaterializeAll() {
// Try to remat before all uses of snippets.
bool anyRemat = false;
for (unsigned Reg : RegsToSpill) {
for (Register Reg : RegsToSpill) {
LiveInterval &LI = LIS.getInterval(Reg);
for (MachineRegisterInfo::reg_bundle_iterator
RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
@ -675,7 +675,7 @@ void InlineSpiller::reMaterializeAll() {
return;
// Remove any values that were completely rematted.
for (unsigned Reg : RegsToSpill) {
for (Register Reg : RegsToSpill) {
LiveInterval &LI = LIS.getInterval(Reg);
for (LiveInterval::vni_iterator I = LI.vni_begin(), E = LI.vni_end();
I != E; ++I) {
@ -705,7 +705,7 @@ void InlineSpiller::reMaterializeAll() {
// So to get rid of unused reg, we need to check whether it has non-dbg
// reference instead of whether it has non-empty interval.
unsigned ResultPos = 0;
for (unsigned Reg : RegsToSpill) {
for (Register Reg : RegsToSpill) {
if (MRI.reg_nodbg_empty(Reg)) {
Edit->eraseVirtReg(Reg);
continue;
@ -727,9 +727,9 @@ void InlineSpiller::reMaterializeAll() {
//===----------------------------------------------------------------------===//
/// If MI is a load or store of StackSlot, it can be removed.
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, unsigned Reg) {
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, Register Reg) {
int FI = 0;
unsigned InstrReg = TII.isLoadFromStackSlot(*MI, FI);
Register InstrReg = TII.isLoadFromStackSlot(*MI, FI);
bool IsLoad = InstrReg;
if (!IsLoad)
InstrReg = TII.isStoreToStackSlot(*MI, FI);
@ -763,7 +763,7 @@ static void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B,
MachineBasicBlock::iterator E,
LiveIntervals const &LIS,
const char *const header,
unsigned VReg =0) {
Register VReg = Register()) {
char NextLine = '\n';
char SlotIndent = '\t';
@ -808,7 +808,7 @@ foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
return false;
bool WasCopy = MI->isCopy();
unsigned ImpReg = 0;
Register ImpReg;
// Spill subregs if the target allows it.
// We always want to spill subregs for stackmap/patchpoint pseudos.
@ -912,7 +912,7 @@ foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
return true;
}
void InlineSpiller::insertReload(unsigned NewVReg,
void InlineSpiller::insertReload(Register NewVReg,
SlotIndex Idx,
MachineBasicBlock::iterator MI) {
MachineBasicBlock &MBB = *MI->getParent();
@ -943,7 +943,7 @@ static bool isRealSpill(const MachineInstr &Def) {
}
/// insertSpill - Insert a spill of NewVReg after MI.
void InlineSpiller::insertSpill(unsigned NewVReg, bool isKill,
void InlineSpiller::insertSpill(Register NewVReg, bool isKill,
MachineBasicBlock::iterator MI) {
// Spill are not terminators, so inserting spills after terminators will
// violate invariants in MachineVerifier.
@ -975,7 +975,7 @@ void InlineSpiller::insertSpill(unsigned NewVReg, bool isKill,
}
/// spillAroundUses - insert spill code around each use of Reg.
void InlineSpiller::spillAroundUses(unsigned Reg) {
void InlineSpiller::spillAroundUses(Register Reg) {
LLVM_DEBUG(dbgs() << "spillAroundUses " << printReg(Reg) << '\n');
LiveInterval &OldLI = LIS.getInterval(Reg);
@ -1018,7 +1018,7 @@ void InlineSpiller::spillAroundUses(unsigned Reg) {
Idx = VNI->def;
// Check for a sibling copy.
unsigned SibReg = isFullCopyOf(*MI, Reg);
Register SibReg = isFullCopyOf(*MI, Reg);
if (SibReg && isSibling(SibReg)) {
// This may actually be a copy between snippets.
if (isRegToSpill(SibReg)) {
@ -1047,7 +1047,7 @@ void InlineSpiller::spillAroundUses(unsigned Reg) {
// Create a new virtual register for spill/fill.
// FIXME: Infer regclass from instruction alone.
unsigned NewVReg = Edit->createFrom(Reg);
Register NewVReg = Edit->createFrom(Reg);
if (RI.Reads)
insertReload(NewVReg, Idx, MI);
@ -1088,13 +1088,13 @@ void InlineSpiller::spillAll() {
VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);
assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
for (unsigned Reg : RegsToSpill)
for (Register Reg : RegsToSpill)
StackInt->MergeSegmentsInAsValue(LIS.getInterval(Reg),
StackInt->getValNumInfo(0));
LLVM_DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');
// Spill around uses of all RegsToSpill.
for (unsigned Reg : RegsToSpill)
for (Register Reg : RegsToSpill)
spillAroundUses(Reg);
// Hoisted spills may cause dead code.
@ -1104,7 +1104,7 @@ void InlineSpiller::spillAll() {
}
// Finally delete the SnippetCopies.
for (unsigned Reg : RegsToSpill) {
for (Register Reg : RegsToSpill) {
for (MachineRegisterInfo::reg_instr_iterator
RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end();
RI != E; ) {
@ -1117,7 +1117,7 @@ void InlineSpiller::spillAll() {
}
// Delete all spilled registers.
for (unsigned Reg : RegsToSpill)
for (Register Reg : RegsToSpill)
Edit->eraseVirtReg(Reg);
}
@ -1186,15 +1186,15 @@ bool HoistSpillHelper::rmFromMergeableSpills(MachineInstr &Spill,
/// Check BB to see if it is a possible target BB to place a hoisted spill,
/// i.e., there should be a living sibling of OrigReg at the insert point.
bool HoistSpillHelper::isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
MachineBasicBlock &BB, unsigned &LiveReg) {
MachineBasicBlock &BB, Register &LiveReg) {
SlotIndex Idx;
unsigned OrigReg = OrigLI.reg;
Register OrigReg = OrigLI.reg;
MachineBasicBlock::iterator MI = IPA.getLastInsertPointIter(OrigLI, BB);
if (MI != BB.end())
Idx = LIS.getInstructionIndex(*MI);
else
Idx = LIS.getMBBEndIdx(&BB).getPrevSlot();
SmallSetVector<unsigned, 16> &Siblings = Virt2SiblingsMap[OrigReg];
SmallSetVector<Register, 16> &Siblings = Virt2SiblingsMap[OrigReg];
assert(OrigLI.getVNInfoAt(Idx) == &OrigVNI && "Unexpected VNI");
for (auto const SibReg : Siblings) {
@ -1406,7 +1406,7 @@ void HoistSpillHelper::runHoistSpills(
continue;
// Check whether Block is a possible candidate to insert spill.
unsigned LiveReg = 0;
Register LiveReg;
if (!isSpillCandBB(OrigLI, OrigVNI, *Block, LiveReg))
continue;
@ -1468,12 +1468,12 @@ void HoistSpillHelper::runHoistSpills(
/// inside its subtree to that node. In this way, we can get benefit locally
/// even if hoisting all the equal spills to one cold place is impossible.
void HoistSpillHelper::hoistAllSpills() {
SmallVector<unsigned, 4> NewVRegs;
SmallVector<Register, 4> NewVRegs;
LiveRangeEdit Edit(nullptr, NewVRegs, MF, LIS, &VRM, this);
for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
unsigned Reg = Register::index2VirtReg(i);
unsigned Original = VRM.getPreSplitReg(Reg);
Register Reg = Register::index2VirtReg(i);
Register Original = VRM.getPreSplitReg(Reg);
if (!MRI.def_empty(Reg))
Virt2SiblingsMap[Original].insert(Reg);
}
@ -1521,7 +1521,7 @@ void HoistSpillHelper::hoistAllSpills() {
// Insert hoisted spills.
for (auto const &Insert : SpillsToIns) {
MachineBasicBlock *BB = Insert.first;
unsigned LiveReg = Insert.second;
Register LiveReg = Insert.second;
MachineBasicBlock::iterator MI = IPA.getLastInsertPointIter(OrigLI, *BB);
TII.storeRegToStackSlot(*BB, MI, LiveReg, false, Slot,
MRI.getRegClass(LiveReg), &TRI);

View File

@ -189,7 +189,7 @@ static bool isRegOtherThanSPAndFP(const MachineOperand &Op,
const MachineFunction *MF = MI.getParent()->getParent();
const TargetLowering *TLI = MF->getSubtarget().getTargetLowering();
unsigned SP = TLI->getStackPointerRegisterToSaveRestore();
Register SP = TLI->getStackPointerRegisterToSaveRestore();
Register FP = TRI->getFrameRegister(*MF);
Register Reg = Op.getReg();
@ -354,7 +354,7 @@ private:
/// Take the variable and machine-location in DBG_VALUE MI, and build an
/// entry location using the given expression.
static VarLoc CreateEntryLoc(const MachineInstr &MI, LexicalScopes &LS,
const DIExpression *EntryExpr, unsigned Reg) {
const DIExpression *EntryExpr, Register Reg) {
VarLoc VL(MI, LS);
assert(VL.Kind == RegisterKind);
VL.Kind = EntryValueKind;
@ -383,7 +383,7 @@ private:
static VarLoc CreateEntryCopyBackupLoc(const MachineInstr &MI,
LexicalScopes &LS,
const DIExpression *EntryExpr,
unsigned NewReg) {
Register NewReg) {
VarLoc VL(MI, LS);
assert(VL.Kind == RegisterKind);
VL.Kind = EntryValueCopyBackupKind;
@ -395,7 +395,7 @@ private:
/// Copy the register location in DBG_VALUE MI, updating the register to
/// be NewReg.
static VarLoc CreateCopyLoc(const MachineInstr &MI, LexicalScopes &LS,
unsigned NewReg) {
Register NewReg) {
VarLoc VL(MI, LS);
assert(VL.Kind == RegisterKind);
VL.Loc.RegNo = NewReg;
@ -736,7 +736,7 @@ private:
/// TODO: Store optimization can fold spills into other stores (including
/// other spills). We do not handle this yet (more than one memory operand).
bool isLocationSpill(const MachineInstr &MI, MachineFunction *MF,
unsigned &Reg);
Register &Reg);
/// Returns true if the given machine instruction is a debug value which we
/// can emit entry values for.
@ -750,14 +750,14 @@ private:
/// and set \p Reg to the spilled register.
Optional<VarLoc::SpillLoc> isRestoreInstruction(const MachineInstr &MI,
MachineFunction *MF,
unsigned &Reg);
Register &Reg);
/// Given a spill instruction, extract the register and offset used to
/// address the spill location in a target independent way.
VarLoc::SpillLoc extractSpillBaseRegAndOffset(const MachineInstr &MI);
void insertTransferDebugPair(MachineInstr &MI, OpenRangesSet &OpenRanges,
TransferMap &Transfers, VarLocMap &VarLocIDs,
LocIndex OldVarID, TransferKind Kind,
unsigned NewReg = 0);
Register NewReg = Register());
void transferDebugValue(const MachineInstr &MI, OpenRangesSet &OpenRanges,
VarLocMap &VarLocIDs);
@ -1153,7 +1153,7 @@ void LiveDebugValues::emitEntryValues(MachineInstr &MI,
void LiveDebugValues::insertTransferDebugPair(
MachineInstr &MI, OpenRangesSet &OpenRanges, TransferMap &Transfers,
VarLocMap &VarLocIDs, LocIndex OldVarID, TransferKind Kind,
unsigned NewReg) {
Register NewReg) {
const MachineInstr *DebugInstr = &VarLocIDs[OldVarID].MI;
auto ProcessVarLoc = [&MI, &OpenRanges, &Transfers, &VarLocIDs](VarLoc &VL) {
@ -1228,7 +1228,7 @@ void LiveDebugValues::transferRegisterDef(
MachineFunction *MF = MI.getMF();
const TargetLowering *TLI = MF->getSubtarget().getTargetLowering();
unsigned SP = TLI->getStackPointerRegisterToSaveRestore();
Register SP = TLI->getStackPointerRegisterToSaveRestore();
// Find the regs killed by MI, and find regmasks of preserved regs.
DefinedRegsSet DeadRegs;
@ -1299,11 +1299,11 @@ bool LiveDebugValues::isSpillInstruction(const MachineInstr &MI,
}
bool LiveDebugValues::isLocationSpill(const MachineInstr &MI,
MachineFunction *MF, unsigned &Reg) {
MachineFunction *MF, Register &Reg) {
if (!isSpillInstruction(MI, MF))
return false;
auto isKilledReg = [&](const MachineOperand MO, unsigned &Reg) {
auto isKilledReg = [&](const MachineOperand MO, Register &Reg) {
if (!MO.isReg() || !MO.isUse()) {
Reg = 0;
return false;
@ -1325,7 +1325,7 @@ bool LiveDebugValues::isLocationSpill(const MachineInstr &MI,
// Skip next instruction that points to basic block end iterator.
if (MI.getParent()->end() == NextI)
continue;
unsigned RegNext;
Register RegNext;
for (const MachineOperand &MONext : NextI->operands()) {
// Return true if we came across the register from the
// previous spill instruction that is killed in NextI.
@ -1340,7 +1340,7 @@ bool LiveDebugValues::isLocationSpill(const MachineInstr &MI,
Optional<LiveDebugValues::VarLoc::SpillLoc>
LiveDebugValues::isRestoreInstruction(const MachineInstr &MI,
MachineFunction *MF, unsigned &Reg) {
MachineFunction *MF, Register &Reg) {
if (!MI.hasOneMemOperand())
return None;
@ -1366,7 +1366,7 @@ void LiveDebugValues::transferSpillOrRestoreInst(MachineInstr &MI,
TransferMap &Transfers) {
MachineFunction *MF = MI.getMF();
TransferKind TKind;
unsigned Reg;
Register Reg;
Optional<VarLoc::SpillLoc> Loc;
LLVM_DEBUG(dbgs() << "Examining instruction: "; MI.dump(););
@ -1463,7 +1463,7 @@ void LiveDebugValues::transferRegisterCopy(MachineInstr &MI,
if (!DestRegOp->isDef())
return;
auto isCalleeSavedReg = [&](unsigned Reg) {
auto isCalleeSavedReg = [&](Register Reg) {
for (MCRegAliasIterator RAI(Reg, TRI, true); RAI.isValid(); ++RAI)
if (CalleeSavedRegs.test(*RAI))
return true;

View File

@ -184,7 +184,7 @@ class UserValue {
/// Replace OldLocNo ranges with NewRegs ranges where NewRegs
/// is live. Returns true if any changes were made.
bool splitLocation(unsigned OldLocNo, ArrayRef<unsigned> NewRegs,
bool splitLocation(unsigned OldLocNo, ArrayRef<Register> NewRegs,
LiveIntervals &LIS);
public:
@ -332,7 +332,7 @@ public:
/// Replace OldReg ranges with NewRegs ranges where NewRegs is
/// live. Returns true if any changes were made.
bool splitRegister(unsigned OldReg, ArrayRef<unsigned> NewRegs,
bool splitRegister(Register OldReg, ArrayRef<Register> NewRegs,
LiveIntervals &LIS);
/// Rewrite virtual register locations according to the provided virtual
@ -420,7 +420,7 @@ class LDVImpl {
const DebugLoc &DL);
/// Find the EC leader for VirtReg or null.
UserValue *lookupVirtReg(unsigned VirtReg);
UserValue *lookupVirtReg(Register VirtReg);
/// Add DBG_VALUE instruction to our maps.
///
@ -470,10 +470,10 @@ public:
}
/// Map virtual register to an equivalence class.
void mapVirtReg(unsigned VirtReg, UserValue *EC);
void mapVirtReg(Register VirtReg, UserValue *EC);
/// Replace all references to OldReg with NewRegs.
void splitRegister(unsigned OldReg, ArrayRef<unsigned> NewRegs);
void splitRegister(Register OldReg, ArrayRef<Register> NewRegs);
/// Recreate DBG_VALUE instruction from data structures.
void emitDebugValues(VirtRegMap *VRM);
@ -593,13 +593,13 @@ UserValue *LDVImpl::getUserValue(const DILocalVariable *Var,
return UV;
}
void LDVImpl::mapVirtReg(unsigned VirtReg, UserValue *EC) {
void LDVImpl::mapVirtReg(Register VirtReg, UserValue *EC) {
assert(Register::isVirtualRegister(VirtReg) && "Only map VirtRegs");
UserValue *&Leader = virtRegToEqClass[VirtReg];
Leader = UserValue::merge(Leader, EC);
}
UserValue *LDVImpl::lookupVirtReg(unsigned VirtReg) {
UserValue *LDVImpl::lookupVirtReg(Register VirtReg) {
if (UserValue *UV = virtRegToEqClass.lookup(VirtReg))
return UV->getLeader();
return nullptr;
@ -1030,7 +1030,7 @@ LiveDebugVariables::~LiveDebugVariables() {
//===----------------------------------------------------------------------===//
bool
UserValue::splitLocation(unsigned OldLocNo, ArrayRef<unsigned> NewRegs,
UserValue::splitLocation(unsigned OldLocNo, ArrayRef<Register> NewRegs,
LiveIntervals& LIS) {
LLVM_DEBUG({
dbgs() << "Splitting Loc" << OldLocNo << '\t';
@ -1130,7 +1130,7 @@ UserValue::splitLocation(unsigned OldLocNo, ArrayRef<unsigned> NewRegs,
}
bool
UserValue::splitRegister(unsigned OldReg, ArrayRef<unsigned> NewRegs,
UserValue::splitRegister(Register OldReg, ArrayRef<Register> NewRegs,
LiveIntervals &LIS) {
bool DidChange = false;
// Split locations referring to OldReg. Iterate backwards so splitLocation can
@ -1145,7 +1145,7 @@ UserValue::splitRegister(unsigned OldReg, ArrayRef<unsigned> NewRegs,
return DidChange;
}
void LDVImpl::splitRegister(unsigned OldReg, ArrayRef<unsigned> NewRegs) {
void LDVImpl::splitRegister(Register OldReg, ArrayRef<Register> NewRegs) {
bool DidChange = false;
for (UserValue *UV = lookupVirtReg(OldReg); UV; UV = UV->getNext())
DidChange |= UV->splitRegister(OldReg, NewRegs, *LIS);
@ -1160,7 +1160,7 @@ void LDVImpl::splitRegister(unsigned OldReg, ArrayRef<unsigned> NewRegs) {
}
void LiveDebugVariables::
splitRegister(unsigned OldReg, ArrayRef<unsigned> NewRegs, LiveIntervals &LIS) {
splitRegister(Register OldReg, ArrayRef<Register> NewRegs, LiveIntervals &LIS) {
if (pImpl)
static_cast<LDVImpl*>(pImpl)->splitRegister(OldReg, NewRegs);
}

View File

@ -41,7 +41,7 @@ public:
/// splitRegister - Move any user variables in OldReg to the live ranges in
/// NewRegs where they are live. Mark the values as unavailable where no new
/// register is live.
void splitRegister(unsigned OldReg, ArrayRef<unsigned> NewRegs,
void splitRegister(Register OldReg, ArrayRef<Register> NewRegs,
LiveIntervals &LIS);
/// emitDebugValues - Emit new DBG_VALUE instructions reflecting the changes

View File

@ -31,7 +31,7 @@ STATISTIC(NumFracRanges, "Number of live ranges fractured by DCE");
void LiveRangeEdit::Delegate::anchor() { }
LiveInterval &LiveRangeEdit::createEmptyIntervalFrom(unsigned OldReg,
LiveInterval &LiveRangeEdit::createEmptyIntervalFrom(Register OldReg,
bool createSubRanges) {
Register VReg = MRI.createVirtualRegister(MRI.getRegClass(OldReg));
if (VRM)
@ -52,7 +52,7 @@ LiveInterval &LiveRangeEdit::createEmptyIntervalFrom(unsigned OldReg,
return LI;
}
unsigned LiveRangeEdit::createFrom(unsigned OldReg) {
Register LiveRangeEdit::createFrom(Register OldReg) {
Register VReg = MRI.createVirtualRegister(MRI.getRegClass(OldReg));
if (VRM) {
VRM->setIsSplitFromReg(VReg, VRM->getOriginal(OldReg));
@ -178,7 +178,7 @@ SlotIndex LiveRangeEdit::rematerializeAt(MachineBasicBlock &MBB,
return LIS.getSlotIndexes()->insertMachineInstrInMaps(*MI, Late).getRegSlot();
}
void LiveRangeEdit::eraseVirtReg(unsigned Reg) {
void LiveRangeEdit::eraseVirtReg(Register Reg) {
if (TheDelegate && TheDelegate->LRE_CanEraseVirtReg(Reg))
LIS.removeInterval(Reg);
}
@ -383,7 +383,7 @@ void LiveRangeEdit::eliminateDeadDef(MachineInstr *MI, ToShrinkSet &ToShrink,
// Erase any virtregs that are now empty and unused. There may be <undef>
// uses around. Keep the empty live range in that case.
for (unsigned i = 0, e = RegsToErase.size(); i != e; ++i) {
unsigned Reg = RegsToErase[i];
Register Reg = RegsToErase[i];
if (LIS.hasInterval(Reg) && MRI.reg_nodbg_empty(Reg)) {
ToShrink.remove(&LIS.getInterval(Reg));
eraseVirtReg(Reg);
@ -392,7 +392,7 @@ void LiveRangeEdit::eliminateDeadDef(MachineInstr *MI, ToShrinkSet &ToShrink,
}
void LiveRangeEdit::eliminateDeadDefs(SmallVectorImpl<MachineInstr *> &Dead,
ArrayRef<unsigned> RegsBeingSpilled,
ArrayRef<Register> RegsBeingSpilled,
AAResults *AA) {
ToShrinkSet ToShrink;

View File

@ -107,7 +107,7 @@ void RegAllocBase::allocatePhysRegs() {
<< TRI->getRegClassName(MRI->getRegClass(VirtReg->reg))
<< ':' << *VirtReg << " w=" << VirtReg->weight << '\n');
using VirtRegVec = SmallVector<unsigned, 4>;
using VirtRegVec = SmallVector<Register, 4>;
VirtRegVec SplitVRegs;
unsigned AvailablePhysReg = selectOrSplit(*VirtReg, SplitVRegs);

View File

@ -101,8 +101,8 @@ protected:
// Each call must guarantee forward progess by returning an available PhysReg
// or new set of split live virtual registers. It is up to the splitter to
// converge quickly toward fully spilled live ranges.
virtual unsigned selectOrSplit(LiveInterval &VirtReg,
SmallVectorImpl<unsigned> &splitLVRs) = 0;
virtual Register selectOrSplit(LiveInterval &VirtReg,
SmallVectorImpl<Register> &splitLVRs) = 0;
// Use this group name for NamedRegionTimer.
static const char TimerGroupName[];

View File

@ -100,8 +100,8 @@ public:
return LI;
}
unsigned selectOrSplit(LiveInterval &VirtReg,
SmallVectorImpl<unsigned> &SplitVRegs) override;
Register selectOrSplit(LiveInterval &VirtReg,
SmallVectorImpl<Register> &SplitVRegs) override;
/// Perform register allocation.
bool runOnMachineFunction(MachineFunction &mf) override;
@ -114,8 +114,8 @@ public:
// Helper for spilling all live virtual registers currently unified under preg
// that interfere with the most recently queried lvr. Return true if spilling
// was successful, and append any new spilled/split intervals to splitLVRs.
bool spillInterferences(LiveInterval &VirtReg, unsigned PhysReg,
SmallVectorImpl<unsigned> &SplitVRegs);
bool spillInterferences(LiveInterval &VirtReg, Register PhysReg,
SmallVectorImpl<Register> &SplitVRegs);
static char ID;
};
@ -201,8 +201,8 @@ void RABasic::releaseMemory() {
// Spill or split all live virtual registers currently unified under PhysReg
// that interfere with VirtReg. The newly spilled or split live intervals are
// returned by appending them to SplitVRegs.
bool RABasic::spillInterferences(LiveInterval &VirtReg, unsigned PhysReg,
SmallVectorImpl<unsigned> &SplitVRegs) {
bool RABasic::spillInterferences(LiveInterval &VirtReg, Register PhysReg,
SmallVectorImpl<Register> &SplitVRegs) {
// Record each interference and determine if all are spillable before mutating
// either the union or live intervals.
SmallVector<LiveInterval*, 8> Intfs;
@ -253,14 +253,14 @@ bool RABasic::spillInterferences(LiveInterval &VirtReg, unsigned PhysReg,
// |vregs| * |machineregs|. And since the number of interference tests is
// minimal, there is no value in caching them outside the scope of
// selectOrSplit().
unsigned RABasic::selectOrSplit(LiveInterval &VirtReg,
SmallVectorImpl<unsigned> &SplitVRegs) {
Register RABasic::selectOrSplit(LiveInterval &VirtReg,
SmallVectorImpl<Register> &SplitVRegs) {
// Populate a list of physical register spill candidates.
SmallVector<unsigned, 8> PhysRegSpillCands;
SmallVector<Register, 8> PhysRegSpillCands;
// Check for an available register in this class.
AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo, Matrix);
while (unsigned PhysReg = Order.next()) {
while (Register PhysReg = Order.next()) {
// Check for interference in PhysReg
switch (Matrix->checkInterference(VirtReg, PhysReg)) {
case LiveRegMatrix::IK_Free:
@ -279,7 +279,7 @@ unsigned RABasic::selectOrSplit(LiveInterval &VirtReg,
}
// Try to spill another interfering reg with less spill weight.
for (SmallVectorImpl<unsigned>::iterator PhysRegI = PhysRegSpillCands.begin(),
for (SmallVectorImpl<Register>::iterator PhysRegI = PhysRegSpillCands.begin(),
PhysRegE = PhysRegSpillCands.end(); PhysRegI != PhysRegE; ++PhysRegI) {
if (!spillInterferences(VirtReg, *PhysRegI, SplitVRegs))
continue;

View File

@ -417,7 +417,7 @@ public:
Spiller &spiller() override { return *SpillerInstance; }
void enqueue(LiveInterval *LI) override;
LiveInterval *dequeue() override;
unsigned selectOrSplit(LiveInterval&, SmallVectorImpl<unsigned>&) override;
Register selectOrSplit(LiveInterval&, SmallVectorImpl<Register>&) override;
void aboutToRemoveInterval(LiveInterval &) override;
/// Perform register allocation.
@ -431,7 +431,7 @@ public:
static char ID;
private:
unsigned selectOrSplitImpl(LiveInterval &, SmallVectorImpl<unsigned> &,
Register selectOrSplitImpl(LiveInterval &, SmallVectorImpl<Register> &,
SmallVirtRegSet &, unsigned = 0);
bool LRE_CanEraseVirtReg(unsigned) override;
@ -456,30 +456,30 @@ private:
bool calcCompactRegion(GlobalSplitCandidate&);
void splitAroundRegion(LiveRangeEdit&, ArrayRef<unsigned>);
void calcGapWeights(unsigned, SmallVectorImpl<float>&);
unsigned canReassign(LiveInterval &VirtReg, unsigned PrevReg);
Register canReassign(LiveInterval &VirtReg, Register PrevReg);
bool shouldEvict(LiveInterval &A, bool, LiveInterval &B, bool);
bool canEvictInterference(LiveInterval&, unsigned, bool, EvictionCost&,
bool canEvictInterference(LiveInterval&, Register, bool, EvictionCost&,
const SmallVirtRegSet&);
bool canEvictInterferenceInRange(LiveInterval &VirtReg, unsigned PhysReg,
bool canEvictInterferenceInRange(LiveInterval &VirtReg, Register PhysReg,
SlotIndex Start, SlotIndex End,
EvictionCost &MaxCost);
unsigned getCheapestEvicteeWeight(const AllocationOrder &Order,
LiveInterval &VirtReg, SlotIndex Start,
SlotIndex End, float *BestEvictWeight);
void evictInterference(LiveInterval&, unsigned,
SmallVectorImpl<unsigned>&);
void evictInterference(LiveInterval&, Register,
SmallVectorImpl<Register>&);
bool mayRecolorAllInterferences(unsigned PhysReg, LiveInterval &VirtReg,
SmallLISet &RecoloringCandidates,
const SmallVirtRegSet &FixedRegisters);
unsigned tryAssign(LiveInterval&, AllocationOrder&,
SmallVectorImpl<unsigned>&,
Register tryAssign(LiveInterval&, AllocationOrder&,
SmallVectorImpl<Register>&,
const SmallVirtRegSet&);
unsigned tryEvict(LiveInterval&, AllocationOrder&,
SmallVectorImpl<unsigned>&, unsigned,
SmallVectorImpl<Register>&, unsigned,
const SmallVirtRegSet&);
unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
SmallVectorImpl<unsigned>&);
SmallVectorImpl<Register>&);
/// Calculate cost of region splitting.
unsigned calculateRegionSplitCost(LiveInterval &VirtReg,
AllocationOrder &Order,
@ -489,26 +489,26 @@ private:
/// Perform region splitting.
unsigned doRegionSplit(LiveInterval &VirtReg, unsigned BestCand,
bool HasCompact,
SmallVectorImpl<unsigned> &NewVRegs);
SmallVectorImpl<Register> &NewVRegs);
/// Check other options before using a callee-saved register for the first
/// time.
unsigned tryAssignCSRFirstTime(LiveInterval &VirtReg, AllocationOrder &Order,
unsigned PhysReg, unsigned &CostPerUseLimit,
SmallVectorImpl<unsigned> &NewVRegs);
Register PhysReg, unsigned &CostPerUseLimit,
SmallVectorImpl<Register> &NewVRegs);
void initializeCSRCost();
unsigned tryBlockSplit(LiveInterval&, AllocationOrder&,
SmallVectorImpl<unsigned>&);
SmallVectorImpl<Register>&);
unsigned tryInstructionSplit(LiveInterval&, AllocationOrder&,
SmallVectorImpl<unsigned>&);
SmallVectorImpl<Register>&);
unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
SmallVectorImpl<unsigned>&);
SmallVectorImpl<Register>&);
unsigned trySplit(LiveInterval&, AllocationOrder&,
SmallVectorImpl<unsigned>&,
SmallVectorImpl<Register>&,
const SmallVirtRegSet&);
unsigned tryLastChanceRecoloring(LiveInterval &, AllocationOrder &,
SmallVectorImpl<unsigned> &,
SmallVectorImpl<Register> &,
SmallVirtRegSet &, unsigned);
bool tryRecoloringCandidates(PQueue &, SmallVectorImpl<unsigned> &,
bool tryRecoloringCandidates(PQueue &, SmallVectorImpl<Register> &,
SmallVirtRegSet &, unsigned);
void tryHintRecoloring(LiveInterval &);
void tryHintsRecoloring();
@ -518,12 +518,12 @@ private:
/// The frequency of the copy.
BlockFrequency Freq;
/// The virtual register or physical register.
unsigned Reg;
Register Reg;
/// Its currently assigned register.
/// In case of a physical register Reg == PhysReg.
unsigned PhysReg;
MCRegister PhysReg;
HintInfo(BlockFrequency Freq, unsigned Reg, unsigned PhysReg)
HintInfo(BlockFrequency Freq, Register Reg, MCRegister PhysReg)
: Freq(Freq), Reg(Reg), PhysReg(PhysReg) {}
};
using HintsInfo = SmallVector<HintInfo, 4>;
@ -531,7 +531,7 @@ private:
BlockFrequency getBrokenHintFreq(const HintsInfo &, unsigned);
void collectHintInfo(unsigned, HintsInfo &);
bool isUnusedCalleeSavedReg(unsigned PhysReg) const;
bool isUnusedCalleeSavedReg(MCRegister PhysReg) const;
/// Compute and report the number of spills and reloads for a loop.
void reportNumberOfSplillsReloads(MachineLoop *L, unsigned &Reloads,
@ -752,12 +752,12 @@ LiveInterval *RAGreedy::dequeue(PQueue &CurQueue) {
//===----------------------------------------------------------------------===//
/// tryAssign - Try to assign VirtReg to an available register.
unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
Register RAGreedy::tryAssign(LiveInterval &VirtReg,
AllocationOrder &Order,
SmallVectorImpl<unsigned> &NewVRegs,
SmallVectorImpl<Register> &NewVRegs,
const SmallVirtRegSet &FixedRegisters) {
Order.rewind();
unsigned PhysReg;
Register PhysReg;
while ((PhysReg = Order.next()))
if (!Matrix->checkInterference(VirtReg, PhysReg))
break;
@ -768,7 +768,7 @@ unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
// If we missed a simple hint, try to cheaply evict interference from the
// preferred register.
if (unsigned Hint = MRI->getSimpleHint(VirtReg.reg))
if (Register Hint = MRI->getSimpleHint(VirtReg.reg))
if (Order.isHint(Hint)) {
LLVM_DEBUG(dbgs() << "missed hint " << printReg(Hint, TRI) << '\n');
EvictionCost MaxCost;
@ -791,7 +791,7 @@ unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << " is available at cost "
<< Cost << '\n');
unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost, FixedRegisters);
Register CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost, FixedRegisters);
return CheapReg ? CheapReg : PhysReg;
}
@ -799,9 +799,9 @@ unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
// Interference eviction
//===----------------------------------------------------------------------===//
unsigned RAGreedy::canReassign(LiveInterval &VirtReg, unsigned PrevReg) {
Register RAGreedy::canReassign(LiveInterval &VirtReg, Register PrevReg) {
AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo, Matrix);
unsigned PhysReg;
Register PhysReg;
while ((PhysReg = Order.next())) {
if (PhysReg == PrevReg)
continue;
@ -862,7 +862,7 @@ bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint,
/// @param MaxCost Only look for cheaper candidates and update with new cost
/// when returning true.
/// @returns True when interference can be evicted cheaper than MaxCost.
bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, Register PhysReg,
bool IsHint, EvictionCost &MaxCost,
const SmallVirtRegSet &FixedRegisters) {
// It is only possible to evict virtual register interference.
@ -960,7 +960,7 @@ bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
/// when returning true.
/// \return True when interference can be evicted cheaper than MaxCost.
bool RAGreedy::canEvictInterferenceInRange(LiveInterval &VirtReg,
unsigned PhysReg, SlotIndex Start,
Register PhysReg, SlotIndex Start,
SlotIndex End,
EvictionCost &MaxCost) {
EvictionCost Cost;
@ -1038,8 +1038,8 @@ unsigned RAGreedy::getCheapestEvicteeWeight(const AllocationOrder &Order,
/// evictInterference - Evict any interferring registers that prevent VirtReg
/// from being assigned to Physreg. This assumes that canEvictInterference
/// returned true.
void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg,
SmallVectorImpl<unsigned> &NewVRegs) {
void RAGreedy::evictInterference(LiveInterval &VirtReg, Register PhysReg,
SmallVectorImpl<Register> &NewVRegs) {
// Make sure that VirtReg has a cascade number, and assign that cascade
// number to every evicted register. These live ranges than then only be
// evicted by a newer cascade, preventing infinite loops.
@ -1084,9 +1084,9 @@ void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg,
/// Returns true if the given \p PhysReg is a callee saved register and has not
/// been used for allocation yet.
bool RAGreedy::isUnusedCalleeSavedReg(unsigned PhysReg) const {
unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg);
if (CSR == 0)
bool RAGreedy::isUnusedCalleeSavedReg(MCRegister PhysReg) const {
MCRegister CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg);
if (!CSR)
return false;
return !Matrix->isPhysRegUsed(PhysReg);
@ -1098,7 +1098,7 @@ bool RAGreedy::isUnusedCalleeSavedReg(unsigned PhysReg) const {
/// @return Physreg to assign VirtReg, or 0.
unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
AllocationOrder &Order,
SmallVectorImpl<unsigned> &NewVRegs,
SmallVectorImpl<Register> &NewVRegs,
unsigned CostPerUseLimit,
const SmallVirtRegSet &FixedRegisters) {
NamedRegionTimer T("evict", "Evict", TimerGroupName, TimerGroupDescription,
@ -1135,7 +1135,7 @@ unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
}
Order.rewind();
while (unsigned PhysReg = Order.next(OrderLimit)) {
while (MCRegister PhysReg = Order.next(OrderLimit)) {
if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
continue;
// The first use of a callee-saved register in a function has cost 1.
@ -1809,7 +1809,7 @@ void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit,
}
unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
SmallVectorImpl<unsigned> &NewVRegs) {
SmallVectorImpl<Register> &NewVRegs) {
if (!TRI->shouldRegionSplitForVirtReg(*MF, VirtReg))
return 0;
unsigned NumCands = 0;
@ -1953,7 +1953,7 @@ unsigned RAGreedy::calculateRegionSplitCost(LiveInterval &VirtReg,
unsigned RAGreedy::doRegionSplit(LiveInterval &VirtReg, unsigned BestCand,
bool HasCompact,
SmallVectorImpl<unsigned> &NewVRegs) {
SmallVectorImpl<Register> &NewVRegs) {
SmallVector<unsigned, 8> UsedCands;
// Prepare split editor.
LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
@ -1999,9 +1999,9 @@ unsigned RAGreedy::doRegionSplit(LiveInterval &VirtReg, unsigned BestCand,
/// creates a lot of local live ranges, that will be split by tryLocalSplit if
/// they don't allocate.
unsigned RAGreedy::tryBlockSplit(LiveInterval &VirtReg, AllocationOrder &Order,
SmallVectorImpl<unsigned> &NewVRegs) {
SmallVectorImpl<Register> &NewVRegs) {
assert(&SA->getParent() == &VirtReg && "Live range wasn't analyzed");
unsigned Reg = VirtReg.reg;
Register Reg = VirtReg.reg;
bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
SE->reset(LREdit, SplitSpillMode);
@ -2066,7 +2066,7 @@ static unsigned getNumAllocatableRegsForConstraints(
/// This is similar to spilling to a larger register class.
unsigned
RAGreedy::tryInstructionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
SmallVectorImpl<unsigned> &NewVRegs) {
SmallVectorImpl<Register> &NewVRegs) {
const TargetRegisterClass *CurRC = MRI->getRegClass(VirtReg.reg);
// There is no point to this if there are no larger sub-classes.
if (!RegClassInfo.isProperSubClass(CurRC))
@ -2209,7 +2209,7 @@ void RAGreedy::calcGapWeights(unsigned PhysReg,
/// basic block.
///
unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
SmallVectorImpl<unsigned> &NewVRegs) {
SmallVectorImpl<Register> &NewVRegs) {
// TODO: the function currently only handles a single UseBlock; it should be
// possible to generalize.
if (SA->getUseBlocks().size() != 1)
@ -2440,7 +2440,7 @@ unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
/// assignable.
/// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
SmallVectorImpl<unsigned>&NewVRegs,
SmallVectorImpl<Register> &NewVRegs,
const SmallVirtRegSet &FixedRegisters) {
// Ranges must be Split2 or less.
if (getStage(VirtReg) >= RS_Spill)
@ -2451,7 +2451,7 @@ unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
NamedRegionTimer T("local_split", "Local Splitting", TimerGroupName,
TimerGroupDescription, TimePassesIsEnabled);
SA->analyze(&VirtReg);
unsigned PhysReg = tryLocalSplit(VirtReg, Order, NewVRegs);
Register PhysReg = tryLocalSplit(VirtReg, Order, NewVRegs);
if (PhysReg || !NewVRegs.empty())
return PhysReg;
return tryInstructionSplit(VirtReg, Order, NewVRegs);
@ -2469,7 +2469,7 @@ unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
if (SA->didRepairRange()) {
// VirtReg has changed, so all cached queries are invalid.
Matrix->invalidateVirtRegs();
if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs, FixedRegisters))
if (Register PhysReg = tryAssign(VirtReg, Order, NewVRegs, FixedRegisters))
return PhysReg;
}
@ -2584,7 +2584,7 @@ RAGreedy::mayRecolorAllInterferences(unsigned PhysReg, LiveInterval &VirtReg,
/// exists.
unsigned RAGreedy::tryLastChanceRecoloring(LiveInterval &VirtReg,
AllocationOrder &Order,
SmallVectorImpl<unsigned> &NewVRegs,
SmallVectorImpl<Register> &NewVRegs,
SmallVirtRegSet &FixedRegisters,
unsigned Depth) {
LLVM_DEBUG(dbgs() << "Try last chance recoloring for " << VirtReg << '\n');
@ -2605,15 +2605,15 @@ unsigned RAGreedy::tryLastChanceRecoloring(LiveInterval &VirtReg,
SmallLISet RecoloringCandidates;
// Record the original mapping virtual register to physical register in case
// the recoloring fails.
DenseMap<unsigned, unsigned> VirtRegToPhysReg;
DenseMap<Register, Register> VirtRegToPhysReg;
// Mark VirtReg as fixed, i.e., it will not be recolored pass this point in
// this recoloring "session".
assert(!FixedRegisters.count(VirtReg.reg));
FixedRegisters.insert(VirtReg.reg);
SmallVector<unsigned, 4> CurrentNewVRegs;
SmallVector<Register, 4> CurrentNewVRegs;
Order.rewind();
while (unsigned PhysReg = Order.next()) {
while (Register PhysReg = Order.next()) {
LLVM_DEBUG(dbgs() << "Try to assign: " << VirtReg << " to "
<< printReg(PhysReg, TRI) << '\n');
RecoloringCandidates.clear();
@ -2644,7 +2644,7 @@ unsigned RAGreedy::tryLastChanceRecoloring(LiveInterval &VirtReg,
for (SmallLISet::iterator It = RecoloringCandidates.begin(),
EndIt = RecoloringCandidates.end();
It != EndIt; ++It) {
unsigned ItVirtReg = (*It)->reg;
Register ItVirtReg = (*It)->reg;
enqueue(RecoloringQueue, *It);
assert(VRM->hasPhys(ItVirtReg) &&
"Interferences are supposed to be with allocated variables");
@ -2667,7 +2667,7 @@ unsigned RAGreedy::tryLastChanceRecoloring(LiveInterval &VirtReg,
if (tryRecoloringCandidates(RecoloringQueue, CurrentNewVRegs,
FixedRegisters, Depth)) {
// Push the queued vregs into the main queue.
for (unsigned NewVReg : CurrentNewVRegs)
for (Register NewVReg : CurrentNewVRegs)
NewVRegs.push_back(NewVReg);
// Do not mess up with the global assignment process.
// I.e., VirtReg must be unassigned.
@ -2686,7 +2686,7 @@ unsigned RAGreedy::tryLastChanceRecoloring(LiveInterval &VirtReg,
// don't add it to NewVRegs because its physical register will be restored
// below. Other vregs in CurrentNewVRegs are created by calling
// selectOrSplit and should be added into NewVRegs.
for (SmallVectorImpl<unsigned>::iterator Next = CurrentNewVRegs.begin(),
for (SmallVectorImpl<Register>::iterator Next = CurrentNewVRegs.begin(),
End = CurrentNewVRegs.end();
Next != End; ++Next) {
if (RecoloringCandidates.count(&LIS->getInterval(*Next)))
@ -2697,10 +2697,10 @@ unsigned RAGreedy::tryLastChanceRecoloring(LiveInterval &VirtReg,
for (SmallLISet::iterator It = RecoloringCandidates.begin(),
EndIt = RecoloringCandidates.end();
It != EndIt; ++It) {
unsigned ItVirtReg = (*It)->reg;
Register ItVirtReg = (*It)->reg;
if (VRM->hasPhys(ItVirtReg))
Matrix->unassign(**It);
unsigned ItPhysReg = VirtRegToPhysReg[ItVirtReg];
Register ItPhysReg = VirtRegToPhysReg[ItVirtReg];
Matrix->assign(**It, ItPhysReg);
}
}
@ -2718,14 +2718,14 @@ unsigned RAGreedy::tryLastChanceRecoloring(LiveInterval &VirtReg,
/// \return true if all virtual registers in RecoloringQueue were successfully
/// recolored, false otherwise.
bool RAGreedy::tryRecoloringCandidates(PQueue &RecoloringQueue,
SmallVectorImpl<unsigned> &NewVRegs,
SmallVectorImpl<Register> &NewVRegs,
SmallVirtRegSet &FixedRegisters,
unsigned Depth) {
while (!RecoloringQueue.empty()) {
LiveInterval *LI = dequeue(RecoloringQueue);
LLVM_DEBUG(dbgs() << "Try to recolor: " << *LI << '\n');
unsigned PhysReg;
PhysReg = selectOrSplitImpl(*LI, NewVRegs, FixedRegisters, Depth + 1);
Register PhysReg = selectOrSplitImpl(*LI, NewVRegs, FixedRegisters,
Depth + 1);
// When splitting happens, the live-range may actually be empty.
// In that case, this is okay to continue the recoloring even
// if we did not find an alternative color for it. Indeed,
@ -2752,12 +2752,12 @@ bool RAGreedy::tryRecoloringCandidates(PQueue &RecoloringQueue,
// Main Entry Point
//===----------------------------------------------------------------------===//
unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
SmallVectorImpl<unsigned> &NewVRegs) {
Register RAGreedy::selectOrSplit(LiveInterval &VirtReg,
SmallVectorImpl<Register> &NewVRegs) {
CutOffInfo = CO_None;
LLVMContext &Ctx = MF->getFunction().getContext();
SmallVirtRegSet FixedRegisters;
unsigned Reg = selectOrSplitImpl(VirtReg, NewVRegs, FixedRegisters);
Register Reg = selectOrSplitImpl(VirtReg, NewVRegs, FixedRegisters);
if (Reg == ~0U && (CutOffInfo != CO_None)) {
uint8_t CutOffEncountered = CutOffInfo & (CO_Depth | CO_Interf);
if (CutOffEncountered == CO_Depth)
@ -2784,9 +2784,9 @@ unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
/// to use the CSR; otherwise return 0.
unsigned RAGreedy::tryAssignCSRFirstTime(LiveInterval &VirtReg,
AllocationOrder &Order,
unsigned PhysReg,
Register PhysReg,
unsigned &CostPerUseLimit,
SmallVectorImpl<unsigned> &NewVRegs) {
SmallVectorImpl<Register> &NewVRegs) {
if (getStage(VirtReg) == RS_Spill && VirtReg.isSpillable()) {
// We choose spill over using the CSR for the first time if the spill cost
// is lower than CSRCost.
@ -3013,8 +3013,8 @@ void RAGreedy::tryHintsRecoloring() {
}
}
unsigned RAGreedy::selectOrSplitImpl(LiveInterval &VirtReg,
SmallVectorImpl<unsigned> &NewVRegs,
Register RAGreedy::selectOrSplitImpl(LiveInterval &VirtReg,
SmallVectorImpl<Register> &NewVRegs,
SmallVirtRegSet &FixedRegisters,
unsigned Depth) {
unsigned CostPerUseLimit = ~0u;
@ -3028,7 +3028,7 @@ unsigned RAGreedy::selectOrSplitImpl(LiveInterval &VirtReg,
// register.
if (CSRCost.getFrequency() && isUnusedCalleeSavedReg(PhysReg) &&
NewVRegs.empty()) {
unsigned CSRReg = tryAssignCSRFirstTime(VirtReg, Order, PhysReg,
Register CSRReg = tryAssignCSRFirstTime(VirtReg, Order, PhysReg,
CostPerUseLimit, NewVRegs);
if (CSRReg || !NewVRegs.empty())
// Return now if we decide to use a CSR or create new vregs due to
@ -3046,10 +3046,10 @@ unsigned RAGreedy::selectOrSplitImpl(LiveInterval &VirtReg,
// queue. The RS_Split ranges already failed to do this, and they should not
// get a second chance until they have been split.
if (Stage != RS_Split)
if (unsigned PhysReg =
if (Register PhysReg =
tryEvict(VirtReg, Order, NewVRegs, CostPerUseLimit,
FixedRegisters)) {
unsigned Hint = MRI->getSimpleHint(VirtReg.reg);
Register Hint = MRI->getSimpleHint(VirtReg.reg);
// If VirtReg has a hint and that hint is broken record this
// virtual register as a recoloring candidate for broken hint.
// Indeed, since we evicted a variable in its neighborhood it is
@ -3078,7 +3078,7 @@ unsigned RAGreedy::selectOrSplitImpl(LiveInterval &VirtReg,
if (Stage < RS_Spill) {
// Try splitting VirtReg or interferences.
unsigned NewVRegSizeBefore = NewVRegs.size();
unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs, FixedRegisters);
Register PhysReg = trySplit(VirtReg, Order, NewVRegs, FixedRegisters);
if (PhysReg || (NewVRegs.size() - NewVRegSizeBefore)) {
// If VirtReg got split, the eviction info is no longer relevant.
LastEvicted.clearEvicteeInfo(VirtReg.reg);

View File

@ -166,7 +166,7 @@ private:
void initializeGraph(PBQPRAGraph &G, VirtRegMap &VRM, Spiller &VRegSpiller);
/// Spill the given VReg.
void spillVReg(unsigned VReg, SmallVectorImpl<unsigned> &NewIntervals,
void spillVReg(Register VReg, SmallVectorImpl<Register> &NewIntervals,
MachineFunction &MF, LiveIntervals &LIS, VirtRegMap &VRM,
Spiller &VRegSpiller);
@ -637,7 +637,7 @@ void RegAllocPBQP::initializeGraph(PBQPRAGraph &G, VirtRegMap &VRM,
// Check for vregs that have no allowed registers. These should be
// pre-spilled and the new vregs added to the worklist.
if (VRegAllowed.empty()) {
SmallVector<unsigned, 8> NewVRegs;
SmallVector<Register, 8> NewVRegs;
spillVReg(VReg, NewVRegs, MF, LIS, VRM, VRegSpiller);
Worklist.insert(Worklist.end(), NewVRegs.begin(), NewVRegs.end());
continue;
@ -673,8 +673,8 @@ void RegAllocPBQP::initializeGraph(PBQPRAGraph &G, VirtRegMap &VRM,
}
}
void RegAllocPBQP::spillVReg(unsigned VReg,
SmallVectorImpl<unsigned> &NewIntervals,
void RegAllocPBQP::spillVReg(Register VReg,
SmallVectorImpl<Register> &NewIntervals,
MachineFunction &MF, LiveIntervals &LIS,
VirtRegMap &VRM, Spiller &VRegSpiller) {
VRegsToAlloc.erase(VReg);
@ -730,7 +730,7 @@ bool RegAllocPBQP::mapPBQPToRegAlloc(const PBQPRAGraph &G,
} else {
// Spill VReg. If this introduces new intervals we'll need another round
// of allocation.
SmallVector<unsigned, 8> NewVRegs;
SmallVector<Register, 8> NewVRegs;
spillVReg(VReg, NewVRegs, MF, LIS, VRM, VRegSpiller);
AnotherRoundNeeded |= !NewVRegs.empty();
}

View File

@ -571,7 +571,7 @@ void RegisterCoalescer::getAnalysisUsage(AnalysisUsage &AU) const {
}
void RegisterCoalescer::eliminateDeadDefs() {
SmallVector<unsigned, 8> NewRegs;
SmallVector<Register, 8> NewRegs;
LiveRangeEdit(nullptr, NewRegs, *MF, *LIS,
nullptr, this).eliminateDeadDefs(DeadDefs);
}
@ -2429,7 +2429,7 @@ public:
/// Add foreign virtual registers to ShrinkRegs if their live range ended at
/// the erased instrs.
void eraseInstrs(SmallPtrSetImpl<MachineInstr*> &ErasedInstrs,
SmallVectorImpl<unsigned> &ShrinkRegs,
SmallVectorImpl<Register> &ShrinkRegs,
LiveInterval *LI = nullptr);
/// Remove liverange defs at places where implicit defs will be removed.
@ -3171,7 +3171,7 @@ void JoinVals::removeImplicitDefs() {
}
void JoinVals::eraseInstrs(SmallPtrSetImpl<MachineInstr*> &ErasedInstrs,
SmallVectorImpl<unsigned> &ShrinkRegs,
SmallVectorImpl<Register> &ShrinkRegs,
LiveInterval *LI) {
for (unsigned i = 0, e = LR.getNumValNums(); i != e; ++i) {
// Get the def location before markUnused() below invalidates it.
@ -3439,7 +3439,7 @@ bool RegisterCoalescer::joinVirtRegs(CoalescerPair &CP) {
// Erase COPY and IMPLICIT_DEF instructions. This may cause some external
// registers to require trimming.
SmallVector<unsigned, 8> ShrinkRegs;
SmallVector<Register, 8> ShrinkRegs;
LHSVals.eraseInstrs(ErasedInstrs, ShrinkRegs, &LHS);
RHSVals.eraseInstrs(ErasedInstrs, ShrinkRegs);
while (!ShrinkRegs.empty())