//===-- RegisterScavenging.cpp - Machine register scavenging --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the machine register scavenger. It can provide
/// information, such as unused registers, at any point in a machine basic
/// block. It also provides a mechanism to make registers available by evicting
/// them to spill slots.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/RegisterScavenging.h"
|
2012-12-03 17:50:05 +01:00
|
|
|
#include "llvm/CodeGen/MachineBasicBlock.h"
|
2009-08-06 18:32:47 +02:00
|
|
|
#include "llvm/CodeGen/MachineFrameInfo.h"
|
2007-02-23 02:01:19 +01:00
|
|
|
#include "llvm/CodeGen/MachineFunction.h"
|
|
|
|
#include "llvm/CodeGen/MachineInstr.h"
|
2008-04-05 03:27:09 +02:00
|
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
2010-09-02 02:51:37 +02:00
|
|
|
#include "llvm/Support/Debug.h"
|
2009-07-11 15:10:19 +02:00
|
|
|
#include "llvm/Support/ErrorHandling.h"
|
2010-09-02 02:51:37 +02:00
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2007-02-23 02:01:19 +01:00
|
|
|
#include "llvm/Target/TargetInstrInfo.h"
|
2012-12-03 17:50:05 +01:00
|
|
|
#include "llvm/Target/TargetRegisterInfo.h"
|
2014-08-04 23:25:23 +02:00
|
|
|
#include "llvm/Target/TargetSubtargetInfo.h"
|
2007-02-23 02:01:19 +01:00
|
|
|
using namespace llvm;
|
|
|
|
|
2014-04-22 04:02:50 +02:00
|
|
|
#define DEBUG_TYPE "reg-scavenging"
|
|
|
|
|
2015-09-25 23:51:14 +02:00
|
|
|
void RegScavenger::setRegUsed(unsigned Reg, LaneBitmask LaneMask) {
|
2016-08-19 05:03:24 +02:00
|
|
|
for (MCRegUnitMaskIterator RUI(Reg, TRI); RUI.isValid(); ++RUI) {
|
|
|
|
LaneBitmask UnitMask = (*RUI).second;
|
|
|
|
if (UnitMask == 0 || (LaneMask & UnitMask) != 0)
|
|
|
|
RegUnitsAvailable.reset((*RUI).first);
|
|
|
|
}
|
2009-08-11 08:25:12 +02:00
|
|
|
}
|
|
|
|
|
2016-07-20 00:37:02 +02:00
|
|
|
/// Prepare the scavenger to track liveness inside \p MBB.
///
/// Caches the target info pointers from the enclosing MachineFunction,
/// (lazily) sizes the per-unit bit vectors on first use, clears any stale
/// scavenging slot state, and computes the initial availability set: all
/// register units free except those backing pristine callee-saved registers.
/// Tracking is left off; callers (enterBasicBlock / enterBasicBlockEnd)
/// position the iterator afterwards.
void RegScavenger::init(MachineBasicBlock &MBB) {
  MachineFunction &MF = *MBB.getParent();
  TII = MF.getSubtarget().getInstrInfo();
  TRI = MF.getSubtarget().getRegisterInfo();
  MRI = &MF.getRegInfo();

  assert((NumRegUnits == 0 || NumRegUnits == TRI->getNumRegUnits()) &&
         "Target changed?");

  // It is not possible to use the register scavenger after late optimization
  // passes that don't preserve accurate liveness information.
  assert(MRI->tracksLiveness() &&
         "Cannot use register scavenger with inaccurate liveness");

  // Self-initialize: size the bit vectors the first time we see a block.
  if (!this->MBB) {
    NumRegUnits = TRI->getNumRegUnits();
    RegUnitsAvailable.resize(NumRegUnits);
    KillRegUnits.resize(NumRegUnits);
    DefRegUnits.resize(NumRegUnits);
    TmpRegUnits.resize(NumRegUnits);
  }
  this->MBB = &MBB;

  // Drop scavenging state left over from a previously processed block.
  for (ScavengedInfo &SI : Scavenged) {
    SI.Reg = 0;
    SI.Restore = nullptr;
  }

  // All register units start out unused.
  RegUnitsAvailable.set();

  // Pristine CSRs are not available. (Index 0 is NoRegister, hence I>0 —
  // NOTE(review): find_first() returns -1 when the set is empty, which the
  // same condition also filters out.)
  BitVector PR = MF.getFrameInfo().getPristineRegs(MF);
  for (int I = PR.find_first(); I>0; I = PR.find_next(I))
    setRegUsed(I);

  Tracking = false;
}
|
|
|
|
|
2016-08-19 05:03:24 +02:00
|
|
|
void RegScavenger::setLiveInsUsed(const MachineBasicBlock &MBB) {
|
|
|
|
for (const auto &LI : MBB.liveins())
|
|
|
|
setRegUsed(LI.PhysReg, LI.LaneMask);
|
|
|
|
}
|
|
|
|
|
2016-07-20 00:37:02 +02:00
|
|
|
/// Enter \p MBB for forward tracking: reset per-block state via init() and
/// seed the availability set from the block's live-in registers. Tracking
/// itself starts on the first call to forward().
void RegScavenger::enterBasicBlock(MachineBasicBlock &MBB) {
  init(MBB);
  setLiveInsUsed(MBB);
}
|
|
|
|
|
|
|
|
/// Enter \p MBB for backward tracking: reset per-block state, seed the
/// availability set from the live-outs (union of successor live-ins), and
/// position the internal iterator on the block's last instruction.
void RegScavenger::enterBasicBlockEnd(MachineBasicBlock &MBB) {
  init(MBB);
  // Merge live-ins of successors to get live-outs.
  for (const MachineBasicBlock *Succ : MBB.successors())
    setLiveInsUsed(*Succ);

  // Point the internal iterator at the last instruction; an empty block
  // leaves Tracking off.
  if (!MBB.empty()) {
    MBBI = std::prev(MBB.end());
    Tracking = true;
  }
}
|
|
|
|
|
2014-08-05 01:07:49 +02:00
|
|
|
/// Set, in \p BV, the bit of every register unit covered by \p Reg.
void RegScavenger::addRegUnits(BitVector &BV, unsigned Reg) {
  for (MCRegUnitIterator Unit(Reg, TRI); Unit.isValid(); ++Unit)
    BV.set(*Unit);
}
|
|
|
|
|
2016-07-20 00:37:02 +02:00
|
|
|
/// Clear, in \p BV, the bit of every register unit covered by \p Reg.
void RegScavenger::removeRegUnits(BitVector &BV, unsigned Reg) {
  for (MCRegUnitIterator Unit(Reg, TRI); Unit.isValid(); ++Unit)
    BV.reset(*Unit);
}
|
|
|
|
|
/// Classify the operands of the instruction at MBBI into the KillRegUnits
/// and DefRegUnits scratch sets.
///
/// KillRegUnits receives: units killed by a use, units of dead defs, and
/// units clobbered by a regmask operand (all three make the units free after
/// the instruction). DefRegUnits receives units of live (non-dead) defs.
/// The sets are consumed by forward() and unprocess(); this function does
/// not modify RegUnitsAvailable itself.
void RegScavenger::determineKillsAndDefs() {
  assert(Tracking && "Must be tracking to determine kills and defs");

  MachineInstr &MI = *MBBI;
  assert(!MI.isDebugValue() && "Debug values have no kills or defs");

  // Find out which registers are early clobbered, killed, defined, and marked
  // def-dead in this instruction.
  KillRegUnits.reset();
  DefRegUnits.reset();
  for (const MachineOperand &MO : MI.operands()) {
    if (MO.isRegMask()) {
      // A regmask clobbers a unit if every register rooting that unit is
      // clobbered; collect those units in TmpRegUnits first.
      TmpRegUnits.clear();
      for (unsigned RU = 0, RUEnd = TRI->getNumRegUnits(); RU != RUEnd; ++RU) {
        for (MCRegUnitRootIterator RURI(RU, TRI); RURI.isValid(); ++RURI) {
          if (MO.clobbersPhysReg(*RURI)) {
            TmpRegUnits.set(RU);
            break;
          }
        }
      }

      // Apply the mask: clobbered units are treated as killed.
      KillRegUnits |= TmpRegUnits;
    }
    // Regmask operands are not register operands, so this also skips them.
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    // Virtual and reserved registers are not tracked by the scavenger.
    if (!TargetRegisterInfo::isPhysicalRegister(Reg) || isReserved(Reg))
      continue;

    if (MO.isUse()) {
      // Ignore undef uses.
      if (MO.isUndef())
        continue;
      if (MO.isKill())
        addRegUnits(KillRegUnits, Reg);
    } else {
      assert(MO.isDef());
      // A dead def frees its units immediately, so it counts as a kill.
      if (MO.isDead())
        addRegUnits(KillRegUnits, Reg);
      else
        addRegUnits(DefRegUnits, Reg);
    }
  }
}
|
|
|
|
|
|
|
|
void RegScavenger::unprocess() {
|
|
|
|
assert(Tracking && "Cannot unprocess because we're not tracking");
|
|
|
|
|
2016-07-08 19:16:57 +02:00
|
|
|
MachineInstr &MI = *MBBI;
|
|
|
|
if (!MI.isDebugValue()) {
|
Reapply r178845 with fix - Fix bug in PEI's virtual-register scavenging
This fixes PEI as previously described, but correctly handles the case where
the instruction defining the virtual register to be scavenged is the first in
the block. Arnold provided me with a bugpoint-reduced test case, but even that
seems too large to use as a regression test. If I'm successful in cleaning it
up then I'll commit that as well.
Original commit message:
This change fixes a bug that I introduced in r178058. After a register is
scavenged using one of the available spills slots the instruction defining the
virtual register needs to be moved to after the spill code. The scavenger has
already processed the defining instruction so that registers killed by that
instruction are available for definition in that same instruction. Unfortunately,
after this, the scavenger needs to iterate through the spill code and then
visit, again, the instruction that defines the now-scavenged register. In order
to avoid confusion, the register scavenger needs the ability to 'back up'
through the spill code so that it can again process the instructions in the
appropriate order. Prior to this fix, once the scavenger reached the
just-moved instruction, it would assert if it killed any registers because,
having already processed the instruction, it believed they were undefined.
Unfortunately, I don't yet have a small test case. Thanks to Pranav Bhandarkar
for diagnosing the problem and testing this fix.
llvm-svn: 178919
2013-04-06 00:31:56 +02:00
|
|
|
determineKillsAndDefs();
|
|
|
|
|
|
|
|
// Commit the changes.
|
2014-08-05 01:07:49 +02:00
|
|
|
setUsed(KillRegUnits);
|
|
|
|
setUnused(DefRegUnits);
|
Reapply r178845 with fix - Fix bug in PEI's virtual-register scavenging
This fixes PEI as previously described, but correctly handles the case where
the instruction defining the virtual register to be scavenged is the first in
the block. Arnold provided me with a bugpoint-reduced test case, but even that
seems too large to use as a regression test. If I'm successful in cleaning it
up then I'll commit that as well.
Original commit message:
This change fixes a bug that I introduced in r178058. After a register is
scavenged using one of the available spills slots the instruction defining the
virtual register needs to be moved to after the spill code. The scavenger has
already processed the defining instruction so that registers killed by that
instruction are available for definition in that same instruction. Unfortunately,
after this, the scavenger needs to iterate through the spill code and then
visit, again, the instruction that defines the now-scavenged register. In order
to avoid confusion, the register scavenger needs the ability to 'back up'
through the spill code so that it can again process the instructions in the
appropriate order. Prior to this fix, once the scavenger reached the
just-moved instruction, it would assert if it killed any registers because,
having already processed the instruction, it believed they were undefined.
Unfortunately, I don't yet have a small test case. Thanks to Pranav Bhandarkar
for diagnosing the problem and testing this fix.
llvm-svn: 178919
2013-04-06 00:31:56 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if (MBBI == MBB->begin()) {
|
2014-04-14 02:51:57 +02:00
|
|
|
MBBI = MachineBasicBlock::iterator(nullptr);
|
Reapply r178845 with fix - Fix bug in PEI's virtual-register scavenging
This fixes PEI as previously described, but correctly handles the case where
the instruction defining the virtual register to be scavenged is the first in
the block. Arnold provided me with a bugpoint-reduced test case, but even that
seems too large to use as a regression test. If I'm successful in cleaning it
up then I'll commit that as well.
Original commit message:
This change fixes a bug that I introduced in r178058. After a register is
scavenged using one of the available spills slots the instruction defining the
virtual register needs to be moved to after the spill code. The scavenger has
already processed the defining instruction so that registers killed by that
instruction are available for definition in that same instruction. Unfortunately,
after this, the scavenger needs to iterate through the spill code and then
visit, again, the instruction that defines the now-scavenged register. In order
to avoid confusion, the register scavenger needs the ability to 'back up'
through the spill code so that it can again process the instructions in the
appropriate order. Prior to this fix, once the scavenger reached the
just-moved instruction, it would assert if it killed any registers because,
having already processed the instruction, it believed they were undefined.
Unfortunately, I don't yet have a small test case. Thanks to Pranav Bhandarkar
for diagnosing the problem and testing this fix.
llvm-svn: 178919
2013-04-06 00:31:56 +02:00
|
|
|
Tracking = false;
|
|
|
|
} else
|
|
|
|
--MBBI;
|
|
|
|
}
|
|
|
|
|
|
|
|
void RegScavenger::forward() {
|
|
|
|
// Move ptr forward.
|
|
|
|
if (!Tracking) {
|
|
|
|
MBBI = MBB->begin();
|
|
|
|
Tracking = true;
|
|
|
|
} else {
|
|
|
|
assert(MBBI != MBB->end() && "Already past the end of the basic block!");
|
2014-03-02 13:27:27 +01:00
|
|
|
MBBI = std::next(MBBI);
|
Reapply r178845 with fix - Fix bug in PEI's virtual-register scavenging
This fixes PEI as previously described, but correctly handles the case where
the instruction defining the virtual register to be scavenged is the first in
the block. Arnold provided me with a bugpoint-reduced test case, but even that
seems too large to use as a regression test. If I'm successful in cleaning it
up then I'll commit that as well.
Original commit message:
This change fixes a bug that I introduced in r178058. After a register is
scavenged using one of the available spills slots the instruction defining the
virtual register needs to be moved to after the spill code. The scavenger has
already processed the defining instruction so that registers killed by that
instruction are available for definition in that same instruction. Unfortunately,
after this, the scavenger needs to iterate through the spill code and then
visit, again, the instruction that defines the now-scavenged register. In order
to avoid confusion, the register scavenger needs the ability to 'back up'
through the spill code so that it can again process the instructions in the
appropriate order. Prior to this fix, once the scavenger reached the
just-moved instruction, it would assert if it killed any registers because,
having already processed the instruction, it believed they were undefined.
Unfortunately, I don't yet have a small test case. Thanks to Pranav Bhandarkar
for diagnosing the problem and testing this fix.
llvm-svn: 178919
2013-04-06 00:31:56 +02:00
|
|
|
}
|
|
|
|
assert(MBBI != MBB->end() && "Already at the end of the basic block!");
|
|
|
|
|
2016-07-08 19:16:57 +02:00
|
|
|
MachineInstr &MI = *MBBI;
|
Reapply r178845 with fix - Fix bug in PEI's virtual-register scavenging
This fixes PEI as previously described, but correctly handles the case where
the instruction defining the virtual register to be scavenged is the first in
the block. Arnold provided me with a bugpoint-reduced test case, but even that
seems too large to use as a regression test. If I'm successful in cleaning it
up then I'll commit that as well.
Original commit message:
This change fixes a bug that I introduced in r178058. After a register is
scavenged using one of the available spills slots the instruction defining the
virtual register needs to be moved to after the spill code. The scavenger has
already processed the defining instruction so that registers killed by that
instruction are available for definition in that same instruction. Unfortunately,
after this, the scavenger needs to iterate through the spill code and then
visit, again, the instruction that defines the now-scavenged register. In order
to avoid confusion, the register scavenger needs the ability to 'back up'
through the spill code so that it can again process the instructions in the
appropriate order. Prior to this fix, once the scavenger reached the
just-moved instruction, it would assert if it killed any registers because,
having already processed the instruction, it believed they were undefined.
Unfortunately, I don't yet have a small test case. Thanks to Pranav Bhandarkar
for diagnosing the problem and testing this fix.
llvm-svn: 178919
2013-04-06 00:31:56 +02:00
|
|
|
|
2013-07-03 07:11:49 +02:00
|
|
|
for (SmallVectorImpl<ScavengedInfo>::iterator I = Scavenged.begin(),
|
|
|
|
IE = Scavenged.end(); I != IE; ++I) {
|
2016-07-08 19:16:57 +02:00
|
|
|
if (I->Restore != &MI)
|
Reapply r178845 with fix - Fix bug in PEI's virtual-register scavenging
This fixes PEI as previously described, but correctly handles the case where
the instruction defining the virtual register to be scavenged is the first in
the block. Arnold provided me with a bugpoint-reduced test case, but even that
seems too large to use as a regression test. If I'm successful in cleaning it
up then I'll commit that as well.
Original commit message:
This change fixes a bug that I introduced in r178058. After a register is
scavenged using one of the available spills slots the instruction defining the
virtual register needs to be moved to after the spill code. The scavenger has
already processed the defining instruction so that registers killed by that
instruction are available for definition in that same instruction. Unfortunately,
after this, the scavenger needs to iterate through the spill code and then
visit, again, the instruction that defines the now-scavenged register. In order
to avoid confusion, the register scavenger needs the ability to 'back up'
through the spill code so that it can again process the instructions in the
appropriate order. Prior to this fix, once the scavenger reached the
just-moved instruction, it would assert if it killed any registers because,
having already processed the instruction, it believed they were undefined.
Unfortunately, I don't yet have a small test case. Thanks to Pranav Bhandarkar
for diagnosing the problem and testing this fix.
llvm-svn: 178919
2013-04-06 00:31:56 +02:00
|
|
|
continue;
|
|
|
|
|
|
|
|
I->Reg = 0;
|
2014-04-14 02:51:57 +02:00
|
|
|
I->Restore = nullptr;
|
Reapply r178845 with fix - Fix bug in PEI's virtual-register scavenging
This fixes PEI as previously described, but correctly handles the case where
the instruction defining the virtual register to be scavenged is the first in
the block. Arnold provided me with a bugpoint-reduced test case, but even that
seems too large to use as a regression test. If I'm successful in cleaning it
up then I'll commit that as well.
Original commit message:
This change fixes a bug that I introduced in r178058. After a register is
scavenged using one of the available spills slots the instruction defining the
virtual register needs to be moved to after the spill code. The scavenger has
already processed the defining instruction so that registers killed by that
instruction are available for definition in that same instruction. Unfortunately,
after this, the scavenger needs to iterate through the spill code and then
visit, again, the instruction that defines the now-scavenged register. In order
to avoid confusion, the register scavenger needs the ability to 'back up'
through the spill code so that it can again process the instructions in the
appropriate order. Prior to this fix, once the scavenger reached the
just-moved instruction, it would assert if it killed any registers because,
having already processed the instruction, it believed they were undefined.
Unfortunately, I don't yet have a small test case. Thanks to Pranav Bhandarkar
for diagnosing the problem and testing this fix.
llvm-svn: 178919
2013-04-06 00:31:56 +02:00
|
|
|
}
|
|
|
|
|
2016-07-08 19:16:57 +02:00
|
|
|
if (MI.isDebugValue())
|
Reapply r178845 with fix - Fix bug in PEI's virtual-register scavenging
This fixes PEI as previously described, but correctly handles the case where
the instruction defining the virtual register to be scavenged is the first in
the block. Arnold provided me with a bugpoint-reduced test case, but even that
seems too large to use as a regression test. If I'm successful in cleaning it
up then I'll commit that as well.
Original commit message:
This change fixes a bug that I introduced in r178058. After a register is
scavenged using one of the available spills slots the instruction defining the
virtual register needs to be moved to after the spill code. The scavenger has
already processed the defining instruction so that registers killed by that
instruction are available for definition in that same instruction. Unfortunately,
after this, the scavenger needs to iterate through the spill code and then
visit, again, the instruction that defines the now-scavenged register. In order
to avoid confusion, the register scavenger needs the ability to 'back up'
through the spill code so that it can again process the instructions in the
appropriate order. Prior to this fix, once the scavenger reached the
just-moved instruction, it would assert if it killed any registers because,
having already processed the instruction, it believed they were undefined.
Unfortunately, I don't yet have a small test case. Thanks to Pranav Bhandarkar
for diagnosing the problem and testing this fix.
llvm-svn: 178919
2013-04-06 00:31:56 +02:00
|
|
|
return;
|
|
|
|
|
|
|
|
determineKillsAndDefs();
|
2008-03-03 23:12:25 +01:00
|
|
|
|
2009-08-08 15:18:47 +02:00
|
|
|
// Verify uses and defs.
|
2012-01-29 02:29:28 +01:00
|
|
|
#ifndef NDEBUG
|
2016-07-08 19:16:57 +02:00
|
|
|
for (const MachineOperand &MO : MI.operands()) {
|
2011-05-02 22:36:53 +02:00
|
|
|
if (!MO.isReg())
|
2009-07-01 03:59:31 +02:00
|
|
|
continue;
|
2009-08-08 15:18:47 +02:00
|
|
|
unsigned Reg = MO.getReg();
|
2016-06-30 02:23:54 +02:00
|
|
|
if (!TargetRegisterInfo::isPhysicalRegister(Reg) || isReserved(Reg))
|
2007-03-02 11:43:16 +01:00
|
|
|
continue;
|
2009-08-08 15:18:47 +02:00
|
|
|
if (MO.isUse()) {
|
2011-05-02 22:36:53 +02:00
|
|
|
if (MO.isUndef())
|
|
|
|
continue;
|
2014-08-05 01:07:49 +02:00
|
|
|
if (!isRegUsed(Reg)) {
|
2009-10-26 05:56:07 +01:00
|
|
|
// Check if it's partial live: e.g.
|
|
|
|
// D0 = insert_subreg D0<undef>, S0
|
|
|
|
// ... D0
|
|
|
|
// The problem is the insert_subreg could be eliminated. The use of
|
|
|
|
// D0 is using a partially undef value. This is not *incorrect* since
|
|
|
|
// S1 is can be freely clobbered.
|
|
|
|
// Ideally we would like a way to model this, but leaving the
|
|
|
|
// insert_subreg around causes both correctness and performance issues.
|
|
|
|
bool SubUsed = false;
|
2012-06-02 01:28:30 +02:00
|
|
|
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
|
2014-08-05 01:07:49 +02:00
|
|
|
if (isRegUsed(*SubRegs)) {
|
2009-10-26 05:56:07 +01:00
|
|
|
SubUsed = true;
|
|
|
|
break;
|
|
|
|
}
|
2014-08-05 01:07:49 +02:00
|
|
|
bool SuperUsed = false;
|
|
|
|
for (MCSuperRegIterator SR(Reg, TRI); SR.isValid(); ++SR) {
|
|
|
|
if (isRegUsed(*SR)) {
|
|
|
|
SuperUsed = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!SubUsed && !SuperUsed) {
|
2014-04-14 02:51:57 +02:00
|
|
|
MBB->getParent()->verify(nullptr, "In Register Scavenger");
|
2012-01-16 21:38:31 +01:00
|
|
|
llvm_unreachable("Using an undefined register!");
|
|
|
|
}
|
2011-08-12 16:54:45 +02:00
|
|
|
(void)SubUsed;
|
2014-08-05 01:07:49 +02:00
|
|
|
(void)SuperUsed;
|
2009-10-26 05:56:07 +01:00
|
|
|
}
|
2009-08-08 15:18:47 +02:00
|
|
|
} else {
|
|
|
|
assert(MO.isDef());
|
Fix PR5024 with a big hammer: disable the double-def assertion in the scavenger.
LiveVariables add implicit kills to correctly track partial register kills. This works well enough and is fairly accurate. But coalescer can make it impossible to maintain these markers. e.g.
BL <ga:sss1>, %R0<kill,undef>, %S0<kill>, %R0<imp-def>, %R1<imp-def,dead>, %R2<imp-def,dead>, %R3<imp-def,dead>, %R12<imp-def,dead>, %LR<imp-def,dead>, %D0<imp-def>, ...
...
%reg1031<def> = FLDS <cp#1>, 0, 14, %reg0, Mem:LD4[ConstantPool]
...
%S0<def> = FCPYS %reg1031<kill>, 14, %reg0, %D0<imp-use,kill>
When reg1031 and S0 are coalesced, the copy (FCPYS) will be eliminated the the implicit-kill of D0 is lost. In this case it's possible to move the marker to the FLDS. But in many cases, this is not possible. Suppose
%reg1031<def> = FOO <cp#1>, %D0<imp-def>
...
%S0<def> = FCPYS %reg1031<kill>, 14, %reg0, %D0<imp-use,kill>
When FCPYS goes away, the definition of S0 is the "FOO" instruction. However, transferring the D0 implicit-kill to FOO doesn't work since it is the def of D0 itself. We need to fix this in another time by introducing a "kill" pseudo instruction to track liveness.
Disabling the assertion is not ideal, but machine verifier is doing that job now. It's important to know double-def is not a miscomputation since it means a register should be free but it's not tracked as free. It's a performance issue instead.
llvm-svn: 82677
2009-09-24 04:27:09 +02:00
|
|
|
#if 0
|
|
|
|
// FIXME: Enable this once we've figured out how to correctly transfer
|
|
|
|
// implicit kills during codegen passes like the coalescer.
|
Remove RegisterScavenger::isSuperRegUsed(). This completely reverses the mistaken commit r77904.
Now there is no special treatment of instructions that redefine part of a
super-register. Instead, the super-register is marked with <imp-use,kill> and
<imp-def>. For instance, from LowerSubregs on ARM:
subreg: CONVERTING: %Q1<def> = INSERT_SUBREG %Q1<undef>, %D1<kill>, 5
subreg: %D2<def> = FCPYD %D1<kill>, 14, %reg0, %Q1<imp-def>
subreg: CONVERTING: %Q1<def> = INSERT_SUBREG %Q1, %D0<kill>, 6
subreg: %D3<def> = FCPYD %D0<kill>, 14, %reg0, %Q1<imp-use,kill>, %Q1<imp-def>
llvm-svn: 78466
2009-08-08 15:19:10 +02:00
|
|
|
assert((KillRegs.test(Reg) || isUnused(Reg) ||
|
2009-08-08 15:18:47 +02:00
|
|
|
isLiveInButUnusedBefore(Reg, MI, MBB, TRI, MRI)) &&
|
|
|
|
"Re-defining a live register!");
|
Fix PR5024 with a big hammer: disable the double-def assertion in the scavenger.
LiveVariables add implicit kills to correctly track partial register kills. This works well enough and is fairly accurate. But coalescer can make it impossible to maintain these markers. e.g.
BL <ga:sss1>, %R0<kill,undef>, %S0<kill>, %R0<imp-def>, %R1<imp-def,dead>, %R2<imp-def,dead>, %R3<imp-def,dead>, %R12<imp-def,dead>, %LR<imp-def,dead>, %D0<imp-def>, ...
...
%reg1031<def> = FLDS <cp#1>, 0, 14, %reg0, Mem:LD4[ConstantPool]
...
%S0<def> = FCPYS %reg1031<kill>, 14, %reg0, %D0<imp-use,kill>
When reg1031 and S0 are coalesced, the copy (FCPYS) will be eliminated the the implicit-kill of D0 is lost. In this case it's possible to move the marker to the FLDS. But in many cases, this is not possible. Suppose
%reg1031<def> = FOO <cp#1>, %D0<imp-def>
...
%S0<def> = FCPYS %reg1031<kill>, 14, %reg0, %D0<imp-use,kill>
When FCPYS goes away, the definition of S0 is the "FOO" instruction. However, transferring the D0 implicit-kill to FOO doesn't work since it is the def of D0 itself. We need to fix this in another time by introducing a "kill" pseudo instruction to track liveness.
Disabling the assertion is not ideal, but machine verifier is doing that job now. It's important to know double-def is not a miscomputation since it means a register should be free but it's not tracked as free. It's a performance issue instead.
llvm-svn: 82677
2009-09-24 04:27:09 +02:00
|
|
|
#endif
|
2007-03-02 11:43:16 +01:00
|
|
|
}
|
2007-02-23 02:01:19 +01:00
|
|
|
}
|
2012-01-29 02:29:28 +01:00
|
|
|
#endif // NDEBUG
|
2009-08-08 15:18:47 +02:00
|
|
|
|
|
|
|
// Commit the changes.
|
2014-08-05 01:07:49 +02:00
|
|
|
setUnused(KillRegUnits);
|
|
|
|
setUsed(DefRegUnits);
|
2007-02-23 02:01:19 +01:00
|
|
|
}
|
|
|
|
|
2016-07-20 00:37:02 +02:00
|
|
|
/// Step the scavenger backwards over the instruction at MBBI, updating
/// RegUnitsAvailable directly (KillRegUnits/DefRegUnits are not used here).
/// After the call, the availability set reflects liveness just before the
/// instruction; retreating past the first instruction turns tracking off.
void RegScavenger::backward() {
  assert(Tracking && "Must be tracking to determine kills and defs");

  const MachineInstr &MI = *MBBI;
  // Defined or clobbered registers are available now.
  // NOTE(review): defs are processed before uses below, so a register both
  // defined and read by MI ends up unavailable (the use wins) — confirm
  // that is the intended ordering.
  for (const MachineOperand &MO : MI.operands()) {
    if (MO.isRegMask()) {
      // A regmask clobbers a unit if every register rooting that unit is
      // clobbered; such units become available.
      for (unsigned RU = 0, RUEnd = TRI->getNumRegUnits(); RU != RUEnd;
           ++RU) {
        for (MCRegUnitRootIterator RURI(RU, TRI); RURI.isValid(); ++RURI) {
          if (MO.clobbersPhysReg(*RURI)) {
            RegUnitsAvailable.set(RU);
            break;
          }
        }
      }
    } else if (MO.isReg() && MO.isDef()) {
      unsigned Reg = MO.getReg();
      // Virtual and reserved registers are not tracked.
      if (!Reg || TargetRegisterInfo::isVirtualRegister(Reg) ||
          isReserved(Reg))
        continue;
      addRegUnits(RegUnitsAvailable, Reg);
    }
  }

  // Mark read registers as unavailable.
  for (const MachineOperand &MO : MI.uses()) {
    if (MO.isReg() && MO.readsReg()) {
      unsigned Reg = MO.getReg();
      if (!Reg || TargetRegisterInfo::isVirtualRegister(Reg) ||
          isReserved(Reg))
        continue;
      removeRegUnits(RegUnitsAvailable, Reg);
    }
  }

  // Retreat the iterator; leaving the block ends tracking.
  if (MBBI == MBB->begin()) {
    MBBI = MachineBasicBlock::iterator(nullptr);
    Tracking = false;
  } else
    --MBBI;
}
|
|
|
|
|
2014-08-05 01:07:49 +02:00
|
|
|
bool RegScavenger::isRegUsed(unsigned Reg, bool includeReserved) const {
|
2016-08-19 05:03:24 +02:00
|
|
|
if (includeReserved && isReserved(Reg))
|
|
|
|
return true;
|
|
|
|
for (MCRegUnitIterator RUI(Reg, TRI); RUI.isValid(); ++RUI)
|
|
|
|
if (!RegUnitsAvailable.test(*RUI))
|
|
|
|
return true;
|
|
|
|
return false;
|
2007-03-20 22:35:06 +01:00
|
|
|
}
|
|
|
|
|
2009-08-18 23:14:54 +02:00
|
|
|
unsigned RegScavenger::FindUnusedReg(const TargetRegisterClass *RC) const {
|
2016-06-30 02:23:54 +02:00
|
|
|
for (unsigned Reg : *RC) {
|
|
|
|
if (!isRegUsed(Reg)) {
|
|
|
|
DEBUG(dbgs() << "Scavenger found unused reg: " << TRI->getName(Reg) <<
|
2010-09-02 02:51:37 +02:00
|
|
|
"\n");
|
2016-06-30 02:23:54 +02:00
|
|
|
return Reg;
|
2010-09-02 02:51:37 +02:00
|
|
|
}
|
2016-06-30 02:23:54 +02:00
|
|
|
}
|
2009-08-18 23:14:54 +02:00
|
|
|
return 0;
|
2007-02-23 02:01:19 +01:00
|
|
|
}
|
2007-03-06 11:01:25 +01:00
|
|
|
|
2011-03-05 01:20:19 +01:00
|
|
|
/// Return a bit vector, indexed by register number, with a bit set for each
/// register of class \p RC that is currently unused.
BitVector RegScavenger::getRegsAvailable(const TargetRegisterClass *RC) {
  BitVector Free(TRI->getNumRegs());
  for (unsigned PhysReg : *RC) {
    if (isRegUsed(PhysReg))
      continue;
    Free.set(PhysReg);
  }
  return Free;
}
|
|
|
|
|
2009-10-25 02:45:07 +02:00
|
|
|
/// Scan forward from \p StartMI for up to \p InstrLimit instructions (debug
/// values are not counted) and return a register from \p Candidates whose
/// first use or def is furthest away. \p UseMI is set to the point where a
/// spilled copy of that register must be restored: the last scanned
/// position that is not inside the live range of any virtual register
/// defined during the scan. \p Candidates is destructively updated.
unsigned RegScavenger::findSurvivorReg(MachineBasicBlock::iterator StartMI,
                                       BitVector &Candidates,
                                       unsigned InstrLimit,
                                       MachineBasicBlock::iterator &UseMI) {
  int Survivor = Candidates.find_first();
  assert(Survivor > 0 && "No candidates for scavenging");

  MachineBasicBlock::iterator ME = MBB->getFirstTerminator();
  assert(StartMI != ME && "MI already at terminator");
  MachineBasicBlock::iterator RestorePointMI = StartMI;
  MachineBasicBlock::iterator MI = StartMI;

  // Tracks whether the scan is currently inside the live range of a virtual
  // register (between a vreg def and its kill); restoring there would be
  // unsafe because the vreg may later be assigned the scavenged register.
  bool inVirtLiveRange = false;
  for (++MI; InstrLimit > 0 && MI != ME; ++MI, --InstrLimit) {
    if (MI->isDebugValue()) {
      ++InstrLimit; // Don't count debug instructions
      continue;
    }
    bool isVirtKillInsn = false;
    bool isVirtDefInsn = false;
    // Remove any candidates touched by instruction.
    for (const MachineOperand &MO : MI->operands()) {
      if (MO.isRegMask())
        Candidates.clearBitsNotInMask(MO.getRegMask());
      if (!MO.isReg() || MO.isUndef() || !MO.getReg())
        continue;
      if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        if (MO.isDef())
          isVirtDefInsn = true;
        else if (MO.isKill())
          isVirtKillInsn = true;
        continue;
      }
      // A physical register operand knocks out the register and everything
      // aliasing it.
      for (MCRegAliasIterator AI(MO.getReg(), TRI, true); AI.isValid(); ++AI)
        Candidates.reset(*AI);
    }
    // If we're not in a virtual reg's live range, this is a valid
    // restore point.
    if (!inVirtLiveRange) RestorePointMI = MI;

    // Update whether we're in the live range of a virtual register
    if (isVirtKillInsn) inVirtLiveRange = false;
    if (isVirtDefInsn) inVirtLiveRange = true;

    // Was our survivor untouched by this instruction?
    if (Candidates.test(Survivor))
      continue;

    // All candidates gone?
    if (Candidates.none())
      break;

    // The current survivor was touched; switch to another live candidate.
    Survivor = Candidates.find_first();
  }
  // If we ran off the end, that's where we want to restore.
  if (MI == ME) RestorePointMI = ME;
  assert(RestorePointMI != StartMI &&
         "No available scavenger restore location!");

  // We ran out of candidates, so stop the search.
  UseMI = RestorePointMI;
  return Survivor;
}
|
|
|
|
|
2016-07-08 19:16:57 +02:00
|
|
|
/// Return the index of the first frame-index operand of \p MI. Asserts if
/// the instruction has no frame-index operand.
static unsigned getFrameIndexOperandNum(MachineInstr &MI) {
  unsigned OpNo = 0;
  while (!MI.getOperand(OpNo).isFI()) {
    ++OpNo;
    assert(OpNo < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }
  return OpNo;
}
|
|
|
|
|
2016-08-19 05:03:24 +02:00
|
|
|
unsigned RegScavenger::scavengeRegister(const TargetRegisterClass *RC,
|
|
|
|
MachineBasicBlock::iterator I,
|
|
|
|
int SPAdj) {
|
|
|
|
MachineInstr &MI = *I;
|
|
|
|
const MachineFunction &MF = *MI.getParent()->getParent();
|
|
|
|
// Consider all allocatable registers in the register class initially
|
|
|
|
BitVector Candidates = TRI->getAllocatableSet(MF, RC);
|
|
|
|
|
|
|
|
// Exclude all the registers being used by the instruction.
|
|
|
|
for (const MachineOperand &MO : MI.operands()) {
|
|
|
|
if (MO.isReg() && MO.getReg() != 0 && !(MO.isUse() && MO.isUndef()) &&
|
|
|
|
!TargetRegisterInfo::isVirtualRegister(MO.getReg()))
|
2016-09-06 12:10:21 +02:00
|
|
|
for (MCRegAliasIterator AI(MO.getReg(), TRI, true); AI.isValid(); ++AI)
|
|
|
|
Candidates.reset(*AI);
|
2016-08-19 05:03:24 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Try to find a register that's unused if there is one, as then we won't
|
|
|
|
// have to spill.
|
|
|
|
BitVector Available = getRegsAvailable(RC);
|
|
|
|
Available &= Candidates;
|
|
|
|
if (Available.any())
|
|
|
|
Candidates = Available;
|
|
|
|
|
|
|
|
// Find the register whose use is furthest away.
|
|
|
|
MachineBasicBlock::iterator UseMI;
|
|
|
|
unsigned SReg = findSurvivorReg(I, Candidates, 25, UseMI);
|
|
|
|
|
|
|
|
// If we found an unused register there is no reason to spill it.
|
|
|
|
if (!isRegUsed(SReg)) {
|
|
|
|
DEBUG(dbgs() << "Scavenged register: " << TRI->getName(SReg) << "\n");
|
|
|
|
return SReg;
|
|
|
|
}
|
|
|
|
|
2016-05-18 20:16:00 +02:00
|
|
|
// Find an available scavenging slot with size and alignment matching
|
|
|
|
// the requirements of the class RC.
|
2016-07-28 20:40:00 +02:00
|
|
|
const MachineFrameInfo &MFI = MF.getFrameInfo();
|
2016-08-19 05:03:24 +02:00
|
|
|
unsigned NeedSize = RC->getSize();
|
|
|
|
unsigned NeedAlign = RC->getAlignment();
|
2016-05-18 20:16:00 +02:00
|
|
|
|
|
|
|
unsigned SI = Scavenged.size(), Diff = UINT_MAX;
|
2016-05-20 16:18:54 +02:00
|
|
|
int FIB = MFI.getObjectIndexBegin(), FIE = MFI.getObjectIndexEnd();
|
2016-05-18 20:16:00 +02:00
|
|
|
for (unsigned I = 0; I < Scavenged.size(); ++I) {
|
|
|
|
if (Scavenged[I].Reg != 0)
|
|
|
|
continue;
|
|
|
|
// Verify that this slot is valid for this register.
|
|
|
|
int FI = Scavenged[I].FrameIndex;
|
2016-05-20 16:18:54 +02:00
|
|
|
if (FI < FIB || FI >= FIE)
|
|
|
|
continue;
|
2016-05-18 20:16:00 +02:00
|
|
|
unsigned S = MFI.getObjectSize(FI);
|
|
|
|
unsigned A = MFI.getObjectAlignment(FI);
|
|
|
|
if (NeedSize > S || NeedAlign > A)
|
|
|
|
continue;
|
|
|
|
// Avoid wasting slots with large size and/or large alignment. Pick one
|
|
|
|
// that is the best fit for this register class (in street metric).
|
|
|
|
// Picking a larger slot than necessary could happen if a slot for a
|
|
|
|
// larger register is reserved before a slot for a smaller one. When
|
|
|
|
// trying to spill a smaller register, the large slot would be found
|
|
|
|
// first, thus making it impossible to spill the larger register later.
|
|
|
|
unsigned D = (S-NeedSize) + (A-NeedAlign);
|
|
|
|
if (D < Diff) {
|
|
|
|
SI = I;
|
|
|
|
Diff = D;
|
|
|
|
}
|
|
|
|
}
|
2013-03-23 00:32:27 +01:00
|
|
|
|
2013-03-27 14:00:56 +01:00
|
|
|
if (SI == Scavenged.size()) {
|
2013-03-26 22:20:15 +01:00
|
|
|
// We need to scavenge a register but have no spill slot, the target
|
|
|
|
// must know how to do it (if not, we'll assert below).
|
2016-05-20 16:18:54 +02:00
|
|
|
Scavenged.push_back(ScavengedInfo(FIE));
|
2013-03-26 22:20:15 +01:00
|
|
|
}
|
2007-03-06 11:01:25 +01:00
|
|
|
|
2009-08-06 18:32:47 +02:00
|
|
|
// Avoid infinite regress
|
2016-08-19 05:03:24 +02:00
|
|
|
Scavenged[SI].Reg = SReg;
|
2009-08-06 18:32:47 +02:00
|
|
|
|
2009-10-06 00:30:23 +02:00
|
|
|
// If the target knows how to save/restore the register, let it do so;
|
|
|
|
// otherwise, use the emergency stack spill slot.
|
2016-08-19 05:03:24 +02:00
|
|
|
if (!TRI->saveScavengerRegister(*MBB, I, UseMI, RC, SReg)) {
|
|
|
|
// Spill the scavenged register before I.
|
2016-05-20 16:34:03 +02:00
|
|
|
int FI = Scavenged[SI].FrameIndex;
|
|
|
|
if (FI < FIB || FI >= FIE) {
|
2016-05-20 18:38:34 +02:00
|
|
|
std::string Msg = std::string("Error while trying to spill ") +
|
2016-08-19 05:03:24 +02:00
|
|
|
TRI->getName(SReg) + " from class " + TRI->getRegClassName(RC) +
|
2016-05-20 18:38:34 +02:00
|
|
|
": Cannot scavenge register without an emergency spill slot!";
|
2016-05-20 21:46:42 +02:00
|
|
|
report_fatal_error(Msg.c_str());
|
2016-05-18 20:16:00 +02:00
|
|
|
}
|
2016-08-19 05:03:24 +02:00
|
|
|
TII->storeRegToStackSlot(*MBB, I, SReg, true, Scavenged[SI].FrameIndex,
|
|
|
|
RC, TRI);
|
|
|
|
MachineBasicBlock::iterator II = std::prev(I);
|
2013-01-31 21:02:54 +01:00
|
|
|
|
2016-07-08 19:16:57 +02:00
|
|
|
unsigned FIOperandNum = getFrameIndexOperandNum(*II);
|
2013-01-31 21:02:54 +01:00
|
|
|
TRI->eliminateFrameIndex(II, SPAdj, FIOperandNum, this);
|
2009-10-06 00:30:23 +02:00
|
|
|
|
|
|
|
// Restore the scavenged register before its use (or first terminator).
|
2016-08-19 05:03:24 +02:00
|
|
|
TII->loadRegFromStackSlot(*MBB, UseMI, SReg, Scavenged[SI].FrameIndex,
|
|
|
|
RC, TRI);
|
2014-03-02 13:27:27 +01:00
|
|
|
II = std::prev(UseMI);
|
2013-01-31 21:02:54 +01:00
|
|
|
|
2016-07-08 19:16:57 +02:00
|
|
|
FIOperandNum = getFrameIndexOperandNum(*II);
|
2013-01-31 21:02:54 +01:00
|
|
|
TRI->eliminateFrameIndex(II, SPAdj, FIOperandNum, this);
|
2009-10-20 00:27:30 +02:00
|
|
|
}
|
2016-08-18 21:47:59 +02:00
|
|
|
|
2016-08-19 05:03:24 +02:00
|
|
|
Scavenged[SI].Restore = &*std::prev(UseMI);
|
2016-08-18 21:47:59 +02:00
|
|
|
|
2016-08-19 05:03:24 +02:00
|
|
|
// Doing this here leads to infinite regress.
|
|
|
|
// Scavenged[SI].Reg = SReg;
|
2007-03-06 11:01:25 +01:00
|
|
|
|
2010-09-02 02:51:37 +02:00
|
|
|
DEBUG(dbgs() << "Scavenged register (with spill): " << TRI->getName(SReg) <<
|
|
|
|
"\n");
|
|
|
|
|
2007-03-06 11:01:25 +01:00
|
|
|
return SReg;
|
|
|
|
}
|