2004-07-23 19:56:30 +02:00
|
|
|
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
|
2003-11-20 04:32:25 +01:00
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
2007-12-29 21:36:04 +01:00
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
2003-11-20 04:32:25 +01:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This file implements the LiveInterval analysis pass which is used
|
|
|
|
// by the Linear Scan Register allocator. This pass linearizes the
|
|
|
|
// basic blocks of the function in DFS order and uses the
|
|
|
|
// LiveVariables pass to conservatively compute live intervals for
|
|
|
|
// each virtual and physical register.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#define DEBUG_TYPE "liveintervals"
|
2005-09-21 06:19:09 +02:00
|
|
|
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
|
2004-09-03 20:25:53 +02:00
|
|
|
#include "VirtRegMap.h"
|
2004-05-01 23:24:39 +02:00
|
|
|
#include "llvm/Value.h"
|
2008-07-25 02:02:30 +02:00
|
|
|
#include "llvm/Analysis/AliasAnalysis.h"
|
2011-02-15 00:15:38 +01:00
|
|
|
#include "llvm/CodeGen/CalcSpillWeights.h"
|
2003-11-20 04:32:25 +01:00
|
|
|
#include "llvm/CodeGen/LiveVariables.h"
|
|
|
|
#include "llvm/CodeGen/MachineFrameInfo.h"
|
|
|
|
#include "llvm/CodeGen/MachineInstr.h"
|
2009-07-01 03:59:31 +02:00
|
|
|
#include "llvm/CodeGen/MachineInstrBuilder.h"
|
2007-12-11 03:09:15 +01:00
|
|
|
#include "llvm/CodeGen/MachineLoopInfo.h"
|
2009-09-25 22:36:54 +02:00
|
|
|
#include "llvm/CodeGen/MachineMemOperand.h"
|
2007-12-31 05:13:23 +01:00
|
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
2003-11-20 04:32:25 +01:00
|
|
|
#include "llvm/CodeGen/Passes.h"
|
2009-11-04 00:52:08 +01:00
|
|
|
#include "llvm/CodeGen/ProcessImplicitDefs.h"
|
2008-02-10 19:45:23 +01:00
|
|
|
#include "llvm/Target/TargetRegisterInfo.h"
|
2003-11-20 04:32:25 +01:00
|
|
|
#include "llvm/Target/TargetInstrInfo.h"
|
|
|
|
#include "llvm/Target/TargetMachine.h"
|
2008-10-07 22:22:28 +02:00
|
|
|
#include "llvm/Target/TargetOptions.h"
|
2004-09-02 00:55:40 +02:00
|
|
|
#include "llvm/Support/CommandLine.h"
|
|
|
|
#include "llvm/Support/Debug.h"
|
2009-07-11 15:10:19 +02:00
|
|
|
#include "llvm/Support/ErrorHandling.h"
|
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2009-07-01 03:59:31 +02:00
|
|
|
#include "llvm/ADT/DepthFirstIterator.h"
|
|
|
|
#include "llvm/ADT/SmallSet.h"
|
2004-09-02 00:55:40 +02:00
|
|
|
#include "llvm/ADT/Statistic.h"
|
|
|
|
#include "llvm/ADT/STLExtras.h"
|
2004-09-03 20:19:51 +02:00
|
|
|
#include <algorithm>
|
2009-06-02 18:53:25 +02:00
|
|
|
#include <limits>
|
2006-12-02 03:22:01 +01:00
|
|
|
#include <cmath>
|
2003-11-20 04:32:25 +01:00
|
|
|
using namespace llvm;
|
|
|
|
|
2008-05-13 02:00:25 +02:00
|
|
|
// Hidden options for help debugging.
// -disable-rematerialization: turn off rematerialization entirely (the
// allocator will always spill/reload instead of recomputing values).
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

// Pass statistics, printed with -stats.
STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");
|
2006-12-19 23:41:21 +01:00
|
|
|
|
2007-05-03 03:11:54 +02:00
|
|
|
char LiveIntervals::ID = 0;
// Register the pass under the name "liveintervals" and declare the passes it
// depends on so the PassRegistry can schedule them first.  The two trailing
// 'false' flags mean: not CFG-only, not an analysis-only pass.
INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveVariables)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(PHIElimination)
INITIALIZE_PASS_DEPENDENCY(TwoAddressInstructionPass)
INITIALIZE_PASS_DEPENDENCY(ProcessImplicitDefs)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)
|
2003-11-20 04:32:25 +01:00
|
|
|
|
2006-08-25 00:43:55 +02:00
|
|
|
/// getAnalysisUsage - Declare the analyses this pass requires and preserves.
/// LiveIntervals only adds information about the function, so the CFG is
/// never modified.
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveVariables>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addPreservedID(MachineDominatorsID);

  // When strong PHI elimination is enabled, PHI elimination is handled
  // elsewhere, so only require/preserve it in the normal configuration.
  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  // SlotIndexes is required transitively: users of LiveIntervals also need
  // the instruction numbering to stay alive.
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
|
|
|
|
|
2006-08-25 00:43:55 +02:00
|
|
|
/// releaseMemory - Drop all per-function state: the interval map, the VNInfo
/// allocator, and any cloned machine instructions created during analysis.
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();

  // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
  VNInfoAllocator.Reset();
  // Cloned instructions were allocated through the MachineFunction, so give
  // them back to it rather than deleting them directly.
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}
|
|
|
|
|
2008-05-28 22:54:50 +02:00
|
|
|
/// runOnMachineFunction - Compute live intervals for the whole function.
/// (Despite the historical comment, this pass does not itself allocate
/// registers; it produces the intervals the allocators consume.)
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  // Cache the function, target, and analysis handles used throughout.
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  // Returns true because the function (instruction numbering, implicit
  // operands) may have been modified.
  return true;
}
|
|
|
|
|
2004-09-30 17:59:17 +02:00
|
|
|
/// print - Implement the dump method.  Prints every computed interval
/// followed by the machine instructions with their slot indexes.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}
|
|
|
|
|
|
|
|
/// printInstrs - Print the machine function, annotated with slot indexes.
void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";
  mf_->print(OS, indexes_);
}
|
|
|
|
|
2009-09-14 23:33:42 +02:00
|
|
|
/// dumpInstrs - Convenience wrapper: print the instructions to the debug
/// stream.
void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}
|
|
|
|
|
2009-12-10 18:48:32 +01:00
|
|
|
/// conflictsWithPhysReg - Return true if assigning physical register 'reg'
/// would conflict with the virtual-register interval 'li'.  This is a
/// conservative check: anything it cannot prove safe returns true.
bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();

  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  // No instructions left in the range: nothing can conflict.
  if (!firstMI)
    return false;

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  // Walk every instruction in [firstMI, lastMI] looking for an operand that
  // overlaps 'reg'.
  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    if (MI.isCopy())
      if (MI.getOperand(0).getReg() == li.reg ||
          MI.getOperand(1).getReg() == li.reg)
        continue;

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      // Virtual operands conflict through their current phys assignment, if
      // any, recorded in the VirtRegMap.
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      // regsOverlap also catches aliasing sub/super-registers.
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}
|
|
|
|
|
2010-06-24 20:15:01 +02:00
|
|
|
/// conflictsWithAliasRef - Return true if any instruction inside the live
/// ranges of 'li' has a physical-register operand that overlaps 'Reg'.
/// Instructions in JoinedCopies (copies already coalesced away) are ignored.
bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    // Scan every slot index covered by this range.
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index != end;
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        unsigned PhysReg = MO.getReg();
        // Only other physical registers can alias-conflict with Reg.
        if (PhysReg == 0 || PhysReg == Reg ||
            TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->regsOverlap(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}
|
|
|
|
|
2010-05-04 22:26:52 +02:00
|
|
|
static
|
2010-05-05 20:27:40 +02:00
|
|
|
bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
|
2010-05-04 22:26:52 +02:00
|
|
|
unsigned Reg = MI.getOperand(MOIdx).getReg();
|
|
|
|
for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
|
|
|
|
const MachineOperand &MO = MI.getOperand(i);
|
|
|
|
if (!MO.isReg())
|
|
|
|
continue;
|
|
|
|
if (MO.getReg() == Reg && MO.isDef()) {
|
|
|
|
assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
|
|
|
|
MI.getOperand(MOIdx).getSubReg() &&
|
2010-07-07 01:26:25 +02:00
|
|
|
(MO.getSubReg() || MO.isImplicit()));
|
2010-05-04 22:26:52 +02:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2010-05-05 20:27:40 +02:00
|
|
|
/// isPartialRedef - Return true if the specified def at the specific index is
|
|
|
|
/// partially re-defining the specified live interval. A common case of this is
|
2010-08-12 22:01:23 +02:00
|
|
|
/// a definition of the sub-register.
|
2010-05-05 20:27:40 +02:00
|
|
|
bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
|
|
|
|
LiveInterval &interval) {
|
|
|
|
if (!MO.getSubReg() || MO.isEarlyClobber())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
SlotIndex RedefIndex = MIIdx.getDefIndex();
|
|
|
|
const LiveRange *OldLR =
|
|
|
|
interval.getLiveRangeContaining(RedefIndex.getUseIndex());
|
2010-09-25 14:04:16 +02:00
|
|
|
MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
|
|
|
|
if (DefMI != 0) {
|
2010-05-05 20:27:40 +02:00
|
|
|
return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2006-08-22 20:19:46 +02:00
|
|
|
/// handleVirtualRegisterDef - Extend 'interval' for the def of a virtual
/// register by operand MOIdx of instruction 'mi' (at slot MIIdx) in block
/// 'mbb'.  Uses LiveVariables kill information to build the ranges.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();

    // Make sure the first definition is not a partial redefinition. Add an
    // <imp-def> of the full register.
    if (MO.getSubReg())
      mi->addRegisterDefined(interval.reg);

    // Remember the defining copy, if any, for later coalescing decisions.
    MachineInstr *CopyMI = NULL;
    if (mi->isCopyLike()) {
      CopyMI = mi;
    }

    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        // The def instruction kills the register itself (dead def): extend
        // only to the store slot of the same instruction.
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a
      // new valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
               E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock),
                     ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create interval with one of a NEW value number.  Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        assert(getInstructionFromIndex(Start) == 0 &&
               "PHI def index points at actual instruction.");
        ValNo = interval.getNextValue(Start, 0, VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.
      return;

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.

    // It may also be partial redef like this:
    // 80  %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;
      SlotIndex DefIndex = OldValNo->def.getDefIndex();

      // Delete the previous value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->setCopy(0);

      // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
      if (PartReDef && mi->isCopyLike())
        OldValNo->setCopy(&*mi);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      if (mi->isCopyLike())
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    } else {
      llvm_unreachable("Multiply defined register");
    }
  }

  DEBUG(dbgs() << '\n');
}
|
|
|
|
|
2004-07-23 23:24:19 +02:00
|
|
|
/// handlePhysicalRegisterDef - Extend 'interval' for a physical register def
/// by instruction 'mi' (at slot MIIdx) in block MBB.  CopyMI, if non-null, is
/// the copy instruction producing the value.
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic block, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    // DBG_VALUE instructions have no slot index; don't let them advance the
    // scan position.
    if (mi->isDebugValue())
      continue;
    // Skip over slots whose instructions have been deleted.
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should have a dead physreg here without a killing or
  // instruction where we know it's dead is if it is live-in to the function
  // and never used. Another possible case is the implicit use of the
  // physical register has been deleted by two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  VNInfo *ValNo = interval.getVNInfoAt(start);
  bool Extend = ValNo != 0;
  if (!Extend)
    ValNo = interval.getNextValue(start, CopyMI, VNInfoAllocator);
  if (Extend && MO.isEarlyClobber())
    // Note an earlyclobber redef of an existing value.
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}
|
|
|
|
|
2004-07-23 23:24:19 +02:00
|
|
|
/// handleRegisterDef - Build the live interval for the register defined by
/// operand MOIdx of MI, dispatching on the register's class: virtual
/// registers go through handleVirtualRegisterDef, physical registers through
/// handlePhysicalRegisterDef.
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else {
    // For a physical register def, pass along the defining instruction when
    // it is a copy so the interval's value number can record its source.
    MachineInstr *CopyMI = NULL;
    if (MI->isCopyLike())
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
  }
}
|
|
|
|
|
2007-02-19 22:49:54 +01:00
|
|
|
/// handleLiveInRegister - Create the live range for a register that is
/// live-in to MBB, starting at MIIdx (the block start). The value is modeled
/// as a PHI def at the block start; the range extends to the first kill or
/// redefinition found in the block. If isAlias is true, an unused live-in is
/// treated as dead rather than live-through.
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG(dbgs() << "\t\tlivein register: " << PrintReg(interval.reg, tri_));

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
    if (mi == E)
      // MBB is empty except for DBG_VALUE's.
      return;
  }

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  // The block-start index has no instruction; advance to the first real one.
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  // Scan the block for the first instruction that either kills or redefines
  // the register; that determines where the live-in range ends.
  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->definesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = getMBBEndIdx(MBB);
    }
  }

  // The live-in value is defined by a PHI at the block start index, which
  // must not correspond to an actual instruction.
  SlotIndex defIdx = getMBBStartIdx(MBB);
  assert(getInstructionFromIndex(defIdx) == 0 &&
         "PHI def index points at actual instruction.");
  VNInfo *vni =
    interval.getNextValue(defIdx, 0, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}
|
|
|
|
|
2003-11-20 04:32:25 +01:00
|
|
|
/// computeIntervals - computes the live intervals for virtual
/// registers. for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  // Registers with <undef> uses; they get empty intervals at the end.
  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      // DBG_VALUE instructions do not get slot indices.
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  // blocks.
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}
|
2003-12-05 11:38:28 +01:00
|
|
|
|
2008-08-13 23:49:13 +02:00
|
|
|
/// createInterval - Allocate a fresh LiveInterval for the given register.
/// Physical registers receive an effectively infinite spill weight so the
/// allocator never tries to spill them; virtual registers start at zero.
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = 0.0F;
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    Weight = HUGE_VALF;
  return new LiveInterval(reg, Weight);
}
|
2007-11-12 07:35:08 +01:00
|
|
|
|
2009-02-08 12:04:35 +01:00
|
|
|
/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  // Same register (and therefore same initial weight) as the original; the
  // ranges and value numbers are deep-copied via LiveInterval::Copy.
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}
|
|
|
|
|
2011-02-08 01:03:05 +01:00
|
|
|
/// shrinkToUses - After removing some uses of a register, shrink its live
|
|
|
|
/// range to just the remaining uses. This method does not compute reaching
|
|
|
|
/// defs for new uses, and it doesn't remove dead defs.
|
2011-03-17 21:37:07 +01:00
|
|
|
bool LiveIntervals::shrinkToUses(LiveInterval *li,
|
2011-03-08 00:29:10 +01:00
|
|
|
SmallVectorImpl<MachineInstr*> *dead) {
|
2011-02-08 01:03:05 +01:00
|
|
|
DEBUG(dbgs() << "Shrink: " << *li << '\n');
|
|
|
|
assert(TargetRegisterInfo::isVirtualRegister(li->reg)
|
|
|
|
&& "Can't only shrink physical registers");
|
|
|
|
// Find all the values used, including PHI kills.
|
|
|
|
SmallVector<std::pair<SlotIndex, VNInfo*>, 16> WorkList;
|
|
|
|
|
|
|
|
// Visit all instructions reading li->reg.
|
|
|
|
for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li->reg);
|
|
|
|
MachineInstr *UseMI = I.skipInstruction();) {
|
|
|
|
if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
|
|
|
|
continue;
|
|
|
|
SlotIndex Idx = getInstructionIndex(UseMI).getUseIndex();
|
|
|
|
VNInfo *VNI = li->getVNInfoAt(Idx);
|
2011-03-18 04:06:04 +01:00
|
|
|
if (!VNI) {
|
|
|
|
// This shouldn't happen: readsVirtualRegister returns true, but there is
|
|
|
|
// no live value. It is likely caused by a target getting <undef> flags
|
|
|
|
// wrong.
|
|
|
|
DEBUG(dbgs() << Idx << '\t' << *UseMI
|
|
|
|
<< "Warning: Instr claims to read non-existent value in "
|
|
|
|
<< *li << '\n');
|
|
|
|
continue;
|
|
|
|
}
|
2011-02-08 01:03:05 +01:00
|
|
|
if (VNI->def == Idx) {
|
|
|
|
// Special case: An early-clobber tied operand reads and writes the
|
|
|
|
// register one slot early.
|
|
|
|
Idx = Idx.getPrevSlot();
|
|
|
|
VNI = li->getVNInfoAt(Idx);
|
|
|
|
assert(VNI && "Early-clobber tied value not available");
|
|
|
|
}
|
|
|
|
WorkList.push_back(std::make_pair(Idx, VNI));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create a new live interval with only minimal live segments per def.
|
|
|
|
LiveInterval NewLI(li->reg, 0);
|
|
|
|
for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
|
|
|
|
I != E; ++I) {
|
|
|
|
VNInfo *VNI = *I;
|
|
|
|
if (VNI->isUnused())
|
|
|
|
continue;
|
2011-04-07 20:43:14 +02:00
|
|
|
// We may eliminate PHI values, so recompute PHIKill flags.
|
|
|
|
VNI->setHasPHIKill(false);
|
2011-02-08 01:03:05 +01:00
|
|
|
NewLI.addRange(LiveRange(VNI->def, VNI->def.getNextSlot(), VNI));
|
2011-03-07 19:56:16 +01:00
|
|
|
|
|
|
|
// A use tied to an early-clobber def ends at the load slot and isn't caught
|
|
|
|
// above. Catch it here instead. This probably only ever happens for inline
|
|
|
|
// assembly.
|
|
|
|
if (VNI->def.isUse())
|
|
|
|
if (VNInfo *UVNI = li->getVNInfoAt(VNI->def.getLoadIndex()))
|
|
|
|
WorkList.push_back(std::make_pair(VNI->def.getLoadIndex(), UVNI));
|
2011-02-08 01:03:05 +01:00
|
|
|
}
|
|
|
|
|
2011-03-02 01:33:03 +01:00
|
|
|
// Keep track of the PHIs that are in use.
|
|
|
|
SmallPtrSet<VNInfo*, 8> UsedPHIs;
|
|
|
|
|
2011-02-08 01:03:05 +01:00
|
|
|
// Extend intervals to reach all uses in WorkList.
|
|
|
|
while (!WorkList.empty()) {
|
|
|
|
SlotIndex Idx = WorkList.back().first;
|
|
|
|
VNInfo *VNI = WorkList.back().second;
|
|
|
|
WorkList.pop_back();
|
|
|
|
const MachineBasicBlock *MBB = getMBBFromIndex(Idx);
|
|
|
|
SlotIndex BlockStart = getMBBStartIdx(MBB);
|
2011-03-02 01:33:03 +01:00
|
|
|
|
|
|
|
// Extend the live range for VNI to be live at Idx.
|
|
|
|
if (VNInfo *ExtVNI = NewLI.extendInBlock(BlockStart, Idx)) {
|
2011-03-02 02:43:30 +01:00
|
|
|
(void)ExtVNI;
|
2011-03-02 01:33:03 +01:00
|
|
|
assert(ExtVNI == VNI && "Unexpected existing value number");
|
|
|
|
// Is this a PHIDef we haven't seen before?
|
2011-03-03 01:20:51 +01:00
|
|
|
if (!VNI->isPHIDef() || VNI->def != BlockStart || !UsedPHIs.insert(VNI))
|
2011-03-02 01:33:03 +01:00
|
|
|
continue;
|
|
|
|
// The PHI is live, make sure the predecessors are live-out.
|
|
|
|
for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
|
|
|
|
PE = MBB->pred_end(); PI != PE; ++PI) {
|
|
|
|
SlotIndex Stop = getMBBEndIdx(*PI).getPrevSlot();
|
|
|
|
VNInfo *PVNI = li->getVNInfoAt(Stop);
|
|
|
|
// A predecessor is not required to have a live-out value for a PHI.
|
|
|
|
if (PVNI) {
|
2011-04-07 20:43:14 +02:00
|
|
|
PVNI->setHasPHIKill(true);
|
2011-03-02 01:33:03 +01:00
|
|
|
WorkList.push_back(std::make_pair(Stop, PVNI));
|
2011-02-08 01:03:05 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
// VNI is live-in to MBB.
|
|
|
|
DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
|
|
|
|
NewLI.addRange(LiveRange(BlockStart, Idx.getNextSlot(), VNI));
|
|
|
|
|
|
|
|
// Make sure VNI is live-out from the predecessors.
|
|
|
|
for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
|
|
|
|
PE = MBB->pred_end(); PI != PE; ++PI) {
|
|
|
|
SlotIndex Stop = getMBBEndIdx(*PI).getPrevSlot();
|
|
|
|
assert(li->getVNInfoAt(Stop) == VNI && "Wrong value out of predecessor");
|
|
|
|
WorkList.push_back(std::make_pair(Stop, VNI));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Handle dead values.
|
2011-03-17 21:37:07 +01:00
|
|
|
bool CanSeparate = false;
|
2011-02-08 01:03:05 +01:00
|
|
|
for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
|
|
|
|
I != E; ++I) {
|
|
|
|
VNInfo *VNI = *I;
|
|
|
|
if (VNI->isUnused())
|
|
|
|
continue;
|
|
|
|
LiveInterval::iterator LII = NewLI.FindLiveRangeContaining(VNI->def);
|
|
|
|
assert(LII != NewLI.end() && "Missing live range for PHI");
|
|
|
|
if (LII->end != VNI->def.getNextSlot())
|
|
|
|
continue;
|
2011-03-02 01:33:01 +01:00
|
|
|
if (VNI->isPHIDef()) {
|
2011-02-08 01:03:05 +01:00
|
|
|
// This is a dead PHI. Remove it.
|
|
|
|
VNI->setIsUnused(true);
|
|
|
|
NewLI.removeRange(*LII);
|
2011-03-17 21:37:07 +01:00
|
|
|
DEBUG(dbgs() << "Dead PHI at " << VNI->def << " may separate interval\n");
|
|
|
|
CanSeparate = true;
|
2011-02-08 01:03:05 +01:00
|
|
|
} else {
|
|
|
|
// This is a dead def. Make sure the instruction knows.
|
|
|
|
MachineInstr *MI = getInstructionFromIndex(VNI->def);
|
|
|
|
assert(MI && "No instruction defining live value");
|
|
|
|
MI->addRegisterDead(li->reg, tri_);
|
2011-03-08 00:29:10 +01:00
|
|
|
if (dead && MI->allDefsAreDead()) {
|
2011-03-16 23:56:08 +01:00
|
|
|
DEBUG(dbgs() << "All defs dead: " << VNI->def << '\t' << *MI);
|
2011-03-08 00:29:10 +01:00
|
|
|
dead->push_back(MI);
|
|
|
|
}
|
2011-02-08 01:03:05 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Move the trimmed ranges back.
|
|
|
|
li->ranges.swap(NewLI.ranges);
|
2011-03-16 23:56:08 +01:00
|
|
|
DEBUG(dbgs() << "Shrunk: " << *li << '\n');
|
2011-03-17 21:37:07 +01:00
|
|
|
return CanSeparate;
|
2011-02-08 01:03:05 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2007-11-12 07:35:08 +01:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Register allocator hooks.
|
|
|
|
//
|
|
|
|
|
2011-02-04 20:33:11 +01:00
|
|
|
/// getLastSplitPoint - Return the last possible insertion point for spill
/// code affecting li in mbb. Normally this is the first terminator, but if
/// li is live into mbb's landing-pad successor, code must be inserted before
/// the last call in the block (the one that can throw).
MachineBasicBlock::iterator
LiveIntervals::getLastSplitPoint(const LiveInterval &li,
                                 MachineBasicBlock *mbb) const {
  const MachineBasicBlock *lpad = mbb->getLandingPadSuccessor();

  // If li is not live into a landing pad, we can insert spill code before the
  // first terminator.
  if (!lpad || !isLiveInToMBB(li, lpad))
    return mbb->getFirstTerminator();

  // When there is a landing pad, spill code must go before the call instruction
  // that can throw.
  MachineBasicBlock::iterator I = mbb->end(), B = mbb->begin();
  while (I != B) {
    --I;
    if (I->getDesc().isCall())
      return I;
  }
  // The block contains no calls that can throw, so use the first terminator.
  return mbb->getFirstTerminator();
}
|
|
|
|
|
2011-02-08 22:13:03 +01:00
|
|
|
/// addKillFlags - Add kill flags to operands of instructions that end a live
/// range of a virtual register. Physical registers and registers with no
/// non-debug uses are skipped.
void LiveIntervals::addKillFlags() {
  for (iterator I = begin(), E = end(); I != E; ++I) {
    unsigned Reg = I->first;
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (mri_->reg_nodbg_empty(Reg))
      continue;
    LiveInterval *LI = I->second;

    // Every instruction that kills Reg corresponds to a live range end point.
    for (LiveInterval::iterator RI = LI->begin(), RE = LI->end(); RI != RE;
         ++RI) {
      // A LOAD index indicates an MBB edge.
      if (RI->end.isLoad())
        continue;
      MachineInstr *MI = getInstructionFromIndex(RI->end);
      if (!MI)
        continue;
      MI->addRegisterKilled(Reg, NULL);
    }
  }
}
|
|
|
|
|
2008-02-22 10:24:50 +01:00
|
|
|
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    // Skip the interval's own register and null operands.
    if (Reg == 0 || Reg == li.reg)
      continue;

    // Ignore non-allocatable physical registers (e.g. frame pointer).
    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operand!");
    RegOp = MO.getReg();
    // NOTE(review): with assertions enabled the loop breaks after the first
    // candidate, so the multi-operand assert above cannot fire on later
    // operands; in NDEBUG builds the scan continues and RegOp ends up as the
    // LAST qualifying operand. Confirm the intended #ifndef/#ifdef polarity.
#ifndef NDEBUG
    break;
#endif
  }
  return RegOp;
}
|
|
|
|
|
|
|
|
/// isValNoAvailableAt - Return true if the val# of the specified interval
|
|
|
|
/// which reaches the given instruction also reaches the specified use index.
|
|
|
|
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
|
2009-11-04 00:52:08 +01:00
|
|
|
SlotIndex UseIdx) const {
|
2010-10-11 23:45:03 +02:00
|
|
|
VNInfo *UValNo = li.getVNInfoAt(UseIdx);
|
|
|
|
return UValNo && UValNo == li.getVNInfoAt(getInstructionIndex(MI));
|
2008-02-22 10:24:50 +01:00
|
|
|
}
|
|
|
|
|
2007-11-12 07:35:08 +01:00
|
|
|
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
/// NOTE(review): the isLoad out-parameter is not written by this overload;
/// callers appear to rely on the interval-wide overload to set it — confirm.
bool
LiveIntervals::isReMaterializable(const LiveInterval &li,
                                  const VNInfo *ValNo, MachineInstr *MI,
                                  const SmallVectorImpl<LiveInterval*> *SpillIs,
                                  bool &isLoad) {
  if (DisableReMat)
    return false;

  // The target decides basic rematerializability (uses alias analysis).
  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    // Every use of li reached by ValNo must also see the same value of the
    // implicitly-used register that MI sees.
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
         ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.getVNInfoAt(UseIdx) != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    if (SpillIs)
      for (unsigned i = 0, e = SpillIs->size(); i != e; ++i)
        if (ImpUse == (*SpillIs)[i]->reg)
          return false;
  }
  return true;
}
|
|
|
|
|
2008-10-24 04:05:00 +02:00
|
|
|
/// isReMaterializable - Returns true if the definition MI of the specified
|
|
|
|
/// val# of the specified interval is re-materializable.
|
|
|
|
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
|
|
|
|
const VNInfo *ValNo, MachineInstr *MI) {
|
|
|
|
bool Dummy2;
|
2011-03-10 02:21:58 +01:00
|
|
|
return isReMaterializable(li, ValNo, MI, 0, Dummy2);
|
2008-10-24 04:05:00 +02:00
|
|
|
}
|
|
|
|
|
2007-12-06 01:01:56 +01:00
|
|
|
/// isReMaterializable - Returns true if every definition of MI of every
|
|
|
|
/// val# of the specified interval is re-materializable.
|
2010-11-10 20:18:47 +01:00
|
|
|
bool
|
|
|
|
LiveIntervals::isReMaterializable(const LiveInterval &li,
|
2011-03-10 02:21:58 +01:00
|
|
|
const SmallVectorImpl<LiveInterval*> *SpillIs,
|
2010-11-10 20:18:47 +01:00
|
|
|
bool &isLoad) {
|
2007-12-06 01:01:56 +01:00
|
|
|
isLoad = false;
|
|
|
|
for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
|
|
|
|
i != e; ++i) {
|
|
|
|
const VNInfo *VNI = *i;
|
2009-06-17 23:01:20 +02:00
|
|
|
if (VNI->isUnused())
|
2007-12-06 01:01:56 +01:00
|
|
|
continue; // Dead val#.
|
|
|
|
// Is the def for the val# rematerializable?
|
2009-06-17 23:01:20 +02:00
|
|
|
MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
|
2010-09-25 14:04:16 +02:00
|
|
|
if (!ReMatDefMI)
|
|
|
|
return false;
|
2007-12-06 01:01:56 +01:00
|
|
|
bool DefIsLoad = false;
|
2008-02-22 10:24:50 +01:00
|
|
|
if (!ReMatDefMI ||
|
2008-09-30 17:44:16 +02:00
|
|
|
!isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
|
2007-11-12 07:35:08 +01:00
|
|
|
return false;
|
2007-12-06 01:01:56 +01:00
|
|
|
isLoad |= DefIsLoad;
|
2007-11-12 07:35:08 +01:00
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2008-02-25 09:50:41 +01:00
|
|
|
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
/// On success, FoldOps receives the operand indices that may be folded and
/// MRInfo accumulates the VirtRegMap mod/ref classification of the access.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s): the tied def covers them, so
      // they are classified modref and NOT added to FoldOps.
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
|
2010-08-12 22:01:23 +02:00
|
|
|
|
2008-02-25 09:50:41 +01:00
|
|
|
|
|
|
|
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successul, MI is updated with the newly created MI and
/// returns true.
/// NOTE(review): InstrIdx is not referenced in this body — confirm it is
/// intentionally unused.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  // Fold from a stack slot (spill/reload) or from a rematerialized def.
  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    // Transfer the VirtRegMap bookkeeping from the old instruction to the
    // folded one, then swap it into the index maps.
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI->eraseFromParent();
    MI = fmi;
    ++numFolds;
    return true;
  }
  return false;
}
|
|
|
|
|
2007-12-05 04:22:34 +01:00
|
|
|
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  // Let the target decide whether the remaining operands can be folded.
  return tii_->canFoldMemoryOperand(MI, FoldOps);
}
|
|
|
|
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
|
2009-11-04 00:52:08 +01:00
|
|
|
LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
|
|
|
|
|
|
|
|
MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
|
|
|
|
|
|
|
|
if (mbb == 0)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
for (++itr; itr != li.ranges.end(); ++itr) {
|
|
|
|
MachineBasicBlock *mbb2 =
|
|
|
|
indexes_->getMBBCoveringRange(itr->start, itr->end);
|
|
|
|
|
|
|
|
if (mbb2 != mbb)
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
return false;
|
|
|
|
}
|
2009-11-04 00:52:08 +01:00
|
|
|
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2008-02-22 10:24:50 +01:00
|
|
|
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
|
|
|
|
/// interval on to-be re-materialized operands of MI) with new register.
|
|
|
|
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
|
|
|
|
MachineInstr *MI, unsigned NewVReg,
|
|
|
|
VirtRegMap &vrm) {
|
|
|
|
// There is an implicit use. That means one of the other operand is
|
|
|
|
// being remat'ed and the remat'ed instruction has li.reg as an
|
|
|
|
// use operand. Make sure we rewrite that as well.
|
|
|
|
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
|
|
|
|
MachineOperand &MO = MI->getOperand(i);
|
2008-10-03 17:45:36 +02:00
|
|
|
if (!MO.isReg())
|
2008-02-22 10:24:50 +01:00
|
|
|
continue;
|
|
|
|
unsigned Reg = MO.getReg();
|
2011-01-10 03:58:51 +01:00
|
|
|
if (!TargetRegisterInfo::isVirtualRegister(Reg))
|
2008-02-22 10:24:50 +01:00
|
|
|
continue;
|
|
|
|
if (!vrm.isReMaterialized(Reg))
|
|
|
|
continue;
|
|
|
|
MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
|
2008-03-05 01:59:57 +01:00
|
|
|
MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
|
|
|
|
if (UseMO)
|
|
|
|
UseMO->setReg(NewVReg);
|
2008-02-22 10:24:50 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-11-12 07:35:08 +01:00
|
|
|
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
/// Rewrites every operand of MI that refers to li.reg onto a (possibly fresh)
/// virtual register NewVReg, records spill / remat bookkeeping in vrm, and
/// extends or creates the live interval for NewVReg. Returns true if folding
/// the load / store into MI may still be profitable (CanFold).
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                            bool TrySplit, SlotIndex index, SlotIndex end,
                            MachineInstr *MI,
                            MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                            unsigned Slot, int LdSlot,
                            bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                            VirtRegMap &vrm,
                            const TargetRegisterClass* rc,
                            SmallVector<int, 4> &ReMatIds,
                            const MachineLoopInfo *loopInfo,
                            unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                            DenseMap<unsigned,unsigned> &MBBVRegsMap,
                            std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  // Note: folding below can rewrite MI in place, in which case we jump back
  // here and rescan its operands from the beginning.
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
    SmallVector<unsigned, 2> Ops;
    tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          // If folding removed MI from the index maps entirely there is
          // nothing left to rescan.
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    bool HasEarlyClobber = false;
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
      if (mopj.isEarlyClobber())
        HasEarlyClobber = true;
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      // An early clobber starts at the use slot, except for an early clobber
      // tied to a use operand (yes, that is a thing).
      LiveRange LR(HasEarlyClobber && !HasUse ?
                   index.getUseIndex() : index.getDefIndex(),
                   index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
|
2007-11-29 02:06:25 +01:00
|
|
|
const VNInfo *VNI,
|
2009-09-04 22:41:11 +02:00
|
|
|
MachineBasicBlock *MBB,
|
2009-11-04 00:52:08 +01:00
|
|
|
SlotIndex Idx) const {
|
2010-06-26 00:53:05 +02:00
|
|
|
return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
}
|
|
|
|
|
2008-02-21 01:34:19 +01:00
|
|
|
/// RewriteInfo - Keep track of machine instrs that will be rewritten
|
|
|
|
/// during spilling.
|
2008-05-13 02:00:25 +02:00
|
|
|
namespace {
|
|
|
|
struct RewriteInfo {
|
2009-11-04 00:52:08 +01:00
|
|
|
SlotIndex Index;
|
2008-05-13 02:00:25 +02:00
|
|
|
MachineInstr *MI;
|
2010-06-03 02:07:47 +02:00
|
|
|
RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
|
2008-05-13 02:00:25 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
struct RewriteInfoCompare {
|
|
|
|
bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
|
|
|
|
return LHS.Index < RHS.Index;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
}
|
2008-02-21 01:34:19 +01:00
|
|
|
|
2007-11-12 07:35:08 +01:00
|
|
|
void LiveIntervals::
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
|
2007-11-12 07:35:08 +01:00
|
|
|
LiveInterval::Ranges::const_iterator &I,
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
|
2007-11-12 07:35:08 +01:00
|
|
|
unsigned Slot, int LdSlot,
|
|
|
|
bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
|
2008-02-22 10:24:50 +01:00
|
|
|
VirtRegMap &vrm,
|
2007-11-12 07:35:08 +01:00
|
|
|
const TargetRegisterClass* rc,
|
|
|
|
SmallVector<int, 4> &ReMatIds,
|
2007-12-11 03:09:15 +01:00
|
|
|
const MachineLoopInfo *loopInfo,
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
BitVector &SpillMBBs,
|
2008-08-14 00:28:50 +02:00
|
|
|
DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
|
2007-11-29 02:06:25 +01:00
|
|
|
BitVector &RestoreMBBs,
|
2008-08-14 00:28:50 +02:00
|
|
|
DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
|
|
|
|
DenseMap<unsigned,unsigned> &MBBVRegsMap,
|
2009-05-03 20:32:42 +02:00
|
|
|
std::vector<LiveInterval*> &NewLIs) {
|
2007-12-05 04:22:34 +01:00
|
|
|
bool AllCanFold = true;
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
unsigned NewVReg = 0;
|
2009-11-04 00:52:08 +01:00
|
|
|
SlotIndex start = I->start.getBaseIndex();
|
|
|
|
SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
|
2007-11-12 07:35:08 +01:00
|
|
|
|
2008-02-21 01:34:19 +01:00
|
|
|
// First collect all the def / use in this live range that will be rewritten.
|
2008-04-09 22:57:25 +02:00
|
|
|
// Make sure they are sorted according to instruction index.
|
2008-02-21 01:34:19 +01:00
|
|
|
std::vector<RewriteInfo> RewriteMIs;
|
2008-02-22 10:24:50 +01:00
|
|
|
for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
|
|
|
|
re = mri_->reg_end(); ri != re; ) {
|
2008-04-03 18:39:43 +02:00
|
|
|
MachineInstr *MI = &*ri;
|
2008-02-21 01:34:19 +01:00
|
|
|
MachineOperand &O = ri.getOperand();
|
|
|
|
++ri;
|
2010-02-10 01:55:42 +01:00
|
|
|
if (MI->isDebugValue()) {
|
2010-04-26 09:38:55 +02:00
|
|
|
// Modify DBG_VALUE now that the value is in a spill slot.
|
2010-04-29 01:52:26 +02:00
|
|
|
if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
|
2010-04-26 20:37:21 +02:00
|
|
|
uint64_t Offset = MI->getOperand(1).getImm();
|
|
|
|
const MDNode *MDPtr = MI->getOperand(2).getMetadata();
|
|
|
|
DebugLoc DL = MI->getDebugLoc();
|
2010-04-29 01:52:26 +02:00
|
|
|
int FI = isLoadSS ? LdSlot : (int)Slot;
|
|
|
|
if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
|
2010-04-26 20:37:21 +02:00
|
|
|
Offset, MDPtr, DL)) {
|
|
|
|
DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
|
|
|
|
ReplaceMachineInstrInMaps(MI, NewDV);
|
|
|
|
MachineBasicBlock *MBB = MI->getParent();
|
|
|
|
MBB->insert(MBB->erase(MI), NewDV);
|
|
|
|
continue;
|
|
|
|
}
|
2010-04-26 09:38:55 +02:00
|
|
|
}
|
2010-04-26 20:37:21 +02:00
|
|
|
|
|
|
|
DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
|
|
|
|
RemoveMachineInstrFromMaps(MI);
|
|
|
|
vrm.RemoveMachineInstrFromMaps(MI);
|
|
|
|
MI->eraseFromParent();
|
2010-02-10 01:55:42 +01:00
|
|
|
continue;
|
|
|
|
}
|
2010-05-21 18:32:16 +02:00
|
|
|
assert(!(O.isImplicit() && O.isUse()) &&
|
|
|
|
"Spilling register that's used as implicit use?");
|
2009-11-04 00:52:08 +01:00
|
|
|
SlotIndex index = getInstructionIndex(MI);
|
2008-02-21 01:34:19 +01:00
|
|
|
if (index < start || index >= end)
|
|
|
|
continue;
|
2009-07-17 21:43:40 +02:00
|
|
|
|
|
|
|
if (O.isUndef())
|
2008-07-12 03:56:02 +02:00
|
|
|
// Must be defined by an implicit def. It should not be spilled. Note,
|
|
|
|
// this is for correctness reason. e.g.
|
|
|
|
// 8 %reg1024<def> = IMPLICIT_DEF
|
|
|
|
// 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
|
|
|
|
// The live range [12, 14) are not part of the r1024 live interval since
|
|
|
|
// it's defined by an implicit def. It will not conflicts with live
|
|
|
|
// interval of r1025. Now suppose both registers are spilled, you can
|
2008-07-12 04:22:07 +02:00
|
|
|
// easily see a situation where both registers are reloaded before
|
2008-07-12 03:56:02 +02:00
|
|
|
// the INSERT_SUBREG and both target registers that would overlap.
|
|
|
|
continue;
|
2010-06-03 02:07:47 +02:00
|
|
|
RewriteMIs.push_back(RewriteInfo(index, MI));
|
2008-02-21 01:34:19 +01:00
|
|
|
}
|
|
|
|
std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
|
|
|
|
|
2008-02-23 01:33:04 +01:00
|
|
|
unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
|
2008-02-21 01:34:19 +01:00
|
|
|
// Now rewrite the defs and uses.
|
|
|
|
for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
|
|
|
|
RewriteInfo &rwi = RewriteMIs[i];
|
|
|
|
++i;
|
2009-11-04 00:52:08 +01:00
|
|
|
SlotIndex index = rwi.Index;
|
2008-02-21 01:34:19 +01:00
|
|
|
MachineInstr *MI = rwi.MI;
|
|
|
|
// If MI def and/or use the same register multiple times, then there
|
|
|
|
// are multiple entries.
|
|
|
|
while (i != e && RewriteMIs[i].MI == MI) {
|
|
|
|
assert(RewriteMIs[i].Index == index);
|
|
|
|
++i;
|
|
|
|
}
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
MachineBasicBlock *MBB = MI->getParent();
|
2008-02-23 01:33:04 +01:00
|
|
|
|
2008-05-24 01:00:04 +02:00
|
|
|
if (ImpUse && MI != ReMatDefMI) {
|
2010-03-01 21:59:38 +01:00
|
|
|
// Re-matting an instruction with virtual register use. Prevent interval
|
|
|
|
// from being spilled.
|
|
|
|
getInterval(ImpUse).markNotSpillable();
|
2008-02-23 01:33:04 +01:00
|
|
|
}
|
|
|
|
|
2008-02-21 01:34:19 +01:00
|
|
|
unsigned MBBId = MBB->getNumber();
|
2007-12-05 04:22:34 +01:00
|
|
|
unsigned ThisVReg = 0;
|
2007-12-03 10:58:48 +01:00
|
|
|
if (TrySplit) {
|
2008-08-14 00:28:50 +02:00
|
|
|
DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
|
2007-11-29 11:12:14 +01:00
|
|
|
if (NVI != MBBVRegsMap.end()) {
|
2007-12-05 04:22:34 +01:00
|
|
|
ThisVReg = NVI->second;
|
2007-11-29 11:12:14 +01:00
|
|
|
// One common case:
|
|
|
|
// x = use
|
|
|
|
// ...
|
|
|
|
// ...
|
|
|
|
// def = ...
|
|
|
|
// = use
|
2011-04-15 07:18:47 +02:00
|
|
|
// It's better to start a new interval to avoid artificially
|
2007-11-29 11:12:14 +01:00
|
|
|
// extend the new interval.
|
2010-06-03 02:07:47 +02:00
|
|
|
if (MI->readsWritesVirtualRegister(li.reg) ==
|
|
|
|
std::make_pair(false,true)) {
|
2007-11-29 11:12:14 +01:00
|
|
|
MBBVRegsMap.erase(MBB->getNumber());
|
2007-12-05 04:22:34 +01:00
|
|
|
ThisVReg = 0;
|
2007-11-29 11:12:14 +01:00
|
|
|
}
|
|
|
|
}
|
2007-11-28 02:28:46 +01:00
|
|
|
}
|
2007-12-05 04:22:34 +01:00
|
|
|
|
|
|
|
bool IsNew = ThisVReg == 0;
|
|
|
|
if (IsNew) {
|
|
|
|
// This ends the previous live interval. If all of its def / use
|
|
|
|
// can be folded, give it a low spill weight.
|
|
|
|
if (NewVReg && TrySplit && AllCanFold) {
|
|
|
|
LiveInterval &nI = getOrCreateInterval(NewVReg);
|
|
|
|
nI.weight /= 10.0F;
|
|
|
|
}
|
|
|
|
AllCanFold = true;
|
|
|
|
}
|
|
|
|
NewVReg = ThisVReg;
|
|
|
|
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
bool HasDef = false;
|
|
|
|
bool HasUse = false;
|
2008-02-22 10:24:50 +01:00
|
|
|
bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
|
2008-06-06 09:54:39 +02:00
|
|
|
index, end, MI, ReMatOrigDefMI, ReMatDefMI,
|
|
|
|
Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
|
|
|
|
CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
|
2009-05-03 20:32:42 +02:00
|
|
|
ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
if (!HasDef && !HasUse)
|
|
|
|
continue;
|
|
|
|
|
2007-12-05 04:22:34 +01:00
|
|
|
AllCanFold &= CanFold;
|
|
|
|
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
// Update weight of spill interval.
|
|
|
|
LiveInterval &nI = getOrCreateInterval(NewVReg);
|
2007-12-03 10:58:48 +01:00
|
|
|
if (!TrySplit) {
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
// The spill weight is now infinity as it cannot be spilled again.
|
2010-03-01 21:59:38 +01:00
|
|
|
nI.markNotSpillable();
|
2007-11-29 02:06:25 +01:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Keep track of the last def and first use in each MBB.
|
|
|
|
if (HasDef) {
|
|
|
|
if (MI != ReMatOrigDefMI || !CanDelete) {
|
|
|
|
bool HasKill = false;
|
|
|
|
if (!HasUse)
|
2009-11-04 00:52:08 +01:00
|
|
|
HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
|
2007-11-29 02:06:25 +01:00
|
|
|
else {
|
2007-11-29 11:12:14 +01:00
|
|
|
// If this is a two-address code, then this index starts a new VNInfo.
|
2009-11-04 00:52:08 +01:00
|
|
|
const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
|
2007-11-29 02:06:25 +01:00
|
|
|
if (VNI)
|
2009-11-04 00:52:08 +01:00
|
|
|
HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
}
|
2008-08-14 00:28:50 +02:00
|
|
|
DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
|
2007-12-01 05:42:39 +01:00
|
|
|
SpillIdxes.find(MBBId);
|
2007-11-29 02:06:25 +01:00
|
|
|
if (!HasKill) {
|
2007-11-29 11:12:14 +01:00
|
|
|
if (SII == SpillIdxes.end()) {
|
|
|
|
std::vector<SRInfo> S;
|
|
|
|
S.push_back(SRInfo(index, NewVReg, true));
|
|
|
|
SpillIdxes.insert(std::make_pair(MBBId, S));
|
|
|
|
} else if (SII->second.back().vreg != NewVReg) {
|
|
|
|
SII->second.push_back(SRInfo(index, NewVReg, true));
|
2009-09-04 22:41:11 +02:00
|
|
|
} else if (index > SII->second.back().index) {
|
2007-11-29 02:06:25 +01:00
|
|
|
// If there is an earlier def and this is a two-address
|
|
|
|
// instruction, then it's not possible to fold the store (which
|
|
|
|
// would also fold the load).
|
2007-11-29 11:12:14 +01:00
|
|
|
SRInfo &Info = SII->second.back();
|
|
|
|
Info.index = index;
|
|
|
|
Info.canFold = !HasUse;
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
}
|
2007-11-29 02:06:25 +01:00
|
|
|
SpillMBBs.set(MBBId);
|
2007-12-01 05:42:39 +01:00
|
|
|
} else if (SII != SpillIdxes.end() &&
|
|
|
|
SII->second.back().vreg == NewVReg &&
|
2009-09-04 22:41:11 +02:00
|
|
|
index > SII->second.back().index) {
|
2007-12-01 05:42:39 +01:00
|
|
|
// There is an earlier def that's not killed (must be two-address).
|
|
|
|
// The spill is no longer needed.
|
|
|
|
SII->second.pop_back();
|
|
|
|
if (SII->second.empty()) {
|
|
|
|
SpillIdxes.erase(MBBId);
|
|
|
|
SpillMBBs.reset(MBBId);
|
|
|
|
}
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
}
|
|
|
|
}
|
2007-11-29 02:06:25 +01:00
|
|
|
}
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
|
2007-11-29 02:06:25 +01:00
|
|
|
if (HasUse) {
|
2008-08-14 00:28:50 +02:00
|
|
|
DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
|
2007-11-29 02:06:25 +01:00
|
|
|
SpillIdxes.find(MBBId);
|
2007-11-29 11:12:14 +01:00
|
|
|
if (SII != SpillIdxes.end() &&
|
|
|
|
SII->second.back().vreg == NewVReg &&
|
2009-09-04 22:41:11 +02:00
|
|
|
index > SII->second.back().index)
|
2007-11-29 02:06:25 +01:00
|
|
|
// Use(s) following the last def, it's not safe to fold the spill.
|
2007-11-29 11:12:14 +01:00
|
|
|
SII->second.back().canFold = false;
|
2008-08-14 00:28:50 +02:00
|
|
|
DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
|
2007-11-29 02:06:25 +01:00
|
|
|
RestoreIdxes.find(MBBId);
|
2007-11-29 11:12:14 +01:00
|
|
|
if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
|
2007-11-29 02:06:25 +01:00
|
|
|
// If we are splitting live intervals, only fold if it's the first
|
|
|
|
// use and there isn't another use later in the MBB.
|
2007-11-29 11:12:14 +01:00
|
|
|
RII->second.back().canFold = false;
|
2007-11-29 02:06:25 +01:00
|
|
|
else if (IsNew) {
|
|
|
|
// Only need a reload if there isn't an earlier def / use.
|
2007-11-29 11:12:14 +01:00
|
|
|
if (RII == RestoreIdxes.end()) {
|
|
|
|
std::vector<SRInfo> Infos;
|
|
|
|
Infos.push_back(SRInfo(index, NewVReg, true));
|
|
|
|
RestoreIdxes.insert(std::make_pair(MBBId, Infos));
|
|
|
|
} else {
|
|
|
|
RII->second.push_back(SRInfo(index, NewVReg, true));
|
|
|
|
}
|
2007-11-29 02:06:25 +01:00
|
|
|
RestoreMBBs.set(MBBId);
|
|
|
|
}
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
}
|
2007-11-29 02:06:25 +01:00
|
|
|
|
|
|
|
// Update spill weight.
|
2007-12-11 03:09:15 +01:00
|
|
|
unsigned loopDepth = loopInfo->getLoopDepth(MBB);
|
2008-06-21 08:45:54 +02:00
|
|
|
nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
|
2007-11-12 07:35:08 +01:00
|
|
|
}
|
2007-12-05 04:22:34 +01:00
|
|
|
|
|
|
|
if (NewVReg && TrySplit && AllCanFold) {
|
|
|
|
// If all of its def / use can be folded, give it a low spill weight.
|
|
|
|
LiveInterval &nI = getOrCreateInterval(NewVReg);
|
|
|
|
nI.weight /= 10.0F;
|
|
|
|
}
|
2007-11-12 07:35:08 +01:00
|
|
|
}
|
|
|
|
|
2009-11-04 00:52:08 +01:00
|
|
|
bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
|
2009-09-04 22:41:11 +02:00
|
|
|
unsigned vr, BitVector &RestoreMBBs,
|
2008-08-14 00:28:50 +02:00
|
|
|
DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
|
2007-11-29 11:12:14 +01:00
|
|
|
if (!RestoreMBBs[Id])
|
|
|
|
return false;
|
|
|
|
std::vector<SRInfo> &Restores = RestoreIdxes[Id];
|
|
|
|
for (unsigned i = 0, e = Restores.size(); i != e; ++i)
|
|
|
|
if (Restores[i].index == index &&
|
|
|
|
Restores[i].vreg == vr &&
|
|
|
|
Restores[i].canFold)
|
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2009-11-04 00:52:08 +01:00
|
|
|
void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
|
2009-09-04 22:41:11 +02:00
|
|
|
unsigned vr, BitVector &RestoreMBBs,
|
2008-08-14 00:28:50 +02:00
|
|
|
DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
|
2007-11-29 11:12:14 +01:00
|
|
|
if (!RestoreMBBs[Id])
|
|
|
|
return;
|
|
|
|
std::vector<SRInfo> &Restores = RestoreIdxes[Id];
|
|
|
|
for (unsigned i = 0, e = Restores.size(); i != e; ++i)
|
|
|
|
if (Restores[i].index == index && Restores[i].vreg)
|
2009-11-04 00:52:08 +01:00
|
|
|
Restores[i].index = SlotIndex();
|
2007-11-29 11:12:14 +01:00
|
|
|
}
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
|
2008-04-11 19:53:36 +02:00
|
|
|
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
|
|
|
|
/// spilled and create empty intervals for their uses.
|
|
|
|
void
|
|
|
|
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
|
|
|
|
const TargetRegisterClass* rc,
|
|
|
|
std::vector<LiveInterval*> &NewLIs) {
|
2008-04-03 18:39:43 +02:00
|
|
|
for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
|
|
|
|
re = mri_->reg_end(); ri != re; ) {
|
2008-04-11 19:53:36 +02:00
|
|
|
MachineOperand &O = ri.getOperand();
|
2008-04-03 18:39:43 +02:00
|
|
|
MachineInstr *MI = &*ri;
|
|
|
|
++ri;
|
2010-03-30 07:49:07 +02:00
|
|
|
if (MI->isDebugValue()) {
|
|
|
|
// Remove debug info for now.
|
|
|
|
O.setReg(0U);
|
|
|
|
DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
|
|
|
|
continue;
|
|
|
|
}
|
2008-04-11 19:53:36 +02:00
|
|
|
if (O.isDef()) {
|
2010-02-09 20:54:29 +01:00
|
|
|
assert(MI->isImplicitDef() &&
|
2008-04-11 19:53:36 +02:00
|
|
|
"Register def was not rewritten?");
|
|
|
|
RemoveMachineInstrFromMaps(MI);
|
|
|
|
vrm.RemoveMachineInstrFromMaps(MI);
|
|
|
|
MI->eraseFromParent();
|
|
|
|
} else {
|
|
|
|
// This must be an use of an implicit_def so it's not part of the live
|
|
|
|
// interval. Create a new empty live interval for it.
|
|
|
|
// FIXME: Can we simply erase some of the instructions? e.g. Stores?
|
|
|
|
unsigned NewVReg = mri_->createVirtualRegister(rc);
|
|
|
|
vrm.grow();
|
|
|
|
vrm.setIsImplicitlyDefined(NewVReg);
|
|
|
|
NewLIs.push_back(&getOrCreateInterval(NewVReg));
|
|
|
|
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
|
|
|
|
MachineOperand &MO = MI->getOperand(i);
|
2009-06-30 10:49:04 +02:00
|
|
|
if (MO.isReg() && MO.getReg() == li.reg) {
|
2008-04-11 19:53:36 +02:00
|
|
|
MO.setReg(NewVReg);
|
2009-06-30 10:49:04 +02:00
|
|
|
MO.setIsUndef();
|
|
|
|
}
|
2008-04-11 19:53:36 +02:00
|
|
|
}
|
|
|
|
}
|
2008-04-03 18:39:43 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-03-01 21:59:38 +01:00
|
|
|
float
|
|
|
|
LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
|
|
|
|
// Limit the loop depth ridiculousness.
|
|
|
|
if (loopDepth > 200)
|
|
|
|
loopDepth = 200;
|
|
|
|
|
|
|
|
// The loop depth is used to roughly estimate the number of times the
|
|
|
|
// instruction is executed. Something like 10^d is simple, but will quickly
|
|
|
|
// overflow a float. This expression behaves like 10^d for small d, but is
|
|
|
|
// more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
|
|
|
|
// headroom before overflow.
|
2011-03-31 14:11:33 +02:00
|
|
|
// By the way, powf() might be unavailable here. For consistency,
|
|
|
|
// We may take pow(double,double).
|
|
|
|
float lc = std::pow(1 + (100.0 / (loopDepth + 10)), (double)loopDepth);
|
2010-03-01 21:59:38 +01:00
|
|
|
|
|
|
|
return (isDef + isUse) * lc;
|
|
|
|
}
|
|
|
|
|
2011-02-15 00:15:38 +01:00
|
|
|
/// normalizeSpillWeights - Rescale each newly created interval's spill
/// weight by its size, via normalizeSpillWeight.
static void normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    LI->weight = normalizeSpillWeight(LI->weight, LI->getSize());
  }
}
|
|
|
|
|
2007-11-12 07:35:08 +01:00
|
|
|
std::vector<LiveInterval*> LiveIntervals::
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
addIntervalsForSpills(const LiveInterval &li,
|
2011-03-10 02:21:58 +01:00
|
|
|
const SmallVectorImpl<LiveInterval*> *SpillIs,
|
2009-05-03 20:32:42 +02:00
|
|
|
const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
|
2010-03-01 21:59:38 +01:00
|
|
|
assert(li.isSpillable() && "attempt to spill already spilled interval!");
|
2007-11-12 07:35:08 +01:00
|
|
|
|
2009-08-22 22:18:03 +02:00
|
|
|
DEBUG({
|
2010-01-04 23:49:02 +01:00
|
|
|
dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
|
|
|
|
li.print(dbgs(), tri_);
|
|
|
|
dbgs() << '\n';
|
2009-08-22 22:18:03 +02:00
|
|
|
});
|
2007-11-12 07:35:08 +01:00
|
|
|
|
2008-12-05 18:00:16 +01:00
|
|
|
// Each bit specify whether a spill is required in the MBB.
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
BitVector SpillMBBs(mf_->getNumBlockIDs());
|
2008-08-14 00:28:50 +02:00
|
|
|
DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
|
2007-11-29 02:06:25 +01:00
|
|
|
BitVector RestoreMBBs(mf_->getNumBlockIDs());
|
2008-08-14 00:28:50 +02:00
|
|
|
DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
|
|
|
|
DenseMap<unsigned,unsigned> MBBVRegsMap;
|
2007-11-12 07:35:08 +01:00
|
|
|
std::vector<LiveInterval*> NewLIs;
|
2008-02-22 10:24:50 +01:00
|
|
|
const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
|
2007-11-12 07:35:08 +01:00
|
|
|
|
|
|
|
unsigned NumValNums = li.getNumValNums();
|
|
|
|
SmallVector<MachineInstr*, 4> ReMatDefs;
|
|
|
|
ReMatDefs.resize(NumValNums, NULL);
|
|
|
|
SmallVector<MachineInstr*, 4> ReMatOrigDefs;
|
|
|
|
ReMatOrigDefs.resize(NumValNums, NULL);
|
|
|
|
SmallVector<int, 4> ReMatIds;
|
|
|
|
ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
|
|
|
|
BitVector ReMatDelete(NumValNums);
|
|
|
|
unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
|
|
|
|
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
// Spilling a split live interval. It cannot be split any further. Also,
|
|
|
|
// it's also guaranteed to be a single val# / range interval.
|
|
|
|
if (vrm.getPreSplitReg(li.reg)) {
|
|
|
|
vrm.setIsSplitFromReg(li.reg, 0);
|
2007-12-05 11:24:35 +01:00
|
|
|
// Unset the split kill marker on the last use.
|
2009-11-04 00:52:08 +01:00
|
|
|
SlotIndex KillIdx = vrm.getKillPoint(li.reg);
|
|
|
|
if (KillIdx != SlotIndex()) {
|
2007-12-05 11:24:35 +01:00
|
|
|
MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
|
|
|
|
assert(KillMI && "Last use disappeared?");
|
|
|
|
int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
|
|
|
|
assert(KillOp != -1 && "Last use disappeared?");
|
2007-12-30 22:56:09 +01:00
|
|
|
KillMI->getOperand(KillOp).setIsKill(false);
|
2007-12-05 11:24:35 +01:00
|
|
|
}
|
2007-12-05 10:51:10 +01:00
|
|
|
vrm.removeKillPoint(li.reg);
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
bool DefIsReMat = vrm.isReMaterialized(li.reg);
|
|
|
|
Slot = vrm.getStackSlot(li.reg);
|
|
|
|
assert(Slot != VirtRegMap::MAX_STACK_SLOT);
|
|
|
|
MachineInstr *ReMatDefMI = DefIsReMat ?
|
|
|
|
vrm.getReMaterializedMI(li.reg) : NULL;
|
|
|
|
int LdSlot = 0;
|
|
|
|
bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
|
|
|
|
bool isLoad = isLoadSS ||
|
2008-12-03 19:15:48 +01:00
|
|
|
(DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
bool IsFirstRange = true;
|
|
|
|
for (LiveInterval::Ranges::const_iterator
|
|
|
|
I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
|
|
|
|
// If this is a split live interval with multiple ranges, it means there
|
|
|
|
// are two-address instructions that re-defined the value. Only the
|
|
|
|
// first def can be rematerialized!
|
|
|
|
if (IsFirstRange) {
|
2007-11-30 00:02:50 +01:00
|
|
|
// Note ReMatOrigDefMI has already been deleted.
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
|
|
|
|
Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
|
2008-02-22 10:24:50 +01:00
|
|
|
false, vrm, rc, ReMatIds, loopInfo,
|
2007-11-29 02:06:25 +01:00
|
|
|
SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
|
2009-05-03 20:32:42 +02:00
|
|
|
MBBVRegsMap, NewLIs);
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
} else {
|
|
|
|
rewriteInstructionsForSpills(li, false, I, NULL, 0,
|
|
|
|
Slot, 0, false, false, false,
|
2008-02-22 10:24:50 +01:00
|
|
|
false, vrm, rc, ReMatIds, loopInfo,
|
2007-11-29 02:06:25 +01:00
|
|
|
SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
|
2009-05-03 20:32:42 +02:00
|
|
|
MBBVRegsMap, NewLIs);
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
}
|
|
|
|
IsFirstRange = false;
|
|
|
|
}
|
2008-04-03 18:39:43 +02:00
|
|
|
|
2008-04-11 19:53:36 +02:00
|
|
|
handleSpilledImpDefs(li, vrm, rc, NewLIs);
|
2010-02-18 22:33:05 +01:00
|
|
|
normalizeSpillWeights(NewLIs);
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
return NewLIs;
|
|
|
|
}
|
|
|
|
|
2009-09-14 23:33:42 +02:00
|
|
|
bool TrySplit = !intervalIsInOneMBB(li);
|
2007-11-29 02:06:25 +01:00
|
|
|
if (TrySplit)
|
|
|
|
++numSplits;
|
2007-11-12 07:35:08 +01:00
|
|
|
bool NeedStackSlot = false;
|
|
|
|
for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
|
|
|
|
i != e; ++i) {
|
|
|
|
const VNInfo *VNI = *i;
|
|
|
|
unsigned VN = VNI->id;
|
2009-06-17 23:01:20 +02:00
|
|
|
if (VNI->isUnused())
|
2007-11-12 07:35:08 +01:00
|
|
|
continue; // Dead val#.
|
|
|
|
// Is the def for the val# rematerializable?
|
2010-09-25 14:04:16 +02:00
|
|
|
MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
|
2007-12-06 01:01:56 +01:00
|
|
|
bool dummy;
|
2008-09-30 17:44:16 +02:00
|
|
|
if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
|
2007-11-12 07:35:08 +01:00
|
|
|
// Remember how to remat the def of this val#.
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
ReMatOrigDefs[VN] = ReMatDefMI;
|
2008-07-18 01:49:46 +02:00
|
|
|
// Original def may be modified so we have to make a copy here.
|
2008-07-19 02:37:25 +02:00
|
|
|
MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
|
2009-09-14 23:33:42 +02:00
|
|
|
CloneMIs.push_back(Clone);
|
2008-07-19 02:37:25 +02:00
|
|
|
ReMatDefs[VN] = Clone;
|
2007-11-12 07:35:08 +01:00
|
|
|
|
|
|
|
bool CanDelete = true;
|
2009-06-17 23:01:20 +02:00
|
|
|
if (VNI->hasPHIKill()) {
|
2007-11-29 10:49:23 +01:00
|
|
|
// A kill is a phi node, not all of its uses can be rematerialized.
|
2007-11-12 07:35:08 +01:00
|
|
|
// It must not be deleted.
|
2007-11-29 10:49:23 +01:00
|
|
|
CanDelete = false;
|
|
|
|
// Need a stack slot if there is any live range where uses cannot be
|
|
|
|
// rematerialized.
|
|
|
|
NeedStackSlot = true;
|
2007-11-12 07:35:08 +01:00
|
|
|
}
|
|
|
|
if (CanDelete)
|
|
|
|
ReMatDelete.set(VN);
|
|
|
|
} else {
|
|
|
|
// Need a stack slot if there is any live range where uses cannot be
|
|
|
|
// rematerialized.
|
|
|
|
NeedStackSlot = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// One stack slot per live interval.
|
2009-03-26 19:53:38 +01:00
|
|
|
if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
|
|
|
|
if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
|
|
|
|
Slot = vrm.assignVirt2StackSlot(li.reg);
|
2010-08-12 22:01:23 +02:00
|
|
|
|
2009-03-26 19:53:38 +01:00
|
|
|
// This case only occurs when the prealloc splitter has already assigned
|
|
|
|
// a stack slot to this vreg.
|
|
|
|
else
|
|
|
|
Slot = vrm.getStackSlot(li.reg);
|
|
|
|
}
|
2007-11-12 07:35:08 +01:00
|
|
|
|
|
|
|
// Create new intervals and rewrite defs and uses.
|
|
|
|
for (LiveInterval::Ranges::const_iterator
|
|
|
|
I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
|
|
|
|
MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
|
|
|
|
bool DefIsReMat = ReMatDefMI != NULL;
|
2007-11-12 07:35:08 +01:00
|
|
|
bool CanDelete = ReMatDelete[I->valno->id];
|
|
|
|
int LdSlot = 0;
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
|
2007-11-12 07:35:08 +01:00
|
|
|
bool isLoad = isLoadSS ||
|
2008-12-03 19:15:48 +01:00
|
|
|
(DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
|
2007-11-29 02:06:25 +01:00
|
|
|
Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
|
2008-02-22 10:24:50 +01:00
|
|
|
CanDelete, vrm, rc, ReMatIds, loopInfo,
|
2007-11-29 02:06:25 +01:00
|
|
|
SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
|
2009-05-03 20:32:42 +02:00
|
|
|
MBBVRegsMap, NewLIs);
|
2007-11-12 07:35:08 +01:00
|
|
|
}
|
|
|
|
|
2007-11-29 02:06:25 +01:00
|
|
|
// Insert spills / restores if we are splitting.
|
2008-04-03 18:39:43 +02:00
|
|
|
if (!TrySplit) {
|
2008-04-11 19:53:36 +02:00
|
|
|
handleSpilledImpDefs(li, vrm, rc, NewLIs);
|
2010-02-18 22:33:05 +01:00
|
|
|
normalizeSpillWeights(NewLIs);
|
2007-11-29 11:12:14 +01:00
|
|
|
return NewLIs;
|
2008-04-03 18:39:43 +02:00
|
|
|
}
|
2007-11-29 11:12:14 +01:00
|
|
|
|
2007-12-05 09:16:32 +01:00
|
|
|
SmallPtrSet<LiveInterval*, 4> AddedKill;
|
2007-12-02 09:30:39 +01:00
|
|
|
SmallVector<unsigned, 2> Ops;
|
2007-11-29 11:12:14 +01:00
|
|
|
if (NeedStackSlot) {
|
|
|
|
int Id = SpillMBBs.find_first();
|
|
|
|
while (Id != -1) {
|
|
|
|
std::vector<SRInfo> &spills = SpillIdxes[Id];
|
|
|
|
for (unsigned i = 0, e = spills.size(); i != e; ++i) {
|
2009-11-04 00:52:08 +01:00
|
|
|
SlotIndex index = spills[i].index;
|
2007-11-29 11:12:14 +01:00
|
|
|
unsigned VReg = spills[i].vreg;
|
2007-12-04 01:32:23 +01:00
|
|
|
LiveInterval &nI = getOrCreateInterval(VReg);
|
2007-11-29 02:06:25 +01:00
|
|
|
bool isReMat = vrm.isReMaterialized(VReg);
|
|
|
|
MachineInstr *MI = getInstructionFromIndex(index);
|
2007-12-02 09:30:39 +01:00
|
|
|
bool CanFold = false;
|
|
|
|
bool FoundUse = false;
|
|
|
|
Ops.clear();
|
2007-11-30 22:23:43 +01:00
|
|
|
if (spills[i].canFold) {
|
2007-12-02 09:30:39 +01:00
|
|
|
CanFold = true;
|
2007-11-29 02:06:25 +01:00
|
|
|
for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
|
|
|
|
MachineOperand &MO = MI->getOperand(j);
|
2008-10-03 17:45:36 +02:00
|
|
|
if (!MO.isReg() || MO.getReg() != VReg)
|
2007-11-29 02:06:25 +01:00
|
|
|
continue;
|
2007-12-02 09:30:39 +01:00
|
|
|
|
|
|
|
Ops.push_back(j);
|
|
|
|
if (MO.isDef())
|
2007-11-30 22:23:43 +01:00
|
|
|
continue;
|
2010-08-12 22:01:23 +02:00
|
|
|
if (isReMat ||
|
2007-12-02 09:30:39 +01:00
|
|
|
(!FoundUse && !alsoFoldARestore(Id, index, VReg,
|
|
|
|
RestoreMBBs, RestoreIdxes))) {
|
|
|
|
// MI has two-address uses of the same register. If the use
|
|
|
|
// isn't the first and only use in the BB, then we can't fold
|
|
|
|
// it. FIXME: Move this to rewriteInstructionsForSpills.
|
|
|
|
CanFold = false;
|
2007-11-30 22:23:43 +01:00
|
|
|
break;
|
|
|
|
}
|
2007-12-02 09:30:39 +01:00
|
|
|
FoundUse = true;
|
2007-11-29 02:06:25 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
// Fold the store into the def if possible.
|
2007-11-30 22:23:43 +01:00
|
|
|
bool Folded = false;
|
2007-12-02 09:30:39 +01:00
|
|
|
if (CanFold && !Ops.empty()) {
|
|
|
|
if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
|
2007-11-30 22:23:43 +01:00
|
|
|
Folded = true;
|
2009-03-20 00:26:52 +01:00
|
|
|
if (FoundUse) {
|
2007-12-02 09:30:39 +01:00
|
|
|
// Also folded uses, do not issue a load.
|
|
|
|
eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
|
2009-11-04 00:52:08 +01:00
|
|
|
nI.removeRange(index.getLoadIndex(), index.getDefIndex());
|
2007-12-05 10:05:34 +01:00
|
|
|
}
|
2009-11-04 00:52:08 +01:00
|
|
|
nI.removeRange(index.getDefIndex(), index.getStoreIndex());
|
2007-11-30 22:23:43 +01:00
|
|
|
}
|
2007-11-29 02:06:25 +01:00
|
|
|
}
|
|
|
|
|
2008-04-09 22:57:25 +02:00
|
|
|
// Otherwise tell the spiller to issue a spill.
|
2007-12-05 09:16:32 +01:00
|
|
|
if (!Folded) {
|
|
|
|
LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
|
2009-11-04 00:52:08 +01:00
|
|
|
bool isKill = LR->end == index.getStoreIndex();
|
2008-05-20 10:10:37 +02:00
|
|
|
if (!MI->registerDefIsDead(nI.reg))
|
|
|
|
// No need to spill a dead def.
|
|
|
|
vrm.addSpillPoint(VReg, isKill, MI);
|
2007-12-05 09:16:32 +01:00
|
|
|
if (isKill)
|
|
|
|
AddedKill.insert(&nI);
|
|
|
|
}
|
2007-11-29 02:06:25 +01:00
|
|
|
}
|
2007-11-29 11:12:14 +01:00
|
|
|
Id = SpillMBBs.find_next(Id);
|
2007-11-29 02:06:25 +01:00
|
|
|
}
|
2007-11-29 11:12:14 +01:00
|
|
|
}
|
2007-11-29 02:06:25 +01:00
|
|
|
|
2007-11-29 11:12:14 +01:00
|
|
|
int Id = RestoreMBBs.find_first();
|
|
|
|
while (Id != -1) {
|
|
|
|
std::vector<SRInfo> &restores = RestoreIdxes[Id];
|
|
|
|
for (unsigned i = 0, e = restores.size(); i != e; ++i) {
|
2009-11-04 00:52:08 +01:00
|
|
|
SlotIndex index = restores[i].index;
|
|
|
|
if (index == SlotIndex())
|
2007-11-29 11:12:14 +01:00
|
|
|
continue;
|
|
|
|
unsigned VReg = restores[i].vreg;
|
2007-12-04 01:32:23 +01:00
|
|
|
LiveInterval &nI = getOrCreateInterval(VReg);
|
2008-06-06 09:54:39 +02:00
|
|
|
bool isReMat = vrm.isReMaterialized(VReg);
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
MachineInstr *MI = getInstructionFromIndex(index);
|
2007-12-02 09:30:39 +01:00
|
|
|
bool CanFold = false;
|
|
|
|
Ops.clear();
|
2007-11-30 22:23:43 +01:00
|
|
|
if (restores[i].canFold) {
|
2007-12-02 09:30:39 +01:00
|
|
|
CanFold = true;
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
|
|
|
|
MachineOperand &MO = MI->getOperand(j);
|
2008-10-03 17:45:36 +02:00
|
|
|
if (!MO.isReg() || MO.getReg() != VReg)
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
continue;
|
2007-12-02 09:30:39 +01:00
|
|
|
|
2007-11-29 02:06:25 +01:00
|
|
|
if (MO.isDef()) {
|
2007-12-02 09:30:39 +01:00
|
|
|
// If this restore were to be folded, it would have been folded
|
|
|
|
// already.
|
|
|
|
CanFold = false;
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
break;
|
|
|
|
}
|
2007-12-02 09:30:39 +01:00
|
|
|
Ops.push_back(j);
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
}
|
|
|
|
}
|
2007-11-29 02:06:25 +01:00
|
|
|
|
|
|
|
// Fold the load into the use if possible.
|
2007-11-30 22:23:43 +01:00
|
|
|
bool Folded = false;
|
2007-12-02 09:30:39 +01:00
|
|
|
if (CanFold && !Ops.empty()) {
|
2008-06-06 09:54:39 +02:00
|
|
|
if (!isReMat)
|
2007-12-02 09:30:39 +01:00
|
|
|
Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
|
|
|
|
else {
|
2007-11-29 02:06:25 +01:00
|
|
|
MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
|
|
|
|
int LdSlot = 0;
|
|
|
|
bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
|
|
|
|
// If the rematerializable def is a load, also try to fold it.
|
2008-12-03 19:15:48 +01:00
|
|
|
if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
|
2007-12-02 09:30:39 +01:00
|
|
|
Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
|
|
|
|
Ops, isLoadSS, LdSlot, VReg);
|
2008-12-05 18:41:31 +01:00
|
|
|
if (!Folded) {
|
|
|
|
unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
|
|
|
|
if (ImpUse) {
|
|
|
|
// Re-matting an instruction with virtual register use. Add the
|
2010-03-01 21:59:38 +01:00
|
|
|
// register as an implicit use on the use MI and mark the register
|
|
|
|
// interval as unspillable.
|
2008-12-05 18:41:31 +01:00
|
|
|
LiveInterval &ImpLi = getInterval(ImpUse);
|
2010-03-01 21:59:38 +01:00
|
|
|
ImpLi.markNotSpillable();
|
2008-12-05 18:41:31 +01:00
|
|
|
MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
|
|
|
|
}
|
2008-02-22 10:24:50 +01:00
|
|
|
}
|
2007-12-02 09:30:39 +01:00
|
|
|
}
|
2007-11-29 02:06:25 +01:00
|
|
|
}
|
|
|
|
// If folding is not possible / failed, then tell the spiller to issue a
|
|
|
|
// load / rematerialization for us.
|
2007-12-04 01:32:23 +01:00
|
|
|
if (Folded)
|
2009-11-04 00:52:08 +01:00
|
|
|
nI.removeRange(index.getLoadIndex(), index.getDefIndex());
|
2007-12-05 09:16:32 +01:00
|
|
|
else
|
2007-11-29 02:06:25 +01:00
|
|
|
vrm.addRestorePoint(VReg, MI);
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
}
|
2007-11-29 11:12:14 +01:00
|
|
|
Id = RestoreMBBs.find_next(Id);
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
}
|
|
|
|
|
2007-12-05 09:16:32 +01:00
|
|
|
// Finalize intervals: add kills, finalize spill weights, and filter out
|
|
|
|
// dead intervals.
|
2007-12-04 01:32:23 +01:00
|
|
|
std::vector<LiveInterval*> RetNewLIs;
|
|
|
|
for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
|
|
|
|
LiveInterval *LI = NewLIs[i];
|
|
|
|
if (!LI->empty()) {
|
2007-12-05 09:16:32 +01:00
|
|
|
if (!AddedKill.count(LI)) {
|
|
|
|
LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
|
2009-11-04 00:52:08 +01:00
|
|
|
SlotIndex LastUseIdx = LR->end.getBaseIndex();
|
2007-12-05 11:24:35 +01:00
|
|
|
MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
|
2008-03-05 01:59:57 +01:00
|
|
|
int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
|
2007-12-05 09:16:32 +01:00
|
|
|
assert(UseIdx != -1);
|
2009-03-19 21:30:06 +01:00
|
|
|
if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
|
2007-12-05 09:16:32 +01:00
|
|
|
LastUse->getOperand(UseIdx).setIsKill();
|
2007-12-05 11:24:35 +01:00
|
|
|
vrm.addKillPoint(LI->reg, LastUseIdx);
|
2007-12-05 10:51:10 +01:00
|
|
|
}
|
2007-12-05 09:16:32 +01:00
|
|
|
}
|
2007-12-04 01:32:23 +01:00
|
|
|
RetNewLIs.push_back(LI);
|
|
|
|
}
|
|
|
|
}
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 01:40:40 +01:00
|
|
|
|
2008-04-11 19:53:36 +02:00
|
|
|
handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
|
2010-02-18 22:33:05 +01:00
|
|
|
normalizeSpillWeights(RetNewLIs);
|
2007-12-04 01:32:23 +01:00
|
|
|
return RetNewLIs;
|
2007-11-12 07:35:08 +01:00
|
|
|
}
|
2008-03-11 08:19:34 +01:00
|
|
|
|
|
|
|
/// hasAllocatableSuperReg - Return true if the specified physical register has
|
|
|
|
/// any super register that's allocatable.
|
|
|
|
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
|
|
|
|
for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
|
|
|
|
if (allocatableRegs_[*AS] && hasInterval(*AS))
|
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// getRepresentativeReg - Find the largest super register of the specified
|
|
|
|
/// physical register.
|
|
|
|
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
|
2010-08-12 22:01:23 +02:00
|
|
|
// Find the largest super-register that is allocatable.
|
2008-03-11 08:19:34 +01:00
|
|
|
unsigned BestReg = Reg;
|
|
|
|
for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
|
|
|
|
unsigned SuperReg = *AS;
|
|
|
|
if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
|
|
|
|
BestReg = SuperReg;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return BestReg;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
|
|
|
|
/// specified interval that conflicts with the specified physical register.
|
|
|
|
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
|
|
|
|
unsigned PhysReg) const {
|
|
|
|
unsigned NumConflicts = 0;
|
|
|
|
const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
|
|
|
|
for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
|
|
|
|
E = mri_->reg_end(); I != E; ++I) {
|
|
|
|
MachineOperand &O = I.getOperand();
|
|
|
|
MachineInstr *MI = O.getParent();
|
2010-03-30 07:49:07 +02:00
|
|
|
if (MI->isDebugValue())
|
|
|
|
continue;
|
2009-11-04 00:52:08 +01:00
|
|
|
SlotIndex Index = getInstructionIndex(MI);
|
2008-03-11 08:19:34 +01:00
|
|
|
if (pli.liveAt(Index))
|
|
|
|
++NumConflicts;
|
|
|
|
}
|
|
|
|
return NumConflicts;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
|
2009-03-23 19:24:37 +01:00
|
|
|
/// around all defs and uses of the specified interval. Return true if it
|
|
|
|
/// was able to cut its interval.
|
|
|
|
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
|
2008-03-11 08:19:34 +01:00
|
|
|
unsigned PhysReg, VirtRegMap &vrm) {
|
|
|
|
unsigned SpillReg = getRepresentativeReg(PhysReg);
|
|
|
|
|
2010-11-16 20:55:14 +01:00
|
|
|
DEBUG(dbgs() << "spillPhysRegAroundRegDefsUses " << tri_->getName(PhysReg)
|
|
|
|
<< " represented by " << tri_->getName(SpillReg) << '\n');
|
|
|
|
|
2008-03-11 08:19:34 +01:00
|
|
|
for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
|
|
|
|
// If there are registers which alias PhysReg, but which are not a
|
|
|
|
// sub-register of the chosen representative super register. Assert
|
|
|
|
// since we can't handle it yet.
|
2009-04-13 17:22:29 +02:00
|
|
|
assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
|
2008-03-11 08:19:34 +01:00
|
|
|
tri_->isSuperRegister(*AS, SpillReg));
|
|
|
|
|
2009-03-23 19:24:37 +01:00
|
|
|
bool Cut = false;
|
2009-10-20 03:31:09 +02:00
|
|
|
SmallVector<unsigned, 4> PRegs;
|
|
|
|
if (hasInterval(SpillReg))
|
|
|
|
PRegs.push_back(SpillReg);
|
2010-11-16 20:55:14 +01:00
|
|
|
for (const unsigned *SR = tri_->getSubRegisters(SpillReg); *SR; ++SR)
|
|
|
|
if (hasInterval(*SR))
|
|
|
|
PRegs.push_back(*SR);
|
|
|
|
|
|
|
|
DEBUG({
|
|
|
|
dbgs() << "Trying to spill:";
|
|
|
|
for (unsigned i = 0, e = PRegs.size(); i != e; ++i)
|
|
|
|
dbgs() << ' ' << tri_->getName(PRegs[i]);
|
|
|
|
dbgs() << '\n';
|
|
|
|
});
|
2009-10-20 03:31:09 +02:00
|
|
|
|
2008-03-11 08:19:34 +01:00
|
|
|
SmallPtrSet<MachineInstr*, 8> SeenMIs;
|
|
|
|
for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
|
|
|
|
E = mri_->reg_end(); I != E; ++I) {
|
|
|
|
MachineOperand &O = I.getOperand();
|
|
|
|
MachineInstr *MI = O.getParent();
|
2010-03-30 07:49:07 +02:00
|
|
|
if (MI->isDebugValue() || SeenMIs.count(MI))
|
2008-03-11 08:19:34 +01:00
|
|
|
continue;
|
|
|
|
SeenMIs.insert(MI);
|
2009-11-04 00:52:08 +01:00
|
|
|
SlotIndex Index = getInstructionIndex(MI);
|
2010-11-16 20:55:14 +01:00
|
|
|
bool LiveReg = false;
|
2009-10-20 03:31:09 +02:00
|
|
|
for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
|
|
|
|
unsigned PReg = PRegs[i];
|
|
|
|
LiveInterval &pli = getInterval(PReg);
|
|
|
|
if (!pli.liveAt(Index))
|
|
|
|
continue;
|
2010-11-16 20:55:14 +01:00
|
|
|
LiveReg = true;
|
2009-11-04 00:52:08 +01:00
|
|
|
SlotIndex StartIdx = Index.getLoadIndex();
|
|
|
|
SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
|
2010-11-16 20:55:14 +01:00
|
|
|
if (!pli.isInOneLiveRange(StartIdx, EndIdx)) {
|
2009-07-11 15:10:19 +02:00
|
|
|
std::string msg;
|
|
|
|
raw_string_ostream Msg(msg);
|
|
|
|
Msg << "Ran out of registers during register allocation!";
|
2010-02-09 20:54:29 +01:00
|
|
|
if (MI->isInlineAsm()) {
|
2009-07-11 15:10:19 +02:00
|
|
|
Msg << "\nPlease check your inline asm statement for invalid "
|
2009-10-20 03:31:09 +02:00
|
|
|
<< "constraints:\n";
|
2009-07-11 15:10:19 +02:00
|
|
|
MI->print(Msg, tm_);
|
2009-01-29 03:20:59 +01:00
|
|
|
}
|
2010-04-08 00:58:41 +02:00
|
|
|
report_fatal_error(Msg.str());
|
2009-01-29 03:20:59 +01:00
|
|
|
}
|
2010-11-16 20:55:14 +01:00
|
|
|
pli.removeRange(StartIdx, EndIdx);
|
|
|
|
LiveReg = true;
|
2008-03-11 08:19:34 +01:00
|
|
|
}
|
2010-11-16 20:55:14 +01:00
|
|
|
if (!LiveReg)
|
|
|
|
continue;
|
|
|
|
DEBUG(dbgs() << "Emergency spill around " << Index << '\t' << *MI);
|
|
|
|
vrm.addEmergencySpill(SpillReg, MI);
|
|
|
|
Cut = true;
|
2008-03-11 08:19:34 +01:00
|
|
|
}
|
2009-03-23 19:24:37 +01:00
|
|
|
return Cut;
|
2008-03-11 08:19:34 +01:00
|
|
|
}
|
2008-06-05 19:15:43 +02:00
|
|
|
|
|
|
|
/// addLiveRangeToEndOfBlock - Given a register and an instruction defining
/// it, create a new value number at the def slot, add a live range from
/// that slot to the end of the instruction's basic block, and return the
/// range that was added.
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  // Compute the def slot once: it is both the value number's definition
  // point and the start of the new range (previously computed twice).
  SlotIndex DefIdx(getInstructionIndex(startInst).getDefIndex());
  VNInfo* VN = Interval.getNextValue(DefIdx, startInst, getVNInfoAllocator());
  // The value is expected to be live out through a PHI.
  VN->setHasPHIKill(true);
  LiveRange LR(DefIdx, getMBBEndIdx(startInst->getParent()), VN);
  Interval.addRange(LR);

  return LR;
}
|
2009-08-03 23:55:09 +02:00
|
|
|
|