//===-- RegAllocLinearScan.cpp - Linear Scan register allocator ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a linear scan register allocator.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "regalloc"
#include "VirtRegMap.h"
#include "VirtRegRewriter.h"
#include "Spiller.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/RegisterCoalescer.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <set>
#include <queue>
#include <memory>
#include <cmath>

using namespace llvm;
STATISTIC(NumIters     , "Number of iterations performed");
STATISTIC(NumBacktracks, "Number of times we had to backtrack");
STATISTIC(NumCoalesce,   "Number of copies coalesced");
STATISTIC(NumDowngrade,  "Number of registers downgraded");

static cl::opt<bool>
NewHeuristic("new-spilling-heuristic",
             cl::desc("Use new spilling heuristic"),
             cl::init(false), cl::Hidden);

static cl::opt<bool>
PreSplitIntervals("pre-alloc-split",
                  cl::desc("Pre-register allocation live interval splitting"),
                  cl::init(false), cl::Hidden);

static cl::opt<bool>
NewSpillFramework("new-spill-framework",
                  cl::desc("New spilling framework"),
                  cl::init(false), cl::Hidden);

static RegisterRegAlloc
linearscanRegAlloc("linearscan", "linear scan register allocator",
                   createLinearScanRegisterAllocator);

namespace {
  struct VISIBILITY_HIDDEN RALinScan : public MachineFunctionPass {
    static char ID;
    RALinScan() : MachineFunctionPass(&ID) {}

    typedef std::pair<LiveInterval*, LiveInterval::iterator> IntervalPtr;
    typedef SmallVector<IntervalPtr, 32> IntervalPtrs;
  private:
    /// RelatedRegClasses - This structure is built the first time a function
    /// is compiled, and keeps track of which register classes have registers
    /// that belong to multiple classes or have aliases that are in other
    /// classes.
    EquivalenceClasses<const TargetRegisterClass*> RelatedRegClasses;
    DenseMap<unsigned, const TargetRegisterClass*> OneClassForEachPhysReg;

    // NextReloadMap - For each register in the map, it maps to another
    // register which is defined by a reload from the same stack slot and
    // both reloads are in the same basic block.
    DenseMap<unsigned, unsigned> NextReloadMap;

    // DowngradedRegs - A set of registers which are being "downgraded", i.e.
    // un-favored for allocation.
    SmallSet<unsigned, 8> DowngradedRegs;

    // DowngradeMap - A map from virtual registers to physical registers being
    // downgraded for the virtual registers.
    DenseMap<unsigned, unsigned> DowngradeMap;
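
    // Rationale (from the change that introduced these fields): when an
    // interval is spilled, each reload from its stack slot gets a fresh
    // virtual register, so reloads of the same value within one basic block
    // look unrelated to the allocator and may each end up in a different
    // physical register.  NextReloadMap remembers the next reload from the
    // same stack slot in the same block, and DowngradedRegs/DowngradeMap
    // temporarily un-favor the register picked for the current reload, so
    // the following reload is more likely to reuse it.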

    MachineFunction* mf_;
    MachineRegisterInfo* mri_;
    const TargetMachine* tm_;
    const TargetRegisterInfo* tri_;
    const TargetInstrInfo* tii_;
    BitVector allocatableRegs_;
    LiveIntervals* li_;
    LiveStacks* ls_;
    const MachineLoopInfo *loopInfo;

    /// handled_ - Intervals are added to the handled_ set in the order of
    /// their start value.  This is used for backtracking.
    std::vector<LiveInterval*> handled_;

    /// fixed_ - Intervals that correspond to machine registers.
    ///
    IntervalPtrs fixed_;

    /// active_ - Intervals that are currently being processed, and which have
    /// a live range active for the current point.
    IntervalPtrs active_;

    /// inactive_ - Intervals that are currently being processed, but which
    /// have a hole at the current point.
    IntervalPtrs inactive_;

    typedef std::priority_queue<LiveInterval*,
                                SmallVector<LiveInterval*, 64>,
                                greater_ptr<LiveInterval> > IntervalHeap;
    IntervalHeap unhandled_;
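    // Note: greater_ptr orders the heap so that unhandled_.top() is always
    // the unprocessed interval with the earliest start point.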

    /// regUse_ - Tracks register usage.
    SmallVector<unsigned, 32> regUse_;
    SmallVector<unsigned, 32> regUseBackUp_;

    /// vrm_ - Tracks register assignments.
    VirtRegMap* vrm_;

    std::auto_ptr<VirtRegRewriter> rewriter_;

    std::auto_ptr<Spiller> spiller_;

  public:
    virtual const char* getPassName() const {
      return "Linear Scan Register Allocator";
    }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<LiveIntervals>();
      if (StrongPHIElim)
        AU.addRequiredID(StrongPHIEliminationID);
      // Make sure PassManager knows which analyses to make available
      // to coalescing and which analyses coalescing invalidates.
      AU.addRequiredTransitive<RegisterCoalescer>();
      if (PreSplitIntervals)
        AU.addRequiredID(PreAllocSplittingID);
      AU.addRequired<LiveStacks>();
      AU.addPreserved<LiveStacks>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      AU.addRequired<VirtRegMap>();
      AU.addPreserved<VirtRegMap>();
      AU.addPreservedID(MachineDominatorsID);
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    /// runOnMachineFunction - register allocate the whole function
    bool runOnMachineFunction(MachineFunction&);

  private:
    /// linearScan - the linear scan algorithm
    void linearScan();

    /// initIntervalSets - initialize the interval sets.
    ///
    void initIntervalSets();

    /// processActiveIntervals - expire old intervals and move non-overlapping
    /// ones to the inactive list.
    void processActiveIntervals(unsigned CurPoint);

    /// processInactiveIntervals - expire old intervals and move overlapping
    /// ones to the active list.
    void processInactiveIntervals(unsigned CurPoint);

    /// hasNextReloadInterval - Return the next liveinterval that's being
    /// defined by a reload from the same SS as the specified one.
    LiveInterval *hasNextReloadInterval(LiveInterval *cur);

    /// DowngradeRegister - Downgrade a register for allocation.
    void DowngradeRegister(LiveInterval *li, unsigned Reg);

    /// UpgradeRegister - Upgrade a register for allocation.
    void UpgradeRegister(unsigned Reg);

    /// assignRegOrStackSlotAtInterval - assign a register if one
    /// is available, or spill.
    void assignRegOrStackSlotAtInterval(LiveInterval* cur);

    void updateSpillWeights(std::vector<float> &Weights,
                            unsigned reg, float weight,
                            const TargetRegisterClass *RC);

    /// findIntervalsToSpill - Determine the intervals to spill for the
    /// specified interval. It's passed the physical registers whose spill
    /// weight is the lowest among all the registers whose live intervals
    /// conflict with the interval.
    void findIntervalsToSpill(LiveInterval *cur,
                            std::vector<std::pair<unsigned,float> > &Candidates,
                            unsigned NumCands,
                            SmallVector<LiveInterval*, 8> &SpillIntervals);

    /// attemptTrivialCoalescing - If a simple interval is defined by a copy,
    /// try to allocate the definition the same register as the source register
    /// if the register is not defined during the live time of the interval.
    /// This eliminates a copy. This is used to coalesce copies which were not
    /// coalesced away before allocation either due to dest and src being in
    /// different register classes or because the coalescer was overly
    /// conservative.
    unsigned attemptTrivialCoalescing(LiveInterval &cur, unsigned Reg);

    ///
    /// Register usage / availability tracking helpers.
    ///

    void initRegUses() {
      regUse_.resize(tri_->getNumRegs(), 0);
      regUseBackUp_.resize(tri_->getNumRegs(), 0);
    }

    void finalizeRegUses() {
#ifndef NDEBUG
      // Verify all the registers are "freed".
      bool Error = false;
      for (unsigned i = 0, e = tri_->getNumRegs(); i != e; ++i) {
        if (regUse_[i] != 0) {
          errs() << tri_->getName(i) << " is still in use!\n";
          Error = true;
        }
      }
      if (Error)
        llvm_unreachable(0);
#endif
      regUse_.clear();
      regUseBackUp_.clear();
    }

    void addRegUse(unsigned physReg) {
      assert(TargetRegisterInfo::isPhysicalRegister(physReg) &&
             "should be physical register!");
      ++regUse_[physReg];
      for (const unsigned* as = tri_->getAliasSet(physReg); *as; ++as)
        ++regUse_[*as];
    }

    void delRegUse(unsigned physReg) {
      assert(TargetRegisterInfo::isPhysicalRegister(physReg) &&
             "should be physical register!");
      assert(regUse_[physReg] != 0);
      --regUse_[physReg];
      for (const unsigned* as = tri_->getAliasSet(physReg); *as; ++as) {
        assert(regUse_[*as] != 0);
        --regUse_[*as];
      }
    }

    bool isRegAvail(unsigned physReg) const {
      assert(TargetRegisterInfo::isPhysicalRegister(physReg) &&
             "should be physical register!");
      return regUse_[physReg] == 0;
    }

    void backUpRegUses() {
      regUseBackUp_ = regUse_;
    }

    void restoreRegUses() {
      regUse_ = regUseBackUp_;
    }
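
    // Note: regUse_ counts, for each physical register and its aliases, how
    // many live intervals currently occupy it; isRegAvail is true when the
    // count is zero.  backUpRegUses/restoreRegUses snapshot and restore this
    // state when an assignment has to be backed out (cf. NumBacktracks).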

    ///
    /// Register handling helpers.
    ///

    /// getFreePhysReg - return a free physical register for this virtual
    /// register interval if we have one, otherwise return 0.
    unsigned getFreePhysReg(LiveInterval* cur);
    unsigned getFreePhysReg(LiveInterval* cur,
                            const TargetRegisterClass *RC,
                            unsigned MaxInactiveCount,
                            SmallVector<unsigned, 256> &inactiveCounts,
                            bool SkipDGRegs);

    /// assignVirt2StackSlot - assigns this virtual register to a
    /// stack slot. returns the stack slot
    int assignVirt2StackSlot(unsigned virtReg);

    void ComputeRelatedRegClasses();

    template <typename ItTy>
    void printIntervals(const char* const str, ItTy i, ItTy e) const {
      DEBUG({
          if (str)
            errs() << str << " intervals:\n";

          for (; i != e; ++i) {
            errs() << "\t" << *i->first << " -> ";

            unsigned reg = i->first->reg;
            if (TargetRegisterInfo::isVirtualRegister(reg))
              reg = vrm_->getPhys(reg);

            errs() << tri_->getName(reg) << '\n';
          }
        });
    }
  };
  char RALinScan::ID = 0;
}

static RegisterPass<RALinScan>
X("linearscan-regalloc", "Linear Scan Register Allocator");

void RALinScan::ComputeRelatedRegClasses() {
  // First pass, add all reg classes to the union, and determine at least one
  // reg class that each register is in.
  bool HasAliases = false;
  for (TargetRegisterInfo::regclass_iterator RCI = tri_->regclass_begin(),
       E = tri_->regclass_end(); RCI != E; ++RCI) {
    RelatedRegClasses.insert(*RCI);
    for (TargetRegisterClass::iterator I = (*RCI)->begin(), E = (*RCI)->end();
         I != E; ++I) {
      HasAliases = HasAliases || *tri_->getAliasSet(*I) != 0;

      const TargetRegisterClass *&PRC = OneClassForEachPhysReg[*I];
      if (PRC) {
        // Already processed this register.  Just make sure we know that
        // multiple register classes share a register.
        RelatedRegClasses.unionSets(PRC, *RCI);
      } else {
        PRC = *RCI;
      }
    }
  }

  // Second pass, now that we know conservatively what register classes each
  // reg belongs to, add info about aliases.  We don't need to do this for
  // targets without register aliases.
  if (HasAliases)
    for (DenseMap<unsigned, const TargetRegisterClass*>::iterator
         I = OneClassForEachPhysReg.begin(), E = OneClassForEachPhysReg.end();
         I != E; ++I)
      for (const unsigned *AS = tri_->getAliasSet(I->first); *AS; ++AS)
        RelatedRegClasses.unionSets(I->second, OneClassForEachPhysReg[*AS]);
}

/// attemptTrivialCoalescing - If a simple interval is defined by a copy,
/// try to allocate the definition the same register as the source register
/// if the register is not defined during the live time of the interval.
/// This eliminates a copy. This is used to coalesce copies which were not
/// coalesced away before allocation either due to dest and src being in
/// different register classes or because the coalescer was overly
/// conservative.
unsigned RALinScan::attemptTrivialCoalescing(LiveInterval &cur, unsigned Reg) {
  unsigned Preference = vrm_->getRegAllocPref(cur.reg);
  if ((Preference && Preference == Reg) || !cur.containsOneValue())
    return Reg;

  VNInfo *vni = cur.begin()->valno;
  if (!vni->def || vni->isUnused() || !vni->isDefAccurate())
    return Reg;
  MachineInstr *CopyMI = li_->getInstructionFromIndex(vni->def);
  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg, PhysReg;
  if (!CopyMI ||
      !tii_->isMoveInstr(*CopyMI, SrcReg, DstReg, SrcSubReg, DstSubReg))
    return Reg;
  PhysReg = SrcReg;
  if (TargetRegisterInfo::isVirtualRegister(SrcReg)) {
    if (!vrm_->isAssignedReg(SrcReg))
      return Reg;
    PhysReg = vrm_->getPhys(SrcReg);
  }
  if (Reg == PhysReg)
    return Reg;

  const TargetRegisterClass *RC = mri_->getRegClass(cur.reg);
  if (!RC->contains(PhysReg))
    return Reg;

  // Try to coalesce.
  if (!li_->conflictsWithPhysRegDef(cur, *vrm_, PhysReg)) {
    DEBUG(errs() << "Coalescing: " << cur << " -> " << tri_->getName(PhysReg)
                 << '\n');
    vrm_->clearVirt(cur.reg);
    vrm_->assignVirt2Phys(cur.reg, PhysReg);

    // Remove unnecessary kills since a copy does not clobber the register.
    if (li_->hasInterval(SrcReg)) {
      LiveInterval &SrcLI = li_->getInterval(SrcReg);
      for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(cur.reg),
             E = mri_->reg_end(); I != E; ++I) {
        MachineOperand &O = I.getOperand();
        if (!O.isUse() || !O.isKill())
          continue;
        MachineInstr *MI = &*I;
        if (SrcLI.liveAt(li_->getDefIndex(li_->getInstructionIndex(MI))))
          O.setIsKill(false);
      }
    }

    ++NumCoalesce;
    return PhysReg;
  }

  return Reg;
}

bool RALinScan::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &fn.getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  allocatableRegs_ = tri_->getAllocatableSet(fn);
  li_ = &getAnalysis<LiveIntervals>();
  ls_ = &getAnalysis<LiveStacks>();
  loopInfo = &getAnalysis<MachineLoopInfo>();

  // We don't run the coalescer here because we have no reason to
  // interact with it.  If the coalescer requires interaction, it
  // won't do anything.  If it doesn't require interaction, we assume
  // it was run as a separate pass.

  // If this is the first function compiled, compute the related reg classes.
  if (RelatedRegClasses.empty())
    ComputeRelatedRegClasses();

  // Also resize register usage trackers.
  initRegUses();

  vrm_ = &getAnalysis<VirtRegMap>();
  if (!rewriter_.get()) rewriter_.reset(createVirtRegRewriter());

  if (NewSpillFramework) {
    spiller_.reset(createSpiller(mf_, li_, ls_, vrm_));
  }

  initIntervalSets();

  linearScan();

  // Rewrite spill code and update the PhysRegsUsed set.
  rewriter_->runOnMachineFunction(*mf_, *vrm_, li_);

  assert(unhandled_.empty() && "Unhandled live intervals remain!");

  finalizeRegUses();

  fixed_.clear();
  active_.clear();
  inactive_.clear();
  handled_.clear();
  NextReloadMap.clear();
  DowngradedRegs.clear();
  DowngradeMap.clear();
  spiller_.reset(0);

  return true;
}

/// initIntervalSets - initialize the interval sets.
///
void RALinScan::initIntervalSets()
{
  assert(unhandled_.empty() && fixed_.empty() &&
         active_.empty() && inactive_.empty() &&
         "interval sets should be empty on initialization");

  handled_.reserve(li_->getNumIntervals());

  for (LiveIntervals::iterator i = li_->begin(), e = li_->end(); i != e; ++i) {
    if (TargetRegisterInfo::isPhysicalRegister(i->second->reg)) {
      mri_->setPhysRegUsed(i->second->reg);
      fixed_.push_back(std::make_pair(i->second, i->second->begin()));
    } else
      unhandled_.push(i->second);
  }
}

void RALinScan::linearScan() {
  // linear scan algorithm
  DEBUG({
      errs() << "********** LINEAR SCAN **********\n"
             << "********** Function: "
             << mf_->getFunction()->getName() << '\n';
      printIntervals("fixed", fixed_.begin(), fixed_.end());
    });
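
  // For each unhandled interval, in order of increasing start point:
  //   1. expire or deactivate intervals in active_ that no longer cover the
  //      current point (processActiveIntervals),
  //   2. expire or re-activate intervals in inactive_
  //      (processInactiveIntervals),
  //   3. then give the interval a physical register, or spill
  //      (assignRegOrStackSlotAtInterval).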

  while (!unhandled_.empty()) {
    // pick the interval with the earliest start point
    LiveInterval* cur = unhandled_.top();
    unhandled_.pop();
    ++NumIters;
    DEBUG(errs() << "\n*** CURRENT ***: " << *cur << '\n');

    if (!cur->empty()) {
      processActiveIntervals(cur->beginNumber());
      processInactiveIntervals(cur->beginNumber());

      assert(TargetRegisterInfo::isVirtualRegister(cur->reg) &&
             "Can only allocate virtual registers!");
    }

    // Allocating a virtual register. Try to find a free
    // physical register or spill an interval (possibly this one) in order to
    // assign it one.
    assignRegOrStackSlotAtInterval(cur);

    DEBUG({
        printIntervals("active", active_.begin(), active_.end());
        printIntervals("inactive", inactive_.begin(), inactive_.end());
      });
  }

  // Expire any remaining active intervals
  while (!active_.empty()) {
    IntervalPtr &IP = active_.back();
    unsigned reg = IP.first->reg;
    DEBUG(errs() << "\tinterval " << *IP.first << " expired\n");
    assert(TargetRegisterInfo::isVirtualRegister(reg) &&
           "Can only allocate virtual registers!");
    reg = vrm_->getPhys(reg);
    delRegUse(reg);
    active_.pop_back();
  }

  // Expire any remaining inactive intervals
  DEBUG({
      for (IntervalPtrs::reverse_iterator
             i = inactive_.rbegin(); i != inactive_.rend(); ++i)
        errs() << "\tinterval " << *i->first << " expired\n";
    });
  inactive_.clear();

  // Add live-ins to every BB except for entry. Also perform trivial coalescing.
  MachineFunction::iterator EntryMBB = mf_->begin();
  SmallVector<MachineBasicBlock*, 8> LiveInMBBs;
  for (LiveIntervals::iterator i = li_->begin(), e = li_->end(); i != e; ++i) {
    LiveInterval &cur = *i->second;
    unsigned Reg = 0;
    bool isPhys = TargetRegisterInfo::isPhysicalRegister(cur.reg);
    if (isPhys)
      Reg = cur.reg;
    else if (vrm_->isAssignedReg(cur.reg))
      Reg = attemptTrivialCoalescing(cur, vrm_->getPhys(cur.reg));
    if (!Reg)
      continue;
    // Ignore split live intervals.
    if (!isPhys && vrm_->getPreSplitReg(cur.reg))
      continue;

    for (LiveInterval::Ranges::const_iterator I = cur.begin(), E = cur.end();
         I != E; ++I) {
      const LiveRange &LR = *I;
      if (li_->findLiveInMBBs(LR.start, LR.end, LiveInMBBs)) {
        for (unsigned i = 0, e = LiveInMBBs.size(); i != e; ++i)
          if (LiveInMBBs[i] != EntryMBB) {
            assert(TargetRegisterInfo::isPhysicalRegister(Reg) &&
                   "Adding a virtual register to livein set?");
            LiveInMBBs[i]->addLiveIn(Reg);
          }
        LiveInMBBs.clear();
      }
    }
  }

  DEBUG(errs() << *vrm_);

  // Look for physical registers that end up not being allocated even though
  // the register allocator had to spill other registers in its register class.
  if (ls_->getNumIntervals() == 0)
    return;
  if (!vrm_->FindUnusedRegisters(li_))
    return;
}

/// processActiveIntervals - expire old intervals and move non-overlapping ones
/// to the inactive list.
void RALinScan::processActiveIntervals(unsigned CurPoint)
{
  DEBUG(errs() << "\tprocessing active intervals:\n");

  for (unsigned i = 0, e = active_.size(); i != e; ++i) {
    LiveInterval *Interval = active_[i].first;
    LiveInterval::iterator IntervalPos = active_[i].second;
    unsigned reg = Interval->reg;

    IntervalPos = Interval->advanceTo(IntervalPos, CurPoint);

    if (IntervalPos == Interval->end()) {     // Remove expired intervals.
      DEBUG(errs() << "\t\tinterval " << *Interval << " expired\n");
      assert(TargetRegisterInfo::isVirtualRegister(reg) &&
             "Can only allocate virtual registers!");
      reg = vrm_->getPhys(reg);
      delRegUse(reg);

      // Pop off the end of the list.
      active_[i] = active_.back();
      active_.pop_back();
      --i; --e;

    } else if (IntervalPos->start > CurPoint) {
      // Move inactive intervals to inactive list.
      DEBUG(errs() << "\t\tinterval " << *Interval << " inactive\n");
      assert(TargetRegisterInfo::isVirtualRegister(reg) &&
             "Can only allocate virtual registers!");
      reg = vrm_->getPhys(reg);
      delRegUse(reg);
      // add to inactive.
      inactive_.push_back(std::make_pair(Interval, IntervalPos));

      // Pop off the end of the list.
      active_[i] = active_.back();
      active_.pop_back();
      --i; --e;
    } else {
      // Otherwise, just update the iterator position.
      active_[i].second = IntervalPos;
    }
  }
}

/// processInactiveIntervals - expire old intervals and move overlapping
/// ones to the active list.
void RALinScan::processInactiveIntervals(unsigned CurPoint)
{
  DEBUG(errs() << "\tprocessing inactive intervals:\n");

  for (unsigned i = 0, e = inactive_.size(); i != e; ++i) {
    LiveInterval *Interval = inactive_[i].first;
    LiveInterval::iterator IntervalPos = inactive_[i].second;
    unsigned reg = Interval->reg;

    IntervalPos = Interval->advanceTo(IntervalPos, CurPoint);

    if (IntervalPos == Interval->end()) {       // remove expired intervals.
      DEBUG(errs() << "\t\tinterval " << *Interval << " expired\n");

      // Pop off the end of the list.
      inactive_[i] = inactive_.back();
      inactive_.pop_back();
      --i; --e;
    } else if (IntervalPos->start <= CurPoint) {
      // move re-activated intervals in active list
      DEBUG(errs() << "\t\tinterval " << *Interval << " active\n");
      assert(TargetRegisterInfo::isVirtualRegister(reg) &&
             "Can only allocate virtual registers!");
      reg = vrm_->getPhys(reg);
      addRegUse(reg);
      // add to active
      active_.push_back(std::make_pair(Interval, IntervalPos));

      // Pop off the end of the list.
      inactive_[i] = inactive_.back();
      inactive_.pop_back();
      --i; --e;
    } else {
      // Otherwise, just update the iterator position.
      inactive_[i].second = IntervalPos;
    }
  }
}

/// updateSpillWeights - updates the spill weights of the specified physical
/// register and its weight.
void RALinScan::updateSpillWeights(std::vector<float> &Weights,
                                   unsigned reg, float weight,
                                   const TargetRegisterClass *RC) {
  SmallSet<unsigned, 4> Processed;
  SmallSet<unsigned, 4> SuperAdded;
  SmallVector<unsigned, 4> Supers;
  Weights[reg] += weight;
  Processed.insert(reg);
  for (const unsigned* as = tri_->getAliasSet(reg); *as; ++as) {
    Weights[*as] += weight;
    Processed.insert(*as);
    if (tri_->isSubRegister(*as, reg) &&
        SuperAdded.insert(*as) &&
        RC->contains(*as)) {
      Supers.push_back(*as);
    }
  }

  // If the alias is a super-register, and the super-register is in the
  // register class we are trying to allocate, then add the weight to all
  // sub-registers of the super-register, even if they are not aliases.
  // e.g. allocating for GR32, bh is not used, updating bl spill weight.
  //      bl should get the same spill weight; otherwise it will be chosen
  //      as a spill candidate, since spilling bh doesn't make ebx available.
  for (unsigned i = 0, e = Supers.size(); i != e; ++i) {
    for (const unsigned *sr = tri_->getSubRegisters(Supers[i]); *sr; ++sr)
      if (!Processed.count(*sr))
        Weights[*sr] += weight;
  }
}

static
RALinScan::IntervalPtrs::iterator
FindIntervalInVector(RALinScan::IntervalPtrs &IP, LiveInterval *LI) {
  for (RALinScan::IntervalPtrs::iterator I = IP.begin(), E = IP.end();
       I != E; ++I)
    if (I->first == LI) return I;
  return IP.end();
}

static void RevertVectorIteratorsTo(RALinScan::IntervalPtrs &V, unsigned Point){
  for (unsigned i = 0, e = V.size(); i != e; ++i) {
    RALinScan::IntervalPtr &IP = V[i];
    LiveInterval::iterator I = std::upper_bound(IP.first->begin(),
                                                IP.second, Point);
    if (I != IP.first->begin()) --I;
    IP.second = I;
  }
}

/// addStackInterval - Create a LiveInterval for stack if the specified live
/// interval has been spilled.
static void addStackInterval(LiveInterval *cur, LiveStacks *ls_,
                             LiveIntervals *li_,
                             MachineRegisterInfo* mri_, VirtRegMap &vrm_) {
  int SS = vrm_.getStackSlot(cur->reg);
  if (SS == VirtRegMap::NO_STACK_SLOT)
    return;

  const TargetRegisterClass *RC = mri_->getRegClass(cur->reg);
  LiveInterval &SI = ls_->getOrCreateInterval(SS, RC);

  VNInfo *VNI;
  if (SI.hasAtLeastOneValue())
    VNI = SI.getValNumInfo(0);
  else
    VNI = SI.getNextValue(0, 0, false, ls_->getVNInfoAllocator());

  LiveInterval &RI = li_->getInterval(cur->reg);
  // FIXME: This may be overly conservative.
  SI.MergeRangesInAsValue(RI, VNI);
}

/// getConflictWeight - Return the number of conflicts between cur
/// live interval and defs and uses of Reg weighted by loop depths.
static
float getConflictWeight(LiveInterval *cur, unsigned Reg, LiveIntervals *li_,
                        MachineRegisterInfo *mri_,
                        const MachineLoopInfo *loopInfo) {
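  // Each def or use of Reg that lies within cur's live range counts as one
  // conflict, weighted by 10^loopDepth so that conflicts inside deeply
  // nested loops dominate the total.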
  float Conflicts = 0;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(Reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineInstr *MI = &*I;
    if (cur->liveAt(li_->getInstructionIndex(MI))) {
      unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
      Conflicts += powf(10.0f, (float)loopDepth);
    }
  }
  return Conflicts;
}

/// findIntervalsToSpill - Determine the intervals to spill for the
/// specified interval. It's passed the physical registers whose spill
/// weight is the lowest among all the registers whose live intervals
/// conflict with the interval.
void RALinScan::findIntervalsToSpill(LiveInterval *cur,
                            std::vector<std::pair<unsigned,float> > &Candidates,
                            unsigned NumCands,
                            SmallVector<LiveInterval*, 8> &SpillIntervals) {
  // We have figured out the *best* register to spill. But there are other
  // registers that are pretty good as well (spill weight within 3%). Spill
  // the one that has fewest defs and uses that conflict with cur.
  float Conflicts[3] = { 0.0f, 0.0f, 0.0f };
  SmallVector<LiveInterval*, 8> SLIs[3];
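
  // The three-element arrays above suggest the caller passes at most three
  // candidates (NumCands <= 3); this bound is inferred from the array sizes,
  // not stated here.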
|
|
|
|
|
2009-08-22 22:30:53 +02:00
|
|
|
DEBUG({
|
|
|
|
errs() << "\tConsidering " << NumCands << " candidates: ";
|
|
|
|
for (unsigned i = 0; i != NumCands; ++i)
|
|
|
|
errs() << tri_->getName(Candidates[i].first) << " ";
|
|
|
|
errs() << "\n";
|
|
|
|
});
|
2008-06-20 23:45:16 +02:00
|
|
|
|
|
|
|
// Calculate the number of conflicts of each candidate.
|
|
|
|
for (IntervalPtrs::iterator i = active_.begin(); i != active_.end(); ++i) {
|
|
|
|
unsigned Reg = i->first->reg;
|
|
|
|
unsigned PhysReg = vrm_->getPhys(Reg);
|
|
|
|
if (!cur->overlapsFrom(*i->first, i->second))
|
|
|
|
continue;
|
|
|
|
for (unsigned j = 0; j < NumCands; ++j) {
|
|
|
|
unsigned Candidate = Candidates[j].first;
|
|
|
|
if (tri_->regsOverlap(PhysReg, Candidate)) {
|
|
|
|
if (NumCands > 1)
|
|
|
|
Conflicts[j] += getConflictWeight(cur, Reg, li_, mri_, loopInfo);
|
|
|
|
SLIs[j].push_back(i->first);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for (IntervalPtrs::iterator i = inactive_.begin(); i != inactive_.end(); ++i){
|
|
|
|
unsigned Reg = i->first->reg;
|
|
|
|
unsigned PhysReg = vrm_->getPhys(Reg);
|
|
|
|
if (!cur->overlapsFrom(*i->first, i->second-1))
|
|
|
|
continue;
|
|
|
|
for (unsigned j = 0; j < NumCands; ++j) {
|
|
|
|
unsigned Candidate = Candidates[j].first;
|
|
|
|
if (tri_->regsOverlap(PhysReg, Candidate)) {
|
|
|
|
if (NumCands > 1)
|
|
|
|
Conflicts[j] += getConflictWeight(cur, Reg, li_, mri_, loopInfo);
|
|
|
|
SLIs[j].push_back(i->first);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Which is the best candidate?
|
|
|
|
unsigned BestCandidate = 0;
|
|
|
|
float MinConflicts = Conflicts[0];
|
|
|
|
for (unsigned i = 1; i != NumCands; ++i) {
|
|
|
|
if (Conflicts[i] < MinConflicts) {
|
|
|
|
BestCandidate = i;
|
|
|
|
MinConflicts = Conflicts[i];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
std::copy(SLIs[BestCandidate].begin(), SLIs[BestCandidate].end(),
|
|
|
|
std::back_inserter(SpillIntervals));
|
|
|
|
}
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
struct WeightCompare {
|
|
|
|
typedef std::pair<unsigned, float> RegWeightPair;
|
|
|
|
bool operator()(const RegWeightPair &LHS, const RegWeightPair &RHS) const {
|
|
|
|
return LHS.second < RHS.second;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool weightsAreClose(float w1, float w2) {
|
|
|
|
if (!NewHeuristic)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
float diff = w1 - w2;
|
|
|
|
if (diff <= 0.02f) // Within 0.02f
|
|
|
|
return true;
|
|
|
|
return (diff / w2) <= 0.05f; // Within 5%.
|
|
|
|
}
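// Illustrative note (added for clarity; not part of the original source),
// assuming NewHeuristic is enabled:
//   weightsAreClose(1.01f, 1.0f)    -> true  (diff 0.01 <= 0.02)
//   weightsAreClose(104.0f, 100.0f) -> true  (diff 4.0, 4.0/100 = 0.04 <= 0.05)
//   weightsAreClose(110.0f, 100.0f) -> false (diff 10.0, 10/100 = 0.10 > 0.05)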
|
|
|
|
|
Added a linearscan register allocation optimization. When the register allocator spills an interval with multiple uses in the same basic block, it creates a different virtual register for each of the reloads, e.g.:
%reg1498<def> = MOV32rm %reg1024, 1, %reg0, 12, %reg0, Mem:LD(4,4) [sunkaddr39 + 0]
%reg1506<def> = MOV32rm %reg1024, 1, %reg0, 8, %reg0, Mem:LD(4,4) [sunkaddr42 + 0]
%reg1486<def> = MOV32rr %reg1506
%reg1486<def> = XOR32rr %reg1486, %reg1498, %EFLAGS<imp-def,dead>
%reg1510<def> = MOV32rm %reg1024, 1, %reg0, 4, %reg0, Mem:LD(4,4) [sunkaddr45 + 0]
=>
%reg1498<def> = MOV32rm %reg2036, 1, %reg0, 12, %reg0, Mem:LD(4,4) [sunkaddr39 + 0]
%reg1506<def> = MOV32rm %reg2037, 1, %reg0, 8, %reg0, Mem:LD(4,4) [sunkaddr42 + 0]
%reg1486<def> = MOV32rr %reg1506
%reg1486<def> = XOR32rr %reg1486, %reg1498, %EFLAGS<imp-def,dead>
%reg1510<def> = MOV32rm %reg2038, 1, %reg0, 4, %reg0, Mem:LD(4,4) [sunkaddr45 + 0]
From linearscan's point of view, each of reg2036, 2037, and 2038 is a separate register, and each is "killed" after a single use. The reloaded register is then available and is often clobbered right away; e.g. in this case reg1498 is allocated EAX while reg2036 is allocated RAX. This means we end up with multiple reloads from the same stack slot in the same basic block.
Now linearscan recognizes that there are other reloads from the same SS in the same BB, so it "downgrades" RAX (and its aliases) after reg2036 is allocated until the next reload (reg2037) is done. This greatly increases the likelihood that reloads from the SS are reused.
This speeds up sha1 from OpenSSL by 5.8%. It is also an across-the-board win for SPEC2000 and 2006.
llvm-svn: 69585
2009-04-20 10:01:12 +02:00
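// --- Illustrative sketch (added for clarity; not part of the original file) --
// A minimal, self-contained illustration of the chaining described above:
// record, for each reload, the next reload of the same stack slot in the same
// basic block, so the later reload can be hinted toward the register chosen
// for the earlier one.  All names and types here are hypothetical; the real
// mechanism is NextReloadMap/DowngradeMap, implemented in the functions below.
#include <map>
#include <utility>
#include <vector>

struct ReloadInfo {
  unsigned VReg;   // virtual register defined by the reload
  unsigned MBBNum; // basic block containing the reload
  int StackSlot;   // stack slot being reloaded
};

// Given reloads in program order, map each reload's vreg to the vreg of the
// next reload of the same stack slot in the same block (if any).
static std::map<unsigned, unsigned>
chainReloads(const std::vector<ReloadInfo> &Reloads) {
  std::map<unsigned, unsigned> NextReload;
  std::map<std::pair<unsigned, int>, unsigned> LastVRegForSlot;
  for (unsigned i = 0, e = Reloads.size(); i != e; ++i) {
    std::pair<unsigned, int> Key(Reloads[i].MBBNum, Reloads[i].StackSlot);
    std::map<std::pair<unsigned, int>, unsigned>::iterator It =
      LastVRegForSlot.find(Key);
    if (It != LastVRegForSlot.end())
      NextReload[It->second] = Reloads[i].VReg; // earlier reload -> next one
    LastVRegForSlot[Key] = Reloads[i].VReg;
  }
  return NextReload;
}
// Once a reload vreg that appears as a key in NextReload is assigned a
// physical register, the allocator can hint the mapped vreg to that same
// register and "downgrade" it until the next reload is allocated, which is
// what hasNextReloadInterval/DowngradeRegister/UpgradeRegister below do.
// -----------------------------------------------------------------------------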
|
|
|
LiveInterval *RALinScan::hasNextReloadInterval(LiveInterval *cur) {
|
|
|
|
DenseMap<unsigned, unsigned>::iterator I = NextReloadMap.find(cur->reg);
|
|
|
|
if (I == NextReloadMap.end())
|
|
|
|
return 0;
|
|
|
|
return &li_->getInterval(I->second);
|
|
|
|
}
|
|
|
|
|
|
|
|
void RALinScan::DowngradeRegister(LiveInterval *li, unsigned Reg) {
|
|
|
|
bool isNew = DowngradedRegs.insert(Reg);
|
|
|
|
isNew = isNew; // Silence compiler warning.
|
|
|
|
assert(isNew && "Multiple reloads holding the same register?");
|
|
|
|
DowngradeMap.insert(std::make_pair(li->reg, Reg));
|
|
|
|
for (const unsigned *AS = tri_->getAliasSet(Reg); *AS; ++AS) {
|
|
|
|
isNew = DowngradedRegs.insert(*AS);
|
|
|
|
isNew = isNew; // Silence compiler warning.
|
|
|
|
assert(isNew && "Multiple reloads holding the same register?");
|
|
|
|
DowngradeMap.insert(std::make_pair(li->reg, *AS));
|
|
|
|
}
|
|
|
|
++NumDowngrade;
|
|
|
|
}
|
|
|
|
|
|
|
|
void RALinScan::UpgradeRegister(unsigned Reg) {
|
|
|
|
if (Reg) {
|
|
|
|
DowngradedRegs.erase(Reg);
|
|
|
|
for (const unsigned *AS = tri_->getAliasSet(Reg); *AS; ++AS)
|
|
|
|
DowngradedRegs.erase(*AS);
|
|
|
|
}
|
|
|
|
}
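// Taken together (descriptive note added for clarity; not part of the original
// source): DowngradeRegister marks a just-allocated reload register and its
// aliases as less desirable so the next reload of the same stack slot can
// reuse it, and UpgradeRegister lifts that restriction once the register has
// actually been allocated again.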
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
struct LISorter {
|
|
|
|
bool operator()(LiveInterval* A, LiveInterval* B) {
|
|
|
|
return A->beginNumber() < B->beginNumber();
|
|
|
|
}
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
2004-11-18 03:42:27 +01:00
|
|
|
/// assignRegOrStackSlotAtInterval - assign a register if one is available, or
|
|
|
|
/// spill.
|
2009-08-22 22:30:53 +02:00
|
|
|
void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
|
|
|
|
DEBUG(errs() << "\tallocating current interval: ");
|
2004-08-04 11:46:26 +02:00
|
|
|
|
2008-04-03 18:40:27 +02:00
|
|
|
// This is an implicitly defined live interval, just assign any register.
|
2008-09-19 00:38:47 +02:00
|
|
|
const TargetRegisterClass *RC = mri_->getRegClass(cur->reg);
|
2008-04-03 18:40:27 +02:00
|
|
|
if (cur->empty()) {
|
2009-06-14 22:22:55 +02:00
|
|
|
unsigned physReg = vrm_->getRegAllocPref(cur->reg);
|
2008-04-03 18:40:27 +02:00
|
|
|
if (!physReg)
|
|
|
|
physReg = *RC->allocation_order_begin(*mf_);
|
2009-08-22 22:30:53 +02:00
|
|
|
DEBUG(errs() << tri_->getName(physReg) << '\n');
|
2008-04-03 18:40:27 +02:00
|
|
|
// Note the register is not really in use.
|
|
|
|
vrm_->assignVirt2Phys(cur->reg, physReg);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2009-05-01 03:03:49 +02:00
|
|
|
backUpRegUses();
|
2004-08-04 11:46:26 +02:00
|
|
|
|
2005-08-22 22:20:42 +02:00
|
|
|
std::vector<std::pair<unsigned, float> > SpillWeightsToAdd;
|
2004-11-18 05:13:02 +01:00
|
|
|
unsigned StartPosition = cur->beginNumber();
|
2005-08-24 00:27:31 +02:00
|
|
|
const TargetRegisterClass *RCLeader = RelatedRegClasses.getLeaderValue(RC);
|
2007-11-03 08:20:12 +01:00
|
|
|
|
2009-01-20 01:16:18 +01:00
|
|
|
// If start of this live interval is defined by a move instruction and its
|
|
|
|
// source is assigned a physical register that is compatible with the target
|
|
|
|
// register class, then we should try to assign it the same register.
|
2007-11-03 08:20:12 +01:00
|
|
|
// This can happen when the move is from a larger register class to a smaller
|
|
|
|
// one, e.g. X86::mov32to32_. These move instructions are not coalescable.
|
2009-06-14 22:22:55 +02:00
|
|
|
if (!vrm_->getRegAllocPref(cur->reg) && cur->hasAtLeastOneValue()) {
|
2009-01-20 01:16:18 +01:00
|
|
|
VNInfo *vni = cur->begin()->valno;
|
2009-06-17 23:01:20 +02:00
|
|
|
if (vni->def && !vni->isUnused() && vni->isDefAccurate()) {
|
2007-11-03 08:20:12 +01:00
|
|
|
MachineInstr *CopyMI = li_->getInstructionFromIndex(vni->def);
|
2009-01-20 20:12:24 +01:00
|
|
|
unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
|
|
|
|
if (CopyMI &&
|
|
|
|
tii_->isMoveInstr(*CopyMI, SrcReg, DstReg, SrcSubReg, DstSubReg)) {
|
2007-11-03 08:20:12 +01:00
|
|
|
unsigned Reg = 0;
|
2008-02-10 19:45:23 +01:00
|
|
|
if (TargetRegisterInfo::isPhysicalRegister(SrcReg))
|
2007-11-03 08:20:12 +01:00
|
|
|
Reg = SrcReg;
|
|
|
|
else if (vrm_->isAssignedReg(SrcReg))
|
|
|
|
Reg = vrm_->getPhys(SrcReg);
|
2009-04-29 02:42:27 +02:00
|
|
|
if (Reg) {
|
|
|
|
if (SrcSubReg)
|
|
|
|
Reg = tri_->getSubReg(Reg, SrcSubReg);
|
|
|
|
if (DstSubReg)
|
|
|
|
Reg = tri_->getMatchingSuperReg(Reg, DstSubReg, RC);
|
|
|
|
if (Reg && allocatableRegs_[Reg] && RC->contains(Reg))
|
2009-06-15 10:28:29 +02:00
|
|
|
mri_->setRegAllocationHint(cur->reg, 0, Reg);
|
2009-04-29 02:42:27 +02:00
|
|
|
}
|
2007-11-03 08:20:12 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-05-01 03:03:49 +02:00
|
|
|
// For every interval in inactive we overlap with, mark the
|
2005-08-22 22:20:42 +02:00
|
|
|
// register as not free and update spill weights.
|
2004-08-04 11:46:26 +02:00
|
|
|
for (IntervalPtrs::const_iterator i = inactive_.begin(),
|
|
|
|
e = inactive_.end(); i != e; ++i) {
|
2005-08-24 00:27:31 +02:00
|
|
|
unsigned Reg = i->first->reg;
|
2008-02-10 19:45:23 +01:00
|
|
|
assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
|
2005-08-24 00:27:31 +02:00
|
|
|
"Can only allocate virtual registers!");
|
2008-09-19 00:38:47 +02:00
|
|
|
const TargetRegisterClass *RegRC = mri_->getRegClass(Reg);
|
2005-08-24 00:27:31 +02:00
|
|
|
// If this is not in a related reg class to the register we're allocating,
|
|
|
|
// don't check it.
|
|
|
|
if (RelatedRegClasses.getLeaderValue(RegRC) == RCLeader &&
|
|
|
|
cur->overlapsFrom(*i->first, i->second-1)) {
|
|
|
|
Reg = vrm_->getPhys(Reg);
|
2009-05-01 03:03:49 +02:00
|
|
|
addRegUse(Reg);
|
2005-08-24 00:27:31 +02:00
|
|
|
SpillWeightsToAdd.push_back(std::make_pair(Reg, i->first->weight));
|
2003-11-20 04:32:25 +01:00
|
|
|
}
|
2004-08-04 11:46:26 +02:00
|
|
|
}
|
2005-08-22 22:59:30 +02:00
|
|
|
|
|
|
|
// Speculatively check to see if we can get a register right now. If not,
|
|
|
|
// we know we won't be able to by adding more constraints. If so, we can
|
|
|
|
// check to see if it is valid. Doing an exhaustive search of the fixed_ list
|
|
|
|
// is very bad (it contains all callee clobbered registers for any functions
|
|
|
|
// with a call), so we want to avoid doing that if possible.
|
|
|
|
unsigned physReg = getFreePhysReg(cur);
|
2008-03-11 08:19:34 +01:00
|
|
|
unsigned BestPhysReg = physReg;
|
2005-08-22 22:59:30 +02:00
|
|
|
if (physReg) {
|
|
|
|
// We got a register. However, if it's in the fixed_ list, we might
|
2005-08-30 23:03:36 +02:00
|
|
|
// conflict with it. Check to see if we conflict with it or any of its
|
|
|
|
// aliases.
|
2007-11-03 08:20:12 +01:00
|
|
|
SmallSet<unsigned, 8> RegAliases;
|
2008-02-10 19:45:23 +01:00
|
|
|
for (const unsigned *AS = tri_->getAliasSet(physReg); *AS; ++AS)
|
2005-08-30 23:03:36 +02:00
|
|
|
RegAliases.insert(*AS);
|
|
|
|
|
2005-08-22 22:59:30 +02:00
|
|
|
bool ConflictsWithFixed = false;
|
|
|
|
for (unsigned i = 0, e = fixed_.size(); i != e; ++i) {
|
2006-10-24 16:35:25 +02:00
|
|
|
IntervalPtr &IP = fixed_[i];
|
|
|
|
if (physReg == IP.first->reg || RegAliases.count(IP.first->reg)) {
|
2005-08-22 22:59:30 +02:00
|
|
|
// Okay, this reg is on the fixed list. Check to see if we actually
|
|
|
|
// conflict.
|
|
|
|
LiveInterval *I = IP.first;
|
|
|
|
if (I->endNumber() > StartPosition) {
|
|
|
|
LiveInterval::iterator II = I->advanceTo(IP.second, StartPosition);
|
|
|
|
IP.second = II;
|
|
|
|
if (II != I->begin() && II->start > StartPosition)
|
|
|
|
--II;
|
2005-08-30 23:03:36 +02:00
|
|
|
if (cur->overlapsFrom(*I, II)) {
|
2005-08-22 22:59:30 +02:00
|
|
|
ConflictsWithFixed = true;
|
2005-08-30 23:03:36 +02:00
|
|
|
break;
|
|
|
|
}
|
2005-08-22 22:59:30 +02:00
|
|
|
}
|
2004-11-18 05:33:31 +01:00
|
|
|
}
|
2004-02-06 19:08:18 +01:00
|
|
|
}
|
2005-08-22 22:59:30 +02:00
|
|
|
|
|
|
|
// Okay, the register picked by our speculative getFreePhysReg call turned
|
|
|
|
// out to be in use. Actually add all of the conflicting fixed registers to
|
2009-05-01 03:03:49 +02:00
|
|
|
// regUse_ so we can do an accurate query.
|
2005-08-22 22:59:30 +02:00
|
|
|
if (ConflictsWithFixed) {
|
2005-08-24 00:27:31 +02:00
|
|
|
// For every interval in fixed we overlap with, mark the register as not
|
|
|
|
// free and update spill weights.
|
2005-08-22 22:59:30 +02:00
|
|
|
for (unsigned i = 0, e = fixed_.size(); i != e; ++i) {
|
|
|
|
IntervalPtr &IP = fixed_[i];
|
|
|
|
LiveInterval *I = IP.first;
|
2005-08-24 00:27:31 +02:00
|
|
|
|
|
|
|
const TargetRegisterClass *RegRC = OneClassForEachPhysReg[I->reg];
|
|
|
|
if (RelatedRegClasses.getLeaderValue(RegRC) == RCLeader &&
|
|
|
|
I->endNumber() > StartPosition) {
|
2005-08-22 22:59:30 +02:00
|
|
|
LiveInterval::iterator II = I->advanceTo(IP.second, StartPosition);
|
|
|
|
IP.second = II;
|
|
|
|
if (II != I->begin() && II->start > StartPosition)
|
|
|
|
--II;
|
|
|
|
if (cur->overlapsFrom(*I, II)) {
|
|
|
|
unsigned reg = I->reg;
|
2009-05-01 03:03:49 +02:00
|
|
|
addRegUse(reg);
|
2005-08-22 22:59:30 +02:00
|
|
|
SpillWeightsToAdd.push_back(std::make_pair(reg, I->weight));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2004-08-04 11:46:26 +02:00
|
|
|
|
2009-05-01 03:03:49 +02:00
|
|
|
// Using the newly updated regUse_ object, which includes conflicts in the
|
2005-08-22 22:59:30 +02:00
|
|
|
// future, see if there are any registers available.
|
|
|
|
physReg = getFreePhysReg(cur);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2005-08-22 22:20:42 +02:00
|
|
|
// Restore the physical register tracker, removing information about the
|
|
|
|
// future.
|
2009-05-01 03:03:49 +02:00
|
|
|
restoreRegUses();
|
2005-08-22 22:20:42 +02:00
|
|
|
|
2009-05-01 03:03:49 +02:00
|
|
|
// If we find a free register, we are done: assign this virtual to
|
2004-08-04 11:46:26 +02:00
|
|
|
// the free physical register and add this interval to the active
|
|
|
|
// list.
|
|
|
|
if (physReg) {
|
2009-08-22 22:30:53 +02:00
|
|
|
DEBUG(errs() << tri_->getName(physReg) << '\n');
|
2004-08-04 11:46:26 +02:00
|
|
|
vrm_->assignVirt2Phys(cur->reg, physReg);
|
2009-05-01 03:03:49 +02:00
|
|
|
addRegUse(physReg);
|
2004-11-18 03:42:27 +01:00
|
|
|
active_.push_back(std::make_pair(cur, cur->begin()));
|
2004-08-04 11:46:26 +02:00
|
|
|
handled_.push_back(cur);
|
2009-04-20 10:01:12 +02:00
|
|
|
|
|
|
|
// "Upgrade" the physical register since it has been allocated.
|
|
|
|
UpgradeRegister(physReg);
|
|
|
|
if (LiveInterval *NextReloadLI = hasNextReloadInterval(cur)) {
|
|
|
|
// "Downgrade" physReg to try to keep physReg from being allocated until
|
|
|
|
// the next reload from the same SS is allocated.
|
2009-06-15 10:28:29 +02:00
|
|
|
mri_->setRegAllocationHint(NextReloadLI->reg, 0, physReg);
|
2009-04-20 10:01:12 +02:00
|
|
|
DowngradeRegister(cur, physReg);
|
|
|
|
}
|
2004-08-04 11:46:26 +02:00
|
|
|
return;
|
|
|
|
}
|
2009-08-22 22:30:53 +02:00
|
|
|
DEBUG(errs() << "no free registers\n");
|
2004-08-04 11:46:26 +02:00
|
|
|
|
2005-08-22 22:20:42 +02:00
|
|
|
// Compile the spill weights into an array that is better for scanning.
|
2008-06-20 23:45:16 +02:00
|
|
|
std::vector<float> SpillWeights(tri_->getNumRegs(), 0.0f);
|
2005-08-22 22:20:42 +02:00
|
|
|
for (std::vector<std::pair<unsigned, float> >::iterator
|
|
|
|
I = SpillWeightsToAdd.begin(), E = SpillWeightsToAdd.end(); I != E; ++I)
|
2009-03-23 23:57:19 +01:00
|
|
|
updateSpillWeights(SpillWeights, I->first, I->second, RC);
|
2005-08-22 22:20:42 +02:00
|
|
|
|
|
|
|
// for each interval in active, update spill weights.
|
|
|
|
for (IntervalPtrs::const_iterator i = active_.begin(), e = active_.end();
|
|
|
|
i != e; ++i) {
|
|
|
|
unsigned reg = i->first->reg;
|
2008-02-10 19:45:23 +01:00
|
|
|
assert(TargetRegisterInfo::isVirtualRegister(reg) &&
|
2005-08-22 22:20:42 +02:00
|
|
|
"Can only allocate virtual registers!");
|
|
|
|
reg = vrm_->getPhys(reg);
|
2009-03-23 23:57:19 +01:00
|
|
|
updateSpillWeights(SpillWeights, reg, i->first->weight, RC);
|
2005-08-22 22:20:42 +02:00
|
|
|
}
|
|
|
|
|
2009-08-22 22:30:53 +02:00
|
|
|
DEBUG(errs() << "\tassigning stack slot at interval "<< *cur << ":\n");
|
2004-08-04 11:46:26 +02:00
|
|
|
|
2006-03-26 00:00:56 +01:00
|
|
|
// Find a register to spill.
|
2006-11-07 13:25:45 +01:00
|
|
|
float minWeight = HUGE_VALF;
|
2009-06-14 22:22:55 +02:00
|
|
|
unsigned minReg = 0;
|
2008-06-20 23:45:16 +02:00
|
|
|
|
|
|
|
bool Found = false;
|
|
|
|
std::vector<std::pair<unsigned,float> > RegsWeights;
|
2007-04-17 22:32:26 +02:00
|
|
|
if (!minReg || SpillWeights[minReg] == HUGE_VALF)
|
|
|
|
for (TargetRegisterClass::iterator i = RC->allocation_order_begin(*mf_),
|
|
|
|
e = RC->allocation_order_end(*mf_); i != e; ++i) {
|
|
|
|
unsigned reg = *i;
|
2008-06-20 23:45:16 +02:00
|
|
|
float regWeight = SpillWeights[reg];
|
|
|
|
if (minWeight > regWeight)
|
|
|
|
Found = true;
|
|
|
|
RegsWeights.push_back(std::make_pair(reg, regWeight));
|
2003-11-20 04:32:25 +01:00
|
|
|
}
|
2006-03-26 00:00:56 +01:00
|
|
|
|
|
|
|
// If we didn't find a register that is spillable, try aliases?
|
2008-06-20 23:45:16 +02:00
|
|
|
if (!Found) {
|
2006-05-12 21:07:46 +02:00
|
|
|
for (TargetRegisterClass::iterator i = RC->allocation_order_begin(*mf_),
|
|
|
|
e = RC->allocation_order_end(*mf_); i != e; ++i) {
|
|
|
|
unsigned reg = *i;
|
|
|
|
// No need to worry if the alias register size < regsize of RC.
|
|
|
|
// We are going to spill all registers that alias it anyway.
|
2008-06-20 23:45:16 +02:00
|
|
|
for (const unsigned* as = tri_->getAliasSet(reg); *as; ++as)
|
|
|
|
RegsWeights.push_back(std::make_pair(*as, SpillWeights[*as]));
|
2006-05-12 21:07:46 +02:00
|
|
|
}
|
2008-06-20 23:45:16 +02:00
|
|
|
}
|
2006-05-12 21:07:46 +02:00
|
|
|
|
2008-06-20 23:45:16 +02:00
|
|
|
// Sort all potential spill candidates by weight.
|
|
|
|
std::sort(RegsWeights.begin(), RegsWeights.end(), WeightCompare());
|
|
|
|
minReg = RegsWeights[0].first;
|
|
|
|
minWeight = RegsWeights[0].second;
|
|
|
|
if (minWeight == HUGE_VALF) {
|
2006-05-12 21:07:46 +02:00
|
|
|
// All registers must have inf weight. Just grab one!
|
2008-06-20 23:45:16 +02:00
|
|
|
minReg = BestPhysReg ? BestPhysReg : *RC->allocation_order_begin(*mf_);
|
2008-07-23 00:46:49 +02:00
|
|
|
if (cur->weight == HUGE_VALF ||
|
2008-09-20 03:28:05 +02:00
|
|
|
li_->getApproximateInstructionCount(*cur) == 0) {
|
2008-06-20 23:45:16 +02:00
|
|
|
// Spill a physical register around defs and uses.
|
2009-04-20 10:01:12 +02:00
|
|
|
if (li_->spillPhysRegAroundRegDefsUses(*cur, minReg, *vrm_)) {
|
2009-04-29 09:16:34 +02:00
|
|
|
// spillPhysRegAroundRegDefsUses may have invalidated iterators stored
|
|
|
|
// in fixed_. Reset them.
|
|
|
|
for (unsigned i = 0, e = fixed_.size(); i != e; ++i) {
|
|
|
|
IntervalPtr &IP = fixed_[i];
|
|
|
|
LiveInterval *I = IP.first;
|
|
|
|
if (I->reg == minReg || tri_->isSubRegister(minReg, I->reg))
|
|
|
|
IP.second = I->advanceTo(I->begin(), StartPosition);
|
|
|
|
}
|
|
|
|
|
2009-04-20 10:01:12 +02:00
|
|
|
DowngradedRegs.clear();
|
2009-03-23 19:24:37 +01:00
|
|
|
assignRegOrStackSlotAtInterval(cur);
|
2009-04-20 10:01:12 +02:00
|
|
|
} else {
|
2009-07-11 15:10:19 +02:00
|
|
|
llvm_report_error("Ran out of registers during register allocation!");
|
2009-03-23 19:24:37 +01:00
|
|
|
}
|
2008-09-20 03:28:05 +02:00
|
|
|
return;
|
|
|
|
}
|
2006-05-12 21:07:46 +02:00
|
|
|
}
|
2008-06-20 23:45:16 +02:00
|
|
|
|
|
|
|
// Find up to 3 registers to consider as spill candidates.
|
|
|
|
unsigned LastCandidate = RegsWeights.size() >= 3 ? 3 : 1;
|
|
|
|
while (LastCandidate > 1) {
|
|
|
|
if (weightsAreClose(RegsWeights[LastCandidate-1].second, minWeight))
|
|
|
|
break;
|
|
|
|
--LastCandidate;
|
|
|
|
}
|
|
|
|
|
2009-08-22 22:30:53 +02:00
|
|
|
DEBUG({
|
|
|
|
errs() << "\t\tregister(s) with min weight(s): ";
|
|
|
|
|
|
|
|
for (unsigned i = 0; i != LastCandidate; ++i)
|
|
|
|
errs() << tri_->getName(RegsWeights[i].first)
|
|
|
|
<< " (" << RegsWeights[i].second << ")\n";
|
|
|
|
});
|
2004-08-04 11:46:26 +02:00
|
|
|
|
2009-04-20 10:01:12 +02:00
|
|
|
// If the current interval has the minimum weight, we need to spill it and
|
2004-08-04 11:46:26 +02:00
|
|
|
// add any added intervals back to unhandled, and restart
|
|
|
|
// linearscan.
|
2006-11-07 13:25:45 +01:00
|
|
|
if (cur->weight != HUGE_VALF && cur->weight <= minWeight) {
|
2009-08-22 22:30:53 +02:00
|
|
|
DEBUG(errs() << "\t\t\tspilling(c): " << *cur << '\n');
|
2008-09-30 17:44:16 +02:00
|
|
|
SmallVector<LiveInterval*, 8> spillIs;
|
2009-05-18 21:03:16 +02:00
|
|
|
std::vector<LiveInterval*> added;
|
|
|
|
|
|
|
|
if (!NewSpillFramework) {
|
|
|
|
added = li_->addIntervalsForSpills(*cur, spillIs, loopInfo, *vrm_);
|
2009-06-02 18:53:25 +02:00
|
|
|
} else {
|
2009-05-18 21:03:16 +02:00
|
|
|
added = spiller_->spill(cur);
|
|
|
|
}
|
|
|
|
|
2009-04-20 10:01:12 +02:00
|
|
|
std::sort(added.begin(), added.end(), LISorter());
|
2009-05-03 20:32:42 +02:00
|
|
|
addStackInterval(cur, ls_, li_, mri_, *vrm_);
|
2004-08-04 11:46:26 +02:00
|
|
|
if (added.empty())
|
|
|
|
return; // Early exit if all spills were folded.
|
|
|
|
|
2009-04-20 10:01:12 +02:00
|
|
|
// Merge added with unhandled. Note that we have already sorted
|
|
|
|
// intervals returned by addIntervalsForSpills by their starting
|
2004-08-04 11:46:26 +02:00
|
|
|
// point.
|
2009-04-20 19:23:48 +02:00
|
|
|
// This also updates the NextReloadMap. That is, it adds a mapping from a
|
|
|
|
// register defined by a reload from SS to the next reload from SS in the
|
|
|
|
// same basic block.
|
|
|
|
MachineBasicBlock *LastReloadMBB = 0;
|
|
|
|
LiveInterval *LastReload = 0;
|
|
|
|
int LastReloadSS = VirtRegMap::NO_STACK_SLOT;
|
|
|
|
for (unsigned i = 0, e = added.size(); i != e; ++i) {
|
|
|
|
LiveInterval *ReloadLi = added[i];
|
|
|
|
if (ReloadLi->weight == HUGE_VALF &&
|
|
|
|
li_->getApproximateInstructionCount(*ReloadLi) == 0) {
|
|
|
|
unsigned ReloadIdx = ReloadLi->beginNumber();
|
|
|
|
MachineBasicBlock *ReloadMBB = li_->getMBBFromIndex(ReloadIdx);
|
|
|
|
int ReloadSS = vrm_->getStackSlot(ReloadLi->reg);
|
|
|
|
if (LastReloadMBB == ReloadMBB && LastReloadSS == ReloadSS) {
|
|
|
|
// Last reload of same SS is in the same MBB. We want to try to
|
|
|
|
// allocate both reloads the same register and make sure the reg
|
|
|
|
// isn't clobbered in between if at all possible.
|
|
|
|
assert(LastReload->beginNumber() < ReloadIdx);
|
|
|
|
NextReloadMap.insert(std::make_pair(LastReload->reg, ReloadLi->reg));
|
|
|
|
}
|
|
|
|
LastReloadMBB = ReloadMBB;
|
|
|
|
LastReload = ReloadLi;
|
|
|
|
LastReloadSS = ReloadSS;
|
|
|
|
}
|
|
|
|
unhandled_.push(ReloadLi);
|
|
|
|
}
|
2004-08-04 11:46:26 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2004-11-18 04:49:30 +01:00
|
|
|
++NumBacktracks;
|
|
|
|
|
2009-04-20 10:01:12 +02:00
|
|
|
// Push the current interval back to unhandled since we are going
|
2004-08-04 11:46:26 +02:00
|
|
|
// to re-run at least this iteration. Since we didn't modify it, it
|
|
|
|
// should go back to the front of the list.
|
|
|
|
unhandled_.push(cur);
|
|
|
|
|
2008-02-10 19:45:23 +01:00
|
|
|
assert(TargetRegisterInfo::isPhysicalRegister(minReg) &&
|
2004-08-04 11:46:26 +02:00
|
|
|
"did not choose a register to spill?");
|
2004-11-18 04:49:30 +01:00
|
|
|
|
2008-06-20 23:45:16 +02:00
|
|
|
// We spill all intervals aliasing the register with
|
|
|
|
// minimum weight, roll back to the interval with the earliest
|
|
|
|
// start point and let the linear scan algorithm run again
|
|
|
|
SmallVector<LiveInterval*, 8> spillIs;
|
|
|
|
|
|
|
|
// Determine which intervals have to be spilled.
|
|
|
|
findIntervalsToSpill(cur, RegsWeights, LastCandidate, spillIs);
|
2004-08-04 11:46:26 +02:00
|
|
|
|
2008-06-20 23:45:16 +02:00
|
|
|
// Set of spilled vregs (used later to roll back properly)
|
|
|
|
SmallSet<unsigned, 8> spilled;
|
|
|
|
|
|
|
|
// The earliest start of a Spilled interval indicates up to where
|
2004-08-04 11:46:26 +02:00
|
|
|
// in handled we need to roll back
|
2009-06-02 18:53:25 +02:00
|
|
|
|
|
|
|
LiveInterval *earliestStartInterval = cur;
|
2004-08-04 11:46:26 +02:00
|
|
|
|
2008-06-20 23:45:16 +02:00
|
|
|
// Spill live intervals of virtual regs mapped to the physical register we
|
2004-11-18 04:49:30 +01:00
|
|
|
// want to clear (and its aliases). We only spill those that overlap with the
|
|
|
|
// current interval, as the rest do not affect its allocation. We also keep
|
|
|
|
// track of the earliest start of all spilled live intervals since this will
|
|
|
|
// mark our rollback point.
|
2008-06-20 23:45:16 +02:00
|
|
|
std::vector<LiveInterval*> added;
|
|
|
|
while (!spillIs.empty()) {
|
|
|
|
LiveInterval *sli = spillIs.back();
|
|
|
|
spillIs.pop_back();
|
2009-08-22 22:30:53 +02:00
|
|
|
DEBUG(errs() << "\t\t\tspilling(a): " << *sli << '\n');
|
2009-06-02 18:53:25 +02:00
|
|
|
earliestStartInterval =
|
|
|
|
(earliestStartInterval->beginNumber() < sli->beginNumber()) ?
|
|
|
|
earliestStartInterval : sli;
|
2009-06-04 03:04:22 +02:00
|
|
|
|
2009-06-02 18:53:25 +02:00
|
|
|
std::vector<LiveInterval*> newIs;
|
|
|
|
if (!NewSpillFramework) {
|
|
|
|
newIs = li_->addIntervalsForSpills(*sli, spillIs, loopInfo, *vrm_);
|
|
|
|
} else {
|
|
|
|
newIs = spiller_->spill(sli);
|
|
|
|
}
|
2009-05-03 20:32:42 +02:00
|
|
|
addStackInterval(sli, ls_, li_, mri_, *vrm_);
|
2008-06-20 23:45:16 +02:00
|
|
|
std::copy(newIs.begin(), newIs.end(), std::back_inserter(added));
|
|
|
|
spilled.insert(sli->reg);
|
2004-08-04 11:46:26 +02:00
|
|
|
}
|
|
|
|
|
2009-06-04 03:04:22 +02:00
|
|
|
unsigned earliestStart = earliestStartInterval->beginNumber();
|
2009-06-02 18:53:25 +02:00
|
|
|
|
2009-08-22 22:30:53 +02:00
|
|
|
DEBUG(errs() << "\t\trolling back to: " << earliestStart << '\n');
|
2004-11-18 03:42:27 +01:00
|
|
|
|
|
|
|
// Scan handled in reverse order up to the earliest start of a
|
2004-08-04 11:46:26 +02:00
|
|
|
// spilled live interval and undo each one, restoring the state of
|
2004-11-18 03:42:27 +01:00
|
|
|
// unhandled.
|
2004-08-04 11:46:26 +02:00
|
|
|
while (!handled_.empty()) {
|
|
|
|
LiveInterval* i = handled_.back();
|
2004-11-18 03:42:27 +01:00
|
|
|
// If this interval starts before t we are done.
|
2004-11-18 02:29:39 +01:00
|
|
|
if (i->beginNumber() < earliestStart)
|
2004-08-04 11:46:26 +02:00
|
|
|
break;
|
2009-08-22 22:30:53 +02:00
|
|
|
DEBUG(errs() << "\t\t\tundo changes for: " << *i << '\n');
|
2004-08-04 11:46:26 +02:00
|
|
|
handled_.pop_back();
|
2004-11-18 03:42:27 +01:00
|
|
|
|
|
|
|
// When undoing a live interval allocation we must know if it is active or
|
2009-05-01 03:03:49 +02:00
|
|
|
// inactive to properly update regUse_ and the VirtRegMap.
|
2004-08-04 11:46:26 +02:00
|
|
|
IntervalPtrs::iterator it;
|
2004-11-18 03:42:27 +01:00
|
|
|
if ((it = FindIntervalInVector(active_, i)) != active_.end()) {
|
2004-08-04 11:46:26 +02:00
|
|
|
active_.erase(it);
|
2008-02-10 19:45:23 +01:00
|
|
|
assert(!TargetRegisterInfo::isPhysicalRegister(i->reg));
|
2006-02-23 07:44:17 +01:00
|
|
|
if (!spilled.count(i->reg))
|
2004-08-04 11:46:26 +02:00
|
|
|
unhandled_.push(i);
|
2009-05-01 03:03:49 +02:00
|
|
|
delRegUse(vrm_->getPhys(i->reg));
|
2006-02-23 07:44:17 +01:00
|
|
|
vrm_->clearVirt(i->reg);
|
2004-11-18 03:42:27 +01:00
|
|
|
} else if ((it = FindIntervalInVector(inactive_, i)) != inactive_.end()) {
|
2004-08-04 11:46:26 +02:00
|
|
|
inactive_.erase(it);
|
2008-02-10 19:45:23 +01:00
|
|
|
assert(!TargetRegisterInfo::isPhysicalRegister(i->reg));
|
2006-02-23 07:44:17 +01:00
|
|
|
if (!spilled.count(i->reg))
|
2004-08-04 11:46:26 +02:00
|
|
|
unhandled_.push(i);
|
2006-02-23 07:44:17 +01:00
|
|
|
vrm_->clearVirt(i->reg);
|
2004-11-18 07:01:45 +01:00
|
|
|
} else {
|
2008-02-10 19:45:23 +01:00
|
|
|
assert(TargetRegisterInfo::isVirtualRegister(i->reg) &&
|
2004-11-18 07:01:45 +01:00
|
|
|
"Can only allocate virtual registers!");
|
|
|
|
vrm_->clearVirt(i->reg);
|
2004-08-04 11:46:26 +02:00
|
|
|
unhandled_.push(i);
|
2004-02-15 11:24:21 +01:00
|
|
|
}
|
2007-11-04 09:32:21 +01:00
|
|
|
|
2009-04-20 10:01:12 +02:00
|
|
|
DenseMap<unsigned, unsigned>::iterator ii = DowngradeMap.find(i->reg);
|
|
|
|
if (ii == DowngradeMap.end())
|
|
|
|
// If the interval has a preference, it must be defined by a copy. Clear the
|
|
|
|
// preference now since the source interval allocation may have been
|
|
|
|
// undone as well.
|
2009-06-15 10:28:29 +02:00
|
|
|
mri_->setRegAllocationHint(i->reg, 0, 0);
|
2009-04-20 10:01:12 +02:00
|
|
|
else {
|
|
|
|
UpgradeRegister(ii->second);
|
|
|
|
}
|
2004-08-04 11:46:26 +02:00
|
|
|
}
|
|
|
|
|
2004-11-18 04:49:30 +01:00
|
|
|
// Rewind the iterators in the active, inactive, and fixed lists back to the
|
|
|
|
// point we reverted to.
|
|
|
|
RevertVectorIteratorsTo(active_, earliestStart);
|
|
|
|
RevertVectorIteratorsTo(inactive_, earliestStart);
|
|
|
|
RevertVectorIteratorsTo(fixed_, earliestStart);
|
|
|
|
|
2009-04-20 10:01:12 +02:00
|
|
|
// Scan the rest and undo each interval that expired after t and
|
2004-08-04 11:46:26 +02:00
|
|
|
// insert it in active (the next iteration of the algorithm will
|
|
|
|
// put it in inactive if required)
|
2004-11-18 03:42:27 +01:00
|
|
|
for (unsigned i = 0, e = handled_.size(); i != e; ++i) {
|
|
|
|
LiveInterval *HI = handled_[i];
|
|
|
|
if (!HI->expiredAt(earliestStart) &&
|
|
|
|
HI->expiredAt(cur->beginNumber())) {
|
2009-08-22 22:30:53 +02:00
|
|
|
DEBUG(errs() << "\t\t\tundo changes for: " << *HI << '\n');
|
2004-11-18 03:42:27 +01:00
|
|
|
active_.push_back(std::make_pair(HI, HI->begin()));
|
2008-02-10 19:45:23 +01:00
|
|
|
assert(!TargetRegisterInfo::isPhysicalRegister(HI->reg));
|
2009-05-01 03:03:49 +02:00
|
|
|
addRegUse(vrm_->getPhys(HI->reg));
|
2004-02-06 19:08:18 +01:00
|
|
|
}
|
2004-08-04 11:46:26 +02:00
|
|
|
}
|
2004-05-30 09:24:39 +02:00
|
|
|
|
2009-04-20 10:01:12 +02:00
|
|
|
// Merge added with unhandled.
|
|
|
|
// This also updates the NextReloadMap. That is, it adds a mapping from a
|
|
|
|
// register defined by a reload from SS to the next reload from SS in the
|
|
|
|
// same basic block.
|
|
|
|
MachineBasicBlock *LastReloadMBB = 0;
|
|
|
|
LiveInterval *LastReload = 0;
|
|
|
|
int LastReloadSS = VirtRegMap::NO_STACK_SLOT;
|
|
|
|
std::sort(added.begin(), added.end(), LISorter());
|
|
|
|
for (unsigned i = 0, e = added.size(); i != e; ++i) {
|
|
|
|
LiveInterval *ReloadLi = added[i];
|
|
|
|
if (ReloadLi->weight == HUGE_VALF &&
|
|
|
|
li_->getApproximateInstructionCount(*ReloadLi) == 0) {
|
|
|
|
unsigned ReloadIdx = ReloadLi->beginNumber();
|
|
|
|
MachineBasicBlock *ReloadMBB = li_->getMBBFromIndex(ReloadIdx);
|
|
|
|
int ReloadSS = vrm_->getStackSlot(ReloadLi->reg);
|
|
|
|
if (LastReloadMBB == ReloadMBB && LastReloadSS == ReloadSS) {
|
|
|
|
// Last reload of same SS is in the same MBB. We want to try to
|
|
|
|
// allocate both reloads the same register and make sure the reg
|
|
|
|
// isn't clobbered in between if at all possible.
|
|
|
|
assert(LastReload->beginNumber() < ReloadIdx);
|
|
|
|
NextReloadMap.insert(std::make_pair(LastReload->reg, ReloadLi->reg));
|
|
|
|
}
|
|
|
|
LastReloadMBB = ReloadMBB;
|
|
|
|
LastReload = ReloadLi;
|
|
|
|
LastReloadSS = ReloadSS;
|
|
|
|
}
|
|
|
|
unhandled_.push(ReloadLi);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-06-15 10:28:29 +02:00
|
|
|
unsigned RALinScan::getFreePhysReg(LiveInterval* cur,
|
|
|
|
const TargetRegisterClass *RC,
|
2009-04-20 10:01:12 +02:00
|
|
|
unsigned MaxInactiveCount,
|
|
|
|
SmallVector<unsigned, 256> &inactiveCounts,
|
|
|
|
bool SkipDGRegs) {
|
|
|
|
unsigned FreeReg = 0;
|
|
|
|
unsigned FreeRegInactiveCount = 0;
|
|
|
|
|
2009-06-18 04:04:01 +02:00
|
|
|
std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(cur->reg);
|
|
|
|
// Resolve second part of the hint (if possible) given the current allocation.
|
|
|
|
unsigned physReg = Hint.second;
|
|
|
|
if (physReg &&
|
|
|
|
TargetRegisterInfo::isVirtualRegister(physReg) && vrm_->hasPhys(physReg))
|
|
|
|
physReg = vrm_->getPhys(physReg);
|
|
|
|
|
2009-06-15 10:28:29 +02:00
|
|
|
TargetRegisterClass::iterator I, E;
|
2009-06-18 04:04:01 +02:00
|
|
|
tie(I, E) = tri_->getAllocationOrder(RC, Hint.first, physReg, *mf_);
|
2009-04-20 10:01:12 +02:00
|
|
|
assert(I != E && "No allocatable register in this register class!");
|
|
|
|
|
|
|
|
// Scan for the first available register.
|
|
|
|
for (; I != E; ++I) {
|
|
|
|
unsigned Reg = *I;
|
|
|
|
// Ignore "downgraded" registers.
|
|
|
|
if (SkipDGRegs && DowngradedRegs.count(Reg))
|
|
|
|
continue;
|
2009-05-01 03:03:49 +02:00
|
|
|
if (isRegAvail(Reg)) {
|
2009-04-20 10:01:12 +02:00
|
|
|
      FreeReg = Reg;
      if (FreeReg < inactiveCounts.size())
        FreeRegInactiveCount = inactiveCounts[FreeReg];
      else
        FreeRegInactiveCount = 0;
      break;
    }
  }

  // If there are no free regs, or if this reg has the max inactive count,
  // return this register.
  if (FreeReg == 0 || FreeRegInactiveCount == MaxInactiveCount)
    return FreeReg;

  // Continue scanning the registers, looking for the one with the highest
  // inactive count.  Alkis found that this reduced register pressure very
  // slightly on X86 (in rev 1.94 of this file), though this should probably be
  // reevaluated now.
  for (; I != E; ++I) {
    unsigned Reg = *I;
    // Ignore "downgraded" registers.
    if (SkipDGRegs && DowngradedRegs.count(Reg))
      continue;
    if (isRegAvail(Reg) && Reg < inactiveCounts.size() &&
        FreeRegInactiveCount < inactiveCounts[Reg]) {
      FreeReg = Reg;
      FreeRegInactiveCount = inactiveCounts[Reg];
      if (FreeRegInactiveCount == MaxInactiveCount)
        break;    // We found the one with the max inactive count.
    }
  }

  return FreeReg;
}

/// getFreePhysReg - return a free physical register for this virtual register
/// interval if we have one, otherwise return 0.
unsigned RALinScan::getFreePhysReg(LiveInterval *cur) {
  SmallVector<unsigned, 256> inactiveCounts;
  unsigned MaxInactiveCount = 0;

  const TargetRegisterClass *RC = mri_->getRegClass(cur->reg);
  const TargetRegisterClass *RCLeader = RelatedRegClasses.getLeaderValue(RC);
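
  // Count, for each physical register, how many inactive intervals from a
  // related register class are currently assigned to it.  The scan in the
  // helper above prefers the free register with the highest inactive count,
  // a heuristic noted there to slightly reduce register pressure.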
  for (IntervalPtrs::iterator i = inactive_.begin(), e = inactive_.end();
       i != e; ++i) {
    unsigned reg = i->first->reg;
    assert(TargetRegisterInfo::isVirtualRegister(reg) &&
           "Can only allocate virtual registers!");

    // If this is not in a related reg class to the register we're allocating,
    // don't check it.
    const TargetRegisterClass *RegRC = mri_->getRegClass(reg);
    if (RelatedRegClasses.getLeaderValue(RegRC) == RCLeader) {
      reg = vrm_->getPhys(reg);
      if (inactiveCounts.size() <= reg)
        inactiveCounts.resize(reg+1);
      ++inactiveCounts[reg];
      MaxInactiveCount = std::max(MaxInactiveCount, inactiveCounts[reg]);
    }
  }

  // If copy coalescer has assigned a "preferred" register, check if it's
  // available first.
  unsigned Preference = vrm_->getRegAllocPref(cur->reg);
  if (Preference) {
    DEBUG(errs() << "(preferred: " << tri_->getName(Preference) << ") ");
    if (isRegAvail(Preference) &&
        RC->contains(Preference))
      return Preference;
  }
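
  // Run the free-register scan twice: first skipping any "downgraded"
  // registers so pending reloads in this block can keep their values, then,
  // if that finds nothing, allowing them as well.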
  if (!DowngradedRegs.empty()) {
    unsigned FreeReg = getFreePhysReg(cur, RC, MaxInactiveCount, inactiveCounts,
                                      true);
    if (FreeReg)
      return FreeReg;
  }
  return getFreePhysReg(cur, RC, MaxInactiveCount, inactiveCounts, false);
}

FunctionPass* llvm::createLinearScanRegisterAllocator() {
  return new RALinScan();
}
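
// A minimal usage sketch (assumed client code, not part of this file): the
// factory above returns an ordinary FunctionPass, so a codegen pass pipeline
// can add it directly, e.g.
//
//   FunctionPass *RA = createLinearScanRegisterAllocator();
//   PM.add(RA);   // PM is a hypothetical PassManager already set up for codegen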