//===-- SimpleRegisterCoalescing.cpp - Register Coalescing ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a simple register coalescing pass that attempts to
// aggressively coalesce every register copy that it can.
//
//===----------------------------------------------------------------------===//
|
2007-08-01 00:37:44 +02:00
|
|
|
#define DEBUG_TYPE "regcoalescing"
|
2007-11-05 18:41:38 +01:00
|
|
|
#include "SimpleRegisterCoalescing.h"
|
2007-06-08 19:18:56 +02:00
|
|
|
#include "VirtRegMap.h"
|
2007-11-05 18:41:38 +01:00
|
|
|
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
|
2007-06-08 19:18:56 +02:00
|
|
|
#include "llvm/Value.h"
|
2009-10-10 01:27:56 +02:00
|
|
|
#include "llvm/Analysis/AliasAnalysis.h"
|
2007-06-08 19:18:56 +02:00
|
|
|
#include "llvm/CodeGen/MachineFrameInfo.h"
|
|
|
|
#include "llvm/CodeGen/MachineInstr.h"
|
2007-12-11 03:09:15 +01:00
|
|
|
#include "llvm/CodeGen/MachineLoopInfo.h"
|
2007-12-31 05:13:23 +01:00
|
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
2007-06-08 19:18:56 +02:00
|
|
|
#include "llvm/CodeGen/Passes.h"
|
2007-09-06 18:18:45 +02:00
|
|
|
#include "llvm/CodeGen/RegisterCoalescer.h"
|
2007-06-08 19:18:56 +02:00
|
|
|
#include "llvm/Target/TargetInstrInfo.h"
|
|
|
|
#include "llvm/Target/TargetMachine.h"
|
2008-10-07 22:22:28 +02:00
|
|
|
#include "llvm/Target/TargetOptions.h"
|
2007-06-08 19:18:56 +02:00
|
|
|
#include "llvm/Support/CommandLine.h"
|
|
|
|
#include "llvm/Support/Debug.h"
|
2009-07-11 22:10:48 +02:00
|
|
|
#include "llvm/Support/ErrorHandling.h"
|
2009-07-25 02:23:56 +02:00
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2007-06-08 19:18:56 +02:00
|
|
|
#include "llvm/ADT/SmallSet.h"
|
|
|
|
#include "llvm/ADT/Statistic.h"
|
|
|
|
#include "llvm/ADT/STLExtras.h"
|
|
|
|
#include <algorithm>
|
|
|
|
#include <cmath>
|
|
|
|
using namespace llvm;
|
|
|
|
|
|
|
|
// Pass-wide statistics, reported with -stats.
STATISTIC(numJoins    , "Number of interval joins performed");
STATISTIC(numCrossRCs , "Number of cross class joins performed");
STATISTIC(numCommutes , "Number of instruction commuting performed");
STATISTIC(numExtends  , "Number of copies extended");
STATISTIC(NumReMats   , "Number of instructions re-materialized");
STATISTIC(numPeep     , "Number of identity moves eliminated after coalescing");
STATISTIC(numAborts   , "Number of times interval joining aborted");
STATISTIC(numDeadValNo, "Number of valno def marked dead");
|
2007-06-08 19:18:56 +02:00
|
|
|
|
|
|
|
char SimpleRegisterCoalescing::ID = 0;
|
2008-05-13 02:00:25 +02:00
|
|
|
static cl::opt<bool>
|
|
|
|
EnableJoining("join-liveintervals",
|
|
|
|
cl::desc("Coalesce copies (default=true)"),
|
|
|
|
cl::init(true));
|
2007-06-08 19:18:56 +02:00
|
|
|
|
2008-06-19 03:39:21 +02:00
|
|
|
static cl::opt<bool>
|
2009-07-21 02:22:59 +02:00
|
|
|
DisableCrossClassJoin("disable-cross-class-join",
|
|
|
|
cl::desc("Avoid coalescing cross register class copies"),
|
|
|
|
cl::init(false), cl::Hidden);
|
2007-11-06 09:52:21 +01:00
|
|
|
|
2009-04-30 20:39:57 +02:00
|
|
|
static cl::opt<bool>
|
|
|
|
PhysJoinTweak("tweak-phys-join-heuristics",
|
|
|
|
cl::desc("Tweak heuristics for joining phys reg with vr"),
|
|
|
|
cl::init(false), cl::Hidden);
|
|
|
|
|
2009-09-20 04:20:51 +02:00
|
|
|
static RegisterPass<SimpleRegisterCoalescing>
|
2008-05-13 02:00:25 +02:00
|
|
|
X("simple-register-coalescing", "Simple Register Coalescing");
|
2007-09-06 18:18:45 +02:00
|
|
|
|
2008-05-13 02:00:25 +02:00
|
|
|
// Declare that we implement the RegisterCoalescer interface
|
|
|
|
static RegisterAnalysisGroup<RegisterCoalescer, true/*The Default*/> V(X);
|
2007-06-08 19:18:56 +02:00
|
|
|
|
2008-05-13 04:05:11 +02:00
|
|
|
const PassInfo *const llvm::SimpleRegisterCoalescingID = &X;
|
2007-06-08 19:18:56 +02:00
|
|
|
|
|
|
|
void SimpleRegisterCoalescing::getAnalysisUsage(AnalysisUsage &AU) const {
  // Coalescing rewrites copies in place; it never touches the CFG.
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addPreservedID(MachineDominatorsID);
  // Exactly one of the two PHI-elimination passes ran before us; preserve
  // whichever one the -strong-phi-elim setting selected.
  if (StrongPHIElim)
    AU.addPreservedID(StrongPHIEliminationID);
  else
    AU.addPreservedID(PHIEliminationID);
  AU.addPreservedID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}
|
|
|
|
|
2007-07-09 14:00:59 +02:00
|
|
|
/// AdjustCopiesBackFrom - We found a non-trivially-coalescable copy with IntA
|
2007-06-08 19:18:56 +02:00
|
|
|
/// being the source and IntB being the dest, thus this defines a value number
|
|
|
|
/// in IntB. If the source value number (in IntA) is defined by a copy from B,
|
|
|
|
/// see if we can merge these two pieces of B into a single value number,
|
|
|
|
/// eliminating a copy. For example:
|
|
|
|
///
|
|
|
|
/// A3 = B0
|
|
|
|
/// ...
|
|
|
|
/// B1 = A3 <- this copy
|
|
|
|
///
|
|
|
|
/// In this case, B0 can be extended to where the B1 copy lives, allowing the B1
|
|
|
|
/// value number to be replaced with B0 (which simplifies the B liveinterval).
|
|
|
|
///
|
|
|
|
/// This returns true if an interval was modified.
|
|
|
|
///
|
2008-01-04 09:59:18 +01:00
|
|
|
bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval &IntA,
|
|
|
|
LiveInterval &IntB,
|
|
|
|
MachineInstr *CopyMI) {
|
2009-11-04 00:52:08 +01:00
|
|
|
SlotIndex CopyIdx = li_->getInstructionIndex(CopyMI).getDefIndex();
|
2007-06-08 19:18:56 +02:00
|
|
|
|
|
|
|
// BValNo is a value number in B that is defined by a copy from A. 'B3' in
|
|
|
|
// the example above.
|
|
|
|
LiveInterval::iterator BLR = IntB.FindLiveRangeContaining(CopyIdx);
|
2009-01-13 21:25:24 +01:00
|
|
|
assert(BLR != IntB.end() && "Live range not found!");
|
2007-08-29 22:45:00 +02:00
|
|
|
VNInfo *BValNo = BLR->valno;
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2007-06-08 19:18:56 +02:00
|
|
|
// Get the location that B is defined at. Two options: either this value has
|
2009-09-20 04:20:51 +02:00
|
|
|
// an unknown definition point or it is defined at CopyIdx. If unknown, we
|
2007-06-08 19:18:56 +02:00
|
|
|
// can't process it.
|
2009-08-11 01:43:28 +02:00
|
|
|
if (!BValNo->getCopy()) return false;
|
2008-02-15 19:24:29 +01:00
|
|
|
assert(BValNo->def == CopyIdx && "Copy doesn't define the value?");
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2008-02-13 04:01:43 +01:00
|
|
|
// AValNo is the value number in A that defines the copy, A3 in the example.
|
2009-11-04 00:52:08 +01:00
|
|
|
SlotIndex CopyUseIdx = CopyIdx.getUseIndex();
|
2009-08-03 10:41:59 +02:00
|
|
|
LiveInterval::iterator ALR = IntA.FindLiveRangeContaining(CopyUseIdx);
|
2009-01-13 21:25:24 +01:00
|
|
|
assert(ALR != IntA.end() && "Live range not found!");
|
2008-02-13 04:01:43 +01:00
|
|
|
VNInfo *AValNo = ALR->valno;
|
Fix PR3149. If an early clobber def is a physical register and it is tied to an input operand, it effectively extends the live range of the physical register. Currently we do not have a good way to represent this.
172 %ECX<def> = MOV32rr %reg1039<kill>
180 INLINEASM <es:subl $5,$1
sbbl $3,$0>, 10, %EAX<def>, 14, %ECX<earlyclobber,def>, 9, %EAX<kill>,
36, <fi#0>, 1, %reg0, 0, 9, %ECX<kill>, 36, <fi#1>, 1, %reg0, 0
188 %EAX<def> = MOV32rr %EAX<kill>
196 %ECX<def> = MOV32rr %ECX<kill>
204 %ECX<def> = MOV32rr %ECX<kill>
212 %EAX<def> = MOV32rr %EAX<kill>
220 %EAX<def> = MOV32rr %EAX
228 %reg1039<def> = MOV32rr %ECX<kill>
The early clobber operand ties ECX input to the ECX def.
The live interval of ECX is represented as this:
%reg20,inf = [46,47:1)[174,230:0) 0@174-(230) 1@46-(47)
The right way to represent this is something like
%reg20,inf = [46,47:2)[174,182:1)[181:230:0) 0@174-(182) 1@181-230 @2@46-(47)
Of course that won't work since that means overlapping live ranges defined by two val#.
The workaround for now is to add a bit to val# which says the val# is redefined by a early clobber def somewhere. This prevents the move at 228 from being optimized away by SimpleRegisterCoalescing::AdjustCopiesBackFrom.
llvm-svn: 61259
2008-12-19 21:58:01 +01:00
|
|
|
// If it's re-defined by an early clobber somewhere in the live range, then
|
|
|
|
// it's not safe to eliminate the copy. FIXME: This is a temporary workaround.
|
|
|
|
// See PR3149:
|
|
|
|
// 172 %ECX<def> = MOV32rr %reg1039<kill>
|
|
|
|
// 180 INLINEASM <es:subl $5,$1
|
2009-12-03 01:50:42 +01:00
|
|
|
// sbbl $3,$0>, 10, %EAX<def>, 14, %ECX<earlyclobber,def>, 9,
|
|
|
|
// %EAX<kill>,
|
Fix PR3149. If an early clobber def is a physical register and it is tied to an input operand, it effectively extends the live range of the physical register. Currently we do not have a good way to represent this.
172 %ECX<def> = MOV32rr %reg1039<kill>
180 INLINEASM <es:subl $5,$1
sbbl $3,$0>, 10, %EAX<def>, 14, %ECX<earlyclobber,def>, 9, %EAX<kill>,
36, <fi#0>, 1, %reg0, 0, 9, %ECX<kill>, 36, <fi#1>, 1, %reg0, 0
188 %EAX<def> = MOV32rr %EAX<kill>
196 %ECX<def> = MOV32rr %ECX<kill>
204 %ECX<def> = MOV32rr %ECX<kill>
212 %EAX<def> = MOV32rr %EAX<kill>
220 %EAX<def> = MOV32rr %EAX
228 %reg1039<def> = MOV32rr %ECX<kill>
The early clobber operand ties ECX input to the ECX def.
The live interval of ECX is represented as this:
%reg20,inf = [46,47:1)[174,230:0) 0@174-(230) 1@46-(47)
The right way to represent this is something like
%reg20,inf = [46,47:2)[174,182:1)[181:230:0) 0@174-(182) 1@181-230 @2@46-(47)
Of course that won't work since that means overlapping live ranges defined by two val#.
The workaround for now is to add a bit to val# which says the val# is redefined by a early clobber def somewhere. This prevents the move at 228 from being optimized away by SimpleRegisterCoalescing::AdjustCopiesBackFrom.
llvm-svn: 61259
2008-12-19 21:58:01 +01:00
|
|
|
// 36, <fi#0>, 1, %reg0, 0, 9, %ECX<kill>, 36, <fi#1>, 1, %reg0, 0
|
|
|
|
// 188 %EAX<def> = MOV32rr %EAX<kill>
|
|
|
|
// 196 %ECX<def> = MOV32rr %ECX<kill>
|
|
|
|
// 204 %ECX<def> = MOV32rr %ECX<kill>
|
|
|
|
// 212 %EAX<def> = MOV32rr %EAX<kill>
|
|
|
|
// 220 %EAX<def> = MOV32rr %EAX
|
|
|
|
// 228 %reg1039<def> = MOV32rr %ECX<kill>
|
|
|
|
// The early clobber operand ties ECX input to the ECX def.
|
|
|
|
//
|
|
|
|
// The live interval of ECX is represented as this:
|
|
|
|
// %reg20,inf = [46,47:1)[174,230:0) 0@174-(230) 1@46-(47)
|
|
|
|
// The coalescer has no idea there was a def in the middle of [174,230].
|
2009-06-17 23:01:20 +02:00
|
|
|
if (AValNo->hasRedefByEC())
|
Fix PR3149. If an early clobber def is a physical register and it is tied to an input operand, it effectively extends the live range of the physical register. Currently we do not have a good way to represent this.
172 %ECX<def> = MOV32rr %reg1039<kill>
180 INLINEASM <es:subl $5,$1
sbbl $3,$0>, 10, %EAX<def>, 14, %ECX<earlyclobber,def>, 9, %EAX<kill>,
36, <fi#0>, 1, %reg0, 0, 9, %ECX<kill>, 36, <fi#1>, 1, %reg0, 0
188 %EAX<def> = MOV32rr %EAX<kill>
196 %ECX<def> = MOV32rr %ECX<kill>
204 %ECX<def> = MOV32rr %ECX<kill>
212 %EAX<def> = MOV32rr %EAX<kill>
220 %EAX<def> = MOV32rr %EAX
228 %reg1039<def> = MOV32rr %ECX<kill>
The early clobber operand ties ECX input to the ECX def.
The live interval of ECX is represented as this:
%reg20,inf = [46,47:1)[174,230:0) 0@174-(230) 1@46-(47)
The right way to represent this is something like
%reg20,inf = [46,47:2)[174,182:1)[181:230:0) 0@174-(182) 1@181-230 @2@46-(47)
Of course that won't work since that means overlapping live ranges defined by two val#.
The workaround for now is to add a bit to val# which says the val# is redefined by a early clobber def somewhere. This prevents the move at 228 from being optimized away by SimpleRegisterCoalescing::AdjustCopiesBackFrom.
llvm-svn: 61259
2008-12-19 21:58:01 +01:00
|
|
|
return false;
|
2009-09-20 04:20:51 +02:00
|
|
|
|
|
|
|
// If AValNo is defined as a copy from IntB, we can potentially process this.
|
2007-06-08 19:18:56 +02:00
|
|
|
// Get the instruction that defines this value number.
|
2008-02-15 19:24:29 +01:00
|
|
|
unsigned SrcReg = li_->getVNInfoSourceReg(AValNo);
|
2007-06-08 19:18:56 +02:00
|
|
|
if (!SrcReg) return false; // Not defined by a copy.
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2007-06-08 19:18:56 +02:00
|
|
|
// If the value number is not defined by a copy instruction, ignore it.
|
2008-02-15 19:24:29 +01:00
|
|
|
|
2007-06-08 19:18:56 +02:00
|
|
|
// If the source register comes from an interval other than IntB, we can't
|
|
|
|
// handle this.
|
2008-02-15 19:24:29 +01:00
|
|
|
if (SrcReg != IntB.reg) return false;
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2007-06-08 19:18:56 +02:00
|
|
|
// Get the LiveRange in IntB that this value number starts with.
|
2009-09-04 22:41:11 +02:00
|
|
|
LiveInterval::iterator ValLR =
|
2009-11-04 00:52:08 +01:00
|
|
|
IntB.FindLiveRangeContaining(AValNo->def.getPrevSlot());
|
2009-01-13 21:25:24 +01:00
|
|
|
assert(ValLR != IntB.end() && "Live range not found!");
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2007-06-08 19:18:56 +02:00
|
|
|
// Make sure that the end of the live range is inside the same block as
|
|
|
|
// CopyMI.
|
2009-09-04 22:41:11 +02:00
|
|
|
MachineInstr *ValLREndInst =
|
2009-11-04 00:52:08 +01:00
|
|
|
li_->getInstructionFromIndex(ValLR->end.getPrevSlot());
|
2009-09-20 04:20:51 +02:00
|
|
|
if (!ValLREndInst ||
|
2007-06-08 19:18:56 +02:00
|
|
|
ValLREndInst->getParent() != CopyMI->getParent()) return false;
|
|
|
|
|
|
|
|
// Okay, we now know that ValLR ends in the same block that the CopyMI
|
|
|
|
// live-range starts. If there are no intervening live ranges between them in
|
|
|
|
// IntB, we can merge them.
|
|
|
|
if (ValLR+1 != BLR) return false;
|
2007-08-15 01:19:28 +02:00
|
|
|
|
|
|
|
// If a live interval is a physical register, conservatively check if any
|
|
|
|
// of its sub-registers is overlapping the live interval of the virtual
|
|
|
|
// register. If so, do not coalesce.
|
2008-02-10 19:45:23 +01:00
|
|
|
if (TargetRegisterInfo::isPhysicalRegister(IntB.reg) &&
|
|
|
|
*tri_->getSubRegisters(IntB.reg)) {
|
|
|
|
for (const unsigned* SR = tri_->getSubRegisters(IntB.reg); *SR; ++SR)
|
2007-08-15 01:19:28 +02:00
|
|
|
if (li_->hasInterval(*SR) && IntA.overlaps(li_->getInterval(*SR))) {
|
2009-08-22 22:52:46 +02:00
|
|
|
DEBUG({
|
2010-01-05 02:25:58 +01:00
|
|
|
dbgs() << "Interfere with sub-register ";
|
|
|
|
li_->getInterval(*SR).print(dbgs(), tri_);
|
2009-08-22 22:52:46 +02:00
|
|
|
});
|
2007-08-15 01:19:28 +02:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2009-08-22 22:52:46 +02:00
|
|
|
DEBUG({
|
2010-01-05 02:25:58 +01:00
|
|
|
dbgs() << "\nExtending: ";
|
|
|
|
IntB.print(dbgs(), tri_);
|
2009-08-22 22:52:46 +02:00
|
|
|
});
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2009-11-04 00:52:08 +01:00
|
|
|
SlotIndex FillerStart = ValLR->end, FillerEnd = BLR->start;
|
2007-06-08 19:18:56 +02:00
|
|
|
// We are about to delete CopyMI, so need to remove it as the 'instruction
|
2007-08-08 01:49:57 +02:00
|
|
|
// that defines this value #'. Update the the valnum with the new defining
|
|
|
|
// instruction #.
|
2008-02-15 19:24:29 +01:00
|
|
|
BValNo->def = FillerStart;
|
2009-08-11 01:43:28 +02:00
|
|
|
BValNo->setCopy(0);
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2007-06-08 19:18:56 +02:00
|
|
|
// Okay, we can merge them. We need to insert a new liverange:
|
|
|
|
// [ValLR.end, BLR.begin) of either value number, then we merge the
|
|
|
|
// two value numbers.
|
|
|
|
IntB.addRange(LiveRange(FillerStart, FillerEnd, BValNo));
|
|
|
|
|
|
|
|
// If the IntB live range is assigned to a physical register, and if that
|
2009-09-20 04:20:51 +02:00
|
|
|
// physreg has sub-registers, update their live intervals as well.
|
2008-02-10 19:45:23 +01:00
|
|
|
if (TargetRegisterInfo::isPhysicalRegister(IntB.reg)) {
|
2009-03-11 01:03:21 +01:00
|
|
|
for (const unsigned *SR = tri_->getSubRegisters(IntB.reg); *SR; ++SR) {
|
|
|
|
LiveInterval &SRLI = li_->getInterval(*SR);
|
|
|
|
SRLI.addRange(LiveRange(FillerStart, FillerEnd,
|
2009-06-17 23:01:20 +02:00
|
|
|
SRLI.getNextValue(FillerStart, 0, true,
|
|
|
|
li_->getVNInfoAllocator())));
|
2007-06-08 19:18:56 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Okay, merge "B1" into the same value number as "B0".
|
2008-09-15 08:28:41 +02:00
|
|
|
if (BValNo != ValLR->valno) {
|
|
|
|
IntB.addKills(ValLR->valno, BValNo->kills);
|
2007-08-29 22:45:00 +02:00
|
|
|
IntB.MergeValueNumberInto(BValNo, ValLR->valno);
|
2008-09-15 08:28:41 +02:00
|
|
|
}
|
2009-08-22 22:52:46 +02:00
|
|
|
DEBUG({
|
2010-01-05 02:25:58 +01:00
|
|
|
dbgs() << " result = ";
|
|
|
|
IntB.print(dbgs(), tri_);
|
|
|
|
dbgs() << "\n";
|
2009-08-22 22:52:46 +02:00
|
|
|
});
|
2007-06-08 19:18:56 +02:00
|
|
|
|
|
|
|
// If the source instruction was killing the source register before the
|
|
|
|
// merge, unset the isKill marker given the live range has been extended.
|
|
|
|
int UIdx = ValLREndInst->findRegisterUseOperandIdx(IntB.reg, true);
|
2008-09-15 08:28:41 +02:00
|
|
|
if (UIdx != -1) {
|
2007-12-30 22:56:09 +01:00
|
|
|
ValLREndInst->getOperand(UIdx).setIsKill(false);
|
2009-09-04 22:41:11 +02:00
|
|
|
ValLR->valno->removeKill(FillerStart);
|
2008-09-15 08:28:41 +02:00
|
|
|
}
|
2008-02-13 04:01:43 +01:00
|
|
|
|
2009-08-03 10:41:59 +02:00
|
|
|
// If the copy instruction was killing the destination register before the
|
|
|
|
// merge, find the last use and trim the live range. That will also add the
|
|
|
|
// isKill marker.
|
|
|
|
if (CopyMI->killsRegister(IntA.reg))
|
|
|
|
TrimLiveIntervalToLastUse(CopyUseIdx, CopyMI->getParent(), IntA, ALR);
|
|
|
|
|
2008-02-13 04:01:43 +01:00
|
|
|
++numExtends;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2008-02-16 03:32:17 +01:00
|
|
|
/// HasOtherReachingDefs - Return true if there are definitions of IntB
|
|
|
|
/// other than BValNo val# that can reach uses of AValno val# of IntA.
|
|
|
|
bool SimpleRegisterCoalescing::HasOtherReachingDefs(LiveInterval &IntA,
|
|
|
|
LiveInterval &IntB,
|
|
|
|
VNInfo *AValNo,
|
|
|
|
VNInfo *BValNo) {
|
|
|
|
for (LiveInterval::iterator AI = IntA.begin(), AE = IntA.end();
|
|
|
|
AI != AE; ++AI) {
|
|
|
|
if (AI->valno != AValNo) continue;
|
|
|
|
LiveInterval::Ranges::iterator BI =
|
|
|
|
std::upper_bound(IntB.ranges.begin(), IntB.ranges.end(), AI->start);
|
|
|
|
if (BI != IntB.ranges.begin())
|
|
|
|
--BI;
|
|
|
|
for (; BI != IntB.ranges.end() && AI->end >= BI->start; ++BI) {
|
|
|
|
if (BI->valno == BValNo)
|
|
|
|
continue;
|
|
|
|
if (BI->start <= AI->start && BI->end > AI->start)
|
|
|
|
return true;
|
|
|
|
if (BI->start > AI->start && BI->start < AI->end)
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2009-09-24 04:15:22 +02:00
|
|
|
static void
|
|
|
|
TransferImplicitOps(MachineInstr *MI, MachineInstr *NewMI) {
|
|
|
|
for (unsigned i = MI->getDesc().getNumOperands(), e = MI->getNumOperands();
|
|
|
|
i != e; ++i) {
|
|
|
|
MachineOperand &MO = MI->getOperand(i);
|
|
|
|
if (MO.isReg() && MO.isImplicit())
|
|
|
|
NewMI->addOperand(MO);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-12-03 01:50:42 +01:00
|
|
|
/// RemoveCopyByCommutingDef - We found a non-trivially-coalescable copy with
|
|
|
|
/// IntA being the source and IntB being the dest, thus this defines a value
|
|
|
|
/// number in IntB. If the source value number (in IntA) is defined by a
|
|
|
|
/// commutable instruction and its other operand is coalesced to the copy dest
|
|
|
|
/// register, see if we can transform the copy into a noop by commuting the
|
|
|
|
/// definition. For example,
|
2008-02-13 04:01:43 +01:00
|
|
|
///
|
|
|
|
/// A3 = op A2 B0<kill>
|
|
|
|
/// ...
|
|
|
|
/// B1 = A3 <- this copy
|
|
|
|
/// ...
|
|
|
|
/// = op A3 <- more uses
|
|
|
|
///
|
|
|
|
/// ==>
|
|
|
|
///
|
|
|
|
/// B2 = op B0 A2<kill>
|
|
|
|
/// ...
|
|
|
|
/// B1 = B2 <- now an identify copy
|
|
|
|
/// ...
|
|
|
|
/// = op B2 <- more uses
|
|
|
|
///
|
|
|
|
/// This returns true if an interval was modified.
|
|
|
|
///
|
|
|
|
bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
|
|
|
|
LiveInterval &IntB,
|
|
|
|
MachineInstr *CopyMI) {
|
2009-11-04 00:52:08 +01:00
|
|
|
SlotIndex CopyIdx =
|
|
|
|
li_->getInstructionIndex(CopyMI).getDefIndex();
|
2008-02-13 04:01:43 +01:00
|
|
|
|
2008-02-18 19:56:31 +01:00
|
|
|
// FIXME: For now, only eliminate the copy by commuting its def when the
|
|
|
|
// source register is a virtual register. We want to guard against cases
|
|
|
|
// where the copy is a back edge copy and commuting the def lengthen the
|
|
|
|
// live interval of the source register to the entire loop.
|
|
|
|
if (TargetRegisterInfo::isPhysicalRegister(IntA.reg))
|
2008-02-18 09:40:53 +01:00
|
|
|
return false;
|
|
|
|
|
2008-02-15 19:24:29 +01:00
|
|
|
// BValNo is a value number in B that is defined by a copy from A. 'B3' in
|
2008-02-13 04:01:43 +01:00
|
|
|
// the example above.
|
|
|
|
LiveInterval::iterator BLR = IntB.FindLiveRangeContaining(CopyIdx);
|
2009-01-13 21:25:24 +01:00
|
|
|
assert(BLR != IntB.end() && "Live range not found!");
|
2008-02-13 04:01:43 +01:00
|
|
|
VNInfo *BValNo = BLR->valno;
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2008-02-13 04:01:43 +01:00
|
|
|
// Get the location that B is defined at. Two options: either this value has
|
2009-09-20 04:20:51 +02:00
|
|
|
// an unknown definition point or it is defined at CopyIdx. If unknown, we
|
2008-02-13 04:01:43 +01:00
|
|
|
// can't process it.
|
2009-08-11 01:43:28 +02:00
|
|
|
if (!BValNo->getCopy()) return false;
|
2008-02-13 04:01:43 +01:00
|
|
|
assert(BValNo->def == CopyIdx && "Copy doesn't define the value?");
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2008-02-13 04:01:43 +01:00
|
|
|
// AValNo is the value number in A that defines the copy, A3 in the example.
|
2009-09-04 22:41:11 +02:00
|
|
|
LiveInterval::iterator ALR =
|
2009-11-04 00:52:08 +01:00
|
|
|
IntA.FindLiveRangeContaining(CopyIdx.getUseIndex()); //
|
2009-09-04 22:41:11 +02:00
|
|
|
|
2009-01-13 21:25:24 +01:00
|
|
|
assert(ALR != IntA.end() && "Live range not found!");
|
2008-02-13 04:01:43 +01:00
|
|
|
VNInfo *AValNo = ALR->valno;
|
2008-02-13 09:41:08 +01:00
|
|
|
// If other defs can reach uses of this def, then it's not safe to perform
|
2009-06-17 23:01:20 +02:00
|
|
|
// the optimization. FIXME: Do isPHIDef and isDefAccurate both need to be
|
|
|
|
// tested?
|
|
|
|
if (AValNo->isPHIDef() || !AValNo->isDefAccurate() ||
|
|
|
|
AValNo->isUnused() || AValNo->hasPHIKill())
|
2008-02-13 04:01:43 +01:00
|
|
|
return false;
|
|
|
|
MachineInstr *DefMI = li_->getInstructionFromIndex(AValNo->def);
|
|
|
|
const TargetInstrDesc &TID = DefMI->getDesc();
|
2009-07-10 21:15:51 +02:00
|
|
|
if (!TID.isCommutable())
|
|
|
|
return false;
|
|
|
|
// If DefMI is a two-address instruction then commuting it will change the
|
|
|
|
// destination register.
|
|
|
|
int DefIdx = DefMI->findRegisterDefOperandIdx(IntA.reg);
|
|
|
|
assert(DefIdx != -1);
|
|
|
|
unsigned UseOpIdx;
|
|
|
|
if (!DefMI->isRegTiedToUseOperand(DefIdx, &UseOpIdx))
|
|
|
|
return false;
|
|
|
|
unsigned Op1, Op2, NewDstIdx;
|
|
|
|
if (!tii_->findCommutedOpIndices(DefMI, Op1, Op2))
|
|
|
|
return false;
|
|
|
|
if (Op1 == UseOpIdx)
|
|
|
|
NewDstIdx = Op2;
|
|
|
|
else if (Op2 == UseOpIdx)
|
|
|
|
NewDstIdx = Op1;
|
|
|
|
else
|
2008-02-13 04:01:43 +01:00
|
|
|
return false;
|
|
|
|
|
2008-02-15 19:24:29 +01:00
|
|
|
MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
|
|
|
|
unsigned NewReg = NewDstMO.getReg();
|
|
|
|
if (NewReg != IntB.reg || !NewDstMO.isKill())
|
2008-02-13 04:01:43 +01:00
|
|
|
return false;
|
|
|
|
|
|
|
|
// Make sure there are no other definitions of IntB that would reach the
|
|
|
|
// uses which the new definition can reach.
|
2008-02-16 03:32:17 +01:00
|
|
|
if (HasOtherReachingDefs(IntA, IntB, AValNo, BValNo))
|
|
|
|
return false;
|
2008-02-13 04:01:43 +01:00
|
|
|
|
2008-03-26 20:03:01 +01:00
|
|
|
// If some of the uses of IntA.reg is already coalesced away, return false.
|
|
|
|
// It's not possible to determine whether it's safe to perform the coalescing.
|
|
|
|
for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(IntA.reg),
|
|
|
|
UE = mri_->use_end(); UI != UE; ++UI) {
|
|
|
|
MachineInstr *UseMI = &*UI;
|
2009-11-04 00:52:08 +01:00
|
|
|
SlotIndex UseIdx = li_->getInstructionIndex(UseMI);
|
2008-03-26 20:03:01 +01:00
|
|
|
LiveInterval::iterator ULR = IntA.FindLiveRangeContaining(UseIdx);
|
2008-04-16 20:48:43 +02:00
|
|
|
if (ULR == IntA.end())
|
|
|
|
continue;
|
2008-03-26 20:03:01 +01:00
|
|
|
if (ULR->valno == AValNo && JoinedCopies.count(UseMI))
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2008-02-13 10:56:03 +01:00
|
|
|
// At this point we have decided that it is legal to do this
|
|
|
|
// transformation. Start by commuting the instruction.
|
2008-02-13 04:01:43 +01:00
|
|
|
MachineBasicBlock *MBB = DefMI->getParent();
|
|
|
|
MachineInstr *NewMI = tii_->commuteInstruction(DefMI);
|
2008-02-16 03:32:17 +01:00
|
|
|
if (!NewMI)
|
|
|
|
return false;
|
2008-02-13 04:01:43 +01:00
|
|
|
if (NewMI != DefMI) {
|
|
|
|
li_->ReplaceMachineInstrInMaps(DefMI, NewMI);
|
|
|
|
MBB->insert(DefMI, NewMI);
|
|
|
|
MBB->erase(DefMI);
|
|
|
|
}
|
2008-03-05 01:59:57 +01:00
|
|
|
unsigned OpIdx = NewMI->findRegisterUseOperandIdx(IntA.reg, false);
|
2008-02-13 04:01:43 +01:00
|
|
|
NewMI->getOperand(OpIdx).setIsKill();
|
|
|
|
|
2009-06-17 23:01:20 +02:00
|
|
|
bool BHasPHIKill = BValNo->hasPHIKill();
|
2008-02-13 04:01:43 +01:00
|
|
|
SmallVector<VNInfo*, 4> BDeadValNos;
|
2009-07-09 05:57:02 +02:00
|
|
|
VNInfo::KillSet BKills;
|
2009-11-04 00:52:08 +01:00
|
|
|
std::map<SlotIndex, SlotIndex> BExtend;
|
2008-03-10 09:11:32 +01:00
|
|
|
|
|
|
|
// If ALR and BLR overlaps and end of BLR extends beyond end of ALR, e.g.
|
|
|
|
// A = or A, B
|
|
|
|
// ...
|
|
|
|
// B = A
|
|
|
|
// ...
|
|
|
|
// C = A<kill>
|
|
|
|
// ...
|
|
|
|
// = B
|
|
|
|
//
|
|
|
|
// then do not add kills of A to the newly created B interval.
|
|
|
|
bool Extended = BLR->end > ALR->end && ALR->end != ALR->start;
|
|
|
|
if (Extended)
|
|
|
|
BExtend[ALR->end] = BLR->end;
|
|
|
|
|
|
|
|
// Update uses of IntA of the specific Val# with IntB.
|
2009-03-11 01:03:21 +01:00
|
|
|
bool BHasSubRegs = false;
|
|
|
|
if (TargetRegisterInfo::isPhysicalRegister(IntB.reg))
|
|
|
|
BHasSubRegs = *tri_->getSubRegisters(IntB.reg);
|
2008-02-13 04:01:43 +01:00
|
|
|
for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(IntA.reg),
|
|
|
|
UE = mri_->use_end(); UI != UE;) {
|
|
|
|
MachineOperand &UseMO = UI.getOperand();
|
2008-02-13 10:56:03 +01:00
|
|
|
MachineInstr *UseMI = &*UI;
|
2008-02-13 04:01:43 +01:00
|
|
|
++UI;
|
2008-02-13 10:56:03 +01:00
|
|
|
if (JoinedCopies.count(UseMI))
|
2008-03-26 20:03:01 +01:00
|
|
|
continue;
|
2009-11-04 00:52:08 +01:00
|
|
|
SlotIndex UseIdx = li_->getInstructionIndex(UseMI).getUseIndex();
|
2008-02-13 04:01:43 +01:00
|
|
|
LiveInterval::iterator ULR = IntA.FindLiveRangeContaining(UseIdx);
|
2008-04-16 20:48:43 +02:00
|
|
|
if (ULR == IntA.end() || ULR->valno != AValNo)
|
2008-02-13 04:01:43 +01:00
|
|
|
continue;
|
|
|
|
UseMO.setReg(NewReg);
|
2008-02-13 10:56:03 +01:00
|
|
|
if (UseMI == CopyMI)
|
|
|
|
continue;
|
2008-03-10 09:11:32 +01:00
|
|
|
if (UseMO.isKill()) {
|
|
|
|
if (Extended)
|
|
|
|
UseMO.setIsKill(false);
|
|
|
|
else
|
2009-11-04 00:52:08 +01:00
|
|
|
BKills.push_back(UseIdx.getDefIndex());
|
2008-03-10 09:11:32 +01:00
|
|
|
}
|
2009-01-20 20:12:24 +01:00
|
|
|
unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
|
|
|
|
if (!tii_->isMoveInstr(*UseMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx))
|
2008-02-13 10:56:03 +01:00
|
|
|
continue;
|
2008-02-15 19:24:29 +01:00
|
|
|
if (DstReg == IntB.reg) {
|
2008-02-13 10:56:03 +01:00
|
|
|
// This copy will become a noop. If it's defining a new val#,
|
|
|
|
// remove that val# as well. However this live range is being
|
|
|
|
// extended to the end of the existing live range defined by the copy.
|
2009-11-04 00:52:08 +01:00
|
|
|
SlotIndex DefIdx = UseIdx.getDefIndex();
|
2008-04-16 20:48:43 +02:00
|
|
|
const LiveRange *DLR = IntB.getLiveRangeContaining(DefIdx);
|
2009-06-17 23:01:20 +02:00
|
|
|
BHasPHIKill |= DLR->valno->hasPHIKill();
|
2008-02-13 10:56:03 +01:00
|
|
|
assert(DLR->valno->def == DefIdx);
|
|
|
|
BDeadValNos.push_back(DLR->valno);
|
|
|
|
BExtend[DLR->start] = DLR->end;
|
|
|
|
JoinedCopies.insert(UseMI);
|
|
|
|
// If this is a kill but it's going to be removed, the last use
|
|
|
|
// of the same val# is the new kill.
|
2008-03-10 09:11:32 +01:00
|
|
|
if (UseMO.isKill())
|
2008-02-13 10:56:03 +01:00
|
|
|
BKills.pop_back();
|
2008-02-13 04:01:43 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// We need to insert a new liverange: [ALR.start, LastUse). It may be we can
|
|
|
|
// simply extend BLR if CopyMI doesn't end the range.
|
2009-08-22 22:52:46 +02:00
|
|
|
DEBUG({
|
2010-01-05 02:25:58 +01:00
|
|
|
dbgs() << "\nExtending: ";
|
|
|
|
IntB.print(dbgs(), tri_);
|
2009-08-22 22:52:46 +02:00
|
|
|
});
|
2008-02-13 04:01:43 +01:00
|
|
|
|
2008-06-17 22:11:16 +02:00
|
|
|
// Remove val#'s defined by copies that will be coalesced away.
|
2009-03-11 23:18:44 +01:00
|
|
|
for (unsigned i = 0, e = BDeadValNos.size(); i != e; ++i) {
|
|
|
|
VNInfo *DeadVNI = BDeadValNos[i];
|
|
|
|
if (BHasSubRegs) {
|
|
|
|
for (const unsigned *SR = tri_->getSubRegisters(IntB.reg); *SR; ++SR) {
|
|
|
|
LiveInterval &SRLI = li_->getInterval(*SR);
|
|
|
|
const LiveRange *SRLR = SRLI.getLiveRangeContaining(DeadVNI->def);
|
|
|
|
SRLI.removeValNo(SRLR->valno);
|
|
|
|
}
|
|
|
|
}
|
2008-02-13 04:01:43 +01:00
|
|
|
IntB.removeValNo(BDeadValNos[i]);
|
2009-03-11 23:18:44 +01:00
|
|
|
}
|
2008-06-17 22:11:16 +02:00
|
|
|
|
|
|
|
// Extend BValNo by merging in IntA live ranges of AValNo. Val# definition
|
|
|
|
// is updated. Kills are also updated.
|
|
|
|
VNInfo *ValNo = BValNo;
|
|
|
|
ValNo->def = AValNo->def;
|
2009-08-11 01:43:28 +02:00
|
|
|
ValNo->setCopy(0);
|
2008-06-17 22:11:16 +02:00
|
|
|
for (unsigned j = 0, ee = ValNo->kills.size(); j != ee; ++j) {
|
2009-09-04 22:41:11 +02:00
|
|
|
if (ValNo->kills[j] != BLR->end)
|
|
|
|
BKills.push_back(ValNo->kills[j]);
|
2008-06-17 22:11:16 +02:00
|
|
|
}
|
|
|
|
ValNo->kills.clear();
|
2008-02-13 04:01:43 +01:00
|
|
|
for (LiveInterval::iterator AI = IntA.begin(), AE = IntA.end();
|
|
|
|
AI != AE; ++AI) {
|
|
|
|
if (AI->valno != AValNo) continue;
|
2009-11-04 00:52:08 +01:00
|
|
|
SlotIndex End = AI->end;
|
|
|
|
std::map<SlotIndex, SlotIndex>::iterator
|
2009-09-04 22:41:11 +02:00
|
|
|
EI = BExtend.find(End);
|
2008-02-13 04:01:43 +01:00
|
|
|
if (EI != BExtend.end())
|
|
|
|
End = EI->second;
|
|
|
|
IntB.addRange(LiveRange(AI->start, End, ValNo));
|
2009-03-11 01:03:21 +01:00
|
|
|
|
|
|
|
// If the IntB live range is assigned to a physical register, and if that
|
2009-09-20 04:20:51 +02:00
|
|
|
// physreg has sub-registers, update their live intervals as well.
|
2009-03-11 23:18:44 +01:00
|
|
|
if (BHasSubRegs) {
|
2009-03-11 01:03:21 +01:00
|
|
|
for (const unsigned *SR = tri_->getSubRegisters(IntB.reg); *SR; ++SR) {
|
|
|
|
LiveInterval &SRLI = li_->getInterval(*SR);
|
2009-12-03 01:50:42 +01:00
|
|
|
SRLI.MergeInClobberRange(*li_, AI->start, End,
|
|
|
|
li_->getVNInfoAllocator());
|
2009-03-11 01:03:21 +01:00
|
|
|
}
|
|
|
|
}
|
2008-02-13 04:01:43 +01:00
|
|
|
}
|
|
|
|
IntB.addKills(ValNo, BKills);
|
2009-06-17 23:01:20 +02:00
|
|
|
ValNo->setHasPHIKill(BHasPHIKill);
|
2008-02-13 04:01:43 +01:00
|
|
|
|
2009-08-22 22:52:46 +02:00
|
|
|
DEBUG({
|
2010-01-05 02:25:58 +01:00
|
|
|
dbgs() << " result = ";
|
|
|
|
IntB.print(dbgs(), tri_);
|
|
|
|
dbgs() << '\n';
|
|
|
|
dbgs() << "\nShortening: ";
|
|
|
|
IntA.print(dbgs(), tri_);
|
2009-08-22 22:52:46 +02:00
|
|
|
});
|
2008-02-13 04:01:43 +01:00
|
|
|
|
|
|
|
IntA.removeValNo(AValNo);
|
2009-08-22 22:52:46 +02:00
|
|
|
|
|
|
|
DEBUG({
|
2010-01-05 02:25:58 +01:00
|
|
|
dbgs() << " result = ";
|
|
|
|
IntA.print(dbgs(), tri_);
|
|
|
|
dbgs() << '\n';
|
2009-08-22 22:52:46 +02:00
|
|
|
});
|
2008-02-13 04:01:43 +01:00
|
|
|
|
|
|
|
++numCommutes;
|
2007-06-08 19:18:56 +02:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2009-02-05 09:45:04 +01:00
|
|
|
/// isSameOrFallThroughBB - Return true if MBB == SuccMBB or MBB simply
|
|
|
|
/// fallthoughs to SuccMBB.
|
|
|
|
static bool isSameOrFallThroughBB(MachineBasicBlock *MBB,
|
|
|
|
MachineBasicBlock *SuccMBB,
|
|
|
|
const TargetInstrInfo *tii_) {
|
|
|
|
if (MBB == SuccMBB)
|
|
|
|
return true;
|
|
|
|
MachineBasicBlock *TBB = 0, *FBB = 0;
|
|
|
|
SmallVector<MachineOperand, 4> Cond;
|
|
|
|
return !tii_->AnalyzeBranch(*MBB, TBB, FBB, Cond) && !TBB && !FBB &&
|
|
|
|
MBB->isSuccessor(SuccMBB);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// removeRange - Wrapper for LiveInterval::removeRange. This removes a range
/// from a physical register live interval as well as from the live intervals
/// of its sub-registers.
static void removeRange(LiveInterval &li,
                        SlotIndex Start, SlotIndex End,
                        LiveIntervals *li_, const TargetRegisterInfo *tri_) {
  // Remove [Start, End) from the main interval first.
  li.removeRange(Start, End, true);
  if (TargetRegisterInfo::isPhysicalRegister(li.reg)) {
    // Mirror the removal in every sub-register interval that exists.
    for (const unsigned* SR = tri_->getSubRegisters(li.reg); *SR; ++SR) {
      if (!li_->hasInterval(*SR))
        continue;
      LiveInterval &sli = li_->getInterval(*SR);
      SlotIndex RemoveStart = Start;
      SlotIndex RemoveEnd = Start;

      // The sub-register interval may cover [Start, End) with several
      // disjoint live ranges; walk them one at a time, clipping each
      // removal at End. Stop early if no live range contains RemoveStart.
      while (RemoveEnd != End) {
        LiveInterval::iterator LR = sli.FindLiveRangeContaining(RemoveStart);
        if (LR == sli.end())
          break;
        RemoveEnd = (LR->end < End) ? LR->end : End;
        sli.removeRange(RemoveStart, RemoveEnd, true);
        RemoveStart = RemoveEnd;
      }
    }
  }
}
|
|
|
|
|
|
|
|
/// TrimLiveIntervalToLastUse - If there is a last use in the same basic block
/// as the copy instruction, trim the live interval to the last use and return
/// true.
///
/// \param CopyIdx  Slot index of the copy instruction.
/// \param CopyMBB  Block containing the copy.
/// \param li       Live interval of the copy's source register.
/// \param LR       The live range of li that feeds the copy.
/// \return true if the interval was trimmed, false if nothing was changed.
bool
SimpleRegisterCoalescing::TrimLiveIntervalToLastUse(SlotIndex CopyIdx,
                                                    MachineBasicBlock *CopyMBB,
                                                    LiveInterval &li,
                                                    const LiveRange *LR) {
  SlotIndex MBBStart = li_->getMBBStartIdx(CopyMBB);
  SlotIndex LastUseIdx;
  // Find the last use of li.reg strictly before the copy.
  MachineOperand *LastUse =
    lastRegisterUse(LR->start, CopyIdx.getPrevSlot(), li.reg, LastUseIdx);
  if (LastUse) {
    MachineInstr *LastUseMI = LastUse->getParent();
    if (!isSameOrFallThroughBB(LastUseMI->getParent(), CopyMBB, tii_)) {
      // The last use is in another block that does not fall through to the
      // copy's block, e.g.:
      // r1024 = op
      // ...
      // BB1:
      // = r1024
      //
      // BB2:
      // r1025<dead> = r1024<kill>
      // Only the portion of the range inside the copy's block can be cut.
      if (MBBStart < LR->end)
        removeRange(li, MBBStart, LR->end, li_, tri_);
      return true;
    }

    // There are uses before the copy, just shorten the live range to the end
    // of last use.
    LastUse->setIsKill();
    removeRange(li, LastUseIdx.getDefIndex(), LR->end, li_, tri_);
    LR->valno->addKill(LastUseIdx.getDefIndex());
    unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
    if (tii_->isMoveInstr(*LastUseMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx) &&
        DstReg == li.reg) {
      // Last use is itself an identity code.
      int DeadIdx = LastUseMI->findRegisterDefOperandIdx(li.reg, false, tri_);
      LastUseMI->getOperand(DeadIdx).setIsDead();
    }
    return true;
  }

  // No use before the copy in this block. Is it livein?
  if (LR->start <= MBBStart && LR->end > MBBStart) {
    if (LR->start == li_->getZeroIndex()) {
      assert(TargetRegisterInfo::isPhysicalRegister(li.reg));
      // Live-in to the function but dead. Remove it from entry live-in set.
      mf_->begin()->removeLiveIn(li.reg);
    }
    // FIXME: Shorten intervals in BBs that reaches this BB.
  }

  return false;
}
|
|
|
|
|
2008-08-30 11:09:33 +02:00
|
|
|
/// ReMaterializeTrivialDef - If the source of a copy is defined by a trivial
|
|
|
|
/// computation, replace the copy by rematerialize the definition.
|
|
|
|
bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt,
|
|
|
|
unsigned DstReg,
|
2009-07-16 11:20:10 +02:00
|
|
|
unsigned DstSubIdx,
|
2008-08-30 11:09:33 +02:00
|
|
|
MachineInstr *CopyMI) {
|
2009-11-04 00:52:08 +01:00
|
|
|
SlotIndex CopyIdx = li_->getInstructionIndex(CopyMI).getUseIndex();
|
2008-08-30 11:09:33 +02:00
|
|
|
LiveInterval::iterator SrcLR = SrcInt.FindLiveRangeContaining(CopyIdx);
|
2009-01-13 21:25:24 +01:00
|
|
|
assert(SrcLR != SrcInt.end() && "Live range not found!");
|
2008-08-30 11:09:33 +02:00
|
|
|
VNInfo *ValNo = SrcLR->valno;
|
|
|
|
// If other defs can reach uses of this def, then it's not safe to perform
|
2009-06-17 23:01:20 +02:00
|
|
|
// the optimization. FIXME: Do isPHIDef and isDefAccurate both need to be
|
|
|
|
// tested?
|
|
|
|
if (ValNo->isPHIDef() || !ValNo->isDefAccurate() ||
|
|
|
|
ValNo->isUnused() || ValNo->hasPHIKill())
|
2008-08-30 11:09:33 +02:00
|
|
|
return false;
|
|
|
|
MachineInstr *DefMI = li_->getInstructionFromIndex(ValNo->def);
|
|
|
|
const TargetInstrDesc &TID = DefMI->getDesc();
|
|
|
|
if (!TID.isAsCheapAsAMove())
|
|
|
|
return false;
|
2009-10-10 01:27:56 +02:00
|
|
|
if (!tii_->isTriviallyReMaterializable(DefMI, AA))
|
2009-02-05 23:24:17 +01:00
|
|
|
return false;
|
2008-08-30 11:09:33 +02:00
|
|
|
bool SawStore = false;
|
2009-10-10 01:27:56 +02:00
|
|
|
if (!DefMI->isSafeToMove(tii_, SawStore, AA))
|
2008-08-30 11:09:33 +02:00
|
|
|
return false;
|
2009-07-14 02:51:06 +02:00
|
|
|
if (TID.getNumDefs() != 1)
|
|
|
|
return false;
|
2009-07-20 21:47:55 +02:00
|
|
|
if (DefMI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF) {
|
|
|
|
// Make sure the copy destination register class fits the instruction
|
|
|
|
// definition register class. The mismatch can happen as a result of earlier
|
|
|
|
// extract_subreg, insert_subreg, subreg_to_reg coalescing.
|
2009-07-29 23:36:49 +02:00
|
|
|
const TargetRegisterClass *RC = TID.OpInfo[0].getRegClass(tri_);
|
2009-07-20 21:47:55 +02:00
|
|
|
if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
|
|
|
|
if (mri_->getRegClass(DstReg) != RC)
|
|
|
|
return false;
|
|
|
|
} else if (!RC->contains(DstReg))
|
2009-07-14 02:51:06 +02:00
|
|
|
return false;
|
2009-07-20 21:47:55 +02:00
|
|
|
}
|
2008-08-30 11:09:33 +02:00
|
|
|
|
2009-09-08 08:39:07 +02:00
|
|
|
// If destination register has a sub-register index on it, make sure it mtches
|
|
|
|
// the instruction register class.
|
|
|
|
if (DstSubIdx) {
|
|
|
|
const TargetInstrDesc &TID = DefMI->getDesc();
|
|
|
|
if (TID.getNumDefs() != 1)
|
|
|
|
return false;
|
|
|
|
const TargetRegisterClass *DstRC = mri_->getRegClass(DstReg);
|
|
|
|
const TargetRegisterClass *DstSubRC =
|
|
|
|
DstRC->getSubRegisterRegClass(DstSubIdx);
|
|
|
|
const TargetRegisterClass *DefRC = TID.OpInfo[0].getRegClass(tri_);
|
|
|
|
if (DefRC == DstRC)
|
|
|
|
DstSubIdx = 0;
|
|
|
|
else if (DefRC != DstSubRC)
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2009-11-04 00:52:08 +01:00
|
|
|
SlotIndex DefIdx = CopyIdx.getDefIndex();
|
2008-08-30 11:09:33 +02:00
|
|
|
const LiveRange *DLR= li_->getInterval(DstReg).getLiveRangeContaining(DefIdx);
|
2009-08-11 01:43:28 +02:00
|
|
|
DLR->valno->setCopy(0);
|
2008-10-13 20:35:52 +02:00
|
|
|
// Don't forget to update sub-register intervals.
|
|
|
|
if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
|
|
|
|
for (const unsigned* SR = tri_->getSubRegisters(DstReg); *SR; ++SR) {
|
|
|
|
if (!li_->hasInterval(*SR))
|
|
|
|
continue;
|
|
|
|
DLR = li_->getInterval(*SR).getLiveRangeContaining(DefIdx);
|
2009-08-11 01:43:28 +02:00
|
|
|
if (DLR && DLR->valno->getCopy() == CopyMI)
|
|
|
|
DLR->valno->setCopy(0);
|
2008-10-13 20:35:52 +02:00
|
|
|
}
|
|
|
|
}
|
2008-08-30 11:09:33 +02:00
|
|
|
|
2009-02-05 09:45:04 +01:00
|
|
|
// If copy kills the source register, find the last use and propagate
|
|
|
|
// kill.
|
2009-05-12 01:14:13 +02:00
|
|
|
bool checkForDeadDef = false;
|
2008-08-30 11:09:33 +02:00
|
|
|
MachineBasicBlock *MBB = CopyMI->getParent();
|
2009-02-05 09:45:04 +01:00
|
|
|
if (CopyMI->killsRegister(SrcInt.reg))
|
2009-05-12 01:14:13 +02:00
|
|
|
if (!TrimLiveIntervalToLastUse(CopyIdx, MBB, SrcInt, SrcLR)) {
|
|
|
|
checkForDeadDef = true;
|
|
|
|
}
|
2009-02-05 09:45:04 +01:00
|
|
|
|
2009-12-03 01:50:42 +01:00
|
|
|
MachineBasicBlock::iterator MII =
|
|
|
|
llvm::next(MachineBasicBlock::iterator(CopyMI));
|
2009-11-14 03:55:43 +01:00
|
|
|
tii_->reMaterialize(*MBB, MII, DstReg, DstSubIdx, DefMI, tri_);
|
2008-08-30 11:09:33 +02:00
|
|
|
MachineInstr *NewMI = prior(MII);
|
2009-05-12 01:14:13 +02:00
|
|
|
|
|
|
|
if (checkForDeadDef) {
|
2009-06-16 09:12:58 +02:00
|
|
|
// PR4090 fix: Trim interval failed because there was no use of the
|
|
|
|
// source interval in this MBB. If the def is in this MBB too then we
|
|
|
|
// should mark it dead:
|
|
|
|
if (DefMI->getParent() == MBB) {
|
|
|
|
DefMI->addRegisterDead(SrcInt.reg, tri_);
|
2009-11-04 00:52:08 +01:00
|
|
|
SrcLR->end = SrcLR->start.getNextSlot();
|
2009-06-16 09:12:58 +02:00
|
|
|
}
|
2009-05-12 01:14:13 +02:00
|
|
|
}
|
|
|
|
|
2008-10-12 01:59:03 +02:00
|
|
|
// CopyMI may have implicit operands, transfer them over to the newly
|
2008-08-30 11:09:33 +02:00
|
|
|
// rematerialized instruction. And update implicit def interval valnos.
|
|
|
|
for (unsigned i = CopyMI->getDesc().getNumOperands(),
|
|
|
|
e = CopyMI->getNumOperands(); i != e; ++i) {
|
|
|
|
MachineOperand &MO = CopyMI->getOperand(i);
|
2008-10-03 17:45:36 +02:00
|
|
|
if (MO.isReg() && MO.isImplicit())
|
2008-08-30 11:09:33 +02:00
|
|
|
NewMI->addOperand(MO);
|
2008-09-10 22:41:13 +02:00
|
|
|
if (MO.isDef() && li_->hasInterval(MO.getReg())) {
|
2008-08-30 11:09:33 +02:00
|
|
|
unsigned Reg = MO.getReg();
|
|
|
|
DLR = li_->getInterval(Reg).getLiveRangeContaining(DefIdx);
|
2009-08-11 01:43:28 +02:00
|
|
|
if (DLR && DLR->valno->getCopy() == CopyMI)
|
|
|
|
DLR->valno->setCopy(0);
|
2008-08-30 11:09:33 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-09-24 04:15:22 +02:00
|
|
|
TransferImplicitOps(CopyMI, NewMI);
|
2008-08-30 11:09:33 +02:00
|
|
|
li_->ReplaceMachineInstrInMaps(CopyMI, NewMI);
|
2009-06-16 09:12:58 +02:00
|
|
|
CopyMI->eraseFromParent();
|
2008-08-30 11:09:33 +02:00
|
|
|
ReMatCopies.insert(CopyMI);
|
2008-09-19 19:38:47 +02:00
|
|
|
ReMatDefs.insert(DefMI);
|
2008-08-30 11:09:33 +02:00
|
|
|
++NumReMats;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2008-02-15 19:24:29 +01:00
|
|
|
/// UpdateRegDefsUses - Replace all defs and uses of SrcReg to DstReg and
/// update the subregister number if it is not zero. If DstReg is a
/// physical register and the existing subregister number of the def / use
/// being updated is not zero, make sure to set it to the correct physical
/// subregister.
void
SimpleRegisterCoalescing::UpdateRegDefsUses(unsigned SrcReg, unsigned DstReg,
                                            unsigned SubIdx) {
  bool DstIsPhys = TargetRegisterInfo::isPhysicalRegister(DstReg);
  if (DstIsPhys && SubIdx) {
    // Figure out the real physical register we are updating with.
    DstReg = tri_->getSubReg(DstReg, SubIdx);
    SubIdx = 0;
  }

  // Walk SrcReg's use/def list. The iterator is advanced before each operand
  // is rewritten because rewriting removes the operand from this list.
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(SrcReg),
         E = mri_->reg_end(); I != E; ) {
    MachineOperand &O = I.getOperand();
    MachineInstr *UseMI = &*I;
    ++I;
    unsigned OldSubIdx = O.getSubReg();
    if (DstIsPhys) {
      unsigned UseDstReg = DstReg;
      // An existing sub-register index selects the matching physical subreg.
      if (OldSubIdx)
        UseDstReg = tri_->getSubReg(DstReg, OldSubIdx);

      unsigned CopySrcReg, CopyDstReg, CopySrcSubIdx, CopyDstSubIdx;
      if (tii_->isMoveInstr(*UseMI, CopySrcReg, CopyDstReg,
                            CopySrcSubIdx, CopyDstSubIdx) &&
          CopySrcReg != CopyDstReg &&
          CopySrcReg == SrcReg && CopyDstReg != UseDstReg) {
        // If the use is a copy and it won't be coalesced away, and its source
        // is defined by a trivial computation, try to rematerialize it instead.
        if (ReMaterializeTrivialDef(li_->getInterval(SrcReg), CopyDstReg,
                                    CopyDstSubIdx, UseMI))
          continue;
      }

      O.setReg(UseDstReg);
      O.setSubReg(0);
      continue;
    }

    // Sub-register indexes goes from small to large. e.g.
    // RAX: 1 -> AL, 2 -> AX, 3 -> EAX
    // EAX: 1 -> AL, 2 -> AX
    // So RAX's sub-register 2 is AX, RAX's sub-regsiter 3 is EAX, whose
    // sub-register 2 is also AX.
    if (SubIdx && OldSubIdx && SubIdx != OldSubIdx)
      assert(OldSubIdx < SubIdx && "Conflicting sub-register index!");
    else if (SubIdx)
      O.setSubReg(SubIdx);
    // Remove would-be duplicated kill marker.
    if (O.isKill() && UseMI->killsRegister(DstReg))
      O.setIsKill(false);
    O.setReg(DstReg);

    // After updating the operand, check if the machine instruction has
    // become a copy. If so, update its val# information.
    if (JoinedCopies.count(UseMI))
      continue;

    const TargetInstrDesc &TID = UseMI->getDesc();
    unsigned CopySrcReg, CopyDstReg, CopySrcSubIdx, CopyDstSubIdx;
    if (TID.getNumDefs() == 1 && TID.getNumOperands() > 2 &&
        tii_->isMoveInstr(*UseMI, CopySrcReg, CopyDstReg,
                          CopySrcSubIdx, CopyDstSubIdx) &&
        CopySrcReg != CopyDstReg &&
        (TargetRegisterInfo::isVirtualRegister(CopyDstReg) ||
         allocatableRegs_[CopyDstReg])) {
      LiveInterval &LI = li_->getInterval(CopyDstReg);
      SlotIndex DefIdx =
        li_->getInstructionIndex(UseMI).getDefIndex();
      if (const LiveRange *DLR = LI.getLiveRangeContaining(DefIdx)) {
        // Only mark the val# as copy-defined if this instruction is its def.
        if (DLR->valno->def == DefIdx)
          DLR->valno->setCopy(UseMI);
      }
    }
  }
}
|
|
|
|
|
2008-03-10 09:11:32 +01:00
|
|
|
/// RemoveUnnecessaryKills - Remove kill markers that are no longer accurate
/// due to live range lengthening as the result of coalescing.
void SimpleRegisterCoalescing::RemoveUnnecessaryKills(unsigned Reg,
                                                      LiveInterval &LI) {
  // Scan every use operand of Reg that carries a kill marker and drop the
  // marker if the interval says the value is not actually killed there.
  for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
         UE = mri_->use_end(); UI != UE; ++UI) {
    MachineOperand &UseMO = UI.getOperand();
    if (!UseMO.isKill())
      continue;
    MachineInstr *UseMI = UseMO.getParent();
    SlotIndex UseIdx =
      li_->getInstructionIndex(UseMI).getUseIndex();
    const LiveRange *LR = LI.getLiveRangeContaining(UseIdx);
    if (!LR ||
        (!LR->valno->isKill(UseIdx.getDefIndex()) &&
         LR->valno->def != UseIdx.getDefIndex())) {
      // Interesting problem. After coalescing reg1027's def and kill are both
      // at the same point:  %reg1027,0.000000e+00 = [56,814:0)  0@70-(814)
      //
      // bb5:
      // 60   %reg1027<def> = t2MOVr %reg1027, 14, %reg0, %reg0
      // 68   %reg1027<def> = t2LDRi12 %reg1027<kill>, 8, 14, %reg0
      // 76   t2CMPzri %reg1038<kill,undef>, 0, 14, %reg0, %CPSR<imp-def>
      // 84   %reg1027<def> = t2MOVr %reg1027, 14, %reg0, %reg0
      // 96   t2Bcc mbb<bb5,0x2030910>, 1, %CPSR<kill>
      //
      // Do not remove the kill marker on t2LDRi12.
      UseMO.setIsKill(false);
    }
  }
}
|
|
|
|
|
2008-03-18 09:26:47 +01:00
|
|
|
/// removeIntervalIfEmpty - Check if the live interval of a physical register
|
|
|
|
/// is empty, if so remove it and also remove the empty intervals of its
|
2008-04-16 22:24:25 +02:00
|
|
|
/// sub-registers. Return true if live interval is removed.
|
|
|
|
static bool removeIntervalIfEmpty(LiveInterval &li, LiveIntervals *li_,
|
2008-03-18 09:26:47 +01:00
|
|
|
const TargetRegisterInfo *tri_) {
|
|
|
|
if (li.empty()) {
|
|
|
|
if (TargetRegisterInfo::isPhysicalRegister(li.reg))
|
|
|
|
for (const unsigned* SR = tri_->getSubRegisters(li.reg); *SR; ++SR) {
|
|
|
|
if (!li_->hasInterval(*SR))
|
|
|
|
continue;
|
|
|
|
LiveInterval &sli = li_->getInterval(*SR);
|
|
|
|
if (sli.empty())
|
|
|
|
li_->removeInterval(*SR);
|
|
|
|
}
|
2008-04-16 03:22:28 +02:00
|
|
|
li_->removeInterval(li.reg);
|
2008-04-16 22:24:25 +02:00
|
|
|
return true;
|
2008-03-18 09:26:47 +01:00
|
|
|
}
|
2008-04-16 22:24:25 +02:00
|
|
|
return false;
|
2008-03-18 09:26:47 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/// ShortenDeadCopyLiveRange - Shorten a live range defined by a dead copy.
/// Return true if live interval is removed.
bool SimpleRegisterCoalescing::ShortenDeadCopyLiveRange(LiveInterval &li,
                                                        MachineInstr *CopyMI) {
  SlotIndex CopyIdx = li_->getInstructionIndex(CopyMI);
  LiveInterval::iterator MLR =
    li.FindLiveRangeContaining(CopyIdx.getDefIndex());
  if (MLR == li.end())
    return false;  // Already removed by ShortenDeadCopySrcLiveRange.
  SlotIndex RemoveStart = MLR->start;
  SlotIndex RemoveEnd = MLR->end;
  SlotIndex DefIdx = CopyIdx.getDefIndex();
  // Remove the liverange that's defined by this.
  // Only act when the range covers exactly this def's slot, i.e. the value
  // is dead immediately after the copy.
  if (RemoveStart == DefIdx && RemoveEnd == DefIdx.getStoreIndex()) {
    removeRange(li, RemoveStart, RemoveEnd, li_, tri_);
    // Removing the range may have emptied the interval entirely.
    return removeIntervalIfEmpty(li, li_, tri_);
  }
  return false;
}
|
|
|
|
|
2008-10-28 00:21:01 +01:00
|
|
|
/// RemoveDeadDef - If a def of a live interval is now determined dead, remove
/// the val# it defines. If the live interval becomes empty, remove it as well.
bool SimpleRegisterCoalescing::RemoveDeadDef(LiveInterval &li,
                                             MachineInstr *DefMI) {
  SlotIndex DefIdx = li_->getInstructionIndex(DefMI).getDefIndex();
  // NOTE(review): MLR is dereferenced without checking against li.end();
  // presumably callers only pass a DefMI whose def index lies inside li —
  // TODO confirm.
  LiveInterval::iterator MLR = li.FindLiveRangeContaining(DefIdx);
  // Only remove the val# if this instruction is in fact its defining point.
  if (DefIdx != MLR->valno->def)
    return false;
  li.removeValNo(MLR->valno);
  return removeIntervalIfEmpty(li, li_, tri_);
}
|
|
|
|
|
2008-03-26 21:15:49 +01:00
|
|
|
/// PropagateDeadness - Propagate the dead marker to the instruction which
/// defines the val#.
///
/// LRStart is advanced one slot past the def when the marker is propagated,
/// so the caller's subsequent range removal does not erase the def itself.
static void PropagateDeadness(LiveInterval &li, MachineInstr *CopyMI,
                              SlotIndex &LRStart, LiveIntervals *li_,
                              const TargetRegisterInfo* tri_) {
  MachineInstr *DefMI =
    li_->getInstructionFromIndex(LRStart.getDefIndex());
  if (DefMI && DefMI != CopyMI) {
    int DeadIdx = DefMI->findRegisterDefOperandIdx(li.reg, false);
    if (DeadIdx != -1)
      DefMI->getOperand(DeadIdx).setIsDead();
    else
      // No explicit def operand for li.reg; add an implicit dead def.
      DefMI->addOperand(MachineOperand::CreateReg(li.reg,
                   /*def*/true, /*implicit*/true, /*kill*/false, /*dead*/true));
    LRStart = LRStart.getNextSlot();
  }
}
|
|
|
|
|
2008-04-17 07:20:39 +02:00
|
|
|
/// ShortenDeadCopySrcLiveRange - Shorten a live range as it's artificially
/// extended by a dead copy. Mark the last use (if any) of the val# as kill as
/// ends the live range there. If there isn't another use, then this live range
/// is dead. Return true if live interval is removed.
bool
SimpleRegisterCoalescing::ShortenDeadCopySrcLiveRange(LiveInterval &li,
                                                      MachineInstr *CopyMI) {
  SlotIndex CopyIdx = li_->getInstructionIndex(CopyMI);
  if (CopyIdx == SlotIndex()) {
    // FIXME: special case: function live in. It can be a general case if the
    // first instruction index starts at > 0 value.
    assert(TargetRegisterInfo::isPhysicalRegister(li.reg));
    // Live-in to the function but dead. Remove it from entry live-in set.
    if (mf_->begin()->isLiveIn(li.reg))
      mf_->begin()->removeLiveIn(li.reg);
    const LiveRange *LR = li.getLiveRangeContaining(CopyIdx);
    removeRange(li, LR->start, LR->end, li_, tri_);
    return removeIntervalIfEmpty(li, li_, tri_);
  }

  // Find the live range feeding the copy (just before the copy's slot).
  LiveInterval::iterator LR =
    li.FindLiveRangeContaining(CopyIdx.getPrevIndex().getStoreIndex());
  if (LR == li.end())
    // Livein but defined by a phi.
    return false;

  SlotIndex RemoveStart = LR->start;
  SlotIndex RemoveEnd = CopyIdx.getStoreIndex();
  if (LR->end > RemoveEnd)
    // More uses past this copy? Nothing to do.
    return false;

  // If there is a last use in the same bb, we can't remove the live range.
  // Shorten the live interval and return.
  MachineBasicBlock *CopyMBB = CopyMI->getParent();
  if (TrimLiveIntervalToLastUse(CopyIdx, CopyMBB, li, LR))
    return false;

  // There are other kills of the val#. Nothing to do.
  if (!li.isOnlyLROfValNo(LR))
    return false;

  MachineBasicBlock *StartMBB = li_->getMBBFromIndex(RemoveStart);
  if (!isSameOrFallThroughBB(StartMBB, CopyMBB, tii_))
    // If the live range starts in another mbb and the copy mbb is not a fall
    // through mbb, then we can only cut the range from the beginning of the
    // copy mbb.
    RemoveStart = li_->getMBBStartIdx(CopyMBB).getNextIndex().getBaseIndex();

  if (LR->valno->def == RemoveStart) {
    // If the def MI defines the val# and this copy is the only kill of the
    // val#, then propagate the dead marker.
    PropagateDeadness(li, CopyMI, RemoveStart, li_, tri_);
    ++numDeadValNo;

    // The copy no longer kills the value; drop the stale kill record.
    if (LR->valno->isKill(RemoveEnd))
      LR->valno->removeKill(RemoveEnd);
  }

  removeRange(li, RemoveStart, RemoveEnd, li_, tri_);
  return removeIntervalIfEmpty(li, li_, tri_);
}
|
|
|
|
|
2008-04-09 22:57:25 +02:00
|
|
|
/// CanCoalesceWithImpDef - Returns true if the specified copy instruction
|
|
|
|
/// from an implicit def to another register can be coalesced away.
|
|
|
|
bool SimpleRegisterCoalescing::CanCoalesceWithImpDef(MachineInstr *CopyMI,
|
|
|
|
LiveInterval &li,
|
|
|
|
LiveInterval &ImpLi) const{
|
|
|
|
if (!CopyMI->killsRegister(ImpLi.reg))
|
|
|
|
return false;
|
2009-07-17 23:06:58 +02:00
|
|
|
// Make sure this is the only use.
|
|
|
|
for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(ImpLi.reg),
|
2008-04-09 22:57:25 +02:00
|
|
|
UE = mri_->use_end(); UI != UE;) {
|
|
|
|
MachineInstr *UseMI = &*UI;
|
|
|
|
++UI;
|
2009-07-17 23:06:58 +02:00
|
|
|
if (CopyMI == UseMI || JoinedCopies.count(UseMI))
|
2008-04-09 22:57:25 +02:00
|
|
|
continue;
|
2009-07-17 23:06:58 +02:00
|
|
|
return false;
|
2008-04-09 22:57:25 +02:00
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2009-04-30 20:39:57 +02:00
|
|
|
/// isWinToJoinVRWithSrcPhysReg - Return true if it's worth while to join a
/// a virtual destination register with physical source register.
bool
SimpleRegisterCoalescing::isWinToJoinVRWithSrcPhysReg(MachineInstr *CopyMI,
                                                  MachineBasicBlock *CopyMBB,
                                                  LiveInterval &DstInt,
                                                  LiveInterval &SrcInt) {
  // If the virtual register live interval is long but it has low use desity,
  // do not join them, instead mark the physical register as its allocation
  // preference.
  const TargetRegisterClass *RC = mri_->getRegClass(DstInt.reg);
  unsigned Threshold = allocatableRCRegs_[RC].count() * 2;
  unsigned Length = li_->getApproximateInstructionCount(DstInt);
  if (Length > Threshold &&
      (((float)std::distance(mri_->use_begin(DstInt.reg),
                             mri_->use_end()) / Length) < (1.0 / Threshold)))
    return false;

  // If the virtual register live interval extends into a loop, turn down
  // aggressiveness.
  SlotIndex CopyIdx =
    li_->getInstructionIndex(CopyMI).getDefIndex();
  const MachineLoop *L = loopInfo->getLoopFor(CopyMBB);
  if (!L) {
    // Let's see if the virtual register live interval extends into the loop.
    // Look at the block containing the next live range after the copy's.
    LiveInterval::iterator DLR = DstInt.FindLiveRangeContaining(CopyIdx);
    assert(DLR != DstInt.end() && "Live range not found!");
    DLR = DstInt.FindLiveRangeContaining(DLR->end.getNextSlot());
    if (DLR != DstInt.end()) {
      CopyMBB = li_->getMBBFromIndex(DLR->start);
      L = loopInfo->getLoopFor(CopyMBB);
    }
  }

  // No loop involved, or the interval is short anyway: join.
  if (!L || Length <= Threshold)
    return true;

  SlotIndex UseIdx = CopyIdx.getUseIndex();
  LiveInterval::iterator SLR = SrcInt.FindLiveRangeContaining(UseIdx);
  MachineBasicBlock *SMBB = li_->getMBBFromIndex(SLR->start);
  if (loopInfo->getLoopFor(SMBB) != L) {
    if (!loopInfo->isLoopHeader(CopyMBB))
      return false;
    // If vr's live interval extends pass the loop header, do not join.
    for (MachineBasicBlock::succ_iterator SI = CopyMBB->succ_begin(),
           SE = CopyMBB->succ_end(); SI != SE; ++SI) {
      MachineBasicBlock *SuccMBB = *SI;
      if (SuccMBB == CopyMBB)
        continue;
      if (DstInt.overlaps(li_->getMBBStartIdx(SuccMBB),
                          li_->getMBBEndIdx(SuccMBB)))
        return false;
    }
  }
  return true;
}
|
|
|
|
|
|
|
|
/// isWinToJoinVRWithDstPhysReg - Return true if it's worth while to join a
/// copy from a virtual source register to a physical destination register.
bool
SimpleRegisterCoalescing::isWinToJoinVRWithDstPhysReg(MachineInstr *CopyMI,
                                                  MachineBasicBlock *CopyMBB,
                                                  LiveInterval &DstInt,
                                                  LiveInterval &SrcInt) {
  // If the virtual register live interval is long but it has low use desity,
  // do not join them, instead mark the physical register as its allocation
  // preference.
  const TargetRegisterClass *RC = mri_->getRegClass(SrcInt.reg);
  unsigned Threshold = allocatableRCRegs_[RC].count() * 2;
  unsigned Length = li_->getApproximateInstructionCount(SrcInt);
  if (Length > Threshold &&
      (((float)std::distance(mri_->use_begin(SrcInt.reg),
                             mri_->use_end()) / Length) < (1.0 / Threshold)))
    return false;

  if (SrcInt.empty())
    // Must be implicit_def.
    return false;

  // If the virtual register live interval is defined or cross a loop, turn
  // down aggressiveness.
  SlotIndex CopyIdx =
    li_->getInstructionIndex(CopyMI).getDefIndex();
  SlotIndex UseIdx = CopyIdx.getUseIndex();
  LiveInterval::iterator SLR = SrcInt.FindLiveRangeContaining(UseIdx);
  assert(SLR != SrcInt.end() && "Live range not found!");
  // Step back to the live range preceding the one feeding the copy.
  SLR = SrcInt.FindLiveRangeContaining(SLR->start.getPrevSlot());
  if (SLR == SrcInt.end())
    return true;
  MachineBasicBlock *SMBB = li_->getMBBFromIndex(SLR->start);
  const MachineLoop *L = loopInfo->getLoopFor(SMBB);

  // No loop involved, or the interval is short anyway: join.
  if (!L || Length <= Threshold)
    return true;

  if (loopInfo->getLoopFor(CopyMBB) != L) {
    if (SMBB != L->getLoopLatch())
      return false;
    // If vr's live interval is extended from before the loop latch, do not
    // join.
    for (MachineBasicBlock::pred_iterator PI = SMBB->pred_begin(),
           PE = SMBB->pred_end(); PI != PE; ++PI) {
      MachineBasicBlock *PredMBB = *PI;
      if (PredMBB == SMBB)
        continue;
      if (SrcInt.overlaps(li_->getMBBStartIdx(PredMBB),
                          li_->getMBBEndIdx(PredMBB)))
        return false;
    }
  }
  return true;
}
|
|
|
|
|
2009-01-23 03:15:19 +01:00
|
|
|
/// isWinToJoinCrossClass - Return true if it's profitable to coalesce
|
|
|
|
/// two virtual registers from different register classes.
|
2008-06-19 03:39:21 +02:00
|
|
|
bool
|
2009-01-23 03:15:19 +01:00
|
|
|
SimpleRegisterCoalescing::isWinToJoinCrossClass(unsigned LargeReg,
|
|
|
|
unsigned SmallReg,
|
|
|
|
unsigned Threshold) {
|
2008-06-19 03:39:21 +02:00
|
|
|
// Then make sure the intervals are *short*.
|
2009-01-23 03:15:19 +01:00
|
|
|
LiveInterval &LargeInt = li_->getInterval(LargeReg);
|
|
|
|
LiveInterval &SmallInt = li_->getInterval(SmallReg);
|
|
|
|
unsigned LargeSize = li_->getApproximateInstructionCount(LargeInt);
|
|
|
|
unsigned SmallSize = li_->getApproximateInstructionCount(SmallInt);
|
|
|
|
if (SmallSize > Threshold || LargeSize > Threshold)
|
|
|
|
if ((float)std::distance(mri_->use_begin(SmallReg),
|
|
|
|
mri_->use_end()) / SmallSize <
|
|
|
|
(float)std::distance(mri_->use_begin(LargeReg),
|
|
|
|
mri_->use_end()) / LargeSize)
|
|
|
|
return false;
|
|
|
|
return true;
|
2008-06-19 03:39:21 +02:00
|
|
|
}
|
|
|
|
|
2008-09-11 22:07:10 +02:00
|
|
|
/// HasIncompatibleSubRegDefUse - If we are trying to coalesce a virtual
/// register with a physical register, check if any of the virtual register
/// operand is a sub-register use or def. If so, make sure it won't result
/// in an illegal extract_subreg or insert_subreg instruction. e.g.
/// vr1024 = extract_subreg vr1025, 1
/// ...
/// vr1024 = mov8rr AH
/// If vr1024 is coalesced with AH, the extract_subreg is now illegal since
/// AH does not have a super-reg whose sub-register 1 is AH.
bool
SimpleRegisterCoalescing::HasIncompatibleSubRegDefUse(MachineInstr *CopyMI,
                                                      unsigned VirtReg,
                                                      unsigned PhysReg) {
  // Walk every use/def of VirtReg; any one incompatible operand makes the
  // whole coalesce illegal.
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(VirtReg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = &*I;
    // Skip the copy being coalesced and copies already joined away.
    if (MI == CopyMI || JoinedCopies.count(MI))
      continue;
    // Plain sub-register operand: PhysReg must actually have that subreg.
    unsigned SubIdx = O.getSubReg();
    if (SubIdx && !tri_->getSubReg(PhysReg, SubIdx))
      return true;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
      // Operand 2 of EXTRACT_SUBREG is the sub-register index.
      SubIdx = MI->getOperand(2).getImm();
      // Extracting SubIdx from PhysReg requires PhysReg to have that subreg.
      if (O.isUse() && !tri_->getSubReg(PhysReg, SubIdx))
        return true;
      if (O.isDef()) {
        // VirtReg is the extract result: PhysReg must be the SubIdx
        // sub-register of some register in the source's class.
        unsigned SrcReg = MI->getOperand(1).getReg();
        const TargetRegisterClass *RC =
          TargetRegisterInfo::isPhysicalRegister(SrcReg)
          ? tri_->getPhysicalRegisterRegClass(SrcReg)
          : mri_->getRegClass(SrcReg);
        if (!tri_->getMatchingSuperReg(PhysReg, SubIdx, RC))
          return true;
      }
    }
    if (MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG) {
      // Operand 3 of INSERT_SUBREG/SUBREG_TO_REG is the sub-register index.
      SubIdx = MI->getOperand(3).getImm();
      if (VirtReg == MI->getOperand(0).getReg()) {
        // VirtReg is the insert destination: PhysReg needs that subreg.
        if (!tri_->getSubReg(PhysReg, SubIdx))
          return true;
      } else {
        // VirtReg is the inserted value: PhysReg must be the SubIdx
        // sub-register of some register in the destination's class.
        unsigned DstReg = MI->getOperand(0).getReg();
        const TargetRegisterClass *RC =
          TargetRegisterInfo::isPhysicalRegister(DstReg)
          ? tri_->getPhysicalRegisterRegClass(DstReg)
          : mri_->getRegClass(DstReg);
        if (!tri_->getMatchingSuperReg(PhysReg, SubIdx, RC))
          return true;
      }
    }
  }
  return false;
}
|
|
|
|
|
2008-06-19 03:39:21 +02:00
|
|
|
|
2009-01-20 07:44:16 +01:00
|
|
|
/// CanJoinExtractSubRegToPhysReg - Return true if it's possible to coalesce
|
|
|
|
/// an extract_subreg where dst is a physical register, e.g.
|
|
|
|
/// cl = EXTRACT_SUBREG reg1024, 1
|
|
|
|
bool
|
2009-01-23 03:15:19 +01:00
|
|
|
SimpleRegisterCoalescing::CanJoinExtractSubRegToPhysReg(unsigned DstReg,
|
|
|
|
unsigned SrcReg, unsigned SubIdx,
|
|
|
|
unsigned &RealDstReg) {
|
2009-01-20 07:44:16 +01:00
|
|
|
const TargetRegisterClass *RC = mri_->getRegClass(SrcReg);
|
2009-04-28 20:29:27 +02:00
|
|
|
RealDstReg = tri_->getMatchingSuperReg(DstReg, SubIdx, RC);
|
2009-01-20 07:44:16 +01:00
|
|
|
assert(RealDstReg && "Invalid extract_subreg instruction!");
|
|
|
|
|
|
|
|
// For this type of EXTRACT_SUBREG, conservatively
|
|
|
|
// check if the live interval of the source register interfere with the
|
|
|
|
// actual super physical register we are trying to coalesce with.
|
|
|
|
LiveInterval &RHS = li_->getInterval(SrcReg);
|
|
|
|
if (li_->hasInterval(RealDstReg) &&
|
|
|
|
RHS.overlaps(li_->getInterval(RealDstReg))) {
|
2009-08-22 22:52:46 +02:00
|
|
|
DEBUG({
|
2010-01-05 02:25:58 +01:00
|
|
|
dbgs() << "Interfere with register ";
|
|
|
|
li_->getInterval(RealDstReg).print(dbgs(), tri_);
|
2009-08-22 22:52:46 +02:00
|
|
|
});
|
2009-01-20 07:44:16 +01:00
|
|
|
return false; // Not coalescable
|
|
|
|
}
|
|
|
|
for (const unsigned* SR = tri_->getSubRegisters(RealDstReg); *SR; ++SR)
|
|
|
|
if (li_->hasInterval(*SR) && RHS.overlaps(li_->getInterval(*SR))) {
|
2009-08-22 22:52:46 +02:00
|
|
|
DEBUG({
|
2010-01-05 02:25:58 +01:00
|
|
|
dbgs() << "Interfere with sub-register ";
|
|
|
|
li_->getInterval(*SR).print(dbgs(), tri_);
|
2009-08-22 22:52:46 +02:00
|
|
|
});
|
2009-01-20 07:44:16 +01:00
|
|
|
return false; // Not coalescable
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// CanJoinInsertSubRegToPhysReg - Return true if it's possible to coalesce
|
|
|
|
/// an insert_subreg where src is a physical register, e.g.
|
|
|
|
/// reg1024 = INSERT_SUBREG reg1024, c1, 0
|
|
|
|
bool
|
2009-01-23 03:15:19 +01:00
|
|
|
SimpleRegisterCoalescing::CanJoinInsertSubRegToPhysReg(unsigned DstReg,
|
|
|
|
unsigned SrcReg, unsigned SubIdx,
|
|
|
|
unsigned &RealSrcReg) {
|
2009-01-20 07:44:16 +01:00
|
|
|
const TargetRegisterClass *RC = mri_->getRegClass(DstReg);
|
2009-04-28 20:29:27 +02:00
|
|
|
RealSrcReg = tri_->getMatchingSuperReg(SrcReg, SubIdx, RC);
|
2009-01-20 07:44:16 +01:00
|
|
|
assert(RealSrcReg && "Invalid extract_subreg instruction!");
|
|
|
|
|
|
|
|
LiveInterval &RHS = li_->getInterval(DstReg);
|
|
|
|
if (li_->hasInterval(RealSrcReg) &&
|
|
|
|
RHS.overlaps(li_->getInterval(RealSrcReg))) {
|
2009-08-22 22:52:46 +02:00
|
|
|
DEBUG({
|
2010-01-05 02:25:58 +01:00
|
|
|
dbgs() << "Interfere with register ";
|
|
|
|
li_->getInterval(RealSrcReg).print(dbgs(), tri_);
|
2009-08-22 22:52:46 +02:00
|
|
|
});
|
2009-01-20 07:44:16 +01:00
|
|
|
return false; // Not coalescable
|
|
|
|
}
|
|
|
|
for (const unsigned* SR = tri_->getSubRegisters(RealSrcReg); *SR; ++SR)
|
|
|
|
if (li_->hasInterval(*SR) && RHS.overlaps(li_->getInterval(*SR))) {
|
2009-08-22 22:52:46 +02:00
|
|
|
DEBUG({
|
2010-01-05 02:25:58 +01:00
|
|
|
dbgs() << "Interfere with sub-register ";
|
|
|
|
li_->getInterval(*SR).print(dbgs(), tri_);
|
2009-08-22 22:52:46 +02:00
|
|
|
});
|
2009-01-20 07:44:16 +01:00
|
|
|
return false; // Not coalescable
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2009-06-14 22:22:55 +02:00
|
|
|
/// getRegAllocPreference - Return register allocation preference register.
|
|
|
|
///
|
|
|
|
static unsigned getRegAllocPreference(unsigned Reg, MachineFunction &MF,
|
|
|
|
MachineRegisterInfo *MRI,
|
|
|
|
const TargetRegisterInfo *TRI) {
|
|
|
|
if (TargetRegisterInfo::isPhysicalRegister(Reg))
|
|
|
|
return 0;
|
2009-06-15 10:28:29 +02:00
|
|
|
std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(Reg);
|
|
|
|
return TRI->ResolveRegAllocHint(Hint.first, Hint.second, MF);
|
2009-06-14 22:22:55 +02:00
|
|
|
}
|
|
|
|
|
2007-06-08 19:18:56 +02:00
|
|
|
/// JoinCopy - Attempt to join intervals corresponding to SrcReg/DstReg,
|
|
|
|
/// which are the src/dst of the copy instruction CopyMI. This returns true
|
2007-11-01 07:22:48 +01:00
|
|
|
/// if the copy was successfully coalesced away. If it is not currently
|
|
|
|
/// possible to coalesce this interval, but it may be possible if other
|
|
|
|
/// things get coalesced, then it returns true by reference in 'Again'.
|
2008-02-13 04:01:43 +01:00
|
|
|
bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
|
2007-11-06 09:52:21 +01:00
|
|
|
MachineInstr *CopyMI = TheCopy.MI;
|
|
|
|
|
|
|
|
Again = false;
|
2008-08-30 11:09:33 +02:00
|
|
|
if (JoinedCopies.count(CopyMI) || ReMatCopies.count(CopyMI))
|
2007-11-06 09:52:21 +01:00
|
|
|
return false; // Already done.
|
|
|
|
|
2010-01-05 02:25:58 +01:00
|
|
|
DEBUG(dbgs() << li_->getInstructionIndex(CopyMI) << '\t' << *CopyMI);
|
2007-06-08 19:18:56 +02:00
|
|
|
|
2009-04-28 18:34:35 +02:00
|
|
|
unsigned SrcReg, DstReg, SrcSubIdx = 0, DstSubIdx = 0;
|
2008-02-15 19:24:29 +01:00
|
|
|
bool isExtSubReg = CopyMI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG;
|
2008-04-09 22:57:25 +02:00
|
|
|
bool isInsSubReg = CopyMI->getOpcode() == TargetInstrInfo::INSERT_SUBREG;
|
Implement support for using modeling implicit-zero-extension on x86-64
with SUBREG_TO_REG, teach SimpleRegisterCoalescing to coalesce
SUBREG_TO_REG instructions (which are similar to INSERT_SUBREG
instructions), and teach the DAGCombiner to take advantage of this on
targets which support it. This eliminates many redundant
zero-extension operations on x86-64.
This adds a new TargetLowering hook, isZExtFree. It's similar to
isTruncateFree, except it only applies to actual definitions, and not
no-op truncates which may not zero the high bits.
Also, this adds a new optimization to SimplifyDemandedBits: transform
operations like x+y into (zext (add (trunc x), (trunc y))) on targets
where all the casts are no-ops. In contexts where the high part of the
add is explicitly masked off, this allows the mask operation to be
eliminated. Fix the DAGCombiner to avoid undoing these transformations
to eliminate casts on targets where the casts are no-ops.
Also, this adds a new two-address lowering heuristic. Since
two-address lowering runs before coalescing, it helps to be able to
look through copies when deciding whether commuting and/or
three-address conversion are profitable.
Also, fix a bug in LiveInterval::MergeInClobberRanges. It didn't handle
the case that a clobber range extended both before and beyond an
existing live range. In that case, multiple live ranges need to be
added. This was exposed by the new subreg coalescing code.
Remove 2008-05-06-SpillerBug.ll. It was bugpoint-reduced, and the
spiller behavior it was looking for no longer occurrs with the new
instruction selection.
llvm-svn: 68576
2009-04-08 02:15:30 +02:00
|
|
|
bool isSubRegToReg = CopyMI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG;
|
2008-02-15 19:24:29 +01:00
|
|
|
unsigned SubIdx = 0;
|
|
|
|
if (isExtSubReg) {
|
2009-04-28 18:34:35 +02:00
|
|
|
DstReg = CopyMI->getOperand(0).getReg();
|
|
|
|
DstSubIdx = CopyMI->getOperand(0).getSubReg();
|
|
|
|
SrcReg = CopyMI->getOperand(1).getReg();
|
|
|
|
SrcSubIdx = CopyMI->getOperand(2).getImm();
|
Implement support for using modeling implicit-zero-extension on x86-64
with SUBREG_TO_REG, teach SimpleRegisterCoalescing to coalesce
SUBREG_TO_REG instructions (which are similar to INSERT_SUBREG
instructions), and teach the DAGCombiner to take advantage of this on
targets which support it. This eliminates many redundant
zero-extension operations on x86-64.
This adds a new TargetLowering hook, isZExtFree. It's similar to
isTruncateFree, except it only applies to actual definitions, and not
no-op truncates which may not zero the high bits.
Also, this adds a new optimization to SimplifyDemandedBits: transform
operations like x+y into (zext (add (trunc x), (trunc y))) on targets
where all the casts are no-ops. In contexts where the high part of the
add is explicitly masked off, this allows the mask operation to be
eliminated. Fix the DAGCombiner to avoid undoing these transformations
to eliminate casts on targets where the casts are no-ops.
Also, this adds a new two-address lowering heuristic. Since
two-address lowering runs before coalescing, it helps to be able to
look through copies when deciding whether commuting and/or
three-address conversion are profitable.
Also, fix a bug in LiveInterval::MergeInClobberRanges. It didn't handle
the case that a clobber range extended both before and beyond an
existing live range. In that case, multiple live ranges need to be
added. This was exposed by the new subreg coalescing code.
Remove 2008-05-06-SpillerBug.ll. It was bugpoint-reduced, and the
spiller behavior it was looking for no longer occurrs with the new
instruction selection.
llvm-svn: 68576
2009-04-08 02:15:30 +02:00
|
|
|
} else if (isInsSubReg || isSubRegToReg) {
|
2009-07-18 06:52:23 +02:00
|
|
|
DstReg = CopyMI->getOperand(0).getReg();
|
|
|
|
DstSubIdx = CopyMI->getOperand(3).getImm();
|
|
|
|
SrcReg = CopyMI->getOperand(2).getReg();
|
|
|
|
SrcSubIdx = CopyMI->getOperand(2).getSubReg();
|
|
|
|
if (SrcSubIdx && SrcSubIdx != DstSubIdx) {
|
|
|
|
// r1025 = INSERT_SUBREG r1025, r1024<2>, 2 Then r1024 has already been
|
|
|
|
// coalesced to a larger register so the subreg indices cancel out.
|
2010-01-05 02:25:58 +01:00
|
|
|
DEBUG(dbgs() << "\tSource of insert_subreg or subreg_to_reg is already "
|
2009-09-21 17:18:33 +02:00
|
|
|
"coalesced to another register.\n");
|
2008-04-09 22:57:25 +02:00
|
|
|
return false; // Not coalescable.
|
|
|
|
}
|
2009-12-10 21:59:45 +01:00
|
|
|
} else if (tii_->isMoveInstr(*CopyMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx)) {
|
|
|
|
if (SrcSubIdx && DstSubIdx && SrcSubIdx != DstSubIdx) {
|
|
|
|
// e.g. %reg16404:1<def> = MOV8rr %reg16412:2<kill>
|
|
|
|
Again = true;
|
|
|
|
return false; // Not coalescable.
|
|
|
|
}
|
|
|
|
} else {
|
2009-07-14 18:55:14 +02:00
|
|
|
llvm_unreachable("Unrecognized copy instruction!");
|
2008-02-13 04:01:43 +01:00
|
|
|
}
|
|
|
|
|
2007-06-08 19:18:56 +02:00
|
|
|
// If they are already joined we continue.
|
2008-02-15 19:24:29 +01:00
|
|
|
if (SrcReg == DstReg) {
|
2010-01-05 02:25:58 +01:00
|
|
|
DEBUG(dbgs() << "\tCopy already coalesced.\n");
|
2007-11-01 07:22:48 +01:00
|
|
|
return false; // Not coalescable.
|
2007-06-08 19:18:56 +02:00
|
|
|
}
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2008-02-15 19:24:29 +01:00
|
|
|
bool SrcIsPhys = TargetRegisterInfo::isPhysicalRegister(SrcReg);
|
|
|
|
bool DstIsPhys = TargetRegisterInfo::isPhysicalRegister(DstReg);
|
2007-06-08 19:18:56 +02:00
|
|
|
|
|
|
|
// If they are both physical registers, we cannot join them.
|
|
|
|
if (SrcIsPhys && DstIsPhys) {
|
2010-01-05 02:25:58 +01:00
|
|
|
DEBUG(dbgs() << "\tCan not coalesce physregs.\n");
|
2007-11-01 07:22:48 +01:00
|
|
|
return false; // Not coalescable.
|
2007-06-08 19:18:56 +02:00
|
|
|
}
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2007-06-08 19:18:56 +02:00
|
|
|
// We only join virtual registers with allocatable physical registers.
|
2008-02-15 19:24:29 +01:00
|
|
|
if (SrcIsPhys && !allocatableRegs_[SrcReg]) {
|
2010-01-05 02:25:58 +01:00
|
|
|
DEBUG(dbgs() << "\tSrc reg is unallocatable physreg.\n");
|
2007-11-01 07:22:48 +01:00
|
|
|
return false; // Not coalescable.
|
2007-06-08 19:18:56 +02:00
|
|
|
}
|
2008-02-15 19:24:29 +01:00
|
|
|
if (DstIsPhys && !allocatableRegs_[DstReg]) {
|
2010-01-05 02:25:58 +01:00
|
|
|
DEBUG(dbgs() << "\tDst reg is unallocatable physreg.\n");
|
2007-11-01 07:22:48 +01:00
|
|
|
return false; // Not coalescable.
|
2007-06-08 19:18:56 +02:00
|
|
|
}
|
2007-10-12 10:50:34 +02:00
|
|
|
|
2009-04-28 18:34:35 +02:00
|
|
|
// Check that a physical source register is compatible with dst regclass
|
|
|
|
if (SrcIsPhys) {
|
|
|
|
unsigned SrcSubReg = SrcSubIdx ?
|
|
|
|
tri_->getSubReg(SrcReg, SrcSubIdx) : SrcReg;
|
|
|
|
const TargetRegisterClass *DstRC = mri_->getRegClass(DstReg);
|
|
|
|
const TargetRegisterClass *DstSubRC = DstRC;
|
|
|
|
if (DstSubIdx)
|
|
|
|
DstSubRC = DstRC->getSubRegisterRegClass(DstSubIdx);
|
|
|
|
assert(DstSubRC && "Illegal subregister index");
|
|
|
|
if (!DstSubRC->contains(SrcSubReg)) {
|
2010-01-05 02:25:58 +01:00
|
|
|
DEBUG(dbgs() << "\tIncompatible destination regclass: "
|
2009-08-22 22:52:46 +02:00
|
|
|
<< tri_->getName(SrcSubReg) << " not in "
|
|
|
|
<< DstSubRC->getName() << ".\n");
|
2009-04-28 18:34:35 +02:00
|
|
|
return false; // Not coalescable.
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check that a physical dst register is compatible with source regclass
|
|
|
|
if (DstIsPhys) {
|
|
|
|
unsigned DstSubReg = DstSubIdx ?
|
|
|
|
tri_->getSubReg(DstReg, DstSubIdx) : DstReg;
|
|
|
|
const TargetRegisterClass *SrcRC = mri_->getRegClass(SrcReg);
|
|
|
|
const TargetRegisterClass *SrcSubRC = SrcRC;
|
|
|
|
if (SrcSubIdx)
|
|
|
|
SrcSubRC = SrcRC->getSubRegisterRegClass(SrcSubIdx);
|
|
|
|
assert(SrcSubRC && "Illegal subregister index");
|
2009-11-04 09:33:14 +01:00
|
|
|
if (!SrcSubRC->contains(DstSubReg)) {
|
2010-01-05 02:25:58 +01:00
|
|
|
DEBUG(dbgs() << "\tIncompatible source regclass: "
|
2009-08-22 22:52:46 +02:00
|
|
|
<< tri_->getName(DstSubReg) << " not in "
|
|
|
|
<< SrcSubRC->getName() << ".\n");
|
2009-07-28 01:14:11 +02:00
|
|
|
(void)DstSubReg;
|
2009-04-28 18:34:35 +02:00
|
|
|
return false; // Not coalescable.
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-06-19 03:39:21 +02:00
|
|
|
// Should be non-null only when coalescing to a sub-register class.
|
2009-01-23 03:15:19 +01:00
|
|
|
bool CrossRC = false;
|
2009-07-18 04:10:10 +02:00
|
|
|
const TargetRegisterClass *SrcRC= SrcIsPhys ? 0 : mri_->getRegClass(SrcReg);
|
|
|
|
const TargetRegisterClass *DstRC= DstIsPhys ? 0 : mri_->getRegClass(DstReg);
|
2009-01-23 03:15:19 +01:00
|
|
|
const TargetRegisterClass *NewRC = NULL;
|
2008-06-19 03:39:21 +02:00
|
|
|
MachineBasicBlock *CopyMBB = CopyMI->getParent();
|
2007-10-12 10:50:34 +02:00
|
|
|
unsigned RealDstReg = 0;
|
2008-04-09 22:57:25 +02:00
|
|
|
unsigned RealSrcReg = 0;
|
Implement support for using modeling implicit-zero-extension on x86-64
with SUBREG_TO_REG, teach SimpleRegisterCoalescing to coalesce
SUBREG_TO_REG instructions (which are similar to INSERT_SUBREG
instructions), and teach the DAGCombiner to take advantage of this on
targets which support it. This eliminates many redundant
zero-extension operations on x86-64.
This adds a new TargetLowering hook, isZExtFree. It's similar to
isTruncateFree, except it only applies to actual definitions, and not
no-op truncates which may not zero the high bits.
Also, this adds a new optimization to SimplifyDemandedBits: transform
operations like x+y into (zext (add (trunc x), (trunc y))) on targets
where all the casts are no-ops. In contexts where the high part of the
add is explicitly masked off, this allows the mask operation to be
eliminated. Fix the DAGCombiner to avoid undoing these transformations
to eliminate casts on targets where the casts are no-ops.
Also, this adds a new two-address lowering heuristic. Since
two-address lowering runs before coalescing, it helps to be able to
look through copies when deciding whether commuting and/or
three-address conversion are profitable.
Also, fix a bug in LiveInterval::MergeInClobberRanges. It didn't handle
the case that a clobber range extended both before and beyond an
existing live range. In that case, multiple live ranges need to be
added. This was exposed by the new subreg coalescing code.
Remove 2008-05-06-SpillerBug.ll. It was bugpoint-reduced, and the
spiller behavior it was looking for no longer occurrs with the new
instruction selection.
llvm-svn: 68576
2009-04-08 02:15:30 +02:00
|
|
|
if (isExtSubReg || isInsSubReg || isSubRegToReg) {
|
2008-04-09 22:57:25 +02:00
|
|
|
SubIdx = CopyMI->getOperand(isExtSubReg ? 2 : 3).getImm();
|
|
|
|
if (SrcIsPhys && isExtSubReg) {
|
2007-10-12 10:50:34 +02:00
|
|
|
// r1024 = EXTRACT_SUBREG EAX, 0 then r1024 is really going to be
|
|
|
|
// coalesced with AX.
|
2008-04-17 02:06:42 +02:00
|
|
|
unsigned DstSubIdx = CopyMI->getOperand(0).getSubReg();
|
2008-04-17 09:58:04 +02:00
|
|
|
if (DstSubIdx) {
|
|
|
|
// r1024<2> = EXTRACT_SUBREG EAX, 2. Then r1024 has already been
|
|
|
|
// coalesced to a larger register so the subreg indices cancel out.
|
|
|
|
if (DstSubIdx != SubIdx) {
|
2010-01-05 02:25:58 +01:00
|
|
|
DEBUG(dbgs() << "\t Sub-register indices mismatch.\n");
|
2008-04-17 09:58:04 +02:00
|
|
|
return false; // Not coalescable.
|
|
|
|
}
|
|
|
|
} else
|
2008-04-17 02:06:42 +02:00
|
|
|
SrcReg = tri_->getSubReg(SrcReg, SubIdx);
|
2008-02-15 19:24:29 +01:00
|
|
|
SubIdx = 0;
|
Implement support for using modeling implicit-zero-extension on x86-64
with SUBREG_TO_REG, teach SimpleRegisterCoalescing to coalesce
SUBREG_TO_REG instructions (which are similar to INSERT_SUBREG
instructions), and teach the DAGCombiner to take advantage of this on
targets which support it. This eliminates many redundant
zero-extension operations on x86-64.
This adds a new TargetLowering hook, isZExtFree. It's similar to
isTruncateFree, except it only applies to actual definitions, and not
no-op truncates which may not zero the high bits.
Also, this adds a new optimization to SimplifyDemandedBits: transform
operations like x+y into (zext (add (trunc x), (trunc y))) on targets
where all the casts are no-ops. In contexts where the high part of the
add is explicitly masked off, this allows the mask operation to be
eliminated. Fix the DAGCombiner to avoid undoing these transformations
to eliminate casts on targets where the casts are no-ops.
Also, this adds a new two-address lowering heuristic. Since
two-address lowering runs before coalescing, it helps to be able to
look through copies when deciding whether commuting and/or
three-address conversion are profitable.
Also, fix a bug in LiveInterval::MergeInClobberRanges. It didn't handle
the case that a clobber range extended both before and beyond an
existing live range. In that case, multiple live ranges need to be
added. This was exposed by the new subreg coalescing code.
Remove 2008-05-06-SpillerBug.ll. It was bugpoint-reduced, and the
spiller behavior it was looking for no longer occurrs with the new
instruction selection.
llvm-svn: 68576
2009-04-08 02:15:30 +02:00
|
|
|
} else if (DstIsPhys && (isInsSubReg || isSubRegToReg)) {
|
2008-04-09 22:57:25 +02:00
|
|
|
// EAX = INSERT_SUBREG EAX, r1024, 0
|
2008-04-17 02:06:42 +02:00
|
|
|
unsigned SrcSubIdx = CopyMI->getOperand(2).getSubReg();
|
2008-04-17 09:58:04 +02:00
|
|
|
if (SrcSubIdx) {
|
|
|
|
// EAX = INSERT_SUBREG EAX, r1024<2>, 2 Then r1024 has already been
|
|
|
|
// coalesced to a larger register so the subreg indices cancel out.
|
|
|
|
if (SrcSubIdx != SubIdx) {
|
2010-01-05 02:25:58 +01:00
|
|
|
DEBUG(dbgs() << "\t Sub-register indices mismatch.\n");
|
2008-04-17 09:58:04 +02:00
|
|
|
return false; // Not coalescable.
|
|
|
|
}
|
|
|
|
} else
|
|
|
|
DstReg = tri_->getSubReg(DstReg, SubIdx);
|
2008-04-09 22:57:25 +02:00
|
|
|
SubIdx = 0;
|
Implement support for using modeling implicit-zero-extension on x86-64
with SUBREG_TO_REG, teach SimpleRegisterCoalescing to coalesce
SUBREG_TO_REG instructions (which are similar to INSERT_SUBREG
instructions), and teach the DAGCombiner to take advantage of this on
targets which support it. This eliminates many redundant
zero-extension operations on x86-64.
This adds a new TargetLowering hook, isZExtFree. It's similar to
isTruncateFree, except it only applies to actual definitions, and not
no-op truncates which may not zero the high bits.
Also, this adds a new optimization to SimplifyDemandedBits: transform
operations like x+y into (zext (add (trunc x), (trunc y))) on targets
where all the casts are no-ops. In contexts where the high part of the
add is explicitly masked off, this allows the mask operation to be
eliminated. Fix the DAGCombiner to avoid undoing these transformations
to eliminate casts on targets where the casts are no-ops.
Also, this adds a new two-address lowering heuristic. Since
two-address lowering runs before coalescing, it helps to be able to
look through copies when deciding whether commuting and/or
three-address conversion are profitable.
Also, fix a bug in LiveInterval::MergeInClobberRanges. It didn't handle
the case that a clobber range extended both before and beyond an
existing live range. In that case, multiple live ranges need to be
added. This was exposed by the new subreg coalescing code.
Remove 2008-05-06-SpillerBug.ll. It was bugpoint-reduced, and the
spiller behavior it was looking for no longer occurrs with the new
instruction selection.
llvm-svn: 68576
2009-04-08 02:15:30 +02:00
|
|
|
} else if ((DstIsPhys && isExtSubReg) ||
|
|
|
|
(SrcIsPhys && (isInsSubReg || isSubRegToReg))) {
|
|
|
|
if (!isSubRegToReg && CopyMI->getOperand(1).getSubReg()) {
|
2010-01-05 02:25:58 +01:00
|
|
|
DEBUG(dbgs() << "\tSrc of extract_subreg already coalesced with reg"
|
2009-08-22 22:52:46 +02:00
|
|
|
<< " of a super-class.\n");
|
2009-01-23 03:15:19 +01:00
|
|
|
return false; // Not coalescable.
|
|
|
|
}
|
|
|
|
|
2008-04-09 22:57:25 +02:00
|
|
|
if (isExtSubReg) {
|
2009-01-23 03:15:19 +01:00
|
|
|
if (!CanJoinExtractSubRegToPhysReg(DstReg, SrcReg, SubIdx, RealDstReg))
|
2009-01-20 07:44:16 +01:00
|
|
|
return false; // Not coalescable
|
2008-04-09 22:57:25 +02:00
|
|
|
} else {
|
2009-01-23 03:15:19 +01:00
|
|
|
if (!CanJoinInsertSubRegToPhysReg(DstReg, SrcReg, SubIdx, RealSrcReg))
|
2007-11-01 07:22:48 +01:00
|
|
|
return false; // Not coalescable
|
2009-01-20 07:44:16 +01:00
|
|
|
}
|
2008-02-15 19:24:29 +01:00
|
|
|
SubIdx = 0;
|
2007-11-01 07:22:48 +01:00
|
|
|
} else {
|
2008-04-17 09:58:04 +02:00
|
|
|
unsigned OldSubIdx = isExtSubReg ? CopyMI->getOperand(0).getSubReg()
|
|
|
|
: CopyMI->getOperand(2).getSubReg();
|
|
|
|
if (OldSubIdx) {
|
2009-01-23 03:15:19 +01:00
|
|
|
if (OldSubIdx == SubIdx && !differingRegisterClasses(SrcReg, DstReg))
|
2008-04-17 09:58:04 +02:00
|
|
|
// r1024<2> = EXTRACT_SUBREG r1025, 2. Then r1024 has already been
|
|
|
|
// coalesced to a larger register so the subreg indices cancel out.
|
2008-04-29 03:41:44 +02:00
|
|
|
// Also check if the other larger register is of the same register
|
|
|
|
// class as the would be resulting register.
|
2008-04-17 09:58:04 +02:00
|
|
|
SubIdx = 0;
|
|
|
|
else {
|
2010-01-05 02:25:58 +01:00
|
|
|
DEBUG(dbgs() << "\t Sub-register indices mismatch.\n");
|
2008-04-17 09:58:04 +02:00
|
|
|
return false; // Not coalescable.
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (SubIdx) {
|
2009-07-20 21:47:55 +02:00
|
|
|
if (!DstIsPhys && !SrcIsPhys) {
|
|
|
|
if (isInsSubReg || isSubRegToReg) {
|
2009-07-18 04:10:10 +02:00
|
|
|
NewRC = tri_->getMatchingSuperRegClass(DstRC, SrcRC, SubIdx);
|
2009-07-20 21:47:55 +02:00
|
|
|
} else // extract_subreg {
|
|
|
|
NewRC = tri_->getMatchingSuperRegClass(SrcRC, DstRC, SubIdx);
|
2009-07-18 04:10:10 +02:00
|
|
|
}
|
2009-07-20 21:47:55 +02:00
|
|
|
if (!NewRC) {
|
2010-01-05 02:25:58 +01:00
|
|
|
DEBUG(dbgs() << "\t Conflicting sub-register indices.\n");
|
2009-07-20 21:47:55 +02:00
|
|
|
return false; // Not coalescable
|
2009-07-18 04:10:10 +02:00
|
|
|
}
|
2009-07-20 21:47:55 +02:00
|
|
|
|
2008-04-17 09:58:04 +02:00
|
|
|
unsigned LargeReg = isExtSubReg ? SrcReg : DstReg;
|
|
|
|
unsigned SmallReg = isExtSubReg ? DstReg : SrcReg;
|
2009-01-23 03:15:19 +01:00
|
|
|
unsigned Limit= allocatableRCRegs_[mri_->getRegClass(SmallReg)].count();
|
|
|
|
if (!isWinToJoinCrossClass(LargeReg, SmallReg, Limit)) {
|
|
|
|
Again = true; // May be possible to coalesce later.
|
|
|
|
return false;
|
2007-11-01 07:22:48 +01:00
|
|
|
}
|
|
|
|
}
|
2007-10-12 10:50:34 +02:00
|
|
|
}
|
2009-01-23 03:15:19 +01:00
|
|
|
} else if (differingRegisterClasses(SrcReg, DstReg)) {
|
2009-07-21 02:22:59 +02:00
|
|
|
if (DisableCrossClassJoin)
|
2009-01-23 03:15:19 +01:00
|
|
|
return false;
|
|
|
|
CrossRC = true;
|
|
|
|
|
|
|
|
// FIXME: What if the result of a EXTRACT_SUBREG is then coalesced
|
2008-02-15 19:24:29 +01:00
|
|
|
// with another? If it's the resulting destination register, then
|
|
|
|
// the subidx must be propagated to uses (but only those defined
|
|
|
|
// by the EXTRACT_SUBREG). If it's being coalesced into another
|
|
|
|
// register, it should be safe because register is assumed to have
|
|
|
|
// the register class of the super-register.
|
|
|
|
|
2009-01-23 03:15:19 +01:00
|
|
|
// Process moves where one of the registers have a sub-register index.
|
|
|
|
MachineOperand *DstMO = CopyMI->findRegisterDefOperand(DstReg);
|
|
|
|
MachineOperand *SrcMO = CopyMI->findRegisterUseOperand(SrcReg);
|
Implement support for using modeling implicit-zero-extension on x86-64
with SUBREG_TO_REG, teach SimpleRegisterCoalescing to coalesce
SUBREG_TO_REG instructions (which are similar to INSERT_SUBREG
instructions), and teach the DAGCombiner to take advantage of this on
targets which support it. This eliminates many redundant
zero-extension operations on x86-64.
This adds a new TargetLowering hook, isZExtFree. It's similar to
isTruncateFree, except it only applies to actual definitions, and not
no-op truncates which may not zero the high bits.
Also, this adds a new optimization to SimplifyDemandedBits: transform
operations like x+y into (zext (add (trunc x), (trunc y))) on targets
where all the casts are no-ops. In contexts where the high part of the
add is explicitly masked off, this allows the mask operation to be
eliminated. Fix the DAGCombiner to avoid undoing these transformations
to eliminate casts on targets where the casts are no-ops.
Also, this adds a new two-address lowering heuristic. Since
two-address lowering runs before coalescing, it helps to be able to
look through copies when deciding whether commuting and/or
three-address conversion are profitable.
Also, fix a bug in LiveInterval::MergeInClobberRanges. It didn't handle
the case that a clobber range extended both before and beyond an
existing live range. In that case, multiple live ranges need to be
added. This was exposed by the new subreg coalescing code.
Remove 2008-05-06-SpillerBug.ll. It was bugpoint-reduced, and the
spiller behavior it was looking for no longer occurrs with the new
instruction selection.
llvm-svn: 68576
2009-04-08 02:15:30 +02:00
|
|
|
SubIdx = DstMO->getSubReg();
|
2009-01-23 03:15:19 +01:00
|
|
|
if (SubIdx) {
|
Implement support for using modeling implicit-zero-extension on x86-64
with SUBREG_TO_REG, teach SimpleRegisterCoalescing to coalesce
SUBREG_TO_REG instructions (which are similar to INSERT_SUBREG
instructions), and teach the DAGCombiner to take advantage of this on
targets which support it. This eliminates many redundant
zero-extension operations on x86-64.
This adds a new TargetLowering hook, isZExtFree. It's similar to
isTruncateFree, except it only applies to actual definitions, and not
no-op truncates which may not zero the high bits.
Also, this adds a new optimization to SimplifyDemandedBits: transform
operations like x+y into (zext (add (trunc x), (trunc y))) on targets
where all the casts are no-ops. In contexts where the high part of the
add is explicitly masked off, this allows the mask operation to be
eliminated. Fix the DAGCombiner to avoid undoing these transformations
to eliminate casts on targets where the casts are no-ops.
Also, this adds a new two-address lowering heuristic. Since
two-address lowering runs before coalescing, it helps to be able to
look through copies when deciding whether commuting and/or
three-address conversion are profitable.
Also, fix a bug in LiveInterval::MergeInClobberRanges. It didn't handle
the case that a clobber range extended both before and beyond an
existing live range. In that case, multiple live ranges need to be
added. This was exposed by the new subreg coalescing code.
Remove 2008-05-06-SpillerBug.ll. It was bugpoint-reduced, and the
spiller behavior it was looking for no longer occurrs with the new
instruction selection.
llvm-svn: 68576
2009-04-08 02:15:30 +02:00
|
|
|
if (SrcMO->getSubReg())
|
|
|
|
// FIXME: can we handle this?
|
|
|
|
return false;
|
|
|
|
// This is not an insert_subreg but it looks like one.
|
2009-04-23 22:39:31 +02:00
|
|
|
// e.g. %reg1024:4 = MOV32rr %EAX
|
Implement support for using modeling implicit-zero-extension on x86-64
with SUBREG_TO_REG, teach SimpleRegisterCoalescing to coalesce
SUBREG_TO_REG instructions (which are similar to INSERT_SUBREG
instructions), and teach the DAGCombiner to take advantage of this on
targets which support it. This eliminates many redundant
zero-extension operations on x86-64.
This adds a new TargetLowering hook, isZExtFree. It's similar to
isTruncateFree, except it only applies to actual definitions, and not
no-op truncates which may not zero the high bits.
Also, this adds a new optimization to SimplifyDemandedBits: transform
operations like x+y into (zext (add (trunc x), (trunc y))) on targets
where all the casts are no-ops. In contexts where the high part of the
add is explicitly masked off, this allows the mask operation to be
eliminated. Fix the DAGCombiner to avoid undoing these transformations
to eliminate casts on targets where the casts are no-ops.
Also, this adds a new two-address lowering heuristic. Since
two-address lowering runs before coalescing, it helps to be able to
look through copies when deciding whether commuting and/or
three-address conversion are profitable.
Also, fix a bug in LiveInterval::MergeInClobberRanges. It didn't handle
the case that a clobber range extended both before and beyond an
existing live range. In that case, multiple live ranges need to be
added. This was exposed by the new subreg coalescing code.
Remove 2008-05-06-SpillerBug.ll. It was bugpoint-reduced, and the
spiller behavior it was looking for no longer occurrs with the new
instruction selection.
llvm-svn: 68576
2009-04-08 02:15:30 +02:00
|
|
|
isInsSubReg = true;
|
|
|
|
if (SrcIsPhys) {
|
|
|
|
if (!CanJoinInsertSubRegToPhysReg(DstReg, SrcReg, SubIdx, RealSrcReg))
|
2009-01-23 03:15:19 +01:00
|
|
|
return false; // Not coalescable
|
|
|
|
SubIdx = 0;
|
|
|
|
}
|
Implement support for using modeling implicit-zero-extension on x86-64
with SUBREG_TO_REG, teach SimpleRegisterCoalescing to coalesce
SUBREG_TO_REG instructions (which are similar to INSERT_SUBREG
instructions), and teach the DAGCombiner to take advantage of this on
targets which support it. This eliminates many redundant
zero-extension operations on x86-64.
This adds a new TargetLowering hook, isZExtFree. It's similar to
isTruncateFree, except it only applies to actual definitions, and not
no-op truncates which may not zero the high bits.
Also, this adds a new optimization to SimplifyDemandedBits: transform
operations like x+y into (zext (add (trunc x), (trunc y))) on targets
where all the casts are no-ops. In contexts where the high part of the
add is explicitly masked off, this allows the mask operation to be
eliminated. Fix the DAGCombiner to avoid undoing these transformations
to eliminate casts on targets where the casts are no-ops.
Also, this adds a new two-address lowering heuristic. Since
two-address lowering runs before coalescing, it helps to be able to
look through copies when deciding whether commuting and/or
three-address conversion are profitable.
Also, fix a bug in LiveInterval::MergeInClobberRanges. It didn't handle
the case that a clobber range extended both before and beyond an
existing live range. In that case, multiple live ranges need to be
added. This was exposed by the new subreg coalescing code.
Remove 2008-05-06-SpillerBug.ll. It was bugpoint-reduced, and the
spiller behavior it was looking for no longer occurrs with the new
instruction selection.
llvm-svn: 68576
2009-04-08 02:15:30 +02:00
|
|
|
} else {
|
|
|
|
SubIdx = SrcMO->getSubReg();
|
|
|
|
if (SubIdx) {
|
|
|
|
// This is not a extract_subreg but it looks like one.
|
2009-04-23 22:39:31 +02:00
|
|
|
// e.g. %cl = MOV16rr %reg1024:1
|
Implement support for using modeling implicit-zero-extension on x86-64
with SUBREG_TO_REG, teach SimpleRegisterCoalescing to coalesce
SUBREG_TO_REG instructions (which are similar to INSERT_SUBREG
instructions), and teach the DAGCombiner to take advantage of this on
targets which support it. This eliminates many redundant
zero-extension operations on x86-64.
This adds a new TargetLowering hook, isZExtFree. It's similar to
isTruncateFree, except it only applies to actual definitions, and not
no-op truncates which may not zero the high bits.
Also, this adds a new optimization to SimplifyDemandedBits: transform
operations like x+y into (zext (add (trunc x), (trunc y))) on targets
where all the casts are no-ops. In contexts where the high part of the
add is explicitly masked off, this allows the mask operation to be
eliminated. Fix the DAGCombiner to avoid undoing these transformations
to eliminate casts on targets where the casts are no-ops.
Also, this adds a new two-address lowering heuristic. Since
two-address lowering runs before coalescing, it helps to be able to
look through copies when deciding whether commuting and/or
three-address conversion are profitable.
Also, fix a bug in LiveInterval::MergeInClobberRanges. It didn't handle
the case that a clobber range extended both before and beyond an
existing live range. In that case, multiple live ranges need to be
added. This was exposed by the new subreg coalescing code.
Remove 2008-05-06-SpillerBug.ll. It was bugpoint-reduced, and the
spiller behavior it was looking for no longer occurrs with the new
instruction selection.
llvm-svn: 68576
2009-04-08 02:15:30 +02:00
|
|
|
isExtSubReg = true;
|
|
|
|
if (DstIsPhys) {
|
|
|
|
if (!CanJoinExtractSubRegToPhysReg(DstReg, SrcReg, SubIdx,RealDstReg))
|
|
|
|
return false; // Not coalescable
|
|
|
|
SubIdx = 0;
|
|
|
|
}
|
|
|
|
}
|
2009-01-23 03:15:19 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
unsigned LargeReg = SrcReg;
|
|
|
|
unsigned SmallReg = DstReg;
|
|
|
|
|
|
|
|
// Now determine the register class of the joined register.
|
|
|
|
if (isExtSubReg) {
|
|
|
|
if (SubIdx && DstRC && DstRC->isASubClass()) {
|
|
|
|
// This is a move to a sub-register class. However, the source is a
|
|
|
|
// sub-register of a larger register class. We don't know what should
|
|
|
|
// the register class be. FIXME.
|
|
|
|
Again = true;
|
|
|
|
return false;
|
|
|
|
}
|
2009-07-18 04:10:10 +02:00
|
|
|
if (!DstIsPhys && !SrcIsPhys)
|
|
|
|
NewRC = SrcRC;
|
2009-04-23 22:18:13 +02:00
|
|
|
} else if (!SrcIsPhys && !DstIsPhys) {
|
2009-04-30 23:24:03 +02:00
|
|
|
NewRC = getCommonSubClass(SrcRC, DstRC);
|
|
|
|
if (!NewRC) {
|
2010-01-05 02:25:58 +01:00
|
|
|
DEBUG(dbgs() << "\tDisjoint regclasses: "
|
2009-08-22 22:52:46 +02:00
|
|
|
<< SrcRC->getName() << ", "
|
|
|
|
<< DstRC->getName() << ".\n");
|
2009-04-30 23:24:03 +02:00
|
|
|
return false; // Not coalescable.
|
2009-01-23 03:15:19 +01:00
|
|
|
}
|
2009-04-30 23:24:03 +02:00
|
|
|
if (DstRC->getSize() > SrcRC->getSize())
|
|
|
|
std::swap(LargeReg, SmallReg);
|
2009-01-23 03:15:19 +01:00
|
|
|
}
|
|
|
|
|
2009-01-23 06:48:59 +01:00
|
|
|
// If we are joining two virtual registers and the resulting register
|
|
|
|
// class is more restrictive (fewer register, smaller size). Check if it's
|
|
|
|
// worth doing the merge.
|
2009-01-23 03:15:19 +01:00
|
|
|
if (!SrcIsPhys && !DstIsPhys &&
|
2009-01-23 06:48:59 +01:00
|
|
|
(isExtSubReg || DstRC->isASubClass()) &&
|
|
|
|
!isWinToJoinCrossClass(LargeReg, SmallReg,
|
|
|
|
allocatableRCRegs_[NewRC].count())) {
|
2010-01-05 02:25:58 +01:00
|
|
|
DEBUG(dbgs() << "\tSrc/Dest are different register classes.\n");
|
2008-06-19 03:39:21 +02:00
|
|
|
// Allow the coalescer to try again in case either side gets coalesced to
|
|
|
|
// a physical register that's compatible with the other side. e.g.
|
|
|
|
// r1024 = MOV32to32_ r1025
|
2009-01-23 03:15:19 +01:00
|
|
|
// But later r1024 is assigned EAX then r1025 may be coalesced with EAX.
|
2008-06-19 03:39:21 +02:00
|
|
|
Again = true; // May be possible to coalesce later.
|
|
|
|
return false;
|
|
|
|
}
|
2007-06-08 19:18:56 +02:00
|
|
|
}
|
2008-09-11 22:07:10 +02:00
|
|
|
|
|
|
|
// Will it create illegal extract_subreg / insert_subreg?
|
|
|
|
if (SrcIsPhys && HasIncompatibleSubRegDefUse(CopyMI, DstReg, SrcReg))
|
|
|
|
return false;
|
|
|
|
if (DstIsPhys && HasIncompatibleSubRegDefUse(CopyMI, SrcReg, DstReg))
|
|
|
|
return false;
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2008-02-15 19:24:29 +01:00
|
|
|
LiveInterval &SrcInt = li_->getInterval(SrcReg);
|
|
|
|
LiveInterval &DstInt = li_->getInterval(DstReg);
|
|
|
|
assert(SrcInt.reg == SrcReg && DstInt.reg == DstReg &&
|
2007-06-08 19:18:56 +02:00
|
|
|
"Register mapping is horribly broken!");
|
|
|
|
|
2009-08-22 22:52:46 +02:00
|
|
|
DEBUG({
|
2010-01-05 02:25:58 +01:00
|
|
|
dbgs() << "\t\tInspecting "; SrcInt.print(dbgs(), tri_);
|
|
|
|
dbgs() << " and "; DstInt.print(dbgs(), tri_);
|
|
|
|
dbgs() << ": ";
|
2009-08-22 22:52:46 +02:00
|
|
|
});
|
2007-06-08 19:18:56 +02:00
|
|
|
|
2009-02-08 12:04:35 +01:00
|
|
|
// Save a copy of the virtual register live interval. We'll manually
|
|
|
|
// merge this into the "real" physical register live interval this is
|
|
|
|
// coalesced with.
|
|
|
|
LiveInterval *SavedLI = 0;
|
|
|
|
if (RealDstReg)
|
|
|
|
SavedLI = li_->dupInterval(&SrcInt);
|
|
|
|
else if (RealSrcReg)
|
|
|
|
SavedLI = li_->dupInterval(&DstInt);
|
|
|
|
|
2008-03-18 09:26:47 +01:00
|
|
|
// Check if it is necessary to propagate "isDead" property.
|
Implement support for using modeling implicit-zero-extension on x86-64
with SUBREG_TO_REG, teach SimpleRegisterCoalescing to coalesce
SUBREG_TO_REG instructions (which are similar to INSERT_SUBREG
instructions), and teach the DAGCombiner to take advantage of this on
targets which support it. This eliminates many redundant
zero-extension operations on x86-64.
This adds a new TargetLowering hook, isZExtFree. It's similar to
isTruncateFree, except it only applies to actual definitions, and not
no-op truncates which may not zero the high bits.
Also, this adds a new optimization to SimplifyDemandedBits: transform
operations like x+y into (zext (add (trunc x), (trunc y))) on targets
where all the casts are no-ops. In contexts where the high part of the
add is explicitly masked off, this allows the mask operation to be
eliminated. Fix the DAGCombiner to avoid undoing these transformations
to eliminate casts on targets where the casts are no-ops.
Also, this adds a new two-address lowering heuristic. Since
two-address lowering runs before coalescing, it helps to be able to
look through copies when deciding whether commuting and/or
three-address conversion are profitable.
Also, fix a bug in LiveInterval::MergeInClobberRanges. It didn't handle
the case that a clobber range extended both before and beyond an
existing live range. In that case, multiple live ranges need to be
added. This was exposed by the new subreg coalescing code.
Remove 2008-05-06-SpillerBug.ll. It was bugpoint-reduced, and the
spiller behavior it was looking for no longer occurrs with the new
instruction selection.
llvm-svn: 68576
2009-04-08 02:15:30 +02:00
|
|
|
if (!isExtSubReg && !isInsSubReg && !isSubRegToReg) {
|
2008-04-09 22:57:25 +02:00
|
|
|
MachineOperand *mopd = CopyMI->findRegisterDefOperand(DstReg, false);
|
|
|
|
bool isDead = mopd->isDead();
|
|
|
|
|
|
|
|
// We need to be careful about coalescing a source physical register with a
|
|
|
|
// virtual register. Once the coalescing is done, it cannot be broken and
|
|
|
|
// these are not spillable! If the destination interval uses are far away,
|
|
|
|
// think twice about coalescing them!
|
|
|
|
if (!isDead && (SrcIsPhys || DstIsPhys)) {
|
2009-04-30 20:39:57 +02:00
|
|
|
// If the copy is in a loop, take care not to coalesce aggressively if the
|
|
|
|
// src is coming in from outside the loop (or the dst is out of the loop).
|
|
|
|
// If it's not in a loop, then determine whether to join them base purely
|
|
|
|
// by the length of the interval.
|
|
|
|
if (PhysJoinTweak) {
|
|
|
|
if (SrcIsPhys) {
|
|
|
|
if (!isWinToJoinVRWithSrcPhysReg(CopyMI, CopyMBB, DstInt, SrcInt)) {
|
2009-06-15 10:28:29 +02:00
|
|
|
mri_->setRegAllocationHint(DstInt.reg, 0, SrcReg);
|
2009-04-30 20:39:57 +02:00
|
|
|
++numAborts;
|
2010-01-05 02:25:58 +01:00
|
|
|
DEBUG(dbgs() << "\tMay tie down a physical register, abort!\n");
|
2009-04-30 20:39:57 +02:00
|
|
|
Again = true; // May be possible to coalesce later.
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (!isWinToJoinVRWithDstPhysReg(CopyMI, CopyMBB, DstInt, SrcInt)) {
|
2009-06-15 10:28:29 +02:00
|
|
|
mri_->setRegAllocationHint(SrcInt.reg, 0, DstReg);
|
2009-04-30 20:39:57 +02:00
|
|
|
++numAborts;
|
2010-01-05 02:25:58 +01:00
|
|
|
DEBUG(dbgs() << "\tMay tie down a physical register, abort!\n");
|
2009-04-30 20:39:57 +02:00
|
|
|
Again = true; // May be possible to coalesce later.
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
2009-12-03 01:50:42 +01:00
|
|
|
// If the virtual register live interval is long but it has low use
|
|
|
|
// density, do not join them, instead mark the physical register as its
|
|
|
|
// allocation preference.
|
2009-04-30 20:39:57 +02:00
|
|
|
LiveInterval &JoinVInt = SrcIsPhys ? DstInt : SrcInt;
|
|
|
|
unsigned JoinVReg = SrcIsPhys ? DstReg : SrcReg;
|
|
|
|
unsigned JoinPReg = SrcIsPhys ? SrcReg : DstReg;
|
|
|
|
const TargetRegisterClass *RC = mri_->getRegClass(JoinVReg);
|
|
|
|
unsigned Threshold = allocatableRCRegs_[RC].count() * 2;
|
|
|
|
unsigned Length = li_->getApproximateInstructionCount(JoinVInt);
|
|
|
|
float Ratio = 1.0 / Threshold;
|
|
|
|
if (Length > Threshold &&
|
|
|
|
(((float)std::distance(mri_->use_begin(JoinVReg),
|
|
|
|
mri_->use_end()) / Length) < Ratio)) {
|
2009-06-15 10:28:29 +02:00
|
|
|
mri_->setRegAllocationHint(JoinVInt.reg, 0, JoinPReg);
|
2009-04-30 20:39:57 +02:00
|
|
|
++numAborts;
|
2010-01-05 02:25:58 +01:00
|
|
|
DEBUG(dbgs() << "\tMay tie down a physical register, abort!\n");
|
2009-04-30 20:39:57 +02:00
|
|
|
Again = true; // May be possible to coalesce later.
|
|
|
|
return false;
|
|
|
|
}
|
2008-04-09 22:57:25 +02:00
|
|
|
}
|
2007-06-08 19:18:56 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Okay, attempt to join these two intervals. On failure, this returns false.
|
|
|
|
// Otherwise, if one of the intervals being joined is a physreg, this method
|
|
|
|
// always canonicalizes DstInt to be it. The output "SrcInt" will not have
|
|
|
|
// been modified, so we can use this information below to update aliases.
|
2007-08-28 10:28:51 +02:00
|
|
|
bool Swapped = false;
|
2008-04-03 18:41:54 +02:00
|
|
|
// If SrcInt is implicitly defined, it's safe to coalesce.
|
|
|
|
bool isEmpty = SrcInt.empty();
|
2008-04-09 22:57:25 +02:00
|
|
|
if (isEmpty && !CanCoalesceWithImpDef(CopyMI, DstInt, SrcInt)) {
|
2008-04-03 18:41:54 +02:00
|
|
|
// Only coalesce an empty interval (defined by implicit_def) with
|
2008-04-09 22:57:25 +02:00
|
|
|
// another interval which has a valno defined by the CopyMI and the CopyMI
|
|
|
|
// is a kill of the implicit def.
|
2010-01-05 02:25:58 +01:00
|
|
|
DEBUG(dbgs() << "Not profitable!\n");
|
2008-04-03 18:41:54 +02:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!isEmpty && !JoinIntervals(DstInt, SrcInt, Swapped)) {
|
2007-07-09 14:00:59 +02:00
|
|
|
// Coalescing failed.
|
2008-08-30 11:09:33 +02:00
|
|
|
|
|
|
|
// If definition of source is defined by trivial computation, try
|
|
|
|
// rematerializing it.
|
Implement support for using modeling implicit-zero-extension on x86-64
with SUBREG_TO_REG, teach SimpleRegisterCoalescing to coalesce
SUBREG_TO_REG instructions (which are similar to INSERT_SUBREG
instructions), and teach the DAGCombiner to take advantage of this on
targets which support it. This eliminates many redundant
zero-extension operations on x86-64.
This adds a new TargetLowering hook, isZExtFree. It's similar to
isTruncateFree, except it only applies to actual definitions, and not
no-op truncates which may not zero the high bits.
Also, this adds a new optimization to SimplifyDemandedBits: transform
operations like x+y into (zext (add (trunc x), (trunc y))) on targets
where all the casts are no-ops. In contexts where the high part of the
add is explicitly masked off, this allows the mask operation to be
eliminated. Fix the DAGCombiner to avoid undoing these transformations
to eliminate casts on targets where the casts are no-ops.
Also, this adds a new two-address lowering heuristic. Since
two-address lowering runs before coalescing, it helps to be able to
look through copies when deciding whether commuting and/or
three-address conversion are profitable.
Also, fix a bug in LiveInterval::MergeInClobberRanges. It didn't handle
the case that a clobber range extended both before and beyond an
existing live range. In that case, multiple live ranges need to be
added. This was exposed by the new subreg coalescing code.
Remove 2008-05-06-SpillerBug.ll. It was bugpoint-reduced, and the
spiller behavior it was looking for no longer occurrs with the new
instruction selection.
llvm-svn: 68576
2009-04-08 02:15:30 +02:00
|
|
|
if (!isExtSubReg && !isInsSubReg && !isSubRegToReg &&
|
2009-07-16 11:20:10 +02:00
|
|
|
ReMaterializeTrivialDef(SrcInt, DstReg, DstSubIdx, CopyMI))
|
2008-08-30 11:09:33 +02:00
|
|
|
return true;
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2007-06-08 19:18:56 +02:00
|
|
|
// If we can eliminate the copy without merging the live ranges, do so now.
|
Implement support for using modeling implicit-zero-extension on x86-64
with SUBREG_TO_REG, teach SimpleRegisterCoalescing to coalesce
SUBREG_TO_REG instructions (which are similar to INSERT_SUBREG
instructions), and teach the DAGCombiner to take advantage of this on
targets which support it. This eliminates many redundant
zero-extension operations on x86-64.
This adds a new TargetLowering hook, isZExtFree. It's similar to
isTruncateFree, except it only applies to actual definitions, and not
no-op truncates which may not zero the high bits.
Also, this adds a new optimization to SimplifyDemandedBits: transform
operations like x+y into (zext (add (trunc x), (trunc y))) on targets
where all the casts are no-ops. In contexts where the high part of the
add is explicitly masked off, this allows the mask operation to be
eliminated. Fix the DAGCombiner to avoid undoing these transformations
to eliminate casts on targets where the casts are no-ops.
Also, this adds a new two-address lowering heuristic. Since
two-address lowering runs before coalescing, it helps to be able to
look through copies when deciding whether commuting and/or
three-address conversion are profitable.
Also, fix a bug in LiveInterval::MergeInClobberRanges. It didn't handle
the case that a clobber range extended both before and beyond an
existing live range. In that case, multiple live ranges need to be
added. This was exposed by the new subreg coalescing code.
Remove 2008-05-06-SpillerBug.ll. It was bugpoint-reduced, and the
spiller behavior it was looking for no longer occurrs with the new
instruction selection.
llvm-svn: 68576
2009-04-08 02:15:30 +02:00
|
|
|
if (!isExtSubReg && !isInsSubReg && !isSubRegToReg &&
|
2008-02-13 04:01:43 +01:00
|
|
|
(AdjustCopiesBackFrom(SrcInt, DstInt, CopyMI) ||
|
|
|
|
RemoveCopyByCommutingDef(SrcInt, DstInt, CopyMI))) {
|
2007-11-06 09:52:21 +01:00
|
|
|
JoinedCopies.insert(CopyMI);
|
2007-06-08 19:18:56 +02:00
|
|
|
return true;
|
2007-11-06 09:52:21 +01:00
|
|
|
}
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2007-06-08 19:18:56 +02:00
|
|
|
// Otherwise, we are unable to join the intervals.
|
2010-01-05 02:25:58 +01:00
|
|
|
DEBUG(dbgs() << "Interference!\n");
|
2007-11-01 07:22:48 +01:00
|
|
|
Again = true; // May be possible to coalesce later.
|
2007-06-08 19:18:56 +02:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2007-08-28 10:28:51 +02:00
|
|
|
LiveInterval *ResSrcInt = &SrcInt;
|
|
|
|
LiveInterval *ResDstInt = &DstInt;
|
|
|
|
if (Swapped) {
|
2008-02-15 19:24:29 +01:00
|
|
|
std::swap(SrcReg, DstReg);
|
2007-08-28 10:28:51 +02:00
|
|
|
std::swap(ResSrcInt, ResDstInt);
|
|
|
|
}
|
2008-02-15 19:24:29 +01:00
|
|
|
assert(TargetRegisterInfo::isVirtualRegister(SrcReg) &&
|
2007-06-08 19:18:56 +02:00
|
|
|
"LiveInterval::join didn't work right!");
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2009-01-07 03:08:57 +01:00
|
|
|
// If we're about to merge live ranges into a physical register live interval,
|
2007-06-08 19:18:56 +02:00
|
|
|
// we have to update any aliased register's live ranges to indicate that they
|
|
|
|
// have clobbered values for this range.
|
2008-02-15 19:24:29 +01:00
|
|
|
if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
|
2007-10-12 10:50:34 +02:00
|
|
|
// If this is a extract_subreg where dst is a physical register, e.g.
|
|
|
|
// cl = EXTRACT_SUBREG reg1024, 1
|
|
|
|
// then create and update the actual physical register allocated to RHS.
|
2008-04-09 22:57:25 +02:00
|
|
|
if (RealDstReg || RealSrcReg) {
|
|
|
|
LiveInterval &RealInt =
|
|
|
|
li_->getOrCreateInterval(RealDstReg ? RealDstReg : RealSrcReg);
|
2009-02-08 12:04:35 +01:00
|
|
|
for (LiveInterval::const_vni_iterator I = SavedLI->vni_begin(),
|
|
|
|
E = SavedLI->vni_end(); I != E; ++I) {
|
|
|
|
const VNInfo *ValNo = *I;
|
2009-08-11 01:43:28 +02:00
|
|
|
VNInfo *NewValNo = RealInt.getNextValue(ValNo->def, ValNo->getCopy(),
|
2009-06-17 23:01:20 +02:00
|
|
|
false, // updated at *
|
2009-02-08 12:04:35 +01:00
|
|
|
li_->getVNInfoAllocator());
|
2009-06-17 23:01:20 +02:00
|
|
|
NewValNo->setFlags(ValNo->getFlags()); // * updated here.
|
2009-02-08 12:04:35 +01:00
|
|
|
RealInt.addKills(NewValNo, ValNo->kills);
|
|
|
|
RealInt.MergeValueInAsValue(*SavedLI, ValNo, NewValNo);
|
2007-10-14 12:08:34 +02:00
|
|
|
}
|
2009-09-20 04:20:51 +02:00
|
|
|
RealInt.weight += SavedLI->weight;
|
2008-04-09 22:57:25 +02:00
|
|
|
DstReg = RealDstReg ? RealDstReg : RealSrcReg;
|
2007-10-12 10:50:34 +02:00
|
|
|
}
|
|
|
|
|
2007-06-08 19:18:56 +02:00
|
|
|
// Update the liveintervals of sub-registers.
|
2008-02-15 19:24:29 +01:00
|
|
|
for (const unsigned *AS = tri_->getSubRegisters(DstReg); *AS; ++AS)
|
2009-11-04 00:52:08 +01:00
|
|
|
li_->getOrCreateInterval(*AS).MergeInClobberRanges(*li_, *ResSrcInt,
|
2007-09-05 23:46:51 +02:00
|
|
|
li_->getVNInfoAllocator());
|
2007-06-08 19:18:56 +02:00
|
|
|
}
|
|
|
|
|
2008-02-15 19:24:29 +01:00
|
|
|
// If this is a EXTRACT_SUBREG, make sure the result of coalescing is the
|
|
|
|
// larger super-register.
|
Implement support for using modeling implicit-zero-extension on x86-64
with SUBREG_TO_REG, teach SimpleRegisterCoalescing to coalesce
SUBREG_TO_REG instructions (which are similar to INSERT_SUBREG
instructions), and teach the DAGCombiner to take advantage of this on
targets which support it. This eliminates many redundant
zero-extension operations on x86-64.
This adds a new TargetLowering hook, isZExtFree. It's similar to
isTruncateFree, except it only applies to actual definitions, and not
no-op truncates which may not zero the high bits.
Also, this adds a new optimization to SimplifyDemandedBits: transform
operations like x+y into (zext (add (trunc x), (trunc y))) on targets
where all the casts are no-ops. In contexts where the high part of the
add is explicitly masked off, this allows the mask operation to be
eliminated. Fix the DAGCombiner to avoid undoing these transformations
to eliminate casts on targets where the casts are no-ops.
Also, this adds a new two-address lowering heuristic. Since
two-address lowering runs before coalescing, it helps to be able to
look through copies when deciding whether commuting and/or
three-address conversion are profitable.
Also, fix a bug in LiveInterval::MergeInClobberRanges. It didn't handle
the case that a clobber range extended both before and beyond an
existing live range. In that case, multiple live ranges need to be
added. This was exposed by the new subreg coalescing code.
Remove 2008-05-06-SpillerBug.ll. It was bugpoint-reduced, and the
spiller behavior it was looking for no longer occurrs with the new
instruction selection.
llvm-svn: 68576
2009-04-08 02:15:30 +02:00
|
|
|
if ((isExtSubReg || isInsSubReg || isSubRegToReg) &&
|
|
|
|
!SrcIsPhys && !DstIsPhys) {
|
|
|
|
if ((isExtSubReg && !Swapped) ||
|
|
|
|
((isInsSubReg || isSubRegToReg) && Swapped)) {
|
2009-06-14 22:22:55 +02:00
|
|
|
ResSrcInt->Copy(*ResDstInt, mri_, li_->getVNInfoAllocator());
|
2008-02-15 19:24:29 +01:00
|
|
|
std::swap(SrcReg, DstReg);
|
2007-10-12 10:50:34 +02:00
|
|
|
std::swap(ResSrcInt, ResDstInt);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-06-19 03:39:21 +02:00
|
|
|
// Coalescing to a virtual register that is of a sub-register class of the
|
|
|
|
// other. Make sure the resulting register is set to the right register class.
|
2009-07-18 04:10:10 +02:00
|
|
|
if (CrossRC)
|
|
|
|
++numCrossRCs;
|
|
|
|
|
|
|
|
// This may happen even if it's cross-rc coalescing. e.g.
|
|
|
|
// %reg1026<def> = SUBREG_TO_REG 0, %reg1037<kill>, 4
|
|
|
|
// reg1026 -> GR64, reg1037 -> GR32_ABCD. The resulting register will have to
|
|
|
|
// be allocate a register from GR64_ABCD.
|
|
|
|
if (NewRC)
|
|
|
|
mri_->setRegClass(DstReg, NewRC);
|
2008-06-19 03:39:21 +02:00
|
|
|
|
2008-02-15 19:24:29 +01:00
|
|
|
// Remember to delete the copy instruction.
|
|
|
|
JoinedCopies.insert(CopyMI);
|
|
|
|
|
2008-03-10 09:11:32 +01:00
|
|
|
// Some live range has been lengthened due to coalescing, eliminate the
|
|
|
|
// unnecessary kills.
|
|
|
|
RemoveUnnecessaryKills(SrcReg, *ResDstInt);
|
|
|
|
if (TargetRegisterInfo::isVirtualRegister(DstReg))
|
|
|
|
RemoveUnnecessaryKills(DstReg, *ResDstInt);
|
|
|
|
|
2008-02-15 19:24:29 +01:00
|
|
|
UpdateRegDefsUses(SrcReg, DstReg, SubIdx);
|
2007-06-08 19:18:56 +02:00
|
|
|
|
2008-08-30 11:09:33 +02:00
|
|
|
// SrcReg is guaranteed to be the register whose live interval that is
|
|
|
|
// being merged.
|
|
|
|
li_->removeInterval(SrcReg);
|
|
|
|
|
2009-06-18 04:04:01 +02:00
|
|
|
// Update regalloc hint.
|
|
|
|
tri_->UpdateRegAllocHint(SrcReg, DstReg, *mf_);
|
|
|
|
|
2009-02-08 12:04:35 +01:00
|
|
|
// Manually deleted the live interval copy.
|
|
|
|
if (SavedLI) {
|
|
|
|
SavedLI->clear();
|
|
|
|
delete SavedLI;
|
|
|
|
}
|
|
|
|
|
2008-09-09 23:44:23 +02:00
|
|
|
// If resulting interval has a preference that no longer fits because of subreg
|
|
|
|
// coalescing, just clear the preference.
|
2009-06-14 22:22:55 +02:00
|
|
|
unsigned Preference = getRegAllocPreference(ResDstInt->reg, *mf_, mri_, tri_);
|
|
|
|
if (Preference && (isExtSubReg || isInsSubReg || isSubRegToReg) &&
|
2008-09-11 20:40:32 +02:00
|
|
|
TargetRegisterInfo::isVirtualRegister(ResDstInt->reg)) {
|
2008-09-09 23:44:23 +02:00
|
|
|
const TargetRegisterClass *RC = mri_->getRegClass(ResDstInt->reg);
|
2009-06-14 22:22:55 +02:00
|
|
|
if (!RC->contains(Preference))
|
2009-06-15 10:28:29 +02:00
|
|
|
mri_->setRegAllocationHint(ResDstInt->reg, 0, 0);
|
2008-09-09 23:44:23 +02:00
|
|
|
}
|
|
|
|
|
2009-08-22 22:52:46 +02:00
|
|
|
DEBUG({
|
2010-01-05 02:25:58 +01:00
|
|
|
dbgs() << "\n\t\tJoined. Result = ";
|
|
|
|
ResDstInt->print(dbgs(), tri_);
|
|
|
|
dbgs() << "\n";
|
2009-08-22 22:52:46 +02:00
|
|
|
});
|
2008-04-03 18:41:54 +02:00
|
|
|
|
2007-06-08 19:18:56 +02:00
|
|
|
++numJoins;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// ComputeUltimateVN - Assuming we are going to join two live intervals,
|
|
|
|
/// compute what the resultant value numbers for each value in the input two
|
|
|
|
/// ranges will be. This is complicated by copies between the two which can
|
|
|
|
/// and will commonly cause multiple value numbers to be merged into one.
|
|
|
|
///
|
|
|
|
/// VN is the value number that we're trying to resolve. InstDefiningValue
|
|
|
|
/// keeps track of the new InstDefiningValue assignment for the result
|
|
|
|
/// LiveInterval. ThisFromOther/OtherFromThis are sets that keep track of
|
|
|
|
/// whether a value in this or other is a copy from the opposite set.
|
|
|
|
/// ThisValNoAssignments/OtherValNoAssignments keep track of value #'s that have
|
|
|
|
/// already been assigned.
|
|
|
|
///
|
|
|
|
/// ThisFromOther[x] - If x is defined as a copy from the other interval, this
|
|
|
|
/// contains the value number the copy is from.
|
|
|
|
///
|
2007-08-29 22:45:00 +02:00
|
|
|
static unsigned ComputeUltimateVN(VNInfo *VNI,
|
|
|
|
SmallVector<VNInfo*, 16> &NewVNInfo,
|
2007-08-31 23:23:06 +02:00
|
|
|
DenseMap<VNInfo*, VNInfo*> &ThisFromOther,
|
|
|
|
DenseMap<VNInfo*, VNInfo*> &OtherFromThis,
|
2007-06-08 19:18:56 +02:00
|
|
|
SmallVector<int, 16> &ThisValNoAssignments,
|
2007-08-29 22:45:00 +02:00
|
|
|
SmallVector<int, 16> &OtherValNoAssignments) {
|
|
|
|
unsigned VN = VNI->id;
|
|
|
|
|
2007-06-08 19:18:56 +02:00
|
|
|
// If the VN has already been computed, just return it.
|
|
|
|
if (ThisValNoAssignments[VN] >= 0)
|
|
|
|
return ThisValNoAssignments[VN];
|
|
|
|
// assert(ThisValNoAssignments[VN] != -2 && "Cyclic case?");
|
2007-08-29 22:45:00 +02:00
|
|
|
|
2007-06-08 19:18:56 +02:00
|
|
|
// If this val is not a copy from the other val, then it must be a new value
|
|
|
|
// number in the destination.
|
2007-08-31 23:23:06 +02:00
|
|
|
DenseMap<VNInfo*, VNInfo*>::iterator I = ThisFromOther.find(VNI);
|
2007-08-31 10:04:17 +02:00
|
|
|
if (I == ThisFromOther.end()) {
|
2007-08-29 22:45:00 +02:00
|
|
|
NewVNInfo.push_back(VNI);
|
|
|
|
return ThisValNoAssignments[VN] = NewVNInfo.size()-1;
|
2007-06-08 19:18:56 +02:00
|
|
|
}
|
2007-08-31 10:04:17 +02:00
|
|
|
VNInfo *OtherValNo = I->second;
|
2007-06-08 19:18:56 +02:00
|
|
|
|
|
|
|
// Otherwise, this *is* a copy from the RHS. If the other side has already
|
|
|
|
// been computed, return it.
|
2007-08-29 22:45:00 +02:00
|
|
|
if (OtherValNoAssignments[OtherValNo->id] >= 0)
|
|
|
|
return ThisValNoAssignments[VN] = OtherValNoAssignments[OtherValNo->id];
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2007-06-08 19:18:56 +02:00
|
|
|
// Mark this value number as currently being computed, then ask what the
|
|
|
|
// ultimate value # of the other value is.
|
|
|
|
ThisValNoAssignments[VN] = -2;
|
|
|
|
unsigned UltimateVN =
|
2007-08-29 22:45:00 +02:00
|
|
|
ComputeUltimateVN(OtherValNo, NewVNInfo, OtherFromThis, ThisFromOther,
|
|
|
|
OtherValNoAssignments, ThisValNoAssignments);
|
2007-06-08 19:18:56 +02:00
|
|
|
return ThisValNoAssignments[VN] = UltimateVN;
|
|
|
|
}
|
|
|
|
|
2007-08-29 22:45:00 +02:00
|
|
|
/// InVector - Return true if the vector V contains the value number Val.
static bool InVector(VNInfo *Val, const SmallVector<VNInfo*, 8> &V) {
  for (unsigned i = 0, e = V.size(); i != e; ++i)
    if (V[i] == Val)
      return true;
  return false;
}
|
|
|
|
|
2009-11-04 09:33:14 +01:00
|
|
|
static bool isValNoDefMove(const MachineInstr *MI, unsigned DR, unsigned SR,
|
|
|
|
const TargetInstrInfo *TII,
|
|
|
|
const TargetRegisterInfo *TRI) {
|
|
|
|
unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
|
|
|
|
if (TII->isMoveInstr(*MI, SrcReg, DstReg, SrcSubIdx, DstSubIdx))
|
|
|
|
;
|
|
|
|
else if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
|
|
|
|
DstReg = MI->getOperand(0).getReg();
|
|
|
|
SrcReg = MI->getOperand(1).getReg();
|
|
|
|
} else if (MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
|
|
|
|
MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG) {
|
|
|
|
DstReg = MI->getOperand(0).getReg();
|
|
|
|
SrcReg = MI->getOperand(2).getReg();
|
|
|
|
} else
|
|
|
|
return false;
|
|
|
|
return (SrcReg == SR || TRI->isSuperRegister(SR, SrcReg)) &&
|
|
|
|
(DstReg == DR || TRI->isSuperRegister(DR, DstReg));
|
|
|
|
}
|
|
|
|
|
2008-04-09 22:57:25 +02:00
|
|
|
/// RangeIsDefinedByCopyFromReg - Return true if the specified live range of
|
|
|
|
/// the specified live interval is defined by a copy from the specified
|
|
|
|
/// register.
|
|
|
|
bool SimpleRegisterCoalescing::RangeIsDefinedByCopyFromReg(LiveInterval &li,
|
|
|
|
LiveRange *LR,
|
|
|
|
unsigned Reg) {
|
|
|
|
unsigned SrcReg = li_->getVNInfoSourceReg(LR->valno);
|
|
|
|
if (SrcReg == Reg)
|
|
|
|
return true;
|
2009-06-17 23:01:20 +02:00
|
|
|
// FIXME: Do isPHIDef and isDefAccurate both need to be tested?
|
|
|
|
if ((LR->valno->isPHIDef() || !LR->valno->isDefAccurate()) &&
|
2008-04-09 22:57:25 +02:00
|
|
|
TargetRegisterInfo::isPhysicalRegister(li.reg) &&
|
|
|
|
*tri_->getSuperRegisters(li.reg)) {
|
|
|
|
// It's a sub-register live interval, we may not have precise information.
|
|
|
|
// Re-compute it.
|
|
|
|
MachineInstr *DefMI = li_->getInstructionFromIndex(LR->start);
|
2009-11-04 09:33:14 +01:00
|
|
|
if (DefMI && isValNoDefMove(DefMI, li.reg, Reg, tii_, tri_)) {
|
2008-04-09 22:57:25 +02:00
|
|
|
// Cache computed info.
|
2009-11-04 09:33:14 +01:00
|
|
|
LR->valno->def = LR->start;
|
2009-08-11 01:43:28 +02:00
|
|
|
LR->valno->setCopy(DefMI);
|
2008-04-09 22:57:25 +02:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2009-10-29 18:39:46 +01:00
|
|
|
|
|
|
|
/// ValueLiveAt - Return true if the LiveRange pointed to by the given
|
|
|
|
/// iterator, or any subsequent range with the same value number,
|
|
|
|
/// is live at the given point.
|
|
|
|
bool SimpleRegisterCoalescing::ValueLiveAt(LiveInterval::iterator LRItr,
|
2009-10-30 19:12:09 +01:00
|
|
|
LiveInterval::iterator LREnd,
|
2009-11-04 00:52:08 +01:00
|
|
|
SlotIndex defPoint) const {
|
2009-10-30 19:12:09 +01:00
|
|
|
for (const VNInfo *valno = LRItr->valno;
|
|
|
|
(LRItr != LREnd) && (LRItr->valno == valno); ++LRItr) {
|
2009-10-29 18:39:46 +01:00
|
|
|
if (LRItr->contains(defPoint))
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2007-06-08 19:18:56 +02:00
|
|
|
/// SimpleJoin - Attempt to join the specified interval into this one. The
/// caller of this method must guarantee that the RHS only contains a single
/// value number and that the RHS is not defined by a copy from this
/// interval.  This returns false if the intervals are not joinable, or it
/// joins them and returns true.
bool SimpleRegisterCoalescing::SimpleJoin(LiveInterval &LHS, LiveInterval &RHS){
  assert(RHS.containsOneValue());

  // Some number (potentially more than one) value numbers in the current
  // interval may be defined as copies from the RHS.  Scan the overlapping
  // portions of the LHS and RHS, keeping track of this and looking for
  // overlapping live ranges that are NOT defined as copies.  If these exist, we
  // cannot coalesce.

  LiveInterval::iterator LHSIt = LHS.begin(), LHSEnd = LHS.end();
  LiveInterval::iterator RHSIt = RHS.begin(), RHSEnd = RHS.end();

  // Advance the iterator of whichever interval starts first so both begin
  // at (or just before) the first point of potential overlap.
  if (LHSIt->start < RHSIt->start) {
    LHSIt = std::upper_bound(LHSIt, LHSEnd, RHSIt->start);
    if (LHSIt != LHS.begin()) --LHSIt;
  } else if (RHSIt->start < LHSIt->start) {
    RHSIt = std::upper_bound(RHSIt, RHSEnd, LHSIt->start);
    if (RHSIt != RHS.begin()) --RHSIt;
  }

  // LHS value numbers proven to be copies from the RHS; these will all be
  // merged into a single value number if the join succeeds.
  SmallVector<VNInfo*, 8> EliminatedLHSVals;

  while (1) {
    // Determine if these live intervals overlap.
    bool Overlaps = false;
    if (LHSIt->start <= RHSIt->start)
      Overlaps = LHSIt->end > RHSIt->start;
    else
      Overlaps = RHSIt->end > LHSIt->start;

    // If the live intervals overlap, there are two interesting cases: if the
    // LHS interval is defined by a copy from the RHS, it's ok and we record
    // that the LHS value # is the same as the RHS.  If it's not, then we cannot
    // coalesce these live ranges and we bail out.
    if (Overlaps) {
      // If we haven't already recorded that this value # is safe, check it.
      if (!InVector(LHSIt->valno, EliminatedLHSVals)) {
        // If it's re-defined by an early clobber somewhere in the live range,
        // then conservatively abort coalescing.
        if (LHSIt->valno->hasRedefByEC())
          return false;
        // Copy from the RHS?
        if (!RangeIsDefinedByCopyFromReg(LHS, LHSIt, RHS.reg))
          return false;    // Nope, bail out.

        if (ValueLiveAt(LHSIt, LHS.end(), RHSIt->valno->def))
          // Here is an interesting situation:
          // BB1:
          //   vr1025 = copy vr1024
          //   ..
          // BB2:
          //   vr1024 = op
          //          = vr1025
          // Even though vr1025 is copied from vr1024, it's not safe to
          // coalesce them since the live range of vr1025 intersects the
          // def of vr1024.  This happens because vr1025 is assigned the
          // value of the previous iteration of vr1024.
          return false;
        EliminatedLHSVals.push_back(LHSIt->valno);
      }

      // We know this entire LHS live range is okay, so skip it now.
      if (++LHSIt == LHSEnd) break;
      continue;
    }

    if (LHSIt->end < RHSIt->end) {
      if (++LHSIt == LHSEnd) break;
    } else {
      // One interesting case to check here.  It's possible that we have
      // something like "X3 = Y" which defines a new value number in the LHS,
      // and is the last use of this liverange of the RHS.  In this case, we
      // want to notice this copy (so that it gets coalesced away) even though
      // the live ranges don't actually overlap.
      if (LHSIt->start == RHSIt->end) {
        if (InVector(LHSIt->valno, EliminatedLHSVals)) {
          // We already know that this value number is going to be merged in
          // if coalescing succeeds.  Just skip the liverange.
          if (++LHSIt == LHSEnd) break;
        } else {
          // If it's re-defined by an early clobber somewhere in the live range,
          // then conservatively abort coalescing.
          if (LHSIt->valno->hasRedefByEC())
            return false;
          // Otherwise, if this is a copy from the RHS, mark it as being merged
          // in.
          if (RangeIsDefinedByCopyFromReg(LHS, LHSIt, RHS.reg)) {
            if (ValueLiveAt(LHSIt, LHS.end(), RHSIt->valno->def))
              // Here is an interesting situation:
              // BB1:
              //   vr1025 = copy vr1024
              //   ..
              // BB2:
              //   vr1024 = op
              //          = vr1025
              // Even though vr1025 is copied from vr1024, it's not safe to
              // coalesce them since the live range of vr1025 intersects the
              // def of vr1024.  This happens because vr1025 is assigned the
              // value of the previous iteration of vr1024.
              return false;
            EliminatedLHSVals.push_back(LHSIt->valno);

            // We know this entire LHS live range is okay, so skip it now.
            if (++LHSIt == LHSEnd) break;
          }
        }
      }

      if (++RHSIt == RHSEnd) break;
    }
  }

  // If we got here, we know that the coalescing will be successful and that
  // the value numbers in EliminatedLHSVals will all be merged together.  Since
  // the most common case is that EliminatedLHSVals has a single number, we
  // optimize for it: if there is more than one value, we merge them all into
  // the lowest numbered one, then handle the interval as if we were merging
  // with one value number.
  VNInfo *LHSValNo = NULL;
  if (EliminatedLHSVals.size() > 1) {
    // Loop through all the equal value numbers merging them into the smallest
    // one.
    VNInfo *Smallest = EliminatedLHSVals[0];
    for (unsigned i = 1, e = EliminatedLHSVals.size(); i != e; ++i) {
      if (EliminatedLHSVals[i]->id < Smallest->id) {
        // Merge the current notion of the smallest into the smaller one.
        LHS.MergeValueNumberInto(Smallest, EliminatedLHSVals[i]);
        Smallest = EliminatedLHSVals[i];
      } else {
        // Merge into the smallest.
        LHS.MergeValueNumberInto(EliminatedLHSVals[i], Smallest);
      }
    }
    LHSValNo = Smallest;
  } else if (EliminatedLHSVals.empty()) {
    if (TargetRegisterInfo::isPhysicalRegister(LHS.reg) &&
        *tri_->getSuperRegisters(LHS.reg))
      // Imprecise sub-register information. Can't handle it.
      return false;
    llvm_unreachable("No copies from the RHS?");
  } else {
    LHSValNo = EliminatedLHSVals[0];
  }

  // Okay, now that there is a single LHS value number that we're merging the
  // RHS into, update the value number info for the LHS to indicate that the
  // value number is defined where the RHS value number was.
  const VNInfo *VNI = RHS.getValNumInfo(0);
  LHSValNo->def  = VNI->def;
  LHSValNo->setCopy(VNI->getCopy());

  // Okay, the final step is to loop over the RHS live intervals, adding them to
  // the LHS.
  if (VNI->hasPHIKill())
    LHSValNo->setHasPHIKill(true);
  LHS.addKills(LHSValNo, VNI->kills);
  LHS.MergeRangesInAsValue(RHS, LHSValNo);

  LHS.ComputeJoinedWeight(RHS);

  // Update regalloc hint if both are virtual registers.
  if (TargetRegisterInfo::isVirtualRegister(LHS.reg) &&
      TargetRegisterInfo::isVirtualRegister(RHS.reg)) {
    std::pair<unsigned, unsigned> RHSPref = mri_->getRegAllocationHint(RHS.reg);
    std::pair<unsigned, unsigned> LHSPref = mri_->getRegAllocationHint(LHS.reg);
    if (RHSPref != LHSPref)
      mri_->setRegAllocationHint(LHS.reg, RHSPref.first, RHSPref.second);
  }

  // Update the liveintervals of sub-registers.
  if (TargetRegisterInfo::isPhysicalRegister(LHS.reg))
    for (const unsigned *AS = tri_->getSubRegisters(LHS.reg); *AS; ++AS)
      li_->getOrCreateInterval(*AS).MergeInClobberRanges(*li_, LHS,
                                                    li_->getVNInfoAllocator());

  return true;
}
|
|
|
|
|
|
|
|
/// JoinIntervals - Attempt to join these two intervals. On failure, this
|
|
|
|
/// returns false. Otherwise, if one of the intervals being joined is a
|
|
|
|
/// physreg, this method always canonicalizes LHS to be it. The output
|
|
|
|
/// "RHS" will not have been modified, so we can use this information
|
|
|
|
/// below to update aliases.
|
2009-01-07 03:08:57 +01:00
|
|
|
bool
|
|
|
|
SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
|
|
|
|
bool &Swapped) {
|
2007-06-08 19:18:56 +02:00
|
|
|
// Compute the final value assignment, assuming that the live ranges can be
|
2007-07-09 14:00:59 +02:00
|
|
|
// coalesced.
|
2007-06-08 19:18:56 +02:00
|
|
|
SmallVector<int, 16> LHSValNoAssignments;
|
|
|
|
SmallVector<int, 16> RHSValNoAssignments;
|
2007-08-31 23:23:06 +02:00
|
|
|
DenseMap<VNInfo*, VNInfo*> LHSValsDefinedFromRHS;
|
|
|
|
DenseMap<VNInfo*, VNInfo*> RHSValsDefinedFromLHS;
|
2007-08-29 22:45:00 +02:00
|
|
|
SmallVector<VNInfo*, 16> NewVNInfo;
|
2009-01-07 03:08:57 +01:00
|
|
|
|
2007-06-08 19:18:56 +02:00
|
|
|
// If a live interval is a physical register, conservatively check if any
|
|
|
|
// of its sub-registers is overlapping the live interval of the virtual
|
|
|
|
// register. If so, do not coalesce.
|
2008-02-10 19:45:23 +01:00
|
|
|
if (TargetRegisterInfo::isPhysicalRegister(LHS.reg) &&
|
|
|
|
*tri_->getSubRegisters(LHS.reg)) {
|
2009-01-07 03:08:57 +01:00
|
|
|
// If it's coalescing a virtual register to a physical register, estimate
|
|
|
|
// its live interval length. This is the *cost* of scanning an entire live
|
|
|
|
// interval. If the cost is low, we'll do an exhaustive check instead.
|
2009-01-13 04:57:45 +01:00
|
|
|
|
|
|
|
// If this is something like this:
|
|
|
|
// BB1:
|
|
|
|
// v1024 = op
|
|
|
|
// ...
|
|
|
|
// BB2:
|
|
|
|
// ...
|
|
|
|
// RAX = v1024
|
|
|
|
//
|
|
|
|
// That is, the live interval of v1024 crosses a bb. Then we can't rely on
|
|
|
|
// less conservative check. It's possible a sub-register is defined before
|
|
|
|
// v1024 (or live in) and live out of BB1.
|
2009-01-07 03:08:57 +01:00
|
|
|
if (RHS.containsOneValue() &&
|
2009-01-13 07:08:37 +01:00
|
|
|
li_->intervalIsInOneMBB(RHS) &&
|
2009-01-07 03:08:57 +01:00
|
|
|
li_->getApproximateInstructionCount(RHS) <= 10) {
|
|
|
|
// Perform a more exhaustive check for some common cases.
|
|
|
|
if (li_->conflictsWithPhysRegRef(RHS, LHS.reg, true, JoinedCopies))
|
2007-06-08 19:18:56 +02:00
|
|
|
return false;
|
2009-01-07 03:08:57 +01:00
|
|
|
} else {
|
|
|
|
for (const unsigned* SR = tri_->getSubRegisters(LHS.reg); *SR; ++SR)
|
|
|
|
if (li_->hasInterval(*SR) && RHS.overlaps(li_->getInterval(*SR))) {
|
2009-08-22 22:52:46 +02:00
|
|
|
DEBUG({
|
2010-01-05 02:25:58 +01:00
|
|
|
dbgs() << "Interfere with sub-register ";
|
|
|
|
li_->getInterval(*SR).print(dbgs(), tri_);
|
2009-08-22 22:52:46 +02:00
|
|
|
});
|
2009-01-07 03:08:57 +01:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
2008-02-10 19:45:23 +01:00
|
|
|
} else if (TargetRegisterInfo::isPhysicalRegister(RHS.reg) &&
|
|
|
|
*tri_->getSubRegisters(RHS.reg)) {
|
2009-01-07 03:08:57 +01:00
|
|
|
if (LHS.containsOneValue() &&
|
|
|
|
li_->getApproximateInstructionCount(LHS) <= 10) {
|
|
|
|
// Perform a more exhaustive check for some common cases.
|
|
|
|
if (li_->conflictsWithPhysRegRef(LHS, RHS.reg, false, JoinedCopies))
|
2007-06-08 19:18:56 +02:00
|
|
|
return false;
|
2009-01-07 03:08:57 +01:00
|
|
|
} else {
|
|
|
|
for (const unsigned* SR = tri_->getSubRegisters(RHS.reg); *SR; ++SR)
|
|
|
|
if (li_->hasInterval(*SR) && LHS.overlaps(li_->getInterval(*SR))) {
|
2009-08-22 22:52:46 +02:00
|
|
|
DEBUG({
|
2010-01-05 02:25:58 +01:00
|
|
|
dbgs() << "Interfere with sub-register ";
|
|
|
|
li_->getInterval(*SR).print(dbgs(), tri_);
|
2009-08-22 22:52:46 +02:00
|
|
|
});
|
2009-01-07 03:08:57 +01:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
2007-06-08 19:18:56 +02:00
|
|
|
}
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2007-06-08 19:18:56 +02:00
|
|
|
// Compute ultimate value numbers for the LHS and RHS values.
|
|
|
|
if (RHS.containsOneValue()) {
|
|
|
|
// Copies from a liveinterval with a single value are simple to handle and
|
|
|
|
// very common, handle the special case here. This is important, because
|
|
|
|
// often RHS is small and LHS is large (e.g. a physreg).
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2007-06-08 19:18:56 +02:00
|
|
|
// Find out if the RHS is defined as a copy from some value in the LHS.
|
2007-08-11 02:59:19 +02:00
|
|
|
int RHSVal0DefinedFromLHS = -1;
|
2007-06-08 19:18:56 +02:00
|
|
|
int RHSValID = -1;
|
2007-08-29 22:45:00 +02:00
|
|
|
VNInfo *RHSValNoInfo = NULL;
|
2007-09-05 23:46:51 +02:00
|
|
|
VNInfo *RHSValNoInfo0 = RHS.getValNumInfo(0);
|
2008-02-15 19:24:29 +01:00
|
|
|
unsigned RHSSrcReg = li_->getVNInfoSourceReg(RHSValNoInfo0);
|
2009-01-07 03:08:57 +01:00
|
|
|
if (RHSSrcReg == 0 || RHSSrcReg != LHS.reg) {
|
2007-06-08 19:18:56 +02:00
|
|
|
// If RHS is not defined as a copy from the LHS, we can use simpler and
|
2007-07-09 14:00:59 +02:00
|
|
|
// faster checks to see if the live ranges are coalescable. This joiner
|
2007-06-08 19:18:56 +02:00
|
|
|
// can't swap the LHS/RHS intervals though.
|
2008-02-10 19:45:23 +01:00
|
|
|
if (!TargetRegisterInfo::isPhysicalRegister(RHS.reg)) {
|
2007-06-08 19:18:56 +02:00
|
|
|
return SimpleJoin(LHS, RHS);
|
|
|
|
} else {
|
2007-08-31 10:04:17 +02:00
|
|
|
RHSValNoInfo = RHSValNoInfo0;
|
2007-06-08 19:18:56 +02:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// It was defined as a copy from the LHS, find out what value # it is.
|
2009-09-04 22:41:11 +02:00
|
|
|
RHSValNoInfo =
|
2009-11-04 00:52:08 +01:00
|
|
|
LHS.getLiveRangeContaining(RHSValNoInfo0->def.getPrevSlot())->valno;
|
2007-08-29 22:45:00 +02:00
|
|
|
RHSValID = RHSValNoInfo->id;
|
2007-08-11 02:59:19 +02:00
|
|
|
RHSVal0DefinedFromLHS = RHSValID;
|
2007-06-08 19:18:56 +02:00
|
|
|
}
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2007-06-08 19:18:56 +02:00
|
|
|
LHSValNoAssignments.resize(LHS.getNumValNums(), -1);
|
|
|
|
RHSValNoAssignments.resize(RHS.getNumValNums(), -1);
|
2007-08-29 22:45:00 +02:00
|
|
|
NewVNInfo.resize(LHS.getNumValNums(), NULL);
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2007-06-08 19:18:56 +02:00
|
|
|
// Okay, *all* of the values in LHS that are defined as a copy from RHS
|
|
|
|
// should now get updated.
|
2007-08-29 22:45:00 +02:00
|
|
|
for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end();
|
|
|
|
i != e; ++i) {
|
|
|
|
VNInfo *VNI = *i;
|
|
|
|
unsigned VN = VNI->id;
|
2008-02-15 19:24:29 +01:00
|
|
|
if (unsigned LHSSrcReg = li_->getVNInfoSourceReg(VNI)) {
|
|
|
|
if (LHSSrcReg != RHS.reg) {
|
2007-06-08 19:18:56 +02:00
|
|
|
// If this is not a copy from the RHS, its value number will be
|
2007-07-09 14:00:59 +02:00
|
|
|
// unmodified by the coalescing.
|
2007-08-29 22:45:00 +02:00
|
|
|
NewVNInfo[VN] = VNI;
|
2007-06-08 19:18:56 +02:00
|
|
|
LHSValNoAssignments[VN] = VN;
|
|
|
|
} else if (RHSValID == -1) {
|
|
|
|
// Otherwise, it is a copy from the RHS, and we don't already have a
|
|
|
|
// value# for it. Keep the current value number, but remember it.
|
|
|
|
LHSValNoAssignments[VN] = RHSValID = VN;
|
2007-08-29 22:45:00 +02:00
|
|
|
NewVNInfo[VN] = RHSValNoInfo;
|
2007-08-31 10:04:17 +02:00
|
|
|
LHSValsDefinedFromRHS[VNI] = RHSValNoInfo0;
|
2007-06-08 19:18:56 +02:00
|
|
|
} else {
|
|
|
|
// Otherwise, use the specified value #.
|
|
|
|
LHSValNoAssignments[VN] = RHSValID;
|
2007-08-29 22:45:00 +02:00
|
|
|
if (VN == (unsigned)RHSValID) { // Else this val# is dead.
|
|
|
|
NewVNInfo[VN] = RHSValNoInfo;
|
2007-08-31 10:04:17 +02:00
|
|
|
LHSValsDefinedFromRHS[VNI] = RHSValNoInfo0;
|
2007-08-11 02:59:19 +02:00
|
|
|
}
|
2007-06-08 19:18:56 +02:00
|
|
|
}
|
|
|
|
} else {
|
2007-08-29 22:45:00 +02:00
|
|
|
NewVNInfo[VN] = VNI;
|
2007-06-08 19:18:56 +02:00
|
|
|
LHSValNoAssignments[VN] = VN;
|
|
|
|
}
|
|
|
|
}
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2007-06-08 19:18:56 +02:00
|
|
|
assert(RHSValID != -1 && "Didn't find value #?");
|
|
|
|
RHSValNoAssignments[0] = RHSValID;
|
2007-08-11 02:59:19 +02:00
|
|
|
if (RHSVal0DefinedFromLHS != -1) {
|
2007-09-01 04:03:17 +02:00
|
|
|
// This path doesn't go through ComputeUltimateVN so just set
|
|
|
|
// it to anything.
|
|
|
|
RHSValsDefinedFromLHS[RHSValNoInfo0] = (VNInfo*)1;
|
2007-08-11 02:59:19 +02:00
|
|
|
}
|
2007-06-08 19:18:56 +02:00
|
|
|
} else {
|
|
|
|
// Loop over the value numbers of the LHS, seeing if any are defined from
|
|
|
|
// the RHS.
|
2007-08-29 22:45:00 +02:00
|
|
|
for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end();
|
|
|
|
i != e; ++i) {
|
|
|
|
VNInfo *VNI = *i;
|
2009-08-11 01:43:28 +02:00
|
|
|
if (VNI->isUnused() || VNI->getCopy() == 0) // Src not defined by a copy?
|
2007-06-08 19:18:56 +02:00
|
|
|
continue;
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2007-06-08 19:18:56 +02:00
|
|
|
// DstReg is known to be a register in the LHS interval. If the src is
|
|
|
|
// from the RHS interval, we can use its value #.
|
2008-02-15 19:24:29 +01:00
|
|
|
if (li_->getVNInfoSourceReg(VNI) != RHS.reg)
|
2007-06-08 19:18:56 +02:00
|
|
|
continue;
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2007-06-08 19:18:56 +02:00
|
|
|
// Figure out the value # from the RHS.
|
2009-12-23 00:54:54 +01:00
|
|
|
LiveRange *lr = RHS.getLiveRangeContaining(VNI->def.getPrevSlot());
|
|
|
|
assert(lr && "Cannot find live range");
|
|
|
|
LHSValsDefinedFromRHS[VNI] = lr->valno;
|
2007-06-08 19:18:56 +02:00
|
|
|
}
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2007-06-08 19:18:56 +02:00
|
|
|
// Loop over the value numbers of the RHS, seeing if any are defined from
|
|
|
|
// the LHS.
|
2007-08-29 22:45:00 +02:00
|
|
|
for (LiveInterval::vni_iterator i = RHS.vni_begin(), e = RHS.vni_end();
|
|
|
|
i != e; ++i) {
|
|
|
|
VNInfo *VNI = *i;
|
2009-08-11 01:43:28 +02:00
|
|
|
if (VNI->isUnused() || VNI->getCopy() == 0) // Src not defined by a copy?
|
2007-06-08 19:18:56 +02:00
|
|
|
continue;
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2007-06-08 19:18:56 +02:00
|
|
|
// DstReg is known to be a register in the RHS interval. If the src is
|
|
|
|
// from the LHS interval, we can use its value #.
|
2008-02-15 19:24:29 +01:00
|
|
|
if (li_->getVNInfoSourceReg(VNI) != LHS.reg)
|
2007-06-08 19:18:56 +02:00
|
|
|
continue;
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2007-06-08 19:18:56 +02:00
|
|
|
// Figure out the value # from the LHS.
|
2009-12-23 00:54:54 +01:00
|
|
|
LiveRange *lr = LHS.getLiveRangeContaining(VNI->def.getPrevSlot());
|
|
|
|
assert(lr && "Cannot find live range");
|
|
|
|
RHSValsDefinedFromLHS[VNI] = lr->valno;
|
2007-06-08 19:18:56 +02:00
|
|
|
}
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2007-06-08 19:18:56 +02:00
|
|
|
LHSValNoAssignments.resize(LHS.getNumValNums(), -1);
|
|
|
|
RHSValNoAssignments.resize(RHS.getNumValNums(), -1);
|
2007-08-29 22:45:00 +02:00
|
|
|
NewVNInfo.reserve(LHS.getNumValNums() + RHS.getNumValNums());
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2007-08-29 22:45:00 +02:00
|
|
|
for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end();
|
|
|
|
i != e; ++i) {
|
|
|
|
VNInfo *VNI = *i;
|
|
|
|
unsigned VN = VNI->id;
|
2009-09-20 04:20:51 +02:00
|
|
|
if (LHSValNoAssignments[VN] >= 0 || VNI->isUnused())
|
2007-06-08 19:18:56 +02:00
|
|
|
continue;
|
2007-08-29 22:45:00 +02:00
|
|
|
ComputeUltimateVN(VNI, NewVNInfo,
|
2007-06-08 19:18:56 +02:00
|
|
|
LHSValsDefinedFromRHS, RHSValsDefinedFromLHS,
|
2007-08-29 22:45:00 +02:00
|
|
|
LHSValNoAssignments, RHSValNoAssignments);
|
2007-06-08 19:18:56 +02:00
|
|
|
}
|
2007-08-29 22:45:00 +02:00
|
|
|
for (LiveInterval::vni_iterator i = RHS.vni_begin(), e = RHS.vni_end();
|
|
|
|
i != e; ++i) {
|
|
|
|
VNInfo *VNI = *i;
|
|
|
|
unsigned VN = VNI->id;
|
2009-06-17 23:01:20 +02:00
|
|
|
if (RHSValNoAssignments[VN] >= 0 || VNI->isUnused())
|
2007-06-08 19:18:56 +02:00
|
|
|
continue;
|
|
|
|
// If this value number isn't a copy from the LHS, it's a new number.
|
2007-08-31 10:04:17 +02:00
|
|
|
if (RHSValsDefinedFromLHS.find(VNI) == RHSValsDefinedFromLHS.end()) {
|
2007-08-29 22:45:00 +02:00
|
|
|
NewVNInfo.push_back(VNI);
|
|
|
|
RHSValNoAssignments[VN] = NewVNInfo.size()-1;
|
2007-06-08 19:18:56 +02:00
|
|
|
continue;
|
|
|
|
}
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2007-08-29 22:45:00 +02:00
|
|
|
ComputeUltimateVN(VNI, NewVNInfo,
|
2007-06-08 19:18:56 +02:00
|
|
|
RHSValsDefinedFromLHS, LHSValsDefinedFromRHS,
|
2007-08-29 22:45:00 +02:00
|
|
|
RHSValNoAssignments, LHSValNoAssignments);
|
2007-06-08 19:18:56 +02:00
|
|
|
}
|
|
|
|
}
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2007-06-08 19:18:56 +02:00
|
|
|
// Armed with the mappings of LHS/RHS values to ultimate values, walk the
|
2007-07-09 14:00:59 +02:00
|
|
|
// interval lists to see if these intervals are coalescable.
|
2007-06-08 19:18:56 +02:00
|
|
|
LiveInterval::const_iterator I = LHS.begin();
|
|
|
|
LiveInterval::const_iterator IE = LHS.end();
|
|
|
|
LiveInterval::const_iterator J = RHS.begin();
|
|
|
|
LiveInterval::const_iterator JE = RHS.end();
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2007-06-08 19:18:56 +02:00
|
|
|
// Skip ahead until the first place of potential sharing.
|
|
|
|
if (I->start < J->start) {
|
|
|
|
I = std::upper_bound(I, IE, J->start);
|
|
|
|
if (I != LHS.begin()) --I;
|
|
|
|
} else if (J->start < I->start) {
|
|
|
|
J = std::upper_bound(J, JE, I->start);
|
|
|
|
if (J != RHS.begin()) --J;
|
|
|
|
}
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2007-06-08 19:18:56 +02:00
|
|
|
while (1) {
|
|
|
|
// Determine if these two live ranges overlap.
|
|
|
|
bool Overlaps;
|
|
|
|
if (I->start < J->start) {
|
|
|
|
Overlaps = I->end > J->start;
|
|
|
|
} else {
|
|
|
|
Overlaps = J->end > I->start;
|
|
|
|
}
|
|
|
|
|
|
|
|
// If so, check value # info to determine if they are really different.
|
|
|
|
if (Overlaps) {
|
|
|
|
// If the live range overlap will map to the same value number in the
|
2007-07-09 14:00:59 +02:00
|
|
|
// result liverange, we can still coalesce them. If not, we can't.
|
2007-08-29 22:45:00 +02:00
|
|
|
if (LHSValNoAssignments[I->valno->id] !=
|
|
|
|
RHSValNoAssignments[J->valno->id])
|
2007-06-08 19:18:56 +02:00
|
|
|
return false;
|
2009-12-01 23:25:00 +01:00
|
|
|
// If it's re-defined by an early clobber somewhere in the live range,
|
|
|
|
// then conservatively abort coalescing.
|
|
|
|
if (NewVNInfo[LHSValNoAssignments[I->valno->id]]->hasRedefByEC())
|
|
|
|
return false;
|
2007-06-08 19:18:56 +02:00
|
|
|
}
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2007-06-08 19:18:56 +02:00
|
|
|
if (I->end < J->end) {
|
|
|
|
++I;
|
|
|
|
if (I == IE) break;
|
|
|
|
} else {
|
|
|
|
++J;
|
|
|
|
if (J == JE) break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-10-14 12:08:34 +02:00
|
|
|
// Update kill info. Some live ranges are extended due to copy coalescing.
|
|
|
|
for (DenseMap<VNInfo*, VNInfo*>::iterator I = LHSValsDefinedFromRHS.begin(),
|
|
|
|
E = LHSValsDefinedFromRHS.end(); I != E; ++I) {
|
|
|
|
VNInfo *VNI = I->first;
|
|
|
|
unsigned LHSValID = LHSValNoAssignments[VNI->id];
|
2009-09-04 22:41:11 +02:00
|
|
|
NewVNInfo[LHSValID]->removeKill(VNI->def);
|
2009-06-17 23:01:20 +02:00
|
|
|
if (VNI->hasPHIKill())
|
|
|
|
NewVNInfo[LHSValID]->setHasPHIKill(true);
|
2007-10-14 12:08:34 +02:00
|
|
|
RHS.addKills(NewVNInfo[LHSValID], VNI->kills);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Update kill info. Some live ranges are extended due to copy coalescing.
|
|
|
|
for (DenseMap<VNInfo*, VNInfo*>::iterator I = RHSValsDefinedFromLHS.begin(),
|
|
|
|
E = RHSValsDefinedFromLHS.end(); I != E; ++I) {
|
|
|
|
VNInfo *VNI = I->first;
|
|
|
|
unsigned RHSValID = RHSValNoAssignments[VNI->id];
|
2009-09-04 22:41:11 +02:00
|
|
|
NewVNInfo[RHSValID]->removeKill(VNI->def);
|
2009-06-17 23:01:20 +02:00
|
|
|
if (VNI->hasPHIKill())
|
|
|
|
NewVNInfo[RHSValID]->setHasPHIKill(true);
|
2007-10-14 12:08:34 +02:00
|
|
|
LHS.addKills(NewVNInfo[RHSValID], VNI->kills);
|
|
|
|
}
|
|
|
|
|
2007-07-09 14:00:59 +02:00
|
|
|
// If we get here, we know that we can coalesce the live ranges. Ask the
|
|
|
|
// intervals to coalesce themselves now.
|
2007-08-28 10:28:51 +02:00
|
|
|
if ((RHS.ranges.size() > LHS.ranges.size() &&
|
2008-02-10 19:45:23 +01:00
|
|
|
TargetRegisterInfo::isVirtualRegister(LHS.reg)) ||
|
|
|
|
TargetRegisterInfo::isPhysicalRegister(RHS.reg)) {
|
2009-06-14 22:22:55 +02:00
|
|
|
RHS.join(LHS, &RHSValNoAssignments[0], &LHSValNoAssignments[0], NewVNInfo,
|
|
|
|
mri_);
|
2007-08-28 10:28:51 +02:00
|
|
|
Swapped = true;
|
|
|
|
} else {
|
2009-06-14 22:22:55 +02:00
|
|
|
LHS.join(RHS, &LHSValNoAssignments[0], &RHSValNoAssignments[0], NewVNInfo,
|
|
|
|
mri_);
|
2007-08-28 10:28:51 +02:00
|
|
|
Swapped = false;
|
|
|
|
}
|
2007-06-08 19:18:56 +02:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
// DepthMBBCompare - Comparison predicate that sort first based on the loop
|
|
|
|
// depth of the basic block (the unsigned), and then on the MBB number.
|
|
|
|
struct DepthMBBCompare {
|
|
|
|
typedef std::pair<unsigned, MachineBasicBlock*> DepthMBBPair;
|
|
|
|
bool operator()(const DepthMBBPair &LHS, const DepthMBBPair &RHS) const {
|
2009-12-01 04:03:00 +01:00
|
|
|
// Deeper loops first
|
|
|
|
if (LHS.first != RHS.first)
|
|
|
|
return LHS.first > RHS.first;
|
|
|
|
|
|
|
|
// Prefer blocks that are more connected in the CFG. This takes care of
|
|
|
|
// the most difficult copies first while intervals are short.
|
|
|
|
unsigned cl = LHS.second->pred_size() + LHS.second->succ_size();
|
|
|
|
unsigned cr = RHS.second->pred_size() + RHS.second->succ_size();
|
|
|
|
if (cl != cr)
|
|
|
|
return cl > cr;
|
|
|
|
|
|
|
|
// As a last resort, sort by block number.
|
|
|
|
return LHS.second->getNumber() < RHS.second->getNumber();
|
2007-06-08 19:18:56 +02:00
|
|
|
}
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
2007-07-09 14:00:59 +02:00
|
|
|
void SimpleRegisterCoalescing::CopyCoalesceInMBB(MachineBasicBlock *MBB,
|
2007-10-16 10:04:24 +02:00
|
|
|
std::vector<CopyRec> &TryAgain) {
|
2010-01-05 02:25:58 +01:00
|
|
|
DEBUG(dbgs() << MBB->getName() << ":\n");
|
2007-11-06 09:52:21 +01:00
|
|
|
|
2007-10-16 10:04:24 +02:00
|
|
|
std::vector<CopyRec> VirtCopies;
|
|
|
|
std::vector<CopyRec> PhysCopies;
|
2008-04-09 22:57:25 +02:00
|
|
|
std::vector<CopyRec> ImpDefCopies;
|
2007-06-08 19:18:56 +02:00
|
|
|
for (MachineBasicBlock::iterator MII = MBB->begin(), E = MBB->end();
|
|
|
|
MII != E;) {
|
|
|
|
MachineInstr *Inst = MII++;
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2007-10-12 10:50:34 +02:00
|
|
|
// If this isn't a copy nor a extract_subreg, we can't join intervals.
|
2009-01-20 20:12:24 +01:00
|
|
|
unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
|
2009-12-11 07:01:00 +01:00
|
|
|
bool isInsUndef = false;
|
2007-10-12 10:50:34 +02:00
|
|
|
if (Inst->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
|
|
|
|
DstReg = Inst->getOperand(0).getReg();
|
|
|
|
SrcReg = Inst->getOperand(1).getReg();
|
2009-12-11 07:01:00 +01:00
|
|
|
} else if (Inst->getOpcode() == TargetInstrInfo::INSERT_SUBREG) {
|
|
|
|
DstReg = Inst->getOperand(0).getReg();
|
|
|
|
SrcReg = Inst->getOperand(2).getReg();
|
|
|
|
if (Inst->getOperand(1).isUndef())
|
|
|
|
isInsUndef = true;
|
Implement support for using modeling implicit-zero-extension on x86-64
with SUBREG_TO_REG, teach SimpleRegisterCoalescing to coalesce
SUBREG_TO_REG instructions (which are similar to INSERT_SUBREG
instructions), and teach the DAGCombiner to take advantage of this on
targets which support it. This eliminates many redundant
zero-extension operations on x86-64.
This adds a new TargetLowering hook, isZExtFree. It's similar to
isTruncateFree, except it only applies to actual definitions, and not
no-op truncates which may not zero the high bits.
Also, this adds a new optimization to SimplifyDemandedBits: transform
operations like x+y into (zext (add (trunc x), (trunc y))) on targets
where all the casts are no-ops. In contexts where the high part of the
add is explicitly masked off, this allows the mask operation to be
eliminated. Fix the DAGCombiner to avoid undoing these transformations
to eliminate casts on targets where the casts are no-ops.
Also, this adds a new two-address lowering heuristic. Since
two-address lowering runs before coalescing, it helps to be able to
look through copies when deciding whether commuting and/or
three-address conversion are profitable.
Also, fix a bug in LiveInterval::MergeInClobberRanges. It didn't handle
the case that a clobber range extended both before and beyond an
existing live range. In that case, multiple live ranges need to be
added. This was exposed by the new subreg coalescing code.
Remove 2008-05-06-SpillerBug.ll. It was bugpoint-reduced, and the
spiller behavior it was looking for no longer occurrs with the new
instruction selection.
llvm-svn: 68576
2009-04-08 02:15:30 +02:00
|
|
|
} else if (Inst->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
|
|
|
|
Inst->getOpcode() == TargetInstrInfo::SUBREG_TO_REG) {
|
2008-04-09 22:57:25 +02:00
|
|
|
DstReg = Inst->getOperand(0).getReg();
|
|
|
|
SrcReg = Inst->getOperand(2).getReg();
|
2009-01-20 20:12:24 +01:00
|
|
|
} else if (!tii_->isMoveInstr(*Inst, SrcReg, DstReg, SrcSubIdx, DstSubIdx))
|
2007-10-12 10:50:34 +02:00
|
|
|
continue;
|
2007-10-16 10:04:24 +02:00
|
|
|
|
2008-02-15 19:24:29 +01:00
|
|
|
bool SrcIsPhys = TargetRegisterInfo::isPhysicalRegister(SrcReg);
|
|
|
|
bool DstIsPhys = TargetRegisterInfo::isPhysicalRegister(DstReg);
|
2009-12-11 07:01:00 +01:00
|
|
|
if (isInsUndef ||
|
|
|
|
(li_->hasInterval(SrcReg) && li_->getInterval(SrcReg).empty()))
|
2009-09-12 04:14:41 +02:00
|
|
|
ImpDefCopies.push_back(CopyRec(Inst, 0));
|
|
|
|
else if (SrcIsPhys || DstIsPhys)
|
|
|
|
PhysCopies.push_back(CopyRec(Inst, 0));
|
|
|
|
else
|
|
|
|
VirtCopies.push_back(CopyRec(Inst, 0));
|
2007-10-16 10:04:24 +02:00
|
|
|
}
|
|
|
|
|
2009-12-11 07:01:00 +01:00
|
|
|
// Try coalescing implicit copies and insert_subreg <undef> first,
|
|
|
|
// followed by copies to / from physical registers, then finally copies
|
|
|
|
// from virtual registers to virtual registers.
|
2008-04-09 22:57:25 +02:00
|
|
|
for (unsigned i = 0, e = ImpDefCopies.size(); i != e; ++i) {
|
|
|
|
CopyRec &TheCopy = ImpDefCopies[i];
|
|
|
|
bool Again = false;
|
|
|
|
if (!JoinCopy(TheCopy, Again))
|
|
|
|
if (Again)
|
|
|
|
TryAgain.push_back(TheCopy);
|
|
|
|
}
|
2007-10-16 10:04:24 +02:00
|
|
|
for (unsigned i = 0, e = PhysCopies.size(); i != e; ++i) {
|
|
|
|
CopyRec &TheCopy = PhysCopies[i];
|
2007-11-01 07:22:48 +01:00
|
|
|
bool Again = false;
|
2007-11-06 09:52:21 +01:00
|
|
|
if (!JoinCopy(TheCopy, Again))
|
2007-11-01 07:22:48 +01:00
|
|
|
if (Again)
|
|
|
|
TryAgain.push_back(TheCopy);
|
2007-10-16 10:04:24 +02:00
|
|
|
}
|
|
|
|
for (unsigned i = 0, e = VirtCopies.size(); i != e; ++i) {
|
|
|
|
CopyRec &TheCopy = VirtCopies[i];
|
2007-11-01 07:22:48 +01:00
|
|
|
bool Again = false;
|
2007-11-06 09:52:21 +01:00
|
|
|
if (!JoinCopy(TheCopy, Again))
|
2007-11-01 07:22:48 +01:00
|
|
|
if (Again)
|
|
|
|
TryAgain.push_back(TheCopy);
|
2007-06-08 19:18:56 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void SimpleRegisterCoalescing::joinIntervals() {
|
2010-01-05 02:25:58 +01:00
|
|
|
DEBUG(dbgs() << "********** JOINING INTERVALS ***********\n");
|
2007-06-08 19:18:56 +02:00
|
|
|
|
|
|
|
std::vector<CopyRec> TryAgainList;
|
2008-08-14 20:13:49 +02:00
|
|
|
if (loopInfo->empty()) {
|
2007-06-08 19:18:56 +02:00
|
|
|
// If there are no loops in the function, join intervals in function order.
|
|
|
|
for (MachineFunction::iterator I = mf_->begin(), E = mf_->end();
|
|
|
|
I != E; ++I)
|
2007-10-16 10:04:24 +02:00
|
|
|
CopyCoalesceInMBB(I, TryAgainList);
|
2007-06-08 19:18:56 +02:00
|
|
|
} else {
|
|
|
|
// Otherwise, join intervals in inner loops before other intervals.
|
|
|
|
// Unfortunately we can't just iterate over loop hierarchy here because
|
|
|
|
// there may be more MBB's than BB's. Collect MBB's for sorting.
|
|
|
|
|
|
|
|
// Join intervals in the function prolog first. We want to join physical
|
|
|
|
// registers with virtual registers before the intervals got too long.
|
|
|
|
std::vector<std::pair<unsigned, MachineBasicBlock*> > MBBs;
|
2007-12-11 03:09:15 +01:00
|
|
|
for (MachineFunction::iterator I = mf_->begin(), E = mf_->end();I != E;++I){
|
|
|
|
MachineBasicBlock *MBB = I;
|
|
|
|
MBBs.push_back(std::make_pair(loopInfo->getLoopDepth(MBB), I));
|
|
|
|
}
|
2007-06-08 19:18:56 +02:00
|
|
|
|
|
|
|
// Sort by loop depth.
|
|
|
|
std::sort(MBBs.begin(), MBBs.end(), DepthMBBCompare());
|
|
|
|
|
|
|
|
// Finally, join intervals in loop nest order.
|
|
|
|
for (unsigned i = 0, e = MBBs.size(); i != e; ++i)
|
2007-10-16 10:04:24 +02:00
|
|
|
CopyCoalesceInMBB(MBBs[i].second, TryAgainList);
|
2007-06-08 19:18:56 +02:00
|
|
|
}
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2007-06-08 19:18:56 +02:00
|
|
|
// Joining intervals can allow other intervals to be joined. Iteratively join
|
|
|
|
// until we make no progress.
|
2009-09-12 04:14:41 +02:00
|
|
|
bool ProgressMade = true;
|
|
|
|
while (ProgressMade) {
|
|
|
|
ProgressMade = false;
|
2007-11-06 09:52:21 +01:00
|
|
|
|
2009-09-12 04:14:41 +02:00
|
|
|
for (unsigned i = 0, e = TryAgainList.size(); i != e; ++i) {
|
|
|
|
CopyRec &TheCopy = TryAgainList[i];
|
|
|
|
if (!TheCopy.MI)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
bool Again = false;
|
|
|
|
bool Success = JoinCopy(TheCopy, Again);
|
|
|
|
if (Success || !Again) {
|
|
|
|
TheCopy.MI = 0; // Mark this one as done.
|
|
|
|
ProgressMade = true;
|
2007-06-08 19:18:56 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Return true if the two specified registers belong to different register
|
2009-01-23 03:15:19 +01:00
|
|
|
/// classes. The registers may be either phys or virt regs.
|
2008-06-19 03:39:21 +02:00
|
|
|
bool
|
2009-01-23 03:15:19 +01:00
|
|
|
SimpleRegisterCoalescing::differingRegisterClasses(unsigned RegA,
|
|
|
|
unsigned RegB) const {
|
2007-06-08 19:18:56 +02:00
|
|
|
// Get the register classes for the first reg.
|
2008-02-10 19:45:23 +01:00
|
|
|
if (TargetRegisterInfo::isPhysicalRegister(RegA)) {
|
|
|
|
assert(TargetRegisterInfo::isVirtualRegister(RegB) &&
|
2007-06-08 19:18:56 +02:00
|
|
|
"Shouldn't consider two physregs!");
|
2008-02-15 19:24:29 +01:00
|
|
|
return !mri_->getRegClass(RegB)->contains(RegA);
|
2007-06-08 19:18:56 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Compare against the regclass for the second reg.
|
2008-06-19 03:39:21 +02:00
|
|
|
const TargetRegisterClass *RegClassA = mri_->getRegClass(RegA);
|
|
|
|
if (TargetRegisterInfo::isVirtualRegister(RegB)) {
|
|
|
|
const TargetRegisterClass *RegClassB = mri_->getRegClass(RegB);
|
2009-01-23 03:15:19 +01:00
|
|
|
return RegClassA != RegClassB;
|
2008-06-19 03:39:21 +02:00
|
|
|
}
|
|
|
|
return !RegClassA->contains(RegB);
|
2007-06-08 19:18:56 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/// lastRegisterUse - Returns the last use of the specific register between
/// cycles Start and End or NULL if there are no uses.
/// On success, UseIdx is set to the slot index of the use; otherwise it is
/// left as the default-constructed SlotIndex.
MachineOperand *
SimpleRegisterCoalescing::lastRegisterUse(SlotIndex Start,
                                          SlotIndex End,
                                          unsigned Reg,
                                          SlotIndex &UseIdx) const{
  UseIdx = SlotIndex();
  if (TargetRegisterInfo::isVirtualRegister(Reg)) {
    // Virtual register: scan the def-use chain and keep the use with the
    // highest index that still falls inside [Start, End).
    MachineOperand *LastUse = NULL;
    for (MachineRegisterInfo::use_iterator I = mri_->use_begin(Reg),
           E = mri_->use_end(); I != E; ++I) {
      MachineOperand &Use = I.getOperand();
      MachineInstr *UseMI = Use.getParent();
      unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
      if (tii_->isMoveInstr(*UseMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx) &&
          SrcReg == DstReg)
        // Ignore identity copies.
        continue;
      SlotIndex Idx = li_->getInstructionIndex(UseMI);
      // FIXME: Should this be Idx != UseIdx? SlotIndex() will return something
      // that compares higher than any other interval.
      if (Idx >= Start && Idx < End && Idx >= UseIdx) {
        LastUse = &Use;
        UseIdx = Idx.getUseIndex();
      }
    }
    return LastUse;
  }

  // Physical register: there is no use list, so walk the slot indexes
  // backwards from just before End and inspect each instruction's operands.
  SlotIndex s = Start;
  SlotIndex e = End.getPrevSlot().getBaseIndex();
  while (e >= s) {
    // Skip deleted instructions (indexes with no instruction mapped).
    MachineInstr *MI = li_->getInstructionFromIndex(e);
    while (e != SlotIndex() && e.getPrevIndex() >= s && !MI) {
      e = e.getPrevIndex();
      MI = li_->getInstructionFromIndex(e);
    }
    if (e < s || MI == NULL)
      return NULL;

    // Ignore identity copies.
    unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
    if (!(tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubIdx, DstSubIdx) &&
          SrcReg == DstReg))
      for (unsigned i = 0, NumOps = MI->getNumOperands(); i != NumOps; ++i) {
        MachineOperand &Use = MI->getOperand(i);
        // regsOverlap also catches aliasing sub/super-registers of Reg.
        if (Use.isReg() && Use.isUse() && Use.getReg() &&
            tri_->regsOverlap(Use.getReg(), Reg)) {
          UseIdx = e.getUseIndex();
          return &Use;
        }
      }

    e = e.getPrevIndex();
  }

  return NULL;
}
|
|
|
|
|
|
|
|
void SimpleRegisterCoalescing::printRegName(unsigned reg) const {
|
2008-02-10 19:45:23 +01:00
|
|
|
if (TargetRegisterInfo::isPhysicalRegister(reg))
|
2010-01-05 02:25:58 +01:00
|
|
|
dbgs() << tri_->getName(reg);
|
2007-06-08 19:18:56 +02:00
|
|
|
else
|
2010-01-05 02:25:58 +01:00
|
|
|
dbgs() << "%reg" << reg;
|
2007-06-08 19:18:56 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void SimpleRegisterCoalescing::releaseMemory() {
|
2007-11-06 09:52:21 +01:00
|
|
|
JoinedCopies.clear();
|
2008-08-30 11:09:33 +02:00
|
|
|
ReMatCopies.clear();
|
2008-09-19 19:38:47 +02:00
|
|
|
ReMatDefs.clear();
|
2007-06-08 19:18:56 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/// runOnMachineFunction - Entry point for the pass.  Caches the analyses and
/// target hooks, runs interval joining (if enabled), and then makes a final
/// cleanup sweep that erases coalesced copies, dead rematerialized defs, and
/// identity moves.  Always returns true (the function is modified).
bool SimpleRegisterCoalescing::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &fn.getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  li_ = &getAnalysis<LiveIntervals>();
  AA = &getAnalysis<AliasAnalysis>();
  loopInfo = &getAnalysis<MachineLoopInfo>();

  DEBUG(dbgs() << "********** SIMPLE REGISTER COALESCING **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  // Cache the set of allocatable registers, overall and per register class.
  allocatableRegs_ = tri_->getAllocatableSet(fn);
  for (TargetRegisterInfo::regclass_iterator I = tri_->regclass_begin(),
         E = tri_->regclass_end(); I != E; ++I)
    allocatableRCRegs_.insert(std::make_pair(*I,
                                             tri_->getAllocatableSet(fn, *I)));

  // Join (coalesce) intervals if requested.
  if (EnableJoining) {
    joinIntervals();
    DEBUG({
        dbgs() << "********** INTERVALS POST JOINING **********\n";
        for (LiveIntervals::iterator I = li_->begin(), E = li_->end();
             I != E; ++I){
          I->second->print(dbgs(), tri_);
          dbgs() << "\n";
        }
      });
  }

  // Perform a final pass over the instructions and compute spill weights
  // and remove identity moves.
  SmallVector<unsigned, 4> DeadDefs;
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    MachineBasicBlock* mbb = mbbi;
    // Note: mii is only advanced inside the body so that erased instructions
    // do not invalidate the iteration.
    for (MachineBasicBlock::iterator mii = mbb->begin(), mie = mbb->end();
         mii != mie; ) {
      MachineInstr *MI = mii;
      unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
      if (JoinedCopies.count(MI)) {
        // Delete all coalesced copies.
        bool DoDelete = true;
        if (!tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubIdx, DstSubIdx)) {
          // Not a plain move: must be one of the subreg pseudo-copies.
          assert((MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
                  MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
                  MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG) &&
                 "Unrecognized copy instruction");
          DstReg = MI->getOperand(0).getReg();
          if (TargetRegisterInfo::isPhysicalRegister(DstReg))
            // Do not delete extract_subreg, insert_subreg of physical
            // registers unless the definition is dead. e.g.
            // %DO<def> = INSERT_SUBREG %D0<undef>, %S0<kill>, 1
            // or else the scavenger may complain. LowerSubregs will
            // delete them later.
            DoDelete = false;
        }
        if (MI->registerDefIsDead(DstReg)) {
          // The def is dead: shrink its live interval before deleting.
          LiveInterval &li = li_->getInterval(DstReg);
          if (!ShortenDeadCopySrcLiveRange(li, MI))
            ShortenDeadCopyLiveRange(li, MI);
          DoDelete = true;
        }
        if (!DoDelete)
          mii = llvm::next(mii);
        else {
          li_->RemoveMachineInstrFromMaps(MI);
          mii = mbbi->erase(mii);
          ++numPeep;
        }
        continue;
      }

      // Now check if this is a remat'ed def instruction which is now dead.
      if (ReMatDefs.count(MI)) {
        bool isDead = true;
        for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
          const MachineOperand &MO = MI->getOperand(i);
          if (!MO.isReg())
            continue;
          unsigned Reg = MO.getReg();
          if (!Reg)
            continue;
          // Collect virtual defs speculatively; they are only removed below
          // if the whole instruction turns out to be dead.
          if (TargetRegisterInfo::isVirtualRegister(Reg))
            DeadDefs.push_back(Reg);
          if (MO.isDead())
            continue;
          // A live physreg operand or a virtreg that still has uses keeps
          // the instruction alive.
          if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
              !mri_->use_empty(Reg)) {
            isDead = false;
            break;
          }
        }
        if (isDead) {
          while (!DeadDefs.empty()) {
            unsigned DeadDef = DeadDefs.back();
            DeadDefs.pop_back();
            RemoveDeadDef(li_->getInterval(DeadDef), MI);
          }
          li_->RemoveMachineInstrFromMaps(mii);
          mii = mbbi->erase(mii);
          continue;
        } else
          DeadDefs.clear();
      }

      // If the move will be an identity move delete it
      bool isMove= tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubIdx, DstSubIdx);
      if (isMove && SrcReg == DstReg) {
        if (li_->hasInterval(SrcReg)) {
          LiveInterval &RegInt = li_->getInterval(SrcReg);
          // If def of this move instruction is dead, remove its live range
          // from the destination register's live interval.
          if (MI->registerDefIsDead(DstReg)) {
            if (!ShortenDeadCopySrcLiveRange(RegInt, MI))
              ShortenDeadCopyLiveRange(RegInt, MI);
          }
        }
        li_->RemoveMachineInstrFromMaps(MI);
        mii = mbbi->erase(mii);
        ++numPeep;
      } else {
        ++mii;
      }
    }
  }

  DEBUG(dump());
  return true;
}
|
|
|
|
|
|
|
|
/// print - Implement the dump method.
/// Delegates to LiveIntervals, which prints the current state of all live
/// intervals maintained by this pass.
void SimpleRegisterCoalescing::print(raw_ostream &O, const Module* m) const {
  li_->print(O, m);
}
|
2007-09-06 18:18:45 +02:00
|
|
|
|
|
|
|
/// createSimpleRegisterCoalescer - Factory for the pass framework; the
/// caller takes ownership of the returned coalescer.
RegisterCoalescer* llvm::createSimpleRegisterCoalescer() {
  RegisterCoalescer *Coalescer = new SimpleRegisterCoalescing();
  return Coalescer;
}
|
|
|
|
|
|
|
|
// Make sure that anything that uses RegisterCoalescer pulls in this file...
|
|
|
|
DEFINING_FILE_FOR(SimpleRegisterCoalescing)
|