2004-07-31 12:01:27 +02:00
|
|
|
//===-- BranchFolding.cpp - Fold machine code branch instructions ---------===//
|
2005-04-22 00:36:52 +02:00
|
|
|
//
|
2004-07-31 12:01:27 +02:00
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
2007-12-29 21:36:04 +01:00
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
2005-04-22 00:36:52 +02:00
|
|
|
//
|
2004-07-31 12:01:27 +02:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This pass forwards branches to unconditional branches to make them branch
|
|
|
|
// directly to the target block. This pass often results in dead MBB's, which
|
|
|
|
// it then removes.
|
|
|
|
//
|
|
|
|
// Note that this pass must be run after register allocation, as it cannot handle
|
|
|
|
// SSA form.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2006-11-18 22:56:39 +01:00
|
|
|
#define DEBUG_TYPE "branchfolding"
|
2004-07-31 12:01:27 +02:00
|
|
|
#include "llvm/CodeGen/Passes.h"
|
2007-01-26 22:22:28 +01:00
|
|
|
#include "llvm/CodeGen/MachineModuleInfo.h"
|
2004-07-31 12:01:27 +02:00
|
|
|
#include "llvm/CodeGen/MachineFunctionPass.h"
|
2006-10-17 19:13:52 +02:00
|
|
|
#include "llvm/CodeGen/MachineJumpTableInfo.h"
|
2007-03-20 22:35:06 +01:00
|
|
|
#include "llvm/CodeGen/RegisterScavenging.h"
|
2004-07-31 12:01:27 +02:00
|
|
|
#include "llvm/Target/TargetInstrInfo.h"
|
|
|
|
#include "llvm/Target/TargetMachine.h"
|
2008-02-10 19:45:23 +01:00
|
|
|
#include "llvm/Target/TargetRegisterInfo.h"
|
2006-10-21 02:47:49 +02:00
|
|
|
#include "llvm/Support/CommandLine.h"
|
2006-11-18 22:56:39 +01:00
|
|
|
#include "llvm/Support/Debug.h"
|
2008-04-10 04:32:10 +02:00
|
|
|
#include "llvm/ADT/SmallSet.h"
|
2006-10-21 02:47:49 +02:00
|
|
|
#include "llvm/ADT/Statistic.h"
|
2004-09-02 00:55:40 +02:00
|
|
|
#include "llvm/ADT/STLExtras.h"
|
2006-11-05 20:31:28 +01:00
|
|
|
#include <algorithm>
|
2004-07-31 12:01:27 +02:00
|
|
|
using namespace llvm;
|
|
|
|
|
2006-12-19 23:41:21 +01:00
|
|
|
STATISTIC(NumDeadBlocks, "Number of dead blocks removed");
|
|
|
|
STATISTIC(NumBranchOpts, "Number of branches optimized");
|
|
|
|
STATISTIC(NumTailMerge , "Number of block tails merged");
|
2007-05-22 19:14:46 +02:00
|
|
|
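// Command-line override for tail merging.  When left at BOU_UNSET, the
// per-target default passed to the BranchFolder constructor is used; e.g.
// (illustrative) "llc -enable-tail-merge=false ..." forces it off.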
static cl::opt<cl::boolOrDefault> FlagEnableTailMerge("enable-tail-merge",
|
|
|
|
cl::init(cl::BOU_UNSET), cl::Hidden);
|
2008-05-13 02:00:25 +02:00
|
|
|
// Throttle for huge numbers of predecessors (compile speed problems)
|
|
|
|
static cl::opt<unsigned>
|
|
|
|
TailMergeThreshold("tail-merge-threshold",
|
|
|
|
cl::desc("Max number of predecessors to consider tail merging"),
|
2008-10-27 03:10:21 +01:00
|
|
|
cl::init(150), cl::Hidden);
|
2007-06-08 03:08:52 +02:00
|
|
|
|
2008-05-13 02:00:25 +02:00
|
|
|
namespace {
|
2008-02-28 01:43:03 +01:00
|
|
|
struct VISIBILITY_HIDDEN BranchFolder : public MachineFunctionPass {
|
2007-05-03 03:11:54 +02:00
|
|
|
static char ID;
|
2007-08-02 23:21:54 +02:00
|
|
|
explicit BranchFolder(bool defaultEnableTailMerge) :
|
2008-09-22 22:58:04 +02:00
|
|
|
MachineFunctionPass(&ID) {
|
|
|
|
switch (FlagEnableTailMerge) {
|
|
|
|
case cl::BOU_UNSET: EnableTailMerge = defaultEnableTailMerge; break;
|
|
|
|
case cl::BOU_TRUE: EnableTailMerge = true; break;
|
|
|
|
case cl::BOU_FALSE: EnableTailMerge = false; break;
|
|
|
|
}
|
2007-05-22 19:14:46 +02:00
|
|
|
}
|
2007-05-01 23:15:47 +02:00
|
|
|
|
2004-07-31 12:01:27 +02:00
|
|
|
virtual bool runOnMachineFunction(MachineFunction &MF);
|
2006-10-14 02:21:48 +02:00
|
|
|
virtual const char *getPassName() const { return "Control Flow Optimizer"; }
|
|
|
|
const TargetInstrInfo *TII;
|
2007-01-26 22:22:28 +01:00
|
|
|
MachineModuleInfo *MMI;
|
2006-10-14 02:21:48 +02:00
|
|
|
bool MadeChange;
|
2004-07-31 12:01:27 +02:00
|
|
|
private:
|
2006-10-21 02:47:49 +02:00
|
|
|
// Tail Merging.
|
2007-05-22 19:14:46 +02:00
|
|
|
bool EnableTailMerge;
|
2006-10-21 02:47:49 +02:00
|
|
|
bool TailMergeBlocks(MachineFunction &MF);
|
2007-05-07 22:57:21 +02:00
|
|
|
bool TryMergeBlocks(MachineBasicBlock* SuccBB,
|
|
|
|
MachineBasicBlock* PredBB);
|
2006-10-21 02:47:49 +02:00
|
|
|
void ReplaceTailWithBranchTo(MachineBasicBlock::iterator OldInst,
|
|
|
|
MachineBasicBlock *NewDest);
|
2006-11-01 02:16:12 +01:00
|
|
|
MachineBasicBlock *SplitMBBAt(MachineBasicBlock &CurMBB,
|
|
|
|
MachineBasicBlock::iterator BBI1);
|
2008-05-10 01:28:24 +02:00
|
|
|
unsigned ComputeSameTails(unsigned CurHash, unsigned minCommonTailLength);
|
|
|
|
void RemoveBlocksWithHash(unsigned CurHash, MachineBasicBlock* SuccBB,
|
|
|
|
MachineBasicBlock* PredBB);
|
2008-05-12 22:33:57 +02:00
|
|
|
unsigned CreateCommonTailOnlyBlock(MachineBasicBlock *&PredBB,
|
|
|
|
unsigned maxCommonTailLength);
|
2007-03-20 22:35:06 +01:00
|
|
|
|
2008-05-09 23:24:35 +02:00
|
|
|
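// MergePotentials is the worklist for tail merging: each element pairs the
// hash of a block's ending instructions (from HashEndOfMBB) with the block
// itself, so blocks with identical tails sort next to each other.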
typedef std::pair<unsigned,MachineBasicBlock*> MergePotentialsElt;
|
|
|
|
typedef std::vector<MergePotentialsElt>::iterator MPIterator;
|
2008-05-10 01:28:24 +02:00
|
|
|
std::vector<MergePotentialsElt> MergePotentials;
|
2008-05-12 22:33:57 +02:00
|
|
|
|
2008-05-10 01:28:24 +02:00
|
|
|
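// For the hash bucket currently being processed, SameTails records each
// candidate block (as an iterator into MergePotentials) together with the
// instruction at which its shared tail begins.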
typedef std::pair<MPIterator, MachineBasicBlock::iterator> SameTailElt;
|
|
|
|
std::vector<SameTailElt> SameTails;
|
2008-05-09 23:24:35 +02:00
|
|
|
|
2008-02-10 19:45:23 +01:00
|
|
|
const TargetRegisterInfo *RegInfo;
|
2007-03-20 22:35:06 +01:00
|
|
|
RegScavenger *RS;
|
2006-10-21 02:47:49 +02:00
|
|
|
// Branch optzn.
|
|
|
|
bool OptimizeBranches(MachineFunction &MF);
|
2006-10-24 03:12:32 +02:00
|
|
|
void OptimizeBlock(MachineBasicBlock *MBB);
|
2006-10-18 01:17:27 +02:00
|
|
|
void RemoveDeadBlock(MachineBasicBlock *MBB);
|
2008-04-10 04:32:10 +02:00
|
|
|
bool OptimizeImpDefsBlock(MachineBasicBlock *MBB);
|
2006-10-29 22:05:41 +01:00
|
|
|
|
|
|
|
bool CanFallThrough(MachineBasicBlock *CurBB);
|
|
|
|
bool CanFallThrough(MachineBasicBlock *CurBB, bool BranchUnAnalyzable,
|
|
|
|
MachineBasicBlock *TBB, MachineBasicBlock *FBB,
|
2008-08-15 00:49:33 +02:00
|
|
|
const SmallVectorImpl<MachineOperand> &Cond);
|
2004-07-31 12:01:27 +02:00
|
|
|
};
|
2007-05-03 03:11:54 +02:00
|
|
|
char BranchFolder::ID = 0;
|
2004-07-31 12:01:27 +02:00
|
|
|
}
|
|
|
|
|
2007-05-22 19:14:46 +02:00
|
|
|
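// createBranchFoldingPass - Public interface to this file.  A target's
// code-generation pipeline typically adds the pass after register
// allocation; a minimal sketch (names are illustrative, not verbatim):
//   PM.add(createBranchFoldingPass(/*DefaultEnableTailMerge=*/true));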
FunctionPass *llvm::createBranchFoldingPass(bool DefaultEnableTailMerge) {
|
|
|
|
return new BranchFolder(DefaultEnableTailMerge);
}
|
2004-07-31 12:01:27 +02:00
|
|
|
|
2006-10-17 19:13:52 +02:00
|
|
|
/// RemoveDeadBlock - Remove the specified dead machine basic block from the
|
|
|
|
/// function, updating the CFG.
|
2006-10-18 01:17:27 +02:00
|
|
|
void BranchFolder::RemoveDeadBlock(MachineBasicBlock *MBB) {
|
2007-02-22 17:39:03 +01:00
|
|
|
assert(MBB->pred_empty() && "MBB must be dead!");
|
2007-02-21 23:42:20 +01:00
|
|
|
DOUT << "\nRemoving MBB: " << *MBB;
|
2006-10-18 01:17:27 +02:00
|
|
|
|
2006-10-17 19:13:52 +02:00
|
|
|
MachineFunction *MF = MBB->getParent();
|
|
|
|
// Drop all successors.
|
|
|
|
while (!MBB->succ_empty())
|
|
|
|
MBB->removeSuccessor(MBB->succ_end()-1);
|
2006-10-18 01:17:27 +02:00
|
|
|
|
2008-07-29 22:56:02 +02:00
|
|
|
// If there are any labels in the basic block, unregister them from
|
|
|
|
// MachineModuleInfo.
|
2007-01-26 22:22:28 +01:00
|
|
|
if (MMI && !MBB->empty()) {
|
2006-10-18 01:17:27 +02:00
|
|
|
for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
|
|
|
|
I != E; ++I) {
|
2008-07-29 22:56:02 +02:00
|
|
|
if (I->isLabel())
|
2006-10-18 01:17:27 +02:00
|
|
|
// The label ID # is always operand #0, an immediate.
|
2007-01-26 22:22:28 +01:00
|
|
|
MMI->InvalidateLabel(I->getOperand(0).getImm());
|
2006-10-18 01:17:27 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-10-17 19:13:52 +02:00
|
|
|
// Remove the block.
|
2008-07-08 01:14:23 +02:00
|
|
|
MF->erase(MBB);
|
2006-10-17 19:13:52 +02:00
|
|
|
}
|
|
|
|
|
2008-04-10 04:32:10 +02:00
|
|
|
/// OptimizeImpDefsBlock - If a basic block is just a bunch of implicit_def
|
|
|
|
/// followed by terminators, and if the implicitly defined registers are not
|
|
|
|
/// used by the terminators, remove those implicit_def's. e.g.
|
|
|
|
/// BB1:
|
|
|
|
/// r0 = implicit_def
|
|
|
|
/// r1 = implicit_def
|
|
|
|
/// br
|
|
|
|
/// This block can be optimized away later if the implicit instructions are
|
|
|
|
/// removed.
|
|
|
|
bool BranchFolder::OptimizeImpDefsBlock(MachineBasicBlock *MBB) {
|
|
|
|
SmallSet<unsigned, 4> ImpDefRegs;
|
|
|
|
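// Collect every register (and all of its sub-registers) defined by the
// leading implicit_def instructions.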
MachineBasicBlock::iterator I = MBB->begin();
|
|
|
|
while (I != MBB->end()) {
|
|
|
|
if (I->getOpcode() != TargetInstrInfo::IMPLICIT_DEF)
|
|
|
|
break;
|
|
|
|
unsigned Reg = I->getOperand(0).getReg();
|
|
|
|
ImpDefRegs.insert(Reg);
|
|
|
|
for (const unsigned *SubRegs = RegInfo->getSubRegisters(Reg);
|
|
|
|
unsigned SubReg = *SubRegs; ++SubRegs)
|
|
|
|
ImpDefRegs.insert(SubReg);
|
|
|
|
++I;
|
|
|
|
}
|
|
|
|
if (ImpDefRegs.empty())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
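// Everything after the implicit_defs must be unpredicated terminators that
// do not read any of the implicitly defined registers; otherwise the
// implicit_defs are still needed and we bail out.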
MachineBasicBlock::iterator FirstTerm = I;
|
|
|
|
while (I != MBB->end()) {
|
|
|
|
if (!TII->isUnpredicatedTerminator(I))
|
|
|
|
return false;
|
|
|
|
// See if it uses any of the implicitly defined registers.
|
|
|
|
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
|
|
|
|
MachineOperand &MO = I->getOperand(i);
|
2008-10-03 17:45:36 +02:00
|
|
|
if (!MO.isReg() || !MO.isUse())
|
2008-04-10 04:32:10 +02:00
|
|
|
continue;
|
|
|
|
unsigned Reg = MO.getReg();
|
|
|
|
if (ImpDefRegs.count(Reg))
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
++I;
|
|
|
|
}
|
|
|
|
|
|
|
|
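// The implicit_defs are unused by the terminators; erase them.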
I = MBB->begin();
|
|
|
|
while (I != FirstTerm) {
|
|
|
|
MachineInstr *ImpDefMI = &*I;
|
|
|
|
++I;
|
|
|
|
MBB->erase(ImpDefMI);
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2004-07-31 12:01:27 +02:00
|
|
|
bool BranchFolder::runOnMachineFunction(MachineFunction &MF) {
|
2006-10-14 02:21:48 +02:00
|
|
|
TII = MF.getTarget().getInstrInfo();
|
|
|
|
if (!TII) return false;
|
|
|
|
|
2008-04-10 04:32:10 +02:00
|
|
|
RegInfo = MF.getTarget().getRegisterInfo();
|
|
|
|
|
2007-05-15 23:19:17 +02:00
|
|
|
// Fix CFG. The later algorithms expect it to be right.
|
|
|
|
bool EverMadeChange = false;
|
|
|
|
for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; I++) {
|
|
|
|
MachineBasicBlock *MBB = I, *TBB = 0, *FBB = 0;
|
2008-08-15 00:49:33 +02:00
|
|
|
SmallVector<MachineOperand, 4> Cond;
|
2009-02-09 08:14:22 +01:00
|
|
|
if (!TII->AnalyzeBranch(*MBB, TBB, FBB, Cond, true))
|
2007-06-19 00:43:58 +02:00
|
|
|
EverMadeChange |= MBB->CorrectExtraCFGEdges(TBB, FBB, !Cond.empty());
|
2008-04-10 04:32:10 +02:00
|
|
|
EverMadeChange |= OptimizeImpDefsBlock(MBB);
|
2007-05-15 23:19:17 +02:00
|
|
|
}
|
|
|
|
|
2007-03-20 22:35:06 +01:00
|
|
|
RS = RegInfo->requiresRegisterScavenging(MF) ? new RegScavenger() : NULL;
|
|
|
|
|
2009-01-28 14:14:17 +01:00
|
|
|
MMI = getAnalysisIfAvailable<MachineModuleInfo>();
|
2007-05-10 03:01:49 +02:00
|
|
|
|
2006-10-21 02:47:49 +02:00
|
|
|
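// Iterate tail merging and branch optimization to a fixed point; each of
// the two transformations can expose new opportunities for the other.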
bool MadeChangeThisIteration = true;
|
|
|
|
while (MadeChangeThisIteration) {
|
|
|
|
MadeChangeThisIteration = false;
|
|
|
|
MadeChangeThisIteration |= TailMergeBlocks(MF);
|
|
|
|
MadeChangeThisIteration |= OptimizeBranches(MF);
|
|
|
|
EverMadeChange |= MadeChangeThisIteration;
|
|
|
|
}
|
|
|
|
|
2006-10-28 20:34:47 +02:00
|
|
|
// See if any jump tables have become mergeable or dead as the code generator
|
|
|
|
// did its thing.
|
|
|
|
MachineJumpTableInfo *JTI = MF.getJumpTableInfo();
|
|
|
|
const std::vector<MachineJumpTableEntry> &JTs = JTI->getJumpTables();
|
|
|
|
if (!JTs.empty()) {
|
|
|
|
// Figure out how these jump tables should be merged.
|
|
|
|
std::vector<unsigned> JTMapping;
|
|
|
|
JTMapping.reserve(JTs.size());
|
|
|
|
|
|
|
|
// We always keep the 0th jump table.
|
|
|
|
JTMapping.push_back(0);
|
|
|
|
|
|
|
|
// Scan the jump tables, seeing if there are any duplicates. Note that this
|
|
|
|
// is N^2, which should be fixed someday.
|
|
|
|
for (unsigned i = 1, e = JTs.size(); i != e; ++i)
|
|
|
|
JTMapping.push_back(JTI->getJumpTableIndex(JTs[i].MBBs));
|
|
|
|
|
|
|
|
// If a jump table was merged with another one, walk the function rewriting
|
|
|
|
// references to jump tables to reference the new JT ID's. Keep track of
|
|
|
|
// whether we see a jump table idx; if not, we can delete the JT.
|
2008-05-10 01:28:24 +02:00
|
|
|
BitVector JTIsLive(JTs.size());
|
2006-10-28 20:34:47 +02:00
|
|
|
for (MachineFunction::iterator BB = MF.begin(), E = MF.end();
|
|
|
|
BB != E; ++BB) {
|
|
|
|
for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end();
|
|
|
|
I != E; ++I)
|
|
|
|
for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op) {
|
|
|
|
MachineOperand &Op = I->getOperand(op);
|
2008-10-03 17:45:36 +02:00
|
|
|
if (!Op.isJTI()) continue;
|
2007-12-31 00:10:15 +01:00
|
|
|
unsigned NewIdx = JTMapping[Op.getIndex()];
|
|
|
|
Op.setIndex(NewIdx);
|
2006-10-28 20:34:47 +02:00
|
|
|
|
|
|
|
// Remember that this JT is live.
|
2008-05-10 01:28:24 +02:00
|
|
|
JTIsLive.set(NewIdx);
|
2006-10-28 20:34:47 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Finally, remove dead jump tables. This happens either because the
|
|
|
|
// indirect jump was unreachable (and thus deleted) or because the jump
|
|
|
|
// table was merged with some other one.
|
|
|
|
for (unsigned i = 0, e = JTIsLive.size(); i != e; ++i)
|
2008-05-10 01:28:24 +02:00
|
|
|
if (!JTIsLive.test(i)) {
|
2006-10-28 20:34:47 +02:00
|
|
|
JTI->RemoveJumpTable(i);
|
|
|
|
EverMadeChange = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-03-20 22:35:06 +01:00
|
|
|
delete RS;
|
2006-10-21 02:47:49 +02:00
|
|
|
return EverMadeChange;
|
|
|
|
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Tail Merging of Blocks
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// HashMachineInstr - Compute a hash value for MI and its operands.
|
|
|
|
static unsigned HashMachineInstr(const MachineInstr *MI) {
|
|
|
|
unsigned Hash = MI->getOpcode();
|
|
|
|
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
|
|
|
|
const MachineOperand &Op = MI->getOperand(i);
|
|
|
|
|
|
|
|
// Merge in bits from the operand if easy.
|
|
|
|
unsigned OperandHash = 0;
|
|
|
|
switch (Op.getType()) {
|
|
|
|
case MachineOperand::MO_Register: OperandHash = Op.getReg(); break;
|
|
|
|
case MachineOperand::MO_Immediate: OperandHash = Op.getImm(); break;
|
|
|
|
case MachineOperand::MO_MachineBasicBlock:
|
2007-12-31 00:10:15 +01:00
|
|
|
OperandHash = Op.getMBB()->getNumber();
|
2006-10-21 02:47:49 +02:00
|
|
|
break;
|
2007-12-31 00:10:15 +01:00
|
|
|
case MachineOperand::MO_FrameIndex:
|
2006-10-21 02:47:49 +02:00
|
|
|
case MachineOperand::MO_ConstantPoolIndex:
|
|
|
|
case MachineOperand::MO_JumpTableIndex:
|
2007-12-31 00:10:15 +01:00
|
|
|
OperandHash = Op.getIndex();
|
2006-10-21 02:47:49 +02:00
|
|
|
break;
|
|
|
|
case MachineOperand::MO_GlobalAddress:
|
|
|
|
case MachineOperand::MO_ExternalSymbol:
|
|
|
|
// Global address / external symbol are too hard, don't bother, but do
|
|
|
|
// pull in the offset.
|
|
|
|
OperandHash = Op.getOffset();
|
|
|
|
break;
|
|
|
|
default: break;
|
|
|
|
}
|
|
|
|
|
|
|
|
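// Fold the operand's hash and type into the instruction hash, shifting by
// the operand index so that operand order affects the result.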
Hash += ((OperandHash << 3) | Op.getType()) << (i&31);
|
|
|
|
}
|
|
|
|
return Hash;
|
|
|
|
}
|
|
|
|
|
2007-05-23 23:07:20 +02:00
|
|
|
/// HashEndOfMBB - Hash the last few instructions in the MBB. For blocks
|
|
|
|
/// with no successors, we hash two instructions, because cross-jumping
|
|
|
|
/// only saves code when at least two instructions are removed (since a
|
|
|
|
/// branch must be inserted). For blocks with a successor, one of the
|
|
|
|
/// two blocks to be tail-merged will end with a branch already, so
|
|
|
|
/// it is profitable to cross-jump even for a single instruction.
|
|
|
|
|
|
|
|
static unsigned HashEndOfMBB(const MachineBasicBlock *MBB,
|
|
|
|
unsigned minCommonTailLength) {
|
2006-10-21 02:47:49 +02:00
|
|
|
MachineBasicBlock::const_iterator I = MBB->end();
|
|
|
|
if (I == MBB->begin())
|
|
|
|
return 0; // Empty MBB.
|
|
|
|
|
|
|
|
--I;
|
|
|
|
unsigned Hash = HashMachineInstr(I);
|
|
|
|
|
2007-05-23 23:07:20 +02:00
|
|
|
if (I == MBB->begin() || minCommonTailLength == 1)
|
2006-10-21 02:47:49 +02:00
|
|
|
return Hash; // Single instr MBB.
|
|
|
|
|
|
|
|
--I;
|
|
|
|
// Hash in the second-to-last instruction.
|
|
|
|
Hash ^= HashMachineInstr(I) << 2;
|
|
|
|
return Hash;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// ComputeCommonTailLength - Given two machine basic blocks, compute the number
|
|
|
|
/// of instructions they actually have in common at their end.  Return
|
|
|
|
/// iterators for the first shared instruction in each block.
|
|
|
|
static unsigned ComputeCommonTailLength(MachineBasicBlock *MBB1,
|
|
|
|
MachineBasicBlock *MBB2,
|
|
|
|
MachineBasicBlock::iterator &I1,
|
|
|
|
MachineBasicBlock::iterator &I2) {
|
|
|
|
I1 = MBB1->end();
|
|
|
|
I2 = MBB2->end();
|
|
|
|
|
|
|
|
unsigned TailLen = 0;
|
|
|
|
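// Walk backwards from the end of both blocks, counting how many
// instructions are pairwise identical.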
while (I1 != MBB1->begin() && I2 != MBB2->begin()) {
|
|
|
|
--I1; --I2;
|
2007-10-19 23:09:55 +02:00
|
|
|
if (!I1->isIdenticalTo(I2) ||
|
2007-10-25 21:49:32 +02:00
|
|
|
// FIXME: This check is dubious. It's used to get around a problem where
|
2007-10-25 20:23:45 +02:00
|
|
|
// people incorrectly expect inline asm directives to remain in the same
|
|
|
|
// relative order. This is untenable because normal compiler
|
|
|
|
// optimizations (like this one) may reorder and/or merge these
|
|
|
|
// directives.
|
2007-10-19 23:09:55 +02:00
|
|
|
I1->getOpcode() == TargetInstrInfo::INLINEASM) {
|
2006-10-21 02:47:49 +02:00
|
|
|
++I1; ++I2;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
++TailLen;
|
|
|
|
}
|
|
|
|
return TailLen;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
|
2006-10-21 07:08:28 +02:00
|
|
|
/// after it, replacing it with an unconditional branch to NewDest.  Note
|
|
|
|
/// that OldInst's block is the block that gets modified, not NewDest.
|
2006-10-21 02:47:49 +02:00
|
|
|
void BranchFolder::ReplaceTailWithBranchTo(MachineBasicBlock::iterator OldInst,
|
|
|
|
MachineBasicBlock *NewDest) {
|
|
|
|
MachineBasicBlock *OldBB = OldInst->getParent();
|
|
|
|
|
|
|
|
// Remove all the old successors of OldBB from the CFG.
|
|
|
|
while (!OldBB->succ_empty())
|
|
|
|
OldBB->removeSuccessor(OldBB->succ_begin());
|
|
|
|
|
|
|
|
// Remove all the dead instructions from the end of OldBB.
|
|
|
|
OldBB->erase(OldInst, OldBB->end());
|
|
|
|
|
2006-10-21 07:08:28 +02:00
|
|
|
// If OldBB isn't immediately before NewDest, insert a branch to it.
|
|
|
|
if (++MachineFunction::iterator(OldBB) != MachineFunction::iterator(NewDest))
|
2008-08-22 18:07:55 +02:00
|
|
|
TII->InsertBranch(*OldBB, NewDest, 0, SmallVector<MachineOperand, 0>());
|
2006-10-21 02:47:49 +02:00
|
|
|
OldBB->addSuccessor(NewDest);
|
|
|
|
++NumTailMerge;
|
|
|
|
}
|
|
|
|
|
2006-11-01 02:16:12 +01:00
|
|
|
/// SplitMBBAt - Given a machine basic block and an iterator into it, split the
|
|
|
|
/// MBB so that the part before the iterator falls through into the part starting at the
|
|
|
|
/// iterator. This returns the new MBB.
|
|
|
|
MachineBasicBlock *BranchFolder::SplitMBBAt(MachineBasicBlock &CurMBB,
|
|
|
|
MachineBasicBlock::iterator BBI1) {
|
2008-07-08 01:14:23 +02:00
|
|
|
MachineFunction &MF = *CurMBB.getParent();
|
|
|
|
|
2006-11-01 02:16:12 +01:00
|
|
|
// Create the fall-through block.
|
|
|
|
MachineFunction::iterator MBBI = &CurMBB;
|
2008-07-08 01:14:23 +02:00
|
|
|
MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(CurMBB.getBasicBlock());
|
|
|
|
CurMBB.getParent()->insert(++MBBI, NewMBB);
|
2006-11-01 02:16:12 +01:00
|
|
|
|
|
|
|
// Move all the successors of this block to the specified block.
|
2008-06-19 19:22:29 +02:00
|
|
|
NewMBB->transferSuccessors(&CurMBB);
|
2006-11-01 02:16:12 +01:00
|
|
|
|
|
|
|
// Add an edge from CurMBB to NewMBB for the fall-through.
|
|
|
|
CurMBB.addSuccessor(NewMBB);
|
|
|
|
|
|
|
|
// Splice the code over.
|
|
|
|
NewMBB->splice(NewMBB->end(), &CurMBB, BBI1, CurMBB.end());
|
2007-03-20 22:35:06 +01:00
|
|
|
|
|
|
|
// For targets that use the register scavenger, we must maintain LiveIns.
|
|
|
|
if (RS) {
|
|
|
|
RS->enterBasicBlock(&CurMBB);
|
|
|
|
if (!CurMBB.empty())
|
|
|
|
RS->forward(prior(CurMBB.end()));
|
|
|
|
BitVector RegsLiveAtExit(RegInfo->getNumRegs());
|
|
|
|
RS->getRegsUsed(RegsLiveAtExit, false);
|
|
|
|
for (unsigned int i=0, e=RegInfo->getNumRegs(); i!=e; i++)
|
|
|
|
if (RegsLiveAtExit[i])
|
|
|
|
NewMBB->addLiveIn(i);
|
|
|
|
}
|
|
|
|
|
2006-11-01 02:16:12 +01:00
|
|
|
return NewMBB;
|
|
|
|
}
|
|
|
|
|
2006-11-01 20:36:29 +01:00
|
|
|
/// EstimateRuntime - Make a rough estimate for how long it will take to run
|
|
|
|
/// the specified code.
|
|
|
|
static unsigned EstimateRuntime(MachineBasicBlock::iterator I,
|
2008-01-07 02:56:04 +01:00
|
|
|
MachineBasicBlock::iterator E) {
|
2006-11-01 20:36:29 +01:00
|
|
|
unsigned Time = 0;
|
|
|
|
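// Crude cost model: calls count as 10, loads/stores as 2, and every other
// instruction as 1.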
for (; I != E; ++I) {
|
2008-01-07 08:27:27 +01:00
|
|
|
const TargetInstrDesc &TID = I->getDesc();
|
|
|
|
if (TID.isCall())
|
2006-11-01 20:36:29 +01:00
|
|
|
Time += 10;
|
2008-12-03 03:30:17 +01:00
|
|
|
else if (TID.mayLoad() || TID.mayStore())
|
2006-11-01 20:36:29 +01:00
|
|
|
Time += 2;
|
|
|
|
else
|
|
|
|
++Time;
|
|
|
|
}
|
|
|
|
return Time;
|
|
|
|
}
|
|
|
|
|
2007-05-10 03:01:49 +02:00
|
|
|
// CurMBB needs to add an unconditional branch to SuccBB (we removed these
|
|
|
|
// branches temporarily for tail merging). In the case where CurMBB ends
|
|
|
|
// with a conditional branch to the next block, optimize by reversing the
|
|
|
|
// test and conditionally branching to SuccBB instead.
|
|
|
|
|
|
|
|
static void FixTail(MachineBasicBlock* CurMBB, MachineBasicBlock *SuccBB,
|
|
|
|
const TargetInstrInfo *TII) {
|
|
|
|
MachineFunction *MF = CurMBB->getParent();
|
|
|
|
MachineFunction::iterator I = next(MachineFunction::iterator(CurMBB));
|
|
|
|
MachineBasicBlock *TBB = 0, *FBB = 0;
|
2008-08-15 00:49:33 +02:00
|
|
|
SmallVector<MachineOperand, 4> Cond;
|
2007-05-10 03:01:49 +02:00
|
|
|
if (I != MF->end() &&
|
2009-02-09 08:14:22 +01:00
|
|
|
!TII->AnalyzeBranch(*CurMBB, TBB, FBB, Cond, true)) {
|
2007-05-10 03:01:49 +02:00
|
|
|
MachineBasicBlock *NextBB = I;
|
2008-05-10 01:28:24 +02:00
|
|
|
if (TBB == NextBB && !Cond.empty() && !FBB) {
|
2007-05-10 03:01:49 +02:00
|
|
|
if (!TII->ReverseBranchCondition(Cond)) {
|
|
|
|
TII->RemoveBranch(*CurMBB);
|
|
|
|
TII->InsertBranch(*CurMBB, SuccBB, NULL, Cond);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2008-08-22 18:07:55 +02:00
|
|
|
TII->InsertBranch(*CurMBB, SuccBB, NULL, SmallVector<MachineOperand, 0>());
|
2007-05-10 03:01:49 +02:00
|
|
|
}
|
|
|
|
|
2007-05-30 02:32:01 +02:00
|
|
|
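// MergeCompare - Comparison function for sorting MergePotentials: order by
// hash value first, then by block number so the result is deterministic
// and forms a strict weak ordering.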
static bool MergeCompare(const std::pair<unsigned,MachineBasicBlock*> &p,
|
|
|
|
const std::pair<unsigned,MachineBasicBlock*> &q) {
|
2007-05-30 01:47:50 +02:00
|
|
|
if (p.first < q.first)
|
|
|
|
return true;
|
|
|
|
else if (p.first > q.first)
|
|
|
|
return false;
|
|
|
|
else if (p.second->getNumber() < q.second->getNumber())
|
|
|
|
return true;
|
|
|
|
else if (p.second->getNumber() > q.second->getNumber())
|
|
|
|
return false;
|
2007-07-11 00:00:30 +02:00
|
|
|
else {
|
2007-07-11 10:47:55 +02:00
|
|
|
// _GLIBCXX_DEBUG checks strict weak ordering, which involves comparing
|
|
|
|
// an object with itself.
|
|
|
|
#ifndef _GLIBCXX_DEBUG
|
2007-05-30 01:47:50 +02:00
|
|
|
assert(0 && "Predecessor appears twice");
|
2007-07-11 00:00:30 +02:00
|
|
|
#endif
|
2009-01-08 23:19:34 +01:00
|
|
|
return false;
|
2007-07-11 00:00:30 +02:00
|
|
|
}
|
2007-05-30 01:47:50 +02:00
|
|
|
}
|
|
|
|
|
2008-05-10 01:28:24 +02:00
|
|
|
/// ComputeSameTails - Look through all the blocks in MergePotentials that have
|
|
|
|
/// hash CurHash (guaranteed to match the last element). Build the vector
|
|
|
|
/// SameTails of all those that have the (same) largest number of instructions
|
|
|
|
/// in common of any pair of these blocks. SameTails entries contain an
|
|
|
|
/// iterator into MergePotentials (from which the MachineBasicBlock can be
|
|
|
|
/// found) and a MachineBasicBlock::iterator into that MBB indicating the
|
|
|
|
/// instruction where the matching code sequence begins.
|
|
|
|
/// Order of elements in SameTails is the reverse of the order in which
|
|
|
|
/// those blocks appear in MergePotentials (where they are not necessarily
|
|
|
|
/// consecutive).
|
|
|
|
unsigned BranchFolder::ComputeSameTails(unsigned CurHash,
|
|
|
|
unsigned minCommonTailLength) {
|
|
|
|
unsigned maxCommonTailLength = 0U;
|
|
|
|
SameTails.clear();
|
|
|
|
MachineBasicBlock::iterator TrialBBI1, TrialBBI2;
|
|
|
|
MPIterator HighestMPIter = prior(MergePotentials.end());
|
|
|
|
for (MPIterator CurMPIter = prior(MergePotentials.end()),
|
|
|
|
B = MergePotentials.begin();
|
|
|
|
CurMPIter!=B && CurMPIter->first==CurHash;
|
|
|
|
--CurMPIter) {
|
|
|
|
for (MPIterator I = prior(CurMPIter); I->first==CurHash ; --I) {
|
|
|
|
unsigned CommonTailLen = ComputeCommonTailLength(
|
|
|
|
CurMPIter->second,
|
|
|
|
I->second,
|
|
|
|
TrialBBI1, TrialBBI2);
|
2008-05-13 00:53:12 +02:00
|
|
|
// If we will have to split a block, there should be at least
|
|
|
|
// minCommonTailLength instructions in common; if not, at worst
|
|
|
|
// we will be replacing a fallthrough into the common tail with a
|
|
|
|
// branch, which at worst breaks even with falling through into
|
|
|
|
// the duplicated common tail, so 1 instruction in common is enough.
|
|
|
|
// We will always pick a block we do not have to split as the common
|
|
|
|
// tail if there is one.
|
|
|
|
// (Empty blocks will get forwarded and need not be considered.)
|
|
|
|
if (CommonTailLen >= minCommonTailLength ||
|
|
|
|
(CommonTailLen > 0 &&
|
|
|
|
(TrialBBI1==CurMPIter->second->begin() ||
|
|
|
|
TrialBBI2==I->second->begin()))) {
|
2008-05-10 01:28:24 +02:00
|
|
|
if (CommonTailLen > maxCommonTailLength) {
|
|
|
|
SameTails.clear();
|
|
|
|
maxCommonTailLength = CommonTailLen;
|
|
|
|
HighestMPIter = CurMPIter;
|
|
|
|
SameTails.push_back(std::make_pair(CurMPIter, TrialBBI1));
|
|
|
|
}
|
|
|
|
if (HighestMPIter == CurMPIter &&
|
|
|
|
CommonTailLen == maxCommonTailLength)
|
|
|
|
SameTails.push_back(std::make_pair(I, TrialBBI2));
|
|
|
|
}
|
|
|
|
if (I==B)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return maxCommonTailLength;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// RemoveBlocksWithHash - Remove all blocks with hash CurHash from
|
|
|
|
/// MergePotentials, restoring branches at ends of blocks as appropriate.
|
|
|
|
void BranchFolder::RemoveBlocksWithHash(unsigned CurHash,
|
|
|
|
MachineBasicBlock* SuccBB,
|
|
|
|
MachineBasicBlock* PredBB) {
|
2008-05-23 19:19:02 +02:00
|
|
|
MPIterator CurMPIter, B;
|
|
|
|
for (CurMPIter = prior(MergePotentials.end()), B = MergePotentials.begin();
|
2008-05-10 01:28:24 +02:00
|
|
|
CurMPIter->first==CurHash;
|
|
|
|
--CurMPIter) {
|
|
|
|
// Put the unconditional branch back, if we need one.
|
|
|
|
MachineBasicBlock *CurMBB = CurMPIter->second;
|
|
|
|
if (SuccBB && CurMBB != PredBB)
|
|
|
|
FixTail(CurMBB, SuccBB, TII);
|
2008-05-23 19:19:02 +02:00
|
|
|
if (CurMPIter==B)
|
2008-05-10 01:28:24 +02:00
|
|
|
break;
|
|
|
|
}
|
2008-05-23 19:19:02 +02:00
|
|
|
if (CurMPIter->first!=CurHash)
|
|
|
|
CurMPIter++;
|
|
|
|
MergePotentials.erase(CurMPIter, MergePotentials.end());
|
2008-05-10 01:28:24 +02:00
|
|
|
}
|
|
|
|
|
2008-05-12 22:33:57 +02:00
|
|
|
/// CreateCommonTailOnlyBlock - None of the blocks to be tail-merged consist
|
|
|
|
/// only of the common tail. Create a block that does by splitting one.
|
|
|
|
unsigned BranchFolder::CreateCommonTailOnlyBlock(MachineBasicBlock *&PredBB,
|
|
|
|
unsigned maxCommonTailLength) {
|
|
|
|
unsigned i, commonTailIndex;
|
|
|
|
unsigned TimeEstimate = ~0U;
|
|
|
|
for (i=0, commonTailIndex=0; i<SameTails.size(); i++) {
|
|
|
|
// Use PredBB if possible; that doesn't require a new branch.
|
|
|
|
if (SameTails[i].first->second==PredBB) {
|
|
|
|
commonTailIndex = i;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
// Otherwise, make a (fairly bogus) choice based on estimate of
|
|
|
|
// how long it will take the various blocks to execute.
|
|
|
|
unsigned t = EstimateRuntime(SameTails[i].first->second->begin(),
|
|
|
|
SameTails[i].second);
|
|
|
|
if (t<=TimeEstimate) {
|
|
|
|
TimeEstimate = t;
|
|
|
|
commonTailIndex = i;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
MachineBasicBlock::iterator BBI = SameTails[commonTailIndex].second;
|
|
|
|
MachineBasicBlock *MBB = SameTails[commonTailIndex].first->second;
|
|
|
|
|
|
|
|
DOUT << "\nSplitting " << MBB->getNumber() << ", size " <<
|
|
|
|
maxCommonTailLength;
|
|
|
|
|
|
|
|
MachineBasicBlock *newMBB = SplitMBBAt(*MBB, BBI);
|
|
|
|
SameTails[commonTailIndex].first->second = newMBB;
|
|
|
|
SameTails[commonTailIndex].second = newMBB->begin();
|
|
|
|
// If we split PredBB, newMBB is the new predecessor.
|
|
|
|
if (PredBB==MBB)
|
|
|
|
PredBB = newMBB;
|
|
|
|
|
|
|
|
return commonTailIndex;
|
|
|
|
}
|
|
|
|
|
2007-05-07 22:57:21 +02:00
|
|
|
// See if any of the blocks in MergePotentials (which all have a common single
|
|
|
|
// successor, or all have no successor) can be tail-merged. If there is a
|
|
|
|
// successor, any blocks in MergePotentials that are not tail-merged and
|
|
|
|
// are not immediately before Succ must have an unconditional branch to
|
|
|
|
// Succ added (but the predecessor/successor lists need no adjustment).
|
|
|
|
// The lone predecessor of Succ that falls through into Succ,
|
|
|
|
// if any, is given in PredBB.
|
|
|
|
|
|
|
|
bool BranchFolder::TryMergeBlocks(MachineBasicBlock *SuccBB,
|
|
|
|
MachineBasicBlock* PredBB) {
|
2008-02-19 03:09:37 +01:00
|
|
|
// It doesn't make sense to save a single instruction since tail merging
|
|
|
|
// will add a jump.
|
|
|
|
// FIXME: Ask the target to provide the threshold?
|
|
|
|
unsigned minCommonTailLength = (SuccBB ? 1 : 2) + 1;
|
2006-10-21 02:47:49 +02:00
|
|
|
MadeChange = false;
|
|
|
|
|
2008-12-11 00:24:43 +01:00
|
|
|
DOUT << "\nTryMergeBlocks " << MergePotentials.size() << '\n';
|
2008-05-10 01:28:24 +02:00
|
|
|
|
2006-10-21 02:47:49 +02:00
|
|
|
// Sort by hash value so that blocks with identical end sequences sort
|
|
|
|
// together.
|
2008-05-10 01:28:24 +02:00
|
|
|
std::stable_sort(MergePotentials.begin(), MergePotentials.end(),MergeCompare);
|
2006-10-21 02:47:49 +02:00
|
|
|
|
|
|
|
// Walk through equivalence sets looking for actual exact matches.
|
|
|
|
while (MergePotentials.size() > 1) {
|
2008-05-09 23:24:35 +02:00
|
|
|
unsigned CurHash = prior(MergePotentials.end())->first;
|
2006-10-14 02:21:48 +02:00
|
|
|
|
2008-05-10 01:28:24 +02:00
|
|
|
// Build SameTails, identifying the set of blocks with this hash code
|
|
|
|
// and with the maximum number of instructions in common.
|
|
|
|
unsigned maxCommonTailLength = ComputeSameTails(CurHash,
|
|
|
|
minCommonTailLength);
|
2007-05-23 23:07:20 +02:00
|
|
|
|
2007-06-02 01:02:45 +02:00
|
|
|
// If we didn't find any pair that has at least minCommonTailLength
|
2008-05-09 23:24:35 +02:00
|
|
|
// instructions in common, remove all blocks with this hash code and retry.
|
|
|
|
if (SameTails.empty()) {
|
2008-05-10 01:28:24 +02:00
|
|
|
RemoveBlocksWithHash(CurHash, SuccBB, PredBB);
|
2007-05-23 23:07:20 +02:00
|
|
|
continue;
|
|
|
|
}
|
2006-11-01 02:16:12 +01:00
|
|
|
|
2008-05-09 23:24:35 +02:00
|
|
|
// If one of the blocks is the entire common tail (and not the entry
|
2008-05-12 22:33:57 +02:00
|
|
|
// block, which we can't jump to), we can treat all blocks with this same
|
|
|
|
// tail at once. Use PredBB if that is one of the possibilities, as that
|
|
|
|
// will not introduce any extra branches.
|
|
|
|
MachineBasicBlock *EntryBB = MergePotentials.begin()->second->
|
|
|
|
getParent()->begin();
|
|
|
|
unsigned int commonTailIndex, i;
|
|
|
|
for (commonTailIndex=SameTails.size(), i=0; i<SameTails.size(); i++) {
|
2008-05-09 23:24:35 +02:00
|
|
|
MachineBasicBlock *MBB = SameTails[i].first->second;
|
2008-05-12 22:33:57 +02:00
|
|
|
if (MBB->begin() == SameTails[i].second && MBB != EntryBB) {
|
|
|
|
commonTailIndex = i;
|
|
|
|
if (MBB==PredBB)
|
|
|
|
break;
|
2008-05-09 23:24:35 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-05-12 22:33:57 +02:00
|
|
|
if (commonTailIndex==SameTails.size()) {
|
|
|
|
// None of the blocks consist entirely of the common tail.
|
|
|
|
// Split a block so that one does.
|
|
|
|
commonTailIndex = CreateCommonTailOnlyBlock(PredBB, maxCommonTailLength);
|
2006-11-01 02:16:12 +01:00
|
|
|
}
|
2008-05-12 22:33:57 +02:00
|
|
|
|
|
|
|
MachineBasicBlock *MBB = SameTails[commonTailIndex].first->second;
|
|
|
|
// MBB is common tail. Adjust all other BB's to jump to this one.
|
|
|
|
// Traversal must be forwards so erases work.
|
|
|
|
DOUT << "\nUsing common tail " << MBB->getNumber() << " for ";
|
|
|
|
for (unsigned int i=0; i<SameTails.size(); ++i) {
|
|
|
|
if (commonTailIndex==i)
|
|
|
|
continue;
|
|
|
|
DOUT << SameTails[i].first->second->getNumber() << ",";
|
|
|
|
// Hack the end off BB i, making it jump to BB commonTailIndex instead.
|
|
|
|
ReplaceTailWithBranchTo(SameTails[i].second, MBB);
|
|
|
|
// BB i is no longer a predecessor of SuccBB; remove it from the worklist.
|
|
|
|
MergePotentials.erase(SameTails[i].first);
|
2006-10-21 02:47:49 +02:00
|
|
|
}
|
2008-05-12 22:33:57 +02:00
|
|
|
DOUT << "\n";
|
|
|
|
// We leave commonTailIndex in the worklist in case there are other blocks
|
|
|
|
// that match it with a smaller number of instructions.
|
2006-11-01 02:16:12 +01:00
|
|
|
MadeChange = true;
|
2004-07-31 12:01:27 +02:00
|
|
|
}
|
2006-10-21 02:47:49 +02:00
|
|
|
return MadeChange;
|
|
|
|
}
|
2004-07-31 12:01:27 +02:00
|
|
|
|
2007-05-07 22:57:21 +02:00
|
|
|
bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
|
2007-05-10 03:01:49 +02:00
|
|
|
|
2007-05-07 22:57:21 +02:00
|
|
|
if (!EnableTailMerge) return false;
|
2007-05-10 03:01:49 +02:00
|
|
|
|
|
|
|
MadeChange = false;
|
|
|
|
|
2007-05-07 22:57:21 +02:00
|
|
|
// First find blocks with no successors.
|
2007-05-10 03:01:49 +02:00
|
|
|
MergePotentials.clear();
|
2007-05-07 22:57:21 +02:00
|
|
|
for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
|
|
|
|
if (I->succ_empty())
|
2007-05-23 23:07:20 +02:00
|
|
|
MergePotentials.push_back(std::make_pair(HashEndOfMBB(I, 2U), I));
|
2007-05-07 22:57:21 +02:00
|
|
|
}
|
2007-05-10 03:01:49 +02:00
|
|
|
// See if we can do any tail merging on those.
|
2008-05-09 23:24:35 +02:00
|
|
|
if (MergePotentials.size() < TailMergeThreshold &&
|
|
|
|
MergePotentials.size() >= 2)
|
2007-06-08 02:34:27 +02:00
|
|
|
MadeChange |= TryMergeBlocks(NULL, NULL);
|
2007-05-07 22:57:21 +02:00
|
|
|
|
2007-05-10 03:01:49 +02:00
|
|
|
// Look at blocks (IBB) with multiple predecessors (PBB).
|
|
|
|
// We change each predecessor to a canonical form, by
|
|
|
|
// (1) temporarily removing any unconditional branch from the predecessor
|
|
|
|
// to IBB, and
|
|
|
|
// (2) alter conditional branches so they branch to the other block
|
|
|
|
// not IBB; this may require adding back an unconditional branch to IBB
|
|
|
|
// later, where there wasn't one coming in. E.g.
|
|
|
|
// Bcc IBB
|
|
|
|
// fallthrough to QBB
|
|
|
|
// here becomes
|
|
|
|
// Bncc QBB
|
|
|
|
// with a conceptual B to IBB after that, which never actually exists.
|
|
|
|
// With those changes, we see whether the predecessors' tails match,
|
|
|
|
// and merge them if so. We change things out of canonical form and
|
|
|
|
// back to the way they were later in the process. (OptimizeBranches
|
|
|
|
// would undo some of this, but we can't use it, because we'd get into
|
|
|
|
// a compile-time infinite loop repeatedly doing and undoing the same
|
|
|
|
// transformations.)
|
2007-05-07 22:57:21 +02:00
|
|
|
|
|
|
|
for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
|
2008-07-01 23:50:14 +02:00
|
|
|
if (I->pred_size() >= 2 && I->pred_size() < TailMergeThreshold) {
|
2007-05-07 22:57:21 +02:00
|
|
|
MachineBasicBlock *IBB = I;
|
|
|
|
MachineBasicBlock *PredBB = prior(I);
|
2007-05-10 03:01:49 +02:00
|
|
|
MergePotentials.clear();
|
2007-06-08 03:08:52 +02:00
|
|
|
for (MachineBasicBlock::pred_iterator P = I->pred_begin(),
|
|
|
|
E2 = I->pred_end();
|
2007-05-07 22:57:21 +02:00
|
|
|
P != E2; ++P) {
|
|
|
|
MachineBasicBlock* PBB = *P;
|
2007-05-10 03:01:49 +02:00
|
|
|
// Skip blocks that loop to themselves; we can't tail merge these.
|
|
|
|
if (PBB==IBB)
|
|
|
|
continue;
|
2007-05-07 22:57:21 +02:00
|
|
|
MachineBasicBlock *TBB = 0, *FBB = 0;
|
2008-08-15 00:49:33 +02:00
|
|
|
SmallVector<MachineOperand, 4> Cond;
|
2009-02-09 08:14:22 +01:00
|
|
|
if (!TII->AnalyzeBranch(*PBB, TBB, FBB, Cond, true)) {
|
2007-05-10 03:01:49 +02:00
|
|
|
// Failing case: IBB is the target of a cbr, and
|
|
|
|
// we cannot reverse the branch.
|
2008-08-15 00:49:33 +02:00
|
|
|
SmallVector<MachineOperand, 4> NewCond(Cond);
|
2008-05-10 01:28:24 +02:00
|
|
|
if (!Cond.empty() && TBB==IBB) {
|
2007-05-10 03:01:49 +02:00
|
|
|
if (TII->ReverseBranchCondition(NewCond))
|
|
|
|
continue;
|
|
|
|
// This is the QBB case described above
|
|
|
|
if (!FBB)
|
|
|
|
FBB = next(MachineFunction::iterator(PBB));
|
|
|
|
}
|
2007-06-05 01:52:54 +02:00
|
|
|
// Failing case: the only way IBB can be reached from PBB is via
|
|
|
|
// exception handling. Happens for landing pads. Would be nice
|
|
|
|
// to have a bit in the edge so we didn't have to do all this.
|
|
|
|
if (IBB->isLandingPad()) {
|
|
|
|
MachineFunction::iterator IP = PBB; IP++;
|
|
|
|
MachineBasicBlock* PredNextBB = NULL;
|
|
|
|
if (IP!=MF.end())
|
|
|
|
PredNextBB = IP;
|
|
|
|
if (TBB==NULL) {
|
|
|
|
if (IBB!=PredNextBB) // fallthrough
|
|
|
|
continue;
|
|
|
|
} else if (FBB) {
|
|
|
|
if (TBB!=IBB && FBB!=IBB) // cbr then ubr
|
|
|
|
continue;
|
2008-01-29 14:02:09 +01:00
|
|
|
} else if (Cond.empty()) {
|
2007-06-05 01:52:54 +02:00
|
|
|
if (TBB!=IBB) // ubr
|
|
|
|
continue;
|
|
|
|
} else {
|
|
|
|
if (TBB!=IBB && IBB!=PredNextBB) // cbr
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
2007-05-10 03:01:49 +02:00
|
|
|
// Remove the unconditional branch at the end, if any.
|
2008-05-10 01:28:24 +02:00
|
|
|
if (TBB && (Cond.empty() || FBB)) {
|
2007-05-07 22:57:21 +02:00
|
|
|
TII->RemoveBranch(*PBB);
|
2008-05-10 01:28:24 +02:00
|
|
|
if (!Cond.empty())
|
2007-05-07 22:57:21 +02:00
|
|
|
// reinsert conditional branch only, for now
|
2007-05-10 03:01:49 +02:00
|
|
|
TII->InsertBranch(*PBB, (TBB==IBB) ? FBB : TBB, 0, NewCond);
|
2007-05-07 22:57:21 +02:00
|
|
|
}
|
2007-05-23 23:07:20 +02:00
|
|
|
MergePotentials.push_back(std::make_pair(HashEndOfMBB(PBB, 1U), *P));
|
2007-05-07 22:57:21 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
if (MergePotentials.size() >= 2)
|
|
|
|
MadeChange |= TryMergeBlocks(I, PredBB);
|
|
|
|
// Reinsert an unconditional branch if needed.
|
2008-05-12 22:33:57 +02:00
|
|
|
// The 1 below can occur as a result of removing blocks in TryMergeBlocks.
|
2007-05-18 03:28:58 +02:00
|
|
|
PredBB = prior(I); // this may have been changed in TryMergeBlocks
|
2007-05-07 22:57:21 +02:00
|
|
|
if (MergePotentials.size()==1 &&
|
2008-05-12 22:33:57 +02:00
|
|
|
MergePotentials.begin()->second != PredBB)
|
|
|
|
FixTail(MergePotentials.begin()->second, I, TII);
|
2007-05-07 22:57:21 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return MadeChange;
|
|
|
|
}
|
2006-10-21 02:47:49 +02:00
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Branch Optimization
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
bool BranchFolder::OptimizeBranches(MachineFunction &MF) {
|
|
|
|
MadeChange = false;
|
|
|
|
|
2007-02-17 01:44:34 +01:00
|
|
|
// Make sure blocks are numbered in order
|
|
|
|
MF.RenumberBlocks();
|
|
|
|
|
2006-10-21 02:47:49 +02:00
|
|
|
for (MachineFunction::iterator I = ++MF.begin(), E = MF.end(); I != E; ) {
|
|
|
|
MachineBasicBlock *MBB = I++;
|
|
|
|
OptimizeBlock(MBB);
|
|
|
|
|
|
|
|
// If it is dead, remove it.
|
2007-02-22 17:39:03 +01:00
|
|
|
if (MBB->pred_empty()) {
|
2006-10-21 02:47:49 +02:00
|
|
|
RemoveDeadBlock(MBB);
|
|
|
|
MadeChange = true;
|
|
|
|
++NumDeadBlocks;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return MadeChange;
|
2004-07-31 12:01:27 +02:00
|
|
|
}
|
|
|
|
|
2006-10-21 02:47:49 +02:00
|
|
|
|
2006-10-29 22:05:41 +01:00
|
|
|
/// CanFallThrough - Return true if the specified block (with the specified
|
|
|
|
/// branch condition) can implicitly transfer control to the block after it by
|
|
|
|
/// falling off the end of it. This should return false if it can reach the
|
|
|
|
/// block after it, but it uses an explicit branch to do so (e.g. a table jump).
|
|
|
|
///
|
|
|
|
/// True is a conservative answer.
|
|
|
|
///
|
|
|
|
bool BranchFolder::CanFallThrough(MachineBasicBlock *CurBB,
|
|
|
|
bool BranchUnAnalyzable,
|
2008-05-12 22:33:57 +02:00
|
|
|
MachineBasicBlock *TBB,
|
|
|
|
MachineBasicBlock *FBB,
|
2008-08-15 00:49:33 +02:00
|
|
|
const SmallVectorImpl<MachineOperand> &Cond) {
|
2006-10-29 22:05:41 +01:00
|
|
|
MachineFunction::iterator Fallthrough = CurBB;
|
|
|
|
++Fallthrough;
|
|
|
|
// If FallthroughBlock is off the end of the function, it can't fall through.
|
|
|
|
if (Fallthrough == CurBB->getParent()->end())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// If FallthroughBlock isn't a successor of CurBB, no fallthrough is possible.
|
|
|
|
if (!CurBB->isSuccessor(Fallthrough))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// If we couldn't analyze the branch, assume it could fall through.
|
|
|
|
if (BranchUnAnalyzable) return true;
|
|
|
|
|
2006-10-24 03:12:32 +02:00
|
|
|
// If there is no branch, control always falls through.
|
|
|
|
if (TBB == 0) return true;
|
|
|
|
|
|
|
|
// If there is some explicit branch to the fallthrough block, it can obviously
|
|
|
|
// reach it, even though the branch should get folded to fall through implicitly.
|
2006-10-29 22:05:41 +01:00
|
|
|
if (MachineFunction::iterator(TBB) == Fallthrough ||
|
|
|
|
MachineFunction::iterator(FBB) == Fallthrough)
|
2006-10-24 03:12:32 +02:00
|
|
|
return true;
|
|
|
|
|
|
|
|
// If it's an unconditional branch to some block not the fall through, it
|
|
|
|
// doesn't fall through.
|
|
|
|
if (Cond.empty()) return false;
|
|
|
|
|
|
|
|
// Otherwise, if it is conditional and has no explicit false block, it falls
|
|
|
|
// through.
|
2006-10-26 00:21:37 +02:00
|
|
|
return FBB == 0;
|
2006-10-24 03:12:32 +02:00
|
|
|
}
|
|
|
|
|
2006-10-29 22:05:41 +01:00
|
|
|
/// CanFallThrough - Return true if the specified block can implicitly transfer
|
|
|
|
/// control to the block after it by falling off the end of it. This should
|
|
|
|
/// return false if it can reach the block after it, but it uses an explicit
|
|
|
|
/// branch to do so (e.g. a table jump).
|
|
|
|
///
|
|
|
|
/// True is a conservative answer.
|
|
|
|
///
|
|
|
|
bool BranchFolder::CanFallThrough(MachineBasicBlock *CurBB) {
|
|
|
|
MachineBasicBlock *TBB = 0, *FBB = 0;
|
2008-08-15 00:49:33 +02:00
|
|
|
SmallVector<MachineOperand, 4> Cond;
|
2009-02-09 08:14:22 +01:00
|
|
|
bool CurUnAnalyzable = TII->AnalyzeBranch(*CurBB, TBB, FBB, Cond, true);
|
2006-10-29 22:05:41 +01:00
|
|
|
return CanFallThrough(CurBB, CurUnAnalyzable, TBB, FBB, Cond);
|
|
|
|
}
|
|
|
|
|
2006-11-18 21:47:54 +01:00
|
|
|
/// IsBetterFallthrough - Return true if it would be clearly better to
|
|
|
|
/// fall-through to MBB1 than to fall through into MBB2. This has to return
|
|
|
|
/// a strict ordering; returning true for both (MBB1,MBB2) and (MBB2,MBB1) will
|
|
|
|
/// result in infinite loops.
|
|
|
|
static bool IsBetterFallthrough(MachineBasicBlock *MBB1,
|
2008-01-07 02:56:04 +01:00
|
|
|
MachineBasicBlock *MBB2) {
|
2006-11-18 22:30:35 +01:00
|
|
|
// Right now, we use a simple heuristic. If MBB2 ends with a call, and
|
|
|
|
// MBB1 doesn't, we prefer to fall through into MBB1. This allows us to
|
2006-11-18 21:47:54 +01:00
|
|
|
// optimize branches that branch to either a return block or an assert block
|
|
|
|
// into a fallthrough to the return.
|
|
|
|
if (MBB1->empty() || MBB2->empty()) return false;
|
2007-12-10 08:24:06 +01:00
|
|
|
|
|
|
|
// If there is a clear successor ordering, we make sure that one block
|
|
|
|
// will fall through to the next
|
|
|
|
if (MBB1->isSuccessor(MBB2)) return true;
|
|
|
|
if (MBB2->isSuccessor(MBB1)) return false;
|
2006-11-18 21:47:54 +01:00
|
|
|
|
|
|
|
MachineInstr *MBB1I = --MBB1->end();
|
|
|
|
MachineInstr *MBB2I = --MBB2->end();
|
2008-01-07 08:27:27 +01:00
|
|
|
return MBB2I->getDesc().isCall() && !MBB1I->getDesc().isCall();
|
2006-11-18 21:47:54 +01:00
|
|
|
}
|
|
|
|
|
2006-10-14 02:21:48 +02:00
|
|
|
/// OptimizeBlock - Analyze and optimize control flow related to the specified
|
|
|
|
/// block. This is never called on the entry block.
|
2006-10-24 03:12:32 +02:00
|
|
|
void BranchFolder::OptimizeBlock(MachineBasicBlock *MBB) {
|
|
|
|
MachineFunction::iterator FallThrough = MBB;
|
|
|
|
++FallThrough;
|
|
|
|
|
2006-10-13 22:43:10 +02:00
|
|
|
// If this block is empty, make everyone use its fall-through, not the block
|
2007-05-31 23:54:00 +02:00
|
|
|
// explicitly. Landing pads should not do this since the landing-pad table
|
|
|
|
// points to this block.
|
|
|
|
if (MBB->empty() && !MBB->isLandingPad()) {
|
2006-10-21 07:08:28 +02:00
|
|
|
// Dead block? Leave for cleanup later.
|
2007-02-22 17:39:03 +01:00
|
|
|
if (MBB->pred_empty()) return;
|
2006-10-14 02:21:48 +02:00
|
|
|
|
2006-10-17 19:13:52 +02:00
|
|
|
if (FallThrough == MBB->getParent()->end()) {
|
|
|
|
// TODO: Simplify preds to not branch here if possible!
|
|
|
|
} else {
|
|
|
|
// Rewrite all predecessors of the old block to go to the fallthrough
|
|
|
|
// instead.
|
2007-02-22 17:39:03 +01:00
|
|
|
while (!MBB->pred_empty()) {
|
2006-10-14 02:21:48 +02:00
|
|
|
MachineBasicBlock *Pred = *(MBB->pred_end()-1);
|
2007-06-04 08:44:01 +02:00
|
|
|
Pred->ReplaceUsesOfBlockWith(MBB, FallThrough);
|
2006-10-14 02:21:48 +02:00
|
|
|
}
|
2006-10-17 19:13:52 +02:00
|
|
|
|
|
|
|
// If MBB was the target of a jump table, update jump tables to go to the
|
|
|
|
// fallthrough instead.
|
2006-10-28 20:34:47 +02:00
|
|
|
MBB->getParent()->getJumpTableInfo()->
|
|
|
|
ReplaceMBBInJumpTables(MBB, FallThrough);
|
2006-10-14 02:21:48 +02:00
|
|
|
MadeChange = true;
|
2004-07-31 12:01:27 +02:00
|
|
|
}
|
2006-10-14 02:21:48 +02:00
|
|
|
return;
|
2004-07-31 12:01:27 +02:00
|
|
|
}
|
|
|
|
|
2006-10-14 02:21:48 +02:00
|
|
|
// Check to see if we can simplify the terminator of the block before this
|
|
|
|
// one.
|
2006-10-24 03:12:32 +02:00
|
|
|
MachineBasicBlock &PrevBB = *prior(MachineFunction::iterator(MBB));
|
2006-10-17 20:16:40 +02:00
|
|
|
|
2006-10-14 02:21:48 +02:00
|
|
|
MachineBasicBlock *PriorTBB = 0, *PriorFBB = 0;
|
2008-08-15 00:49:33 +02:00
|
|
|
SmallVector<MachineOperand, 4> PriorCond;
|
Make CanFallThrough more intelligent (so it can handle blocks with (e.g.) no
successors), and make island block movement more general.
This compiles CodeGen/X86/2006-04-27-ISelFoldingBug.ll to:
_loadAndRLEsource_no_exit_2E_1_label_2E_0:
subl $8, %esp
movl %esi, 4(%esp)
movl %ebx, (%esp)
movl 16(%esp), %eax
movl 12(%esp), %ecx
LBB1_3: #label.0
movl _last, %edx
movl %edx, %esi
incl %esi
movl %esi, _last
movl %ecx, %ebx
# TRUNCATE movb %bl, %bl
movl _block, %esi
movb %bl, 1(%esi,%edx)
cmpl %eax, _last
jge LBB1_2 #codeRepl5.exitStub
LBB1_4: #label.0
cmpl $257, %ecx
je LBB1_2 #codeRepl5.exitStub
LBB1_1: #label.0.no_exit.1_crit_edge.exitStub
movl $1, %eax
movl (%esp), %ebx
movl 4(%esp), %esi
addl $8, %esp
ret
LBB1_2: #codeRepl5.exitStub
xorl %eax, %eax
movl (%esp), %ebx
movl 4(%esp), %esi
addl $8, %esp
ret
instead of:
_loadAndRLEsource_no_exit_2E_1_label_2E_0:
subl $8, %esp
movl %esi, 4(%esp)
movl %ebx, (%esp)
movl 16(%esp), %eax
movl 12(%esp), %ecx
jmp LBB1_3 #label.0
LBB1_1: #label.0.no_exit.1_crit_edge.exitStub
movl $1, %eax
movl (%esp), %ebx
movl 4(%esp), %esi
addl $8, %esp
ret
LBB1_2: #codeRepl5.exitStub
xorl %eax, %eax
movl (%esp), %ebx
movl 4(%esp), %esi
addl $8, %esp
ret
LBB1_3: #label.0
movl _last, %edx
movl %edx, %esi
incl %esi
movl %esi, _last
movl %ecx, %ebx
# TRUNCATE movb %bl, %bl
movl _block, %esi
movb %bl, 1(%esi,%edx)
cmpl %eax, _last
jge LBB1_2 #codeRepl5.exitStub
LBB1_4: #label.0
cmpl $257, %ecx
jne LBB1_1 #label.0.no_exit.1_crit_edge.exitStub
jmp LBB1_2 #codeRepl5.exitStub
... which is a much better layout :)
llvm-svn: 31282
2006-10-29 22:05:41 +01:00
|
|
|
bool PriorUnAnalyzable =
|
2009-02-09 08:14:22 +01:00
|
|
|
TII->AnalyzeBranch(PrevBB, PriorTBB, PriorFBB, PriorCond, true);
|
2006-10-21 07:08:28 +02:00
|
|
|
if (!PriorUnAnalyzable) {
|
|
|
|
// If the CFG for the prior block has extra edges, remove them.
|
2007-06-19 00:43:58 +02:00
|
|
|
MadeChange |= PrevBB.CorrectExtraCFGEdges(PriorTBB, PriorFBB,
|
|
|
|
!PriorCond.empty());
|
2006-10-21 07:08:28 +02:00
|
|
|
|
2006-10-14 02:21:48 +02:00
|
|
|
// If the previous branch is conditional and both conditions go to the same
|
2006-10-21 07:43:30 +02:00
|
|
|
// destination, remove the branch, replacing it with an unconditional one or
|
|
|
|
// a fall-through.
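// Illustrative sketch (hypothetical operands, not from the source):
//   PrevBB: ... ; jcc X ; jmp X    -->   PrevBB: ... ; jmp X
// and even the "jmp X" is dropped when X is this block (PriorTBB == MBB),
// since PrevBB then simply falls through.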
|
2006-10-14 02:21:48 +02:00
|
|
|
if (PriorTBB && PriorTBB == PriorFBB) {
|
2006-10-21 07:08:28 +02:00
|
|
|
TII->RemoveBranch(PrevBB);
|
2006-10-14 02:21:48 +02:00
|
|
|
PriorCond.clear();
|
2006-10-24 03:12:32 +02:00
|
|
|
if (PriorTBB != MBB)
|
2006-10-21 07:08:28 +02:00
|
|
|
TII->InsertBranch(PrevBB, PriorTBB, 0, PriorCond);
|
2006-10-14 02:21:48 +02:00
|
|
|
MadeChange = true;
|
2006-10-21 02:47:49 +02:00
|
|
|
++NumBranchOpts;
|
2006-10-14 02:21:48 +02:00
|
|
|
return OptimizeBlock(MBB);
|
|
|
|
}
|
|
|
|
|
|
|
|
// If the previous branch *only* branches to *this* block (conditional or
|
|
|
|
// not) remove the branch.
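// Illustrative sketch: "PrevBB: ... ; jmp MBB" (or a conditional branch to
// MBB with no second destination) sitting directly above MBB can simply fall
// through, so the branch is deleted.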
|
2006-10-24 03:12:32 +02:00
|
|
|
if (PriorTBB == MBB && PriorFBB == 0) {
|
2006-10-21 07:08:28 +02:00
|
|
|
TII->RemoveBranch(PrevBB);
|
2006-10-14 02:21:48 +02:00
|
|
|
MadeChange = true;
|
2006-10-21 02:47:49 +02:00
|
|
|
++NumBranchOpts;
|
2006-10-14 02:21:48 +02:00
|
|
|
return OptimizeBlock(MBB);
|
|
|
|
}
|
2006-10-21 07:43:30 +02:00
|
|
|
|
|
|
|
// If the prior block branches somewhere else on the condition and here if
|
|
|
|
// the condition is false, remove the uncond second branch.
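// Illustrative sketch: "PrevBB: jcc X ; jmp MBB" directly above MBB becomes
// "PrevBB: jcc X", with the false path now falling through into MBB.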
|
2006-10-24 03:12:32 +02:00
|
|
|
if (PriorFBB == MBB) {
|
2006-10-21 07:43:30 +02:00
|
|
|
TII->RemoveBranch(PrevBB);
|
|
|
|
TII->InsertBranch(PrevBB, PriorTBB, 0, PriorCond);
|
|
|
|
MadeChange = true;
|
|
|
|
++NumBranchOpts;
|
|
|
|
return OptimizeBlock(MBB);
|
|
|
|
}
|
2006-10-21 07:54:00 +02:00
|
|
|
|
|
|
|
// If the prior block branches here on true and somewhere else on false, and
|
|
|
|
// if the branch condition is reversible, reverse the branch to create a
|
|
|
|
// fall-through.
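// Illustrative sketch: "PrevBB: jcc MBB ; jmp X" becomes "PrevBB: jncc X",
// letting the true path fall through into MBB (assuming the target can
// reverse the condition).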
|
2006-10-24 03:12:32 +02:00
|
|
|
if (PriorTBB == MBB) {
|
2008-08-15 00:49:33 +02:00
|
|
|
SmallVector<MachineOperand, 4> NewPriorCond(PriorCond);
|
2006-10-21 07:54:00 +02:00
|
|
|
if (!TII->ReverseBranchCondition(NewPriorCond)) {
|
|
|
|
TII->RemoveBranch(PrevBB);
|
|
|
|
TII->InsertBranch(PrevBB, PriorFBB, 0, NewPriorCond);
|
|
|
|
MadeChange = true;
|
|
|
|
++NumBranchOpts;
|
|
|
|
return OptimizeBlock(MBB);
|
|
|
|
}
|
|
|
|
}
|
2006-11-18 21:47:54 +01:00
|
|
|
|
2006-11-18 22:30:35 +01:00
|
|
|
// If this block doesn't fall through (e.g. it ends with an uncond branch or
|
|
|
|
// has no successors) and if the pred falls through into this block, and if
|
|
|
|
// it would otherwise fall through into the block after this, move this
|
|
|
|
// block to the end of the function.
|
|
|
|
//
|
2006-11-18 21:47:54 +01:00
|
|
|
// We consider it more likely that execution will stay in the function (e.g.
|
|
|
|
// due to loops) than it is to exit it. This helps for asserts in loops etc.,
|
|
|
|
// moving the assert block out of the loop body.
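// Illustrative sketch (hypothetical layout, not from the source): with blocks
// "PrevBB / MBB / PriorTBB" where PrevBB ends in "jcc PriorTBB" and falls
// into MBB, and MBB does not fall through, reversing the branch to
// "jncc MBB" and moving MBB to the end of the function lets PrevBB fall
// through into PriorTBB on the likely path.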
|
2006-11-18 22:30:35 +01:00
|
|
|
if (!PriorCond.empty() && PriorFBB == 0 &&
|
|
|
|
MachineFunction::iterator(PriorTBB) == FallThrough &&
|
|
|
|
!CanFallThrough(MBB)) {
|
2006-11-18 22:56:39 +01:00
|
|
|
bool DoTransform = true;
|
|
|
|
|
2006-11-18 21:47:54 +01:00
|
|
|
// We have to be careful that the succs of PredBB aren't both no-successor
|
|
|
|
// blocks. If neither have successors and if PredBB is the second from
|
|
|
|
// last block in the function, we'd just keep swapping the two blocks for
|
|
|
|
// last. Only do the swap if one is clearly better to fall through than
|
|
|
|
// the other.
|
2006-11-18 22:56:39 +01:00
|
|
|
if (FallThrough == --MBB->getParent()->end() &&
|
2008-01-07 02:56:04 +01:00
|
|
|
!IsBetterFallthrough(PriorTBB, MBB))
|
2006-11-18 22:56:39 +01:00
|
|
|
DoTransform = false;
|
|
|
|
|
|
|
|
// We don't want to do this transformation if we have control flow like:
|
|
|
|
// br cond BB2
|
|
|
|
// BB1:
|
|
|
|
// ..
|
|
|
|
// jmp BBX
|
|
|
|
// BB2:
|
|
|
|
// ..
|
|
|
|
// ret
|
|
|
|
//
|
|
|
|
// In this case, we could actually be moving the return block *into* a
|
|
|
|
// loop!
|
2006-11-18 23:25:39 +01:00
|
|
|
if (DoTransform && !MBB->succ_empty() &&
|
|
|
|
(!CanFallThrough(PriorTBB) || PriorTBB->empty()))
|
2006-11-18 22:56:39 +01:00
|
|
|
DoTransform = false;
|
|
|
|
|
2006-11-18 21:47:54 +01:00
|
|
|
|
2006-11-18 22:56:39 +01:00
|
|
|
if (DoTransform) {
|
2006-11-18 21:47:54 +01:00
|
|
|
// Reverse the branch so we will fall through on the previous true cond.
|
2008-08-15 00:49:33 +02:00
|
|
|
SmallVector<MachineOperand, 4> NewPriorCond(PriorCond);
|
2006-11-18 21:47:54 +01:00
|
|
|
if (!TII->ReverseBranchCondition(NewPriorCond)) {
|
2006-11-18 22:56:39 +01:00
|
|
|
DOUT << "\nMoving MBB: " << *MBB;
|
|
|
|
DOUT << "To make fallthrough to: " << *PriorTBB << "\n";
|
|
|
|
|
2006-11-18 21:47:54 +01:00
|
|
|
TII->RemoveBranch(PrevBB);
|
|
|
|
TII->InsertBranch(PrevBB, MBB, 0, NewPriorCond);
|
|
|
|
|
|
|
|
// Move this block to the end of the function.
|
|
|
|
MBB->moveAfter(--MBB->getParent()->end());
|
|
|
|
MadeChange = true;
|
|
|
|
++NumBranchOpts;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2006-10-14 02:21:48 +02:00
|
|
|
}
|
|
|
|
|
2006-10-21 07:08:28 +02:00
|
|
|
// Analyze the branch in the current block.
|
|
|
|
MachineBasicBlock *CurTBB = 0, *CurFBB = 0;
|
2008-08-15 00:49:33 +02:00
|
|
|
SmallVector<MachineOperand, 4> CurCond;
|
2009-02-09 08:14:22 +01:00
|
|
|
bool CurUnAnalyzable = TII->AnalyzeBranch(*MBB, CurTBB, CurFBB, CurCond, true);
|
2006-10-29 22:05:41 +01:00
|
|
|
if (!CurUnAnalyzable) {
|
2006-10-21 07:08:28 +02:00
|
|
|
// If the CFG for the prior block has extra edges, remove them.
|
2007-06-19 00:43:58 +02:00
|
|
|
MadeChange |= MBB->CorrectExtraCFGEdges(CurTBB, CurFBB, !CurCond.empty());
|
2006-10-21 07:08:28 +02:00
|
|
|
|
optimize single MBB loops better. In particular, produce:
LBB1_57: #bb207.i
movl 72(%esp), %ecx
movb (%ecx,%eax), %cl
movl 80(%esp), %edx
movb %cl, 1(%edx,%eax)
incl %eax
cmpl $143, %eax
jne LBB1_57 #bb207.i
jmp LBB1_64 #cond_next255.i
instead of:
LBB1_57: #bb207.i
movl 72(%esp), %ecx
movb (%ecx,%eax), %cl
movl 80(%esp), %edx
movb %cl, 1(%edx,%eax)
incl %eax
cmpl $143, %eax
je LBB1_64 #cond_next255.i
jmp LBB1_57 #bb207.i
This eliminates a branch per iteration of the loop. This hurt PPC in
particular, because the extra branch meant another dispatch group for each
iteration of the loop.
llvm-svn: 31530
2006-11-08 02:03:21 +01:00
|
|
|
// If this is a two-way branch, and the FBB branches to this block, reverse
|
|
|
|
// the condition so the single-basic-block loop is faster. Instead of:
|
|
|
|
// Loop: xxx; jcc Out; jmp Loop
|
|
|
|
// we want:
|
|
|
|
// Loop: xxx; jncc Loop; jmp Out
|
|
|
|
if (CurTBB && CurFBB && CurFBB == MBB && CurTBB != MBB) {
|
2008-08-15 00:49:33 +02:00
|
|
|
SmallVector<MachineOperand, 4> NewCond(CurCond);
|
2006-11-08 02:03:21 +01:00
|
|
|
if (!TII->ReverseBranchCondition(NewCond)) {
|
|
|
|
TII->RemoveBranch(*MBB);
|
|
|
|
TII->InsertBranch(*MBB, CurFBB, CurTBB, NewCond);
|
|
|
|
MadeChange = true;
|
|
|
|
++NumBranchOpts;
|
|
|
|
return OptimizeBlock(MBB);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2006-10-21 07:08:28 +02:00
|
|
|
// If this branch is the only thing in its block, see if we can forward
|
|
|
|
// other blocks across it.
|
|
|
|
if (CurTBB && CurCond.empty() && CurFBB == 0 &&
|
2008-01-07 08:27:27 +01:00
|
|
|
MBB->begin()->getDesc().isBranch() && CurTBB != MBB) {
|
2006-10-21 07:08:28 +02:00
|
|
|
// This block may contain just an unconditional branch. Because there can
|
|
|
|
// be 'non-branch terminators' in the block, try removing the branch and
|
|
|
|
// then seeing if the block is empty.
|
|
|
|
TII->RemoveBranch(*MBB);
|
|
|
|
|
|
|
|
// If this block is just an unconditional branch to CurTBB, we can
|
|
|
|
// usually completely eliminate the block. The only case we cannot
|
|
|
|
// completely eliminate the block is when the block before this one
|
|
|
|
// falls through into MBB and we can't understand the prior block's branch
|
|
|
|
// condition.
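// Illustrative sketch (hypothetical labels, not from the source):
//   Pred1: jcc MBB               Pred1: jcc CurTBB
//   ...                    -->   ...
//   MBB:   jmp CurTBB            MBB:   jmp CurTBB   (typically now dead,
//                                                     removed later)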
|
2006-10-28 19:32:47 +02:00
|
|
|
if (MBB->empty()) {
|
|
|
|
bool PredHasNoFallThrough = TII->BlockHasNoFallThrough(PrevBB);
|
|
|
|
if (PredHasNoFallThrough || !PriorUnAnalyzable ||
|
|
|
|
!PrevBB.isSuccessor(MBB)) {
|
|
|
|
// If the prior block falls through into us, turn it into an
|
|
|
|
// explicit branch to us to make updates simpler.
|
|
|
|
if (!PredHasNoFallThrough && PrevBB.isSuccessor(MBB) &&
|
|
|
|
PriorTBB != MBB && PriorFBB != MBB) {
|
|
|
|
if (PriorTBB == 0) {
|
2006-10-28 20:34:47 +02:00
|
|
|
assert(PriorCond.empty() && PriorFBB == 0 &&
|
|
|
|
"Bad branch analysis");
|
2006-10-28 19:32:47 +02:00
|
|
|
PriorTBB = MBB;
|
|
|
|
} else {
|
|
|
|
assert(PriorFBB == 0 && "Machine CFG out of date!");
|
|
|
|
PriorFBB = MBB;
|
|
|
|
}
|
|
|
|
TII->RemoveBranch(PrevBB);
|
|
|
|
TII->InsertBranch(PrevBB, PriorTBB, PriorFBB, PriorCond);
|
2006-10-21 07:08:28 +02:00
|
|
|
}
|
2004-07-31 12:01:27 +02:00
|
|
|
|
2006-10-28 19:32:47 +02:00
|
|
|
// Iterate through all the predecessors, revectoring each in turn.
|
2007-06-29 04:45:24 +02:00
|
|
|
size_t PI = 0;
|
2006-10-28 19:32:47 +02:00
|
|
|
bool DidChange = false;
|
|
|
|
bool HasBranchToSelf = false;
|
2007-06-29 04:45:24 +02:00
|
|
|
while (PI != MBB->pred_size()) {
|
|
|
|
MachineBasicBlock *PMBB = *(MBB->pred_begin() + PI);
|
|
|
|
if (PMBB == MBB) {
|
2006-10-28 19:32:47 +02:00
|
|
|
// If this block has an uncond branch to itself, leave it.
|
|
|
|
++PI;
|
|
|
|
HasBranchToSelf = true;
|
|
|
|
} else {
|
|
|
|
DidChange = true;
|
2007-06-29 04:45:24 +02:00
|
|
|
PMBB->ReplaceUsesOfBlockWith(MBB, CurTBB);
|
2006-10-28 19:32:47 +02:00
|
|
|
}
|
2006-10-21 08:11:43 +02:00
|
|
|
}
|
2004-07-31 12:01:27 +02:00
|
|
|
|
2006-10-28 19:32:47 +02:00
|
|
|
// Change any jumptables to go to the new MBB.
|
2006-10-28 20:34:47 +02:00
|
|
|
MBB->getParent()->getJumpTableInfo()->
|
|
|
|
ReplaceMBBInJumpTables(MBB, CurTBB);
|
2006-10-28 19:32:47 +02:00
|
|
|
if (DidChange) {
|
|
|
|
++NumBranchOpts;
|
|
|
|
MadeChange = true;
|
|
|
|
if (!HasBranchToSelf) return;
|
|
|
|
}
|
2006-10-21 08:11:43 +02:00
|
|
|
}
|
2004-07-31 12:01:27 +02:00
|
|
|
}
|
2006-10-21 07:08:28 +02:00
|
|
|
|
|
|
|
// Add the branch back if the block is more than just an uncond branch.
|
|
|
|
TII->InsertBranch(*MBB, CurTBB, 0, CurCond);
|
2004-07-31 12:01:27 +02:00
|
|
|
}
|
2006-10-29 22:05:41 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// If the prior block doesn't fall through into this block, and if this
|
|
|
|
// block doesn't fall through into some other block, see if we can find a
|
|
|
|
// place to move this block where a fall-through will happen.
|
|
|
|
if (!CanFallThrough(&PrevBB, PriorUnAnalyzable,
|
|
|
|
PriorTBB, PriorFBB, PriorCond)) {
|
|
|
|
// Now we know that there was no fall-through into this block, check to
|
|
|
|
// see if it has a fall-through into its successor.
|
2007-02-17 01:44:34 +01:00
|
|
|
bool CurFallsThru = CanFallThrough(MBB, CurUnAnalyzable, CurTBB, CurFBB,
|
2007-05-01 01:35:00 +02:00
|
|
|
CurCond);
|
2007-02-17 01:44:34 +01:00
|
|
|
|
2007-02-21 23:42:20 +01:00
|
|
|
if (!MBB->isLandingPad()) {
|
|
|
|
// Check all the predecessors of this block. If one of them has no fall
|
|
|
|
// throughs, move this block right after it.
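// Illustrative sketch: if PredBB ends in "jmp MBB" and nothing can fall into
// MBB where it currently sits, placing MBB directly after PredBB sets that
// branch up to become a fall-through on a later pass.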
|
|
|
|
for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
|
|
|
|
E = MBB->pred_end(); PI != E; ++PI) {
|
|
|
|
// Analyze the branch at the end of the pred.
|
|
|
|
MachineBasicBlock *PredBB = *PI;
|
|
|
|
MachineFunction::iterator PredFallthrough = PredBB; ++PredFallthrough;
|
|
|
|
if (PredBB != MBB && !CanFallThrough(PredBB)
|
2007-05-10 03:01:49 +02:00
|
|
|
&& (!CurFallsThru || !CurTBB || !CurFBB)
|
2007-02-21 23:42:20 +01:00
|
|
|
&& (!CurFallsThru || MBB->getNumber() >= PredBB->getNumber())) {
|
|
|
|
// If the current block doesn't fall through, just move it.
|
|
|
|
// If the current block can fall through and does not end with a
|
|
|
|
// conditional branch, we need to append an unconditional jump to
|
|
|
|
// the (current) next block. To avoid a possible compile-time
|
|
|
|
// infinite loop, move blocks only backward in this case.
|
2007-05-10 03:01:49 +02:00
|
|
|
// Also, if there are already 2 branches here, we cannot add a third;
|
|
|
|
// this means we have the case
|
|
|
|
// Bcc next
|
|
|
|
// B elsewhere
|
|
|
|
// next:
|
2007-02-21 23:42:20 +01:00
|
|
|
if (CurFallsThru) {
|
|
|
|
MachineBasicBlock *NextBB = next(MachineFunction::iterator(MBB));
|
|
|
|
CurCond.clear();
|
|
|
|
TII->InsertBranch(*MBB, NextBB, 0, CurCond);
|
|
|
|
}
|
|
|
|
MBB->moveAfter(PredBB);
|
|
|
|
MadeChange = true;
|
|
|
|
return OptimizeBlock(MBB);
|
2006-10-24 03:12:32 +02:00
|
|
|
}
|
2006-10-29 22:05:41 +01:00
|
|
|
}
|
2007-02-17 01:44:34 +01:00
|
|
|
}
|
2006-10-24 03:12:32 +02:00
|
|
|
|
2007-02-17 01:44:34 +01:00
|
|
|
if (!CurFallsThru) {
|
2006-10-29 22:05:41 +01:00
|
|
|
// Check all successors to see if we can move this block before it.
|
|
|
|
for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
|
|
|
|
E = MBB->succ_end(); SI != E; ++SI) {
|
|
|
|
// Analyze the branch at the end of the block before the succ.
|
|
|
|
MachineBasicBlock *SuccBB = *SI;
|
|
|
|
MachineFunction::iterator SuccPrev = SuccBB; --SuccPrev;
|
|
|
|
std::vector<MachineOperand> SuccPrevCond;
|
2007-05-01 01:35:00 +02:00
|
|
|
|
|
|
|
// If this block doesn't already fall-through to that successor, and if
|
|
|
|
// the succ doesn't already have a block that can fall through into it,
|
|
|
|
// and if the successor isn't an EH destination, we can arrange for the
|
|
|
|
// fallthrough to happen.
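// Illustrative sketch: MBB ends in "jmp SuccBB" and the block currently
// above SuccBB cannot fall into it, so moving MBB to sit directly before
// SuccBB lets a later pass turn that branch into a fall-through.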
|
|
|
|
if (SuccBB != MBB && !CanFallThrough(SuccPrev) &&
|
|
|
|
!SuccBB->isLandingPad()) {
|
2006-10-29 22:05:41 +01:00
|
|
|
MBB->moveBefore(SuccBB);
|
2006-10-24 03:12:32 +02:00
|
|
|
MadeChange = true;
|
2006-10-29 22:05:41 +01:00
|
|
|
return OptimizeBlock(MBB);
|
2006-10-24 03:12:32 +02:00
|
|
|
}
|
|
|
|
}
|
2006-10-29 22:05:41 +01:00
|
|
|
|
|
|
|
// Okay, there is no really great place to put this block. If, however,
|
|
|
|
// the block before this one would be a fall-through if this block were
|
|
|
|
// removed, move this block to the end of the function.
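// Illustrative sketch (hypothetical layout, not from the source): with blocks
// "PrevBB / MBB / FallThrough" where PrevBB already branches to FallThrough
// and neither block falls into MBB, moving MBB to the end of the function
// lets PrevBB's branch to FallThrough become a fall-through later.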
|
|
|
|
if (FallThrough != MBB->getParent()->end() &&
|
|
|
|
PrevBB.isSuccessor(FallThrough)) {
|
|
|
|
MBB->moveAfter(--MBB->getParent()->end());
|
|
|
|
MadeChange = true;
|
|
|
|
return;
|
|
|
|
}
|
2006-10-24 03:12:32 +02:00
|
|
|
}
|
2004-07-31 12:01:27 +02:00
|
|
|
}
|
|
|
|
}
|