//===- SelectionDAGISel.cpp - Implement the SelectionDAGISel class --------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAGISel class.
//
//===----------------------------------------------------------------------===//
#include "ScheduleDAGSDNodes.h"
#include "SelectionDAGBuilder.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/OptimizationDiagnosticInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachinePassRegistry.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <vector>
using namespace llvm;
#define DEBUG_TYPE "isel"
STATISTIC(NumFastIselFailures, "Number of instructions fast isel failed on");
STATISTIC(NumFastIselSuccess, "Number of instructions fast isel selected");
STATISTIC(NumFastIselBlocks, "Number of blocks selected entirely by fast isel");
STATISTIC(NumDAGBlocks, "Number of blocks selected using DAG");
STATISTIC(NumDAGIselRetries,"Number of times dag isel has to try another path");
STATISTIC(NumEntryBlocks, "Number of entry blocks encountered");
STATISTIC(NumFastIselFailLowerArguments,
"Number of entry blocks where fast isel failed to lower arguments");
static cl::opt<int> EnableFastISelAbort(
"fast-isel-abort", cl::Hidden,
cl::desc("Enable abort calls when \"fast\" instruction selection "
"fails to lower an instruction: 0 disable the abort, 1 will "
"abort but for args, calls and terminators, 2 will also "
"abort for argument lowering, and 3 will never fallback "
"to SelectionDAG."));
static cl::opt<bool> EnableFastISelFallbackReport(
"fast-isel-report-on-fallback", cl::Hidden,
cl::desc("Emit a diagnostic when \"fast\" instruction selection "
"falls back to SelectionDAG."));
static cl::opt<bool>
UseMBPI("use-mbpi",
cl::desc("use Machine Branch Probability Info"),
cl::init(true), cl::Hidden);
#ifndef NDEBUG
static cl::opt<std::string>
FilterDAGBasicBlockName("filter-view-dags", cl::Hidden,
cl::desc("Only display the basic block whose name "
"matches this for all view-*-dags options"));
static cl::opt<bool>
ViewDAGCombine1("view-dag-combine1-dags", cl::Hidden,
cl::desc("Pop up a window to show dags before the first "
"dag combine pass"));
static cl::opt<bool>
ViewLegalizeTypesDAGs("view-legalize-types-dags", cl::Hidden,
cl::desc("Pop up a window to show dags before legalize types"));
static cl::opt<bool>
ViewLegalizeDAGs("view-legalize-dags", cl::Hidden,
cl::desc("Pop up a window to show dags before legalize"));
static cl::opt<bool>
ViewDAGCombine2("view-dag-combine2-dags", cl::Hidden,
cl::desc("Pop up a window to show dags before the second "
"dag combine pass"));
static cl::opt<bool>
ViewDAGCombineLT("view-dag-combine-lt-dags", cl::Hidden,
cl::desc("Pop up a window to show dags before the post legalize types"
" dag combine pass"));
static cl::opt<bool>
ViewISelDAGs("view-isel-dags", cl::Hidden,
cl::desc("Pop up a window to show isel dags as they are selected"));
static cl::opt<bool>
ViewSchedDAGs("view-sched-dags", cl::Hidden,
cl::desc("Pop up a window to show sched dags as they are processed"));
static cl::opt<bool>
ViewSUnitDAGs("view-sunit-dags", cl::Hidden,
cl::desc("Pop up a window to show SUnit dags after they are processed"));
#else
static const bool ViewDAGCombine1 = false,
ViewLegalizeTypesDAGs = false, ViewLegalizeDAGs = false,
ViewDAGCombine2 = false,
ViewDAGCombineLT = false,
ViewISelDAGs = false, ViewSchedDAGs = false,
ViewSUnitDAGs = false;
#endif
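// Usage note (illustrative, not part of the original source): the view-*-dags
// flags above exist only in assertion-enabled builds and pop up a GraphViz
// rendering of the DAG for each basic block; combining them with
// -filter-view-dags=<block name> restricts the display to a single block.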
//===---------------------------------------------------------------------===//
///
/// RegisterScheduler class - Track the registration of instruction schedulers.
///
//===---------------------------------------------------------------------===//
MachinePassRegistry RegisterScheduler::Registry;
//===---------------------------------------------------------------------===//
///
/// ISHeuristic command line option for instruction schedulers.
///
//===---------------------------------------------------------------------===//
static cl::opt<RegisterScheduler::FunctionPassCtor, false,
RegisterPassParser<RegisterScheduler>>
ISHeuristic("pre-RA-sched",
cl::init(&createDefaultScheduler), cl::Hidden,
cl::desc("Instruction schedulers available (before register"
" allocation):"));
static RegisterScheduler
defaultListDAGScheduler("default", "Best scheduler for the target",
createDefaultScheduler);
namespace llvm {
//===--------------------------------------------------------------------===//
/// \brief This class is used by SelectionDAGISel to temporarily override
/// the optimization level on a per-function basis.
class OptLevelChanger {
SelectionDAGISel &IS;
CodeGenOpt::Level SavedOptLevel;
bool SavedFastISel;
public:
OptLevelChanger(SelectionDAGISel &ISel,
CodeGenOpt::Level NewOptLevel) : IS(ISel) {
SavedOptLevel = IS.OptLevel;
if (NewOptLevel == SavedOptLevel)
return;
IS.OptLevel = NewOptLevel;
IS.TM.setOptLevel(NewOptLevel);
DEBUG(dbgs() << "\nChanging optimization level for Function "
<< IS.MF->getFunction()->getName() << "\n");
DEBUG(dbgs() << "\tBefore: -O" << SavedOptLevel
<< " ; After: -O" << NewOptLevel << "\n");
SavedFastISel = IS.TM.Options.EnableFastISel;
if (NewOptLevel == CodeGenOpt::None) {
IS.TM.setFastISel(IS.TM.getO0WantsFastISel());
DEBUG(dbgs() << "\tFastISel is "
<< (IS.TM.Options.EnableFastISel ? "enabled" : "disabled")
<< "\n");
}
}
~OptLevelChanger() {
if (IS.OptLevel == SavedOptLevel)
return;
DEBUG(dbgs() << "\nRestoring optimization level for Function "
<< IS.MF->getFunction()->getName() << "\n");
DEBUG(dbgs() << "\tBefore: -O" << IS.OptLevel
<< " ; After: -O" << SavedOptLevel << "\n");
IS.OptLevel = SavedOptLevel;
IS.TM.setOptLevel(SavedOptLevel);
IS.TM.setFastISel(SavedFastISel);
}
};
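// Usage sketch (descriptive comment, not part of the original source):
// OptLevelChanger is an RAII guard. runOnMachineFunction constructs one on
// the stack with the possibly lowered optimization level (for example
// CodeGenOpt::None for an optnone function); the constructor swaps the level
// and FastISel setting into the TargetMachine, and the destructor restores
// the saved values once selection of the function is done.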
//===--------------------------------------------------------------------===//
/// createDefaultScheduler - This creates an instruction scheduler appropriate
/// for the target.
ScheduleDAGSDNodes* createDefaultScheduler(SelectionDAGISel *IS,
CodeGenOpt::Level OptLevel) {
const TargetLowering *TLI = IS->TLI;
const TargetSubtargetInfo &ST = IS->MF->getSubtarget();
// Try first to see if the Target has its own way of selecting a scheduler
if (auto *SchedulerCtor = ST.getDAGScheduler(OptLevel)) {
return SchedulerCtor(IS, OptLevel);
}
if (OptLevel == CodeGenOpt::None ||
(ST.enableMachineScheduler() && ST.enableMachineSchedDefaultSched()) ||
TLI->getSchedulingPreference() == Sched::Source)
return createSourceListDAGScheduler(IS, OptLevel);
if (TLI->getSchedulingPreference() == Sched::RegPressure)
return createBURRListDAGScheduler(IS, OptLevel);
if (TLI->getSchedulingPreference() == Sched::Hybrid)
return createHybridListDAGScheduler(IS, OptLevel);
if (TLI->getSchedulingPreference() == Sched::VLIW)
return createVLIWDAGScheduler(IS, OptLevel);
assert(TLI->getSchedulingPreference() == Sched::ILP &&
"Unknown sched type!");
return createILPListDAGScheduler(IS, OptLevel);
}
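// Summary (descriptive comment, not part of the original source): the
// scheduler is chosen in this order -- a constructor supplied by the target
// via getDAGScheduler() wins; otherwise source-order scheduling is used at
// -O0 or when the MachineScheduler is the default; otherwise the target's
// scheduling preference selects register-pressure (BURR), hybrid, VLIW or
// ILP list scheduling.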
} // end namespace llvm
// EmitInstrWithCustomInserter - This method should be implemented by targets
// that mark instructions with the 'usesCustomInserter' flag. These
// instructions are special in various ways, which require special support to
// insert. The specified MachineInstr is created but not inserted into any
// basic blocks, and this method is called to expand it into a sequence of
// instructions, potentially also creating new basic blocks and control flow.
// When new basic blocks are inserted and the edges from MBB to its successors
// are modified, the method should insert pairs of <OldSucc, NewSucc> into the
// DenseMap.
MachineBasicBlock *
TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *MBB) const {
#ifndef NDEBUG
dbgs() << "If a target marks an instruction with "
"'usesCustomInserter', it must implement "
"TargetLowering::EmitInstrWithCustomInserter!";
#endif
llvm_unreachable(nullptr);
}
void TargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
SDNode *Node) const {
assert(!MI.hasPostISelHook() &&
"If a target marks an instruction with 'hasPostISelHook', "
"it must implement TargetLowering::AdjustInstrPostInstrSelection!");
}
//===----------------------------------------------------------------------===//
// SelectionDAGISel code
//===----------------------------------------------------------------------===//
SelectionDAGISel::SelectionDAGISel(TargetMachine &tm,
CodeGenOpt::Level OL) :
MachineFunctionPass(ID), TM(tm),
FuncInfo(new FunctionLoweringInfo()),
CurDAG(new SelectionDAG(tm, OL)),
SDB(new SelectionDAGBuilder(*CurDAG, *FuncInfo, OL)),
AA(), GFI(),
OptLevel(OL),
DAGSize(0) {
initializeGCModuleInfoPass(*PassRegistry::getPassRegistry());
initializeBranchProbabilityInfoWrapperPassPass(
*PassRegistry::getPassRegistry());
initializeAAResultsWrapperPassPass(*PassRegistry::getPassRegistry());
initializeTargetLibraryInfoWrapperPassPass(
*PassRegistry::getPassRegistry());
}
SelectionDAGISel::~SelectionDAGISel() {
delete SDB;
delete CurDAG;
delete FuncInfo;
}
void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const {
if (OptLevel != CodeGenOpt::None)
AU.addRequired<AAResultsWrapperPass>();
AU.addRequired<GCModuleInfo>();
AU.addRequired<StackProtector>();
AU.addPreserved<StackProtector>();
AU.addPreserved<GCModuleInfo>();
AU.addRequired<TargetLibraryInfoWrapperPass>();
if (UseMBPI && OptLevel != CodeGenOpt::None)
AU.addRequired<BranchProbabilityInfoWrapperPass>();
MachineFunctionPass::getAnalysisUsage(AU);
}
/// SplitCriticalSideEffectEdges - Look for critical edges with a PHI value that
/// may trap on it. In this case we have to split the edge so that the path
/// through the predecessor block that doesn't go to the phi block doesn't
/// execute the possibly trapping instruction. If available, we pass domtree
/// and loop info to be updated when we split critical edges. This is because
/// SelectionDAGISel preserves these analyses.
/// This is required for correctness, so it must be done at -O0.
///
static void SplitCriticalSideEffectEdges(Function &Fn, DominatorTree *DT,
LoopInfo *LI) {
// Loop over blocks with phi nodes.
for (BasicBlock &BB : Fn) {
PHINode *PN = dyn_cast<PHINode>(BB.begin());
if (!PN) continue;
ReprocessBlock:
// For each block with a PHI node, check to see if any of the input values
// are potentially trapping constant expressions. Constant expressions are
// the only potentially trapping value that can occur as the argument to a
// PHI.
for (BasicBlock::iterator I = BB.begin(); (PN = dyn_cast<PHINode>(I)); ++I)
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
ConstantExpr *CE = dyn_cast<ConstantExpr>(PN->getIncomingValue(i));
if (!CE || !CE->canTrap()) continue;
// The only case we have to worry about is when the edge is critical.
// Since this block has a PHI Node, we assume it has multiple input
// edges: check to see if the pred has multiple successors.
BasicBlock *Pred = PN->getIncomingBlock(i);
if (Pred->getTerminator()->getNumSuccessors() == 1)
continue;
// Okay, we have to split this edge.
SplitCriticalEdge(
Pred->getTerminator(), GetSuccessorNumber(Pred, &BB),
CriticalEdgeSplittingOptions(DT, LI).setMergeIdenticalEdges());
goto ReprocessBlock;
}
}
}
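// Illustrative example (not from the original source) of the case handled
// above: given a PHI fed by a trapping constant expression over a critical
// edge, e.g.
//   %p = phi i32 [ sdiv (i32 1, i32 ptrtoint (i32* @g to i32)), %pred ], ...
// where %pred has multiple successors, the %pred edge is split so the
// division is only evaluated on the path that actually reaches the PHI block.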
bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
// If we already selected that function, we do not need to run SDISel.
if (mf.getProperties().hasProperty(
MachineFunctionProperties::Property::Selected))
return false;
// Do some sanity-checking on the command-line options.
assert((!EnableFastISelAbort || TM.Options.EnableFastISel) &&
"-fast-isel-abort > 0 requires -fast-isel");
const Function &Fn = *mf.getFunction();
MF = &mf;
// Reset the target options before resetting the optimization
// level below.
// FIXME: This is a horrible hack and should be processed via
// codegen looking at the optimization level explicitly when
// it wants to look at it.
TM.resetTargetOptions(Fn);
// Reset OptLevel to None for optnone functions.
CodeGenOpt::Level NewOptLevel = OptLevel;
if (OptLevel != CodeGenOpt::None && skipFunction(Fn))
NewOptLevel = CodeGenOpt::None;
OptLevelChanger OLC(*this, NewOptLevel);
TII = MF->getSubtarget().getInstrInfo();
TLI = MF->getSubtarget().getTargetLowering();
RegInfo = &MF->getRegInfo();
LibInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
GFI = Fn.hasGC() ? &getAnalysis<GCModuleInfo>().getFunctionInfo(Fn) : nullptr;
ORE = make_unique<OptimizationRemarkEmitter>(&Fn);
auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
LoopInfo *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
DEBUG(dbgs() << "\n\n\n=== " << Fn.getName() << "\n");
SplitCriticalSideEffectEdges(const_cast<Function &>(Fn), DT, LI);
CurDAG->init(*MF, *ORE, this);
FuncInfo->set(Fn, *MF, CurDAG);
// Now get the optional analyses if we want to.
// This is based on the possibly changed OptLevel (after optnone is taken
// into account). That's unfortunate but OK because it just means we won't
// ask for passes that have been required anyway.
if (UseMBPI && OptLevel != CodeGenOpt::None)
FuncInfo->BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
else
FuncInfo->BPI = nullptr;
if (OptLevel != CodeGenOpt::None)
AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
else
AA = nullptr;
SDB->init(GFI, AA, LibInfo);
MF->setHasInlineAsm(false);
FuncInfo->SplitCSR = false;
// We split CSR if the target supports it for the given function
// and the function has only return exits.
if (OptLevel != CodeGenOpt::None && TLI->supportSplitCSR(MF)) {
FuncInfo->SplitCSR = true;
// Collect all the return blocks.
for (const BasicBlock &BB : Fn) {
if (!succ_empty(&BB))
continue;
const TerminatorInst *Term = BB.getTerminator();
if (isa<UnreachableInst>(Term) || isa<ReturnInst>(Term))
continue;
// Bail out if the exit block is neither a Return nor an Unreachable.
FuncInfo->SplitCSR = false;
break;
}
}
MachineBasicBlock *EntryMBB = &MF->front();
if (FuncInfo->SplitCSR)
// This performs initialization so lowering for SplitCSR will be correct.
TLI->initializeSplitCSR(EntryMBB);
SelectAllBasicBlocks(Fn);
if (FastISelFailed && EnableFastISelFallbackReport) {
DiagnosticInfoISelFallback DiagFallback(Fn);
Fn.getContext().diagnose(DiagFallback);
}
// If the first basic block in the function has live ins that need to be
// copied into vregs, emit the copies into the top of the block before
// emitting the code for the block.
const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
RegInfo->EmitLiveInCopies(EntryMBB, TRI, *TII);
// Insert copies in the entry block and the return blocks.
if (FuncInfo->SplitCSR) {
SmallVector<MachineBasicBlock*, 4> Returns;
// Collect all the return blocks.
for (MachineBasicBlock &MBB : mf) {
if (!MBB.succ_empty())
continue;
MachineBasicBlock::iterator Term = MBB.getFirstTerminator();
if (Term != MBB.end() && Term->isReturn()) {
Returns.push_back(&MBB);
continue;
}
}
TLI->insertCopiesSplitCSR(EntryMBB, Returns);
}
DenseMap<unsigned, unsigned> LiveInMap;
if (!FuncInfo->ArgDbgValues.empty())
for (MachineRegisterInfo::livein_iterator LI = RegInfo->livein_begin(),
E = RegInfo->livein_end(); LI != E; ++LI)
if (LI->second)
LiveInMap.insert(std::make_pair(LI->first, LI->second));
// Insert DBG_VALUE instructions for function arguments to the entry block.
for (unsigned i = 0, e = FuncInfo->ArgDbgValues.size(); i != e; ++i) {
MachineInstr *MI = FuncInfo->ArgDbgValues[e-i-1];
bool hasFI = MI->getOperand(0).isFI();
unsigned Reg =
hasFI ? TRI.getFrameRegister(*MF) : MI->getOperand(0).getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg))
EntryMBB->insert(EntryMBB->begin(), MI);
else {
MachineInstr *Def = RegInfo->getVRegDef(Reg);
if (Def) {
MachineBasicBlock::iterator InsertPos = Def;
// FIXME: VR def may not be in entry block.
Def->getParent()->insert(std::next(InsertPos), MI);
} else
DEBUG(dbgs() << "Dropping debug info for dead vreg"
<< TargetRegisterInfo::virtReg2Index(Reg) << "\n");
}
// If Reg is live-in then update debug info to track its copy in a vreg.
DenseMap<unsigned, unsigned>::iterator LDI = LiveInMap.find(Reg);
if (LDI != LiveInMap.end()) {
assert(!hasFI && "There's no handling of frame pointer updating here yet "
"- add if needed");
MachineInstr *Def = RegInfo->getVRegDef(LDI->second);
MachineBasicBlock::iterator InsertPos = Def;
const MDNode *Variable = MI->getDebugVariable();
const MDNode *Expr = MI->getDebugExpression();
DebugLoc DL = MI->getDebugLoc();
bool IsIndirect = MI->isIndirectDebugValue();
if (IsIndirect)
assert(MI->getOperand(1).getImm() == 0 &&
"DBG_VALUE with nonzero offset");
assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
"Expected inlined-at fields to agree");
// Def is never a terminator here, so it is ok to increment InsertPos.
BuildMI(*EntryMBB, ++InsertPos, DL, TII->get(TargetOpcode::DBG_VALUE),
IsIndirect, LDI->second, Variable, Expr);
// If this vreg is directly copied into an exported register then
// that COPY instruction also needs a DBG_VALUE, if it is the only
// user of LDI->second.
MachineInstr *CopyUseMI = nullptr;
for (MachineRegisterInfo::use_instr_iterator
UI = RegInfo->use_instr_begin(LDI->second),
E = RegInfo->use_instr_end(); UI != E; ) {
MachineInstr *UseMI = &*(UI++);
if (UseMI->isDebugValue()) continue;
if (UseMI->isCopy() && !CopyUseMI && UseMI->getParent() == EntryMBB) {
CopyUseMI = UseMI; continue;
}
// Otherwise this is another use or second copy use.
CopyUseMI = nullptr; break;
}
if (CopyUseMI) {
// Use MI's debug location, which describes where Variable was
// declared, rather than whatever is attached to CopyUseMI.
MachineInstr *NewMI =
BuildMI(*MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsIndirect,
CopyUseMI->getOperand(0).getReg(), Variable, Expr);
MachineBasicBlock::iterator Pos = CopyUseMI;
EntryMBB->insertAfter(Pos, NewMI);
}
}
}
// Determine if there are any calls in this machine function.
MachineFrameInfo &MFI = MF->getFrameInfo();
for (const auto &MBB : *MF) {
if (MFI.hasCalls() && MF->hasInlineAsm())
break;
for (const auto &MI : MBB) {
const MCInstrDesc &MCID = TII->get(MI.getOpcode());
if ((MCID.isCall() && !MCID.isReturn()) ||
MI.isStackAligningInlineAsm()) {
MFI.setHasCalls(true);
}
if (MI.isInlineAsm()) {
MF->setHasInlineAsm(true);
}
}
}
// Determine if there is a call to setjmp in the machine function.
MF->setExposesReturnsTwice(Fn.callsFunctionThatReturnsTwice());
// Replace forward-declared registers with the registers containing
// the desired value.
MachineRegisterInfo &MRI = MF->getRegInfo();
for (DenseMap<unsigned, unsigned>::iterator
I = FuncInfo->RegFixups.begin(), E = FuncInfo->RegFixups.end();
I != E; ++I) {
unsigned From = I->first;
unsigned To = I->second;
// If To is also scheduled to be replaced, find what its ultimate
// replacement is.
while (true) {
DenseMap<unsigned, unsigned>::iterator J = FuncInfo->RegFixups.find(To);
if (J == E) break;
To = J->second;
}
// Make sure the new register has a sufficiently constrained register class.
if (TargetRegisterInfo::isVirtualRegister(From) &&
TargetRegisterInfo::isVirtualRegister(To))
MRI.constrainRegClass(To, MRI.getRegClass(From));
// Replace it.
// Replacing one register with another won't touch the kill flags.
// We need to conservatively clear the kill flags as a kill on the old
// register might dominate existing uses of the new register.
if (!MRI.use_empty(To))
MRI.clearKillFlags(From);
MRI.replaceRegWith(From, To);
}
TLI->finalizeLowering(*MF);
// Release function-specific state. SDB and CurDAG are already cleared
// at this point.
FuncInfo->clear();
DEBUG(dbgs() << "*** MachineFunction at end of ISel ***\n");
DEBUG(MF->print(dbgs()));
return true;
}
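// Flow summary (descriptive comment, not part of the original source):
// runOnMachineFunction resets the target options and the effective
// optimization level, gathers the analyses SDISel uses, selects every basic
// block, and then performs per-function fixups -- entry-block live-in copies,
// split-CSR copies, argument DBG_VALUEs, the hasCalls/inline-asm flags and
// RegFixups replacement -- before finalizeLowering and clearing FuncInfo.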
static void reportFastISelFailure(MachineFunction &MF,
OptimizationRemarkEmitter &ORE,
OptimizationRemarkMissed &R,
bool ShouldAbort) {
// Print the function name explicitly if we don't have a debug location (which
// makes the diagnostic less useful) or if we're going to emit a raw error.
if (!R.getLocation().isValid() || ShouldAbort)
R << (" (in function: " + MF.getName() + ")").str();
if (ShouldAbort)
report_fatal_error(R.getMsg());
ORE.emit(R);
}
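// Descriptive note (not part of the original source): callers use this helper
// when FastISel gives up on an instruction -- with ShouldAbort set (e.g.
// under -fast-isel-abort) the missed remark is turned into a fatal error,
// otherwise it is only emitted through the OptimizationRemarkEmitter.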
void SelectionDAGISel::SelectBasicBlock(BasicBlock::const_iterator Begin,
BasicBlock::const_iterator End,
bool &HadTailCall) {
// Allow creating illegal types during DAG building for the basic block.
CurDAG->NewNodesMustHaveLegalTypes = false;
// Lower the instructions. If a call is emitted as a tail call, cease emitting
// nodes for this block.
for (BasicBlock::const_iterator I = Begin; I != End && !SDB->HasTailCall; ++I) {
if (!ElidedArgCopyInstrs.count(&*I))
SDB->visit(*I);
}
// Make sure the root of the DAG is up-to-date.
CurDAG->setRoot(SDB->getControlRoot());
HadTailCall = SDB->HasTailCall;
SDB->clear();
// Final step, emit the lowered DAG as machine code.
CodeGenAndEmitDAG();
}
void SelectionDAGISel::ComputeLiveOutVRegInfo() {
SmallPtrSet<SDNode*, 16> VisitedNodes;
SmallVector<SDNode*, 128> Worklist;
Worklist.push_back(CurDAG->getRoot().getNode());
KnownBits Known;
do {
SDNode *N = Worklist.pop_back_val();
// If we've already seen this node, ignore it.
if (!VisitedNodes.insert(N).second)
continue;
// Otherwise, add all chain operands to the worklist.
for (const SDValue &Op : N->op_values())
if (Op.getValueType() == MVT::Other)
Worklist.push_back(Op.getNode());
// If this is a CopyToReg with a vreg dest, process it.
if (N->getOpcode() != ISD::CopyToReg)
continue;
unsigned DestReg = cast<RegisterSDNode>(N->getOperand(1))->getReg();
if (!TargetRegisterInfo::isVirtualRegister(DestReg))
continue;
// Ignore non-scalar or non-integer values.
SDValue Src = N->getOperand(2);
EVT SrcVT = Src.getValueType();
if (!SrcVT.isInteger() || SrcVT.isVector())
continue;
unsigned NumSignBits = CurDAG->ComputeNumSignBits(Src);
CurDAG->computeKnownBits(Src, Known);
FuncInfo->AddLiveOutRegInfo(DestReg, NumSignBits, Known);
} while (!Worklist.empty());
}
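// Descriptive note (not part of the original source): the walk above follows
// chain operands from the root and, for every CopyToReg into a virtual
// register of scalar integer type, records the number of sign bits and the
// KnownBits of the copied value via FuncInfo->AddLiveOutRegInfo, so blocks
// selected later can reuse that information about values arriving in vregs.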
void SelectionDAGISel::CodeGenAndEmitDAG() {
StringRef GroupName = "sdag";
StringRef GroupDescription = "Instruction Selection and Scheduling";
std::string BlockName;
int BlockNumber = -1;
(void)BlockNumber;
bool MatchFilterBB = false; (void)MatchFilterBB;
// Pre-type legalization allow creation of any node types.
CurDAG->NewNodesMustHaveLegalTypes = false;
#ifndef NDEBUG
MatchFilterBB = (FilterDAGBasicBlockName.empty() ||
FilterDAGBasicBlockName ==
FuncInfo->MBB->getBasicBlock()->getName().str());
#endif
#ifdef NDEBUG
if (ViewDAGCombine1 || ViewLegalizeTypesDAGs || ViewLegalizeDAGs ||
ViewDAGCombine2 || ViewDAGCombineLT || ViewISelDAGs || ViewSchedDAGs ||
ViewSUnitDAGs)
#endif
{
BlockNumber = FuncInfo->MBB->getNumber();
BlockName =
(MF->getName() + ":" + FuncInfo->MBB->getBasicBlock()->getName()).str();
}
DEBUG(dbgs() << "Initial selection DAG: BB#" << BlockNumber
<< " '" << BlockName << "'\n"; CurDAG->dump());
if (ViewDAGCombine1 && MatchFilterBB)
CurDAG->viewGraph("dag-combine1 input for " + BlockName);
// Run the DAG combiner in pre-legalize mode.
{
NamedRegionTimer T("combine1", "DAG Combining 1", GroupName,
GroupDescription, TimePassesIsEnabled);
CurDAG->Combine(BeforeLegalizeTypes, AA, OptLevel);
}
DEBUG(dbgs() << "Optimized lowered selection DAG: BB#" << BlockNumber
<< " '" << BlockName << "'\n"; CurDAG->dump());
// Second step, hack on the DAG until it only uses operations and types that
// the target supports.
if (ViewLegalizeTypesDAGs && MatchFilterBB)
CurDAG->viewGraph("legalize-types input for " + BlockName);
bool Changed;
{
NamedRegionTimer T("legalize_types", "Type Legalization", GroupName,
GroupDescription, TimePassesIsEnabled);
Changed = CurDAG->LegalizeTypes();
}
DEBUG(dbgs() << "Type-legalized selection DAG: BB#" << BlockNumber
<< " '" << BlockName << "'\n"; CurDAG->dump());
// Only allow creation of legal node types.
CurDAG->NewNodesMustHaveLegalTypes = true;
if (Changed) {
if (ViewDAGCombineLT && MatchFilterBB)
CurDAG->viewGraph("dag-combine-lt input for " + BlockName);
// Run the DAG combiner in post-type-legalize mode.
{
NamedRegionTimer T("combine_lt", "DAG Combining after legalize types",
GroupName, GroupDescription, TimePassesIsEnabled);
CurDAG->Combine(AfterLegalizeTypes, AA, OptLevel);
}
DEBUG(dbgs() << "Optimized type-legalized selection DAG: BB#" << BlockNumber
<< " '" << BlockName << "'\n"; CurDAG->dump());
}
{
NamedRegionTimer T("legalize_vec", "Vector Legalization", GroupName,
GroupDescription, TimePassesIsEnabled);
Changed = CurDAG->LegalizeVectors();
}
if (Changed) {
DEBUG(dbgs() << "Vector-legalized selection DAG: BB#" << BlockNumber
<< " '" << BlockName << "'\n"; CurDAG->dump());
{
NamedRegionTimer T("legalize_types2", "Type Legalization 2", GroupName,
GroupDescription, TimePassesIsEnabled);
CurDAG->LegalizeTypes();
}
DEBUG(dbgs() << "Vector/type-legalized selection DAG: BB#" << BlockNumber
<< " '" << BlockName << "'\n"; CurDAG->dump());
if (ViewDAGCombineLT && MatchFilterBB)
CurDAG->viewGraph("dag-combine-lv input for " + BlockName);
// Run the DAG combiner in post-type-legalize mode.
{
NamedRegionTimer T("combine_lv", "DAG Combining after legalize vectors",
GroupName, GroupDescription, TimePassesIsEnabled);
CurDAG->Combine(AfterLegalizeVectorOps, AA, OptLevel);
}
DEBUG(dbgs() << "Optimized vector-legalized selection DAG: BB#"
<< BlockNumber << " '" << BlockName << "'\n"; CurDAG->dump());
}
if (ViewLegalizeDAGs && MatchFilterBB)
CurDAG->viewGraph("legalize input for " + BlockName);
{
NamedRegionTimer T("legalize", "DAG Legalization", GroupName,
GroupDescription, TimePassesIsEnabled);
CurDAG->Legalize();
}
DEBUG(dbgs() << "Legalized selection DAG: BB#" << BlockNumber
<< " '" << BlockName << "'\n"; CurDAG->dump());
if (ViewDAGCombine2 && MatchFilterBB)
CurDAG->viewGraph("dag-combine2 input for " + BlockName);
// Run the DAG combiner in post-legalize mode.
{
NamedRegionTimer T("combine2", "DAG Combining 2", GroupName,
GroupDescription, TimePassesIsEnabled);
CurDAG->Combine(AfterLegalizeDAG, AA, OptLevel);
}
DEBUG(dbgs() << "Optimized legalized selection DAG: BB#" << BlockNumber
<< " '" << BlockName << "'\n"; CurDAG->dump());
if (OptLevel != CodeGenOpt::None)
ComputeLiveOutVRegInfo();
if (ViewISelDAGs && MatchFilterBB)
CurDAG->viewGraph("isel input for " + BlockName);
// Third, instruction select all of the operations to machine code, adding the
// code to the MachineBasicBlock.
{
NamedRegionTimer T("isel", "Instruction Selection", GroupName,
GroupDescription, TimePassesIsEnabled);
DoInstructionSelection();
}
DEBUG(dbgs() << "Selected selection DAG: BB#" << BlockNumber
<< " '" << BlockName << "'\n"; CurDAG->dump());
if (ViewSchedDAGs && MatchFilterBB)
CurDAG->viewGraph("scheduler input for " + BlockName);
// Schedule machine code.
ScheduleDAGSDNodes *Scheduler = CreateScheduler();
{
NamedRegionTimer T("sched", "Instruction Scheduling", GroupName,
GroupDescription, TimePassesIsEnabled);
Scheduler->Run(CurDAG, FuncInfo->MBB);
}
if (ViewSUnitDAGs && MatchFilterBB)
Scheduler->viewGraph();
// Emit machine code to BB. This can change 'BB' to the last block being
// inserted into.
MachineBasicBlock *FirstMBB = FuncInfo->MBB, *LastMBB;
{
NamedRegionTimer T("emit", "Instruction Creation", GroupName,
GroupDescription, TimePassesIsEnabled);
// FuncInfo->InsertPt is passed by reference and set to the end of the
// scheduled instructions.
LastMBB = FuncInfo->MBB = Scheduler->EmitSchedule(FuncInfo->InsertPt);
}
// If the block was split, make sure we update any references that are used to
// update PHI nodes later on.
if (FirstMBB != LastMBB)
SDB->UpdateSplitBlock(FirstMBB, LastMBB);
// Free the scheduler state.
{
NamedRegionTimer T("cleanup", "Instruction Scheduling Cleanup", GroupName,
GroupDescription, TimePassesIsEnabled);
delete Scheduler;
}
// Free the SelectionDAG state, now that we're finished with it.
CurDAG->clear();
}
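// Pipeline summary (descriptive comment, not part of the original source):
// each block's DAG goes through a pre-legalization combine, type
// legalization, an optional post-type-legalization combine, vector
// legalization (with a second round of type legalization and combining),
// full legalization, a final combine, instruction selection, scheduling, and
// EmitSchedule, which may split the MachineBasicBlock.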
namespace {
/// ISelUpdater - helper class to handle updates of the instruction selection
/// graph.
class ISelUpdater : public SelectionDAG::DAGUpdateListener {
SelectionDAG::allnodes_iterator &ISelPosition;
public:
ISelUpdater(SelectionDAG &DAG, SelectionDAG::allnodes_iterator &isp)
: SelectionDAG::DAGUpdateListener(DAG), ISelPosition(isp) {}
/// NodeDeleted - Handle nodes deleted from the graph. If the node being
/// deleted is the current ISelPosition node, update ISelPosition.
///
void NodeDeleted(SDNode *N, SDNode *E) override {
if (ISelPosition == SelectionDAG::allnodes_iterator(N))
++ISelPosition;
}
};
} // end anonymous namespace
void SelectionDAGISel::DoInstructionSelection() {
DEBUG(dbgs() << "===== Instruction selection begins: BB#"
<< FuncInfo->MBB->getNumber()
<< " '" << FuncInfo->MBB->getName() << "'\n");
PreprocessISelDAG();
// Select target instructions for the DAG.
{
// Number all nodes with a topological order and set DAGSize.
DAGSize = CurDAG->AssignTopologicalOrder();
// Create a dummy node (which is not added to allnodes), that adds
// a reference to the root node, preventing it from being deleted,
// and tracking any changes of the root.
HandleSDNode Dummy(CurDAG->getRoot());
SelectionDAG::allnodes_iterator ISelPosition (CurDAG->getRoot().getNode());
++ISelPosition;
// Make sure that ISelPosition gets properly updated when nodes are deleted
// in calls made from this function.
ISelUpdater ISU(*CurDAG, ISelPosition);
// The AllNodes list is now topologically sorted. Visit the
// nodes by starting at the end of the list (the root of the
// graph) and proceeding back toward the beginning (the entry
// node).
while (ISelPosition != CurDAG->allnodes_begin()) {
SDNode *Node = &*--ISelPosition;
// Skip dead nodes. DAGCombiner is expected to eliminate all dead nodes,
// but there are currently some corner cases that it misses. Also, this
// makes it theoretically possible to disable the DAGCombiner.
if (Node->use_empty())
continue;
// When we are using non-default rounding modes or FP exception behavior
// FP operations are represented by StrictFP pseudo-operations. They
// need to be simplified here so that the target-specific instruction
// selectors know how to handle them.
//
// If the current node is a strict FP pseudo-op, the isStrictFPOp()
// function will provide the corresponding normal FP opcode to which the
// node should be mutated.
//
// FIXME: The backends need a way to handle FP constraints.
if (Node->isStrictFPOpcode())
Node = CurDAG->mutateStrictFPToFP(Node);
Select(Node);
}
CurDAG->setRoot(Dummy.getValue());
}
DEBUG(dbgs() << "===== Instruction selection ends:\n");
PostprocessISelDAG();
}
static bool hasExceptionPointerOrCodeUser(const CatchPadInst *CPI) {
for (const User *U : CPI->users()) {
if (const IntrinsicInst *EHPtrCall = dyn_cast<IntrinsicInst>(U)) {
Intrinsic::ID IID = EHPtrCall->getIntrinsicID();
if (IID == Intrinsic::eh_exceptionpointer ||
IID == Intrinsic::eh_exceptioncode)
return true;
}
}
return false;
}
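// Descriptive note (not part of the original source): this predicate scans
// the catchpad's users for llvm.eh.exceptionpointer / llvm.eh.exceptioncode
// calls, so PrepareEHLandingPad only marks the exception register live-in and
// copies it into a vreg when something actually consumes it.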
/// PrepareEHLandingPad - Emit an EH_LABEL, set up live-in registers, and
/// do other setup for EH landing-pad blocks.
bool SelectionDAGISel::PrepareEHLandingPad() {
MachineBasicBlock *MBB = FuncInfo->MBB;
const Constant *PersonalityFn = FuncInfo->Fn->getPersonalityFn();
const BasicBlock *LLVMBB = MBB->getBasicBlock();
const TargetRegisterClass *PtrRC =
TLI->getRegClassFor(TLI->getPointerTy(CurDAG->getDataLayout()));
// Catchpads have one live-in register, which typically holds the exception
// pointer or code.
if (const auto *CPI = dyn_cast<CatchPadInst>(LLVMBB->getFirstNonPHI())) {
if (hasExceptionPointerOrCodeUser(CPI)) {
// Get or create the virtual register to hold the pointer or code. Mark
// the live in physreg and copy into the vreg.
MCPhysReg EHPhysReg = TLI->getExceptionPointerRegister(PersonalityFn);
assert(EHPhysReg && "target lacks exception pointer register");
MBB->addLiveIn(EHPhysReg);
unsigned VReg = FuncInfo->getCatchPadExceptionPointerVReg(CPI, PtrRC);
BuildMI(*MBB, FuncInfo->InsertPt, SDB->getCurDebugLoc(),
TII->get(TargetOpcode::COPY), VReg)
.addReg(EHPhysReg, RegState::Kill);
}
return true;
}
if (!LLVMBB->isLandingPad())
return true;
// Add a label to mark the beginning of the landing pad. Deletion of the
// landing pad can thus be detected via the MachineModuleInfo.
MCSymbol *Label = MF->addLandingPad(MBB);
// Assign the call site to the landing pad's begin label.
MF->setCallSiteLandingPad(Label, SDB->LPadToCallSiteMap[MBB]);
const MCInstrDesc &II = TII->get(TargetOpcode::EH_LABEL);
BuildMI(*MBB, FuncInfo->InsertPt, SDB->getCurDebugLoc(), II)
.addSym(Label);
// Mark exception register as live in.
if (unsigned Reg = TLI->getExceptionPointerRegister(PersonalityFn))
FuncInfo->ExceptionPointerVirtReg = MBB->addLiveIn(Reg, PtrRC);
// Mark exception selector register as live in.
if (unsigned Reg = TLI->getExceptionSelectorRegister(PersonalityFn))
FuncInfo->ExceptionSelectorVirtReg = MBB->addLiveIn(Reg, PtrRC);
return true;
}
/// isFoldedOrDeadInstruction - Return true if the specified instruction is
/// side-effect free and is either dead or folded into a generated instruction.
/// Return false if it needs to be emitted.
static bool isFoldedOrDeadInstruction(const Instruction *I,
FunctionLoweringInfo *FuncInfo) {
return !I->mayWriteToMemory() && // Side-effecting instructions aren't folded.
!isa<TerminatorInst>(I) && // Terminators aren't folded.
!isa<DbgInfoIntrinsic>(I) && // Debug instructions aren't folded.
!I->isEHPad() && // EH pad instructions aren't folded.
!FuncInfo->isExportedInst(I); // Exported instrs must be computed.
}
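// Illustrative example (not from the original source): a getelementptr whose
// only use is the address operand of a load is typically folded into the
// load's addressing mode during selection; it has no side effects, is not a
// terminator, debug intrinsic or EH pad, and is not exported, so it is
// treated as folded or dead here and need not be emitted separately.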
/// Set up SwiftErrorVals by going through the function. If the function has
/// a swifterror argument, it will be the first entry.
static void setupSwiftErrorVals(const Function &Fn, const TargetLowering *TLI,
FunctionLoweringInfo *FuncInfo) {
if (!TLI->supportSwiftError())
return;
FuncInfo->SwiftErrorVals.clear();
FuncInfo->SwiftErrorVRegDefMap.clear();
FuncInfo->SwiftErrorVRegUpwardsUse.clear();
FuncInfo->SwiftErrorVRegDefUses.clear();
FuncInfo->SwiftErrorArg = nullptr;
// Check if the function has a swifterror argument.
bool HaveSeenSwiftErrorArg = false;
for (Function::const_arg_iterator AI = Fn.arg_begin(), AE = Fn.arg_end();
AI != AE; ++AI)
if (AI->hasSwiftErrorAttr()) {
assert(!HaveSeenSwiftErrorArg &&
"Must have only one swifterror parameter");
(void)HaveSeenSwiftErrorArg; // silence warning.
HaveSeenSwiftErrorArg = true;
FuncInfo->SwiftErrorArg = &*AI;
FuncInfo->SwiftErrorVals.push_back(&*AI);
}
for (const auto &LLVMBB : Fn)
for (const auto &Inst : LLVMBB) {
if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(&Inst))
if (Alloca->isSwiftError())
FuncInfo->SwiftErrorVals.push_back(Alloca);
}
}
static void createSwiftErrorEntriesInEntryBlock(FunctionLoweringInfo *FuncInfo,
FastISel *FastIS,
const TargetLowering *TLI,
const TargetInstrInfo *TII,
SelectionDAGBuilder *SDB) {
if (!TLI->supportSwiftError())
return;
// We only need to do this when we have swifterror parameter or swifterror
// alloc.
if (FuncInfo->SwiftErrorVals.empty())
return;
assert(FuncInfo->MBB == &*FuncInfo->MF->begin() &&
"expected to insert into entry block");
auto &DL = FuncInfo->MF->getDataLayout();
auto const *RC = TLI->getRegClassFor(TLI->getPointerTy(DL));
for (const auto *SwiftErrorVal : FuncInfo->SwiftErrorVals) {
// We will always generate a copy from the argument. It is always used at
// least by the 'return' of the swifterror.
if (FuncInfo->SwiftErrorArg && FuncInfo->SwiftErrorArg == SwiftErrorVal)
continue;
unsigned VReg = FuncInfo->MF->getRegInfo().createVirtualRegister(RC);
// Assign Undef to Vreg. We construct MI directly to make sure it works
// with FastISel.
BuildMI(*FuncInfo->MBB, FuncInfo->MBB->getFirstNonPHI(),
SDB->getCurDebugLoc(), TII->get(TargetOpcode::IMPLICIT_DEF),
VReg);
// Keep FastIS informed about the value we just inserted.
if (FastIS)
FastIS->setLastLocalValue(&*std::prev(FuncInfo->InsertPt));
FuncInfo->setCurrentSwiftErrorVReg(FuncInfo->MBB, SwiftErrorVal, VReg);
}
}
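// Illustrative sketch (hypothetical vreg number): for each swifterror alloca
// collected above, the entry block now begins with an undef definition such as
//
//   %100 = IMPLICIT_DEF
//
// so the value has a defined vreg before the first real store to it. The
// swifterror argument is skipped because argument lowering already emits a
// copy from its physical register.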
/// Collect llvm.dbg.declare information. This is done after argument lowering
/// in case the declarations refer to arguments.
static void processDbgDeclares(FunctionLoweringInfo *FuncInfo) {
MachineFunction *MF = FuncInfo->MF;
const DataLayout &DL = MF->getDataLayout();
for (const BasicBlock &BB : *FuncInfo->Fn) {
for (const Instruction &I : BB) {
const DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(&I);
if (!DI)
continue;
assert(DI->getVariable() && "Missing variable");
assert(DI->getDebugLoc() && "Missing location");
const Value *Address = DI->getAddress();
if (!Address)
continue;
// Look through casts and constant offset GEPs. These mostly come from
// inalloca.
APInt Offset(DL.getPointerSizeInBits(0), 0);
Address = Address->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
// Check if the variable is a static alloca or a byval or inalloca
// argument passed in memory. If it is not, then we will ignore this
// intrinsic and handle this during isel like dbg.value.
int FI = std::numeric_limits<int>::max();
if (const auto *AI = dyn_cast<AllocaInst>(Address)) {
auto SI = FuncInfo->StaticAllocaMap.find(AI);
if (SI != FuncInfo->StaticAllocaMap.end())
FI = SI->second;
} else if (const auto *Arg = dyn_cast<Argument>(Address))
FI = FuncInfo->getArgumentFrameIndex(Arg);
if (FI == std::numeric_limits<int>::max())
continue;
DIExpression *Expr = DI->getExpression();
if (Offset.getBoolValue())
Expr = DIExpression::prepend(Expr, DIExpression::NoDeref,
Offset.getZExtValue());
MF->setVariableDbgInfo(DI->getVariable(), Expr, FI, DI->getDebugLoc());
}
}
}
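// Illustrative sketch (hypothetical IR and offset): given
//
//   %p = getelementptr inbounds i8, i8* %arg, i32 8
//   call void @llvm.dbg.declare(metadata i8* %p, metadata !var, metadata !expr)
//
// where %arg is a byval or inalloca argument with a frame index,
// processDbgDeclares strips the GEP, accumulates the constant offset (8),
// prepends it to the DIExpression, and records the variable against %arg's
// frame index.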
/// Propagate swifterror values through the machine function CFG.
static void propagateSwiftErrorVRegs(FunctionLoweringInfo *FuncInfo) {
auto *TLI = FuncInfo->TLI;
if (!TLI->supportSwiftError())
return;
// We only need to do this when we have swifterror parameter or swifterror
// alloc.
if (FuncInfo->SwiftErrorVals.empty())
return;
// For each machine basic block in reverse post order.
ReversePostOrderTraversal<MachineFunction *> RPOT(FuncInfo->MF);
for (MachineBasicBlock *MBB : RPOT) {
// For each swifterror value in the function.
for (const auto *SwiftErrorVal : FuncInfo->SwiftErrorVals) {
auto Key = std::make_pair(MBB, SwiftErrorVal);
auto UUseIt = FuncInfo->SwiftErrorVRegUpwardsUse.find(Key);
auto VRegDefIt = FuncInfo->SwiftErrorVRegDefMap.find(Key);
bool UpwardsUse = UUseIt != FuncInfo->SwiftErrorVRegUpwardsUse.end();
unsigned UUseVReg = UpwardsUse ? UUseIt->second : 0;
bool DownwardDef = VRegDefIt != FuncInfo->SwiftErrorVRegDefMap.end();
assert(!(UpwardsUse && !DownwardDef) &&
"We can't have an upwards use but no downwards def");
// If there is no upwards exposed use and an entry for the swifterror in
// the def map for this value we don't need to do anything: We already
// have a downward def for this basic block.
if (!UpwardsUse && DownwardDef)
continue;
// Otherwise we either have an upwards exposed use vreg that we need to
// materialize or need to forward the downward def from predecessors.
// Check whether we have a single vreg def from all predecessors.
// Otherwise we need a phi.
SmallVector<std::pair<MachineBasicBlock *, unsigned>, 4> VRegs;
SmallSet<const MachineBasicBlock*, 8> Visited;
for (auto *Pred : MBB->predecessors()) {
if (!Visited.insert(Pred).second)
continue;
VRegs.push_back(std::make_pair(
Pred, FuncInfo->getOrCreateSwiftErrorVReg(Pred, SwiftErrorVal)));
if (Pred != MBB)
continue;
// We have a self-edge.
// If there was no upwards use in this basic block there is now one: the
// phi needs to use itself.
if (!UpwardsUse) {
UpwardsUse = true;
UUseIt = FuncInfo->SwiftErrorVRegUpwardsUse.find(Key);
assert(UUseIt != FuncInfo->SwiftErrorVRegUpwardsUse.end());
UUseVReg = UUseIt->second;
}
}
// We need a phi node if we have more than one predecessor with different
// downward defs.
bool needPHI =
VRegs.size() >= 1 &&
std::find_if(
VRegs.begin(), VRegs.end(),
[&](const std::pair<const MachineBasicBlock *, unsigned> &V)
-> bool { return V.second != VRegs[0].second; }) !=
VRegs.end();
// If there is no upwards exposed use and we don't need a phi, just
// forward the swifterror vreg from the predecessor(s).
if (!UpwardsUse && !needPHI) {
assert(!VRegs.empty() &&
"No predecessors? The entry block should bail out earlier");
// Just forward the swifterror vreg from the predecessor(s).
FuncInfo->setCurrentSwiftErrorVReg(MBB, SwiftErrorVal, VRegs[0].second);
continue;
}
auto DLoc = isa<Instruction>(SwiftErrorVal)
? dyn_cast<Instruction>(SwiftErrorVal)->getDebugLoc()
: DebugLoc();
const auto *TII = FuncInfo->MF->getSubtarget().getInstrInfo();
// If we don't need a phi create a copy to the upward exposed vreg.
if (!needPHI) {
assert(UpwardsUse);
assert(!VRegs.empty() &&
"No predecessors? Is the Calling Convention correct?");
unsigned DestReg = UUseVReg;
BuildMI(*MBB, MBB->getFirstNonPHI(), DLoc, TII->get(TargetOpcode::COPY),
DestReg)
.addReg(VRegs[0].second);
continue;
}
// We need a phi: if there is an upwards exposed use we already have a
// destination virtual register number; otherwise we generate a new one.
auto &DL = FuncInfo->MF->getDataLayout();
auto const *RC = TLI->getRegClassFor(TLI->getPointerTy(DL));
unsigned PHIVReg =
UpwardsUse ? UUseVReg
: FuncInfo->MF->getRegInfo().createVirtualRegister(RC);
MachineInstrBuilder SwiftErrorPHI =
BuildMI(*MBB, MBB->getFirstNonPHI(), DLoc,
TII->get(TargetOpcode::PHI), PHIVReg);
for (auto BBRegPair : VRegs) {
SwiftErrorPHI.addReg(BBRegPair.second).addMBB(BBRegPair.first);
}
// We did not have a definition in this block before: store the phi's vreg
// as this block's downward exposed def.
if (!UpwardsUse)
FuncInfo->setCurrentSwiftErrorVReg(MBB, SwiftErrorVal, PHIVReg);
}
}
}
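// Illustrative sketch (hypothetical blocks and vregs): in a diamond CFG
//
//   entry -> bb.1, bb.2;  bb.1 -> merge;  bb.2 -> merge
//
// where bb.1 and bb.2 each store to the swifterror value and so define
// different vregs %1 and %2, propagateSwiftErrorVRegs emits at the top of
// merge
//
//   %3 = PHI %1, %bb.1, %2, %bb.2
//
// and records %3 as merge's downward-exposed def. If all predecessors agreed
// on a single vreg, that vreg would simply be forwarded instead.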
static void preassignSwiftErrorRegs(const TargetLowering *TLI,
FunctionLoweringInfo *FuncInfo,
BasicBlock::const_iterator Begin,
BasicBlock::const_iterator End) {
if (!TLI->supportSwiftError() || FuncInfo->SwiftErrorVals.empty())
return;
// Iterate over instructions and assign vregs to swifterror defs and uses.
for (auto It = Begin; It != End; ++It) {
ImmutableCallSite CS(&*It);
if (CS) {
// A call-site with a swifterror argument is both use and def.
const Value *SwiftErrorAddr = nullptr;
for (auto &Arg : CS.args()) {
if (!Arg->isSwiftError())
continue;
// Use of swifterror.
assert(!SwiftErrorAddr && "Cannot have multiple swifterror arguments");
SwiftErrorAddr = &*Arg;
assert(SwiftErrorAddr->isSwiftError() &&
"Must have a swifterror value argument");
unsigned VReg; bool CreatedReg;
std::tie(VReg, CreatedReg) = FuncInfo->getOrCreateSwiftErrorVRegUseAt(
&*It, FuncInfo->MBB, SwiftErrorAddr);
assert(CreatedReg);
}
if (!SwiftErrorAddr)
continue;
// Def of swifterror.
unsigned VReg; bool CreatedReg;
std::tie(VReg, CreatedReg) =
FuncInfo->getOrCreateSwiftErrorVRegDefAt(&*It);
assert(CreatedReg);
FuncInfo->setCurrentSwiftErrorVReg(FuncInfo->MBB, SwiftErrorAddr, VReg);
// A load is a use.
} else if (const LoadInst *LI = dyn_cast<const LoadInst>(&*It)) {
const Value *V = LI->getOperand(0);
if (!V->isSwiftError())
continue;
unsigned VReg; bool CreatedReg;
std::tie(VReg, CreatedReg) =
FuncInfo->getOrCreateSwiftErrorVRegUseAt(LI, FuncInfo->MBB, V);
assert(CreatedReg);
// A store is a def.
} else if (const StoreInst *SI = dyn_cast<const StoreInst>(&*It)) {
const Value *SwiftErrorAddr = SI->getOperand(1);
if (!SwiftErrorAddr->isSwiftError())
continue;
// Def of swifterror.
unsigned VReg; bool CreatedReg;
std::tie(VReg, CreatedReg) =
FuncInfo->getOrCreateSwiftErrorVRegDefAt(&*It);
assert(CreatedReg);
FuncInfo->setCurrentSwiftErrorVReg(FuncInfo->MBB, SwiftErrorAddr, VReg);
// A return in a swifterror-returning function is a use.
} else if (const ReturnInst *R = dyn_cast<const ReturnInst>(&*It)) {
const Function *F = R->getParent()->getParent();
if (!F->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
continue;
unsigned VReg; bool CreatedReg;
std::tie(VReg, CreatedReg) = FuncInfo->getOrCreateSwiftErrorVRegUseAt(
R, FuncInfo->MBB, FuncInfo->SwiftErrorArg);
assert(CreatedReg);
}
}
}
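// Illustrative sketch (hypothetical IR) of the four cases handled by
// preassignSwiftErrorRegs for a swifterror location %err:
//
//   %v = load i8*, i8** %err                      ; use of the current vreg
//   store i8* %e, i8** %err                       ; def of a new vreg
//   call swiftcc void @g(i8** swifterror %err)    ; use before, def after
//   ret void                                      ; use, if the function has a
//                                                 ; swifterror argument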
void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
FastISelFailed = false;
// Initialize the Fast-ISel state, if needed.
FastISel *FastIS = nullptr;
if (TM.Options.EnableFastISel)
FastIS = TLI->createFastISel(*FuncInfo, LibInfo);
setupSwiftErrorVals(Fn, TLI, FuncInfo);
ReversePostOrderTraversal<const Function*> RPOT(&Fn);
// Lower arguments up front. An RPO iteration always visits the entry block
// first.
assert(*RPOT.begin() == &Fn.getEntryBlock());
++NumEntryBlocks;
// Set up FuncInfo for ISel. Entry blocks never have PHIs.
FuncInfo->MBB = FuncInfo->MBBMap[&Fn.getEntryBlock()];
FuncInfo->InsertPt = FuncInfo->MBB->begin();
if (!FastIS) {
LowerArguments(Fn);
} else {
// See if fast isel can lower the arguments.
FastIS->startNewBlock();
if (!FastIS->lowerArguments()) {
FastISelFailed = true;
// Fast isel failed to lower these arguments
++NumFastIselFailLowerArguments;
OptimizationRemarkMissed R("sdagisel", "FastISelFailure",
Fn.getSubprogram(),
&Fn.getEntryBlock());
R << "FastISel didn't lower all arguments: "
<< ore::NV("Prototype", Fn.getType());
reportFastISelFailure(*MF, *ORE, R, EnableFastISelAbort > 1);
// Use SelectionDAG argument lowering
LowerArguments(Fn);
CurDAG->setRoot(SDB->getControlRoot());
SDB->clear();
CodeGenAndEmitDAG();
}
// If we inserted any instructions at the beginning, make a note of
// where they are, so we can be sure to emit subsequent instructions
// after them.
if (FuncInfo->InsertPt != FuncInfo->MBB->begin())
FastIS->setLastLocalValue(&*std::prev(FuncInfo->InsertPt));
else
FastIS->setLastLocalValue(nullptr);
}
createSwiftErrorEntriesInEntryBlock(FuncInfo, FastIS, TLI, TII, SDB);
processDbgDeclares(FuncInfo);
// Iterate over all basic blocks in the function.
for (const BasicBlock *LLVMBB : RPOT) {
if (OptLevel != CodeGenOpt::None) {
bool AllPredsVisited = true;
for (const_pred_iterator PI = pred_begin(LLVMBB), PE = pred_end(LLVMBB);
PI != PE; ++PI) {
if (!FuncInfo->VisitedBBs.count(*PI)) {
AllPredsVisited = false;
break;
}
}
if (AllPredsVisited) {
for (BasicBlock::const_iterator I = LLVMBB->begin();
const PHINode *PN = dyn_cast<PHINode>(I); ++I)
FuncInfo->ComputePHILiveOutRegInfo(PN);
} else {
for (BasicBlock::const_iterator I = LLVMBB->begin();
const PHINode *PN = dyn_cast<PHINode>(I); ++I)
FuncInfo->InvalidatePHILiveOutRegInfo(PN);
}
FuncInfo->VisitedBBs.insert(LLVMBB);
}
BasicBlock::const_iterator const Begin =
LLVMBB->getFirstNonPHI()->getIterator();
BasicBlock::const_iterator const End = LLVMBB->end();
BasicBlock::const_iterator BI = End;
FuncInfo->MBB = FuncInfo->MBBMap[LLVMBB];
if (!FuncInfo->MBB)
continue; // Some blocks like catchpads have no code or MBB.
// Insert new instructions after any phi or argument setup code.
FuncInfo->InsertPt = FuncInfo->MBB->end();
// Setup an EH landing-pad block.
FuncInfo->ExceptionPointerVirtReg = 0;
FuncInfo->ExceptionSelectorVirtReg = 0;
if (LLVMBB->isEHPad())
if (!PrepareEHLandingPad())
continue;
// Before doing SelectionDAG ISel, see if FastISel has been requested.
if (FastIS) {
if (LLVMBB != &Fn.getEntryBlock())
FastIS->startNewBlock();
unsigned NumFastIselRemaining = std::distance(Begin, End);
// Pre-assign swifterror vregs.
preassignSwiftErrorRegs(TLI, FuncInfo, Begin, End);
// Do FastISel on as many instructions as possible.
for (; BI != Begin; --BI) {
const Instruction *Inst = &*std::prev(BI);
// If we no longer require this instruction, skip it.
if (isFoldedOrDeadInstruction(Inst, FuncInfo) ||
ElidedArgCopyInstrs.count(Inst)) {
--NumFastIselRemaining;
continue;
}
// Bottom-up: reset the insert pos at the top, after any local-value
// instructions.
FastIS->recomputeInsertPt();
// Try to select the instruction with FastISel.
if (FastIS->selectInstruction(Inst)) {
--NumFastIselRemaining;
++NumFastIselSuccess;
// If fast isel succeeded, skip over all the folded instructions, and
// then see if there is a load right before the selected instruction.
// Try to fold the load if so.
const Instruction *BeforeInst = Inst;
while (BeforeInst != &*Begin) {
BeforeInst = &*std::prev(BasicBlock::const_iterator(BeforeInst));
if (!isFoldedOrDeadInstruction(BeforeInst, FuncInfo))
break;
}
if (BeforeInst != Inst && isa<LoadInst>(BeforeInst) &&
BeforeInst->hasOneUse() &&
FastIS->tryToFoldLoad(cast<LoadInst>(BeforeInst), Inst)) {
// If we succeeded, don't re-select the load.
BI = std::next(BasicBlock::const_iterator(BeforeInst));
--NumFastIselRemaining;
++NumFastIselSuccess;
}
continue;
}
FastISelFailed = true;
// Then handle certain instructions as single-LLVM-Instruction blocks.
// We cannot separate out GCrelocates to their own blocks since we need
// to keep track of gc-relocates for a particular gc-statepoint. This is
// done by SelectionDAGBuilder::LowerAsSTATEPOINT, called before
// visitGCRelocate.
if (isa<CallInst>(Inst) && !isStatepoint(Inst) && !isGCRelocate(Inst)) {
OptimizationRemarkMissed R("sdagisel", "FastISelFailure",
Inst->getDebugLoc(), LLVMBB);
R << "FastISel missed call";
if (R.isEnabled() || EnableFastISelAbort) {
std::string InstStrStorage;
raw_string_ostream InstStr(InstStrStorage);
InstStr << *Inst;
R << ": " << InstStr.str();
}
reportFastISelFailure(*MF, *ORE, R, EnableFastISelAbort > 2);
if (!Inst->getType()->isVoidTy() && !Inst->getType()->isTokenTy() &&
!Inst->use_empty()) {
unsigned &R = FuncInfo->ValueMap[Inst];
if (!R)
R = FuncInfo->CreateRegs(Inst->getType());
}
bool HadTailCall = false;
MachineBasicBlock::iterator SavedInsertPt = FuncInfo->InsertPt;
SelectBasicBlock(Inst->getIterator(), BI, HadTailCall);
// If the call was emitted as a tail call, we're done with the block.
// We also need to delete any previously emitted instructions.
if (HadTailCall) {
FastIS->removeDeadCode(SavedInsertPt, FuncInfo->MBB->end());
--BI;
break;
}
// Recompute NumFastIselRemaining as Selection DAG instruction
// selection may have handled the call, input args, etc.
unsigned RemainingNow = std::distance(Begin, BI);
NumFastIselFailures += NumFastIselRemaining - RemainingNow;
NumFastIselRemaining = RemainingNow;
continue;
}
OptimizationRemarkMissed R("sdagisel", "FastISelFailure",
Inst->getDebugLoc(), LLVMBB);
bool ShouldAbort = EnableFastISelAbort;
if (isa<TerminatorInst>(Inst)) {
// Use a different message for terminator misses.
R << "FastISel missed terminator";
// Don't abort for terminator unless the level is really high
ShouldAbort = (EnableFastISelAbort > 2);
} else {
R << "FastISel missed";
}
if (R.isEnabled() || EnableFastISelAbort) {
std::string InstStrStorage;
raw_string_ostream InstStr(InstStrStorage);
InstStr << *Inst;
R << ": " << InstStr.str();
}
reportFastISelFailure(*MF, *ORE, R, ShouldAbort);
NumFastIselFailures += NumFastIselRemaining;
break;
}
FastIS->recomputeInsertPt();
}
if (getAnalysis<StackProtector>().shouldEmitSDCheck(*LLVMBB)) {
bool FunctionBasedInstrumentation =
TLI->getSSPStackGuardCheck(*Fn.getParent());
SDB->SPDescriptor.initialize(LLVMBB, FuncInfo->MBBMap[LLVMBB],
FunctionBasedInstrumentation);
}
if (Begin != BI)
++NumDAGBlocks;
else
++NumFastIselBlocks;
if (Begin != BI) {
// Run SelectionDAG instruction selection on the remainder of the block
// not handled by FastISel. If FastISel is not run, this is the entire
// block.
bool HadTailCall;
SelectBasicBlock(Begin, BI, HadTailCall);
// But if FastISel was run, we already selected some of the block.
// If we emitted a tail-call, we need to delete any previously emitted
// instruction that follows it.
if (HadTailCall && FuncInfo->InsertPt != FuncInfo->MBB->end())
FastIS->removeDeadCode(FuncInfo->InsertPt, FuncInfo->MBB->end());
}
FinishBasicBlock();
FuncInfo->PHINodesToUpdate.clear();
ElidedArgCopyInstrs.clear();
}
propagateSwiftErrorVRegs(FuncInfo);
delete FastIS;
SDB->clearDanglingDebugInfo();
SDB->SPDescriptor.resetPerFunctionState();
}
/// Given that the input MI is before a partial terminator sequence TSeq, return
/// true if MI + TSeq is also a partial terminator sequence.
///
/// A Terminator sequence is a sequence of MachineInstrs which at this point in
/// lowering copy vregs into physical registers, which are then passed into
/// terminator instructions so we can satisfy ABI constraints. A partial
/// terminator sequence is an improper subset of a terminator sequence (i.e. it
/// may be the whole terminator sequence).
static bool MIIsInTerminatorSequence(const MachineInstr &MI) {
// If we do not have a copy or an implicit def, we return true if and only if
// MI is a debug value.
if (!MI.isCopy() && !MI.isImplicitDef())
// Sometimes DBG_VALUE MIs sneak in between the copies from the vregs to the
// physical registers if there is debug info associated with the terminator
// of our mbb. We want to include said debug info in our terminator
// sequence, so we return true in that case.
return MI.isDebugValue();
// We have left the terminator sequence if we are not doing one of the
// following:
//
// 1. Copying a vreg into a physical register.
// 2. Copying a vreg into a vreg.
// 3. Defining a register via an implicit def.
// OPI should always be a register definition...
MachineInstr::const_mop_iterator OPI = MI.operands_begin();
if (!OPI->isReg() || !OPI->isDef())
return false;
// Defining any register via an implicit def is always ok.
if (MI.isImplicitDef())
return true;
// Grab the copy source...
MachineInstr::const_mop_iterator OPI2 = OPI;
++OPI2;
assert(OPI2 != MI.operands_end()
&& "Should have a copy implying we should have 2 arguments.");
// Make sure that the copy dest is not a vreg when the copy source is a
// physical register.
if (!OPI2->isReg() ||
(!TargetRegisterInfo::isPhysicalRegister(OPI->getReg()) &&
TargetRegisterInfo::isPhysicalRegister(OPI2->getReg())))
return false;
return true;
}
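// Illustrative sketch (hypothetical opcodes and registers): a terminator
// sequence as described above typically looks like
//
//   %physreg0 = COPY %10
//   %physreg1 = COPY %11
//   RET 0, implicit %physreg0, implicit %physreg1
//
// MIIsInTerminatorSequence returns true for the vreg-to-physreg COPYs (and for
// DBG_VALUEs interleaved with them) and false otherwise, which is how the
// split-point search below finds the start of the sequence.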
/// Find the split point at which to splice the end of BB into its success stack
/// protector check machine basic block.
///
/// On many platforms, due to ABI constraints, terminators, even before register
/// allocation, use physical registers. This creates an issue for us since
/// physical registers at this point can not travel across basic
/// blocks. Luckily, selectiondag always moves physical registers into vregs
/// when they enter functions and moves them through a sequence of copies back
/// into the physical registers right before the terminator creating a
/// ``Terminator Sequence''. This function is searching for the beginning of the
/// terminator sequence so that we can ensure that we splice off not just the
/// terminator, but additionally the copies that move the vregs into the
/// physical registers.
static MachineBasicBlock::iterator
FindSplitPointForStackProtector(MachineBasicBlock *BB) {
MachineBasicBlock::iterator SplitPoint = BB->getFirstTerminator();
// If the block is empty or begins with a terminator, there is nothing to
// split before.
if (SplitPoint == BB->begin())
return SplitPoint;
MachineBasicBlock::iterator Start = BB->begin();
MachineBasicBlock::iterator Previous = SplitPoint;
--Previous;
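// Walk backwards over the terminator sequence: besides the terminators
// themselves, this includes the physical-register copies that feed them
// (see MIIsInTerminatorSequence), so the split point lands before the
// entire sequence.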
while (MIIsInTerminatorSequence(*Previous)) {
SplitPoint = Previous;
if (Previous == Start)
break;
--Previous;
}
return SplitPoint;
}
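/// Finish selecting the current LLVM basic block: update machine PHI nodes
/// with the registers produced during selection, then emit any machine basic
/// blocks that were deferred (stack protector check, bit tests, jump tables,
/// and switch lowering cases).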
void
SelectionDAGISel::FinishBasicBlock() {
DEBUG(dbgs() << "Total amount of phi nodes to update: "
<< FuncInfo->PHINodesToUpdate.size() << "\n";
for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e; ++i)
dbgs() << "Node " << i << " : ("
<< FuncInfo->PHINodesToUpdate[i].first
<< ", " << FuncInfo->PHINodesToUpdate[i].second << ")\n");
// Next, now that we know which MBB the LLVM BB was last expanded into, update
// PHI nodes in successors.
for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e; ++i) {
MachineInstrBuilder PHI(*MF, FuncInfo->PHINodesToUpdate[i].first);
assert(PHI->isPHI() &&
"This is not a machine PHI node that we are updating!");
if (!FuncInfo->MBB->isSuccessor(PHI->getParent()))
continue;
PHI.addReg(FuncInfo->PHINodesToUpdate[i].second).addMBB(FuncInfo->MBB);
}
// Handle stack protector.
if (SDB->SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {
// The target provides a guard check function. There is no need to
// generate error handling code or to split the current basic block.
MachineBasicBlock *ParentMBB = SDB->SPDescriptor.getParentMBB();
// Add the guard load and check to the basic block.
FuncInfo->MBB = ParentMBB;
FuncInfo->InsertPt =
FindSplitPointForStackProtector(ParentMBB);
SDB->visitSPDescriptorParent(SDB->SPDescriptor, ParentMBB);
CurDAG->setRoot(SDB->getRoot());
SDB->clear();
CodeGenAndEmitDAG();
// Clear the Per-BB State.
SDB->SPDescriptor.resetPerBBState();
} else if (SDB->SPDescriptor.shouldEmitStackProtector()) {
MachineBasicBlock *ParentMBB = SDB->SPDescriptor.getParentMBB();
MachineBasicBlock *SuccessMBB = SDB->SPDescriptor.getSuccessMBB();
// Find the split point to split the parent mbb. At the same time copy all
// physical registers used in the tail of parent mbb into virtual registers
// before the split point and back into physical registers after the split
// point. This prevents us from needing to deal with Live-ins and many other
// register allocation issues caused by splitting the parent mbb. The
// register allocator will clean up said virtual copies later on.
MachineBasicBlock::iterator SplitPoint =
FindSplitPointForStackProtector(ParentMBB);
// Splice the terminator of ParentMBB into SuccessMBB.
SuccessMBB->splice(SuccessMBB->end(), ParentMBB,
SplitPoint,
ParentMBB->end());
// Add to the parent BB the guard comparison, the branch to the failure block
// on mismatch, and the branch to the success block.
FuncInfo->MBB = ParentMBB;
FuncInfo->InsertPt = ParentMBB->end();
SDB->visitSPDescriptorParent(SDB->SPDescriptor, ParentMBB);
CurDAG->setRoot(SDB->getRoot());
SDB->clear();
CodeGenAndEmitDAG();
// CodeGen Failure MBB if we have not codegened it yet.
MachineBasicBlock *FailureMBB = SDB->SPDescriptor.getFailureMBB();
if (FailureMBB->empty()) {
FuncInfo->MBB = FailureMBB;
FuncInfo->InsertPt = FailureMBB->end();
SDB->visitSPDescriptorFailure(SDB->SPDescriptor);
CurDAG->setRoot(SDB->getRoot());
SDB->clear();
CodeGenAndEmitDAG();
}
// Clear the Per-BB State.
SDB->SPDescriptor.resetPerBBState();
}
// Lower each BitTestBlock.
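// A BitTestBlock expands into a header MBB that performs the range check and
// a chain of case MBBs that each test one bit mask and branch to the
// corresponding target, falling through to the next case (or to Default).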
for (auto &BTB : SDB->BitTestCases) {
// Lower header first, if it wasn't already lowered
if (!BTB.Emitted) {
// Set the current basic block to the mbb we wish to insert the code into
FuncInfo->MBB = BTB.Parent;
FuncInfo->InsertPt = FuncInfo->MBB->end();
// Emit the code
SDB->visitBitTestHeader(BTB, FuncInfo->MBB);
CurDAG->setRoot(SDB->getRoot());
SDB->clear();
CodeGenAndEmitDAG();
}
BranchProbability UnhandledProb = BTB.Prob;
for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
UnhandledProb -= BTB.Cases[j].ExtraProb;
// Set the current basic block to the mbb we wish to insert the code into
FuncInfo->MBB = BTB.Cases[j].ThisBB;
FuncInfo->InsertPt = FuncInfo->MBB->end();
// Emit the code
// If all cases cover a contiguous range, it is not necessary to jump to
// the default block after the last bit test fails. This is because the
// range check during bit test header creation has guaranteed that every
// case here doesn't go outside the range. In this case, there is no need
// to perform the last bit test, as it will always be true. Instead, make
// the second-to-last bit-test fall through to the target of the last bit
// test, and delete the last bit test.
MachineBasicBlock *NextMBB;
if (BTB.ContiguousRange && j + 2 == ej) {
// Second-to-last bit-test with contiguous range: fall through to the
// target of the final bit test.
NextMBB = BTB.Cases[j + 1].TargetBB;
} else if (j + 1 == ej) {
// For the last bit test, fall through to Default.
NextMBB = BTB.Default;
} else {
// Otherwise, fall through to the next bit test.
NextMBB = BTB.Cases[j + 1].ThisBB;
}
SDB->visitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j],
FuncInfo->MBB);
CurDAG->setRoot(SDB->getRoot());
SDB->clear();
CodeGenAndEmitDAG();
if (BTB.ContiguousRange && j + 2 == ej) {
// Since we're not going to use the final bit test, remove it.
BTB.Cases.pop_back();
break;
}
}
// Update PHI Nodes
for (unsigned pi = 0, pe = FuncInfo->PHINodesToUpdate.size();
pi != pe; ++pi) {
MachineInstrBuilder PHI(*MF, FuncInfo->PHINodesToUpdate[pi].first);
MachineBasicBlock *PHIBB = PHI->getParent();
assert(PHI->isPHI() &&
"This is not a machine PHI node that we are updating!");
// This is "default" BB. We have two jumps to it. From "header" BB and
// from last "case" BB, unless the latter was skipped.
if (PHIBB == BTB.Default) {
PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second).addMBB(BTB.Parent);
if (!BTB.ContiguousRange) {
PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second)
.addMBB(BTB.Cases.back().ThisBB);
}
}
// One of "cases" BB.
for (unsigned j = 0, ej = BTB.Cases.size();
j != ej; ++j) {
MachineBasicBlock* cBB = BTB.Cases[j].ThisBB;
if (cBB->isSuccessor(PHIBB))
PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second).addMBB(cBB);
}
}
}
SDB->BitTestCases.clear();
// If the JumpTable record is filled in, then we need to emit a jump table.
// Updating the PHI nodes is tricky in this case, since we need to determine
// whether the PHI is a successor of the range check MBB or the jump table MBB
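// Each JTCases entry pairs a JumpTableHeader (the range check emitted in the
// original block) with the JumpTable itself (the block holding the indirect
// branch).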
for (unsigned i = 0, e = SDB->JTCases.size(); i != e; ++i) {
// Lower header first, if it wasn't already lowered
if (!SDB->JTCases[i].first.Emitted) {
// Set the current basic block to the mbb we wish to insert the code into
FuncInfo->MBB = SDB->JTCases[i].first.HeaderBB;
FuncInfo->InsertPt = FuncInfo->MBB->end();
// Emit the code
SDB->visitJumpTableHeader(SDB->JTCases[i].second, SDB->JTCases[i].first,
FuncInfo->MBB);
CurDAG->setRoot(SDB->getRoot());
SDB->clear();
CodeGenAndEmitDAG();
}
// Set the current basic block to the mbb we wish to insert the code into
FuncInfo->MBB = SDB->JTCases[i].second.MBB;
FuncInfo->InsertPt = FuncInfo->MBB->end();
// Emit the code
SDB->visitJumpTable(SDB->JTCases[i].second);
CurDAG->setRoot(SDB->getRoot());
SDB->clear();
CodeGenAndEmitDAG();
// Update PHI Nodes
for (unsigned pi = 0, pe = FuncInfo->PHINodesToUpdate.size();
pi != pe; ++pi) {
MachineInstrBuilder PHI(*MF, FuncInfo->PHINodesToUpdate[pi].first);
MachineBasicBlock *PHIBB = PHI->getParent();
assert(PHI->isPHI() &&
"This is not a machine PHI node that we are updating!");
// "default" BB. We can go there only from header BB.
if (PHIBB == SDB->JTCases[i].second.Default)
PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second)
.addMBB(SDB->JTCases[i].first.HeaderBB);
// JT BB. Just iterate over successors here
if (FuncInfo->MBB->isSuccessor(PHIBB))
PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second).addMBB(FuncInfo->MBB);
}
}
SDB->JTCases.clear();
// If we generated any switch lowering information, build and codegen any
// additional DAGs necessary.
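// Each SwitchCases entry is a CaseBlock: one compare-and-branch block produced
// while lowering the switch (e.g. a node of the binary comparison tree).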
for (unsigned i = 0, e = SDB->SwitchCases.size(); i != e; ++i) {
// Set the current basic block to the mbb we wish to insert the code into
FuncInfo->MBB = SDB->SwitchCases[i].ThisBB;
FuncInfo->InsertPt = FuncInfo->MBB->end();
// Determine the unique successors.
SmallVector<MachineBasicBlock *, 2> Succs;
Succs.push_back(SDB->SwitchCases[i].TrueBB);
if (SDB->SwitchCases[i].TrueBB != SDB->SwitchCases[i].FalseBB)
Succs.push_back(SDB->SwitchCases[i].FalseBB);
// Emit the code. Note that this could result in FuncInfo->MBB being split.
SDB->visitSwitchCase(SDB->SwitchCases[i], FuncInfo->MBB);
CurDAG->setRoot(SDB->getRoot());
SDB->clear();
CodeGenAndEmitDAG();
// Remember the last block, now that any splitting is done, for use in
// populating PHI nodes in successors.
MachineBasicBlock *ThisBB = FuncInfo->MBB;
// Handle any PHI nodes in successors of this chunk, as if we were coming
// from the original BB before switch expansion. Note that PHI nodes can
// occur multiple times in PHINodesToUpdate. We have to be very careful to
// handle them the right number of times.
for (unsigned i = 0, e = Succs.size(); i != e; ++i) {
FuncInfo->MBB = Succs[i];
FuncInfo->InsertPt = FuncInfo->MBB->end();
// FuncInfo->MBB may have been removed from the CFG if a branch was
// constant folded.
if (ThisBB->isSuccessor(FuncInfo->MBB)) {
for (MachineBasicBlock::iterator
MBBI = FuncInfo->MBB->begin(), MBBE = FuncInfo->MBB->end();
MBBI != MBBE && MBBI->isPHI(); ++MBBI) {
MachineInstrBuilder PHI(*MF, MBBI);
// This value for this PHI node is recorded in PHINodesToUpdate.
for (unsigned pn = 0; ; ++pn) {
assert(pn != FuncInfo->PHINodesToUpdate.size() &&
"Didn't find PHI entry!");
if (FuncInfo->PHINodesToUpdate[pn].first == PHI) {
PHI.addReg(FuncInfo->PHINodesToUpdate[pn].second).addMBB(ThisBB);
break;
}
}
}
}
}
}
SDB->SwitchCases.clear();
}
/// Create the scheduler. If a specific scheduler was specified
/// via the SchedulerRegistry, use it; otherwise select the
/// one preferred by the target.
///
ScheduleDAGSDNodes *SelectionDAGISel::CreateScheduler() {
return ISHeuristic(this, OptLevel);
}
//===----------------------------------------------------------------------===//
// Helper functions used by the generated instruction selector.
//===----------------------------------------------------------------------===//
// Calls to these methods are generated by tblgen.
/// CheckAndMask - The isel is trying to match something like (and X, 255). If
/// the dag combiner simplified the 255, we still want to match. RHS is the
/// actual value in the DAG on the RHS of an AND, and DesiredMaskS is the value
/// specified in the .td file (e.g. 255).
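/// For example, if the .td pattern wants (and X, 255) but the combiner has
/// already narrowed the constant to (and X, 15) because the upper bits of X
/// are known to be zero, the missing desired bits (0xF0) are covered by
/// MaskedValueIsZero and the pattern still matches.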
bool SelectionDAGISel::CheckAndMask(SDValue LHS, ConstantSDNode *RHS,
int64_t DesiredMaskS) const {
const APInt &ActualMask = RHS->getAPIntValue();
const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS);
// If the actual mask exactly matches, success!
if (ActualMask == DesiredMask)
return true;
// If the actual AND mask keeps bits that the desired mask clears, this
// doesn't match.
if (ActualMask.intersects(~DesiredMask))
return false;
// Otherwise, the DAG Combiner may have proven that the value coming in is
// either already zero or is not demanded. Check for known zero input bits.
APInt NeededMask = DesiredMask & ~ActualMask;
if (CurDAG->MaskedValueIsZero(LHS, NeededMask))
return true;
// TODO: check to see if missing bits are just not demanded.
// Otherwise, this pattern doesn't match.
return false;
}
/// CheckOrMask - The isel is trying to match something like (or X, 255). If
/// the dag combiner simplified the 255, we still want to match. RHS is the
/// actual value in the DAG on the RHS of an OR, and DesiredMaskS is the value
/// specified in the .td file (e.g. 255).
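/// For example, if the .td pattern wants (or X, 255) but the combiner has
/// shrunk the constant to (or X, 240) because the low four bits of X are
/// already known to be one, the missing bits (0x0F) are a subset of Known.One
/// and the pattern still matches.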
bool SelectionDAGISel::CheckOrMask(SDValue LHS, ConstantSDNode *RHS,
int64_t DesiredMaskS) const {
const APInt &ActualMask = RHS->getAPIntValue();
const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS);
// If the actual mask exactly matches, success!
if (ActualMask == DesiredMask)
return true;
// If the actual OR mask sets bits that the desired mask does not, this
// doesn't match.
if (ActualMask.intersects(~DesiredMask))
return false;
// Otherwise, the DAG Combiner may have proven that the value coming in is
// either already one or is not demanded. Check for known one input bits.
APInt NeededMask = DesiredMask & ~ActualMask;
KnownBits Known;
CurDAG->computeKnownBits(LHS, Known);
// If all the missing bits in the or are already known to be set, match!
if (NeededMask.isSubsetOf(Known.One))
return true;
// TODO: check to see if missing bits are just not demanded.
// Otherwise, this pattern doesn't match.
return false;
}
/// SelectInlineAsmMemoryOperands - Calls to this are automatically generated
/// by tblgen. Others should not call it.
void SelectionDAGISel::SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops,
const SDLoc &DL) {
std::vector<SDValue> InOps;
std::swap(InOps, Ops);
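// The INLINEASM node's operands are: the input chain, the asm string, the
// !srcloc metadata, and the extra-info flags, followed by groups of operands
// that each start with a flag word, and optionally a trailing glue value.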
Ops.push_back(InOps[InlineAsm::Op_InputChain]); // 0
Ops.push_back(InOps[InlineAsm::Op_AsmString]); // 1
Ops.push_back(InOps[InlineAsm::Op_MDNode]); // 2, !srcloc
Ops.push_back(InOps[InlineAsm::Op_ExtraInfo]); // 3 (SideEffect, AlignStack)
unsigned i = InlineAsm::Op_FirstOperand, e = InOps.size();
if (InOps[e-1].getValueType() == MVT::Glue)
--e; // Don't process a glue operand if it is here.
while (i != e) {
unsigned Flags = cast<ConstantSDNode>(InOps[i])->getZExtValue();
if (!InlineAsm::isMemKind(Flags)) {
// Just skip over this operand, copying the operands verbatim.
Ops.insert(Ops.end(), InOps.begin()+i,
InOps.begin()+i+InlineAsm::getNumOperandRegisters(Flags) + 1);
i += InlineAsm::getNumOperandRegisters(Flags) + 1;
} else {
assert(InlineAsm::getNumOperandRegisters(Flags) == 1 &&
"Memory operand with multiple values?");
unsigned TiedToOperand;
if (InlineAsm::isUseOperandTiedToDef(Flags, TiedToOperand)) {
// We need the constraint ID from the operand this is tied to.
unsigned CurOp = InlineAsm::Op_FirstOperand;
Flags = cast<ConstantSDNode>(InOps[CurOp])->getZExtValue();
for (; TiedToOperand; --TiedToOperand) {
CurOp += InlineAsm::getNumOperandRegisters(Flags)+1;
Flags = cast<ConstantSDNode>(InOps[CurOp])->getZExtValue();
}
}
// Otherwise, this is a memory operand. Ask the target to select it.
std::vector<SDValue> SelOps;
unsigned ConstraintID = InlineAsm::getMemoryConstraintID(Flags);
if (SelectInlineAsmMemoryOperand(InOps[i+1], ConstraintID, SelOps))
report_fatal_error("Could not match memory address. Inline asm"
" failure!");
// Add this to the output node.
unsigned NewFlags =
InlineAsm::getFlagWord(InlineAsm::Kind_Mem, SelOps.size());
NewFlags = InlineAsm::getFlagWordForMem(NewFlags, ConstraintID);
Ops.push_back(CurDAG->getTargetConstant(NewFlags, DL, MVT::i32));
Ops.insert(Ops.end(), SelOps.begin(), SelOps.end());
i += 2;
}
}
// Add the glue input back if present.
if (e != InOps.size())
Ops.push_back(InOps.back());
}
/// findGlueUse - Return use of MVT::Glue value produced by the specified
/// SDNode.
///
static SDNode *findGlueUse(SDNode *N) {
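// A node that produces glue always produces it as its last result.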
unsigned FlagResNo = N->getNumValues()-1;
for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
SDUse &Use = I.getUse();
if (Use.getResNo() == FlagResNo)
return Use.getUser();
}
return nullptr;
}
/// findNonImmUse - Return true if "Use" is a non-immediate use of "Def".
/// This function iteratively traverses up the operand chain, ignoring
/// certain nodes.
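/// Here "non-immediate" means a use of Def reached by walking up the operand
/// graph from Use that is not the ImmedUse (or the Root) being folded; such
/// a use is exactly what would turn the fold into a cycle.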
static bool findNonImmUse(SDNode *Use, SDNode* Def, SDNode *ImmedUse,
SDNode *Root, SmallPtrSetImpl<SDNode*> &Visited,
bool IgnoreChains) {
  // Node IDs are assigned so that a node's ID is guaranteed to be
// greater than all of its (recursive) operands. If we scan to a point where
// 'use' is smaller than the node we're scanning for, then we know we will
// never find it.
//
  // The Use's ID may be -1 (unassigned) if it is a newly allocated node. This can
// happen because we scan down to newly selected nodes in the case of glue
// uses.
std::vector<SDNode *> WorkList;
WorkList.push_back(Use);
while (!WorkList.empty()) {
Use = WorkList.back();
WorkList.pop_back();
if (Use->getNodeId() < Def->getNodeId() && Use->getNodeId() != -1)
continue;
    // Don't revisit a node if we have already scanned it without failing; we
    // know we won't fail if we scan it again.
if (!Visited.insert(Use).second)
continue;
for (const SDValue &Op : Use->op_values()) {
// Ignore chain uses, they are validated by HandleMergeInputChains.
if (Op.getValueType() == MVT::Other && IgnoreChains)
continue;
SDNode *N = Op.getNode();
if (N == Def) {
if (Use == ImmedUse || Use == Root)
continue; // We are not looking for immediate use.
assert(N != Root);
return true;
}
// Traverse up the operand chain.
WorkList.push_back(N);
}
}
return false;
}
/// IsProfitableToFold - Returns true if it's profitable to fold the specific
/// operand node N of U during instruction selection that starts at Root.
bool SelectionDAGISel::IsProfitableToFold(SDValue N, SDNode *U,
SDNode *Root) const {
if (OptLevel == CodeGenOpt::None) return false;
return N.hasOneUse();
}
/// IsLegalToFold - Returns true if the specific operand node N of
/// U can be folded during instruction selection that starts at Root.
bool SelectionDAGISel::IsLegalToFold(SDValue N, SDNode *U, SDNode *Root,
CodeGenOpt::Level OptLevel,
bool IgnoreChains) {
if (OptLevel == CodeGenOpt::None) return false;
  // If Root can somehow reach N through a path that doesn't contain
  // U, then folding N would create a cycle. e.g., in the following
  // diagram, Root can reach N through X. If N is folded into Root, then
// X is both a predecessor and a successor of U.
//
// [N*] //
// ^ ^ //
// / \ //
// [U*] [X]? //
// ^ ^ //
// \ / //
// \ / //
// [Root*] //
//
// * indicates nodes to be folded together.
//
// If Root produces glue, then it gets (even more) interesting. Since it
// will be "glued" together with its glue use in the scheduler, we need to
// check if it might reach N.
//
// [N*] //
// ^ ^ //
// / \ //
// [U*] [X]? //
// ^ ^ //
// \ \ //
// \ | //
// [Root*] | //
// ^ | //
// f | //
// | / //
// [Y] / //
// ^ / //
// f / //
// | / //
// [GU] //
//
// If GU (glue use) indirectly reaches N (the load), and Root folds N
// (call it Fold), then X is a predecessor of GU and a successor of
// Fold. But since Fold and GU are glued together, this will create
// a cycle in the scheduling graph.
// If the node has glue, walk down the graph to the "lowest" node in the
  // glued set.
EVT VT = Root->getValueType(Root->getNumValues()-1);
while (VT == MVT::Glue) {
SDNode *GU = findGlueUse(Root);
if (!GU)
break;
Root = GU;
VT = Root->getValueType(Root->getNumValues()-1);
// If our query node has a glue result with a use, we've walked up it. If
// the user (which has already been selected) has a chain or indirectly uses
// the chain, our WalkChainUsers predicate will not consider it. Because of
// this, we cannot ignore chains in this predicate.
IgnoreChains = false;
}
SmallPtrSet<SDNode*, 16> Visited;
return !findNonImmUse(Root, N.getNode(), U, Root, Visited, IgnoreChains);
}
void SelectionDAGISel::Select_INLINEASM(SDNode *N) {
SDLoc DL(N);
std::vector<SDValue> Ops(N->op_begin(), N->op_end());
SelectInlineAsmMemoryOperands(Ops, DL);
const EVT VTs[] = {MVT::Other, MVT::Glue};
SDValue New = CurDAG->getNode(ISD::INLINEASM, DL, VTs, Ops);
New->setNodeId(-1);
ReplaceUses(N, New.getNode());
CurDAG->RemoveDeadNode(N);
}
void SelectionDAGISel::Select_READ_REGISTER(SDNode *Op) {
SDLoc dl(Op);
MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(Op->getOperand(1));
const MDString *RegStr = dyn_cast<MDString>(MD->getMD()->getOperand(0));
unsigned Reg =
TLI->getRegisterByName(RegStr->getString().data(), Op->getValueType(0),
*CurDAG);
SDValue New = CurDAG->getCopyFromReg(
Op->getOperand(0), dl, Reg, Op->getValueType(0));
New->setNodeId(-1);
ReplaceUses(Op, New.getNode());
CurDAG->RemoveDeadNode(Op);
}
void SelectionDAGISel::Select_WRITE_REGISTER(SDNode *Op) {
SDLoc dl(Op);
MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(Op->getOperand(1));
const MDString *RegStr = dyn_cast<MDString>(MD->getMD()->getOperand(0));
unsigned Reg = TLI->getRegisterByName(RegStr->getString().data(),
Op->getOperand(2).getValueType(),
*CurDAG);
SDValue New = CurDAG->getCopyToReg(
Op->getOperand(0), dl, Reg, Op->getOperand(2));
New->setNodeId(-1);
ReplaceUses(Op, New.getNode());
CurDAG->RemoveDeadNode(Op);
}
void SelectionDAGISel::Select_UNDEF(SDNode *N) {
CurDAG->SelectNodeTo(N, TargetOpcode::IMPLICIT_DEF, N->getValueType(0));
}
/// GetVBR - decode a vbr encoding whose top bit is set.
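/// Each matcher-table byte carries seven payload bits, and the high bit
/// means "another byte follows".  As an illustrative sketch, the value 300
/// (0x12C) would be encoded as the bytes 0xAC 0x02: the caller reads 0xAC,
/// sees the top bit set and calls GetVBR, which keeps the low seven bits
/// (0x2C) and ORs in the next byte's payload shifted left by 7
/// (0x02 << 7 == 256), reconstructing 300.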
LLVM_ATTRIBUTE_ALWAYS_INLINE static inline uint64_t
GetVBR(uint64_t Val, const unsigned char *MatcherTable, unsigned &Idx) {
assert(Val >= 128 && "Not a VBR");
Val &= 127; // Remove first vbr bit.
unsigned Shift = 7;
uint64_t NextBits;
do {
NextBits = MatcherTable[Idx++];
Val |= (NextBits&127) << Shift;
Shift += 7;
} while (NextBits & 128);
return Val;
}
/// When a match is complete, this method updates uses of interior chain results
/// to use the new results.
void SelectionDAGISel::UpdateChains(
SDNode *NodeToMatch, SDValue InputChain,
SmallVectorImpl<SDNode *> &ChainNodesMatched, bool isMorphNodeTo) {
SmallVector<SDNode*, 4> NowDeadNodes;
// Now that all the normal results are replaced, we replace the chain and
// glue results if present.
if (!ChainNodesMatched.empty()) {
assert(InputChain.getNode() &&
"Matched input chains but didn't produce a chain");
// Loop over all of the nodes we matched that produced a chain result.
// Replace all the chain results with the final chain we ended up with.
for (unsigned i = 0, e = ChainNodesMatched.size(); i != e; ++i) {
SDNode *ChainNode = ChainNodesMatched[i];
// If ChainNode is null, it's because we replaced it on a previous
// iteration and we cleared it out of the map. Just skip it.
if (!ChainNode)
continue;
assert(ChainNode->getOpcode() != ISD::DELETED_NODE &&
"Deleted node left in chain");
// Don't replace the results of the root node if we're doing a
// MorphNodeTo.
if (ChainNode == NodeToMatch && isMorphNodeTo)
continue;
SDValue ChainVal = SDValue(ChainNode, ChainNode->getNumValues()-1);
if (ChainVal.getValueType() == MVT::Glue)
ChainVal = ChainVal.getValue(ChainVal->getNumValues()-2);
assert(ChainVal.getValueType() == MVT::Other && "Not a chain?");
SelectionDAG::DAGNodeDeletedListener NDL(
*CurDAG, [&](SDNode *N, SDNode *E) {
std::replace(ChainNodesMatched.begin(), ChainNodesMatched.end(), N,
static_cast<SDNode *>(nullptr));
});
CurDAG->ReplaceAllUsesOfValueWith(ChainVal, InputChain);
// If the node became dead and we haven't already seen it, delete it.
if (ChainNode != NodeToMatch && ChainNode->use_empty() &&
!std::count(NowDeadNodes.begin(), NowDeadNodes.end(), ChainNode))
NowDeadNodes.push_back(ChainNode);
}
}
if (!NowDeadNodes.empty())
CurDAG->RemoveDeadNodes(NowDeadNodes);
DEBUG(dbgs() << "ISEL: Match complete!\n");
}
enum ChainResult {
CR_Simple,
CR_InducesCycle,
CR_LeadsToInteriorNode
};
/// WalkChainUsers - Walk down the users of the specified chained node that is
/// part of the pattern we're matching, looking at all of the users we find.
/// This determines whether something is an interior node, whether we have a
/// non-pattern node in between two pattern nodes (which prevents folding because
/// it would induce a cycle) and whether we have a TokenFactor node sandwiched
/// between pattern nodes (in which case the TF becomes part of the pattern).
///
/// The walk we do here is guaranteed to be small because we quickly get down to
/// already selected nodes "below" us.
static ChainResult
WalkChainUsers(const SDNode *ChainedNode,
SmallVectorImpl<SDNode *> &ChainedNodesInPattern,
DenseMap<const SDNode *, ChainResult> &TokenFactorResult,
SmallVectorImpl<SDNode *> &InteriorChainedNodes) {
ChainResult Result = CR_Simple;
for (SDNode::use_iterator UI = ChainedNode->use_begin(),
E = ChainedNode->use_end(); UI != E; ++UI) {
// Make sure the use is of the chain, not some other value we produce.
if (UI.getUse().getValueType() != MVT::Other) continue;
SDNode *User = *UI;
if (User->getOpcode() == ISD::HANDLENODE) // Root of the graph.
continue;
// If we see an already-selected machine node, then we've gone beyond the
// pattern that we're selecting down into the already selected chunk of the
// DAG.
unsigned UserOpcode = User->getOpcode();
if (User->isMachineOpcode() ||
UserOpcode == ISD::CopyToReg ||
UserOpcode == ISD::CopyFromReg ||
UserOpcode == ISD::INLINEASM ||
UserOpcode == ISD::EH_LABEL ||
UserOpcode == ISD::LIFETIME_START ||
UserOpcode == ISD::LIFETIME_END) {
// If their node ID got reset to -1 then they've already been selected.
// Treat them like a MachineOpcode.
if (User->getNodeId() == -1)
continue;
}
// If we have a TokenFactor, we handle it specially.
if (User->getOpcode() != ISD::TokenFactor) {
// If the node isn't a token factor and isn't part of our pattern, then it
// must be a random chained node in between two nodes we're selecting.
// This happens when we have something like:
// x = load ptr
// call
// y = x+4
// store y -> ptr
// Because we structurally match the load/store as a read/modify/write,
// but the call is chained between them. We cannot fold in this case
// because it would induce a cycle in the graph.
if (!std::count(ChainedNodesInPattern.begin(),
ChainedNodesInPattern.end(), User))
return CR_InducesCycle;
// Otherwise we found a node that is part of our pattern. For example in:
// x = load ptr
// y = x+4
// store y -> ptr
// This would happen when we're scanning down from the load and see the
// store as a user. Record that there is a use of ChainedNode that is
// part of the pattern and keep scanning uses.
Result = CR_LeadsToInteriorNode;
InteriorChainedNodes.push_back(User);
continue;
}
// If we found a TokenFactor, there are two cases to consider: first if the
// TokenFactor is just hanging "below" the pattern we're matching (i.e. no
// uses of the TF are in our pattern) we just want to ignore it. Second,
// the TokenFactor can be sandwiched in between two chained nodes, like so:
// [Load chain]
// ^
// |
// [Load]
// ^ ^
// | \ DAG's like cheese
// / \ do you?
// / |
// [TokenFactor] [Op]
// ^ ^
// | |
// \ /
// \ /
// [Store]
//
// In this case, the TokenFactor becomes part of our match and we rewrite it
// as a new TokenFactor.
//
// To distinguish these two cases, do a recursive walk down the uses.
auto MemoizeResult = TokenFactorResult.find(User);
bool Visited = MemoizeResult != TokenFactorResult.end();
// Recursively walk chain users only if the result is not memoized.
if (!Visited) {
auto Res = WalkChainUsers(User, ChainedNodesInPattern, TokenFactorResult,
InteriorChainedNodes);
MemoizeResult = TokenFactorResult.insert(std::make_pair(User, Res)).first;
}
switch (MemoizeResult->second) {
case CR_Simple:
// If the uses of the TokenFactor are just already-selected nodes, ignore
// it, it is "below" our pattern.
continue;
case CR_InducesCycle:
// If the uses of the TokenFactor lead to nodes that are not part of our
// pattern that are not selected, folding would turn this into a cycle,
// bail out now.
return CR_InducesCycle;
case CR_LeadsToInteriorNode:
break; // Otherwise, keep processing.
}
// Okay, we know we're in the interesting interior case. The TokenFactor
// is now going to be considered part of the pattern so that we rewrite its
// uses (it may have uses that are not part of the pattern) with the
// ultimate chain result of the generated code. We will also add its chain
// inputs as inputs to the ultimate TokenFactor we create.
Result = CR_LeadsToInteriorNode;
if (!Visited) {
ChainedNodesInPattern.push_back(User);
InteriorChainedNodes.push_back(User);
}
}
return Result;
}
/// HandleMergeInputChains - This implements the OPC_EmitMergeInputChains
/// operation for when the pattern matched at least one node with a chain. The
/// input vector contains a list of all of the chained nodes that we match. We
/// must determine if this is a valid thing to cover (i.e. matching it won't
/// induce cycles in the DAG) and if so, create a TokenFactor node that will
/// be used as the input node chain for the generated nodes.
static SDValue
HandleMergeInputChains(SmallVectorImpl<SDNode*> &ChainNodesMatched,
SelectionDAG *CurDAG) {
// Used for memoization. Without it WalkChainUsers could take exponential
// time to run.
DenseMap<const SDNode *, ChainResult> TokenFactorResult;
// Walk all of the chained nodes we've matched, recursively scanning down the
// users of the chain result. This adds any TokenFactor nodes that are caught
// in between chained nodes to the chained and interior nodes list.
SmallVector<SDNode*, 3> InteriorChainedNodes;
for (unsigned i = 0, e = ChainNodesMatched.size(); i != e; ++i) {
if (WalkChainUsers(ChainNodesMatched[i], ChainNodesMatched,
TokenFactorResult,
InteriorChainedNodes) == CR_InducesCycle)
return SDValue(); // Would induce a cycle.
}
// Okay, we have walked all the matched nodes and collected TokenFactor nodes
// that we are interested in. Form our input TokenFactor node.
SmallVector<SDValue, 3> InputChains;
for (unsigned i = 0, e = ChainNodesMatched.size(); i != e; ++i) {
// Add the input chain of this node to the InputChains list (which will be
// the operands of the generated TokenFactor) if it's not an interior node.
SDNode *N = ChainNodesMatched[i];
if (N->getOpcode() != ISD::TokenFactor) {
if (std::count(InteriorChainedNodes.begin(),InteriorChainedNodes.end(),N))
continue;
// Otherwise, add the input chain.
SDValue InChain = ChainNodesMatched[i]->getOperand(0);
assert(InChain.getValueType() == MVT::Other && "Not a chain");
InputChains.push_back(InChain);
continue;
}
// If we have a token factor, we want to add all inputs of the token factor
// that are not part of the pattern we're matching.
for (const SDValue &Op : N->op_values()) {
if (!std::count(ChainNodesMatched.begin(), ChainNodesMatched.end(),
Op.getNode()))
InputChains.push_back(Op);
}
}
if (InputChains.size() == 1)
return InputChains[0];
return CurDAG->getNode(ISD::TokenFactor, SDLoc(ChainNodesMatched[0]),
MVT::Other, InputChains);
}
/// MorphNode - Handle morphing a node in place for the selector.
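/// As an illustrative case (not taken from the original comments), a node
/// producing (i32, ch, glue) morphed into one producing (i32, i32, ch, glue)
/// needs its old chain and glue results re-pointed at the new trailing
/// results; the bookkeeping below records the old result numbers and
/// performs those replacements after MorphNodeTo runs.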
SDNode *SelectionDAGISel::
MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTList,
ArrayRef<SDValue> Ops, unsigned EmitNodeInfo) {
// It is possible we're using MorphNodeTo to replace a node with no
// normal results with one that has a normal result (or we could be
// adding a chain) and the input could have glue and chains as well.
// In this case we need to shift the operands down.
// FIXME: This is a horrible hack and broken in obscure cases, no worse
// than the old isel though.
int OldGlueResultNo = -1, OldChainResultNo = -1;
unsigned NTMNumResults = Node->getNumValues();
if (Node->getValueType(NTMNumResults-1) == MVT::Glue) {
OldGlueResultNo = NTMNumResults-1;
if (NTMNumResults != 1 &&
Node->getValueType(NTMNumResults-2) == MVT::Other)
OldChainResultNo = NTMNumResults-2;
} else if (Node->getValueType(NTMNumResults-1) == MVT::Other)
OldChainResultNo = NTMNumResults-1;
// Call the underlying SelectionDAG routine to do the transmogrification. Note
// that this deletes operands of the old node that become dead.
SDNode *Res = CurDAG->MorphNodeTo(Node, ~TargetOpc, VTList, Ops);
// MorphNodeTo can operate in two ways: if an existing node with the
// specified operands exists, it can just return it. Otherwise, it
// updates the node in place to have the requested operands.
if (Res == Node) {
// If we updated the node in place, reset the node ID. To the isel,
// this should be just like a newly allocated machine node.
Res->setNodeId(-1);
}
unsigned ResNumResults = Res->getNumValues();
// Move the glue if needed.
if ((EmitNodeInfo & OPFL_GlueOutput) && OldGlueResultNo != -1 &&
(unsigned)OldGlueResultNo != ResNumResults-1)
CurDAG->ReplaceAllUsesOfValueWith(SDValue(Node, OldGlueResultNo),
SDValue(Res, ResNumResults-1));
if ((EmitNodeInfo & OPFL_GlueOutput) != 0)
--ResNumResults;
// Move the chain reference if needed.
if ((EmitNodeInfo & OPFL_Chain) && OldChainResultNo != -1 &&
(unsigned)OldChainResultNo != ResNumResults-1)
CurDAG->ReplaceAllUsesOfValueWith(SDValue(Node, OldChainResultNo),
SDValue(Res, ResNumResults-1));
// Otherwise, no replacement happened because the node already exists. Replace
// Uses of the old node with the new one.
if (Res != Node) {
CurDAG->ReplaceAllUsesWith(Node, Res);
CurDAG->RemoveDeadNode(Node);
}
return Res;
}
/// CheckSame - Implements OP_CheckSame.
LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
CheckSame(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDValue N,
const SmallVectorImpl<std::pair<SDValue, SDNode*>> &RecordedNodes) {
// Accept if it is exactly the same as a previously recorded node.
unsigned RecNo = MatcherTable[MatcherIndex++];
assert(RecNo < RecordedNodes.size() && "Invalid CheckSame");
return N == RecordedNodes[RecNo].first;
}
/// CheckChildSame - Implements OP_CheckChildXSame.
LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
CheckChildSame(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDValue N,
const SmallVectorImpl<std::pair<SDValue, SDNode*>> &RecordedNodes,
unsigned ChildNo) {
if (ChildNo >= N.getNumOperands())
return false; // Match fails if out of range child #.
return ::CheckSame(MatcherTable, MatcherIndex, N.getOperand(ChildNo),
RecordedNodes);
}
/// CheckPatternPredicate - Implements OP_CheckPatternPredicate.
LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
CheckPatternPredicate(const unsigned char *MatcherTable, unsigned &MatcherIndex,
const SelectionDAGISel &SDISel) {
return SDISel.CheckPatternPredicate(MatcherTable[MatcherIndex++]);
}
/// CheckNodePredicate - Implements OP_CheckNodePredicate.
LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
CheckNodePredicate(const unsigned char *MatcherTable, unsigned &MatcherIndex,
const SelectionDAGISel &SDISel, SDNode *N) {
return SDISel.CheckNodePredicate(N, MatcherTable[MatcherIndex++]);
}
LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
CheckOpcode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDNode *N) {
uint16_t Opc = MatcherTable[MatcherIndex++];
Opc |= (unsigned short)MatcherTable[MatcherIndex++] << 8;
return N->getOpcode() == Opc;
}
LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
CheckType(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N,
const TargetLowering *TLI, const DataLayout &DL) {
MVT::SimpleValueType VT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
if (N.getValueType() == VT) return true;
// Handle the case when VT is iPTR.
return VT == MVT::iPTR && N.getValueType() == TLI->getPointerTy(DL);
}
LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
CheckChildType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDValue N, const TargetLowering *TLI, const DataLayout &DL,
unsigned ChildNo) {
if (ChildNo >= N.getNumOperands())
return false; // Match fails if out of range child #.
return ::CheckType(MatcherTable, MatcherIndex, N.getOperand(ChildNo), TLI,
DL);
}
LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
CheckCondCode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDValue N) {
return cast<CondCodeSDNode>(N)->get() ==
(ISD::CondCode)MatcherTable[MatcherIndex++];
}
LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
CheckValueType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDValue N, const TargetLowering *TLI, const DataLayout &DL) {
MVT::SimpleValueType VT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
if (cast<VTSDNode>(N)->getVT() == VT)
return true;
// Handle the case when VT is iPTR.
return VT == MVT::iPTR && cast<VTSDNode>(N)->getVT() == TLI->getPointerTy(DL);
}
LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
CheckInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDValue N) {
int64_t Val = MatcherTable[MatcherIndex++];
if (Val & 128)
Val = GetVBR(Val, MatcherTable, MatcherIndex);
ConstantSDNode *C = dyn_cast<ConstantSDNode>(N);
return C && C->getSExtValue() == Val;
}
LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
CheckChildInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDValue N, unsigned ChildNo) {
if (ChildNo >= N.getNumOperands())
return false; // Match fails if out of range child #.
return ::CheckInteger(MatcherTable, MatcherIndex, N.getOperand(ChildNo));
}
LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
CheckAndImm(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDValue N, const SelectionDAGISel &SDISel) {
int64_t Val = MatcherTable[MatcherIndex++];
if (Val & 128)
Val = GetVBR(Val, MatcherTable, MatcherIndex);
if (N->getOpcode() != ISD::AND) return false;
ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
return C && SDISel.CheckAndMask(N.getOperand(0), C, Val);
}
LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
CheckOrImm(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDValue N, const SelectionDAGISel &SDISel) {
int64_t Val = MatcherTable[MatcherIndex++];
if (Val & 128)
Val = GetVBR(Val, MatcherTable, MatcherIndex);
if (N->getOpcode() != ISD::OR) return false;
ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
return C && SDISel.CheckOrMask(N.getOperand(0), C, Val);
}
/// IsPredicateKnownToFail - If we know how and can do so without pushing a
/// scope, evaluate the current node. If the current predicate is known to
/// fail, set Result=true and return anything. If the current predicate is
/// known to pass, set Result=false and return the MatcherIndex to continue
/// with. If the current predicate is unknown, set Result=false and return the
/// MatcherIndex to continue with.
static unsigned IsPredicateKnownToFail(const unsigned char *Table,
unsigned Index, SDValue N,
bool &Result,
const SelectionDAGISel &SDISel,
SmallVectorImpl<std::pair<SDValue, SDNode*>> &RecordedNodes) {
switch (Table[Index++]) {
default:
Result = false;
return Index-1; // Could not evaluate this predicate.
case SelectionDAGISel::OPC_CheckSame:
Result = !::CheckSame(Table, Index, N, RecordedNodes);
return Index;
case SelectionDAGISel::OPC_CheckChild0Same:
case SelectionDAGISel::OPC_CheckChild1Same:
case SelectionDAGISel::OPC_CheckChild2Same:
case SelectionDAGISel::OPC_CheckChild3Same:
Result = !::CheckChildSame(Table, Index, N, RecordedNodes,
Table[Index-1] - SelectionDAGISel::OPC_CheckChild0Same);
return Index;
case SelectionDAGISel::OPC_CheckPatternPredicate:
Result = !::CheckPatternPredicate(Table, Index, SDISel);
return Index;
case SelectionDAGISel::OPC_CheckPredicate:
Result = !::CheckNodePredicate(Table, Index, SDISel, N.getNode());
return Index;
case SelectionDAGISel::OPC_CheckOpcode:
Result = !::CheckOpcode(Table, Index, N.getNode());
return Index;
case SelectionDAGISel::OPC_CheckType:
Result = !::CheckType(Table, Index, N, SDISel.TLI,
SDISel.CurDAG->getDataLayout());
return Index;
case SelectionDAGISel::OPC_CheckChild0Type:
case SelectionDAGISel::OPC_CheckChild1Type:
case SelectionDAGISel::OPC_CheckChild2Type:
case SelectionDAGISel::OPC_CheckChild3Type:
case SelectionDAGISel::OPC_CheckChild4Type:
case SelectionDAGISel::OPC_CheckChild5Type:
case SelectionDAGISel::OPC_CheckChild6Type:
case SelectionDAGISel::OPC_CheckChild7Type:
Result = !::CheckChildType(
Table, Index, N, SDISel.TLI, SDISel.CurDAG->getDataLayout(),
Table[Index - 1] - SelectionDAGISel::OPC_CheckChild0Type);
return Index;
case SelectionDAGISel::OPC_CheckCondCode:
Result = !::CheckCondCode(Table, Index, N);
return Index;
case SelectionDAGISel::OPC_CheckValueType:
Result = !::CheckValueType(Table, Index, N, SDISel.TLI,
SDISel.CurDAG->getDataLayout());
return Index;
case SelectionDAGISel::OPC_CheckInteger:
Result = !::CheckInteger(Table, Index, N);
return Index;
case SelectionDAGISel::OPC_CheckChild0Integer:
case SelectionDAGISel::OPC_CheckChild1Integer:
case SelectionDAGISel::OPC_CheckChild2Integer:
case SelectionDAGISel::OPC_CheckChild3Integer:
case SelectionDAGISel::OPC_CheckChild4Integer:
Result = !::CheckChildInteger(Table, Index, N,
Table[Index-1] - SelectionDAGISel::OPC_CheckChild0Integer);
return Index;
case SelectionDAGISel::OPC_CheckAndImm:
Result = !::CheckAndImm(Table, Index, N, SDISel);
return Index;
case SelectionDAGISel::OPC_CheckOrImm:
Result = !::CheckOrImm(Table, Index, N, SDISel);
return Index;
}
}
namespace {
struct MatchScope {
/// FailIndex - If this match fails, this is the index to continue with.
unsigned FailIndex;
/// NodeStack - The node stack when the scope was formed.
SmallVector<SDValue, 4> NodeStack;
/// NumRecordedNodes - The number of recorded nodes when the scope was formed.
unsigned NumRecordedNodes;
/// NumMatchedMemRefs - The number of matched memref entries.
unsigned NumMatchedMemRefs;
/// InputChain/InputGlue - The current chain/glue
SDValue InputChain, InputGlue;
/// HasChainNodesMatched - True if the ChainNodesMatched list is non-empty.
bool HasChainNodesMatched;
};
/// \brief A DAG update listener to keep the matching state
/// (i.e. RecordedNodes and MatchScope) up to date if the target is allowed to
/// change the DAG while matching. X86 addressing mode matcher is an example
/// for this.
class MatchStateUpdater : public SelectionDAG::DAGUpdateListener
{
SDNode **NodeToMatch;
SmallVectorImpl<std::pair<SDValue, SDNode *>> &RecordedNodes;
SmallVectorImpl<MatchScope> &MatchScopes;
public:
MatchStateUpdater(SelectionDAG &DAG, SDNode **NodeToMatch,
SmallVectorImpl<std::pair<SDValue, SDNode *>> &RN,
SmallVectorImpl<MatchScope> &MS)
: SelectionDAG::DAGUpdateListener(DAG), NodeToMatch(NodeToMatch),
RecordedNodes(RN), MatchScopes(MS) {}
void NodeDeleted(SDNode *N, SDNode *E) override {
// Some early-returns here to avoid the search if we deleted the node or
// if the update comes from MorphNodeTo (MorphNodeTo is the last thing we
// do, so it's unnecessary to update matching state at that point).
// Neither of these can occur currently because we only install this
      // update listener while matching a complex pattern.
if (!E || E->isMachineOpcode())
return;
// Check if NodeToMatch was updated.
if (N == *NodeToMatch)
*NodeToMatch = E;
// Performing linear search here does not matter because we almost never
// run this code. You'd have to have a CSE during complex pattern
// matching.
for (auto &I : RecordedNodes)
if (I.first.getNode() == N)
I.first.setNode(E);
for (auto &I : MatchScopes)
for (auto &J : I.NodeStack)
if (J.getNode() == N)
J.setNode(E);
}
};
} // end anonymous namespace
void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch,
const unsigned char *MatcherTable,
unsigned TableSize) {
// FIXME: Should these even be selected? Handle these cases in the caller?
switch (NodeToMatch->getOpcode()) {
default:
break;
case ISD::EntryToken: // These nodes remain the same.
case ISD::BasicBlock:
case ISD::Register:
case ISD::RegisterMask:
case ISD::HANDLENODE:
case ISD::MDNODE_SDNODE:
case ISD::TargetConstant:
case ISD::TargetConstantFP:
case ISD::TargetConstantPool:
case ISD::TargetFrameIndex:
case ISD::TargetExternalSymbol:
case ISD::MCSymbol:
case ISD::TargetBlockAddress:
case ISD::TargetJumpTable:
case ISD::TargetGlobalTLSAddress:
case ISD::TargetGlobalAddress:
case ISD::TokenFactor:
case ISD::CopyFromReg:
case ISD::CopyToReg:
case ISD::EH_LABEL:
case ISD::ANNOTATION_LABEL:
case ISD::LIFETIME_START:
case ISD::LIFETIME_END:
NodeToMatch->setNodeId(-1); // Mark selected.
return;
case ISD::AssertSext:
case ISD::AssertZext:
CurDAG->ReplaceAllUsesOfValueWith(SDValue(NodeToMatch, 0),
NodeToMatch->getOperand(0));
CurDAG->RemoveDeadNode(NodeToMatch);
return;
case ISD::INLINEASM:
Select_INLINEASM(NodeToMatch);
return;
case ISD::READ_REGISTER:
Select_READ_REGISTER(NodeToMatch);
return;
case ISD::WRITE_REGISTER:
Select_WRITE_REGISTER(NodeToMatch);
return;
case ISD::UNDEF:
Select_UNDEF(NodeToMatch);
return;
}
assert(!NodeToMatch->isMachineOpcode() && "Node already selected!");
// Set up the node stack with NodeToMatch as the only node on the stack.
SmallVector<SDValue, 8> NodeStack;
SDValue N = SDValue(NodeToMatch, 0);
NodeStack.push_back(N);
// MatchScopes - Scopes used when matching, if a match failure happens, this
// indicates where to continue checking.
SmallVector<MatchScope, 8> MatchScopes;
// RecordedNodes - This is the set of nodes that have been recorded by the
// state machine. The second value is the parent of the node, or null if the
// root is recorded.
SmallVector<std::pair<SDValue, SDNode*>, 8> RecordedNodes;
// MatchedMemRefs - This is the set of MemRef's we've seen in the input
// pattern.
SmallVector<MachineMemOperand*, 2> MatchedMemRefs;
// These are the current input chain and glue for use when generating nodes.
// Various Emit operations change these. For example, emitting a copytoreg
// uses and updates these.
SDValue InputChain, InputGlue;
// ChainNodesMatched - If a pattern matches nodes that have input/output
// chains, the OPC_EmitMergeInputChains operation is emitted which indicates
// which ones they are. The result is captured into this list so that we can
// update the chain results when the pattern is complete.
SmallVector<SDNode*, 3> ChainNodesMatched;
DEBUG(dbgs() << "ISEL: Starting pattern match on root node: ";
NodeToMatch->dump(CurDAG);
dbgs() << '\n');
// Determine where to start the interpreter. Normally we start at opcode #0,
// but if the state machine starts with an OPC_SwitchOpcode, then we
// accelerate the first lookup (which is guaranteed to be hot) with the
// OpcodeOffset table.
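  // Sketch of the encoding, inferred from the decoding loop below: after
  // OPC_SwitchOpcode each case is a VBR-encoded case size, a two-byte
  // target-independent opcode, and then the case body; a case size of zero
  // ends the list.  OpcodeOffset[Opc] remembers where each opcode's case
  // body starts so later selections can jump straight to it.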
unsigned MatcherIndex = 0;
if (!OpcodeOffset.empty()) {
// Already computed the OpcodeOffset table, just index into it.
if (N.getOpcode() < OpcodeOffset.size())
MatcherIndex = OpcodeOffset[N.getOpcode()];
DEBUG(dbgs() << " Initial Opcode index to " << MatcherIndex << "\n");
} else if (MatcherTable[0] == OPC_SwitchOpcode) {
// Otherwise, the table isn't computed, but the state machine does start
// with an OPC_SwitchOpcode instruction. Populate the table now, since this
// is the first time we're selecting an instruction.
unsigned Idx = 1;
while (true) {
// Get the size of this case.
unsigned CaseSize = MatcherTable[Idx++];
if (CaseSize & 128)
CaseSize = GetVBR(CaseSize, MatcherTable, Idx);
if (CaseSize == 0) break;
// Get the opcode, add the index to the table.
uint16_t Opc = MatcherTable[Idx++];
Opc |= (unsigned short)MatcherTable[Idx++] << 8;
if (Opc >= OpcodeOffset.size())
OpcodeOffset.resize((Opc+1)*2);
OpcodeOffset[Opc] = Idx;
Idx += CaseSize;
}
// Okay, do the lookup for the first opcode.
if (N.getOpcode() < OpcodeOffset.size())
MatcherIndex = OpcodeOffset[N.getOpcode()];
}
while (true) {
assert(MatcherIndex < TableSize && "Invalid index");
#ifndef NDEBUG
unsigned CurrentOpcodeIndex = MatcherIndex;
#endif
BuiltinOpcodes Opcode = (BuiltinOpcodes)MatcherTable[MatcherIndex++];
switch (Opcode) {
case OPC_Scope: {
// Okay, the semantics of this operation are that we should push a scope
// then evaluate the first child. However, pushing a scope only to have
// the first check fail (which then pops it) is inefficient. If we can
// determine immediately that the first check (or first several) will
// immediately fail, don't even bother pushing a scope for them.
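      // Each alternative in the scope is encoded as a VBR "bytes to skip on
      // failure" count followed by that alternative's matcher ops; a skip
      // count of zero ends the scope (layout inferred from the loop below).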
unsigned FailIndex;
while (true) {
unsigned NumToSkip = MatcherTable[MatcherIndex++];
if (NumToSkip & 128)
NumToSkip = GetVBR(NumToSkip, MatcherTable, MatcherIndex);
// Found the end of the scope with no match.
if (NumToSkip == 0) {
FailIndex = 0;
break;
}
FailIndex = MatcherIndex+NumToSkip;
unsigned MatcherIndexOfPredicate = MatcherIndex;
(void)MatcherIndexOfPredicate; // silence warning.
// If we can't evaluate this predicate without pushing a scope (e.g. if
// it is a 'MoveParent') or if the predicate succeeds on this node, we
// push the scope and evaluate the full predicate chain.
bool Result;
MatcherIndex = IsPredicateKnownToFail(MatcherTable, MatcherIndex, N,
Result, *this, RecordedNodes);
if (!Result)
break;
DEBUG(dbgs() << " Skipped scope entry (due to false predicate) at "
<< "index " << MatcherIndexOfPredicate
<< ", continuing at " << FailIndex << "\n");
++NumDAGIselRetries;
// Otherwise, we know that this case of the Scope is guaranteed to fail,
// move to the next case.
MatcherIndex = FailIndex;
}
// If the whole scope failed to match, bail.
if (FailIndex == 0) break;
// Push a MatchScope which indicates where to go if the first child fails
// to match.
MatchScope NewEntry;
NewEntry.FailIndex = FailIndex;
NewEntry.NodeStack.append(NodeStack.begin(), NodeStack.end());
NewEntry.NumRecordedNodes = RecordedNodes.size();
NewEntry.NumMatchedMemRefs = MatchedMemRefs.size();
NewEntry.InputChain = InputChain;
NewEntry.InputGlue = InputGlue;
NewEntry.HasChainNodesMatched = !ChainNodesMatched.empty();
MatchScopes.push_back(NewEntry);
continue;
}
case OPC_RecordNode: {
// Remember this node, it may end up being an operand in the pattern.
SDNode *Parent = nullptr;
if (NodeStack.size() > 1)
Parent = NodeStack[NodeStack.size()-2].getNode();
RecordedNodes.push_back(std::make_pair(N, Parent));
continue;
}
case OPC_RecordChild0: case OPC_RecordChild1:
case OPC_RecordChild2: case OPC_RecordChild3:
case OPC_RecordChild4: case OPC_RecordChild5:
case OPC_RecordChild6: case OPC_RecordChild7: {
unsigned ChildNo = Opcode-OPC_RecordChild0;
if (ChildNo >= N.getNumOperands())
break; // Match fails if out of range child #.
RecordedNodes.push_back(std::make_pair(N->getOperand(ChildNo),
N.getNode()));
continue;
}
case OPC_RecordMemRef:
MatchedMemRefs.push_back(cast<MemSDNode>(N)->getMemOperand());
continue;
case OPC_CaptureGlueInput:
// If the current node has an input glue, capture it in InputGlue.
if (N->getNumOperands() != 0 &&
N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Glue)
InputGlue = N->getOperand(N->getNumOperands()-1);
continue;
case OPC_MoveChild: {
unsigned ChildNo = MatcherTable[MatcherIndex++];
if (ChildNo >= N.getNumOperands())
break; // Match fails if out of range child #.
N = N.getOperand(ChildNo);
NodeStack.push_back(N);
continue;
}
case OPC_MoveChild0: case OPC_MoveChild1:
case OPC_MoveChild2: case OPC_MoveChild3:
case OPC_MoveChild4: case OPC_MoveChild5:
case OPC_MoveChild6: case OPC_MoveChild7: {
unsigned ChildNo = Opcode-OPC_MoveChild0;
if (ChildNo >= N.getNumOperands())
break; // Match fails if out of range child #.
N = N.getOperand(ChildNo);
NodeStack.push_back(N);
continue;
}
case OPC_MoveParent:
// Pop the current node off the NodeStack.
NodeStack.pop_back();
assert(!NodeStack.empty() && "Node stack imbalance!");
N = NodeStack.back();
continue;
case OPC_CheckSame:
if (!::CheckSame(MatcherTable, MatcherIndex, N, RecordedNodes)) break;
continue;
case OPC_CheckChild0Same: case OPC_CheckChild1Same:
case OPC_CheckChild2Same: case OPC_CheckChild3Same:
if (!::CheckChildSame(MatcherTable, MatcherIndex, N, RecordedNodes,
Opcode-OPC_CheckChild0Same))
break;
continue;
case OPC_CheckPatternPredicate:
if (!::CheckPatternPredicate(MatcherTable, MatcherIndex, *this)) break;
continue;
case OPC_CheckPredicate:
if (!::CheckNodePredicate(MatcherTable, MatcherIndex, *this,
N.getNode()))
break;
continue;
case OPC_CheckComplexPat: {
unsigned CPNum = MatcherTable[MatcherIndex++];
unsigned RecNo = MatcherTable[MatcherIndex++];
assert(RecNo < RecordedNodes.size() && "Invalid CheckComplexPat");
// If target can modify DAG during matching, keep the matching state
// consistent.
std::unique_ptr<MatchStateUpdater> MSU;
if (ComplexPatternFuncMutatesDAG())
MSU.reset(new MatchStateUpdater(*CurDAG, &NodeToMatch, RecordedNodes,
MatchScopes));
if (!CheckComplexPattern(NodeToMatch, RecordedNodes[RecNo].second,
RecordedNodes[RecNo].first, CPNum,
RecordedNodes))
break;
continue;
}
case OPC_CheckOpcode:
if (!::CheckOpcode(MatcherTable, MatcherIndex, N.getNode())) break;
continue;
case OPC_CheckType:
if (!::CheckType(MatcherTable, MatcherIndex, N, TLI,
CurDAG->getDataLayout()))
break;
continue;
case OPC_SwitchOpcode: {
unsigned CurNodeOpcode = N.getOpcode();
unsigned SwitchStart = MatcherIndex-1; (void)SwitchStart;
unsigned CaseSize;
while (true) {
// Get the size of this case.
CaseSize = MatcherTable[MatcherIndex++];
if (CaseSize & 128)
CaseSize = GetVBR(CaseSize, MatcherTable, MatcherIndex);
if (CaseSize == 0) break;
uint16_t Opc = MatcherTable[MatcherIndex++];
Opc |= (unsigned short)MatcherTable[MatcherIndex++] << 8;
// If the opcode matches, then we will execute this case.
if (CurNodeOpcode == Opc)
break;
// Otherwise, skip over this case.
MatcherIndex += CaseSize;
}
// If no cases matched, bail out.
if (CaseSize == 0) break;
// Otherwise, execute the case we found.
DEBUG(dbgs() << " OpcodeSwitch from " << SwitchStart
<< " to " << MatcherIndex << "\n");
continue;
}
case OPC_SwitchType: {
MVT CurNodeVT = N.getSimpleValueType();
unsigned SwitchStart = MatcherIndex-1; (void)SwitchStart;
unsigned CaseSize;
while (true) {
// Get the size of this case.
CaseSize = MatcherTable[MatcherIndex++];
if (CaseSize & 128)
CaseSize = GetVBR(CaseSize, MatcherTable, MatcherIndex);
if (CaseSize == 0) break;
MVT CaseVT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
if (CaseVT == MVT::iPTR)
CaseVT = TLI->getPointerTy(CurDAG->getDataLayout());
// If the VT matches, then we will execute this case.
if (CurNodeVT == CaseVT)
break;
// Otherwise, skip over this case.
MatcherIndex += CaseSize;
}
// If no cases matched, bail out.
if (CaseSize == 0) break;
// Otherwise, execute the case we found.
DEBUG(dbgs() << " TypeSwitch[" << EVT(CurNodeVT).getEVTString()
<< "] from " << SwitchStart << " to " << MatcherIndex<<'\n');
continue;
}
case OPC_CheckChild0Type: case OPC_CheckChild1Type:
case OPC_CheckChild2Type: case OPC_CheckChild3Type:
case OPC_CheckChild4Type: case OPC_CheckChild5Type:
case OPC_CheckChild6Type: case OPC_CheckChild7Type:
if (!::CheckChildType(MatcherTable, MatcherIndex, N, TLI,
CurDAG->getDataLayout(),
Opcode - OPC_CheckChild0Type))
break;
continue;
case OPC_CheckCondCode:
if (!::CheckCondCode(MatcherTable, MatcherIndex, N)) break;
continue;
case OPC_CheckValueType:
if (!::CheckValueType(MatcherTable, MatcherIndex, N, TLI,
CurDAG->getDataLayout()))
break;
continue;
case OPC_CheckInteger:
if (!::CheckInteger(MatcherTable, MatcherIndex, N)) break;
continue;
case OPC_CheckChild0Integer: case OPC_CheckChild1Integer:
case OPC_CheckChild2Integer: case OPC_CheckChild3Integer:
case OPC_CheckChild4Integer:
if (!::CheckChildInteger(MatcherTable, MatcherIndex, N,
Opcode-OPC_CheckChild0Integer)) break;
continue;
case OPC_CheckAndImm:
if (!::CheckAndImm(MatcherTable, MatcherIndex, N, *this)) break;
continue;
case OPC_CheckOrImm:
if (!::CheckOrImm(MatcherTable, MatcherIndex, N, *this)) break;
continue;
case OPC_CheckFoldableChainNode: {
assert(NodeStack.size() != 1 && "No parent node");
// Verify that all intermediate nodes between the root and this one have
// a single use.
bool HasMultipleUses = false;
for (unsigned i = 1, e = NodeStack.size()-1; i != e; ++i)
if (!NodeStack[i].getNode()->hasOneUse()) {
HasMultipleUses = true;
break;
}
if (HasMultipleUses) break;
// Check to see that the target thinks this is profitable to fold and that
// we can fold it without inducing cycles in the graph.
if (!IsProfitableToFold(N, NodeStack[NodeStack.size()-2].getNode(),
NodeToMatch) ||
!IsLegalToFold(N, NodeStack[NodeStack.size()-2].getNode(),
NodeToMatch, OptLevel,
true/*We validate our own chains*/))
break;
continue;
}
case OPC_EmitInteger: {
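      // Decode a VT byte and an immediate (VBR-encoded if it does not fit in
      // 7 bits) from the table and record it as a target constant.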
MVT::SimpleValueType VT =
(MVT::SimpleValueType)MatcherTable[MatcherIndex++];
int64_t Val = MatcherTable[MatcherIndex++];
if (Val & 128)
Val = GetVBR(Val, MatcherTable, MatcherIndex);
RecordedNodes.push_back(std::pair<SDValue, SDNode*>(
CurDAG->getTargetConstant(Val, SDLoc(NodeToMatch),
VT), nullptr));
continue;
}
case OPC_EmitRegister: {
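      // Record a reference to a fixed register with the given value type.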
MVT::SimpleValueType VT =
(MVT::SimpleValueType)MatcherTable[MatcherIndex++];
unsigned RegNo = MatcherTable[MatcherIndex++];
RecordedNodes.push_back(std::pair<SDValue, SDNode*>(
CurDAG->getRegister(RegNo, VT), nullptr));
continue;
}
case OPC_EmitRegister2: {
// For targets w/ more than 256 register names, the register enum
// values are stored in two bytes in the matcher table (just like
// opcodes).
MVT::SimpleValueType VT =
(MVT::SimpleValueType)MatcherTable[MatcherIndex++];
unsigned RegNo = MatcherTable[MatcherIndex++];
RegNo |= MatcherTable[MatcherIndex++] << 8;
RecordedNodes.push_back(std::pair<SDValue, SDNode*>(
CurDAG->getRegister(RegNo, VT), nullptr));
continue;
}
case OPC_EmitConvertToTarget: {
// Convert from IMM/FPIMM to target version.
unsigned RecNo = MatcherTable[MatcherIndex++];
assert(RecNo < RecordedNodes.size() && "Invalid EmitConvertToTarget");
SDValue Imm = RecordedNodes[RecNo].first;
if (Imm->getOpcode() == ISD::Constant) {
const ConstantInt *Val=cast<ConstantSDNode>(Imm)->getConstantIntValue();
Imm = CurDAG->getTargetConstant(*Val, SDLoc(NodeToMatch),
Imm.getValueType());
} else if (Imm->getOpcode() == ISD::ConstantFP) {
const ConstantFP *Val=cast<ConstantFPSDNode>(Imm)->getConstantFPValue();
Imm = CurDAG->getTargetConstantFP(*Val, SDLoc(NodeToMatch),
Imm.getValueType());
}
RecordedNodes.push_back(std::make_pair(Imm, RecordedNodes[RecNo].second));
continue;
}
case OPC_EmitMergeInputChains1_0: // OPC_EmitMergeInputChains, 1, 0
case OPC_EmitMergeInputChains1_1: // OPC_EmitMergeInputChains, 1, 1
case OPC_EmitMergeInputChains1_2: { // OPC_EmitMergeInputChains, 1, 2
// These are space-optimized forms of OPC_EmitMergeInputChains.
assert(!InputChain.getNode() &&
"EmitMergeInputChains should be the first chain producing node");
assert(ChainNodesMatched.empty() &&
"Should only have one EmitMergeInputChains per match");
// Read all of the chained nodes.
unsigned RecNo = Opcode - OPC_EmitMergeInputChains1_0;
assert(RecNo < RecordedNodes.size() && "Invalid EmitMergeInputChains");
ChainNodesMatched.push_back(RecordedNodes[RecNo].first.getNode());
// FIXME: What if other value results of the node have uses not matched
// by this pattern?
if (ChainNodesMatched.back() != NodeToMatch &&
!RecordedNodes[RecNo].first.hasOneUse()) {
ChainNodesMatched.clear();
break;
}
// Merge the input chains if they are not intra-pattern references.
InputChain = HandleMergeInputChains(ChainNodesMatched, CurDAG);
if (!InputChain.getNode())
break; // Failed to merge.
continue;
}
case OPC_EmitMergeInputChains: {
assert(!InputChain.getNode() &&
"EmitMergeInputChains should be the first chain producing node");
// This node gets a list of nodes we matched in the input that have
// chains. We want to token factor all of the input chains to these nodes
// together. However, if any of the input chains is actually one of the
// nodes matched in this pattern, then we have an intra-match reference.
// Ignore these because the newly token factored chain should not refer to
// the old nodes.
unsigned NumChains = MatcherTable[MatcherIndex++];
assert(NumChains != 0 && "Can't TF zero chains");
assert(ChainNodesMatched.empty() &&
"Should only have one EmitMergeInputChains per match");
// Read all of the chained nodes.
for (unsigned i = 0; i != NumChains; ++i) {
unsigned RecNo = MatcherTable[MatcherIndex++];
assert(RecNo < RecordedNodes.size() && "Invalid EmitMergeInputChains");
ChainNodesMatched.push_back(RecordedNodes[RecNo].first.getNode());
// FIXME: What if other value results of the node have uses not matched
// by this pattern?
if (ChainNodesMatched.back() != NodeToMatch &&
!RecordedNodes[RecNo].first.hasOneUse()) {
ChainNodesMatched.clear();
break;
}
}
// If the inner loop broke out, the match fails.
if (ChainNodesMatched.empty())
break;
// Merge the input chains if they are not intra-pattern references.
InputChain = HandleMergeInputChains(ChainNodesMatched, CurDAG);
if (!InputChain.getNode())
break; // Failed to merge.
continue;
}
case OPC_EmitCopyToReg: {
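      // Copy a previously recorded value into the given physical register,
      // threading the current chain and producing glue for later nodes.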
unsigned RecNo = MatcherTable[MatcherIndex++];
assert(RecNo < RecordedNodes.size() && "Invalid EmitCopyToReg");
unsigned DestPhysReg = MatcherTable[MatcherIndex++];
if (!InputChain.getNode())
InputChain = CurDAG->getEntryNode();
InputChain = CurDAG->getCopyToReg(InputChain, SDLoc(NodeToMatch),
DestPhysReg, RecordedNodes[RecNo].first,
InputGlue);
InputGlue = InputChain.getValue(1);
continue;
}
case OPC_EmitNodeXForm: {
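      // Apply a target-supplied SDNodeXForm to a recorded node and record the
      // transformed value as a new result.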
unsigned XFormNo = MatcherTable[MatcherIndex++];
unsigned RecNo = MatcherTable[MatcherIndex++];
assert(RecNo < RecordedNodes.size() && "Invalid EmitNodeXForm");
SDValue Res = RunSDNodeXForm(RecordedNodes[RecNo].first, XFormNo);
RecordedNodes.push_back(std::pair<SDValue,SDNode*>(Res, nullptr));
continue;
}
case OPC_Coverage: {
// This is emitted right before MorphNode/EmitNode.
      // So it should be safe to assume that this node has been selected.
unsigned index = MatcherTable[MatcherIndex++];
index |= (MatcherTable[MatcherIndex++] << 8);
dbgs() << "COVERED: " << getPatternForIndex(index) << "\n";
dbgs() << "INCLUDED: " << getIncludePathForIndex(index) << "\n";
continue;
}
case OPC_EmitNode: case OPC_MorphNodeTo:
case OPC_EmitNode0: case OPC_EmitNode1: case OPC_EmitNode2:
case OPC_MorphNodeTo0: case OPC_MorphNodeTo1: case OPC_MorphNodeTo2: {
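      // Decode the 16-bit target opcode and the flags byte describing the
      // chain, glue, memref and variadic-operand behavior of the new node.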
uint16_t TargetOpc = MatcherTable[MatcherIndex++];
TargetOpc |= (unsigned short)MatcherTable[MatcherIndex++] << 8;
unsigned EmitNodeInfo = MatcherTable[MatcherIndex++];
// Get the result VT list.
unsigned NumVTs;
// If this is one of the compressed forms, get the number of VTs based
// on the Opcode. Otherwise read the next byte from the table.
if (Opcode >= OPC_MorphNodeTo0 && Opcode <= OPC_MorphNodeTo2)
NumVTs = Opcode - OPC_MorphNodeTo0;
else if (Opcode >= OPC_EmitNode0 && Opcode <= OPC_EmitNode2)
NumVTs = Opcode - OPC_EmitNode0;
else
NumVTs = MatcherTable[MatcherIndex++];
SmallVector<EVT, 4> VTs;
for (unsigned i = 0; i != NumVTs; ++i) {
MVT::SimpleValueType VT =
(MVT::SimpleValueType)MatcherTable[MatcherIndex++];
if (VT == MVT::iPTR)
VT = TLI->getPointerTy(CurDAG->getDataLayout()).SimpleTy;
VTs.push_back(VT);
}
if (EmitNodeInfo & OPFL_Chain)
VTs.push_back(MVT::Other);
if (EmitNodeInfo & OPFL_GlueOutput)
VTs.push_back(MVT::Glue);
// This is hot code, so optimize the two most common cases of 1 and 2
// results.
SDVTList VTList;
if (VTs.size() == 1)
VTList = CurDAG->getVTList(VTs[0]);
else if (VTs.size() == 2)
VTList = CurDAG->getVTList(VTs[0], VTs[1]);
else
VTList = CurDAG->getVTList(VTs);
// Get the operand list.
unsigned NumOps = MatcherTable[MatcherIndex++];
SmallVector<SDValue, 8> Ops;
for (unsigned i = 0; i != NumOps; ++i) {
unsigned RecNo = MatcherTable[MatcherIndex++];
if (RecNo & 128)
RecNo = GetVBR(RecNo, MatcherTable, MatcherIndex);
assert(RecNo < RecordedNodes.size() && "Invalid EmitNode");
Ops.push_back(RecordedNodes[RecNo].first);
}
// If there are variadic operands to add, handle them now.
if (EmitNodeInfo & OPFL_VariadicInfo) {
// Determine the start index to copy from.
unsigned FirstOpToCopy = getNumFixedFromVariadicInfo(EmitNodeInfo);
FirstOpToCopy += (EmitNodeInfo & OPFL_Chain) ? 1 : 0;
assert(NodeToMatch->getNumOperands() >= FirstOpToCopy &&
"Invalid variadic node");
// Copy all of the variadic operands, not including a potential glue
// input.
for (unsigned i = FirstOpToCopy, e = NodeToMatch->getNumOperands();
i != e; ++i) {
SDValue V = NodeToMatch->getOperand(i);
if (V.getValueType() == MVT::Glue) break;
Ops.push_back(V);
}
}
// If this has chain/glue inputs, add them.
if (EmitNodeInfo & OPFL_Chain)
Ops.push_back(InputChain);
if ((EmitNodeInfo & OPFL_GlueInput) && InputGlue.getNode() != nullptr)
Ops.push_back(InputGlue);
// Create the node.
SDNode *Res = nullptr;
bool IsMorphNodeTo = Opcode == OPC_MorphNodeTo ||
(Opcode >= OPC_MorphNodeTo0 && Opcode <= OPC_MorphNodeTo2);
if (!IsMorphNodeTo) {
// If this is a normal EmitNode command, just create the new node and
// add the results to the RecordedNodes list.
Res = CurDAG->getMachineNode(TargetOpc, SDLoc(NodeToMatch),
VTList, Ops);
// Add all the non-glue/non-chain results to the RecordedNodes list.
for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
if (VTs[i] == MVT::Other || VTs[i] == MVT::Glue) break;
RecordedNodes.push_back(std::pair<SDValue,SDNode*>(SDValue(Res, i),
nullptr));
}
} else {
assert(NodeToMatch->getOpcode() != ISD::DELETED_NODE &&
"NodeToMatch was removed partway through selection");
SelectionDAG::DAGNodeDeletedListener NDL(*CurDAG, [&](SDNode *N,
SDNode *E) {
auto &Chain = ChainNodesMatched;
assert((!E || !is_contained(Chain, N)) &&
"Chain node replaced during MorphNode");
Chain.erase(std::remove(Chain.begin(), Chain.end(), N), Chain.end());
});
Res = MorphNode(NodeToMatch, TargetOpc, VTList, Ops, EmitNodeInfo);
}
// If the node had chain/glue results, update our notion of the current
// chain and glue.
if (EmitNodeInfo & OPFL_GlueOutput) {
InputGlue = SDValue(Res, VTs.size()-1);
if (EmitNodeInfo & OPFL_Chain)
InputChain = SDValue(Res, VTs.size()-2);
} else if (EmitNodeInfo & OPFL_Chain)
InputChain = SDValue(Res, VTs.size()-1);
// If the OPFL_MemRefs glue is set on this node, slap all of the
// accumulated memrefs onto it.
//
      // FIXME: This is vastly incorrect for patterns with multiple-output
      // instructions that access memory and for ComplexPatterns that match
// loads.
if (EmitNodeInfo & OPFL_MemRefs) {
// Only attach load or store memory operands if the generated
// instruction may load or store.
const MCInstrDesc &MCID = TII->get(TargetOpc);
bool mayLoad = MCID.mayLoad();
bool mayStore = MCID.mayStore();
unsigned NumMemRefs = 0;
for (SmallVectorImpl<MachineMemOperand *>::const_iterator I =
MatchedMemRefs.begin(), E = MatchedMemRefs.end(); I != E; ++I) {
if ((*I)->isLoad()) {
if (mayLoad)
++NumMemRefs;
} else if ((*I)->isStore()) {
if (mayStore)
++NumMemRefs;
} else {
++NumMemRefs;
}
}
MachineSDNode::mmo_iterator MemRefs =
MF->allocateMemRefsArray(NumMemRefs);
MachineSDNode::mmo_iterator MemRefsPos = MemRefs;
for (SmallVectorImpl<MachineMemOperand *>::const_iterator I =
MatchedMemRefs.begin(), E = MatchedMemRefs.end(); I != E; ++I) {
if ((*I)->isLoad()) {
if (mayLoad)
*MemRefsPos++ = *I;
} else if ((*I)->isStore()) {
if (mayStore)
*MemRefsPos++ = *I;
} else {
*MemRefsPos++ = *I;
}
}
cast<MachineSDNode>(Res)
->setMemRefs(MemRefs, MemRefs + NumMemRefs);
}
DEBUG(dbgs() << " "
<< (IsMorphNodeTo ? "Morphed" : "Created")
<< " node: "; Res->dump(CurDAG); dbgs() << "\n");
// If this was a MorphNodeTo then we're completely done!
if (IsMorphNodeTo) {
// Update chain uses.
UpdateChains(Res, InputChain, ChainNodesMatched, true);
return;
}
continue;
}
case OPC_CompleteMatch: {
// The match has been completed, and any new nodes (if any) have been
// created. Patch up references to the matched dag to use the newly
// created nodes.
unsigned NumResults = MatcherTable[MatcherIndex++];
for (unsigned i = 0; i != NumResults; ++i) {
unsigned ResSlot = MatcherTable[MatcherIndex++];
if (ResSlot & 128)
ResSlot = GetVBR(ResSlot, MatcherTable, MatcherIndex);
assert(ResSlot < RecordedNodes.size() && "Invalid CompleteMatch");
SDValue Res = RecordedNodes[ResSlot].first;
assert(i < NodeToMatch->getNumValues() &&
NodeToMatch->getValueType(i) != MVT::Other &&
NodeToMatch->getValueType(i) != MVT::Glue &&
"Invalid number of results to complete!");
assert((NodeToMatch->getValueType(i) == Res.getValueType() ||
NodeToMatch->getValueType(i) == MVT::iPTR ||
Res.getValueType() == MVT::iPTR ||
NodeToMatch->getValueType(i).getSizeInBits() ==
Res.getValueSizeInBits()) &&
"invalid replacement");
CurDAG->ReplaceAllUsesOfValueWith(SDValue(NodeToMatch, i), Res);
}
// Update chain uses.
UpdateChains(NodeToMatch, InputChain, ChainNodesMatched, false);
// If the root node defines glue, we need to update it to the glue result.
// TODO: This never happens in our tests and I think it can be removed /
      // replaced with an assert, but if we do it this way the change is
// NFC.
if (NodeToMatch->getValueType(NodeToMatch->getNumValues() - 1) ==
MVT::Glue &&
InputGlue.getNode())
CurDAG->ReplaceAllUsesOfValueWith(
SDValue(NodeToMatch, NodeToMatch->getNumValues() - 1), InputGlue);
assert(NodeToMatch->use_empty() &&
"Didn't replace all uses of the node?");
CurDAG->RemoveDeadNode(NodeToMatch);
return;
}
}
// If the code reached this point, then the match failed. See if there is
// another child to try in the current 'Scope', otherwise pop it until we
// find a case to check.
DEBUG(dbgs() << " Match failed at index " << CurrentOpcodeIndex << "\n");
++NumDAGIselRetries;
while (true) {
if (MatchScopes.empty()) {
CannotYetSelect(NodeToMatch);
return;
}
// Restore the interpreter state back to the point where the scope was
// formed.
MatchScope &LastScope = MatchScopes.back();
RecordedNodes.resize(LastScope.NumRecordedNodes);
NodeStack.clear();
NodeStack.append(LastScope.NodeStack.begin(), LastScope.NodeStack.end());
N = NodeStack.back();
if (LastScope.NumMatchedMemRefs != MatchedMemRefs.size())
MatchedMemRefs.resize(LastScope.NumMatchedMemRefs);
MatcherIndex = LastScope.FailIndex;
DEBUG(dbgs() << " Continuing at " << MatcherIndex << "\n");
InputChain = LastScope.InputChain;
InputGlue = LastScope.InputGlue;
if (!LastScope.HasChainNodesMatched)
ChainNodesMatched.clear();
// Check to see what the offset is at the new MatcherIndex. If it is zero
// we have reached the end of this scope, otherwise we have another child
// in the current scope to try.
unsigned NumToSkip = MatcherTable[MatcherIndex++];
if (NumToSkip & 128)
NumToSkip = GetVBR(NumToSkip, MatcherTable, MatcherIndex);
// If we have another child in this scope to match, update FailIndex and
// try it.
if (NumToSkip != 0) {
LastScope.FailIndex = MatcherIndex+NumToSkip;
break;
}
// End of this scope, pop it and try the next child in the containing
// scope.
MatchScopes.pop_back();
}
}
}
void SelectionDAGISel::CannotYetSelect(SDNode *N) {
std::string msg;
raw_string_ostream Msg(msg);
Msg << "Cannot select: ";
if (N->getOpcode() != ISD::INTRINSIC_W_CHAIN &&
N->getOpcode() != ISD::INTRINSIC_WO_CHAIN &&
N->getOpcode() != ISD::INTRINSIC_VOID) {
N->printrFull(Msg, CurDAG);
Msg << "\nIn function: " << MF->getName();
} else {
bool HasInputChain = N->getOperand(0).getValueType() == MVT::Other;
unsigned iid =
cast<ConstantSDNode>(N->getOperand(HasInputChain))->getZExtValue();
if (iid < Intrinsic::num_intrinsics)
Msg << "intrinsic %" << Intrinsic::getName((Intrinsic::ID)iid, None);
else if (const TargetIntrinsicInfo *TII = TM.getIntrinsicInfo())
Msg << "target intrinsic %" << TII->getName(iid);
else
Msg << "unknown intrinsic #" << iid;
}
report_fatal_error(Msg.str());
}
char SelectionDAGISel::ID = 0;