2017-06-08 01:53:32 +02:00
|
|
|
//===- SelectionDAGISel.cpp - Implement the SelectionDAGISel class --------===//
|
2005-04-22 00:36:52 +02:00
|
|
|
//
|
2005-01-07 08:47:53 +01:00
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
2007-12-29 21:36:04 +01:00
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
2005-04-22 00:36:52 +02:00
|
|
|
//
|
2005-01-07 08:47:53 +01:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This implements the SelectionDAGISel class.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2009-02-06 18:22:58 +01:00
|
|
|
#include "ScheduleDAGSDNodes.h"
|
2009-11-23 19:04:58 +01:00
|
|
|
#include "SelectionDAGBuilder.h"
|
2017-02-04 03:00:53 +01:00
|
|
|
#include "llvm/ADT/APInt.h"
|
|
|
|
#include "llvm/ADT/DenseMap.h"
|
|
|
|
#include "llvm/ADT/None.h"
|
2012-12-03 17:50:05 +01:00
|
|
|
#include "llvm/ADT/PostOrderIterator.h"
|
2017-06-06 13:49:48 +02:00
|
|
|
#include "llvm/ADT/STLExtras.h"
|
2017-02-04 03:00:53 +01:00
|
|
|
#include "llvm/ADT/SmallPtrSet.h"
|
|
|
|
#include "llvm/ADT/SmallSet.h"
|
|
|
|
#include "llvm/ADT/SmallVector.h"
|
2012-12-03 17:50:05 +01:00
|
|
|
#include "llvm/ADT/Statistic.h"
|
2017-02-04 03:00:53 +01:00
|
|
|
#include "llvm/ADT/StringRef.h"
|
2012-06-28 02:05:13 +02:00
|
|
|
#include "llvm/Analysis/AliasAnalysis.h"
|
|
|
|
#include "llvm/Analysis/BranchProbabilityInfo.h"
|
2013-07-27 03:24:00 +02:00
|
|
|
#include "llvm/Analysis/CFG.h"
|
[CodeGen] Pass SDAG an ORE, and replace FastISel stats with remarks.
In the long-term, we want to replace statistics with something
finer-grained that lets us gather per-function data.
Remarks are that replacement.
Create an ORE instance in SelectionDAGISel, and pass it to
SelectionDAG.
SelectionDAG was used so that we can emit remarks from all
SelectionDAG-related code, including TargetLowering and DAGCombiner.
This isn't used in the current patch but Adam tells me he's interested
for the fp-contract combines.
Use the ORE instance to emit FastISel failures as remarks (instead of
the mix of dbgs() dumps and statistics that we currently have).
Eventually, we want to have an API that tells us whether remarks are
enabled (http://llvm.org/PR32352) so that we don't emit expensive
remarks (in this case, dumping IR) when it's not needed. For now, use
'isEnabled' as a crude replacement.
This does mean that the replacement for '-fast-isel-verbose' is now
'-pass-remarks-missed=isel'. Additionally, clang users also need to
enable remark diagnostics, using '-Rpass-missed=isel'.
This also removes '-fast-isel-verbose2': there are no static statistics
that we want to only enable in asserts builds, so we can always use
the remarks regardless of the build type.
Differential Revision: https://reviews.llvm.org/D31405
llvm-svn: 299093
2017-03-30 19:49:58 +02:00
|
|
|
#include "llvm/Analysis/OptimizationDiagnosticInfo.h"
|
2015-01-15 03:16:27 +01:00
|
|
|
#include "llvm/Analysis/TargetLibraryInfo.h"
|
2008-08-20 00:33:34 +02:00
|
|
|
#include "llvm/CodeGen/FastISel.h"
|
2012-06-28 02:05:13 +02:00
|
|
|
#include "llvm/CodeGen/FunctionLoweringInfo.h"
|
2008-08-17 20:44:35 +02:00
|
|
|
#include "llvm/CodeGen/GCMetadata.h"
|
2017-06-08 01:53:32 +02:00
|
|
|
#include "llvm/CodeGen/ISDOpcodes.h"
|
2017-02-04 03:00:53 +01:00
|
|
|
#include "llvm/CodeGen/MachineBasicBlock.h"
|
2010-05-14 23:14:32 +02:00
|
|
|
#include "llvm/CodeGen/MachineFrameInfo.h"
|
2005-01-07 08:47:53 +01:00
|
|
|
#include "llvm/CodeGen/MachineFunction.h"
|
2017-02-04 03:00:53 +01:00
|
|
|
#include "llvm/CodeGen/MachineFunctionPass.h"
|
|
|
|
#include "llvm/CodeGen/MachineInstr.h"
|
2005-01-07 08:47:53 +01:00
|
|
|
#include "llvm/CodeGen/MachineInstrBuilder.h"
|
2017-02-04 03:00:53 +01:00
|
|
|
#include "llvm/CodeGen/MachineMemOperand.h"
|
|
|
|
#include "llvm/CodeGen/MachineOperand.h"
|
|
|
|
#include "llvm/CodeGen/MachinePassRegistry.h"
|
2007-12-31 05:13:23 +01:00
|
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
2017-02-04 03:00:53 +01:00
|
|
|
#include "llvm/CodeGen/MachineValueType.h"
|
2006-08-02 14:30:23 +02:00
|
|
|
#include "llvm/CodeGen/SchedulerRegistry.h"
|
2017-02-04 03:00:53 +01:00
|
|
|
#include "llvm/CodeGen/SelectionDAG.h"
|
2017-06-08 01:53:32 +02:00
|
|
|
#include "llvm/CodeGen/SelectionDAGISel.h"
|
2017-02-04 03:00:53 +01:00
|
|
|
#include "llvm/CodeGen/SelectionDAGNodes.h"
|
2016-04-08 23:26:31 +02:00
|
|
|
#include "llvm/CodeGen/StackProtector.h"
|
2017-02-04 03:00:53 +01:00
|
|
|
#include "llvm/CodeGen/ValueTypes.h"
|
|
|
|
#include "llvm/IR/BasicBlock.h"
|
2013-01-02 12:36:10 +01:00
|
|
|
#include "llvm/IR/Constants.h"
|
2017-06-08 01:53:32 +02:00
|
|
|
#include "llvm/IR/DataLayout.h"
|
2017-02-04 03:00:53 +01:00
|
|
|
#include "llvm/IR/DebugInfoMetadata.h"
|
|
|
|
#include "llvm/IR/DebugLoc.h"
|
2017-02-13 18:38:59 +01:00
|
|
|
#include "llvm/IR/DiagnosticInfo.h"
|
2017-06-08 01:53:32 +02:00
|
|
|
#include "llvm/IR/Dominators.h"
|
2013-01-02 12:36:10 +01:00
|
|
|
#include "llvm/IR/Function.h"
|
|
|
|
#include "llvm/IR/InlineAsm.h"
|
2017-02-04 03:00:53 +01:00
|
|
|
#include "llvm/IR/InstrTypes.h"
|
|
|
|
#include "llvm/IR/Instruction.h"
|
2013-01-02 12:36:10 +01:00
|
|
|
#include "llvm/IR/Instructions.h"
|
|
|
|
#include "llvm/IR/IntrinsicInst.h"
|
|
|
|
#include "llvm/IR/Intrinsics.h"
|
2017-02-04 03:00:53 +01:00
|
|
|
#include "llvm/IR/Metadata.h"
|
|
|
|
#include "llvm/IR/Type.h"
|
|
|
|
#include "llvm/IR/User.h"
|
2017-06-08 01:53:32 +02:00
|
|
|
#include "llvm/IR/Value.h"
|
2017-02-04 03:00:53 +01:00
|
|
|
#include "llvm/MC/MCInstrDesc.h"
|
|
|
|
#include "llvm/MC/MCRegisterInfo.h"
|
|
|
|
#include "llvm/Pass.h"
|
|
|
|
#include "llvm/Support/BranchProbability.h"
|
|
|
|
#include "llvm/Support/Casting.h"
|
|
|
|
#include "llvm/Support/CodeGen.h"
|
|
|
|
#include "llvm/Support/CommandLine.h"
|
2012-12-03 17:50:05 +01:00
|
|
|
#include "llvm/Support/Compiler.h"
|
|
|
|
#include "llvm/Support/Debug.h"
|
|
|
|
#include "llvm/Support/ErrorHandling.h"
|
2017-04-28 07:31:46 +02:00
|
|
|
#include "llvm/Support/KnownBits.h"
|
2012-12-03 17:50:05 +01:00
|
|
|
#include "llvm/Support/Timer.h"
|
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2005-01-07 08:47:53 +01:00
|
|
|
#include "llvm/Target/TargetInstrInfo.h"
|
2012-12-03 17:50:05 +01:00
|
|
|
#include "llvm/Target/TargetIntrinsicInfo.h"
|
2005-01-07 08:47:53 +01:00
|
|
|
#include "llvm/Target/TargetLowering.h"
|
|
|
|
#include "llvm/Target/TargetMachine.h"
|
2006-05-23 15:43:15 +02:00
|
|
|
#include "llvm/Target/TargetOptions.h"
|
2012-12-03 17:50:05 +01:00
|
|
|
#include "llvm/Target/TargetRegisterInfo.h"
|
2012-11-13 09:47:29 +01:00
|
|
|
#include "llvm/Target/TargetSubtargetInfo.h"
|
2010-12-19 05:58:57 +01:00
|
|
|
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
|
2006-02-24 03:52:40 +01:00
|
|
|
#include <algorithm>
|
2017-02-04 03:00:53 +01:00
|
|
|
#include <cassert>
|
|
|
|
#include <cstdint>
|
|
|
|
#include <iterator>
|
2017-06-08 01:53:32 +02:00
|
|
|
#include <limits>
|
2017-02-04 03:00:53 +01:00
|
|
|
#include <memory>
|
|
|
|
#include <string>
|
|
|
|
#include <utility>
|
|
|
|
#include <vector>
|
2016-02-02 19:20:45 +01:00
|
|
|
|
2005-01-07 08:47:53 +01:00
|
|
|
using namespace llvm;
|
|
|
|
|
2014-04-22 04:02:50 +02:00
|
|
|
#define DEBUG_TYPE "isel"
|
|
|
|
|
2013-03-08 23:56:31 +01:00
|
|
|
// Instruction-selection statistics (reported with -stats). They track how
// often fast-isel succeeds, how often it bails out to the full SelectionDAG
// path, and how often the DAG matcher has to backtrack.
STATISTIC(NumFastIselFailures, "Number of instructions fast isel failed on");
STATISTIC(NumFastIselSuccess, "Number of instructions fast isel selected");
STATISTIC(NumFastIselBlocks, "Number of blocks selected entirely by fast isel");
STATISTIC(NumDAGBlocks, "Number of blocks selected using DAG");
STATISTIC(NumDAGIselRetries,"Number of times dag isel has to try another path");
STATISTIC(NumEntryBlocks, "Number of entry blocks encountered");
STATISTIC(NumFastIselFailLowerArguments,
          "Number of entry blocks where fast isel failed to lower arguments");
|
2013-02-27 23:52:54 +01:00
|
|
|
|
2015-02-27 19:32:11 +01:00
|
|
|
// -fast-isel-abort=N: turn fast-isel lowering failures into hard aborts
// instead of silently falling back to SelectionDAG. Higher values make more
// kinds of failure fatal (see the description string for the exact levels).
static cl::opt<int> EnableFastISelAbort(
    "fast-isel-abort", cl::Hidden,
    cl::desc("Enable abort calls when \"fast\" instruction selection "
             "fails to lower an instruction: 0 disable the abort, 1 will "
             "abort but for args, calls and terminators, 2 will also "
             "abort for argument lowering, and 3 will never fallback "
             "to SelectionDAG."));
|
2008-06-17 08:09:18 +02:00
|
|
|
|
2017-02-13 18:38:59 +01:00
|
|
|
// -fast-isel-report-on-fallback: emit a diagnostic (not just a statistic)
// whenever fast-isel gives up on a function and SelectionDAG takes over.
static cl::opt<bool> EnableFastISelFallbackReport(
    "fast-isel-report-on-fallback", cl::Hidden,
    cl::desc("Emit a diagnostic when \"fast\" instruction selection "
             "falls back to SelectionDAG."));
|
|
|
|
|
2011-06-16 22:22:37 +02:00
|
|
|
// -use-mbpi: allow the selector to consult branch probability info.
// On by default; only actually required/queried when optimizing (see
// getAnalysisUsage and runOnMachineFunction).
static cl::opt<bool>
UseMBPI("use-mbpi",
        cl::desc("use Machine Branch Probability Info"),
        cl::init(true), cl::Hidden);
|
|
|
|
|
2005-09-01 20:44:10 +02:00
|
|
|
#ifndef NDEBUG
// Debug-only DAG visualization flags. Each View* option pops up a graph of
// the DAG at a particular stage of the selection pipeline;
// -filter-view-dags restricts all of them to a single named basic block.
// In release (NDEBUG) builds the flags are compile-time false constants so
// the viewing code folds away entirely.
static cl::opt<std::string>
FilterDAGBasicBlockName("filter-view-dags", cl::Hidden,
                        cl::desc("Only display the basic block whose name "
                                 "matches this for all view-*-dags options"));
static cl::opt<bool>
ViewDAGCombine1("view-dag-combine1-dags", cl::Hidden,
                cl::desc("Pop up a window to show dags before the first "
                         "dag combine pass"));
static cl::opt<bool>
ViewLegalizeTypesDAGs("view-legalize-types-dags", cl::Hidden,
                      cl::desc("Pop up a window to show dags before legalize types"));
static cl::opt<bool>
ViewLegalizeDAGs("view-legalize-dags", cl::Hidden,
                 cl::desc("Pop up a window to show dags before legalize"));
static cl::opt<bool>
ViewDAGCombine2("view-dag-combine2-dags", cl::Hidden,
                cl::desc("Pop up a window to show dags before the second "
                         "dag combine pass"));
static cl::opt<bool>
ViewDAGCombineLT("view-dag-combine-lt-dags", cl::Hidden,
                 cl::desc("Pop up a window to show dags before the post legalize types"
                          " dag combine pass"));
static cl::opt<bool>
ViewISelDAGs("view-isel-dags", cl::Hidden,
             cl::desc("Pop up a window to show isel dags as they are selected"));
static cl::opt<bool>
ViewSchedDAGs("view-sched-dags", cl::Hidden,
              cl::desc("Pop up a window to show sched dags as they are processed"));
static cl::opt<bool>
ViewSUnitDAGs("view-sunit-dags", cl::Hidden,
              cl::desc("Pop up a window to show SUnit dags after they are processed"));
#else
static const bool ViewDAGCombine1 = false,
                  ViewLegalizeTypesDAGs = false, ViewLegalizeDAGs = false,
                  ViewDAGCombine2 = false,
                  ViewDAGCombineLT = false,
                  ViewISelDAGs = false, ViewSchedDAGs = false,
                  ViewSUnitDAGs = false;
#endif
|
|
|
|
|
2006-08-02 14:30:23 +02:00
|
|
|
//===---------------------------------------------------------------------===//
///
/// RegisterScheduler class - Track the registration of instruction schedulers.
///
//===---------------------------------------------------------------------===//
// Definition of the single static registry instance declared by
// RegisterScheduler; schedulers register themselves into it (see
// defaultListDAGScheduler below).
MachinePassRegistry RegisterScheduler::Registry;
|
|
|
|
|
|
|
|
//===---------------------------------------------------------------------===//
///
/// ISHeuristic command line option for instruction schedulers.
///
//===---------------------------------------------------------------------===//
// -pre-RA-sched=<name>: pick the pre-register-allocation scheduler from the
// RegisterScheduler registry. Defaults to createDefaultScheduler, which
// chooses based on the target's stated scheduling preference.
static cl::opt<RegisterScheduler::FunctionPassCtor, false,
               RegisterPassParser<RegisterScheduler>>
ISHeuristic("pre-RA-sched",
            cl::init(&createDefaultScheduler), cl::Hidden,
            cl::desc("Instruction schedulers available (before register"
                     " allocation):"));
|
2006-08-01 16:21:23 +02:00
|
|
|
|
2008-05-13 02:00:25 +02:00
|
|
|
// Register the "default" scheduler entry, backed by createDefaultScheduler.
static RegisterScheduler
defaultListDAGScheduler("default", "Best scheduler for the target",
                        createDefaultScheduler);
|
2006-01-23 08:01:07 +01:00
|
|
|
|
2008-09-03 18:12:24 +02:00
|
|
|
namespace llvm {
|
2017-02-04 03:00:53 +01:00
|
|
|
|
2013-11-22 20:11:24 +01:00
|
|
|
//===--------------------------------------------------------------------===//
|
|
|
|
/// \brief This class is used by SelectionDAGISel to temporarily override
|
|
|
|
/// the optimization level on a per-function basis.
|
|
|
|
class OptLevelChanger {
|
|
|
|
SelectionDAGISel &IS;
|
|
|
|
CodeGenOpt::Level SavedOptLevel;
|
|
|
|
bool SavedFastISel;
|
|
|
|
|
|
|
|
public:
|
|
|
|
OptLevelChanger(SelectionDAGISel &ISel,
|
|
|
|
CodeGenOpt::Level NewOptLevel) : IS(ISel) {
|
|
|
|
SavedOptLevel = IS.OptLevel;
|
|
|
|
if (NewOptLevel == SavedOptLevel)
|
|
|
|
return;
|
|
|
|
IS.OptLevel = NewOptLevel;
|
|
|
|
IS.TM.setOptLevel(NewOptLevel);
|
|
|
|
DEBUG(dbgs() << "\nChanging optimization level for Function "
|
|
|
|
<< IS.MF->getFunction()->getName() << "\n");
|
|
|
|
DEBUG(dbgs() << "\tBefore: -O" << SavedOptLevel
|
|
|
|
<< " ; After: -O" << NewOptLevel << "\n");
|
2015-11-30 22:56:16 +01:00
|
|
|
SavedFastISel = IS.TM.Options.EnableFastISel;
|
|
|
|
if (NewOptLevel == CodeGenOpt::None) {
|
|
|
|
IS.TM.setFastISel(IS.TM.getO0WantsFastISel());
|
|
|
|
DEBUG(dbgs() << "\tFastISel is "
|
|
|
|
<< (IS.TM.Options.EnableFastISel ? "enabled" : "disabled")
|
|
|
|
<< "\n");
|
|
|
|
}
|
2013-11-22 20:11:24 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
~OptLevelChanger() {
|
|
|
|
if (IS.OptLevel == SavedOptLevel)
|
|
|
|
return;
|
|
|
|
DEBUG(dbgs() << "\nRestoring optimization level for Function "
|
|
|
|
<< IS.MF->getFunction()->getName() << "\n");
|
|
|
|
DEBUG(dbgs() << "\tBefore: -O" << IS.OptLevel
|
|
|
|
<< " ; After: -O" << SavedOptLevel << "\n");
|
|
|
|
IS.OptLevel = SavedOptLevel;
|
|
|
|
IS.TM.setOptLevel(SavedOptLevel);
|
|
|
|
IS.TM.setFastISel(SavedFastISel);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2008-09-03 18:12:24 +02:00
|
|
|
//===--------------------------------------------------------------------===//
|
|
|
|
/// createDefaultScheduler - This creates an instruction scheduler appropriate
|
|
|
|
/// for the target.
|
2009-02-11 05:27:20 +01:00
|
|
|
ScheduleDAGSDNodes* createDefaultScheduler(SelectionDAGISel *IS,
|
2009-04-30 01:29:43 +02:00
|
|
|
CodeGenOpt::Level OptLevel) {
|
2014-10-08 09:32:17 +02:00
|
|
|
const TargetLowering *TLI = IS->TLI;
|
2014-10-08 03:58:03 +02:00
|
|
|
const TargetSubtargetInfo &ST = IS->MF->getSubtarget();
|
2009-01-15 17:58:17 +01:00
|
|
|
|
2015-07-28 08:18:04 +02:00
|
|
|
// Try first to see if the Target has its own way of selecting a scheduler
|
|
|
|
if (auto *SchedulerCtor = ST.getDAGScheduler(OptLevel)) {
|
|
|
|
return SchedulerCtor(IS, OptLevel);
|
|
|
|
}
|
|
|
|
|
2015-03-11 23:56:10 +01:00
|
|
|
if (OptLevel == CodeGenOpt::None ||
|
|
|
|
(ST.enableMachineScheduler() && ST.enableMachineSchedDefaultSched()) ||
|
2013-06-06 02:43:09 +02:00
|
|
|
TLI->getSchedulingPreference() == Sched::Source)
|
2010-07-16 04:01:19 +02:00
|
|
|
return createSourceListDAGScheduler(IS, OptLevel);
|
2013-06-06 02:43:09 +02:00
|
|
|
if (TLI->getSchedulingPreference() == Sched::RegPressure)
|
2010-05-20 08:13:19 +02:00
|
|
|
return createBURRListDAGScheduler(IS, OptLevel);
|
2013-06-06 02:43:09 +02:00
|
|
|
if (TLI->getSchedulingPreference() == Sched::Hybrid)
|
2010-07-24 02:39:05 +02:00
|
|
|
return createHybridListDAGScheduler(IS, OptLevel);
|
2013-06-06 02:43:09 +02:00
|
|
|
if (TLI->getSchedulingPreference() == Sched::VLIW)
|
2012-02-01 23:13:57 +01:00
|
|
|
return createVLIWDAGScheduler(IS, OptLevel);
|
2013-06-06 02:43:09 +02:00
|
|
|
assert(TLI->getSchedulingPreference() == Sched::ILP &&
|
2010-05-19 22:19:50 +02:00
|
|
|
"Unknown sched type!");
|
2010-07-24 02:39:05 +02:00
|
|
|
return createILPListDAGScheduler(IS, OptLevel);
|
2007-04-12 08:00:20 +02:00
|
|
|
}
|
2017-02-04 03:00:53 +01:00
|
|
|
|
2016-02-02 19:20:45 +01:00
|
|
|
} // end namespace llvm
|
2005-01-07 08:47:53 +01:00
|
|
|
|
2008-01-30 19:18:23 +01:00
|
|
|
// EmitInstrWithCustomInserter - This method should be implemented by targets
// that mark instructions with the 'usesCustomInserter' flag.  These
// instructions are special in various ways, which require special support to
// insert.  The specified MachineInstr is created but not inserted into any
// basic blocks, and this method is called to expand it into a sequence of
// instructions, potentially also creating new basic blocks and control flow.
// When new basic blocks are inserted and the edges from MBB to its successors
// are modified, the method should insert pairs of <OldSucc, NewSucc> into the
// DenseMap.
//
// This is the default implementation: reaching it means a target set
// 'usesCustomInserter' on an instruction without overriding this hook, so
// print a hint (debug builds only) and abort.
MachineBasicBlock *
TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                            MachineBasicBlock *MBB) const {
#ifndef NDEBUG
  dbgs() << "If a target marks an instruction with "
            "'usesCustomInserter', it must implement "
            "TargetLowering::EmitInstrWithCustomInserter!";
#endif
  llvm_unreachable(nullptr);
}
|
|
|
|
|
2016-07-01 00:52:52 +02:00
|
|
|
// Default post-instruction-selection adjustment hook. Targets that set
// 'hasPostISelHook' on an instruction must override this; the assert fires
// if such an instruction reaches the default implementation.
void TargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
                                                   SDNode *Node) const {
  assert(!MI.hasPostISelHook() &&
         "If a target marks an instruction with 'hasPostISelHook', "
         "it must implement TargetLowering::AdjustInstrPostInstrSelection!");
}
|
|
|
|
|
2005-01-11 06:56:49 +01:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// SelectionDAGISel code
|
|
|
|
//===----------------------------------------------------------------------===//
|
2005-01-07 08:47:53 +01:00
|
|
|
|
2013-06-19 23:36:55 +02:00
|
|
|
// Construct the instruction selector for a target machine at the given
// optimization level. Note the init-list ordering is load-bearing: SDB is
// built from *CurDAG and *FuncInfo, so those are allocated first.
SelectionDAGISel::SelectionDAGISel(TargetMachine &tm,
                                   CodeGenOpt::Level OL) :
  MachineFunctionPass(ID), TM(tm),
  FuncInfo(new FunctionLoweringInfo()),
  CurDAG(new SelectionDAG(tm, OL)),
  SDB(new SelectionDAGBuilder(*CurDAG, *FuncInfo, OL)),
  AA(), GFI(),
  OptLevel(OL),
  DAGSize(0) {
    // Register the analyses this pass depends on with the pass registry
    // (GC metadata, branch probabilities, alias analysis, TLI).
    initializeGCModuleInfoPass(*PassRegistry::getPassRegistry());
    initializeBranchProbabilityInfoWrapperPassPass(
        *PassRegistry::getPassRegistry());
    initializeAAResultsWrapperPassPass(*PassRegistry::getPassRegistry());
    initializeTargetLibraryInfoWrapperPassPass(
        *PassRegistry::getPassRegistry());
  }
|
2008-08-28 01:52:12 +02:00
|
|
|
|
|
|
|
// Destructor: tear down in reverse dependency order. SDB was constructed
// from *CurDAG and *FuncInfo (see the constructor), so it is deleted first.
SelectionDAGISel::~SelectionDAGISel() {
  delete SDB;
  delete CurDAG;
  delete FuncInfo;
}
|
|
|
|
|
2005-08-17 08:37:43 +02:00
|
|
|
// Declare the analyses this pass requires/preserves. AA and branch
// probability info are only requested when optimizing, mirroring the
// conditional use in runOnMachineFunction.
void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const {
  if (OptLevel != CodeGenOpt::None)
    AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<GCModuleInfo>();
  AU.addRequired<StackProtector>();
  AU.addPreserved<StackProtector>();
  AU.addPreserved<GCModuleInfo>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  if (UseMBPI && OptLevel != CodeGenOpt::None)
    AU.addRequired<BranchProbabilityInfoWrapperPass>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
|
2005-01-07 08:47:53 +01:00
|
|
|
|
2010-12-19 05:58:57 +01:00
|
|
|
/// SplitCriticalSideEffectEdges - Look for critical edges with a PHI value that
/// may trap on it.  In this case we have to split the edge so that the path
/// through the predecessor block that doesn't go to the phi block doesn't
/// execute the possibly trapping instruction. If available, we pass domtree
/// and loop info to be updated when we split critical edges. This is because
/// SelectionDAGISel preserves these analyses.
/// This is required for correctness, so it must be done at -O0.
///
static void SplitCriticalSideEffectEdges(Function &Fn, DominatorTree *DT,
                                         LoopInfo *LI) {
  // Loop for blocks with phi nodes.
  for (BasicBlock &BB : Fn) {
    // Cheap early-out: only blocks that start with a PHI need scanning.
    PHINode *PN = dyn_cast<PHINode>(BB.begin());
    if (!PN) continue;

  ReprocessBlock:
    // For each block with a PHI node, check to see if any of the input values
    // are potentially trapping constant expressions.  Constant expressions are
    // the only potentially trapping value that can occur as the argument to a
    // PHI.
    for (BasicBlock::iterator I = BB.begin(); (PN = dyn_cast<PHINode>(I)); ++I)
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        ConstantExpr *CE = dyn_cast<ConstantExpr>(PN->getIncomingValue(i));
        if (!CE || !CE->canTrap()) continue;

        // The only case we have to worry about is when the edge is critical.
        // Since this block has a PHI Node, we assume it has multiple input
        // edges: check to see if the pred has multiple successors.
        BasicBlock *Pred = PN->getIncomingBlock(i);
        if (Pred->getTerminator()->getNumSuccessors() == 1)
          continue;

        // Okay, we have to split this edge.
        SplitCriticalEdge(
            Pred->getTerminator(), GetSuccessorNumber(Pred, &BB),
            CriticalEdgeSplittingOptions(DT, LI).setMergeIdenticalEdges());
        // Splitting changed this block's predecessors/PHIs and invalidated
        // our iterators; restart the scan of this block from the top.
        goto ReprocessBlock;
      }
  }
}
|
|
|
|
|
2009-07-31 20:16:33 +02:00
|
|
|
bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
|
2016-08-27 00:32:55 +02:00
|
|
|
// If we already selected that function, we do not need to run SDISel.
|
|
|
|
if (mf.getProperties().hasProperty(
|
|
|
|
MachineFunctionProperties::Property::Selected))
|
|
|
|
return false;
|
2008-09-10 01:05:00 +02:00
|
|
|
// Do some sanity-checking on the command-line options.
|
2011-12-02 23:16:29 +01:00
|
|
|
assert((!EnableFastISelAbort || TM.Options.EnableFastISel) &&
|
2015-02-27 19:32:11 +01:00
|
|
|
"-fast-isel-abort > 0 requires -fast-isel");
|
2008-09-10 01:05:00 +02:00
|
|
|
|
2010-04-15 06:33:49 +02:00
|
|
|
const Function &Fn = *mf.getFunction();
|
2010-04-14 19:02:07 +02:00
|
|
|
MF = &mf;
|
|
|
|
|
2014-10-08 03:58:01 +02:00
|
|
|
// Reset the target options before resetting the optimization
|
|
|
|
// level below.
|
|
|
|
// FIXME: This is a horrible hack and should be processed via
|
|
|
|
// codegen looking at the optimization level explicitly when
|
|
|
|
// it wants to look at it.
|
2014-09-26 03:28:10 +02:00
|
|
|
TM.resetTargetOptions(Fn);
|
2013-11-22 20:11:24 +01:00
|
|
|
// Reset OptLevel to None for optnone functions.
|
|
|
|
CodeGenOpt::Level NewOptLevel = OptLevel;
|
2016-07-07 20:55:02 +02:00
|
|
|
if (OptLevel != CodeGenOpt::None && skipFunction(Fn))
|
2013-11-22 20:11:24 +01:00
|
|
|
NewOptLevel = CodeGenOpt::None;
|
|
|
|
OptLevelChanger OLC(*this, NewOptLevel);
|
|
|
|
|
2014-10-08 09:32:17 +02:00
|
|
|
TII = MF->getSubtarget().getInstrInfo();
|
|
|
|
TLI = MF->getSubtarget().getTargetLowering();
|
2014-10-08 03:58:01 +02:00
|
|
|
RegInfo = &MF->getRegInfo();
|
2015-01-15 11:41:28 +01:00
|
|
|
LibInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
|
2014-10-08 03:58:01 +02:00
|
|
|
GFI = Fn.hasGC() ? &getAnalysis<GCModuleInfo>().getFunctionInfo(Fn) : nullptr;
|
[CodeGen] Pass SDAG an ORE, and replace FastISel stats with remarks.
In the long-term, we want to replace statistics with something
finer-grained that lets us gather per-function data.
Remarks are that replacement.
Create an ORE instance in SelectionDAGISel, and pass it to
SelectionDAG.
SelectionDAG was used so that we can emit remarks from all
SelectionDAG-related code, including TargetLowering and DAGCombiner.
This isn't used in the current patch but Adam tells me he's interested
for the fp-contract combines.
Use the ORE instance to emit FastISel failures as remarks (instead of
the mix of dbgs() dumps and statistics that we currently have).
Eventually, we want to have an API that tells us whether remarks are
enabled (http://llvm.org/PR32352) so that we don't emit expensive
remarks (in this case, dumping IR) when it's not needed. For now, use
'isEnabled' as a crude replacement.
This does mean that the replacement for '-fast-isel-verbose' is now
'-pass-remarks-missed=isel'. Additionally, clang users also need to
enable remark diagnostics, using '-Rpass-missed=isel'.
This also removes '-fast-isel-verbose2': there are no static statistics
that we want to only enable in asserts builds, so we can always use
the remarks regardless of the build type.
Differential Revision: https://reviews.llvm.org/D31405
llvm-svn: 299093
2017-03-30 19:49:58 +02:00
|
|
|
ORE = make_unique<OptimizationRemarkEmitter>(&Fn);
|
[SelectionDAG] Update the dominator after splitting critical edges.
Running `llc -verify-dom-info` on the attached testcase results in a
crash in the verifier, due to a stale dominator tree.
i.e.
DominatorTree is not up to date!
Computed:
=============================--------------------------------
Inorder Dominator Tree:
[1] %safe_mod_func_uint8_t_u_u.exit.i.i.i {0,7}
[2] %lor.lhs.false.i61.i.i.i {1,2}
[2] %safe_mod_func_int8_t_s_s.exit.i.i.i {3,6}
[3] %safe_div_func_int64_t_s_s.exit66.i.i.i {4,5}
Actual:
=============================--------------------------------
Inorder Dominator Tree:
[1] %safe_mod_func_uint8_t_u_u.exit.i.i.i {0,9}
[2] %lor.lhs.false.i61.i.i.i {1,2}
[2] %safe_mod_func_int8_t_s_s.exit.i.i.i {3,8}
[3] %safe_div_func_int64_t_s_s.exit66.i.i.i {4,5}
[3] %safe_mod_func_int8_t_s_s.exit.i.i.i.lor.lhs.false.i61.i.i.i_crit_edge {6,7}
This is because in `SelectionDAGIsel` we split critical edges without
updating the corresponding dominator for the function (and we claim
in `MachineFunctionPass::getAnalysisUsage()` that the domtree is preserved).
We could either stop preserving the domtree in `getAnalysisUsage`
or tell `splitCriticalEdge()` to update it.
As the second option is easy to implement, that's the one I chose.
Differential Revision: https://reviews.llvm.org/D33800
llvm-svn: 304742
2017-06-06 00:16:41 +02:00
|
|
|
auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
|
|
|
|
DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
|
2017-06-17 02:56:27 +02:00
|
|
|
auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
|
|
|
|
LoopInfo *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
|
2014-10-08 03:58:01 +02:00
|
|
|
|
2010-01-05 02:26:11 +01:00
|
|
|
DEBUG(dbgs() << "\n\n\n=== " << Fn.getName() << "\n");
|
2005-01-07 08:47:53 +01:00
|
|
|
|
2017-06-17 02:56:27 +02:00
|
|
|
SplitCriticalSideEffectEdges(const_cast<Function &>(Fn), DT, LI);
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2017-08-03 23:54:00 +02:00
|
|
|
CurDAG->init(*MF, *ORE, this);
|
2014-03-05 03:43:26 +01:00
|
|
|
FuncInfo->set(Fn, *MF, CurDAG);
|
2011-06-16 22:22:37 +02:00
|
|
|
|
2017-05-10 02:39:30 +02:00
|
|
|
// Now get the optional analyzes if we want to.
|
|
|
|
// This is based on the possibly changed OptLevel (after optnone is taken
|
|
|
|
// into account). That's unfortunate but OK because it just means we won't
|
|
|
|
// ask for passes that have been required anyway.
|
|
|
|
|
2011-06-16 22:22:37 +02:00
|
|
|
if (UseMBPI && OptLevel != CodeGenOpt::None)
|
2015-07-16 00:48:29 +02:00
|
|
|
FuncInfo->BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
|
2011-06-16 22:22:37 +02:00
|
|
|
else
|
2014-04-14 02:51:57 +02:00
|
|
|
FuncInfo->BPI = nullptr;
|
2011-06-16 22:22:37 +02:00
|
|
|
|
2017-05-10 02:39:30 +02:00
|
|
|
if (OptLevel != CodeGenOpt::None)
|
|
|
|
AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
|
|
|
|
else
|
|
|
|
AA = nullptr;
|
|
|
|
|
|
|
|
SDB->init(GFI, AA, LibInfo);
|
2005-01-07 08:47:53 +01:00
|
|
|
|
2013-12-10 19:27:32 +01:00
|
|
|
MF->setHasInlineAsm(false);
|
|
|
|
|
2015-12-11 19:24:30 +01:00
|
|
|
FuncInfo->SplitCSR = false;
|
|
|
|
|
2015-12-16 21:45:48 +01:00
|
|
|
// We split CSR if the target supports it for the given function
|
2015-12-11 19:24:30 +01:00
|
|
|
// and the function has only return exits.
|
2016-03-19 00:38:49 +01:00
|
|
|
if (OptLevel != CodeGenOpt::None && TLI->supportSplitCSR(MF)) {
|
2015-12-11 19:24:30 +01:00
|
|
|
FuncInfo->SplitCSR = true;
|
|
|
|
|
|
|
|
// Collect all the return blocks.
|
|
|
|
for (const BasicBlock &BB : Fn) {
|
|
|
|
if (!succ_empty(&BB))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
const TerminatorInst *Term = BB.getTerminator();
|
2016-03-25 00:21:29 +01:00
|
|
|
if (isa<UnreachableInst>(Term) || isa<ReturnInst>(Term))
|
2015-12-11 19:24:30 +01:00
|
|
|
continue;
|
|
|
|
|
|
|
|
// Bail out if the exit block is not Return nor Unreachable.
|
|
|
|
FuncInfo->SplitCSR = false;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
MachineBasicBlock *EntryMBB = &MF->front();
|
|
|
|
if (FuncInfo->SplitCSR)
|
|
|
|
// This performs initialization so lowering for SplitCSR will be correct.
|
|
|
|
TLI->initializeSplitCSR(EntryMBB);
|
|
|
|
|
2010-04-14 22:05:00 +02:00
|
|
|
SelectAllBasicBlocks(Fn);
|
2017-02-13 18:38:59 +01:00
|
|
|
if (FastISelFailed && EnableFastISelFallbackReport) {
|
|
|
|
DiagnosticInfoISelFallback DiagFallback(Fn);
|
|
|
|
Fn.getContext().diagnose(DiagFallback);
|
|
|
|
}
|
2005-04-22 00:36:52 +02:00
|
|
|
|
2010-05-01 02:33:28 +02:00
|
|
|
// If the first basic block in the function has live ins that need to be
|
2008-09-06 00:59:21 +02:00
|
|
|
// copied into vregs, emit the copies into the top of the block before
|
|
|
|
// emitting the code for the block.
|
2014-10-08 11:50:52 +02:00
|
|
|
const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
|
2014-10-08 03:58:03 +02:00
|
|
|
RegInfo->EmitLiveInCopies(EntryMBB, TRI, *TII);
|
2010-04-29 01:08:54 +02:00
|
|
|
|
2015-12-11 19:24:30 +01:00
|
|
|
// Insert copies in the entry block and the return blocks.
|
2016-03-25 00:21:29 +01:00
|
|
|
if (FuncInfo->SplitCSR) {
|
|
|
|
SmallVector<MachineBasicBlock*, 4> Returns;
|
|
|
|
// Collect all the return blocks.
|
|
|
|
for (MachineBasicBlock &MBB : mf) {
|
|
|
|
if (!MBB.succ_empty())
|
|
|
|
continue;
|
|
|
|
|
|
|
|
MachineBasicBlock::iterator Term = MBB.getFirstTerminator();
|
|
|
|
if (Term != MBB.end() && Term->isReturn()) {
|
|
|
|
Returns.push_back(&MBB);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
2015-12-11 19:24:30 +01:00
|
|
|
TLI->insertCopiesSplitCSR(EntryMBB, Returns);
|
2016-03-25 00:21:29 +01:00
|
|
|
}
|
2015-12-11 19:24:30 +01:00
|
|
|
|
2010-05-26 22:18:50 +02:00
|
|
|
DenseMap<unsigned, unsigned> LiveInMap;
|
|
|
|
if (!FuncInfo->ArgDbgValues.empty())
|
|
|
|
for (MachineRegisterInfo::livein_iterator LI = RegInfo->livein_begin(),
|
|
|
|
E = RegInfo->livein_end(); LI != E; ++LI)
|
2010-12-24 05:28:06 +01:00
|
|
|
if (LI->second)
|
2010-05-26 22:18:50 +02:00
|
|
|
LiveInMap.insert(std::make_pair(LI->first, LI->second));
|
|
|
|
|
2010-04-29 01:08:54 +02:00
|
|
|
// Insert DBG_VALUE instructions for function arguments to the entry block.
|
|
|
|
for (unsigned i = 0, e = FuncInfo->ArgDbgValues.size(); i != e; ++i) {
|
|
|
|
MachineInstr *MI = FuncInfo->ArgDbgValues[e-i-1];
|
2013-06-16 22:34:15 +02:00
|
|
|
bool hasFI = MI->getOperand(0).isFI();
|
2013-11-20 01:32:32 +01:00
|
|
|
unsigned Reg =
|
|
|
|
hasFI ? TRI.getFrameRegister(*MF) : MI->getOperand(0).getReg();
|
2010-04-29 01:08:54 +02:00
|
|
|
if (TargetRegisterInfo::isPhysicalRegister(Reg))
|
|
|
|
EntryMBB->insert(EntryMBB->begin(), MI);
|
|
|
|
else {
|
|
|
|
MachineInstr *Def = RegInfo->getVRegDef(Reg);
|
2013-10-05 02:08:27 +02:00
|
|
|
if (Def) {
|
|
|
|
MachineBasicBlock::iterator InsertPos = Def;
|
|
|
|
// FIXME: VR def may not be in entry block.
|
2014-03-02 13:27:27 +01:00
|
|
|
Def->getParent()->insert(std::next(InsertPos), MI);
|
2013-10-05 02:08:27 +02:00
|
|
|
} else
|
|
|
|
DEBUG(dbgs() << "Dropping debug info for dead vreg"
|
|
|
|
<< TargetRegisterInfo::virtReg2Index(Reg) << "\n");
|
2010-04-29 01:08:54 +02:00
|
|
|
}
|
2010-05-26 22:18:50 +02:00
|
|
|
|
|
|
|
// If Reg is live-in then update debug info to track its copy in a vreg.
|
|
|
|
DenseMap<unsigned, unsigned>::iterator LDI = LiveInMap.find(Reg);
|
|
|
|
if (LDI != LiveInMap.end()) {
|
2013-06-16 22:34:15 +02:00
|
|
|
assert(!hasFI && "There's no handling of frame pointer updating here yet "
|
|
|
|
"- add if needed");
|
2010-05-26 22:18:50 +02:00
|
|
|
MachineInstr *Def = RegInfo->getVRegDef(LDI->second);
|
|
|
|
MachineBasicBlock::iterator InsertPos = Def;
|
Move the complex address expression out of DIVariable and into an extra
argument of the llvm.dbg.declare/llvm.dbg.value intrinsics.
Previously, DIVariable was a variable-length field that has an optional
reference to a Metadata array consisting of a variable number of
complex address expressions. In the case of OpPiece expressions this is
wasting a lot of storage in IR, because when an aggregate type is, e.g.,
SROA'd into all of its n individual members, the IR will contain n copies
of the DIVariable, all alike, only differing in the complex address
reference at the end.
By making the complex address into an extra argument of the
dbg.value/dbg.declare intrinsics, all of the pieces can reference the
same variable and the complex address expressions can be uniqued across
the CU, too.
Down the road, this will allow us to move other flags, such as
"indirection" out of the DIVariable, too.
The new intrinsics look like this:
declare void @llvm.dbg.declare(metadata %storage, metadata %var, metadata %expr)
declare void @llvm.dbg.value(metadata %storage, i64 %offset, metadata %var, metadata %expr)
This patch adds a new LLVM-local tag to DIExpressions, so we can detect
and pretty-print DIExpression metadata nodes.
What this patch doesn't do:
This patch does not touch the "Indirect" field in DIVariable; but moving
that into the expression would be a natural next step.
http://reviews.llvm.org/D4919
rdar://problem/17994491
Thanks to dblaikie and dexonsmith for reviewing this patch!
Note: I accidentally committed a bogus older version of this patch previously.
llvm-svn: 218787
2014-10-01 20:55:02 +02:00
|
|
|
const MDNode *Variable = MI->getDebugVariable();
|
|
|
|
const MDNode *Expr = MI->getDebugExpression();
|
2015-04-03 21:20:26 +02:00
|
|
|
DebugLoc DL = MI->getDebugLoc();
|
2013-09-17 01:29:03 +02:00
|
|
|
bool IsIndirect = MI->isIndirectDebugValue();
|
2017-07-29 01:00:45 +02:00
|
|
|
if (IsIndirect)
|
|
|
|
assert(MI->getOperand(1).getImm() == 0 &&
|
|
|
|
"DBG_VALUE with nonzero offset");
|
2015-04-29 18:38:44 +02:00
|
|
|
assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
|
2015-04-03 21:20:26 +02:00
|
|
|
"Expected inlined-at fields to agree");
|
2010-05-26 22:18:50 +02:00
|
|
|
// Def is never a terminator here, so it is ok to increment InsertPos.
|
2015-04-03 21:20:26 +02:00
|
|
|
BuildMI(*EntryMBB, ++InsertPos, DL, TII->get(TargetOpcode::DBG_VALUE),
|
2017-07-29 01:00:45 +02:00
|
|
|
IsIndirect, LDI->second, Variable, Expr);
|
2010-09-21 22:56:33 +02:00
|
|
|
|
|
|
|
// If this vreg is directly copied into an exported register then
|
|
|
|
// that COPY instructions also need DBG_VALUE, if it is the only
|
|
|
|
// user of LDI->second.
|
2014-04-14 02:51:57 +02:00
|
|
|
MachineInstr *CopyUseMI = nullptr;
|
2014-03-13 07:02:25 +01:00
|
|
|
for (MachineRegisterInfo::use_instr_iterator
|
|
|
|
UI = RegInfo->use_instr_begin(LDI->second),
|
|
|
|
E = RegInfo->use_instr_end(); UI != E; ) {
|
|
|
|
MachineInstr *UseMI = &*(UI++);
|
2010-09-21 22:56:33 +02:00
|
|
|
if (UseMI->isDebugValue()) continue;
|
|
|
|
if (UseMI->isCopy() && !CopyUseMI && UseMI->getParent() == EntryMBB) {
|
|
|
|
CopyUseMI = UseMI; continue;
|
|
|
|
}
|
|
|
|
// Otherwise this is another use or second copy use.
|
2014-04-14 02:51:57 +02:00
|
|
|
CopyUseMI = nullptr; break;
|
2010-09-21 22:56:33 +02:00
|
|
|
}
|
|
|
|
if (CopyUseMI) {
|
2015-04-03 21:20:26 +02:00
|
|
|
// Use MI's debug location, which describes where Variable was
|
|
|
|
// declared, rather than whatever is attached to CopyUseMI.
|
2010-09-21 22:56:33 +02:00
|
|
|
MachineInstr *NewMI =
|
2015-04-03 21:20:26 +02:00
|
|
|
BuildMI(*MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsIndirect,
|
2017-07-29 01:00:45 +02:00
|
|
|
CopyUseMI->getOperand(0).getReg(), Variable, Expr);
|
2011-12-06 23:12:01 +01:00
|
|
|
MachineBasicBlock::iterator Pos = CopyUseMI;
|
|
|
|
EntryMBB->insertAfter(Pos, NewMI);
|
2010-09-21 22:56:33 +02:00
|
|
|
}
|
2010-05-26 22:18:50 +02:00
|
|
|
}
|
2010-04-29 01:08:54 +02:00
|
|
|
}
|
|
|
|
|
2010-05-18 01:09:50 +02:00
|
|
|
// Determine if there are any calls in this machine function.
|
2016-07-28 20:40:00 +02:00
|
|
|
MachineFrameInfo &MFI = MF->getFrameInfo();
|
2014-05-01 00:17:38 +02:00
|
|
|
for (const auto &MBB : *MF) {
|
2016-07-28 20:40:00 +02:00
|
|
|
if (MFI.hasCalls() && MF->hasInlineAsm())
|
2013-02-16 02:25:28 +01:00
|
|
|
break;
|
|
|
|
|
2014-05-01 00:17:38 +02:00
|
|
|
for (const auto &MI : MBB) {
|
2014-10-08 03:58:03 +02:00
|
|
|
const MCInstrDesc &MCID = TII->get(MI.getOpcode());
|
2013-02-16 02:25:28 +01:00
|
|
|
if ((MCID.isCall() && !MCID.isReturn()) ||
|
2014-05-01 00:17:38 +02:00
|
|
|
MI.isStackAligningInlineAsm()) {
|
2016-07-28 20:40:00 +02:00
|
|
|
MFI.setHasCalls(true);
|
2013-02-16 02:25:28 +01:00
|
|
|
}
|
2014-05-01 00:17:38 +02:00
|
|
|
if (MI.isInlineAsm()) {
|
2013-12-10 19:27:32 +01:00
|
|
|
MF->setHasInlineAsm(true);
|
2010-05-18 01:09:50 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-05-26 21:46:12 +02:00
|
|
|
// Determine if there is a call to setjmp in the machine function.
|
2011-12-18 21:35:43 +01:00
|
|
|
MF->setExposesReturnsTwice(Fn.callsFunctionThatReturnsTwice());
|
2010-05-26 21:46:12 +02:00
|
|
|
|
2010-07-10 11:00:22 +02:00
|
|
|
// Replace forward-declared registers with the registers containing
|
|
|
|
// the desired value.
|
|
|
|
MachineRegisterInfo &MRI = MF->getRegInfo();
|
|
|
|
for (DenseMap<unsigned, unsigned>::iterator
|
|
|
|
I = FuncInfo->RegFixups.begin(), E = FuncInfo->RegFixups.end();
|
|
|
|
I != E; ++I) {
|
|
|
|
unsigned From = I->first;
|
|
|
|
unsigned To = I->second;
|
|
|
|
// If To is also scheduled to be replaced, find what its ultimate
|
|
|
|
// replacement is.
|
2017-02-04 03:00:53 +01:00
|
|
|
while (true) {
|
2012-05-01 01:41:30 +02:00
|
|
|
DenseMap<unsigned, unsigned>::iterator J = FuncInfo->RegFixups.find(To);
|
2010-07-10 11:00:22 +02:00
|
|
|
if (J == E) break;
|
|
|
|
To = J->second;
|
|
|
|
}
|
2013-08-17 01:37:31 +02:00
|
|
|
// Make sure the new register has a sufficiently constrained register class.
|
|
|
|
if (TargetRegisterInfo::isVirtualRegister(From) &&
|
|
|
|
TargetRegisterInfo::isVirtualRegister(To))
|
|
|
|
MRI.constrainRegClass(To, MRI.getRegClass(From));
|
2010-07-10 11:00:22 +02:00
|
|
|
// Replace it.
|
2015-05-08 22:46:54 +02:00
|
|
|
|
|
|
|
|
|
|
|
// Replacing one register with another won't touch the kill flags.
|
|
|
|
// We need to conservatively clear the kill flags as a kill on the old
|
|
|
|
// register might dominate existing uses of the new register.
|
|
|
|
if (!MRI.use_empty(To))
|
|
|
|
MRI.clearKillFlags(From);
|
2010-07-10 11:00:22 +02:00
|
|
|
MRI.replaceRegWith(From, To);
|
|
|
|
}
|
|
|
|
|
2017-04-28 22:25:05 +02:00
|
|
|
TLI->finalizeLowering(*MF);
|
2012-10-15 23:33:06 +02:00
|
|
|
|
2010-04-29 01:08:54 +02:00
|
|
|
// Release function-specific state. SDB and CurDAG are already cleared
|
|
|
|
// at this point.
|
|
|
|
FuncInfo->clear();
|
2008-09-06 00:59:21 +02:00
|
|
|
|
2013-12-17 03:01:10 +01:00
|
|
|
DEBUG(dbgs() << "*** MachineFunction at end of ISel ***\n");
|
|
|
|
DEBUG(MF->print(dbgs()));
|
|
|
|
|
2005-01-07 08:47:53 +01:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
[CodeGen] Pass SDAG an ORE, and replace FastISel stats with remarks.
In the long-term, we want to replace statistics with something
finer-grained that lets us gather per-function data.
Remarks are that replacement.
Create an ORE instance in SelectionDAGISel, and pass it to
SelectionDAG.
SelectionDAG was used so that we can emit remarks from all
SelectionDAG-related code, including TargetLowering and DAGCombiner.
This isn't used in the current patch but Adam tells me he's interested
for the fp-contract combines.
Use the ORE instance to emit FastISel failures as remarks (instead of
the mix of dbgs() dumps and statistics that we currently have).
Eventually, we want to have an API that tells us whether remarks are
enabled (http://llvm.org/PR32352) so that we don't emit expensive
remarks (in this case, dumping IR) when it's not needed. For now, use
'isEnabled' as a crude replacement.
This does mean that the replacement for '-fast-isel-verbose' is now
'-pass-remarks-missed=isel'. Additionally, clang users also need to
enable remark diagnostics, using '-Rpass-missed=isel'.
This also removes '-fast-isel-verbose2': there are no static statistics
that we want to only enable in asserts builds, so we can always use
the remarks regardless of the build type.
Differential Revision: https://reviews.llvm.org/D31405
llvm-svn: 299093
2017-03-30 19:49:58 +02:00
|
|
|
static void reportFastISelFailure(MachineFunction &MF,
|
|
|
|
OptimizationRemarkEmitter &ORE,
|
|
|
|
OptimizationRemarkMissed &R,
|
|
|
|
bool ShouldAbort) {
|
|
|
|
// Print the function name explicitly if we don't have a debug location (which
|
|
|
|
// makes the diagnostic less useful) or if we're going to emit a raw error.
|
|
|
|
if (!R.getLocation().isValid() || ShouldAbort)
|
|
|
|
R << (" (in function: " + MF.getName() + ")").str();
|
|
|
|
|
|
|
|
if (ShouldAbort)
|
|
|
|
report_fatal_error(R.getMsg());
|
|
|
|
|
|
|
|
ORE.emit(R);
|
|
|
|
}
|
|
|
|
|
2011-04-17 19:12:08 +02:00
|
|
|
/// SelectBasicBlock - Lower the LLVM IR instructions in [Begin, End) into a
/// SelectionDAG, then select and emit them as machine code. \p HadTailCall is
/// set if a tail call was emitted, in which case lowering stopped early and
/// the caller must not lower the remaining instructions of the block.
void SelectionDAGISel::SelectBasicBlock(BasicBlock::const_iterator Begin,
                                        BasicBlock::const_iterator End,
                                        bool &HadTailCall) {
  // Allow creating illegal types during DAG building for the basic block.
  CurDAG->NewNodesMustHaveLegalTypes = false;

  // Lower the instructions. If a call is emitted as a tail call, cease emitting
  // nodes for this block. Instructions whose argument copies were elided
  // during argument lowering are skipped entirely.
  for (BasicBlock::const_iterator I = Begin; I != End && !SDB->HasTailCall; ++I) {
    if (!ElidedArgCopyInstrs.count(&*I))
      SDB->visit(*I);
  }

  // Make sure the root of the DAG is up-to-date.
  CurDAG->setRoot(SDB->getControlRoot());
  HadTailCall = SDB->HasTailCall;
  SDB->clear();

  // Final step, emit the lowered DAG as machine code.
  CodeGenAndEmitDAG();
}
|
|
|
|
|
2008-08-23 04:25:05 +02:00
|
|
|
void SelectionDAGISel::ComputeLiveOutVRegInfo() {
|
2016-01-30 02:24:31 +01:00
|
|
|
SmallPtrSet<SDNode*, 16> VisitedNodes;
|
2008-06-17 08:09:18 +02:00
|
|
|
SmallVector<SDNode*, 128> Worklist;
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2008-08-28 23:40:38 +02:00
|
|
|
Worklist.push_back(CurDAG->getRoot().getNode());
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2017-04-28 07:31:46 +02:00
|
|
|
KnownBits Known;
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2010-01-07 18:27:56 +01:00
|
|
|
do {
|
|
|
|
SDNode *N = Worklist.pop_back_val();
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2008-06-17 08:09:18 +02:00
|
|
|
// If we've already seen this node, ignore it.
|
2014-11-19 08:49:26 +01:00
|
|
|
if (!VisitedNodes.insert(N).second)
|
2008-06-17 08:09:18 +02:00
|
|
|
continue;
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2008-06-17 08:09:18 +02:00
|
|
|
// Otherwise, add all chain operands to the worklist.
|
2015-06-26 21:37:02 +02:00
|
|
|
for (const SDValue &Op : N->op_values())
|
|
|
|
if (Op.getValueType() == MVT::Other)
|
|
|
|
Worklist.push_back(Op.getNode());
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2008-06-17 08:09:18 +02:00
|
|
|
// If this is a CopyToReg with a vreg dest, process it.
|
|
|
|
if (N->getOpcode() != ISD::CopyToReg)
|
|
|
|
continue;
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2008-06-17 08:09:18 +02:00
|
|
|
unsigned DestReg = cast<RegisterSDNode>(N->getOperand(1))->getReg();
|
|
|
|
if (!TargetRegisterInfo::isVirtualRegister(DestReg))
|
|
|
|
continue;
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2008-06-17 08:09:18 +02:00
|
|
|
// Ignore non-scalar or non-integer values.
|
2008-07-27 23:46:04 +02:00
|
|
|
SDValue Src = N->getOperand(2);
|
2009-08-11 00:56:29 +02:00
|
|
|
EVT SrcVT = Src.getValueType();
|
2008-06-17 08:09:18 +02:00
|
|
|
if (!SrcVT.isInteger() || SrcVT.isVector())
|
|
|
|
continue;
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2008-08-23 04:25:05 +02:00
|
|
|
unsigned NumSignBits = CurDAG->ComputeNumSignBits(Src);
|
2017-04-28 07:31:46 +02:00
|
|
|
CurDAG->computeKnownBits(Src, Known);
|
|
|
|
FuncInfo->AddLiveOutRegInfo(DestReg, NumSignBits, Known);
|
2010-01-07 18:27:56 +01:00
|
|
|
} while (!Worklist.empty());
|
2008-06-17 08:09:18 +02:00
|
|
|
}
|
|
|
|
|
2010-07-10 11:00:22 +02:00
|
|
|
void SelectionDAGISel::CodeGenAndEmitDAG() {
|
2016-11-18 20:43:18 +01:00
|
|
|
StringRef GroupName = "sdag";
|
|
|
|
StringRef GroupDescription = "Instruction Selection and Scheduling";
|
2008-07-21 22:00:07 +02:00
|
|
|
std::string BlockName;
|
2011-03-23 02:38:28 +01:00
|
|
|
int BlockNumber = -1;
|
2011-08-12 16:54:45 +02:00
|
|
|
(void)BlockNumber;
|
2015-01-14 07:03:18 +01:00
|
|
|
bool MatchFilterBB = false; (void)MatchFilterBB;
|
2017-02-10 15:37:25 +01:00
|
|
|
|
|
|
|
// Pre-type legalization allow creation of any node types.
|
|
|
|
CurDAG->NewNodesMustHaveLegalTypes = false;
|
|
|
|
|
2015-01-14 07:03:18 +01:00
|
|
|
#ifndef NDEBUG
|
2015-01-15 13:03:32 +01:00
|
|
|
MatchFilterBB = (FilterDAGBasicBlockName.empty() ||
|
2015-01-14 07:03:18 +01:00
|
|
|
FilterDAGBasicBlockName ==
|
|
|
|
FuncInfo->MBB->getBasicBlock()->getName().str());
|
|
|
|
#endif
|
2011-03-23 02:38:28 +01:00
|
|
|
#ifdef NDEBUG
|
2008-07-21 22:00:07 +02:00
|
|
|
if (ViewDAGCombine1 || ViewLegalizeTypesDAGs || ViewLegalizeDAGs ||
|
2008-11-24 15:53:14 +01:00
|
|
|
ViewDAGCombine2 || ViewDAGCombineLT || ViewISelDAGs || ViewSchedDAGs ||
|
|
|
|
ViewSUnitDAGs)
|
2011-03-23 02:38:28 +01:00
|
|
|
#endif
|
|
|
|
{
|
|
|
|
BlockNumber = FuncInfo->MBB->getNumber();
|
2015-03-27 18:51:30 +01:00
|
|
|
BlockName =
|
|
|
|
(MF->getName() + ":" + FuncInfo->MBB->getBasicBlock()->getName()).str();
|
2011-03-23 02:38:28 +01:00
|
|
|
}
|
|
|
|
DEBUG(dbgs() << "Initial selection DAG: BB#" << BlockNumber
|
|
|
|
<< " '" << BlockName << "'\n"; CurDAG->dump());
|
2008-07-21 22:00:07 +02:00
|
|
|
|
2015-01-14 07:03:18 +01:00
|
|
|
if (ViewDAGCombine1 && MatchFilterBB)
|
|
|
|
CurDAG->viewGraph("dag-combine1 input for " + BlockName);
|
2007-10-08 17:12:17 +02:00
|
|
|
|
2005-10-10 18:47:10 +02:00
|
|
|
// Run the DAG combiner in pre-legalize mode.
|
2010-06-18 17:56:31 +02:00
|
|
|
{
|
2016-11-18 20:43:18 +01:00
|
|
|
NamedRegionTimer T("combine1", "DAG Combining 1", GroupName,
|
|
|
|
GroupDescription, TimePassesIsEnabled);
|
2017-05-10 02:39:30 +02:00
|
|
|
CurDAG->Combine(BeforeLegalizeTypes, AA, OptLevel);
|
2008-07-01 19:59:20 +02:00
|
|
|
}
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2011-03-23 02:38:28 +01:00
|
|
|
DEBUG(dbgs() << "Optimized lowered selection DAG: BB#" << BlockNumber
|
|
|
|
<< " '" << BlockName << "'\n"; CurDAG->dump());
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2005-01-07 08:47:53 +01:00
|
|
|
// Second step, hack on the DAG until it only uses operations and types that
|
|
|
|
// the target supports.
|
2015-01-14 07:03:18 +01:00
|
|
|
if (ViewLegalizeTypesDAGs && MatchFilterBB)
|
|
|
|
CurDAG->viewGraph("legalize-types input for " + BlockName);
|
2009-12-05 18:51:33 +01:00
|
|
|
|
|
|
|
bool Changed;
|
2010-06-18 17:56:31 +02:00
|
|
|
{
|
2016-11-18 20:43:18 +01:00
|
|
|
NamedRegionTimer T("legalize_types", "Type Legalization", GroupName,
|
|
|
|
GroupDescription, TimePassesIsEnabled);
|
2009-12-05 18:51:33 +01:00
|
|
|
Changed = CurDAG->LegalizeTypes();
|
|
|
|
}
|
|
|
|
|
2011-03-23 02:38:28 +01:00
|
|
|
DEBUG(dbgs() << "Type-legalized selection DAG: BB#" << BlockNumber
|
|
|
|
<< " '" << BlockName << "'\n"; CurDAG->dump());
|
2008-07-21 22:00:07 +02:00
|
|
|
|
2017-02-10 15:37:25 +01:00
|
|
|
// Only allow creation of legal node types.
|
2013-11-25 12:14:43 +01:00
|
|
|
CurDAG->NewNodesMustHaveLegalTypes = true;
|
|
|
|
|
2009-12-05 18:51:33 +01:00
|
|
|
if (Changed) {
|
2015-01-14 07:03:18 +01:00
|
|
|
if (ViewDAGCombineLT && MatchFilterBB)
|
2009-12-05 18:51:33 +01:00
|
|
|
CurDAG->viewGraph("dag-combine-lt input for " + BlockName);
|
|
|
|
|
|
|
|
// Run the DAG combiner in post-type-legalize mode.
|
2010-06-18 17:56:31 +02:00
|
|
|
{
|
2016-11-18 20:43:18 +01:00
|
|
|
NamedRegionTimer T("combine_lt", "DAG Combining after legalize types",
|
|
|
|
GroupName, GroupDescription, TimePassesIsEnabled);
|
2017-05-10 02:39:30 +02:00
|
|
|
CurDAG->Combine(AfterLegalizeTypes, AA, OptLevel);
|
2008-07-21 22:00:07 +02:00
|
|
|
}
|
|
|
|
|
2011-03-23 02:38:28 +01:00
|
|
|
DEBUG(dbgs() << "Optimized type-legalized selection DAG: BB#" << BlockNumber
|
|
|
|
<< " '" << BlockName << "'\n"; CurDAG->dump());
|
2009-12-05 18:51:33 +01:00
|
|
|
}
|
2008-07-21 22:00:07 +02:00
|
|
|
|
2010-06-18 17:56:31 +02:00
|
|
|
{
|
2016-11-18 20:43:18 +01:00
|
|
|
NamedRegionTimer T("legalize_vec", "Vector Legalization", GroupName,
|
|
|
|
GroupDescription, TimePassesIsEnabled);
|
2009-12-05 18:51:33 +01:00
|
|
|
Changed = CurDAG->LegalizeVectors();
|
|
|
|
}
|
2009-05-23 14:35:30 +02:00
|
|
|
|
2009-12-05 18:51:33 +01:00
|
|
|
if (Changed) {
|
2017-02-10 06:05:57 +01:00
|
|
|
DEBUG(dbgs() << "Vector-legalized selection DAG: BB#" << BlockNumber
|
|
|
|
<< " '" << BlockName << "'\n"; CurDAG->dump());
|
|
|
|
|
2010-06-18 17:56:31 +02:00
|
|
|
{
|
2016-11-18 20:43:18 +01:00
|
|
|
NamedRegionTimer T("legalize_types2", "Type Legalization 2", GroupName,
|
|
|
|
GroupDescription, TimePassesIsEnabled);
|
2009-12-28 02:51:30 +01:00
|
|
|
CurDAG->LegalizeTypes();
|
2009-05-23 14:35:30 +02:00
|
|
|
}
|
|
|
|
|
2017-02-10 06:05:57 +01:00
|
|
|
DEBUG(dbgs() << "Vector/type-legalized selection DAG: BB#" << BlockNumber
|
|
|
|
<< " '" << BlockName << "'\n"; CurDAG->dump());
|
|
|
|
|
2015-01-14 07:03:18 +01:00
|
|
|
if (ViewDAGCombineLT && MatchFilterBB)
|
2009-12-05 18:51:33 +01:00
|
|
|
CurDAG->viewGraph("dag-combine-lv input for " + BlockName);
|
2009-05-23 14:35:30 +02:00
|
|
|
|
2009-12-05 18:51:33 +01:00
|
|
|
// Run the DAG combiner in post-type-legalize mode.
|
2010-06-18 17:56:31 +02:00
|
|
|
{
|
2016-11-18 20:43:18 +01:00
|
|
|
NamedRegionTimer T("combine_lv", "DAG Combining after legalize vectors",
|
|
|
|
GroupName, GroupDescription, TimePassesIsEnabled);
|
2017-05-10 02:39:30 +02:00
|
|
|
CurDAG->Combine(AfterLegalizeVectorOps, AA, OptLevel);
|
2009-05-23 14:35:30 +02:00
|
|
|
}
|
2009-12-05 18:51:33 +01:00
|
|
|
|
2011-03-23 02:38:28 +01:00
|
|
|
DEBUG(dbgs() << "Optimized vector-legalized selection DAG: BB#"
|
|
|
|
<< BlockNumber << " '" << BlockName << "'\n"; CurDAG->dump());
|
2008-07-11 01:37:50 +02:00
|
|
|
}
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2015-01-14 07:03:18 +01:00
|
|
|
if (ViewLegalizeDAGs && MatchFilterBB)
|
|
|
|
CurDAG->viewGraph("legalize input for " + BlockName);
|
2008-07-21 22:00:07 +02:00
|
|
|
|
2010-06-18 17:56:31 +02:00
|
|
|
{
|
2016-11-18 20:43:18 +01:00
|
|
|
NamedRegionTimer T("legalize", "DAG Legalization", GroupName,
|
|
|
|
GroupDescription, TimePassesIsEnabled);
|
2011-05-17 00:19:54 +02:00
|
|
|
CurDAG->Legalize();
|
2008-07-01 19:59:20 +02:00
|
|
|
}
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2011-03-23 02:38:28 +01:00
|
|
|
DEBUG(dbgs() << "Legalized selection DAG: BB#" << BlockNumber
|
|
|
|
<< " '" << BlockName << "'\n"; CurDAG->dump());
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2015-01-14 07:03:18 +01:00
|
|
|
if (ViewDAGCombine2 && MatchFilterBB)
|
|
|
|
CurDAG->viewGraph("dag-combine2 input for " + BlockName);
|
2008-07-21 22:00:07 +02:00
|
|
|
|
2005-10-10 18:47:10 +02:00
|
|
|
// Run the DAG combiner in post-legalize mode.
|
2010-06-18 17:56:31 +02:00
|
|
|
{
|
2016-11-18 20:43:18 +01:00
|
|
|
NamedRegionTimer T("combine2", "DAG Combining 2", GroupName,
|
|
|
|
GroupDescription, TimePassesIsEnabled);
|
2017-05-10 02:39:30 +02:00
|
|
|
CurDAG->Combine(AfterLegalizeDAG, AA, OptLevel);
|
2008-07-01 19:59:20 +02:00
|
|
|
}
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2011-03-23 02:38:28 +01:00
|
|
|
DEBUG(dbgs() << "Optimized legalized selection DAG: BB#" << BlockNumber
|
|
|
|
<< " '" << BlockName << "'\n"; CurDAG->dump());
|
2007-10-08 17:12:17 +02:00
|
|
|
|
2010-06-24 16:30:44 +02:00
|
|
|
if (OptLevel != CodeGenOpt::None)
|
2008-08-23 04:25:05 +02:00
|
|
|
ComputeLiveOutVRegInfo();
|
2006-04-28 04:09:19 +02:00
|
|
|
|
2015-01-14 07:03:18 +01:00
|
|
|
if (ViewISelDAGs && MatchFilterBB)
|
|
|
|
CurDAG->viewGraph("isel input for " + BlockName);
|
2010-03-14 20:27:55 +01:00
|
|
|
|
2005-03-30 03:10:47 +02:00
|
|
|
// Third, instruction select all of the operations to machine code, adding the
|
|
|
|
// code to the MachineBasicBlock.
|
2010-06-18 17:56:31 +02:00
|
|
|
{
|
2016-11-18 20:43:18 +01:00
|
|
|
NamedRegionTimer T("isel", "Instruction Selection", GroupName,
|
|
|
|
GroupDescription, TimePassesIsEnabled);
|
2010-03-02 07:34:30 +01:00
|
|
|
DoInstructionSelection();
|
2008-07-01 19:59:20 +02:00
|
|
|
}
|
2008-06-30 22:45:06 +02:00
|
|
|
|
2011-03-23 02:38:28 +01:00
|
|
|
DEBUG(dbgs() << "Selected selection DAG: BB#" << BlockNumber
|
|
|
|
<< " '" << BlockName << "'\n"; CurDAG->dump());
|
2008-07-21 22:00:07 +02:00
|
|
|
|
2015-01-14 07:03:18 +01:00
|
|
|
if (ViewSchedDAGs && MatchFilterBB)
|
|
|
|
CurDAG->viewGraph("scheduler input for " + BlockName);
|
2008-07-21 22:00:07 +02:00
|
|
|
|
2008-07-14 20:19:29 +02:00
|
|
|
// Schedule machine code.
|
2009-02-11 05:27:20 +01:00
|
|
|
ScheduleDAGSDNodes *Scheduler = CreateScheduler();
|
2010-06-18 17:56:31 +02:00
|
|
|
{
|
2016-11-18 20:43:18 +01:00
|
|
|
NamedRegionTimer T("sched", "Instruction Scheduling", GroupName,
|
|
|
|
GroupDescription, TimePassesIsEnabled);
|
2012-03-07 06:21:52 +01:00
|
|
|
Scheduler->Run(CurDAG, FuncInfo->MBB);
|
2008-07-14 20:19:29 +02:00
|
|
|
}
|
|
|
|
|
2016-02-02 19:20:45 +01:00
|
|
|
if (ViewSUnitDAGs && MatchFilterBB)
|
|
|
|
Scheduler->viewGraph();
|
2008-07-21 22:00:07 +02:00
|
|
|
|
2009-09-20 04:20:51 +02:00
|
|
|
// Emit machine code to BB. This can change 'BB' to the last block being
|
2008-06-30 22:45:06 +02:00
|
|
|
// inserted into.
|
2010-09-30 21:44:31 +02:00
|
|
|
MachineBasicBlock *FirstMBB = FuncInfo->MBB, *LastMBB;
|
2010-06-18 17:56:31 +02:00
|
|
|
{
|
2016-11-18 20:43:18 +01:00
|
|
|
NamedRegionTimer T("emit", "Instruction Creation", GroupName,
|
|
|
|
GroupDescription, TimePassesIsEnabled);
|
2010-07-10 11:00:22 +02:00
|
|
|
|
2012-03-07 06:21:52 +01:00
|
|
|
// FuncInfo->InsertPt is passed by reference and set to the end of the
|
|
|
|
// scheduled instructions.
|
|
|
|
LastMBB = FuncInfo->MBB = Scheduler->EmitSchedule(FuncInfo->InsertPt);
|
2008-07-14 20:19:29 +02:00
|
|
|
}
|
|
|
|
|
2010-09-30 21:44:31 +02:00
|
|
|
// If the block was split, make sure we update any references that are used to
|
|
|
|
// update PHI nodes later on.
|
|
|
|
if (FirstMBB != LastMBB)
|
|
|
|
SDB->UpdateSplitBlock(FirstMBB, LastMBB);
|
|
|
|
|
2008-07-14 20:19:29 +02:00
|
|
|
// Free the scheduler state.
|
2010-06-18 17:56:31 +02:00
|
|
|
{
|
2016-11-18 20:43:18 +01:00
|
|
|
NamedRegionTimer T("cleanup", "Instruction Scheduling Cleanup", GroupName,
|
|
|
|
GroupDescription, TimePassesIsEnabled);
|
2008-07-14 20:19:29 +02:00
|
|
|
delete Scheduler;
|
2008-07-01 19:59:20 +02:00
|
|
|
}
|
2008-06-30 22:45:06 +02:00
|
|
|
|
2010-05-01 02:25:44 +02:00
|
|
|
// Free the SelectionDAG state, now that we're finished with it.
|
|
|
|
CurDAG->clear();
|
2009-09-20 04:20:51 +02:00
|
|
|
}
|
2006-03-27 03:32:24 +02:00
|
|
|
|
2012-04-21 00:08:50 +02:00
|
|
|
namespace {

/// ISelUpdater - helper class to handle updates of the instruction selection
/// graph.
///
/// DoInstructionSelection() walks the DAG's node list backwards while Select()
/// may delete nodes; this listener keeps the walk's iterator valid by skipping
/// past a node at the moment it is deleted.
class ISelUpdater : public SelectionDAG::DAGUpdateListener {
  /// Reference to the caller's iterator into the DAG node list; advanced here
  /// whenever the node it points at is deleted.
  SelectionDAG::allnodes_iterator &ISelPosition;

public:
  ISelUpdater(SelectionDAG &DAG, SelectionDAG::allnodes_iterator &isp)
    : SelectionDAG::DAGUpdateListener(DAG), ISelPosition(isp) {}

  /// NodeDeleted - Handle nodes deleted from the graph. If the node being
  /// deleted is the current ISelPosition node, update ISelPosition.
  ///
  void NodeDeleted(SDNode *N, SDNode *E) override {
    if (ISelPosition == SelectionDAG::allnodes_iterator(N))
      ++ISelPosition;
  }
};

} // end anonymous namespace
|
|
|
|
|
2010-03-02 07:34:30 +01:00
|
|
|
/// Run the target's instruction selector over the current DAG: visit nodes in
/// reverse topological order (root first), mutating each into target-specific
/// machine nodes via Select().
void SelectionDAGISel::DoInstructionSelection() {
  DEBUG(dbgs() << "===== Instruction selection begins: BB#"
               << FuncInfo->MBB->getNumber()
               << " '" << FuncInfo->MBB->getName() << "'\n");

  PreprocessISelDAG();

  // Select target instructions for the DAG.
  {
    // Number all nodes with a topological order and set DAGSize.
    DAGSize = CurDAG->AssignTopologicalOrder();

    // Create a dummy node (which is not added to allnodes), that adds
    // a reference to the root node, preventing it from being deleted,
    // and tracking any changes of the root.
    HandleSDNode Dummy(CurDAG->getRoot());
    SelectionDAG::allnodes_iterator ISelPosition (CurDAG->getRoot().getNode());
    ++ISelPosition;

    // Make sure that ISelPosition gets properly updated when nodes are deleted
    // in calls made from this function.  (ISelUpdater bumps the iterator past
    // any node that Select() happens to erase.)
    ISelUpdater ISU(*CurDAG, ISelPosition);

    // The AllNodes list is now topological-sorted. Visit the
    // nodes by starting at the end of the list (the root of the
    // graph) and preceding back toward the beginning (the entry
    // node).
    while (ISelPosition != CurDAG->allnodes_begin()) {
      SDNode *Node = &*--ISelPosition;
      // Skip dead nodes. DAGCombiner is expected to eliminate all dead nodes,
      // but there are currently some corner cases that it misses. Also, this
      // makes it theoretically possible to disable the DAGCombiner.
      if (Node->use_empty())
        continue;

      // When we are using non-default rounding modes or FP exception behavior
      // FP operations are represented by StrictFP pseudo-operations.  They
      // need to be simplified here so that the target-specific instruction
      // selectors know how to handle them.
      //
      // If the current node is a strict FP pseudo-op, the isStrictFPOp()
      // function will provide the corresponding normal FP opcode to which the
      // node should be mutated.
      //
      // FIXME: The backends need a way to handle FP constraints.
      if (Node->isStrictFPOpcode())
        Node = CurDAG->mutateStrictFPToFP(Node);

      Select(Node);
    }

    // Re-establish the root from the dummy handle: Select() calls may have
    // replaced the original root node.
    CurDAG->setRoot(Dummy.getValue());
  }

  DEBUG(dbgs() << "===== Instruction selection ends:\n");

  PostprocessISelDAG();
}
|
|
|
|
|
2015-10-07 02:27:33 +02:00
|
|
|
static bool hasExceptionPointerOrCodeUser(const CatchPadInst *CPI) {
|
|
|
|
for (const User *U : CPI->users()) {
|
|
|
|
if (const IntrinsicInst *EHPtrCall = dyn_cast<IntrinsicInst>(U)) {
|
|
|
|
Intrinsic::ID IID = EHPtrCall->getIntrinsicID();
|
|
|
|
if (IID == Intrinsic::eh_exceptionpointer ||
|
|
|
|
IID == Intrinsic::eh_exceptioncode)
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2010-04-14 21:53:31 +02:00
|
|
|
/// PrepareEHLandingPad - Emit an EH_LABEL, set up live-in registers, and
|
|
|
|
/// do other setup for EH landing-pad blocks.
|
2015-04-21 20:23:57 +02:00
|
|
|
/// PrepareEHLandingPad - Emit an EH_LABEL, set up live-in registers, and
/// do other setup for EH landing-pad blocks.
///
/// Every return path currently yields true; the bool return leaves room for
/// callers to skip blocks that need no further isel work.
bool SelectionDAGISel::PrepareEHLandingPad() {
  MachineBasicBlock *MBB = FuncInfo->MBB;
  const Constant *PersonalityFn = FuncInfo->Fn->getPersonalityFn();
  const BasicBlock *LLVMBB = MBB->getBasicBlock();
  const TargetRegisterClass *PtrRC =
      TLI->getRegClassFor(TLI->getPointerTy(CurDAG->getDataLayout()));

  // Catchpads have one live-in register, which typically holds the exception
  // pointer or code.
  if (const auto *CPI = dyn_cast<CatchPadInst>(LLVMBB->getFirstNonPHI())) {
    if (hasExceptionPointerOrCodeUser(CPI)) {
      // Get or create the virtual register to hold the pointer or code. Mark
      // the live in physreg and copy into the vreg.
      MCPhysReg EHPhysReg = TLI->getExceptionPointerRegister(PersonalityFn);
      assert(EHPhysReg && "target lacks exception pointer register");
      MBB->addLiveIn(EHPhysReg);
      unsigned VReg = FuncInfo->getCatchPadExceptionPointerVReg(CPI, PtrRC);
      BuildMI(*MBB, FuncInfo->InsertPt, SDB->getCurDebugLoc(),
              TII->get(TargetOpcode::COPY), VReg)
          .addReg(EHPhysReg, RegState::Kill);
    }
    // Catchpads need none of the landing-pad label/live-in setup below.
    return true;
  }

  if (!LLVMBB->isLandingPad())
    return true;

  // Add a label to mark the beginning of the landing pad.  Deletion of the
  // landing pad can thus be detected via the MachineModuleInfo.
  MCSymbol *Label = MF->addLandingPad(MBB);

  // Assign the call site to the landing pad's begin label.
  MF->setCallSiteLandingPad(Label, SDB->LPadToCallSiteMap[MBB]);

  const MCInstrDesc &II = TII->get(TargetOpcode::EH_LABEL);
  BuildMI(*MBB, FuncInfo->InsertPt, SDB->getCurDebugLoc(), II)
    .addSym(Label);

  // Mark exception register as live in.
  if (unsigned Reg = TLI->getExceptionPointerRegister(PersonalityFn))
    FuncInfo->ExceptionPointerVirtReg = MBB->addLiveIn(Reg, PtrRC);

  // Mark exception selector register as live in.
  if (unsigned Reg = TLI->getExceptionSelectorRegister(PersonalityFn))
    FuncInfo->ExceptionSelectorVirtReg = MBB->addLiveIn(Reg, PtrRC);

  return true;
}
|
2010-03-02 07:34:30 +01:00
|
|
|
|
2011-04-17 08:03:19 +02:00
|
|
|
/// isFoldedOrDeadInstruction - Return true if the specified instruction is
|
|
|
|
/// side-effect free and is either dead or folded into a generated instruction.
|
|
|
|
/// Return false if it needs to be emitted.
|
|
|
|
static bool isFoldedOrDeadInstruction(const Instruction *I,
|
|
|
|
FunctionLoweringInfo *FuncInfo) {
|
2011-04-22 23:59:37 +02:00
|
|
|
return !I->mayWriteToMemory() && // Side-effecting instructions aren't folded.
|
2015-08-28 01:27:47 +02:00
|
|
|
!isa<TerminatorInst>(I) && // Terminators aren't folded.
|
2011-04-22 23:59:37 +02:00
|
|
|
!isa<DbgInfoIntrinsic>(I) && // Debug instructions aren't folded.
|
2015-08-28 01:27:47 +02:00
|
|
|
!I->isEHPad() && // EH pad instructions aren't folded.
|
2011-04-22 23:59:37 +02:00
|
|
|
!FuncInfo->isExportedInst(I); // Exported instrs must be computed.
|
2011-04-17 08:03:19 +02:00
|
|
|
}
|
|
|
|
|
2016-04-05 20:13:16 +02:00
|
|
|
/// Set up SwiftErrorVals by going through the function. If the function has
|
|
|
|
/// swifterror argument, it will be the first entry.
|
|
|
|
static void setupSwiftErrorVals(const Function &Fn, const TargetLowering *TLI,
|
|
|
|
FunctionLoweringInfo *FuncInfo) {
|
|
|
|
if (!TLI->supportSwiftError())
|
|
|
|
return;
|
|
|
|
|
|
|
|
FuncInfo->SwiftErrorVals.clear();
|
2016-10-08 00:06:55 +02:00
|
|
|
FuncInfo->SwiftErrorVRegDefMap.clear();
|
|
|
|
FuncInfo->SwiftErrorVRegUpwardsUse.clear();
|
2017-06-15 19:34:42 +02:00
|
|
|
FuncInfo->SwiftErrorVRegDefUses.clear();
|
2016-10-08 00:06:55 +02:00
|
|
|
FuncInfo->SwiftErrorArg = nullptr;
|
2016-04-05 20:13:16 +02:00
|
|
|
|
|
|
|
// Check if function has a swifterror argument.
|
2016-10-08 00:06:55 +02:00
|
|
|
bool HaveSeenSwiftErrorArg = false;
|
2016-04-05 20:13:16 +02:00
|
|
|
for (Function::const_arg_iterator AI = Fn.arg_begin(), AE = Fn.arg_end();
|
|
|
|
AI != AE; ++AI)
|
2016-10-08 00:06:55 +02:00
|
|
|
if (AI->hasSwiftErrorAttr()) {
|
|
|
|
assert(!HaveSeenSwiftErrorArg &&
|
|
|
|
"Must have only one swifterror parameter");
|
2016-10-11 21:49:29 +02:00
|
|
|
(void)HaveSeenSwiftErrorArg; // silence warning.
|
2016-10-08 00:06:55 +02:00
|
|
|
HaveSeenSwiftErrorArg = true;
|
|
|
|
FuncInfo->SwiftErrorArg = &*AI;
|
2016-04-05 20:13:16 +02:00
|
|
|
FuncInfo->SwiftErrorVals.push_back(&*AI);
|
2016-10-08 00:06:55 +02:00
|
|
|
}
|
2016-04-05 20:13:16 +02:00
|
|
|
|
|
|
|
for (const auto &LLVMBB : Fn)
|
|
|
|
for (const auto &Inst : LLVMBB) {
|
|
|
|
if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(&Inst))
|
|
|
|
if (Alloca->isSwiftError())
|
|
|
|
FuncInfo->SwiftErrorVals.push_back(Alloca);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-10-08 00:06:55 +02:00
|
|
|
/// Seed the entry block with an initial (undef) virtual-register definition
/// for each swifterror value that is not the swifterror argument itself, so
/// that later blocks always find a downward-exposed def to forward.
static void createSwiftErrorEntriesInEntryBlock(FunctionLoweringInfo *FuncInfo,
                                                FastISel *FastIS,
                                                const TargetLowering *TLI,
                                                const TargetInstrInfo *TII,
                                                SelectionDAGBuilder *SDB) {
  if (!TLI->supportSwiftError())
    return;

  // We only need to do this when we have swifterror parameter or swifterror
  // alloc.
  if (FuncInfo->SwiftErrorVals.empty())
    return;

  assert(FuncInfo->MBB == &*FuncInfo->MF->begin() &&
         "expected to insert into entry block");
  auto &DL = FuncInfo->MF->getDataLayout();
  auto const *RC = TLI->getRegClassFor(TLI->getPointerTy(DL));
  for (const auto *SwiftErrorVal : FuncInfo->SwiftErrorVals) {
    // We will always generate a copy from the argument. It is always used at
    // least by the 'return' of the swifterror.
    if (FuncInfo->SwiftErrorArg && FuncInfo->SwiftErrorArg == SwiftErrorVal)
      continue;
    unsigned VReg = FuncInfo->MF->getRegInfo().createVirtualRegister(RC);
    // Assign Undef to Vreg. We construct MI directly to make sure it works
    // with FastISel.
    BuildMI(*FuncInfo->MBB, FuncInfo->MBB->getFirstNonPHI(),
            SDB->getCurDebugLoc(), TII->get(TargetOpcode::IMPLICIT_DEF),
            VReg);

    // Keep FastIS informed about the value we just inserted.
    if (FastIS)
      FastIS->setLastLocalValue(&*std::prev(FuncInfo->InsertPt));

    // Record the IMPLICIT_DEF's vreg as the entry block's downward def.
    FuncInfo->setCurrentSwiftErrorVReg(FuncInfo->MBB, SwiftErrorVal, VReg);
  }
}
|
2016-04-05 20:13:16 +02:00
|
|
|
|
2017-05-09 18:02:20 +02:00
|
|
|
/// Collect llvm.dbg.declare information. This is done after argument lowering
|
|
|
|
/// in case the declarations refer to arguments.
|
|
|
|
/// Collect llvm.dbg.declare information. This is done after argument lowering
/// in case the declarations refer to arguments.
static void processDbgDeclares(FunctionLoweringInfo *FuncInfo) {
  MachineFunction *MF = FuncInfo->MF;
  const DataLayout &DL = MF->getDataLayout();
  // Sentinel frame index meaning "no frame slot found for this address".
  const int InvalidFI = std::numeric_limits<int>::max();

  for (const BasicBlock &Block : *FuncInfo->Fn) {
    for (const Instruction &Inst : Block) {
      const auto *Declare = dyn_cast<DbgDeclareInst>(&Inst);
      if (!Declare)
        continue;

      assert(Declare->getVariable() && "Missing variable");
      assert(Declare->getDebugLoc() && "Missing location");

      const Value *Address = Declare->getAddress();
      if (!Address)
        continue;

      // Look through casts and constant offset GEPs. These mostly come from
      // inalloca.
      APInt Offset(DL.getPointerSizeInBits(0), 0);
      Address = Address->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);

      // Check if the variable is a static alloca or a byval or inalloca
      // argument passed in memory. If it is not, then we will ignore this
      // intrinsic and handle this during isel like dbg.value.
      int FI = InvalidFI;
      if (const auto *AI = dyn_cast<AllocaInst>(Address)) {
        auto SlotIt = FuncInfo->StaticAllocaMap.find(AI);
        if (SlotIt != FuncInfo->StaticAllocaMap.end())
          FI = SlotIt->second;
      } else if (const auto *Arg = dyn_cast<Argument>(Address)) {
        FI = FuncInfo->getArgumentFrameIndex(Arg);
      }

      if (FI == InvalidFI)
        continue;

      // Fold any accumulated constant offset into the debug expression.
      DIExpression *Expr = Declare->getExpression();
      if (Offset.getBoolValue())
        Expr = DIExpression::prepend(Expr, DIExpression::NoDeref,
                                     Offset.getZExtValue());
      MF->setVariableDbgInfo(Declare->getVariable(), Expr, FI,
                             Declare->getDebugLoc());
    }
  }
}
|
|
|
|
|
2016-10-08 00:06:55 +02:00
|
|
|
/// Propagate swifterror values through the machine function CFG.
|
|
|
|
/// Propagate swifterror values through the machine function CFG.
///
/// Walks blocks in reverse post order; for each (block, swifterror value)
/// pair it either forwards the single predecessor def, emits a COPY into an
/// upward-exposed use vreg, or builds a PHI when predecessors disagree.
static void propagateSwiftErrorVRegs(FunctionLoweringInfo *FuncInfo) {
  auto *TLI = FuncInfo->TLI;
  if (!TLI->supportSwiftError())
    return;

  // We only need to do this when we have swifterror parameter or swifterror
  // alloc.
  if (FuncInfo->SwiftErrorVals.empty())
    return;

  // For each machine basic block in reverse post order.
  ReversePostOrderTraversal<MachineFunction *> RPOT(FuncInfo->MF);
  for (MachineBasicBlock *MBB : RPOT) {
    // For each swifterror value in the function.
    for(const auto *SwiftErrorVal : FuncInfo->SwiftErrorVals) {
      auto Key = std::make_pair(MBB, SwiftErrorVal);
      auto UUseIt = FuncInfo->SwiftErrorVRegUpwardsUse.find(Key);
      auto VRegDefIt = FuncInfo->SwiftErrorVRegDefMap.find(Key);
      bool UpwardsUse = UUseIt != FuncInfo->SwiftErrorVRegUpwardsUse.end();
      unsigned UUseVReg = UpwardsUse ? UUseIt->second : 0;
      bool DownwardDef = VRegDefIt != FuncInfo->SwiftErrorVRegDefMap.end();
      assert(!(UpwardsUse && !DownwardDef) &&
             "We can't have an upwards use but no downwards def");

      // If there is no upwards exposed use and an entry for the swifterror in
      // the def map for this value we don't need to do anything: We already
      // have a downward def for this basic block.
      if (!UpwardsUse && DownwardDef)
        continue;

      // Otherwise we either have an upwards exposed use vreg that we need to
      // materialize or need to forward the downward def from predecessors.

      // Check whether we have a single vreg def from all predecessors.
      // Otherwise we need a phi.
      SmallVector<std::pair<MachineBasicBlock *, unsigned>, 4> VRegs;
      SmallSet<const MachineBasicBlock*, 8> Visited;
      for (auto *Pred : MBB->predecessors()) {
        if (!Visited.insert(Pred).second)
          continue;
        VRegs.push_back(std::make_pair(
            Pred, FuncInfo->getOrCreateSwiftErrorVReg(Pred, SwiftErrorVal)));
        if (Pred != MBB)
          continue;
        // We have a self-edge.
        // If there was no upwards use in this basic block there is now one: the
        // phi needs to use it self.
        if (!UpwardsUse) {
          UpwardsUse = true;
          // getOrCreateSwiftErrorVReg on the self-edge just created the
          // upwards-use entry; re-query it.
          UUseIt = FuncInfo->SwiftErrorVRegUpwardsUse.find(Key);
          assert(UUseIt != FuncInfo->SwiftErrorVRegUpwardsUse.end());
          UUseVReg = UUseIt->second;
        }
      }

      // We need a phi node if we have more than one predecessor with different
      // downward defs.  (std::find_if on an empty VRegs yields end() without
      // evaluating the predicate, so no separate emptiness guard is needed.)
      bool needPHI =
          std::find_if(
              VRegs.begin(), VRegs.end(),
              [&](const std::pair<const MachineBasicBlock *, unsigned> &V)
                  -> bool { return V.second != VRegs[0].second; }) !=
          VRegs.end();

      // If there is no upwards exposed used and we don't need a phi just
      // forward the swifterror vreg from the predecessor(s).
      if (!UpwardsUse && !needPHI) {
        assert(!VRegs.empty() &&
               "No predecessors? The entry block should bail out earlier");
        // Just forward the swifterror vreg from the predecessor(s).
        FuncInfo->setCurrentSwiftErrorVReg(MBB, SwiftErrorVal, VRegs[0].second);
        continue;
      }

      // Use the defining instruction's location if the swifterror value is an
      // alloca; the swifterror argument has no instruction to take it from.
      DebugLoc DLoc;
      if (const auto *SwiftErrorInst = dyn_cast<Instruction>(SwiftErrorVal))
        DLoc = SwiftErrorInst->getDebugLoc();
      const auto *TII = FuncInfo->MF->getSubtarget().getInstrInfo();

      // If we don't need a phi create a copy to the upward exposed vreg.
      if (!needPHI) {
        assert(UpwardsUse);
        assert(!VRegs.empty() &&
               "No predecessors? Is the Calling Convention correct?");
        unsigned DestReg = UUseVReg;
        BuildMI(*MBB, MBB->getFirstNonPHI(), DLoc, TII->get(TargetOpcode::COPY),
                DestReg)
            .addReg(VRegs[0].second);
        continue;
      }

      // We need a phi: if there is an upwards exposed use we already have a
      // destination virtual register number otherwise we generate a new one.
      auto &DL = FuncInfo->MF->getDataLayout();
      auto const *RC = TLI->getRegClassFor(TLI->getPointerTy(DL));
      unsigned PHIVReg =
          UpwardsUse ? UUseVReg
                     : FuncInfo->MF->getRegInfo().createVirtualRegister(RC);
      MachineInstrBuilder SwiftErrorPHI =
          BuildMI(*MBB, MBB->getFirstNonPHI(), DLoc,
                  TII->get(TargetOpcode::PHI), PHIVReg);
      for (auto BBRegPair : VRegs) {
        SwiftErrorPHI.addReg(BBRegPair.second).addMBB(BBRegPair.first);
      }

      // We did not have a definition in this block before: store the phi's vreg
      // as this block downward exposed def.
      if (!UpwardsUse)
        FuncInfo->setCurrentSwiftErrorVReg(MBB, SwiftErrorVal, PHIVReg);
    }
  }
}
|
|
|
|
|
2017-08-20 15:03:48 +02:00
|
|
|
/// Pre-assign virtual registers to every swifterror def and use in the
/// instruction range [Begin, End) before FastISel runs, so FastISel and the
/// SelectionDAG path agree on which vregs carry swifterror values.
static void preassignSwiftErrorRegs(const TargetLowering *TLI,
                                    FunctionLoweringInfo *FuncInfo,
                                    BasicBlock::const_iterator Begin,
                                    BasicBlock::const_iterator End) {
  if (!TLI->supportSwiftError() || FuncInfo->SwiftErrorVals.empty())
    return;

  // Iterator over instructions and assign vregs to swifterror defs and uses.
  for (auto It = Begin; It != End; ++It) {
    ImmutableCallSite CS(&*It);
    if (CS) {
      // A call-site with a swifterror argument is both use and def.
      const Value *SwiftErrorAddr = nullptr;
      for (auto &Arg : CS.args()) {
        if (!Arg->isSwiftError())
          continue;
        // Use of swifterror.
        assert(!SwiftErrorAddr && "Cannot have multiple swifterror arguments");
        SwiftErrorAddr = &*Arg;
        assert(SwiftErrorAddr->isSwiftError() &&
               "Must have a swifterror value argument");
        unsigned VReg; bool CreatedReg;
        std::tie(VReg, CreatedReg) = FuncInfo->getOrCreateSwiftErrorVRegUseAt(
          &*It, FuncInfo->MBB, SwiftErrorAddr);
        assert(CreatedReg);
      }
      // Calls without a swifterror argument need no def recorded.
      if (!SwiftErrorAddr)
        continue;

      // Def of swifterror.
      unsigned VReg; bool CreatedReg;
      std::tie(VReg, CreatedReg) =
          FuncInfo->getOrCreateSwiftErrorVRegDefAt(&*It);
      assert(CreatedReg);
      FuncInfo->setCurrentSwiftErrorVReg(FuncInfo->MBB, SwiftErrorAddr, VReg);

    // A load is a use.
    } else if (const LoadInst *LI = dyn_cast<const LoadInst>(&*It)) {
      const Value *V = LI->getOperand(0);
      if (!V->isSwiftError())
        continue;

      unsigned VReg; bool CreatedReg;
      std::tie(VReg, CreatedReg) =
          FuncInfo->getOrCreateSwiftErrorVRegUseAt(LI, FuncInfo->MBB, V);
      assert(CreatedReg);

    // A store is a def.
    } else if (const StoreInst *SI = dyn_cast<const StoreInst>(&*It)) {
      // Operand 1 of a store is the pointer operand (the swifterror address).
      const Value *SwiftErrorAddr = SI->getOperand(1);
      if (!SwiftErrorAddr->isSwiftError())
        continue;

      // Def of swifterror.
      unsigned VReg; bool CreatedReg;
      std::tie(VReg, CreatedReg) =
          FuncInfo->getOrCreateSwiftErrorVRegDefAt(&*It);
      assert(CreatedReg);
      FuncInfo->setCurrentSwiftErrorVReg(FuncInfo->MBB, SwiftErrorAddr, VReg);

    // A return in a swiferror returning function is a use.
    } else if (const ReturnInst *R = dyn_cast<const ReturnInst>(&*It)) {
      const Function *F = R->getParent()->getParent();
      if(!F->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
        continue;

      unsigned VReg; bool CreatedReg;
      std::tie(VReg, CreatedReg) = FuncInfo->getOrCreateSwiftErrorVRegUseAt(
          R, FuncInfo->MBB, FuncInfo->SwiftErrorArg);
      assert(CreatedReg);
    }
  }
}
|
|
|
|
|
2010-04-15 03:51:59 +02:00
|
|
|
void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
|
2017-02-13 18:38:59 +01:00
|
|
|
FastISelFailed = false;
|
2008-09-29 23:55:50 +02:00
|
|
|
// Initialize the Fast-ISel state, if needed.
|
2014-04-14 02:51:57 +02:00
|
|
|
FastISel *FastIS = nullptr;
|
2013-11-21 11:55:15 +01:00
|
|
|
if (TM.Options.EnableFastISel)
|
2014-10-08 09:32:17 +02:00
|
|
|
FastIS = TLI->createFastISel(*FuncInfo, LibInfo);
|
2008-09-29 23:55:50 +02:00
|
|
|
|
2016-04-05 20:13:16 +02:00
|
|
|
setupSwiftErrorVals(Fn, TLI, FuncInfo);
|
|
|
|
|
2011-02-24 11:00:04 +01:00
|
|
|
ReversePostOrderTraversal<const Function*> RPOT(&Fn);
|
2011-02-24 11:00:13 +01:00
|
|
|
|
2017-02-07 19:42:53 +01:00
|
|
|
// Lower arguments up front. An RPO iteration always visits the entry block
|
|
|
|
// first.
|
|
|
|
assert(*RPOT.begin() == &Fn.getEntryBlock());
|
|
|
|
++NumEntryBlocks;
|
|
|
|
|
|
|
|
// Set up FuncInfo for ISel. Entry blocks never have PHIs.
|
|
|
|
FuncInfo->MBB = FuncInfo->MBBMap[&Fn.getEntryBlock()];
|
|
|
|
FuncInfo->InsertPt = FuncInfo->MBB->begin();
|
|
|
|
|
|
|
|
if (!FastIS) {
|
|
|
|
LowerArguments(Fn);
|
|
|
|
} else {
|
|
|
|
// See if fast isel can lower the arguments.
|
|
|
|
FastIS->startNewBlock();
|
|
|
|
if (!FastIS->lowerArguments()) {
|
2017-02-13 18:38:59 +01:00
|
|
|
FastISelFailed = true;
|
2017-02-07 19:42:53 +01:00
|
|
|
// Fast isel failed to lower these arguments
|
|
|
|
++NumFastIselFailLowerArguments;
|
[CodeGen] Pass SDAG an ORE, and replace FastISel stats with remarks.
In the long-term, we want to replace statistics with something
finer-grained that lets us gather per-function data.
Remarks are that replacement.
Create an ORE instance in SelectionDAGISel, and pass it to
SelectionDAG.
SelectionDAG was used so that we can emit remarks from all
SelectionDAG-related code, including TargetLowering and DAGCombiner.
This isn't used in the current patch but Adam tells me he's interested
for the fp-contract combines.
Use the ORE instance to emit FastISel failures as remarks (instead of
the mix of dbgs() dumps and statistics that we currently have).
Eventually, we want to have an API that tells us whether remarks are
enabled (http://llvm.org/PR32352) so that we don't emit expensive
remarks (in this case, dumping IR) when it's not needed. For now, use
'isEnabled' as a crude replacement.
This does mean that the replacement for '-fast-isel-verbose' is now
'-pass-remarks-missed=isel'. Additionally, clang users also need to
enable remark diagnostics, using '-Rpass-missed=isel'.
This also removes '-fast-isel-verbose2': there are no static statistics
that we want to only enable in asserts builds, so we can always use
the remarks regardless of the build type.
Differential Revision: https://reviews.llvm.org/D31405
llvm-svn: 299093
2017-03-30 19:49:58 +02:00
|
|
|
|
|
|
|
OptimizationRemarkMissed R("sdagisel", "FastISelFailure",
|
|
|
|
Fn.getSubprogram(),
|
|
|
|
&Fn.getEntryBlock());
|
|
|
|
R << "FastISel didn't lower all arguments: "
|
|
|
|
<< ore::NV("Prototype", Fn.getType());
|
|
|
|
reportFastISelFailure(*MF, *ORE, R, EnableFastISelAbort > 1);
|
2017-02-07 19:42:53 +01:00
|
|
|
|
|
|
|
// Use SelectionDAG argument lowering
|
|
|
|
LowerArguments(Fn);
|
|
|
|
CurDAG->setRoot(SDB->getControlRoot());
|
|
|
|
SDB->clear();
|
|
|
|
CodeGenAndEmitDAG();
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we inserted any instructions at the beginning, make a note of
|
|
|
|
// where they are, so we can be sure to emit subsequent instructions
|
|
|
|
// after them.
|
|
|
|
if (FuncInfo->InsertPt != FuncInfo->MBB->begin())
|
|
|
|
FastIS->setLastLocalValue(&*std::prev(FuncInfo->InsertPt));
|
|
|
|
else
|
|
|
|
FastIS->setLastLocalValue(nullptr);
|
|
|
|
}
|
2017-02-27 23:12:06 +01:00
|
|
|
createSwiftErrorEntriesInEntryBlock(FuncInfo, FastIS, TLI, TII, SDB);
|
2017-02-07 19:42:53 +01:00
|
|
|
|
2017-05-09 18:02:20 +02:00
|
|
|
processDbgDeclares(FuncInfo);
|
|
|
|
|
2017-02-07 19:42:53 +01:00
|
|
|
// Iterate over all basic blocks in the function.
|
|
|
|
for (const BasicBlock *LLVMBB : RPOT) {
|
2011-02-24 11:00:16 +01:00
|
|
|
if (OptLevel != CodeGenOpt::None) {
|
|
|
|
bool AllPredsVisited = true;
|
2014-07-21 19:06:51 +02:00
|
|
|
for (const_pred_iterator PI = pred_begin(LLVMBB), PE = pred_end(LLVMBB);
|
|
|
|
PI != PE; ++PI) {
|
|
|
|
if (!FuncInfo->VisitedBBs.count(*PI)) {
|
2011-02-24 11:00:16 +01:00
|
|
|
AllPredsVisited = false;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-02-24 11:00:25 +01:00
|
|
|
if (AllPredsVisited) {
|
2011-04-17 08:03:19 +02:00
|
|
|
for (BasicBlock::const_iterator I = LLVMBB->begin();
|
2012-12-04 01:50:06 +01:00
|
|
|
const PHINode *PN = dyn_cast<PHINode>(I); ++I)
|
|
|
|
FuncInfo->ComputePHILiveOutRegInfo(PN);
|
2011-02-24 11:00:25 +01:00
|
|
|
} else {
|
2011-04-17 08:03:19 +02:00
|
|
|
for (BasicBlock::const_iterator I = LLVMBB->begin();
|
2012-12-04 01:50:06 +01:00
|
|
|
const PHINode *PN = dyn_cast<PHINode>(I); ++I)
|
|
|
|
FuncInfo->InvalidatePHILiveOutRegInfo(PN);
|
2011-02-24 11:00:16 +01:00
|
|
|
}
|
|
|
|
|
2011-02-24 11:00:13 +01:00
|
|
|
FuncInfo->VisitedBBs.insert(LLVMBB);
|
2011-02-24 11:00:16 +01:00
|
|
|
}
|
2011-02-24 11:00:13 +01:00
|
|
|
|
2015-10-13 21:47:46 +02:00
|
|
|
BasicBlock::const_iterator const Begin =
|
|
|
|
LLVMBB->getFirstNonPHI()->getIterator();
|
2010-04-15 03:51:59 +02:00
|
|
|
BasicBlock::const_iterator const End = LLVMBB->end();
|
2010-07-10 11:00:22 +02:00
|
|
|
BasicBlock::const_iterator BI = End;
|
2008-08-28 22:28:56 +02:00
|
|
|
|
2013-03-02 00:32:40 +01:00
|
|
|
FuncInfo->MBB = FuncInfo->MBBMap[LLVMBB];
|
2015-09-09 01:28:38 +02:00
|
|
|
if (!FuncInfo->MBB)
|
|
|
|
continue; // Some blocks like catchpads have no code or MBB.
|
2017-02-07 19:42:53 +01:00
|
|
|
|
|
|
|
// Insert new instructions after any phi or argument setup code.
|
|
|
|
FuncInfo->InsertPt = FuncInfo->MBB->end();
|
2010-07-10 11:00:22 +02:00
|
|
|
|
|
|
|
// Setup an EH landing-pad block.
|
2013-07-04 06:53:45 +02:00
|
|
|
FuncInfo->ExceptionPointerVirtReg = 0;
|
|
|
|
FuncInfo->ExceptionSelectorVirtReg = 0;
|
2015-10-12 19:42:32 +02:00
|
|
|
if (LLVMBB->isEHPad())
|
|
|
|
if (!PrepareEHLandingPad())
|
|
|
|
continue;
|
2015-08-28 01:27:47 +02:00
|
|
|
|
2008-08-23 04:25:05 +02:00
|
|
|
// Before doing SelectionDAG ISel, see if FastISel has been requested.
|
2010-05-01 04:44:23 +02:00
|
|
|
if (FastIS) {
|
2017-02-07 19:42:53 +01:00
|
|
|
if (LLVMBB != &Fn.getEntryBlock())
|
|
|
|
FastIS->startNewBlock();
|
2010-07-10 11:00:22 +02:00
|
|
|
|
2013-02-27 23:52:54 +01:00
|
|
|
unsigned NumFastIselRemaining = std::distance(Begin, End);
|
2017-06-15 19:34:42 +02:00
|
|
|
|
|
|
|
// Pre-assign swifterror vregs.
|
|
|
|
preassignSwiftErrorRegs(TLI, FuncInfo, Begin, End);
|
|
|
|
|
2008-09-29 23:55:50 +02:00
|
|
|
// Do FastISel on as many instructions as possible.
|
2010-07-10 11:00:22 +02:00
|
|
|
for (; BI != Begin; --BI) {
|
2015-10-13 21:47:46 +02:00
|
|
|
const Instruction *Inst = &*std::prev(BI);
|
2010-07-10 11:00:22 +02:00
|
|
|
|
|
|
|
// If we no longer require this instruction, skip it.
|
Elide argument copies during instruction selection
Summary:
Avoids tons of prologue boilerplate when arguments are passed in memory
and left in memory. This can happen in a debug build or in a release
build when an argument alloca is escaped. This will dramatically affect
the code size of x86 debug builds, because X86 fast isel doesn't handle
arguments passed in memory at all. It only handles the x86_64 case of up
to 6 basic register parameters.
This is implemented by analyzing the entry block before ISel to identify
copy elision candidates. A copy elision candidate is an argument that is
used to fully initialize an alloca before any other possibly escaping
uses of that alloca. If an argument is a copy elision candidate, we set
a flag on the InputArg. If the the target generates loads from a fixed
stack object that matches the size and alignment requirements of the
alloca, the SelectionDAG builder will delete the stack object created
for the alloca and replace it with the fixed stack object. The load is
left behind to satisfy any remaining uses of the argument value. The
store is now dead and is therefore elided. The fixed stack object is
also marked as mutable, as it may now be modified by the user, and it
would be invalid to rematerialize the initial load from it.
Supersedes D28388
Fixes PR26328
Reviewers: chandlerc, MatzeB, qcolombet, inglorion, hans
Subscribers: igorb, llvm-commits
Differential Revision: https://reviews.llvm.org/D29668
llvm-svn: 296683
2017-03-01 22:42:00 +01:00
|
|
|
if (isFoldedOrDeadInstruction(Inst, FuncInfo) ||
|
|
|
|
ElidedArgCopyInstrs.count(Inst)) {
|
2011-11-16 22:02:08 +01:00
|
|
|
--NumFastIselRemaining;
|
2010-07-01 04:58:57 +02:00
|
|
|
continue;
|
2011-11-16 22:02:08 +01:00
|
|
|
}
|
2010-07-10 11:00:22 +02:00
|
|
|
|
|
|
|
// Bottom-up: reset the insert pos at the top, after any local-value
|
|
|
|
// instructions.
|
|
|
|
FastIS->recomputeInsertPt();
|
2010-07-01 04:58:57 +02:00
|
|
|
|
2010-01-12 05:32:35 +01:00
|
|
|
// Try to select the instruction with FastISel.
|
2014-09-03 22:56:52 +02:00
|
|
|
if (FastIS->selectInstruction(Inst)) {
|
2011-11-16 22:02:08 +01:00
|
|
|
--NumFastIselRemaining;
|
2013-03-08 23:56:31 +01:00
|
|
|
++NumFastIselSuccess;
|
2011-04-22 23:59:37 +02:00
|
|
|
// If fast isel succeeded, skip over all the folded instructions, and
|
|
|
|
// then see if there is a load right before the selected instructions.
|
|
|
|
// Try to fold the load if so.
|
|
|
|
const Instruction *BeforeInst = Inst;
|
2015-10-13 21:47:46 +02:00
|
|
|
while (BeforeInst != &*Begin) {
|
|
|
|
BeforeInst = &*std::prev(BasicBlock::const_iterator(BeforeInst));
|
2011-04-22 23:59:37 +02:00
|
|
|
if (!isFoldedOrDeadInstruction(BeforeInst, FuncInfo))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (BeforeInst != Inst && isa<LoadInst>(BeforeInst) &&
|
|
|
|
BeforeInst->hasOneUse() &&
|
2013-04-20 00:29:18 +02:00
|
|
|
FastIS->tryToFoldLoad(cast<LoadInst>(BeforeInst), Inst)) {
|
2011-04-22 23:59:37 +02:00
|
|
|
// If we succeeded, don't re-select the load.
|
2014-03-02 13:27:27 +01:00
|
|
|
BI = std::next(BasicBlock::const_iterator(BeforeInst));
|
2011-11-16 22:02:08 +01:00
|
|
|
--NumFastIselRemaining;
|
2013-03-08 23:56:31 +01:00
|
|
|
++NumFastIselSuccess;
|
2011-11-16 22:02:08 +01:00
|
|
|
}
|
2008-09-29 23:55:50 +02:00
|
|
|
continue;
|
implement rdar://6653118 - fastisel should fold loads where possible.
Since mem2reg isn't run at -O0, we get a ton of reloads from the stack,
for example, before, this code:
int foo(int x, int y, int z) {
return x+y+z;
}
used to compile into:
_foo: ## @foo
subq $12, %rsp
movl %edi, 8(%rsp)
movl %esi, 4(%rsp)
movl %edx, (%rsp)
movl 8(%rsp), %edx
movl 4(%rsp), %esi
addl %edx, %esi
movl (%rsp), %edx
addl %esi, %edx
movl %edx, %eax
addq $12, %rsp
ret
Now we produce:
_foo: ## @foo
subq $12, %rsp
movl %edi, 8(%rsp)
movl %esi, 4(%rsp)
movl %edx, (%rsp)
movl 8(%rsp), %edx
addl 4(%rsp), %edx ## Folded load
addl (%rsp), %edx ## Folded load
movl %edx, %eax
addq $12, %rsp
ret
Fewer instructions and less register use = faster compiles.
llvm-svn: 113102
2010-09-05 04:18:34 +02:00
|
|
|
}
|
2008-08-23 04:25:05 +02:00
|
|
|
|
2017-07-09 07:55:20 +02:00
|
|
|
FastISelFailed = true;
|
|
|
|
|
2008-09-29 23:55:50 +02:00
|
|
|
// Then handle certain instructions as single-LLVM-Instruction blocks.
|
2017-07-04 17:09:09 +02:00
|
|
|
// We cannot separate out GCrelocates to their own blocks since we need
|
|
|
|
// to keep track of gc-relocates for a particular gc-statepoint. This is
|
|
|
|
// done by SelectionDAGBuilder::LowerAsSTATEPOINT, called before
|
|
|
|
// visitGCRelocate.
|
|
|
|
if (isa<CallInst>(Inst) && !isStatepoint(Inst) && !isGCRelocate(Inst)) {
|
[CodeGen] Pass SDAG an ORE, and replace FastISel stats with remarks.
In the long-term, we want to replace statistics with something
finer-grained that lets us gather per-function data.
Remarks are that replacement.
Create an ORE instance in SelectionDAGISel, and pass it to
SelectionDAG.
SelectionDAG was used so that we can emit remarks from all
SelectionDAG-related code, including TargetLowering and DAGCombiner.
This isn't used in the current patch but Adam tells me he's interested
for the fp-contract combines.
Use the ORE instance to emit FastISel failures as remarks (instead of
the mix of dbgs() dumps and statistics that we currently have).
Eventually, we want to have an API that tells us whether remarks are
enabled (http://llvm.org/PR32352) so that we don't emit expensive
remarks (in this case, dumping IR) when it's not needed. For now, use
'isEnabled' as a crude replacement.
This does mean that the replacement for '-fast-isel-verbose' is now
'-pass-remarks-missed=isel'. Additionally, clang users also need to
enable remark diagnostics, using '-Rpass-missed=isel'.
This also removes '-fast-isel-verbose2': there are no static statistics
that we want to only enable in asserts builds, so we can always use
the remarks regardless of the build type.
Differential Revision: https://reviews.llvm.org/D31405
llvm-svn: 299093
2017-03-30 19:49:58 +02:00
|
|
|
OptimizationRemarkMissed R("sdagisel", "FastISelFailure",
|
|
|
|
Inst->getDebugLoc(), LLVMBB);
|
|
|
|
|
|
|
|
R << "FastISel missed call";
|
2011-11-16 22:02:08 +01:00
|
|
|
|
[CodeGen] Pass SDAG an ORE, and replace FastISel stats with remarks.
In the long-term, we want to replace statistics with something
finer-grained that lets us gather per-function data.
Remarks are that replacement.
Create an ORE instance in SelectionDAGISel, and pass it to
SelectionDAG.
SelectionDAG was used so that we can emit remarks from all
SelectionDAG-related code, including TargetLowering and DAGCombiner.
This isn't used in the current patch but Adam tells me he's interested
for the fp-contract combines.
Use the ORE instance to emit FastISel failures as remarks (instead of
the mix of dbgs() dumps and statistics that we currently have).
Eventually, we want to have an API that tells us whether remarks are
enabled (http://llvm.org/PR32352) so that we don't emit expensive
remarks (in this case, dumping IR) when it's not needed. For now, use
'isEnabled' as a crude replacement.
This does mean that the replacement for '-fast-isel-verbose' is now
'-pass-remarks-missed=isel'. Additionally, clang users also need to
enable remark diagnostics, using '-Rpass-missed=isel'.
This also removes '-fast-isel-verbose2': there are no static statistics
that we want to only enable in asserts builds, so we can always use
the remarks regardless of the build type.
Differential Revision: https://reviews.llvm.org/D31405
llvm-svn: 299093
2017-03-30 19:49:58 +02:00
|
|
|
if (R.isEnabled() || EnableFastISelAbort) {
|
|
|
|
std::string InstStrStorage;
|
|
|
|
raw_string_ostream InstStr(InstStrStorage);
|
|
|
|
InstStr << *Inst;
|
|
|
|
|
|
|
|
R << ": " << InstStr.str();
|
2008-08-23 04:25:05 +02:00
|
|
|
}
|
[CodeGen] Pass SDAG an ORE, and replace FastISel stats with remarks.
In the long-term, we want to replace statistics with something
finer-grained that lets us gather per-function data.
Remarks are that replacement.
Create an ORE instance in SelectionDAGISel, and pass it to
SelectionDAG.
SelectionDAG was used so that we can emit remarks from all
SelectionDAG-related code, including TargetLowering and DAGCombiner.
This isn't used in the current patch but Adam tells me he's interested
for the fp-contract combines.
Use the ORE instance to emit FastISel failures as remarks (instead of
the mix of dbgs() dumps and statistics that we currently have).
Eventually, we want to have an API that tells us whether remarks are
enabled (http://llvm.org/PR32352) so that we don't emit expensive
remarks (in this case, dumping IR) when it's not needed. For now, use
'isEnabled' as a crude replacement.
This does mean that the replacement for '-fast-isel-verbose' is now
'-pass-remarks-missed=isel'. Additionally, clang users also need to
enable remark diagnostics, using '-Rpass-missed=isel'.
This also removes '-fast-isel-verbose2': there are no static statistics
that we want to only enable in asserts builds, so we can always use
the remarks regardless of the build type.
Differential Revision: https://reviews.llvm.org/D31405
llvm-svn: 299093
2017-03-30 19:49:58 +02:00
|
|
|
|
|
|
|
reportFastISelFailure(*MF, *ORE, R, EnableFastISelAbort > 2);
|
2008-08-23 04:25:05 +02:00
|
|
|
|
2015-08-28 01:27:47 +02:00
|
|
|
if (!Inst->getType()->isVoidTy() && !Inst->getType()->isTokenTy() &&
|
|
|
|
!Inst->use_empty()) {
|
2010-07-10 11:00:22 +02:00
|
|
|
unsigned &R = FuncInfo->ValueMap[Inst];
|
2008-09-29 23:55:50 +02:00
|
|
|
if (!R)
|
2010-07-10 11:00:22 +02:00
|
|
|
R = FuncInfo->CreateRegs(Inst->getType());
|
2008-08-23 04:25:05 +02:00
|
|
|
}
|
2008-09-29 23:55:50 +02:00
|
|
|
|
2009-11-20 03:51:26 +01:00
|
|
|
bool HadTailCall = false;
|
2012-12-11 01:18:02 +01:00
|
|
|
MachineBasicBlock::iterator SavedInsertPt = FuncInfo->InsertPt;
|
2015-10-13 21:47:46 +02:00
|
|
|
SelectBasicBlock(Inst->getIterator(), BI, HadTailCall);
|
2009-11-20 03:51:26 +01:00
|
|
|
|
|
|
|
// If the call was emitted as a tail call, we're done with the block.
|
2012-12-11 01:18:02 +01:00
|
|
|
// We also need to delete any previously emitted instructions.
|
2009-11-20 03:51:26 +01:00
|
|
|
if (HadTailCall) {
|
2012-12-11 01:18:02 +01:00
|
|
|
FastIS->removeDeadCode(SavedInsertPt, FuncInfo->MBB->end());
|
2010-07-10 11:00:22 +02:00
|
|
|
--BI;
|
2009-11-20 03:51:26 +01:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2012-12-11 01:18:02 +01:00
|
|
|
// Recompute NumFastIselRemaining as Selection DAG instruction
|
|
|
|
// selection may have handled the call, input args, etc.
|
|
|
|
unsigned RemainingNow = std::distance(Begin, BI);
|
2013-03-08 23:56:31 +01:00
|
|
|
NumFastIselFailures += NumFastIselRemaining - RemainingNow;
|
|
|
|
NumFastIselRemaining = RemainingNow;
|
2008-09-29 23:55:50 +02:00
|
|
|
continue;
|
2008-08-23 04:25:05 +02:00
|
|
|
}
|
2008-09-29 23:55:50 +02:00
|
|
|
|
[CodeGen] Pass SDAG an ORE, and replace FastISel stats with remarks.
In the long-term, we want to replace statistics with something
finer-grained that lets us gather per-function data.
Remarks are that replacement.
Create an ORE instance in SelectionDAGISel, and pass it to
SelectionDAG.
SelectionDAG was used so that we can emit remarks from all
SelectionDAG-related code, including TargetLowering and DAGCombiner.
This isn't used in the current patch but Adam tells me he's interested
for the fp-contract combines.
Use the ORE instance to emit FastISel failures as remarks (instead of
the mix of dbgs() dumps and statistics that we currently have).
Eventually, we want to have an API that tells us whether remarks are
enabled (http://llvm.org/PR32352) so that we don't emit expensive
remarks (in this case, dumping IR) when it's not needed. For now, use
'isEnabled' as a crude replacement.
This does mean that the replacement for '-fast-isel-verbose' is now
'-pass-remarks-missed=isel'. Additionally, clang users also need to
enable remark diagnostics, using '-Rpass-missed=isel'.
This also removes '-fast-isel-verbose2': there are no static statistics
that we want to only enable in asserts builds, so we can always use
the remarks regardless of the build type.
Differential Revision: https://reviews.llvm.org/D31405
llvm-svn: 299093
2017-03-30 19:49:58 +02:00
|
|
|
OptimizationRemarkMissed R("sdagisel", "FastISelFailure",
|
|
|
|
Inst->getDebugLoc(), LLVMBB);
|
|
|
|
|
2015-02-28 20:34:54 +01:00
|
|
|
bool ShouldAbort = EnableFastISelAbort;
|
[CodeGen] Pass SDAG an ORE, and replace FastISel stats with remarks.
In the long-term, we want to replace statistics with something
finer-grained that lets us gather per-function data.
Remarks are that replacement.
Create an ORE instance in SelectionDAGISel, and pass it to
SelectionDAG.
SelectionDAG was used so that we can emit remarks from all
SelectionDAG-related code, including TargetLowering and DAGCombiner.
This isn't used in the current patch but Adam tells me he's interested
for the fp-contract combines.
Use the ORE instance to emit FastISel failures as remarks (instead of
the mix of dbgs() dumps and statistics that we currently have).
Eventually, we want to have an API that tells us whether remarks are
enabled (http://llvm.org/PR32352) so that we don't emit expensive
remarks (in this case, dumping IR) when it's not needed. For now, use
'isEnabled' as a crude replacement.
This does mean that the replacement for '-fast-isel-verbose' is now
'-pass-remarks-missed=isel'. Additionally, clang users also need to
enable remark diagnostics, using '-Rpass-missed=isel'.
This also removes '-fast-isel-verbose2': there are no static statistics
that we want to only enable in asserts builds, so we can always use
the remarks regardless of the build type.
Differential Revision: https://reviews.llvm.org/D31405
llvm-svn: 299093
2017-03-30 19:49:58 +02:00
|
|
|
if (isa<TerminatorInst>(Inst)) {
|
|
|
|
// Use a different message for terminator misses.
|
|
|
|
R << "FastISel missed terminator";
|
|
|
|
// Don't abort for terminator unless the level is really high
|
|
|
|
ShouldAbort = (EnableFastISelAbort > 2);
|
|
|
|
} else {
|
|
|
|
R << "FastISel missed";
|
2008-09-29 23:55:50 +02:00
|
|
|
}
|
[CodeGen] Pass SDAG an ORE, and replace FastISel stats with remarks.
In the long-term, we want to replace statistics with something
finer-grained that lets us gather per-function data.
Remarks are that replacement.
Create an ORE instance in SelectionDAGISel, and pass it to
SelectionDAG.
SelectionDAG was used so that we can emit remarks from all
SelectionDAG-related code, including TargetLowering and DAGCombiner.
This isn't used in the current patch but Adam tells me he's interested
for the fp-contract combines.
Use the ORE instance to emit FastISel failures as remarks (instead of
the mix of dbgs() dumps and statistics that we currently have).
Eventually, we want to have an API that tells us whether remarks are
enabled (http://llvm.org/PR32352) so that we don't emit expensive
remarks (in this case, dumping IR) when it's not needed. For now, use
'isEnabled' as a crude replacement.
This does mean that the replacement for '-fast-isel-verbose' is now
'-pass-remarks-missed=isel'. Additionally, clang users also need to
enable remark diagnostics, using '-Rpass-missed=isel'.
This also removes '-fast-isel-verbose2': there are no static statistics
that we want to only enable in asserts builds, so we can always use
the remarks regardless of the build type.
Differential Revision: https://reviews.llvm.org/D31405
llvm-svn: 299093
2017-03-30 19:49:58 +02:00
|
|
|
|
|
|
|
if (R.isEnabled() || EnableFastISelAbort) {
|
|
|
|
std::string InstStrStorage;
|
|
|
|
raw_string_ostream InstStr(InstStrStorage);
|
|
|
|
InstStr << *Inst;
|
|
|
|
R << ": " << InstStr.str();
|
|
|
|
}
|
|
|
|
|
|
|
|
reportFastISelFailure(*MF, *ORE, R, ShouldAbort);
|
2015-02-27 19:32:11 +01:00
|
|
|
|
|
|
|
NumFastIselFailures += NumFastIselRemaining;
|
2008-09-29 23:55:50 +02:00
|
|
|
break;
|
2008-08-23 04:25:05 +02:00
|
|
|
}
|
2010-07-10 11:00:22 +02:00
|
|
|
|
|
|
|
FastIS->recomputeInsertPt();
|
2008-08-23 04:25:05 +02:00
|
|
|
}
|
2017-02-07 19:42:53 +01:00
|
|
|
|
[stack-protection] Add support for MSVC buffer security check
Summary:
This patch is adding support for the MSVC buffer security check implementation
The buffer security check is turned on with the '/GS' compiler switch.
* https://msdn.microsoft.com/en-us/library/8dbf701c.aspx
* To be added to clang here: http://reviews.llvm.org/D20347
Some overview of buffer security check feature and implementation:
* https://msdn.microsoft.com/en-us/library/aa290051(VS.71).aspx
* http://www.ksyash.com/2011/01/buffer-overflow-protection-3/
* http://blog.osom.info/2012/02/understanding-vs-c-compilers-buffer.html
For the following example:
```
int example(int offset, int index) {
char buffer[10];
memset(buffer, 0xCC, index);
return buffer[index];
}
```
The MSVC compiler is adding these instructions to perform stack integrity check:
```
push ebp
mov ebp,esp
sub esp,50h
[1] mov eax,dword ptr [__security_cookie (01068024h)]
[2] xor eax,ebp
[3] mov dword ptr [ebp-4],eax
push ebx
push esi
push edi
mov eax,dword ptr [index]
push eax
push 0CCh
lea ecx,[buffer]
push ecx
call _memset (010610B9h)
add esp,0Ch
mov eax,dword ptr [index]
movsx eax,byte ptr buffer[eax]
pop edi
pop esi
pop ebx
[4] mov ecx,dword ptr [ebp-4]
[5] xor ecx,ebp
[6] call @__security_check_cookie@4 (01061276h)
mov esp,ebp
pop ebp
ret
```
The instrumentation above is:
* [1] is loading the global security canary,
* [3] is storing the local computed ([2]) canary to the guard slot,
* [4] is loading the guard slot and ([5]) re-compute the global canary,
* [6] is validating the resulting canary with the '__security_check_cookie' and performs error handling.
Overview of the current stack-protection implementation:
* lib/CodeGen/StackProtector.cpp
* There is a default stack-protection implementation applied on intermediate representation.
* The target can overload 'getIRStackGuard' method if it has a standard location for the stack protector cookie.
* An intrinsic 'Intrinsic::stackprotector' is added to the prologue. It will be expanded by the instruction selection pass (DAG or Fast).
* Basic Blocks are added to every instrumented function to receive the code for handling stack guard validation and errors handling.
* Guard manipulation and comparison are added directly to the intermediate representation.
* lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
* lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
* There is an implementation that adds instrumentation during instruction selection (for better handling of sibbling calls).
* see long comment above 'class StackProtectorDescriptor' declaration.
* The target needs to override 'getSDagStackGuard' to activate SDAG stack protection generation. (note: getIRStackGuard MUST be nullptr).
* 'getSDagStackGuard' returns the appropriate stack guard (security cookie)
* The code is generated by 'SelectionDAGBuilder.cpp' and 'SelectionDAGISel.cpp'.
* include/llvm/Target/TargetLowering.h
* Contains function to retrieve the default Guard 'Value'; should be overriden by each target to select which implementation is used and provide Guard 'Value'.
* lib/Target/X86/X86ISelLowering.cpp
* Contains the x86 specialisation; Guard 'Value' used by the SelectionDAG algorithm.
Function-based Instrumentation:
* The MSVC doesn't inline the stack guard comparison in every function. Instead, a call to '__security_check_cookie' is added to the epilogue before every return instructions.
* To support function-based instrumentation, this patch is
* adding a function to get the function-based check (llvm 'Value', see include/llvm/Target/TargetLowering.h),
* If provided, the stack protection instrumentation won't be inlined and a call to that function will be added to the prologue.
* modifying (SelectionDAGISel.cpp) do avoid producing basic blocks used for inline instrumentation,
* generating the function-based instrumentation during the ISEL pass (SelectionDAGBuilder.cpp),
* if FastISEL (not SelectionDAG), using the fallback which rely on the same function-based implemented over intermediate representation (StackProtector.cpp).
Modifications
* adding support for MSVC (lib/Target/X86/X86ISelLowering.cpp)
* adding support function-based instrumentation (lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp, .h)
Results
* IR generated instrumentation:
```
clang-cl /GS test.cc /Od /c -mllvm -print-isel-input
```
```
*** Final LLVM Code input to ISel ***
; Function Attrs: nounwind sspstrong
define i32 @"\01?example@@YAHHH@Z"(i32 %offset, i32 %index) #0 {
entry:
%StackGuardSlot = alloca i8* <<<-- Allocated guard slot
%0 = call i8* @llvm.stackguard() <<<-- Loading Stack Guard value
call void @llvm.stackprotector(i8* %0, i8** %StackGuardSlot) <<<-- Prologue intrinsic call (store to Guard slot)
%index.addr = alloca i32, align 4
%offset.addr = alloca i32, align 4
%buffer = alloca [10 x i8], align 1
store i32 %index, i32* %index.addr, align 4
store i32 %offset, i32* %offset.addr, align 4
%arraydecay = getelementptr inbounds [10 x i8], [10 x i8]* %buffer, i32 0, i32 0
%1 = load i32, i32* %index.addr, align 4
call void @llvm.memset.p0i8.i32(i8* %arraydecay, i8 -52, i32 %1, i32 1, i1 false)
%2 = load i32, i32* %index.addr, align 4
%arrayidx = getelementptr inbounds [10 x i8], [10 x i8]* %buffer, i32 0, i32 %2
%3 = load i8, i8* %arrayidx, align 1
%conv = sext i8 %3 to i32
%4 = load volatile i8*, i8** %StackGuardSlot <<<-- Loading Guard slot
call void @__security_check_cookie(i8* %4) <<<-- Epilogue function-based check
ret i32 %conv
}
```
* SelectionDAG generated instrumentation:
```
clang-cl /GS test.cc /O1 /c /FA
```
```
"?example@@YAHHH@Z": # @"\01?example@@YAHHH@Z"
# BB#0: # %entry
pushl %esi
subl $16, %esp
movl ___security_cookie, %eax <<<-- Loading Stack Guard value
movl 28(%esp), %esi
movl %eax, 12(%esp) <<<-- Store to Guard slot
leal 2(%esp), %eax
pushl %esi
pushl $204
pushl %eax
calll _memset
addl $12, %esp
movsbl 2(%esp,%esi), %esi
movl 12(%esp), %ecx <<<-- Loading Guard slot
calll @__security_check_cookie@4 <<<-- Epilogue function-based check
movl %esi, %eax
addl $16, %esp
popl %esi
retl
```
Reviewers: kcc, pcc, eugenis, rnk
Subscribers: majnemer, llvm-commits, hans, thakis, rnk
Differential Revision: http://reviews.llvm.org/D20346
llvm-svn: 272053
2016-06-07 22:15:35 +02:00
|
|
|
if (getAnalysis<StackProtector>().shouldEmitSDCheck(*LLVMBB)) {
|
|
|
|
bool FunctionBasedInstrumentation =
|
|
|
|
TLI->getSSPStackGuardCheck(*Fn.getParent());
|
|
|
|
SDB->SPDescriptor.initialize(LLVMBB, FuncInfo->MBBMap[LLVMBB],
|
|
|
|
FunctionBasedInstrumentation);
|
|
|
|
}
|
2008-08-23 04:25:05 +02:00
|
|
|
|
2010-10-25 23:31:46 +02:00
|
|
|
if (Begin != BI)
|
|
|
|
++NumDAGBlocks;
|
|
|
|
else
|
|
|
|
++NumFastIselBlocks;
|
|
|
|
|
2011-04-19 19:01:08 +02:00
|
|
|
if (Begin != BI) {
|
|
|
|
// Run SelectionDAG instruction selection on the remainder of the block
|
|
|
|
// not handled by FastISel. If FastISel is not run, this is the entire
|
|
|
|
// block.
|
|
|
|
bool HadTailCall;
|
|
|
|
SelectBasicBlock(Begin, BI, HadTailCall);
|
2017-03-01 01:43:42 +01:00
|
|
|
|
|
|
|
// But if FastISel was run, we already selected some of the block.
|
|
|
|
// If we emitted a tail-call, we need to delete any previously emitted
|
|
|
|
// instruction that follows it.
|
|
|
|
if (HadTailCall && FuncInfo->InsertPt != FuncInfo->MBB->end())
|
|
|
|
FastIS->removeDeadCode(FuncInfo->InsertPt, FuncInfo->MBB->end());
|
2011-04-19 19:01:08 +02:00
|
|
|
}
|
2008-08-08 09:27:28 +02:00
|
|
|
|
2010-07-10 11:00:22 +02:00
|
|
|
FinishBasicBlock();
|
2010-04-22 21:55:20 +02:00
|
|
|
FuncInfo->PHINodesToUpdate.clear();
|
Elide argument copies during instruction selection
Summary:
Avoids tons of prologue boilerplate when arguments are passed in memory
and left in memory. This can happen in a debug build or in a release
build when an argument alloca is escaped. This will dramatically affect
the code size of x86 debug builds, because X86 fast isel doesn't handle
arguments passed in memory at all. It only handles the x86_64 case of up
to 6 basic register parameters.
This is implemented by analyzing the entry block before ISel to identify
copy elision candidates. A copy elision candidate is an argument that is
used to fully initialize an alloca before any other possibly escaping
uses of that alloca. If an argument is a copy elision candidate, we set
a flag on the InputArg. If the the target generates loads from a fixed
stack object that matches the size and alignment requirements of the
alloca, the SelectionDAG builder will delete the stack object created
for the alloca and replace it with the fixed stack object. The load is
left behind to satisfy any remaining uses of the argument value. The
store is now dead and is therefore elided. The fixed stack object is
also marked as mutable, as it may now be modified by the user, and it
would be invalid to rematerialize the initial load from it.
Supersedes D28388
Fixes PR26328
Reviewers: chandlerc, MatzeB, qcolombet, inglorion, hans
Subscribers: igorb, llvm-commits
Differential Revision: https://reviews.llvm.org/D29668
llvm-svn: 296683
2017-03-01 22:42:00 +01:00
|
|
|
ElidedArgCopyInstrs.clear();
|
2008-08-07 02:43:25 +02:00
|
|
|
}
|
2008-09-29 23:55:50 +02:00
|
|
|
|
2016-10-08 00:06:55 +02:00
|
|
|
propagateSwiftErrorVRegs(FuncInfo);
|
|
|
|
|
2008-09-29 23:55:50 +02:00
|
|
|
delete FastIS;
|
2011-05-23 19:44:13 +02:00
|
|
|
SDB->clearDanglingDebugInfo();
|
Teach selectiondag how to handle the stackprotectorcheck intrinsic.
Previously, generation of stack protectors was done exclusively in the
pre-SelectionDAG Codegen LLVM IR Pass "Stack Protector". This necessitated
splitting basic blocks at the IR level to create the success/failure basic
blocks in the tail of the basic block in question. As a result of this,
calls that would have qualified for the sibling call optimization were no
longer eligible for optimization since said calls were no longer right in
the "tail position" (i.e. the immediate predecessor of a ReturnInst
instruction).
Then it was noticed that since the sibling call optimization causes the
callee to reuse the caller's stack, if we could delay the generation of
the stack protector check until later in CodeGen after the sibling call
decision was made, we get both the tail call optimization and the stack
protector check!
A few goals in solving this problem were:
1. Preserve the architecture independence of stack protector generation.
2. Preserve the normal IR level stack protector check for platforms like
OpenBSD for which we support platform specific stack protector
generation.
The main problem that guided the present solution is that one can not
solve this problem in an architecture independent manner at the IR level
only. This is because:
1. The decision on whether or not to perform a sibling call on certain
platforms (for instance i386) requires lower level information
related to available registers that can not be known at the IR level.
2. Even if the previous point were not true, the decision on whether to
perform a tail call is done in LowerCallTo in SelectionDAG which
occurs after the Stack Protector Pass. As a result, one would need to
put the relevant callinst into the stack protector check success
basic block (where the return inst is placed) and then move it back
later at SelectionDAG/MI time before the stack protector check if the
tail call optimization failed. The MI level option was nixed
immediately since it would require platform specific pattern
matching. The SelectionDAG level option was nixed because
SelectionDAG only processes one IR level basic block at a time
implying one could not create a DAG Combine to move the callinst.
To get around this problem a few things were realized:
1. While one can not handle multiple IR level basic blocks at the
SelectionDAG Level, one can generate multiple machine basic blocks
for one IR level basic block. This is how we handle bit tests and
switches.
2. At the MI level, tail calls are represented via a special return
MIInst called "tcreturn". Thus if we know the basic block in which we
wish to insert the stack protector check, we get the correct behavior
by always inserting the stack protector check right before the return
statement. This is a "magical transformation" since no matter where
the stack protector check intrinsic is, we always insert the stack
protector check code at the end of the BB.
Given the aforementioned constraints, the following solution was devised:
1. On platforms that do not support SelectionDAG stack protector check
generation, allow for the normal IR level stack protector check
generation to continue.
2. On platforms that do support SelectionDAG stack protector check
generation:
a. Use the IR level stack protector pass to decide if a stack
protector is required/which BB we insert the stack protector check
in by reusing the logic already therein. If we wish to generate a
stack protector check in a basic block, we place a special IR
intrinsic called llvm.stackprotectorcheck right before the BB's
returninst or if there is a callinst that could potentially be
sibling call optimized, before the call inst.
b. Then when a BB with said intrinsic is processed, we codegen the BB
normally via SelectBasicBlock. In said process, when we visit the
stack protector check, we do not actually emit anything into the
BB. Instead, we just initialize the stack protector descriptor
class (which involves stashing information/creating the success
mbbb and the failure mbb if we have not created one for this
function yet) and export the guard variable that we are going to
compare.
c. After we finish selecting the basic block, in FinishBasicBlock if
the StackProtectorDescriptor attached to the SelectionDAGBuilder is
initialized, we first find a splice point in the parent basic block
before the terminator and then splice the terminator of said basic
block into the success basic block. Then we code-gen a new tail for
the parent basic block consisting of the two loads, the comparison,
and finally two branches to the success/failure basic blocks. We
conclude by code-gening the failure basic block if we have not
code-gened it already (all stack protector checks we generate in
the same function, use the same failure basic block).
llvm-svn: 188755
2013-08-20 09:00:16 +02:00
|
|
|
SDB->SPDescriptor.resetPerFunctionState();
|
|
|
|
}
|
|
|
|
|
2013-08-22 07:40:50 +02:00
|
|
|
/// Given that the input MI is before a partial terminator sequence TSeq, return
|
|
|
|
/// true if M + TSeq also a partial terminator sequence.
|
|
|
|
///
|
|
|
|
/// A Terminator sequence is a sequence of MachineInstrs which at this point in
|
|
|
|
/// lowering copy vregs into physical registers, which are then passed into
|
|
|
|
/// terminator instructors so we can satisfy ABI constraints. A partial
|
|
|
|
/// terminator sequence is an improper subset of a terminator sequence (i.e. it
|
|
|
|
/// may be the whole terminator sequence).
|
2016-07-08 21:11:40 +02:00
|
|
|
static bool MIIsInTerminatorSequence(const MachineInstr &MI) {
|
2013-08-22 07:40:50 +02:00
|
|
|
// If we do not have a copy or an implicit def, we return true if and only if
|
|
|
|
// MI is a debug value.
|
2016-07-08 21:11:40 +02:00
|
|
|
if (!MI.isCopy() && !MI.isImplicitDef())
|
2013-08-22 07:40:50 +02:00
|
|
|
// Sometimes DBG_VALUE MI sneak in between the copies from the vregs to the
|
|
|
|
// physical registers if there is debug info associated with the terminator
|
|
|
|
// of our mbb. We want to include said debug info in our terminator
|
|
|
|
// sequence, so we return true in that case.
|
2016-07-08 21:11:40 +02:00
|
|
|
return MI.isDebugValue();
|
2013-08-22 07:40:50 +02:00
|
|
|
|
2013-09-24 03:50:26 +02:00
|
|
|
// We have left the terminator sequence if we are not doing one of the
|
|
|
|
// following:
|
|
|
|
//
|
|
|
|
// 1. Copying a vreg into a physical register.
|
|
|
|
// 2. Copying a vreg into a vreg.
|
|
|
|
// 3. Defining a register via an implicit def.
|
|
|
|
|
|
|
|
// OPI should always be a register definition...
|
2016-07-08 21:11:40 +02:00
|
|
|
MachineInstr::const_mop_iterator OPI = MI.operands_begin();
|
2013-09-26 07:53:31 +02:00
|
|
|
if (!OPI->isReg() || !OPI->isDef())
|
2013-09-24 03:50:26 +02:00
|
|
|
return false;
|
|
|
|
|
|
|
|
// Defining any register via an implicit def is always ok.
|
2016-07-08 21:11:40 +02:00
|
|
|
if (MI.isImplicitDef())
|
2013-09-24 03:50:26 +02:00
|
|
|
return true;
|
|
|
|
|
|
|
|
// Grab the copy source...
|
|
|
|
MachineInstr::const_mop_iterator OPI2 = OPI;
|
|
|
|
++OPI2;
|
2016-07-08 21:11:40 +02:00
|
|
|
assert(OPI2 != MI.operands_end()
|
2013-09-24 03:50:26 +02:00
|
|
|
&& "Should have a copy implying we should have 2 arguments.");
|
|
|
|
|
|
|
|
// Make sure that the copy dest is not a vreg when the copy source is a
|
|
|
|
// physical register.
|
|
|
|
if (!OPI2->isReg() ||
|
2013-08-22 07:40:50 +02:00
|
|
|
(!TargetRegisterInfo::isPhysicalRegister(OPI->getReg()) &&
|
2013-09-24 03:50:26 +02:00
|
|
|
TargetRegisterInfo::isPhysicalRegister(OPI2->getReg())))
|
2013-08-22 07:40:50 +02:00
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
Teach selectiondag how to handle the stackprotectorcheck intrinsic.
Previously, generation of stack protectors was done exclusively in the
pre-SelectionDAG Codegen LLVM IR Pass "Stack Protector". This necessitated
splitting basic blocks at the IR level to create the success/failure basic
blocks in the tail of the basic block in question. As a result of this,
calls that would have qualified for the sibling call optimization were no
longer eligible for optimization since said calls were no longer right in
the "tail position" (i.e. the immediate predecessor of a ReturnInst
instruction).
Then it was noticed that since the sibling call optimization causes the
callee to reuse the caller's stack, if we could delay the generation of
the stack protector check until later in CodeGen after the sibling call
decision was made, we get both the tail call optimization and the stack
protector check!
A few goals in solving this problem were:
1. Preserve the architecture independence of stack protector generation.
2. Preserve the normal IR level stack protector check for platforms like
OpenBSD for which we support platform specific stack protector
generation.
The main problem that guided the present solution is that one can not
solve this problem in an architecture independent manner at the IR level
only. This is because:
1. The decision on whether or not to perform a sibling call on certain
platforms (for instance i386) requires lower level information
related to available registers that can not be known at the IR level.
2. Even if the previous point were not true, the decision on whether to
perform a tail call is done in LowerCallTo in SelectionDAG which
occurs after the Stack Protector Pass. As a result, one would need to
put the relevant callinst into the stack protector check success
basic block (where the return inst is placed) and then move it back
later at SelectionDAG/MI time before the stack protector check if the
tail call optimization failed. The MI level option was nixed
immediately since it would require platform specific pattern
matching. The SelectionDAG level option was nixed because
SelectionDAG only processes one IR level basic block at a time
implying one could not create a DAG Combine to move the callinst.
To get around this problem a few things were realized:
1. While one can not handle multiple IR level basic blocks at the
SelectionDAG Level, one can generate multiple machine basic blocks
for one IR level basic block. This is how we handle bit tests and
switches.
2. At the MI level, tail calls are represented via a special return
MIInst called "tcreturn". Thus if we know the basic block in which we
wish to insert the stack protector check, we get the correct behavior
by always inserting the stack protector check right before the return
statement. This is a "magical transformation" since no matter where
the stack protector check intrinsic is, we always insert the stack
protector check code at the end of the BB.
Given the aforementioned constraints, the following solution was devised:
1. On platforms that do not support SelectionDAG stack protector check
generation, allow for the normal IR level stack protector check
generation to continue.
2. On platforms that do support SelectionDAG stack protector check
generation:
a. Use the IR level stack protector pass to decide if a stack
protector is required/which BB we insert the stack protector check
in by reusing the logic already therein. If we wish to generate a
stack protector check in a basic block, we place a special IR
intrinsic called llvm.stackprotectorcheck right before the BB's
returninst or if there is a callinst that could potentially be
sibling call optimized, before the call inst.
b. Then when a BB with said intrinsic is processed, we codegen the BB
normally via SelectBasicBlock. In said process, when we visit the
stack protector check, we do not actually emit anything into the
BB. Instead, we just initialize the stack protector descriptor
class (which involves stashing information/creating the success
mbb and the failure mbb if we have not created one for this
function yet) and export the guard variable that we are going to
compare.
c. After we finish selecting the basic block, in FinishBasicBlock if
the StackProtectorDescriptor attached to the SelectionDAGBuilder is
initialized, we first find a splice point in the parent basic block
before the terminator and then splice the terminator of said basic
block into the success basic block. Then we code-gen a new tail for
the parent basic block consisting of the two loads, the comparison,
and finally two branches to the success/failure basic blocks. We
conclude by code-gening the failure basic block if we have not
code-gened it already (all stack protector checks we generate in
the same function, use the same failure basic block).
llvm-svn: 188755
2013-08-20 09:00:16 +02:00
|
|
|
/// Find the split point at which to splice the end of BB into its success stack
|
|
|
|
/// protector check machine basic block.
|
2013-08-22 07:40:50 +02:00
|
|
|
///
|
|
|
|
/// On many platforms, due to ABI constraints, terminators, even before register
|
|
|
|
/// allocation, use physical registers. This creates an issue for us since
|
|
|
|
/// physical registers at this point can not travel across basic
|
|
|
|
/// blocks. Luckily, selectiondag always moves physical registers into vregs
|
|
|
|
/// when they enter functions and moves them through a sequence of copies back
|
|
|
|
/// into the physical registers right before the terminator creating a
|
|
|
|
/// ``Terminator Sequence''. This function is searching for the beginning of the
|
|
|
|
/// terminator sequence so that we can ensure that we splice off not just the
|
|
|
|
/// terminator, but additionally the copies that move the vregs into the
|
|
|
|
/// physical registers.
|
Teach selectiondag how to handle the stackprotectorcheck intrinsic.
Previously, generation of stack protectors was done exclusively in the
pre-SelectionDAG Codegen LLVM IR Pass "Stack Protector". This necessitated
splitting basic blocks at the IR level to create the success/failure basic
blocks in the tail of the basic block in question. As a result of this,
calls that would have qualified for the sibling call optimization were no
longer eligible for optimization since said calls were no longer right in
the "tail position" (i.e. the immediate predecessor of a ReturnInst
instruction).
Then it was noticed that since the sibling call optimization causes the
callee to reuse the caller's stack, if we could delay the generation of
the stack protector check until later in CodeGen after the sibling call
decision was made, we get both the tail call optimization and the stack
protector check!
A few goals in solving this problem were:
1. Preserve the architecture independence of stack protector generation.
2. Preserve the normal IR level stack protector check for platforms like
OpenBSD for which we support platform specific stack protector
generation.
The main problem that guided the present solution is that one can not
solve this problem in an architecture independent manner at the IR level
only. This is because:
1. The decision on whether or not to perform a sibling call on certain
platforms (for instance i386) requires lower level information
related to available registers that can not be known at the IR level.
2. Even if the previous point were not true, the decision on whether to
perform a tail call is done in LowerCallTo in SelectionDAG which
occurs after the Stack Protector Pass. As a result, one would need to
put the relevant callinst into the stack protector check success
basic block (where the return inst is placed) and then move it back
later at SelectionDAG/MI time before the stack protector check if the
tail call optimization failed. The MI level option was nixed
immediately since it would require platform specific pattern
matching. The SelectionDAG level option was nixed because
SelectionDAG only processes one IR level basic block at a time
implying one could not create a DAG Combine to move the callinst.
To get around this problem a few things were realized:
1. While one can not handle multiple IR level basic blocks at the
SelectionDAG Level, one can generate multiple machine basic blocks
for one IR level basic block. This is how we handle bit tests and
switches.
2. At the MI level, tail calls are represented via a special return
MIInst called "tcreturn". Thus if we know the basic block in which we
wish to insert the stack protector check, we get the correct behavior
by always inserting the stack protector check right before the return
statement. This is a "magical transformation" since no matter where
the stack protector check intrinsic is, we always insert the stack
protector check code at the end of the BB.
Given the aforementioned constraints, the following solution was devised:
1. On platforms that do not support SelectionDAG stack protector check
generation, allow for the normal IR level stack protector check
generation to continue.
2. On platforms that do support SelectionDAG stack protector check
generation:
a. Use the IR level stack protector pass to decide if a stack
protector is required/which BB we insert the stack protector check
in by reusing the logic already therein. If we wish to generate a
stack protector check in a basic block, we place a special IR
intrinsic called llvm.stackprotectorcheck right before the BB's
returninst or if there is a callinst that could potentially be
sibling call optimized, before the call inst.
b. Then when a BB with said intrinsic is processed, we codegen the BB
normally via SelectBasicBlock. In said process, when we visit the
stack protector check, we do not actually emit anything into the
BB. Instead, we just initialize the stack protector descriptor
class (which involves stashing information/creating the success
mbb and the failure mbb if we have not created one for this
function yet) and export the guard variable that we are going to
compare.
c. After we finish selecting the basic block, in FinishBasicBlock if
the StackProtectorDescriptor attached to the SelectionDAGBuilder is
initialized, we first find a splice point in the parent basic block
before the terminator and then splice the terminator of said basic
block into the success basic block. Then we code-gen a new tail for
the parent basic block consisting of the two loads, the comparison,
and finally two branches to the success/failure basic blocks. We
conclude by code-gening the failure basic block if we have not
code-gened it already (all stack protector checks we generate in
the same function, use the same failure basic block).
llvm-svn: 188755
2013-08-20 09:00:16 +02:00
|
|
|
static MachineBasicBlock::iterator
|
[stack-protection] Add support for MSVC buffer security check
Summary:
This patch is adding support for the MSVC buffer security check implementation
The buffer security check is turned on with the '/GS' compiler switch.
* https://msdn.microsoft.com/en-us/library/8dbf701c.aspx
* To be added to clang here: http://reviews.llvm.org/D20347
Some overview of buffer security check feature and implementation:
* https://msdn.microsoft.com/en-us/library/aa290051(VS.71).aspx
* http://www.ksyash.com/2011/01/buffer-overflow-protection-3/
* http://blog.osom.info/2012/02/understanding-vs-c-compilers-buffer.html
For the following example:
```
int example(int offset, int index) {
char buffer[10];
memset(buffer, 0xCC, index);
return buffer[index];
}
```
The MSVC compiler is adding these instructions to perform stack integrity check:
```
push ebp
mov ebp,esp
sub esp,50h
[1] mov eax,dword ptr [__security_cookie (01068024h)]
[2] xor eax,ebp
[3] mov dword ptr [ebp-4],eax
push ebx
push esi
push edi
mov eax,dword ptr [index]
push eax
push 0CCh
lea ecx,[buffer]
push ecx
call _memset (010610B9h)
add esp,0Ch
mov eax,dword ptr [index]
movsx eax,byte ptr buffer[eax]
pop edi
pop esi
pop ebx
[4] mov ecx,dword ptr [ebp-4]
[5] xor ecx,ebp
[6] call @__security_check_cookie@4 (01061276h)
mov esp,ebp
pop ebp
ret
```
The instrumentation above is:
* [1] is loading the global security canary,
* [3] is storing the local computed ([2]) canary to the guard slot,
* [4] is loading the guard slot and ([5]) re-compute the global canary,
* [6] is validating the resulting canary with the '__security_check_cookie' and performs error handling.
Overview of the current stack-protection implementation:
* lib/CodeGen/StackProtector.cpp
* There is a default stack-protection implementation applied on intermediate representation.
* The target can overload 'getIRStackGuard' method if it has a standard location for the stack protector cookie.
* An intrinsic 'Intrinsic::stackprotector' is added to the prologue. It will be expanded by the instruction selection pass (DAG or Fast).
* Basic Blocks are added to every instrumented function to receive the code for handling stack guard validation and errors handling.
* Guard manipulation and comparison are added directly to the intermediate representation.
* lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
* lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
* There is an implementation that adds instrumentation during instruction selection (for better handling of sibbling calls).
* see long comment above 'class StackProtectorDescriptor' declaration.
* The target needs to override 'getSDagStackGuard' to activate SDAG stack protection generation. (note: getIRStackGuard MUST be nullptr).
* 'getSDagStackGuard' returns the appropriate stack guard (security cookie)
* The code is generated by 'SelectionDAGBuilder.cpp' and 'SelectionDAGISel.cpp'.
* include/llvm/Target/TargetLowering.h
* Contains function to retrieve the default Guard 'Value'; should be overriden by each target to select which implementation is used and provide Guard 'Value'.
* lib/Target/X86/X86ISelLowering.cpp
* Contains the x86 specialisation; Guard 'Value' used by the SelectionDAG algorithm.
Function-based Instrumentation:
* The MSVC doesn't inline the stack guard comparison in every function. Instead, a call to '__security_check_cookie' is added to the epilogue before every return instructions.
* To support function-based instrumentation, this patch is
* adding a function to get the function-based check (llvm 'Value', see include/llvm/Target/TargetLowering.h),
* If provided, the stack protection instrumentation won't be inlined and a call to that function will be added to the prologue.
* modifying (SelectionDAGISel.cpp) do avoid producing basic blocks used for inline instrumentation,
* generating the function-based instrumentation during the ISEL pass (SelectionDAGBuilder.cpp),
* if FastISEL (not SelectionDAG), using the fallback which rely on the same function-based implemented over intermediate representation (StackProtector.cpp).
Modifications
* adding support for MSVC (lib/Target/X86/X86ISelLowering.cpp)
* adding support function-based instrumentation (lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp, .h)
Results
* IR generated instrumentation:
```
clang-cl /GS test.cc /Od /c -mllvm -print-isel-input
```
```
*** Final LLVM Code input to ISel ***
; Function Attrs: nounwind sspstrong
define i32 @"\01?example@@YAHHH@Z"(i32 %offset, i32 %index) #0 {
entry:
%StackGuardSlot = alloca i8* <<<-- Allocated guard slot
%0 = call i8* @llvm.stackguard() <<<-- Loading Stack Guard value
call void @llvm.stackprotector(i8* %0, i8** %StackGuardSlot) <<<-- Prologue intrinsic call (store to Guard slot)
%index.addr = alloca i32, align 4
%offset.addr = alloca i32, align 4
%buffer = alloca [10 x i8], align 1
store i32 %index, i32* %index.addr, align 4
store i32 %offset, i32* %offset.addr, align 4
%arraydecay = getelementptr inbounds [10 x i8], [10 x i8]* %buffer, i32 0, i32 0
%1 = load i32, i32* %index.addr, align 4
call void @llvm.memset.p0i8.i32(i8* %arraydecay, i8 -52, i32 %1, i32 1, i1 false)
%2 = load i32, i32* %index.addr, align 4
%arrayidx = getelementptr inbounds [10 x i8], [10 x i8]* %buffer, i32 0, i32 %2
%3 = load i8, i8* %arrayidx, align 1
%conv = sext i8 %3 to i32
%4 = load volatile i8*, i8** %StackGuardSlot <<<-- Loading Guard slot
call void @__security_check_cookie(i8* %4) <<<-- Epilogue function-based check
ret i32 %conv
}
```
* SelectionDAG generated instrumentation:
```
clang-cl /GS test.cc /O1 /c /FA
```
```
"?example@@YAHHH@Z": # @"\01?example@@YAHHH@Z"
# BB#0: # %entry
pushl %esi
subl $16, %esp
movl ___security_cookie, %eax <<<-- Loading Stack Guard value
movl 28(%esp), %esi
movl %eax, 12(%esp) <<<-- Store to Guard slot
leal 2(%esp), %eax
pushl %esi
pushl $204
pushl %eax
calll _memset
addl $12, %esp
movsbl 2(%esp,%esi), %esi
movl 12(%esp), %ecx <<<-- Loading Guard slot
calll @__security_check_cookie@4 <<<-- Epilogue function-based check
movl %esi, %eax
addl $16, %esp
popl %esi
retl
```
Reviewers: kcc, pcc, eugenis, rnk
Subscribers: majnemer, llvm-commits, hans, thakis, rnk
Differential Revision: http://reviews.llvm.org/D20346
llvm-svn: 272053
2016-06-07 22:15:35 +02:00
|
|
|
FindSplitPointForStackProtector(MachineBasicBlock *BB) {
|
2013-09-26 07:53:31 +02:00
|
|
|
MachineBasicBlock::iterator SplitPoint = BB->getFirstTerminator();
|
2013-08-22 07:40:50 +02:00
|
|
|
//
|
Teach selectiondag how to handle the stackprotectorcheck intrinsic.
Previously, generation of stack protectors was done exclusively in the
pre-SelectionDAG Codegen LLVM IR Pass "Stack Protector". This necessitated
splitting basic blocks at the IR level to create the success/failure basic
blocks in the tail of the basic block in question. As a result of this,
calls that would have qualified for the sibling call optimization were no
longer eligible for optimization since said calls were no longer right in
the "tail position" (i.e. the immediate predecessor of a ReturnInst
instruction).
Then it was noticed that since the sibling call optimization causes the
callee to reuse the caller's stack, if we could delay the generation of
the stack protector check until later in CodeGen after the sibling call
decision was made, we get both the tail call optimization and the stack
protector check!
A few goals in solving this problem were:
1. Preserve the architecture independence of stack protector generation.
2. Preserve the normal IR level stack protector check for platforms like
OpenBSD for which we support platform specific stack protector
generation.
The main problem that guided the present solution is that one can not
solve this problem in an architecture independent manner at the IR level
only. This is because:
1. The decision on whether or not to perform a sibling call on certain
platforms (for instance i386) requires lower level information
related to available registers that can not be known at the IR level.
2. Even if the previous point were not true, the decision on whether to
perform a tail call is done in LowerCallTo in SelectionDAG which
occurs after the Stack Protector Pass. As a result, one would need to
put the relevant callinst into the stack protector check success
basic block (where the return inst is placed) and then move it back
later at SelectionDAG/MI time before the stack protector check if the
tail call optimization failed. The MI level option was nixed
immediately since it would require platform specific pattern
matching. The SelectionDAG level option was nixed because
SelectionDAG only processes one IR level basic block at a time
implying one could not create a DAG Combine to move the callinst.
To get around this problem a few things were realized:
1. While one can not handle multiple IR level basic blocks at the
SelectionDAG Level, one can generate multiple machine basic blocks
for one IR level basic block. This is how we handle bit tests and
switches.
2. At the MI level, tail calls are represented via a special return
MIInst called "tcreturn". Thus if we know the basic block in which we
wish to insert the stack protector check, we get the correct behavior
by always inserting the stack protector check right before the return
statement. This is a "magical transformation" since no matter where
the stack protector check intrinsic is, we always insert the stack
protector check code at the end of the BB.
Given the aforementioned constraints, the following solution was devised:
1. On platforms that do not support SelectionDAG stack protector check
generation, allow for the normal IR level stack protector check
generation to continue.
2. On platforms that do support SelectionDAG stack protector check
generation:
a. Use the IR level stack protector pass to decide if a stack
protector is required/which BB we insert the stack protector check
in by reusing the logic already therein. If we wish to generate a
stack protector check in a basic block, we place a special IR
intrinsic called llvm.stackprotectorcheck right before the BB's
returninst or if there is a callinst that could potentially be
sibling call optimized, before the call inst.
b. Then when a BB with said intrinsic is processed, we codegen the BB
normally via SelectBasicBlock. In said process, when we visit the
stack protector check, we do not actually emit anything into the
BB. Instead, we just initialize the stack protector descriptor
class (which involves stashing information/creating the success
mbbb and the failure mbb if we have not created one for this
function yet) and export the guard variable that we are going to
compare.
c. After we finish selecting the basic block, in FinishBasicBlock if
the StackProtectorDescriptor attached to the SelectionDAGBuilder is
initialized, we first find a splice point in the parent basic block
before the terminator and then splice the terminator of said basic
block into the success basic block. Then we code-gen a new tail for
the parent basic block consisting of the two loads, the comparison,
and finally two branches to the success/failure basic blocks. We
conclude by code-gening the failure basic block if we have not
code-gened it already (all stack protector checks we generate in
the same function, use the same failure basic block).
llvm-svn: 188755
2013-08-20 09:00:16 +02:00
|
|
|
if (SplitPoint == BB->begin())
|
|
|
|
return SplitPoint;
|
|
|
|
|
|
|
|
MachineBasicBlock::iterator Start = BB->begin();
|
|
|
|
MachineBasicBlock::iterator Previous = SplitPoint;
|
|
|
|
--Previous;
|
|
|
|
|
2016-07-08 21:11:40 +02:00
|
|
|
while (MIIsInTerminatorSequence(*Previous)) {
|
Teach selectiondag how to handle the stackprotectorcheck intrinsic.
Previously, generation of stack protectors was done exclusively in the
pre-SelectionDAG Codegen LLVM IR Pass "Stack Protector". This necessitated
splitting basic blocks at the IR level to create the success/failure basic
blocks in the tail of the basic block in question. As a result of this,
calls that would have qualified for the sibling call optimization were no
longer eligible for optimization since said calls were no longer right in
the "tail position" (i.e. the immediate predecessor of a ReturnInst
instruction).
Then it was noticed that since the sibling call optimization causes the
callee to reuse the caller's stack, if we could delay the generation of
the stack protector check until later in CodeGen after the sibling call
decision was made, we get both the tail call optimization and the stack
protector check!
A few goals in solving this problem were:
1. Preserve the architecture independence of stack protector generation.
2. Preserve the normal IR level stack protector check for platforms like
OpenBSD for which we support platform specific stack protector
generation.
The main problem that guided the present solution is that one can not
solve this problem in an architecture independent manner at the IR level
only. This is because:
1. The decision on whether or not to perform a sibling call on certain
platforms (for instance i386) requires lower level information
related to available registers that can not be known at the IR level.
2. Even if the previous point were not true, the decision on whether to
perform a tail call is done in LowerCallTo in SelectionDAG which
occurs after the Stack Protector Pass. As a result, one would need to
put the relevant callinst into the stack protector check success
basic block (where the return inst is placed) and then move it back
later at SelectionDAG/MI time before the stack protector check if the
tail call optimization failed. The MI level option was nixed
immediately since it would require platform specific pattern
matching. The SelectionDAG level option was nixed because
SelectionDAG only processes one IR level basic block at a time
implying one could not create a DAG Combine to move the callinst.
To get around this problem a few things were realized:
1. While one can not handle multiple IR level basic blocks at the
SelectionDAG Level, one can generate multiple machine basic blocks
for one IR level basic block. This is how we handle bit tests and
switches.
2. At the MI level, tail calls are represented via a special return
MIInst called "tcreturn". Thus if we know the basic block in which we
wish to insert the stack protector check, we get the correct behavior
by always inserting the stack protector check right before the return
statement. This is a "magical transformation" since no matter where
the stack protector check intrinsic is, we always insert the stack
protector check code at the end of the BB.
Given the aforementioned constraints, the following solution was devised:
1. On platforms that do not support SelectionDAG stack protector check
generation, allow for the normal IR level stack protector check
generation to continue.
2. On platforms that do support SelectionDAG stack protector check
generation:
a. Use the IR level stack protector pass to decide if a stack
protector is required/which BB we insert the stack protector check
in by reusing the logic already therein. If we wish to generate a
stack protector check in a basic block, we place a special IR
intrinsic called llvm.stackprotectorcheck right before the BB's
returninst or if there is a callinst that could potentially be
sibling call optimized, before the call inst.
b. Then when a BB with said intrinsic is processed, we codegen the BB
normally via SelectBasicBlock. In said process, when we visit the
stack protector check, we do not actually emit anything into the
BB. Instead, we just initialize the stack protector descriptor
class (which involves stashing information/creating the success
mbbb and the failure mbb if we have not created one for this
function yet) and export the guard variable that we are going to
compare.
c. After we finish selecting the basic block, in FinishBasicBlock if
the StackProtectorDescriptor attached to the SelectionDAGBuilder is
initialized, we first find a splice point in the parent basic block
before the terminator and then splice the terminator of said basic
block into the success basic block. Then we code-gen a new tail for
the parent basic block consisting of the two loads, the comparison,
and finally two branches to the success/failure basic blocks. We
conclude by code-gening the failure basic block if we have not
code-gened it already (all stack protector checks we generate in
the same function, use the same failure basic block).
llvm-svn: 188755
2013-08-20 09:00:16 +02:00
|
|
|
SplitPoint = Previous;
|
|
|
|
if (Previous == Start)
|
|
|
|
break;
|
|
|
|
--Previous;
|
|
|
|
}
|
|
|
|
|
|
|
|
return SplitPoint;
|
2008-07-08 01:02:41 +02:00
|
|
|
}
|
|
|
|
|
2008-07-28 23:51:04 +02:00
|
|
|
void
|
2010-07-10 11:00:22 +02:00
|
|
|
SelectionDAGISel::FinishBasicBlock() {
|
2010-01-05 02:26:11 +01:00
|
|
|
DEBUG(dbgs() << "Total amount of phi nodes to update: "
|
2010-06-18 18:00:29 +02:00
|
|
|
<< FuncInfo->PHINodesToUpdate.size() << "\n";
|
|
|
|
for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e; ++i)
|
2010-01-05 02:26:11 +01:00
|
|
|
dbgs() << "Node " << i << " : ("
|
2010-04-22 21:55:20 +02:00
|
|
|
<< FuncInfo->PHINodesToUpdate[i].first
|
|
|
|
<< ", " << FuncInfo->PHINodesToUpdate[i].second << ")\n");
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2005-03-30 03:10:47 +02:00
|
|
|
// Next, now that we know what the last MBB the LLVM BB expanded is, update
|
2005-01-07 08:47:53 +01:00
|
|
|
// PHI nodes in successors.
|
2015-04-23 18:45:24 +02:00
|
|
|
for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e; ++i) {
|
|
|
|
MachineInstrBuilder PHI(*MF, FuncInfo->PHINodesToUpdate[i].first);
|
|
|
|
assert(PHI->isPHI() &&
|
|
|
|
"This is not a machine PHI node that we are updating!");
|
|
|
|
if (!FuncInfo->MBB->isSuccessor(PHI->getParent()))
|
|
|
|
continue;
|
|
|
|
PHI.addReg(FuncInfo->PHINodesToUpdate[i].second).addMBB(FuncInfo->MBB);
|
2005-01-07 08:47:53 +01:00
|
|
|
}
|
2007-04-09 14:31:58 +02:00
|
|
|
|
Teach selectiondag how to handle the stackprotectorcheck intrinsic.
Previously, generation of stack protectors was done exclusively in the
pre-SelectionDAG Codegen LLVM IR Pass "Stack Protector". This necessitated
splitting basic blocks at the IR level to create the success/failure basic
blocks in the tail of the basic block in question. As a result of this,
calls that would have qualified for the sibling call optimization were no
longer eligible for optimization since said calls were no longer right in
the "tail position" (i.e. the immediate predecessor of a ReturnInst
instruction).
Then it was noticed that since the sibling call optimization causes the
callee to reuse the caller's stack, if we could delay the generation of
the stack protector check until later in CodeGen after the sibling call
decision was made, we get both the tail call optimization and the stack
protector check!
A few goals in solving this problem were:
1. Preserve the architecture independence of stack protector generation.
2. Preserve the normal IR level stack protector check for platforms like
OpenBSD for which we support platform specific stack protector
generation.
The main problem that guided the present solution is that one can not
solve this problem in an architecture independent manner at the IR level
only. This is because:
1. The decision on whether or not to perform a sibling call on certain
platforms (for instance i386) requires lower level information
related to available registers that can not be known at the IR level.
2. Even if the previous point were not true, the decision on whether to
perform a tail call is done in LowerCallTo in SelectionDAG which
occurs after the Stack Protector Pass. As a result, one would need to
put the relevant callinst into the stack protector check success
basic block (where the return inst is placed) and then move it back
later at SelectionDAG/MI time before the stack protector check if the
tail call optimization failed. The MI level option was nixed
immediately since it would require platform specific pattern
matching. The SelectionDAG level option was nixed because
SelectionDAG only processes one IR level basic block at a time
implying one could not create a DAG Combine to move the callinst.
To get around this problem a few things were realized:
1. While one can not handle multiple IR level basic blocks at the
SelectionDAG Level, one can generate multiple machine basic blocks
for one IR level basic block. This is how we handle bit tests and
switches.
2. At the MI level, tail calls are represented via a special return
MIInst called "tcreturn". Thus if we know the basic block in which we
wish to insert the stack protector check, we get the correct behavior
by always inserting the stack protector check right before the return
statement. This is a "magical transformation" since no matter where
the stack protector check intrinsic is, we always insert the stack
protector check code at the end of the BB.
Given the aforementioned constraints, the following solution was devised:
1. On platforms that do not support SelectionDAG stack protector check
generation, allow for the normal IR level stack protector check
generation to continue.
2. On platforms that do support SelectionDAG stack protector check
generation:
a. Use the IR level stack protector pass to decide if a stack
protector is required/which BB we insert the stack protector check
in by reusing the logic already therein. If we wish to generate a
stack protector check in a basic block, we place a special IR
intrinsic called llvm.stackprotectorcheck right before the BB's
returninst or if there is a callinst that could potentially be
sibling call optimized, before the call inst.
b. Then when a BB with said intrinsic is processed, we codegen the BB
normally via SelectBasicBlock. In said process, when we visit the
stack protector check, we do not actually emit anything into the
BB. Instead, we just initialize the stack protector descriptor
class (which involves stashing information/creating the success
mbb and the failure mbb if we have not created one for this
function yet) and export the guard variable that we are going to
compare.
c. After we finish selecting the basic block, in FinishBasicBlock if
the StackProtectorDescriptor attached to the SelectionDAGBuilder is
initialized, we first find a splice point in the parent basic block
before the terminator and then splice the terminator of said basic
block into the success basic block. Then we code-gen a new tail for
the parent basic block consisting of the two loads, the comparison,
and finally two branches to the success/failure basic blocks. We
conclude by code-gening the failure basic block if we have not
code-gened it already (all stack protector checks we generate in
the same function, use the same failure basic block).
llvm-svn: 188755
2013-08-20 09:00:16 +02:00
|
|
|
// Handle stack protector.
|
[stack-protection] Add support for MSVC buffer security check
Summary:
This patch is adding support for the MSVC buffer security check implementation
The buffer security check is turned on with the '/GS' compiler switch.
* https://msdn.microsoft.com/en-us/library/8dbf701c.aspx
* To be added to clang here: http://reviews.llvm.org/D20347
Some overview of buffer security check feature and implementation:
* https://msdn.microsoft.com/en-us/library/aa290051(VS.71).aspx
* http://www.ksyash.com/2011/01/buffer-overflow-protection-3/
* http://blog.osom.info/2012/02/understanding-vs-c-compilers-buffer.html
For the following example:
```
int example(int offset, int index) {
char buffer[10];
memset(buffer, 0xCC, index);
return buffer[index];
}
```
The MSVC compiler is adding these instructions to perform stack integrity check:
```
push ebp
mov ebp,esp
sub esp,50h
[1] mov eax,dword ptr [__security_cookie (01068024h)]
[2] xor eax,ebp
[3] mov dword ptr [ebp-4],eax
push ebx
push esi
push edi
mov eax,dword ptr [index]
push eax
push 0CCh
lea ecx,[buffer]
push ecx
call _memset (010610B9h)
add esp,0Ch
mov eax,dword ptr [index]
movsx eax,byte ptr buffer[eax]
pop edi
pop esi
pop ebx
[4] mov ecx,dword ptr [ebp-4]
[5] xor ecx,ebp
[6] call @__security_check_cookie@4 (01061276h)
mov esp,ebp
pop ebp
ret
```
The instrumentation above is:
* [1] is loading the global security canary,
* [3] is storing the local computed ([2]) canary to the guard slot,
* [4] is loading the guard slot and ([5]) re-compute the global canary,
* [6] is validating the resulting canary with the '__security_check_cookie' and performs error handling.
Overview of the current stack-protection implementation:
* lib/CodeGen/StackProtector.cpp
* There is a default stack-protection implementation applied on intermediate representation.
* The target can overload 'getIRStackGuard' method if it has a standard location for the stack protector cookie.
* An intrinsic 'Intrinsic::stackprotector' is added to the prologue. It will be expanded by the instruction selection pass (DAG or Fast).
* Basic Blocks are added to every instrumented function to receive the code for handling stack guard validation and errors handling.
* Guard manipulation and comparison are added directly to the intermediate representation.
* lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
* lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
* There is an implementation that adds instrumentation during instruction selection (for better handling of sibling calls).
* see long comment above 'class StackProtectorDescriptor' declaration.
* The target needs to override 'getSDagStackGuard' to activate SDAG stack protection generation. (note: getIRStackGuard MUST be nullptr).
* 'getSDagStackGuard' returns the appropriate stack guard (security cookie)
* The code is generated by 'SelectionDAGBuilder.cpp' and 'SelectionDAGISel.cpp'.
* include/llvm/Target/TargetLowering.h
* Contains function to retrieve the default Guard 'Value'; should be overridden by each target to select which implementation is used and provide Guard 'Value'.
* lib/Target/X86/X86ISelLowering.cpp
* Contains the x86 specialisation; Guard 'Value' used by the SelectionDAG algorithm.
Function-based Instrumentation:
* The MSVC doesn't inline the stack guard comparison in every function. Instead, a call to '__security_check_cookie' is added to the epilogue before every return instructions.
* To support function-based instrumentation, this patch is
* adding a function to get the function-based check (llvm 'Value', see include/llvm/Target/TargetLowering.h),
* If provided, the stack protection instrumentation won't be inlined and a call to that function will be added to the prologue.
* modifying (SelectionDAGISel.cpp) to avoid producing basic blocks used for inline instrumentation,
* generating the function-based instrumentation during the ISEL pass (SelectionDAGBuilder.cpp),
* if FastISEL (not SelectionDAG), using the fallback which relies on the same function-based check implemented over the intermediate representation (StackProtector.cpp).
Modifications
* adding support for MSVC (lib/Target/X86/X86ISelLowering.cpp)
* adding support function-based instrumentation (lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp, .h)
Results
* IR generated instrumentation:
```
clang-cl /GS test.cc /Od /c -mllvm -print-isel-input
```
```
*** Final LLVM Code input to ISel ***
; Function Attrs: nounwind sspstrong
define i32 @"\01?example@@YAHHH@Z"(i32 %offset, i32 %index) #0 {
entry:
%StackGuardSlot = alloca i8* <<<-- Allocated guard slot
%0 = call i8* @llvm.stackguard() <<<-- Loading Stack Guard value
call void @llvm.stackprotector(i8* %0, i8** %StackGuardSlot) <<<-- Prologue intrinsic call (store to Guard slot)
%index.addr = alloca i32, align 4
%offset.addr = alloca i32, align 4
%buffer = alloca [10 x i8], align 1
store i32 %index, i32* %index.addr, align 4
store i32 %offset, i32* %offset.addr, align 4
%arraydecay = getelementptr inbounds [10 x i8], [10 x i8]* %buffer, i32 0, i32 0
%1 = load i32, i32* %index.addr, align 4
call void @llvm.memset.p0i8.i32(i8* %arraydecay, i8 -52, i32 %1, i32 1, i1 false)
%2 = load i32, i32* %index.addr, align 4
%arrayidx = getelementptr inbounds [10 x i8], [10 x i8]* %buffer, i32 0, i32 %2
%3 = load i8, i8* %arrayidx, align 1
%conv = sext i8 %3 to i32
%4 = load volatile i8*, i8** %StackGuardSlot <<<-- Loading Guard slot
call void @__security_check_cookie(i8* %4) <<<-- Epilogue function-based check
ret i32 %conv
}
```
* SelectionDAG generated instrumentation:
```
clang-cl /GS test.cc /O1 /c /FA
```
```
"?example@@YAHHH@Z": # @"\01?example@@YAHHH@Z"
# BB#0: # %entry
pushl %esi
subl $16, %esp
movl ___security_cookie, %eax <<<-- Loading Stack Guard value
movl 28(%esp), %esi
movl %eax, 12(%esp) <<<-- Store to Guard slot
leal 2(%esp), %eax
pushl %esi
pushl $204
pushl %eax
calll _memset
addl $12, %esp
movsbl 2(%esp,%esi), %esi
movl 12(%esp), %ecx <<<-- Loading Guard slot
calll @__security_check_cookie@4 <<<-- Epilogue function-based check
movl %esi, %eax
addl $16, %esp
popl %esi
retl
```
Reviewers: kcc, pcc, eugenis, rnk
Subscribers: majnemer, llvm-commits, hans, thakis, rnk
Differential Revision: http://reviews.llvm.org/D20346
llvm-svn: 272053
2016-06-07 22:15:35 +02:00
|
|
|
if (SDB->SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {
|
|
|
|
// The target provides a guard check function. There is no need to
|
|
|
|
// generate error handling code or to split current basic block.
|
|
|
|
MachineBasicBlock *ParentMBB = SDB->SPDescriptor.getParentMBB();
|
|
|
|
|
|
|
|
// Add load and check to the basicblock.
|
|
|
|
FuncInfo->MBB = ParentMBB;
|
|
|
|
FuncInfo->InsertPt =
|
|
|
|
FindSplitPointForStackProtector(ParentMBB);
|
|
|
|
SDB->visitSPDescriptorParent(SDB->SPDescriptor, ParentMBB);
|
|
|
|
CurDAG->setRoot(SDB->getRoot());
|
|
|
|
SDB->clear();
|
|
|
|
CodeGenAndEmitDAG();
|
|
|
|
|
|
|
|
// Clear the Per-BB State.
|
|
|
|
SDB->SPDescriptor.resetPerBBState();
|
|
|
|
} else if (SDB->SPDescriptor.shouldEmitStackProtector()) {
|
Teach selectiondag how to handle the stackprotectorcheck intrinsic.
Previously, generation of stack protectors was done exclusively in the
pre-SelectionDAG Codegen LLVM IR Pass "Stack Protector". This necessitated
splitting basic blocks at the IR level to create the success/failure basic
blocks in the tail of the basic block in question. As a result of this,
calls that would have qualified for the sibling call optimization were no
longer eligible for optimization since said calls were no longer right in
the "tail position" (i.e. the immediate predecessor of a ReturnInst
instruction).
Then it was noticed that since the sibling call optimization causes the
callee to reuse the caller's stack, if we could delay the generation of
the stack protector check until later in CodeGen after the sibling call
decision was made, we get both the tail call optimization and the stack
protector check!
A few goals in solving this problem were:
1. Preserve the architecture independence of stack protector generation.
2. Preserve the normal IR level stack protector check for platforms like
OpenBSD for which we support platform specific stack protector
generation.
The main problem that guided the present solution is that one can not
solve this problem in an architecture independent manner at the IR level
only. This is because:
1. The decision on whether or not to perform a sibling call on certain
platforms (for instance i386) requires lower level information
related to available registers that can not be known at the IR level.
2. Even if the previous point were not true, the decision on whether to
perform a tail call is done in LowerCallTo in SelectionDAG which
occurs after the Stack Protector Pass. As a result, one would need to
put the relevant callinst into the stack protector check success
basic block (where the return inst is placed) and then move it back
later at SelectionDAG/MI time before the stack protector check if the
tail call optimization failed. The MI level option was nixed
immediately since it would require platform specific pattern
matching. The SelectionDAG level option was nixed because
SelectionDAG only processes one IR level basic block at a time
implying one could not create a DAG Combine to move the callinst.
To get around this problem a few things were realized:
1. While one can not handle multiple IR level basic blocks at the
SelectionDAG Level, one can generate multiple machine basic blocks
for one IR level basic block. This is how we handle bit tests and
switches.
2. At the MI level, tail calls are represented via a special return
MIInst called "tcreturn". Thus if we know the basic block in which we
wish to insert the stack protector check, we get the correct behavior
by always inserting the stack protector check right before the return
statement. This is a "magical transformation" since no matter where
the stack protector check intrinsic is, we always insert the stack
protector check code at the end of the BB.
Given the aforementioned constraints, the following solution was devised:
1. On platforms that do not support SelectionDAG stack protector check
generation, allow for the normal IR level stack protector check
generation to continue.
2. On platforms that do support SelectionDAG stack protector check
generation:
a. Use the IR level stack protector pass to decide if a stack
protector is required/which BB we insert the stack protector check
in by reusing the logic already therein. If we wish to generate a
stack protector check in a basic block, we place a special IR
intrinsic called llvm.stackprotectorcheck right before the BB's
returninst or if there is a callinst that could potentially be
sibling call optimized, before the call inst.
b. Then when a BB with said intrinsic is processed, we codegen the BB
normally via SelectBasicBlock. In said process, when we visit the
stack protector check, we do not actually emit anything into the
BB. Instead, we just initialize the stack protector descriptor
class (which involves stashing information/creating the success
mbb and the failure mbb if we have not created one for this
function yet) and export the guard variable that we are going to
compare.
c. After we finish selecting the basic block, in FinishBasicBlock if
the StackProtectorDescriptor attached to the SelectionDAGBuilder is
initialized, we first find a splice point in the parent basic block
before the terminator and then splice the terminator of said basic
block into the success basic block. Then we code-gen a new tail for
the parent basic block consisting of the two loads, the comparison,
and finally two branches to the success/failure basic blocks. We
conclude by code-gening the failure basic block if we have not
code-gened it already (all stack protector checks we generate in
the same function, use the same failure basic block).
llvm-svn: 188755
2013-08-20 09:00:16 +02:00
|
|
|
MachineBasicBlock *ParentMBB = SDB->SPDescriptor.getParentMBB();
|
|
|
|
MachineBasicBlock *SuccessMBB = SDB->SPDescriptor.getSuccessMBB();
|
|
|
|
|
|
|
|
// Find the split point to split the parent mbb. At the same time copy all
|
|
|
|
// physical registers used in the tail of parent mbb into virtual registers
|
|
|
|
// before the split point and back into physical registers after the split
|
|
|
|
// point. This prevents us needing to deal with Live-ins and many other
|
|
|
|
// register allocation issues caused by us splitting the parent mbb. The
|
|
|
|
// register allocator will clean up said virtual copies later on.
|
|
|
|
MachineBasicBlock::iterator SplitPoint =
|
[stack-protection] Add support for MSVC buffer security check
Summary:
This patch is adding support for the MSVC buffer security check implementation
The buffer security check is turned on with the '/GS' compiler switch.
* https://msdn.microsoft.com/en-us/library/8dbf701c.aspx
* To be added to clang here: http://reviews.llvm.org/D20347
Some overview of buffer security check feature and implementation:
* https://msdn.microsoft.com/en-us/library/aa290051(VS.71).aspx
* http://www.ksyash.com/2011/01/buffer-overflow-protection-3/
* http://blog.osom.info/2012/02/understanding-vs-c-compilers-buffer.html
For the following example:
```
int example(int offset, int index) {
char buffer[10];
memset(buffer, 0xCC, index);
return buffer[index];
}
```
The MSVC compiler is adding these instructions to perform stack integrity check:
```
push ebp
mov ebp,esp
sub esp,50h
[1] mov eax,dword ptr [__security_cookie (01068024h)]
[2] xor eax,ebp
[3] mov dword ptr [ebp-4],eax
push ebx
push esi
push edi
mov eax,dword ptr [index]
push eax
push 0CCh
lea ecx,[buffer]
push ecx
call _memset (010610B9h)
add esp,0Ch
mov eax,dword ptr [index]
movsx eax,byte ptr buffer[eax]
pop edi
pop esi
pop ebx
[4] mov ecx,dword ptr [ebp-4]
[5] xor ecx,ebp
[6] call @__security_check_cookie@4 (01061276h)
mov esp,ebp
pop ebp
ret
```
The instrumentation above is:
* [1] is loading the global security canary,
* [3] is storing the local computed ([2]) canary to the guard slot,
* [4] is loading the guard slot and ([5]) re-compute the global canary,
* [6] is validating the resulting canary with the '__security_check_cookie' and performs error handling.
Overview of the current stack-protection implementation:
* lib/CodeGen/StackProtector.cpp
* There is a default stack-protection implementation applied on intermediate representation.
* The target can overload 'getIRStackGuard' method if it has a standard location for the stack protector cookie.
* An intrinsic 'Intrinsic::stackprotector' is added to the prologue. It will be expanded by the instruction selection pass (DAG or Fast).
* Basic Blocks are added to every instrumented function to receive the code for handling stack guard validation and errors handling.
* Guard manipulation and comparison are added directly to the intermediate representation.
* lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
* lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
* There is an implementation that adds instrumentation during instruction selection (for better handling of sibling calls).
* see long comment above 'class StackProtectorDescriptor' declaration.
* The target needs to override 'getSDagStackGuard' to activate SDAG stack protection generation. (note: getIRStackGuard MUST be nullptr).
* 'getSDagStackGuard' returns the appropriate stack guard (security cookie)
* The code is generated by 'SelectionDAGBuilder.cpp' and 'SelectionDAGISel.cpp'.
* include/llvm/Target/TargetLowering.h
* Contains function to retrieve the default Guard 'Value'; should be overridden by each target to select which implementation is used and provide Guard 'Value'.
* lib/Target/X86/X86ISelLowering.cpp
* Contains the x86 specialisation; Guard 'Value' used by the SelectionDAG algorithm.
Function-based Instrumentation:
* The MSVC doesn't inline the stack guard comparison in every function. Instead, a call to '__security_check_cookie' is added to the epilogue before every return instructions.
* To support function-based instrumentation, this patch is
* adding a function to get the function-based check (llvm 'Value', see include/llvm/Target/TargetLowering.h),
* If provided, the stack protection instrumentation won't be inlined and a call to that function will be added to the prologue.
* modifying (SelectionDAGISel.cpp) to avoid producing basic blocks used for inline instrumentation,
* generating the function-based instrumentation during the ISEL pass (SelectionDAGBuilder.cpp),
* if FastISEL (not SelectionDAG), using the fallback which relies on the same function-based check implemented over the intermediate representation (StackProtector.cpp).
Modifications
* adding support for MSVC (lib/Target/X86/X86ISelLowering.cpp)
* adding support function-based instrumentation (lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp, .h)
Results
* IR generated instrumentation:
```
clang-cl /GS test.cc /Od /c -mllvm -print-isel-input
```
```
*** Final LLVM Code input to ISel ***
; Function Attrs: nounwind sspstrong
define i32 @"\01?example@@YAHHH@Z"(i32 %offset, i32 %index) #0 {
entry:
%StackGuardSlot = alloca i8* <<<-- Allocated guard slot
%0 = call i8* @llvm.stackguard() <<<-- Loading Stack Guard value
call void @llvm.stackprotector(i8* %0, i8** %StackGuardSlot) <<<-- Prologue intrinsic call (store to Guard slot)
%index.addr = alloca i32, align 4
%offset.addr = alloca i32, align 4
%buffer = alloca [10 x i8], align 1
store i32 %index, i32* %index.addr, align 4
store i32 %offset, i32* %offset.addr, align 4
%arraydecay = getelementptr inbounds [10 x i8], [10 x i8]* %buffer, i32 0, i32 0
%1 = load i32, i32* %index.addr, align 4
call void @llvm.memset.p0i8.i32(i8* %arraydecay, i8 -52, i32 %1, i32 1, i1 false)
%2 = load i32, i32* %index.addr, align 4
%arrayidx = getelementptr inbounds [10 x i8], [10 x i8]* %buffer, i32 0, i32 %2
%3 = load i8, i8* %arrayidx, align 1
%conv = sext i8 %3 to i32
%4 = load volatile i8*, i8** %StackGuardSlot <<<-- Loading Guard slot
call void @__security_check_cookie(i8* %4) <<<-- Epilogue function-based check
ret i32 %conv
}
```
* SelectionDAG generated instrumentation:
```
clang-cl /GS test.cc /O1 /c /FA
```
```
"?example@@YAHHH@Z": # @"\01?example@@YAHHH@Z"
# BB#0: # %entry
pushl %esi
subl $16, %esp
movl ___security_cookie, %eax <<<-- Loading Stack Guard value
movl 28(%esp), %esi
movl %eax, 12(%esp) <<<-- Store to Guard slot
leal 2(%esp), %eax
pushl %esi
pushl $204
pushl %eax
calll _memset
addl $12, %esp
movsbl 2(%esp,%esi), %esi
movl 12(%esp), %ecx <<<-- Loading Guard slot
calll @__security_check_cookie@4 <<<-- Epilogue function-based check
movl %esi, %eax
addl $16, %esp
popl %esi
retl
```
Reviewers: kcc, pcc, eugenis, rnk
Subscribers: majnemer, llvm-commits, hans, thakis, rnk
Differential Revision: http://reviews.llvm.org/D20346
llvm-svn: 272053
2016-06-07 22:15:35 +02:00
|
|
|
FindSplitPointForStackProtector(ParentMBB);
|
Teach selectiondag how to handle the stackprotectorcheck intrinsic.
Previously, generation of stack protectors was done exclusively in the
pre-SelectionDAG Codegen LLVM IR Pass "Stack Protector". This necessitated
splitting basic blocks at the IR level to create the success/failure basic
blocks in the tail of the basic block in question. As a result of this,
calls that would have qualified for the sibling call optimization were no
longer eligible for optimization since said calls were no longer right in
the "tail position" (i.e. the immediate predecessor of a ReturnInst
instruction).
Then it was noticed that since the sibling call optimization causes the
callee to reuse the caller's stack, if we could delay the generation of
the stack protector check until later in CodeGen after the sibling call
decision was made, we get both the tail call optimization and the stack
protector check!
A few goals in solving this problem were:
1. Preserve the architecture independence of stack protector generation.
2. Preserve the normal IR level stack protector check for platforms like
OpenBSD for which we support platform specific stack protector
generation.
The main problem that guided the present solution is that one can not
solve this problem in an architecture independent manner at the IR level
only. This is because:
1. The decision on whether or not to perform a sibling call on certain
platforms (for instance i386) requires lower level information
related to available registers that can not be known at the IR level.
2. Even if the previous point were not true, the decision on whether to
perform a tail call is done in LowerCallTo in SelectionDAG which
occurs after the Stack Protector Pass. As a result, one would need to
put the relevant callinst into the stack protector check success
basic block (where the return inst is placed) and then move it back
later at SelectionDAG/MI time before the stack protector check if the
tail call optimization failed. The MI level option was nixed
immediately since it would require platform specific pattern
matching. The SelectionDAG level option was nixed because
SelectionDAG only processes one IR level basic block at a time
implying one could not create a DAG Combine to move the callinst.
To get around this problem a few things were realized:
1. While one can not handle multiple IR level basic blocks at the
SelectionDAG Level, one can generate multiple machine basic blocks
for one IR level basic block. This is how we handle bit tests and
switches.
2. At the MI level, tail calls are represented via a special return
MIInst called "tcreturn". Thus if we know the basic block in which we
wish to insert the stack protector check, we get the correct behavior
by always inserting the stack protector check right before the return
statement. This is a "magical transformation" since no matter where
the stack protector check intrinsic is, we always insert the stack
protector check code at the end of the BB.
Given the aforementioned constraints, the following solution was devised:
1. On platforms that do not support SelectionDAG stack protector check
generation, allow for the normal IR level stack protector check
generation to continue.
2. On platforms that do support SelectionDAG stack protector check
generation:
a. Use the IR level stack protector pass to decide if a stack
protector is required/which BB we insert the stack protector check
in by reusing the logic already therein. If we wish to generate a
stack protector check in a basic block, we place a special IR
intrinsic called llvm.stackprotectorcheck right before the BB's
returninst or if there is a callinst that could potentially be
sibling call optimized, before the call inst.
b. Then when a BB with said intrinsic is processed, we codegen the BB
normally via SelectBasicBlock. In said process, when we visit the
stack protector check, we do not actually emit anything into the
BB. Instead, we just initialize the stack protector descriptor
class (which involves stashing information/creating the success
mbb and the failure mbb if we have not created one for this
function yet) and export the guard variable that we are going to
compare.
c. After we finish selecting the basic block, in FinishBasicBlock if
the StackProtectorDescriptor attached to the SelectionDAGBuilder is
initialized, we first find a splice point in the parent basic block
before the terminator and then splice the terminator of said basic
block into the success basic block. Then we code-gen a new tail for
the parent basic block consisting of the two loads, the comparison,
and finally two branches to the success/failure basic blocks. We
conclude by code-gening the failure basic block if we have not
code-gened it already (all stack protector checks we generate in
the same function, use the same failure basic block).
llvm-svn: 188755
2013-08-20 09:00:16 +02:00
|
|
|
|
|
|
|
// Splice the terminator of ParentMBB into SuccessMBB.
|
|
|
|
SuccessMBB->splice(SuccessMBB->end(), ParentMBB,
|
|
|
|
SplitPoint,
|
|
|
|
ParentMBB->end());
|
|
|
|
|
|
|
|
// Add compare/jump on neq/jump to the parent BB.
|
|
|
|
FuncInfo->MBB = ParentMBB;
|
|
|
|
FuncInfo->InsertPt = ParentMBB->end();
|
|
|
|
SDB->visitSPDescriptorParent(SDB->SPDescriptor, ParentMBB);
|
|
|
|
CurDAG->setRoot(SDB->getRoot());
|
|
|
|
SDB->clear();
|
|
|
|
CodeGenAndEmitDAG();
|
|
|
|
|
|
|
|
// CodeGen Failure MBB if we have not codegened it yet.
|
|
|
|
MachineBasicBlock *FailureMBB = SDB->SPDescriptor.getFailureMBB();
|
2016-01-23 21:58:09 +01:00
|
|
|
if (FailureMBB->empty()) {
|
Teach selectiondag how to handle the stackprotectorcheck intrinsic.
Previously, generation of stack protectors was done exclusively in the
pre-SelectionDAG Codegen LLVM IR Pass "Stack Protector". This necessitated
splitting basic blocks at the IR level to create the success/failure basic
blocks in the tail of the basic block in question. As a result of this,
calls that would have qualified for the sibling call optimization were no
longer eligible for optimization since said calls were no longer right in
the "tail position" (i.e. the immediate predecessor of a ReturnInst
instruction).
Then it was noticed that since the sibling call optimization causes the
callee to reuse the caller's stack, if we could delay the generation of
the stack protector check until later in CodeGen after the sibling call
decision was made, we get both the tail call optimization and the stack
protector check!
A few goals in solving this problem were:
1. Preserve the architecture independence of stack protector generation.
2. Preserve the normal IR level stack protector check for platforms like
OpenBSD for which we support platform specific stack protector
generation.
The main problem that guided the present solution is that one can not
solve this problem in an architecture independent manner at the IR level
only. This is because:
1. The decision on whether or not to perform a sibling call on certain
platforms (for instance i386) requires lower level information
related to available registers that can not be known at the IR level.
2. Even if the previous point were not true, the decision on whether to
perform a tail call is done in LowerCallTo in SelectionDAG which
occurs after the Stack Protector Pass. As a result, one would need to
put the relevant callinst into the stack protector check success
basic block (where the return inst is placed) and then move it back
later at SelectionDAG/MI time before the stack protector check if the
tail call optimization failed. The MI level option was nixed
immediately since it would require platform specific pattern
matching. The SelectionDAG level option was nixed because
SelectionDAG only processes one IR level basic block at a time
implying one could not create a DAG Combine to move the callinst.
To get around this problem a few things were realized:
1. While one can not handle multiple IR level basic blocks at the
SelectionDAG Level, one can generate multiple machine basic blocks
for one IR level basic block. This is how we handle bit tests and
switches.
2. At the MI level, tail calls are represented via a special return
MIInst called "tcreturn". Thus if we know the basic block in which we
wish to insert the stack protector check, we get the correct behavior
by always inserting the stack protector check right before the return
statement. This is a "magical transformation" since no matter where
the stack protector check intrinsic is, we always insert the stack
protector check code at the end of the BB.
Given the aforementioned constraints, the following solution was devised:
1. On platforms that do not support SelectionDAG stack protector check
generation, allow for the normal IR level stack protector check
generation to continue.
2. On platforms that do support SelectionDAG stack protector check
generation:
a. Use the IR level stack protector pass to decide if a stack
protector is required/which BB we insert the stack protector check
in by reusing the logic already therein. If we wish to generate a
stack protector check in a basic block, we place a special IR
intrinsic called llvm.stackprotectorcheck right before the BB's
returninst or if there is a callinst that could potentially be
sibling call optimized, before the call inst.
b. Then when a BB with said intrinsic is processed, we codegen the BB
normally via SelectBasicBlock. In said process, when we visit the
stack protector check, we do not actually emit anything into the
BB. Instead, we just initialize the stack protector descriptor
class (which involves stashing information/creating the success
mbb and the failure mbb if we have not created one for this
function yet) and export the guard variable that we are going to
compare.
c. After we finish selecting the basic block, in FinishBasicBlock if
the StackProtectorDescriptor attached to the SelectionDAGBuilder is
initialized, we first find a splice point in the parent basic block
before the terminator and then splice the terminator of said basic
block into the success basic block. Then we code-gen a new tail for
the parent basic block consisting of the two loads, the comparison,
and finally two branches to the success/failure basic blocks. We
conclude by code-gening the failure basic block if we have not
code-gened it already (all stack protector checks we generate in
the same function, use the same failure basic block).
llvm-svn: 188755
2013-08-20 09:00:16 +02:00
|
|
|
FuncInfo->MBB = FailureMBB;
|
|
|
|
FuncInfo->InsertPt = FailureMBB->end();
|
|
|
|
SDB->visitSPDescriptorFailure(SDB->SPDescriptor);
|
|
|
|
CurDAG->setRoot(SDB->getRoot());
|
|
|
|
SDB->clear();
|
|
|
|
CodeGenAndEmitDAG();
|
|
|
|
}
|
|
|
|
|
|
|
|
// Clear the Per-BB State.
|
|
|
|
SDB->SPDescriptor.resetPerBBState();
|
|
|
|
}
|
|
|
|
|
2016-04-15 23:45:09 +02:00
|
|
|
// Lower each BitTestBlock.
|
|
|
|
for (auto &BTB : SDB->BitTestCases) {
|
2007-04-09 14:31:58 +02:00
|
|
|
// Lower header first, if it wasn't already lowered
|
2016-04-15 23:45:09 +02:00
|
|
|
if (!BTB.Emitted) {
|
2007-04-09 14:31:58 +02:00
|
|
|
// Set the current basic block to the mbb we wish to insert the code into
|
2016-04-15 23:45:09 +02:00
|
|
|
FuncInfo->MBB = BTB.Parent;
|
2010-07-10 11:00:22 +02:00
|
|
|
FuncInfo->InsertPt = FuncInfo->MBB->end();
|
2007-04-09 14:31:58 +02:00
|
|
|
// Emit the code
|
2016-04-15 23:45:09 +02:00
|
|
|
SDB->visitBitTestHeader(BTB, FuncInfo->MBB);
|
2009-11-23 19:04:58 +01:00
|
|
|
CurDAG->setRoot(SDB->getRoot());
|
|
|
|
SDB->clear();
|
2010-07-10 11:00:22 +02:00
|
|
|
CodeGenAndEmitDAG();
|
2009-09-20 04:20:51 +02:00
|
|
|
}
|
2007-04-09 14:31:58 +02:00
|
|
|
|
2016-04-15 23:45:09 +02:00
|
|
|
BranchProbability UnhandledProb = BTB.Prob;
|
|
|
|
for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
|
|
|
|
UnhandledProb -= BTB.Cases[j].ExtraProb;
|
2007-04-09 14:31:58 +02:00
|
|
|
// Set the current basic block to the mbb we wish to insert the code into
|
2016-04-15 23:45:09 +02:00
|
|
|
FuncInfo->MBB = BTB.Cases[j].ThisBB;
|
2010-07-10 11:00:22 +02:00
|
|
|
FuncInfo->InsertPt = FuncInfo->MBB->end();
|
2007-04-09 14:31:58 +02:00
|
|
|
// Emit the code
|
2015-08-25 23:34:38 +02:00
|
|
|
|
|
|
|
// If all cases cover a contiguous range, it is not necessary to jump to
|
|
|
|
// the default block after the last bit test fails. This is because the
|
|
|
|
// range check during bit test header creation has guaranteed that every
|
2016-04-15 23:45:30 +02:00
|
|
|
// case here doesn't go outside the range. In this case, there is no need
|
|
|
|
// to perform the last bit test, as it will always be true. Instead, make
|
|
|
|
// the second-to-last bit-test fall through to the target of the last bit
|
|
|
|
// test, and delete the last bit test.
|
|
|
|
|
2015-08-25 23:34:38 +02:00
|
|
|
MachineBasicBlock *NextMBB;
|
2016-04-15 23:45:30 +02:00
|
|
|
if (BTB.ContiguousRange && j + 2 == ej) {
|
|
|
|
// Second-to-last bit-test with contiguous range: fall through to the
|
|
|
|
// target of the final bit test.
|
2016-04-15 23:45:09 +02:00
|
|
|
NextMBB = BTB.Cases[j + 1].TargetBB;
|
2016-04-15 23:45:30 +02:00
|
|
|
} else if (j + 1 == ej) {
|
|
|
|
// For the last bit test, fall through to Default.
|
2016-04-15 23:45:09 +02:00
|
|
|
NextMBB = BTB.Default;
|
2016-04-15 23:45:30 +02:00
|
|
|
} else {
|
|
|
|
// Otherwise, fall through to the next bit test.
|
|
|
|
NextMBB = BTB.Cases[j + 1].ThisBB;
|
|
|
|
}
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2016-04-15 23:45:09 +02:00
|
|
|
SDB->visitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j],
|
2015-08-25 23:34:38 +02:00
|
|
|
FuncInfo->MBB);
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2009-11-23 19:04:58 +01:00
|
|
|
CurDAG->setRoot(SDB->getRoot());
|
|
|
|
SDB->clear();
|
2010-07-10 11:00:22 +02:00
|
|
|
CodeGenAndEmitDAG();
|
2015-08-25 23:34:38 +02:00
|
|
|
|
2016-04-15 23:45:30 +02:00
|
|
|
if (BTB.ContiguousRange && j + 2 == ej) {
|
|
|
|
// Since we're not going to use the final bit test, remove it.
|
|
|
|
BTB.Cases.pop_back();
|
2015-08-25 23:34:38 +02:00
|
|
|
break;
|
2016-04-15 23:45:30 +02:00
|
|
|
}
|
2007-04-09 14:31:58 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Update PHI Nodes
|
2010-04-22 21:55:20 +02:00
|
|
|
for (unsigned pi = 0, pe = FuncInfo->PHINodesToUpdate.size();
|
|
|
|
pi != pe; ++pi) {
|
2012-12-20 19:46:29 +01:00
|
|
|
MachineInstrBuilder PHI(*MF, FuncInfo->PHINodesToUpdate[pi].first);
|
2007-04-09 14:31:58 +02:00
|
|
|
MachineBasicBlock *PHIBB = PHI->getParent();
|
2010-02-09 20:54:29 +01:00
|
|
|
assert(PHI->isPHI() &&
|
2007-04-09 14:31:58 +02:00
|
|
|
"This is not a machine PHI node that we are updating!");
|
|
|
|
// This is "default" BB. We have two jumps to it. From "header" BB and
|
2016-04-15 23:45:30 +02:00
|
|
|
// from last "case" BB, unless the latter was skipped.
|
|
|
|
if (PHIBB == BTB.Default) {
|
|
|
|
PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second).addMBB(BTB.Parent);
|
|
|
|
if (!BTB.ContiguousRange) {
|
|
|
|
PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second)
|
|
|
|
.addMBB(BTB.Cases.back().ThisBB);
|
|
|
|
}
|
|
|
|
}
|
2007-04-09 14:31:58 +02:00
|
|
|
// One of "cases" BB.
|
2016-04-15 23:45:09 +02:00
|
|
|
for (unsigned j = 0, ej = BTB.Cases.size();
|
2008-08-28 01:52:12 +02:00
|
|
|
j != ej; ++j) {
|
2016-04-15 23:45:09 +02:00
|
|
|
MachineBasicBlock* cBB = BTB.Cases[j].ThisBB;
|
2012-12-20 19:46:29 +01:00
|
|
|
if (cBB->isSuccessor(PHIBB))
|
|
|
|
PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second).addMBB(cBB);
|
2007-04-09 14:31:58 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2009-11-23 19:04:58 +01:00
|
|
|
SDB->BitTestCases.clear();
|
2007-04-09 14:31:58 +02:00
|
|
|
|
2006-04-23 08:26:20 +02:00
|
|
|
// If the JumpTable record is filled in, then we need to emit a jump table.
|
|
|
|
// Updating the PHI nodes is tricky in this case, since we need to determine
|
|
|
|
// whether the PHI is a successor of the range check MBB or the jump table MBB
|
2009-11-23 19:04:58 +01:00
|
|
|
for (unsigned i = 0, e = SDB->JTCases.size(); i != e; ++i) {
|
2007-03-25 17:07:15 +02:00
|
|
|
// Lower header first, if it wasn't already lowered
|
2009-11-23 19:04:58 +01:00
|
|
|
if (!SDB->JTCases[i].first.Emitted) {
|
2007-03-25 17:07:15 +02:00
|
|
|
// Set the current basic block to the mbb we wish to insert the code into
|
2010-07-10 11:00:22 +02:00
|
|
|
FuncInfo->MBB = SDB->JTCases[i].first.HeaderBB;
|
|
|
|
FuncInfo->InsertPt = FuncInfo->MBB->end();
|
2007-03-25 17:07:15 +02:00
|
|
|
// Emit the code
|
2010-04-20 00:41:47 +02:00
|
|
|
SDB->visitJumpTableHeader(SDB->JTCases[i].second, SDB->JTCases[i].first,
|
2010-07-10 11:00:22 +02:00
|
|
|
FuncInfo->MBB);
|
2009-11-23 19:04:58 +01:00
|
|
|
CurDAG->setRoot(SDB->getRoot());
|
|
|
|
SDB->clear();
|
2010-07-10 11:00:22 +02:00
|
|
|
CodeGenAndEmitDAG();
|
2007-04-09 14:31:58 +02:00
|
|
|
}
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2006-04-22 20:53:45 +02:00
|
|
|
// Set the current basic block to the mbb we wish to insert the code into
|
2010-07-10 11:00:22 +02:00
|
|
|
FuncInfo->MBB = SDB->JTCases[i].second.MBB;
|
|
|
|
FuncInfo->InsertPt = FuncInfo->MBB->end();
|
2006-04-22 20:53:45 +02:00
|
|
|
// Emit the code
|
2009-11-23 19:04:58 +01:00
|
|
|
SDB->visitJumpTable(SDB->JTCases[i].second);
|
|
|
|
CurDAG->setRoot(SDB->getRoot());
|
|
|
|
SDB->clear();
|
2010-07-10 11:00:22 +02:00
|
|
|
CodeGenAndEmitDAG();
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2006-04-22 20:53:45 +02:00
|
|
|
// Update PHI Nodes
|
2010-04-22 21:55:20 +02:00
|
|
|
for (unsigned pi = 0, pe = FuncInfo->PHINodesToUpdate.size();
|
|
|
|
pi != pe; ++pi) {
|
2012-12-20 19:46:29 +01:00
|
|
|
MachineInstrBuilder PHI(*MF, FuncInfo->PHINodesToUpdate[pi].first);
|
2006-04-22 20:53:45 +02:00
|
|
|
MachineBasicBlock *PHIBB = PHI->getParent();
|
2010-02-09 20:54:29 +01:00
|
|
|
assert(PHI->isPHI() &&
|
2006-04-22 20:53:45 +02:00
|
|
|
"This is not a machine PHI node that we are updating!");
|
2007-04-09 14:31:58 +02:00
|
|
|
// "default" BB. We can go there only from header BB.
|
2012-12-20 19:46:29 +01:00
|
|
|
if (PHIBB == SDB->JTCases[i].second.Default)
|
|
|
|
PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second)
|
|
|
|
.addMBB(SDB->JTCases[i].first.HeaderBB);
|
2007-04-09 14:31:58 +02:00
|
|
|
// JT BB. Just iterate over successors here
|
2012-12-20 19:46:29 +01:00
|
|
|
if (FuncInfo->MBB->isSuccessor(PHIBB))
|
|
|
|
PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second).addMBB(FuncInfo->MBB);
|
2006-04-22 20:53:45 +02:00
|
|
|
}
|
|
|
|
}
|
2009-11-23 19:04:58 +01:00
|
|
|
SDB->JTCases.clear();
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2006-03-27 03:32:24 +02:00
|
|
|
// If we generated any switch lowering information, build and codegen any
|
|
|
|
// additional DAGs necessary.
|
2009-11-23 19:04:58 +01:00
|
|
|
for (unsigned i = 0, e = SDB->SwitchCases.size(); i != e; ++i) {
|
2006-03-27 03:32:24 +02:00
|
|
|
// Set the current basic block to the mbb we wish to insert the code into
|
2011-01-14 23:26:16 +01:00
|
|
|
FuncInfo->MBB = SDB->SwitchCases[i].ThisBB;
|
2010-07-10 11:00:22 +02:00
|
|
|
FuncInfo->InsertPt = FuncInfo->MBB->end();
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2010-05-01 02:25:44 +02:00
|
|
|
// Determine the unique successors.
|
|
|
|
SmallVector<MachineBasicBlock *, 2> Succs;
|
|
|
|
Succs.push_back(SDB->SwitchCases[i].TrueBB);
|
|
|
|
if (SDB->SwitchCases[i].TrueBB != SDB->SwitchCases[i].FalseBB)
|
|
|
|
Succs.push_back(SDB->SwitchCases[i].FalseBB);
|
|
|
|
|
2011-01-14 23:26:16 +01:00
|
|
|
// Emit the code. Note that this could result in FuncInfo->MBB being split.
|
2010-07-10 11:00:22 +02:00
|
|
|
SDB->visitSwitchCase(SDB->SwitchCases[i], FuncInfo->MBB);
|
2009-11-23 19:04:58 +01:00
|
|
|
CurDAG->setRoot(SDB->getRoot());
|
2010-05-01 02:25:44 +02:00
|
|
|
SDB->clear();
|
2010-07-10 11:00:22 +02:00
|
|
|
CodeGenAndEmitDAG();
|
2011-01-14 23:26:16 +01:00
|
|
|
|
|
|
|
// Remember the last block, now that any splitting is done, for use in
|
|
|
|
// populating PHI nodes in successors.
|
|
|
|
MachineBasicBlock *ThisBB = FuncInfo->MBB;
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2006-09-07 03:59:34 +02:00
|
|
|
// Handle any PHI nodes in successors of this chunk, as if we were coming
|
|
|
|
// from the original BB before switch expansion. Note that PHI nodes can
|
|
|
|
// occur multiple times in PHINodesToUpdate. We have to be very careful to
|
|
|
|
// handle them the right number of times.
|
2010-05-01 02:25:44 +02:00
|
|
|
for (unsigned i = 0, e = Succs.size(); i != e; ++i) {
|
2010-07-10 11:00:22 +02:00
|
|
|
FuncInfo->MBB = Succs[i];
|
|
|
|
FuncInfo->InsertPt = FuncInfo->MBB->end();
|
|
|
|
// FuncInfo->MBB may have been removed from the CFG if a branch was
|
|
|
|
// constant folded.
|
|
|
|
if (ThisBB->isSuccessor(FuncInfo->MBB)) {
|
2012-12-20 19:46:29 +01:00
|
|
|
for (MachineBasicBlock::iterator
|
|
|
|
MBBI = FuncInfo->MBB->begin(), MBBE = FuncInfo->MBB->end();
|
|
|
|
MBBI != MBBE && MBBI->isPHI(); ++MBBI) {
|
|
|
|
MachineInstrBuilder PHI(*MF, MBBI);
|
2010-01-11 22:02:33 +01:00
|
|
|
// This value for this PHI node is recorded in PHINodesToUpdate.
|
|
|
|
for (unsigned pn = 0; ; ++pn) {
|
2010-04-22 21:55:20 +02:00
|
|
|
assert(pn != FuncInfo->PHINodesToUpdate.size() &&
|
2010-01-11 22:02:33 +01:00
|
|
|
"Didn't find PHI entry!");
|
2012-12-20 19:46:29 +01:00
|
|
|
if (FuncInfo->PHINodesToUpdate[pn].first == PHI) {
|
|
|
|
PHI.addReg(FuncInfo->PHINodesToUpdate[pn].second).addMBB(ThisBB);
|
2010-01-11 22:02:33 +01:00
|
|
|
break;
|
|
|
|
}
|
2009-09-18 10:26:06 +02:00
|
|
|
}
|
2006-09-07 03:59:34 +02:00
|
|
|
}
|
2006-03-27 03:32:24 +02:00
|
|
|
}
|
|
|
|
}
|
2005-03-30 03:10:47 +02:00
|
|
|
}
|
2009-11-23 19:04:58 +01:00
|
|
|
SDB->SwitchCases.clear();
|
2005-01-07 08:47:53 +01:00
|
|
|
}
|
2006-01-21 03:32:06 +01:00
|
|
|
|
2009-02-06 19:26:51 +01:00
|
|
|
/// Create the scheduler. If a specific scheduler was specified
|
|
|
|
/// via the SchedulerRegistry, use it, otherwise select the
|
|
|
|
/// one preferred by the target.
|
2008-07-14 20:19:29 +02:00
|
|
|
///
|
2009-02-11 05:27:20 +01:00
|
|
|
ScheduleDAGSDNodes *SelectionDAGISel::CreateScheduler() {
|
2015-07-28 08:18:04 +02:00
|
|
|
return ISHeuristic(this, OptLevel);
|
2006-01-21 03:32:06 +01:00
|
|
|
}
|
2006-02-24 03:13:54 +01:00
|
|
|
|
2006-10-11 05:58:02 +02:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Helper functions used by the generated instruction selector.
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Calls to these methods are generated by tblgen.
|
|
|
|
|
|
|
|
/// CheckAndMask - The isel is trying to match something like (and X, 255). If
|
|
|
|
/// the dag combiner simplified the 255, we still want to match. RHS is the
|
|
|
|
/// actual value in the DAG on the RHS of an AND, and DesiredMaskS is the value
|
|
|
|
/// specified in the .td file (e.g. 255).
|
2009-09-20 04:20:51 +02:00
|
|
|
bool SelectionDAGISel::CheckAndMask(SDValue LHS, ConstantSDNode *RHS,
|
2007-07-25 01:00:27 +02:00
|
|
|
int64_t DesiredMaskS) const {
|
2008-02-25 22:11:39 +01:00
|
|
|
const APInt &ActualMask = RHS->getAPIntValue();
|
|
|
|
const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS);
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2006-10-11 05:58:02 +02:00
|
|
|
// If the actual mask exactly matches, success!
|
|
|
|
if (ActualMask == DesiredMask)
|
|
|
|
return true;
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2006-10-11 05:58:02 +02:00
|
|
|
// If the actual AND mask is allowing unallowed bits, this doesn't match.
|
2008-02-25 22:11:39 +01:00
|
|
|
if (ActualMask.intersects(~DesiredMask))
|
2006-10-11 05:58:02 +02:00
|
|
|
return false;
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2006-10-11 05:58:02 +02:00
|
|
|
// Otherwise, the DAG Combiner may have proven that the value coming in is
|
|
|
|
// either already zero or is not demanded. Check for known zero input bits.
|
2008-02-25 22:11:39 +01:00
|
|
|
APInt NeededMask = DesiredMask & ~ActualMask;
|
2007-06-22 16:59:07 +02:00
|
|
|
if (CurDAG->MaskedValueIsZero(LHS, NeededMask))
|
2006-10-11 05:58:02 +02:00
|
|
|
return true;
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2006-10-11 05:58:02 +02:00
|
|
|
// TODO: check to see if missing bits are just not demanded.
|
|
|
|
|
|
|
|
// Otherwise, this pattern doesn't match.
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// CheckOrMask - The isel is trying to match something like (or X, 255). If
|
|
|
|
/// the dag combiner simplified the 255, we still want to match. RHS is the
|
|
|
|
/// actual value in the DAG on the RHS of an OR, and DesiredMaskS is the value
|
|
|
|
/// specified in the .td file (e.g. 255).
|
2009-09-20 04:20:51 +02:00
|
|
|
bool SelectionDAGISel::CheckOrMask(SDValue LHS, ConstantSDNode *RHS,
|
2008-02-25 22:11:39 +01:00
|
|
|
int64_t DesiredMaskS) const {
|
|
|
|
const APInt &ActualMask = RHS->getAPIntValue();
|
|
|
|
const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS);
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2006-10-11 05:58:02 +02:00
|
|
|
// If the actual mask exactly matches, success!
|
|
|
|
if (ActualMask == DesiredMask)
|
|
|
|
return true;
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2006-10-11 05:58:02 +02:00
|
|
|
// If the actual AND mask is allowing unallowed bits, this doesn't match.
|
2008-02-25 22:11:39 +01:00
|
|
|
if (ActualMask.intersects(~DesiredMask))
|
2006-10-11 05:58:02 +02:00
|
|
|
return false;
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2006-10-11 05:58:02 +02:00
|
|
|
// Otherwise, the DAG Combiner may have proven that the value coming in is
|
|
|
|
// either already zero or is not demanded. Check for known zero input bits.
|
2008-02-25 22:11:39 +01:00
|
|
|
APInt NeededMask = DesiredMask & ~ActualMask;
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2017-04-28 07:31:46 +02:00
|
|
|
KnownBits Known;
|
|
|
|
CurDAG->computeKnownBits(LHS, Known);
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2006-10-11 05:58:02 +02:00
|
|
|
// If all the missing bits in the or are already known to be set, match!
|
2017-04-28 07:31:46 +02:00
|
|
|
if (NeededMask.isSubsetOf(Known.One))
|
2006-10-11 05:58:02 +02:00
|
|
|
return true;
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2006-10-11 05:58:02 +02:00
|
|
|
// TODO: check to see if missing bits are just not demanded.
|
2009-09-20 04:20:51 +02:00
|
|
|
|
2006-10-11 05:58:02 +02:00
|
|
|
// Otherwise, this pattern doesn't match.
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2006-02-24 03:13:54 +01:00
|
|
|
/// SelectInlineAsmMemoryOperands - Calls to this are automatically generated
/// by tblgen.  Others should not call it.
///
/// Rebuilds the operand list of an INLINEASM node: non-memory operands are
/// copied verbatim, while each memory operand is handed to the target's
/// SelectInlineAsmMemoryOperand hook and replaced by the operands the target
/// selects for the address.  Ops is replaced in place; DL is the debug
/// location used for any new constants.
void SelectionDAGISel::SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops,
                                                     const SDLoc &DL) {
  // Steal the incoming operands; we rebuild Ops from scratch below.
  std::vector<SDValue> InOps;
  std::swap(InOps, Ops);

  // The first four operands of an INLINEASM node have fixed positions and are
  // copied through untouched.
  Ops.push_back(InOps[InlineAsm::Op_InputChain]); // 0
  Ops.push_back(InOps[InlineAsm::Op_AsmString]);  // 1
  Ops.push_back(InOps[InlineAsm::Op_MDNode]);     // 2, !srcloc
  Ops.push_back(InOps[InlineAsm::Op_ExtraInfo]);  // 3 (SideEffect, AlignStack)

  unsigned i = InlineAsm::Op_FirstOperand, e = InOps.size();
  if (InOps[e-1].getValueType() == MVT::Glue)
    --e; // Don't process a glue operand if it is here.

  // Each constraint group is a flag-word constant followed by its register
  // operands; i always points at the flag word of the next group.
  while (i != e) {
    unsigned Flags = cast<ConstantSDNode>(InOps[i])->getZExtValue();
    if (!InlineAsm::isMemKind(Flags)) {
      // Just skip over this operand, copying the operands verbatim.
      Ops.insert(Ops.end(), InOps.begin()+i,
                 InOps.begin()+i+InlineAsm::getNumOperandRegisters(Flags) + 1);
      i += InlineAsm::getNumOperandRegisters(Flags) + 1;
    } else {
      assert(InlineAsm::getNumOperandRegisters(Flags) == 1 &&
             "Memory operand with multiple values?");

      unsigned TiedToOperand;
      if (InlineAsm::isUseOperandTiedToDef(Flags, TiedToOperand)) {
        // We need the constraint ID from the operand this is tied to.
        // Walk forward from the first operand group, one group at a time,
        // until we have skipped TiedToOperand groups.
        unsigned CurOp = InlineAsm::Op_FirstOperand;
        Flags = cast<ConstantSDNode>(InOps[CurOp])->getZExtValue();
        for (; TiedToOperand; --TiedToOperand) {
          CurOp += InlineAsm::getNumOperandRegisters(Flags)+1;
          Flags = cast<ConstantSDNode>(InOps[CurOp])->getZExtValue();
        }
      }

      // Otherwise, this is a memory operand. Ask the target to select it.
      std::vector<SDValue> SelOps;
      unsigned ConstraintID = InlineAsm::getMemoryConstraintID(Flags);
      if (SelectInlineAsmMemoryOperand(InOps[i+1], ConstraintID, SelOps))
        report_fatal_error("Could not match memory address. Inline asm"
                           " failure!");

      // Add this to the output node: a fresh mem-kind flag word carrying the
      // constraint ID, followed by the target-selected address operands.
      unsigned NewFlags =
        InlineAsm::getFlagWord(InlineAsm::Kind_Mem, SelOps.size());
      NewFlags = InlineAsm::getFlagWordForMem(NewFlags, ConstraintID);
      Ops.push_back(CurDAG->getTargetConstant(NewFlags, DL, MVT::i32));
      Ops.insert(Ops.end(), SelOps.begin(), SelOps.end());
      // Skip the old flag word and the single memory operand it described.
      i += 2;
    }
  }

  // Add the glue input back if present.
  if (e != InOps.size())
    Ops.push_back(InOps.back());
}
|
2007-05-01 23:15:47 +02:00
|
|
|
|
2010-12-23 18:13:18 +01:00
|
|
|
/// findGlueUse - Return use of MVT::Glue value produced by the specified
|
2009-05-08 20:51:58 +02:00
|
|
|
/// SDNode.
|
|
|
|
///
|
2010-12-23 18:13:18 +01:00
|
|
|
static SDNode *findGlueUse(SDNode *N) {
|
2009-05-08 20:51:58 +02:00
|
|
|
unsigned FlagResNo = N->getNumValues()-1;
|
|
|
|
for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
|
|
|
|
SDUse &Use = I.getUse();
|
|
|
|
if (Use.getResNo() == FlagResNo)
|
|
|
|
return Use.getUser();
|
|
|
|
}
|
2014-04-14 02:51:57 +02:00
|
|
|
return nullptr;
|
2009-05-08 20:51:58 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/// findNonImmUse - Return true if "Use" is a non-immediate use of "Def".
|
2017-06-02 09:11:00 +02:00
|
|
|
/// This function iteratively traverses up the operand chain, ignoring
|
2009-05-08 20:51:58 +02:00
|
|
|
/// certain nodes.
|
|
|
|
static bool findNonImmUse(SDNode *Use, SDNode* Def, SDNode *ImmedUse,
|
2014-08-21 07:55:13 +02:00
|
|
|
SDNode *Root, SmallPtrSetImpl<SDNode*> &Visited,
|
2010-03-02 23:20:06 +01:00
|
|
|
bool IgnoreChains) {
|
2010-02-23 20:32:27 +01:00
|
|
|
// The NodeID's are given uniques ID's where a node ID is guaranteed to be
|
|
|
|
// greater than all of its (recursive) operands. If we scan to a point where
|
|
|
|
// 'use' is smaller than the node we're scanning for, then we know we will
|
|
|
|
// never find it.
|
|
|
|
//
|
|
|
|
// The Use may be -1 (unassigned) if it is a newly allocated node. This can
|
2010-12-23 18:13:18 +01:00
|
|
|
// happen because we scan down to newly selected nodes in the case of glue
|
2010-02-23 20:32:27 +01:00
|
|
|
// uses.
|
2017-06-02 09:11:00 +02:00
|
|
|
std::vector<SDNode *> WorkList;
|
|
|
|
WorkList.push_back(Use);
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2017-06-02 09:11:00 +02:00
|
|
|
while (!WorkList.empty()) {
|
|
|
|
Use = WorkList.back();
|
|
|
|
WorkList.pop_back();
|
|
|
|
if (Use->getNodeId() < Def->getNodeId() && Use->getNodeId() != -1)
|
|
|
|
continue;
|
2009-05-08 20:51:58 +02:00
|
|
|
|
2017-06-02 09:11:00 +02:00
|
|
|
// Don't revisit nodes if we already scanned it and didn't fail, we know we
|
|
|
|
// won't fail if we scan it again.
|
|
|
|
if (!Visited.insert(Use).second)
|
2010-03-02 23:20:06 +01:00
|
|
|
continue;
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2017-06-02 09:11:00 +02:00
|
|
|
for (const SDValue &Op : Use->op_values()) {
|
|
|
|
// Ignore chain uses, they are validated by HandleMergeInputChains.
|
|
|
|
if (Op.getValueType() == MVT::Other && IgnoreChains)
|
|
|
|
continue;
|
2009-05-08 20:51:58 +02:00
|
|
|
|
2017-06-02 09:11:00 +02:00
|
|
|
SDNode *N = Op.getNode();
|
|
|
|
if (N == Def) {
|
|
|
|
if (Use == ImmedUse || Use == Root)
|
|
|
|
continue; // We are not looking for immediate use.
|
|
|
|
assert(N != Root);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Traverse up the operand chain.
|
|
|
|
WorkList.push_back(N);
|
|
|
|
}
|
2009-05-08 20:51:58 +02:00
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2010-02-15 20:41:07 +01:00
|
|
|
/// IsProfitableToFold - Returns true if it's profitable to fold the specific
|
|
|
|
/// operand node N of U during instruction selection that starts at Root.
|
|
|
|
bool SelectionDAGISel::IsProfitableToFold(SDValue N, SDNode *U,
|
|
|
|
SDNode *Root) const {
|
|
|
|
if (OptLevel == CodeGenOpt::None) return false;
|
|
|
|
return N.hasOneUse();
|
|
|
|
}
|
|
|
|
|
|
|
|
/// IsLegalToFold - Returns true if the specific operand node N of
|
|
|
|
/// U can be folded during instruction selection that starts at Root.
|
2010-03-02 23:20:06 +01:00
|
|
|
bool SelectionDAGISel::IsLegalToFold(SDValue N, SDNode *U, SDNode *Root,
|
2010-04-17 17:26:15 +02:00
|
|
|
CodeGenOpt::Level OptLevel,
|
|
|
|
bool IgnoreChains) {
|
2009-05-08 20:51:58 +02:00
|
|
|
if (OptLevel == CodeGenOpt::None) return false;
|
|
|
|
|
|
|
|
// If Root use can somehow reach N through a path that that doesn't contain
|
|
|
|
// U then folding N would create a cycle. e.g. In the following
|
|
|
|
// diagram, Root can reach N through X. If N is folded into into Root, then
|
|
|
|
// X is both a predecessor and a successor of U.
|
|
|
|
//
|
|
|
|
// [N*] //
|
|
|
|
// ^ ^ //
|
|
|
|
// / \ //
|
|
|
|
// [U*] [X]? //
|
|
|
|
// ^ ^ //
|
|
|
|
// \ / //
|
|
|
|
// \ / //
|
|
|
|
// [Root*] //
|
|
|
|
//
|
|
|
|
// * indicates nodes to be folded together.
|
|
|
|
//
|
2010-12-23 18:13:18 +01:00
|
|
|
// If Root produces glue, then it gets (even more) interesting. Since it
|
|
|
|
// will be "glued" together with its glue use in the scheduler, we need to
|
2009-05-08 20:51:58 +02:00
|
|
|
// check if it might reach N.
|
|
|
|
//
|
|
|
|
// [N*] //
|
|
|
|
// ^ ^ //
|
|
|
|
// / \ //
|
|
|
|
// [U*] [X]? //
|
|
|
|
// ^ ^ //
|
|
|
|
// \ \ //
|
|
|
|
// \ | //
|
|
|
|
// [Root*] | //
|
|
|
|
// ^ | //
|
|
|
|
// f | //
|
|
|
|
// | / //
|
|
|
|
// [Y] / //
|
|
|
|
// ^ / //
|
|
|
|
// f / //
|
|
|
|
// | / //
|
2010-12-23 18:13:18 +01:00
|
|
|
// [GU] //
|
2009-05-08 20:51:58 +02:00
|
|
|
//
|
2010-12-23 18:13:18 +01:00
|
|
|
// If GU (glue use) indirectly reaches N (the load), and Root folds N
|
|
|
|
// (call it Fold), then X is a predecessor of GU and a successor of
|
|
|
|
// Fold. But since Fold and GU are glued together, this will create
|
2009-05-08 20:51:58 +02:00
|
|
|
// a cycle in the scheduling graph.
|
|
|
|
|
2010-12-23 18:13:18 +01:00
|
|
|
// If the node has glue, walk down the graph to the "lowest" node in the
|
|
|
|
// glueged set.
|
2009-08-11 00:56:29 +02:00
|
|
|
EVT VT = Root->getValueType(Root->getNumValues()-1);
|
2010-12-21 03:38:05 +01:00
|
|
|
while (VT == MVT::Glue) {
|
2010-12-23 18:13:18 +01:00
|
|
|
SDNode *GU = findGlueUse(Root);
|
2014-04-14 02:51:57 +02:00
|
|
|
if (!GU)
|
2009-05-08 20:51:58 +02:00
|
|
|
break;
|
2010-12-23 18:13:18 +01:00
|
|
|
Root = GU;
|
2009-05-08 20:51:58 +02:00
|
|
|
VT = Root->getValueType(Root->getNumValues()-1);
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-12-23 18:13:18 +01:00
|
|
|
// If our query node has a glue result with a use, we've walked up it. If
|
2010-03-05 07:19:13 +01:00
|
|
|
// the user (which has already been selected) has a chain or indirectly uses
|
|
|
|
// the chain, our WalkChainUsers predicate will not consider it. Because of
|
|
|
|
// this, we cannot ignore chains in this predicate.
|
|
|
|
IgnoreChains = false;
|
2009-05-08 20:51:58 +02:00
|
|
|
}
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-03-05 06:49:45 +01:00
|
|
|
SmallPtrSet<SDNode*, 16> Visited;
|
|
|
|
return !findNonImmUse(Root, N.getNode(), U, Root, Visited, IgnoreChains);
|
2009-05-08 20:51:58 +02:00
|
|
|
}
|
|
|
|
|
2016-05-11 00:58:26 +02:00
|
|
|
/// Select an INLINEASM node by rebuilding it with its memory-constraint
/// operands replaced by target-selected addressing operands.
void SelectionDAGISel::Select_INLINEASM(SDNode *N) {
  SDLoc Loc(N);

  // Copy all operands, then let the target rewrite the memory operands.
  std::vector<SDValue> Operands(N->op_begin(), N->op_end());
  SelectInlineAsmMemoryOperands(Operands, Loc);

  // Inline asm produces a chain and a glue result.
  const EVT ResultVTs[] = {MVT::Other, MVT::Glue};
  SDValue Replacement = CurDAG->getNode(ISD::INLINEASM, Loc, ResultVTs,
                                        Operands);
  // Mark the replacement as already selected.
  Replacement->setNodeId(-1);
  ReplaceUses(N, Replacement.getNode());
  CurDAG->RemoveDeadNode(N);
}
|
|
|
|
|
2016-05-11 00:58:26 +02:00
|
|
|
/// Select a READ_REGISTER intrinsic node: look up the named physical register
/// and replace the node with a CopyFromReg of it.
void SelectionDAGISel::Select_READ_REGISTER(SDNode *Op) {
  SDLoc dl(Op);
  // Operand 1 carries metadata whose first operand is the register name.
  // These were dyn_cast<> with the result dereferenced unconditionally, which
  // is UB if the cast ever fails; cast<> asserts instead.
  MDNodeSDNode *MD = cast<MDNodeSDNode>(Op->getOperand(1));
  const MDString *RegStr = cast<MDString>(MD->getMD()->getOperand(0));
  unsigned Reg =
      TLI->getRegisterByName(RegStr->getString().data(), Op->getValueType(0),
                             *CurDAG);
  SDValue New = CurDAG->getCopyFromReg(
                        Op->getOperand(0), dl, Reg, Op->getValueType(0));
  // Mark the replacement as already selected and splice it in.
  New->setNodeId(-1);
  ReplaceUses(Op, New.getNode());
  CurDAG->RemoveDeadNode(Op);
}
|
|
|
|
|
2016-05-11 00:58:26 +02:00
|
|
|
/// Select a WRITE_REGISTER intrinsic node: look up the named physical register
/// and replace the node with a CopyToReg into it.
void SelectionDAGISel::Select_WRITE_REGISTER(SDNode *Op) {
  SDLoc dl(Op);
  // Operand 1 carries metadata whose first operand is the register name.
  // These were dyn_cast<> with the result dereferenced unconditionally, which
  // is UB if the cast ever fails; cast<> asserts instead.
  MDNodeSDNode *MD = cast<MDNodeSDNode>(Op->getOperand(1));
  const MDString *RegStr = cast<MDString>(MD->getMD()->getOperand(0));
  unsigned Reg = TLI->getRegisterByName(RegStr->getString().data(),
                                        Op->getOperand(2).getValueType(),
                                        *CurDAG);
  SDValue New = CurDAG->getCopyToReg(
                        Op->getOperand(0), dl, Reg, Op->getOperand(2));
  // Mark the replacement as already selected and splice it in.
  New->setNodeId(-1);
  ReplaceUses(Op, New.getNode());
  CurDAG->RemoveDeadNode(Op);
}
|
|
|
|
|
2016-05-11 00:58:26 +02:00
|
|
|
/// Select an UNDEF node by morphing it in place into an IMPLICIT_DEF machine
/// node of the same value type.
void SelectionDAGISel::Select_UNDEF(SDNode *N) {
  CurDAG->SelectNodeTo(N, TargetOpcode::IMPLICIT_DEF, N->getValueType(0));
}
|
|
|
|
|
2010-02-28 23:37:22 +01:00
|
|
|
/// GetVBR - decode a vbr encoding whose top bit is set.
|
2015-09-10 22:34:57 +02:00
|
|
|
LLVM_ATTRIBUTE_ALWAYS_INLINE static inline uint64_t
|
2010-02-28 23:37:22 +01:00
|
|
|
GetVBR(uint64_t Val, const unsigned char *MatcherTable, unsigned &Idx) {
|
|
|
|
assert(Val >= 128 && "Not a VBR");
|
|
|
|
Val &= 127; // Remove first vbr bit.
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-02-28 23:37:22 +01:00
|
|
|
unsigned Shift = 7;
|
|
|
|
uint64_t NextBits;
|
|
|
|
do {
|
2010-02-28 23:38:43 +01:00
|
|
|
NextBits = MatcherTable[Idx++];
|
2010-02-28 23:37:22 +01:00
|
|
|
Val |= (NextBits&127) << Shift;
|
|
|
|
Shift += 7;
|
|
|
|
} while (NextBits & 128);
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-02-28 23:37:22 +01:00
|
|
|
return Val;
|
|
|
|
}
|
|
|
|
|
2016-05-06 00:37:45 +02:00
|
|
|
/// When a match is complete, this method updates uses of interior chain results
/// to use the new results.
///
/// NodeToMatch is the root of the matched pattern, InputChain the single chain
/// that replaces every interior chain result, ChainNodesMatched the matched
/// nodes producing chains, and isMorphNodeTo indicates the root was morphed in
/// place (so its own results must not be rewritten here).
void SelectionDAGISel::UpdateChains(
    SDNode *NodeToMatch, SDValue InputChain,
    SmallVectorImpl<SDNode *> &ChainNodesMatched, bool isMorphNodeTo) {
  SmallVector<SDNode*, 4> NowDeadNodes;

  // Now that all the normal results are replaced, we replace the chain and
  // glue results if present.
  if (!ChainNodesMatched.empty()) {
    assert(InputChain.getNode() &&
           "Matched input chains but didn't produce a chain");
    // Loop over all of the nodes we matched that produced a chain result.
    // Replace all the chain results with the final chain we ended up with.
    for (unsigned i = 0, e = ChainNodesMatched.size(); i != e; ++i) {
      SDNode *ChainNode = ChainNodesMatched[i];
      // If ChainNode is null, it's because we replaced it on a previous
      // iteration and we cleared it out of the map. Just skip it.
      if (!ChainNode)
        continue;

      assert(ChainNode->getOpcode() != ISD::DELETED_NODE &&
             "Deleted node left in chain");

      // Don't replace the results of the root node if we're doing a
      // MorphNodeTo.
      if (ChainNode == NodeToMatch && isMorphNodeTo)
        continue;

      // The chain result is the last value; if the node also produces glue,
      // the chain sits one slot before the glue result.
      SDValue ChainVal = SDValue(ChainNode, ChainNode->getNumValues()-1);
      if (ChainVal.getValueType() == MVT::Glue)
        ChainVal = ChainVal.getValue(ChainVal->getNumValues()-2);
      assert(ChainVal.getValueType() == MVT::Other && "Not a chain?");
      // The RAUW below may CSE away or delete other nodes still listed in
      // ChainNodesMatched; this listener nulls out any such entry so later
      // iterations skip it instead of touching a dead node.
      SelectionDAG::DAGNodeDeletedListener NDL(
          *CurDAG, [&](SDNode *N, SDNode *E) {
            std::replace(ChainNodesMatched.begin(), ChainNodesMatched.end(), N,
                         static_cast<SDNode *>(nullptr));
          });
      CurDAG->ReplaceAllUsesOfValueWith(ChainVal, InputChain);

      // If the node became dead and we haven't already seen it, delete it.
      if (ChainNode != NodeToMatch && ChainNode->use_empty() &&
          !std::count(NowDeadNodes.begin(), NowDeadNodes.end(), ChainNode))
        NowDeadNodes.push_back(ChainNode);
    }
  }

  // Batch-delete everything that went dead during the rewrites above.
  if (!NowDeadNodes.empty())
    CurDAG->RemoveDeadNodes(NowDeadNodes);

  DEBUG(dbgs() << "ISEL: Match complete!\n");
}
|
|
|
|
|
2010-03-02 03:22:10 +01:00
|
|
|
// Result of walking the users of a chained node during chain analysis (see
// WalkChainUsers below for the full description of each case).
enum ChainResult {
  CR_Simple,              // No interesting users found.
  CR_InducesCycle,        // A non-pattern node sits between pattern nodes;
                          // folding would induce a cycle.
  CR_LeadsToInteriorNode  // A user is itself part of the pattern being matched.
};
|
|
|
|
|
|
|
|
/// WalkChainUsers - Walk down the users of the specified chained node that is
|
|
|
|
/// part of the pattern we're matching, looking at all of the users we find.
|
|
|
|
/// This determines whether something is an interior node, whether we have a
|
|
|
|
/// non-pattern node in between two pattern nodes (which prevent folding because
|
|
|
|
/// it would induce a cycle) and whether we have a TokenFactor node sandwiched
|
|
|
|
/// between pattern nodes (in which case the TF becomes part of the pattern).
|
|
|
|
///
|
|
|
|
/// The walk we do here is guaranteed to be small because we quickly get down to
|
|
|
|
/// already selected nodes "below" us.
|
2010-12-24 05:28:06 +01:00
|
|
|
/// Recursively walk users of the chain result of \p ChainedNode and classify
/// how the chained region relates to the pattern being matched.
///
/// Returns:
///  - CR_Simple:              nothing interesting below us.
///  - CR_InducesCycle:        folding the pattern would create a cycle; abort.
///  - CR_LeadsToInteriorNode: a use leads back into the pattern; any
///    TokenFactor found on the way becomes part of the match and is appended
///    to \p ChainedNodesInPattern / \p InteriorChainedNodes.
///
/// \p TokenFactorResult memoizes the classification of each TokenFactor so the
/// walk stays linear instead of exponential on TokenFactor-heavy DAGs.
static ChainResult
WalkChainUsers(const SDNode *ChainedNode,
               SmallVectorImpl<SDNode *> &ChainedNodesInPattern,
               DenseMap<const SDNode *, ChainResult> &TokenFactorResult,
               SmallVectorImpl<SDNode *> &InteriorChainedNodes) {
  ChainResult Result = CR_Simple;

  for (SDNode::use_iterator UI = ChainedNode->use_begin(),
         E = ChainedNode->use_end(); UI != E; ++UI) {
    // Make sure the use is of the chain, not some other value we produce.
    if (UI.getUse().getValueType() != MVT::Other) continue;

    SDNode *User = *UI;

    if (User->getOpcode() == ISD::HANDLENODE)  // Root of the graph.
      continue;

    // If we see an already-selected machine node, then we've gone beyond the
    // pattern that we're selecting down into the already selected chunk of the
    // DAG.
    unsigned UserOpcode = User->getOpcode();
    if (User->isMachineOpcode() ||
        UserOpcode == ISD::CopyToReg ||
        UserOpcode == ISD::CopyFromReg ||
        UserOpcode == ISD::INLINEASM ||
        UserOpcode == ISD::EH_LABEL ||
        UserOpcode == ISD::LIFETIME_START ||
        UserOpcode == ISD::LIFETIME_END) {
      // If their node ID got reset to -1 then they've already been selected.
      // Treat them like a MachineOpcode.
      if (User->getNodeId() == -1)
        continue;
    }

    // If we have a TokenFactor, we handle it specially.
    if (User->getOpcode() != ISD::TokenFactor) {
      // If the node isn't a token factor and isn't part of our pattern, then
      // it must be a random chained node in between two nodes we're
      // selecting. This happens when we have something like:
      //   x = load ptr
      //   call
      //   y = x+4
      //   store y -> ptr
      // Because we structurally match the load/store as a read/modify/write,
      // but the call is chained between them.  We cannot fold in this case
      // because it would induce a cycle in the graph.
      // Note: is_contained short-circuits on the first hit, unlike the old
      // std::count-based membership test.
      if (!llvm::is_contained(ChainedNodesInPattern, User))
        return CR_InducesCycle;

      // Otherwise we found a node that is part of our pattern.  For example
      // in:
      //   x = load ptr
      //   y = x+4
      //   store y -> ptr
      // This would happen when we're scanning down from the load and see the
      // store as a user.  Record that there is a use of ChainedNode that is
      // part of the pattern and keep scanning uses.
      Result = CR_LeadsToInteriorNode;
      InteriorChainedNodes.push_back(User);
      continue;
    }

    // If we found a TokenFactor, there are two cases to consider: first if
    // the TokenFactor is just hanging "below" the pattern we're matching
    // (i.e. no uses of the TF are in our pattern) we just want to ignore it.
    // Second, the TokenFactor can be sandwiched in between two chained nodes,
    // like so:
    //     [Load chain]
    //         ^
    //         |
    //       [Load]
    //       ^    ^
    //       |    |
    //      /      \-
    //     /        |
    // [TokenFactor] [Op]
    //     ^          ^
    //     |          |
    //      \        /
    //       \      /
    //       [Store]
    //
    // In this case, the TokenFactor becomes part of our match and we rewrite
    // it as a new TokenFactor.
    //
    // To distinguish these two cases, do a recursive walk down the uses.
    auto MemoizeResult = TokenFactorResult.find(User);
    bool Visited = MemoizeResult != TokenFactorResult.end();
    // Recursively walk chain users only if the result is not memoized.
    if (!Visited) {
      auto Res = WalkChainUsers(User, ChainedNodesInPattern, TokenFactorResult,
                                InteriorChainedNodes);
      MemoizeResult = TokenFactorResult.insert(std::make_pair(User, Res)).first;
    }
    switch (MemoizeResult->second) {
    case CR_Simple:
      // If the uses of the TokenFactor are just already-selected nodes, ignore
      // it, it is "below" our pattern.
      continue;
    case CR_InducesCycle:
      // If the uses of the TokenFactor lead to nodes that are not part of our
      // pattern that are not selected, folding would turn this into a cycle,
      // bail out now.
      return CR_InducesCycle;
    case CR_LeadsToInteriorNode:
      break;  // Otherwise, keep processing.
    }

    // Okay, we know we're in the interesting interior case.  The TokenFactor
    // is now going to be considered part of the pattern so that we rewrite
    // its uses (it may have uses that are not part of the pattern) with the
    // ultimate chain result of the generated code.  We will also add its
    // chain inputs as inputs to the ultimate TokenFactor we create.
    Result = CR_LeadsToInteriorNode;
    if (!Visited) {
      ChainedNodesInPattern.push_back(User);
      InteriorChainedNodes.push_back(User);
    }
  }

  return Result;
}
|
|
|
|
|
2010-03-02 01:00:03 +01:00
|
|
|
/// HandleMergeInputChains - This implements the OPC_EmitMergeInputChains
|
2010-03-02 20:34:59 +01:00
|
|
|
/// operation for when the pattern matched at least one node with a chains. The
|
2010-03-02 03:22:10 +01:00
|
|
|
/// input vector contains a list of all of the chained nodes that we match. We
|
|
|
|
/// must determine if this is a valid thing to cover (i.e. matching it won't
|
|
|
|
/// induce cycles in the DAG) and if so, creating a TokenFactor node. that will
|
|
|
|
/// be used as the input node chain for the generated nodes.
|
2010-03-02 01:00:03 +01:00
|
|
|
static SDValue
|
2010-03-02 03:22:10 +01:00
|
|
|
HandleMergeInputChains(SmallVectorImpl<SDNode*> &ChainNodesMatched,
|
2010-03-02 01:00:03 +01:00
|
|
|
SelectionDAG *CurDAG) {
|
2016-01-31 04:59:34 +01:00
|
|
|
// Used for memoization. Without it WalkChainUsers could take exponential
|
|
|
|
// time to run.
|
|
|
|
DenseMap<const SDNode *, ChainResult> TokenFactorResult;
|
2010-03-02 03:22:10 +01:00
|
|
|
// Walk all of the chained nodes we've matched, recursively scanning down the
|
|
|
|
// users of the chain result. This adds any TokenFactor nodes that are caught
|
|
|
|
// in between chained nodes to the chained and interior nodes list.
|
|
|
|
SmallVector<SDNode*, 3> InteriorChainedNodes;
|
|
|
|
for (unsigned i = 0, e = ChainNodesMatched.size(); i != e; ++i) {
|
|
|
|
if (WalkChainUsers(ChainNodesMatched[i], ChainNodesMatched,
|
2016-01-31 04:59:34 +01:00
|
|
|
TokenFactorResult,
|
2010-03-02 03:22:10 +01:00
|
|
|
InteriorChainedNodes) == CR_InducesCycle)
|
|
|
|
return SDValue(); // Would induce a cycle.
|
|
|
|
}
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-03-02 03:22:10 +01:00
|
|
|
// Okay, we have walked all the matched nodes and collected TokenFactor nodes
|
|
|
|
// that we are interested in. Form our input TokenFactor node.
|
2010-03-02 01:00:03 +01:00
|
|
|
SmallVector<SDValue, 3> InputChains;
|
|
|
|
for (unsigned i = 0, e = ChainNodesMatched.size(); i != e; ++i) {
|
2010-03-02 03:22:10 +01:00
|
|
|
// Add the input chain of this node to the InputChains list (which will be
|
|
|
|
// the operands of the generated TokenFactor) if it's not an interior node.
|
|
|
|
SDNode *N = ChainNodesMatched[i];
|
|
|
|
if (N->getOpcode() != ISD::TokenFactor) {
|
|
|
|
if (std::count(InteriorChainedNodes.begin(),InteriorChainedNodes.end(),N))
|
|
|
|
continue;
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-03-02 03:22:10 +01:00
|
|
|
// Otherwise, add the input chain.
|
|
|
|
SDValue InChain = ChainNodesMatched[i]->getOperand(0);
|
|
|
|
assert(InChain.getValueType() == MVT::Other && "Not a chain");
|
2010-03-02 01:00:03 +01:00
|
|
|
InputChains.push_back(InChain);
|
2010-03-02 03:22:10 +01:00
|
|
|
continue;
|
|
|
|
}
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-03-02 03:22:10 +01:00
|
|
|
// If we have a token factor, we want to add all inputs of the token factor
|
|
|
|
// that are not part of the pattern we're matching.
|
2015-06-26 21:37:02 +02:00
|
|
|
for (const SDValue &Op : N->op_values()) {
|
2010-03-02 03:22:10 +01:00
|
|
|
if (!std::count(ChainNodesMatched.begin(), ChainNodesMatched.end(),
|
2015-06-26 21:37:02 +02:00
|
|
|
Op.getNode()))
|
|
|
|
InputChains.push_back(Op);
|
2010-03-02 03:22:10 +01:00
|
|
|
}
|
2010-03-02 01:00:03 +01:00
|
|
|
}
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-03-02 01:00:03 +01:00
|
|
|
if (InputChains.size() == 1)
|
|
|
|
return InputChains[0];
|
2013-05-25 04:42:55 +02:00
|
|
|
return CurDAG->getNode(ISD::TokenFactor, SDLoc(ChainNodesMatched[0]),
|
2014-04-26 20:35:24 +02:00
|
|
|
MVT::Other, InputChains);
|
2010-12-24 05:28:06 +01:00
|
|
|
}
|
2010-02-28 23:37:22 +01:00
|
|
|
|
2010-03-02 07:55:04 +01:00
|
|
|
/// MorphNode - Handle morphing a node in place for the selector.
///
/// Wraps SelectionDAG::MorphNodeTo: rewrites \p Node into the machine node
/// \p TargetOpc with value list \p VTList and operands \p Ops, then patches up
/// any glue/chain result numbers that moved, and RAUWs the old node if
/// MorphNodeTo returned a pre-existing CSE'd node instead of updating in
/// place.  \p EmitNodeInfo carries the OPFL_* flags describing whether the
/// emitted node produces a chain and/or glue.
SDNode *SelectionDAGISel::
MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTList,
          ArrayRef<SDValue> Ops, unsigned EmitNodeInfo) {
  // It is possible we're using MorphNodeTo to replace a node with no
  // normal results with one that has a normal result (or we could be
  // adding a chain) and the input could have glue and chains as well.
  // In this case we need to shift the operands down.
  // FIXME: This is a horrible hack and broken in obscure cases, no worse
  // than the old isel though.
  int OldGlueResultNo = -1, OldChainResultNo = -1;

  // Record where the glue and chain results live on the OLD node.  By
  // convention glue (MVT::Glue) is the very last result and the chain
  // (MVT::Other) immediately precedes it when both are present.
  unsigned NTMNumResults = Node->getNumValues();
  if (Node->getValueType(NTMNumResults-1) == MVT::Glue) {
    OldGlueResultNo = NTMNumResults-1;
    if (NTMNumResults != 1 &&
        Node->getValueType(NTMNumResults-2) == MVT::Other)
      OldChainResultNo = NTMNumResults-2;
  } else if (Node->getValueType(NTMNumResults-1) == MVT::Other)
    OldChainResultNo = NTMNumResults-1;

  // Call the underlying SelectionDAG routine to do the transmogrification. Note
  // that this deletes operands of the old node that become dead.
  // NOTE(review): target/machine opcodes are passed to MorphNodeTo in
  // complemented (~Opc) form — this matches SelectionDAG's encoding of
  // machine opcodes; confirm against SelectionDAG::MorphNodeTo.
  SDNode *Res = CurDAG->MorphNodeTo(Node, ~TargetOpc, VTList, Ops);

  // MorphNodeTo can operate in two ways: if an existing node with the
  // specified operands exists, it can just return it.  Otherwise, it
  // updates the node in place to have the requested operands.
  if (Res == Node) {
    // If we updated the node in place, reset the node ID.  To the isel,
    // this should be just like a newly allocated machine node.
    Res->setNodeId(-1);
  }

  unsigned ResNumResults = Res->getNumValues();
  // Move the glue if needed.
  if ((EmitNodeInfo & OPFL_GlueOutput) && OldGlueResultNo != -1 &&
      (unsigned)OldGlueResultNo != ResNumResults-1)
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(Node, OldGlueResultNo),
                                      SDValue(Res, ResNumResults-1));

  // Skip past the glue result so the chain fix-up below targets the right
  // result number on the new node.
  if ((EmitNodeInfo & OPFL_GlueOutput) != 0)
    --ResNumResults;

  // Move the chain reference if needed.
  if ((EmitNodeInfo & OPFL_Chain) && OldChainResultNo != -1 &&
      (unsigned)OldChainResultNo != ResNumResults-1)
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(Node, OldChainResultNo),
                                      SDValue(Res, ResNumResults-1));

  // Otherwise, no replacement happened because the node already exists. Replace
  // Uses of the old node with the new one.
  if (Res != Node) {
    CurDAG->ReplaceAllUsesWith(Node, Res);
    CurDAG->RemoveDeadNode(Node);
  }

  return Res;
}
|
|
|
|
|
2012-09-16 18:48:25 +02:00
|
|
|
/// CheckSame - Implements OP_CheckSame.
|
2015-09-10 22:34:57 +02:00
|
|
|
LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
|
2010-03-03 08:46:25 +01:00
|
|
|
CheckSame(const unsigned char *MatcherTable, unsigned &MatcherIndex,
|
2010-09-22 00:00:25 +02:00
|
|
|
SDValue N,
|
2017-02-04 03:00:53 +01:00
|
|
|
const SmallVectorImpl<std::pair<SDValue, SDNode*>> &RecordedNodes) {
|
2010-03-03 08:46:25 +01:00
|
|
|
// Accept if it is exactly the same as a previously recorded node.
|
|
|
|
unsigned RecNo = MatcherTable[MatcherIndex++];
|
|
|
|
assert(RecNo < RecordedNodes.size() && "Invalid CheckSame");
|
2010-09-22 00:00:25 +02:00
|
|
|
return N == RecordedNodes[RecNo].first;
|
2010-03-03 08:46:25 +01:00
|
|
|
}
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2013-10-05 07:38:16 +02:00
|
|
|
/// CheckChildSame - Implements OP_CheckChildXSame.
|
2015-09-10 22:34:57 +02:00
|
|
|
LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
|
2013-10-05 07:38:16 +02:00
|
|
|
CheckChildSame(const unsigned char *MatcherTable, unsigned &MatcherIndex,
|
2017-02-04 03:00:53 +01:00
|
|
|
SDValue N,
|
|
|
|
const SmallVectorImpl<std::pair<SDValue, SDNode*>> &RecordedNodes,
|
|
|
|
unsigned ChildNo) {
|
2013-10-05 07:38:16 +02:00
|
|
|
if (ChildNo >= N.getNumOperands())
|
|
|
|
return false; // Match fails if out of range child #.
|
|
|
|
return ::CheckSame(MatcherTable, MatcherIndex, N.getOperand(ChildNo),
|
|
|
|
RecordedNodes);
|
|
|
|
}
|
|
|
|
|
2010-03-03 08:31:15 +01:00
|
|
|
/// CheckPatternPredicate - Implements OP_CheckPatternPredicate.
|
2015-09-10 22:34:57 +02:00
|
|
|
LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
|
2010-03-03 08:31:15 +01:00
|
|
|
CheckPatternPredicate(const unsigned char *MatcherTable, unsigned &MatcherIndex,
|
2012-05-01 01:41:30 +02:00
|
|
|
const SelectionDAGISel &SDISel) {
|
2010-03-03 08:31:15 +01:00
|
|
|
return SDISel.CheckPatternPredicate(MatcherTable[MatcherIndex++]);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// CheckNodePredicate - Implements OP_CheckNodePredicate.
|
2015-09-10 22:34:57 +02:00
|
|
|
LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
|
2010-03-03 08:31:15 +01:00
|
|
|
CheckNodePredicate(const unsigned char *MatcherTable, unsigned &MatcherIndex,
|
2012-05-01 01:41:30 +02:00
|
|
|
const SelectionDAGISel &SDISel, SDNode *N) {
|
2010-03-03 08:31:15 +01:00
|
|
|
return SDISel.CheckNodePredicate(N, MatcherTable[MatcherIndex++]);
|
|
|
|
}
|
|
|
|
|
2015-09-10 22:34:57 +02:00
|
|
|
/// Implements OP_CheckOpcode: compare N's opcode against a 16-bit opcode
/// stored little-endian in the next two matcher-table bytes.
LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
CheckOpcode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
            SDNode *N) {
  // The opcode occupies two consecutive table bytes, low byte first.
  const unsigned LoByte = MatcherTable[MatcherIndex++];
  const unsigned HiByte = MatcherTable[MatcherIndex++];
  const uint16_t Opc = static_cast<uint16_t>(LoByte | (HiByte << 8));
  return Opc == N->getOpcode();
}
|
|
|
|
|
2015-09-10 22:34:57 +02:00
|
|
|
/// Implements OP_CheckType: succeed iff N's value type matches the simple VT
/// stored in the table, treating MVT::iPTR as the target's pointer type.
LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
CheckType(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N,
          const TargetLowering *TLI, const DataLayout &DL) {
  const MVT::SimpleValueType VT =
      static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]);
  if (N.getValueType() == VT)
    return true;

  // iPTR in the table is a stand-in for the target's pointer type.
  if (VT != MVT::iPTR)
    return false;
  return N.getValueType() == TLI->getPointerTy(DL);
}
|
|
|
|
|
2015-09-10 22:34:57 +02:00
|
|
|
/// Implements OP_CheckChildXType: apply the CheckType test to operand
/// \p ChildNo of \p N; out-of-range children fail the match.
LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
CheckChildType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
               SDValue N, const TargetLowering *TLI, const DataLayout &DL,
               unsigned ChildNo) {
  // Match fails if the requested child # is out of range.
  if (N.getNumOperands() <= ChildNo)
    return false;
  SDValue Child = N.getOperand(ChildNo);
  return ::CheckType(MatcherTable, MatcherIndex, Child, TLI, DL);
}
|
|
|
|
|
2015-09-10 22:34:57 +02:00
|
|
|
/// Implements OP_CheckCondCode: succeed iff the condition code carried by
/// \p N (a CondCodeSDNode) equals the one encoded in the next table byte.
LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
CheckCondCode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
              SDValue N) {
  const ISD::CondCode Expected =
      static_cast<ISD::CondCode>(MatcherTable[MatcherIndex++]);
  return cast<CondCodeSDNode>(N)->get() == Expected;
}
|
|
|
|
|
2015-09-10 22:34:57 +02:00
|
|
|
/// Implements OP_CheckValueType: succeed iff the VT carried by \p N (a
/// VTSDNode) matches the simple VT from the table, treating MVT::iPTR as the
/// target's pointer type.
LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
CheckValueType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
               SDValue N, const TargetLowering *TLI, const DataLayout &DL) {
  const MVT::SimpleValueType VT =
      static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]);
  const EVT NodeVT = cast<VTSDNode>(N)->getVT();
  if (NodeVT == VT)
    return true;

  // iPTR in the table is a stand-in for the target's pointer type.
  if (VT != MVT::iPTR)
    return false;
  return NodeVT == TLI->getPointerTy(DL);
}
|
|
|
|
|
2015-09-10 22:34:57 +02:00
|
|
|
/// Implements OP_CheckInteger: succeed iff \p N is a ConstantSDNode whose
/// sign-extended value equals the (possibly VBR-encoded) integer from the
/// matcher table.
LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
CheckInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex,
             SDValue N) {
  int64_t Expected = MatcherTable[MatcherIndex++];
  // A set high bit means the value continues as a variable-bit-rate integer.
  if (Expected & 128)
    Expected = GetVBR(Expected, MatcherTable, MatcherIndex);

  if (auto *C = dyn_cast<ConstantSDNode>(N))
    return C->getSExtValue() == Expected;
  return false;
}
|
|
|
|
|
2015-09-10 22:34:57 +02:00
|
|
|
/// Implements OP_CheckChildXInteger: apply the CheckInteger test to operand
/// \p ChildNo of \p N; out-of-range children fail the match.
LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
CheckChildInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex,
                  SDValue N, unsigned ChildNo) {
  // Match fails if the requested child # is out of range.
  if (N.getNumOperands() <= ChildNo)
    return false;
  SDValue Child = N.getOperand(ChildNo);
  return ::CheckInteger(MatcherTable, MatcherIndex, Child);
}
|
|
|
|
|
2015-09-10 22:34:57 +02:00
|
|
|
/// Implements OP_CheckAndImm: succeed iff \p N is an ISD::AND whose RHS is a
/// constant compatible (per the target's CheckAndMask) with the immediate
/// decoded from the matcher table.
LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
CheckAndImm(const unsigned char *MatcherTable, unsigned &MatcherIndex,
            SDValue N, const SelectionDAGISel &SDISel) {
  // Decode the immediate first so MatcherIndex always advances past it,
  // even when the opcode test below fails.
  int64_t Val = MatcherTable[MatcherIndex++];
  if (Val & 128)
    Val = GetVBR(Val, MatcherTable, MatcherIndex);

  if (N->getOpcode() != ISD::AND)
    return false;

  if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(1)))
    return SDISel.CheckAndMask(N.getOperand(0), C, Val);
  return false;
}
|
|
|
|
|
2015-09-10 22:34:57 +02:00
|
|
|
/// Implements OP_CheckOrImm: succeed iff \p N is an ISD::OR whose RHS is a
/// constant compatible (per the target's CheckOrMask) with the immediate
/// decoded from the matcher table.
LLVM_ATTRIBUTE_ALWAYS_INLINE static inline bool
CheckOrImm(const unsigned char *MatcherTable, unsigned &MatcherIndex,
           SDValue N, const SelectionDAGISel &SDISel) {
  // Decode the immediate first so MatcherIndex always advances past it,
  // even when the opcode test below fails.
  int64_t Val = MatcherTable[MatcherIndex++];
  if (Val & 128)
    Val = GetVBR(Val, MatcherTable, MatcherIndex);

  if (N->getOpcode() != ISD::OR)
    return false;

  if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(1)))
    return SDISel.CheckOrMask(N.getOperand(0), C, Val);
  return false;
}
|
|
|
|
|
2010-03-03 08:31:15 +01:00
|
|
|
/// IsPredicateKnownToFail - If we know how and can do so without pushing a
/// scope, evaluate the current node.  If the current predicate is known to
/// fail, set Result=true and return anything.  If the current predicate is
/// known to pass, set Result=false and return the MatcherIndex to continue
/// with.  If the current predicate is unknown, set Result=false and return the
/// MatcherIndex to continue with.
///
/// This is a peephole used by OPC_Scope handling: each case mirrors one of
/// the Check* helpers above and inverts its answer into Result.  The default
/// case rewinds Index by one (undoing the opcode read) because the opcode is
/// not a cheaply-evaluable predicate and must be interpreted normally.
static unsigned IsPredicateKnownToFail(const unsigned char *Table,
                                       unsigned Index, SDValue N,
                                       bool &Result,
                                       const SelectionDAGISel &SDISel,
                 SmallVectorImpl<std::pair<SDValue, SDNode*>> &RecordedNodes) {
  switch (Table[Index++]) {
  default:
    Result = false;
    return Index-1;  // Could not evaluate this predicate.
  case SelectionDAGISel::OPC_CheckSame:
    Result = !::CheckSame(Table, Index, N, RecordedNodes);
    return Index;
  case SelectionDAGISel::OPC_CheckChild0Same:
  case SelectionDAGISel::OPC_CheckChild1Same:
  case SelectionDAGISel::OPC_CheckChild2Same:
  case SelectionDAGISel::OPC_CheckChild3Same:
    // The child number is encoded as the distance from the Child0 opcode.
    Result = !::CheckChildSame(Table, Index, N, RecordedNodes,
                        Table[Index-1] - SelectionDAGISel::OPC_CheckChild0Same);
    return Index;
  case SelectionDAGISel::OPC_CheckPatternPredicate:
    Result = !::CheckPatternPredicate(Table, Index, SDISel);
    return Index;
  case SelectionDAGISel::OPC_CheckPredicate:
    Result = !::CheckNodePredicate(Table, Index, SDISel, N.getNode());
    return Index;
  case SelectionDAGISel::OPC_CheckOpcode:
    Result = !::CheckOpcode(Table, Index, N.getNode());
    return Index;
  case SelectionDAGISel::OPC_CheckType:
    Result = !::CheckType(Table, Index, N, SDISel.TLI,
                          SDISel.CurDAG->getDataLayout());
    return Index;
  case SelectionDAGISel::OPC_CheckChild0Type:
  case SelectionDAGISel::OPC_CheckChild1Type:
  case SelectionDAGISel::OPC_CheckChild2Type:
  case SelectionDAGISel::OPC_CheckChild3Type:
  case SelectionDAGISel::OPC_CheckChild4Type:
  case SelectionDAGISel::OPC_CheckChild5Type:
  case SelectionDAGISel::OPC_CheckChild6Type:
  case SelectionDAGISel::OPC_CheckChild7Type:
    // The child number is encoded as the distance from the Child0 opcode.
    Result = !::CheckChildType(
        Table, Index, N, SDISel.TLI, SDISel.CurDAG->getDataLayout(),
        Table[Index - 1] - SelectionDAGISel::OPC_CheckChild0Type);
    return Index;
  case SelectionDAGISel::OPC_CheckCondCode:
    Result = !::CheckCondCode(Table, Index, N);
    return Index;
  case SelectionDAGISel::OPC_CheckValueType:
    Result = !::CheckValueType(Table, Index, N, SDISel.TLI,
                               SDISel.CurDAG->getDataLayout());
    return Index;
  case SelectionDAGISel::OPC_CheckInteger:
    Result = !::CheckInteger(Table, Index, N);
    return Index;
  case SelectionDAGISel::OPC_CheckChild0Integer:
  case SelectionDAGISel::OPC_CheckChild1Integer:
  case SelectionDAGISel::OPC_CheckChild2Integer:
  case SelectionDAGISel::OPC_CheckChild3Integer:
  case SelectionDAGISel::OPC_CheckChild4Integer:
    // The child number is encoded as the distance from the Child0 opcode.
    Result = !::CheckChildInteger(Table, Index, N,
                     Table[Index-1] - SelectionDAGISel::OPC_CheckChild0Integer);
    return Index;
  case SelectionDAGISel::OPC_CheckAndImm:
    Result = !::CheckAndImm(Table, Index, N, SDISel);
    return Index;
  case SelectionDAGISel::OPC_CheckOrImm:
    Result = !::CheckOrImm(Table, Index, N, SDISel);
    return Index;
  }
}
|
|
|
|
|
2010-04-15 19:08:50 +02:00
|
|
|
namespace {

/// MatchScope - A saved point in the matcher's state, pushed when an
/// OPC_Scope is entered.  If the match attempt inside the scope fails, the
/// interpreter restores all of these fields and resumes at FailIndex.
struct MatchScope {
  /// FailIndex - If this match fails, this is the index to continue with.
  unsigned FailIndex;

  /// NodeStack - The node stack when the scope was formed.
  SmallVector<SDValue, 4> NodeStack;

  /// NumRecordedNodes - The number of recorded nodes when the scope was formed.
  unsigned NumRecordedNodes;

  /// NumMatchedMemRefs - The number of matched memref entries.
  unsigned NumMatchedMemRefs;

  /// InputChain/InputGlue - The current chain/glue
  SDValue InputChain, InputGlue;

  /// HasChainNodesMatched - True if the ChainNodesMatched list is non-empty.
  bool HasChainNodesMatched;
};

/// \brief A DAG update listener to keep the matching state
/// (i.e. RecordedNodes and MatchScope) uptodate if the target is allowed to
/// change the DAG while matching.  X86 addressing mode matcher is an example
/// for this.
class MatchStateUpdater : public SelectionDAG::DAGUpdateListener
{
  // Pointer to the root being matched, so it can be retargeted if the DAG
  // replaces it; the referenced vectors are owned by SelectCodeCommon.
  SDNode **NodeToMatch;
  SmallVectorImpl<std::pair<SDValue, SDNode *>> &RecordedNodes;
  SmallVectorImpl<MatchScope> &MatchScopes;

public:
  MatchStateUpdater(SelectionDAG &DAG, SDNode **NodeToMatch,
                    SmallVectorImpl<std::pair<SDValue, SDNode *>> &RN,
                    SmallVectorImpl<MatchScope> &MS)
      : SelectionDAG::DAGUpdateListener(DAG), NodeToMatch(NodeToMatch),
        RecordedNodes(RN), MatchScopes(MS) {}

  // Called by the DAG whenever node N is CSE'd into / replaced by E; patch
  // every stale reference in the saved matcher state to point at E.
  void NodeDeleted(SDNode *N, SDNode *E) override {
    // Some early-returns here to avoid the search if we deleted the node or
    // if the update comes from MorphNodeTo (MorphNodeTo is the last thing we
    // do, so it's unnecessary to update matching state at that point).
    // Neither of these can occur currently because we only install this
    // update listener during matching a complex patterns.
    if (!E || E->isMachineOpcode())
      return;
    // Check if NodeToMatch was updated.
    if (N == *NodeToMatch)
      *NodeToMatch = E;
    // Performing linear search here does not matter because we almost never
    // run this code.  You'd have to have a CSE during complex pattern
    // matching.
    for (auto &I : RecordedNodes)
      if (I.first.getNode() == N)
        I.first.setNode(E);

    for (auto &I : MatchScopes)
      for (auto &J : I.NodeStack)
        if (J.getNode() == N)
          J.setNode(E);
  }
};

} // end anonymous namespace
|
2010-04-15 19:08:50 +02:00
|
|
|
|
2016-05-11 00:58:26 +02:00
|
|
|
void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch,
|
|
|
|
const unsigned char *MatcherTable,
|
|
|
|
unsigned TableSize) {
|
2010-02-28 23:37:22 +01:00
|
|
|
// FIXME: Should these even be selected? Handle these cases in the caller?
|
|
|
|
switch (NodeToMatch->getOpcode()) {
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
case ISD::EntryToken: // These nodes remain the same.
|
|
|
|
case ISD::BasicBlock:
|
|
|
|
case ISD::Register:
|
2012-01-19 00:52:12 +01:00
|
|
|
case ISD::RegisterMask:
|
2010-02-28 23:37:22 +01:00
|
|
|
case ISD::HANDLENODE:
|
2010-04-07 07:20:54 +02:00
|
|
|
case ISD::MDNODE_SDNODE:
|
2010-02-28 23:37:22 +01:00
|
|
|
case ISD::TargetConstant:
|
|
|
|
case ISD::TargetConstantFP:
|
|
|
|
case ISD::TargetConstantPool:
|
|
|
|
case ISD::TargetFrameIndex:
|
|
|
|
case ISD::TargetExternalSymbol:
|
2015-06-22 19:46:53 +02:00
|
|
|
case ISD::MCSymbol:
|
2010-02-28 23:37:22 +01:00
|
|
|
case ISD::TargetBlockAddress:
|
|
|
|
case ISD::TargetJumpTable:
|
|
|
|
case ISD::TargetGlobalTLSAddress:
|
|
|
|
case ISD::TargetGlobalAddress:
|
|
|
|
case ISD::TokenFactor:
|
|
|
|
case ISD::CopyFromReg:
|
|
|
|
case ISD::CopyToReg:
|
2010-03-14 03:33:54 +01:00
|
|
|
case ISD::EH_LABEL:
|
2017-09-05 22:14:58 +02:00
|
|
|
case ISD::ANNOTATION_LABEL:
|
2012-09-06 11:17:37 +02:00
|
|
|
case ISD::LIFETIME_START:
|
|
|
|
case ISD::LIFETIME_END:
|
2010-03-02 23:20:06 +01:00
|
|
|
NodeToMatch->setNodeId(-1); // Mark selected.
|
2016-05-11 00:58:26 +02:00
|
|
|
return;
|
2010-02-28 23:37:22 +01:00
|
|
|
case ISD::AssertSext:
|
|
|
|
case ISD::AssertZext:
|
|
|
|
CurDAG->ReplaceAllUsesOfValueWith(SDValue(NodeToMatch, 0),
|
|
|
|
NodeToMatch->getOperand(0));
|
2016-05-06 20:42:16 +02:00
|
|
|
CurDAG->RemoveDeadNode(NodeToMatch);
|
2016-05-11 00:58:26 +02:00
|
|
|
return;
|
|
|
|
case ISD::INLINEASM:
|
|
|
|
Select_INLINEASM(NodeToMatch);
|
|
|
|
return;
|
|
|
|
case ISD::READ_REGISTER:
|
|
|
|
Select_READ_REGISTER(NodeToMatch);
|
|
|
|
return;
|
|
|
|
case ISD::WRITE_REGISTER:
|
|
|
|
Select_WRITE_REGISTER(NodeToMatch);
|
|
|
|
return;
|
|
|
|
case ISD::UNDEF:
|
|
|
|
Select_UNDEF(NodeToMatch);
|
|
|
|
return;
|
2010-02-28 23:37:22 +01:00
|
|
|
}
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-02-28 23:37:22 +01:00
|
|
|
assert(!NodeToMatch->isMachineOpcode() && "Node already selected!");
|
|
|
|
|
|
|
|
// Set up the node stack with NodeToMatch as the only node on the stack.
|
|
|
|
SmallVector<SDValue, 8> NodeStack;
|
|
|
|
SDValue N = SDValue(NodeToMatch, 0);
|
|
|
|
NodeStack.push_back(N);
|
|
|
|
|
|
|
|
// MatchScopes - Scopes used when matching, if a match failure happens, this
|
|
|
|
// indicates where to continue checking.
|
|
|
|
SmallVector<MatchScope, 8> MatchScopes;
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-02-28 23:37:22 +01:00
|
|
|
// RecordedNodes - This is the set of nodes that have been recorded by the
|
2010-09-22 00:00:25 +02:00
|
|
|
// state machine. The second value is the parent of the node, or null if the
|
|
|
|
// root is recorded.
|
|
|
|
SmallVector<std::pair<SDValue, SDNode*>, 8> RecordedNodes;
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-02-28 23:37:22 +01:00
|
|
|
// MatchedMemRefs - This is the set of MemRef's we've seen in the input
|
|
|
|
// pattern.
|
|
|
|
SmallVector<MachineMemOperand*, 2> MatchedMemRefs;
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-12-23 18:13:18 +01:00
|
|
|
// These are the current input chain and glue for use when generating nodes.
|
2010-02-28 23:37:22 +01:00
|
|
|
// Various Emit operations change these. For example, emitting a copytoreg
|
|
|
|
// uses and updates these.
|
2010-12-23 18:13:18 +01:00
|
|
|
SDValue InputChain, InputGlue;
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-02-28 23:37:22 +01:00
|
|
|
// ChainNodesMatched - If a pattern matches nodes that have input/output
|
|
|
|
// chains, the OPC_EmitMergeInputChains operation is emitted which indicates
|
|
|
|
// which ones they are. The result is captured into this list so that we can
|
|
|
|
// update the chain results when the pattern is complete.
|
|
|
|
SmallVector<SDNode*, 3> ChainNodesMatched;
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2013-04-19 23:37:07 +02:00
|
|
|
DEBUG(dbgs() << "ISEL: Starting pattern match on root node: ";
|
2010-02-28 23:37:22 +01:00
|
|
|
NodeToMatch->dump(CurDAG);
|
2013-04-19 23:37:07 +02:00
|
|
|
dbgs() << '\n');
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-03-01 19:47:11 +01:00
|
|
|
// Determine where to start the interpreter. Normally we start at opcode #0,
|
|
|
|
// but if the state machine starts with an OPC_SwitchOpcode, then we
|
|
|
|
// accelerate the first lookup (which is guaranteed to be hot) with the
|
|
|
|
// OpcodeOffset table.
|
2010-02-28 23:37:22 +01:00
|
|
|
unsigned MatcherIndex = 0;
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-03-01 19:47:11 +01:00
|
|
|
if (!OpcodeOffset.empty()) {
|
|
|
|
// Already computed the OpcodeOffset table, just index into it.
|
|
|
|
if (N.getOpcode() < OpcodeOffset.size())
|
|
|
|
MatcherIndex = OpcodeOffset[N.getOpcode()];
|
2013-04-19 23:37:07 +02:00
|
|
|
DEBUG(dbgs() << " Initial Opcode index to " << MatcherIndex << "\n");
|
2010-03-01 19:47:11 +01:00
|
|
|
|
|
|
|
} else if (MatcherTable[0] == OPC_SwitchOpcode) {
|
|
|
|
// Otherwise, the table isn't computed, but the state machine does start
|
|
|
|
// with an OPC_SwitchOpcode instruction. Populate the table now, since this
|
|
|
|
// is the first time we're selecting an instruction.
|
|
|
|
unsigned Idx = 1;
|
2017-02-04 03:00:53 +01:00
|
|
|
while (true) {
|
2010-03-01 19:47:11 +01:00
|
|
|
// Get the size of this case.
|
|
|
|
unsigned CaseSize = MatcherTable[Idx++];
|
|
|
|
if (CaseSize & 128)
|
|
|
|
CaseSize = GetVBR(CaseSize, MatcherTable, Idx);
|
|
|
|
if (CaseSize == 0) break;
|
|
|
|
|
|
|
|
// Get the opcode, add the index to the table.
|
2010-03-25 07:33:05 +01:00
|
|
|
uint16_t Opc = MatcherTable[Idx++];
|
|
|
|
Opc |= (unsigned short)MatcherTable[Idx++] << 8;
|
2010-03-01 19:47:11 +01:00
|
|
|
if (Opc >= OpcodeOffset.size())
|
|
|
|
OpcodeOffset.resize((Opc+1)*2);
|
|
|
|
OpcodeOffset[Opc] = Idx;
|
|
|
|
Idx += CaseSize;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Okay, do the lookup for the first opcode.
|
|
|
|
if (N.getOpcode() < OpcodeOffset.size())
|
|
|
|
MatcherIndex = OpcodeOffset[N.getOpcode()];
|
|
|
|
}
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2017-02-04 03:00:53 +01:00
|
|
|
while (true) {
|
2010-02-28 23:37:22 +01:00
|
|
|
assert(MatcherIndex < TableSize && "Invalid index");
|
2010-03-09 03:15:05 +01:00
|
|
|
#ifndef NDEBUG
|
|
|
|
unsigned CurrentOpcodeIndex = MatcherIndex;
|
|
|
|
#endif
|
2010-02-28 23:37:22 +01:00
|
|
|
BuiltinOpcodes Opcode = (BuiltinOpcodes)MatcherTable[MatcherIndex++];
|
|
|
|
switch (Opcode) {
|
|
|
|
case OPC_Scope: {
|
2010-03-03 08:31:15 +01:00
|
|
|
// Okay, the semantics of this operation are that we should push a scope
|
|
|
|
// then evaluate the first child. However, pushing a scope only to have
|
|
|
|
// the first check fail (which then pops it) is inefficient. If we can
|
|
|
|
// determine immediately that the first check (or first several) will
|
|
|
|
// immediately fail, don't even bother pushing a scope for them.
|
|
|
|
unsigned FailIndex;
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2017-02-04 03:00:53 +01:00
|
|
|
while (true) {
|
2010-03-03 08:31:15 +01:00
|
|
|
unsigned NumToSkip = MatcherTable[MatcherIndex++];
|
|
|
|
if (NumToSkip & 128)
|
|
|
|
NumToSkip = GetVBR(NumToSkip, MatcherTable, MatcherIndex);
|
|
|
|
// Found the end of the scope with no match.
|
|
|
|
if (NumToSkip == 0) {
|
|
|
|
FailIndex = 0;
|
|
|
|
break;
|
|
|
|
}
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-03-03 08:31:15 +01:00
|
|
|
FailIndex = MatcherIndex+NumToSkip;
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-03-27 19:54:50 +01:00
|
|
|
unsigned MatcherIndexOfPredicate = MatcherIndex;
|
|
|
|
(void)MatcherIndexOfPredicate; // silence warning.
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-03-03 08:31:15 +01:00
|
|
|
// If we can't evaluate this predicate without pushing a scope (e.g. if
|
|
|
|
// it is a 'MoveParent') or if the predicate succeeds on this node, we
|
|
|
|
// push the scope and evaluate the full predicate chain.
|
|
|
|
bool Result;
|
|
|
|
MatcherIndex = IsPredicateKnownToFail(MatcherTable, MatcherIndex, N,
|
2010-03-03 08:46:25 +01:00
|
|
|
Result, *this, RecordedNodes);
|
2010-03-03 08:31:15 +01:00
|
|
|
if (!Result)
|
|
|
|
break;
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2013-04-19 23:37:07 +02:00
|
|
|
DEBUG(dbgs() << " Skipped scope entry (due to false predicate) at "
|
2010-03-27 19:54:50 +01:00
|
|
|
<< "index " << MatcherIndexOfPredicate
|
|
|
|
<< ", continuing at " << FailIndex << "\n");
|
2013-03-08 23:56:31 +01:00
|
|
|
++NumDAGIselRetries;
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-03-03 08:31:15 +01:00
|
|
|
// Otherwise, we know that this case of the Scope is guaranteed to fail,
|
|
|
|
// move to the next case.
|
|
|
|
MatcherIndex = FailIndex;
|
|
|
|
}
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-03-03 08:31:15 +01:00
|
|
|
// If the whole scope failed to match, bail.
|
|
|
|
if (FailIndex == 0) break;
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-02-28 23:37:22 +01:00
|
|
|
// Push a MatchScope which indicates where to go if the first child fails
|
|
|
|
// to match.
|
|
|
|
MatchScope NewEntry;
|
2010-03-03 08:31:15 +01:00
|
|
|
NewEntry.FailIndex = FailIndex;
|
2010-02-28 23:37:22 +01:00
|
|
|
NewEntry.NodeStack.append(NodeStack.begin(), NodeStack.end());
|
|
|
|
NewEntry.NumRecordedNodes = RecordedNodes.size();
|
|
|
|
NewEntry.NumMatchedMemRefs = MatchedMemRefs.size();
|
|
|
|
NewEntry.InputChain = InputChain;
|
2010-12-23 18:13:18 +01:00
|
|
|
NewEntry.InputGlue = InputGlue;
|
2010-02-28 23:37:22 +01:00
|
|
|
NewEntry.HasChainNodesMatched = !ChainNodesMatched.empty();
|
|
|
|
MatchScopes.push_back(NewEntry);
|
|
|
|
continue;
|
|
|
|
}
|
2010-09-22 00:00:25 +02:00
|
|
|
case OPC_RecordNode: {
|
2010-02-28 23:37:22 +01:00
|
|
|
// Remember this node, it may end up being an operand in the pattern.
|
2014-04-14 02:51:57 +02:00
|
|
|
SDNode *Parent = nullptr;
|
2010-09-22 00:00:25 +02:00
|
|
|
if (NodeStack.size() > 1)
|
|
|
|
Parent = NodeStack[NodeStack.size()-2].getNode();
|
|
|
|
RecordedNodes.push_back(std::make_pair(N, Parent));
|
2010-02-28 23:37:22 +01:00
|
|
|
continue;
|
2010-09-22 00:00:25 +02:00
|
|
|
}
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-02-28 23:37:22 +01:00
|
|
|
case OPC_RecordChild0: case OPC_RecordChild1:
|
|
|
|
case OPC_RecordChild2: case OPC_RecordChild3:
|
|
|
|
case OPC_RecordChild4: case OPC_RecordChild5:
|
|
|
|
case OPC_RecordChild6: case OPC_RecordChild7: {
|
|
|
|
unsigned ChildNo = Opcode-OPC_RecordChild0;
|
|
|
|
if (ChildNo >= N.getNumOperands())
|
|
|
|
break; // Match fails if out of range child #.
|
|
|
|
|
2010-09-22 00:00:25 +02:00
|
|
|
RecordedNodes.push_back(std::make_pair(N->getOperand(ChildNo),
|
|
|
|
N.getNode()));
|
2010-02-28 23:37:22 +01:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
case OPC_RecordMemRef:
|
|
|
|
MatchedMemRefs.push_back(cast<MemSDNode>(N)->getMemOperand());
|
|
|
|
continue;
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-12-23 18:24:32 +01:00
|
|
|
case OPC_CaptureGlueInput:
|
2010-12-23 18:13:18 +01:00
|
|
|
// If the current node has an input glue, capture it in InputGlue.
|
2010-02-28 23:37:22 +01:00
|
|
|
if (N->getNumOperands() != 0 &&
|
2010-12-21 03:38:05 +01:00
|
|
|
N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Glue)
|
2010-12-23 18:13:18 +01:00
|
|
|
InputGlue = N->getOperand(N->getNumOperands()-1);
|
2010-02-28 23:37:22 +01:00
|
|
|
continue;
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-02-28 23:37:22 +01:00
|
|
|
case OPC_MoveChild: {
|
|
|
|
unsigned ChildNo = MatcherTable[MatcherIndex++];
|
|
|
|
if (ChildNo >= N.getNumOperands())
|
|
|
|
break; // Match fails if out of range child #.
|
|
|
|
N = N.getOperand(ChildNo);
|
|
|
|
NodeStack.push_back(N);
|
|
|
|
continue;
|
|
|
|
}
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2016-05-02 03:53:30 +02:00
|
|
|
case OPC_MoveChild0: case OPC_MoveChild1:
|
|
|
|
case OPC_MoveChild2: case OPC_MoveChild3:
|
|
|
|
case OPC_MoveChild4: case OPC_MoveChild5:
|
|
|
|
case OPC_MoveChild6: case OPC_MoveChild7: {
|
|
|
|
unsigned ChildNo = Opcode-OPC_MoveChild0;
|
|
|
|
if (ChildNo >= N.getNumOperands())
|
|
|
|
break; // Match fails if out of range child #.
|
|
|
|
N = N.getOperand(ChildNo);
|
|
|
|
NodeStack.push_back(N);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2010-02-28 23:37:22 +01:00
|
|
|
case OPC_MoveParent:
|
|
|
|
// Pop the current node off the NodeStack.
|
|
|
|
NodeStack.pop_back();
|
|
|
|
assert(!NodeStack.empty() && "Node stack imbalance!");
|
2010-12-24 05:28:06 +01:00
|
|
|
N = NodeStack.back();
|
2010-02-28 23:37:22 +01:00
|
|
|
continue;
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-03-03 08:46:25 +01:00
|
|
|
case OPC_CheckSame:
|
|
|
|
if (!::CheckSame(MatcherTable, MatcherIndex, N, RecordedNodes)) break;
|
2010-02-28 23:37:22 +01:00
|
|
|
continue;
|
2013-10-05 07:38:16 +02:00
|
|
|
|
|
|
|
case OPC_CheckChild0Same: case OPC_CheckChild1Same:
|
|
|
|
case OPC_CheckChild2Same: case OPC_CheckChild3Same:
|
|
|
|
if (!::CheckChildSame(MatcherTable, MatcherIndex, N, RecordedNodes,
|
|
|
|
Opcode-OPC_CheckChild0Same))
|
|
|
|
break;
|
|
|
|
continue;
|
|
|
|
|
2010-02-28 23:37:22 +01:00
|
|
|
case OPC_CheckPatternPredicate:
|
2010-03-03 08:31:15 +01:00
|
|
|
if (!::CheckPatternPredicate(MatcherTable, MatcherIndex, *this)) break;
|
2010-02-28 23:37:22 +01:00
|
|
|
continue;
|
|
|
|
case OPC_CheckPredicate:
|
2010-03-03 08:31:15 +01:00
|
|
|
if (!::CheckNodePredicate(MatcherTable, MatcherIndex, *this,
|
|
|
|
N.getNode()))
|
|
|
|
break;
|
2010-02-28 23:37:22 +01:00
|
|
|
continue;
|
2010-03-04 02:23:08 +01:00
|
|
|
case OPC_CheckComplexPat: {
|
|
|
|
unsigned CPNum = MatcherTable[MatcherIndex++];
|
|
|
|
unsigned RecNo = MatcherTable[MatcherIndex++];
|
|
|
|
assert(RecNo < RecordedNodes.size() && "Invalid CheckComplexPat");
|
2014-10-03 22:00:34 +02:00
|
|
|
|
|
|
|
// If target can modify DAG during matching, keep the matching state
|
|
|
|
// consistent.
|
|
|
|
std::unique_ptr<MatchStateUpdater> MSU;
|
|
|
|
if (ComplexPatternFuncMutatesDAG())
|
2017-02-03 13:28:40 +01:00
|
|
|
MSU.reset(new MatchStateUpdater(*CurDAG, &NodeToMatch, RecordedNodes,
|
2014-10-03 22:00:34 +02:00
|
|
|
MatchScopes));
|
|
|
|
|
2010-09-22 00:00:25 +02:00
|
|
|
if (!CheckComplexPattern(NodeToMatch, RecordedNodes[RecNo].second,
|
|
|
|
RecordedNodes[RecNo].first, CPNum,
|
2010-03-04 02:23:08 +01:00
|
|
|
RecordedNodes))
|
2010-02-28 23:37:22 +01:00
|
|
|
break;
|
|
|
|
continue;
|
2010-03-04 02:23:08 +01:00
|
|
|
}
|
2010-02-28 23:37:22 +01:00
|
|
|
case OPC_CheckOpcode:
|
2010-03-03 08:31:15 +01:00
|
|
|
if (!::CheckOpcode(MatcherTable, MatcherIndex, N.getNode())) break;
|
|
|
|
continue;
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-03-03 08:31:15 +01:00
|
|
|
case OPC_CheckType:
|
2015-07-09 04:09:04 +02:00
|
|
|
if (!::CheckType(MatcherTable, MatcherIndex, N, TLI,
|
|
|
|
CurDAG->getDataLayout()))
|
2013-06-19 23:36:55 +02:00
|
|
|
break;
|
2010-02-28 23:37:22 +01:00
|
|
|
continue;
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-03-01 07:59:22 +01:00
|
|
|
case OPC_SwitchOpcode: {
|
|
|
|
unsigned CurNodeOpcode = N.getOpcode();
|
2010-03-01 08:43:08 +01:00
|
|
|
unsigned SwitchStart = MatcherIndex-1; (void)SwitchStart;
|
2010-03-01 07:59:22 +01:00
|
|
|
unsigned CaseSize;
|
2017-02-04 03:00:53 +01:00
|
|
|
while (true) {
|
2010-03-01 07:59:22 +01:00
|
|
|
// Get the size of this case.
|
|
|
|
CaseSize = MatcherTable[MatcherIndex++];
|
|
|
|
if (CaseSize & 128)
|
|
|
|
CaseSize = GetVBR(CaseSize, MatcherTable, MatcherIndex);
|
|
|
|
if (CaseSize == 0) break;
|
|
|
|
|
2010-03-25 07:33:05 +01:00
|
|
|
uint16_t Opc = MatcherTable[MatcherIndex++];
|
|
|
|
Opc |= (unsigned short)MatcherTable[MatcherIndex++] << 8;
|
|
|
|
|
2010-03-01 07:59:22 +01:00
|
|
|
// If the opcode matches, then we will execute this case.
|
2010-03-25 07:33:05 +01:00
|
|
|
if (CurNodeOpcode == Opc)
|
2010-03-01 07:59:22 +01:00
|
|
|
break;
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-03-01 07:59:22 +01:00
|
|
|
// Otherwise, skip over this case.
|
|
|
|
MatcherIndex += CaseSize;
|
|
|
|
}
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-03-03 07:28:15 +01:00
|
|
|
// If no cases matched, bail out.
|
2010-03-01 07:59:22 +01:00
|
|
|
if (CaseSize == 0) break;
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-03-01 07:59:22 +01:00
|
|
|
// Otherwise, execute the case we found.
|
2013-04-19 23:37:07 +02:00
|
|
|
DEBUG(dbgs() << " OpcodeSwitch from " << SwitchStart
|
2010-03-01 07:59:22 +01:00
|
|
|
<< " to " << MatcherIndex << "\n");
|
|
|
|
continue;
|
|
|
|
}
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-03-03 07:28:15 +01:00
|
|
|
case OPC_SwitchType: {
|
2013-08-15 04:44:19 +02:00
|
|
|
MVT CurNodeVT = N.getSimpleValueType();
|
2010-03-03 07:28:15 +01:00
|
|
|
unsigned SwitchStart = MatcherIndex-1; (void)SwitchStart;
|
|
|
|
unsigned CaseSize;
|
2017-02-04 03:00:53 +01:00
|
|
|
while (true) {
|
2010-03-03 07:28:15 +01:00
|
|
|
// Get the size of this case.
|
|
|
|
CaseSize = MatcherTable[MatcherIndex++];
|
|
|
|
if (CaseSize & 128)
|
|
|
|
CaseSize = GetVBR(CaseSize, MatcherTable, MatcherIndex);
|
|
|
|
if (CaseSize == 0) break;
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-11-03 13:17:33 +01:00
|
|
|
MVT CaseVT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
|
2010-03-03 07:28:15 +01:00
|
|
|
if (CaseVT == MVT::iPTR)
|
2015-07-09 04:09:04 +02:00
|
|
|
CaseVT = TLI->getPointerTy(CurDAG->getDataLayout());
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-03-03 07:28:15 +01:00
|
|
|
// If the VT matches, then we will execute this case.
|
|
|
|
if (CurNodeVT == CaseVT)
|
|
|
|
break;
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-03-03 07:28:15 +01:00
|
|
|
// Otherwise, skip over this case.
|
|
|
|
MatcherIndex += CaseSize;
|
|
|
|
}
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-03-03 07:28:15 +01:00
|
|
|
// If no cases matched, bail out.
|
|
|
|
if (CaseSize == 0) break;
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-03-03 07:28:15 +01:00
|
|
|
// Otherwise, execute the case we found.
|
2013-04-19 23:37:07 +02:00
|
|
|
DEBUG(dbgs() << " TypeSwitch[" << EVT(CurNodeVT).getEVTString()
|
2010-03-03 07:28:15 +01:00
|
|
|
<< "] from " << SwitchStart << " to " << MatcherIndex<<'\n');
|
|
|
|
continue;
|
|
|
|
}
|
2010-02-28 23:37:22 +01:00
|
|
|
case OPC_CheckChild0Type: case OPC_CheckChild1Type:
|
|
|
|
case OPC_CheckChild2Type: case OPC_CheckChild3Type:
|
|
|
|
case OPC_CheckChild4Type: case OPC_CheckChild5Type:
|
2010-03-03 08:46:25 +01:00
|
|
|
case OPC_CheckChild6Type: case OPC_CheckChild7Type:
|
2014-10-08 09:32:17 +02:00
|
|
|
if (!::CheckChildType(MatcherTable, MatcherIndex, N, TLI,
|
2015-07-09 04:09:04 +02:00
|
|
|
CurDAG->getDataLayout(),
|
|
|
|
Opcode - OPC_CheckChild0Type))
|
2010-03-03 08:31:15 +01:00
|
|
|
break;
|
2010-02-28 23:37:22 +01:00
|
|
|
continue;
|
|
|
|
case OPC_CheckCondCode:
|
2010-03-03 08:46:25 +01:00
|
|
|
if (!::CheckCondCode(MatcherTable, MatcherIndex, N)) break;
|
2010-02-28 23:37:22 +01:00
|
|
|
continue;
|
2010-03-03 08:46:25 +01:00
|
|
|
case OPC_CheckValueType:
|
2015-07-09 04:09:04 +02:00
|
|
|
if (!::CheckValueType(MatcherTable, MatcherIndex, N, TLI,
|
|
|
|
CurDAG->getDataLayout()))
|
2013-06-19 23:36:55 +02:00
|
|
|
break;
|
2010-02-28 23:37:22 +01:00
|
|
|
continue;
|
2010-03-03 08:31:15 +01:00
|
|
|
case OPC_CheckInteger:
|
|
|
|
if (!::CheckInteger(MatcherTable, MatcherIndex, N)) break;
|
2010-02-28 23:37:22 +01:00
|
|
|
continue;
|
2014-02-05 06:44:28 +01:00
|
|
|
case OPC_CheckChild0Integer: case OPC_CheckChild1Integer:
|
|
|
|
case OPC_CheckChild2Integer: case OPC_CheckChild3Integer:
|
|
|
|
case OPC_CheckChild4Integer:
|
|
|
|
if (!::CheckChildInteger(MatcherTable, MatcherIndex, N,
|
|
|
|
Opcode-OPC_CheckChild0Integer)) break;
|
|
|
|
continue;
|
2010-03-03 08:46:25 +01:00
|
|
|
case OPC_CheckAndImm:
|
|
|
|
if (!::CheckAndImm(MatcherTable, MatcherIndex, N, *this)) break;
|
2010-02-28 23:37:22 +01:00
|
|
|
continue;
|
2010-03-03 08:46:25 +01:00
|
|
|
case OPC_CheckOrImm:
|
|
|
|
if (!::CheckOrImm(MatcherTable, MatcherIndex, N, *this)) break;
|
2010-02-28 23:37:22 +01:00
|
|
|
continue;
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-02-28 23:37:22 +01:00
|
|
|
case OPC_CheckFoldableChainNode: {
|
|
|
|
assert(NodeStack.size() != 1 && "No parent node");
|
|
|
|
// Verify that all intermediate nodes between the root and this one have
|
|
|
|
// a single use.
|
|
|
|
bool HasMultipleUses = false;
|
|
|
|
for (unsigned i = 1, e = NodeStack.size()-1; i != e; ++i)
|
2017-03-14 02:42:23 +01:00
|
|
|
if (!NodeStack[i].getNode()->hasOneUse()) {
|
2010-02-28 23:37:22 +01:00
|
|
|
HasMultipleUses = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (HasMultipleUses) break;
|
|
|
|
|
|
|
|
// Check to see that the target thinks this is profitable to fold and that
|
|
|
|
// we can fold it without inducing cycles in the graph.
|
|
|
|
if (!IsProfitableToFold(N, NodeStack[NodeStack.size()-2].getNode(),
|
|
|
|
NodeToMatch) ||
|
|
|
|
!IsLegalToFold(N, NodeStack[NodeStack.size()-2].getNode(),
|
2010-04-17 17:26:15 +02:00
|
|
|
NodeToMatch, OptLevel,
|
|
|
|
true/*We validate our own chains*/))
|
2010-02-28 23:37:22 +01:00
|
|
|
break;
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-02-28 23:37:22 +01:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
case OPC_EmitInteger: {
|
|
|
|
MVT::SimpleValueType VT =
|
|
|
|
(MVT::SimpleValueType)MatcherTable[MatcherIndex++];
|
|
|
|
int64_t Val = MatcherTable[MatcherIndex++];
|
|
|
|
if (Val & 128)
|
|
|
|
Val = GetVBR(Val, MatcherTable, MatcherIndex);
|
2010-09-22 00:00:25 +02:00
|
|
|
RecordedNodes.push_back(std::pair<SDValue, SDNode*>(
|
2015-04-28 16:05:47 +02:00
|
|
|
CurDAG->getTargetConstant(Val, SDLoc(NodeToMatch),
|
|
|
|
VT), nullptr));
|
2010-02-28 23:37:22 +01:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
case OPC_EmitRegister: {
|
|
|
|
MVT::SimpleValueType VT =
|
|
|
|
(MVT::SimpleValueType)MatcherTable[MatcherIndex++];
|
|
|
|
unsigned RegNo = MatcherTable[MatcherIndex++];
|
2010-09-22 00:00:25 +02:00
|
|
|
RecordedNodes.push_back(std::pair<SDValue, SDNode*>(
|
2014-04-14 02:51:57 +02:00
|
|
|
CurDAG->getRegister(RegNo, VT), nullptr));
|
2010-02-28 23:37:22 +01:00
|
|
|
continue;
|
|
|
|
}
|
2011-03-01 02:37:19 +01:00
|
|
|
case OPC_EmitRegister2: {
|
|
|
|
// For targets w/ more than 256 register names, the register enum
|
|
|
|
// values are stored in two bytes in the matcher table (just like
|
|
|
|
// opcodes).
|
|
|
|
MVT::SimpleValueType VT =
|
|
|
|
(MVT::SimpleValueType)MatcherTable[MatcherIndex++];
|
|
|
|
unsigned RegNo = MatcherTable[MatcherIndex++];
|
|
|
|
RegNo |= MatcherTable[MatcherIndex++] << 8;
|
|
|
|
RecordedNodes.push_back(std::pair<SDValue, SDNode*>(
|
2014-04-14 02:51:57 +02:00
|
|
|
CurDAG->getRegister(RegNo, VT), nullptr));
|
2011-03-01 02:37:19 +01:00
|
|
|
continue;
|
|
|
|
}
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-02-28 23:37:22 +01:00
|
|
|
case OPC_EmitConvertToTarget: {
|
|
|
|
// Convert from IMM/FPIMM to target version.
|
|
|
|
unsigned RecNo = MatcherTable[MatcherIndex++];
|
2013-10-07 00:38:19 +02:00
|
|
|
assert(RecNo < RecordedNodes.size() && "Invalid EmitConvertToTarget");
|
2010-09-22 00:00:25 +02:00
|
|
|
SDValue Imm = RecordedNodes[RecNo].first;
|
2010-02-28 23:37:22 +01:00
|
|
|
|
|
|
|
if (Imm->getOpcode() == ISD::Constant) {
|
2013-03-07 07:34:49 +01:00
|
|
|
const ConstantInt *Val=cast<ConstantSDNode>(Imm)->getConstantIntValue();
|
2016-04-29 19:42:45 +02:00
|
|
|
Imm = CurDAG->getTargetConstant(*Val, SDLoc(NodeToMatch),
|
|
|
|
Imm.getValueType());
|
2010-02-28 23:37:22 +01:00
|
|
|
} else if (Imm->getOpcode() == ISD::ConstantFP) {
|
|
|
|
const ConstantFP *Val=cast<ConstantFPSDNode>(Imm)->getConstantFPValue();
|
2016-04-29 19:42:45 +02:00
|
|
|
Imm = CurDAG->getTargetConstantFP(*Val, SDLoc(NodeToMatch),
|
|
|
|
Imm.getValueType());
|
2010-02-28 23:37:22 +01:00
|
|
|
}
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-09-22 00:00:25 +02:00
|
|
|
RecordedNodes.push_back(std::make_pair(Imm, RecordedNodes[RecNo].second));
|
2010-02-28 23:37:22 +01:00
|
|
|
continue;
|
|
|
|
}
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-03-28 07:50:16 +02:00
|
|
|
case OPC_EmitMergeInputChains1_0: // OPC_EmitMergeInputChains, 1, 0
|
2016-03-07 08:29:12 +01:00
|
|
|
case OPC_EmitMergeInputChains1_1: // OPC_EmitMergeInputChains, 1, 1
|
|
|
|
case OPC_EmitMergeInputChains1_2: { // OPC_EmitMergeInputChains, 1, 2
|
2010-03-28 07:50:16 +02:00
|
|
|
// These are space-optimized forms of OPC_EmitMergeInputChains.
|
2014-04-14 02:51:57 +02:00
|
|
|
assert(!InputChain.getNode() &&
|
2010-03-28 07:50:16 +02:00
|
|
|
"EmitMergeInputChains should be the first chain producing node");
|
|
|
|
assert(ChainNodesMatched.empty() &&
|
|
|
|
"Should only have one EmitMergeInputChains per match");
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-03-28 07:50:16 +02:00
|
|
|
// Read all of the chained nodes.
|
2016-03-07 08:29:12 +01:00
|
|
|
unsigned RecNo = Opcode - OPC_EmitMergeInputChains1_0;
|
2013-10-07 00:38:19 +02:00
|
|
|
assert(RecNo < RecordedNodes.size() && "Invalid EmitMergeInputChains");
|
2010-09-22 00:00:25 +02:00
|
|
|
ChainNodesMatched.push_back(RecordedNodes[RecNo].first.getNode());
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-03-28 07:50:16 +02:00
|
|
|
// FIXME: What if other value results of the node have uses not matched
|
|
|
|
// by this pattern?
|
|
|
|
if (ChainNodesMatched.back() != NodeToMatch &&
|
2010-09-22 00:00:25 +02:00
|
|
|
!RecordedNodes[RecNo].first.hasOneUse()) {
|
2010-03-28 07:50:16 +02:00
|
|
|
ChainNodesMatched.clear();
|
|
|
|
break;
|
|
|
|
}
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-03-28 07:50:16 +02:00
|
|
|
// Merge the input chains if they are not intra-pattern references.
|
|
|
|
InputChain = HandleMergeInputChains(ChainNodesMatched, CurDAG);
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2014-04-14 02:51:57 +02:00
|
|
|
if (!InputChain.getNode())
|
2010-03-28 07:50:16 +02:00
|
|
|
break; // Failed to merge.
|
|
|
|
continue;
|
|
|
|
}
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-02-28 23:37:22 +01:00
|
|
|
case OPC_EmitMergeInputChains: {
|
2014-04-14 02:51:57 +02:00
|
|
|
assert(!InputChain.getNode() &&
|
2010-02-28 23:37:22 +01:00
|
|
|
"EmitMergeInputChains should be the first chain producing node");
|
|
|
|
// This node gets a list of nodes we matched in the input that have
|
|
|
|
// chains. We want to token factor all of the input chains to these nodes
|
|
|
|
// together. However, if any of the input chains is actually one of the
|
|
|
|
// nodes matched in this pattern, then we have an intra-match reference.
|
|
|
|
// Ignore these because the newly token factored chain should not refer to
|
|
|
|
// the old nodes.
|
|
|
|
unsigned NumChains = MatcherTable[MatcherIndex++];
|
|
|
|
assert(NumChains != 0 && "Can't TF zero chains");
|
|
|
|
|
|
|
|
assert(ChainNodesMatched.empty() &&
|
|
|
|
"Should only have one EmitMergeInputChains per match");
|
|
|
|
|
|
|
|
// Read all of the chained nodes.
|
2010-03-02 20:34:59 +01:00
|
|
|
for (unsigned i = 0; i != NumChains; ++i) {
|
|
|
|
unsigned RecNo = MatcherTable[MatcherIndex++];
|
2013-10-07 00:38:19 +02:00
|
|
|
assert(RecNo < RecordedNodes.size() && "Invalid EmitMergeInputChains");
|
2010-09-22 00:00:25 +02:00
|
|
|
ChainNodesMatched.push_back(RecordedNodes[RecNo].first.getNode());
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-02-28 23:37:22 +01:00
|
|
|
// FIXME: What if other value results of the node have uses not matched
|
|
|
|
// by this pattern?
|
|
|
|
if (ChainNodesMatched.back() != NodeToMatch &&
|
2010-09-22 00:00:25 +02:00
|
|
|
!RecordedNodes[RecNo].first.hasOneUse()) {
|
2010-02-28 23:37:22 +01:00
|
|
|
ChainNodesMatched.clear();
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-03-02 01:00:03 +01:00
|
|
|
// If the inner loop broke out, the match fails.
|
|
|
|
if (ChainNodesMatched.empty())
|
|
|
|
break;
|
2010-02-28 23:37:22 +01:00
|
|
|
|
2010-03-02 01:00:03 +01:00
|
|
|
// Merge the input chains if they are not intra-pattern references.
|
|
|
|
InputChain = HandleMergeInputChains(ChainNodesMatched, CurDAG);
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2014-04-14 02:51:57 +02:00
|
|
|
if (!InputChain.getNode())
|
2010-03-02 01:00:03 +01:00
|
|
|
break; // Failed to merge.
|
2010-02-28 23:37:22 +01:00
|
|
|
|
|
|
|
continue;
|
|
|
|
}
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-02-28 23:37:22 +01:00
|
|
|
case OPC_EmitCopyToReg: {
|
|
|
|
unsigned RecNo = MatcherTable[MatcherIndex++];
|
2013-10-07 00:38:19 +02:00
|
|
|
assert(RecNo < RecordedNodes.size() && "Invalid EmitCopyToReg");
|
2010-02-28 23:37:22 +01:00
|
|
|
unsigned DestPhysReg = MatcherTable[MatcherIndex++];
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2014-04-14 02:51:57 +02:00
|
|
|
if (!InputChain.getNode())
|
2010-02-28 23:37:22 +01:00
|
|
|
InputChain = CurDAG->getEntryNode();
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2013-05-25 04:42:55 +02:00
|
|
|
InputChain = CurDAG->getCopyToReg(InputChain, SDLoc(NodeToMatch),
|
2010-09-22 00:00:25 +02:00
|
|
|
DestPhysReg, RecordedNodes[RecNo].first,
|
2010-12-23 18:13:18 +01:00
|
|
|
InputGlue);
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-12-23 18:13:18 +01:00
|
|
|
InputGlue = InputChain.getValue(1);
|
2010-02-28 23:37:22 +01:00
|
|
|
continue;
|
|
|
|
}
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-02-28 23:37:22 +01:00
|
|
|
case OPC_EmitNodeXForm: {
|
|
|
|
unsigned XFormNo = MatcherTable[MatcherIndex++];
|
|
|
|
unsigned RecNo = MatcherTable[MatcherIndex++];
|
2013-10-07 00:38:19 +02:00
|
|
|
assert(RecNo < RecordedNodes.size() && "Invalid EmitNodeXForm");
|
2010-09-22 00:00:25 +02:00
|
|
|
SDValue Res = RunSDNodeXForm(RecordedNodes[RecNo].first, XFormNo);
|
2014-04-14 02:51:57 +02:00
|
|
|
RecordedNodes.push_back(std::pair<SDValue,SDNode*>(Res, nullptr));
|
2010-02-28 23:37:22 +01:00
|
|
|
continue;
|
|
|
|
}
|
2017-02-14 19:32:41 +01:00
|
|
|
case OPC_Coverage: {
|
|
|
|
// This is emitted right before MorphNode/EmitNode.
|
|
|
|
// So it should be safe to assume that this node has been selected
|
|
|
|
unsigned index = MatcherTable[MatcherIndex++];
|
|
|
|
index |= (MatcherTable[MatcherIndex++] << 8);
|
|
|
|
dbgs() << "COVERED: " << getPatternForIndex(index) << "\n";
|
|
|
|
dbgs() << "INCLUDED: " << getIncludePathForIndex(index) << "\n";
|
|
|
|
continue;
|
|
|
|
}
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2016-05-03 07:54:13 +02:00
|
|
|
case OPC_EmitNode: case OPC_MorphNodeTo:
|
|
|
|
case OPC_EmitNode0: case OPC_EmitNode1: case OPC_EmitNode2:
|
|
|
|
case OPC_MorphNodeTo0: case OPC_MorphNodeTo1: case OPC_MorphNodeTo2: {
|
2010-02-28 23:38:43 +01:00
|
|
|
uint16_t TargetOpc = MatcherTable[MatcherIndex++];
|
|
|
|
TargetOpc |= (unsigned short)MatcherTable[MatcherIndex++] << 8;
|
2010-02-28 23:37:22 +01:00
|
|
|
unsigned EmitNodeInfo = MatcherTable[MatcherIndex++];
|
|
|
|
// Get the result VT list.
|
2016-05-03 07:54:13 +02:00
|
|
|
unsigned NumVTs;
|
|
|
|
// If this is one of the compressed forms, get the number of VTs based
|
|
|
|
// on the Opcode. Otherwise read the next byte from the table.
|
|
|
|
if (Opcode >= OPC_MorphNodeTo0 && Opcode <= OPC_MorphNodeTo2)
|
|
|
|
NumVTs = Opcode - OPC_MorphNodeTo0;
|
|
|
|
else if (Opcode >= OPC_EmitNode0 && Opcode <= OPC_EmitNode2)
|
|
|
|
NumVTs = Opcode - OPC_EmitNode0;
|
|
|
|
else
|
|
|
|
NumVTs = MatcherTable[MatcherIndex++];
|
2010-02-28 23:37:22 +01:00
|
|
|
SmallVector<EVT, 4> VTs;
|
|
|
|
for (unsigned i = 0; i != NumVTs; ++i) {
|
|
|
|
MVT::SimpleValueType VT =
|
|
|
|
(MVT::SimpleValueType)MatcherTable[MatcherIndex++];
|
2014-10-08 09:32:17 +02:00
|
|
|
if (VT == MVT::iPTR)
|
2015-07-09 04:09:04 +02:00
|
|
|
VT = TLI->getPointerTy(CurDAG->getDataLayout()).SimpleTy;
|
2010-02-28 23:37:22 +01:00
|
|
|
VTs.push_back(VT);
|
|
|
|
}
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-02-28 23:37:22 +01:00
|
|
|
if (EmitNodeInfo & OPFL_Chain)
|
|
|
|
VTs.push_back(MVT::Other);
|
2010-12-23 18:13:18 +01:00
|
|
|
if (EmitNodeInfo & OPFL_GlueOutput)
|
2010-12-21 03:38:05 +01:00
|
|
|
VTs.push_back(MVT::Glue);
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-03-01 08:43:08 +01:00
|
|
|
// This is hot code, so optimize the two most common cases of 1 and 2
|
|
|
|
// results.
|
|
|
|
SDVTList VTList;
|
|
|
|
if (VTs.size() == 1)
|
|
|
|
VTList = CurDAG->getVTList(VTs[0]);
|
|
|
|
else if (VTs.size() == 2)
|
|
|
|
VTList = CurDAG->getVTList(VTs[0], VTs[1]);
|
|
|
|
else
|
2014-04-16 08:10:51 +02:00
|
|
|
VTList = CurDAG->getVTList(VTs);
|
2010-02-28 23:37:22 +01:00
|
|
|
|
|
|
|
// Get the operand list.
|
|
|
|
unsigned NumOps = MatcherTable[MatcherIndex++];
|
|
|
|
SmallVector<SDValue, 8> Ops;
|
|
|
|
for (unsigned i = 0; i != NumOps; ++i) {
|
|
|
|
unsigned RecNo = MatcherTable[MatcherIndex++];
|
|
|
|
if (RecNo & 128)
|
|
|
|
RecNo = GetVBR(RecNo, MatcherTable, MatcherIndex);
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-02-28 23:37:22 +01:00
|
|
|
assert(RecNo < RecordedNodes.size() && "Invalid EmitNode");
|
2010-09-22 00:00:25 +02:00
|
|
|
Ops.push_back(RecordedNodes[RecNo].first);
|
2010-02-28 23:37:22 +01:00
|
|
|
}
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-02-28 23:37:22 +01:00
|
|
|
// If there are variadic operands to add, handle them now.
|
|
|
|
if (EmitNodeInfo & OPFL_VariadicInfo) {
|
|
|
|
// Determine the start index to copy from.
|
|
|
|
unsigned FirstOpToCopy = getNumFixedFromVariadicInfo(EmitNodeInfo);
|
|
|
|
FirstOpToCopy += (EmitNodeInfo & OPFL_Chain) ? 1 : 0;
|
|
|
|
assert(NodeToMatch->getNumOperands() >= FirstOpToCopy &&
|
|
|
|
"Invalid variadic node");
|
2010-12-23 18:13:18 +01:00
|
|
|
// Copy all of the variadic operands, not including a potential glue
|
2010-02-28 23:37:22 +01:00
|
|
|
// input.
|
|
|
|
for (unsigned i = FirstOpToCopy, e = NodeToMatch->getNumOperands();
|
|
|
|
i != e; ++i) {
|
|
|
|
SDValue V = NodeToMatch->getOperand(i);
|
2010-12-21 03:38:05 +01:00
|
|
|
if (V.getValueType() == MVT::Glue) break;
|
2010-02-28 23:37:22 +01:00
|
|
|
Ops.push_back(V);
|
|
|
|
}
|
|
|
|
}
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-12-23 18:13:18 +01:00
|
|
|
// If this has chain/glue inputs, add them.
|
2010-02-28 23:37:22 +01:00
|
|
|
if (EmitNodeInfo & OPFL_Chain)
|
|
|
|
Ops.push_back(InputChain);
|
2014-04-14 02:51:57 +02:00
|
|
|
if ((EmitNodeInfo & OPFL_GlueInput) && InputGlue.getNode() != nullptr)
|
2010-12-23 18:13:18 +01:00
|
|
|
Ops.push_back(InputGlue);
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-02-28 23:37:22 +01:00
|
|
|
// Create the node.
|
2014-04-14 02:51:57 +02:00
|
|
|
SDNode *Res = nullptr;
|
2016-05-03 07:54:13 +02:00
|
|
|
bool IsMorphNodeTo = Opcode == OPC_MorphNodeTo ||
|
|
|
|
(Opcode >= OPC_MorphNodeTo0 && Opcode <= OPC_MorphNodeTo2);
|
|
|
|
if (!IsMorphNodeTo) {
|
2010-02-28 23:37:22 +01:00
|
|
|
// If this is a normal EmitNode command, just create the new node and
|
|
|
|
// add the results to the RecordedNodes list.
|
2013-05-25 04:42:55 +02:00
|
|
|
Res = CurDAG->getMachineNode(TargetOpc, SDLoc(NodeToMatch),
|
2013-04-20 00:22:57 +02:00
|
|
|
VTList, Ops);
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-12-23 18:13:18 +01:00
|
|
|
// Add all the non-glue/non-chain results to the RecordedNodes list.
|
2010-02-28 23:37:22 +01:00
|
|
|
for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
|
2010-12-21 03:38:05 +01:00
|
|
|
if (VTs[i] == MVT::Other || VTs[i] == MVT::Glue) break;
|
2010-09-22 00:00:25 +02:00
|
|
|
RecordedNodes.push_back(std::pair<SDValue,SDNode*>(SDValue(Res, i),
|
2014-04-14 02:51:57 +02:00
|
|
|
nullptr));
|
2010-02-28 23:37:22 +01:00
|
|
|
}
|
2012-04-21 01:36:09 +02:00
|
|
|
} else {
|
2016-06-03 20:09:53 +02:00
|
|
|
assert(NodeToMatch->getOpcode() != ISD::DELETED_NODE &&
|
|
|
|
"NodeToMatch was removed partway through selection");
|
2016-06-03 22:47:40 +02:00
|
|
|
SelectionDAG::DAGNodeDeletedListener NDL(*CurDAG, [&](SDNode *N,
|
|
|
|
SDNode *E) {
|
|
|
|
auto &Chain = ChainNodesMatched;
|
2016-08-12 00:21:41 +02:00
|
|
|
assert((!E || !is_contained(Chain, N)) &&
|
2016-06-03 22:47:40 +02:00
|
|
|
"Chain node replaced during MorphNode");
|
|
|
|
Chain.erase(std::remove(Chain.begin(), Chain.end(), N), Chain.end());
|
|
|
|
});
|
2016-06-03 20:09:53 +02:00
|
|
|
Res = MorphNode(NodeToMatch, TargetOpc, VTList, Ops, EmitNodeInfo);
|
2010-02-28 23:37:22 +01:00
|
|
|
}
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-12-23 18:13:18 +01:00
|
|
|
// If the node had chain/glue results, update our notion of the current
|
|
|
|
// chain and glue.
|
|
|
|
if (EmitNodeInfo & OPFL_GlueOutput) {
|
|
|
|
InputGlue = SDValue(Res, VTs.size()-1);
|
2010-02-28 23:37:22 +01:00
|
|
|
if (EmitNodeInfo & OPFL_Chain)
|
|
|
|
InputChain = SDValue(Res, VTs.size()-2);
|
|
|
|
} else if (EmitNodeInfo & OPFL_Chain)
|
|
|
|
InputChain = SDValue(Res, VTs.size()-1);
|
|
|
|
|
2010-12-23 18:13:18 +01:00
|
|
|
// If the OPFL_MemRefs glue is set on this node, slap all of the
|
2010-02-28 23:37:22 +01:00
|
|
|
// accumulated memrefs onto it.
|
|
|
|
//
|
|
|
|
// FIXME: This is vastly incorrect for patterns with multiple outputs
|
|
|
|
// instructions that access memory and for ComplexPatterns that match
|
|
|
|
// loads.
|
|
|
|
if (EmitNodeInfo & OPFL_MemRefs) {
|
2011-05-20 01:44:34 +02:00
|
|
|
// Only attach load or store memory operands if the generated
|
|
|
|
// instruction may load or store.
|
2014-10-08 03:58:03 +02:00
|
|
|
const MCInstrDesc &MCID = TII->get(TargetOpc);
|
2011-06-28 21:10:37 +02:00
|
|
|
bool mayLoad = MCID.mayLoad();
|
|
|
|
bool mayStore = MCID.mayStore();
|
2011-05-20 01:44:34 +02:00
|
|
|
|
|
|
|
unsigned NumMemRefs = 0;
|
2013-07-03 07:18:47 +02:00
|
|
|
for (SmallVectorImpl<MachineMemOperand *>::const_iterator I =
|
|
|
|
MatchedMemRefs.begin(), E = MatchedMemRefs.end(); I != E; ++I) {
|
2011-05-20 01:44:34 +02:00
|
|
|
if ((*I)->isLoad()) {
|
|
|
|
if (mayLoad)
|
|
|
|
++NumMemRefs;
|
|
|
|
} else if ((*I)->isStore()) {
|
|
|
|
if (mayStore)
|
|
|
|
++NumMemRefs;
|
|
|
|
} else {
|
|
|
|
++NumMemRefs;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-02-28 23:37:22 +01:00
|
|
|
MachineSDNode::mmo_iterator MemRefs =
|
2011-05-20 01:44:34 +02:00
|
|
|
MF->allocateMemRefsArray(NumMemRefs);
|
|
|
|
|
|
|
|
MachineSDNode::mmo_iterator MemRefsPos = MemRefs;
|
2013-07-03 07:18:47 +02:00
|
|
|
for (SmallVectorImpl<MachineMemOperand *>::const_iterator I =
|
|
|
|
MatchedMemRefs.begin(), E = MatchedMemRefs.end(); I != E; ++I) {
|
2011-05-20 01:44:34 +02:00
|
|
|
if ((*I)->isLoad()) {
|
|
|
|
if (mayLoad)
|
|
|
|
*MemRefsPos++ = *I;
|
|
|
|
} else if ((*I)->isStore()) {
|
|
|
|
if (mayStore)
|
|
|
|
*MemRefsPos++ = *I;
|
|
|
|
} else {
|
|
|
|
*MemRefsPos++ = *I;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-02-28 23:37:22 +01:00
|
|
|
cast<MachineSDNode>(Res)
|
2011-05-20 01:44:34 +02:00
|
|
|
->setMemRefs(MemRefs, MemRefs + NumMemRefs);
|
2010-02-28 23:37:22 +01:00
|
|
|
}
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2013-04-19 23:37:07 +02:00
|
|
|
DEBUG(dbgs() << " "
|
2016-05-03 07:54:13 +02:00
|
|
|
<< (IsMorphNodeTo ? "Morphed" : "Created")
|
2013-04-19 23:37:07 +02:00
|
|
|
<< " node: "; Res->dump(CurDAG); dbgs() << "\n");
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-02-28 23:37:22 +01:00
|
|
|
// If this was a MorphNodeTo then we're completely done!
|
2016-05-03 07:54:13 +02:00
|
|
|
if (IsMorphNodeTo) {
|
2016-05-06 00:37:45 +02:00
|
|
|
// Update chain uses.
|
2016-06-01 22:55:26 +02:00
|
|
|
UpdateChains(Res, InputChain, ChainNodesMatched, true);
|
2016-05-11 00:58:26 +02:00
|
|
|
return;
|
2010-02-28 23:37:22 +01:00
|
|
|
}
|
|
|
|
continue;
|
|
|
|
}
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-02-28 23:37:22 +01:00
|
|
|
case OPC_CompleteMatch: {
|
|
|
|
// The match has been completed, and any new nodes (if any) have been
|
|
|
|
// created. Patch up references to the matched dag to use the newly
|
|
|
|
// created nodes.
|
|
|
|
unsigned NumResults = MatcherTable[MatcherIndex++];
|
|
|
|
|
|
|
|
for (unsigned i = 0; i != NumResults; ++i) {
|
|
|
|
unsigned ResSlot = MatcherTable[MatcherIndex++];
|
|
|
|
if (ResSlot & 128)
|
|
|
|
ResSlot = GetVBR(ResSlot, MatcherTable, MatcherIndex);
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2013-10-07 00:38:19 +02:00
|
|
|
assert(ResSlot < RecordedNodes.size() && "Invalid CompleteMatch");
|
2010-09-22 00:00:25 +02:00
|
|
|
SDValue Res = RecordedNodes[ResSlot].first;
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-03-28 07:54:03 +02:00
|
|
|
assert(i < NodeToMatch->getNumValues() &&
|
|
|
|
NodeToMatch->getValueType(i) != MVT::Other &&
|
2010-12-21 03:38:05 +01:00
|
|
|
NodeToMatch->getValueType(i) != MVT::Glue &&
|
2010-03-28 07:54:03 +02:00
|
|
|
"Invalid number of results to complete!");
|
2010-02-28 23:37:22 +01:00
|
|
|
assert((NodeToMatch->getValueType(i) == Res.getValueType() ||
|
|
|
|
NodeToMatch->getValueType(i) == MVT::iPTR ||
|
|
|
|
Res.getValueType() == MVT::iPTR ||
|
|
|
|
NodeToMatch->getValueType(i).getSizeInBits() ==
|
2016-09-14 18:05:51 +02:00
|
|
|
Res.getValueSizeInBits()) &&
|
2010-02-28 23:37:22 +01:00
|
|
|
"invalid replacement");
|
|
|
|
CurDAG->ReplaceAllUsesOfValueWith(SDValue(NodeToMatch, i), Res);
|
|
|
|
}
|
|
|
|
|
2016-05-06 00:37:45 +02:00
|
|
|
// Update chain uses.
|
|
|
|
UpdateChains(NodeToMatch, InputChain, ChainNodesMatched, false);
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2016-05-06 00:37:45 +02:00
|
|
|
// If the root node defines glue, we need to update it to the glue result.
|
|
|
|
// TODO: This never happens in our tests and I think it can be removed /
|
|
|
|
// replaced with an assert, but if we do it this the way the change is
|
|
|
|
// NFC.
|
|
|
|
if (NodeToMatch->getValueType(NodeToMatch->getNumValues() - 1) ==
|
|
|
|
MVT::Glue &&
|
|
|
|
InputGlue.getNode())
|
|
|
|
CurDAG->ReplaceAllUsesOfValueWith(
|
|
|
|
SDValue(NodeToMatch, NodeToMatch->getNumValues() - 1), InputGlue);
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-02-28 23:37:22 +01:00
|
|
|
assert(NodeToMatch->use_empty() &&
|
|
|
|
"Didn't replace all uses of the node?");
|
2016-05-06 20:42:16 +02:00
|
|
|
CurDAG->RemoveDeadNode(NodeToMatch);
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2016-05-11 00:58:26 +02:00
|
|
|
return;
|
2010-02-28 23:37:22 +01:00
|
|
|
}
|
|
|
|
}
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-02-28 23:37:22 +01:00
|
|
|
// If the code reached this point, then the match failed. See if there is
|
|
|
|
// another child to try in the current 'Scope', otherwise pop it until we
|
|
|
|
// find a case to check.
|
2013-04-19 23:37:07 +02:00
|
|
|
DEBUG(dbgs() << " Match failed at index " << CurrentOpcodeIndex << "\n");
|
2013-03-08 23:56:31 +01:00
|
|
|
++NumDAGIselRetries;
|
2017-02-04 03:00:53 +01:00
|
|
|
while (true) {
|
2010-02-28 23:37:22 +01:00
|
|
|
if (MatchScopes.empty()) {
|
|
|
|
CannotYetSelect(NodeToMatch);
|
2016-05-11 00:58:26 +02:00
|
|
|
return;
|
2010-02-28 23:37:22 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Restore the interpreter state back to the point where the scope was
|
|
|
|
// formed.
|
|
|
|
MatchScope &LastScope = MatchScopes.back();
|
|
|
|
RecordedNodes.resize(LastScope.NumRecordedNodes);
|
|
|
|
NodeStack.clear();
|
|
|
|
NodeStack.append(LastScope.NodeStack.begin(), LastScope.NodeStack.end());
|
|
|
|
N = NodeStack.back();
|
|
|
|
|
|
|
|
if (LastScope.NumMatchedMemRefs != MatchedMemRefs.size())
|
|
|
|
MatchedMemRefs.resize(LastScope.NumMatchedMemRefs);
|
|
|
|
MatcherIndex = LastScope.FailIndex;
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2013-04-19 23:37:07 +02:00
|
|
|
DEBUG(dbgs() << " Continuing at " << MatcherIndex << "\n");
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-02-28 23:37:22 +01:00
|
|
|
InputChain = LastScope.InputChain;
|
2010-12-23 18:13:18 +01:00
|
|
|
InputGlue = LastScope.InputGlue;
|
2010-02-28 23:37:22 +01:00
|
|
|
if (!LastScope.HasChainNodesMatched)
|
|
|
|
ChainNodesMatched.clear();
|
|
|
|
|
|
|
|
// Check to see what the offset is at the new MatcherIndex. If it is zero
|
|
|
|
// we have reached the end of this scope, otherwise we have another child
|
|
|
|
// in the current scope to try.
|
|
|
|
unsigned NumToSkip = MatcherTable[MatcherIndex++];
|
|
|
|
if (NumToSkip & 128)
|
|
|
|
NumToSkip = GetVBR(NumToSkip, MatcherTable, MatcherIndex);
|
|
|
|
|
|
|
|
// If we have another child in this scope to match, update FailIndex and
|
|
|
|
// try it.
|
|
|
|
if (NumToSkip != 0) {
|
|
|
|
LastScope.FailIndex = MatcherIndex+NumToSkip;
|
|
|
|
break;
|
|
|
|
}
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-02-28 23:37:22 +01:00
|
|
|
// End of this scope, pop it and try the next child in the containing
|
|
|
|
// scope.
|
|
|
|
MatchScopes.pop_back();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2010-12-24 05:28:06 +01:00
|
|
|
|
2010-01-05 02:24:18 +01:00
|
|
|
/// Report a fatal "Cannot select" error for a node the instruction selector
/// has no pattern for. Intrinsic nodes are reported by intrinsic name (the
/// node dump would be less useful); all other nodes are dumped in full along
/// with the enclosing function's name. Never returns.
void SelectionDAGISel::CannotYetSelect(SDNode *N) {
  std::string ErrInfo;
  raw_string_ostream OS(ErrInfo);
  OS << "Cannot select: ";

  unsigned Opc = N->getOpcode();
  bool IsIntrinsic = Opc == ISD::INTRINSIC_W_CHAIN ||
                     Opc == ISD::INTRINSIC_WO_CHAIN ||
                     Opc == ISD::INTRINSIC_VOID;
  if (IsIntrinsic) {
    // The intrinsic ID is operand 0, shifted by one if a chain operand
    // precedes it (bool converts to the 0/1 operand index).
    bool HasInputChain = N->getOperand(0).getValueType() == MVT::Other;
    unsigned iid =
        cast<ConstantSDNode>(N->getOperand(HasInputChain))->getZExtValue();
    if (iid < Intrinsic::num_intrinsics)
      Msg << "intrinsic %" << Intrinsic::getName((Intrinsic::ID)iid, None);
    else if (const TargetIntrinsicInfo *TII = TM.getIntrinsicInfo())
      Msg << "target intrinsic %" << TII->getName(iid);
    else
      Msg << "unknown intrinsic #" << iid;
  } else {
    // Ordinary node: dump it (with operands) and name the offending function.
    N->printrFull(OS, CurDAG);
    OS << "\nIn function: " << MF->getName();
  }
  report_fatal_error(OS.str());
}
|
|
|
|
|
2007-05-03 03:11:54 +02:00
|
|
|
// Static pass-identifier member; LLVM's pass infrastructure keys off the
// address of this variable (its value is irrelevant).
char SelectionDAGISel::ID = 0;
|