llvm-mirror/lib/Target/ARM64/ARM64TargetMachine.cpp
Chad Rosier (da933c5aba):

[ARM64][fast-isel] Disable target-specific optimizations at -O0. Functionally,
this patch disables the dead register elimination pass and the load/store pair
optimization pass at -O0.  The ILP optimizations don't require the optimization
level to be checked because the call to addILPOpts is predicated with the
necessary check.  The AdvSIMDScalar pass is disabled by default at all
optimization levels.  This patch leaves that pass disabled by default.

Also, move command-line options into ARM64TargetMachine.cpp and add a few
additional flags to aid in debugging.  This fixes an issue with the
-debug-pass=Structure flag where passes were printed, but not actually run
(i.e., AdvSIMDScalar pass).

llvm-svn: 208223
2014-05-07 16:41:55 +00:00
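
For reference, the printed pass structure can be compared against what
actually runs with something like the following (a hedged sketch; the input
file name is illustrative):

    llc -mtriple=arm64-apple-ios -O0 -debug-pass=Structure input.ll

After this change, passes such as the load/store pair optimizer and dead
register elimination are gated on the optimization level, so the structure
printed at -O0 matches the passes that really execute.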

//===-- ARM64TargetMachine.cpp - Define TargetMachine for ARM64 -----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//
#include "ARM64.h"
#include "ARM64TargetMachine.h"
#include "llvm/PassManager.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Scalar.h"
using namespace llvm;

static cl::opt<bool>
EnableCCMP("arm64-ccmp", cl::desc("Enable the CCMP formation pass"),
           cl::init(true), cl::Hidden);

static cl::opt<bool>
EnableEarlyIfConvert("arm64-early-ifcvt",
                     cl::desc("Enable the early if converter pass"),
                     cl::init(true), cl::Hidden);

static cl::opt<bool>
EnableStPairSuppress("arm64-stp-suppress",
                     cl::desc("Suppress STP for ARM64"), cl::init(true),
                     cl::Hidden);

static cl::opt<bool>
EnableAdvSIMDScalar("arm64-simd-scalar",
                    cl::desc("Enable use of AdvSIMD scalar integer "
                             "instructions"),
                    cl::init(false), cl::Hidden);

static cl::opt<bool>
EnablePromoteConstant("arm64-promote-const",
                      cl::desc("Enable the promote constant pass"),
                      cl::init(true), cl::Hidden);

static cl::opt<bool>
EnableCollectLOH("arm64-collect-loh",
                 cl::desc("Enable the pass that emits the linker "
                          "optimization hints (LOH)"),
                 cl::init(true), cl::Hidden);

static cl::opt<bool>
EnableDeadRegisterElimination("arm64-dead-def-elimination", cl::Hidden,
                              cl::desc("Enable the pass that removes dead"
                                       " definitions and replaces stores to"
                                       " them with stores to the zero"
                                       " register"),
                              cl::init(true));

static cl::opt<bool>
EnableLoadStoreOpt("arm64-load-store-opt",
                   cl::desc("Enable the load/store pair optimization pass"),
                   cl::init(true), cl::Hidden);
extern "C" void LLVMInitializeARM64Target() {
// Register the target.
RegisterTargetMachine<ARM64leTargetMachine> X(TheARM64leTarget);
RegisterTargetMachine<ARM64beTargetMachine> Y(TheARM64beTarget);
}
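
// (The hook above is located and invoked by LLVM's target registration
// machinery, e.g. when a tool such as llc calls InitializeAllTargets().)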

/// TargetMachine ctor - Create an ARM64 architecture model.
///
ARM64TargetMachine::ARM64TargetMachine(const Target &T, StringRef TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       Reloc::Model RM, CodeModel::Model CM,
                                       CodeGenOpt::Level OL,
                                       bool LittleEndian)
    : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
      Subtarget(TT, CPU, FS, LittleEndian),
      // This nested ternary is horrible, but DL needs to be properly
      // initialized before TLInfo is constructed.
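      // (Annotation: in the strings below, "e"/"E" selects little/big endian,
      // "m:o"/"m:e" Mach-O vs. ELF name mangling, "i64:64"/"i128:128" the
      // alignment of those integer types, "n32:64" the native integer widths,
      // and "S128" a 128-bit natural stack alignment.)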
      DL(Subtarget.isTargetMachO()
             ? "e-m:o-i64:64-i128:128-n32:64-S128"
             : (LittleEndian ? "e-m:e-i64:64-i128:128-n32:64-S128"
                             : "E-m:e-i64:64-i128:128-n32:64-S128")),
      InstrInfo(Subtarget), TLInfo(*this), FrameLowering(*this, Subtarget),
      TSInfo(*this) {
  initAsmInfo();
}

void ARM64leTargetMachine::anchor() { }

ARM64leTargetMachine::ARM64leTargetMachine(const Target &T, StringRef TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Reloc::Model RM,
                                           CodeModel::Model CM,
                                           CodeGenOpt::Level OL)
    : ARM64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}

void ARM64beTargetMachine::anchor() { }

ARM64beTargetMachine::ARM64beTargetMachine(const Target &T, StringRef TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Reloc::Model RM,
                                           CodeModel::Model CM,
                                           CodeGenOpt::Level OL)
    : ARM64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}

namespace {
/// ARM64 Code Generator Pass Configuration Options.
class ARM64PassConfig : public TargetPassConfig {
public:
  ARM64PassConfig(ARM64TargetMachine *TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {}

  ARM64TargetMachine &getARM64TargetMachine() const {
    return getTM<ARM64TargetMachine>();
  }
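
  // Pass-configuration hooks, in the order the generic codegen pipeline
  // invokes them.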
  bool addPreISel() override;
  bool addInstSelector() override;
  bool addILPOpts() override;
  bool addPreRegAlloc() override;
  bool addPostRegAlloc() override;
  bool addPreSched2() override;
  bool addPreEmitPass() override;
};
} // namespace

void ARM64TargetMachine::addAnalysisPasses(PassManagerBase &PM) {
  // Add first the target-independent BasicTTI pass, then our ARM64 pass. This
  // allows the ARM64 pass to delegate to the target-independent layer when
  // appropriate.
  PM.add(createBasicTargetTransformInfoPass(this));
  PM.add(createARM64TargetTransformInfoPass(this));
}

TargetPassConfig *ARM64TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new ARM64PassConfig(this, PM);
}

// Pass Pipeline Configuration
bool ARM64PassConfig::addPreISel() {
  // Run promote constant before global merge, so that the promoted constants
  // get a chance to be merged.
  if (TM->getOptLevel() != CodeGenOpt::None && EnablePromoteConstant)
    addPass(createARM64PromoteConstantPass());
  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createGlobalMergePass(TM));
  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createARM64AddressTypePromotionPass());

  // Always expand atomic operations; we don't deal with atomicrmw or cmpxchg
  // ourselves.
  addPass(createAtomicExpandLoadLinkedPass(TM));

  return false;
}

bool ARM64PassConfig::addInstSelector() {
  addPass(createARM64ISelDag(getARM64TargetMachine(), getOptLevel()));

  // For ELF, clean up any local-dynamic TLS accesses (i.e., combine as many
  // references to _TLS_MODULE_BASE_ as possible).
  if (TM->getSubtarget<ARM64Subtarget>().isTargetELF() &&
      getOptLevel() != CodeGenOpt::None)
    addPass(createARM64CleanupLocalDynamicTLSPass());

  return false;
}
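
// Annotation: addILPOpts needs no explicit CodeGenOpt::None check of its own;
// the generic TargetPassConfig only invokes this hook from the machine SSA
// optimizations, which are skipped entirely at -O0.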
bool ARM64PassConfig::addILPOpts() {
  if (EnableCCMP)
    addPass(createARM64ConditionalCompares());
  if (EnableEarlyIfConvert)
    addPass(&EarlyIfConverterID);
  if (EnableStPairSuppress)
    addPass(createARM64StorePairSuppressPass());
  return true;
}

bool ARM64PassConfig::addPreRegAlloc() {
  // Use AdvSIMD scalar instructions whenever profitable.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAdvSIMDScalar)
    addPass(createARM64AdvSIMDScalar());
  return true;
}
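
// Annotation: integer writes to ARM64's zero register (wzr/xzr) are discarded
// by the hardware, so redirecting a dead definition's destination there keeps
// the instruction's other effects while making the unused result explicit.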
bool ARM64PassConfig::addPostRegAlloc() {
  // Change dead register definitions to refer to the zero register.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableDeadRegisterElimination)
    addPass(createARM64DeadRegisterDefinitions());
  return true;
}

bool ARM64PassConfig::addPreSched2() {
  // Expand some pseudo instructions to allow proper scheduling.
  addPass(createARM64ExpandPseudoPass());
  // Use load/store pair instructions when possible.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableLoadStoreOpt)
    addPass(createARM64LoadStoreOptimizationPass());
  return true;
}

bool ARM64PassConfig::addPreEmitPass() {
  // Relax conditional branch instructions if they're otherwise out of
  // range of their destination.
  addPass(createARM64BranchRelaxation());
  if (TM->getOptLevel() != CodeGenOpt::None && EnableCollectLOH &&
      TM->getSubtarget<ARM64Subtarget>().isTargetMachO())
    addPass(createARM64CollectLOHPass());
  return true;
}