mirror of
https://github.com/RPCS3/llvm-mirror.git
synced 2024-11-22 10:42:39 +01:00
[X86] Deduplicate static calling convention helpers for code size, NFC
Summary: Right now we include ${TGT}GenCallingConv.inc once per each instruction selection method implemented by ${TGT}: - ${TGT}ISelLowering.cpp - ${TGT}CallLowering.cpp - ${TGT}FastISel.cpp Instead, add a mechanism to tablegen for marking a particular convention as "External", which causes tablegen to emit into the ::llvm namespace, instead of as a static helper. This allows us to provide a header to forward declare it, so we can simply call the function from all the places it is referenced. Typically the calling convention analyzer is called indirectly, so it doesn't benefit from inlining. This saves a bit of final binary size, but mostly just saves object file size: before after diff artifact 12852K 12492K -360K X86ISelLowering.cpp.obj 4640K 4280K -360K X86FastISel.cpp.obj 1704K 2092K +388K X86CallingConv.cpp.obj 52448K 52336K -112K llc.exe I didn't collect before numbers for X86CallLowering.cpp.obj, which is for GlobalISel, but we should save 360K there as well. This patch applies the strategy to the X86 backend, but there is no reason it couldn't be applied to the other backends that implement multiple ISel strategies, like AArch64. Reviewers: craig.topper, hfinkel, efriedma Subscribers: javed.absar, kristof.beyls, hiraditya, llvm-commits Differential Revision: https://reviews.llvm.org/D56883 llvm-svn: 351616
This commit is contained in:
parent
fa15d4bb37
commit
fa6cb76678
@ -160,6 +160,11 @@ class CCDelegateTo<CallingConv cc> : CCAction {
|
||||
/// that the target supports.
|
||||
class CallingConv<list<CCAction> actions> {
|
||||
list<CCAction> Actions = actions;
|
||||
|
||||
/// If true, this calling convention will be emitted as externally visible in
|
||||
/// the llvm namespace instead of as a static function.
|
||||
bit Entry = 0;
|
||||
|
||||
bit Custom = 0;
|
||||
}
|
||||
|
||||
|
@ -48,8 +48,6 @@
|
||||
|
||||
using namespace llvm;
|
||||
|
||||
#include "X86GenCallingConv.inc"
|
||||
|
||||
X86CallLowering::X86CallLowering(const X86TargetLowering &TLI)
|
||||
: CallLowering(&TLI) {}
|
||||
|
||||
|
@ -12,16 +12,23 @@
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#include "MCTargetDesc/X86MCTargetDesc.h"
|
||||
#include "X86CallingConv.h"
|
||||
#include "X86Subtarget.h"
|
||||
#include "llvm/ADT/SmallVector.h"
|
||||
#include "llvm/CodeGen/CallingConvLower.h"
|
||||
#include "llvm/IR/CallingConv.h"
|
||||
|
||||
namespace llvm {
|
||||
using namespace llvm;
|
||||
|
||||
bool CC_X86_32_RegCall_Assign2Regs(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
|
||||
/// When regcall calling convention compiled to 32 bit arch, special treatment
|
||||
/// is required for 64 bit masks.
|
||||
/// The value should be assigned to two GPRs.
|
||||
/// \return true if registers were allocated and false otherwise.
|
||||
static bool CC_X86_32_RegCall_Assign2Regs(unsigned &ValNo, MVT &ValVT,
|
||||
MVT &LocVT,
|
||||
CCValAssign::LocInfo &LocInfo,
|
||||
ISD::ArgFlagsTy &ArgFlags, CCState &State) {
|
||||
ISD::ArgFlagsTy &ArgFlags,
|
||||
CCState &State) {
|
||||
// List of GPR registers that are available to store values in regcall
|
||||
// calling convention.
|
||||
static const MCPhysReg RegList[] = {X86::EAX, X86::ECX, X86::EDX, X86::EDI,
|
||||
@ -113,7 +120,13 @@ static bool CC_X86_VectorCallAssignRegister(unsigned &ValNo, MVT &ValVT,
|
||||
return false;
|
||||
}
|
||||
|
||||
bool CC_X86_64_VectorCall(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
|
||||
/// Vectorcall calling convention has special handling for vector types or
|
||||
/// HVA for 64 bit arch.
|
||||
/// For HVAs shadow registers might be allocated on the first pass
|
||||
/// and actual XMM registers are allocated on the second pass.
|
||||
/// For vector types, actual XMM registers are allocated on the first pass.
|
||||
/// \return true if registers were allocated and false otherwise.
|
||||
static bool CC_X86_64_VectorCall(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
|
||||
CCValAssign::LocInfo &LocInfo,
|
||||
ISD::ArgFlagsTy &ArgFlags, CCState &State) {
|
||||
// On the second pass, go through the HVAs only.
|
||||
@ -165,7 +178,12 @@ bool CC_X86_64_VectorCall(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
|
||||
return ArgFlags.isHva();
|
||||
}
|
||||
|
||||
bool CC_X86_32_VectorCall(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
|
||||
/// Vectorcall calling convention has special handling for vector types or
|
||||
/// HVA for 32 bit arch.
|
||||
/// For HVAs actual XMM registers are allocated on the second pass.
|
||||
/// For vector types, actual XMM registers are allocated on the first pass.
|
||||
/// \return true if registers were allocated and false otherwise.
|
||||
static bool CC_X86_32_VectorCall(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
|
||||
CCValAssign::LocInfo &LocInfo,
|
||||
ISD::ArgFlagsTy &ArgFlags, CCState &State) {
|
||||
// On the second pass, go through the HVAs only.
|
||||
@ -205,7 +223,16 @@ bool CC_X86_32_VectorCall(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
|
||||
return false; // No register was assigned - Continue the search.
|
||||
}
|
||||
|
||||
bool CC_X86_32_MCUInReg(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
|
||||
static bool CC_X86_AnyReg_Error(unsigned &, MVT &, MVT &,
|
||||
CCValAssign::LocInfo &, ISD::ArgFlagsTy &,
|
||||
CCState &) {
|
||||
llvm_unreachable("The AnyReg calling convention is only supported by the "
|
||||
"stackmap and patchpoint intrinsics.");
|
||||
// gracefully fallback to X86 C calling convention on Release builds.
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool CC_X86_32_MCUInReg(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
|
||||
CCValAssign::LocInfo &LocInfo,
|
||||
ISD::ArgFlagsTy &ArgFlags, CCState &State) {
|
||||
// This is similar to CCAssignToReg<[EAX, EDX, ECX]>, but makes sure
|
||||
@ -261,4 +288,5 @@ bool CC_X86_32_MCUInReg(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
|
||||
return true;
|
||||
}
|
||||
|
||||
} // End llvm namespace
|
||||
// Provides entry points of CC_X86 and RetCC_X86.
|
||||
#include "X86GenCallingConv.inc"
|
||||
|
@ -21,45 +21,12 @@
|
||||
|
||||
namespace llvm {
|
||||
|
||||
/// When regcall calling convention compiled to 32 bit arch, special treatment
|
||||
/// is required for 64 bit masks.
|
||||
/// The value should be assigned to two GPRs.
|
||||
/// \return true if registers were allocated and false otherwise.
|
||||
bool CC_X86_32_RegCall_Assign2Regs(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
|
||||
CCValAssign::LocInfo &LocInfo,
|
||||
ISD::ArgFlagsTy &ArgFlags, CCState &State);
|
||||
bool RetCC_X86(unsigned ValNo, MVT ValVT, MVT LocVT,
|
||||
CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
|
||||
CCState &State);
|
||||
|
||||
/// Vectorcall calling convention has special handling for vector types or
|
||||
/// HVA for 64 bit arch.
|
||||
/// For HVAs shadow registers might be allocated on the first pass
|
||||
/// and actual XMM registers are allocated on the second pass.
|
||||
/// For vector types, actual XMM registers are allocated on the first pass.
|
||||
/// \return true if registers were allocated and false otherwise.
|
||||
bool CC_X86_64_VectorCall(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
|
||||
CCValAssign::LocInfo &LocInfo,
|
||||
ISD::ArgFlagsTy &ArgFlags, CCState &State);
|
||||
|
||||
/// Vectorcall calling convention has special handling for vector types or
|
||||
/// HVA for 32 bit arch.
|
||||
/// For HVAs actual XMM registers are allocated on the second pass.
|
||||
/// For vector types, actual XMM registers are allocated on the first pass.
|
||||
/// \return true if registers were allocated and false otherwise.
|
||||
bool CC_X86_32_VectorCall(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
|
||||
CCValAssign::LocInfo &LocInfo,
|
||||
ISD::ArgFlagsTy &ArgFlags, CCState &State);
|
||||
|
||||
inline bool CC_X86_AnyReg_Error(unsigned &, MVT &, MVT &,
|
||||
CCValAssign::LocInfo &, ISD::ArgFlagsTy &,
|
||||
CCState &) {
|
||||
llvm_unreachable("The AnyReg calling convention is only supported by the " \
|
||||
"stackmap and patchpoint intrinsics.");
|
||||
// gracefully fallback to X86 C calling convention on Release builds.
|
||||
return false;
|
||||
}
|
||||
|
||||
bool CC_X86_32_MCUInReg(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
|
||||
CCValAssign::LocInfo &LocInfo,
|
||||
ISD::ArgFlagsTy &ArgFlags, CCState &State);
|
||||
bool CC_X86(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
|
||||
ISD::ArgFlagsTy ArgFlags, CCState &State);
|
||||
|
||||
} // End llvm namespace
|
||||
|
||||
|
@ -477,6 +477,7 @@ def RetCC_X86_64 : CallingConv<[
|
||||
]>;
|
||||
|
||||
// This is the return-value convention used for the entire X86 backend.
|
||||
let Entry = 1 in
|
||||
def RetCC_X86 : CallingConv<[
|
||||
|
||||
// Check if this is the Intel OpenCL built-ins calling convention
|
||||
@ -1039,6 +1040,7 @@ def CC_X86_64 : CallingConv<[
|
||||
]>;
|
||||
|
||||
// This is the argument convention used for the entire X86 backend.
|
||||
let Entry = 1 in
|
||||
def CC_X86 : CallingConv<[
|
||||
CCIfCC<"CallingConv::Intel_OCL_BI", CCDelegateTo<CC_Intel_OCL_BI>>,
|
||||
CCIfSubtarget<"is64Bit()", CCDelegateTo<CC_X86_64>>,
|
||||
|
@ -312,8 +312,6 @@ bool X86FastISel::isTypeLegal(Type *Ty, MVT &VT, bool AllowI1) {
|
||||
return (AllowI1 && VT == MVT::i1) || TLI.isTypeLegal(VT);
|
||||
}
|
||||
|
||||
#include "X86GenCallingConv.inc"
|
||||
|
||||
/// X86FastEmitLoad - Emit a machine instruction to load a value of type VT.
|
||||
/// The address is either pre-computed, i.e. Ptr, or a GlobalAddress, i.e. GV.
|
||||
/// Return true and the result register by reference if it is possible.
|
||||
|
@ -2347,8 +2347,6 @@ bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
|
||||
// Return Value Calling Convention Implementation
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#include "X86GenCallingConv.inc"
|
||||
|
||||
bool X86TargetLowering::CanLowerReturn(
|
||||
CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
|
||||
const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
|
||||
|
@ -41,11 +41,17 @@ void CallingConvEmitter::run(raw_ostream &O) {
|
||||
// each other.
|
||||
for (Record *CC : CCs) {
|
||||
if (!CC->getValueAsBit("Custom")) {
|
||||
O << "static bool " << CC->getName()
|
||||
<< "(unsigned ValNo, MVT ValVT,\n"
|
||||
<< std::string(CC->getName().size() + 13, ' ')
|
||||
<< "MVT LocVT, CCValAssign::LocInfo LocInfo,\n"
|
||||
<< std::string(CC->getName().size() + 13, ' ')
|
||||
unsigned Pad = CC->getName().size();
|
||||
if (CC->getValueAsBit("Entry")) {
|
||||
O << "bool llvm::";
|
||||
Pad += 12;
|
||||
} else {
|
||||
O << "static bool ";
|
||||
Pad += 13;
|
||||
}
|
||||
O << CC->getName() << "(unsigned ValNo, MVT ValVT,\n"
|
||||
<< std::string(Pad, ' ') << "MVT LocVT, CCValAssign::LocInfo LocInfo,\n"
|
||||
<< std::string(Pad, ' ')
|
||||
<< "ISD::ArgFlagsTy ArgFlags, CCState &State);\n";
|
||||
}
|
||||
}
|
||||
@ -62,12 +68,18 @@ void CallingConvEmitter::EmitCallingConv(Record *CC, raw_ostream &O) {
|
||||
ListInit *CCActions = CC->getValueAsListInit("Actions");
|
||||
Counter = 0;
|
||||
|
||||
O << "\n\nstatic bool " << CC->getName()
|
||||
<< "(unsigned ValNo, MVT ValVT,\n"
|
||||
<< std::string(CC->getName().size()+13, ' ')
|
||||
<< "MVT LocVT, CCValAssign::LocInfo LocInfo,\n"
|
||||
<< std::string(CC->getName().size()+13, ' ')
|
||||
<< "ISD::ArgFlagsTy ArgFlags, CCState &State) {\n";
|
||||
O << "\n\n";
|
||||
unsigned Pad = CC->getName().size();
|
||||
if (CC->getValueAsBit("Entry")) {
|
||||
O << "bool llvm::";
|
||||
Pad += 12;
|
||||
} else {
|
||||
O << "static bool ";
|
||||
Pad += 13;
|
||||
}
|
||||
O << CC->getName() << "(unsigned ValNo, MVT ValVT,\n"
|
||||
<< std::string(Pad, ' ') << "MVT LocVT, CCValAssign::LocInfo LocInfo,\n"
|
||||
<< std::string(Pad, ' ') << "ISD::ArgFlagsTy ArgFlags, CCState &State) {\n";
|
||||
// Emit all of the actions, in order.
|
||||
for (unsigned i = 0, e = CCActions->size(); i != e; ++i) {
|
||||
O << "\n";
|
||||
|
Loading…
Reference in New Issue
Block a user