GlobalISel: implement simple function calls on AArch64.

We're still limited in the arguments we support, but this at least handles the
basic cases.

llvm-svn: 278293
Tim Northover 2016-08-10 21:44:01 +00:00
parent 179532ade9
commit cd8fd28f8c
8 changed files with 184 additions and 49 deletions
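The "basic cases" mentioned in the commit message are, roughly, calls whose arguments and return value are register-sized integers passed entirely in registers, with either a statically known or an indirect callee. The snippet below is an illustration written for this summary (it is not taken from the patch or its tests) of the two call shapes the new lowering distinguishes:

// Illustration only: register-sized integer arguments, no varargs, no
// stack-passed or aggregate values, and a single (or void) return value.
extern "C" long callee(long a, long b);

long direct_call(long a, long b) {
  return callee(a, b);   // direct call: lowered to an AArch64 BL of the callee symbol
}

long indirect_call(long (*fn)(long, long), long a, long b) {
  return fn(a, b);       // indirect call: lowered to a BLR on the callee's virtual register
}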

include/llvm/CodeGen/GlobalISel/CallLowering.h

@@ -46,8 +46,8 @@ class CallLowering {
/// This hook is used by GlobalISel.
///
/// \return True if the lowering succeeds, false otherwise.
virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
unsigned VReg) const {
virtual bool lowerReturn(MachineIRBuilder &MIRBuilder,
const Value *Val, unsigned VReg) const {
return false;
}
@@ -63,7 +63,27 @@ class CallLowering {
virtual bool
lowerFormalArguments(MachineIRBuilder &MIRBuilder,
const Function::ArgumentListType &Args,
const SmallVectorImpl<unsigned> &VRegs) const {
ArrayRef<unsigned> VRegs) const {
return false;
}
/// This hook must be implemented to lower the given call instruction,
/// including argument and return value marshalling.
///
/// \p CalleeReg is a virtual-register containing the destination if
/// `CI.getCalledFunction()` returns null (i.e. if the call is indirect);
/// otherwise it is 0.
///
/// \p ResReg is a register where the call's return value should be stored (or
/// 0 if there is no return value).
///
/// \p ArgRegs is a list of virtual registers containing each argument that
/// needs to be passed.
///
/// \return true if the lowering succeeded, false otherwise.
virtual bool lowerCall(MachineIRBuilder &MIRBuilder, const CallInst &CI,
unsigned CalleeReg, unsigned ResReg,
ArrayRef<unsigned> ArgRegs) const {
return false;
}
};
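In practice the contract above means a caller first flattens the IR call into virtual registers and only then invokes the hook. The sketch below mirrors what the IRTranslator change later in this commit does; getOrCreateVReg and CLI are IRTranslator members and are assumed here, they are not part of this header:

// Sketch of the caller's side, given an IR call instruction CI.
unsigned CalleeReg = CI.getCalledFunction()
                         ? 0                                      // direct call: callee is known statically
                         : getOrCreateVReg(*CI.getCalledValue()); // indirect call: callee address in a vreg
unsigned ResReg = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);

SmallVector<unsigned, 8> ArgRegs;
for (auto &Arg : CI.arg_operands())
  ArgRegs.push_back(getOrCreateVReg(*Arg));

CLI->lowerCall(MIRBuilder, CI, CalleeReg, ResReg, ArgRegs);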

lib/CodeGen/GlobalISel/IRTranslator.cpp

@@ -187,12 +187,25 @@ bool IRTranslator::translateCast(unsigned Opcode, const CastInst &CI) {
bool IRTranslator::translateCall(const CallInst &CI) {
auto TII = MIRBuilder.getMF().getTarget().getIntrinsicInfo();
const Function &F = *CI.getCalledFunction();
Intrinsic::ID ID = F.getIntrinsicID();
if (TII && ID == Intrinsic::not_intrinsic)
ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(&F));
const Function *F = CI.getCalledFunction();
assert(ID != Intrinsic::not_intrinsic && "FIXME: support real calls");
if (!F || !F->isIntrinsic()) {
// FIXME: handle multiple return values.
unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
SmallVector<unsigned, 8> Args;
for (auto &Arg: CI.arg_operands())
Args.push_back(getOrCreateVReg(*Arg));
return CLI->lowerCall(MIRBuilder, CI,
F ? 0 : getOrCreateVReg(*CI.getCalledValue()), Res,
Args);
}
Intrinsic::ID ID = F->getIntrinsicID();
if (TII && ID == Intrinsic::not_intrinsic)
ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");
// Need types (starting with return) & args.
SmallVector<LLT, 4> Tys;

lib/Target/AArch64/AArch64CallLowering.cpp

@@ -18,7 +18,8 @@
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#ifndef LLVM_BUILD_GLOBAL_ISEL
@@ -30,61 +31,54 @@ AArch64CallLowering::AArch64CallLowering(const AArch64TargetLowering &TLI)
}
bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
const Value *Val, unsigned VReg) const {
MachineInstr *Return = MIRBuilder.buildInstr(AArch64::RET_ReallyLR);
assert(Return && "Unable to build a return instruction?!");
const Value *Val, unsigned VReg) const {
MachineFunction &MF = MIRBuilder.getMF();
const Function &F = *MF.getFunction();
MachineInstrBuilder MIB = MIRBuilder.buildInstr(AArch64::RET_ReallyLR);
assert(MIB.getInstr() && "Unable to build a return instruction?!");
assert(((Val && VReg) || (!Val && !VReg)) && "Return value without a vreg");
if (VReg) {
assert((Val->getType()->isIntegerTy() || Val->getType()->isPointerTy()) &&
"Type not supported yet");
const Function &F = *MIRBuilder.getMF().getFunction();
const DataLayout &DL = F.getParent()->getDataLayout();
unsigned Size = DL.getTypeSizeInBits(Val->getType());
assert((Size == 64 || Size == 32) && "Size not supported yet");
unsigned ResReg = (Size == 32) ? AArch64::W0 : AArch64::X0;
// Set the insertion point to be right before Return.
MIRBuilder.setInstr(*Return, /* Before */ true);
MachineInstr *Copy = MIRBuilder.buildCopy(ResReg, VReg);
(void)Copy;
assert(Copy->getNextNode() == Return &&
"The insertion did not happen where we expected");
MachineInstrBuilder(MIRBuilder.getMF(), Return)
.addReg(ResReg, RegState::Implicit);
MIRBuilder.setInstr(*MIB.getInstr(), /* Before */ true);
const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(F.getCallingConv());
handleAssignments(
MIRBuilder, AssignFn, MVT::getVT(Val->getType()), VReg,
[&](MachineIRBuilder &MIRBuilder, unsigned ValReg, unsigned PhysReg) {
MIRBuilder.buildCopy(PhysReg, ValReg);
MIB.addUse(PhysReg, RegState::Implicit);
});
}
return true;
}
bool AArch64CallLowering::lowerFormalArguments(
MachineIRBuilder &MIRBuilder, const Function::ArgumentListType &Args,
const SmallVectorImpl<unsigned> &VRegs) const {
bool AArch64CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
CCAssignFn *AssignFn,
ArrayRef<MVT> ArgTypes,
ArrayRef<unsigned> ArgRegs,
AssignFnTy AssignValToReg) const {
MachineFunction &MF = MIRBuilder.getMF();
const Function &F = *MF.getFunction();
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
unsigned NumArgs = Args.size();
Function::const_arg_iterator CurOrigArg = Args.begin();
const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
for (unsigned i = 0; i != NumArgs; ++i, ++CurOrigArg) {
MVT ValVT = MVT::getVT(CurOrigArg->getType());
CCAssignFn *AssignFn =
TLI.CCAssignFnForCall(F.getCallingConv(), /*IsVarArg=*/false);
bool Res =
AssignFn(i, ValVT, ValVT, CCValAssign::Full, ISD::ArgFlagsTy(), CCInfo);
unsigned NumArgs = ArgTypes.size();
auto CurVT = ArgTypes.begin();
for (unsigned i = 0; i != NumArgs; ++i, ++CurVT) {
bool Res = AssignFn(i, *CurVT, *CurVT, CCValAssign::Full, ISD::ArgFlagsTy(),
CCInfo);
assert(!Res && "Call operand has unhandled type");
(void)Res;
}
assert(ArgLocs.size() == Args.size() &&
assert(ArgLocs.size() == ArgTypes.size() &&
"We have a different number of location and args?!");
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
assert(VA.isRegLoc() && "Not yet implemented");
// Transform the arguments in physical registers into virtual ones.
MIRBuilder.getMBB().addLiveIn(VA.getLocReg());
MIRBuilder.buildCopy(VRegs[i], VA.getLocReg());
switch (VA.getLocInfo()) {
default:
@@ -103,6 +97,91 @@ bool AArch64CallLowering::lowerFormalArguments(
assert(0 && "Not yet implemented");
break;
}
// Everything checks out, tell the caller where we've decided this
// parameter/return value should go.
AssignValToReg(MIRBuilder, ArgRegs[i], VA.getLocReg());
}
return true;
}
bool AArch64CallLowering::lowerFormalArguments(
MachineIRBuilder &MIRBuilder, const Function::ArgumentListType &Args,
ArrayRef<unsigned> VRegs) const {
MachineFunction &MF = MIRBuilder.getMF();
const Function &F = *MF.getFunction();
SmallVector<MVT, 8> ArgTys;
for (auto &Arg : Args)
ArgTys.push_back(MVT::getVT(Arg.getType()));
const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
CCAssignFn *AssignFn =
TLI.CCAssignFnForCall(F.getCallingConv(), /*IsVarArg=*/false);
return handleAssignments(
MIRBuilder, AssignFn, ArgTys, VRegs,
[](MachineIRBuilder &MIRBuilder, unsigned ValReg, unsigned PhysReg) {
MIRBuilder.getMBB().addLiveIn(PhysReg);
MIRBuilder.buildCopy(ValReg, PhysReg);
});
}
bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
const CallInst &CI, unsigned CalleeReg,
unsigned ResReg,
ArrayRef<unsigned> ArgRegs) const {
MachineFunction &MF = MIRBuilder.getMF();
const Function &F = *MF.getFunction();
// First step is to marshall all the function's parameters into the correct
// physregs and memory locations. Gather the sequence of argument types that
// we'll pass to the assigner function.
SmallVector<MVT, 8> ArgTys;
for (auto &Arg : CI.arg_operands())
ArgTys.push_back(MVT::getVT(Arg->getType()));
// Find out which ABI gets to decide where things go.
const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
CCAssignFn *CallAssignFn =
TLI.CCAssignFnForCall(F.getCallingConv(), /*IsVarArg=*/false);
// And finally we can do the actual assignments. For a call we need to keep
// track of the registers used because they'll be implicit uses of the BL.
SmallVector<unsigned, 8> PhysRegs;
handleAssignments(
MIRBuilder, CallAssignFn, ArgTys, ArgRegs,
[&](MachineIRBuilder &MIRBuilder, unsigned ValReg, unsigned PhysReg) {
MIRBuilder.buildCopy(PhysReg, ValReg);
PhysRegs.push_back(PhysReg);
});
// Now we can build the actual call instruction.
MachineInstrBuilder MIB;
if (CalleeReg)
MIB = MIRBuilder.buildInstr(AArch64::BLR).addUse(CalleeReg);
else
MIB = MIRBuilder.buildInstr(AArch64::BL)
.addGlobalAddress(CI.getCalledFunction());
// Tell the call which registers are clobbered.
auto TRI = MF.getSubtarget().getRegisterInfo();
MIB.addRegMask(TRI->getCallPreservedMask(MF, F.getCallingConv()));
for (auto Reg : PhysRegs)
MIB.addUse(Reg, RegState::Implicit);
// Finally we can copy the returned value back into its virtual-register. In
// symmetry with the arguments, the physical register must be an
// implicit-define of the call instruction.
CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(F.getCallingConv());
if (!CI.getType()->isVoidTy())
handleAssignments(
MIRBuilder, RetAssignFn, MVT::getVT(CI.getType()), ResReg,
[&](MachineIRBuilder &MIRBuilder, unsigned ValReg, unsigned PhysReg) {
MIRBuilder.buildCopy(ValReg, PhysReg);
MIB.addDef(PhysReg, RegState::Implicit);
});
return true;
}

lib/Target/AArch64/AArch64CallLowering.h

@@ -16,6 +16,8 @@
#define LLVM_LIB_TARGET_AARCH64_AARCH64CALLLOWERING
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ValueTypes.h"
namespace llvm {
@@ -27,10 +29,22 @@ class AArch64CallLowering: public CallLowering {
bool lowerReturn(MachineIRBuilder &MIRBuiler, const Value *Val,
unsigned VReg) const override;
bool
lowerFormalArguments(MachineIRBuilder &MIRBuilder,
const Function::ArgumentListType &Args,
const SmallVectorImpl<unsigned> &VRegs) const override;
bool lowerFormalArguments(MachineIRBuilder &MIRBuilder,
const Function::ArgumentListType &Args,
ArrayRef<unsigned> VRegs) const override;
bool lowerCall(MachineIRBuilder &MIRBuilder, const CallInst &CI,
unsigned CalleeReg, unsigned ResReg,
const ArrayRef<unsigned> ArgRegs) const override;
private:
typedef std::function<void(MachineIRBuilder &, unsigned, unsigned)>
AssignFnTy;
bool handleAssignments(MachineIRBuilder &MIRBuilder, CCAssignFn *AssignFn,
ArrayRef<MVT> ArgsTypes, ArrayRef<unsigned> ArgRegs,
AssignFnTy AssignValToReg) const;
};
} // End of namespace llvm;
#endif
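The private handleAssignments helper declared here is the piece the public hooks share: it runs the calling-convention assignment function over the value types and leaves the actual register plumbing to a per-call-site callback. Restating two of the callback shapes from AArch64CallLowering.cpp above as a sketch (a summary of the patch's lambdas, not additional code in it):

// Formal arguments: the ABI-assigned physreg is live-in to the entry block
// and gets copied into the argument's virtual register.
auto FormalArgHandler = [](MachineIRBuilder &MIRBuilder, unsigned ValReg,
                           unsigned PhysReg) {
  MIRBuilder.getMBB().addLiveIn(PhysReg);
  MIRBuilder.buildCopy(ValReg, PhysReg);
};

// Outgoing values (call arguments, return values): the virtual register is
// copied into the ABI-assigned physreg; the hook then attaches PhysReg as an
// implicit operand of the BL/BLR or RET so later passes see the dependency.
auto OutgoingValueHandler = [](MachineIRBuilder &MIRBuilder, unsigned ValReg,
                               unsigned PhysReg) {
  MIRBuilder.buildCopy(PhysReg, ValReg);
};

The call-result case in lowerCall is the mirror image of the outgoing one: a copy from the physreg into the result vreg, with the physreg added as an implicit def of the call instruction.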

lib/Target/AArch64/AArch64ISelLowering.cpp

@@ -2434,6 +2434,12 @@ CCAssignFn *AArch64TargetLowering::CCAssignFnForCall(CallingConv::ID CC,
}
}
CCAssignFn *
AArch64TargetLowering::CCAssignFnForReturn(CallingConv::ID CC) const {
return CC == CallingConv::WebKit_JS ? RetCC_AArch64_WebKit_JS
: RetCC_AArch64_AAPCS;
}
SDValue AArch64TargetLowering::LowerFormalArguments(
SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,

lib/Target/AArch64/AArch64ISelLowering.h

@@ -230,6 +230,9 @@ public:
/// Selects the correct CCAssignFn for a given CallingConvention value.
CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;
/// Selects the correct CCAssignFn for a given CallingConvention value.
CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC) const;
/// Determine which of the bits specified in Mask are known to be either zero
/// or one and return them in the KnownZero/KnownOne bitsets.
void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero,

lib/Target/AMDGPU/AMDGPUCallLowering.cpp

@@ -36,7 +36,7 @@ bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
bool AMDGPUCallLowering::lowerFormalArguments(
MachineIRBuilder &MIRBuilder, const Function::ArgumentListType &Args,
const SmallVectorImpl<unsigned> &VRegs) const {
ArrayRef<unsigned> VRegs) const {
// TODO: Implement once there are generic loads/stores.
return true;
}

lib/Target/AMDGPU/AMDGPUCallLowering.h

@@ -30,7 +30,7 @@ class AMDGPUCallLowering: public CallLowering {
bool
lowerFormalArguments(MachineIRBuilder &MIRBuilder,
const Function::ArgumentListType &Args,
const SmallVectorImpl<unsigned> &VRegs) const override;
ArrayRef<unsigned> VRegs) const override;
};
} // End of namespace llvm;
#endif