AMDGPU/GlobalISel: Implement tail calls
Or at least the sibling call cases which the DAG already handles.
This commit is contained in:
parent 1b32fba3b3
commit 2f4beff49d
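For illustration, a minimal sketch of the sibling-call pattern this lowering now handles (the function names are made up for the example, not taken from the commit): caller and callee use the same calling convention, there are no byval or swifterror arguments, and the callee needs no more stack-argument space than the caller received, so the call is emitted as SI_TCRETURN instead of SI_CALL followed by a separate return.

declare hidden void @callee_func(i32)

define void @sibling_caller(i32 %x) {
entry:
  ; The call is marked "tail" and passes the checks in
  ; isEligibleForTailCallOptimization below, so it lowers as a sibling call.
  tail call void @callee_func(i32 %x)
  ret void
}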
@@ -188,7 +188,11 @@ struct AMDGPUOutgoingArgHandler : public AMDGPUOutgoingValueHandler {
     const LLT S32 = LLT::scalar(32);
 
     if (IsTailCall) {
-      llvm_unreachable("implement me");
+      Offset += FPDiff;
+      int FI = MF.getFrameInfo().CreateFixedObject(Size, Offset, true);
+      auto FIReg = MIRBuilder.buildFrameIndex(PtrTy, FI);
+      MPO = MachinePointerInfo::getFixedStack(MF, FI);
+      return FIReg.getReg(0);
     }
 
     const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
@@ -714,6 +718,8 @@ bool AMDGPUCallLowering::lowerFormalArguments(
   if (!handleAssignments(Handler, SplitArgs, CCInfo, ArgLocs, B))
     return false;
 
+  uint64_t StackOffset = Assigner.StackOffset;
+
   if (!IsEntryFunc && !AMDGPUTargetMachine::EnableFixedFunctionABI) {
     // Special inputs come after user arguments.
     TLI.allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
@@ -728,6 +734,12 @@ bool AMDGPUCallLowering::lowerFormalArguments(
     TLI.allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
   }
 
+  // When we tail call, we need to check if the callee's arguments will fit on
+  // the caller's stack. So, whenever we lower formal arguments, we should keep
+  // track of this information, since we might lower a tail call in this
+  // function later.
+  Info->setBytesInStackArgArea(StackOffset);
+
   // Move back to the end of the basic block.
   B.setMBB(MBB);
 
@@ -890,7 +902,7 @@ getAssignFnsForCC(CallingConv::ID CC, const SITargetLowering &TLI) {
 
 static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect,
                               bool IsTailCall) {
-  return AMDGPU::SI_CALL;
+  return IsTailCall ? AMDGPU::SI_TCRETURN : AMDGPU::SI_CALL;
 }
 
 // Add operands to call instruction to track the callee.
@@ -914,6 +926,316 @@ static bool addCallTargetOperands(MachineInstrBuilder &CallInst,
   return true;
 }
 
+bool AMDGPUCallLowering::doCallerAndCalleePassArgsTheSameWay(
+    CallLoweringInfo &Info, MachineFunction &MF,
+    SmallVectorImpl<ArgInfo> &InArgs) const {
+  const Function &CallerF = MF.getFunction();
+  CallingConv::ID CalleeCC = Info.CallConv;
+  CallingConv::ID CallerCC = CallerF.getCallingConv();
+
+  // If the calling conventions match, then everything must be the same.
+  if (CalleeCC == CallerCC)
+    return true;
+
+  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
+
+  // Make sure that the caller and callee preserve all of the same registers.
+  auto TRI = ST.getRegisterInfo();
+
+  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
+  const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
+  if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
+    return false;
+
+  // Check if the caller and callee will handle arguments in the same way.
+  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
+  CCAssignFn *CalleeAssignFnFixed;
+  CCAssignFn *CalleeAssignFnVarArg;
+  std::tie(CalleeAssignFnFixed, CalleeAssignFnVarArg) =
+      getAssignFnsForCC(CalleeCC, TLI);
+
+  CCAssignFn *CallerAssignFnFixed;
+  CCAssignFn *CallerAssignFnVarArg;
+  std::tie(CallerAssignFnFixed, CallerAssignFnVarArg) =
+      getAssignFnsForCC(CallerCC, TLI);
+
+  // FIXME: We are not accounting for potential differences in implicitly passed
+  // inputs, but only the fixed ABI is supported now anyway.
+  IncomingValueAssigner CalleeAssigner(CalleeAssignFnFixed,
+                                       CalleeAssignFnVarArg);
+  IncomingValueAssigner CallerAssigner(CallerAssignFnFixed,
+                                       CallerAssignFnVarArg);
+  return resultsCompatible(Info, MF, InArgs, CalleeAssigner, CallerAssigner);
+}
+
+bool AMDGPUCallLowering::areCalleeOutgoingArgsTailCallable(
+    CallLoweringInfo &Info, MachineFunction &MF,
+    SmallVectorImpl<ArgInfo> &OutArgs) const {
+  // If there are no outgoing arguments, then we are done.
+  if (OutArgs.empty())
+    return true;
+
+  const Function &CallerF = MF.getFunction();
+  CallingConv::ID CalleeCC = Info.CallConv;
+  CallingConv::ID CallerCC = CallerF.getCallingConv();
+  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
+
+  CCAssignFn *AssignFnFixed;
+  CCAssignFn *AssignFnVarArg;
+  std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);
+
+  // We have outgoing arguments. Make sure that we can tail call with them.
+  SmallVector<CCValAssign, 16> OutLocs;
+  CCState OutInfo(CalleeCC, false, MF, OutLocs, CallerF.getContext());
+  OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg);
+
+  if (!determineAssignments(Assigner, OutArgs, OutInfo)) {
+    LLVM_DEBUG(dbgs() << "... Could not analyze call operands.\n");
+    return false;
+  }
+
+  // Make sure that they can fit on the caller's stack.
+  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
+  if (OutInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea()) {
+    LLVM_DEBUG(dbgs() << "... Cannot fit call operands on caller's stack.\n");
+    return false;
+  }
+
+  // Verify that the parameters in callee-saved registers match.
+  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
+  const SIRegisterInfo *TRI = ST.getRegisterInfo();
+  const uint32_t *CallerPreservedMask = TRI->getCallPreservedMask(MF, CallerCC);
+  MachineRegisterInfo &MRI = MF.getRegInfo();
+  return parametersInCSRMatch(MRI, CallerPreservedMask, OutLocs, OutArgs);
+}
+
+/// Return true if the calling convention is one that we can guarantee TCO for.
+static bool canGuaranteeTCO(CallingConv::ID CC) {
+  return CC == CallingConv::Fast;
+}
+
+/// Return true if we might ever do TCO for calls with this calling convention.
+static bool mayTailCallThisCC(CallingConv::ID CC) {
+  switch (CC) {
+  case CallingConv::C:
+  case CallingConv::AMDGPU_Gfx:
+    return true;
+  default:
+    return canGuaranteeTCO(CC);
+  }
+}
+
+bool AMDGPUCallLowering::isEligibleForTailCallOptimization(
+    MachineIRBuilder &B, CallLoweringInfo &Info,
+    SmallVectorImpl<ArgInfo> &InArgs, SmallVectorImpl<ArgInfo> &OutArgs) const {
+  // Must pass all target-independent checks in order to tail call optimize.
+  if (!Info.IsTailCall)
+    return false;
+
+  MachineFunction &MF = B.getMF();
+  const Function &CallerF = MF.getFunction();
+  CallingConv::ID CalleeCC = Info.CallConv;
+  CallingConv::ID CallerCC = CallerF.getCallingConv();
+
+  const SIRegisterInfo *TRI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo();
+  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
+  // Kernels aren't callable, and don't have a live in return address so it
+  // doesn't make sense to do a tail call with entry functions.
+  if (!CallerPreserved)
+    return false;
+
+  if (!mayTailCallThisCC(CalleeCC)) {
+    LLVM_DEBUG(dbgs() << "... Calling convention cannot be tail called.\n");
+    return false;
+  }
+
+  if (any_of(CallerF.args(), [](const Argument &A) {
+        return A.hasByValAttr() || A.hasSwiftErrorAttr();
+      })) {
+    LLVM_DEBUG(dbgs() << "... Cannot tail call from callers with byval "
+                         "or swifterror arguments\n");
+    return false;
+  }
+
+  // If we have -tailcallopt, then we're done.
+  if (MF.getTarget().Options.GuaranteedTailCallOpt)
+    return canGuaranteeTCO(CalleeCC) && CalleeCC == CallerF.getCallingConv();
+
+  // Verify that the incoming and outgoing arguments from the callee are
+  // safe to tail call.
+  if (!doCallerAndCalleePassArgsTheSameWay(Info, MF, InArgs)) {
+    LLVM_DEBUG(
+        dbgs()
+        << "... Caller and callee have incompatible calling conventions.\n");
+    return false;
+  }
+
+  if (!areCalleeOutgoingArgsTailCallable(Info, MF, OutArgs))
+    return false;
+
+  LLVM_DEBUG(dbgs() << "... Call is eligible for tail call optimization.\n");
+  return true;
+}
+
+// Insert outgoing implicit arguments for a call, by inserting copies to the
+// implicit argument registers and adding the necessary implicit uses to the
+// call instruction.
+void AMDGPUCallLowering::handleImplicitCallArguments(
+    MachineIRBuilder &MIRBuilder, MachineInstrBuilder &CallInst,
+    const GCNSubtarget &ST, const SIMachineFunctionInfo &FuncInfo,
+    ArrayRef<std::pair<MCRegister, Register>> ImplicitArgRegs) const {
+  if (!ST.enableFlatScratch()) {
+    // Insert copies for the SRD. In the HSA case, this should be an identity
+    // copy.
+    auto ScratchRSrcReg =
+        MIRBuilder.buildCopy(LLT::vector(4, 32), FuncInfo.getScratchRSrcReg());
+    MIRBuilder.buildCopy(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg);
+    CallInst.addReg(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, RegState::Implicit);
+  }
+
+  for (std::pair<MCRegister, Register> ArgReg : ImplicitArgRegs) {
+    MIRBuilder.buildCopy((Register)ArgReg.first, ArgReg.second);
+    CallInst.addReg(ArgReg.first, RegState::Implicit);
+  }
+}
+
+bool AMDGPUCallLowering::lowerTailCall(
+    MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
+    SmallVectorImpl<ArgInfo> &OutArgs) const {
+  MachineFunction &MF = MIRBuilder.getMF();
+  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
+  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
+  const Function &F = MF.getFunction();
+  MachineRegisterInfo &MRI = MF.getRegInfo();
+  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
+
+  // True when we're tail calling, but without -tailcallopt.
+  bool IsSibCall = !MF.getTarget().Options.GuaranteedTailCallOpt;
+
+  // Find out which ABI gets to decide where things go.
+  CallingConv::ID CalleeCC = Info.CallConv;
+  CCAssignFn *AssignFnFixed;
+  CCAssignFn *AssignFnVarArg;
+  std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);
+
+  MachineInstrBuilder CallSeqStart;
+  if (!IsSibCall)
+    CallSeqStart = MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKUP);
+
+  unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), true);
+  auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
+  if (!addCallTargetOperands(MIB, MIRBuilder, Info))
+    return false;
+
+  // Byte offset for the tail call. When we are sibcalling, this will always
+  // be 0.
+  MIB.addImm(0);
+
+  // Tell the call which registers are clobbered.
+  const SIRegisterInfo *TRI = ST.getRegisterInfo();
+  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CalleeCC);
+  MIB.addRegMask(Mask);
+
+  // FPDiff is the byte offset of the call's argument area from the callee's.
+  // Stores to callee stack arguments will be placed in FixedStackSlots offset
+  // by this amount for a tail call. In a sibling call it must be 0 because the
+  // caller will deallocate the entire stack and the callee still expects its
+  // arguments to begin at SP+0.
+  int FPDiff = 0;
+
+  // This will be 0 for sibcalls, potentially nonzero for tail calls produced
+  // by -tailcallopt. For sibcalls, the memory operands for the call are
+  // already available in the caller's incoming argument space.
+  unsigned NumBytes = 0;
+  if (!IsSibCall) {
+    // We aren't sibcalling, so we need to compute FPDiff. We need to do this
+    // before handling assignments, because FPDiff must be known for memory
+    // arguments.
+    unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();
+    SmallVector<CCValAssign, 16> OutLocs;
+    CCState OutInfo(CalleeCC, false, MF, OutLocs, F.getContext());
+
+    // FIXME: Not accounting for callee implicit inputs
+    OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg);
+    if (!determineAssignments(CalleeAssigner, OutArgs, OutInfo))
+      return false;
+
+    // The callee will pop the argument stack as a tail call. Thus, we must
+    // keep it 16-byte aligned.
+    NumBytes = alignTo(OutInfo.getNextStackOffset(), ST.getStackAlignment());
+
+    // FPDiff will be negative if this tail call requires more space than we
+    // would automatically have in our incoming argument space. Positive if we
+    // actually shrink the stack.
+    FPDiff = NumReusableBytes - NumBytes;
+
+    // The stack pointer must be 16-byte aligned at all times it's used for a
+    // memory operation, which in practice means at *all* times and in
+    // particular across call boundaries. Therefore our own arguments started at
+    // a 16-byte aligned SP and the delta applied for the tail call should
+    // satisfy the same constraint.
+    assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
+  }
+
+  SmallVector<CCValAssign, 16> ArgLocs;
+  CCState CCInfo(Info.CallConv, Info.IsVarArg, MF, ArgLocs, F.getContext());
+
+  // We could pass MIB and directly add the implicit uses to the call
+  // now. However, as an aesthetic choice, place implicit argument operands
+  // after the ordinary user argument registers.
+  SmallVector<std::pair<MCRegister, Register>, 12> ImplicitArgRegs;
+
+  if (AMDGPUTargetMachine::EnableFixedFunctionABI &&
+      Info.CallConv != CallingConv::AMDGPU_Gfx) {
+    // With a fixed ABI, allocate fixed registers before user arguments.
+    if (!passSpecialInputs(MIRBuilder, CCInfo, ImplicitArgRegs, Info))
+      return false;
+  }
+
+  OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg);
+
+  if (!determineAssignments(Assigner, OutArgs, CCInfo))
+    return false;
+
+  // Do the actual argument marshalling.
+  AMDGPUOutgoingArgHandler Handler(MIRBuilder, MRI, MIB, true, FPDiff);
+  if (!handleAssignments(Handler, OutArgs, CCInfo, ArgLocs, MIRBuilder))
+    return false;
+
+  handleImplicitCallArguments(MIRBuilder, MIB, ST, *FuncInfo, ImplicitArgRegs);
+
+  // If we have -tailcallopt, we need to adjust the stack. We'll do the call
+  // sequence start and end here.
+  if (!IsSibCall) {
+    MIB->getOperand(1).setImm(FPDiff);
+    CallSeqStart.addImm(NumBytes).addImm(0);
+    // End the call sequence *before* emitting the call. Normally, we would
+    // tidy the frame up after the call. However, here, we've laid out the
+    // parameters so that when SP is reset, they will be in the correct
+    // location.
+    MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKDOWN).addImm(NumBytes).addImm(0);
+  }
+
+  // Now we can add the actual call instruction to the correct basic block.
+  MIRBuilder.insertInstr(MIB);
+
+  // If Callee is a reg, since it is used by a target specific
+  // instruction, it must have a register class matching the
+  // constraint of that instruction.
+
+  // FIXME: We should define regbankselectable call instructions to handle
+  // divergent call targets.
+  if (MIB->getOperand(0).isReg()) {
+    MIB->getOperand(0).setReg(constrainOperandRegClass(
+        MF, *TRI, MRI, *ST.getInstrInfo(), *ST.getRegBankInfo(), *MIB,
+        MIB->getDesc(), MIB->getOperand(0), 0));
+  }
+
+  MF.getFrameInfo().setHasTailCall();
+  Info.LoweredTailCall = true;
+  return true;
+}
+
 bool AMDGPUCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                    CallLoweringInfo &Info) const {
   if (Info.IsVarArg) {
@@ -951,7 +1273,8 @@ bool AMDGPUCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
   splitToValueTypes(Info.OrigRet, InArgs, DL, Info.CallConv);
 
   // If we can lower as a tail call, do that instead.
-  bool CanTailCallOpt = false;
+  bool CanTailCallOpt =
+      isEligibleForTailCallOptimization(MIRBuilder, Info, InArgs, OutArgs);
 
   // We must emit a tail call if we have musttail.
   if (Info.IsMustTailCall && !CanTailCallOpt) {
@@ -959,6 +1282,9 @@ bool AMDGPUCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
     return false;
   }
 
+  if (CanTailCallOpt)
+    return lowerTailCall(MIRBuilder, Info, OutArgs);
+
   // Find out which ABI gets to decide where things go.
   CCAssignFn *AssignFnFixed;
   CCAssignFn *AssignFnVarArg;
@@ -1011,19 +1337,7 @@ bool AMDGPUCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
 
   const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
 
-  if (!ST.enableFlatScratch()) {
-    // Insert copies for the SRD. In the HSA case, this should be an identity
-    // copy.
-    auto ScratchRSrcReg = MIRBuilder.buildCopy(LLT::vector(4, 32),
-                                               MFI->getScratchRSrcReg());
-    MIRBuilder.buildCopy(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg);
-    MIB.addReg(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, RegState::Implicit);
-  }
-
-  for (std::pair<MCRegister, Register> ArgReg : ImplicitArgRegs) {
-    MIRBuilder.buildCopy((Register)ArgReg.first, ArgReg.second);
-    MIB.addReg(ArgReg.first, RegState::Implicit);
-  }
+  handleImplicitCallArguments(MIRBuilder, MIB, ST, *MFI, ImplicitArgRegs);
 
   // Get a count of how many bytes are to be pushed on the stack.
   unsigned NumBytes = CCInfo.getNextStackOffset();
@@ -19,7 +19,9 @@
 namespace llvm {
 
 class AMDGPUTargetLowering;
+class GCNSubtarget;
 class MachineInstrBuilder;
+class SIMachineFunctionInfo;
 
 class AMDGPUCallLowering final : public CallLowering {
   void lowerParameterPtr(Register DstReg, MachineIRBuilder &B, Type *ParamTy,
@@ -54,6 +56,29 @@ public:
                          SmallVectorImpl<std::pair<MCRegister, Register>> &ArgRegs,
                          CallLoweringInfo &Info) const;
 
+  bool
+  doCallerAndCalleePassArgsTheSameWay(CallLoweringInfo &Info,
+                                      MachineFunction &MF,
+                                      SmallVectorImpl<ArgInfo> &InArgs) const;
+
+  bool
+  areCalleeOutgoingArgsTailCallable(CallLoweringInfo &Info, MachineFunction &MF,
+                                    SmallVectorImpl<ArgInfo> &OutArgs) const;
+
+  /// Returns true if the call can be lowered as a tail call.
+  bool
+  isEligibleForTailCallOptimization(MachineIRBuilder &MIRBuilder,
+                                    CallLoweringInfo &Info,
+                                    SmallVectorImpl<ArgInfo> &InArgs,
+                                    SmallVectorImpl<ArgInfo> &OutArgs) const;
+
+  void handleImplicitCallArguments(
+      MachineIRBuilder &MIRBuilder, MachineInstrBuilder &CallInst,
+      const GCNSubtarget &ST, const SIMachineFunctionInfo &MFI,
+      ArrayRef<std::pair<MCRegister, Register>> ImplicitArgRegs) const;
+
+  bool lowerTailCall(MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
+                     SmallVectorImpl<ArgInfo> &OutArgs) const;
   bool lowerCall(MachineIRBuilder &MIRBuilder,
                  CallLoweringInfo &Info) const override;
 
test/CodeGen/AMDGPU/GlobalISel/irtranslator-sibling-call.ll (new file, 1837 lines)
File diff suppressed because it is too large
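Since the 1,837-line test diff is suppressed, here is a rough illustration only (this snippet is not quoted from the suppressed file) of the kind of pattern such a sibling-call test exercises, using fastcc, the calling convention that canGuaranteeTCO accepts:

define fastcc i32 @callee_i32_i32(i32 %a, i32 %b) {
  %add = add i32 %a, %b
  ret i32 %add
}

define fastcc i32 @sibling_call_i32_i32(i32 %a, i32 %b, i32 %c) {
entry:
  ; Same calling convention and no extra stack-argument space needed, so this
  ; lowers to SI_TCRETURN through the new lowerTailCall path.
  %ret = tail call fastcc i32 @callee_i32_i32(i32 %a, i32 %b)
  ret i32 %ret
}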
test/CodeGen/AMDGPU/GlobalISel/irtranslator-tail-call.ll (new file, 41 lines)
@@ -0,0 +1,41 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -global-isel -amdgpu-fixed-function-abi -stop-after=irtranslator -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck -enable-var-scope %s
+
+declare hidden void @external_void_func_void()
+
+define void @tail_call_void_func_void() {
+  ; CHECK-LABEL: name: tail_call_void_func_void
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
+  ; CHECK:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr13
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr12
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_64 = COPY $sgpr8_sgpr9
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_void
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY4]]
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY17]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY9]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY12]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY13]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY15]](s32)
+  ; CHECK:   $vgpr31 = COPY [[COPY16]](s32)
+  ; CHECK:   SI_TCRETURN [[GV]](p0), @external_void_func_void, 0, csr_amdgpu_highregs, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  tail call void @external_void_func_void()
+  ret void
+}
@@ -19,7 +19,7 @@ define amdgpu_kernel void @test_call_undef() #0 {
 ; SDAG: s_waitcnt
 ; SDAG-NEXT: .Lfunc_end
 
-; GISEL: s_swappc_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}
+; GISEL: s_setpc_b64 s{{\[[0-9]+:[0-9]+\]}}
 define i32 @test_tail_call_undef() #0 {
   %call = tail call i32 undef(i32 1)
   ret i32 %call
@@ -43,7 +43,7 @@ define amdgpu_kernel void @test_call_null() #0 {
 ; SDAG: s_waitcnt
 ; SDAG-NEXT: .Lfunc_end
 
-; GISEL: s_swappc_b64 s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
+; GISEL: s_setpc_b64 s{{\[[0-9]+:[0-9]+\]$}}
 define i32 @test_tail_call_null() #0 {
   %call = tail call i32 null(i32 1)
   ret i32 %call
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN -enable-var-scope %s
+; RUN: llc -global-isel -mtriple=amdgcn--amdpal -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN -enable-var-scope %s
 
 ; Callee with SGPR and VGPR arguments
 define hidden amdgpu_gfx float @callee(float %v.arg0, float inreg %s.arg1) {