Mips/GlobalISel: Use more standard call lowering infrastructure
This also fixes some missing implicit uses on call instructions, adds missing G_ASSERT_SEXT/ZEXT annotations, and adds some missing outgoing sext/zexts. It also fixes not respecting tablegen-requested type promotions.

This starts treating f64 passed in i32 GPRs as a kind of custom assignment, which restores some previously XFAILed tests. This is needed because getNumRegistersForCallingConv returns a static value, but in this case the answer is context dependent on the other arguments.

Most of the ugliness is reproducing a hack CC_MipsO32 uses in SelectionDAG. CC_MipsO32 depends on a bunch of vectors populated from the original IR argument types in MipsCCState. The way this ends up working in GlobalISel is that it only ends up inspecting the most recently added vector element. I'm pretty sure there are cleaner ways to do this, but this seemed easier than fixing up the current DAG handling. This is another case where it would be easier if the CCAssignFns were passed the original type instead of only the pre-legalized ones.

There's still a lot of junk here that shouldn't be necessary. This also likely breaks big-endian handling, but it wasn't complete/tested anyway since the IRTranslator gives up on big-endian targets.
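To make the context dependence concrete, here is a minimal sketch (hypothetical function names, not taken from this commit's tests): under the MIPS O32 ABI, because %i below already occupies $a0 and doubles are 8-byte aligned, %d is passed split across the two i32 GPRs $a2/$a3, so the correct register count for the f64 cannot be derived from the type alone, which is what the new assignCustomValue path handles.

; Hedged illustration: %d is split into two 32-bit GPR halves ($a2/$a3)
; only because of the arguments assigned before it.
define void @caller(i32 %i, double %d) {
entry:
  call void @callee(i32 %i, double %d)
  ret void
}
declare void @callee(i32, double)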
This commit is contained in:
Parent: b34fbda5c6
Commit: 2398c72d1f
@@ -281,7 +281,7 @@ public:
  /// \return The number of \p VAs that have been assigned after the first
  /// one, and which should therefore be skipped from further
  /// processing.
  virtual unsigned assignCustomValue(const ArgInfo &Arg,
  virtual unsigned assignCustomValue(ArgInfo &Arg,
                                     ArrayRef<CCValAssign> VAs) {
    // This is not a pure virtual method because not all targets need to worry
    // about custom values.
@@ -129,7 +129,7 @@ struct ARMOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
    MIRBuilder.buildStore(ExtReg, Addr, *MMO);
  }

  unsigned assignCustomValue(const CallLowering::ArgInfo &Arg,
  unsigned assignCustomValue(CallLowering::ArgInfo &Arg,
                             ArrayRef<CCValAssign> VAs) override {
    assert(Arg.Regs.size() == 1 && "Can't handle multple regs yet");
@@ -297,7 +297,7 @@ struct ARMIncomingValueHandler : public CallLowering::IncomingValueHandler {
    }
  }

  unsigned assignCustomValue(const ARMCallLowering::ArgInfo &Arg,
  unsigned assignCustomValue(ARMCallLowering::ArgInfo &Arg,
                             ArrayRef<CCValAssign> VAs) override {
    assert(Arg.Regs.size() == 1 && "Can't handle multple regs yet");
@@ -12,8 +12,7 @@

using namespace llvm;

/// This function returns true if CallSym is a long double emulation routine.
static bool isF128SoftLibCall(const char *CallSym) {
bool MipsCCState::isF128SoftLibCall(const char *CallSym) {
  const char *const LibCalls[] = {
      "__addtf3", "__divtf3", "__eqtf2", "__extenddftf2",
      "__extendsftf2", "__fixtfdi", "__fixtfsi", "__fixtfti",
@@ -37,7 +36,7 @@ static bool isF128SoftLibCall(const char *CallSym) {

/// This function returns true if Ty is fp128, {f128} or i128 which was
/// originally a fp128.
static bool originalTypeIsF128(const Type *Ty, const char *Func) {
bool MipsCCState::originalTypeIsF128(const Type *Ty, const char *Func) {
  if (Ty->isFP128Ty())
    return true;

@@ -47,11 +46,12 @@ static bool originalTypeIsF128(const Type *Ty, const char *Func) {

  // If the Ty is i128 and the function being called is a long double emulation
  // routine, then the original type is f128.
  // FIXME: This is unsound because these functions could be indirectly called
  return (Func && Ty->isIntegerTy(128) && isF128SoftLibCall(Func));
}

/// Return true if the original type was vXfXX.
static bool originalEVTTypeIsVectorFloat(EVT Ty) {
bool MipsCCState::originalEVTTypeIsVectorFloat(EVT Ty) {
  if (Ty.isVector() && Ty.getVectorElementType().isFloatingPoint())
    return true;

@@ -59,7 +59,7 @@ static bool originalEVTTypeIsVectorFloat(EVT Ty) {
}

/// Return true if the original type was vXfXX / vXfXX.
static bool originalTypeIsVectorFloat(const Type * Ty) {
bool MipsCCState::originalTypeIsVectorFloat(const Type *Ty) {
  if (Ty->isVectorTy() && Ty->isFPOrFPVectorTy())
    return true;

@@ -126,6 +126,18 @@ void MipsCCState::PreAnalyzeReturnForVectorFloat(
  }
}

void MipsCCState::PreAnalyzeReturnValue(EVT ArgVT) {
  OriginalRetWasFloatVector.push_back(originalEVTTypeIsVectorFloat(ArgVT));
}

void MipsCCState::PreAnalyzeCallOperand(const Type *ArgTy, bool IsFixed,
                                        const char *Func) {
  OriginalArgWasF128.push_back(originalTypeIsF128(ArgTy, Func));
  OriginalArgWasFloat.push_back(ArgTy->isFloatingPointTy());
  OriginalArgWasFloatVector.push_back(ArgTy->isVectorTy());
  CallOperandIsFixed.push_back(IsFixed);
}

/// Identify lowered values that originated from f128, float and sret to vXfXX
/// arguments and record this.
void MipsCCState::PreAnalyzeCallOperands(
@@ -142,6 +154,27 @@ void MipsCCState::PreAnalyzeCallOperands(
  }
}

void MipsCCState::PreAnalyzeFormalArgument(const Type *ArgTy,
                                           ISD::ArgFlagsTy Flags) {
  // SRet arguments cannot originate from f128 or {f128} returns so we just
  // push false. We have to handle this specially since SRet arguments
  // aren't mapped to an original argument.
  if (Flags.isSRet()) {
    OriginalArgWasF128.push_back(false);
    OriginalArgWasFloat.push_back(false);
    OriginalArgWasFloatVector.push_back(false);
    return;
  }

  OriginalArgWasF128.push_back(originalTypeIsF128(ArgTy, nullptr));
  OriginalArgWasFloat.push_back(ArgTy->isFloatingPointTy());

  // The MIPS vector ABI exhibits a corner case of sorts or quirk; if the
  // first argument is actually an SRet pointer to a vector, then the next
  // argument slot is $a2.
  OriginalArgWasFloatVector.push_back(ArgTy->isVectorTy());
}

/// Identify lowered values that originated from f128, float and vXfXX arguments
/// and record this.
void MipsCCState::PreAnalyzeFormalArgumentsForF128(
@@ -26,6 +26,21 @@ public:
  getSpecialCallingConvForCallee(const SDNode *Callee,
                                 const MipsSubtarget &Subtarget);

  /// This function returns true if CallSym is a long double emulation routine.
  ///
  /// FIXME: Changing the ABI based on the callee name is unsound. The lib func
  /// address could be captured.
  static bool isF128SoftLibCall(const char *CallSym);

  static bool originalTypeIsF128(const Type *Ty, const char *Func);
  static bool originalEVTTypeIsVectorFloat(EVT Ty);
  static bool originalTypeIsVectorFloat(const Type *Ty);

  void PreAnalyzeCallOperand(const Type *ArgTy, bool IsFixed, const char *Func);

  void PreAnalyzeFormalArgument(const Type *ArgTy, ISD::ArgFlagsTy Flags);
  void PreAnalyzeReturnValue(EVT ArgVT);

private:
  /// Identify lowered values that originated from f128 arguments and record
  /// this for use by RetCC_MipsN.
@@ -85,17 +100,23 @@ public:
              SpecialCallingConvType SpecialCC = NoSpecialCallingConv)
      : CCState(CC, isVarArg, MF, locs, C), SpecialCallingConv(SpecialCC) {}

  void PreAnalyzeCallOperands(
      const SmallVectorImpl<ISD::OutputArg> &Outs, CCAssignFn Fn,
      std::vector<TargetLowering::ArgListEntry> &FuncArgs, const char *Func) {
    OriginalArgWasF128.clear();
    OriginalArgWasFloat.clear();
    OriginalArgWasFloatVector.clear();
    CallOperandIsFixed.clear();
    PreAnalyzeCallOperands(Outs, FuncArgs, Func);
  }

  void
  AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
                      CCAssignFn Fn,
                      std::vector<TargetLowering::ArgListEntry> &FuncArgs,
                      const char *Func) {
    PreAnalyzeCallOperands(Outs, FuncArgs, Func);
    PreAnalyzeCallOperands(Outs, Fn, FuncArgs, Func);
    CCState::AnalyzeCallOperands(Outs, Fn);
    OriginalArgWasF128.clear();
    OriginalArgWasFloat.clear();
    OriginalArgWasFloatVector.clear();
    CallOperandIsFixed.clear();
  }

  // The AnalyzeCallOperands in the base class is not usable since we must
@@ -107,34 +128,56 @@ public:
                           SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
                           CCAssignFn Fn) = delete;

  void AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
                              CCAssignFn Fn) {
    PreAnalyzeFormalArgumentsForF128(Ins);
    CCState::AnalyzeFormalArguments(Ins, Fn);
  void PreAnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
                                 CCAssignFn Fn) {
    OriginalArgWasFloat.clear();
    OriginalArgWasF128.clear();
    OriginalArgWasFloatVector.clear();
    PreAnalyzeFormalArgumentsForF128(Ins);
  }

  void AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
                              CCAssignFn Fn) {
    PreAnalyzeFormalArguments(Ins, Fn);
    CCState::AnalyzeFormalArguments(Ins, Fn);
  }

  void PreAnalyzeCallResult(const Type *RetTy, const char *Func) {
    OriginalArgWasF128.push_back(originalTypeIsF128(RetTy, Func));
    OriginalArgWasFloat.push_back(RetTy->isFloatingPointTy());
    OriginalRetWasFloatVector.push_back(originalTypeIsVectorFloat(RetTy));
  }

  void PreAnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
                            CCAssignFn Fn, const Type *RetTy,
                            const char *Func) {
    OriginalArgWasFloat.clear();
    OriginalArgWasF128.clear();
    OriginalArgWasFloatVector.clear();
    PreAnalyzeCallResultForF128(Ins, RetTy, Func);
    PreAnalyzeCallResultForVectorFloat(Ins, RetTy);
  }

  void AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
                         CCAssignFn Fn, const Type *RetTy,
                         const char *Func) {
    PreAnalyzeCallResultForF128(Ins, RetTy, Func);
    PreAnalyzeCallResultForVectorFloat(Ins, RetTy);
    PreAnalyzeCallResult(Ins, Fn, RetTy, Func);
    CCState::AnalyzeCallResult(Ins, Fn);
  }

  void PreAnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
                        CCAssignFn Fn) {
    OriginalArgWasFloat.clear();
    OriginalArgWasF128.clear();
    OriginalArgWasFloatVector.clear();
    PreAnalyzeReturnForF128(Outs);
    PreAnalyzeReturnForVectorFloat(Outs);
  }

  void AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
                     CCAssignFn Fn) {
    PreAnalyzeReturnForF128(Outs);
    PreAnalyzeReturnForVectorFloat(Outs);
    PreAnalyzeReturn(Outs, Fn);
    CCState::AnalyzeReturn(Outs, Fn);
    OriginalArgWasFloat.clear();
    OriginalArgWasF128.clear();
    OriginalArgWasFloatVector.clear();
  }

  bool CheckReturn(const SmallVectorImpl<ISD::OutputArg> &ArgsFlags,
@@ -24,98 +24,89 @@ using namespace llvm;
MipsCallLowering::MipsCallLowering(const MipsTargetLowering &TLI)
    : CallLowering(&TLI) {}

bool MipsCallLowering::MipsHandler::assign(Register VReg, const CCValAssign &VA,
                                           const EVT &VT) {
  if (VA.isRegLoc()) {
    assignValueToReg(VReg, VA, VT);
  } else if (VA.isMemLoc()) {
    assignValueToAddress(VReg, VA);
  } else {
    return false;
struct MipsOutgoingValueAssigner : public CallLowering::OutgoingValueAssigner {
  /// This is the name of the function being called
  /// FIXME: Relying on this is unsound
  const char *Func = nullptr;

  /// Is this a return value, or an outgoing call operand.
  bool IsReturn;

  MipsOutgoingValueAssigner(CCAssignFn *AssignFn_, const char *Func,
                            bool IsReturn)
      : OutgoingValueAssigner(AssignFn_), Func(Func), IsReturn(IsReturn) {}

  bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
                 CCState &State_) override {
    MipsCCState &State = static_cast<MipsCCState &>(State_);

    if (IsReturn)
      State.PreAnalyzeReturnValue(EVT::getEVT(Info.Ty));
    else
      State.PreAnalyzeCallOperand(Info.Ty, Info.IsFixed, Func);

    return CallLowering::OutgoingValueAssigner::assignArg(
        ValNo, OrigVT, ValVT, LocVT, LocInfo, Info, Flags, State);
  }
  return true;
}
};

bool MipsCallLowering::MipsHandler::assignVRegs(ArrayRef<Register> VRegs,
                                                ArrayRef<CCValAssign> ArgLocs,
                                                unsigned ArgLocsStartIndex,
                                                const EVT &VT) {
  for (unsigned i = 0; i < VRegs.size(); ++i)
    if (!assign(VRegs[i], ArgLocs[ArgLocsStartIndex + i], VT))
      return false;
  return true;
}
struct MipsIncomingValueAssigner : public CallLowering::IncomingValueAssigner {
  /// This is the name of the function being called
  /// FIXME: Relying on this is unsound
  const char *Func = nullptr;

void MipsCallLowering::MipsHandler::setLeastSignificantFirst(
    SmallVectorImpl<Register> &VRegs) {
  if (!MIRBuilder.getMF().getDataLayout().isLittleEndian())
    std::reverse(VRegs.begin(), VRegs.end());
}
  /// Is this a call return value, or an incoming function argument.
  bool IsReturn;

bool MipsCallLowering::MipsHandler::handle(
    ArrayRef<CCValAssign> ArgLocs, ArrayRef<CallLowering::ArgInfo> Args) {
  SmallVector<Register, 4> VRegs;
  unsigned SplitLength;
  const Function &F = MIRBuilder.getMF().getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  const MipsTargetLowering &TLI = *static_cast<const MipsTargetLowering *>(
      MIRBuilder.getMF().getSubtarget().getTargetLowering());
  MipsIncomingValueAssigner(CCAssignFn *AssignFn_, const char *Func,
                            bool IsReturn)
      : IncomingValueAssigner(AssignFn_), Func(Func), IsReturn(IsReturn) {}

  for (unsigned ArgsIndex = 0, ArgLocsIndex = 0; ArgsIndex < Args.size();
       ++ArgsIndex, ArgLocsIndex += SplitLength) {
    EVT VT = TLI.getValueType(DL, Args[ArgsIndex].Ty);
    SplitLength = TLI.getNumRegistersForCallingConv(F.getContext(),
                                                    F.getCallingConv(), VT);
    assert(Args[ArgsIndex].Regs.size() == 1 && "Can't handle multple regs yet");
  bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
                 CCState &State_) override {
    MipsCCState &State = static_cast<MipsCCState &>(State_);

    if (SplitLength > 1) {
      VRegs.clear();
      MVT RegisterVT = TLI.getRegisterTypeForCallingConv(
          F.getContext(), F.getCallingConv(), VT);
      for (unsigned i = 0; i < SplitLength; ++i)
        VRegs.push_back(MRI.createGenericVirtualRegister(LLT{RegisterVT}));
    if (IsReturn)
      State.PreAnalyzeCallResult(Info.Ty, Func);
    else
      State.PreAnalyzeFormalArgument(Info.Ty, Flags);

      if (!handleSplit(VRegs, ArgLocs, ArgLocsIndex, Args[ArgsIndex].Regs[0],
                       VT))
        return false;
    } else {
      if (!assign(Args[ArgsIndex].Regs[0], ArgLocs[ArgLocsIndex], VT))
        return false;
    }
    return CallLowering::IncomingValueAssigner::assignArg(
        ValNo, OrigVT, ValVT, LocVT, LocInfo, Info, Flags, State);
  }
  return true;
}
};

namespace {
class MipsIncomingValueHandler : public MipsCallLowering::MipsHandler {
class MipsIncomingValueHandler : public CallLowering::IncomingValueHandler {
  const MipsSubtarget &STI;

public:
  MipsIncomingValueHandler(MachineIRBuilder &MIRBuilder,
                           MachineRegisterInfo &MRI)
      : MipsHandler(MIRBuilder, MRI) {}
      : IncomingValueHandler(MIRBuilder, MRI),
        STI(MIRBuilder.getMF().getSubtarget<MipsSubtarget>()) {}

private:
  void assignValueToReg(Register ValVReg, const CCValAssign &VA,
                        const EVT &VT) override;
  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override;

  Register getStackAddress(const CCValAssign &VA,
                           MachineMemOperand *&MMO) override;
  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override;
  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            MachinePointerInfo &MPO, CCValAssign &VA) override;

  void assignValueToAddress(Register ValVReg, const CCValAssign &VA) override;

  bool handleSplit(SmallVectorImpl<Register> &VRegs,
                   ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
                   Register ArgsReg, const EVT &VT) override;
  unsigned assignCustomValue(CallLowering::ArgInfo &Arg,
                             ArrayRef<CCValAssign> VAs) override;

  virtual void markPhysRegUsed(unsigned PhysReg) {
    MIRBuilder.getMRI()->addLiveIn(PhysReg);
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }

  MachineInstrBuilder buildLoad(const DstOp &Res, const CCValAssign &VA) {
    MachineMemOperand *MMO;
    Register Addr = getStackAddress(VA, MMO);
    return MIRBuilder.buildLoad(Res, Addr, *MMO);
  }
};

class CallReturnHandler : public MipsIncomingValueHandler {
@@ -135,191 +126,154 @@ private:
} // end anonymous namespace

void MipsIncomingValueHandler::assignValueToReg(Register ValVReg,
                                                const CCValAssign &VA,
                                                const EVT &VT) {
  Register PhysReg = VA.getLocReg();
  if (VT == MVT::f64 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    const MipsSubtarget &STI =
        static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());
    bool IsEL = STI.isLittle();
    LLT s32 = LLT::scalar(32);
    auto Lo = MIRBuilder.buildCopy(s32, Register(PhysReg + (IsEL ? 0 : 1)));
    auto Hi = MIRBuilder.buildCopy(s32, Register(PhysReg + (IsEL ? 1 : 0)));
    MIRBuilder.buildMerge(ValVReg, {Lo, Hi});
    markPhysRegUsed(PhysReg);
    markPhysRegUsed(PhysReg + 1);
  } else if (VT == MVT::f32 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder.buildCopy(ValVReg, PhysReg);
    markPhysRegUsed(PhysReg);
  } else {
    switch (VA.getLocInfo()) {
    case CCValAssign::LocInfo::SExt:
    case CCValAssign::LocInfo::ZExt:
    case CCValAssign::LocInfo::AExt: {
      auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg);
      MIRBuilder.buildTrunc(ValVReg, Copy);
      break;
    }
    default:
      MIRBuilder.buildCopy(ValVReg, PhysReg);
      break;
    }
    markPhysRegUsed(PhysReg);
  }
                                                Register PhysReg,
                                                CCValAssign &VA) {
  markPhysRegUsed(PhysReg);
  IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA);
}

Register MipsIncomingValueHandler::getStackAddress(const CCValAssign &VA,
                                                   MachineMemOperand *&MMO) {
Register MipsIncomingValueHandler::getStackAddress(uint64_t Size,
                                                   int64_t Offset,
                                                   MachinePointerInfo &MPO,
                                                   ISD::ArgFlagsTy Flags) {

  MachineFunction &MF = MIRBuilder.getMF();
  unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
  unsigned Offset = VA.getLocMemOffset();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // FIXME: This should only be immutable for non-byval memory arguments.
  int FI = MFI.CreateFixedObject(Size, Offset, true);
  MachinePointerInfo MPO =
      MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);

  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
  Align Alignment = commonAlignment(TFL->getStackAlign(), Offset);
  MMO =
      MF.getMachineMemOperand(MPO, MachineMemOperand::MOLoad, Size, Alignment);
  MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);

  return MIRBuilder.buildFrameIndex(LLT::pointer(0, 32), FI).getReg(0);
}

void MipsIncomingValueHandler::assignValueToAddress(Register ValVReg,
                                                    const CCValAssign &VA) {
  if (VA.getLocInfo() == CCValAssign::SExt ||
      VA.getLocInfo() == CCValAssign::ZExt ||
      VA.getLocInfo() == CCValAssign::AExt) {
    auto Load = buildLoad(LLT::scalar(32), VA);
    MIRBuilder.buildTrunc(ValVReg, Load);
  } else
    buildLoad(ValVReg, VA);
                                                    Register Addr, LLT MemTy,
                                                    MachinePointerInfo &MPO,
                                                    CCValAssign &VA) {
  MachineFunction &MF = MIRBuilder.getMF();
  auto MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOLoad, MemTy,
                                     inferAlignFromPtrInfo(MF, MPO));
  MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
}

bool MipsIncomingValueHandler::handleSplit(SmallVectorImpl<Register> &VRegs,
                                           ArrayRef<CCValAssign> ArgLocs,
                                           unsigned ArgLocsStartIndex,
                                           Register ArgsReg, const EVT &VT) {
  if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex, VT))
    return false;
  setLeastSignificantFirst(VRegs);
  MIRBuilder.buildMerge(ArgsReg, VRegs);
  return true;
/// Handle cases when f64 is split into 2 32-bit GPRs. This is a custom
/// assignment because generic code assumes getNumRegistersForCallingConv is
/// accurate. In this case it is not because the type/number are context
/// dependent on other arguments.
unsigned
MipsIncomingValueHandler::assignCustomValue(CallLowering::ArgInfo &Arg,
                                            ArrayRef<CCValAssign> VAs) {
  const CCValAssign &VALo = VAs[0];
  const CCValAssign &VAHi = VAs[1];

  assert(VALo.getLocVT() == MVT::i32 && VAHi.getLocVT() == MVT::i32 &&
         VALo.getValVT() == MVT::f64 && VAHi.getValVT() == MVT::f64 &&
         "unexpected custom value");

  auto CopyLo = MIRBuilder.buildCopy(LLT::scalar(32), VALo.getLocReg());
  auto CopyHi = MIRBuilder.buildCopy(LLT::scalar(32), VAHi.getLocReg());
  if (!STI.isLittle())
    std::swap(CopyLo, CopyHi);

  Arg.OrigRegs.assign(Arg.Regs.begin(), Arg.Regs.end());
  Arg.Regs = { CopyLo.getReg(0), CopyHi.getReg(0) };
  MIRBuilder.buildMerge(Arg.OrigRegs[0], {CopyLo, CopyHi});

  markPhysRegUsed(VALo.getLocReg());
  markPhysRegUsed(VAHi.getLocReg());
  return 2;
}

namespace {
class MipsOutgoingValueHandler : public MipsCallLowering::MipsHandler {
class MipsOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
  const MipsSubtarget &STI;

public:
  MipsOutgoingValueHandler(MachineIRBuilder &MIRBuilder,
                           MachineRegisterInfo &MRI, MachineInstrBuilder &MIB)
      : MipsHandler(MIRBuilder, MRI), MIB(MIB) {}
      : OutgoingValueHandler(MIRBuilder, MRI),
        STI(MIRBuilder.getMF().getSubtarget<MipsSubtarget>()), MIB(MIB) {}

private:
  void assignValueToReg(Register ValVReg, const CCValAssign &VA,
                        const EVT &VT) override;
  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override;

  Register getStackAddress(const CCValAssign &VA,
                           MachineMemOperand *&MMO) override;
  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override;

  void assignValueToAddress(Register ValVReg, const CCValAssign &VA) override;

  bool handleSplit(SmallVectorImpl<Register> &VRegs,
                   ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
                   Register ArgsReg, const EVT &VT) override;

  Register extendRegister(Register ValReg, const CCValAssign &VA);
  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            MachinePointerInfo &MPO, CCValAssign &VA) override;
  unsigned assignCustomValue(CallLowering::ArgInfo &Arg,
                             ArrayRef<CCValAssign> VAs) override;

  MachineInstrBuilder &MIB;
};
} // end anonymous namespace

void MipsOutgoingValueHandler::assignValueToReg(Register ValVReg,
                                                const CCValAssign &VA,
                                                const EVT &VT) {
  Register PhysReg = VA.getLocReg();
  if (VT == MVT::f64 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    const MipsSubtarget &STI =
        static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());
    bool IsEL = STI.isLittle();
    auto Unmerge = MIRBuilder.buildUnmerge(LLT::scalar(32), ValVReg);
    MIRBuilder.buildCopy(Register(PhysReg + (IsEL ? 0 : 1)), Unmerge.getReg(0));
    MIRBuilder.buildCopy(Register(PhysReg + (IsEL ? 1 : 0)), Unmerge.getReg(1));
  } else if (VT == MVT::f32 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder.buildCopy(PhysReg, ValVReg);
  } else {
    Register ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }
                                                Register PhysReg,
                                                CCValAssign &VA) {
  Register ExtReg = extendRegister(ValVReg, VA);
  MIRBuilder.buildCopy(PhysReg, ExtReg);
  MIB.addUse(PhysReg, RegState::Implicit);
}

Register MipsOutgoingValueHandler::getStackAddress(const CCValAssign &VA,
                                                   MachineMemOperand *&MMO) {
Register MipsOutgoingValueHandler::getStackAddress(uint64_t Size,
                                                   int64_t Offset,
                                                   MachinePointerInfo &MPO,
                                                   ISD::ArgFlagsTy Flags) {
  MachineFunction &MF = MIRBuilder.getMF();
  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
  MPO = MachinePointerInfo::getStack(MF, Offset);

  LLT p0 = LLT::pointer(0, 32);
  LLT s32 = LLT::scalar(32);
  auto SPReg = MIRBuilder.buildCopy(p0, Register(Mips::SP));

  unsigned Offset = VA.getLocMemOffset();
  auto OffsetReg = MIRBuilder.buildConstant(s32, Offset);

  auto AddrReg = MIRBuilder.buildPtrAdd(p0, SPReg, OffsetReg);

  MachinePointerInfo MPO =
      MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
  unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
  Align Alignment = commonAlignment(TFL->getStackAlign(), Offset);
  MMO =
      MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, Size, Alignment);

  return AddrReg.getReg(0);
}

void MipsOutgoingValueHandler::assignValueToAddress(Register ValVReg,
                                                    const CCValAssign &VA) {
  MachineMemOperand *MMO;
  Register Addr = getStackAddress(VA, MMO);
                                                    Register Addr, LLT MemTy,
                                                    MachinePointerInfo &MPO,
                                                    CCValAssign &VA) {
  MachineFunction &MF = MIRBuilder.getMF();
  uint64_t LocMemOffset = VA.getLocMemOffset();

  auto MMO = MF.getMachineMemOperand(
      MPO, MachineMemOperand::MOStore, MemTy,
      commonAlignment(STI.getStackAlignment(), LocMemOffset));

  Register ExtReg = extendRegister(ValVReg, VA);
  MIRBuilder.buildStore(ExtReg, Addr, *MMO);
}

Register MipsOutgoingValueHandler::extendRegister(Register ValReg,
                                                  const CCValAssign &VA) {
  LLT LocTy{VA.getLocVT()};
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt: {
    return MIRBuilder.buildSExt(LocTy, ValReg).getReg(0);
  }
  case CCValAssign::ZExt: {
    return MIRBuilder.buildZExt(LocTy, ValReg).getReg(0);
  }
  case CCValAssign::AExt: {
    return MIRBuilder.buildAnyExt(LocTy, ValReg).getReg(0);
  }
  // TODO : handle upper extends
  case CCValAssign::Full:
    return ValReg;
  default:
    break;
  }
  llvm_unreachable("unable to extend register");
}
unsigned
MipsOutgoingValueHandler::assignCustomValue(CallLowering::ArgInfo &Arg,
                                            ArrayRef<CCValAssign> VAs) {
  const CCValAssign &VALo = VAs[0];
  const CCValAssign &VAHi = VAs[1];

bool MipsOutgoingValueHandler::handleSplit(SmallVectorImpl<Register> &VRegs,
                                           ArrayRef<CCValAssign> ArgLocs,
                                           unsigned ArgLocsStartIndex,
                                           Register ArgsReg, const EVT &VT) {
  MIRBuilder.buildUnmerge(VRegs, ArgsReg);
  setLeastSignificantFirst(VRegs);
  if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex, VT))
    return false;
  assert(VALo.getLocVT() == MVT::i32 && VAHi.getLocVT() == MVT::i32 &&
         VALo.getValVT() == MVT::f64 && VAHi.getValVT() == MVT::f64 &&
         "unexpected custom value");

  return true;
  auto Unmerge =
      MIRBuilder.buildUnmerge({LLT::scalar(32), LLT::scalar(32)}, Arg.Regs[0]);
  Register Lo = Unmerge.getReg(0);
  Register Hi = Unmerge.getReg(1);

  Arg.OrigRegs.assign(Arg.Regs.begin(), Arg.Regs.end());
  Arg.Regs = { Lo, Hi };
  if (!STI.isLittle())
    std::swap(Lo, Hi);

  MIRBuilder.buildCopy(VALo.getLocReg(), Lo);
  MIRBuilder.buildCopy(VAHi.getLocReg(), Hi);
  return 2;
}

static bool isSupportedArgumentType(Type *T) {
@@ -344,36 +298,6 @@ static bool isSupportedReturnType(Type *T) {
  return false;
}

static CCValAssign::LocInfo determineLocInfo(const MVT RegisterVT, const EVT VT,
                                             const ISD::ArgFlagsTy &Flags) {
  // > does not mean loss of information as type RegisterVT can't hold type VT,
  // it means that type VT is split into multiple registers of type RegisterVT
  if (VT.getFixedSizeInBits() >= RegisterVT.getFixedSizeInBits())
    return CCValAssign::LocInfo::Full;
  if (Flags.isSExt())
    return CCValAssign::LocInfo::SExt;
  if (Flags.isZExt())
    return CCValAssign::LocInfo::ZExt;
  return CCValAssign::LocInfo::AExt;
}

template <typename T>
static void setLocInfo(SmallVectorImpl<CCValAssign> &ArgLocs,
                       const SmallVectorImpl<T> &Arguments) {
  for (unsigned i = 0; i < ArgLocs.size(); ++i) {
    const CCValAssign &VA = ArgLocs[i];
    CCValAssign::LocInfo LocInfo = determineLocInfo(
        Arguments[i].VT, Arguments[i].ArgVT, Arguments[i].Flags);
    if (VA.isMemLoc())
      ArgLocs[i] =
          CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
                              VA.getLocMemOffset(), VA.getLocVT(), LocInfo);
    else
      ArgLocs[i] = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
                                       VA.getLocReg(), VA.getLocVT(), LocInfo);
  }
}

bool MipsCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                   const Value *Val, ArrayRef<Register> VRegs,
                                   FunctionLoweringInfo &FLI) const {
@@ -391,24 +315,28 @@ bool MipsCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,

  SmallVector<ArgInfo, 8> RetInfos;

  ArgInfo ArgRetInfo(VRegs, Val->getType(), 0);
  ArgInfo ArgRetInfo(VRegs, *Val, 0);
  setArgFlags(ArgRetInfo, AttributeList::ReturnIndex, DL, F);
  splitToValueTypes(ArgRetInfo, RetInfos, DL, F.getCallingConv());

  SmallVector<ISD::OutputArg, 8> Outs;
  subTargetRegTypeForCallingConv(F, RetInfos, Outs);

  SmallVector<CCValAssign, 16> ArgLocs;
  SmallVector<ISD::OutputArg, 8> Outs;

  MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                     F.getContext());
  CCInfo.AnalyzeReturn(Outs, TLI.CCAssignFnForReturn());
  setLocInfo(ArgLocs, Outs);

  MipsOutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret);
  if (!RetHandler.handle(ArgLocs, RetInfos)) {
  std::string FuncName = F.getName().str();
  MipsOutgoingValueAssigner Assigner(TLI.CCAssignFnForReturn(),
                                     FuncName.c_str(), /*IsReturn*/ true);

  if (!determineAssignments(Assigner, RetInfos, CCInfo))
    return false;

  if (!handleAssignments(RetHandler, RetInfos, CCInfo, ArgLocs, MIRBuilder))
    return false;
  }
}

  MIRBuilder.insertInstr(Ret);
  return true;
}
@@ -434,14 +362,14 @@ bool MipsCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
  SmallVector<ArgInfo, 8> ArgInfos;
  unsigned i = 0;
  for (auto &Arg : F.args()) {
    ArgInfo AInfo(VRegs[i], Arg.getType(), i);
    ArgInfo AInfo(VRegs[i], Arg, i);
    setArgFlags(AInfo, i + AttributeList::FirstArgIndex, DL, F);
    ArgInfos.push_back(AInfo);

    splitToValueTypes(AInfo, ArgInfos, DL, F.getCallingConv());
    ++i;
  }

  SmallVector<ISD::InputArg, 8> Ins;
  subTargetRegTypeForCallingConv(F, ArgInfos, Ins);

  SmallVector<CCValAssign, 16> ArgLocs;
  MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
@@ -452,11 +380,15 @@ bool MipsCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
  const MipsABIInfo &ABI = TM.getABI();
  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(F.getCallingConv()),
                       Align(1));
  CCInfo.AnalyzeFormalArguments(Ins, TLI.CCAssignFnForCall());
  setLocInfo(ArgLocs, Ins);

  const std::string FuncName = F.getName().str();
  MipsIncomingValueAssigner Assigner(TLI.CCAssignFnForCall(), FuncName.c_str(),
                                     /*IsReturn*/ false);
  if (!determineAssignments(Assigner, ArgInfos, CCInfo))
    return false;

  MipsIncomingValueHandler Handler(MIRBuilder, MF.getRegInfo());
  if (!Handler.handle(ArgLocs, ArgInfos))
  if (!handleAssignments(Handler, ArgInfos, CCInfo, ArgLocs, MIRBuilder))
    return false;

  if (F.isVarArg()) {
@@ -547,19 +479,8 @@ bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
  FuncOrigArgs.reserve(Info.OrigArgs.size());

  SmallVector<ArgInfo, 8> ArgInfos;
  unsigned i = 0;
  for (auto &Arg : Info.OrigArgs) {

    TargetLowering::ArgListEntry Entry;
    Entry.Ty = Arg.Ty;
    FuncOrigArgs.push_back(Entry);

    ArgInfos.push_back(Arg);
    ++i;
  }

  SmallVector<ISD::OutputArg, 8> Outs;
  subTargetRegTypeForCallingConv(F, ArgInfos, Outs);
  for (auto &Arg : Info.OrigArgs)
    splitToValueTypes(Arg, ArgInfos, DL, Info.CallConv);

  SmallVector<CCValAssign, 8> ArgLocs;
  bool IsCalleeVarArg = false;
@@ -567,20 +488,26 @@ bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
    const Function *CF = static_cast<const Function *>(Info.Callee.getGlobal());
    IsCalleeVarArg = CF->isVarArg();
  }

  // FIXME: Should use MipsCCState::getSpecialCallingConvForCallee, but it
  // depends on looking directly at the call target.
  MipsCCState CCInfo(Info.CallConv, IsCalleeVarArg, MF, ArgLocs,
                     F.getContext());

  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(Info.CallConv),
                       Align(1));

  const char *Call =
      Info.Callee.isSymbol() ? Info.Callee.getSymbolName() : nullptr;
  CCInfo.AnalyzeCallOperands(Outs, TLI.CCAssignFnForCall(), FuncOrigArgs, Call);
  setLocInfo(ArgLocs, Outs);

  MipsOutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), MIB);
  if (!RetHandler.handle(ArgLocs, ArgInfos)) {
  MipsOutgoingValueAssigner Assigner(TLI.CCAssignFnForCall(), Call,
                                     /*IsReturn*/ false);
  if (!determineAssignments(Assigner, ArgInfos, CCInfo))
    return false;

  MipsOutgoingValueHandler ArgHandler(MIRBuilder, MF.getRegInfo(), MIB);
  if (!handleAssignments(ArgHandler, ArgInfos, CCInfo, ArgLocs, MIRBuilder))
    return false;
  }

  unsigned NextStackOffset = CCInfo.getNextStackOffset();
  unsigned StackAlignment = F.getParent()->getOverrideStackAlignment();
@@ -607,21 +534,25 @@ bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,

  if (!Info.OrigRet.Ty->isVoidTy()) {
    ArgInfos.clear();
    splitToValueTypes(Info.OrigRet, ArgInfos, DL, Info.CallConv);

    CallLowering::splitToValueTypes(Info.OrigRet, ArgInfos, DL,
                                    F.getCallingConv());

    const std::string FuncName = F.getName().str();
    SmallVector<ISD::InputArg, 8> Ins;
    subTargetRegTypeForCallingConv(F, ArgInfos, Ins);

    SmallVector<CCValAssign, 8> ArgLocs;
    MipsCCState CCInfo(Info.CallConv, F.isVarArg(), MF, ArgLocs,
    MipsIncomingValueAssigner Assigner(TLI.CCAssignFnForReturn(),
                                       FuncName.c_str(),
                                       /*IsReturn*/ true);
    CallReturnHandler RetHandler(MIRBuilder, MF.getRegInfo(), MIB);

    MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                       F.getContext());

    CCInfo.AnalyzeCallResult(Ins, TLI.CCAssignFnForReturn(), Info.OrigRet.Ty,
                             Call);
    setLocInfo(ArgLocs, Ins);
    if (!determineAssignments(Assigner, ArgInfos, CCInfo))
      return false;

    CallReturnHandler Handler(MIRBuilder, MF.getRegInfo(), MIB);
    if (!Handler.handle(ArgLocs, ArgInfos))
    if (!handleAssignments(RetHandler, ArgInfos, CCInfo, ArgLocs, MIRBuilder))
      return false;
  }

@@ -629,34 +560,3 @@ bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,

  return true;
}

template <typename T>
void MipsCallLowering::subTargetRegTypeForCallingConv(
    const Function &F, ArrayRef<ArgInfo> Args,
    SmallVectorImpl<T> &ISDArgs) const {
  const DataLayout &DL = F.getParent()->getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

  unsigned ArgNo = 0;
  for (auto &Arg : Args) {

    EVT VT = TLI.getValueType(DL, Arg.Ty);
    MVT RegisterVT = TLI.getRegisterTypeForCallingConv(F.getContext(),
                                                       F.getCallingConv(), VT);
    unsigned NumRegs = TLI.getNumRegistersForCallingConv(
        F.getContext(), F.getCallingConv(), VT);

    for (unsigned i = 0; i < NumRegs; ++i) {
      ISD::ArgFlagsTy Flags = Arg.Flags[0];

      if (i == 0)
        Flags.setOrigAlign(TLI.getABIAlignmentForCallingConv(Arg.Ty, DL));
      else
        Flags.setOrigAlign(Align(1));

      ISDArgs.emplace_back(Flags, RegisterVT, VT, true, Arg.OrigArgIndex,
                           0);
    }
    ++ArgNo;
  }
}
@@ -24,6 +24,7 @@ class MipsTargetLowering;
class MipsCallLowering : public CallLowering {

public:
#if 0
  class MipsHandler {
  public:
    MipsHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
@@ -60,6 +61,7 @@ public:
                             unsigned ArgLocsStartIndex, Register ArgsReg,
                             const EVT &VT) = 0;
  };
#endif

  MipsCallLowering(const MipsTargetLowering &TLI);

@@ -73,14 +75,6 @@ public:

  bool lowerCall(MachineIRBuilder &MIRBuilder,
                 CallLoweringInfo &Info) const override;

private:
  /// Based on registers available on target machine split or extend
  /// type if needed, also change pointer type to appropriate integer
  /// type.
  template <typename T>
  void subTargetRegTypeForCallingConv(const Function &F, ArrayRef<ArgInfo> Args,
                                      SmallVectorImpl<T> &ISDArgs) const;
};

} // end namespace llvm
@@ -7,7 +7,8 @@ define signext i8 @sext_arg_i8(i8 signext %a) {
; MIPS32: bb.1.entry:
; MIPS32:   liveins: $a0
; MIPS32:   [[COPY:%[0-9]+]]:_(s32) = COPY $a0
; MIPS32:   [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; MIPS32:   [[ASSERT_SEXT:%[0-9]+]]:_(s32) = G_ASSERT_SEXT [[COPY]], 8
; MIPS32:   [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[ASSERT_SEXT]](s32)
; MIPS32:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[TRUNC]](s8)
; MIPS32:   $v0 = COPY [[SEXT]](s32)
; MIPS32:   RetRA implicit $v0
@@ -20,7 +21,8 @@ define zeroext i8 @zext_arg_i8(i8 zeroext %a) {
; MIPS32: bb.1.entry:
; MIPS32:   liveins: $a0
; MIPS32:   [[COPY:%[0-9]+]]:_(s32) = COPY $a0
; MIPS32:   [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; MIPS32:   [[ASSERT_ZEXT:%[0-9]+]]:_(s32) = G_ASSERT_ZEXT [[COPY]], 8
; MIPS32:   [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[ASSERT_ZEXT]](s32)
; MIPS32:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC]](s8)
; MIPS32:   $v0 = COPY [[ZEXT]](s32)
; MIPS32:   RetRA implicit $v0
@@ -55,20 +57,22 @@ define signext i8 @call_sext_stack_arg_i8(i32 %x1, i32 %x2, i32 %x3, i32 %x4, i8
; MIPS32:   [[COPY3:%[0-9]+]]:_(s32) = COPY $a3
; MIPS32:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; MIPS32:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s32) from %fixed-stack.0, align 8)
; MIPS32:   [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[LOAD]](s32)
; MIPS32:   [[ASSERT_SEXT:%[0-9]+]]:_(s32) = G_ASSERT_SEXT [[LOAD]], 8
; MIPS32:   [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[ASSERT_SEXT]](s32)
; MIPS32:   ADJCALLSTACKDOWN 24, 0, implicit-def $sp, implicit $sp
; MIPS32:   $a0 = COPY [[COPY]](s32)
; MIPS32:   $a1 = COPY [[COPY1]](s32)
; MIPS32:   $a2 = COPY [[COPY2]](s32)
; MIPS32:   $a3 = COPY [[COPY3]](s32)
; MIPS32:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[TRUNC]](s8)
; MIPS32:   [[COPY4:%[0-9]+]]:_(p0) = COPY $sp
; MIPS32:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; MIPS32:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY4]], [[C]](s32)
; MIPS32:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[TRUNC]](s8)
; MIPS32:   G_STORE [[SEXT]](s32), [[PTR_ADD]](p0) :: (store (s32) into stack + 16, align 8)
; MIPS32:   JAL @sext_stack_arg_i8, csr_o32, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit $a2, implicit $a3, implicit-def $v0
; MIPS32:   [[COPY5:%[0-9]+]]:_(s32) = COPY $v0
; MIPS32:   [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY5]](s32)
; MIPS32:   [[ASSERT_SEXT1:%[0-9]+]]:_(s32) = G_ASSERT_SEXT [[COPY5]], 8
; MIPS32:   [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[ASSERT_SEXT1]](s32)
; MIPS32:   ADJCALLSTACKUP 24, 0, implicit-def $sp, implicit $sp
; MIPS32:   [[SEXT1:%[0-9]+]]:_(s32) = G_SEXT [[TRUNC1]](s8)
; MIPS32:   $v0 = COPY [[SEXT1]](s32)
@@ -88,20 +92,22 @@ define zeroext i8 @call_zext_stack_arg_i8(i32 %x1, i32 %x2, i32 %x3, i32 %x4, i8
; MIPS32:   [[COPY3:%[0-9]+]]:_(s32) = COPY $a3
; MIPS32:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; MIPS32:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s32) from %fixed-stack.0, align 8)
; MIPS32:   [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[LOAD]](s32)
; MIPS32:   [[ASSERT_ZEXT:%[0-9]+]]:_(s32) = G_ASSERT_ZEXT [[LOAD]], 8
; MIPS32:   [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[ASSERT_ZEXT]](s32)
; MIPS32:   ADJCALLSTACKDOWN 24, 0, implicit-def $sp, implicit $sp
; MIPS32:   $a0 = COPY [[COPY]](s32)
; MIPS32:   $a1 = COPY [[COPY1]](s32)
; MIPS32:   $a2 = COPY [[COPY2]](s32)
; MIPS32:   $a3 = COPY [[COPY3]](s32)
; MIPS32:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC]](s8)
; MIPS32:   [[COPY4:%[0-9]+]]:_(p0) = COPY $sp
; MIPS32:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; MIPS32:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY4]], [[C]](s32)
; MIPS32:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC]](s8)
; MIPS32:   G_STORE [[ZEXT]](s32), [[PTR_ADD]](p0) :: (store (s32) into stack + 16, align 8)
; MIPS32:   JAL @zext_stack_arg_i8, csr_o32, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit $a2, implicit $a3, implicit-def $v0
; MIPS32:   [[COPY5:%[0-9]+]]:_(s32) = COPY $v0
; MIPS32:   [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY5]](s32)
; MIPS32:   [[ASSERT_ZEXT1:%[0-9]+]]:_(s32) = G_ASSERT_ZEXT [[COPY5]], 8
; MIPS32:   [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[ASSERT_ZEXT1]](s32)
; MIPS32:   ADJCALLSTACKUP 24, 0, implicit-def $sp, implicit $sp
; MIPS32:   [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC1]](s8)
; MIPS32:   $v0 = COPY [[ZEXT1]](s32)
@@ -127,10 +133,10 @@ define i8 @call_aext_stack_arg_i8(i32 %x1, i32 %x2, i32 %x3, i32 %x4, i8 %a) {
; MIPS32:   $a1 = COPY [[COPY1]](s32)
; MIPS32:   $a2 = COPY [[COPY2]](s32)
; MIPS32:   $a3 = COPY [[COPY3]](s32)
; MIPS32:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s8)
; MIPS32:   [[COPY4:%[0-9]+]]:_(p0) = COPY $sp
; MIPS32:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; MIPS32:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY4]], [[C]](s32)
; MIPS32:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s8)
; MIPS32:   G_STORE [[ANYEXT]](s32), [[PTR_ADD]](p0) :: (store (s32) into stack + 16, align 8)
; MIPS32:   JAL @aext_stack_arg_i8, csr_o32, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit $a2, implicit $a3, implicit-def $v0
; MIPS32:   [[COPY5:%[0-9]+]]:_(s32) = COPY $v0
@@ -150,7 +156,8 @@ define signext i16 @sext_arg_i16(i16 signext %a) {
; MIPS32: bb.1.entry:
; MIPS32:   liveins: $a0
; MIPS32:   [[COPY:%[0-9]+]]:_(s32) = COPY $a0
; MIPS32:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; MIPS32:   [[ASSERT_SEXT:%[0-9]+]]:_(s32) = G_ASSERT_SEXT [[COPY]], 16
; MIPS32:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[ASSERT_SEXT]](s32)
; MIPS32:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[TRUNC]](s16)
; MIPS32:   $v0 = COPY [[SEXT]](s32)
; MIPS32:   RetRA implicit $v0
@@ -163,7 +170,8 @@ define zeroext i16 @zext_arg_i16(i16 zeroext %a) {
; MIPS32: bb.1.entry:
; MIPS32:   liveins: $a0
; MIPS32:   [[COPY:%[0-9]+]]:_(s32) = COPY $a0
; MIPS32:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; MIPS32:   [[ASSERT_ZEXT:%[0-9]+]]:_(s32) = G_ASSERT_ZEXT [[COPY]], 16
; MIPS32:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[ASSERT_ZEXT]](s32)
; MIPS32:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC]](s16)
; MIPS32:   $v0 = COPY [[ZEXT]](s32)
; MIPS32:   RetRA implicit $v0
@@ -198,20 +206,22 @@ define signext i16 @call_sext_stack_arg_i16(i32 %x1, i32 %x2, i32 %x3, i32 %x4,
; MIPS32:   [[COPY3:%[0-9]+]]:_(s32) = COPY $a3
; MIPS32:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; MIPS32:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s32) from %fixed-stack.0, align 8)
; MIPS32:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; MIPS32:   [[ASSERT_SEXT:%[0-9]+]]:_(s32) = G_ASSERT_SEXT [[LOAD]], 16
; MIPS32:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[ASSERT_SEXT]](s32)
; MIPS32:   ADJCALLSTACKDOWN 24, 0, implicit-def $sp, implicit $sp
; MIPS32:   $a0 = COPY [[COPY]](s32)
; MIPS32:   $a1 = COPY [[COPY1]](s32)
; MIPS32:   $a2 = COPY [[COPY2]](s32)
; MIPS32:   $a3 = COPY [[COPY3]](s32)
; MIPS32:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[TRUNC]](s16)
; MIPS32:   [[COPY4:%[0-9]+]]:_(p0) = COPY $sp
; MIPS32:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; MIPS32:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY4]], [[C]](s32)
; MIPS32:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[TRUNC]](s16)
; MIPS32:   G_STORE [[SEXT]](s32), [[PTR_ADD]](p0) :: (store (s32) into stack + 16, align 8)
; MIPS32:   JAL @sext_stack_arg_i16, csr_o32, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit $a2, implicit $a3, implicit-def $v0
; MIPS32:   [[COPY5:%[0-9]+]]:_(s32) = COPY $v0
; MIPS32:   [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY5]](s32)
; MIPS32:   [[ASSERT_SEXT1:%[0-9]+]]:_(s32) = G_ASSERT_SEXT [[COPY5]], 16
; MIPS32:   [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[ASSERT_SEXT1]](s32)
; MIPS32:   ADJCALLSTACKUP 24, 0, implicit-def $sp, implicit $sp
; MIPS32:   [[SEXT1:%[0-9]+]]:_(s32) = G_SEXT [[TRUNC1]](s16)
; MIPS32:   $v0 = COPY [[SEXT1]](s32)
@@ -231,20 +241,22 @@ define zeroext i16 @call_zext_stack_arg_i16(i32 %x1, i32 %x2, i32 %x3, i32 %x4,
; MIPS32:   [[COPY3:%[0-9]+]]:_(s32) = COPY $a3
; MIPS32:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; MIPS32:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s32) from %fixed-stack.0, align 8)
; MIPS32:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
; MIPS32:   [[ASSERT_ZEXT:%[0-9]+]]:_(s32) = G_ASSERT_ZEXT [[LOAD]], 16
; MIPS32:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[ASSERT_ZEXT]](s32)
; MIPS32:   ADJCALLSTACKDOWN 24, 0, implicit-def $sp, implicit $sp
; MIPS32:   $a0 = COPY [[COPY]](s32)
; MIPS32:   $a1 = COPY [[COPY1]](s32)
; MIPS32:   $a2 = COPY [[COPY2]](s32)
; MIPS32:   $a3 = COPY [[COPY3]](s32)
; MIPS32:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC]](s16)
; MIPS32:   [[COPY4:%[0-9]+]]:_(p0) = COPY $sp
; MIPS32:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; MIPS32:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY4]], [[C]](s32)
; MIPS32:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC]](s16)
; MIPS32:   G_STORE [[ZEXT]](s32), [[PTR_ADD]](p0) :: (store (s32) into stack + 16, align 8)
; MIPS32:   JAL @zext_stack_arg_i16, csr_o32, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit $a2, implicit $a3, implicit-def $v0
; MIPS32:   [[COPY5:%[0-9]+]]:_(s32) = COPY $v0
; MIPS32:   [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY5]](s32)
; MIPS32:   [[ASSERT_ZEXT1:%[0-9]+]]:_(s32) = G_ASSERT_ZEXT [[COPY5]], 16
; MIPS32:   [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[ASSERT_ZEXT1]](s32)
; MIPS32:   ADJCALLSTACKUP 24, 0, implicit-def $sp, implicit $sp
; MIPS32:   [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC1]](s16)
; MIPS32:   $v0 = COPY [[ZEXT1]](s32)
@@ -270,10 +282,10 @@ define i16 @call_aext_stack_arg_i16(i32 %x1, i32 %x2, i32 %x3, i32 %x4, i16 %a)
; MIPS32:   $a1 = COPY [[COPY1]](s32)
; MIPS32:   $a2 = COPY [[COPY2]](s32)
; MIPS32:   $a3 = COPY [[COPY3]](s32)
; MIPS32:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
; MIPS32:   [[COPY4:%[0-9]+]]:_(p0) = COPY $sp
; MIPS32:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; MIPS32:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY4]], [[C]](s32)
; MIPS32:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
; MIPS32:   G_STORE [[ANYEXT]](s32), [[PTR_ADD]](p0) :: (store (s32) into stack + 16, align 8)
; MIPS32:   JAL @aext_stack_arg_i16, csr_o32, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit $a2, implicit $a3, implicit-def $v0
; MIPS32:   [[COPY5:%[0-9]+]]:_(s32) = COPY $v0
@@ -1,4 +1,3 @@
; XFAIL: *
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py

; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -stop-after=irtranslator -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=FP32
@@ -157,7 +156,7 @@ define float @call_float_in_gpr(i32 %a, float %b) {
; FP32:   ADJCALLSTACKDOWN 16, 0, implicit-def $sp, implicit $sp
; FP32:   $a0 = COPY [[COPY]](s32)
; FP32:   $a1 = COPY [[COPY1]](s32)
; FP32:   JAL @float_in_gpr, csr_o32, implicit-def $ra, implicit-def $sp, implicit $a0, implicit-def $f0
; FP32:   JAL @float_in_gpr, csr_o32, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit-def $f0
; FP32:   [[COPY2:%[0-9]+]]:_(s32) = COPY $f0
; FP32:   ADJCALLSTACKUP 16, 0, implicit-def $sp, implicit $sp
; FP32:   $f0 = COPY [[COPY2]](s32)
@@ -170,7 +169,7 @@ define float @call_float_in_gpr(i32 %a, float %b) {
; FP64:   ADJCALLSTACKDOWN 16, 0, implicit-def $sp, implicit $sp
; FP64:   $a0 = COPY [[COPY]](s32)
; FP64:   $a1 = COPY [[COPY1]](s32)
; FP64:   JAL @float_in_gpr, csr_o32_fp64, implicit-def $ra, implicit-def $sp, implicit $a0, implicit-def $f0
; FP64:   JAL @float_in_gpr, csr_o32_fp64, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit-def $f0
; FP64:   [[COPY2:%[0-9]+]]:_(s32) = COPY $f0
; FP64:   ADJCALLSTACKUP 16, 0, implicit-def $sp, implicit $sp
; FP64:   $f0 = COPY [[COPY2]](s32)
@@ -1,4 +1,3 @@
; XFAIL: *
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32,FP32
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -mattr=+fp64,+mips32r2 -global-isel -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32,FP64