Redirect DataLayout from TargetMachine to Module in ComputeValueVTs()
Summary:
Avoid using the TargetMachine-owned DataLayout and use the Module-owned one
instead. This requires passing the DataLayout up the stack to
ComputeValueVTs().

This change is part of a series of commits dedicated to having a single
DataLayout during compilation, by always using the one owned by the module.

Reviewers: echristo

Subscribers: jholewinski, yaron.keren, rafael, llvm-commits

Differential Revision: http://reviews.llvm.org/D11019

From: Mehdi Amini <mehdi.amini@apple.com>

llvm-svn: 241773
Parent commit: caa7b95d82
This commit:   c61d582f14
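For readers updating out-of-tree callers, here is a minimal sketch of what the API change means at a call site. Only ComputeValueVTs(), GetReturnInfo(), and the DataLayout accessors that appear in the hunks below come from this patch; the surrounding function lowerReturnSketch(), its parameter names, and the chosen include paths are hypothetical, added purely for illustration.

    // A hypothetical caller, illustrating the updated signatures only.
    // Before this patch, ComputeValueVTs()/GetReturnInfo() reached the
    // TargetMachine-owned DataLayout through the TargetLowering internally;
    // now the caller passes the Module/MachineFunction-owned DataLayout
    // explicitly.
    #include "llvm/ADT/SmallVector.h"
    #include "llvm/CodeGen/Analysis.h"        // ComputeValueVTs()
    #include "llvm/CodeGen/MachineFunction.h"
    #include "llvm/Target/TargetLowering.h"   // GetReturnInfo()

    using namespace llvm;

    static void lowerReturnSketch(const TargetLowering &TLI, MachineFunction &MF,
                                  Type *RetTy, AttributeSet RetAttrs) {
      const DataLayout &DL = MF.getDataLayout();     // Module-owned layout

      SmallVector<EVT, 4> ValueVTs;
      ComputeValueVTs(TLI, DL, RetTy, ValueVTs);     // DL is now an explicit argument

      SmallVector<ISD::OutputArg, 4> Outs;
      GetReturnInfo(RetTy, RetAttrs, Outs, TLI, DL); // likewise for GetReturnInfo
    }

In-tree, the SelectionDAG and FastISel call sites in the diff below obtain the same DataLayout through DAG.getDataLayout(), MF->getDataLayout(), or the DL member available in the FastISel subclasses, rather than through the TargetMachine.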
@@ -64,7 +64,7 @@ inline unsigned ComputeLinearIndex(Type *Ty,
 /// If Offsets is non-null, it points to a vector to be filled in
 /// with the in-memory offsets of each of the individual values.
 ///
-void ComputeValueVTs(const TargetLowering &TLI, Type *Ty,
+void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty,
                      SmallVectorImpl<EVT> &ValueVTs,
                      SmallVectorImpl<uint64_t> *Offsets = nullptr,
                      uint64_t StartingOffset = 0);
@@ -2819,9 +2819,9 @@ public:
 /// Given an LLVM IR type and return type attributes, compute the return value
 /// EVTs and flags, and optionally also the offsets, if the return value is
 /// being lowered to memory.
-void GetReturnInfo(Type* ReturnType, AttributeSet attr,
+void GetReturnInfo(Type *ReturnType, AttributeSet attr,
                    SmallVectorImpl<ISD::OutputArg> &Outs,
-                   const TargetLowering &TLI);
+                   const TargetLowering &TLI, const DataLayout &DL);
 
 } // end llvm namespace
 
@@ -81,27 +81,27 @@ unsigned llvm::ComputeLinearIndex(Type *Ty,
 /// If Offsets is non-null, it points to a vector to be filled in
 /// with the in-memory offsets of each of the individual values.
 ///
-void llvm::ComputeValueVTs(const TargetLowering &TLI, Type *Ty,
-                           SmallVectorImpl<EVT> &ValueVTs,
+void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
+                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                            SmallVectorImpl<uint64_t> *Offsets,
                            uint64_t StartingOffset) {
   // Given a struct type, recursively traverse the elements.
   if (StructType *STy = dyn_cast<StructType>(Ty)) {
-    const StructLayout *SL = TLI.getDataLayout()->getStructLayout(STy);
+    const StructLayout *SL = DL.getStructLayout(STy);
     for (StructType::element_iterator EB = STy->element_begin(),
                                       EI = EB,
                                       EE = STy->element_end();
          EI != EE; ++EI)
-      ComputeValueVTs(TLI, *EI, ValueVTs, Offsets,
+      ComputeValueVTs(TLI, DL, *EI, ValueVTs, Offsets,
                       StartingOffset + SL->getElementOffset(EI - EB));
     return;
   }
   // Given an array type, recursively traverse the elements.
   if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
     Type *EltTy = ATy->getElementType();
-    uint64_t EltSize = TLI.getDataLayout()->getTypeAllocSize(EltTy);
+    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
     for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
-      ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
+      ComputeValueVTs(TLI, DL, EltTy, ValueVTs, Offsets,
                       StartingOffset + i * EltSize);
     return;
   }
@@ -908,10 +908,10 @@ bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
   // Handle the incoming return values from the call.
   CLI.clearIns();
   SmallVector<EVT, 4> RetTys;
-  ComputeValueVTs(TLI, CLI.RetTy, RetTys);
+  ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);
 
   SmallVector<ISD::OutputArg, 4> Outs;
-  GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, TLI);
+  GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);
 
   bool CanLowerReturn = TLI.CanLowerReturn(
       CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());
@@ -1480,7 +1480,7 @@ bool FastISel::selectExtractValue(const User *U) {
   unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());
 
   SmallVector<EVT, 4> AggValueVTs;
-  ComputeValueVTs(TLI, AggTy, AggValueVTs);
+  ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);
 
   for (unsigned i = 0; i < VTIndex; i++)
     ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);
@@ -90,7 +90,8 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
 
   // Check whether the function can return without sret-demotion.
   SmallVector<ISD::OutputArg, 4> Outs;
-  GetReturnInfo(Fn->getReturnType(), Fn->getAttributes(), Outs, *TLI);
+  GetReturnInfo(Fn->getReturnType(), Fn->getAttributes(), Outs, *TLI,
+                mf.getDataLayout());
   CanLowerReturn = TLI->CanLowerReturn(Fn->getCallingConv(), *MF,
                                        Fn->isVarArg(), Outs, Fn->getContext());
 
@@ -236,7 +237,7 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
       assert(PHIReg && "PHI node does not have an assigned virtual register!");
 
       SmallVector<EVT, 4> ValueVTs;
-      ComputeValueVTs(*TLI, PN->getType(), ValueVTs);
+      ComputeValueVTs(*TLI, MF->getDataLayout(), PN->getType(), ValueVTs);
       for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
         EVT VT = ValueVTs[vti];
         unsigned NumRegisters = TLI->getNumRegisters(Fn->getContext(), VT);
@@ -366,7 +367,7 @@ unsigned FunctionLoweringInfo::CreateRegs(Type *Ty) {
   const TargetLowering *TLI = MF->getSubtarget().getTargetLowering();
 
   SmallVector<EVT, 4> ValueVTs;
-  ComputeValueVTs(*TLI, Ty, ValueVTs);
+  ComputeValueVTs(*TLI, MF->getDataLayout(), Ty, ValueVTs);
 
   unsigned FirstReg = 0;
   for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
@@ -413,7 +414,7 @@ void FunctionLoweringInfo::ComputePHILiveOutRegInfo(const PHINode *PN) {
     return;
 
   SmallVector<EVT, 1> ValueVTs;
-  ComputeValueVTs(*TLI, Ty, ValueVTs);
+  ComputeValueVTs(*TLI, MF->getDataLayout(), Ty, ValueVTs);
   assert(ValueVTs.size() == 1 &&
          "PHIs with non-vector integer types should have a single VT.");
   EVT IntVT = ValueVTs[0];
@@ -588,14 +588,14 @@ RegsForValue::RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt,
                            EVT valuevt)
     : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}
 
-RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &tli,
-                           unsigned Reg, Type *Ty) {
-  ComputeValueVTs(tli, Ty, ValueVTs);
+RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
+                           const DataLayout &DL, unsigned Reg, Type *Ty) {
+  ComputeValueVTs(TLI, DL, Ty, ValueVTs);
 
   for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
     EVT ValueVT = ValueVTs[Value];
-    unsigned NumRegs = tli.getNumRegisters(Context, ValueVT);
-    MVT RegisterVT = tli.getRegisterType(Context, ValueVT);
+    unsigned NumRegs = TLI.getNumRegisters(Context, ValueVT);
+    MVT RegisterVT = TLI.getRegisterType(Context, ValueVT);
     for (unsigned i = 0; i != NumRegs; ++i)
       Regs.push_back(Reg + i);
     RegVTs.push_back(RegisterVT);
@@ -964,8 +964,8 @@ SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
 
   if (It != FuncInfo.ValueMap.end()) {
     unsigned InReg = It->second;
-    RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(), InReg,
-                     Ty);
+    RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
+                     DAG.getDataLayout(), InReg, Ty);
     SDValue Chain = DAG.getEntryNode();
     Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
     resolveDanglingDebugInfo(V, Result);
@@ -1095,7 +1095,7 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
            "Unknown struct or array constant!");
 
     SmallVector<EVT, 4> ValueVTs;
-    ComputeValueVTs(TLI, C->getType(), ValueVTs);
+    ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
     unsigned NumElts = ValueVTs.size();
     if (NumElts == 0)
       return SDValue(); // empty struct
@@ -1153,7 +1153,8 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
   // If this is an instruction which fast-isel has deferred, select it now.
   if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
     unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
-    RegsForValue RFV(*DAG.getContext(), TLI, InReg, Inst->getType());
+    RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
+                     Inst->getType());
     SDValue Chain = DAG.getEntryNode();
     return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
   }
@@ -1163,6 +1164,7 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
 
 void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+  auto &DL = DAG.getDataLayout();
   SDValue Chain = getControlRoot();
   SmallVector<ISD::OutputArg, 8> Outs;
   SmallVector<SDValue, 8> OutVals;
@@ -1175,7 +1177,7 @@ void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
     // Leave Outs empty so that LowerReturn won't try to load return
     // registers the usual way.
    SmallVector<EVT, 1> PtrValueVTs;
-    ComputeValueVTs(TLI, PointerType::getUnqual(F->getReturnType()),
+    ComputeValueVTs(TLI, DL, PointerType::getUnqual(F->getReturnType()),
                     PtrValueVTs);
 
     SDValue RetPtr = DAG.getRegister(DemoteReg, PtrValueVTs[0]);
@@ -1183,7 +1185,7 @@ void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
 
     SmallVector<EVT, 4> ValueVTs;
     SmallVector<uint64_t, 4> Offsets;
-    ComputeValueVTs(TLI, I.getOperand(0)->getType(), ValueVTs, &Offsets);
+    ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &Offsets);
     unsigned NumValues = ValueVTs.size();
 
     SmallVector<SDValue, 4> Chains(NumValues);
@@ -1203,7 +1205,7 @@ void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
                         MVT::Other, Chains);
   } else if (I.getNumOperands() != 0) {
     SmallVector<EVT, 4> ValueVTs;
-    ComputeValueVTs(TLI, I.getOperand(0)->getType(), ValueVTs);
+    ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs);
     unsigned NumValues = ValueVTs.size();
     if (NumValues) {
       SDValue RetOp = getValue(I.getOperand(0));
@@ -2012,7 +2014,7 @@ void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
 
   SmallVector<EVT, 2> ValueVTs;
   SDLoc dl = getCurSDLoc();
-  ComputeValueVTs(TLI, LP.getType(), ValueVTs);
+  ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs);
   assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");
 
   // Get the two live-in registers as SDValues. The physregs have already been
@@ -2276,7 +2278,8 @@ void SelectionDAGBuilder::visitFCmp(const User &I) {
 
 void SelectionDAGBuilder::visitSelect(const User &I) {
   SmallVector<EVT, 4> ValueVTs;
-  ComputeValueVTs(DAG.getTargetLoweringInfo(), I.getType(), ValueVTs);
+  ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
+                  ValueVTs);
   unsigned NumValues = ValueVTs.size();
   if (NumValues == 0) return;
 
@@ -2675,9 +2678,9 @@ void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
 
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   SmallVector<EVT, 4> AggValueVTs;
-  ComputeValueVTs(TLI, AggTy, AggValueVTs);
+  ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs);
   SmallVector<EVT, 4> ValValueVTs;
-  ComputeValueVTs(TLI, ValTy, ValValueVTs);
+  ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
 
   unsigned NumAggValues = AggValueVTs.size();
   unsigned NumValValues = ValValueVTs.size();
@@ -2721,7 +2724,7 @@ void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
 
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   SmallVector<EVT, 4> ValValueVTs;
-  ComputeValueVTs(TLI, ValTy, ValValueVTs);
+  ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
 
   unsigned NumValValues = ValValueVTs.size();
 
@@ -2897,7 +2900,7 @@ void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   SmallVector<EVT, 4> ValueVTs;
   SmallVector<uint64_t, 4> Offsets;
-  ComputeValueVTs(TLI, Ty, ValueVTs, &Offsets);
+  ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &Offsets);
   unsigned NumValues = ValueVTs.size();
   if (NumValues == 0)
     return;
@@ -2974,8 +2977,8 @@ void SelectionDAGBuilder::visitStore(const StoreInst &I) {
 
   SmallVector<EVT, 4> ValueVTs;
   SmallVector<uint64_t, 4> Offsets;
-  ComputeValueVTs(DAG.getTargetLoweringInfo(), SrcV->getType(),
-                  ValueVTs, &Offsets);
+  ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
+                  SrcV->getType(), ValueVTs, &Offsets);
   unsigned NumValues = ValueVTs.size();
   if (NumValues == 0)
     return;
@@ -3390,7 +3393,7 @@ void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
   }
 
   SmallVector<EVT, 4> ValueVTs;
-  ComputeValueVTs(TLI, I.getType(), ValueVTs);
+  ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
 
   if (HasChain)
     ValueVTs.push_back(MVT::Other);
@@ -6653,7 +6656,7 @@ void SelectionDAGBuilder::visitPatchpoint(ImmutableCallSite CS,
   // Create the return types based on the intrinsic definition
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   SmallVector<EVT, 3> ValueVTs;
-  ComputeValueVTs(TLI, CS->getType(), ValueVTs);
+  ComputeValueVTs(TLI, DAG.getDataLayout(), CS->getType(), ValueVTs);
   assert(ValueVTs.size() == 1 && "Expected only one return value type.");
 
   // There is always a chain and a glue type at the end
@@ -6718,10 +6721,10 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
   SmallVector<EVT, 4> RetTys;
   SmallVector<uint64_t, 4> Offsets;
-  ComputeValueVTs(*this, CLI.RetTy, RetTys, &Offsets);
+  auto &DL = CLI.DAG.getDataLayout();
+  ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets);
 
   SmallVector<ISD::OutputArg, 4> Outs;
-  GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, *this);
+  GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);
 
   bool CanLowerReturn =
       this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
@@ -6784,7 +6787,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
   ArgListTy &Args = CLI.getArgs();
   for (unsigned i = 0, e = Args.size(); i != e; ++i) {
     SmallVector<EVT, 4> ValueVTs;
-    ComputeValueVTs(*this, Args[i].Ty, ValueVTs);
+    ComputeValueVTs(*this, DL, Args[i].Ty, ValueVTs);
     Type *FinalType = Args[i].Ty;
     if (Args[i].isByVal)
       FinalType = cast<PointerType>(Args[i].Ty)->getElementType();
@@ -6923,7 +6926,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
     SmallVector<EVT, 1> PVTs;
     Type *PtrRetTy = PointerType::getUnqual(OrigRetTy);
 
-    ComputeValueVTs(*this, PtrRetTy, PVTs);
+    ComputeValueVTs(*this, DL, PtrRetTy, PVTs);
     assert(PVTs.size() == 1 && "Pointers should fit in one register");
     EVT PtrVT = PVTs[0];
 
@@ -6997,7 +7000,8 @@ SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
   assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
 
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
-  RegsForValue RFV(V->getContext(), TLI, Reg, V->getType());
+  RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
+                   V->getType());
   SDValue Chain = DAG.getEntryNode();
 
   ISD::NodeType ExtendType = (FuncInfo.PreferredExtendType.find(V) ==
@@ -7036,7 +7040,8 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
   if (!FuncInfo->CanLowerReturn) {
     // Put in an sret pointer parameter before all the other parameters.
     SmallVector<EVT, 1> ValueVTs;
-    ComputeValueVTs(*TLI, PointerType::getUnqual(F.getReturnType()), ValueVTs);
+    ComputeValueVTs(*TLI, DAG.getDataLayout(),
+                    PointerType::getUnqual(F.getReturnType()), ValueVTs);
 
     // NOTE: Assuming that a pointer will never break down to more than one VT
    // or one register.
@@ -7053,7 +7058,7 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
   for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end();
        I != E; ++I, ++Idx) {
     SmallVector<EVT, 4> ValueVTs;
-    ComputeValueVTs(*TLI, I->getType(), ValueVTs);
+    ComputeValueVTs(*TLI, DAG.getDataLayout(), I->getType(), ValueVTs);
     bool isArgValueUsed = !I->use_empty();
     unsigned PartBase = 0;
     Type *FinalType = I->getType();
@@ -7153,7 +7158,8 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
     // Create a virtual register for the sret pointer, and put in a copy
     // from the sret argument into it.
     SmallVector<EVT, 1> ValueVTs;
-    ComputeValueVTs(*TLI, PointerType::getUnqual(F.getReturnType()), ValueVTs);
+    ComputeValueVTs(*TLI, DAG.getDataLayout(),
+                    PointerType::getUnqual(F.getReturnType()), ValueVTs);
    MVT VT = ValueVTs[0].getSimpleVT();
    MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
    ISD::NodeType AssertOp = ISD::DELETED_NODE;
@@ -7177,7 +7183,7 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
        ++I, ++Idx) {
     SmallVector<SDValue, 4> ArgValues;
     SmallVector<EVT, 4> ValueVTs;
-    ComputeValueVTs(*TLI, I->getType(), ValueVTs);
+    ComputeValueVTs(*TLI, DAG.getDataLayout(), I->getType(), ValueVTs);
     unsigned NumValues = ValueVTs.size();
 
     // If this argument is unused then remember its value. It is used to generate
@@ -7324,7 +7330,7 @@ SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
       // the input for this MBB.
       SmallVector<EVT, 4> ValueVTs;
      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
-      ComputeValueVTs(TLI, PN->getType(), ValueVTs);
+      ComputeValueVTs(TLI, DAG.getDataLayout(), PN->getType(), ValueVTs);
      for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
        EVT VT = ValueVTs[vti];
        unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
@@ -915,8 +915,8 @@ struct RegsForValue {
 
   RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt, EVT valuevt);
 
-  RegsForValue(LLVMContext &Context, const TargetLowering &tli, unsigned Reg,
-               Type *Ty);
+  RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
+               const DataLayout &DL, unsigned Reg, Type *Ty);
 
   /// append - Add the specified values to this one.
   void append(const RegsForValue &RHS) {
@@ -337,9 +337,9 @@ lowerCallFromStatepoint(ImmutableStatepoint ISP, MachineBasicBlock *LandingPad,
     // TODO: To eliminate this problem we can remove gc.result intrinsics
     // completelly and make statepoint call to return a tuple.
     unsigned Reg = Builder.FuncInfo.CreateRegs(ISP.getActualReturnType());
-    RegsForValue RFV(*Builder.DAG.getContext(),
-                     Builder.DAG.getTargetLoweringInfo(), Reg,
-                     ISP.getActualReturnType());
+    RegsForValue RFV(
+        *Builder.DAG.getContext(), Builder.DAG.getTargetLoweringInfo(),
+        Builder.DAG.getDataLayout(), Reg, ISP.getActualReturnType());
     SDValue Chain = Builder.DAG.getEntryNode();
 
     RFV.getCopyToRegs(ReturnValue, Builder.DAG, Builder.getCurSDLoc(), Chain,
@@ -1484,11 +1484,11 @@ unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT
 /// type of the given function. This does not require a DAG or a return value,
 /// and is suitable for use before any DAGs for the function are constructed.
 /// TODO: Move this out of TargetLowering.cpp.
-void llvm::GetReturnInfo(Type* ReturnType, AttributeSet attr,
+void llvm::GetReturnInfo(Type *ReturnType, AttributeSet attr,
                          SmallVectorImpl<ISD::OutputArg> &Outs,
-                         const TargetLowering &TLI) {
+                         const TargetLowering &TLI, const DataLayout &DL) {
   SmallVector<EVT, 4> ValueVTs;
-  ComputeValueVTs(TLI, ReturnType, ValueVTs);
+  ComputeValueVTs(TLI, DL, ReturnType, ValueVTs);
   unsigned NumValues = ValueVTs.size();
   if (NumValues == 0) return;
 
@@ -3689,7 +3689,7 @@ bool AArch64FastISel::selectRet(const Instruction *I) {
   if (Ret->getNumOperands() > 0) {
     CallingConv::ID CC = F.getCallingConv();
     SmallVector<ISD::OutputArg, 4> Outs;
-    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI);
+    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
 
     // Analyze operands of the call, assigning locations to each operand.
     SmallVector<CCValAssign, 16> ValLocs;
@@ -2093,7 +2093,7 @@ bool ARMFastISel::SelectRet(const Instruction *I) {
   CallingConv::ID CC = F.getCallingConv();
   if (Ret->getNumOperands() > 0) {
     SmallVector<ISD::OutputArg, 4> Outs;
-    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI);
+    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
 
     // Analyze operands of the call, assigning locations to each operand.
     SmallVector<CCValAssign, 16> ValLocs;
@@ -1415,7 +1415,8 @@ bool MipsFastISel::selectRet(const Instruction *I) {
   if (Ret->getNumOperands() > 0) {
     CallingConv::ID CC = F.getCallingConv();
     SmallVector<ISD::OutputArg, 4> Outs;
-    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI);
+    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
+
     // Analyze operands of the call, assigning locations to each operand.
     SmallVector<CCValAssign, 16> ValLocs;
     MipsCCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs,
@@ -340,7 +340,7 @@ MCOperand NVPTXAsmPrinter::GetSymbolRef(const MCSymbol *Symbol) {
 }
 
 void NVPTXAsmPrinter::printReturnValStr(const Function *F, raw_ostream &O) {
-  const DataLayout *TD = TM.getDataLayout();
+  const DataLayout &DL = getDataLayout();
   const TargetLowering *TLI = nvptxSubtarget->getTargetLowering();
 
   Type *Ty = F->getReturnType();
@@ -369,17 +369,17 @@ void NVPTXAsmPrinter::printReturnValStr(const Function *F, raw_ostream &O) {
      O << ".param .b" << TLI->getPointerTy().getSizeInBits()
        << " func_retval0";
    } else if ((Ty->getTypeID() == Type::StructTyID) || isa<VectorType>(Ty)) {
-      unsigned totalsz = TD->getTypeAllocSize(Ty);
+      unsigned totalsz = DL.getTypeAllocSize(Ty);
      unsigned retAlignment = 0;
      if (!llvm::getAlign(*F, 0, retAlignment))
-        retAlignment = TD->getABITypeAlignment(Ty);
+        retAlignment = DL.getABITypeAlignment(Ty);
      O << ".param .align " << retAlignment << " .b8 func_retval0[" << totalsz
        << "]";
    } else
      llvm_unreachable("Unknown return type");
  } else {
    SmallVector<EVT, 16> vtparts;
-    ComputeValueVTs(*TLI, Ty, vtparts);
+    ComputeValueVTs(*TLI, DL, Ty, vtparts);
    unsigned idx = 0;
    for (unsigned i = 0, e = vtparts.size(); i != e; ++i) {
      unsigned elems = 1;
@@ -1579,7 +1579,7 @@ void NVPTXAsmPrinter::emitFunctionParamList(const Function *F, raw_ostream &O) {
      // Further, if a part is vector, print the above for
      // each vector element.
      SmallVector<EVT, 16> vtparts;
-      ComputeValueVTs(*TLI, ETy, vtparts);
+      ComputeValueVTs(*TLI, getDataLayout(), ETy, vtparts);
      for (unsigned i = 0, e = vtparts.size(); i != e; ++i) {
        unsigned elems = 1;
        EVT elemtype = vtparts[i];
@@ -80,14 +80,14 @@ static bool IsPTXVectorType(MVT VT) {
 /// NOTE: This is a band-aid for code that expects ComputeValueVTs to return the
 /// same number of types as the Ins/Outs arrays in LowerFormalArguments,
 /// LowerCall, and LowerReturn.
-static void ComputePTXValueVTs(const TargetLowering &TLI, Type *Ty,
-                               SmallVectorImpl<EVT> &ValueVTs,
+static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL,
+                               Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                                SmallVectorImpl<uint64_t> *Offsets = nullptr,
                                uint64_t StartingOffset = 0) {
   SmallVector<EVT, 16> TempVTs;
   SmallVector<uint64_t, 16> TempOffsets;
 
-  ComputeValueVTs(TLI, Ty, TempVTs, &TempOffsets, StartingOffset);
+  ComputeValueVTs(TLI, DL, Ty, TempVTs, &TempOffsets, StartingOffset);
   for (unsigned i = 0, e = TempVTs.size(); i != e; ++i) {
     EVT VT = TempVTs[i];
     uint64_t Off = TempOffsets[i];
@@ -960,7 +960,7 @@ NVPTXTargetLowering::getPrototype(Type *retTy, const ArgListTy &Args,
       O << "[" << sz << "]";
       // update the index for Outs
       SmallVector<EVT, 16> vtparts;
-      ComputeValueVTs(*this, Ty, vtparts);
+      ComputeValueVTs(*this, *TD, Ty, vtparts);
       if (unsigned len = vtparts.size())
         OIdx += len - 1;
       continue;
@@ -1064,9 +1064,9 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
   assert(isABI && "Non-ABI compilation is not supported");
   if (!isABI)
     return Chain;
-  const DataLayout *TD = getDataLayout();
   MachineFunction &MF = DAG.getMachineFunction();
   const Function *F = MF.getFunction();
+  auto &DL = MF.getDataLayout();
 
   SDValue tempChain = Chain;
   Chain = DAG.getCALLSEQ_START(Chain,
@@ -1096,11 +1096,11 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
       // aggregate
       SmallVector<EVT, 16> vtparts;
       SmallVector<uint64_t, 16> Offsets;
-      ComputePTXValueVTs(*this, Ty, vtparts, &Offsets, 0);
+      ComputePTXValueVTs(*this, DL, Ty, vtparts, &Offsets, 0);
 
       unsigned align = getArgumentAlignment(Callee, CS, Ty, paramCount + 1);
       // declare .param .align <align> .b8 .param<n>[<size>];
-      unsigned sz = TD->getTypeAllocSize(Ty);
+      unsigned sz = DL.getTypeAllocSize(Ty);
       SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
       SDValue DeclareParamOps[] = { Chain, DAG.getConstant(align, dl,
                                                            MVT::i32),
@@ -1140,7 +1140,7 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
       EVT ObjectVT = getValueType(Ty);
       unsigned align = getArgumentAlignment(Callee, CS, Ty, paramCount + 1);
       // declare .param .align <align> .b8 .param<n>[<size>];
-      unsigned sz = TD->getTypeAllocSize(Ty);
+      unsigned sz = DL.getTypeAllocSize(Ty);
       SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
       SDValue DeclareParamOps[] = { Chain,
                                     DAG.getConstant(align, dl, MVT::i32),
@@ -1321,7 +1321,7 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
      SmallVector<uint64_t, 16> Offsets;
      const PointerType *PTy = dyn_cast<PointerType>(Args[i].Ty);
      assert(PTy && "Type of a byval parameter should be pointer");
-      ComputePTXValueVTs(*this, PTy->getElementType(), vtparts, &Offsets, 0);
+      ComputePTXValueVTs(*this, DL, PTy->getElementType(), vtparts, &Offsets, 0);
 
      // declare .param .align <align> .b8 .param<n>[<size>];
      unsigned sz = Outs[OIdx].Flags.getByValSize();
@@ -1371,12 +1371,12 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
   // Handle Result
   if (Ins.size() > 0) {
     SmallVector<EVT, 16> resvtparts;
-    ComputeValueVTs(*this, retTy, resvtparts);
+    ComputeValueVTs(*this, DL, retTy, resvtparts);
 
     // Declare
     // .param .align 16 .b8 retval0[<size-in-bytes>], or
     // .param .b<size-in-bits> retval0
-    unsigned resultsz = TD->getTypeAllocSizeInBits(retTy);
+    unsigned resultsz = DL.getTypeAllocSizeInBits(retTy);
     // Emit ".param .b<size-in-bits> retval0" instead of byte arrays only for
     // these three types to match the logic in
     // NVPTXAsmPrinter::printReturnValStr and NVPTXTargetLowering::getPrototype.
@@ -1590,13 +1590,13 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
            Elt = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
          InVals.push_back(Elt);
        }
-        Ofst += TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
+        Ofst += DL.getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
      }
    }
  } else {
    SmallVector<EVT, 16> VTs;
    SmallVector<uint64_t, 16> Offsets;
-    ComputePTXValueVTs(*this, retTy, VTs, &Offsets, 0);
+    ComputePTXValueVTs(*this, DL, retTy, VTs, &Offsets, 0);
    assert(VTs.size() == Ins.size() && "Bad value decomposition");
    unsigned RetAlign = getArgumentAlignment(Callee, CS, retTy, 0);
    for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
@@ -1608,8 +1608,7 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
 
      SmallVector<EVT, 4> LoadRetVTs;
      EVT TheLoadType = VTs[i];
-      if (retTy->isIntegerTy() &&
-          TD->getTypeAllocSizeInBits(retTy) < 32) {
+      if (retTy->isIntegerTy() && DL.getTypeAllocSizeInBits(retTy) < 32) {
        // This is for integer types only, and specifically not for
        // aggregates.
        LoadRetVTs.push_back(MVT::i32);
@@ -2064,7 +2063,7 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
     const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl, SelectionDAG &DAG,
     SmallVectorImpl<SDValue> &InVals) const {
   MachineFunction &MF = DAG.getMachineFunction();
-  const DataLayout *TD = getDataLayout();
+  const DataLayout &DL = MF.getDataLayout();
 
   const Function *F = MF.getFunction();
   const AttributeSet &PAL = F->getAttributes();
@@ -2118,7 +2117,7 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
      if (Ty->isAggregateType()) {
        SmallVector<EVT, 16> vtparts;
 
-        ComputePTXValueVTs(*this, Ty, vtparts);
+        ComputePTXValueVTs(*this, DL, Ty, vtparts);
        assert(vtparts.size() > 0 && "empty aggregate type not expected");
        for (unsigned parti = 0, parte = vtparts.size(); parti != parte;
             ++parti) {
@@ -2156,7 +2155,7 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
        // NOTE: Here, we lose the ability to issue vector loads for vectors
        // that are a part of a struct. This should be investigated in the
        // future.
-        ComputePTXValueVTs(*this, Ty, vtparts, &offsets, 0);
+        ComputePTXValueVTs(*this, DL, Ty, vtparts, &offsets, 0);
        assert(vtparts.size() > 0 && "empty aggregate type not expected");
        bool aggregateIsPacked = false;
        if (StructType *STy = llvm::dyn_cast<StructType>(Ty))
@@ -2172,10 +2171,10 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
          SDValue srcAddr =
              DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
                          DAG.getConstant(offsets[parti], dl, getPointerTy()));
-          unsigned partAlign =
-              aggregateIsPacked ? 1
-                                : TD->getABITypeAlignment(
-                                      partVT.getTypeForEVT(F->getContext()));
+          unsigned partAlign = aggregateIsPacked
+                                   ? 1
+                                   : DL.getABITypeAlignment(
+                                         partVT.getTypeForEVT(F->getContext()));
          SDValue p;
          if (Ins[InsIdx].VT.getSizeInBits() > partVT.getSizeInBits()) {
            ISD::LoadExtType ExtOp = Ins[InsIdx].Flags.isSExt() ?
@@ -2212,9 +2211,9 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
          Value *SrcValue = Constant::getNullValue(PointerType::get(
              EltVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM));
          SDValue P = DAG.getLoad(
-              EltVT, dl, Root, Arg, MachinePointerInfo(SrcValue), false,
-              false, true,
-              TD->getABITypeAlignment(EltVT.getTypeForEVT(F->getContext())));
+              EltVT, dl, Root, Arg, MachinePointerInfo(SrcValue), false, false,
+              true,
+              DL.getABITypeAlignment(EltVT.getTypeForEVT(F->getContext())));
          if (P.getNode())
            P.getNode()->setIROrder(idx + 1);
 
@@ -2229,9 +2228,9 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
          Value *SrcValue = Constant::getNullValue(PointerType::get(
              VecVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM));
          SDValue P = DAG.getLoad(
-              VecVT, dl, Root, Arg, MachinePointerInfo(SrcValue), false,
-              false, true,
-              TD->getABITypeAlignment(VecVT.getTypeForEVT(F->getContext())));
+              VecVT, dl, Root, Arg, MachinePointerInfo(SrcValue), false, false,
+              true,
+              DL.getABITypeAlignment(VecVT.getTypeForEVT(F->getContext())));
          if (P.getNode())
            P.getNode()->setIROrder(idx + 1);
 
@@ -2275,7 +2274,7 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
            SDValue P = DAG.getLoad(
                VecVT, dl, Root, SrcAddr, MachinePointerInfo(SrcValue), false,
                false, true,
-                TD->getABITypeAlignment(VecVT.getTypeForEVT(F->getContext())));
+                DL.getABITypeAlignment(VecVT.getTypeForEVT(F->getContext())));
            if (P.getNode())
              P.getNode()->setIROrder(idx + 1);
 
@@ -2288,7 +2287,7 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
              Elt = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, Elt);
            InVals.push_back(Elt);
          }
-          Ofst += TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
+          Ofst += DL.getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
        }
        InsIdx += NumElts;
      }
@@ -2307,14 +2306,15 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
      if (ObjectVT.getSizeInBits() < Ins[InsIdx].VT.getSizeInBits()) {
        ISD::LoadExtType ExtOp = Ins[InsIdx].Flags.isSExt() ?
                                     ISD::SEXTLOAD : ISD::ZEXTLOAD;
-        p = DAG.getExtLoad(ExtOp, dl, Ins[InsIdx].VT, Root, Arg,
-                           MachinePointerInfo(srcValue), ObjectVT, false, false,
-                           false,
-                           TD->getABITypeAlignment(ObjectVT.getTypeForEVT(F->getContext())));
+        p = DAG.getExtLoad(
+            ExtOp, dl, Ins[InsIdx].VT, Root, Arg, MachinePointerInfo(srcValue),
+            ObjectVT, false, false, false,
+            DL.getABITypeAlignment(ObjectVT.getTypeForEVT(F->getContext())));
      } else {
-        p = DAG.getLoad(Ins[InsIdx].VT, dl, Root, Arg,
-                        MachinePointerInfo(srcValue), false, false, false,
-                        TD->getABITypeAlignment(ObjectVT.getTypeForEVT(F->getContext())));
+        p = DAG.getLoad(
+            Ins[InsIdx].VT, dl, Root, Arg, MachinePointerInfo(srcValue), false,
+            false, false,
+            DL.getABITypeAlignment(ObjectVT.getTypeForEVT(F->getContext())));
      }
      if (p.getNode())
        p.getNode()->setIROrder(idx + 1);
@@ -2493,7 +2493,7 @@ NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
  } else {
    SmallVector<EVT, 16> ValVTs;
    SmallVector<uint64_t, 16> Offsets;
-    ComputePTXValueVTs(*this, RetTy, ValVTs, &Offsets, 0);
+    ComputePTXValueVTs(*this, DAG.getDataLayout(), RetTy, ValVTs, &Offsets, 0);
    assert(ValVTs.size() == OutVals.size() && "Bad return value decomposition");
 
    for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
@@ -1594,7 +1594,7 @@ bool PPCFastISel::SelectRet(const Instruction *I) {
 
   if (Ret->getNumOperands() > 0) {
     SmallVector<ISD::OutputArg, 4> Outs;
-    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI);
+    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
 
     // Analyze operands of the call, assigning locations to each operand.
     SmallVector<CCValAssign, 16> ValLocs;
@@ -1000,7 +1000,8 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
 
   if (Ret->getNumOperands() > 0) {
     SmallVector<ISD::OutputArg, 4> Outs;
-    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI);
+    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI,
+                  MF->getDataLayout());
 
     // Analyze operands of the call, assigning locations to each operand.
     SmallVector<CCValAssign, 16> ValLocs;