AArch64: Safely handle the incoming sret call argument.
This adds a safe interface to the machine independent InputArg struct for accessing the index of the original (IR-level) argument. When a non-native return type is lowered, we generate the hidden machine-level sret argument on-the-fly. Before this fix, we were representing this argument as OrigArgIndex == 0, which is an outright lie. In particular this crashed in the AArch64 backend where we actually try to access the type of the original argument.

Now we use a sentinel value for machine arguments that have no original argument index. AArch64, ARM, Mips, and PPC now check for this case before accessing the original argument.

Fixes <rdar://19792160> Null pointer assertion in AArch64TargetLowering

llvm-svn: 229413
parent 4546f3de43
commit e7964c82c7
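
The core of the change, as a minimal standalone C++ sketch (the struct and helper below are hypothetical, simplified stand-ins, not the real ISD::InputArg or SelectionDAGISel code; compare the hunks that follow): a return value that does not fit in registers gets a hidden sret pointer argument with no IR-level counterpart, and that argument is now tagged with a sentinel instead of pretending to be IR argument 0.

    // Hypothetical, simplified stand-ins for illustration only.
    #include <climits>
    #include <cstdio>
    #include <vector>

    struct ArgSketch {
      static const unsigned NoArgIndex = UINT_MAX; // sentinel: no IR argument
      bool IsSRet;           // hidden struct-return pointer?
      unsigned OrigArgIndex; // index of the original IR argument, or NoArgIndex
    };

    // Build the incoming machine arguments for a function with NumIRArgs
    // IR-level arguments whose return value must be passed through memory.
    std::vector<ArgSketch> buildIncomingArgs(unsigned NumIRArgs,
                                             bool RetNeedsSRet) {
      std::vector<ArgSketch> Ins;
      if (RetNeedsSRet)
        // Previously this entry claimed OrigArgIndex == 0, which made targets
        // look up the type of an IR argument that does not exist.
        Ins.push_back({true, ArgSketch::NoArgIndex});
      for (unsigned i = 0; i != NumIRArgs; ++i)
        Ins.push_back({false, i});
      return Ins;
    }

    int main() {
      for (const ArgSketch &A : buildIncomingArgs(2, true)) {
        if (A.OrigArgIndex == ArgSketch::NoArgIndex)
          std::printf("implicit machine-level argument (hidden sret)\n");
        else
          std::printf("IR argument #%u\n", A.OrigArgIndex);
      }
    }
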
@@ -134,6 +134,8 @@ namespace ISD {
     /// Index original Function's argument.
     unsigned OrigArgIndex;
 
+    /// Sentinel value for implicit machine-level input arguments.
+    static const unsigned NoArgIndex = UINT_MAX;
+
     /// Offset in bytes of current input value relative to the beginning of
     /// original argument. E.g. if argument was splitted into four 32 bit

@@ -147,6 +149,15 @@ namespace ISD {
       VT = vt.getSimpleVT();
       ArgVT = argvt;
     }
+
+    bool isOrigArg() const {
+      return OrigArgIndex != NoArgIndex;
+    }
+
+    unsigned getOrigArgIndex() const {
+      assert(OrigArgIndex != NoArgIndex && "Implicit machine-level argument");
+      return OrigArgIndex;
+    }
   };
 
   /// OutputArg - This struct carries flags and a value for a

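For the target-side half, a hedged sketch of the guard pattern the per-target hunks below adopt (simplified, hypothetical types; not a verbatim excerpt of any backend): getOrigArgIndex() may only be called after isOrigArg() confirms the machine argument really maps back to an IR argument.

    // Simplified consumer-side sketch; the real code is in the AArch64/ARM/
    // Mips/PPC LowerFormalArguments changes shown below.
    #include <cassert>
    #include <climits>
    #include <iterator>
    #include <string>
    #include <vector>

    struct InArgSketch {
      static const unsigned NoArgIndex = UINT_MAX;
      unsigned OrigArgIndex = NoArgIndex;
      bool isOrigArg() const { return OrigArgIndex != NoArgIndex; }
      unsigned getOrigArgIndex() const {
        assert(isOrigArg() && "Implicit machine-level argument");
        return OrigArgIndex;
      }
    };

    // Walk the IR argument types in step with the incoming machine arguments,
    // skipping implicit ones instead of advancing past the end of the list.
    void lowerFormalArgsSketch(const std::vector<InArgSketch> &Ins,
                               const std::vector<std::string> &IRArgTypes) {
      auto CurOrigArg = IRArgTypes.begin();
      unsigned CurArgIdx = 0;
      for (const InArgSketch &In : Ins) {
        if (In.isOrigArg()) {
          std::advance(CurOrigArg, In.getOrigArgIndex() - CurArgIdx);
          CurArgIdx = In.getOrigArgIndex();
          // ... consult *CurOrigArg (the original IR type) here ...
        }
        // ... lowering that does not need the IR argument continues here ...
      }
    }
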
@@ -7673,7 +7673,8 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
     ISD::ArgFlagsTy Flags;
     Flags.setSRet();
     MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]);
-    ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true, 0, 0);
+    ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true,
+                         ISD::InputArg::NoArgIndex, 0);
     Ins.push_back(RetArg);
   }
 

@@ -2021,18 +2021,19 @@ SDValue AArch64TargetLowering::LowerFormalArguments(
   unsigned CurArgIdx = 0;
   for (unsigned i = 0; i != NumArgs; ++i) {
     MVT ValVT = Ins[i].VT;
-    std::advance(CurOrigArg, Ins[i].OrigArgIndex - CurArgIdx);
-    CurArgIdx = Ins[i].OrigArgIndex;
-
-    // Get type of the original argument.
-    EVT ActualVT = getValueType(CurOrigArg->getType(), /*AllowUnknown*/ true);
-    MVT ActualMVT = ActualVT.isSimple() ? ActualVT.getSimpleVT() : MVT::Other;
-    // If ActualMVT is i1/i8/i16, we should set LocVT to i8/i8/i16.
-    if (ActualMVT == MVT::i1 || ActualMVT == MVT::i8)
-      ValVT = MVT::i8;
-    else if (ActualMVT == MVT::i16)
-      ValVT = MVT::i16;
-
+    if (Ins[i].isOrigArg()) {
+      std::advance(CurOrigArg, Ins[i].getOrigArgIndex() - CurArgIdx);
+      CurArgIdx = Ins[i].getOrigArgIndex();
+
+      // Get type of the original argument.
+      EVT ActualVT = getValueType(CurOrigArg->getType(), /*AllowUnknown*/ true);
+      MVT ActualMVT = ActualVT.isSimple() ? ActualVT.getSimpleVT() : MVT::Other;
+      // If ActualMVT is i1/i8/i16, we should set LocVT to i8/i8/i16.
+      if (ActualMVT == MVT::i1 || ActualMVT == MVT::i8)
+        ValVT = MVT::i8;
+      else if (ActualMVT == MVT::i16)
+        ValVT = MVT::i16;
+    }
     CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, /*IsVarArg=*/false);
     bool Res =
         AssignFn(i, ValVT, ValVT, CCValAssign::Full, Ins[i].Flags, CCInfo);

@@ -3084,8 +3084,11 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
 
   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
     CCValAssign &VA = ArgLocs[i];
-    std::advance(CurOrigArg, Ins[VA.getValNo()].OrigArgIndex - CurArgIdx);
-    CurArgIdx = Ins[VA.getValNo()].OrigArgIndex;
+    if (Ins[VA.getValNo()].isOrigArg()) {
+      std::advance(CurOrigArg,
+                   Ins[VA.getValNo()].getOrigArgIndex() - CurArgIdx);
+      CurArgIdx = Ins[VA.getValNo()].getOrigArgIndex();
+    }
     // Arguments stored in registers.
     if (VA.isRegLoc()) {
       EVT RegVT = VA.getLocVT();

@@ -3165,7 +3168,7 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
       assert(VA.isMemLoc());
       assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");
 
-      int index = ArgLocs[i].getValNo();
+      int index = VA.getValNo();
 
       // Some Ins[] entries become multiple ArgLoc[] entries.
      // Process them only once.

@@ -3178,6 +3181,8 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
       // Since they could be overwritten by lowering of arguments in case of
       // a tail call.
       if (Flags.isByVal()) {
+        assert(Ins[index].isOrigArg() &&
+               "Byval arguments cannot be implicit");
         unsigned CurByValIndex = CCInfo.getInRegsParamsProcessed();
 
         ByValStoreOffset = RoundUpToAlignment(ByValStoreOffset, Flags.getByValAlign());

@@ -132,8 +132,8 @@ void MipsCCState::PreAnalyzeFormalArgumentsForF128(
       continue;
     }
 
-    assert(Ins[i].OrigArgIndex < MF.getFunction()->arg_size());
-    std::advance(FuncArg, Ins[i].OrigArgIndex);
+    assert(Ins[i].getOrigArgIndex() < MF.getFunction()->arg_size());
+    std::advance(FuncArg, Ins[i].getOrigArgIndex());
 
     OriginalArgWasF128.push_back(
         originalTypeIsF128(FuncArg->getType(), nullptr));

@@ -2873,13 +2873,16 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
 
   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
     CCValAssign &VA = ArgLocs[i];
-    std::advance(FuncArg, Ins[i].OrigArgIndex - CurArgIdx);
-    CurArgIdx = Ins[i].OrigArgIndex;
+    if (Ins[i].isOrigArg()) {
+      std::advance(FuncArg, Ins[i].getOrigArgIndex() - CurArgIdx);
+      CurArgIdx = Ins[i].getOrigArgIndex();
+    }
     EVT ValVT = VA.getValVT();
     ISD::ArgFlagsTy Flags = Ins[i].Flags;
     bool IsRegLoc = VA.isRegLoc();
 
     if (Flags.isByVal()) {
+      assert(Ins[i].isOrigArg() && "Byval arguments cannot be implicit");
       unsigned FirstByValReg, LastByValReg;
       unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
       CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);

@@ -2698,9 +2698,10 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
     unsigned ObjSize = ObjectVT.getStoreSize();
     unsigned ArgSize = ObjSize;
     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
-    std::advance(FuncArg, Ins[ArgNo].OrigArgIndex - CurArgIdx);
-    CurArgIdx = Ins[ArgNo].OrigArgIndex;
+    if (Ins[ArgNo].isOrigArg()) {
+      std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
+      CurArgIdx = Ins[ArgNo].getOrigArgIndex();
+    }
     // We re-align the argument offset for each argument, except when using the
     // fast calling convention, when we need to make sure we do that only when
     // we'll actually use a stack slot.

@@ -2723,6 +2724,8 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
     // FIXME the codegen can be much improved in some cases.
     // We do not have to keep everything in memory.
     if (Flags.isByVal()) {
+      assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
+
       if (CallConv == CallingConv::Fast)
         ComputeArgOffset();
 

@@ -3101,9 +3104,10 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
     unsigned ObjSize = ObjectVT.getSizeInBits()/8;
     unsigned ArgSize = ObjSize;
     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
-    std::advance(FuncArg, Ins[ArgNo].OrigArgIndex - CurArgIdx);
-    CurArgIdx = Ins[ArgNo].OrigArgIndex;
+    if (Ins[ArgNo].isOrigArg()) {
+      std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
+      CurArgIdx = Ins[ArgNo].getOrigArgIndex();
+    }
     unsigned CurArgOffset = ArgOffset;
 
     // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary.

@@ -3124,6 +3128,8 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
     // FIXME the codegen can be much improved in some cases.
     // We do not have to keep everything in memory.
     if (Flags.isByVal()) {
+      assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
+
       // ObjSize is the true size, ArgSize rounded up to multiple of registers.
       ObjSize = Flags.getByValSize();
       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;

|
@ -1693,7 +1693,7 @@ SDValue R600TargetLowering::LowerFormalArguments(
|
|||||||
// XXX - I think PartOffset should give you this, but it seems to give the
|
// XXX - I think PartOffset should give you this, but it seems to give the
|
||||||
// size of the register which isn't useful.
|
// size of the register which isn't useful.
|
||||||
|
|
||||||
unsigned ValBase = ArgLocs[In.OrigArgIndex].getLocMemOffset();
|
unsigned ValBase = ArgLocs[In.getOrigArgIndex()].getLocMemOffset();
|
||||||
unsigned PartOffset = VA.getLocMemOffset();
|
unsigned PartOffset = VA.getLocMemOffset();
|
||||||
unsigned Offset = 36 + VA.getLocMemOffset();
|
unsigned Offset = 36 + VA.getLocMemOffset();
|
||||||
|
|
||||||
|
@@ -446,7 +446,7 @@ SDValue SITargetLowering::LowerFormalArguments(
       // We REALLY want the ORIGINAL number of vertex elements here, e.g. a
       // three or five element vertex only needs three or five registers,
      // NOT four or eigth.
-      Type *ParamType = FType->getParamType(Arg.OrigArgIndex);
+      Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
       unsigned NumElements = ParamType->getVectorNumElements();
 
       for (unsigned j = 0; j != NumElements; ++j) {

@@ -529,7 +529,7 @@ SDValue SITargetLowering::LowerFormalArguments(
                                    Offset, Ins[i].Flags.isSExt());
 
       const PointerType *ParamTy =
-        dyn_cast<PointerType>(FType->getParamType(Ins[i].OrigArgIndex));
+        dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
       if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
           ParamTy && ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
         // On SI local pointers are just offsets into LDS, so they are always

@@ -564,7 +564,7 @@ SDValue SITargetLowering::LowerFormalArguments(
     if (Arg.VT.isVector()) {
 
       // Build a vector from the registers
-      Type *ParamType = FType->getParamType(Arg.OrigArgIndex);
+      Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
       unsigned NumElements = ParamType->getVectorNumElements();
 
       SmallVector<SDValue, 4> Regs;

test/CodeGen/AArch64/implicit-sret.ll (new file, 13 lines)
@@ -0,0 +1,13 @@
+; RUN: llc %s -o - -mtriple=arm64-apple-ios7.0 | FileCheck %s
+;
+; Handle implicit sret arguments that are generated on-the-fly during lowering.
+; <rdar://19792160> Null pointer assertion in AArch64TargetLowering
+
+; CHECK-LABEL: big_retval
+; ... str or stp for the first 1024 bits
+; CHECK: strb wzr, [x8, #128]
+; CHECK: ret
+define i1032 @big_retval() {
+entry:
+  ret i1032 0
+}