Add a parameter to CCState so that it can access the MachineFunction.
No functional change. Part of PR6965 llvm-svn: 132763
parent 68f8e98b8e
commit 1ae9ec6124
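Before the per-file hunks below, here is a minimal illustrative sketch (not part of the commit) of what the API change means for a SelectionDAG-based caller. The helper name analyzeCallOperandsSketch is hypothetical; the constructor arguments and the accessors used mirror the hunks that follow.

#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

// Hypothetical helper: shows the new CCState constructor, which now takes the
// MachineFunction right after the vararg flag (the old form had no MF argument).
static void analyzeCallOperandsSketch(CallingConv::ID CC, bool isVarArg,
                                      SelectionDAG &DAG, CCAssignFn *Fn,
                                      const SmallVectorImpl<ISD::OutputArg> &Outs) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, DAG.getMachineFunction(),
                 DAG.getTarget(), ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs, Fn);
}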
@@ -16,6 +16,7 @@
 #define LLVM_CODEGEN_CALLINGCONVLOWER_H

 #include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/ValueTypes.h"
 #include "llvm/Target/TargetCallingConv.h"
 #include "llvm/CallingConv.h"
@@ -149,6 +150,7 @@ typedef enum { Invalid, Prologue, Call } ParmContext;
 class CCState {
 CallingConv::ID CallingConv;
 bool IsVarArg;
+MachineFunction &MF;
 const TargetMachine &TM;
 const TargetRegisterInfo &TRI;
 SmallVector<CCValAssign, 16> &Locs;
@@ -160,7 +162,8 @@ class CCState {
 bool FirstByValRegValid;
 ParmContext CallOrPrologue;
 public:
-CCState(CallingConv::ID CC, bool isVarArg, const TargetMachine &TM,
+CCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
+const TargetMachine &TM,
 SmallVector<CCValAssign, 16> &locs, LLVMContext &C);

 void addLoc(const CCValAssign &V) {
@@ -169,6 +172,7 @@ public:

 LLVMContext &getContext() const { return Context; }
 const TargetMachine &getTarget() const { return TM; }
+MachineFunction &getMachineFunction() const { return MF; }
 CallingConv::ID getCallingConv() const { return CallingConv; }
 bool isVarArg() const { return IsVarArg; }

@@ -254,7 +254,7 @@
 /// to get to the smaller register. For illegal floating point types, this
 /// returns the integer type to transform to.
 EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
-return getTypeConversion(Context, VT).second;
+return getTypeConversion(Context, VT).second;
 }

 /// getTypeToExpandTo - For types supported by the target, this is an
@@ -1211,7 +1211,8 @@
 /// return values described by the Outs array can fit into the return
 /// registers. If false is returned, an sret-demotion is performed.
 ///
-virtual bool CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
+virtual bool CanLowerReturn(CallingConv::ID CallConv,
+MachineFunction &MF, bool isVarArg,
 const SmallVectorImpl<ISD::OutputArg> &Outs,
 LLVMContext &Context) const
 {
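As a hedged sketch of what the updated hook looks like on the implementation side, the fragment below is modeled directly on the X86 and XCore hunks later in this diff; "MyTargetLowering" and RetCC_MyTarget are hypothetical placeholder names, not code from the commit.

bool MyTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
                                      MachineFunction &MF, bool isVarArg,
                                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                                      LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  // The MachineFunction parameter is forwarded straight into CCState.
  CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_MyTarget);
}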
@@ -13,6 +13,7 @@
 //===----------------------------------------------------------------------===//

 #include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/raw_ostream.h"
@@ -22,21 +23,22 @@
 #include "llvm/Target/TargetLowering.h"
 using namespace llvm;

-CCState::CCState(CallingConv::ID CC, bool isVarArg, const TargetMachine &tm,
+CCState::CCState(CallingConv::ID CC, bool isVarArg, MachineFunction &mf,
+const TargetMachine &tm,
 SmallVector<CCValAssign, 16> &locs, LLVMContext &C)
-: CallingConv(CC), IsVarArg(isVarArg), TM(tm),
+: CallingConv(CC), IsVarArg(isVarArg), MF(mf), TM(tm),
 TRI(*TM.getRegisterInfo()), Locs(locs), Context(C),
 CallOrPrologue(Invalid) {
 // No stack is used.
 StackOffset = 0;

 clearFirstByValReg();
 UsedRegs.resize((TRI.getNumRegs()+31)/32);
 }

-// HandleByVal - Allocate a stack slot large enough to pass an argument by
-// value. The size and alignment information of the argument is encoded in its
-// parameter attribute.
+// HandleByVal - Allocate space on the stack large enough to pass an argument
+// by value. The size and alignment information of the argument is encoded in
+// its parameter attribute.
 void CCState::HandleByVal(unsigned ValNo, MVT ValVT,
 MVT LocVT, CCValAssign::LocInfo LocInfo,
 int MinSize, int MinAlign,
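A short sketch of what the new accessor buys (illustrative only, not from the commit): code that is handed a CCState can now reach the owning MachineFunction, for example to inspect frame information. Only getMachineFunction(), getFrameInfo() and getNumFixedObjects() are taken from this diff; the helper name is hypothetical.

#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
using namespace llvm;

// Hypothetical helper: derives frame data from the CCState alone, which was
// not possible before the MachineFunction was stored in CCState.
static unsigned numFixedObjectsSketch(CCState &State) {
  MachineFunction &MF = State.getMachineFunction();
  return MF.getFrameInfo()->getNumFixedObjects();
}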
@@ -67,7 +67,8 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf) {
 SmallVector<ISD::OutputArg, 4> Outs;
 GetReturnInfo(Fn->getReturnType(),
 Fn->getAttributes().getRetAttributes(), Outs, TLI);
-CanLowerReturn = TLI.CanLowerReturn(Fn->getCallingConv(), Fn->isVarArg(),
+CanLowerReturn = TLI.CanLowerReturn(Fn->getCallingConv(), *MF,
+Fn->isVarArg(),
 Outs, Fn->getContext());

 // Initialize the mapping of values to registers. This is only set up for
@@ -321,7 +322,7 @@ void FunctionLoweringInfo::ComputePHILiveOutRegInfo(const PHINode *PN) {
 APInt Zero(BitWidth, 0);
 DestLOI.KnownZero = Zero;
 DestLOI.KnownOne = Zero;
-return;
+return;
 }

 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
@@ -353,18 +354,18 @@ void FunctionLoweringInfo::ComputePHILiveOutRegInfo(const PHINode *PN) {
 /// setByValArgumentFrameIndex - Record frame index for the byval
 /// argument. This overrides previous frame index entry for this argument,
 /// if any.
-void FunctionLoweringInfo::setByValArgumentFrameIndex(const Argument *A,
+void FunctionLoweringInfo::setByValArgumentFrameIndex(const Argument *A,
 int FI) {
 assert (A->hasByValAttr() && "Argument does not have byval attribute!");
 ByValArgFrameIndexMap[A] = FI;
 }

 /// getByValArgumentFrameIndex - Get frame index for the byval argument.
 /// If the argument does not have any assigned frame index then 0 is
 /// returned.
 int FunctionLoweringInfo::getByValArgumentFrameIndex(const Argument *A) {
 assert (A->hasByValAttr() && "Argument does not have byval attribute!");
-DenseMap<const Argument *, int>::iterator I =
+DenseMap<const Argument *, int>::iterator I =
 ByValArgFrameIndexMap.find(A);
 if (I != ByValArgFrameIndexMap.end())
 return I->second;
@@ -303,7 +303,7 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, DebugLoc DL,
 return DAG.getNode(ISD::BUILD_VECTOR, DL, ValueVT,
 &NewOps[0], NewOps.size());
 }

 // Trivial bitcast if the types are the same size and the destination
 // vector type is legal.
 if (PartVT.getSizeInBits() == ValueVT.getSizeInBits() &&
@@ -4884,7 +4884,9 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
 Outs, TLI, &Offsets);

 bool CanLowerReturn = TLI.CanLowerReturn(CS.getCallingConv(),
-FTy->isVarArg(), Outs, FTy->getContext());
+DAG.getMachineFunction(),
+FTy->isVarArg(), Outs,
+FTy->getContext());

 SDValue DemoteStackSlot;
 int DemoteStackIdx = -100;
@@ -5777,7 +5779,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
 // the addressing mode that the constraint wants. Also, this may take
 // an additional register for the computation and we don't want that
 // either.

 // If the operand is a float, integer, or vector constant, spill to a
 // constant pool entry to get its address.
 const Value *OpVal = OpInfo.CallOperandVal;
@@ -443,14 +443,14 @@ unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
 uint64_t Imm1, uint64_t Imm2) {
 unsigned ResultReg = createResultReg(RC);
 const TargetInstrDesc &II = TII.get(MachineInstOpcode);

 if (II.getNumDefs() >= 1)
 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
 .addImm(Imm1).addImm(Imm2));
 else {
 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
 .addImm(Imm1).addImm(Imm2));
-AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
 TII.get(TargetOpcode::COPY),
 ResultReg)
 .addReg(II.ImplicitDefs[0]));
@@ -1542,7 +1542,7 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
 CallingConv::ID CC,
 unsigned &NumBytes) {
 SmallVector<CCValAssign, 16> ArgLocs;
-CCState CCInfo(CC, false, TM, ArgLocs, *Context);
+CCState CCInfo(CC, false, *FuncInfo.MF, TM, ArgLocs, *Context);
 CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC, false));

 // Get a count of how many bytes are to be pushed on the stack.
@@ -1655,7 +1655,7 @@ bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
 // Now the return value.
 if (RetVT != MVT::isVoid) {
 SmallVector<CCValAssign, 16> RVLocs;
-CCState CCInfo(CC, false, TM, RVLocs, *Context);
+CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context);
 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true));

 // Copy all of the result registers out of their specified physreg.
@@ -1711,7 +1711,7 @@ bool ARMFastISel::SelectRet(const Instruction *I) {

 // Analyze operands of the call, assigning locations to each operand.
 SmallVector<CCValAssign, 16> ValLocs;
-CCState CCInfo(CC, F.isVarArg(), TM, ValLocs, I->getContext());
+CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs, I->getContext());
 CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */));

 const Value *RV = Ret->getOperand(0);
@@ -1990,7 +1990,7 @@ bool ARMFastISel::SelectIntCast(const Instruction *I) {
 // any code.
 UpdateValueMap(I, SrcReg);
 return true;
 }
 }
 if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
 return false;
@@ -1071,8 +1071,8 @@ ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,

 // Assign locations to each value returned by this call.
 SmallVector<CCValAssign, 16> RVLocs;
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
-RVLocs, *DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), RVLocs, *DAG.getContext());
 CCInfo.AnalyzeCallResult(Ins,
 CCAssignFnForNode(CallConv, /* Return*/ true,
 isVarArg));
@@ -1206,8 +1206,8 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,

 // Analyze operands of the call, assigning locations to each operand.
 SmallVector<CCValAssign, 16> ArgLocs;
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
-*DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), ArgLocs, *DAG.getContext());
 CCInfo.setCallOrPrologue(Call);
 CCInfo.AnalyzeCallOperands(Outs,
 CCAssignFnForNode(CallConv, /* Return*/ false,
@@ -1638,13 +1638,13 @@ ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
 // results are returned in the same way as what the caller expects.
 if (!CCMatch) {
 SmallVector<CCValAssign, 16> RVLocs1;
-CCState CCInfo1(CalleeCC, false, getTargetMachine(),
-RVLocs1, *DAG.getContext());
+CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(),
+getTargetMachine(), RVLocs1, *DAG.getContext());
 CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC, true, isVarArg));

 SmallVector<CCValAssign, 16> RVLocs2;
-CCState CCInfo2(CallerCC, false, getTargetMachine(),
-RVLocs2, *DAG.getContext());
+CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(),
+getTargetMachine(), RVLocs2, *DAG.getContext());
 CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC, true, isVarArg));

 if (RVLocs1.size() != RVLocs2.size())
@@ -1670,8 +1670,8 @@ ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
 // Check if stack adjustment is needed. For now, do not do this if any
 // argument is passed on the stack.
 SmallVector<CCValAssign, 16> ArgLocs;
-CCState CCInfo(CalleeCC, isVarArg, getTargetMachine(),
-ArgLocs, *DAG.getContext());
+CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), ArgLocs, *DAG.getContext());
 CCInfo.AnalyzeCallOperands(Outs,
 CCAssignFnForNode(CalleeCC, false, isVarArg));
 if (CCInfo.getNextStackOffset()) {
@@ -1730,8 +1730,8 @@ ARMTargetLowering::LowerReturn(SDValue Chain,
 SmallVector<CCValAssign, 16> RVLocs;

 // CCState - Info about the registers and stack slots.
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs,
-*DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), RVLocs, *DAG.getContext());

 // Analyze outgoing return values.
 CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true,
@@ -2424,8 +2424,8 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,

 // Assign locations to all of the incoming arguments.
 SmallVector<CCValAssign, 16> ArgLocs;
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
-*DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), ArgLocs, *DAG.getContext());
 CCInfo.setCallOrPrologue(Prologue);
 CCInfo.AnalyzeFormalArguments(Ins,
 CCAssignFnForNode(CallConv, /* Return*/ false,
@@ -2524,7 +2524,7 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
 if (index != lastInsIndex)
 {
 ISD::ArgFlagsTy Flags = Ins[index].Flags;
-// FIXME: For now, all byval parameter objects are marked mutable.
+// FIXME: For now, all byval parameter objects are marked mutable.
 // This can be changed with more analysis.
 // In case of tail call optimization mark all arguments mutable.
 // Since they could be overwritten by lowering of arguments in case of
@@ -7386,7 +7386,7 @@ void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,

 // Currently only support length 1 constraints.
 if (Constraint.length() != 1) return;

 char ConstraintLetter = Constraint[0];
 switch (ConstraintLetter) {
 default: break;
@@ -230,8 +230,8 @@ AlphaTargetLowering::LowerCall(SDValue Chain, SDValue Callee,

 // Analyze operands of the call, assigning locations to each operand.
 SmallVector<CCValAssign, 16> ArgLocs;
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
-ArgLocs, *DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), ArgLocs, *DAG.getContext());

 CCInfo.AnalyzeCallOperands(Outs, CC_Alpha);

@@ -344,8 +344,8 @@ AlphaTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,

 // Assign locations to each value returned by this call.
 SmallVector<CCValAssign, 16> RVLocs;
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs,
-*DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), RVLocs, *DAG.getContext());

 CCInfo.AnalyzeCallResult(Ins, RetCC_Alpha);

@@ -171,8 +171,8 @@ BlackfinTargetLowering::LowerFormalArguments(SDValue Chain,
 MachineFrameInfo *MFI = MF.getFrameInfo();

 SmallVector<CCValAssign, 16> ArgLocs;
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
-ArgLocs, *DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), ArgLocs, *DAG.getContext());
 CCInfo.AllocateStack(12, 4); // ABI requires 12 bytes stack space
 CCInfo.AnalyzeFormalArguments(Ins, CC_Blackfin);

@@ -229,8 +229,8 @@ BlackfinTargetLowering::LowerReturn(SDValue Chain,
 SmallVector<CCValAssign, 16> RVLocs;

 // CCState - Info about the registers and stack slot.
-CCState CCInfo(CallConv, isVarArg, DAG.getTarget(),
-RVLocs, *DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+DAG.getTarget(), RVLocs, *DAG.getContext());

 // Analize return values.
 CCInfo.AnalyzeReturn(Outs, RetCC_Blackfin);
@@ -290,8 +290,8 @@ BlackfinTargetLowering::LowerCall(SDValue Chain, SDValue Callee,

 // Analyze operands of the call, assigning locations to each operand.
 SmallVector<CCValAssign, 16> ArgLocs;
-CCState CCInfo(CallConv, isVarArg, DAG.getTarget(), ArgLocs,
-*DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+DAG.getTarget(), ArgLocs, *DAG.getContext());
 CCInfo.AllocateStack(12, 4); // ABI requires 12 bytes stack space
 CCInfo.AnalyzeCallOperands(Outs, CC_Blackfin);

@@ -378,8 +378,8 @@ BlackfinTargetLowering::LowerCall(SDValue Chain, SDValue Callee,

 // Assign locations to each value returned by this call.
 SmallVector<CCValAssign, 16> RVLocs;
-CCState RVInfo(CallConv, isVarArg, DAG.getTarget(), RVLocs,
-*DAG.getContext());
+CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+DAG.getTarget(), RVLocs, *DAG.getContext());

 RVInfo.AnalyzeCallResult(Ins, RetCC_Blackfin);

@@ -1117,8 +1117,8 @@ SPUTargetLowering::LowerFormalArguments(SDValue Chain,
 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();

 SmallVector<CCValAssign, 16> ArgLocs;
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
-*DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), ArgLocs, *DAG.getContext());
 // FIXME: allow for other calling conventions
 CCInfo.AnalyzeFormalArguments(Ins, CCC_SPU);

@@ -1264,8 +1264,8 @@ SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
 unsigned StackSlotSize = SPUFrameLowering::stackSlotSize();

 SmallVector<CCValAssign, 16> ArgLocs;
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
-*DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), ArgLocs, *DAG.getContext());
 // FIXME: allow for other calling conventions
 CCInfo.AnalyzeCallOperands(Outs, CCC_SPU);

@@ -1425,8 +1425,8 @@ SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,

 // Now handle the return value(s)
 SmallVector<CCValAssign, 16> RVLocs;
-CCState CCRetInfo(CallConv, isVarArg, getTargetMachine(),
-RVLocs, *DAG.getContext());
+CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), RVLocs, *DAG.getContext());
 CCRetInfo.AnalyzeCallResult(Ins, CCC_SPU);

@@ -1452,8 +1452,8 @@ SPUTargetLowering::LowerReturn(SDValue Chain,
 DebugLoc dl, SelectionDAG &DAG) const {

 SmallVector<CCValAssign, 16> RVLocs;
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
-RVLocs, *DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), RVLocs, *DAG.getContext());
 CCInfo.AnalyzeReturn(Outs, RetCC_SPU);

 // If this is the first return lowered for this function, add the regs to the
@@ -417,7 +417,7 @@ MBlazeTargetLowering::EmitCustomAtomic(MachineInstr *MI,
 // All atomic instructions on the Microblaze are implemented using the
 // load-linked / store-conditional style atomic instruction sequences.
 // Thus, all operations will look something like the following:
-//
+//
 // start:
 // lwx RV, RP, 0
 // <do stuff>
@@ -698,8 +698,8 @@ LowerCall(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,

 // Analyze operands of the call, assigning locations to each operand.
 SmallVector<CCValAssign, 16> ArgLocs;
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
-*DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), ArgLocs, *DAG.getContext());
 CCInfo.AnalyzeCallOperands(Outs, CC_MBlaze);

 // Get a count of how many bytes are to be pushed on the stack.
@@ -837,8 +837,8 @@ LowerCallResult(SDValue Chain, SDValue InFlag, CallingConv::ID CallConv,
 SmallVectorImpl<SDValue> &InVals) const {
 // Assign locations to each value returned by this call.
 SmallVector<CCValAssign, 16> RVLocs;
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
-RVLocs, *DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), RVLocs, *DAG.getContext());

 CCInfo.AnalyzeCallResult(Ins, RetCC_MBlaze);

@@ -880,8 +880,8 @@ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,

 // Assign locations to all of the incoming arguments.
 SmallVector<CCValAssign, 16> ArgLocs;
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
-ArgLocs, *DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), ArgLocs, *DAG.getContext());

 CCInfo.AnalyzeFormalArguments(Ins, CC_MBlaze);
 SDValue StackPtr;
@@ -1012,8 +1012,8 @@ LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
 SmallVector<CCValAssign, 16> RVLocs;

 // CCState - Info about the registers and stack slot.
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
-RVLocs, *DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), RVLocs, *DAG.getContext());

 // Analize return values.
 CCInfo.AnalyzeReturn(Outs, RetCC_MBlaze);
@@ -1043,9 +1043,9 @@ LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,

 // If this function is using the interrupt_handler calling convention
 // then use "rtid r14, 0" otherwise use "rtsd r15, 8"
-unsigned Ret = (CallConv == llvm::CallingConv::MBLAZE_INTR) ? MBlazeISD::IRet
+unsigned Ret = (CallConv == llvm::CallingConv::MBLAZE_INTR) ? MBlazeISD::IRet
 : MBlazeISD::Ret;
-unsigned Reg = (CallConv == llvm::CallingConv::MBLAZE_INTR) ? MBlaze::R14
+unsigned Reg = (CallConv == llvm::CallingConv::MBLAZE_INTR) ? MBlaze::R14
 : MBlaze::R15;
 SDValue DReg = DAG.getRegister(Reg, MVT::i32);

@@ -312,8 +312,8 @@ MSP430TargetLowering::LowerCCCArguments(SDValue Chain,

 // Assign locations to all of the incoming arguments.
 SmallVector<CCValAssign, 16> ArgLocs;
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
-ArgLocs, *DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), ArgLocs, *DAG.getContext());
 CCInfo.AnalyzeFormalArguments(Ins, CC_MSP430);

 assert(!isVarArg && "Varargs not supported yet");
@@ -395,8 +395,8 @@ MSP430TargetLowering::LowerReturn(SDValue Chain,
 }

 // CCState - Info about the registers and stack slot.
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
-RVLocs, *DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), RVLocs, *DAG.getContext());

 // Analize return values.
 CCInfo.AnalyzeReturn(Outs, RetCC_MSP430);
@@ -449,8 +449,8 @@ MSP430TargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
 SmallVectorImpl<SDValue> &InVals) const {
 // Analyze operands of the call, assigning locations to each operand.
 SmallVector<CCValAssign, 16> ArgLocs;
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
-ArgLocs, *DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), ArgLocs, *DAG.getContext());

 CCInfo.AnalyzeCallOperands(Outs, CC_MSP430);

@@ -572,8 +572,8 @@ MSP430TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,

 // Assign locations to each value returned by this call.
 SmallVector<CCValAssign, 16> RVLocs;
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
-RVLocs, *DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), RVLocs, *DAG.getContext());

 CCInfo.AnalyzeCallResult(Ins, RetCC_MSP430);

@@ -148,7 +148,7 @@ MipsTargetLowering(MipsTargetMachine &TM)

 setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
 setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);

 setOperationAction(ISD::VAARG, MVT::Other, Expand);
 setOperationAction(ISD::VACOPY, MVT::Other, Expand);
 setOperationAction(ISD::VAEND, MVT::Other, Expand);
@@ -720,7 +720,7 @@ MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
 // Mips::ATOMIC_LOAD_NAND_I32 (when Nand == true)
 MachineBasicBlock *
 MipsTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
-unsigned Size, unsigned BinOpcode,
+unsigned Size, unsigned BinOpcode,
 bool Nand) const {
 assert(Size == 4 && "Unsupported size for EmitAtomicBinary.");

@@ -1502,11 +1502,11 @@ static SDValue LowerFCOPYSIGN32(SDValue Op, SelectionDAG &DAG) {
 }

 static SDValue LowerFCOPYSIGN64(SDValue Op, SelectionDAG &DAG, bool isLittle) {
-// FIXME:
+// FIXME:
 // Use ext/ins instructions if target architecture is Mips32r2.
 // Eliminate redundant mfc1 and mtc1 instructions.
 unsigned LoIdx = 0, HiIdx = 1;

 if (!isLittle)
 std::swap(LoIdx, HiIdx);

@@ -1707,7 +1707,7 @@ WriteByValArg(SDValue& Chain, DebugLoc dl,

 // copy remaining part of byval arg to stack.
 if (CurWord < LastWord) {
-unsigned SizeInBytes = (LastWord - CurWord) * 4;
+unsigned SizeInBytes = (LastWord - CurWord) * 4;
 SDValue Src = DAG.getNode(ISD::ADD, dl, MVT::i32, Arg,
 DAG.getConstant((CurWord - FirstWord) * 4,
 MVT::i32));
@@ -1745,8 +1745,8 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,

 // Analyze operands of the call, assigning locations to each operand.
 SmallVector<CCValAssign, 16> ArgLocs;
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
-*DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), ArgLocs, *DAG.getContext());

 if (Subtarget->isABI_O32())
 CCInfo.AnalyzeCallOperands(Outs, CC_MipsO32);
@@ -1767,7 +1767,7 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
 // Get the frame index of the stack frame object that points to the location
 // of dynamically allocated area on the stack.
 int DynAllocFI = MipsFI->getDynAllocFI();

 // Update size of the maximum argument space.
 // For O32, a minimum of four words (16 bytes) of argument space is
 // allocated.
@@ -1781,14 +1781,14 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,

 // Set the offsets relative to $sp of the $gp restore slot and dynamically
 // allocated stack space. These offsets must be aligned to a boundary
-// determined by the stack alignment of the ABI.
+// determined by the stack alignment of the ABI.
 unsigned StackAlignment = TFL->getStackAlignment();
-NextStackOffset = (NextStackOffset + StackAlignment - 1) /
+NextStackOffset = (NextStackOffset + StackAlignment - 1) /
 StackAlignment * StackAlignment;

 if (IsPIC)
 MFI->setObjectOffset(MipsFI->getGPFI(), NextStackOffset);

 MFI->setObjectOffset(DynAllocFI, NextStackOffset);
 }

@@ -1796,7 +1796,7 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
 SmallVector<std::pair<unsigned, SDValue>, 16> RegsToPass;
 SmallVector<SDValue, 8> MemOpChains;

-int FirstFI = -MFI->getNumFixedObjects() - 1, LastFI = 0;
+int FirstFI = -MFI->getNumFixedObjects() - 1, LastFI = 0;

 // Walk the register/memloc assignments, inserting copies/loads.
 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
@@ -1844,7 +1844,7 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
 // Register can't get to this point...
 assert(VA.isMemLoc());

-// ByVal Arg.
+// ByVal Arg.
 ISD::ArgFlagsTy Flags = Outs[i].Flags;
 if (Flags.isByVal()) {
 assert(Subtarget->isABI_O32() &&
@@ -1857,7 +1857,7 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
 }

 // Create the frame index object for this incoming parameter
-LastFI = MFI->CreateFixedObject(VA.getValVT().getSizeInBits()/8,
+LastFI = MFI->CreateFixedObject(VA.getValVT().getSizeInBits()/8,
 VA.getLocMemOffset(), true);
 SDValue PtrOff = DAG.getFrameIndex(LastFI, getPointerTy());

@@ -1924,7 +1924,7 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
 } else
 Callee = LoadValue;

-// Use chain output from LoadValue
+// Use chain output from LoadValue
 Chain = LoadValue.getValue(1);
 }

@@ -1986,8 +1986,8 @@ MipsTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
 SmallVectorImpl<SDValue> &InVals) const {
 // Assign locations to each value returned by this call.
 SmallVector<CCValAssign, 16> RVLocs;
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
-RVLocs, *DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), RVLocs, *DAG.getContext());

 CCInfo.AnalyzeCallResult(Ins, RetCC_Mips);

@@ -2051,8 +2051,8 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,

 // Assign locations to all of the incoming arguments.
 SmallVector<CCValAssign, 16> ArgLocs;
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
-ArgLocs, *DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), ArgLocs, *DAG.getContext());

 if (Subtarget->isABI_O32())
 CCInfo.AnalyzeFormalArguments(Ins, CC_MipsO32);
@@ -2165,7 +2165,7 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,

 if (isVarArg && Subtarget->isABI_O32()) {
 // Record the frame index of the first variable argument
-// which is a value necessary to VASTART.
+// which is a value necessary to VASTART.
 unsigned NextStackOffset = CCInfo.getNextStackOffset();
 assert(NextStackOffset % 4 == 0 &&
 "NextStackOffset must be aligned to 4-byte boundaries.");
@@ -2217,8 +2217,8 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
 SmallVector<CCValAssign, 16> RVLocs;

 // CCState - Info about the registers and stack slot.
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
-RVLocs, *DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), RVLocs, *DAG.getContext());

 // Analize return values.
 CCInfo.AnalyzeReturn(Outs, RetCC_Mips);

@@ -1558,8 +1558,8 @@ PPCTargetLowering::LowerFormalArguments_SVR4(

 // Assign locations to all of the incoming arguments.
 SmallVector<CCValAssign, 16> ArgLocs;
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
-*DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), ArgLocs, *DAG.getContext());

 // Reserve space for the linkage area on the stack.
 CCInfo.AllocateStack(PPCFrameLowering::getLinkageSize(false, false), PtrByteSize);
@@ -1619,8 +1619,8 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
 // Aggregates passed by value are stored in the local variable space of the
 // caller's stack frame, right above the parameter list area.
 SmallVector<CCValAssign, 16> ByValArgLocs;
-CCState CCByValInfo(CallConv, isVarArg, getTargetMachine(),
-ByValArgLocs, *DAG.getContext());
+CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), ByValArgLocs, *DAG.getContext());

 // Reserve stack space for the allocations in CCInfo.
 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
@@ -2591,8 +2591,8 @@ PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
 SmallVectorImpl<SDValue> &InVals) const {

 SmallVector<CCValAssign, 16> RVLocs;
-CCState CCRetInfo(CallConv, isVarArg, getTargetMachine(),
-RVLocs, *DAG.getContext());
+CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), RVLocs, *DAG.getContext());
 CCRetInfo.AnalyzeCallResult(Ins, RetCC_PPC);

 // Copy all of the result registers out of their specified physreg.
@@ -2641,8 +2641,8 @@ PPCTargetLowering::FinishCall(CallingConv::ID CallConv, DebugLoc dl,
 // to the liveout set for the function.
 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
 SmallVector<CCValAssign, 16> RVLocs;
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs,
-*DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), RVLocs, *DAG.getContext());
 CCInfo.AnalyzeCallResult(Ins, RetCC_PPC);
 for (unsigned i = 0; i != RVLocs.size(); ++i)
 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
@@ -2755,8 +2755,8 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,

 // Assign locations to all of the outgoing arguments.
 SmallVector<CCValAssign, 16> ArgLocs;
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
-ArgLocs, *DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), ArgLocs, *DAG.getContext());

 // Reserve space for the linkage area on the stack.
 CCInfo.AllocateStack(PPCFrameLowering::getLinkageSize(false, false), PtrByteSize);
@@ -2795,8 +2795,8 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,

 // Assign locations to all of the outgoing aggregate by value arguments.
 SmallVector<CCValAssign, 16> ByValArgLocs;
-CCState CCByValInfo(CallConv, isVarArg, getTargetMachine(), ByValArgLocs,
-*DAG.getContext());
+CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), ByValArgLocs, *DAG.getContext());

 // Reserve stack space for the allocations in CCInfo.
 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
@@ -3303,8 +3303,8 @@ PPCTargetLowering::LowerReturn(SDValue Chain,
 DebugLoc dl, SelectionDAG &DAG) const {

 SmallVector<CCValAssign, 16> RVLocs;
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
-RVLocs, *DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), RVLocs, *DAG.getContext());
 CCInfo.AnalyzeReturn(Outs, RetCC_PPC);

 // If this is the first return lowered for this function, add the regs to the
@@ -5439,15 +5439,15 @@ PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,

 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
 /// vector. If it is invalid, don't add anything to Ops.
-void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
+void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
 std::string &Constraint,
 std::vector<SDValue>&Ops,
 SelectionDAG &DAG) const {
 SDValue Result(0,0);

 // Only support length 1 constraints.
 if (Constraint.length() > 1) return;

 char Letter = Constraint[0];
 switch (Letter) {
 default: break;
@@ -91,8 +91,8 @@ SparcTargetLowering::LowerReturn(SDValue Chain,
 SmallVector<CCValAssign, 16> RVLocs;

 // CCState - Info about the registers and stack slot.
-CCState CCInfo(CallConv, isVarArg, DAG.getTarget(),
-RVLocs, *DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+DAG.getTarget(), RVLocs, *DAG.getContext());

 // Analize return values.
 CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);
@@ -139,7 +139,7 @@ SparcTargetLowering::LowerReturn(SDValue Chain,
 if (Flag.getNode())
 return DAG.getNode(SPISD::RET_FLAG, dl, MVT::Other, Chain,
 RetAddrOffsetNode, Flag);
-return DAG.getNode(SPISD::RET_FLAG, dl, MVT::Other, Chain,
+return DAG.getNode(SPISD::RET_FLAG, dl, MVT::Other, Chain,
 RetAddrOffsetNode);
 }

@@ -161,8 +161,8 @@ SparcTargetLowering::LowerFormalArguments(SDValue Chain,

 // Assign locations to all of the incoming arguments.
 SmallVector<CCValAssign, 16> ArgLocs;
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
-ArgLocs, *DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), ArgLocs, *DAG.getContext());
 CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);

 const unsigned StackOffset = 92;
@@ -360,8 +360,8 @@ SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,

 // Analyze operands of the call, assigning locations to each operand.
 SmallVector<CCValAssign, 16> ArgLocs;
-CCState CCInfo(CallConv, isVarArg, DAG.getTarget(), ArgLocs,
-*DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+DAG.getTarget(), ArgLocs, *DAG.getContext());
 CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);

 // Get the size of the outgoing arguments stack space requirement.
@@ -591,8 +591,8 @@ SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,

 // Assign locations to each value returned by this call.
 SmallVector<CCValAssign, 16> RVLocs;
-CCState RVInfo(CallConv, isVarArg, DAG.getTarget(),
-RVLocs, *DAG.getContext());
+CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+DAG.getTarget(), RVLocs, *DAG.getContext());

 RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);

@@ -291,8 +291,8 @@ SystemZTargetLowering::LowerCCCArguments(SDValue Chain,

 // Assign locations to all of the incoming arguments.
 SmallVector<CCValAssign, 16> ArgLocs;
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
-ArgLocs, *DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), ArgLocs, *DAG.getContext());
 CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);

 if (isVarArg)
@@ -384,8 +384,8 @@ SystemZTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,

 // Analyze operands of the call, assigning locations to each operand.
 SmallVector<CCValAssign, 16> ArgLocs;
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
-ArgLocs, *DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), ArgLocs, *DAG.getContext());

 CCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);

@@ -513,8 +513,8 @@ SystemZTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,

 // Assign locations to each value returned by this call.
 SmallVector<CCValAssign, 16> RVLocs;
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs,
-*DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), RVLocs, *DAG.getContext());

 CCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);

@@ -558,8 +558,8 @@ SystemZTargetLowering::LowerReturn(SDValue Chain,
 SmallVector<CCValAssign, 16> RVLocs;

 // CCState - Info about the registers and stack slot.
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
-RVLocs, *DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), RVLocs, *DAG.getContext());

 // Analize return values.
 CCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);

@@ -401,7 +401,7 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
 Disp += SL->getElementOffset(cast<ConstantInt>(Op)->getZExtValue());
 continue;
 }

 // A array/variable index is always of the form i*S where S is the
 // constant scale size. See if we can push the scale into immediates.
 uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
@@ -469,7 +469,7 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
 if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
 if (GVar->isThreadLocal())
 return false;

 // RIP-relative addresses can't have additional register operands, so if
 // we've already folded stuff into the addressing mode, just force the
 // global value into its own register, which we can use as the basereg.
@@ -704,7 +704,8 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {

 // Analyze operands of the call, assigning locations to each operand.
 SmallVector<CCValAssign, 16> ValLocs;
-CCState CCInfo(CC, F.isVarArg(), TM, ValLocs, I->getContext());
+CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,
+I->getContext());
 CCInfo.AnalyzeReturn(Outs, RetCC_X86);

 const Value *RV = Ret->getOperand(0);
@@ -936,7 +937,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {

 bool X86FastISel::X86SelectZExt(const Instruction *I) {
 // Handle zero-extension from i1 to i8, which is common.
-if (!I->getOperand(0)->getType()->isIntegerTy(1))
+if (!I->getOperand(0)->getType()->isIntegerTy(1))
 return false;

 EVT DstVT = TLI.getValueType(I->getType());
@@ -1062,13 +1063,13 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
 if (OpReg == 0) return false;
 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TestOpc))
 .addReg(OpReg).addImm(1);

 unsigned JmpOpc = X86::JNE_4;
 if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {
 std::swap(TrueMBB, FalseMBB);
 JmpOpc = X86::JE_4;
 }

 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(JmpOpc))
 .addMBB(TrueMBB);
 FastEmitBranch(FalseMBB, DL);
@@ -1336,7 +1337,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
 return false;

 uint64_t Len = cast<ConstantInt>(MCI.getLength())->getZExtValue();

 // Get the address of the dest and source addresses.
 X86AddressMode DestAM, SrcAM;
 if (!X86SelectAddress(MCI.getRawDest(), DestAM) ||
@@ -1345,7 +1346,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {

 return TryEmitSmallMemcpy(DestAM, SrcAM, Len);
 }

 case Intrinsic::stackprotector: {
 // Emit code inline code to store the stack guard onto the stack.
 EVT PtrTy = TLI.getPointerTy();
@@ -1379,7 +1380,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
 case Intrinsic::sadd_with_overflow:
 case Intrinsic::uadd_with_overflow: {
 // FIXME: Should fold immediates.

 // Replace "add with overflow" intrinsics with an "add" instruction followed
 // by a seto/setc instruction.
 const Function *Callee = I.getCalledFunction();
@@ -1467,7 +1468,8 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
 GetReturnInfo(I->getType(), CS.getAttributes().getRetAttributes(),
 Outs, TLI, &Offsets);
 bool CanLowerReturn = TLI.CanLowerReturn(CS.getCallingConv(),
-FTy->isVarArg(), Outs, FTy->getContext());
+*FuncInfo.MF, FTy->isVarArg(),
+Outs, FTy->getContext());
 if (!CanLowerReturn)
 return false;

@@ -1535,9 +1537,9 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
 ArgVal = ConstantExpr::getZExt(CI,Type::getInt32Ty(CI->getContext()));
 }
 }

 unsigned ArgReg;

 // Passing bools around ends up doing a trunc to i1 and passing it.
 // Codegen this as an argument + "and 1".
 if (ArgVal->getType()->isIntegerTy(1) && isa<TruncInst>(ArgVal) &&
@@ -1546,10 +1548,10 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
 ArgVal = cast<TruncInst>(ArgVal)->getOperand(0);
 ArgReg = getRegForValue(ArgVal);
 if (ArgReg == 0) return false;

 MVT ArgVT;
 if (!isTypeLegal(ArgVal->getType(), ArgVT)) return false;

 ArgReg = FastEmit_ri(ArgVT, ArgVT, ISD::AND, ArgReg,
 ArgVal->hasOneUse(), 1);
 } else {
@@ -1575,7 +1577,8 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {

 // Analyze operands of the call, assigning locations to each operand.
 SmallVector<CCValAssign, 16> ArgLocs;
-CCState CCInfo(CC, isVarArg, TM, ArgLocs, I->getParent()->getContext());
+CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, ArgLocs,
+I->getParent()->getContext());

 // Allocate shadow area for Win64
 if (Subtarget->isTargetWin64())
@@ -1790,7 +1793,8 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
 // Now handle call return values.
 SmallVector<unsigned, 4> UsedRegs;
 SmallVector<CCValAssign, 16> RVLocs;
-CCState CCRetInfo(CC, false, TM, RVLocs, I->getParent()->getContext());
+CCState CCRetInfo(CC, false, *FuncInfo.MF, TM, RVLocs,
+I->getParent()->getContext());
 unsigned ResultReg = FuncInfo.CreateRegs(I->getType());
 CCRetInfo.AnalyzeCallResult(Ins, RetCC_X86);
 for (unsigned i = 0; i != RVLocs.size(); ++i) {
@@ -1946,7 +1950,7 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
 if (AM.BaseType == X86AddressMode::RegBase &&
 AM.IndexReg == 0 && AM.Disp == 0 && AM.GV == 0)
 return AM.Base.Reg;

 Opc = TLI.getPointerTy() == MVT::i32 ? X86::LEA32r : X86::LEA64r;
 unsigned ResultReg = createResultReg(RC);
 addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
@@ -222,7 +222,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)

 // X86 is weird, it always uses i8 for shift amounts and setcc results.
 setBooleanContents(ZeroOrOneBooleanContent);

 // For 64-bit since we have so many registers use the ILP scheduler, for
 // 32-bit code use the register pressure specific scheduling.
 if (Subtarget->is64Bit())
@@ -1320,11 +1320,12 @@ bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
 #include "X86GenCallingConv.inc"

 bool
-X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
+X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv,
+MachineFunction &MF, bool isVarArg,
 const SmallVectorImpl<ISD::OutputArg> &Outs,
 LLVMContext &Context) const {
 SmallVector<CCValAssign, 16> RVLocs;
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
+CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(),
 RVLocs, Context);
 return CCInfo.CheckReturn(Outs, RetCC_X86);
 }
@@ -1339,7 +1340,7 @@ X86TargetLowering::LowerReturn(SDValue Chain,
 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();

 SmallVector<CCValAssign, 16> RVLocs;
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
+CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(),
 RVLocs, *DAG.getContext());
 CCInfo.AnalyzeReturn(Outs, RetCC_X86);

@@ -1490,8 +1491,8 @@ X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
 // Assign locations to each value returned by this call.
 SmallVector<CCValAssign, 16> RVLocs;
 bool Is64Bit = Subtarget->is64Bit();
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
-RVLocs, *DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), RVLocs, *DAG.getContext());
 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);

 // Copy all of the result registers out of their specified physreg.
@@ -1680,7 +1681,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,

 // Assign locations to all of the incoming arguments.
 SmallVector<CCValAssign, 16> ArgLocs;
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
+CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(),
 ArgLocs, *DAG.getContext());

 // Allocate shadow area for Win64
@@ -2007,7 +2008,7 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,

 // Analyze operands of the call, assigning locations to each operand.
 SmallVector<CCValAssign, 16> ArgLocs;
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
+CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(),
 ArgLocs, *DAG.getContext());

 // Allocate shadow area for Win64
@@ -2545,8 +2546,8 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
 return false;

 SmallVector<CCValAssign, 16> ArgLocs;
-CCState CCInfo(CalleeCC, isVarArg, getTargetMachine(),
-ArgLocs, *DAG.getContext());
+CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), ArgLocs, *DAG.getContext());

 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
@@ -2566,8 +2567,8 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
 }
 if (Unused) {
 SmallVector<CCValAssign, 16> RVLocs;
-CCState CCInfo(CalleeCC, false, getTargetMachine(),
-RVLocs, *DAG.getContext());
+CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(),
+getTargetMachine(), RVLocs, *DAG.getContext());
 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
 CCValAssign &VA = RVLocs[i];
@@ -2580,13 +2581,13 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
 // results are returned in the same way as what the caller expects.
 if (!CCMatch) {
 SmallVector<CCValAssign, 16> RVLocs1;
-CCState CCInfo1(CalleeCC, false, getTargetMachine(),
-RVLocs1, *DAG.getContext());
+CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(),
+getTargetMachine(), RVLocs1, *DAG.getContext());
 CCInfo1.AnalyzeCallResult(Ins, RetCC_X86);

 SmallVector<CCValAssign, 16> RVLocs2;
-CCState CCInfo2(CallerCC, false, getTargetMachine(),
-RVLocs2, *DAG.getContext());
+CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(),
+getTargetMachine(), RVLocs2, *DAG.getContext());
 CCInfo2.AnalyzeCallResult(Ins, RetCC_X86);

 if (RVLocs1.size() != RVLocs2.size())
@@ -2612,8 +2613,8 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
 // Check if stack adjustment is needed. For now, do not do this if any
 // argument is passed on the stack.
 SmallVector<CCValAssign, 16> ArgLocs;
-CCState CCInfo(CalleeCC, isVarArg, getTargetMachine(),
-ArgLocs, *DAG.getContext());
+CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), ArgLocs, *DAG.getContext());

 // Allocate shadow area for Win64
 if (Subtarget->isTargetWin64()) {
@@ -12706,7 +12707,7 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,

 // Only support length 1 constraints for now.
 if (Constraint.length() > 1) return;

 char ConstraintLetter = Constraint[0];
 switch (ConstraintLetter) {
 default: break;
@@ -858,9 +858,10 @@ namespace llvm {
 ISD::NodeType ExtendKind) const;

 virtual bool
-CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
-const SmallVectorImpl<ISD::OutputArg> &Outs,
-LLVMContext &Context) const;
+CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
+bool isVarArg,
+const SmallVectorImpl<ISD::OutputArg> &Outs,
+LLVMContext &Context) const;

 void ReplaceATOMIC_BINARY_64(SDNode *N, SmallVectorImpl<SDValue> &Results,
 SelectionDAG &DAG, unsigned NewOp) const;
@@ -897,8 +897,8 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,

 // Analyze operands of the call, assigning locations to each operand.
 SmallVector<CCValAssign, 16> ArgLocs;
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
-ArgLocs, *DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), ArgLocs, *DAG.getContext());

 // The ABI dictates there should be one stack slot available to the callee
 // on function entry (for saving lr).
@@ -1020,8 +1020,8 @@ XCoreTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,

 // Assign locations to each value returned by this call.
 SmallVector<CCValAssign, 16> RVLocs;
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
-RVLocs, *DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), RVLocs, *DAG.getContext());

 CCInfo.AnalyzeCallResult(Ins, RetCC_XCore);

@@ -1080,8 +1080,8 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,

 // Assign locations to all of the incoming arguments.
 SmallVector<CCValAssign, 16> ArgLocs;
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
-ArgLocs, *DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), ArgLocs, *DAG.getContext());

 CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);

@@ -1185,12 +1185,12 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
 //===----------------------------------------------------------------------===//

 bool XCoreTargetLowering::
-CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
+CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
+bool isVarArg,
 const SmallVectorImpl<ISD::OutputArg> &Outs,
 LLVMContext &Context) const {
 SmallVector<CCValAssign, 16> RVLocs;
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
-RVLocs, Context);
+CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), RVLocs, Context);
 return CCInfo.CheckReturn(Outs, RetCC_XCore);
 }

@@ -1206,10 +1206,10 @@ XCoreTargetLowering::LowerReturn(SDValue Chain,
 SmallVector<CCValAssign, 16> RVLocs;

 // CCState - Info about the registers and stack slot.
-CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
-RVLocs, *DAG.getContext());
+CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+getTargetMachine(), RVLocs, *DAG.getContext());

-// Analize return values.
+// Analyze return values.
 CCInfo.AnalyzeReturn(Outs, RetCC_XCore);

 // If this is the first return lowered for this function, add
@@ -191,7 +191,8 @@ namespace llvm {
 DebugLoc dl, SelectionDAG &DAG) const;

 virtual bool
-CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
+CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
+bool isVarArg,
 const SmallVectorImpl<ISD::OutputArg> &ArgsFlags,
 LLVMContext &Context) const;
 };