diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp
index 3b72285d312..3eac44bc8d7 100644
--- a/lib/Target/ARM/ARMFastISel.cpp
+++ b/lib/Target/ARM/ARMFastISel.cpp
@@ -684,9 +684,9 @@ bool ARMFastISel::ARMComputeRegOffset(const Value *Obj, unsigned &Base,
 }
 
 void ARMFastISel::ARMSimplifyRegOffset(unsigned &Base, int &Offset, EVT VT) {
-  
+
   assert(VT.isSimple() && "Non-simple types are invalid here!");
-  
+
   bool needsLowering = false;
   switch (VT.getSimpleVT().SimpleTy) {
     default:
@@ -704,7 +704,7 @@ void ARMFastISel::ARMSimplifyRegOffset(unsigned &Base, int &Offset, EVT VT) {
       needsLowering = ((Offset & 0xff) != Offset);
       break;
   }
-  
+
   // Since the offset is too large for the load/store instruction
   // get the reg+offset into a register.
   if (needsLowering) {
@@ -766,14 +766,14 @@ bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg,
   }
 
   ResultReg = createResultReg(RC);
-  
+
   ARMSimplifyRegOffset(Base, Offset, VT);
-  
+
   // addrmode5 output depends on the selection dag addressing dividing the
   // offset by 4 that it then later multiplies. Do this here as well.
   if (isFloat)
     Offset /= 4;
-  
+
   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                           ResultReg)
                   .addReg(Base).addImm(Offset));
@@ -830,12 +830,12 @@ bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg,
   }
 
   ARMSimplifyRegOffset(Base, Offset, VT);
-  
+
   // addrmode5 output depends on the selection dag addressing dividing the
   // offset by 4 that it then later multiplies. Do this here as well.
   if (isFloat)
     Offset /= 4;
-  
+
   // The thumb addressing mode has operands swapped from the arm addressing
   // mode, the floating point one only has two operands.
   if (isFloat || isThumb)
@@ -1242,12 +1242,12 @@ bool ARMFastISel::FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src,
                                  EVT SrcVT, unsigned &ResultReg) {
   unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
                            Src, /*TODO: Kill=*/false);
-  
+
   if (RR != 0) {
     ResultReg = RR;
     return true;
   } else
-    return false;    
+    return false;
 }
 
 // This is largely taken directly from CCAssignFnForNode - we don't support
@@ -1365,7 +1365,7 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
     } else if (VA.needsCustom()) {
       // TODO: We need custom lowering for vector (v2f64) args.
       if (VA.getLocVT() != MVT::f64) return false;
-      
+
       CCValAssign &NextVA = ArgLocs[++i];
 
       // TODO: Only handle register args for now.
@@ -1418,7 +1418,7 @@ bool ARMFastISel::FinishCall(EVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
 
       UsedRegs.push_back(RVLocs[0].getLocReg());
       UsedRegs.push_back(RVLocs[1].getLocReg());
-      
+
       // Finally update the result.
       UpdateValueMap(I, ResultReg);
     } else {
@@ -1442,10 +1442,10 @@ bool ARMFastISel::FinishCall(EVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
 bool ARMFastISel::SelectRet(const Instruction *I) {
   const ReturnInst *Ret = cast<ReturnInst>(I);
   const Function &F = *I->getParent()->getParent();
-  
+
   if (!FuncInfo.CanLowerReturn)
     return false;
-  
+
   if (F.isVarArg())
     return false;
 
@@ -1470,7 +1470,7 @@ bool ARMFastISel::SelectRet(const Instruction *I) {
       return false;
 
     CCValAssign &VA = ValLocs[0];
-  
+
     // Don't bother handling odd stuff for now.
     if (VA.getLocInfo() != CCValAssign::Full)
       return false;
@@ -1481,7 +1481,7 @@ bool ARMFastISel::SelectRet(const Instruction *I) {
     // says Full but the types don't match.
     if (VA.getValVT() != TLI.getValueType(RV->getType()))
      return false;
-      
+
    // Make the copy.
    unsigned SrcReg = Reg + VA.getValNo();
    unsigned DstReg = VA.getLocReg();
@@ -1495,7 +1495,7 @@ bool ARMFastISel::SelectRet(const Instruction *I) {
     // Mark the register as live out of the function.
     MRI.addLiveOut(VA.getLocReg());
   }
-  
+
   unsigned RetOpc = isThumb ? ARM::tBX_RET : ARM::BX_RET;
   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                           TII.get(RetOpc)));