From e45ebab2b3620fdc97b739b28f116522876ec784 Mon Sep 17 00:00:00 2001 From: Matthias Braun Date: Fri, 19 Jan 2018 03:16:36 +0000 Subject: [PATCH] AArch64: Fix emergency spillslot being out of reach for large callframes Re-commit of r322200: The testcase shouldn't hit machineverifiers anymore with r322917 in place. Large callframes (calls with several hundreds or thousands of parameters) could lead to situations in which the emergency spillslot is out of range to be addressed relative to the stack pointer. This commit forces the use of a frame pointer in the presence of large callframes. This commit does several things: - Compute max callframe size at the end of instruction selection. - Add mirFileLoaded target callback. Use it to compute the max callframe size after loading a .mir file when the size wasn't specified in the file. - Let TargetFrameLowering::hasFP() return true if there exists a callframe > 255 bytes. - Always place the emergency spillslot close to FP if we have a frame pointer. - Note that `useFPForScavengingIndex()` would previously return false when a base pointer was available leading to the emergency spillslot getting allocated late (that's the whole effect of this callback). Which made no sense to me so I took this case out: Even though the emergency spillslot is technically not referenced by FP in this case we still want it allocated early. 
Differential Revision: https://reviews.llvm.org/D40876 llvm-svn: 322919 --- include/llvm/CodeGen/TargetSubtargetInfo.h | 3 ++ lib/CodeGen/MIRParser/MIRParser.cpp | 2 ++ lib/CodeGen/TargetSubtargetInfo.cpp | 3 ++ lib/Target/AArch64/AArch64FrameLowering.cpp | 32 +++++++++++++++++---- lib/Target/AArch64/AArch64ISelLowering.cpp | 5 ++++ lib/Target/AArch64/AArch64ISelLowering.h | 2 ++ lib/Target/AArch64/AArch64RegisterInfo.cpp | 12 ++++---- lib/Target/AArch64/AArch64Subtarget.cpp | 10 +++++++ lib/Target/AArch64/AArch64Subtarget.h | 2 ++ test/CodeGen/AArch64/big-callframe.ll | 15 ++++++++++ 10 files changed, 75 insertions(+), 11 deletions(-) create mode 100644 test/CodeGen/AArch64/big-callframe.ll diff --git a/include/llvm/CodeGen/TargetSubtargetInfo.h b/include/llvm/CodeGen/TargetSubtargetInfo.h index 576522aef46..d5b413c5342 100644 --- a/include/llvm/CodeGen/TargetSubtargetInfo.h +++ b/include/llvm/CodeGen/TargetSubtargetInfo.h @@ -248,6 +248,9 @@ public: /// Returns string representation of scheduler comment std::string getSchedInfoStr(const MachineInstr &MI) const override; std::string getSchedInfoStr(MCInst const &MCI) const override; + + /// This is called after a .mir file was loaded. 
+ virtual void mirFileLoaded(MachineFunction &MF) const; }; } // end namespace llvm diff --git a/lib/CodeGen/MIRParser/MIRParser.cpp b/lib/CodeGen/MIRParser/MIRParser.cpp index 7d8e62736a3..e4e3fbbd75d 100644 --- a/lib/CodeGen/MIRParser/MIRParser.cpp +++ b/lib/CodeGen/MIRParser/MIRParser.cpp @@ -417,6 +417,8 @@ MIRParserImpl::initializeMachineFunction(const yaml::MachineFunction &YamlMF, computeFunctionProperties(MF); + MF.getSubtarget().mirFileLoaded(MF); + MF.verify(); return false; } diff --git a/lib/CodeGen/TargetSubtargetInfo.cpp b/lib/CodeGen/TargetSubtargetInfo.cpp index 1a317cd865f..7aab869f025 100644 --- a/lib/CodeGen/TargetSubtargetInfo.cpp +++ b/lib/CodeGen/TargetSubtargetInfo.cpp @@ -111,3 +111,6 @@ std::string TargetSubtargetInfo::getSchedInfoStr(MCInst const &MCI) const { TSchedModel.computeInstrRThroughput(MCI.getOpcode()); return createSchedInfoStr(Latency, RThroughput); } + +void TargetSubtargetInfo::mirFileLoaded(MachineFunction &MF) const { +} diff --git a/lib/Target/AArch64/AArch64FrameLowering.cpp b/lib/Target/AArch64/AArch64FrameLowering.cpp index d66f7b59a4b..ea4bfe7e8d9 100644 --- a/lib/Target/AArch64/AArch64FrameLowering.cpp +++ b/lib/Target/AArch64/AArch64FrameLowering.cpp @@ -142,6 +142,12 @@ static cl::opt EnableRedZone("aarch64-redzone", STATISTIC(NumRedZoneFunctions, "Number of functions using red zone"); +/// This is the biggest offset to the stack pointer we can encode in aarch64 +/// instructions (without using a separate calculation and a temp register). +/// Note that the exception here are vector stores/loads which cannot encode any +/// displacements (see estimateRSStackSizeLimit(), isAArch64FrameOffsetLegal()). +static const unsigned DefaultSafeSPDisplacement = 255; + /// Look at each instruction that references stack frames and return the stack /// size limit beyond which some of these instructions will require a scratch /// register during their expansion later. 
@@ -167,7 +173,7 @@ static unsigned estimateRSStackSizeLimit(MachineFunction &MF) { } } } - return 255; + return DefaultSafeSPDisplacement; } bool AArch64FrameLowering::canUseRedZone(const MachineFunction &MF) const { @@ -191,11 +197,25 @@ bool AArch64FrameLowering::hasFP(const MachineFunction &MF) const { const MachineFrameInfo &MFI = MF.getFrameInfo(); const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo(); // Retain behavior of always omitting the FP for leaf functions when possible. - return (MFI.hasCalls() && - MF.getTarget().Options.DisableFramePointerElim(MF)) || - MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken() || - MFI.hasStackMap() || MFI.hasPatchPoint() || - RegInfo->needsStackRealignment(MF); + if (MFI.hasCalls() && MF.getTarget().Options.DisableFramePointerElim(MF)) + return true; + if (MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken() || + MFI.hasStackMap() || MFI.hasPatchPoint() || + RegInfo->needsStackRealignment(MF)) + return true; + // With large callframes around we may need to use FP to access the scavenging + // emergency spillslot. + // + // Unfortunately some calls to hasFP() like machine verifier -> + // getReservedReg() -> hasFP in the middle of global isel are too early + // to know the max call frame size. Hopefully conservatively returning "true" + // in those cases is fine. + // DefaultSafeSPDisplacement is fine as we only emergency spill GP regs. 
+ if (!MFI.isMaxCallFrameSizeComputed() || + MFI.getMaxCallFrameSize() > DefaultSafeSPDisplacement) + return true; + + return false; } /// hasReservedCallFrame - Under normal circumstances, when a frame pointer is diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp index f2c1f86a974..d721e7205c9 100644 --- a/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -10982,3 +10982,8 @@ AArch64TargetLowering::getVaListSizeInBits(const DataLayout &DL) const { return 3 * getPointerTy(DL).getSizeInBits() + 2 * 32; } + +void AArch64TargetLowering::finalizeLowering(MachineFunction &MF) const { + MF.getFrameInfo().computeMaxCallFrameSize(MF); + TargetLoweringBase::finalizeLowering(MF); +} diff --git a/lib/Target/AArch64/AArch64ISelLowering.h b/lib/Target/AArch64/AArch64ISelLowering.h index 0af05fff262..6018bc501db 100644 --- a/lib/Target/AArch64/AArch64ISelLowering.h +++ b/lib/Target/AArch64/AArch64ISelLowering.h @@ -648,6 +648,8 @@ private: SelectionDAG &DAG) const override; bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override; + + void finalizeLowering(MachineFunction &MF) const override; }; namespace AArch64 { diff --git a/lib/Target/AArch64/AArch64RegisterInfo.cpp b/lib/Target/AArch64/AArch64RegisterInfo.cpp index 88dd297e007..360b39125b7 100644 --- a/lib/Target/AArch64/AArch64RegisterInfo.cpp +++ b/lib/Target/AArch64/AArch64RegisterInfo.cpp @@ -225,11 +225,13 @@ bool AArch64RegisterInfo::requiresVirtualBaseRegisters( bool AArch64RegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const { - const MachineFrameInfo &MFI = MF.getFrameInfo(); - // AArch64FrameLowering::resolveFrameIndexReference() can always fall back - // to the stack pointer, so only put the emergency spill slot next to the - // FP when there's no better way to access it (SP or base pointer). 
- return MFI.hasVarSizedObjects() && !hasBasePointer(MF); + // This function indicates whether the emergency spillslot should be placed + // close to the beginning of the stackframe (closer to FP) or the end + // (closer to SP). + // + // The beginning works most reliably if we have a frame pointer. + const AArch64FrameLowering &TFI = *getFrameLowering(MF); + return TFI.hasFP(MF); } bool AArch64RegisterInfo::requiresFrameIndexScavenging( diff --git a/lib/Target/AArch64/AArch64Subtarget.cpp b/lib/Target/AArch64/AArch64Subtarget.cpp index 688bb936d0c..eb9bb1498d6 100644 --- a/lib/Target/AArch64/AArch64Subtarget.cpp +++ b/lib/Target/AArch64/AArch64Subtarget.cpp @@ -250,3 +250,13 @@ std::unique_ptr AArch64Subtarget::getCustomPBQPConstraints() const { return balanceFPOps() ? llvm::make_unique() : nullptr; } + +void AArch64Subtarget::mirFileLoaded(MachineFunction &MF) const { + // We usually compute max call frame size after ISel. Do the computation now + // if the .mir file didn't specify it. Note that this will probably give you + // bogus values after PEI has eliminated the callframe setup/destroy pseudo + // instructions, specify explicitly if you need it to be correct. 
+ MachineFrameInfo &MFI = MF.getFrameInfo(); + if (!MFI.isMaxCallFrameSizeComputed()) + MFI.computeMaxCallFrameSize(MF); +} diff --git a/lib/Target/AArch64/AArch64Subtarget.h b/lib/Target/AArch64/AArch64Subtarget.h index 9245b2f396b..45a8eb16464 100644 --- a/lib/Target/AArch64/AArch64Subtarget.h +++ b/lib/Target/AArch64/AArch64Subtarget.h @@ -326,6 +326,8 @@ public: return false; } } + + void mirFileLoaded(MachineFunction &MF) const override; }; } // End llvm namespace diff --git a/test/CodeGen/AArch64/big-callframe.ll b/test/CodeGen/AArch64/big-callframe.ll new file mode 100644 index 00000000000..6e15795b272 --- /dev/null +++ b/test/CodeGen/AArch64/big-callframe.ll @@ -0,0 +1,15 @@ +; RUN: llc -o - %s -verify-machineinstrs | FileCheck %s +; Make sure we use a frame pointer and fp relative addressing for the emergency +; spillslot when we have gigantic callframes. +; CHECK-LABEL: func: +; CHECK: stur {{.*}}, [x29, #{{.*}}] // 8-byte Folded Spill +; CHECK: ldur {{.*}}, [x29, #{{.*}}] // 8-byte Folded Reload +target triple = "aarch64--" +declare void @extfunc([4096 x i64]* byval %p) +define void @func([4096 x i64]* %z) { + %lvar = alloca [31 x i8] + %v = load volatile [31 x i8], [31 x i8]* %lvar + store volatile [31 x i8] %v, [31 x i8]* %lvar + call void @extfunc([4096 x i64]* byval %z) + ret void +}