//==-- AArch64FrameLowering.h - TargetFrameLowering for AArch64 --*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64FRAMELOWERING_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64FRAMELOWERING_H

#include "AArch64StackOffset.h"
#include "llvm/CodeGen/TargetFrameLowering.h"

namespace llvm {
class AArch64FrameLowering : public TargetFrameLowering {
|
2014-03-29 11:18:08 +01:00
|
|
|
public:
|
2014-06-10 19:33:39 +02:00
|
|
|
explicit AArch64FrameLowering()
|
2014-03-29 11:18:08 +01:00
|
|
|
: TargetFrameLowering(StackGrowsDown, 16, 0, 16,
|
2015-04-09 10:49:47 +02:00
|
|
|
true /*StackRealignable*/) {}
|
2014-03-29 11:18:08 +01:00
|
|
|
|
|
|
|
void emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
|
2016-02-25 17:36:08 +01:00
|
|
|
MachineBasicBlock::iterator MBBI) const;
|
2014-03-29 11:18:08 +01:00
|
|
|
|
2016-03-31 20:33:38 +02:00
|
|
|
MachineBasicBlock::iterator
|
|
|
|
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
|
|
|
|
MachineBasicBlock::iterator I) const override;
|
2014-03-29 11:18:08 +01:00
|
|
|
|
|
|
|
/// emitProlog/emitEpilog - These methods insert prolog and epilog code into
|
|
|
|
/// the function.
|
[ShrinkWrap] Add (a simplified version) of shrink-wrapping.
This patch introduces a new pass that computes the safe point to insert the
prologue and epilogue of the function.
The interest is to find safe points that are cheaper than the entry and exits
blocks.
As an example and to avoid regressions to be introduce, this patch also
implements the required bits to enable the shrink-wrapping pass for AArch64.
** Context **
Currently we insert the prologue and epilogue of the method/function in the
entry and exits blocks. Although this is correct, we can do a better job when
those are not immediately required and insert them at less frequently executed
places.
The job of the shrink-wrapping pass is to identify such places.
** Motivating example **
Let us consider the following function that perform a call only in one branch of
a if:
define i32 @f(i32 %a, i32 %b) {
%tmp = alloca i32, align 4
%tmp2 = icmp slt i32 %a, %b
br i1 %tmp2, label %true, label %false
true:
store i32 %a, i32* %tmp, align 4
%tmp4 = call i32 @doSomething(i32 0, i32* %tmp)
br label %false
false:
%tmp.0 = phi i32 [ %tmp4, %true ], [ %a, %0 ]
ret i32 %tmp.0
}
On AArch64 this code generates (removing the cfi directives to ease
readabilities):
_f: ; @f
; BB#0:
stp x29, x30, [sp, #-16]!
mov x29, sp
sub sp, sp, #16 ; =16
cmp w0, w1
b.ge LBB0_2
; BB#1: ; %true
stur w0, [x29, #-4]
sub x1, x29, #4 ; =4
mov w0, wzr
bl _doSomething
LBB0_2: ; %false
mov sp, x29
ldp x29, x30, [sp], #16
ret
With shrink-wrapping we could generate:
_f: ; @f
; BB#0:
cmp w0, w1
b.ge LBB0_2
; BB#1: ; %true
stp x29, x30, [sp, #-16]!
mov x29, sp
sub sp, sp, #16 ; =16
stur w0, [x29, #-4]
sub x1, x29, #4 ; =4
mov w0, wzr
bl _doSomething
add sp, x29, #16 ; =16
ldp x29, x30, [sp], #16
LBB0_2: ; %false
ret
Therefore, we would pay the overhead of setting up/destroying the frame only if
we actually do the call.
** Proposed Solution **
This patch introduces a new machine pass that perform the shrink-wrapping
analysis (See the comments at the beginning of ShrinkWrap.cpp for more details).
It then stores the safe save and restore point into the MachineFrameInfo
attached to the MachineFunction.
This information is then used by the PrologEpilogInserter (PEI) to place the
related code at the right place. This pass runs right before the PEI.
Unlike the original paper of Chow from PLDI’88, this implementation of
shrink-wrapping does not use expensive data-flow analysis and does not need hack
to properly avoid frequently executed point. Instead, it relies on dominance and
loop properties.
The pass is off by default and each target can opt-in by setting the
EnableShrinkWrap boolean to true in their derived class of TargetPassConfig.
This setting can also be overwritten on the command line by using
-enable-shrink-wrap.
Before you try out the pass for your target, make sure you properly fix your
emitProlog/emitEpilog/adjustForXXX method to cope with basic blocks that are not
necessarily the entry block.
** Design Decisions **
1. ShrinkWrap is its own pass right now. It could frankly be merged into PEI but
for debugging and clarity I thought it was best to have its own file.
2. Right now, we only support one save point and one restore point. At some
point we can expand this to several save point and restore point, the impacted
component would then be:
- The pass itself: New algorithm needed.
- MachineFrameInfo: Hold a list or set of Save/Restore point instead of one
pointer.
- PEI: Should loop over the save point and restore point.
Anyhow, at least for this first iteration, I do not believe this is interesting
to support the complex cases. We should revisit that when we motivating
examples.
Differential Revision: http://reviews.llvm.org/D9210
<rdar://problem/3201744>
llvm-svn: 236507
2015-05-05 19:38:16 +02:00
|
|
|
void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
|
2014-04-29 09:58:25 +02:00
|
|
|
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
|
2014-03-29 11:18:08 +01:00
|
|
|
|
2016-02-19 19:27:32 +01:00
|
|
|
bool canUseAsPrologue(const MachineBasicBlock &MBB) const override;
|
|
|
|
|
2014-03-29 11:18:08 +01:00
|
|
|
int getFrameIndexReference(const MachineFunction &MF, int FI,
|
2014-04-29 09:58:25 +02:00
|
|
|
unsigned &FrameReg) const override;
|
[AArch64] NFC: Add generic StackOffset to describe scalable offsets.
To support spilling/filling of scalable vectors we need a more generic
representation of a stack offset than simply 'int'.
For this we introduce the StackOffset struct, which comprises multiple
offsets sized by their respective MVTs. Byte-offsets will thus be a simple
tuple such as { offset, MVT::i8 }. Adding two byte-offsets will result in a
byte offset { offsetA + offsetB, MVT::i8 }. When two offsets have different
types, we can canonicalise them to use the same MVT, as long as their
runtime sizes are guaranteed to have the same size-ratio as they would have
at compile-time.
When we have both scalable- and fixed-size objects on the stack, we can
create an offset that is:
({ offset_fixed, MVT::i8 } + { offset_scalable, MVT::nxv1i8 })
The struct also contains a getForFrameOffset() method that is specific to
AArch64 and decomposes the frame-offset to be used directly in instructions
that operate on the stack or index into the stack.
Note: This patch adds StackOffset as an AArch64-only concept, but we would
like to make this a generic concept/struct that is supported by all
interfaces that take or return stack offsets (currently as 'int'). Since
that would be a bigger change that is currently pending on D32530 landing,
we thought it makes sense to first show/prove the concept in the AArch64
target before proposing to roll this out further.
Reviewers: thegameg, rovka, t.p.northover, efriedma, greened
Reviewed By: rovka, greened
Differential Revision: https://reviews.llvm.org/D61435
llvm-svn: 368024
2019-08-06 15:06:40 +02:00
|
|
|
StackOffset resolveFrameIndexReference(const MachineFunction &MF, int FI,
|
|
|
|
unsigned &FrameReg, bool PreferFP,
|
|
|
|
bool ForSimm) const;
|
|
|
|
StackOffset resolveFrameOffsetReference(const MachineFunction &MF,
|
|
|
|
int ObjectOffset, bool isFixed,
|
|
|
|
unsigned &FrameReg, bool PreferFP,
|
|
|
|
bool ForSimm) const;
|
2014-03-29 11:18:08 +01:00
|
|
|
bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
|
|
|
|
MachineBasicBlock::iterator MI,
|
|
|
|
const std::vector<CalleeSavedInfo> &CSI,
|
2014-04-29 09:58:25 +02:00
|
|
|
const TargetRegisterInfo *TRI) const override;
|
2014-03-29 11:18:08 +01:00
|
|
|
|
|
|
|
bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
|
2014-04-29 09:58:25 +02:00
|
|
|
MachineBasicBlock::iterator MI,
|
2017-08-10 18:17:32 +02:00
|
|
|
std::vector<CalleeSavedInfo> &CSI,
|
2014-04-29 09:58:25 +02:00
|
|
|
const TargetRegisterInfo *TRI) const override;
|
2014-03-29 11:18:08 +01:00
|
|
|
|
2018-05-01 17:54:18 +02:00
|
|
|
/// Can this function use the red zone for local allocations.
|
2014-03-29 11:18:08 +01:00
|
|
|
bool canUseRedZone(const MachineFunction &MF) const;
|
|
|
|
|
2014-04-29 09:58:25 +02:00
|
|
|
bool hasFP(const MachineFunction &MF) const override;
|
|
|
|
bool hasReservedCallFrame(const MachineFunction &MF) const override;
|
2014-03-29 11:18:08 +01:00
|
|
|
|
2015-07-14 19:17:13 +02:00
|
|
|
void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs,
|
|
|
|
RegScavenger *RS) const override;
|
2015-11-19 00:12:20 +01:00
|
|
|
|
|
|
|
/// Returns true if the target will correctly handle shrink wrapping.
|
|
|
|
bool enableShrinkWrapping(const MachineFunction &MF) const override {
|
|
|
|
return true;
|
|
|
|
}
|
2016-05-06 18:34:59 +02:00
|
|
|
|
2016-06-02 18:22:07 +02:00
|
|
|
bool enableStackSlotScavenging(const MachineFunction &MF) const override;
|
|
|
|
|
2018-11-10 00:33:30 +01:00
|
|
|
void processFunctionBeforeFrameFinalized(MachineFunction &MF,
|
|
|
|
RegScavenger *RS) const override;
|
|
|
|
|
|
|
|
unsigned getWinEHParentFrameOffset(const MachineFunction &MF) const override;
|
|
|
|
|
|
|
|
unsigned getWinEHFuncletFrameSize(const MachineFunction &MF) const;
|
|
|
|
|
|
|
|
int getFrameIndexReferencePreferSP(const MachineFunction &MF, int FI,
|
|
|
|
unsigned &FrameReg,
|
|
|
|
bool IgnoreSPUpdates) const override;
|
2019-02-01 22:41:33 +01:00
|
|
|
int getNonLocalFrameIndexReference(const MachineFunction &MF,
|
|
|
|
int FI) const override;
|
|
|
|
int getSEHFrameIndexOffset(const MachineFunction &MF, int FI) const;
|
2018-11-10 00:33:30 +01:00
|
|
|
|
2016-05-06 18:34:59 +02:00
|
|
|
private:
|
|
|
|
bool shouldCombineCSRLocalStackBump(MachineFunction &MF,
|
|
|
|
unsigned StackBumpBytes) const;
|
2014-03-29 11:18:08 +01:00
|
|
|
};

} // End llvm namespace

#endif