Mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2024-11-22 18:54:02 +01:00)
e9cd01d82d
In RISC-V there is a single addressing mode of the form imm(reg), where imm is a signed 12-bit integer with a range of [-2048..2047] bytes from reg.

The test MultiSource/UnitTests/C++11/frame_layout of the LLVM test-suite exercises several scenarios with the stack, including function calls where the stack will need to be realigned due to a local variable having a large alignment of 4096 bytes.

In situations of large stacks, the RISC-V backend (in RISCVFrameLowering) reserves an extra emergency spill slot which can be used (if no free register is found) by the register scavenger after the frame indexes have been eliminated.

PrologEpilogInserter already takes care of keeping the emergency spill slots as close as possible to the stack pointer or frame pointer (depending on what the function will use). However, there is a final alignment step to honour the maximum alignment of the stack that, when using the stack pointer to access the emergency spill slots, has the side effect of setting them farther from the stack pointer.

In the case of the frame_layout testcase, the net result is that we do have an emergency spill slot, but it is so far from the stack pointer (more than 2048 bytes, due to the extra alignment of a variable to 4096 bytes) that it becomes unreachable via any immediate offset.

During elimination of a frame index, many (regular) offsets of the stack may already be immediately unreachable, and their addresses need to be computed using a register. A virtual register is created, and later RegisterScavenger should be able to find an unused (physical) register for it. However, if no register is available, RegisterScavenger will pick a physical register and spill it onto the emergency stack slot while we compute the offset (restoring the chosen register afterwards). This assumes that the emergency stack slot is easily reachable (that is, without requiring another register!). This is the assumption we break when we perform the extra alignment in PrologEpilogInserter.

We can "float" the emergency spill slots by increasing (in absolute value) their offsets from the incoming stack pointer. This way the emergency spill slots will remain close to the stack pointer once the function has allocated storage for the stack, including the needed realignment. The new size computed in PrologEpilogInserter is padding, so it should be OK to move the emergency spill slots there. Also, because we are increasing the alignment, the new location should stay aligned for the purpose of the emergency spill slots.

Note that this change also impacts other backends, as shown by the tests; the changes are minor adjustments to the emergency stack slot offsets.

Differential Revision: https://reviews.llvm.org/D89239
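For illustration only (this IR is not part of the commit, and the function and variable names are hypothetical), a minimal sketch of the RISC-V shape described above: a local whose alignment exceeds the stack alignment forces the prologue to realign sp, and the realignment padding is where the emergency spill slot can now be floated.

; Minimal sketch (hypothetical, riscv64-targeted): an over-aligned local
; forces stack realignment, as in the frame_layout testcase.
target triple = "riscv64"

define void @overaligned_local() {
  ; Alignment of 4096 exceeds the default stack alignment, so the prologue
  ; must realign sp; before this patch the final alignment step could leave
  ; the emergency spill slot more than 2047 bytes from sp, out of imm(reg)
  ; range.
  %big = alloca [4096 x i8], align 4096
  %p = getelementptr inbounds [4096 x i8], [4096 x i8]* %big, i64 0, i64 0
  store volatile i8 0, i8* %p
  ret void
}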
83 lines
3.4 KiB
LLVM
; RUN: llc -o - %s | FileCheck %s
; Check that we reserve an emergency spill slot, even if we added an extra
; CSR spill for the values used by the swiftself parameter.
; CHECK-LABEL: func:
; CHECK: str [[REG:x[0-9]+]], [sp]
; CHECK: add [[REG]], sp, #248
; CHECK: str xzr, [{{\s*}}[[REG]], #32760]
; CHECK: ldr [[REG]], [sp]
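; (The str/ldr of [[REG]] at [sp] above are the scavenged register being
; spilled to and reloaded from the emergency slot while the out-of-range
; offset is materialized.)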
target triple = "arm64-apple-ios"

@ptr8 = external global i8*
@ptr64 = external global i64

define hidden swiftcc void @func(i8* swiftself %arg) #0 {
bb:
  %stack0 = alloca i8*, i32 5000, align 8
  %stack1 = alloca i8*, i32 32, align 8
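
  ; Twenty-six volatile loads keep values live in the general-purpose
  ; registers across the out-of-range store below, so the scavenger finds
  ; no free register and must use the emergency spill slot.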
  %v0 = load volatile i64, i64* @ptr64, align 8
  %v1 = load volatile i64, i64* @ptr64, align 8
  %v2 = load volatile i64, i64* @ptr64, align 8
  %v3 = load volatile i64, i64* @ptr64, align 8
  %v4 = load volatile i64, i64* @ptr64, align 8
  %v5 = load volatile i64, i64* @ptr64, align 8
  %v6 = load volatile i64, i64* @ptr64, align 8
  %v7 = load volatile i64, i64* @ptr64, align 8
  %v8 = load volatile i64, i64* @ptr64, align 8
  %v9 = load volatile i64, i64* @ptr64, align 8
  %v10 = load volatile i64, i64* @ptr64, align 8
  %v11 = load volatile i64, i64* @ptr64, align 8
  %v12 = load volatile i64, i64* @ptr64, align 8
  %v13 = load volatile i64, i64* @ptr64, align 8
  %v14 = load volatile i64, i64* @ptr64, align 8
  %v15 = load volatile i64, i64* @ptr64, align 8
  %v16 = load volatile i64, i64* @ptr64, align 8
  %v17 = load volatile i64, i64* @ptr64, align 8
  %v18 = load volatile i64, i64* @ptr64, align 8
  %v19 = load volatile i64, i64* @ptr64, align 8
  %v20 = load volatile i64, i64* @ptr64, align 8
  %v21 = load volatile i64, i64* @ptr64, align 8
  %v22 = load volatile i64, i64* @ptr64, align 8
  %v23 = load volatile i64, i64* @ptr64, align 8
  %v24 = load volatile i64, i64* @ptr64, align 8
  %v25 = load volatile i64, i64* @ptr64, align 8

  ; this should exceed stack-relative addressing limits and need an emergency
  ; spill slot.
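  ; (Element 4092 of the i8* array is byte offset 4092 * 8 = 32736 from
  ; %stack0; with %stack0's position in the frame the address exceeds 32760,
  ; the largest scaled 12-bit offset an AArch64 64-bit str can encode, so a
  ; scratch register must be scavenged.)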
  %s = getelementptr inbounds i8*, i8** %stack0, i64 4092
  store volatile i8* null, i8** %s
  store volatile i8* null, i8** %stack1

  store volatile i64 %v0, i64* @ptr64, align 8
  store volatile i64 %v1, i64* @ptr64, align 8
  store volatile i64 %v2, i64* @ptr64, align 8
  store volatile i64 %v3, i64* @ptr64, align 8
  store volatile i64 %v4, i64* @ptr64, align 8
  store volatile i64 %v5, i64* @ptr64, align 8
  store volatile i64 %v6, i64* @ptr64, align 8
  store volatile i64 %v7, i64* @ptr64, align 8
  store volatile i64 %v8, i64* @ptr64, align 8
  store volatile i64 %v9, i64* @ptr64, align 8
  store volatile i64 %v10, i64* @ptr64, align 8
  store volatile i64 %v11, i64* @ptr64, align 8
  store volatile i64 %v12, i64* @ptr64, align 8
  store volatile i64 %v13, i64* @ptr64, align 8
  store volatile i64 %v14, i64* @ptr64, align 8
  store volatile i64 %v15, i64* @ptr64, align 8
  store volatile i64 %v16, i64* @ptr64, align 8
  store volatile i64 %v17, i64* @ptr64, align 8
  store volatile i64 %v18, i64* @ptr64, align 8
  store volatile i64 %v19, i64* @ptr64, align 8
  store volatile i64 %v20, i64* @ptr64, align 8
  store volatile i64 %v21, i64* @ptr64, align 8
  store volatile i64 %v22, i64* @ptr64, align 8
  store volatile i64 %v23, i64* @ptr64, align 8
  store volatile i64 %v24, i64* @ptr64, align 8
  store volatile i64 %v25, i64* @ptr64, align 8

  ; use swiftself parameter late so it stays alive throughout the function.
  store volatile i8* %arg, i8** @ptr8
  ret void
}