llvm-mirror/test/CodeGen/AArch64/reverse-csr-restore-seq.mir
Francis Visoiu Mistrih 648e1e248b [AArch64] Emit CSR loads in the same order as stores
Optionally allow the order of restoring the callee-saved registers in the
epilogue to be reversed.

The flag `-reverse-csr-restore-seq` generates the following code:

```
stp     x26, x25, [sp, #-64]!
stp     x24, x23, [sp, #16]
stp     x22, x21, [sp, #32]
stp     x20, x19, [sp, #48]

; [..]

ldp     x24, x23, [sp, #16]
ldp     x22, x21, [sp, #32]
ldp     x20, x19, [sp, #48]
ldp     x26, x25, [sp], #64
ret
```

Note how the CSRs are restored in the same order as they are saved.

One exception to this rule is the last `ldp`, which allows us to merge
the stack adjustment and the ldp into a post-index ldp. This is done by
first generating:

```
ldp     x26, x25, [sp]
add     sp, sp, #64
```

which gets merged by the AArch64 load/store optimizer (`aarch64-ldst-opt`) into

```
ldp     x26, x25, [sp], #64
```

The flag is disabled by default.
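
For context, here is a minimal sketch (not part of the original commit or test) of an IR function that forces x19-x26 to be saved and restored, with an assumed `llc` invocation enabling the flag; the empty inline-asm clobber list is just one way to make all of these callee-saved registers need saving:

```
; Illustrative only: the empty inline asm clobbers x19-x26, so the function
; must spill and reload them as callee-saved registers in its prologue/epilogue.
; Assumed invocation: llc -mtriple=aarch64-- -reverse-csr-restore-seq csr.ll -o csr.s
define void @csr_example() nounwind {
entry:
  call void asm sideeffect "", "~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26}"()
  ret void
}
```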

llvm-svn: 327569
2018-03-14 20:34:03 +00:00


# RUN: llc -run-pass=prologepilog -reverse-csr-restore-seq -o - -mtriple=aarch64-- %s | FileCheck %s --check-prefixes=CHECK,BEFORELDSTOPT
# RUN: llc -start-before=prologepilog -stop-after=aarch64-ldst-opt -reverse-csr-restore-seq -o - -mtriple=aarch64-- %s | FileCheck %s --check-prefixes=CHECK,AFTERLDSTOPT
#
--- |
  define void @foo() nounwind { entry: unreachable }
  define void @bar() nounwind { entry: unreachable }
...
---
name: foo
# CHECK-LABEL: name: foo
tracksRegLiveness: true
body: |
  bb.0:
    $x19 = IMPLICIT_DEF
    $x20 = IMPLICIT_DEF
    $x21 = IMPLICIT_DEF
    $x22 = IMPLICIT_DEF
    $x23 = IMPLICIT_DEF
    $x24 = IMPLICIT_DEF
    $x25 = IMPLICIT_DEF
    $x26 = IMPLICIT_DEF
    ; The local stack size is 0, so the last ldp in the sequence will also
    ; restore the stack.
    ; CHECK: $x24, $x23 = frame-destroy LDPXi $sp, 2
    ; CHECK-NEXT: $x22, $x21 = frame-destroy LDPXi $sp, 4
    ; CHECK-NEXT: $x20, $x19 = frame-destroy LDPXi $sp, 6
    ; Before running the load-store optimizer, we emit an ldp and an add.
    ; BEFORELDSTOPT-NEXT: $x26, $x25 = frame-destroy LDPXi $sp, 0
    ; BEFORELDSTOPT-NEXT: $sp = frame-destroy ADDXri $sp, 64, 0
    ; We want to make sure that after running the load-store optimizer, the ldp
    ; and the add get merged into a post-index ldp.
    ; AFTERLDSTOPT-NEXT: early-clobber $sp, $x26, $x25 = frame-destroy LDPXpost $sp, 8
    RET_ReallyLR
...
---
name: bar
# CHECK-LABEL: name: bar
tracksRegLiveness: true
stack:
  - { id: 0, size: 8, alignment: 4,
      stack-id: 0, callee-saved-register: '', callee-saved-restored: true,
      local-offset: -4, di-variable: '', di-expression: '', di-location: '' }
body: |
  bb.0:
    $x19 = IMPLICIT_DEF
    $x20 = IMPLICIT_DEF
    $x21 = IMPLICIT_DEF
    $x22 = IMPLICIT_DEF
    $x23 = IMPLICIT_DEF
    $x24 = IMPLICIT_DEF
    $x25 = IMPLICIT_DEF
    $x26 = IMPLICIT_DEF
    ; The local stack size is not 0, and we can combine the CSR stack size with
    ; the local stack size. This results in rewriting the offsets for all the
    ; save/restores and prevents us from merging the stack adjustment and the
    ; last pop. In this case, there is no point in moving the first CSR pair to
    ; the end.
    ; CHECK: $x26, $x25 = frame-destroy LDPXi $sp, 2
    ; CHECK-NEXT: $x24, $x23 = frame-destroy LDPXi $sp, 4
    ; CHECK-NEXT: $x22, $x21 = frame-destroy LDPXi $sp, 6
    ; CHECK-NEXT: $x20, $x19 = frame-destroy LDPXi $sp, 8
    ; CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 80, 0
    RET_ReallyLR
...