
[AArch64][SVE] Fix PCS for functions taking/returning scalable types.

The default calling convention needs to save/restore the SVE callee-saved
registers according to the SVE PCS when the function takes or returns
scalable types, even when the `aarch64_sve_vector_pcs` CC is not
specified for the function.
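
As a minimal sketch (the function below is hypothetical and not part of this
patch), an IR function such as

; Hypothetical example: default C calling convention, but the scalable
; argument and return types mean the SVE callee-saved set applies.
define <vscale x 4 x i32> @example_default_cc(<vscale x 4 x i32> %a) {
  %r = add <vscale x 4 x i32> %a, %a
  ret <vscale x 4 x i32> %r
}

now selects CSR_AArch64_SVE_AAPCS_SaveList instead of
CSR_AArch64_AAPCS_SaveList, even though it is not marked
aarch64_sve_vector_pcs.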

Reviewers: efriedma, paulwalker-arm, david-arm, rengolin

Reviewed By: paulwalker-arm

Differential Revision: https://reviews.llvm.org/D84041

(cherry picked from commit 9bacf1588583014538a0217add18f370acb95788)
Sander de Smalen 2020-07-20 14:43:50 +01:00 committed by Hans Wennborg
parent 48fbb591c8
commit bd7f2f5f58
3 changed files with 42 additions and 0 deletions


@@ -40,6 +40,14 @@ AArch64RegisterInfo::AArch64RegisterInfo(const Triple &TT)
  AArch64_MC::initLLVMToCVRegMapping(this);
}

static bool hasSVEArgsOrReturn(const MachineFunction *MF) {
  const Function &F = MF->getFunction();
  return isa<ScalableVectorType>(F.getReturnType()) ||
         any_of(F.args(), [](const Argument &Arg) {
           return isa<ScalableVectorType>(Arg.getType());
         });
}

const MCPhysReg *
AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
@@ -75,6 +83,8 @@ AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
    // This is for OSes other than Windows; Windows is a separate case further
    // above.
    return CSR_AArch64_AAPCS_X18_SaveList;
  if (hasSVEArgsOrReturn(MF))
    return CSR_AArch64_SVE_AAPCS_SaveList;
  return CSR_AArch64_AAPCS_SaveList;
}
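
For contrast, a minimal hypothetical sketch (not part of this patch): a
function whose signature has no scalable types keeps the base AAPCS
callee-saved list, because hasSVEArgsOrReturn() only inspects the arguments
and the return type, not the body.

; Hypothetical contrast case: no scalable argument or return type, so
; hasSVEArgsOrReturn() is false and CSR_AArch64_AAPCS_SaveList is still
; chosen, even though the body operates on scalable vectors.
define void @example_internal_sve_only(<vscale x 4 x i32>* %p) {
  %v = load <vscale x 4 x i32>, <vscale x 4 x i32>* %p
  %d = add <vscale x 4 x i32> %v, %v
  store <vscale x 4 x i32> %d, <vscale x 4 x i32>* %p
  ret void
}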


@@ -1,4 +1,5 @@
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -stop-after=finalize-isel < %s 2>%t | FileCheck %s
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -stop-after=prologepilog < %s 2>%t | FileCheck %s --check-prefix=CHECKCSR
; RUN: FileCheck --check-prefix=WARN --allow-empty %s <%t
; If this check fails please read test/CodeGen/AArch64/README for instructions on how to resolve it.
@@ -123,3 +124,25 @@ define <vscale x 4 x i1> @sve_signature_pred_caller(<vscale x 4 x i1> %arg1, <vscale x 4 x i1> %arg2)
  %res = call <vscale x 4 x i1> @sve_signature_pred(<vscale x 4 x i1> %arg2, <vscale x 4 x i1> %arg1)
  ret <vscale x 4 x i1> %res
}

; Test that functions returning or taking SVE arguments use the correct
; callee-saved set when using the default C calling convention (as opposed
; to aarch64_sve_vector_pcs)

; CHECKCSR-LABEL: name: sve_signature_vec_ret_callee
; CHECKCSR: callee-saved-register: '$z8'
; CHECKCSR: callee-saved-register: '$p4'
; CHECKCSR: RET_ReallyLR
define <vscale x 4 x i32> @sve_signature_vec_ret_callee() nounwind {
  call void asm sideeffect "nop", "~{z8},~{p4}"()
  ret <vscale x 4 x i32> zeroinitializer
}

; CHECKCSR-LABEL: name: sve_signature_vec_arg_callee
; CHECKCSR: callee-saved-register: '$z8'
; CHECKCSR: callee-saved-register: '$p4'
; CHECKCSR: RET_ReallyLR
define void @sve_signature_vec_arg_callee(<vscale x 4 x i32> %v) nounwind {
  call void asm sideeffect "nop", "~{z8},~{p4}"()
  ret void
}


@@ -113,6 +113,12 @@ entry:
define <vscale x 16 x i1> @trunc_i64toi1_split3(<vscale x 16 x i64> %in) {
; CHECK-LABEL: trunc_i64toi1_split3:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset p4, -16
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: and z7.d, z7.d, #0x1
; CHECK-NEXT: and z6.d, z6.d, #0x1
@@ -134,9 +140,12 @@ define <vscale x 16 x i1> @trunc_i64toi1_split3(<vscale x 16 x i64> %in) {
; CHECK-NEXT: cmpne p4.d, p0/z, z1.d, #0
; CHECK-NEXT: cmpne p0.d, p0/z, z0.d, #0
; CHECK-NEXT: uzp1 p0.s, p0.s, p4.s
; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: uzp1 p1.h, p3.h, p1.h
; CHECK-NEXT: uzp1 p0.h, p0.h, p2.h
; CHECK-NEXT: uzp1 p0.b, p0.b, p1.b
; CHECK-NEXT: addvl sp, sp, #1
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
entry:
  %out = trunc <vscale x 16 x i64> %in to <vscale x 16 x i1>