llvm-mirror/test/CodeGen/ARM/fp16-args.ll
Lucas Prates 4265553ed8 [CodeGen] Properly propagating Calling Convention information when lowering vector arguments
When joining the legal parts of vector arguments back into their original
value during the lowering of formal arguments in SelectionDAGBuilder, the
Calling Convention information was not being propagated for the handling of
each individual part. The same omission did not occur when lowering calls,
causing a mismatch between the two sides.

This patch fixes the issue by properly propagating the Calling
Convention details.
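
For illustration, a minimal sketch of the idea, assuming the calling-convention-aware
TargetLowering hooks used by the part-splitting code. getPartRegisterType below is a
hypothetical helper, not the actual change from D86715, which instead threads the
calling convention through the existing part-joining logic in SelectionDAGBuilder.cpp:

    // Hypothetical helper sketching the intent of the fix; the real change
    // propagates a CallingConv::ID through the part-joining code in
    // SelectionDAGBuilder.cpp rather than introducing a new function.
    #include "llvm/ADT/Optional.h"
    #include "llvm/CodeGen/TargetLowering.h"
    #include "llvm/IR/CallingConv.h"

    using namespace llvm;

    static MVT getPartRegisterType(const TargetLowering &TLI, LLVMContext &Ctx,
                                   EVT ValueVT, Optional<CallingConv::ID> CC) {
      // When the calling convention is known, consult the CC-aware hook so
      // that formal-argument lowering and call lowering agree on how a
      // vector such as <4 x half> is split into register-sized parts.
      if (CC)
        return TLI.getRegisterTypeForCallingConv(Ctx, *CC, ValueVT);
      // Without the CC information the generic register type is used, which
      // is where the two sides could previously diverge.
      return TLI.getRegisterType(Ctx, ValueVT);
    }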

This fixes Bugzilla #47001.

Reviewed By: arsenm

Differential Revision: https://reviews.llvm.org/D86715
2020-08-27 17:01:10 +01:00


; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=armv7a--none-eabi -float-abi soft -mattr=+fp16 < %s | FileCheck %s --check-prefix=SOFT
; RUN: llc -mtriple=armv7a--none-eabi -float-abi hard -mattr=+fp16 < %s | FileCheck %s --check-prefix=HARD
; RUN: llc -mtriple=armv7a--none-eabi -float-abi soft -mattr=+fullfp16 < %s | FileCheck %s --check-prefix=FULL-SOFT --check-prefix=FULL-SOFT-LE
; RUN: llc -mtriple=armv7a--none-eabi -float-abi hard -mattr=+fullfp16 < %s | FileCheck %s --check-prefix=FULL-HARD --check-prefix=FULL-HARD-LE
; RUN: llc -mtriple=armv7aeb--none-eabi -float-abi soft -mattr=+fp16 < %s | FileCheck %s --check-prefix=SOFT
; RUN: llc -mtriple=armv7aeb--none-eabi -float-abi hard -mattr=+fp16 < %s | FileCheck %s --check-prefix=HARD
; RUN: llc -mtriple=armv7aeb--none-eabi -float-abi soft -mattr=+fullfp16 < %s | FileCheck %s --check-prefix=FULL-SOFT --check-prefix=FULL-SOFT-BE
; RUN: llc -mtriple=armv7aeb--none-eabi -float-abi hard -mattr=+fullfp16 < %s | FileCheck %s --check-prefix=FULL-HARD --check-prefix=FULL-HARD-BE
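; The RUN lines cover the cross product of endianness (armv7a vs armv7aeb),
; float ABI (soft vs hard) and FP16 support. With the soft float ABI,
; arguments arrive in GPRs (r0-r3); with the hard float ABI they arrive
; directly in FP registers. +fp16 provides only f32<->f16 conversion
; instructions, so half values are extended, operated on as f32 and
; truncated back (vcvtb), while +fullfp16 provides native f16 arithmetic
; such as vadd.f16.
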
define half @foo(half %a, half %b) {
; SOFT-LABEL: foo:
; SOFT: @ %bb.0: @ %entry
; SOFT-NEXT: vmov s0, r0
; SOFT-NEXT: vmov s2, r1
; SOFT-NEXT: vcvtb.f32.f16 s0, s0
; SOFT-NEXT: vcvtb.f32.f16 s2, s2
; SOFT-NEXT: vadd.f32 s0, s0, s2
; SOFT-NEXT: vcvtb.f16.f32 s0, s0
; SOFT-NEXT: vmov r0, s0
; SOFT-NEXT: bx lr
;
; HARD-LABEL: foo:
; HARD: @ %bb.0: @ %entry
; HARD-NEXT: vcvtb.f32.f16 s2, s1
; HARD-NEXT: vcvtb.f32.f16 s0, s0
; HARD-NEXT: vadd.f32 s0, s0, s2
; HARD-NEXT: vcvtb.f16.f32 s0, s0
; HARD-NEXT: bx lr
;
; FULL-SOFT-LABEL: foo:
; FULL-SOFT: @ %bb.0: @ %entry
; FULL-SOFT-NEXT: vmov.f16 s0, r1
; FULL-SOFT-NEXT: vmov.f16 s2, r0
; FULL-SOFT-NEXT: vadd.f16 s0, s2, s0
; FULL-SOFT-NEXT: vmov r0, s0
; FULL-SOFT-NEXT: bx lr
;
; FULL-HARD-LABEL: foo:
; FULL-HARD: @ %bb.0: @ %entry
; FULL-HARD-NEXT: vadd.f16 s0, s0, s1
; FULL-HARD-NEXT: bx lr
entry:
%0 = fadd half %a, %b
ret half %0
}
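
; foo_vec exercises the vector-argument path described in the commit
; message: the <4 x half> argument is legalized into several parts, and the
; Calling Convention must be propagated when those parts are rejoined so
; that formal-argument lowering matches call lowering (PR47001).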
define <4 x half> @foo_vec(<4 x half> %a) {
; SOFT-LABEL: foo_vec:
; SOFT: @ %bb.0: @ %entry
; SOFT-NEXT: vmov s0, r3
; SOFT-NEXT: vmov s2, r1
; SOFT-NEXT: vcvtb.f32.f16 s0, s0
; SOFT-NEXT: vmov s4, r0
; SOFT-NEXT: vcvtb.f32.f16 s2, s2
; SOFT-NEXT: vmov s6, r2
; SOFT-NEXT: vcvtb.f32.f16 s4, s4
; SOFT-NEXT: vcvtb.f32.f16 s6, s6
; SOFT-NEXT: vadd.f32 s0, s0, s0
; SOFT-NEXT: vadd.f32 s2, s2, s2
; SOFT-NEXT: vcvtb.f16.f32 s0, s0
; SOFT-NEXT: vadd.f32 s4, s4, s4
; SOFT-NEXT: vcvtb.f16.f32 s2, s2
; SOFT-NEXT: vadd.f32 s6, s6, s6
; SOFT-NEXT: vcvtb.f16.f32 s4, s4
; SOFT-NEXT: vcvtb.f16.f32 s6, s6
; SOFT-NEXT: vmov r0, s4
; SOFT-NEXT: vmov r1, s2
; SOFT-NEXT: vmov r2, s6
; SOFT-NEXT: vmov r3, s0
; SOFT-NEXT: bx lr
;
; HARD-LABEL: foo_vec:
; HARD: @ %bb.0: @ %entry
; HARD-NEXT: vcvtb.f32.f16 s4, s3
; HARD-NEXT: vcvtb.f32.f16 s2, s2
; HARD-NEXT: vcvtb.f32.f16 s6, s1
; HARD-NEXT: vcvtb.f32.f16 s0, s0
; HARD-NEXT: vadd.f32 s2, s2, s2
; HARD-NEXT: vadd.f32 s0, s0, s0
; HARD-NEXT: vcvtb.f16.f32 s2, s2
; HARD-NEXT: vadd.f32 s4, s4, s4
; HARD-NEXT: vcvtb.f16.f32 s0, s0
; HARD-NEXT: vadd.f32 s6, s6, s6
; HARD-NEXT: vcvtb.f16.f32 s3, s4
; HARD-NEXT: vcvtb.f16.f32 s1, s6
; HARD-NEXT: bx lr
;
; FULL-SOFT-LE-LABEL: foo_vec:
; FULL-SOFT-LE: @ %bb.0: @ %entry
; FULL-SOFT-LE-NEXT: vmov d16, r0, r1
; FULL-SOFT-LE-NEXT: vadd.f16 d16, d16, d16
; FULL-SOFT-LE-NEXT: vmov r0, r1, d16
; FULL-SOFT-LE-NEXT: bx lr
;
; FULL-HARD-LE-LABEL: foo_vec:
; FULL-HARD-LE: @ %bb.0: @ %entry
; FULL-HARD-LE-NEXT: vadd.f16 d0, d0, d0
; FULL-HARD-LE-NEXT: bx lr
;
; FULL-SOFT-BE-LABEL: foo_vec:
; FULL-SOFT-BE: @ %bb.0: @ %entry
; FULL-SOFT-BE-NEXT: vmov d16, r1, r0
; FULL-SOFT-BE-NEXT: vrev64.16 d16, d16
; FULL-SOFT-BE-NEXT: vadd.f16 d16, d16, d16
; FULL-SOFT-BE-NEXT: vrev64.16 d16, d16
; FULL-SOFT-BE-NEXT: vmov r1, r0, d16
; FULL-SOFT-BE-NEXT: bx lr
;
; FULL-HARD-BE-LABEL: foo_vec:
; FULL-HARD-BE: @ %bb.0: @ %entry
; FULL-HARD-BE-NEXT: vrev64.16 d16, d0
; FULL-HARD-BE-NEXT: vadd.f16 d16, d16, d16
; FULL-HARD-BE-NEXT: vrev64.16 d0, d16
; FULL-HARD-BE-NEXT: bx lr
entry:
%0 = fadd <4 x half> %a, %a
ret <4 x half> %0
}