1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-11-23 11:13:28 +01:00
llvm-mirror/test/CodeGen/Thumb2/mve-vecreduce-fminmax.ll
Jinsong Ji 7ed143a4a1 [AsmPrinter] Print FP constant in hexadecimal form instead
Printing floating point number in decimal is inconvenient for humans.
Verbose asm output will print out floating point values in comments, it
helps.

But in lots of cases, users still need additional work to convert the
decimal back to hex or binary to check the bit patterns,
especially when there are small precision differences.

Hexadecimal form is one of the supported forms in LLVM IR, and easier for
debugging.

This patch tries to print all FP constants in hex form instead.

Reviewed By: RKSimon

Differential Revision: https://reviews.llvm.org/D73566
2020-02-07 16:00:55 +00:00

2265 lines
81 KiB
LLVM

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve.fp,+fp64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-FP
; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve,+fullfp16,+fp64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-NOFP
; FIXME minnum nonan X, +Inf -> X ?
; Fast-math fmin reduction of <2 x float>: lowered to pairwise vminnm.f32
; folding in a +Inf identity constant loaded from the .LCPI0_0 pool entry.
define arm_aapcs_vfpcc float @fmin_v2f32(<2 x float> %x) {
; CHECK-LABEL: fmin_v2f32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vldr s4, .LCPI0_0
; CHECK-NEXT: vminnm.f32 s0, s0, s1
; CHECK-NEXT: vminnm.f32 s0, s0, s4
; CHECK-NEXT: vminnm.f32 s0, s0, s4
; CHECK-NEXT: bx lr
; CHECK-NEXT: .p2align 2
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI0_0:
; CHECK-NEXT: .long 0x7f800000 @ float +Inf
entry:
%z = call fast float @llvm.experimental.vector.reduce.fmin.v2f32(<2 x float> %x)
ret float %z
}
; Fast-math fmin reduction of <4 x float>: a clean chain of three scalar
; vminnm.f32 over the lanes, with no constant-pool identity needed.
define arm_aapcs_vfpcc float @fmin_v4f32(<4 x float> %x) {
; CHECK-LABEL: fmin_v4f32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vminnm.f32 s4, s0, s1
; CHECK-NEXT: vminnm.f32 s4, s4, s2
; CHECK-NEXT: vminnm.f32 s0, s4, s3
; CHECK-NEXT: bx lr
entry:
%z = call fast float @llvm.experimental.vector.reduce.fmin.v4f32(<4 x float> %x)
ret float %z
}
; FIXME fminnum (vector) -> fminnum (scalar) ?
; Fast-math fmin reduction of <8 x float>. With MVE-FP a vector vminnm.f32
; halves the input first; without it (CHECK-NOFP) the two halves are merged
; with vcmp/vselgt pairs before the scalar vminnm chain (see FIXME above).
define arm_aapcs_vfpcc float @fmin_v8f32(<8 x float> %x) {
; CHECK-FP-LABEL: fmin_v8f32:
; CHECK-FP: @ %bb.0: @ %entry
; CHECK-FP-NEXT: vminnm.f32 q0, q0, q1
; CHECK-FP-NEXT: vminnm.f32 s4, s0, s1
; CHECK-FP-NEXT: vminnm.f32 s4, s4, s2
; CHECK-FP-NEXT: vminnm.f32 s0, s4, s3
; CHECK-FP-NEXT: bx lr
;
; CHECK-NOFP-LABEL: fmin_v8f32:
; CHECK-NOFP: @ %bb.0: @ %entry
; CHECK-NOFP-NEXT: vcmp.f32 s5, s1
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f32 s4, s0
; CHECK-NOFP-NEXT: vselgt.f32 s8, s1, s5
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f32 s6, s2
; CHECK-NOFP-NEXT: vselgt.f32 s10, s0, s4
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f32 s7, s3
; CHECK-NOFP-NEXT: vselgt.f32 s12, s2, s6
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f32 s0, s3, s7
; CHECK-NOFP-NEXT: vminnm.f32 s2, s10, s8
; CHECK-NOFP-NEXT: vminnm.f32 s2, s2, s12
; CHECK-NOFP-NEXT: vminnm.f32 s0, s2, s0
; CHECK-NOFP-NEXT: bx lr
entry:
%z = call fast float @llvm.experimental.vector.reduce.fmin.v8f32(<8 x float> %x)
ret float %z
}
; Fast-math fmin reduction of <4 x half>: vmovx.f16 extracts the odd lanes,
; then a vminnm.f16 chain folds in a +Inf identity from .LCPI3_0 (note the
; repeated redundant vminnm against s2 — room for improvement).
define arm_aapcs_vfpcc half @fmin_v4f16(<4 x half> %x) {
; CHECK-LABEL: fmin_v4f16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmovx.f16 s4, s0
; CHECK-NEXT: vminnm.f16 s4, s0, s4
; CHECK-NEXT: vmovx.f16 s0, s1
; CHECK-NEXT: vminnm.f16 s4, s4, s1
; CHECK-NEXT: vldr.16 s2, .LCPI3_0
; CHECK-NEXT: vminnm.f16 s0, s4, s0
; CHECK-NEXT: vminnm.f16 s0, s0, s2
; CHECK-NEXT: vminnm.f16 s0, s0, s2
; CHECK-NEXT: vminnm.f16 s0, s0, s2
; CHECK-NEXT: vminnm.f16 s0, s0, s2
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
; CHECK-NEXT: .p2align 1
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI3_0:
; CHECK-NEXT: .short 0x7c00 @ half +Inf
entry:
%z = call fast half @llvm.experimental.vector.reduce.fmin.v4f16(<4 x half> %x)
ret half %z
}
; Fast-math fmin reduction of <8 x half>: all eight lanes reduced by a
; scalar vminnm.f16 chain, odd lanes extracted with vmovx.f16; result is
; stored via the sret-style pointer in r0.
define arm_aapcs_vfpcc half @fmin_v8f16(<8 x half> %x) {
; CHECK-LABEL: fmin_v8f16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmovx.f16 s4, s0
; CHECK-NEXT: vmovx.f16 s6, s1
; CHECK-NEXT: vminnm.f16 s4, s0, s4
; CHECK-NEXT: vmovx.f16 s0, s3
; CHECK-NEXT: vminnm.f16 s4, s4, s1
; CHECK-NEXT: vminnm.f16 s4, s4, s6
; CHECK-NEXT: vmovx.f16 s6, s2
; CHECK-NEXT: vminnm.f16 s4, s4, s2
; CHECK-NEXT: vminnm.f16 s4, s4, s6
; CHECK-NEXT: vminnm.f16 s4, s4, s3
; CHECK-NEXT: vminnm.f16 s0, s4, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
entry:
%z = call fast half @llvm.experimental.vector.reduce.fmin.v8f16(<8 x half> %x)
ret half %z
}
; Fast-math fmin reduction of <16 x half>. With MVE-FP the two q-registers
; are first combined by one vector vminnm.f16; without it (CHECK-NOFP) each
; lane pair is merged via vcmp/vselgt before the scalar vminnm chain.
define arm_aapcs_vfpcc half @fmin_v16f16(<16 x half> %x) {
; CHECK-FP-LABEL: fmin_v16f16:
; CHECK-FP: @ %bb.0: @ %entry
; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vmovx.f16 s4, s0
; CHECK-FP-NEXT: vmovx.f16 s6, s1
; CHECK-FP-NEXT: vminnm.f16 s4, s0, s4
; CHECK-FP-NEXT: vmovx.f16 s0, s3
; CHECK-FP-NEXT: vminnm.f16 s4, s4, s1
; CHECK-FP-NEXT: vminnm.f16 s4, s4, s6
; CHECK-FP-NEXT: vmovx.f16 s6, s2
; CHECK-FP-NEXT: vminnm.f16 s4, s4, s2
; CHECK-FP-NEXT: vminnm.f16 s4, s4, s6
; CHECK-FP-NEXT: vminnm.f16 s4, s4, s3
; CHECK-FP-NEXT: vminnm.f16 s0, s4, s0
; CHECK-FP-NEXT: vstr.16 s0, [r0]
; CHECK-FP-NEXT: bx lr
;
; CHECK-NOFP-LABEL: fmin_v16f16:
; CHECK-NOFP: @ %bb.0: @ %entry
; CHECK-NOFP-NEXT: vmovx.f16 s8, s4
; CHECK-NOFP-NEXT: vmovx.f16 s10, s0
; CHECK-NOFP-NEXT: vcmp.f16 s8, s10
; CHECK-NOFP-NEXT: vmovx.f16 s12, s1
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s4, s0
; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s5, s1
; CHECK-NOFP-NEXT: vselgt.f16 s10, s0, s4
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vminnm.f16 s8, s10, s8
; CHECK-NOFP-NEXT: vmovx.f16 s4, s7
; CHECK-NOFP-NEXT: vmovx.f16 s0, s3
; CHECK-NOFP-NEXT: vselgt.f16 s10, s1, s5
; CHECK-NOFP-NEXT: vminnm.f16 s8, s8, s10
; CHECK-NOFP-NEXT: vmovx.f16 s10, s5
; CHECK-NOFP-NEXT: vcmp.f16 s10, s12
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s6, s2
; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vminnm.f16 s8, s8, s10
; CHECK-NOFP-NEXT: vmovx.f16 s12, s2
; CHECK-NOFP-NEXT: vselgt.f16 s10, s2, s6
; CHECK-NOFP-NEXT: vminnm.f16 s8, s8, s10
; CHECK-NOFP-NEXT: vmovx.f16 s10, s6
; CHECK-NOFP-NEXT: vcmp.f16 s10, s12
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s7, s3
; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s4, s0
; CHECK-NOFP-NEXT: vminnm.f16 s8, s8, s10
; CHECK-NOFP-NEXT: vselgt.f16 s10, s3, s7
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vminnm.f16 s8, s8, s10
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
; CHECK-NOFP-NEXT: vminnm.f16 s0, s8, s0
; CHECK-NOFP-NEXT: vstr.16 s0, [r0]
; CHECK-NOFP-NEXT: bx lr
entry:
%z = call fast half @llvm.experimental.vector.reduce.fmin.v16f16(<16 x half> %x)
ret half %z
}
; Degenerate fast-math fmin reduction of <1 x double>: a single-element
; reduction is the identity, so the lowering is just a return.
define arm_aapcs_vfpcc double @fmin_v1f64(<1 x double> %x) {
; CHECK-LABEL: fmin_v1f64:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: bx lr
entry:
%z = call fast double @llvm.experimental.vector.reduce.fmin.v1f64(<1 x double> %x)
ret double %z
}
; Fast-math fmin reduction of <2 x double>: a single scalar vminnm.f64.
define arm_aapcs_vfpcc double @fmin_v2f64(<2 x double> %x) {
; CHECK-LABEL: fmin_v2f64:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vminnm.f64 d0, d0, d1
; CHECK-NEXT: bx lr
entry:
%z = call fast double @llvm.experimental.vector.reduce.fmin.v2f64(<2 x double> %x)
ret double %z
}
; Fast-math fmin reduction of <4 x double>: MVE has no f64 vector min, so
; lane pairs are merged with vcmp/vselgt and finished with vminnm.f64.
define arm_aapcs_vfpcc double @fmin_v4f64(<4 x double> %x) {
; CHECK-LABEL: fmin_v4f64:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vcmp.f64 d3, d1
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vcmp.f64 d2, d0
; CHECK-NEXT: vselgt.f64 d4, d1, d3
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f64 d0, d0, d2
; CHECK-NEXT: vminnm.f64 d0, d0, d4
; CHECK-NEXT: bx lr
entry:
%z = call fast double @llvm.experimental.vector.reduce.fmin.v4f64(<4 x double> %x)
ret double %z
}
; FIXME should not be vminnm
; FIXME better reductions (no vmovs/vdups)
; Same reduction WITHOUT fast-math flags: the FP variant still emits vminnm
; (flagged as wrong by the FIXME above, since vminnm is not NaN-propagating
; in the required way); the NOFP variant uses vcmp/vselgt.
define arm_aapcs_vfpcc float @fmin_v2f32_nofast(<2 x float> %x) {
; CHECK-FP-LABEL: fmin_v2f32_nofast:
; CHECK-FP: @ %bb.0: @ %entry
; CHECK-FP-NEXT: vmov.32 r0, q0[1]
; CHECK-FP-NEXT: vdup.32 q1, r0
; CHECK-FP-NEXT: vminnm.f32 q0, q0, q1
; CHECK-FP-NEXT: @ kill: def $s0 killed $s0 killed $q0
; CHECK-FP-NEXT: bx lr
;
; CHECK-NOFP-LABEL: fmin_v2f32_nofast:
; CHECK-NOFP: @ %bb.0: @ %entry
; CHECK-NOFP-NEXT: vmov.32 r0, q0[1]
; CHECK-NOFP-NEXT: vdup.32 q1, r0
; CHECK-NOFP-NEXT: vcmp.f32 s4, s0
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s4
; CHECK-NOFP-NEXT: bx lr
entry:
%z = call float @llvm.experimental.vector.reduce.fmin.v2f32(<2 x float> %x)
ret float %z
}
; Non-fast fmin reduction of <4 x float>: log2 shuffle-and-min tree in the
; FP variant (vmov/vdup shuffles, still vminnm — see FIXME); explicit
; vcmp/vselgt ordering in the NOFP variant.
define arm_aapcs_vfpcc float @fmin_v4f32_nofast(<4 x float> %x) {
; CHECK-FP-LABEL: fmin_v4f32_nofast:
; CHECK-FP: @ %bb.0: @ %entry
; CHECK-FP-NEXT: vmov.f64 d2, d1
; CHECK-FP-NEXT: vmov.f32 s5, s3
; CHECK-FP-NEXT: vminnm.f32 q0, q0, q1
; CHECK-FP-NEXT: vmov.32 r0, q0[1]
; CHECK-FP-NEXT: vdup.32 q1, r0
; CHECK-FP-NEXT: vminnm.f32 q0, q0, q1
; CHECK-FP-NEXT: @ kill: def $s0 killed $s0 killed $q0
; CHECK-FP-NEXT: bx lr
;
; CHECK-NOFP-LABEL: fmin_v4f32_nofast:
; CHECK-NOFP: @ %bb.0: @ %entry
; CHECK-NOFP-NEXT: vcmp.f32 s3, s1
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vmov.f64 d2, d1
; CHECK-NOFP-NEXT: vmov.f32 s5, s3
; CHECK-NOFP-NEXT: vcmp.f32 s4, s0
; CHECK-NOFP-NEXT: vselgt.f32 s8, s1, s3
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s4
; CHECK-NOFP-NEXT: vcmp.f32 s8, s0
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s8
; CHECK-NOFP-NEXT: bx lr
entry:
%z = call float @llvm.experimental.vector.reduce.fmin.v4f32(<4 x float> %x)
ret float %z
}
; Non-fast fmin reduction of <8 x float>: FP variant halves with vector
; vminnm plus shuffle steps; NOFP variant is a full vcmp/vselgt tree.
define arm_aapcs_vfpcc float @fmin_v8f32_nofast(<8 x float> %x) {
; CHECK-FP-LABEL: fmin_v8f32_nofast:
; CHECK-FP: @ %bb.0: @ %entry
; CHECK-FP-NEXT: vminnm.f32 q0, q0, q1
; CHECK-FP-NEXT: vmov.f64 d2, d1
; CHECK-FP-NEXT: vmov.f32 s5, s3
; CHECK-FP-NEXT: vminnm.f32 q0, q0, q1
; CHECK-FP-NEXT: vmov.32 r0, q0[1]
; CHECK-FP-NEXT: vdup.32 q1, r0
; CHECK-FP-NEXT: vminnm.f32 q0, q0, q1
; CHECK-FP-NEXT: @ kill: def $s0 killed $s0 killed $q0
; CHECK-FP-NEXT: bx lr
;
; CHECK-NOFP-LABEL: fmin_v8f32_nofast:
; CHECK-NOFP: @ %bb.0: @ %entry
; CHECK-NOFP-NEXT: vcmp.f32 s7, s3
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f32 s5, s1
; CHECK-NOFP-NEXT: vselgt.f32 s8, s3, s7
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f32 s6, s2
; CHECK-NOFP-NEXT: vselgt.f32 s10, s1, s5
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f32 s4, s0
; CHECK-NOFP-NEXT: vselgt.f32 s12, s2, s6
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f32 s8, s10
; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s4
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f32 s12, s0
; CHECK-NOFP-NEXT: vselgt.f32 s2, s10, s8
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s12
; CHECK-NOFP-NEXT: vcmp.f32 s2, s0
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s2
; CHECK-NOFP-NEXT: bx lr
entry:
%z = call float @llvm.experimental.vector.reduce.fmin.v8f32(<8 x float> %x)
ret float %z
}
; Non-fast fmin reduction of <4 x half>: FP variant shuffles with
; vmov.32/vdup and vmov.u16/vdup.16 between vector vminnm.f16 steps;
; NOFP variant keeps ordering with vcmp/vselgt.
define arm_aapcs_vfpcc half @fmin_v4f16_nofast(<4 x half> %x) {
; CHECK-FP-LABEL: fmin_v4f16_nofast:
; CHECK-FP: @ %bb.0: @ %entry
; CHECK-FP-NEXT: vmov.32 r1, q0[1]
; CHECK-FP-NEXT: vdup.32 q1, r1
; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vmov.u16 r1, q0[1]
; CHECK-FP-NEXT: vdup.16 q1, r1
; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vstr.16 s0, [r0]
; CHECK-FP-NEXT: bx lr
;
; CHECK-NOFP-LABEL: fmin_v4f16_nofast:
; CHECK-NOFP: @ %bb.0: @ %entry
; CHECK-NOFP-NEXT: vmov.32 r1, q0[1]
; CHECK-NOFP-NEXT: vmovx.f16 s10, s0
; CHECK-NOFP-NEXT: vdup.32 q1, r1
; CHECK-NOFP-NEXT: vmovx.f16 s8, s4
; CHECK-NOFP-NEXT: vcmp.f16 s8, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s4, s0
; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
; CHECK-NOFP-NEXT: vcmp.f16 s8, s0
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s8
; CHECK-NOFP-NEXT: vstr.16 s0, [r0]
; CHECK-NOFP-NEXT: bx lr
entry:
%z = call half @llvm.experimental.vector.reduce.fmin.v4f16(<4 x half> %x)
ret half %z
}
; Non-fast fmin reduction of <8 x half>: FP variant is a three-level
; shuffle-and-vminnm tree; NOFP variant interleaves vcmp/vselgt over both
; even and odd (vmovx-extracted) half lanes.
define arm_aapcs_vfpcc half @fmin_v8f16_nofast(<8 x half> %x) {
; CHECK-FP-LABEL: fmin_v8f16_nofast:
; CHECK-FP: @ %bb.0: @ %entry
; CHECK-FP-NEXT: vmov.f64 d2, d1
; CHECK-FP-NEXT: vmov.f32 s5, s3
; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vmov.32 r1, q0[1]
; CHECK-FP-NEXT: vdup.32 q1, r1
; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vmov.u16 r1, q0[1]
; CHECK-FP-NEXT: vdup.16 q1, r1
; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vstr.16 s0, [r0]
; CHECK-FP-NEXT: bx lr
;
; CHECK-NOFP-LABEL: fmin_v8f16_nofast:
; CHECK-NOFP: @ %bb.0: @ %entry
; CHECK-NOFP-NEXT: vmovx.f16 s8, s3
; CHECK-NOFP-NEXT: vmovx.f16 s10, s1
; CHECK-NOFP-NEXT: vcmp.f16 s8, s10
; CHECK-NOFP-NEXT: vmov.f64 d2, d1
; CHECK-NOFP-NEXT: vmovx.f16 s12, s0
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vmov.f32 s5, s3
; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
; CHECK-NOFP-NEXT: vmovx.f16 s10, s4
; CHECK-NOFP-NEXT: vcmp.f16 s10, s12
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
; CHECK-NOFP-NEXT: vcmp.f16 s8, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s3, s1
; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s4, s0
; CHECK-NOFP-NEXT: vselgt.f16 s10, s1, s3
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
; CHECK-NOFP-NEXT: vcmp.f16 s10, s0
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s10
; CHECK-NOFP-NEXT: vcmp.f16 s8, s0
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s8
; CHECK-NOFP-NEXT: vstr.16 s0, [r0]
; CHECK-NOFP-NEXT: bx lr
entry:
%z = call half @llvm.experimental.vector.reduce.fmin.v8f16(<8 x half> %x)
ret half %z
}
; Non-fast fmin reduction of <16 x half>: the widest NOFP expansion in this
; file — every lane pair (even and vmovx-extracted odd) is ordered with a
; vcmp/vselgt pair, then the partial minima are merged the same way.
define arm_aapcs_vfpcc half @fmin_v16f16_nofast(<16 x half> %x) {
; CHECK-FP-LABEL: fmin_v16f16_nofast:
; CHECK-FP: @ %bb.0: @ %entry
; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vmov.f64 d2, d1
; CHECK-FP-NEXT: vmov.f32 s5, s3
; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vmov.32 r1, q0[1]
; CHECK-FP-NEXT: vdup.32 q1, r1
; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vmov.u16 r1, q0[1]
; CHECK-FP-NEXT: vdup.16 q1, r1
; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vstr.16 s0, [r0]
; CHECK-FP-NEXT: bx lr
;
; CHECK-NOFP-LABEL: fmin_v16f16_nofast:
; CHECK-NOFP: @ %bb.0: @ %entry
; CHECK-NOFP-NEXT: vmovx.f16 s8, s7
; CHECK-NOFP-NEXT: vmovx.f16 s10, s3
; CHECK-NOFP-NEXT: vcmp.f16 s8, s10
; CHECK-NOFP-NEXT: vmovx.f16 s12, s1
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vmovx.f16 s14, s0
; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
; CHECK-NOFP-NEXT: vmovx.f16 s10, s5
; CHECK-NOFP-NEXT: vcmp.f16 s10, s12
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
; CHECK-NOFP-NEXT: vmovx.f16 s12, s2
; CHECK-NOFP-NEXT: vcmp.f16 s8, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
; CHECK-NOFP-NEXT: vmovx.f16 s10, s6
; CHECK-NOFP-NEXT: vcmp.f16 s10, s12
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
; CHECK-NOFP-NEXT: vmovx.f16 s12, s4
; CHECK-NOFP-NEXT: vcmp.f16 s12, s14
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s12, s14, s12
; CHECK-NOFP-NEXT: vcmp.f16 s10, s12
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
; CHECK-NOFP-NEXT: vcmp.f16 s8, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s7, s3
; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s5, s1
; CHECK-NOFP-NEXT: vselgt.f16 s10, s3, s7
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s12, s1, s5
; CHECK-NOFP-NEXT: vcmp.f16 s10, s12
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s6, s2
; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s4, s0
; CHECK-NOFP-NEXT: vselgt.f16 s12, s2, s6
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
; CHECK-NOFP-NEXT: vcmp.f16 s12, s0
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s12
; CHECK-NOFP-NEXT: vcmp.f16 s10, s0
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s10
; CHECK-NOFP-NEXT: vcmp.f16 s8, s0
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s8
; CHECK-NOFP-NEXT: vstr.16 s0, [r0]
; CHECK-NOFP-NEXT: bx lr
entry:
%z = call half @llvm.experimental.vector.reduce.fmin.v16f16(<16 x half> %x)
ret half %z
}
; Degenerate non-fast fmin reduction of <1 x double>: still just a return.
define arm_aapcs_vfpcc double @fmin_v1f64_nofast(<1 x double> %x) {
; CHECK-LABEL: fmin_v1f64_nofast:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: bx lr
entry:
%z = call double @llvm.experimental.vector.reduce.fmin.v1f64(<1 x double> %x)
ret double %z
}
; Non-fast fmin reduction of <2 x double>: ordered min via vcmp + vselgt
; instead of vminnm, preserving the intrinsic's NaN semantics.
define arm_aapcs_vfpcc double @fmin_v2f64_nofast(<2 x double> %x) {
; CHECK-LABEL: fmin_v2f64_nofast:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vcmp.f64 d1, d0
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f64 d0, d0, d1
; CHECK-NEXT: bx lr
entry:
%z = call double @llvm.experimental.vector.reduce.fmin.v2f64(<2 x double> %x)
ret double %z
}
; Non-fast fmin reduction of <4 x double>: three ordered vcmp/vselgt steps,
; no vminnm at all (unlike the fast-math fmin_v4f64 above).
define arm_aapcs_vfpcc double @fmin_v4f64_nofast(<4 x double> %x) {
; CHECK-LABEL: fmin_v4f64_nofast:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vcmp.f64 d3, d1
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vcmp.f64 d2, d0
; CHECK-NEXT: vselgt.f64 d4, d1, d3
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f64 d0, d0, d2
; CHECK-NEXT: vcmp.f64 d4, d0
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f64 d0, d0, d4
; CHECK-NEXT: bx lr
entry:
%z = call double @llvm.experimental.vector.reduce.fmin.v4f64(<4 x double> %x)
ret double %z
}
; Fast-math fmin reduction of <2 x float> folded with a scalar accumulator:
; the trailing fcmp olt + select on %y becomes one extra vminnm.f32 (s4).
define arm_aapcs_vfpcc float @fmin_v2f32_acc(<2 x float> %x, float %y) {
; CHECK-LABEL: fmin_v2f32_acc:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vldr s6, .LCPI18_0
; CHECK-NEXT: vminnm.f32 s0, s0, s1
; CHECK-NEXT: vminnm.f32 s0, s0, s6
; CHECK-NEXT: vminnm.f32 s0, s0, s6
; CHECK-NEXT: vminnm.f32 s0, s4, s0
; CHECK-NEXT: bx lr
; CHECK-NEXT: .p2align 2
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI18_0:
; CHECK-NEXT: .long 0x7f800000 @ float +Inf
entry:
%z = call fast float @llvm.experimental.vector.reduce.fmin.v2f32(<2 x float> %x)
%c = fcmp fast olt float %y, %z
%r = select i1 %c, float %y, float %z
ret float %r
}
; Fast-math fmin reduction of <4 x float> plus scalar accumulator %y;
; the final select lowers to one more vminnm.f32 against s4.
define arm_aapcs_vfpcc float @fmin_v4f32_acc(<4 x float> %x, float %y) {
; CHECK-LABEL: fmin_v4f32_acc:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vminnm.f32 s6, s0, s1
; CHECK-NEXT: vminnm.f32 s6, s6, s2
; CHECK-NEXT: vminnm.f32 s0, s6, s3
; CHECK-NEXT: vminnm.f32 s0, s4, s0
; CHECK-NEXT: bx lr
entry:
%z = call fast float @llvm.experimental.vector.reduce.fmin.v4f32(<4 x float> %x)
%c = fcmp fast olt float %y, %z
%r = select i1 %c, float %y, float %z
ret float %r
}
; Fast-math fmin reduction of <8 x float> plus scalar accumulator %y (in
; s8): FP variant halves with vector vminnm first; NOFP variant merges the
; halves with vcmp/vselgt, then both finish with vminnm against s8.
define arm_aapcs_vfpcc float @fmin_v8f32_acc(<8 x float> %x, float %y) {
; CHECK-FP-LABEL: fmin_v8f32_acc:
; CHECK-FP: @ %bb.0: @ %entry
; CHECK-FP-NEXT: vminnm.f32 q0, q0, q1
; CHECK-FP-NEXT: vminnm.f32 s4, s0, s1
; CHECK-FP-NEXT: vminnm.f32 s4, s4, s2
; CHECK-FP-NEXT: vminnm.f32 s0, s4, s3
; CHECK-FP-NEXT: vminnm.f32 s0, s8, s0
; CHECK-FP-NEXT: bx lr
;
; CHECK-NOFP-LABEL: fmin_v8f32_acc:
; CHECK-NOFP: @ %bb.0: @ %entry
; CHECK-NOFP-NEXT: vcmp.f32 s5, s1
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f32 s4, s0
; CHECK-NOFP-NEXT: vselgt.f32 s10, s1, s5
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f32 s6, s2
; CHECK-NOFP-NEXT: vselgt.f32 s12, s0, s4
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f32 s7, s3
; CHECK-NOFP-NEXT: vselgt.f32 s14, s2, s6
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f32 s0, s3, s7
; CHECK-NOFP-NEXT: vminnm.f32 s2, s12, s10
; CHECK-NOFP-NEXT: vminnm.f32 s2, s2, s14
; CHECK-NOFP-NEXT: vminnm.f32 s0, s2, s0
; CHECK-NOFP-NEXT: vminnm.f32 s0, s8, s0
; CHECK-NOFP-NEXT: bx lr
entry:
%z = call fast float @llvm.experimental.vector.reduce.fmin.v8f32(<8 x float> %x)
%c = fcmp fast olt float %y, %z
%r = select i1 %c, float %y, float %z
ret float %r
}
; Fast-math fmin reduction of <4 x half> combined with an in-memory half
; accumulator: %y is loaded from %yy, min'd in, and stored back via [r0].
define arm_aapcs_vfpcc void @fmin_v4f16_acc(<4 x half> %x, half* %yy) {
; CHECK-LABEL: fmin_v4f16_acc:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmovx.f16 s4, s0
; CHECK-NEXT: vminnm.f16 s4, s0, s4
; CHECK-NEXT: vmovx.f16 s0, s1
; CHECK-NEXT: vminnm.f16 s4, s4, s1
; CHECK-NEXT: vldr.16 s2, .LCPI21_0
; CHECK-NEXT: vminnm.f16 s0, s4, s0
; CHECK-NEXT: vminnm.f16 s0, s0, s2
; CHECK-NEXT: vminnm.f16 s0, s0, s2
; CHECK-NEXT: vminnm.f16 s0, s0, s2
; CHECK-NEXT: vminnm.f16 s0, s0, s2
; CHECK-NEXT: vldr.16 s2, [r0]
; CHECK-NEXT: vminnm.f16 s0, s2, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
; CHECK-NEXT: .p2align 1
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI21_0:
; CHECK-NEXT: .short 0x7c00 @ half +Inf
entry:
%y = load half, half* %yy
%z = call fast half @llvm.experimental.vector.reduce.fmin.v4f16(<4 x half> %x)
%c = fcmp fast olt half %y, %z
%r = select i1 %c, half %y, half %z
store half %r, half* %yy
ret void
}
; Fast-math fmin reduction of <8 x half> with an in-memory half accumulator
; loaded from and stored back to [r0].
define arm_aapcs_vfpcc void @fmin_v8f16_acc(<8 x half> %x, half* %yy) {
; CHECK-LABEL: fmin_v8f16_acc:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmovx.f16 s4, s0
; CHECK-NEXT: vmovx.f16 s6, s1
; CHECK-NEXT: vminnm.f16 s4, s0, s4
; CHECK-NEXT: vmovx.f16 s0, s3
; CHECK-NEXT: vminnm.f16 s4, s4, s1
; CHECK-NEXT: vminnm.f16 s4, s4, s6
; CHECK-NEXT: vmovx.f16 s6, s2
; CHECK-NEXT: vminnm.f16 s4, s4, s2
; CHECK-NEXT: vldr.16 s2, [r0]
; CHECK-NEXT: vminnm.f16 s4, s4, s6
; CHECK-NEXT: vminnm.f16 s4, s4, s3
; CHECK-NEXT: vminnm.f16 s0, s4, s0
; CHECK-NEXT: vminnm.f16 s0, s2, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
entry:
%y = load half, half* %yy
%z = call fast half @llvm.experimental.vector.reduce.fmin.v8f16(<8 x half> %x)
%c = fcmp fast olt half %y, %z
%r = select i1 %c, half %y, half %z
store half %r, half* %yy
ret void
}
; Fast-math fmin reduction of <16 x half> with an in-memory half
; accumulator: FP variant halves with vector vminnm.f16; NOFP variant
; merges the halves lane-by-lane with vcmp/vselgt; both finish by min'ing
; with the value loaded from [r0] and storing back.
define arm_aapcs_vfpcc void @fmin_v16f16_acc(<16 x half> %x, half* %yy) {
; CHECK-FP-LABEL: fmin_v16f16_acc:
; CHECK-FP: @ %bb.0: @ %entry
; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vmovx.f16 s4, s0
; CHECK-FP-NEXT: vmovx.f16 s6, s1
; CHECK-FP-NEXT: vminnm.f16 s4, s0, s4
; CHECK-FP-NEXT: vmovx.f16 s0, s3
; CHECK-FP-NEXT: vminnm.f16 s4, s4, s1
; CHECK-FP-NEXT: vminnm.f16 s4, s4, s6
; CHECK-FP-NEXT: vmovx.f16 s6, s2
; CHECK-FP-NEXT: vminnm.f16 s4, s4, s2
; CHECK-FP-NEXT: vldr.16 s2, [r0]
; CHECK-FP-NEXT: vminnm.f16 s4, s4, s6
; CHECK-FP-NEXT: vminnm.f16 s4, s4, s3
; CHECK-FP-NEXT: vminnm.f16 s0, s4, s0
; CHECK-FP-NEXT: vminnm.f16 s0, s2, s0
; CHECK-FP-NEXT: vstr.16 s0, [r0]
; CHECK-FP-NEXT: bx lr
;
; CHECK-NOFP-LABEL: fmin_v16f16_acc:
; CHECK-NOFP: @ %bb.0: @ %entry
; CHECK-NOFP-NEXT: vmovx.f16 s8, s4
; CHECK-NOFP-NEXT: vmovx.f16 s10, s0
; CHECK-NOFP-NEXT: vcmp.f16 s8, s10
; CHECK-NOFP-NEXT: vmovx.f16 s12, s1
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s4, s0
; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s5, s1
; CHECK-NOFP-NEXT: vselgt.f16 s10, s0, s4
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vminnm.f16 s8, s10, s8
; CHECK-NOFP-NEXT: vmovx.f16 s4, s7
; CHECK-NOFP-NEXT: vmovx.f16 s0, s3
; CHECK-NOFP-NEXT: vselgt.f16 s10, s1, s5
; CHECK-NOFP-NEXT: vminnm.f16 s8, s8, s10
; CHECK-NOFP-NEXT: vmovx.f16 s10, s5
; CHECK-NOFP-NEXT: vcmp.f16 s10, s12
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s6, s2
; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vminnm.f16 s8, s8, s10
; CHECK-NOFP-NEXT: vmovx.f16 s12, s2
; CHECK-NOFP-NEXT: vselgt.f16 s10, s2, s6
; CHECK-NOFP-NEXT: vldr.16 s2, [r0]
; CHECK-NOFP-NEXT: vminnm.f16 s8, s8, s10
; CHECK-NOFP-NEXT: vmovx.f16 s10, s6
; CHECK-NOFP-NEXT: vcmp.f16 s10, s12
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s7, s3
; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s4, s0
; CHECK-NOFP-NEXT: vminnm.f16 s8, s8, s10
; CHECK-NOFP-NEXT: vselgt.f16 s10, s3, s7
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vminnm.f16 s8, s8, s10
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
; CHECK-NOFP-NEXT: vminnm.f16 s0, s8, s0
; CHECK-NOFP-NEXT: vminnm.f16 s0, s2, s0
; CHECK-NOFP-NEXT: vstr.16 s0, [r0]
; CHECK-NOFP-NEXT: bx lr
entry:
%y = load half, half* %yy
%z = call fast half @llvm.experimental.vector.reduce.fmin.v16f16(<16 x half> %x)
%c = fcmp fast olt half %y, %z
%r = select i1 %c, half %y, half %z
store half %r, half* %yy
ret void
}
; Degenerate <1 x double> fast-math case with accumulator: reduces to a
; single vminnm.f64 of the element (d0) and %y (d1).
define arm_aapcs_vfpcc double @fmin_v1f64_acc(<1 x double> %x, double %y) {
; CHECK-LABEL: fmin_v1f64_acc:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vminnm.f64 d0, d1, d0
; CHECK-NEXT: bx lr
entry:
%z = call fast double @llvm.experimental.vector.reduce.fmin.v1f64(<1 x double> %x)
%c = fcmp fast olt double %y, %z
%r = select i1 %c, double %y, double %z
ret double %r
}
; Fast-math fmin reduction of <2 x double> plus accumulator %y (d2):
; two chained vminnm.f64 instructions.
define arm_aapcs_vfpcc double @fmin_v2f64_acc(<2 x double> %x, double %y) {
; CHECK-LABEL: fmin_v2f64_acc:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vminnm.f64 d0, d0, d1
; CHECK-NEXT: vminnm.f64 d0, d2, d0
; CHECK-NEXT: bx lr
entry:
%z = call fast double @llvm.experimental.vector.reduce.fmin.v2f64(<2 x double> %x)
%c = fcmp fast olt double %y, %z
%r = select i1 %c, double %y, double %z
ret double %r
}
; Fast-math fmin reduction of <4 x double> plus accumulator %y (d4):
; vcmp/vselgt pair-merge followed by two vminnm.f64 steps.
define arm_aapcs_vfpcc double @fmin_v4f64_acc(<4 x double> %x, double %y) {
; CHECK-LABEL: fmin_v4f64_acc:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vcmp.f64 d3, d1
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vcmp.f64 d2, d0
; CHECK-NEXT: vselgt.f64 d5, d1, d3
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f64 d0, d0, d2
; CHECK-NEXT: vminnm.f64 d0, d0, d5
; CHECK-NEXT: vminnm.f64 d0, d4, d0
; CHECK-NEXT: bx lr
entry:
%z = call fast double @llvm.experimental.vector.reduce.fmin.v4f64(<4 x double> %x)
%c = fcmp fast olt double %y, %z
%r = select i1 %c, double %y, double %z
ret double %r
}
; Non-fast <2 x float> reduction with accumulator: the fcmp olt on %y stays
; an ordered vcmp/vselgt in both variants; only the in-vector reduction
; differs between FP (vminnm) and NOFP (vcmp/vselgt).
define arm_aapcs_vfpcc float @fmin_v2f32_acc_nofast(<2 x float> %x, float %y) {
; CHECK-FP-LABEL: fmin_v2f32_acc_nofast:
; CHECK-FP: @ %bb.0: @ %entry
; CHECK-FP-NEXT: vmov.32 r0, q0[1]
; CHECK-FP-NEXT: vdup.32 q2, r0
; CHECK-FP-NEXT: vminnm.f32 q0, q0, q2
; CHECK-FP-NEXT: vcmp.f32 s0, s4
; CHECK-FP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-FP-NEXT: vselgt.f32 s0, s4, s0
; CHECK-FP-NEXT: bx lr
;
; CHECK-NOFP-LABEL: fmin_v2f32_acc_nofast:
; CHECK-NOFP: @ %bb.0: @ %entry
; CHECK-NOFP-NEXT: vmov.32 r0, q0[1]
; CHECK-NOFP-NEXT: vdup.32 q2, r0
; CHECK-NOFP-NEXT: vcmp.f32 s8, s0
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s8
; CHECK-NOFP-NEXT: vcmp.f32 s0, s4
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f32 s0, s4, s0
; CHECK-NOFP-NEXT: bx lr
entry:
%z = call float @llvm.experimental.vector.reduce.fmin.v2f32(<2 x float> %x)
%c = fcmp olt float %y, %z
%r = select i1 %c, float %y, float %z
ret float %r
}
; Non-fast <4 x float> reduction with accumulator %y (s4): shuffle/vminnm
; tree in the FP variant, vcmp/vselgt chain in NOFP, plus the final ordered
; compare-and-select against %y in both.
define arm_aapcs_vfpcc float @fmin_v4f32_acc_nofast(<4 x float> %x, float %y) {
; CHECK-FP-LABEL: fmin_v4f32_acc_nofast:
; CHECK-FP: @ %bb.0: @ %entry
; CHECK-FP-NEXT: vmov.f64 d4, d1
; CHECK-FP-NEXT: vmov.f32 s9, s3
; CHECK-FP-NEXT: vminnm.f32 q0, q0, q2
; CHECK-FP-NEXT: vmov.32 r0, q0[1]
; CHECK-FP-NEXT: vdup.32 q2, r0
; CHECK-FP-NEXT: vminnm.f32 q0, q0, q2
; CHECK-FP-NEXT: vcmp.f32 s0, s4
; CHECK-FP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-FP-NEXT: vselgt.f32 s0, s4, s0
; CHECK-FP-NEXT: bx lr
;
; CHECK-NOFP-LABEL: fmin_v4f32_acc_nofast:
; CHECK-NOFP: @ %bb.0: @ %entry
; CHECK-NOFP-NEXT: vcmp.f32 s3, s1
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vmov.f64 d4, d1
; CHECK-NOFP-NEXT: vmov.f32 s9, s3
; CHECK-NOFP-NEXT: vcmp.f32 s8, s0
; CHECK-NOFP-NEXT: vselgt.f32 s6, s1, s3
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s8
; CHECK-NOFP-NEXT: vcmp.f32 s6, s0
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s6
; CHECK-NOFP-NEXT: vcmp.f32 s0, s4
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f32 s0, s4, s0
; CHECK-NOFP-NEXT: bx lr
entry:
%z = call float @llvm.experimental.vector.reduce.fmin.v4f32(<4 x float> %x)
%c = fcmp olt float %y, %z
%r = select i1 %c, float %y, float %z
ret float %r
}
; Non-fast <8 x float> reduction with accumulator %y (s8): FP variant is a
; three-level shuffle/vminnm tree; NOFP is a full vcmp/vselgt tree; both
; end with an ordered compare-and-select against %y.
define arm_aapcs_vfpcc float @fmin_v8f32_acc_nofast(<8 x float> %x, float %y) {
; CHECK-FP-LABEL: fmin_v8f32_acc_nofast:
; CHECK-FP: @ %bb.0: @ %entry
; CHECK-FP-NEXT: vminnm.f32 q0, q0, q1
; CHECK-FP-NEXT: vmov.f64 d2, d1
; CHECK-FP-NEXT: vmov.f32 s5, s3
; CHECK-FP-NEXT: vminnm.f32 q0, q0, q1
; CHECK-FP-NEXT: vmov.32 r0, q0[1]
; CHECK-FP-NEXT: vdup.32 q1, r0
; CHECK-FP-NEXT: vminnm.f32 q0, q0, q1
; CHECK-FP-NEXT: vcmp.f32 s0, s8
; CHECK-FP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-FP-NEXT: vselgt.f32 s0, s8, s0
; CHECK-FP-NEXT: bx lr
;
; CHECK-NOFP-LABEL: fmin_v8f32_acc_nofast:
; CHECK-NOFP: @ %bb.0: @ %entry
; CHECK-NOFP-NEXT: vcmp.f32 s7, s3
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f32 s5, s1
; CHECK-NOFP-NEXT: vselgt.f32 s10, s3, s7
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f32 s6, s2
; CHECK-NOFP-NEXT: vselgt.f32 s12, s1, s5
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f32 s4, s0
; CHECK-NOFP-NEXT: vselgt.f32 s14, s2, s6
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f32 s10, s12
; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s4
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f32 s14, s0
; CHECK-NOFP-NEXT: vselgt.f32 s2, s12, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s14
; CHECK-NOFP-NEXT: vcmp.f32 s2, s0
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s2
; CHECK-NOFP-NEXT: vcmp.f32 s0, s8
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f32 s0, s8, s0
; CHECK-NOFP-NEXT: bx lr
entry:
%z = call float @llvm.experimental.vector.reduce.fmin.v8f32(<8 x float> %x)
%c = fcmp olt float %y, %z
%r = select i1 %c, float %y, float %z
ret float %r
}
; Non-fast <4 x half> reduction with an in-memory half accumulator: both
; variants load %y from [r0], do an ordered vcmp/vselgt against the
; reduction result, and store back to [r0].
define arm_aapcs_vfpcc void @fmin_v4f16_acc_nofast(<4 x half> %x, half* %yy) {
; CHECK-FP-LABEL: fmin_v4f16_acc_nofast:
; CHECK-FP: @ %bb.0: @ %entry
; CHECK-FP-NEXT: vmov.32 r1, q0[1]
; CHECK-FP-NEXT: vdup.32 q1, r1
; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vmov.u16 r1, q0[1]
; CHECK-FP-NEXT: vdup.16 q1, r1
; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vldr.16 s4, [r0]
; CHECK-FP-NEXT: vcmp.f16 s0, s4
; CHECK-FP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-FP-NEXT: vselgt.f16 s0, s4, s0
; CHECK-FP-NEXT: vstr.16 s0, [r0]
; CHECK-FP-NEXT: bx lr
;
; CHECK-NOFP-LABEL: fmin_v4f16_acc_nofast:
; CHECK-NOFP: @ %bb.0: @ %entry
; CHECK-NOFP-NEXT: vmov.32 r1, q0[1]
; CHECK-NOFP-NEXT: vmovx.f16 s10, s0
; CHECK-NOFP-NEXT: vdup.32 q1, r1
; CHECK-NOFP-NEXT: vmovx.f16 s8, s4
; CHECK-NOFP-NEXT: vcmp.f16 s8, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s4, s0
; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
; CHECK-NOFP-NEXT: vldr.16 s2, [r0]
; CHECK-NOFP-NEXT: vcmp.f16 s8, s0
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s8
; CHECK-NOFP-NEXT: vcmp.f16 s0, s2
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s2, s0
; CHECK-NOFP-NEXT: vstr.16 s0, [r0]
; CHECK-NOFP-NEXT: bx lr
entry:
%y = load half, half* %yy
%z = call half @llvm.experimental.vector.reduce.fmin.v4f16(<4 x half> %x)
%c = fcmp olt half %y, %z
%r = select i1 %c, half %y, half %z
store half %r, half* %yy
ret void
}
; Strict fmin reduction of <8 x half> with a memory-based half accumulator; the
; NOFP lowering expands each pairwise min into vcmp/vmrs/vselgt sequences.
define arm_aapcs_vfpcc void @fmin_v8f16_acc_nofast(<8 x half> %x, half* %yy) {
; CHECK-FP-LABEL: fmin_v8f16_acc_nofast:
; CHECK-FP: @ %bb.0: @ %entry
; CHECK-FP-NEXT: vmov.f64 d2, d1
; CHECK-FP-NEXT: vmov.f32 s5, s3
; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vmov.32 r1, q0[1]
; CHECK-FP-NEXT: vdup.32 q1, r1
; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vmov.u16 r1, q0[1]
; CHECK-FP-NEXT: vdup.16 q1, r1
; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vldr.16 s4, [r0]
; CHECK-FP-NEXT: vcmp.f16 s0, s4
; CHECK-FP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-FP-NEXT: vselgt.f16 s0, s4, s0
; CHECK-FP-NEXT: vstr.16 s0, [r0]
; CHECK-FP-NEXT: bx lr
;
; CHECK-NOFP-LABEL: fmin_v8f16_acc_nofast:
; CHECK-NOFP: @ %bb.0: @ %entry
; CHECK-NOFP-NEXT: vmovx.f16 s8, s3
; CHECK-NOFP-NEXT: vmovx.f16 s10, s1
; CHECK-NOFP-NEXT: vcmp.f16 s8, s10
; CHECK-NOFP-NEXT: vmov.f64 d2, d1
; CHECK-NOFP-NEXT: vmovx.f16 s12, s0
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vmov.f32 s5, s3
; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
; CHECK-NOFP-NEXT: vmovx.f16 s10, s4
; CHECK-NOFP-NEXT: vcmp.f16 s10, s12
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
; CHECK-NOFP-NEXT: vcmp.f16 s8, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s3, s1
; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s4, s0
; CHECK-NOFP-NEXT: vselgt.f16 s10, s1, s3
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
; CHECK-NOFP-NEXT: vldr.16 s2, [r0]
; CHECK-NOFP-NEXT: vcmp.f16 s10, s0
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s10
; CHECK-NOFP-NEXT: vcmp.f16 s8, s0
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s8
; CHECK-NOFP-NEXT: vcmp.f16 s0, s2
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s2, s0
; CHECK-NOFP-NEXT: vstr.16 s0, [r0]
; CHECK-NOFP-NEXT: bx lr
entry:
  %y = load half, half* %yy
  %z = call half @llvm.experimental.vector.reduce.fmin.v8f16(<8 x half> %x)
  %c = fcmp olt half %y, %z
  %r = select i1 %c, half %y, half %z
  store half %r, half* %yy
  ret void
}
; Strict fmin reduction of <16 x half> (two q-registers) with a memory half
; accumulator; NOFP lowering is a long scalar vcmp/vselgt tree.
define arm_aapcs_vfpcc void @fmin_v16f16_acc_nofast(<16 x half> %x, half* %yy) {
; CHECK-FP-LABEL: fmin_v16f16_acc_nofast:
; CHECK-FP: @ %bb.0: @ %entry
; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vmov.f64 d2, d1
; CHECK-FP-NEXT: vmov.f32 s5, s3
; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vmov.32 r1, q0[1]
; CHECK-FP-NEXT: vdup.32 q1, r1
; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vmov.u16 r1, q0[1]
; CHECK-FP-NEXT: vdup.16 q1, r1
; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vldr.16 s4, [r0]
; CHECK-FP-NEXT: vcmp.f16 s0, s4
; CHECK-FP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-FP-NEXT: vselgt.f16 s0, s4, s0
; CHECK-FP-NEXT: vstr.16 s0, [r0]
; CHECK-FP-NEXT: bx lr
;
; CHECK-NOFP-LABEL: fmin_v16f16_acc_nofast:
; CHECK-NOFP: @ %bb.0: @ %entry
; CHECK-NOFP-NEXT: vmovx.f16 s8, s7
; CHECK-NOFP-NEXT: vmovx.f16 s10, s3
; CHECK-NOFP-NEXT: vcmp.f16 s8, s10
; CHECK-NOFP-NEXT: vmovx.f16 s12, s1
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vmovx.f16 s14, s0
; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
; CHECK-NOFP-NEXT: vmovx.f16 s10, s5
; CHECK-NOFP-NEXT: vcmp.f16 s10, s12
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
; CHECK-NOFP-NEXT: vmovx.f16 s12, s2
; CHECK-NOFP-NEXT: vcmp.f16 s8, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
; CHECK-NOFP-NEXT: vmovx.f16 s10, s6
; CHECK-NOFP-NEXT: vcmp.f16 s10, s12
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
; CHECK-NOFP-NEXT: vmovx.f16 s12, s4
; CHECK-NOFP-NEXT: vcmp.f16 s12, s14
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s12, s14, s12
; CHECK-NOFP-NEXT: vcmp.f16 s10, s12
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
; CHECK-NOFP-NEXT: vcmp.f16 s8, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s7, s3
; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s5, s1
; CHECK-NOFP-NEXT: vselgt.f16 s10, s3, s7
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s12, s1, s5
; CHECK-NOFP-NEXT: vcmp.f16 s10, s12
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s6, s2
; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s4, s0
; CHECK-NOFP-NEXT: vselgt.f16 s12, s2, s6
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
; CHECK-NOFP-NEXT: vldr.16 s2, [r0]
; CHECK-NOFP-NEXT: vcmp.f16 s12, s0
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s12
; CHECK-NOFP-NEXT: vcmp.f16 s10, s0
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s10
; CHECK-NOFP-NEXT: vcmp.f16 s8, s0
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s8
; CHECK-NOFP-NEXT: vcmp.f16 s0, s2
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s2, s0
; CHECK-NOFP-NEXT: vstr.16 s0, [r0]
; CHECK-NOFP-NEXT: bx lr
entry:
  %y = load half, half* %yy
  %z = call half @llvm.experimental.vector.reduce.fmin.v16f16(<16 x half> %x)
  %c = fcmp olt half %y, %z
  %r = select i1 %c, half %y, half %z
  store half %r, half* %yy
  ret void
}
; Strict fmin reduction of a single-element <1 x double>: reduces to one
; vcmp/vselgt against the accumulator %y.
define arm_aapcs_vfpcc double @fmin_v1f64_acc_nofast(<1 x double> %x, double %y) {
; CHECK-LABEL: fmin_v1f64_acc_nofast:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vcmp.f64 d0, d1
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f64 d0, d1, d0
; CHECK-NEXT: bx lr
entry:
  %z = call double @llvm.experimental.vector.reduce.fmin.v1f64(<1 x double> %x)
  %c = fcmp olt double %y, %z
  %r = select i1 %c, double %y, double %z
  ret double %r
}
; Strict fmin reduction of <2 x double> then min against accumulator %y;
; no vminnm is emitted without fast-math, only vcmp/vselgt pairs.
define arm_aapcs_vfpcc double @fmin_v2f64_acc_nofast(<2 x double> %x, double %y) {
; CHECK-LABEL: fmin_v2f64_acc_nofast:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vcmp.f64 d1, d0
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f64 d0, d0, d1
; CHECK-NEXT: vcmp.f64 d0, d2
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f64 d0, d2, d0
; CHECK-NEXT: bx lr
entry:
  %z = call double @llvm.experimental.vector.reduce.fmin.v2f64(<2 x double> %x)
  %c = fcmp olt double %y, %z
  %r = select i1 %c, double %y, double %z
  ret double %r
}
; Strict fmin reduction of <4 x double> (pairwise compare/select tree) folded
; into accumulator %y (passed in d4).
define arm_aapcs_vfpcc double @fmin_v4f64_acc_nofast(<4 x double> %x, double %y) {
; CHECK-LABEL: fmin_v4f64_acc_nofast:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vcmp.f64 d3, d1
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vcmp.f64 d2, d0
; CHECK-NEXT: vselgt.f64 d5, d1, d3
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f64 d0, d0, d2
; CHECK-NEXT: vcmp.f64 d5, d0
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f64 d0, d0, d5
; CHECK-NEXT: vcmp.f64 d0, d4
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f64 d0, d4, d0
; CHECK-NEXT: bx lr
entry:
  %z = call double @llvm.experimental.vector.reduce.fmin.v4f64(<4 x double> %x)
  %c = fcmp olt double %y, %z
  %r = select i1 %c, double %y, double %z
  ret double %r
}
; Fast-math fmax reduction of <2 x float>; the widened lanes are filled with the
; -Inf identity from a constant pool (note the currently-redundant second vmaxnm).
define arm_aapcs_vfpcc float @fmax_v2f32(<2 x float> %x) {
; CHECK-LABEL: fmax_v2f32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vldr s4, .LCPI36_0
; CHECK-NEXT: vmaxnm.f32 s0, s0, s1
; CHECK-NEXT: vmaxnm.f32 s0, s0, s4
; CHECK-NEXT: vmaxnm.f32 s0, s0, s4
; CHECK-NEXT: bx lr
; CHECK-NEXT: .p2align 2
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI36_0:
; CHECK-NEXT: .long 0xff800000 @ float -Inf
entry:
  %z = call fast float @llvm.experimental.vector.reduce.fmax.v2f32(<2 x float> %x)
  ret float %z
}
; Fast-math fmax reduction of <4 x float>: a simple chain of scalar vmaxnm.
define arm_aapcs_vfpcc float @fmax_v4f32(<4 x float> %x) {
; CHECK-LABEL: fmax_v4f32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmaxnm.f32 s4, s0, s1
; CHECK-NEXT: vmaxnm.f32 s4, s4, s2
; CHECK-NEXT: vmaxnm.f32 s0, s4, s3
; CHECK-NEXT: bx lr
entry:
  %z = call fast float @llvm.experimental.vector.reduce.fmax.v4f32(<4 x float> %x)
  ret float %z
}
; Fast-math fmax reduction of <8 x float>: FP uses a vector vmaxnm then a scalar
; chain; NOFP mixes vcmp/vselgt for the cross-register step with scalar vmaxnm.
define arm_aapcs_vfpcc float @fmax_v8f32(<8 x float> %x) {
; CHECK-FP-LABEL: fmax_v8f32:
; CHECK-FP: @ %bb.0: @ %entry
; CHECK-FP-NEXT: vmaxnm.f32 q0, q0, q1
; CHECK-FP-NEXT: vmaxnm.f32 s4, s0, s1
; CHECK-FP-NEXT: vmaxnm.f32 s4, s4, s2
; CHECK-FP-NEXT: vmaxnm.f32 s0, s4, s3
; CHECK-FP-NEXT: bx lr
;
; CHECK-NOFP-LABEL: fmax_v8f32:
; CHECK-NOFP: @ %bb.0: @ %entry
; CHECK-NOFP-NEXT: vcmp.f32 s1, s5
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f32 s0, s4
; CHECK-NOFP-NEXT: vselgt.f32 s8, s1, s5
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f32 s2, s6
; CHECK-NOFP-NEXT: vselgt.f32 s10, s0, s4
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f32 s3, s7
; CHECK-NOFP-NEXT: vselgt.f32 s12, s2, s6
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f32 s0, s3, s7
; CHECK-NOFP-NEXT: vmaxnm.f32 s2, s10, s8
; CHECK-NOFP-NEXT: vmaxnm.f32 s2, s2, s12
; CHECK-NOFP-NEXT: vmaxnm.f32 s0, s2, s0
; CHECK-NOFP-NEXT: bx lr
entry:
  %z = call fast float @llvm.experimental.vector.reduce.fmax.v8f32(<8 x float> %x)
  ret float %z
}
; Fast-math fmax reduction of <4 x half>; undef widened lanes become -Inf
; identity maxes from a constant pool (several currently-redundant vmaxnm).
define arm_aapcs_vfpcc half @fmax_v4f16(<4 x half> %x) {
; CHECK-LABEL: fmax_v4f16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmovx.f16 s4, s0
; CHECK-NEXT: vmaxnm.f16 s4, s0, s4
; CHECK-NEXT: vmovx.f16 s0, s1
; CHECK-NEXT: vmaxnm.f16 s4, s4, s1
; CHECK-NEXT: vldr.16 s2, .LCPI39_0
; CHECK-NEXT: vmaxnm.f16 s0, s4, s0
; CHECK-NEXT: vmaxnm.f16 s0, s0, s2
; CHECK-NEXT: vmaxnm.f16 s0, s0, s2
; CHECK-NEXT: vmaxnm.f16 s0, s0, s2
; CHECK-NEXT: vmaxnm.f16 s0, s0, s2
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
; CHECK-NEXT: .p2align 1
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI39_0:
; CHECK-NEXT: .short 0xfc00 @ half -Inf
entry:
  %z = call fast half @llvm.experimental.vector.reduce.fmax.v4f16(<4 x half> %x)
  ret half %z
}
; Fast-math fmax reduction of <8 x half>: scalar vmaxnm chain over the lanes
; (vmovx extracts the odd half-lanes from each s-register).
define arm_aapcs_vfpcc half @fmax_v8f16(<8 x half> %x) {
; CHECK-LABEL: fmax_v8f16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmovx.f16 s4, s0
; CHECK-NEXT: vmovx.f16 s6, s1
; CHECK-NEXT: vmaxnm.f16 s4, s0, s4
; CHECK-NEXT: vmovx.f16 s0, s3
; CHECK-NEXT: vmaxnm.f16 s4, s4, s1
; CHECK-NEXT: vmaxnm.f16 s4, s4, s6
; CHECK-NEXT: vmovx.f16 s6, s2
; CHECK-NEXT: vmaxnm.f16 s4, s4, s2
; CHECK-NEXT: vmaxnm.f16 s4, s4, s6
; CHECK-NEXT: vmaxnm.f16 s4, s4, s3
; CHECK-NEXT: vmaxnm.f16 s0, s4, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
entry:
  %z = call fast half @llvm.experimental.vector.reduce.fmax.v8f16(<8 x half> %x)
  ret half %z
}
; Fast-math fmax reduction of <16 x half>: FP does one vector vmaxnm then the
; v8f16 scalar chain; NOFP interleaves vcmp/vselgt with scalar vmaxnm.
define arm_aapcs_vfpcc half @fmax_v16f16(<16 x half> %x) {
; CHECK-FP-LABEL: fmax_v16f16:
; CHECK-FP: @ %bb.0: @ %entry
; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vmovx.f16 s4, s0
; CHECK-FP-NEXT: vmovx.f16 s6, s1
; CHECK-FP-NEXT: vmaxnm.f16 s4, s0, s4
; CHECK-FP-NEXT: vmovx.f16 s0, s3
; CHECK-FP-NEXT: vmaxnm.f16 s4, s4, s1
; CHECK-FP-NEXT: vmaxnm.f16 s4, s4, s6
; CHECK-FP-NEXT: vmovx.f16 s6, s2
; CHECK-FP-NEXT: vmaxnm.f16 s4, s4, s2
; CHECK-FP-NEXT: vmaxnm.f16 s4, s4, s6
; CHECK-FP-NEXT: vmaxnm.f16 s4, s4, s3
; CHECK-FP-NEXT: vmaxnm.f16 s0, s4, s0
; CHECK-FP-NEXT: vstr.16 s0, [r0]
; CHECK-FP-NEXT: bx lr
;
; CHECK-NOFP-LABEL: fmax_v16f16:
; CHECK-NOFP: @ %bb.0: @ %entry
; CHECK-NOFP-NEXT: vmovx.f16 s8, s4
; CHECK-NOFP-NEXT: vmovx.f16 s10, s0
; CHECK-NOFP-NEXT: vcmp.f16 s10, s8
; CHECK-NOFP-NEXT: vmovx.f16 s12, s1
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s0, s4
; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s1, s5
; CHECK-NOFP-NEXT: vselgt.f16 s10, s0, s4
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vmaxnm.f16 s8, s10, s8
; CHECK-NOFP-NEXT: vmovx.f16 s4, s7
; CHECK-NOFP-NEXT: vmovx.f16 s0, s3
; CHECK-NOFP-NEXT: vselgt.f16 s10, s1, s5
; CHECK-NOFP-NEXT: vmaxnm.f16 s8, s8, s10
; CHECK-NOFP-NEXT: vmovx.f16 s10, s5
; CHECK-NOFP-NEXT: vcmp.f16 s12, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s2, s6
; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vmaxnm.f16 s8, s8, s10
; CHECK-NOFP-NEXT: vmovx.f16 s12, s2
; CHECK-NOFP-NEXT: vselgt.f16 s10, s2, s6
; CHECK-NOFP-NEXT: vmaxnm.f16 s8, s8, s10
; CHECK-NOFP-NEXT: vmovx.f16 s10, s6
; CHECK-NOFP-NEXT: vcmp.f16 s12, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s3, s7
; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s0, s4
; CHECK-NOFP-NEXT: vmaxnm.f16 s8, s8, s10
; CHECK-NOFP-NEXT: vselgt.f16 s10, s3, s7
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vmaxnm.f16 s8, s8, s10
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
; CHECK-NOFP-NEXT: vmaxnm.f16 s0, s8, s0
; CHECK-NOFP-NEXT: vstr.16 s0, [r0]
; CHECK-NOFP-NEXT: bx lr
entry:
  %z = call fast half @llvm.experimental.vector.reduce.fmax.v16f16(<16 x half> %x)
  ret half %z
}
; Fast-math fmax reduction of <1 x double> is the identity: no code needed.
define arm_aapcs_vfpcc double @fmax_v1f64(<1 x double> %x) {
; CHECK-LABEL: fmax_v1f64:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: bx lr
entry:
  %z = call fast double @llvm.experimental.vector.reduce.fmax.v1f64(<1 x double> %x)
  ret double %z
}
; Fast-math fmax reduction of <2 x double>: a single vmaxnm.f64.
define arm_aapcs_vfpcc double @fmax_v2f64(<2 x double> %x) {
; CHECK-LABEL: fmax_v2f64:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmaxnm.f64 d0, d0, d1
; CHECK-NEXT: bx lr
entry:
  %z = call fast double @llvm.experimental.vector.reduce.fmax.v2f64(<2 x double> %x)
  ret double %z
}
; Fast-math fmax reduction of <4 x double>: pairwise vcmp/vselgt then a
; final vmaxnm.
define arm_aapcs_vfpcc double @fmax_v4f64(<4 x double> %x) {
; CHECK-LABEL: fmax_v4f64:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vcmp.f64 d1, d3
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vcmp.f64 d0, d2
; CHECK-NEXT: vselgt.f64 d4, d1, d3
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f64 d0, d0, d2
; CHECK-NEXT: vmaxnm.f64 d0, d0, d4
; CHECK-NEXT: bx lr
entry:
  %z = call fast double @llvm.experimental.vector.reduce.fmax.v4f64(<4 x double> %x)
  ret double %z
}
; Strict (no fast-math) fmax reduction of <2 x float>: FP still uses vector
; vmaxnm on the lane-rotated copy; NOFP uses scalar vcmp/vselgt.
define arm_aapcs_vfpcc float @fmax_v2f32_nofast(<2 x float> %x) {
; CHECK-FP-LABEL: fmax_v2f32_nofast:
; CHECK-FP: @ %bb.0: @ %entry
; CHECK-FP-NEXT: vmov.32 r0, q0[1]
; CHECK-FP-NEXT: vdup.32 q1, r0
; CHECK-FP-NEXT: vmaxnm.f32 q0, q0, q1
; CHECK-FP-NEXT: @ kill: def $s0 killed $s0 killed $q0
; CHECK-FP-NEXT: bx lr
;
; CHECK-NOFP-LABEL: fmax_v2f32_nofast:
; CHECK-NOFP: @ %bb.0: @ %entry
; CHECK-NOFP-NEXT: vmov.32 r0, q0[1]
; CHECK-NOFP-NEXT: vdup.32 q1, r0
; CHECK-NOFP-NEXT: vcmp.f32 s0, s4
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s4
; CHECK-NOFP-NEXT: bx lr
entry:
  %z = call float @llvm.experimental.vector.reduce.fmax.v2f32(<2 x float> %x)
  ret float %z
}
; Strict fmax reduction of <4 x float> via log2 shuffle-and-max steps (FP)
; or scalar vcmp/vselgt pairs (NOFP).
define arm_aapcs_vfpcc float @fmax_v4f32_nofast(<4 x float> %x) {
; CHECK-FP-LABEL: fmax_v4f32_nofast:
; CHECK-FP: @ %bb.0: @ %entry
; CHECK-FP-NEXT: vmov.f64 d2, d1
; CHECK-FP-NEXT: vmov.f32 s5, s3
; CHECK-FP-NEXT: vmaxnm.f32 q0, q0, q1
; CHECK-FP-NEXT: vmov.32 r0, q0[1]
; CHECK-FP-NEXT: vdup.32 q1, r0
; CHECK-FP-NEXT: vmaxnm.f32 q0, q0, q1
; CHECK-FP-NEXT: @ kill: def $s0 killed $s0 killed $q0
; CHECK-FP-NEXT: bx lr
;
; CHECK-NOFP-LABEL: fmax_v4f32_nofast:
; CHECK-NOFP: @ %bb.0: @ %entry
; CHECK-NOFP-NEXT: vcmp.f32 s1, s3
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vmov.f64 d2, d1
; CHECK-NOFP-NEXT: vmov.f32 s5, s3
; CHECK-NOFP-NEXT: vcmp.f32 s0, s4
; CHECK-NOFP-NEXT: vselgt.f32 s8, s1, s3
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s4
; CHECK-NOFP-NEXT: vcmp.f32 s0, s8
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s8
; CHECK-NOFP-NEXT: bx lr
entry:
  %z = call float @llvm.experimental.vector.reduce.fmax.v4f32(<4 x float> %x)
  ret float %z
}
; Strict fmax reduction of <8 x float>: FP is three vector vmaxnm steps; NOFP
; is a full scalar vcmp/vselgt tree (no vmaxnm without fast-math).
define arm_aapcs_vfpcc float @fmax_v8f32_nofast(<8 x float> %x) {
; CHECK-FP-LABEL: fmax_v8f32_nofast:
; CHECK-FP: @ %bb.0: @ %entry
; CHECK-FP-NEXT: vmaxnm.f32 q0, q0, q1
; CHECK-FP-NEXT: vmov.f64 d2, d1
; CHECK-FP-NEXT: vmov.f32 s5, s3
; CHECK-FP-NEXT: vmaxnm.f32 q0, q0, q1
; CHECK-FP-NEXT: vmov.32 r0, q0[1]
; CHECK-FP-NEXT: vdup.32 q1, r0
; CHECK-FP-NEXT: vmaxnm.f32 q0, q0, q1
; CHECK-FP-NEXT: @ kill: def $s0 killed $s0 killed $q0
; CHECK-FP-NEXT: bx lr
;
; CHECK-NOFP-LABEL: fmax_v8f32_nofast:
; CHECK-NOFP: @ %bb.0: @ %entry
; CHECK-NOFP-NEXT: vcmp.f32 s3, s7
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f32 s1, s5
; CHECK-NOFP-NEXT: vselgt.f32 s8, s3, s7
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f32 s2, s6
; CHECK-NOFP-NEXT: vselgt.f32 s10, s1, s5
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f32 s0, s4
; CHECK-NOFP-NEXT: vselgt.f32 s12, s2, s6
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f32 s10, s8
; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s4
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f32 s0, s12
; CHECK-NOFP-NEXT: vselgt.f32 s2, s10, s8
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s12
; CHECK-NOFP-NEXT: vcmp.f32 s0, s2
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s2
; CHECK-NOFP-NEXT: bx lr
entry:
  %z = call float @llvm.experimental.vector.reduce.fmax.v8f32(<8 x float> %x)
  ret float %z
}
; Strict fmax reduction of <4 x half>; result is stored through the sret-style
; half pointer in r0 per the half return lowering.
define arm_aapcs_vfpcc half @fmax_v4f16_nofast(<4 x half> %x) {
; CHECK-FP-LABEL: fmax_v4f16_nofast:
; CHECK-FP: @ %bb.0: @ %entry
; CHECK-FP-NEXT: vmov.32 r1, q0[1]
; CHECK-FP-NEXT: vdup.32 q1, r1
; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vmov.u16 r1, q0[1]
; CHECK-FP-NEXT: vdup.16 q1, r1
; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vstr.16 s0, [r0]
; CHECK-FP-NEXT: bx lr
;
; CHECK-NOFP-LABEL: fmax_v4f16_nofast:
; CHECK-NOFP: @ %bb.0: @ %entry
; CHECK-NOFP-NEXT: vmov.32 r1, q0[1]
; CHECK-NOFP-NEXT: vmovx.f16 s10, s0
; CHECK-NOFP-NEXT: vdup.32 q1, r1
; CHECK-NOFP-NEXT: vmovx.f16 s8, s4
; CHECK-NOFP-NEXT: vcmp.f16 s10, s8
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s0, s4
; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
; CHECK-NOFP-NEXT: vcmp.f16 s0, s8
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s8
; CHECK-NOFP-NEXT: vstr.16 s0, [r0]
; CHECK-NOFP-NEXT: bx lr
entry:
  %z = call half @llvm.experimental.vector.reduce.fmax.v4f16(<4 x half> %x)
  ret half %z
}
; Strict fmax reduction of <8 x half>: FP uses log2 shuffle/vmaxnm steps,
; NOFP a scalar vcmp/vselgt tree over even/odd half-lanes.
define arm_aapcs_vfpcc half @fmax_v8f16_nofast(<8 x half> %x) {
; CHECK-FP-LABEL: fmax_v8f16_nofast:
; CHECK-FP: @ %bb.0: @ %entry
; CHECK-FP-NEXT: vmov.f64 d2, d1
; CHECK-FP-NEXT: vmov.f32 s5, s3
; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vmov.32 r1, q0[1]
; CHECK-FP-NEXT: vdup.32 q1, r1
; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vmov.u16 r1, q0[1]
; CHECK-FP-NEXT: vdup.16 q1, r1
; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vstr.16 s0, [r0]
; CHECK-FP-NEXT: bx lr
;
; CHECK-NOFP-LABEL: fmax_v8f16_nofast:
; CHECK-NOFP: @ %bb.0: @ %entry
; CHECK-NOFP-NEXT: vmovx.f16 s8, s3
; CHECK-NOFP-NEXT: vmovx.f16 s10, s1
; CHECK-NOFP-NEXT: vcmp.f16 s10, s8
; CHECK-NOFP-NEXT: vmov.f64 d2, d1
; CHECK-NOFP-NEXT: vmovx.f16 s12, s0
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vmov.f32 s5, s3
; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
; CHECK-NOFP-NEXT: vmovx.f16 s10, s4
; CHECK-NOFP-NEXT: vcmp.f16 s12, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
; CHECK-NOFP-NEXT: vcmp.f16 s10, s8
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s1, s3
; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s0, s4
; CHECK-NOFP-NEXT: vselgt.f16 s10, s1, s3
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
; CHECK-NOFP-NEXT: vcmp.f16 s0, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s10
; CHECK-NOFP-NEXT: vcmp.f16 s0, s8
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s8
; CHECK-NOFP-NEXT: vstr.16 s0, [r0]
; CHECK-NOFP-NEXT: bx lr
entry:
  %z = call half @llvm.experimental.vector.reduce.fmax.v8f16(<8 x half> %x)
  ret half %z
}
; Strict fmax reduction of <16 x half>: FP chains four vector vmaxnm steps;
; NOFP is an extended scalar vcmp/vselgt tree across both q-registers.
define arm_aapcs_vfpcc half @fmax_v16f16_nofast(<16 x half> %x) {
; CHECK-FP-LABEL: fmax_v16f16_nofast:
; CHECK-FP: @ %bb.0: @ %entry
; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vmov.f64 d2, d1
; CHECK-FP-NEXT: vmov.f32 s5, s3
; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vmov.32 r1, q0[1]
; CHECK-FP-NEXT: vdup.32 q1, r1
; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vmov.u16 r1, q0[1]
; CHECK-FP-NEXT: vdup.16 q1, r1
; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vstr.16 s0, [r0]
; CHECK-FP-NEXT: bx lr
;
; CHECK-NOFP-LABEL: fmax_v16f16_nofast:
; CHECK-NOFP: @ %bb.0: @ %entry
; CHECK-NOFP-NEXT: vmovx.f16 s8, s7
; CHECK-NOFP-NEXT: vmovx.f16 s10, s3
; CHECK-NOFP-NEXT: vcmp.f16 s10, s8
; CHECK-NOFP-NEXT: vmovx.f16 s12, s1
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vmovx.f16 s14, s0
; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
; CHECK-NOFP-NEXT: vmovx.f16 s10, s5
; CHECK-NOFP-NEXT: vcmp.f16 s12, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
; CHECK-NOFP-NEXT: vmovx.f16 s12, s2
; CHECK-NOFP-NEXT: vcmp.f16 s10, s8
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
; CHECK-NOFP-NEXT: vmovx.f16 s10, s6
; CHECK-NOFP-NEXT: vcmp.f16 s12, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
; CHECK-NOFP-NEXT: vmovx.f16 s12, s4
; CHECK-NOFP-NEXT: vcmp.f16 s14, s12
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s12, s14, s12
; CHECK-NOFP-NEXT: vcmp.f16 s12, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
; CHECK-NOFP-NEXT: vcmp.f16 s10, s8
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s3, s7
; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s1, s5
; CHECK-NOFP-NEXT: vselgt.f16 s10, s3, s7
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s12, s1, s5
; CHECK-NOFP-NEXT: vcmp.f16 s12, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s2, s6
; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s0, s4
; CHECK-NOFP-NEXT: vselgt.f16 s12, s2, s6
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
; CHECK-NOFP-NEXT: vcmp.f16 s0, s12
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s12
; CHECK-NOFP-NEXT: vcmp.f16 s0, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s10
; CHECK-NOFP-NEXT: vcmp.f16 s0, s8
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s8
; CHECK-NOFP-NEXT: vstr.16 s0, [r0]
; CHECK-NOFP-NEXT: bx lr
entry:
  %z = call half @llvm.experimental.vector.reduce.fmax.v16f16(<16 x half> %x)
  ret half %z
}
; Strict fmax reduction of <1 x double> is the identity: no code needed.
define arm_aapcs_vfpcc double @fmax_v1f64_nofast(<1 x double> %x) {
; CHECK-LABEL: fmax_v1f64_nofast:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: bx lr
entry:
  %z = call double @llvm.experimental.vector.reduce.fmax.v1f64(<1 x double> %x)
  ret double %z
}
; Strict fmax reduction of <2 x double>: one vcmp/vselgt, no vmaxnm without
; fast-math flags.
define arm_aapcs_vfpcc double @fmax_v2f64_nofast(<2 x double> %x) {
; CHECK-LABEL: fmax_v2f64_nofast:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vcmp.f64 d0, d1
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f64 d0, d0, d1
; CHECK-NEXT: bx lr
entry:
  %z = call double @llvm.experimental.vector.reduce.fmax.v2f64(<2 x double> %x)
  ret double %z
}
; Strict fmax reduction of <4 x double>: a three-level vcmp/vselgt tree.
define arm_aapcs_vfpcc double @fmax_v4f64_nofast(<4 x double> %x) {
; CHECK-LABEL: fmax_v4f64_nofast:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vcmp.f64 d1, d3
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vcmp.f64 d0, d2
; CHECK-NEXT: vselgt.f64 d4, d1, d3
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f64 d0, d0, d2
; CHECK-NEXT: vcmp.f64 d0, d4
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f64 d0, d0, d4
; CHECK-NEXT: bx lr
entry:
  %z = call double @llvm.experimental.vector.reduce.fmax.v4f64(<4 x double> %x)
  ret double %z
}
; Fast-math fmax reduction of <2 x float> folded into accumulator %y; the
; fast ogt select fuses into a final vmaxnm, with -Inf identity for padding.
define arm_aapcs_vfpcc float @fmax_v2f32_acc(<2 x float> %x, float %y) {
; CHECK-LABEL: fmax_v2f32_acc:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vldr s6, .LCPI54_0
; CHECK-NEXT: vmaxnm.f32 s0, s0, s1
; CHECK-NEXT: vmaxnm.f32 s0, s0, s6
; CHECK-NEXT: vmaxnm.f32 s0, s0, s6
; CHECK-NEXT: vmaxnm.f32 s0, s4, s0
; CHECK-NEXT: bx lr
; CHECK-NEXT: .p2align 2
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI54_0:
; CHECK-NEXT: .long 0xff800000 @ float -Inf
entry:
  %z = call fast float @llvm.experimental.vector.reduce.fmax.v2f32(<2 x float> %x)
  %c = fcmp fast ogt float %y, %z
  %r = select i1 %c, float %y, float %z
  ret float %r
}
; Fast-math fmax reduction of <4 x float> folded into accumulator %y as one
; extra vmaxnm at the end of the chain.
define arm_aapcs_vfpcc float @fmax_v4f32_acc(<4 x float> %x, float %y) {
; CHECK-LABEL: fmax_v4f32_acc:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmaxnm.f32 s6, s0, s1
; CHECK-NEXT: vmaxnm.f32 s6, s6, s2
; CHECK-NEXT: vmaxnm.f32 s0, s6, s3
; CHECK-NEXT: vmaxnm.f32 s0, s4, s0
; CHECK-NEXT: bx lr
entry:
  %z = call fast float @llvm.experimental.vector.reduce.fmax.v4f32(<4 x float> %x)
  %c = fcmp fast ogt float %y, %z
  %r = select i1 %c, float %y, float %z
  ret float %r
}
; Fast-math fmax reduction of <8 x float> with accumulator %y (in s8); NOFP
; uses vcmp/vselgt for lane merges and vmaxnm for the final combines.
define arm_aapcs_vfpcc float @fmax_v8f32_acc(<8 x float> %x, float %y) {
; CHECK-FP-LABEL: fmax_v8f32_acc:
; CHECK-FP: @ %bb.0: @ %entry
; CHECK-FP-NEXT: vmaxnm.f32 q0, q0, q1
; CHECK-FP-NEXT: vmaxnm.f32 s4, s0, s1
; CHECK-FP-NEXT: vmaxnm.f32 s4, s4, s2
; CHECK-FP-NEXT: vmaxnm.f32 s0, s4, s3
; CHECK-FP-NEXT: vmaxnm.f32 s0, s8, s0
; CHECK-FP-NEXT: bx lr
;
; CHECK-NOFP-LABEL: fmax_v8f32_acc:
; CHECK-NOFP: @ %bb.0: @ %entry
; CHECK-NOFP-NEXT: vcmp.f32 s1, s5
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f32 s0, s4
; CHECK-NOFP-NEXT: vselgt.f32 s10, s1, s5
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f32 s2, s6
; CHECK-NOFP-NEXT: vselgt.f32 s12, s0, s4
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f32 s3, s7
; CHECK-NOFP-NEXT: vselgt.f32 s14, s2, s6
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f32 s0, s3, s7
; CHECK-NOFP-NEXT: vmaxnm.f32 s2, s12, s10
; CHECK-NOFP-NEXT: vmaxnm.f32 s2, s2, s14
; CHECK-NOFP-NEXT: vmaxnm.f32 s0, s2, s0
; CHECK-NOFP-NEXT: vmaxnm.f32 s0, s8, s0
; CHECK-NOFP-NEXT: bx lr
entry:
  %z = call fast float @llvm.experimental.vector.reduce.fmax.v8f32(<8 x float> %x)
  %c = fcmp fast ogt float %y, %z
  %r = select i1 %c, float %y, float %z
  ret float %r
}
; Fast-math fmax reduction of <4 x half> folded into a half accumulator in
; memory; widened lanes use the -Inf constant-pool identity.
define arm_aapcs_vfpcc void @fmax_v4f16_acc(<4 x half> %x, half* %yy) {
; CHECK-LABEL: fmax_v4f16_acc:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmovx.f16 s4, s0
; CHECK-NEXT: vmaxnm.f16 s4, s0, s4
; CHECK-NEXT: vmovx.f16 s0, s1
; CHECK-NEXT: vmaxnm.f16 s4, s4, s1
; CHECK-NEXT: vldr.16 s2, .LCPI57_0
; CHECK-NEXT: vmaxnm.f16 s0, s4, s0
; CHECK-NEXT: vmaxnm.f16 s0, s0, s2
; CHECK-NEXT: vmaxnm.f16 s0, s0, s2
; CHECK-NEXT: vmaxnm.f16 s0, s0, s2
; CHECK-NEXT: vmaxnm.f16 s0, s0, s2
; CHECK-NEXT: vldr.16 s2, [r0]
; CHECK-NEXT: vmaxnm.f16 s0, s2, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
; CHECK-NEXT: .p2align 1
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI57_0:
; CHECK-NEXT: .short 0xfc00 @ half -Inf
entry:
  %y = load half, half* %yy
  %z = call fast half @llvm.experimental.vector.reduce.fmax.v4f16(<4 x half> %x)
  %c = fcmp fast ogt half %y, %z
  %r = select i1 %c, half %y, half %z
  store half %r, half* %yy
  ret void
}
; Fast-math fmax reduction of <8 x half> folded into a half accumulator in
; memory via a scalar vmaxnm chain.
define arm_aapcs_vfpcc void @fmax_v8f16_acc(<8 x half> %x, half* %yy) {
; CHECK-LABEL: fmax_v8f16_acc:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmovx.f16 s4, s0
; CHECK-NEXT: vmovx.f16 s6, s1
; CHECK-NEXT: vmaxnm.f16 s4, s0, s4
; CHECK-NEXT: vmovx.f16 s0, s3
; CHECK-NEXT: vmaxnm.f16 s4, s4, s1
; CHECK-NEXT: vmaxnm.f16 s4, s4, s6
; CHECK-NEXT: vmovx.f16 s6, s2
; CHECK-NEXT: vmaxnm.f16 s4, s4, s2
; CHECK-NEXT: vldr.16 s2, [r0]
; CHECK-NEXT: vmaxnm.f16 s4, s4, s6
; CHECK-NEXT: vmaxnm.f16 s4, s4, s3
; CHECK-NEXT: vmaxnm.f16 s0, s4, s0
; CHECK-NEXT: vmaxnm.f16 s0, s2, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
entry:
  %y = load half, half* %yy
  %z = call fast half @llvm.experimental.vector.reduce.fmax.v8f16(<8 x half> %x)
  %c = fcmp fast ogt half %y, %z
  %r = select i1 %c, half %y, half %z
  store half %r, half* %yy
  ret void
}
; Fast-math fmax reduction of <16 x half> folded into a memory half
; accumulator; NOFP mixes vcmp/vselgt lane merges with scalar vmaxnm combines.
define arm_aapcs_vfpcc void @fmax_v16f16_acc(<16 x half> %x, half* %yy) {
; CHECK-FP-LABEL: fmax_v16f16_acc:
; CHECK-FP: @ %bb.0: @ %entry
; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vmovx.f16 s4, s0
; CHECK-FP-NEXT: vmovx.f16 s6, s1
; CHECK-FP-NEXT: vmaxnm.f16 s4, s0, s4
; CHECK-FP-NEXT: vmovx.f16 s0, s3
; CHECK-FP-NEXT: vmaxnm.f16 s4, s4, s1
; CHECK-FP-NEXT: vmaxnm.f16 s4, s4, s6
; CHECK-FP-NEXT: vmovx.f16 s6, s2
; CHECK-FP-NEXT: vmaxnm.f16 s4, s4, s2
; CHECK-FP-NEXT: vldr.16 s2, [r0]
; CHECK-FP-NEXT: vmaxnm.f16 s4, s4, s6
; CHECK-FP-NEXT: vmaxnm.f16 s4, s4, s3
; CHECK-FP-NEXT: vmaxnm.f16 s0, s4, s0
; CHECK-FP-NEXT: vmaxnm.f16 s0, s2, s0
; CHECK-FP-NEXT: vstr.16 s0, [r0]
; CHECK-FP-NEXT: bx lr
;
; CHECK-NOFP-LABEL: fmax_v16f16_acc:
; CHECK-NOFP: @ %bb.0: @ %entry
; CHECK-NOFP-NEXT: vmovx.f16 s8, s4
; CHECK-NOFP-NEXT: vmovx.f16 s10, s0
; CHECK-NOFP-NEXT: vcmp.f16 s10, s8
; CHECK-NOFP-NEXT: vmovx.f16 s12, s1
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s0, s4
; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s1, s5
; CHECK-NOFP-NEXT: vselgt.f16 s10, s0, s4
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vmaxnm.f16 s8, s10, s8
; CHECK-NOFP-NEXT: vmovx.f16 s4, s7
; CHECK-NOFP-NEXT: vmovx.f16 s0, s3
; CHECK-NOFP-NEXT: vselgt.f16 s10, s1, s5
; CHECK-NOFP-NEXT: vmaxnm.f16 s8, s8, s10
; CHECK-NOFP-NEXT: vmovx.f16 s10, s5
; CHECK-NOFP-NEXT: vcmp.f16 s12, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s2, s6
; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vmaxnm.f16 s8, s8, s10
; CHECK-NOFP-NEXT: vmovx.f16 s12, s2
; CHECK-NOFP-NEXT: vselgt.f16 s10, s2, s6
; CHECK-NOFP-NEXT: vldr.16 s2, [r0]
; CHECK-NOFP-NEXT: vmaxnm.f16 s8, s8, s10
; CHECK-NOFP-NEXT: vmovx.f16 s10, s6
; CHECK-NOFP-NEXT: vcmp.f16 s12, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s3, s7
; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s0, s4
; CHECK-NOFP-NEXT: vmaxnm.f16 s8, s8, s10
; CHECK-NOFP-NEXT: vselgt.f16 s10, s3, s7
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vmaxnm.f16 s8, s8, s10
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
; CHECK-NOFP-NEXT: vmaxnm.f16 s0, s8, s0
; CHECK-NOFP-NEXT: vmaxnm.f16 s0, s2, s0
; CHECK-NOFP-NEXT: vstr.16 s0, [r0]
; CHECK-NOFP-NEXT: bx lr
entry:
  %y = load half, half* %yy
  %z = call fast half @llvm.experimental.vector.reduce.fmax.v16f16(<16 x half> %x)
  %c = fcmp fast ogt half %y, %z
  %r = select i1 %c, half %y, half %z
  store half %r, half* %yy
  ret void
}
; Fast-math fmax of a <1 x double> with accumulator %y: a single vmaxnm.f64.
define arm_aapcs_vfpcc double @fmax_v1f64_acc(<1 x double> %x, double %y) {
; CHECK-LABEL: fmax_v1f64_acc:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmaxnm.f64 d0, d1, d0
; CHECK-NEXT: bx lr
entry:
  %z = call fast double @llvm.experimental.vector.reduce.fmax.v1f64(<1 x double> %x)
  %c = fcmp fast ogt double %y, %z
  %r = select i1 %c, double %y, double %z
  ret double %r
}
; Fast-math fmax reduction of <2 x double> with accumulator %y: two vmaxnm.
define arm_aapcs_vfpcc double @fmax_v2f64_acc(<2 x double> %x, double %y) {
; CHECK-LABEL: fmax_v2f64_acc:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmaxnm.f64 d0, d0, d1
; CHECK-NEXT: vmaxnm.f64 d0, d2, d0
; CHECK-NEXT: bx lr
entry:
  %z = call fast double @llvm.experimental.vector.reduce.fmax.v2f64(<2 x double> %x)
  %c = fcmp fast ogt double %y, %z
  %r = select i1 %c, double %y, double %z
  ret double %r
}
; Fast-math fmax reduction of <4 x double> with accumulator %y (in d4):
; vcmp/vselgt pairs then final vmaxnm combines.
define arm_aapcs_vfpcc double @fmax_v4f64_acc(<4 x double> %x, double %y) {
; CHECK-LABEL: fmax_v4f64_acc:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vcmp.f64 d1, d3
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vcmp.f64 d0, d2
; CHECK-NEXT: vselgt.f64 d5, d1, d3
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f64 d0, d0, d2
; CHECK-NEXT: vmaxnm.f64 d0, d0, d5
; CHECK-NEXT: vmaxnm.f64 d0, d4, d0
; CHECK-NEXT: bx lr
entry:
  %z = call fast double @llvm.experimental.vector.reduce.fmax.v4f64(<4 x double> %x)
  %c = fcmp fast ogt double %y, %z
  %r = select i1 %c, double %y, double %z
  ret double %r
}
; Same reduce+accumulate pattern but WITHOUT fast-math flags: the final
; compare+select must keep strict ogt semantics, so both configurations end
; with a vcmp/vselgt pair. CHECK-FP is +mve.fp, CHECK-NOFP is +mve,+fullfp16.
define arm_aapcs_vfpcc float @fmax_v2f32_acc_nofast(<2 x float> %x, float %y) {
; CHECK-FP-LABEL: fmax_v2f32_acc_nofast:
; CHECK-FP: @ %bb.0: @ %entry
; CHECK-FP-NEXT: vmov.32 r0, q0[1]
; CHECK-FP-NEXT: vdup.32 q2, r0
; CHECK-FP-NEXT: vmaxnm.f32 q0, q0, q2
; CHECK-FP-NEXT: vcmp.f32 s4, s0
; CHECK-FP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-FP-NEXT: vselgt.f32 s0, s4, s0
; CHECK-FP-NEXT: bx lr
;
; CHECK-NOFP-LABEL: fmax_v2f32_acc_nofast:
; CHECK-NOFP: @ %bb.0: @ %entry
; CHECK-NOFP-NEXT: vmov.32 r0, q0[1]
; CHECK-NOFP-NEXT: vdup.32 q2, r0
; CHECK-NOFP-NEXT: vcmp.f32 s0, s8
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s8
; CHECK-NOFP-NEXT: vcmp.f32 s4, s0
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f32 s0, s4, s0
; CHECK-NOFP-NEXT: bx lr
entry:
; Note: no fast-math flags on the call or the compare below.
%z = call float @llvm.experimental.vector.reduce.fmax.v2f32(<2 x float> %x)
%c = fcmp ogt float %y, %z
%r = select i1 %c, float %y, float %z
ret float %r
}
; <4 x float> non-fast fmax reduction plus accumulator. With MVE FP the tree
; reduction uses vector vmaxnm; without it, scalar vcmp/vselgt chains.
define arm_aapcs_vfpcc float @fmax_v4f32_acc_nofast(<4 x float> %x, float %y) {
; CHECK-FP-LABEL: fmax_v4f32_acc_nofast:
; CHECK-FP: @ %bb.0: @ %entry
; CHECK-FP-NEXT: vmov.f64 d4, d1
; CHECK-FP-NEXT: vmov.f32 s9, s3
; CHECK-FP-NEXT: vmaxnm.f32 q0, q0, q2
; CHECK-FP-NEXT: vmov.32 r0, q0[1]
; CHECK-FP-NEXT: vdup.32 q2, r0
; CHECK-FP-NEXT: vmaxnm.f32 q0, q0, q2
; CHECK-FP-NEXT: vcmp.f32 s4, s0
; CHECK-FP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-FP-NEXT: vselgt.f32 s0, s4, s0
; CHECK-FP-NEXT: bx lr
;
; CHECK-NOFP-LABEL: fmax_v4f32_acc_nofast:
; CHECK-NOFP: @ %bb.0: @ %entry
; CHECK-NOFP-NEXT: vcmp.f32 s1, s3
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vmov.f64 d4, d1
; CHECK-NOFP-NEXT: vmov.f32 s9, s3
; CHECK-NOFP-NEXT: vcmp.f32 s0, s8
; CHECK-NOFP-NEXT: vselgt.f32 s6, s1, s3
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s8
; CHECK-NOFP-NEXT: vcmp.f32 s0, s6
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s6
; CHECK-NOFP-NEXT: vcmp.f32 s4, s0
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f32 s0, s4, s0
; CHECK-NOFP-NEXT: bx lr
entry:
; Note: no fast-math flags on the call or the compare below.
%z = call float @llvm.experimental.vector.reduce.fmax.v4f32(<4 x float> %x)
%c = fcmp ogt float %y, %z
%r = select i1 %c, float %y, float %z
ret float %r
}
; <8 x float> non-fast fmax reduction plus accumulator, spanning two q-regs.
define arm_aapcs_vfpcc float @fmax_v8f32_acc_nofast(<8 x float> %x, float %y) {
; CHECK-FP-LABEL: fmax_v8f32_acc_nofast:
; CHECK-FP: @ %bb.0: @ %entry
; CHECK-FP-NEXT: vmaxnm.f32 q0, q0, q1
; CHECK-FP-NEXT: vmov.f64 d2, d1
; CHECK-FP-NEXT: vmov.f32 s5, s3
; CHECK-FP-NEXT: vmaxnm.f32 q0, q0, q1
; CHECK-FP-NEXT: vmov.32 r0, q0[1]
; CHECK-FP-NEXT: vdup.32 q1, r0
; CHECK-FP-NEXT: vmaxnm.f32 q0, q0, q1
; CHECK-FP-NEXT: vcmp.f32 s8, s0
; CHECK-FP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-FP-NEXT: vselgt.f32 s0, s8, s0
; CHECK-FP-NEXT: bx lr
;
; CHECK-NOFP-LABEL: fmax_v8f32_acc_nofast:
; CHECK-NOFP: @ %bb.0: @ %entry
; CHECK-NOFP-NEXT: vcmp.f32 s3, s7
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f32 s1, s5
; CHECK-NOFP-NEXT: vselgt.f32 s10, s3, s7
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f32 s2, s6
; CHECK-NOFP-NEXT: vselgt.f32 s12, s1, s5
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f32 s0, s4
; CHECK-NOFP-NEXT: vselgt.f32 s14, s2, s6
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f32 s12, s10
; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s4
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f32 s0, s14
; CHECK-NOFP-NEXT: vselgt.f32 s2, s12, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s14
; CHECK-NOFP-NEXT: vcmp.f32 s0, s2
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s2
; CHECK-NOFP-NEXT: vcmp.f32 s8, s0
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f32 s0, s8, s0
; CHECK-NOFP-NEXT: bx lr
entry:
; Note: no fast-math flags on the call or the compare below.
%z = call float @llvm.experimental.vector.reduce.fmax.v8f32(<8 x float> %x)
%c = fcmp ogt float %y, %z
%r = select i1 %c, float %y, float %z
ret float %r
}
; Half-precision variant: the f16 accumulator is passed and returned through
; memory (%yy) because the test loads %y, reduces <4 x half>, selects the max,
; and stores the result back.
define arm_aapcs_vfpcc void @fmax_v4f16_acc_nofast(<4 x half> %x, half* %yy) {
; CHECK-FP-LABEL: fmax_v4f16_acc_nofast:
; CHECK-FP: @ %bb.0: @ %entry
; CHECK-FP-NEXT: vmov.32 r1, q0[1]
; CHECK-FP-NEXT: vdup.32 q1, r1
; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vmov.u16 r1, q0[1]
; CHECK-FP-NEXT: vdup.16 q1, r1
; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vldr.16 s4, [r0]
; CHECK-FP-NEXT: vcmp.f16 s4, s0
; CHECK-FP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-FP-NEXT: vselgt.f16 s0, s4, s0
; CHECK-FP-NEXT: vstr.16 s0, [r0]
; CHECK-FP-NEXT: bx lr
;
; CHECK-NOFP-LABEL: fmax_v4f16_acc_nofast:
; CHECK-NOFP: @ %bb.0: @ %entry
; CHECK-NOFP-NEXT: vmov.32 r1, q0[1]
; CHECK-NOFP-NEXT: vmovx.f16 s10, s0
; CHECK-NOFP-NEXT: vdup.32 q1, r1
; CHECK-NOFP-NEXT: vmovx.f16 s8, s4
; CHECK-NOFP-NEXT: vcmp.f16 s10, s8
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s0, s4
; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
; CHECK-NOFP-NEXT: vldr.16 s2, [r0]
; CHECK-NOFP-NEXT: vcmp.f16 s0, s8
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s8
; CHECK-NOFP-NEXT: vcmp.f16 s2, s0
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s2, s0
; CHECK-NOFP-NEXT: vstr.16 s0, [r0]
; CHECK-NOFP-NEXT: bx lr
entry:
%y = load half, half* %yy
; Note: no fast-math flags on the call or the compare below.
%z = call half @llvm.experimental.vector.reduce.fmax.v4f16(<4 x half> %x)
%c = fcmp ogt half %y, %z
%r = select i1 %c, half %y, half %z
store half %r, half* %yy
ret void
}
; <8 x half> non-fast reduce+accumulate through memory (%yy), as above.
define arm_aapcs_vfpcc void @fmax_v8f16_acc_nofast(<8 x half> %x, half* %yy) {
; CHECK-FP-LABEL: fmax_v8f16_acc_nofast:
; CHECK-FP: @ %bb.0: @ %entry
; CHECK-FP-NEXT: vmov.f64 d2, d1
; CHECK-FP-NEXT: vmov.f32 s5, s3
; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vmov.32 r1, q0[1]
; CHECK-FP-NEXT: vdup.32 q1, r1
; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vmov.u16 r1, q0[1]
; CHECK-FP-NEXT: vdup.16 q1, r1
; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vldr.16 s4, [r0]
; CHECK-FP-NEXT: vcmp.f16 s4, s0
; CHECK-FP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-FP-NEXT: vselgt.f16 s0, s4, s0
; CHECK-FP-NEXT: vstr.16 s0, [r0]
; CHECK-FP-NEXT: bx lr
;
; CHECK-NOFP-LABEL: fmax_v8f16_acc_nofast:
; CHECK-NOFP: @ %bb.0: @ %entry
; CHECK-NOFP-NEXT: vmovx.f16 s8, s3
; CHECK-NOFP-NEXT: vmovx.f16 s10, s1
; CHECK-NOFP-NEXT: vcmp.f16 s10, s8
; CHECK-NOFP-NEXT: vmov.f64 d2, d1
; CHECK-NOFP-NEXT: vmovx.f16 s12, s0
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vmov.f32 s5, s3
; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
; CHECK-NOFP-NEXT: vmovx.f16 s10, s4
; CHECK-NOFP-NEXT: vcmp.f16 s12, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
; CHECK-NOFP-NEXT: vcmp.f16 s10, s8
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s1, s3
; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s0, s4
; CHECK-NOFP-NEXT: vselgt.f16 s10, s1, s3
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
; CHECK-NOFP-NEXT: vldr.16 s2, [r0]
; CHECK-NOFP-NEXT: vcmp.f16 s0, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s10
; CHECK-NOFP-NEXT: vcmp.f16 s0, s8
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s8
; CHECK-NOFP-NEXT: vcmp.f16 s2, s0
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s2, s0
; CHECK-NOFP-NEXT: vstr.16 s0, [r0]
; CHECK-NOFP-NEXT: bx lr
entry:
%y = load half, half* %yy
; Note: no fast-math flags on the call or the compare below.
%z = call half @llvm.experimental.vector.reduce.fmax.v8f16(<8 x half> %x)
%c = fcmp ogt half %y, %z
%r = select i1 %c, half %y, half %z
store half %r, half* %yy
ret void
}
; <16 x half> non-fast reduce+accumulate through memory (%yy). The NOFP
; variant expands to a long scalar vcmp/vselgt chain over both q-registers.
define arm_aapcs_vfpcc void @fmax_v16f16_acc_nofast(<16 x half> %x, half* %yy) {
; CHECK-FP-LABEL: fmax_v16f16_acc_nofast:
; CHECK-FP: @ %bb.0: @ %entry
; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vmov.f64 d2, d1
; CHECK-FP-NEXT: vmov.f32 s5, s3
; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vmov.32 r1, q0[1]
; CHECK-FP-NEXT: vdup.32 q1, r1
; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vmov.u16 r1, q0[1]
; CHECK-FP-NEXT: vdup.16 q1, r1
; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
; CHECK-FP-NEXT: vldr.16 s4, [r0]
; CHECK-FP-NEXT: vcmp.f16 s4, s0
; CHECK-FP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-FP-NEXT: vselgt.f16 s0, s4, s0
; CHECK-FP-NEXT: vstr.16 s0, [r0]
; CHECK-FP-NEXT: bx lr
;
; CHECK-NOFP-LABEL: fmax_v16f16_acc_nofast:
; CHECK-NOFP: @ %bb.0: @ %entry
; CHECK-NOFP-NEXT: vmovx.f16 s8, s7
; CHECK-NOFP-NEXT: vmovx.f16 s10, s3
; CHECK-NOFP-NEXT: vcmp.f16 s10, s8
; CHECK-NOFP-NEXT: vmovx.f16 s12, s1
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vmovx.f16 s14, s0
; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
; CHECK-NOFP-NEXT: vmovx.f16 s10, s5
; CHECK-NOFP-NEXT: vcmp.f16 s12, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
; CHECK-NOFP-NEXT: vmovx.f16 s12, s2
; CHECK-NOFP-NEXT: vcmp.f16 s10, s8
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
; CHECK-NOFP-NEXT: vmovx.f16 s10, s6
; CHECK-NOFP-NEXT: vcmp.f16 s12, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
; CHECK-NOFP-NEXT: vmovx.f16 s12, s4
; CHECK-NOFP-NEXT: vcmp.f16 s14, s12
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s12, s14, s12
; CHECK-NOFP-NEXT: vcmp.f16 s12, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
; CHECK-NOFP-NEXT: vcmp.f16 s10, s8
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s3, s7
; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s1, s5
; CHECK-NOFP-NEXT: vselgt.f16 s10, s3, s7
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s12, s1, s5
; CHECK-NOFP-NEXT: vcmp.f16 s12, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s2, s6
; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vcmp.f16 s0, s4
; CHECK-NOFP-NEXT: vselgt.f16 s12, s2, s6
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
; CHECK-NOFP-NEXT: vldr.16 s2, [r0]
; CHECK-NOFP-NEXT: vcmp.f16 s0, s12
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s12
; CHECK-NOFP-NEXT: vcmp.f16 s0, s10
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s10
; CHECK-NOFP-NEXT: vcmp.f16 s0, s8
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s8
; CHECK-NOFP-NEXT: vcmp.f16 s2, s0
; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NOFP-NEXT: vselgt.f16 s0, s2, s0
; CHECK-NOFP-NEXT: vstr.16 s0, [r0]
; CHECK-NOFP-NEXT: bx lr
entry:
%y = load half, half* %yy
; Note: no fast-math flags on the call or the compare below.
%z = call half @llvm.experimental.vector.reduce.fmax.v16f16(<16 x half> %x)
%c = fcmp ogt half %y, %z
%r = select i1 %c, half %y, half %z
store half %r, half* %yy
ret void
}
; <1 x double> non-fast variant: even the single accumulate step must use a
; vcmp/vselgt pair rather than vmaxnm.
define arm_aapcs_vfpcc double @fmax_v1f64_acc_nofast(<1 x double> %x, double %y) {
; CHECK-LABEL: fmax_v1f64_acc_nofast:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vcmp.f64 d1, d0
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f64 d0, d1, d0
; CHECK-NEXT: bx lr
entry:
; Note: no fast-math flags on the call or the compare below.
%z = call double @llvm.experimental.vector.reduce.fmax.v1f64(<1 x double> %x)
%c = fcmp ogt double %y, %z
%r = select i1 %c, double %y, double %z
ret double %r
}
; <2 x double> non-fast reduce+accumulate: two vcmp/vselgt steps.
define arm_aapcs_vfpcc double @fmax_v2f64_acc_nofast(<2 x double> %x, double %y) {
; CHECK-LABEL: fmax_v2f64_acc_nofast:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vcmp.f64 d0, d1
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f64 d0, d0, d1
; CHECK-NEXT: vcmp.f64 d2, d0
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f64 d0, d2, d0
; CHECK-NEXT: bx lr
entry:
; Note: no fast-math flags on the call or the compare below.
%z = call double @llvm.experimental.vector.reduce.fmax.v2f64(<2 x double> %x)
%c = fcmp ogt double %y, %z
%r = select i1 %c, double %y, double %z
ret double %r
}
; <4 x double> non-fast reduce+accumulate: full vcmp/vselgt tree, no vmaxnm.
define arm_aapcs_vfpcc double @fmax_v4f64_acc_nofast(<4 x double> %x, double %y) {
; CHECK-LABEL: fmax_v4f64_acc_nofast:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vcmp.f64 d1, d3
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vcmp.f64 d0, d2
; CHECK-NEXT: vselgt.f64 d5, d1, d3
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f64 d0, d0, d2
; CHECK-NEXT: vcmp.f64 d0, d5
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f64 d0, d0, d5
; CHECK-NEXT: vcmp.f64 d4, d0
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f64 d0, d4, d0
; CHECK-NEXT: bx lr
entry:
; Note: no fast-math flags on the call or the compare below.
%z = call double @llvm.experimental.vector.reduce.fmax.v4f64(<4 x double> %x)
%c = fcmp ogt double %y, %z
%r = select i1 %c, double %y, double %z
ret double %r
}
declare double @llvm.experimental.vector.reduce.fmax.v1f64(<1 x double>)
declare double @llvm.experimental.vector.reduce.fmax.v2f64(<2 x double>)
declare double @llvm.experimental.vector.reduce.fmax.v4f64(<4 x double>)
declare double @llvm.experimental.vector.reduce.fmin.v1f64(<1 x double>)
declare double @llvm.experimental.vector.reduce.fmin.v2f64(<2 x double>)
declare double @llvm.experimental.vector.reduce.fmin.v4f64(<4 x double>)
declare float @llvm.experimental.vector.reduce.fmax.v2f32(<2 x float>)
declare float @llvm.experimental.vector.reduce.fmax.v4f32(<4 x float>)
declare float @llvm.experimental.vector.reduce.fmax.v8f32(<8 x float>)
declare float @llvm.experimental.vector.reduce.fmin.v2f32(<2 x float>)
declare float @llvm.experimental.vector.reduce.fmin.v4f32(<4 x float>)
declare float @llvm.experimental.vector.reduce.fmin.v8f32(<8 x float>)
declare half @llvm.experimental.vector.reduce.fmax.v16f16(<16 x half>)
declare half @llvm.experimental.vector.reduce.fmax.v4f16(<4 x half>)
declare half @llvm.experimental.vector.reduce.fmax.v8f16(<8 x half>)
declare half @llvm.experimental.vector.reduce.fmin.v16f16(<16 x half>)
declare half @llvm.experimental.vector.reduce.fmin.v4f16(<4 x half>)
declare half @llvm.experimental.vector.reduce.fmin.v8f16(<8 x half>)