1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-11-26 04:32:44 +01:00

[ARM][BFloat] Lowering of create/get/set/dup intrinsics

This patch adds codegen for the following BFloat
operations to the ARM backend:
* concatenation of bf16 vectors
* bf16 vector element extraction
* bf16 vector element insertion
* duplication of a bf16 value into each lane of a vector
* duplication of a bf16 vector lane into each lane

Differential Revision: https://reviews.llvm.org/D81411
This commit is contained in:
Mikhail Maltsev 2020-06-19 12:49:17 +00:00
parent 40767ae0bd
commit c955b9f411
4 changed files with 313 additions and 32 deletions

View File

@ -2481,7 +2481,7 @@ def SSubReg_f32_reg : SDNodeXForm<imm, [{
MVT::i32);
}]>;
// Extract S sub-registers of Q/D registers containing a given f16/bf16 lane.
def SSubReg_f16_reg : SDNodeXForm<imm, [{
assert(ARM::ssub_3 == ARM::ssub_0+3 && "Unexpected subreg numbering");
return CurDAG->getTargetConstant(ARM::ssub_0 + N->getZExtValue()/2, SDLoc(N),
@ -6407,29 +6407,57 @@ def : Pat<(extractelt (v2f64 QPR:$src1), imm:$src2),
// Match lane immediates by parity: a 16-bit lane lives in the low (even
// lane) or high (odd lane) half of the containing 32-bit S sub-register,
// which determines how it can be extracted.
def imm_even : ImmLeaf<i32, [{ return (Imm & 1) == 0; }]>;
def imm_odd : ImmLeaf<i32, [{ return (Imm & 1) == 1; }]>;
// Extract even-numbered f16/bf16 lanes. An even lane occupies the low half
// of an S sub-register, so a plain S-register extract (no shift) suffices.
// NOTE(review): the scraped diff left the pre-image f16-only pattern
// interleaved here; this is the post-commit multiclass form.
multiclass ExtractEltEvenF16<ValueType VT4, ValueType VT8> {
  // 64-bit vector: view the D register as v2f32 and take the S subreg.
  def : Pat<(extractelt (VT4 DPR:$src), imm_even:$lane),
            (EXTRACT_SUBREG
              (v2f32 (COPY_TO_REGCLASS (VT4 DPR:$src), DPR_VFP2)),
              (SSubReg_f16_reg imm_even:$lane))>;
  // 128-bit vector: same trick via v4f32 on the Q register.
  def : Pat<(extractelt (VT8 QPR:$src), imm_even:$lane),
            (EXTRACT_SUBREG
              (v4f32 (COPY_TO_REGCLASS (VT8 QPR:$src), QPR_VFP2)),
              (SSubReg_f16_reg imm_even:$lane))>;
}
// Extract odd-numbered f16/bf16 lanes using VMOVH (vmovx.f16), which moves
// the top 16 bits of an S register into the bottom of the destination.
// NOTE(review): the scraped diff duplicated the replaced operand lines in
// the first pattern; this is the corrected multiclass body.
multiclass ExtractEltOddF16VMOVH<ValueType VT4, ValueType VT8> {
  def : Pat<(extractelt (VT4 DPR:$src), imm_odd:$lane),
            (COPY_TO_REGCLASS
              (VMOVH (EXTRACT_SUBREG
                       (v2f32 (COPY_TO_REGCLASS (VT4 DPR:$src), DPR_VFP2)),
                       (SSubReg_f16_reg imm_odd:$lane))),
              HPR)>;
  def : Pat<(extractelt (VT8 QPR:$src), imm_odd:$lane),
            (COPY_TO_REGCLASS
              (VMOVH (EXTRACT_SUBREG
                       (v4f32 (COPY_TO_REGCLASS (VT8 QPR:$src), QPR_VFP2)),
                       (SSubReg_f16_reg imm_odd:$lane))),
              HPR)>;
}
// f16 lane extraction: plain NEON is sufficient.
let Predicates = [HasNEON] in {
defm : ExtractEltEvenF16<v4f16, v8f16>;
defm : ExtractEltOddF16VMOVH<v4f16, v8f16>;
}
// For bf16 odd lanes prefer the VMOVH form when FullFP16 is available;
// AddedComplexity = 1 makes these win over the GPR-based fallback below.
let AddedComplexity = 1, Predicates = [HasNEON, HasBF16, HasFullFP16] in {
// If VMOVH (vmovx.f16) is available use it to extract BF16 from the odd lanes
defm : ExtractEltOddF16VMOVH<v4bf16, v8bf16>;
}
// bf16 extraction without relying on VMOVH: even lanes reuse the generic
// even-lane patterns; odd lanes go through a GPR with VGETLNu16 and are
// then copied into an H register.
// NOTE(review): the scraped diff left the replaced v8f16 patterns
// interleaved here; this keeps only the new bf16 lines.
let Predicates = [HasBF16, HasNEON] in {
defm : ExtractEltEvenF16<v4bf16, v8bf16>;
// Otherwise, if VMOVH is not available resort to extracting the odd lane
// into a GPR and then moving to HPR
def : Pat<(extractelt (v4bf16 DPR:$src), imm_odd:$lane),
          (COPY_TO_REGCLASS
            (VGETLNu16 (v4bf16 DPR:$src), imm:$lane),
            HPR)>;
// 128-bit source: first select the D half that holds the lane.
def : Pat<(extractelt (v8bf16 QPR:$src), imm_odd:$lane),
          (COPY_TO_REGCLASS
            (VGETLNu16 (v4i16 (EXTRACT_SUBREG QPR:$src,
                                (DSubReg_i16_reg imm:$lane))),
                       (SubReg_i16_lane imm:$lane)),
            HPR)>;
}
@ -6465,6 +6493,21 @@ def VSETLNi32 : NVSetLane<{1,1,1,0,0,0,?,0}, 0b1011, 0b00, (outs DPR:$V),
}
}
// TODO: for odd lanes we could optimize this a bit by using the VINS
// FullFP16 instruction when it is available
// Insert a 16-bit FP scalar (VTScalar = f16 or bf16) into lane imm:$lane of
// a 64-bit (VT4) or 128-bit (VT8) vector. The scalar is moved into a GPR
// first so the integer VSETLNi16 lane insert serves both element types.
multiclass InsertEltF16<ValueType VTScalar, ValueType VT4, ValueType VT8> {
def : Pat<(insertelt (VT4 DPR:$src1), (VTScalar HPR:$src2), imm:$lane),
(VT4 (VSETLNi16 DPR:$src1,
(COPY_TO_REGCLASS HPR:$src2, GPR), imm:$lane))>;
// 128-bit case: update the D half containing the lane, then re-insert it
// into the Q register.
def : Pat<(insertelt (VT8 QPR:$src1), (VTScalar HPR:$src2), imm:$lane),
(VT8 (INSERT_SUBREG QPR:$src1,
(v4i16 (VSETLNi16 (v4i16 (EXTRACT_SUBREG QPR:$src1,
(DSubReg_i16_reg imm:$lane))),
(COPY_TO_REGCLASS HPR:$src2, GPR),
(SubReg_i16_lane imm:$lane))),
(DSubReg_i16_reg imm:$lane)))>;
}
let Predicates = [HasNEON] in {
def : Pat<(vector_insert (v16i8 QPR:$src1), GPR:$src2, imm:$lane),
(v16i8 (INSERT_SUBREG QPR:$src1,
@ -6492,14 +6535,7 @@ def : Pat<(v4f32 (insertelt QPR:$src1, SPR:$src2, imm:$src3)),
(INSERT_SUBREG (v4f32 (COPY_TO_REGCLASS QPR:$src1, QPR_VFP2)),
SPR:$src2, (SSubReg_f32_reg imm:$src3))>;
// f16 element insertion is now provided by the shared InsertEltF16
// multiclass above (also instantiated for bf16 under HasBF16).
// NOTE(review): the scraped diff left the replaced hand-written f16
// patterns here; they duplicate what the multiclass expands to.
defm : InsertEltF16<f16, v4f16, v8f16>;
//def : Pat<(v2i64 (insertelt QPR:$src1, DPR:$src2, imm:$src3)),
// (INSERT_SUBREG QPR:$src1, DPR:$src2, (DSubReg_f64_reg imm:$src3))>;
@ -6534,6 +6570,9 @@ def : Pat<(v4i32 (scalar_to_vector GPR:$src)),
dsub_0)>;
}
// bf16 element insertion reuses the shared f16/bf16 multiclass.
let Predicates = [HasNEON, HasBF16] in
defm : InsertEltF16<bf16, v4bf16, v8bf16>;
// VDUP : Vector Duplicate (from ARM core register to all elements)
class VDUPD<bits<8> opcod1, bits<2> opcod3, string Dt, ValueType Ty>
@ -6652,6 +6691,23 @@ def : Pat<(v8f16 (ARMvdup (f16 HPR:$src))),
(f16 HPR:$src), ssub_0), (i32 0)))>;
}
// Lane/scalar duplication for bf16 vectors, mirroring the f16 patterns
// above: VDUPLN16d/VDUPLN16q broadcast a single 16-bit lane.
let Predicates = [HasNEON, HasBF16] in {
def : Pat<(v4bf16 (ARMvduplane (v4bf16 DPR:$Vm), imm:$lane)),
(VDUPLN16d DPR:$Vm, imm:$lane)>;
// Q-register source: first select the D half that holds the lane.
def : Pat<(v8bf16 (ARMvduplane (v8bf16 QPR:$src), imm:$lane)),
(v8bf16 (VDUPLN16q (v4bf16 (EXTRACT_SUBREG QPR:$src,
(DSubReg_i16_reg imm:$lane))),
(SubReg_i16_lane imm:$lane)))>;
// Scalar splat: place the bf16 value in lane 0 of an undef D register
// (H regs overlap the low S sub-register), then duplicate that lane.
def : Pat<(v4bf16 (ARMvdup (bf16 HPR:$src))),
(v4bf16 (VDUPLN16d (INSERT_SUBREG (v4bf16 (IMPLICIT_DEF)),
(bf16 HPR:$src), ssub_0), (i32 0)))>;
def : Pat<(v8bf16 (ARMvdup (bf16 HPR:$src))),
(v8bf16 (VDUPLN16q (INSERT_SUBREG (v4bf16 (IMPLICIT_DEF)),
(bf16 HPR:$src), ssub_0), (i32 0)))>;
}
// VMOVN : Vector Narrowing Move
// Truncate each element to half its width, keeping the low bits (trunc).
defm VMOVN : N2VN_HSD<0b11,0b11,0b10,0b00100,0,0, IIC_VMOVN,
"vmovn", "i", trunc>;
@ -7979,6 +8035,8 @@ def : Pat<(v4f32 (concat_vectors DPR:$Dn, DPR:$Dm)),
(REG_SEQUENCE QPR, DPR:$Dn, dsub_0, DPR:$Dm, dsub_1)>;
def : Pat<(v8f16 (concat_vectors DPR:$Dn, DPR:$Dm)),
(REG_SEQUENCE QPR, DPR:$Dn, dsub_0, DPR:$Dm, dsub_1)>;
// bf16 concatenation is identical to f16: pair the two D registers into
// the halves of a Q register with no data movement.
def : Pat<(v8bf16 (concat_vectors DPR:$Dn, DPR:$Dm)),
(REG_SEQUENCE QPR, DPR:$Dn, dsub_0, DPR:$Dm, dsub_1)>;
}
//===----------------------------------------------------------------------===//

View File

@ -0,0 +1,178 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=armv8.6a-arm-none-eabi -mattr=+bf16,+neon,+fullfp16 < %s | FileCheck %s
; FIXME: Remove fullfp16 once bfloat arguments and returns lowering stops
; depending on it.
target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
target triple = "armv8.6a-arm-none-eabi"
; vcreate: reinterpret an i64 as <4 x bfloat> — a pure bitcast that lowers
; to a single GPR-pair-to-D-register vmov.
define arm_aapcs_vfpcc <4 x bfloat> @test_vcreate_bf16(i64 %a) {
; CHECK-LABEL: test_vcreate_bf16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov d0, r0, r1
; CHECK-NEXT: bx lr
entry:
%0 = bitcast i64 %a to <4 x bfloat>
ret <4 x bfloat> %0
}
; vdup_n: splat one bf16 scalar into all 4 lanes (insert into lane 0, then
; zero-mask shuffle); lowers to a single vdup.16 from lane d0[0].
define arm_aapcs_vfpcc <4 x bfloat> @test_vdup_n_bf16(bfloat %v) {
; CHECK-LABEL: test_vdup_n_bf16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: @ kill: def $s0 killed $s0 def $d0
; CHECK-NEXT: vdup.16 d0, d0[0]
; CHECK-NEXT: bx lr
entry:
%vecinit.i = insertelement <4 x bfloat> undef, bfloat %v, i32 0
%vecinit3.i = shufflevector <4 x bfloat> %vecinit.i, <4 x bfloat> undef, <4 x i32> zeroinitializer
ret <4 x bfloat> %vecinit3.i
}
; vdupq_n: splat one bf16 scalar into all 8 lanes of a Q register.
define arm_aapcs_vfpcc <8 x bfloat> @test_vdupq_n_bf16(bfloat %v) {
; CHECK-LABEL: test_vdupq_n_bf16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: @ kill: def $s0 killed $s0 def $d0
; CHECK-NEXT: vdup.16 q0, d0[0]
; CHECK-NEXT: bx lr
entry:
%vecinit.i = insertelement <8 x bfloat> undef, bfloat %v, i32 0
%vecinit7.i = shufflevector <8 x bfloat> %vecinit.i, <8 x bfloat> undef, <8 x i32> zeroinitializer
ret <8 x bfloat> %vecinit7.i
}
; vdup_lane: broadcast lane 1 of a <4 x bfloat> to every lane.
define arm_aapcs_vfpcc <4 x bfloat> @test_vdup_lane_bf16(<4 x bfloat> %v) {
; CHECK-LABEL: test_vdup_lane_bf16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vdup.16 d0, d0[1]
; CHECK-NEXT: bx lr
entry:
%lane = shufflevector <4 x bfloat> %v, <4 x bfloat> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
ret <4 x bfloat> %lane
}
; vdupq_lane: broadcast lane 1 of a 64-bit vector into all 8 lanes of a
; 128-bit result.
define arm_aapcs_vfpcc <8 x bfloat> @test_vdupq_lane_bf16(<4 x bfloat> %v) {
; CHECK-LABEL: test_vdupq_lane_bf16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: @ kill: def $d0 killed $d0 def $q0
; CHECK-NEXT: vdup.16 q0, d0[1]
; CHECK-NEXT: bx lr
entry:
%lane = shufflevector <4 x bfloat> %v, <4 x bfloat> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
ret <8 x bfloat> %lane
}
; vdup_laneq: broadcast lane 7 of a 128-bit vector into a 64-bit result;
; lane 7 of q0 is lane 3 of its high half d1.
define arm_aapcs_vfpcc <4 x bfloat> @test_vdup_laneq_bf16(<8 x bfloat> %v) {
; CHECK-LABEL: test_vdup_laneq_bf16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vdup.16 d0, d1[3]
; CHECK-NEXT: bx lr
entry:
%lane = shufflevector <8 x bfloat> %v, <8 x bfloat> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
ret <4 x bfloat> %lane
}
; vdupq_laneq: broadcast lane 7 of a 128-bit vector into all 8 lanes.
define arm_aapcs_vfpcc <8 x bfloat> @test_vdupq_laneq_bf16(<8 x bfloat> %v) {
; CHECK-LABEL: test_vdupq_laneq_bf16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vdup.16 q0, d1[3]
; CHECK-NEXT: bx lr
entry:
%lane = shufflevector <8 x bfloat> %v, <8 x bfloat> undef, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
ret <8 x bfloat> %lane
}
; vcombine: concatenate two 64-bit bf16 vectors into one 128-bit vector.
; As written, the shuffle mask selects %high for lanes 0-3 and %low for
; lanes 4-7; only register moves are needed.
define arm_aapcs_vfpcc <8 x bfloat> @test_vcombine_bf16(<4 x bfloat> %low, <4 x bfloat> %high) {
; CHECK-LABEL: test_vcombine_bf16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.f64 d16, d1
; CHECK-NEXT: vorr d17, d0, d0
; CHECK-NEXT: vorr q0, q8, q8
; CHECK-NEXT: bx lr
entry:
%shuffle.i = shufflevector <4 x bfloat> %high, <4 x bfloat> %low, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
ret <8 x bfloat> %shuffle.i
}
; vget_high: extract lanes 4-7, i.e. the high D half of the Q register.
define arm_aapcs_vfpcc <4 x bfloat> @test_vget_high_bf16(<8 x bfloat> %a) {
; CHECK-LABEL: test_vget_high_bf16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.f64 d0, d1
; CHECK-NEXT: bx lr
entry:
%shuffle.i = shufflevector <8 x bfloat> %a, <8 x bfloat> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
ret <4 x bfloat> %shuffle.i
}
; vget_low: extract lanes 0-3 — already in d0, so no instruction is emitted.
define arm_aapcs_vfpcc <4 x bfloat> @test_vget_low_bf16(<8 x bfloat> %a) {
; CHECK-LABEL: test_vget_low_bf16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: @ kill: def $d0 killed $d0 killed $q0
; CHECK-NEXT: bx lr
entry:
%shuffle.i = shufflevector <8 x bfloat> %a, <8 x bfloat> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
ret <4 x bfloat> %shuffle.i
}
; Even-lane extract from a Q register: lane 6 is the low half of S reg s3,
; so a plain S-register move suffices.
define arm_aapcs_vfpcc bfloat @test_vgetq_lane_bf16_even(<8 x bfloat> %v) {
; CHECK-LABEL: test_vgetq_lane_bf16_even:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.f32 s0, s3
; CHECK-NEXT: bx lr
entry:
%0 = extractelement <8 x bfloat> %v, i32 6
ret bfloat %0
}
; Odd-lane extract from a Q register: lane 7 is the high half of s3, pulled
; down with vmovx.f16 (requires FullFP16).
define arm_aapcs_vfpcc bfloat @test_vgetq_lane_bf16_odd(<8 x bfloat> %v) {
; CHECK-LABEL: test_vgetq_lane_bf16_odd:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmovx.f16 s0, s3
; CHECK-NEXT: bx lr
entry:
%0 = extractelement <8 x bfloat> %v, i32 7
ret bfloat %0
}
; Even-lane extract from a D register: lane 2 is the low half of s1.
define arm_aapcs_vfpcc bfloat @test_vget_lane_bf16_even(<4 x bfloat> %v) {
; CHECK-LABEL: test_vget_lane_bf16_even:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.f32 s0, s1
; CHECK-NEXT: bx lr
entry:
%0 = extractelement <4 x bfloat> %v, i32 2
ret bfloat %0
}
; Odd-lane extract from a D register: lane 1 is the high half of s0.
define arm_aapcs_vfpcc bfloat @test_vget_lane_bf16_odd(<4 x bfloat> %v) {
; CHECK-LABEL: test_vget_lane_bf16_odd:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmovx.f16 s0, s0
; CHECK-NEXT: bx lr
entry:
%0 = extractelement <4 x bfloat> %v, i32 1
ret bfloat %0
}
; vset_lane: insert a bf16 scalar into lane 1 of a 64-bit vector. The
; scalar is moved to a GPR and inserted with the integer vmov.16.
define arm_aapcs_vfpcc <4 x bfloat> @test_vset_lane_bf16(bfloat %a, <4 x bfloat> %v) {
; CHECK-LABEL: test_vset_lane_bf16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vmov.16 d1[1], r0
; CHECK-NEXT: vorr d0, d1, d1
; CHECK-NEXT: bx lr
entry:
%0 = insertelement <4 x bfloat> %v, bfloat %a, i32 1
ret <4 x bfloat> %0
}
; vsetq_lane: insert a bf16 scalar into lane 7 of a 128-bit vector; lane 7
; of the Q register is lane 3 of its high D half (d3 here).
define arm_aapcs_vfpcc <8 x bfloat> @test_vsetq_lane_bf16(bfloat %a, <8 x bfloat> %v) {
; CHECK-LABEL: test_vsetq_lane_bf16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vmov.16 d3[3], r0
; CHECK-NEXT: vorr q0, q1, q1
; CHECK-NEXT: bx lr
entry:
%0 = insertelement <8 x bfloat> %v, bfloat %a, i32 7
ret <8 x bfloat> %0
}

View File

@ -0,0 +1,45 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=armv8.6a-arm-none-eabi -mattr=+bf16,+neon,+fullfp16 < %s | FileCheck %s
target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
target triple = "armv8.6a-arm-none-eabi"
; With +fullfp16: even lane 6 is the low half of s3 — plain S-reg move.
define arm_aapcs_vfpcc bfloat @test_vgetq_lane_bf16_even(<8 x bfloat> %v) {
; CHECK-LABEL: test_vgetq_lane_bf16_even:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.f32 s0, s3
; CHECK-NEXT: bx lr
entry:
%0 = extractelement <8 x bfloat> %v, i32 6
ret bfloat %0
}
; With +fullfp16: odd lane 7 uses vmovx.f16 rather than a GPR round-trip.
define arm_aapcs_vfpcc bfloat @test_vgetq_lane_bf16_odd(<8 x bfloat> %v) {
; CHECK-LABEL: test_vgetq_lane_bf16_odd:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmovx.f16 s0, s3
; CHECK-NEXT: bx lr
entry:
%0 = extractelement <8 x bfloat> %v, i32 7
ret bfloat %0
}
; With +fullfp16: even lane 2 of a D register is the low half of s1.
define arm_aapcs_vfpcc bfloat @test_vget_lane_bf16_even(<4 x bfloat> %v) {
; CHECK-LABEL: test_vget_lane_bf16_even:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.f32 s0, s1
; CHECK-NEXT: bx lr
entry:
%0 = extractelement <4 x bfloat> %v, i32 2
ret bfloat %0
}
; With +fullfp16: odd lane 1 of a D register via vmovx.f16 on s0.
define arm_aapcs_vfpcc bfloat @test_vget_lane_bf16_odd(<4 x bfloat> %v) {
; CHECK-LABEL: test_vget_lane_bf16_odd:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmovx.f16 s0, s0
; CHECK-NEXT: bx lr
entry:
%0 = extractelement <4 x bfloat> %v, i32 1
ret bfloat %0
}

View File

@ -82,7 +82,7 @@ define <4 x half> @test_vset_lane_f16(<4 x half> %a, float %fb) nounwind {
; CHECKHARD-LABEL: test_vset_lane_f16:
; CHECKHARD: @ %bb.0: @ %entry
; CHECKHARD-NEXT: vcvtb.f16.f32 s2, s2
; CHECKHARD-NEXT: vmov.f16 r0, s2
; CHECKHARD-NEXT: vmov r0, s2
; CHECKHARD-NEXT: vmov.16 d0[3], r0
; CHECKHARD-NEXT: bx lr
;
@ -91,7 +91,7 @@ define <4 x half> @test_vset_lane_f16(<4 x half> %a, float %fb) nounwind {
; CHECKSOFT-NEXT: vmov s0, r2
; CHECKSOFT-NEXT: vcvtb.f16.f32 s0, s0
; CHECKSOFT-NEXT: vmov d16, r0, r1
; CHECKSOFT-NEXT: vmov.f16 r2, s0
; CHECKSOFT-NEXT: vmov r2, s0
; CHECKSOFT-NEXT: vmov.16 d16[3], r2
; CHECKSOFT-NEXT: vmov r0, r1, d16
; CHECKSOFT-NEXT: bx lr
@ -105,7 +105,7 @@ define <8 x half> @test_vset_laneq_f16_1(<8 x half> %a, float %fb) nounwind {
; CHECKHARD-LABEL: test_vset_laneq_f16_1:
; CHECKHARD: @ %bb.0: @ %entry
; CHECKHARD-NEXT: vcvtb.f16.f32 s4, s4
; CHECKHARD-NEXT: vmov.f16 r0, s4
; CHECKHARD-NEXT: vmov r0, s4
; CHECKHARD-NEXT: vmov.16 d0[1], r0
; CHECKHARD-NEXT: bx lr
;
@ -115,7 +115,7 @@ define <8 x half> @test_vset_laneq_f16_1(<8 x half> %a, float %fb) nounwind {
; CHECKSOFT-NEXT: vmov d17, r2, r3
; CHECKSOFT-NEXT: vmov d16, r0, r1
; CHECKSOFT-NEXT: vcvtb.f16.f32 s0, s0
; CHECKSOFT-NEXT: vmov.f16 r12, s0
; CHECKSOFT-NEXT: vmov r12, s0
; CHECKSOFT-NEXT: vmov.16 d16[1], r12
; CHECKSOFT-NEXT: vmov r2, r3, d17
; CHECKSOFT-NEXT: vmov r0, r1, d16
@ -130,7 +130,7 @@ define <8 x half> @test_vset_laneq_f16_7(<8 x half> %a, float %fb) nounwind {
; CHECKHARD-LABEL: test_vset_laneq_f16_7:
; CHECKHARD: @ %bb.0: @ %entry
; CHECKHARD-NEXT: vcvtb.f16.f32 s4, s4
; CHECKHARD-NEXT: vmov.f16 r0, s4
; CHECKHARD-NEXT: vmov r0, s4
; CHECKHARD-NEXT: vmov.16 d1[3], r0
; CHECKHARD-NEXT: bx lr
;
@ -140,7 +140,7 @@ define <8 x half> @test_vset_laneq_f16_7(<8 x half> %a, float %fb) nounwind {
; CHECKSOFT-NEXT: vmov d17, r2, r3
; CHECKSOFT-NEXT: vmov d16, r0, r1
; CHECKSOFT-NEXT: vcvtb.f16.f32 s0, s0
; CHECKSOFT-NEXT: vmov.f16 r12, s0
; CHECKSOFT-NEXT: vmov r12, s0
; CHECKSOFT-NEXT: vmov.16 d17[3], r12
; CHECKSOFT-NEXT: vmov r0, r1, d16
; CHECKSOFT-NEXT: vmov r2, r3, d17