
[BFloat] Add convert/copy intrinsic support

This patch is part of a series implementing the BFloat16 extension of the Armv8.6-A architecture, as detailed here:

https://community.arm.com/developer/ip-products/processors/b/processors-ip-blog/posts/arm-architecture-developments-armv8-6-a

Specifically, it adds intrinsic support in Clang and LLVM for Arm and AArch64.

The bfloat type and its properties are specified in the Arm Architecture Reference Manual:

https://developer.arm.com/docs/ddi0487/latest/arm-architecture-reference-manual-armv8-for-armv8-a-architecture-profile
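
For orientation (not part of this patch): bfloat is a 16-bit floating-point format with 1 sign bit, 8 exponent bits and 7 fraction bits, i.e. the upper half of an IEEE-754 binary32 value. A minimal, purely illustrative LLVM IR sketch of the type in use, assuming generic fptrunc support for bfloat:

; Illustrative only -- not from this patch. Narrowing f32 -> bf16 drops
; 16 mantissa bits while keeping the full f32 exponent range.
define bfloat @round_to_bf16(float %x) {
  %b = fptrunc float %x to bfloat
  ret bfloat %b
}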

The following people contributed to this patch:
  - Alexandros Lamprineas
  - Luke Cheeseman
  - Mikhail Maltsev
  - Momchil Velikov
  - Luke Geeson

Differential Revision: https://reviews.llvm.org/D80928
Author: Mikhail Maltsev
Date:   2020-06-23 14:24:33 +00:00
Parent: a5ea3b8c6a
Commit: 14bad468ca
8 changed files with 228 additions and 9 deletions


@@ -471,6 +471,16 @@ let TargetPrefix = "aarch64", IntrProperties = [IntrNoMem] in {
def int_aarch64_neon_bfmlalt : AdvSIMD_FML_Intrinsic;

// v8.6-A Bfloat Intrinsics
def int_aarch64_neon_bfcvt
  : Intrinsic<[llvm_bfloat_ty], [llvm_float_ty], [IntrNoMem]>;
def int_aarch64_neon_bfcvtn
  : Intrinsic<[llvm_v8bf16_ty], [llvm_v4f32_ty], [IntrNoMem]>;
def int_aarch64_neon_bfcvtn2
  : Intrinsic<[llvm_v8bf16_ty],
              [llvm_v8bf16_ty, llvm_v4f32_ty],
              [IntrNoMem]>;

// v8.2-A FP16 Fused Multiply-Add Long
def int_aarch64_neon_fmlal : AdvSIMD_FP16FML_Intrinsic;
def int_aarch64_neon_fmlsl : AdvSIMD_FP16FML_Intrinsic;
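
These definitions surface in IR as the intrinsics declared below. A minimal usage sketch (the declarations match the AArch64 test further down; the combining function is illustrative):

declare bfloat @llvm.aarch64.neon.bfcvt(float)
declare <8 x bfloat> @llvm.aarch64.neon.bfcvtn(<4 x float>)
declare <8 x bfloat> @llvm.aarch64.neon.bfcvtn2(<8 x bfloat>, <4 x float>)

; Narrow eight f32 values into one v8bf16: bfcvtn fills the low half of
; the result, bfcvtn2 keeps the low half of its first operand and fills
; the high half with the converted elements.
define <8 x bfloat> @narrow_two_halves(<4 x float> %lo, <4 x float> %hi) {
  %low  = call <8 x bfloat> @llvm.aarch64.neon.bfcvtn(<4 x float> %lo)
  %both = call <8 x bfloat> @llvm.aarch64.neon.bfcvtn2(<8 x bfloat> %low, <4 x float> %hi)
  ret <8 x bfloat> %both
}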


@@ -785,6 +785,11 @@ def int_arm_neon_usmmla : Neon_MatMul_Intrinsic;
def int_arm_neon_usdot : Neon_Dot_Intrinsic;

// v8.6-A Bfloat Intrinsics
def int_arm_neon_vcvtfp2bf
  : Intrinsic<[llvm_anyvector_ty], [llvm_v4f32_ty], [IntrNoMem]>;
def int_arm_neon_vcvtbfp2bf
  : Intrinsic<[llvm_bfloat_ty], [llvm_float_ty], [IntrNoMem]>;
def int_arm_neon_bfdot : Neon_Dot_Intrinsic;
def int_arm_neon_bfmmla : Neon_MatMul_Intrinsic;
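
Note the asymmetry between the two conversions: vcvtbfp2bf has a fixed scalar signature, while vcvtfp2bf returns llvm_anyvector_ty and is therefore overloaded, with the concrete return type mangled into the IR name. The ARM test below relies on this to return either <4 x bfloat> (hard-float ABI) or plain <4 x i16> (soft-float ABI):

declare bfloat @llvm.arm.neon.vcvtbfp2bf(float)
declare <4 x bfloat> @llvm.arm.neon.vcvtfp2bf.v4bf16(<4 x float>)
declare <4 x i16> @llvm.arm.neon.vcvtfp2bf.v4i16(<4 x float>)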


@@ -7911,15 +7911,18 @@ class SIMDThreeSameVectorBF16MatrixMul<string asm>
class SIMD_BFCVTN
  : BaseSIMDMixedTwoVector<0, 0, 0b10, 0b10110, V128, V128,
                           "bfcvtn", ".4h", ".4s",
                           [(set (v8bf16 V128:$Rd),
                                 (int_aarch64_neon_bfcvtn (v4f32 V128:$Rn)))]>;

class SIMD_BFCVTN2
  : BaseSIMDMixedTwoVectorTied<1, 0, 0b10, 0b10110, V128, V128,
                               "bfcvtn2", ".8h", ".4s",
                               [(set (v8bf16 V128:$dst),
                                     (int_aarch64_neon_bfcvtn2 (v8bf16 V128:$Rd), (v4f32 V128:$Rn)))]>;

class BF16ToSinglePrecision<string asm>
  : I<(outs FPR16:$Rd), (ins FPR32:$Rn), asm, "\t$Rd, $Rn", "",
      [(set (bf16 FPR16:$Rd), (int_aarch64_neon_bfcvt (f32 FPR32:$Rn)))]>,
    Sched<[WriteFCvt]> {
  bits<5> Rd;
  bits<5> Rn;
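
The Tied variant is the key detail here: bfcvtn2 writes only the high half of its destination, so $dst is tied to the input $Rd. At the IR level that looks as follows (a sketch mirroring the tests further down):

declare <8 x bfloat> @llvm.aarch64.neon.bfcvtn2(<8 x bfloat>, <4 x float>)

define <8 x bfloat> @cvt_high(<8 x bfloat> %low, <4 x float> %a) {
  ; %low supplies the tied destination: its low half survives, and the
  ; high half is overwritten with the narrowed elements of %a.
  %r = call <8 x bfloat> @llvm.aarch64.neon.bfcvtn2(<8 x bfloat> %low, <4 x float> %a)
  ret <8 x bfloat> %r
}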


@@ -3955,12 +3955,16 @@ defm URSQRTE: SIMDTwoVectorS<1, 1, 0b11100, "ursqrte", int_aarch64_neon_ursqrte>
defm USQADD : SIMDTwoVectorBHSDTied<1, 0b00011, "usqadd",int_aarch64_neon_usqadd>;
defm XTN : SIMDMixedTwoVector<0, 0b10010, "xtn", trunc>;
def : Pat<(v4f16 (AArch64rev32 V64:$Rn)), (REV32v4i16 V64:$Rn)>;
def : Pat<(v4f16 (AArch64rev64 V64:$Rn)), (REV64v4i16 V64:$Rn)>;
def : Pat<(v4bf16 (AArch64rev32 V64:$Rn)), (REV32v4i16 V64:$Rn)>;
def : Pat<(v4bf16 (AArch64rev64 V64:$Rn)), (REV64v4i16 V64:$Rn)>;
def : Pat<(v8f16 (AArch64rev32 V128:$Rn)), (REV32v8i16 V128:$Rn)>;
def : Pat<(v8f16 (AArch64rev64 V128:$Rn)), (REV64v8i16 V128:$Rn)>;
def : Pat<(v8bf16 (AArch64rev32 V128:$Rn)), (REV32v8i16 V128:$Rn)>;
def : Pat<(v8bf16 (AArch64rev64 V128:$Rn)), (REV64v8i16 V128:$Rn)>;
def : Pat<(v2f32 (AArch64rev64 V64:$Rn)), (REV64v2i32 V64:$Rn)>;
def : Pat<(v4f32 (AArch64rev64 V128:$Rn)), (REV64v4i32 V128:$Rn)>;
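
The bf16 entries reuse the integer REV instructions, since REV cares only about element size, not element type. A sketch of the kind of shuffle these patterns catch (assuming the generic shuffle lowering recognizes this mask as a REV32, as it already does for f16):

define <4 x bfloat> @rev32_v4bf16(<4 x bfloat> %a) {
  ; Swapping the two 16-bit lanes inside each 32-bit word is an
  ; AArch64rev32 node, now matched for bf16 and emitted as REV32v4i16.
  %r = shufflevector <4 x bfloat> %a, <4 x bfloat> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
  ret <4 x bfloat> %r
}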
// Patterns for vector long shift (by element width). These need to match all
// three of zext, sext and anyext so it's easier to pull the patterns out of the


@@ -4743,6 +4743,29 @@ void ARMDAGToDAGISel::Select(SDNode *N) {
default:
break;
// Scalar f32 -> bf16
case Intrinsic::arm_neon_vcvtbfp2bf: {
SDLoc dl(N);
const SDValue &Src = N->getOperand(1);
llvm::EVT DestTy = N->getValueType(0);
SDValue Pred = getAL(CurDAG, dl);
SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
SDValue Ops[] = { Src, Src, Pred, Reg0 };
CurDAG->SelectNodeTo(N, ARM::BF16_VCVTB, DestTy, Ops);
return;
}
// Vector v4f32 -> v4bf16
case Intrinsic::arm_neon_vcvtfp2bf: {
SDLoc dl(N);
const SDValue &Src = N->getOperand(1);
SDValue Pred = getAL(CurDAG, dl);
SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
SDValue Ops[] = { Src, Pred, Reg0 };
CurDAG->SelectNodeTo(N, ARM::BF16_VCVT, MVT::v4bf16, Ops);
return;
}
case Intrinsic::arm_mve_urshrl:
SelectMVE_LongShift(N, ARM::MVE_URSHRL, true, false);
return;
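
Both new cases hand the conversion straight to a machine node, appending the standard ARM predicate operands (AL condition plus a null register); the scalar case passes Src twice, presumably because BF16_VCVTB updates only half of its destination register and so takes a tied destination input as its first operand. At the IR level the two paths look like this (the mnemonics in the comments come from the ARM test below):

declare bfloat @llvm.arm.neon.vcvtbfp2bf(float)
declare <4 x bfloat> @llvm.arm.neon.vcvtfp2bf.v4bf16(<4 x float>)

define arm_aapcs_vfpcc bfloat @scalar_path(float %a) {
  ; arm_neon_vcvtbfp2bf case -> BF16_VCVTB ("vcvtb.bf16.f32 s0, s0")
  %r = call bfloat @llvm.arm.neon.vcvtbfp2bf(float %a)
  ret bfloat %r
}

define arm_aapcs_vfpcc <4 x bfloat> @vector_path(<4 x float> %a) {
  ; arm_neon_vcvtfp2bf case -> BF16_VCVT ("vcvt.bf16.f32 d0, q0")
  %r = call <4 x bfloat> @llvm.arm.neon.vcvtfp2bf.v4bf16(<4 x float> %a)
  ret <4 x bfloat> %r
}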


@@ -0,0 +1,34 @@
; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-arm-none-eabi -mattr=+neon -mattr=+bf16 | FileCheck %s
declare bfloat @llvm.aarch64.neon.bfcvt(float)
declare <8 x bfloat> @llvm.aarch64.neon.bfcvtn(<4 x float>)
declare <8 x bfloat> @llvm.aarch64.neon.bfcvtn2(<8 x bfloat>, <4 x float>)
; CHECK-LABEL: test_vcvth_bf16_f32
; CHECK: bfcvt h0, s0
; CHECK-NEXT: ret
define bfloat @test_vcvth_bf16_f32(float %a) {
entry:
%vcvth_bf16_f32 = call bfloat @llvm.aarch64.neon.bfcvt(float %a)
ret bfloat %vcvth_bf16_f32
}
; CHECK-LABEL: test_vcvtq_low_bf16_f32
; CHECK: bfcvtn v0.4h, v0.4s
; CHECK-NEXT: ret
define <8 x bfloat> @test_vcvtq_low_bf16_f32(<4 x float> %a) {
entry:
%cvt = call <8 x bfloat> @llvm.aarch64.neon.bfcvtn(<4 x float> %a)
ret <8 x bfloat> %cvt
}
; CHECK-LABEL: test_vcvtq_high_bf16_f32
; CHECK: bfcvtn2 v1.8h, v0.4s
; CHECK-NEXT: mov v0.16b, v1.16b
; CHECK-NEXT: ret
define <8 x bfloat> @test_vcvtq_high_bf16_f32(<4 x float> %a, <8 x bfloat> %inactive) {
entry:
%cvt = call <8 x bfloat> @llvm.aarch64.neon.bfcvtn2(<8 x bfloat> %inactive, <4 x float> %a)
ret <8 x bfloat> %cvt
}


@@ -163,3 +163,87 @@ entry:
%vgetq_lane = extractelement <8 x bfloat> %v, i32 7
ret bfloat %vgetq_lane
}
; vcopy_lane_bf16(a, 1, b, 3);
define <4 x bfloat> @test_vcopy_lane_bf16_v1(<4 x bfloat> %a, <4 x bfloat> %b) nounwind {
; CHECK-LABEL: test_vcopy_lane_bf16_v1:
; CHECK-NEXT: mov v0.h[1], v1.h[3]
; CHECK-NEXT: ret
entry:
%vset_lane = shufflevector <4 x bfloat> %a, <4 x bfloat> %b, <4 x i32> <i32 0, i32 7, i32 2, i32 3>
ret <4 x bfloat> %vset_lane
}
; vcopy_lane_bf16(a, 2, b, 0);
define <4 x bfloat> @test_vcopy_lane_bf16_v2(<4 x bfloat> %a, <4 x bfloat> %b) nounwind {
; CHECK-LABEL: test_vcopy_lane_bf16_v2:
; CHECK-NEXT: mov v0.h[2], v1.h[0]
; CHECK-NEXT: ret
entry:
%vset_lane = shufflevector <4 x bfloat> %a, <4 x bfloat> %b, <4 x i32> <i32 0, i32 1, i32 4, i32 3>
ret <4 x bfloat> %vset_lane
}
; vcopyq_lane_bf16(a, 0, b, 2);
define <8 x bfloat> @test_vcopyq_lane_bf16_v1(<8 x bfloat> %a, <4 x bfloat> %b) nounwind {
; CHECK-LABEL: test_vcopyq_lane_bf16_v1:
; CHECK-NEXT: mov v0.h[0], v1.h[2]
; CHECK-NEXT: ret
entry:
%0 = shufflevector <4 x bfloat> %b, <4 x bfloat> undef, <8 x i32> <i32 undef, i32 undef, i32 2, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%vset_lane = shufflevector <8 x bfloat> %a, <8 x bfloat> %0, <8 x i32> <i32 10, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
ret <8 x bfloat> %vset_lane
}
; vcopyq_lane_bf16(a, 6, b, 0);
define <8 x bfloat> @test_vcopyq_lane_bf16_v2(<8 x bfloat> %a, <4 x bfloat> %b) nounwind {
; CHECK-LABEL: test_vcopyq_lane_bf16_v2:
; CHECK-NEXT: mov v0.h[6], v1.h[0]
; CHECK-NEXT: ret
entry:
%0 = shufflevector <4 x bfloat> %b, <4 x bfloat> undef, <8 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%vset_lane = shufflevector <8 x bfloat> %a, <8 x bfloat> %0, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 8, i32 7>
ret <8 x bfloat> %vset_lane
}
; vcopy_laneq_bf16(a, 0, b, 7);
define <4 x bfloat> @test_vcopy_laneq_bf16_v1(<4 x bfloat> %a, <8 x bfloat> %b) nounwind {
; CHECK-LABEL: test_vcopy_laneq_bf16_v1:
; CHECK-NEXT: mov v0.h[0], v1.h[7]
; CHECK-NEXT: ret
entry:
%vgetq_lane = extractelement <8 x bfloat> %b, i32 7
%vset_lane = insertelement <4 x bfloat> %a, bfloat %vgetq_lane, i32 0
ret <4 x bfloat> %vset_lane
}
; vcopy_laneq_bf16(a, 3, b, 4);
define <4 x bfloat> @test_vcopy_laneq_bf16_v2(<4 x bfloat> %a, <8 x bfloat> %b) nounwind {
; CHECK-LABEL: test_vcopy_laneq_bf16_v2:
; CHECK-NEXT: mov v0.h[3], v1.h[4]
; CHECK-NEXT: ret
entry:
%vgetq_lane = extractelement <8 x bfloat> %b, i32 4
%vset_lane = insertelement <4 x bfloat> %a, bfloat %vgetq_lane, i32 3
ret <4 x bfloat> %vset_lane
}
; vcopyq_laneq_bf16(a, 3, b, 7);
define <8 x bfloat> @test_vcopyq_laneq_bf16_v1(<8 x bfloat> %a, <8 x bfloat> %b) nounwind {
; CHECK-LABEL: test_vcopyq_laneq_bf16_v1:
; CHECK-NEXT: mov v0.h[3], v1.h[7]
; CHECK-NEXT: ret
entry:
%vset_lane = shufflevector <8 x bfloat> %a, <8 x bfloat> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 15, i32 4, i32 5, i32 6, i32 7>
ret <8 x bfloat> %vset_lane
}
; vcopyq_laneq_bf16(a, 6, b, 2);
define <8 x bfloat> @test_vcopyq_laneq_bf16_v2(<8 x bfloat> %a, <8 x bfloat> %b) nounwind {
; CHECK-LABEL: test_vcopyq_laneq_bf16_v2:
; CHECK-NEXT: mov v0.h[6], v1.h[2]
; CHECK-NEXT: ret
entry:
%vset_lane = shufflevector <8 x bfloat> %a, <8 x bfloat> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 10, i32 7>
ret <8 x bfloat> %vset_lane
}


@@ -0,0 +1,56 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -verify-machineinstrs -mtriple=armv8.6a-arm-none-eabi -mattr=+neon,+bf16,+fullfp16 | FileCheck %s
declare bfloat @llvm.arm.neon.vcvtbfp2bf(float)
; Hard float ABI
declare <4 x bfloat> @llvm.arm.neon.vcvtfp2bf.v4bf16(<4 x float>)
define arm_aapcs_vfpcc <4 x bfloat> @test_vcvt_bf16_f32_hardfp(<4 x float> %a) {
; CHECK-LABEL: test_vcvt_bf16_f32_hardfp:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vcvt.bf16.f32 d0, q0
; CHECK-NEXT: bx lr
entry:
%vcvtfp2bf1.i.i = call <4 x bfloat> @llvm.arm.neon.vcvtfp2bf.v4bf16(<4 x float> %a)
ret <4 x bfloat> %vcvtfp2bf1.i.i
}
define arm_aapcs_vfpcc bfloat @test_vcvth_bf16_f32_hardfp(float %a) {
; CHECK-LABEL: test_vcvth_bf16_f32_hardfp:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vcvtb.bf16.f32 s0, s0
; CHECK-NEXT: bx lr
entry:
%vcvtbfp2bf.i = call bfloat @llvm.arm.neon.vcvtbfp2bf(float %a)
ret bfloat %vcvtbfp2bf.i
}
; Soft float ABI
declare <4 x i16> @llvm.arm.neon.vcvtfp2bf.v4i16(<4 x float>)
define <2 x i32> @test_vcvt_bf16_f32_softfp(<4 x float> %a) {
; CHECK-LABEL: test_vcvt_bf16_f32_softfp:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov d17, r2, r3
; CHECK-NEXT: vmov d16, r0, r1
; CHECK-NEXT: vcvt.bf16.f32 d16, q8
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: bx lr
entry:
%vcvtfp2bf1.i.i = call <4 x i16> @llvm.arm.neon.vcvtfp2bf.v4i16(<4 x float> %a)
%.cast = bitcast <4 x i16> %vcvtfp2bf1.i.i to <2 x i32>
ret <2 x i32> %.cast
}
define bfloat @test_vcvth_bf16_f32_softfp(float %a) #1 {
; CHECK-LABEL: test_vcvth_bf16_f32_softfp:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.bf16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bx lr
entry:
%vcvtbfp2bf.i = call bfloat @llvm.arm.neon.vcvtbfp2bf(float %a) #3
ret bfloat %vcvtbfp2bf.i
}