
[AArch64][SVE] Add mul/mla/mls lane & dup intrinsics

Summary:
Implements the following intrinsics:
 - @llvm.aarch64.sve.dup
 - @llvm.aarch64.sve.mul.lane
 - @llvm.aarch64.sve.mla.lane
 - @llvm.aarch64.sve.mls.lane
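
A minimal IR-level sketch of the new intrinsics, using the i32 element type as an
example (signatures as exercised by the tests added in this patch; the comments give
a hedged reading of the underlying CPY/MUL/MLA/MLS instruction semantics):

; Predicated splat: active lanes (per the predicate) take the scalar,
; inactive lanes are taken from the first (passthru) vector operand.
declare <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32)

; Indexed multiply: each element of the first vector is multiplied by the element
; of the second vector selected by the trailing lane index, which is expected to
; be a compile-time constant.
declare <vscale x 4 x i32> @llvm.aarch64.sve.mul.lane.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, i32)

; Indexed multiply-add/subtract: accumulator +/- (op2 * selected lane of op3).
declare <vscale x 4 x i32> @llvm.aarch64.sve.mla.lane.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.mls.lane.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)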

Reviewers: c-rhodes, sdesmalen, dancgr, efriedma, rengolin

Reviewed By: sdesmalen

Subscribers: tschuett, kristof.beyls, hiraditya, rkruppe, psnobl, cfe-commits, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D74222
Kerry McLaughlin 2020-02-13 10:11:22 +00:00
parent 61eab1d530
commit b7041a91bb
7 changed files with 255 additions and 7 deletions


@@ -948,6 +948,13 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
llvm_i32_ty],
[IntrNoMem, ImmArg<3>, ImmArg<4>]>;
class AdvSIMD_SVE_DUP_Intrinsic
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
LLVMVectorElementType<0>],
[IntrNoMem]>;
class AdvSIMD_SVE_EXPA_Intrinsic
: Intrinsic<[llvm_anyvector_ty],
[LLVMVectorOfBitcastsToInt<0>],
@@ -1224,6 +1231,12 @@ def int_aarch64_sve_ldff1 : AdvSIMD_1Vec_PredFaultingLoad_Intrinsic;
def int_aarch64_sve_stnt1 : AdvSIMD_1Vec_PredStore_Intrinsic;
//
// Scalar to vector operations
//
def int_aarch64_sve_dup : AdvSIMD_SVE_DUP_Intrinsic;
//
// Integer arithmetic
//
@@ -1235,6 +1248,7 @@ def int_aarch64_sve_subr : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_pmul : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_mul : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_mul_lane : AdvSIMD_2VectorArgIndexed_Intrinsic;
def int_aarch64_sve_smulh : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_umulh : AdvSIMD_Pred2VectorArg_Intrinsic;
@@ -1253,7 +1267,9 @@ def int_aarch64_sve_uabd : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_mad : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_msb : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_mla : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_mla_lane : AdvSIMD_3VectorArgIndexed_Intrinsic;
def int_aarch64_sve_mls : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_mls_lane : AdvSIMD_3VectorArgIndexed_Intrinsic;
def int_aarch64_sve_saddv : AdvSIMD_SVE_SADDV_Reduce_Intrinsic;
def int_aarch64_sve_uaddv : AdvSIMD_SVE_SADDV_Reduce_Intrinsic;
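
For reference, a sketch of how the overloaded dup declaration resolves once the result
vector type is fixed, following the constraints of the new AdvSIMD_SVE_DUP_Intrinsic
class above (the half-precision instance is shown):

; result    : <vscale x 8 x half>  (llvm_anyvector_ty)
; passthru  : <vscale x 8 x half>  (LLVMMatchType<0>)
; predicate : <vscale x 8 x i1>    (LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>)
; scalar    : half                 (LLVMVectorElementType<0>)
declare <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, half)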


@@ -1425,6 +1425,7 @@ const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
case AArch64ISD::LDP: return "AArch64ISD::LDP";
case AArch64ISD::STP: return "AArch64ISD::STP";
case AArch64ISD::STNP: return "AArch64ISD::STNP";
case AArch64ISD::DUP_PRED: return "AArch64ISD::DUP_PRED";
}
return nullptr;
}
@@ -10917,6 +10918,18 @@ static SDValue LowerSVEIntReduction(SDNode *N, unsigned Opc,
return SDValue();
}
static SDValue LowerSVEIntrinsicDUP(SDNode *N, SelectionDAG &DAG) {
SDLoc dl(N);
SDValue Scalar = N->getOperand(3);
EVT ScalarTy = Scalar.getValueType();
if ((ScalarTy == MVT::i8) || (ScalarTy == MVT::i16))
Scalar = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Scalar);
return DAG.getNode(AArch64ISD::DUP_PRED, dl, N->getValueType(0),
N->getOperand(1), N->getOperand(2), Scalar);
}
static SDValue LowerSVEIntrinsicEXT(SDNode *N, SelectionDAG &DAG) {
SDLoc dl(N);
LLVMContext &Ctx = *DAG.getContext();
@@ -11105,6 +11118,8 @@ static SDValue performIntrinsicCombine(SDNode *N,
return LowerSVEIntReduction(N, AArch64ISD::EORV_PRED, DAG);
case Intrinsic::aarch64_sve_andv:
return LowerSVEIntReduction(N, AArch64ISD::ANDV_PRED, DAG);
case Intrinsic::aarch64_sve_dup:
return LowerSVEIntrinsicDUP(N, DAG);
case Intrinsic::aarch64_sve_ext:
return LowerSVEIntrinsicEXT(N, DAG);
case Intrinsic::aarch64_sve_cmpeq_wide:
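
A worked example of the new lowering path, mirroring the dup_i8 case in the test added
below (the function name here is illustrative): performIntrinsicCombine forwards
llvm.aarch64.sve.dup to LowerSVEIntrinsicDUP, which any-extends i8/i16 scalars to i32
(sub-32-bit scalars are not legal register types on AArch64) and builds a DUP_PRED node
from (passthru, predicate, scalar), which the CPY patterns then select.

define <vscale x 16 x i8> @dup_i8_example(<vscale x 16 x i8> %a, <vscale x 16 x i1> %pg, i8 %b) {
  ; lowered to DUP_PRED(%a, %pg, any_extend %b to i32); expected codegen:
  ;   mov z0.b, p0/m, w0
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i1> %pg, i8 %b)
  ret <vscale x 16 x i8> %out
}
declare <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i8)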


@@ -215,6 +215,8 @@ enum NodeType : unsigned {
PTEST,
PTRUE,
DUP_PRED,
LDNF1,
LDNF1S,
LDFF1,


@@ -96,6 +96,9 @@ def AArch64rev : SDNode<"AArch64ISD::REV", SDT_AArch64Rev>;
def SDT_AArch64PTest : SDTypeProfile<0, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
def AArch64ptest : SDNode<"AArch64ISD::PTEST", SDT_AArch64PTest>;
def SDT_AArch64DUP_PRED : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>, SDTCisVec<2>, SDTCVecEltisVT<2,i1>]>;
def AArch64dup_pred : SDNode<"AArch64ISD::DUP_PRED", SDT_AArch64DUP_PRED>;
let Predicates = [HasSVE] in {
defm RDFFR_PPz : sve_int_rdffr_pred<0b0, "rdffr", int_aarch64_sve_rdffr_z>;
@@ -287,8 +290,8 @@ let Predicates = [HasSVE] in {
defm DUP_ZZI : sve_int_perm_dup_i<"dup">;
// Splat scalar register (predicated)
defm CPY_ZPmR : sve_int_perm_cpy_r<"cpy">;
defm CPY_ZPmV : sve_int_perm_cpy_v<"cpy">;
defm CPY_ZPmR : sve_int_perm_cpy_r<"cpy", AArch64dup_pred>;
defm CPY_ZPmV : sve_int_perm_cpy_v<"cpy", AArch64dup_pred>;
// Select elements from either vector (predicated)
defm SEL_ZPZZ : sve_int_sel_vvv<"sel", vselect>;
@@ -1396,8 +1399,8 @@ let Predicates = [HasSVE] in {
let Predicates = [HasSVE2] in {
// SVE2 integer multiply-add (indexed)
defm MLA_ZZZI : sve2_int_mla_by_indexed_elem<0b01, 0b0, "mla", null_frag>;
defm MLS_ZZZI : sve2_int_mla_by_indexed_elem<0b01, 0b1, "mls", null_frag>;
defm MLA_ZZZI : sve2_int_mla_by_indexed_elem<0b01, 0b0, "mla", int_aarch64_sve_mla_lane>;
defm MLS_ZZZI : sve2_int_mla_by_indexed_elem<0b01, 0b1, "mls", int_aarch64_sve_mls_lane>;
// SVE2 saturating multiply-add high (indexed)
defm SQRDMLAH_ZZZI : sve2_int_mla_by_indexed_elem<0b10, 0b0, "sqrdmlah", int_aarch64_sve_sqrdmlah_lane>;
@@ -1408,7 +1411,7 @@ let Predicates = [HasSVE2] in {
defm SQRDMLSH_ZZZ : sve2_int_mla<0b1, "sqrdmlsh", int_aarch64_sve_sqrdmlsh>;
// SVE2 integer multiply (indexed)
defm MUL_ZZZI : sve2_int_mul_by_indexed_elem<0b1110, "mul", null_frag>;
defm MUL_ZZZI : sve2_int_mul_by_indexed_elem<0b1110, "mul", int_aarch64_sve_mul_lane>;
// SVE2 saturating multiply high (indexed)
defm SQDMULH_ZZZI : sve2_int_mul_by_indexed_elem<0b1100, "sqdmulh", int_aarch64_sve_sqdmulh_lane>;
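
With null_frag replaced by the new lane intrinsics, the SVE2 indexed MUL/MLA/MLS
instructions are now selected directly from IR; the intrinsic's trailing immediate
becomes the instruction's lane index. A hedged sketch mirroring mla_lane_d in the
tests below (function name illustrative; requires -mattr=+sve2):

define <vscale x 2 x i64> @mla_lane_example(<vscale x 2 x i64> %acc, <vscale x 2 x i64> %op, <vscale x 2 x i64> %lanes) {
  ; %acc + (%op * lane 1 of %lanes); expected codegen:
  ;   mla z0.d, z1.d, z2.d[1]
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.mla.lane.nxv2i64(<vscale x 2 x i64> %acc, <vscale x 2 x i64> %op, <vscale x 2 x i64> %lanes, i32 1)
  ret <vscale x 2 x i64> %out
}
declare <vscale x 2 x i64> @llvm.aarch64.sve.mla.lane.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32)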


@@ -5603,7 +5603,7 @@ class sve_int_perm_cpy_r<bits<2> sz8_64, string asm, ZPRRegOp zprty,
let ElementSize = zprty.ElementSize;
}
multiclass sve_int_perm_cpy_r<string asm> {
multiclass sve_int_perm_cpy_r<string asm, SDPatternOperator op> {
def _B : sve_int_perm_cpy_r<0b00, asm, ZPR8, GPR32sp>;
def _H : sve_int_perm_cpy_r<0b01, asm, ZPR16, GPR32sp>;
def _S : sve_int_perm_cpy_r<0b10, asm, ZPR32, GPR32sp>;
@@ -5617,6 +5617,11 @@ multiclass sve_int_perm_cpy_r<string asm> {
(!cast<Instruction>(NAME # _S) ZPR32:$Zd, PPR3bAny:$Pg, GPR32sp:$Rn), 1>;
def : InstAlias<"mov $Zd, $Pg/m, $Rn",
(!cast<Instruction>(NAME # _D) ZPR64:$Zd, PPR3bAny:$Pg, GPR64sp:$Rn), 1>;
def : SVE_3_Op_Pat<nxv16i8, op, nxv16i8, nxv16i1, i32, !cast<Instruction>(NAME # _B)>;
def : SVE_3_Op_Pat<nxv8i16, op, nxv8i16, nxv8i1, i32, !cast<Instruction>(NAME # _H)>;
def : SVE_3_Op_Pat<nxv4i32, op, nxv4i32, nxv4i1, i32, !cast<Instruction>(NAME # _S)>;
def : SVE_3_Op_Pat<nxv2i64, op, nxv2i64, nxv2i1, i64, !cast<Instruction>(NAME # _D)>;
}
class sve_int_perm_cpy_v<bits<2> sz8_64, string asm, ZPRRegOp zprty,
@@ -5640,7 +5645,7 @@ class sve_int_perm_cpy_v<bits<2> sz8_64, string asm, ZPRRegOp zprty,
let ElementSize = zprty.ElementSize;
}
multiclass sve_int_perm_cpy_v<string asm> {
multiclass sve_int_perm_cpy_v<string asm, SDPatternOperator op> {
def _B : sve_int_perm_cpy_v<0b00, asm, ZPR8, FPR8>;
def _H : sve_int_perm_cpy_v<0b01, asm, ZPR16, FPR16>;
def _S : sve_int_perm_cpy_v<0b10, asm, ZPR32, FPR32>;
@@ -5654,6 +5659,11 @@ multiclass sve_int_perm_cpy_v<string asm> {
(!cast<Instruction>(NAME # _S) ZPR32:$Zd, PPR3bAny:$Pg, FPR32:$Vn), 1>;
def : InstAlias<"mov $Zd, $Pg/m, $Vn",
(!cast<Instruction>(NAME # _D) ZPR64:$Zd, PPR3bAny:$Pg, FPR64:$Vn), 1>;
def : SVE_3_Op_Pat<nxv8f16, op, nxv8f16, nxv8i1, f16, !cast<Instruction>(NAME # _H)>;
def : SVE_3_Op_Pat<nxv4f32, op, nxv4f32, nxv4i1, f32, !cast<Instruction>(NAME # _S)>;
def : SVE_3_Op_Pat<nxv2f32, op, nxv2f32, nxv2i1, f32, !cast<Instruction>(NAME # _S)>;
def : SVE_3_Op_Pat<nxv2f64, op, nxv2f64, nxv2i1, f64, !cast<Instruction>(NAME # _D)>;
}
class sve_int_perm_compact<bit sz, string asm, ZPRRegOp zprty>
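
The SVE_3_Op_Pat entries tie AArch64dup_pred to the existing CPY forms: integer element
types go through CPY_ZPmR (scalar read from a general-purpose register), floating-point
element types through CPY_ZPmV (scalar read from a SIMD&FP register). A hedged sketch of
the floating-point path, mirroring dup_f16 in the tests below (function name illustrative):

define <vscale x 8 x half> @dup_f16_example(<vscale x 8 x half> %a, <vscale x 8 x i1> %pg, half %b) {
  ; selected via the CPY_ZPmV half-word form; expected codegen:
  ;   mov z0.h, p0/m, h1
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %pg, half %b)
  ret <vscale x 8 x half> %out
}
declare <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, half)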


@@ -0,0 +1,83 @@
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
;
; DUP
;
define <vscale x 16 x i8> @dup_i8(<vscale x 16 x i8> %a, <vscale x 16 x i1> %pg, i8 %b) {
; CHECK-LABEL: dup_i8:
; CHECK: mov z0.b, p0/m, w0
; CHECK-NEXT: ret
%out = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> %a,
<vscale x 16 x i1> %pg,
i8 %b)
ret <vscale x 16 x i8> %out
}
define <vscale x 8 x i16> @dup_i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg, i16 %b) {
; CHECK-LABEL: dup_i16:
; CHECK: mov z0.h, p0/m, w0
; CHECK-NEXT: ret
%out = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> %a,
<vscale x 8 x i1> %pg,
i16 %b)
ret <vscale x 8 x i16> %out
}
define <vscale x 4 x i32> @dup_i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, i32 %b) {
; CHECK-LABEL: dup_i32:
; CHECK: mov z0.s, p0/m, w0
; CHECK-NEXT: ret
%out = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> %a,
<vscale x 4 x i1> %pg,
i32 %b)
ret <vscale x 4 x i32> %out
}
define <vscale x 2 x i64> @dup_i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, i64 %b) {
; CHECK-LABEL: dup_i64:
; CHECK: mov z0.d, p0/m, x0
; CHECK-NEXT: ret
%out = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> %a,
<vscale x 2 x i1> %pg,
i64 %b)
ret <vscale x 2 x i64> %out
}
define <vscale x 8 x half> @dup_f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %pg, half %b) {
; CHECK-LABEL: dup_f16:
; CHECK: mov z0.h, p0/m, h1
; CHECK-NEXT: ret
%out = call <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half> %a,
<vscale x 8 x i1> %pg,
half %b)
ret <vscale x 8 x half> %out
}
define <vscale x 4 x float> @dup_f32(<vscale x 4 x float> %a, <vscale x 4 x i1> %pg, float %b) {
; CHECK-LABEL: dup_f32:
; CHECK: mov z0.s, p0/m, s1
; CHECK-NEXT: ret
%out = call <vscale x 4 x float> @llvm.aarch64.sve.dup.nxv4f32(<vscale x 4 x float> %a,
<vscale x 4 x i1> %pg,
float %b)
ret <vscale x 4 x float> %out
}
define <vscale x 2 x double> @dup_f64(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, double %b) {
; CHECK-LABEL: dup_f64:
; CHECK: mov z0.d, p0/m, d1
; CHECK-NEXT: ret
%out = call <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double> %a,
<vscale x 2 x i1> %pg,
double %b)
ret <vscale x 2 x double> %out
}
declare <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i8)
declare <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i16)
declare <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32)
declare <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64)
declare <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, half)
declare <vscale x 4 x float> @llvm.aarch64.sve.dup.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, float)
declare <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double)


@@ -0,0 +1,119 @@
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 < %s | FileCheck %s
;
; MUL
;
define <vscale x 2 x i64> @mul_lane_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: mul_lane_d:
; CHECK: mul z0.d, z0.d, z1.d[1]
; CHECK-NEXT: ret
%out = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.lane.nxv2i64(<vscale x 2 x i64> %a,
<vscale x 2 x i64> %b,
i32 1)
ret <vscale x 2 x i64> %out
}
define <vscale x 4 x i32> @mul_lane_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: mul_lane_s:
; CHECK: mul z0.s, z0.s, z1.s[1]
; CHECK-NEXT: ret
%out = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.lane.nxv4i32(<vscale x 4 x i32> %a,
<vscale x 4 x i32> %b,
i32 1)
ret <vscale x 4 x i32> %out
}
define <vscale x 8 x i16> @mul_lane_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: mul_lane_h:
; CHECK: mul z0.h, z0.h, z1.h[1]
; CHECK-NEXT: ret
%out = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.lane.nxv8i16(<vscale x 8 x i16> %a,
<vscale x 8 x i16> %b,
i32 1)
ret <vscale x 8 x i16> %out
}
;
; MLA
;
define <vscale x 2 x i64> @mla_lane_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) {
; CHECK-LABEL: mla_lane_d:
; CHECK: mla z0.d, z1.d, z2.d[1]
; CHECK-NEXT: ret
%out = call <vscale x 2 x i64> @llvm.aarch64.sve.mla.lane.nxv2i64(<vscale x 2 x i64> %a,
<vscale x 2 x i64> %b,
<vscale x 2 x i64> %c,
i32 1)
ret <vscale x 2 x i64> %out
}
define <vscale x 4 x i32> @mla_lane_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
; CHECK-LABEL: mla_lane_s:
; CHECK: mla z0.s, z1.s, z2.s[1]
; CHECK-NEXT: ret
%out = call <vscale x 4 x i32> @llvm.aarch64.sve.mla.lane.nxv4i32(<vscale x 4 x i32> %a,
<vscale x 4 x i32> %b,
<vscale x 4 x i32> %c,
i32 1)
ret <vscale x 4 x i32> %out
}
define <vscale x 8 x i16> @mla_lane_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
; CHECK-LABEL: mla_lane_h:
; CHECK: mla z0.h, z1.h, z2.h[1]
; CHECK-NEXT: ret
%out = call <vscale x 8 x i16> @llvm.aarch64.sve.mla.lane.nxv8i16(<vscale x 8 x i16> %a,
<vscale x 8 x i16> %b,
<vscale x 8 x i16> %c,
i32 1)
ret <vscale x 8 x i16> %out
}
;
; MLS
;
define <vscale x 2 x i64> @mls_lane_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) {
; CHECK-LABEL: mls_lane_d:
; CHECK: mls z0.d, z1.d, z2.d[1]
; CHECK-NEXT: ret
%out = call <vscale x 2 x i64> @llvm.aarch64.sve.mls.lane.nxv2i64(<vscale x 2 x i64> %a,
<vscale x 2 x i64> %b,
<vscale x 2 x i64> %c,
i32 1)
ret <vscale x 2 x i64> %out
}
define <vscale x 4 x i32> @mls_lane_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
; CHECK-LABEL: mls_lane_s:
; CHECK: mls z0.s, z1.s, z2.s[1]
; CHECK-NEXT: ret
%out = call <vscale x 4 x i32> @llvm.aarch64.sve.mls.lane.nxv4i32(<vscale x 4 x i32> %a,
<vscale x 4 x i32> %b,
<vscale x 4 x i32> %c,
i32 1)
ret <vscale x 4 x i32> %out
}
define <vscale x 8 x i16> @mls_lane_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
; CHECK-LABEL: mls_lane_h:
; CHECK: mls z0.h, z1.h, z2.h[1]
; CHECK-NEXT: ret
%out = call <vscale x 8 x i16> @llvm.aarch64.sve.mls.lane.nxv8i16(<vscale x 8 x i16> %a,
<vscale x 8 x i16> %b,
<vscale x 8 x i16> %c,
i32 1)
ret <vscale x 8 x i16> %out
}
declare <vscale x 8 x i16> @llvm.aarch64.sve.mul.lane.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.mul.lane.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, i32)
declare <vscale x 2 x i64> @llvm.aarch64.sve.mul.lane.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.mla.lane.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.mla.lane.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
declare <vscale x 2 x i64> @llvm.aarch64.sve.mla.lane.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.mls.lane.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.mls.lane.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
declare <vscale x 2 x i64> @llvm.aarch64.sve.mls.lane.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32)