[AArch64][SVE] Add SVE2 mla indexed intrinsics.
Summary:
Add SVE2 mla indexed intrinsics:
 - smlalb, smlalt, umlalb, umlalt, smlslb, smlslt, umlslb, umlslt.

Reviewers: efriedma, sdesmalen, dancgr, cameron.mcinally, c-rhodes, rengolin

Subscribers: tschuett, kristof.beyls, hiraditya, rkruppe, arphaman, psnobl, llvm-commits, amehsan

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D73576
parent 7a56d419a1
commit c32dde6058
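Each new intrinsic takes a wide accumulator, two half-width multiplicands, and an immediate lane index. A minimal LLVM IR usage sketch (value names are placeholders; the expected selection follows the tests added below):

  ; Widening multiply of the even (bottom) 16-bit elements of %x by lane 1
  ; of %y, accumulated into %acc; selects: smlalb z0.s, z1.h, z2.h[1]
  %r = call <vscale x 4 x i32> @llvm.aarch64.sve.smlalb.nxv4i32(
                 <vscale x 4 x i32> %acc, <vscale x 8 x i16> %x,
                 <vscale x 8 x i16> %y, i64 1)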
@@ -1080,6 +1080,14 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
                  llvm_i32_ty],
                 [IntrNoMem, ImmArg<2>]>;
 
+  class SVE2_3VectorArg_Indexed_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty],
+                [LLVMMatchType<0>,
+                 LLVMSubdivide2VectorType<0>,
+                 LLVMSubdivide2VectorType<0>,
+                 llvm_i64_ty],
+                [IntrNoMem, ImmArg<3>]>;
+
   // NOTE: There is no relationship between these intrinsics beyond an attempt
   // to reuse currently identical class definitions.
   class AdvSIMD_SVE_LOGB_Intrinsic : AdvSIMD_SVE_CNT_Intrinsic;
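Here LLVMSubdivide2VectorType<0> resolves to a vector with twice as many elements of half the width of the overloaded result/accumulator type, and ImmArg<3> requires operand 3 (the i64 lane index) to be an immediate. Instantiated at <vscale x 4 x i32>, the class yields the signature exercised by the new tests:

  declare <vscale x 4 x i32> @llvm.aarch64.sve.smlalb.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64)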
@@ -1732,4 +1740,14 @@ def int_aarch64_sve_sqshrunt : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;
 
 def int_aarch64_sve_sqrshrunb : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
 def int_aarch64_sve_sqrshrunt : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;
+
+def int_aarch64_sve_smlalb : SVE2_3VectorArg_Indexed_Intrinsic;
+def int_aarch64_sve_smlalt : SVE2_3VectorArg_Indexed_Intrinsic;
+def int_aarch64_sve_umlalb : SVE2_3VectorArg_Indexed_Intrinsic;
+def int_aarch64_sve_umlalt : SVE2_3VectorArg_Indexed_Intrinsic;
+def int_aarch64_sve_smlslb : SVE2_3VectorArg_Indexed_Intrinsic;
+def int_aarch64_sve_smlslt : SVE2_3VectorArg_Indexed_Intrinsic;
+def int_aarch64_sve_umlslb : SVE2_3VectorArg_Indexed_Intrinsic;
+def int_aarch64_sve_umlslt : SVE2_3VectorArg_Indexed_Intrinsic;
+
 }
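The mnemonics decompose as: s/u for signed or unsigned widening, mlal/mlsl for multiply-add or multiply-subtract long, and b/t for the bottom (even-numbered) or top (odd-numbered) half-width source elements. Roughly, per result element (a sketch, ignoring the per-128-bit-segment indexing of the multiplier in the indexed forms):

  ; smlalb: res[i] = acc[i] + sext(x[2*i])   * sext(y[idx])
  ; smlalt: res[i] = acc[i] + sext(x[2*i+1]) * sext(y[idx])
  ; umlslb: res[i] = acc[i] - zext(x[2*i])   * zext(y[idx])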
@@ -1467,14 +1467,14 @@ let Predicates = [HasSVE2] in {
   defm SQDMULLT_ZZZI : sve2_int_mul_long_by_indexed_elem<0b101, "sqdmullt">;
 
   // SVE2 integer multiply-add long (indexed)
-  defm SMLALB_ZZZI : sve2_int_mla_long_by_indexed_elem<0b1000, "smlalb">;
-  defm SMLALT_ZZZI : sve2_int_mla_long_by_indexed_elem<0b1001, "smlalt">;
-  defm UMLALB_ZZZI : sve2_int_mla_long_by_indexed_elem<0b1010, "umlalb">;
-  defm UMLALT_ZZZI : sve2_int_mla_long_by_indexed_elem<0b1011, "umlalt">;
-  defm SMLSLB_ZZZI : sve2_int_mla_long_by_indexed_elem<0b1100, "smlslb">;
-  defm SMLSLT_ZZZI : sve2_int_mla_long_by_indexed_elem<0b1101, "smlslt">;
-  defm UMLSLB_ZZZI : sve2_int_mla_long_by_indexed_elem<0b1110, "umlslb">;
-  defm UMLSLT_ZZZI : sve2_int_mla_long_by_indexed_elem<0b1111, "umlslt">;
+  defm SMLALB_ZZZI : sve2_int_mla_long_by_indexed_elem<0b1000, "smlalb", int_aarch64_sve_smlalb>;
+  defm SMLALT_ZZZI : sve2_int_mla_long_by_indexed_elem<0b1001, "smlalt", int_aarch64_sve_smlalt>;
+  defm UMLALB_ZZZI : sve2_int_mla_long_by_indexed_elem<0b1010, "umlalb", int_aarch64_sve_umlalb>;
+  defm UMLALT_ZZZI : sve2_int_mla_long_by_indexed_elem<0b1011, "umlalt", int_aarch64_sve_umlalt>;
+  defm SMLSLB_ZZZI : sve2_int_mla_long_by_indexed_elem<0b1100, "smlslb", int_aarch64_sve_smlslb>;
+  defm SMLSLT_ZZZI : sve2_int_mla_long_by_indexed_elem<0b1101, "smlslt", int_aarch64_sve_smlslt>;
+  defm UMLSLB_ZZZI : sve2_int_mla_long_by_indexed_elem<0b1110, "umlslb", int_aarch64_sve_umlslb>;
+  defm UMLSLT_ZZZI : sve2_int_mla_long_by_indexed_elem<0b1111, "umlslt", int_aarch64_sve_umlslt>;
 
   // SVE2 integer multiply-add long (vectors, unpredicated)
   defm SMLALB_ZZZ : sve2_int_mla_long<0b10000, "smlalb">;
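Read as opc{3}..opc{0}, the opcode assignments above follow a simple scheme within this group:

  opc{3} = 1   integer (non-saturating) group; 0 selects the sqdml* group below
  opc{2}       0 = add (mlal),  1 = subtract (mlsl)
  opc{1}       0 = signed (s),  1 = unsigned (u)
  opc{0}       0 = bottom (b),  1 = top (t)

so, for example, 0b1110 decodes to umlslb.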
@@ -1487,10 +1487,10 @@ let Predicates = [HasSVE2] in {
   defm UMLSLT_ZZZ : sve2_int_mla_long<0b10111, "umlslt">;
 
   // SVE2 saturating multiply-add long (indexed)
-  defm SQDMLALB_ZZZI : sve2_int_mla_long_by_indexed_elem<0b0100, "sqdmlalb">;
-  defm SQDMLALT_ZZZI : sve2_int_mla_long_by_indexed_elem<0b0101, "sqdmlalt">;
-  defm SQDMLSLB_ZZZI : sve2_int_mla_long_by_indexed_elem<0b0110, "sqdmlslb">;
-  defm SQDMLSLT_ZZZI : sve2_int_mla_long_by_indexed_elem<0b0111, "sqdmlslt">;
+  defm SQDMLALB_ZZZI : sve2_int_mla_long_by_indexed_elem<0b0100, "sqdmlalb", null_frag>;
+  defm SQDMLALT_ZZZI : sve2_int_mla_long_by_indexed_elem<0b0101, "sqdmlalt", null_frag>;
+  defm SQDMLSLB_ZZZI : sve2_int_mla_long_by_indexed_elem<0b0110, "sqdmlslb", null_frag>;
+  defm SQDMLSLT_ZZZI : sve2_int_mla_long_by_indexed_elem<0b0111, "sqdmlslt", null_frag>;
 
   // SVE2 saturating multiply-add long (vectors, unpredicated)
   defm SQDMLALB_ZZZ : sve2_int_mla_long<0b11000, "sqdmlalb">;
@@ -2402,7 +2402,7 @@ multiclass sve2_int_mla_by_indexed_elem<bits<2> opc, bit S, string asm> {
 // SVE2 Integer Multiply-Add Long - Indexed Group
 //===----------------------------------------------------------------------===//
 
-multiclass sve2_int_mla_long_by_indexed_elem<bits<4> opc, string asm> {
+multiclass sve2_int_mla_long_by_indexed_elem<bits<4> opc, string asm, SDPatternOperator op> {
   def _S : sve2_int_mla_by_indexed_elem<0b10, { opc{3}, 0b0, opc{2-1}, ?, opc{0} },
                                         asm, ZPR32, ZPR16, ZPR3b16, VectorIndexH> {
     bits<3> Zm;
@@ -2419,6 +2419,9 @@ multiclass sve2_int_mla_long_by_indexed_elem<bits<4> opc, string asm> {
     let Inst{19-16} = Zm;
     let Inst{11} = iop{0};
   }
+
+  def : SVE_4_Op_Imm_Pat<nxv4i32, op, nxv4i32, nxv8i16, nxv8i16, i64, VectorIndexH_timm, !cast<Instruction>(NAME # _S)>;
+  def : SVE_4_Op_Imm_Pat<nxv2i64, op, nxv2i64, nxv4i32, nxv4i32, i64, VectorIndexS_timm, !cast<Instruction>(NAME # _D)>;
 }
 
 //===----------------------------------------------------------------------===//
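Each instantiation now emits two selection patterns, mapping the nxv4i32 and nxv2i64 intrinsic overloads onto the _S and _D instruction forms; VectorIndexH_timm and VectorIndexS_timm constrain the lane index to an in-range target immediate (0-7 for 16-bit lanes, 0-3 for 32-bit lanes, matching the z2.h[7] and z2.s[3] extremes exercised by the tests). For reference, SVE_4_Op_Imm_Pat is defined elsewhere in SVEInstrFormats.td, roughly along these lines (a sketch, not part of this patch):

  class SVE_4_Op_Imm_Pat<ValueType vtd, SDPatternOperator op, ValueType vt1,
                         ValueType vt2, ValueType vt3, ValueType vt4,
                         Operand ImmTy, Instruction inst>
      : Pat<(vtd (op vt1:$Op1, vt2:$Op2, vt3:$Op3, (vt4 ImmTy:$Op4))),
            (inst $Op1, $Op2, $Op3, ImmTy:$Op4)>;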
test/CodeGen/AArch64/sve2-mla-indexed.ll (new file, 458 lines)
@@ -0,0 +1,458 @@
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 < %s | FileCheck %s

;
; SMLALB
;
define <vscale x 4 x i32> @smlalb_i32(<vscale x 4 x i32> %a,
                                      <vscale x 8 x i16> %b,
                                      <vscale x 8 x i16> %c) {
; CHECK-LABEL: smlalb_i32
; CHECK: smlalb z0.s, z1.h, z2.h[1]
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.smlalb.nxv4i32(<vscale x 4 x i32> %a,
                                                                  <vscale x 8 x i16> %b,
                                                                  <vscale x 8 x i16> %c,
                                                                  i64 1)
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x i32> @smlalb_i32_2(<vscale x 4 x i32> %a,
                                        <vscale x 8 x i16> %b,
                                        <vscale x 8 x i16> %c) {
; CHECK-LABEL: smlalb_i32_2
; CHECK: smlalb z0.s, z1.h, z2.h[7]
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.smlalb.nxv4i32(<vscale x 4 x i32> %a,
                                                                  <vscale x 8 x i16> %b,
                                                                  <vscale x 8 x i16> %c,
                                                                  i64 7)
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @smlalb_i64(<vscale x 2 x i64> %a,
                                      <vscale x 4 x i32> %b,
                                      <vscale x 4 x i32> %c) {
; CHECK-LABEL: smlalb_i64
; CHECK: smlalb z0.d, z1.s, z2.s[0]
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.smlalb.nxv2i64(<vscale x 2 x i64> %a,
                                                                  <vscale x 4 x i32> %b,
                                                                  <vscale x 4 x i32> %c,
                                                                  i64 0)
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @smlalb_i64_2(<vscale x 2 x i64> %a,
                                        <vscale x 4 x i32> %b,
                                        <vscale x 4 x i32> %c) {
; CHECK-LABEL: smlalb_i64_2
; CHECK: smlalb z0.d, z1.s, z2.s[3]
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.smlalb.nxv2i64(<vscale x 2 x i64> %a,
                                                                  <vscale x 4 x i32> %b,
                                                                  <vscale x 4 x i32> %c,
                                                                  i64 3)
  ret <vscale x 2 x i64> %res
}

;
; SMLALT
;
define <vscale x 4 x i32> @smlalt_i32(<vscale x 4 x i32> %a,
                                      <vscale x 8 x i16> %b,
                                      <vscale x 8 x i16> %c) {
; CHECK-LABEL: smlalt_i32
; CHECK: smlalt z0.s, z1.h, z2.h[1]
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.smlalt.nxv4i32(<vscale x 4 x i32> %a,
                                                                  <vscale x 8 x i16> %b,
                                                                  <vscale x 8 x i16> %c,
                                                                  i64 1)
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x i32> @smlalt_i32_2(<vscale x 4 x i32> %a,
                                        <vscale x 8 x i16> %b,
                                        <vscale x 8 x i16> %c) {
; CHECK-LABEL: smlalt_i32_2
; CHECK: smlalt z0.s, z1.h, z2.h[7]
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.smlalt.nxv4i32(<vscale x 4 x i32> %a,
                                                                  <vscale x 8 x i16> %b,
                                                                  <vscale x 8 x i16> %c,
                                                                  i64 7)
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @smlalt_i64(<vscale x 2 x i64> %a,
                                      <vscale x 4 x i32> %b,
                                      <vscale x 4 x i32> %c) {
; CHECK-LABEL: smlalt_i64
; CHECK: smlalt z0.d, z1.s, z2.s[0]
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.smlalt.nxv2i64(<vscale x 2 x i64> %a,
                                                                  <vscale x 4 x i32> %b,
                                                                  <vscale x 4 x i32> %c,
                                                                  i64 0)
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @smlalt_i64_2(<vscale x 2 x i64> %a,
                                        <vscale x 4 x i32> %b,
                                        <vscale x 4 x i32> %c) {
; CHECK-LABEL: smlalt_i64_2
; CHECK: smlalt z0.d, z1.s, z2.s[3]
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.smlalt.nxv2i64(<vscale x 2 x i64> %a,
                                                                  <vscale x 4 x i32> %b,
                                                                  <vscale x 4 x i32> %c,
                                                                  i64 3)
  ret <vscale x 2 x i64> %res
}

;
; UMLALB
;
define <vscale x 4 x i32> @umlalb_i32(<vscale x 4 x i32> %a,
                                      <vscale x 8 x i16> %b,
                                      <vscale x 8 x i16> %c) {
; CHECK-LABEL: umlalb_i32
; CHECK: umlalb z0.s, z1.h, z2.h[1]
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.umlalb.nxv4i32(<vscale x 4 x i32> %a,
                                                                  <vscale x 8 x i16> %b,
                                                                  <vscale x 8 x i16> %c,
                                                                  i64 1)
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x i32> @umlalb_i32_2(<vscale x 4 x i32> %a,
                                        <vscale x 8 x i16> %b,
                                        <vscale x 8 x i16> %c) {
; CHECK-LABEL: umlalb_i32_2
; CHECK: umlalb z0.s, z1.h, z2.h[7]
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.umlalb.nxv4i32(<vscale x 4 x i32> %a,
                                                                  <vscale x 8 x i16> %b,
                                                                  <vscale x 8 x i16> %c,
                                                                  i64 7)
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @umlalb_i64(<vscale x 2 x i64> %a,
                                      <vscale x 4 x i32> %b,
                                      <vscale x 4 x i32> %c) {
; CHECK-LABEL: umlalb_i64
; CHECK: umlalb z0.d, z1.s, z2.s[0]
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.umlalb.nxv2i64(<vscale x 2 x i64> %a,
                                                                  <vscale x 4 x i32> %b,
                                                                  <vscale x 4 x i32> %c,
                                                                  i64 0)
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @umlalb_i64_2(<vscale x 2 x i64> %a,
                                        <vscale x 4 x i32> %b,
                                        <vscale x 4 x i32> %c) {
; CHECK-LABEL: umlalb_i64_2
; CHECK: umlalb z0.d, z1.s, z2.s[3]
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.umlalb.nxv2i64(<vscale x 2 x i64> %a,
                                                                  <vscale x 4 x i32> %b,
                                                                  <vscale x 4 x i32> %c,
                                                                  i64 3)
  ret <vscale x 2 x i64> %res
}

;
; UMLALT
;
define <vscale x 4 x i32> @umlalt_i32(<vscale x 4 x i32> %a,
                                      <vscale x 8 x i16> %b,
                                      <vscale x 8 x i16> %c) {
; CHECK-LABEL: umlalt_i32
; CHECK: umlalt z0.s, z1.h, z2.h[1]
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.umlalt.nxv4i32(<vscale x 4 x i32> %a,
                                                                  <vscale x 8 x i16> %b,
                                                                  <vscale x 8 x i16> %c,
                                                                  i64 1)
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x i32> @umlalt_i32_2(<vscale x 4 x i32> %a,
                                        <vscale x 8 x i16> %b,
                                        <vscale x 8 x i16> %c) {
; CHECK-LABEL: umlalt_i32_2
; CHECK: umlalt z0.s, z1.h, z2.h[7]
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.umlalt.nxv4i32(<vscale x 4 x i32> %a,
                                                                  <vscale x 8 x i16> %b,
                                                                  <vscale x 8 x i16> %c,
                                                                  i64 7)
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @umlalt_i64(<vscale x 2 x i64> %a,
                                      <vscale x 4 x i32> %b,
                                      <vscale x 4 x i32> %c) {
; CHECK-LABEL: umlalt_i64
; CHECK: umlalt z0.d, z1.s, z2.s[0]
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.umlalt.nxv2i64(<vscale x 2 x i64> %a,
                                                                  <vscale x 4 x i32> %b,
                                                                  <vscale x 4 x i32> %c,
                                                                  i64 0)
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @umlalt_i64_2(<vscale x 2 x i64> %a,
                                        <vscale x 4 x i32> %b,
                                        <vscale x 4 x i32> %c) {
; CHECK-LABEL: umlalt_i64_2
; CHECK: umlalt z0.d, z1.s, z2.s[3]
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.umlalt.nxv2i64(<vscale x 2 x i64> %a,
                                                                  <vscale x 4 x i32> %b,
                                                                  <vscale x 4 x i32> %c,
                                                                  i64 3)
  ret <vscale x 2 x i64> %res
}

;
; SMLSLB
;
define <vscale x 4 x i32> @smlslb_i32(<vscale x 4 x i32> %a,
                                      <vscale x 8 x i16> %b,
                                      <vscale x 8 x i16> %c) {
; CHECK-LABEL: smlslb_i32
; CHECK: smlslb z0.s, z1.h, z2.h[1]
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.smlslb.nxv4i32(<vscale x 4 x i32> %a,
                                                                  <vscale x 8 x i16> %b,
                                                                  <vscale x 8 x i16> %c,
                                                                  i64 1)
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x i32> @smlslb_i32_2(<vscale x 4 x i32> %a,
                                        <vscale x 8 x i16> %b,
                                        <vscale x 8 x i16> %c) {
; CHECK-LABEL: smlslb_i32_2
; CHECK: smlslb z0.s, z1.h, z2.h[7]
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.smlslb.nxv4i32(<vscale x 4 x i32> %a,
                                                                  <vscale x 8 x i16> %b,
                                                                  <vscale x 8 x i16> %c,
                                                                  i64 7)
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @smlslb_i64(<vscale x 2 x i64> %a,
                                      <vscale x 4 x i32> %b,
                                      <vscale x 4 x i32> %c) {
; CHECK-LABEL: smlslb_i64
; CHECK: smlslb z0.d, z1.s, z2.s[0]
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.smlslb.nxv2i64(<vscale x 2 x i64> %a,
                                                                  <vscale x 4 x i32> %b,
                                                                  <vscale x 4 x i32> %c,
                                                                  i64 0)
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @smlslb_i64_2(<vscale x 2 x i64> %a,
                                        <vscale x 4 x i32> %b,
                                        <vscale x 4 x i32> %c) {
; CHECK-LABEL: smlslb_i64_2
; CHECK: smlslb z0.d, z1.s, z2.s[3]
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.smlslb.nxv2i64(<vscale x 2 x i64> %a,
                                                                  <vscale x 4 x i32> %b,
                                                                  <vscale x 4 x i32> %c,
                                                                  i64 3)
  ret <vscale x 2 x i64> %res
}

;
; SMLSLT
;
define <vscale x 4 x i32> @smlslt_i32(<vscale x 4 x i32> %a,
                                      <vscale x 8 x i16> %b,
                                      <vscale x 8 x i16> %c) {
; CHECK-LABEL: smlslt_i32
; CHECK: smlslt z0.s, z1.h, z2.h[1]
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.smlslt.nxv4i32(<vscale x 4 x i32> %a,
                                                                  <vscale x 8 x i16> %b,
                                                                  <vscale x 8 x i16> %c,
                                                                  i64 1)
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x i32> @smlslt_i32_2(<vscale x 4 x i32> %a,
                                        <vscale x 8 x i16> %b,
                                        <vscale x 8 x i16> %c) {
; CHECK-LABEL: smlslt_i32_2
; CHECK: smlslt z0.s, z1.h, z2.h[7]
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.smlslt.nxv4i32(<vscale x 4 x i32> %a,
                                                                  <vscale x 8 x i16> %b,
                                                                  <vscale x 8 x i16> %c,
                                                                  i64 7)
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @smlslt_i64(<vscale x 2 x i64> %a,
                                      <vscale x 4 x i32> %b,
                                      <vscale x 4 x i32> %c) {
; CHECK-LABEL: smlslt_i64
; CHECK: smlslt z0.d, z1.s, z2.s[0]
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.smlslt.nxv2i64(<vscale x 2 x i64> %a,
                                                                  <vscale x 4 x i32> %b,
                                                                  <vscale x 4 x i32> %c,
                                                                  i64 0)
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @smlslt_i64_2(<vscale x 2 x i64> %a,
                                        <vscale x 4 x i32> %b,
                                        <vscale x 4 x i32> %c) {
; CHECK-LABEL: smlslt_i64_2
; CHECK: smlslt z0.d, z1.s, z2.s[3]
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.smlslt.nxv2i64(<vscale x 2 x i64> %a,
                                                                  <vscale x 4 x i32> %b,
                                                                  <vscale x 4 x i32> %c,
                                                                  i64 3)
  ret <vscale x 2 x i64> %res
}

;
; UMLSLB
;
define <vscale x 4 x i32> @umlslb_i32(<vscale x 4 x i32> %a,
                                      <vscale x 8 x i16> %b,
                                      <vscale x 8 x i16> %c) {
; CHECK-LABEL: umlslb_i32
; CHECK: umlslb z0.s, z1.h, z2.h[1]
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.umlslb.nxv4i32(<vscale x 4 x i32> %a,
                                                                  <vscale x 8 x i16> %b,
                                                                  <vscale x 8 x i16> %c,
                                                                  i64 1)
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x i32> @umlslb_i32_2(<vscale x 4 x i32> %a,
                                        <vscale x 8 x i16> %b,
                                        <vscale x 8 x i16> %c) {
; CHECK-LABEL: umlslb_i32_2
; CHECK: umlslb z0.s, z1.h, z2.h[7]
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.umlslb.nxv4i32(<vscale x 4 x i32> %a,
                                                                  <vscale x 8 x i16> %b,
                                                                  <vscale x 8 x i16> %c,
                                                                  i64 7)
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @umlslb_i64(<vscale x 2 x i64> %a,
                                      <vscale x 4 x i32> %b,
                                      <vscale x 4 x i32> %c) {
; CHECK-LABEL: umlslb_i64
; CHECK: umlslb z0.d, z1.s, z2.s[0]
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.umlslb.nxv2i64(<vscale x 2 x i64> %a,
                                                                  <vscale x 4 x i32> %b,
                                                                  <vscale x 4 x i32> %c,
                                                                  i64 0)
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @umlslb_i64_2(<vscale x 2 x i64> %a,
                                        <vscale x 4 x i32> %b,
                                        <vscale x 4 x i32> %c) {
; CHECK-LABEL: umlslb_i64_2
; CHECK: umlslb z0.d, z1.s, z2.s[3]
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.umlslb.nxv2i64(<vscale x 2 x i64> %a,
                                                                  <vscale x 4 x i32> %b,
                                                                  <vscale x 4 x i32> %c,
                                                                  i64 3)
  ret <vscale x 2 x i64> %res
}

;
; UMLSLT
;
define <vscale x 4 x i32> @umlslt_i32(<vscale x 4 x i32> %a,
                                      <vscale x 8 x i16> %b,
                                      <vscale x 8 x i16> %c) {
; CHECK-LABEL: umlslt_i32
; CHECK: umlslt z0.s, z1.h, z2.h[1]
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.umlslt.nxv4i32(<vscale x 4 x i32> %a,
                                                                  <vscale x 8 x i16> %b,
                                                                  <vscale x 8 x i16> %c,
                                                                  i64 1)
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x i32> @umlslt_i32_2(<vscale x 4 x i32> %a,
                                        <vscale x 8 x i16> %b,
                                        <vscale x 8 x i16> %c) {
; CHECK-LABEL: umlslt_i32_2
; CHECK: umlslt z0.s, z1.h, z2.h[7]
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.umlslt.nxv4i32(<vscale x 4 x i32> %a,
                                                                  <vscale x 8 x i16> %b,
                                                                  <vscale x 8 x i16> %c,
                                                                  i64 7)
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @umlslt_i64(<vscale x 2 x i64> %a,
                                      <vscale x 4 x i32> %b,
                                      <vscale x 4 x i32> %c) {
; CHECK-LABEL: umlslt_i64
; CHECK: umlslt z0.d, z1.s, z2.s[0]
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.umlslt.nxv2i64(<vscale x 2 x i64> %a,
                                                                  <vscale x 4 x i32> %b,
                                                                  <vscale x 4 x i32> %c,
                                                                  i64 0)
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @umlslt_i64_2(<vscale x 2 x i64> %a,
                                        <vscale x 4 x i32> %b,
                                        <vscale x 4 x i32> %c) {
; CHECK-LABEL: umlslt_i64_2
; CHECK: umlslt z0.d, z1.s, z2.s[3]
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.umlslt.nxv2i64(<vscale x 2 x i64> %a,
                                                                  <vscale x 4 x i32> %b,
                                                                  <vscale x 4 x i32> %c,
                                                                  i64 3)
  ret <vscale x 2 x i64> %res
}

declare <vscale x 4 x i32> @llvm.aarch64.sve.smlalb.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64)
declare <vscale x 2 x i64> @llvm.aarch64.sve.smlalb.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64)
declare <vscale x 4 x i32> @llvm.aarch64.sve.smlalt.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64)
declare <vscale x 2 x i64> @llvm.aarch64.sve.smlalt.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64)
declare <vscale x 4 x i32> @llvm.aarch64.sve.umlalb.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64)
declare <vscale x 2 x i64> @llvm.aarch64.sve.umlalb.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64)
declare <vscale x 4 x i32> @llvm.aarch64.sve.umlalt.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64)
declare <vscale x 2 x i64> @llvm.aarch64.sve.umlalt.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64)
declare <vscale x 4 x i32> @llvm.aarch64.sve.smlslb.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64)
declare <vscale x 2 x i64> @llvm.aarch64.sve.smlslb.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64)
declare <vscale x 4 x i32> @llvm.aarch64.sve.smlslt.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64)
declare <vscale x 2 x i64> @llvm.aarch64.sve.smlslt.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64)
declare <vscale x 4 x i32> @llvm.aarch64.sve.umlslb.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64)
declare <vscale x 2 x i64> @llvm.aarch64.sve.umlslb.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64)
declare <vscale x 4 x i32> @llvm.aarch64.sve.umlslt.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64)
declare <vscale x 2 x i64> @llvm.aarch64.sve.umlslt.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64)