From 2ca38c891410bd712957b284a77760a3464f5d26 Mon Sep 17 00:00:00 2001
From: Hsiangkai Wang
Date: Wed, 16 Dec 2020 16:46:21 +0800
Subject: [PATCH] [RISCV] Define vector widening mul intrinsics.

Define vector widening mul intrinsics and lower them to V instructions.

We worked with @rogfer01 from BSC to come up with this patch.

Authored-by: Roger Ferrer Ibanez
Co-Authored-by: Hsiangkai Wang

Differential Revision: https://reviews.llvm.org/D93381
---
 include/llvm/IR/IntrinsicsRISCV.td         |    4 +
 lib/Target/RISCV/RISCVInstrInfoVPseudos.td |   14 +
 test/CodeGen/RISCV/rvv/vwmul-rv32.ll       |  881 ++++++++++++++
 test/CodeGen/RISCV/rvv/vwmul-rv64.ll       | 1201 ++++++++++++++++++++
 test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll     |  881 ++++++++++++++
 test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll     | 1201 ++++++++++++++++++++
 test/CodeGen/RISCV/rvv/vwmulu-rv32.ll      |  881 ++++++++++++++
 test/CodeGen/RISCV/rvv/vwmulu-rv64.ll      | 1201 ++++++++++++++++++++
 8 files changed, 6264 insertions(+)
 create mode 100644 test/CodeGen/RISCV/rvv/vwmul-rv32.ll
 create mode 100644 test/CodeGen/RISCV/rvv/vwmul-rv64.ll
 create mode 100644 test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll
 create mode 100644 test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll
 create mode 100644 test/CodeGen/RISCV/rvv/vwmulu-rv32.ll
 create mode 100644 test/CodeGen/RISCV/rvv/vwmulu-rv64.ll

diff --git a/include/llvm/IR/IntrinsicsRISCV.td b/include/llvm/IR/IntrinsicsRISCV.td
index c749d78023b..bb695befd13 100644
--- a/include/llvm/IR/IntrinsicsRISCV.td
+++ b/include/llvm/IR/IntrinsicsRISCV.td
@@ -247,6 +247,10 @@ let TargetPrefix = "riscv" in {
   defm vremu : RISCVBinaryAAX;
   defm vrem : RISCVBinaryAAX;
 
+  defm vwmul : RISCVBinaryABX;
+  defm vwmulu : RISCVBinaryABX;
+  defm vwmulsu : RISCVBinaryABX;
+
   defm vfadd : RISCVBinaryAAX;
   defm vfsub : RISCVBinaryAAX;
   defm vfrsub : RISCVBinaryAAX;
diff --git a/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index a05075cebd7..4d31f1217a3 100644
--- a/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -1156,6 +1156,13 @@ defm PseudoVDIV : VPseudoBinaryV_VV_VX;
 defm PseudoVREMU : VPseudoBinaryV_VV_VX;
 defm PseudoVREM : VPseudoBinaryV_VV_VX;
 
+//===----------------------------------------------------------------------===//
+// 12.12. Vector Widening Integer Multiply Instructions
+//===----------------------------------------------------------------------===//
+defm PseudoVWMUL : VPseudoBinaryW_VV_VX;
+defm PseudoVWMULU : VPseudoBinaryW_VV_VX;
+defm PseudoVWMULSU : VPseudoBinaryW_VV_VX;
+
 } // Predicates = [HasStdExtV]
 
 let Predicates = [HasStdExtV, HasStdExtF] in {
@@ -1274,6 +1281,13 @@ defm "" : VPatBinaryV_VV_VX<"int_riscv_vdiv", "PseudoVDIV", AllIntegerVectors>;
 defm "" : VPatBinaryV_VV_VX<"int_riscv_vremu", "PseudoVREMU", AllIntegerVectors>;
 defm "" : VPatBinaryV_VV_VX<"int_riscv_vrem", "PseudoVREM", AllIntegerVectors>;
 
+//===----------------------------------------------------------------------===//
+// 12.12. Vector Widening Integer Multiply Instructions
+//===----------------------------------------------------------------------===//
+defm "" : VPatBinaryW_VV_VX<"int_riscv_vwmul", "PseudoVWMUL">;
+defm "" : VPatBinaryW_VV_VX<"int_riscv_vwmulu", "PseudoVWMULU">;
+defm "" : VPatBinaryW_VV_VX<"int_riscv_vwmulsu", "PseudoVWMULSU">;
+
 } // Predicates = [HasStdExtV]
 
 let Predicates = [HasStdExtV, HasStdExtF] in {
diff --git a/test/CodeGen/RISCV/rvv/vwmul-rv32.ll b/test/CodeGen/RISCV/rvv/vwmul-rv32.ll
new file mode 100644
index 00000000000..57f516d9ef1
--- /dev/null
+++ b/test/CodeGen/RISCV/rvv/vwmul-rv32.ll
@@ -0,0 +1,881 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vwmul_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vwmul_vv_nxv2i16_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vwmul_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i16_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vwmul_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vwmul_vv_nxv4i16_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vwmul_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i16_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vwmul_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL:
intrinsic_vwmul_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vwmul_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8( + , + , + i32); + +define @intrinsic_vwmul_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vwmul_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8( + , + , + i32); + +define @intrinsic_vwmul_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8( + , + , + , + , + i32); + +define @intrinsic_vwmul_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16( + , + , + i32); + +define @intrinsic_vwmul_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vwmul_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16( + , + , + i32); + +define @intrinsic_vwmul_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vwmul_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vwmul_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16( + , + , + i32); + +define @intrinsic_vwmul_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vwmul_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16( + , + , + i32); + +define @intrinsic_vwmul_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vwmul_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16( + , + , + i32); + +define @intrinsic_vwmul_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vwmul_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8( + , + i8, + i32); + +define @intrinsic_vwmul_vx_nxv1i16_nxv1i8_i8( %0, i8 
%1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmul_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8( + , + i8, + i32); + +define @intrinsic_vwmul_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmul_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8( + , + i8, + i32); + +define @intrinsic_vwmul_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmul_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8( + , + i8, + i32); + +define @intrinsic_vwmul_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmul_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8( + , + i8, + i32); + +define @intrinsic_vwmul_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmul_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8( + , + i8, + i32); + +define @intrinsic_vwmul_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmul_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16( + , + i16, + i32); + +define @intrinsic_vwmul_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwmul_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16( + , + i16, + i32); + +define @intrinsic_vwmul_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwmul_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16( + , + i16, + i32); + +define @intrinsic_vwmul_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: 
vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwmul_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16( + , + i16, + i32); + +define @intrinsic_vwmul_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwmul_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16( + , + i16, + i32); + +define @intrinsic_vwmul_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwmul_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} diff --git a/test/CodeGen/RISCV/rvv/vwmul-rv64.ll b/test/CodeGen/RISCV/rvv/vwmul-rv64.ll new file mode 100644 index 00000000000..d361814644c --- /dev/null +++ b/test/CodeGen/RISCV/rvv/vwmul-rv64.ll @@ -0,0 +1,1201 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8( + , + , + i64); + +define @intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8( + 
%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8( + , + , + i64); + +define @intrinsic_vwmul_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vwmul_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8( + , + , + i64); + +define @intrinsic_vwmul_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vwmul_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8( + , + , + i64); + +define @intrinsic_vwmul_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vwmul_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8( + , + , + i64); + +define @intrinsic_vwmul_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vwmul_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i64 
%4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8( + , + , + i64); + +define @intrinsic_vwmul_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vwmul_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16( + , + , + i64); + +define @intrinsic_vwmul_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vwmul_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16( + , + , + i64); + +define @intrinsic_vwmul_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vwmul_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16( + , + , + i64); + +define @intrinsic_vwmul_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vwmul_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16( + %0, 
+ %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16( + , + , + i64); + +define @intrinsic_vwmul_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vwmul_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16( + , + , + i64); + +define @intrinsic_vwmul_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vwmul_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32( + , + , + i64); + +define @intrinsic_vwmul_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i64_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vwmul_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i64_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32( + , + , + i64); + +define @intrinsic_vwmul_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv2i64_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vwmul_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i64_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32( + , + , + i64); + +define @intrinsic_vwmul_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv4i64_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vwmul_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i64_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32( + , + , + i64); + +define @intrinsic_vwmul_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv8i64_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vwmul_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i64_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8( + , + i8, + i64); + +define @intrinsic_vwmul_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwmul_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8( + , + i8, + i64); + +define @intrinsic_vwmul_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwmul_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8( + %0, 
+ %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8( + , + i8, + i64); + +define @intrinsic_vwmul_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwmul_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8( + , + i8, + i64); + +define @intrinsic_vwmul_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwmul_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8( + , + i8, + i64); + +define @intrinsic_vwmul_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwmul_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8( + , + i8, + i64); + +define @intrinsic_vwmul_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwmul_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare 
@llvm.riscv.vwmul.nxv1i32.nxv1i16.i16( + , + i16, + i64); + +define @intrinsic_vwmul_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwmul_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16( + , + i16, + i64); + +define @intrinsic_vwmul_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwmul_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16( + , + i16, + i64); + +define @intrinsic_vwmul_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwmul_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16( + , + i16, + i64); + +define @intrinsic_vwmul_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwmul_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare 
@llvm.riscv.vwmul.nxv16i32.nxv16i16.i16( + , + i16, + i64); + +define @intrinsic_vwmul_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwmul_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32( + , + i32, + i64); + +define @intrinsic_vwmul_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv1i64_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwmul_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i64_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32( + , + i32, + i64); + +define @intrinsic_vwmul_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv2i64_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwmul_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i64_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32( + , + i32, + i64); + +define @intrinsic_vwmul_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv4i64_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwmul_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i64_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare 
@llvm.riscv.vwmul.nxv8i64.nxv8i32.i32( + , + i32, + i64); + +define @intrinsic_vwmul_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv8i64_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwmul_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i64_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} diff --git a/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll b/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll new file mode 100644 index 00000000000..34c9be77315 --- /dev/null +++ b/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll @@ -0,0 +1,881 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8( + , + , + i32); + +define @intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8( + , + , + , + , + i32); + +define @intrinsic_vwmulsu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8( + , + , + i32); + +define @intrinsic_vwmulsu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8( + , + , + , + , + i32); + +define @intrinsic_vwmulsu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8( + , + , + i32); + +define @intrinsic_vwmulsu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8( + , + , + , + , + i32); + +define 
@intrinsic_vwmulsu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8( + , + , + i32); + +define @intrinsic_vwmulsu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vwmulsu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8( + , + , + i32); + +define @intrinsic_vwmulsu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vwmulsu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8( + , + , + i32); + +define @intrinsic_vwmulsu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8( + , + , + , + , + i32); + +define @intrinsic_vwmulsu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16( + , + , + i32); + +define @intrinsic_vwmulsu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare 
@llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vwmulsu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16( + , + , + i32); + +define @intrinsic_vwmulsu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vwmulsu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16( + , + , + i32); + +define @intrinsic_vwmulsu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vwmulsu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16( + , + , + i32); + +define @intrinsic_vwmulsu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vwmulsu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16( + , + , + i32); + +define @intrinsic_vwmulsu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, 
{{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vwmulsu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8( + , + i8, + i32); + +define @intrinsic_vwmulsu_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmulsu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8( + , + i8, + i32); + +define @intrinsic_vwmulsu_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmulsu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8( + , + i8, + i32); + +define @intrinsic_vwmulsu_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmulsu_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8( + , + i8, + i32); + +define @intrinsic_vwmulsu_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, 
{{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmulsu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8( + , + i8, + i32); + +define @intrinsic_vwmulsu_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmulsu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8( + , + i8, + i32); + +define @intrinsic_vwmulsu_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmulsu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16( + , + i16, + i32); + +define @intrinsic_vwmulsu_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwmulsu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16( + , + i16, + i32); + +define @intrinsic_vwmulsu_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmulsu.vx 
{{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwmulsu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16( + , + i16, + i32); + +define @intrinsic_vwmulsu_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwmulsu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16( + , + i16, + i32); + +define @intrinsic_vwmulsu_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwmulsu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16( + , + i16, + i32); + +define @intrinsic_vwmulsu_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwmulsu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} diff --git a/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll b/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll new file mode 100644 index 00000000000..089f4af5512 --- /dev/null +++ b/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll @@ -0,0 +1,1201 @@ 
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8( + , + , + i64); + +define @intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vwmulsu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8( + , + , + i64); + +define @intrinsic_vwmulsu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vwmulsu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8( + , + , + i64); + +define @intrinsic_vwmulsu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vwmulsu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8( + , + , + i64); + +define @intrinsic_vwmulsu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vwmulsu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmulsu.vv 
{{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8( + , + , + i64); + +define @intrinsic_vwmulsu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vwmulsu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8( + , + , + i64); + +define @intrinsic_vwmulsu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vwmulsu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16( + , + , + i64); + +define @intrinsic_vwmulsu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vwmulsu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16( + , + , + i64); + +define @intrinsic_vwmulsu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vwmulsu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vwmulsu_mask_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16( + , + , + i64); + +define @intrinsic_vwmulsu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vwmulsu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16( + , + , + i64); + +define @intrinsic_vwmulsu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vwmulsu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16( + , + , + i64); + +define @intrinsic_vwmulsu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vwmulsu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32( + , + , + i64); + +define @intrinsic_vwmulsu_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv1i64_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32( + , + , + , + , + i64); + 
+define @intrinsic_vwmulsu_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i64_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32( + , + , + i64); + +define @intrinsic_vwmulsu_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv2i64_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vwmulsu_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i64_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32( + , + , + i64); + +define @intrinsic_vwmulsu_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv4i64_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vwmulsu_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i64_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32( + , + , + i64); + +define @intrinsic_vwmulsu_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv8i64_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vwmulsu_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i64_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8( + , + i8, + i64); + +define @intrinsic_vwmulsu_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + 
+declare @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwmulsu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8( + , + i8, + i64); + +define @intrinsic_vwmulsu_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwmulsu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8( + , + i8, + i64); + +define @intrinsic_vwmulsu_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwmulsu_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8( + , + i8, + i64); + +define @intrinsic_vwmulsu_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwmulsu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8( + , + i8, + i64); + +define @intrinsic_vwmulsu_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare 
@llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwmulsu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8( + , + i8, + i64); + +define @intrinsic_vwmulsu_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwmulsu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16( + , + i16, + i64); + +define @intrinsic_vwmulsu_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwmulsu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16( + , + i16, + i64); + +define @intrinsic_vwmulsu_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwmulsu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16( + , + i16, + i64); + +define @intrinsic_vwmulsu_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16( + %0, 
+ i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwmulsu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16( + , + i16, + i64); + +define @intrinsic_vwmulsu_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwmulsu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16( + , + i16, + i64); + +define @intrinsic_vwmulsu_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwmulsu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32( + , + i32, + i64); + +define @intrinsic_vwmulsu_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv1i64_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwmulsu_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i64_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32( + , + i32, + i64); + +define @intrinsic_vwmulsu_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv2i64_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, 
{{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwmulsu_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i64_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32( + , + i32, + i64); + +define @intrinsic_vwmulsu_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv4i64_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwmulsu_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i64_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32( + , + i32, + i64); + +define @intrinsic_vwmulsu_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv8i64_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwmulsu_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i64_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} diff --git a/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll b/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll new file mode 100644 index 00000000000..3d8954401e8 --- /dev/null +++ b/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll @@ -0,0 +1,881 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8( + , + , + i32); + +define @intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8( + , + , + , + , + i32); + +define @intrinsic_vwmulu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8( + , + , + i32); + +define @intrinsic_vwmulu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8( + , + , + , + , + i32); + +define @intrinsic_vwmulu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8( + , + , + i32); + +define @intrinsic_vwmulu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8( + , + , + , + , + i32); + +define @intrinsic_vwmulu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8( + , + , + i32); + +define @intrinsic_vwmulu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vwmulu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8( + , + , + i32); + +define @intrinsic_vwmulu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vwmulu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a 
= call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8( + , + , + i32); + +define @intrinsic_vwmulu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8( + , + , + , + , + i32); + +define @intrinsic_vwmulu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16( + , + , + i32); + +define @intrinsic_vwmulu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vwmulu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16( + , + , + i32); + +define @intrinsic_vwmulu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vwmulu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16( + , + , + i32); + +define @intrinsic_vwmulu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vwmulu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; 
CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16( + , + , + i32); + +define @intrinsic_vwmulu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vwmulu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16( + , + , + i32); + +define @intrinsic_vwmulu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vwmulu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8( + , + i8, + i32); + +define @intrinsic_vwmulu_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmulu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8( + , + i8, + i32); + +define @intrinsic_vwmulu_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmulu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e8,mf4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8( + , + i8, + i32); + +define @intrinsic_vwmulu_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmulu_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8( + , + i8, + i32); + +define @intrinsic_vwmulu_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmulu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8( + , + i8, + i32); + +define @intrinsic_vwmulu_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmulu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8( + , + i8, + i32); + +define @intrinsic_vwmulu_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmulu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, 
{{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16( + , + i16, + i32); + +define @intrinsic_vwmulu_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwmulu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16( + , + i16, + i32); + +define @intrinsic_vwmulu_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwmulu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16( + , + i16, + i32); + +define @intrinsic_vwmulu_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwmulu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16( + , + i16, + i32); + +define @intrinsic_vwmulu_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwmulu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: 
vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16( + , + i16, + i32); + +define @intrinsic_vwmulu_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwmulu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} diff --git a/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll b/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll new file mode 100644 index 00000000000..3d9bff312eb --- /dev/null +++ b/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll @@ -0,0 +1,1201 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8( + , + , + i64); + +define @intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vwmulu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8( + , + , + i64); + +define @intrinsic_vwmulu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vwmulu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8( + , + , + i64); + +define @intrinsic_vwmulu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call 
@llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vwmulu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8( + , + , + i64); + +define @intrinsic_vwmulu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vwmulu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8( + , + , + i64); + +define @intrinsic_vwmulu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vwmulu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8( + , + , + i64); + +define @intrinsic_vwmulu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vwmulu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16( + , + , + i64); + +define @intrinsic_vwmulu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmulu.vv 
{{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vwmulu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16( + , + , + i64); + +define @intrinsic_vwmulu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vwmulu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16( + , + , + i64); + +define @intrinsic_vwmulu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vwmulu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16( + , + , + i64); + +define @intrinsic_vwmulu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vwmulu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16( + , + , + i64); + +define @intrinsic_vwmulu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv16i32_nxv16i16_nxv16i16 +; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vwmulu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32( + , + , + i64); + +define @intrinsic_vwmulu_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv1i64_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vwmulu_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i64_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32( + , + , + i64); + +define @intrinsic_vwmulu_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv2i64_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vwmulu_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i64_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32( + , + , + i64); + +define @intrinsic_vwmulu_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv4i64_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vwmulu_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i64_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32( + , + , + i64); + +define @intrinsic_vwmulu_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { 
+entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv8i64_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vwmulu_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i64_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8( + , + i8, + i64); + +define @intrinsic_vwmulu_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwmulu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8( + , + i8, + i64); + +define @intrinsic_vwmulu_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwmulu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8( + , + i8, + i64); + +define @intrinsic_vwmulu_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwmulu_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8( + , + i8, + i64); + +define @intrinsic_vwmulu_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vwmulu_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwmulu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8( + , + i8, + i64); + +define @intrinsic_vwmulu_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwmulu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8( + , + i8, + i64); + +define @intrinsic_vwmulu_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwmulu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16( + , + i16, + i64); + +define @intrinsic_vwmulu_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwmulu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16( + , + i16, + i64); + +define @intrinsic_vwmulu_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vwmulu_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwmulu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16( + , + i16, + i64); + +define @intrinsic_vwmulu_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwmulu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16( + , + i16, + i64); + +define @intrinsic_vwmulu_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwmulu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16( + , + i16, + i64); + +define @intrinsic_vwmulu_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwmulu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32( + , + i32, + i64); + +define @intrinsic_vwmulu_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, i64 %2) 
nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv1i64_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwmulu_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i64_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32( + , + i32, + i64); + +define @intrinsic_vwmulu_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv2i64_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwmulu_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i64_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32( + , + i32, + i64); + +define @intrinsic_vwmulu_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv4i64_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwmulu_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i64_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32( + , + i32, + i64); + +define @intrinsic_vwmulu_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv8i64_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwmulu_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i64_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +}
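
As a reading aid for the test idiom above, here is a minimal sketch of one unmasked vwmulu.vv call written out with explicit scalable-vector types. The function name @sketch_vwmulu_vv is hypothetical, and the <vscale x 1 x i16> / <vscale x 1 x i8> types are an assumption inferred from the intrinsic name suffix nxv1i16.nxv1i8.nxv1i8; the trailing i64 operand is the vector length, as in the rv64 tests.

; Sketch only: an unmasked widening unsigned multiply, i8 elements widened
; into i16 elements. The i64 operand is the requested vector length.
declare <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i64);

define <vscale x 1 x i16> @sketch_vwmulu_vv(<vscale x 1 x i8> %x, <vscale x 1 x i8> %y, i64 %vl) nounwind {
entry:
  ; Each i8 lane of %x is multiplied (zero-extended) by the matching lane of
  ; %y, producing an i16 lane in the result group.
  %prod = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8(
      <vscale x 1 x i8> %x,
      <vscale x 1 x i8> %y,
      i64 %vl)
  ret <vscale x 1 x i16> %prod
}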