From f958745fcd6a21391477192b0b6e619597222b37 Mon Sep 17 00:00:00 2001
From: Monk Chiang
Date: Thu, 17 Dec 2020 13:45:52 +0800
Subject: [PATCH] [RISCV] Define vsadd/vsaddu/vssub/vssubu intrinsics.

We worked with @rogfer01 from BSC to produce this patch.

Authored-by: Roger Ferrer Ibanez
Co-Authored-by: ShihPo Hung
Co-Authored-by: Monk Chiang

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D93366
---
 include/llvm/IR/IntrinsicsRISCV.td         |   29 +
 lib/Target/RISCV/RISCVISelLowering.cpp     |   39 +-
 lib/Target/RISCV/RISCVISelLowering.h       |    1 +
 lib/Target/RISCV/RISCVInstrInfoVPseudos.td |   18 +
 lib/Target/RISCV/RISCVRegisterInfo.cpp     |    1 +
 lib/Target/RISCV/RISCVRegisterInfo.td      |    1 +
 test/CodeGen/RISCV/rvv/vsadd-rv32.ll       | 1945 ++++++++++++++++
 test/CodeGen/RISCV/rvv/vsadd-rv64.ll       | 2377 ++++++++++++++++++++
 test/CodeGen/RISCV/rvv/vsaddu-rv32.ll      | 1945 ++++++++++++++++
 test/CodeGen/RISCV/rvv/vsaddu-rv64.ll      | 2377 ++++++++++++++++++++
 test/CodeGen/RISCV/rvv/vssub-rv32.ll       | 1441 ++++++++++++
 test/CodeGen/RISCV/rvv/vssub-rv64.ll       | 1761 +++++++++++++++
 test/CodeGen/RISCV/rvv/vssubu-rv32.ll      | 1441 ++++++++++++
 test/CodeGen/RISCV/rvv/vssubu-rv64.ll      | 1761 +++++++++++++++
 14 files changed, 15136 insertions(+), 1 deletion(-)
 create mode 100644 test/CodeGen/RISCV/rvv/vsadd-rv32.ll
 create mode 100644 test/CodeGen/RISCV/rvv/vsadd-rv64.ll
 create mode 100644 test/CodeGen/RISCV/rvv/vsaddu-rv32.ll
 create mode 100644 test/CodeGen/RISCV/rvv/vsaddu-rv64.ll
 create mode 100644 test/CodeGen/RISCV/rvv/vssub-rv32.ll
 create mode 100644 test/CodeGen/RISCV/rvv/vssub-rv64.ll
 create mode 100644 test/CodeGen/RISCV/rvv/vssubu-rv32.ll
 create mode 100644 test/CodeGen/RISCV/rvv/vssubu-rv64.ll

diff --git a/include/llvm/IR/IntrinsicsRISCV.td b/include/llvm/IR/IntrinsicsRISCV.td
index 805035f9244..6d20f39a402 100644
--- a/include/llvm/IR/IntrinsicsRISCV.td
+++ b/include/llvm/IR/IntrinsicsRISCV.td
@@ -209,6 +209,26 @@ let TargetPrefix = "riscv" in {
     let ExtendOperand = 2;
   }

+  // For Saturating binary operations.
+  // The destination vector type is the same as first source vector.
+  // Input: (vector_in, vector_in/scalar_in, vl)
+  class RISCVSaturatingBinaryAAXNoMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
+                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
+    let ExtendOperand = 2;
+  }
+  // For Saturating binary operations with mask.
+  // The destination vector type is the same as first source vector.
+  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
+  class RISCVSaturatingBinaryAAXMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
+                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
+                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
+    let ExtendOperand = 3;
+  }
+
   multiclass RISCVUSLoad {
     def "int_riscv_" # NAME : RISCVUSLoad;
     def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMask;
@@ -243,6 +263,10 @@ let TargetPrefix = "riscv" in {
   multiclass RISCVBinaryMaskOut {
     def "int_riscv_" # NAME : RISCVBinaryMOut;
   }
+  multiclass RISCVSaturatingBinaryAAX {
+    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXNoMask;
+    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMask;
+  }

   defm vle : RISCVUSLoad;
   defm vse : RISCVUSStore;
@@ -299,4 +323,9 @@ let TargetPrefix = "riscv" in {
   defm vfadd : RISCVBinaryAAX;
   defm vfsub : RISCVBinaryAAX;
   defm vfrsub : RISCVBinaryAAX;
+
+  defm vsaddu : RISCVSaturatingBinaryAAX;
+  defm vsadd : RISCVSaturatingBinaryAAX;
+  defm vssubu : RISCVSaturatingBinaryAAX;
+  defm vssub : RISCVSaturatingBinaryAAX;
 } // TargetPrefix = "riscv"
diff --git a/lib/Target/RISCV/RISCVISelLowering.cpp b/lib/Target/RISCV/RISCVISelLowering.cpp
index aeb6b062386..6dac023fe33 100644
--- a/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -350,8 +350,13 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
     // RVV intrinsics may have illegal operands.
     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
-    if (Subtarget.is64Bit())
+    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
+    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
+
+    if (Subtarget.is64Bit()) {
       setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
+      setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
+    }
   }

   // Function alignments.
@@ -599,6 +604,8 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
   }
   case ISD::INTRINSIC_WO_CHAIN:
     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
+  case ISD::INTRINSIC_W_CHAIN:
+    return LowerINTRINSIC_W_CHAIN(Op, DAG);
   case ISD::BSWAP:
   case ISD::BITREVERSE: {
     // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combinining.
@@ -1054,6 +1061,36 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
   }
 }

+SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
+                                                    SelectionDAG &DAG) const {
+  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
+  SDLoc DL(Op);
+
+  if (Subtarget.hasStdExtV()) {
+    // Some RVV intrinsics may claim that they want an integer operand to be
+    // extended.
+    if (const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
+            RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo)) {
+      if (II->ExtendedOperand) {
+        // The operands start from the second argument in INTRINSIC_W_CHAIN.
+        unsigned ExtendOp = II->ExtendedOperand + 1;
+        assert(ExtendOp < Op.getNumOperands());
+        SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
+        SDValue &ScalarOp = Operands[ExtendOp];
+        if (ScalarOp.getValueType() == MVT::i32 ||
+            ScalarOp.getValueType() == MVT::i16 ||
+            ScalarOp.getValueType() == MVT::i8) {
+          ScalarOp =
+              DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getXLenVT(), ScalarOp);
+          return DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, Op->getVTList(), Operands);
+        }
+      }
+    }
+  }
+
+  return SDValue();
+}
+
 // Returns the opcode of the target-specific SDNode that implements the 32-bit
 // form of the given Opcode.
 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
diff --git a/lib/Target/RISCV/RISCVISelLowering.h b/lib/Target/RISCV/RISCVISelLowering.h
index 33db64308d6..fb81b245492 100644
--- a/lib/Target/RISCV/RISCVISelLowering.h
+++ b/lib/Target/RISCV/RISCVISelLowering.h
@@ -260,6 +260,7 @@ private:
   SDValue lowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerShiftRightParts(SDValue Op, SelectionDAG &DAG, bool IsSRA) const;
   SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;

   bool isEligibleForTailCallOptimization(
       CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
diff --git a/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 300d0b23c0d..7f5210310df 100644
--- a/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -1295,6 +1295,16 @@ defm PseudoVWMUL : VPseudoBinaryW_VV_VX;
 defm PseudoVWMULU : VPseudoBinaryW_VV_VX;
 defm PseudoVWMULSU : VPseudoBinaryW_VV_VX;

+//===----------------------------------------------------------------------===//
+// 13.1. Vector Single-Width Saturating Add and Subtract
+//===----------------------------------------------------------------------===//
+let Defs = [VXSAT], hasSideEffects = 1 in {
+  defm PseudoVSADDU : VPseudoBinaryV_VV_VX_VI;
+  defm PseudoVSADD : VPseudoBinaryV_VV_VX_VI;
+  defm PseudoVSSUBU : VPseudoBinaryV_VV_VX;
+  defm PseudoVSSUB : VPseudoBinaryV_VV_VX;
+}
+
 } // Predicates = [HasStdExtV]

 let Predicates = [HasStdExtV, HasStdExtF] in {
@@ -1434,6 +1444,14 @@ defm "" : VPatBinaryW_VV_VX<"int_riscv_vwmul", "PseudoVWMUL">;
 defm "" : VPatBinaryW_VV_VX<"int_riscv_vwmulu", "PseudoVWMULU">;
 defm "" : VPatBinaryW_VV_VX<"int_riscv_vwmulsu", "PseudoVWMULSU">;

+//===----------------------------------------------------------------------===//
+// 13.1. Vector Single-Width Saturating Add and Subtract
+//===----------------------------------------------------------------------===//
+defm "" : VPatBinaryV_VV_VX_VI<"int_riscv_vsaddu", "PseudoVSADDU", AllIntegerVectors>;
+defm "" : VPatBinaryV_VV_VX_VI<"int_riscv_vsadd", "PseudoVSADD", AllIntegerVectors>;
+defm "" : VPatBinaryV_VV_VX<"int_riscv_vssubu", "PseudoVSSUBU", AllIntegerVectors>;
+defm "" : VPatBinaryV_VV_VX<"int_riscv_vssub", "PseudoVSSUB", AllIntegerVectors>;
+
 } // Predicates = [HasStdExtV]

 let Predicates = [HasStdExtV, HasStdExtF] in {
diff --git a/lib/Target/RISCV/RISCVRegisterInfo.cpp b/lib/Target/RISCV/RISCVRegisterInfo.cpp
index 15c91a27b78..69abf840201 100644
--- a/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -98,6 +98,7 @@ BitVector RISCVRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
   // V registers for code generation. We handle them manually.
markSuperRegs(Reserved, RISCV::VL); markSuperRegs(Reserved, RISCV::VTYPE); + markSuperRegs(Reserved, RISCV::VXSAT); assert(checkAllSuperRegsMarked(Reserved)); return Reserved; diff --git a/lib/Target/RISCV/RISCVRegisterInfo.td b/lib/Target/RISCV/RISCVRegisterInfo.td index a9575f6cd42..b87658fea59 100644 --- a/lib/Target/RISCV/RISCVRegisterInfo.td +++ b/lib/Target/RISCV/RISCVRegisterInfo.td @@ -379,6 +379,7 @@ let RegAltNameIndices = [ABIRegAltName] in { def VTYPE : RISCVReg<0, "vtype", ["vtype"]>; def VL : RISCVReg<0, "vl", ["vl"]>; + def VXSAT : RISCVReg<0, "vxsat", ["vxsat"]>; } class RegisterTypes reg_types> { diff --git a/test/CodeGen/RISCV/rvv/vsadd-rv32.ll b/test/CodeGen/RISCV/rvv/vsadd-rv32.ll new file mode 100644 index 00000000000..6b2d018cff0 --- /dev/null +++ b/test/CodeGen/RISCV/rvv/vsadd-rv32.ll @@ -0,0 +1,1945 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vsadd.nxv1i8.nxv1i8( + , + , + i32); + +define @intrinsic_vsadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv1i8.nxv1i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8( + , + , + , + , + i32); + +define @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv2i8.nxv2i8( + , + , + i32); + +define @intrinsic_vsadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv2i8.nxv2i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8( + , + , + , + , + i32); + +define @intrinsic_vsadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv4i8.nxv4i8( + , + , + i32); + +define @intrinsic_vsadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv4i8.nxv4i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8( + , + , + , + , + i32); + +define @intrinsic_vsadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv8i8.nxv8i8( + , + , + i32); + +define 
@intrinsic_vsadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv8i8.nxv8i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vsadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv16i8.nxv16i8( + , + , + i32); + +define @intrinsic_vsadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv16i8.nxv16i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vsadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv32i8.nxv32i8( + , + , + i32); + +define @intrinsic_vsadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv32i8.nxv32i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8( + , + , + , + , + i32); + +define @intrinsic_vsadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv64i8.nxv64i8( + , + , + i32); + +define @intrinsic_vsadd_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv64i8.nxv64i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8( + , + , + , + , + i32); + +define @intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv1i16.nxv1i16( + , + , + i32); + +define @intrinsic_vsadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i16_nxv1i16_nxv1i16 
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv1i16.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vsadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv2i16.nxv2i16( + , + , + i32); + +define @intrinsic_vsadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv2i16.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vsadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv4i16.nxv4i16( + , + , + i32); + +define @intrinsic_vsadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv4i16.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vsadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv8i16.nxv8i16( + , + , + i32); + +define @intrinsic_vsadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv8i16.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vsadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv16i16.nxv16i16( + , + , + i32); + +define @intrinsic_vsadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, 
{{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv16i16.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vsadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv32i16.nxv32i16( + , + , + i32); + +define @intrinsic_vsadd_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv32i16.nxv32i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16( + , + , + , + , + i32); + +define @intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv1i32.nxv1i32( + , + , + i32); + +define @intrinsic_vsadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv1i32.nxv1i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32( + , + , + , + , + i32); + +define @intrinsic_vsadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv2i32.nxv2i32( + , + , + i32); + +define @intrinsic_vsadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv2i32.nxv2i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32( + , + , + , + , + i32); + +define @intrinsic_vsadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv4i32.nxv4i32( + , + , + i32); + +define @intrinsic_vsadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv4i32.nxv4i32( + %0, + %1, + i32 %2) 
+ + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32( + , + , + , + , + i32); + +define @intrinsic_vsadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv8i32.nxv8i32( + , + , + i32); + +define @intrinsic_vsadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv8i32.nxv8i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32( + , + , + , + , + i32); + +define @intrinsic_vsadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv16i32.nxv16i32( + , + , + i32); + +define @intrinsic_vsadd_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv16i32.nxv16i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32( + , + , + , + , + i32); + +define @intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv1i8.i8( + , + i8, + i32); + +define @intrinsic_vsadd_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv1i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv1i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vsadd_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv2i8.i8( + , + i8, + i32); + +define @intrinsic_vsadd_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv2i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv2i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vsadd_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 
%2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv4i8.i8( + , + i8, + i32); + +define @intrinsic_vsadd_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv4i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv4i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vsadd_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv8i8.i8( + , + i8, + i32); + +define @intrinsic_vsadd_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv8i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv8i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vsadd_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv16i8.i8( + , + i8, + i32); + +define @intrinsic_vsadd_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv16i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv16i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vsadd_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv32i8.i8( + , + i8, + i32); + +define @intrinsic_vsadd_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv32i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv32i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vsadd_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vsadd.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv64i8.i8( + , + i8, + i32); + +define @intrinsic_vsadd_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv64i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv64i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vsadd_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv1i16.i16( + , + i16, + i32); + +define @intrinsic_vsadd_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv1i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv1i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vsadd_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv2i16.i16( + , + i16, + i32); + +define @intrinsic_vsadd_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv2i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv2i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vsadd_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv4i16.i16( + , + i16, + i32); + +define @intrinsic_vsadd_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv4i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv4i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vsadd_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv8i16.i16( + , + i16, + i32); + +define 
@intrinsic_vsadd_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv8i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv8i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vsadd_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv16i16.i16( + , + i16, + i32); + +define @intrinsic_vsadd_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv16i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv16i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vsadd_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv32i16.i16( + , + i16, + i32); + +define @intrinsic_vsadd_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv32i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv32i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vsadd_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv1i32.i32( + , + i32, + i32); + +define @intrinsic_vsadd_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv1i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv1i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vsadd_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv2i32.i32( + , + i32, + i32); + +define @intrinsic_vsadd_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vsadd_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv2i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv2i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vsadd_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv4i32.i32( + , + i32, + i32); + +define @intrinsic_vsadd_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv4i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv4i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vsadd_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv8i32.i32( + , + i32, + i32); + +define @intrinsic_vsadd_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv8i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv8i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vsadd_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv16i32.i32( + , + i32, + i32); + +define @intrinsic_vsadd_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv16i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv16i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vsadd_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv16i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv1i8_nxv1i8_i8( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv1i8.i8( + %0, + i8 9, + i32 %1) + + ret %a +} 
+ +define @intrinsic_vsadd_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv1i8.i8( + %0, + %1, + i8 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv2i8_nxv2i8_i8( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv2i8.i8( + %0, + i8 9, + i32 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv2i8.i8( + %0, + %1, + i8 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv4i8_nxv4i8_i8( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv4i8.i8( + %0, + i8 9, + i32 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv4i8.i8( + %0, + %1, + i8 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv8i8_nxv8i8_i8( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv8i8.i8( + %0, + i8 9, + i32 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv8i8.i8( + %0, + %1, + i8 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv16i8_nxv16i8_i8( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv16i8.i8( + %0, + i8 9, + i32 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv16i8.i8( + %0, + %1, + i8 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv32i8_nxv32i8_i8( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv32i8.i8( + %0, + i8 9, + i32 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 
9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv32i8.i8( + %0, + %1, + i8 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv64i8_nxv64i8_i8( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv64i8.i8( + %0, + i8 9, + i32 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv64i8.i8( + %0, + %1, + i8 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv1i16_nxv1i16_i16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv1i16.i16( + %0, + i16 9, + i32 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv1i16.i16( + %0, + %1, + i16 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv2i16_nxv2i16_i16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv2i16.i16( + %0, + i16 9, + i32 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv2i16.i16( + %0, + %1, + i16 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv4i16_nxv4i16_i16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv4i16.i16( + %0, + i16 9, + i32 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv4i16.i16( + %0, + %1, + i16 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv8i16_nxv8i16_i16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv8i16.i16( + %0, + i16 9, + i32 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv8i16.i16( + %0, + %1, + i16 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv16i16_nxv16i16_i16( %0, i32 %1) 
nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv16i16.i16( + %0, + i16 9, + i32 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv16i16.i16( + %0, + %1, + i16 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv32i16_nxv32i16_i16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv32i16.i16( + %0, + i16 9, + i32 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv32i16.i16( + %0, + %1, + i16 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv1i32_nxv1i32_i32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv1i32.i32( + %0, + i32 9, + i32 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv1i32.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv2i32_nxv2i32_i32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv2i32.i32( + %0, + i32 9, + i32 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv2i32.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv4i32_nxv4i32_i32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv4i32.i32( + %0, + i32 9, + i32 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv4i32.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv8i32_nxv8i32_i32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsadd.vi 
{{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv8i32.i32( + %0, + i32 9, + i32 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv8i32.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv16i32_nxv16i32_i32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv16i32.i32( + %0, + i32 9, + i32 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv16i32.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} diff --git a/test/CodeGen/RISCV/rvv/vsadd-rv64.ll b/test/CodeGen/RISCV/rvv/vsadd-rv64.ll new file mode 100644 index 00000000000..864f0b48b37 --- /dev/null +++ b/test/CodeGen/RISCV/rvv/vsadd-rv64.ll @@ -0,0 +1,2377 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vsadd.nxv1i8.nxv1i8( + , + , + i64); + +define @intrinsic_vsadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv1i8.nxv1i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv2i8.nxv2i8( + , + , + i64); + +define @intrinsic_vsadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv2i8.nxv2i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vsadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv4i8.nxv4i8( + , + , + i64); + +define @intrinsic_vsadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv4i8.nxv4i8( + %0, + %1, + i64 %2) + 
+ ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vsadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv8i8.nxv8i8( + , + , + i64); + +define @intrinsic_vsadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv8i8.nxv8i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vsadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv16i8.nxv16i8( + , + , + i64); + +define @intrinsic_vsadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv16i8.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vsadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv32i8.nxv32i8( + , + , + i64); + +define @intrinsic_vsadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv32i8.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vsadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv64i8.nxv64i8( + , + , + i64); + +define @intrinsic_vsadd_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv64i8.nxv64i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8( + , + , + , + , + i64); + +define @intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, 
%1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv1i16.nxv1i16( + , + , + i64); + +define @intrinsic_vsadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv1i16.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vsadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv2i16.nxv2i16( + , + , + i64); + +define @intrinsic_vsadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv2i16.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vsadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv4i16.nxv4i16( + , + , + i64); + +define @intrinsic_vsadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv4i16.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vsadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv8i16.nxv8i16( + , + , + i64); + +define @intrinsic_vsadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv8i16.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vsadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv16i16.nxv16i16( + , + , + i64); + +define @intrinsic_vsadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv16i16.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vsadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv32i16.nxv32i16( + , + , + i64); + +define @intrinsic_vsadd_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv32i16.nxv32i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16( + , + , + , + , + i64); + +define @intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv1i32.nxv1i32( + , + , + i64); + +define @intrinsic_vsadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv1i32.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vsadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv2i32.nxv2i32( + , + , + i64); + +define @intrinsic_vsadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv2i32.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vsadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsadd.vv 
{{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv4i32.nxv4i32( + , + , + i64); + +define @intrinsic_vsadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv4i32.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vsadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv8i32.nxv8i32( + , + , + i64); + +define @intrinsic_vsadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv8i32.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vsadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv16i32.nxv16i32( + , + , + i64); + +define @intrinsic_vsadd_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv16i32.nxv16i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32( + , + , + , + , + i64); + +define @intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv1i64.nxv1i64( + , + , + i64); + +define @intrinsic_vsadd_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv1i64.nxv1i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64( + , + , + , + , + i64); + +define @intrinsic_vsadd_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vsadd.mask.nxv1i64.nxv1i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv2i64.nxv2i64( + , + , + i64); + +define @intrinsic_vsadd_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv2i64.nxv2i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64( + , + , + , + , + i64); + +define @intrinsic_vsadd_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv4i64.nxv4i64( + , + , + i64); + +define @intrinsic_vsadd_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv4i64.nxv4i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64( + , + , + , + , + i64); + +define @intrinsic_vsadd_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv8i64.nxv8i64( + , + , + i64); + +define @intrinsic_vsadd_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv8i64.nxv8i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64( + , + , + , + , + i64); + +define @intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv1i8.i8( + , + i8, + i64); + +define @intrinsic_vsadd_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv1i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv1i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vsadd_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv2i8.i8( + , + i8, + i64); + 
+define <vscale x 2 x i8> @intrinsic_vsadd_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i8,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vsadd_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i8 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  i8,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vsadd_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i8,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vsadd_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i8 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  i8,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vsadd_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i8,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vsadd_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i8 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  i8,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vsadd_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsadd_vx_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i8,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vsadd_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i8 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  i8,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vsadd_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsadd_vx_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call @llvm.riscv.vsadd.nxv32i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv32i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vsadd_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv64i8.i8( + , + i8, + i64); + +define @intrinsic_vsadd_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv64i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv64i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vsadd_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv1i16.i16( + , + i16, + i64); + +define @intrinsic_vsadd_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv1i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv1i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vsadd_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv2i16.i16( + , + i16, + i64); + +define @intrinsic_vsadd_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv2i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv2i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vsadd_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv4i16.i16( + , + i16, + i64); + +define @intrinsic_vsadd_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv4i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv4i16.i16( + , + , + i16, + , + i64); + +define 
@intrinsic_vsadd_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv8i16.i16( + , + i16, + i64); + +define @intrinsic_vsadd_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv8i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv8i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vsadd_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv16i16.i16( + , + i16, + i64); + +define @intrinsic_vsadd_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv16i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv16i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vsadd_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv32i16.i16( + , + i16, + i64); + +define @intrinsic_vsadd_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv32i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv32i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vsadd_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv1i32.i32( + , + i32, + i64); + +define @intrinsic_vsadd_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv1i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv1i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vsadd_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vsadd_mask_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv2i32.i32( + , + i32, + i64); + +define @intrinsic_vsadd_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv2i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv2i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vsadd_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv4i32.i32( + , + i32, + i64); + +define @intrinsic_vsadd_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv4i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv4i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vsadd_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv8i32.i32( + , + i32, + i64); + +define @intrinsic_vsadd_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv8i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv8i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vsadd_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv16i32.i32( + , + i32, + i64); + +define @intrinsic_vsadd_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv16i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv16i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vsadd_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, 
{{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv16i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv1i64.i64( + , + i64, + i64); + +define @intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv1i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv1i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vsadd_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv2i64.i64( + , + i64, + i64); + +define @intrinsic_vsadd_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv2i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv2i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vsadd_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv2i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv4i64.i64( + , + i64, + i64); + +define @intrinsic_vsadd_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i64_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv4i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv4i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vsadd_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i64_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv4i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsadd.nxv8i64.i64( + , + i64, + i64); + +define @intrinsic_vsadd_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i64_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsadd.nxv8i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsadd.mask.nxv8i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vsadd_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i64_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv8i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +define 
@intrinsic_vsadd_vi_nxv1i8_nxv1i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv1i8.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv1i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv2i8_nxv2i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv2i8.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv2i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv4i8_nxv4i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv4i8.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv4i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv8i8_nxv8i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv8i8.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv8i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv16i8_nxv16i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv16i8.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv16i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv32i8_nxv32i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call 
@llvm.riscv.vsadd.nxv32i8.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv32i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv64i8_nxv64i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv64i8.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv64i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv1i16_nxv1i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv1i16.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv1i16.i16( + %0, + %1, + i16 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv2i16_nxv2i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv2i16.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv2i16.i16( + %0, + %1, + i16 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv4i16_nxv4i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv4i16.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv4i16.i16( + %0, + %1, + i16 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv8i16_nxv8i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv8i16.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vsadd_mask_vi_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv8i16.i16( + %0, + %1, + i16 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv16i16_nxv16i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv16i16.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv16i16.i16( + %0, + %1, + i16 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv32i16_nxv32i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv32i16.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv32i16.i16( + %0, + %1, + i16 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv1i32_nxv1i32_i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv1i32.i32( + %0, + i32 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv1i32.i32( + %0, + %1, + i32 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv2i32_nxv2i32_i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv2i32.i32( + %0, + i32 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv2i32.i32( + %0, + %1, + i32 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv4i32_nxv4i32_i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv4i32.i32( + %0, + i32 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, 
v0.t + %a = call @llvm.riscv.vsadd.mask.nxv4i32.i32( + %0, + %1, + i32 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv8i32_nxv8i32_i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv8i32.i32( + %0, + i32 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv8i32.i32( + %0, + %1, + i32 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv16i32_nxv16i32_i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv16i32.i32( + %0, + i32 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv16i32.i32( + %0, + %1, + i32 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv1i64_nxv1i64_i64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv1i64.i64( + %0, + i64 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv1i64.i64( + %0, + %1, + i64 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv2i64_nxv2i64_i64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv2i64.i64( + %0, + i64 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv2i64.i64( + %0, + %1, + i64 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv4i64_nxv4i64_i64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i64_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv4i64.i64( + %0, + i64 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i64_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv4i64.i64( + %0, + %1, + i64 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsadd_vi_nxv8i64_nxv8i64_i64( %0, 
i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_vi_nxv8i64_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsadd.nxv8i64.i64( + %0, + i64 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsadd_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i64_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsadd.mask.nxv8i64.i64( + %0, + %1, + i64 9, + %2, + i64 %3) + + ret %a +} diff --git a/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll b/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll new file mode 100644 index 00000000000..256b2ebc1a8 --- /dev/null +++ b/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll @@ -0,0 +1,1945 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vsaddu.nxv1i8.nxv1i8( + , + , + i32); + +define @intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv1i8.nxv1i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8( + , + , + , + , + i32); + +define @intrinsic_vsaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv2i8.nxv2i8( + , + , + i32); + +define @intrinsic_vsaddu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv2i8.nxv2i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8( + , + , + , + , + i32); + +define @intrinsic_vsaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv4i8.nxv4i8( + , + , + i32); + +define @intrinsic_vsaddu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv4i8.nxv4i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8( + , + , + , + , + i32); + +define @intrinsic_vsaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare 
@llvm.riscv.vsaddu.nxv8i8.nxv8i8( + , + , + i32); + +define @intrinsic_vsaddu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv8i8.nxv8i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vsaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv16i8.nxv16i8( + , + , + i32); + +define @intrinsic_vsaddu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv16i8.nxv16i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vsaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv32i8.nxv32i8( + , + , + i32); + +define @intrinsic_vsaddu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv32i8.nxv32i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8( + , + , + , + , + i32); + +define @intrinsic_vsaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv64i8.nxv64i8( + , + , + i32); + +define @intrinsic_vsaddu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv64i8.nxv64i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8( + , + , + , + , + i32); + +define @intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv1i16.nxv1i16( + , + , + i32); + +define 
@intrinsic_vsaddu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv1i16.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vsaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv2i16.nxv2i16( + , + , + i32); + +define @intrinsic_vsaddu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv2i16.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vsaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv4i16.nxv4i16( + , + , + i32); + +define @intrinsic_vsaddu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv4i16.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vsaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv8i16.nxv8i16( + , + , + i32); + +define @intrinsic_vsaddu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv8i16.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vsaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv16i16.nxv16i16( + , + , + i32); + +define @intrinsic_vsaddu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) 
nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv16i16.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vsaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv32i16.nxv32i16( + , + , + i32); + +define @intrinsic_vsaddu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv32i16.nxv32i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16( + , + , + , + , + i32); + +define @intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv1i32.nxv1i32( + , + , + i32); + +define @intrinsic_vsaddu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv1i32.nxv1i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32( + , + , + , + , + i32); + +define @intrinsic_vsaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv2i32.nxv2i32( + , + , + i32); + +define @intrinsic_vsaddu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv2i32.nxv2i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32( + , + , + , + , + i32); + +define @intrinsic_vsaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv4i32.nxv4i32( + , + , + i32); + +define @intrinsic_vsaddu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vsaddu_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv4i32.nxv4i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32( + , + , + , + , + i32); + +define @intrinsic_vsaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv8i32.nxv8i32( + , + , + i32); + +define @intrinsic_vsaddu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv8i32.nxv8i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32( + , + , + , + , + i32); + +define @intrinsic_vsaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv16i32.nxv16i32( + , + , + i32); + +define @intrinsic_vsaddu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv16i32.nxv16i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32( + , + , + , + , + i32); + +define @intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv1i8.i8( + , + i8, + i32); + +define @intrinsic_vsaddu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv1i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv1i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vsaddu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv2i8.i8( + , + i8, + i32); + +define @intrinsic_vsaddu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: 
vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv2i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv2i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vsaddu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv4i8.i8( + , + i8, + i32); + +define @intrinsic_vsaddu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv4i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv4i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vsaddu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv8i8.i8( + , + i8, + i32); + +define @intrinsic_vsaddu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv8i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv8i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vsaddu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv16i8.i8( + , + i8, + i32); + +define @intrinsic_vsaddu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv16i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv16i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vsaddu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv32i8.i8( + , + i8, + i32); + +define @intrinsic_vsaddu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv32i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv32i8.i8( + , + , + i8, + , + i32); + 
+define @intrinsic_vsaddu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv64i8.i8( + , + i8, + i32); + +define @intrinsic_vsaddu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv64i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv64i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vsaddu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv1i16.i16( + , + i16, + i32); + +define @intrinsic_vsaddu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv1i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv1i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vsaddu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv2i16.i16( + , + i16, + i32); + +define @intrinsic_vsaddu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv2i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv2i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vsaddu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv4i16.i16( + , + i16, + i32); + +define @intrinsic_vsaddu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv4i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv4i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vsaddu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vsaddu_mask_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv8i16.i16( + , + i16, + i32); + +define @intrinsic_vsaddu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv8i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv8i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vsaddu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv16i16.i16( + , + i16, + i32); + +define @intrinsic_vsaddu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv16i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv16i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vsaddu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv32i16.i16( + , + i16, + i32); + +define @intrinsic_vsaddu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv32i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv32i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vsaddu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv1i32.i32( + , + i32, + i32); + +define @intrinsic_vsaddu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv1i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv1i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vsaddu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e32,mf2,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv2i32.i32( + , + i32, + i32); + +define @intrinsic_vsaddu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv2i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv2i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vsaddu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv4i32.i32( + , + i32, + i32); + +define @intrinsic_vsaddu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv4i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv4i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vsaddu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv8i32.i32( + , + i32, + i32); + +define @intrinsic_vsaddu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv8i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv8i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vsaddu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv16i32.i32( + , + i32, + i32); + +define @intrinsic_vsaddu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv16i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv16i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vsaddu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vsaddu.mask.nxv16i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv1i8_nxv1i8_i8( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv1i8.i8( + %0, + i8 9, + i32 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv1i8.i8( + %0, + %1, + i8 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv2i8_nxv2i8_i8( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv2i8.i8( + %0, + i8 9, + i32 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv2i8.i8( + %0, + %1, + i8 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv4i8_nxv4i8_i8( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv4i8.i8( + %0, + i8 9, + i32 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv4i8.i8( + %0, + %1, + i8 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv8i8_nxv8i8_i8( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv8i8.i8( + %0, + i8 9, + i32 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv8i8.i8( + %0, + %1, + i8 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv16i8_nxv16i8_i8( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv16i8.i8( + %0, + i8 9, + i32 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv16i8.i8( + %0, + %1, + i8 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv32i8_nxv32i8_i8( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vsaddu_vi_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv32i8.i8( + %0, + i8 9, + i32 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv32i8.i8( + %0, + %1, + i8 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv64i8_nxv64i8_i8( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv64i8.i8( + %0, + i8 9, + i32 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv64i8.i8( + %0, + %1, + i8 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv1i16_nxv1i16_i16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv1i16.i16( + %0, + i16 9, + i32 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv1i16.i16( + %0, + %1, + i16 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv2i16_nxv2i16_i16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv2i16.i16( + %0, + i16 9, + i32 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv2i16.i16( + %0, + %1, + i16 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv4i16_nxv4i16_i16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv4i16.i16( + %0, + i16 9, + i32 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv4i16.i16( + %0, + %1, + i16 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv8i16_nxv8i16_i16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a 
= call @llvm.riscv.vsaddu.nxv8i16.i16( + %0, + i16 9, + i32 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv8i16.i16( + %0, + %1, + i16 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv16i16_nxv16i16_i16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv16i16.i16( + %0, + i16 9, + i32 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv16i16.i16( + %0, + %1, + i16 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv32i16_nxv32i16_i16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv32i16.i16( + %0, + i16 9, + i32 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv32i16.i16( + %0, + %1, + i16 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv1i32_nxv1i32_i32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv1i32.i32( + %0, + i32 9, + i32 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv1i32.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv2i32_nxv2i32_i32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv2i32.i32( + %0, + i32 9, + i32 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv2i32.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv4i32_nxv4i32_i32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv4i32.i32( + %0, + i32 9, + i32 %1) + + ret %a +} + +define 
@intrinsic_vsaddu_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv4i32.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv8i32_nxv8i32_i32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv8i32.i32( + %0, + i32 9, + i32 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv8i32.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv16i32_nxv16i32_i32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv16i32.i32( + %0, + i32 9, + i32 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv16i32.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} diff --git a/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll b/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll new file mode 100644 index 00000000000..f0e582df466 --- /dev/null +++ b/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll @@ -0,0 +1,2377 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vsaddu.nxv1i8.nxv1i8( + , + , + i64); + +define @intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv1i8.nxv1i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vsaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv2i8.nxv2i8( + , + , + i64); + +define @intrinsic_vsaddu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv2i8.nxv2i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vsaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vsaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv4i8.nxv4i8( + , + , + i64); + +define @intrinsic_vsaddu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv4i8.nxv4i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vsaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv8i8.nxv8i8( + , + , + i64); + +define @intrinsic_vsaddu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv8i8.nxv8i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vsaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv16i8.nxv16i8( + , + , + i64); + +define @intrinsic_vsaddu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv16i8.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vsaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv32i8.nxv32i8( + , + , + i64); + +define @intrinsic_vsaddu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv32i8.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vsaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsaddu.vv 
{{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv64i8.nxv64i8( + , + , + i64); + +define @intrinsic_vsaddu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv64i8.nxv64i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8( + , + , + , + , + i64); + +define @intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv1i16.nxv1i16( + , + , + i64); + +define @intrinsic_vsaddu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv1i16.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vsaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv2i16.nxv2i16( + , + , + i64); + +define @intrinsic_vsaddu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv2i16.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vsaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv4i16.nxv4i16( + , + , + i64); + +define @intrinsic_vsaddu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv4i16.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vsaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv8i16.nxv8i16( + , + , + i64); + +define @intrinsic_vsaddu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv8i16.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vsaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv16i16.nxv16i16( + , + , + i64); + +define @intrinsic_vsaddu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv16i16.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vsaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv32i16.nxv32i16( + , + , + i64); + +define @intrinsic_vsaddu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv32i16.nxv32i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16( + , + , + , + , + i64); + +define @intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv1i32.nxv1i32( + , + , + i64); + +define @intrinsic_vsaddu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv1i32.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vsaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv2i32.nxv2i32( + , + , + i64); + +define @intrinsic_vsaddu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv2i32.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vsaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv4i32.nxv4i32( + , + , + i64); + +define @intrinsic_vsaddu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv4i32.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vsaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv8i32.nxv8i32( + , + , + i64); + +define @intrinsic_vsaddu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv8i32.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vsaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv16i32.nxv16i32( + , + , + i64); + +define @intrinsic_vsaddu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv16i32.nxv16i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32( + , + , + , + , + i64); + +define @intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32( + %0, + %1, + 
%2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv1i64.nxv1i64( + , + , + i64); + +define @intrinsic_vsaddu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv1i64.nxv1i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64( + , + , + , + , + i64); + +define @intrinsic_vsaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv2i64.nxv2i64( + , + , + i64); + +define @intrinsic_vsaddu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv2i64.nxv2i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64( + , + , + , + , + i64); + +define @intrinsic_vsaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv4i64.nxv4i64( + , + , + i64); + +define @intrinsic_vsaddu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv4i64.nxv4i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64( + , + , + , + , + i64); + +define @intrinsic_vsaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv8i64.nxv8i64( + , + , + i64); + +define @intrinsic_vsaddu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv8i64.nxv8i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64( + , + , + , + , + i64); + +define @intrinsic_vsaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv1i8.i8( 
+ , + i8, + i64); + +define @intrinsic_vsaddu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv1i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv1i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vsaddu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv2i8.i8( + , + i8, + i64); + +define @intrinsic_vsaddu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv2i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv2i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vsaddu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv4i8.i8( + , + i8, + i64); + +define @intrinsic_vsaddu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv4i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vsaddu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv8i8.i8( + , + i8, + i64); + +define @intrinsic_vsaddu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv8i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vsaddu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv16i8.i8( + , + i8, + i64); + +define @intrinsic_vsaddu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; 
CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv16i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv16i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vsaddu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv32i8.i8( + , + i8, + i64); + +define @intrinsic_vsaddu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv32i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv32i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vsaddu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv64i8.i8( + , + i8, + i64); + +define @intrinsic_vsaddu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv64i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv64i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vsaddu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv1i16.i16( + , + i16, + i64); + +define @intrinsic_vsaddu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv1i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv1i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vsaddu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv2i16.i16( + , + i16, + i64); + +define @intrinsic_vsaddu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv2i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare 
@llvm.riscv.vsaddu.mask.nxv2i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vsaddu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv4i16.i16( + , + i16, + i64); + +define @intrinsic_vsaddu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv4i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv4i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vsaddu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv8i16.i16( + , + i16, + i64); + +define @intrinsic_vsaddu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv8i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv8i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vsaddu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv16i16.i16( + , + i16, + i64); + +define @intrinsic_vsaddu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv16i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv16i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vsaddu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv32i16.i16( + , + i16, + i64); + +define @intrinsic_vsaddu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv32i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv32i16.i16( + , + , + i16, + , + i64); + +define 
@intrinsic_vsaddu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv1i32.i32( + , + i32, + i64); + +define @intrinsic_vsaddu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv1i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv1i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vsaddu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv2i32.i32( + , + i32, + i64); + +define @intrinsic_vsaddu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv2i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv2i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vsaddu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv4i32.i32( + , + i32, + i64); + +define @intrinsic_vsaddu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv4i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv4i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vsaddu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv8i32.i32( + , + i32, + i64); + +define @intrinsic_vsaddu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv8i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv8i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vsaddu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv16i32.i32( + , + i32, + i64); + +define @intrinsic_vsaddu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv16i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv16i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vsaddu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv16i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv1i64.i64( + , + i64, + i64); + +define @intrinsic_vsaddu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv1i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv1i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vsaddu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv2i64.i64( + , + i64, + i64); + +define @intrinsic_vsaddu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv2i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv2i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vsaddu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv2i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv4i64.i64( + , + i64, + i64); + +define @intrinsic_vsaddu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i64_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv4i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv4i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vsaddu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i64_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e64,m4,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv4i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vsaddu.nxv8i64.i64( + , + i64, + i64); + +define @intrinsic_vsaddu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i64_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vsaddu.nxv8i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vsaddu.mask.nxv8i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vsaddu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i64_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv8i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv1i8_nxv1i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv1i8.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv1i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv2i8_nxv2i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv2i8.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv2i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv4i8_nxv4i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv4i8.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv4i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv8i8_nxv8i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv8i8.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu 
+; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv8i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv16i8_nxv16i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv16i8.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv16i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv32i8_nxv32i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv32i8.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv32i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv64i8_nxv64i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv64i8.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv64i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv1i16_nxv1i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv1i16.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv1i16.i16( + %0, + %1, + i16 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv2i16_nxv2i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv2i16.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv2i16.i16( + %0, + %1, + i16 9, + %2, + i64 %3) + + ret %a +} + 
+define @intrinsic_vsaddu_vi_nxv4i16_nxv4i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv4i16.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv4i16.i16( + %0, + %1, + i16 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv8i16_nxv8i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv8i16.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv8i16.i16( + %0, + %1, + i16 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv16i16_nxv16i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv16i16.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv16i16.i16( + %0, + %1, + i16 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv32i16_nxv32i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv32i16.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv32i16.i16( + %0, + %1, + i16 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv1i32_nxv1i32_i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv1i32.i32( + %0, + i32 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv1i32.i32( + %0, + %1, + i32 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv2i32_nxv2i32_i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vsaddu_vi_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv2i32.i32( + %0, + i32 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv2i32.i32( + %0, + %1, + i32 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv4i32_nxv4i32_i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv4i32.i32( + %0, + i32 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv4i32.i32( + %0, + %1, + i32 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv8i32_nxv8i32_i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv8i32.i32( + %0, + i32 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv8i32.i32( + %0, + %1, + i32 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv16i32_nxv16i32_i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv16i32.i32( + %0, + i32 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv16i32.i32( + %0, + %1, + i32 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv1i64_nxv1i64_i64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv1i64.i64( + %0, + i64 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv1i64.i64( + %0, + %1, + i64 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv2i64_nxv2i64_i64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsaddu.vi 
{{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv2i64.i64( + %0, + i64 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv2i64.i64( + %0, + %1, + i64 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv4i64_nxv4i64_i64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv4i64_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv4i64.i64( + %0, + i64 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i64_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv4i64.i64( + %0, + %1, + i64 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vsaddu_vi_nxv8i64_nxv8i64_i64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_vi_nxv8i64_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vsaddu.nxv8i64.i64( + %0, + i64 9, + i64 %1) + + ret %a +} + +define @intrinsic_vsaddu_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i64_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsaddu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vsaddu.mask.nxv8i64.i64( + %0, + %1, + i64 9, + %2, + i64 %3) + + ret %a +} diff --git a/test/CodeGen/RISCV/rvv/vssub-rv32.ll b/test/CodeGen/RISCV/rvv/vssub-rv32.ll new file mode 100644 index 00000000000..bed91d57262 --- /dev/null +++ b/test/CodeGen/RISCV/rvv/vssub-rv32.ll @@ -0,0 +1,1441 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vssub.nxv1i8.nxv1i8( + , + , + i32); + +define @intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv1i8.nxv1i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv1i8.nxv1i8( + , + , + , + , + i32); + +define @intrinsic_vssub_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv2i8.nxv2i8( + , + , + i32); + +define @intrinsic_vssub_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv2i8.nxv2i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv2i8.nxv2i8( + , + , + , + , + i32); + +define @intrinsic_vssub_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, 
%2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv4i8.nxv4i8( + , + , + i32); + +define @intrinsic_vssub_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv4i8.nxv4i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv4i8.nxv4i8( + , + , + , + , + i32); + +define @intrinsic_vssub_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv8i8.nxv8i8( + , + , + i32); + +define @intrinsic_vssub_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv8i8.nxv8i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv8i8.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vssub_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv16i8.nxv16i8( + , + , + i32); + +define @intrinsic_vssub_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv16i8.nxv16i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv16i8.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vssub_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv32i8.nxv32i8( + , + , + i32); + +define @intrinsic_vssub_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv32i8.nxv32i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv32i8.nxv32i8( + , + , + , + , + i32); + +define @intrinsic_vssub_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: 
vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv64i8.nxv64i8( + , + , + i32); + +define @intrinsic_vssub_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv64i8.nxv64i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv64i8.nxv64i8( + , + , + , + , + i32); + +define @intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv1i16.nxv1i16( + , + , + i32); + +define @intrinsic_vssub_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv1i16.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv1i16.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vssub_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv2i16.nxv2i16( + , + , + i32); + +define @intrinsic_vssub_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv2i16.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv2i16.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vssub_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv4i16.nxv4i16( + , + , + i32); + +define @intrinsic_vssub_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv4i16.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv4i16.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vssub_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16( + %0, + 
%1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv8i16.nxv8i16( + , + , + i32); + +define @intrinsic_vssub_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv8i16.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv8i16.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vssub_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv16i16.nxv16i16( + , + , + i32); + +define @intrinsic_vssub_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv16i16.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv16i16.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vssub_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv32i16.nxv32i16( + , + , + i32); + +define @intrinsic_vssub_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv32i16.nxv32i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv32i16.nxv32i16( + , + , + , + , + i32); + +define @intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv1i32.nxv1i32( + , + , + i32); + +define @intrinsic_vssub_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv1i32.nxv1i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv1i32.nxv1i32( + , + , + , + , + i32); + +define @intrinsic_vssub_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare 
@llvm.riscv.vssub.nxv2i32.nxv2i32( + , + , + i32); + +define @intrinsic_vssub_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv2i32.nxv2i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv2i32.nxv2i32( + , + , + , + , + i32); + +define @intrinsic_vssub_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv4i32.nxv4i32( + , + , + i32); + +define @intrinsic_vssub_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv4i32.nxv4i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv4i32.nxv4i32( + , + , + , + , + i32); + +define @intrinsic_vssub_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv8i32.nxv8i32( + , + , + i32); + +define @intrinsic_vssub_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv8i32.nxv8i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv8i32.nxv8i32( + , + , + , + , + i32); + +define @intrinsic_vssub_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv16i32.nxv16i32( + , + , + i32); + +define @intrinsic_vssub_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv16i32.nxv16i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv16i32.nxv16i32( + , + , + , + , + i32); + +define @intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv1i8.i8( + , + i8, + i32); + +define @intrinsic_vssub_vx_nxv1i8_nxv1i8_i8( 
%0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv1i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv1i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vssub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv2i8.i8( + , + i8, + i32); + +define @intrinsic_vssub_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv2i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv2i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vssub_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv4i8.i8( + , + i8, + i32); + +define @intrinsic_vssub_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv4i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv4i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vssub_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv8i8.i8( + , + i8, + i32); + +define @intrinsic_vssub_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv8i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv8i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vssub_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv16i8.i8( + , + i8, + i32); + +define @intrinsic_vssub_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv16i8.i8( + %0, + i8 %1, 
+ i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv16i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vssub_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv32i8.i8( + , + i8, + i32); + +define @intrinsic_vssub_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv32i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv32i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vssub_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv64i8.i8( + , + i8, + i32); + +define @intrinsic_vssub_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv64i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv64i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vssub_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv1i16.i16( + , + i16, + i32); + +define @intrinsic_vssub_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv1i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv1i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vssub_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv2i16.i16( + , + i16, + i32); + +define @intrinsic_vssub_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv2i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv2i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vssub_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind 
{ +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv4i16.i16( + , + i16, + i32); + +define @intrinsic_vssub_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv4i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv4i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vssub_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv8i16.i16( + , + i16, + i32); + +define @intrinsic_vssub_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv8i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv8i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vssub_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv16i16.i16( + , + i16, + i32); + +define @intrinsic_vssub_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv16i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv16i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vssub_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv32i16.i16( + , + i16, + i32); + +define @intrinsic_vssub_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv32i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv32i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vssub_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; 
CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv1i32.i32( + , + i32, + i32); + +define @intrinsic_vssub_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv1i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv1i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vssub_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv2i32.i32( + , + i32, + i32); + +define @intrinsic_vssub_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv2i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv2i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vssub_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv4i32.i32( + , + i32, + i32); + +define @intrinsic_vssub_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv4i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv4i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vssub_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv8i32.i32( + , + i32, + i32); + +define @intrinsic_vssub_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv8i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv8i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vssub_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + 
ret %a +} + +declare @llvm.riscv.vssub.nxv16i32.i32( + , + i32, + i32); + +define @intrinsic_vssub_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv16i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv16i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vssub_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv16i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} diff --git a/test/CodeGen/RISCV/rvv/vssub-rv64.ll b/test/CodeGen/RISCV/rvv/vssub-rv64.ll new file mode 100644 index 00000000000..0170f6ce7b0 --- /dev/null +++ b/test/CodeGen/RISCV/rvv/vssub-rv64.ll @@ -0,0 +1,1761 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vssub.nxv1i8.nxv1i8( + , + , + i64); + +define @intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv1i8.nxv1i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv1i8.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vssub_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv2i8.nxv2i8( + , + , + i64); + +define @intrinsic_vssub_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv2i8.nxv2i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv2i8.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vssub_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv4i8.nxv4i8( + , + , + i64); + +define @intrinsic_vssub_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv4i8.nxv4i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv4i8.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vssub_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv8i8.nxv8i8( + , + , + i64); + +define @intrinsic_vssub_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv8i8.nxv8i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv8i8.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vssub_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv16i8.nxv16i8( + , + , + i64); + +define @intrinsic_vssub_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv16i8.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv16i8.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vssub_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv32i8.nxv32i8( + , + , + i64); + +define @intrinsic_vssub_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv32i8.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv32i8.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vssub_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv64i8.nxv64i8( + , + , + i64); + +define @intrinsic_vssub_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv64i8.nxv64i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv64i8.nxv64i8( + , + , + , + , + i64); + +define @intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8( + 
%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv1i16.nxv1i16( + , + , + i64); + +define @intrinsic_vssub_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv1i16.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv1i16.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vssub_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv2i16.nxv2i16( + , + , + i64); + +define @intrinsic_vssub_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv2i16.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv2i16.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vssub_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv4i16.nxv4i16( + , + , + i64); + +define @intrinsic_vssub_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv4i16.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv4i16.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vssub_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv8i16.nxv8i16( + , + , + i64); + +define @intrinsic_vssub_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv8i16.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv8i16.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vssub_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv16i16.nxv16i16( + , + , + i64); 
+ +define @intrinsic_vssub_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv16i16.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv16i16.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vssub_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv32i16.nxv32i16( + , + , + i64); + +define @intrinsic_vssub_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv32i16.nxv32i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv32i16.nxv32i16( + , + , + , + , + i64); + +define @intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv1i32.nxv1i32( + , + , + i64); + +define @intrinsic_vssub_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv1i32.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv1i32.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vssub_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv2i32.nxv2i32( + , + , + i64); + +define @intrinsic_vssub_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv2i32.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv2i32.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vssub_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv4i32.nxv4i32( + , + , + i64); + +define @intrinsic_vssub_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) 
nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv4i32.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv4i32.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vssub_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv8i32.nxv8i32( + , + , + i64); + +define @intrinsic_vssub_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv8i32.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv8i32.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vssub_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv16i32.nxv16i32( + , + , + i64); + +define @intrinsic_vssub_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv16i32.nxv16i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv16i32.nxv16i32( + , + , + , + , + i64); + +define @intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv1i64.nxv1i64( + , + , + i64); + +define @intrinsic_vssub_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv1i64.nxv1i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv1i64.nxv1i64( + , + , + , + , + i64); + +define @intrinsic_vssub_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv2i64.nxv2i64( + , + , + i64); + +define @intrinsic_vssub_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv2i64.nxv2i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv2i64.nxv2i64( + , + , + , + , + i64); + +define @intrinsic_vssub_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv4i64.nxv4i64( + , + , + i64); + +define @intrinsic_vssub_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv4i64.nxv4i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv4i64.nxv4i64( + , + , + , + , + i64); + +define @intrinsic_vssub_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv8i64.nxv8i64( + , + , + i64); + +define @intrinsic_vssub_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vv_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssub.nxv8i64.nxv8i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv8i64.nxv8i64( + , + , + , + , + i64); + +define @intrinsic_vssub_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vssub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv1i8.i8( + , + i8, + i64); + +define @intrinsic_vssub_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv1i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv1i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vssub_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv2i8.i8( + , + i8, + i64); + +define @intrinsic_vssub_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv2i8.i8( + %0, + i8 %1, + i64 %2) 
+ + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv2i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vssub_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv4i8.i8( + , + i8, + i64); + +define @intrinsic_vssub_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv4i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vssub_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv8i8.i8( + , + i8, + i64); + +define @intrinsic_vssub_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv8i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vssub_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv16i8.i8( + , + i8, + i64); + +define @intrinsic_vssub_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv16i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv16i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vssub_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv32i8.i8( + , + i8, + i64); + +define @intrinsic_vssub_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv32i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv32i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vssub_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv32i8_nxv32i8_i8 
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv64i8.i8( + , + i8, + i64); + +define @intrinsic_vssub_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv64i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv64i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vssub_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv1i16.i16( + , + i16, + i64); + +define @intrinsic_vssub_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv1i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv1i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vssub_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv2i16.i16( + , + i16, + i64); + +define @intrinsic_vssub_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv2i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv2i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vssub_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv4i16.i16( + , + i16, + i64); + +define @intrinsic_vssub_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv4i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv4i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vssub_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv4i16.i16( + %0, 
+ %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv8i16.i16( + , + i16, + i64); + +define @intrinsic_vssub_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv8i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv8i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vssub_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv16i16.i16( + , + i16, + i64); + +define @intrinsic_vssub_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv16i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv16i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vssub_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv32i16.i16( + , + i16, + i64); + +define @intrinsic_vssub_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv32i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv32i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vssub_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv1i32.i32( + , + i32, + i64); + +define @intrinsic_vssub_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv1i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv1i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vssub_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv2i32.i32( + , + i32, + i64); + +define 
@intrinsic_vssub_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv2i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv2i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vssub_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv4i32.i32( + , + i32, + i64); + +define @intrinsic_vssub_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv4i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv4i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vssub_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv8i32.i32( + , + i32, + i64); + +define @intrinsic_vssub_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv8i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv8i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vssub_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv16i32.i32( + , + i32, + i64); + +define @intrinsic_vssub_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv16i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv16i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vssub_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv16i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv1i64.i64( + , + i64, + i64); + +define @intrinsic_vssub_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv1i64_nxv1i64_i64 +; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv1i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv1i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vssub_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv2i64.i64( + , + i64, + i64); + +define @intrinsic_vssub_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv2i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv2i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vssub_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv2i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv4i64.i64( + , + i64, + i64); + +define @intrinsic_vssub_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv4i64_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv4i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv4i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vssub_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i64_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv4i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssub.nxv8i64.i64( + , + i64, + i64); + +define @intrinsic_vssub_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_vx_nxv8i64_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssub.nxv8i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssub.mask.nxv8i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vssub_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i64_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vssub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssub.mask.nxv8i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} diff --git a/test/CodeGen/RISCV/rvv/vssubu-rv32.ll b/test/CodeGen/RISCV/rvv/vssubu-rv32.ll new file mode 100644 index 00000000000..5892b649a97 --- /dev/null +++ b/test/CodeGen/RISCV/rvv/vssubu-rv32.ll @@ -0,0 +1,1441 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare 
@llvm.riscv.vssubu.nxv1i8.nxv1i8( + , + , + i32); + +define @intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv1i8.nxv1i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8( + , + , + , + , + i32); + +define @intrinsic_vssubu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv2i8.nxv2i8( + , + , + i32); + +define @intrinsic_vssubu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv2i8.nxv2i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8( + , + , + , + , + i32); + +define @intrinsic_vssubu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv4i8.nxv4i8( + , + , + i32); + +define @intrinsic_vssubu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv4i8.nxv4i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8( + , + , + , + , + i32); + +define @intrinsic_vssubu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv8i8.nxv8i8( + , + , + i32); + +define @intrinsic_vssubu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv8i8.nxv8i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vssubu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv16i8.nxv16i8( + , + , + i32); + +define @intrinsic_vssubu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vssubu_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv16i8.nxv16i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vssubu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv32i8.nxv32i8( + , + , + i32); + +define @intrinsic_vssubu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv32i8.nxv32i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8( + , + , + , + , + i32); + +define @intrinsic_vssubu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv64i8.nxv64i8( + , + , + i32); + +define @intrinsic_vssubu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv64i8.nxv64i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8( + , + , + , + , + i32); + +define @intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv1i16.nxv1i16( + , + , + i32); + +define @intrinsic_vssubu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv1i16.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vssubu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv2i16.nxv2i16( + , + , + i32); + +define @intrinsic_vssubu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv2i16.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vssubu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv4i16.nxv4i16( + , + , + i32); + +define @intrinsic_vssubu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv4i16.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vssubu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv8i16.nxv8i16( + , + , + i32); + +define @intrinsic_vssubu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv8i16.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vssubu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv16i16.nxv16i16( + , + , + i32); + +define @intrinsic_vssubu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv16i16.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vssubu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv32i16.nxv32i16( + , + , + i32); + +define @intrinsic_vssubu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: 
vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv32i16.nxv32i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16( + , + , + , + , + i32); + +define @intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv1i32.nxv1i32( + , + , + i32); + +define @intrinsic_vssubu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv1i32.nxv1i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32( + , + , + , + , + i32); + +define @intrinsic_vssubu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv2i32.nxv2i32( + , + , + i32); + +define @intrinsic_vssubu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv2i32.nxv2i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32( + , + , + , + , + i32); + +define @intrinsic_vssubu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv4i32.nxv4i32( + , + , + i32); + +define @intrinsic_vssubu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv4i32.nxv4i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32( + , + , + , + , + i32); + +define @intrinsic_vssubu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv8i32.nxv8i32( + , + , + i32); + +define @intrinsic_vssubu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call 
@llvm.riscv.vssubu.nxv8i32.nxv8i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32( + , + , + , + , + i32); + +define @intrinsic_vssubu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv16i32.nxv16i32( + , + , + i32); + +define @intrinsic_vssubu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv16i32.nxv16i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32( + , + , + , + , + i32); + +define @intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv1i8.i8( + , + i8, + i32); + +define @intrinsic_vssubu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv1i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv1i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vssubu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv2i8.i8( + , + i8, + i32); + +define @intrinsic_vssubu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv2i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv2i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vssubu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv4i8.i8( + , + i8, + i32); + +define @intrinsic_vssubu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv4i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv4i8.i8( + , + , + i8, + , + i32); 
+ +define @intrinsic_vssubu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv8i8.i8( + , + i8, + i32); + +define @intrinsic_vssubu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv8i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv8i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vssubu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv16i8.i8( + , + i8, + i32); + +define @intrinsic_vssubu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv16i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv16i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vssubu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv32i8.i8( + , + i8, + i32); + +define @intrinsic_vssubu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv32i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv32i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vssubu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv64i8.i8( + , + i8, + i32); + +define @intrinsic_vssubu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv64i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv64i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vssubu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv1i16.i16( + , + i16, + i32); + +define @intrinsic_vssubu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv1i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv1i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vssubu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv2i16.i16( + , + i16, + i32); + +define @intrinsic_vssubu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv2i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv2i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vssubu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv4i16.i16( + , + i16, + i32); + +define @intrinsic_vssubu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv4i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv4i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vssubu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv8i16.i16( + , + i16, + i32); + +define @intrinsic_vssubu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv8i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv8i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vssubu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vssubu.mask.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i16 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  i16,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vssubu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssubu_vx_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i16,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vssubu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i16 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  i16,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vssubu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssubu_vx_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i16,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vssubu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i16 %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  i32,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vssubu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vssubu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  i32,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vssubu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vssubu.mask.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vssubu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssubu.mask.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  i32,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vssubu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vssubu.mask.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vssubu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssubu.mask.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  i32,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vssubu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vssubu.mask.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vssubu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssubu.mask.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  i32,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vssubu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssubu_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vssubu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i32> %a
+}
diff --git a/test/CodeGen/RISCV/rvv/vssubu-rv64.ll b/test/CodeGen/RISCV/rvv/vssubu-rv64.ll
new file mode 100644
index 00000000000..2a8c5d6976f
--- /dev/null
+++ b/test/CodeGen/RISCV/rvv/vssubu-rv64.ll
@@ -0,0 +1,1761 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vssubu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL:
intrinsic_vssubu_mask_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv2i8.nxv2i8( + , + , + i64); + +define @intrinsic_vssubu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv2i8.nxv2i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vssubu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv4i8.nxv4i8( + , + , + i64); + +define @intrinsic_vssubu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv4i8.nxv4i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vssubu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv8i8.nxv8i8( + , + , + i64); + +define @intrinsic_vssubu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv8i8.nxv8i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vssubu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv16i8.nxv16i8( + , + , + i64); + +define @intrinsic_vssubu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv16i8.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vssubu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, 
{{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv32i8.nxv32i8( + , + , + i64); + +define @intrinsic_vssubu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv32i8.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vssubu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv64i8.nxv64i8( + , + , + i64); + +define @intrinsic_vssubu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv64i8.nxv64i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8( + , + , + , + , + i64); + +define @intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv1i16.nxv1i16( + , + , + i64); + +define @intrinsic_vssubu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv1i16.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vssubu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv2i16.nxv2i16( + , + , + i64); + +define @intrinsic_vssubu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv2i16.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vssubu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vssubu.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv4i16.nxv4i16( + , + , + i64); + +define @intrinsic_vssubu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv4i16.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vssubu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv8i16.nxv8i16( + , + , + i64); + +define @intrinsic_vssubu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv8i16.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vssubu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv16i16.nxv16i16( + , + , + i64); + +define @intrinsic_vssubu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv16i16.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vssubu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv32i16.nxv32i16( + , + , + i64); + +define @intrinsic_vssubu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv32i16.nxv32i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16( + , + , + , + , + i64); + +define @intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vssubu.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv1i32.nxv1i32( + , + , + i64); + +define @intrinsic_vssubu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv1i32.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vssubu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv2i32.nxv2i32( + , + , + i64); + +define @intrinsic_vssubu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv2i32.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vssubu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv4i32.nxv4i32( + , + , + i64); + +define @intrinsic_vssubu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv4i32.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vssubu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv8i32.nxv8i32( + , + , + i64); + +define @intrinsic_vssubu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv8i32.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vssubu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i64 
%4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv16i32.nxv16i32( + , + , + i64); + +define @intrinsic_vssubu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv16i32.nxv16i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32( + , + , + , + , + i64); + +define @intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv1i64.nxv1i64( + , + , + i64); + +define @intrinsic_vssubu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv1i64.nxv1i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64( + , + , + , + , + i64); + +define @intrinsic_vssubu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv2i64.nxv2i64( + , + , + i64); + +define @intrinsic_vssubu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv2i64.nxv2i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64( + , + , + , + , + i64); + +define @intrinsic_vssubu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv4i64.nxv4i64( + , + , + i64); + +define @intrinsic_vssubu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv4i64.nxv4i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64( + , + , + , + , + i64); + +define @intrinsic_vssubu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare 
@llvm.riscv.vssubu.nxv8i64.nxv8i64( + , + , + i64); + +define @intrinsic_vssubu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv8i64.nxv8i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64( + , + , + , + , + i64); + +define @intrinsic_vssubu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vssubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv1i8.i8( + , + i8, + i64); + +define @intrinsic_vssubu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv1i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv1i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vssubu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv2i8.i8( + , + i8, + i64); + +define @intrinsic_vssubu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv2i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv2i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vssubu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv4i8.i8( + , + i8, + i64); + +define @intrinsic_vssubu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv4i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vssubu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv8i8.i8( + , + i8, + i64); + +define @intrinsic_vssubu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vssubu_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv8i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vssubu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv16i8.i8( + , + i8, + i64); + +define @intrinsic_vssubu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv16i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv16i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vssubu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv32i8.i8( + , + i8, + i64); + +define @intrinsic_vssubu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv32i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv32i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vssubu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv64i8.i8( + , + i8, + i64); + +define @intrinsic_vssubu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv64i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv64i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vssubu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv1i16.i16( + , + i16, + i64); + +define @intrinsic_vssubu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call 
@llvm.riscv.vssubu.nxv1i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv1i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vssubu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv2i16.i16( + , + i16, + i64); + +define @intrinsic_vssubu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv2i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv2i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vssubu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv4i16.i16( + , + i16, + i64); + +define @intrinsic_vssubu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv4i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv4i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vssubu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv8i16.i16( + , + i16, + i64); + +define @intrinsic_vssubu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv8i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv8i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vssubu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv16i16.i16( + , + i16, + i64); + +define @intrinsic_vssubu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv16i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare 
@llvm.riscv.vssubu.mask.nxv16i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vssubu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv32i16.i16( + , + i16, + i64); + +define @intrinsic_vssubu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv32i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv32i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vssubu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv1i32.i32( + , + i32, + i64); + +define @intrinsic_vssubu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv1i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv1i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vssubu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv2i32.i32( + , + i32, + i64); + +define @intrinsic_vssubu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv2i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv2i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vssubu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv4i32.i32( + , + i32, + i64); + +define @intrinsic_vssubu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv4i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv4i32.i32( + , + , + i32, + , + i64); + +define 
@intrinsic_vssubu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv8i32.i32( + , + i32, + i64); + +define @intrinsic_vssubu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv8i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv8i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vssubu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv16i32.i32( + , + i32, + i64); + +define @intrinsic_vssubu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv16i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv16i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vssubu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv16i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv1i64.i64( + , + i64, + i64); + +define @intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv1i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv1i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vssubu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv2i64.i64( + , + i64, + i64); + +define @intrinsic_vssubu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv2i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv2i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vssubu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv2i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv4i64.i64( + , + i64, + i64); + +define @intrinsic_vssubu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i64_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv4i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv4i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vssubu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i64_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv4i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vssubu.nxv8i64.i64( + , + i64, + i64); + +define @intrinsic_vssubu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i64_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vssubu.nxv8i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vssubu.mask.nxv8i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vssubu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i64_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vssubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vssubu.mask.nxv8i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +}