1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-11-22 02:33:06 +01:00

[RISCV] Add support for vector saturating add/sub operations

This patch adds support for lowering the saturating vector add/sub
intrinsics to RVV instructions, for both fixed-length and
scalable-vector forms alike.

Note that some of the DAG combines are still not triggering for the
scalable-vector tests. These require a bit more work in the DAGCombiner
itself.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D106651
This commit is contained in:
Fraser Cormack 2021-07-23 11:10:04 +01:00
parent c2bd628550
commit 5afacc5171
14 changed files with 6080 additions and 88 deletions

View File

@ -516,6 +516,11 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::FP_TO_SINT, VT, Custom);
setOperationAction(ISD::FP_TO_UINT, VT, Custom);
setOperationAction(ISD::SADDSAT, VT, Legal);
setOperationAction(ISD::UADDSAT, VT, Legal);
setOperationAction(ISD::SSUBSAT, VT, Legal);
setOperationAction(ISD::USUBSAT, VT, Legal);
// Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
// nodes which truncate by one power of two at a time.
setOperationAction(ISD::TRUNCATE, VT, Custom);
@ -742,6 +747,11 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::MULHS, VT, Custom);
setOperationAction(ISD::MULHU, VT, Custom);
setOperationAction(ISD::SADDSAT, VT, Custom);
setOperationAction(ISD::UADDSAT, VT, Custom);
setOperationAction(ISD::SSUBSAT, VT, Custom);
setOperationAction(ISD::USUBSAT, VT, Custom);
setOperationAction(ISD::VSELECT, VT, Custom);
setOperationAction(ISD::SELECT_CC, VT, Expand);
@ -2569,6 +2579,14 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() &&
"Unexpected custom legalisation");
return SDValue();
case ISD::SADDSAT:
return lowerToScalableOp(Op, DAG, RISCVISD::SADDSAT_VL);
case ISD::UADDSAT:
return lowerToScalableOp(Op, DAG, RISCVISD::UADDSAT_VL);
case ISD::SSUBSAT:
return lowerToScalableOp(Op, DAG, RISCVISD::SSUBSAT_VL);
case ISD::USUBSAT:
return lowerToScalableOp(Op, DAG, RISCVISD::USUBSAT_VL);
case ISD::FADD:
return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL);
case ISD::FSUB:
@ -8376,6 +8394,10 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(UDIV_VL)
NODE_NAME_CASE(UREM_VL)
NODE_NAME_CASE(XOR_VL)
NODE_NAME_CASE(SADDSAT_VL)
NODE_NAME_CASE(UADDSAT_VL)
NODE_NAME_CASE(SSUBSAT_VL)
NODE_NAME_CASE(USUBSAT_VL)
NODE_NAME_CASE(FADD_VL)
NODE_NAME_CASE(FSUB_VL)
NODE_NAME_CASE(FMUL_VL)

View File

@ -196,6 +196,12 @@ enum NodeType : unsigned {
UDIV_VL,
UREM_VL,
XOR_VL,
SADDSAT_VL,
UADDSAT_VL,
SSUBSAT_VL,
USUBSAT_VL,
FADD_VL,
FSUB_VL,
FMUL_VL,

View File

@ -523,6 +523,12 @@ foreach vti = AllIntegerVectors in {
vti.RegClass:$rs2, simm5:$rs1, VMV0:$vm, vti.AVL, vti.Log2SEW)>;
}
// 12.1. Vector Single-Width Saturating Add and Subtract
defm : VPatBinarySDNode_VV_VX_VI<saddsat, "PseudoVSADD">;
defm : VPatBinarySDNode_VV_VX_VI<uaddsat, "PseudoVSADDU">;
defm : VPatBinarySDNode_VV_VX<ssubsat, "PseudoVSSUB">;
defm : VPatBinarySDNode_VV_VX<usubsat, "PseudoVSSUBU">;
// 16.1. Vector Mask-Register Logical Instructions
foreach mti = AllMasks in {
def : Pat<(mti.Mask (and VR:$rs1, VR:$rs2)),

View File

@ -89,6 +89,12 @@ def riscv_smin_vl : SDNode<"RISCVISD::SMIN_VL", SDT_RISCVIntBinOp_VL>;
def riscv_smax_vl : SDNode<"RISCVISD::SMAX_VL", SDT_RISCVIntBinOp_VL>;
def riscv_umin_vl : SDNode<"RISCVISD::UMIN_VL", SDT_RISCVIntBinOp_VL>;
def riscv_umax_vl : SDNode<"RISCVISD::UMAX_VL", SDT_RISCVIntBinOp_VL>;
def riscv_saddsat_vl : SDNode<"RISCVISD::SADDSAT_VL", SDT_RISCVIntBinOp_VL>;
def riscv_uaddsat_vl : SDNode<"RISCVISD::UADDSAT_VL", SDT_RISCVIntBinOp_VL>;
def riscv_ssubsat_vl : SDNode<"RISCVISD::SSUBSAT_VL", SDT_RISCVIntBinOp_VL>;
def riscv_usubsat_vl : SDNode<"RISCVISD::USUBSAT_VL", SDT_RISCVIntBinOp_VL>;
def riscv_fadd_vl : SDNode<"RISCVISD::FADD_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>;
def riscv_fsub_vl : SDNode<"RISCVISD::FSUB_VL", SDT_RISCVFPBinOp_VL>;
def riscv_fmul_vl : SDNode<"RISCVISD::FMUL_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>;
@ -903,6 +909,12 @@ foreach vti = AllIntegerVectors in {
XLenVT:$imm5, GPR:$vl, vti.Log2SEW)>;
}
// 12.1. Vector Single-Width Saturating Add and Subtract
defm : VPatBinaryVL_VV_VX_VI<riscv_saddsat_vl, "PseudoVSADD">;
defm : VPatBinaryVL_VV_VX_VI<riscv_uaddsat_vl, "PseudoVSADDU">;
defm : VPatBinaryVL_VV_VX<riscv_ssubsat_vl, "PseudoVSSUB">;
defm : VPatBinaryVL_VV_VX<riscv_usubsat_vl, "PseudoVSSUBU">;
} // Predicates = [HasStdExtV]
// 15.1. Vector Single-Width Integer Reduction Instructions

View File

@ -11,8 +11,7 @@ define <2 x i64> @add_umax_v2i64(<2 x i64> %a0) {
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 7
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; CHECK-NEXT: vmaxu.vx v25, v8, a0
; CHECK-NEXT: vadd.vi v8, v25, -7
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%v1 = call <2 x i64> @llvm.umax.v2i64(<2 x i64> %a0, <2 x i64> <i64 7, i64 7>)
%v2 = add <2 x i64> %v1, <i64 -7, i64 -7>
@ -24,8 +23,7 @@ define <vscale x 2 x i64> @add_umax_nxv2i64(<vscale x 2 x i64> %a0) {
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 7
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vmaxu.vx v26, v8, a0
; CHECK-NEXT: vadd.vi v8, v26, -7
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%ins1 = insertelement <vscale x 2 x i64> poison, i64 7, i32 0
%splat1 = shufflevector <vscale x 2 x i64> %ins1, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
@ -40,24 +38,11 @@ define <vscale x 2 x i64> @add_umax_nxv2i64(<vscale x 2 x i64> %a0) {
; they may be converted to usubsat(a,b).
define <2 x i64> @sub_umax_v2i64(<2 x i64> %a0, <2 x i64> %a1) {
; RV32-LABEL: sub_umax_v2i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vsub.vv v25, v8, v9
; RV32-NEXT: vmsltu.vv v0, v8, v25
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; RV32-NEXT: vmv.v.i v26, 0
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vmerge.vvm v8, v25, v26, v0
; RV32-NEXT: ret
;
; RV64-LABEL: sub_umax_v2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV64-NEXT: vsub.vv v25, v8, v9
; RV64-NEXT: vmsltu.vv v0, v8, v25
; RV64-NEXT: vmerge.vim v8, v25, 0, v0
; RV64-NEXT: ret
; CHECK-LABEL: sub_umax_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v9
; CHECK-NEXT: ret
%v1 = call <2 x i64> @llvm.umax.v2i64(<2 x i64> %a0, <2 x i64> %a1)
%v2 = sub <2 x i64> %v1, %a1
ret <2 x i64> %v2
@ -67,8 +52,7 @@ define <vscale x 2 x i64> @sub_umax_nxv2i64(<vscale x 2 x i64> %a0, <vscale x 2
; CHECK-LABEL: sub_umax_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vmaxu.vv v26, v8, v10
; CHECK-NEXT: vsub.vv v8, v26, v10
; CHECK-NEXT: vssubu.vv v8, v8, v10
; CHECK-NEXT: ret
%v1 = call <vscale x 2 x i64> @llvm.umax.nxv2i64(<vscale x 2 x i64> %a0, <vscale x 2 x i64> %a1)
%v2 = sub <vscale x 2 x i64> %v1, %a1
@ -76,24 +60,11 @@ define <vscale x 2 x i64> @sub_umax_nxv2i64(<vscale x 2 x i64> %a0, <vscale x 2
}
define <2 x i64> @sub_umin_v2i64(<2 x i64> %a0, <2 x i64> %a1) {
; RV32-LABEL: sub_umin_v2i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vsub.vv v25, v8, v9
; RV32-NEXT: vmsltu.vv v0, v8, v25
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; RV32-NEXT: vmv.v.i v26, 0
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vmerge.vvm v8, v25, v26, v0
; RV32-NEXT: ret
;
; RV64-LABEL: sub_umin_v2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV64-NEXT: vsub.vv v25, v8, v9
; RV64-NEXT: vmsltu.vv v0, v8, v25
; RV64-NEXT: vmerge.vim v8, v25, 0, v0
; RV64-NEXT: ret
; CHECK-LABEL: sub_umin_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v9
; CHECK-NEXT: ret
%v1 = call <2 x i64> @llvm.umin.v2i64(<2 x i64> %a0, <2 x i64> %a1)
%v2 = sub <2 x i64> %a0, %v1
ret <2 x i64> %v2
@ -103,8 +74,7 @@ define <vscale x 2 x i64> @sub_umin_nxv2i64(<vscale x 2 x i64> %a0, <vscale x 2
; CHECK-LABEL: sub_umin_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vmaxu.vv v26, v8, v10
; CHECK-NEXT: vsub.vv v8, v26, v10
; CHECK-NEXT: vssubu.vv v8, v8, v10
; CHECK-NEXT: ret
%v1 = call <vscale x 2 x i64> @llvm.umin.nxv2i64(<vscale x 2 x i64> %a0, <vscale x 2 x i64> %a1)
%v2 = sub <vscale x 2 x i64> %a0, %v1
@ -119,10 +89,7 @@ define <2 x i64> @vselect_sub_v2i64(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: vselect_sub_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; CHECK-NEXT: vmsleu.vv v0, v9, v8
; CHECK-NEXT: vsub.vv v25, v8, v9
; CHECK-NEXT: vmv.v.i v26, 0
; CHECK-NEXT: vmerge.vvm v8, v26, v25, v0
; CHECK-NEXT: vssubu.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp uge <2 x i64> %a0, %a1
%v1 = sub <2 x i64> %a0, %a1
@ -149,9 +116,7 @@ define <8 x i16> @vselect_sub_2_v8i16(<8 x i16> %x, i16 zeroext %w) nounwind {
; CHECK-LABEL: vselect_sub_2_v8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v0, v8, a0
; CHECK-NEXT: vsub.vx v25, v8, a0
; CHECK-NEXT: vmerge.vim v8, v25, 0, v0
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
%0 = insertelement <8 x i16> undef, i16 %w, i32 0
@ -185,12 +150,9 @@ entry:
define <2 x i64> @vselect_add_const_v2i64(<2 x i64> %a0) {
; CHECK-LABEL: vselect_add_const_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 6
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; CHECK-NEXT: vadd.vi v25, v8, -6
; CHECK-NEXT: addi a0, zero, 5
; CHECK-NEXT: vmsgtu.vx v0, v8, a0
; CHECK-NEXT: vmv.v.i v26, 0
; CHECK-NEXT: vmerge.vvm v8, v26, v25, v0
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%v1 = add <2 x i64> %a0, <i64 -6, i64 -6>
%cmp = icmp ugt <2 x i64> %a0, <i64 5, i64 5>
@ -221,27 +183,17 @@ define <2 x i16> @vselect_add_const_signbit_v2i16(<2 x i16> %a0) {
; RV32-LABEL: vselect_add_const_signbit_v2i16:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 8
; RV32-NEXT: addi a0, a0, -2
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; RV32-NEXT: vmsgtu.vx v0, v8, a0
; RV32-NEXT: lui a0, 1048568
; RV32-NEXT: addi a0, a0, 1
; RV32-NEXT: vadd.vx v25, v8, a0
; RV32-NEXT: vmv.v.i v26, 0
; RV32-NEXT: vmerge.vvm v8, v26, v25, v0
; RV32-NEXT: vssubu.vx v8, v8, a0
; RV32-NEXT: ret
;
; RV64-LABEL: vselect_add_const_signbit_v2i16:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 8
; RV64-NEXT: addiw a0, a0, -2
; RV64-NEXT: addiw a0, a0, -1
; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; RV64-NEXT: vmsgtu.vx v0, v8, a0
; RV64-NEXT: lui a0, 1048568
; RV64-NEXT: addiw a0, a0, 1
; RV64-NEXT: vadd.vx v25, v8, a0
; RV64-NEXT: vmv.v.i v26, 0
; RV64-NEXT: vmerge.vvm v8, v26, v25, v0
; RV64-NEXT: vssubu.vx v8, v8, a0
; RV64-NEXT: ret
%cmp = icmp ugt <2 x i16> %a0, <i16 32766, i16 32766>
%v1 = add <2 x i16> %a0, <i16 -32767, i16 -32767>
@ -290,12 +242,9 @@ define <vscale x 2 x i16> @vselect_add_const_signbit_nxv2i16(<vscale x 2 x i16>
define <2 x i16> @vselect_xor_const_signbit_v2i16(<2 x i16> %a0) {
; CHECK-LABEL: vselect_xor_const_signbit_v2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 8
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: vmv.v.i v25, 0
; CHECK-NEXT: lui a0, 1048568
; CHECK-NEXT: vxor.vx v26, v8, a0
; CHECK-NEXT: vmerge.vvm v8, v25, v26, v0
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%cmp = icmp slt <2 x i16> %a0, zeroinitializer
%v1 = xor <2 x i16> %a0, <i16 -32768, i16 -32768>
@ -330,10 +279,7 @@ define <2 x i64> @vselect_add_v2i64(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: vselect_add_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; CHECK-NEXT: vadd.vv v25, v8, v9
; CHECK-NEXT: vmsleu.vv v0, v8, v25
; CHECK-NEXT: vmv.v.i v26, -1
; CHECK-NEXT: vmerge.vvm v8, v26, v25, v0
; CHECK-NEXT: vsaddu.vv v8, v8, v9
; CHECK-NEXT: ret
%v1 = add <2 x i64> %a0, %a1
%cmp = icmp ule <2 x i64> %a0, %v1
@ -365,10 +311,7 @@ define <2 x i64> @vselect_add_const_2_v2i64(<2 x i64> %a0) {
; CHECK-LABEL: vselect_add_const_2_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; CHECK-NEXT: vadd.vi v25, v8, 6
; CHECK-NEXT: vmsleu.vi v0, v8, -7
; CHECK-NEXT: vmv.v.i v26, -1
; CHECK-NEXT: vmerge.vvm v8, v26, v25, v0
; CHECK-NEXT: vsaddu.vi v8, v8, 6
; CHECK-NEXT: ret
%v1 = add <2 x i64> %a0, <i64 6, i64 6>
%cmp = icmp ule <2 x i64> %a0, <i64 -7, i64 -7>

View File

@ -0,0 +1,633 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=CHECK,RV64
declare <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8>, <2 x i8>)
define <2 x i8> @sadd_v2i8_vv(<2 x i8> %va, <2 x i8> %b) {
; CHECK-LABEL: sadd_v2i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %va, <2 x i8> %b)
ret <2 x i8> %v
}
define <2 x i8> @sadd_v2i8_vx(<2 x i8> %va, i8 %b) {
; CHECK-LABEL: sadd_v2i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vsadd.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> undef, i8 %b, i32 0
%vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer
%v = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %va, <2 x i8> %vb)
ret <2 x i8> %v
}
define <2 x i8> @sadd_v2i8_vi(<2 x i8> %va) {
; CHECK-LABEL: sadd_v2i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> undef, i8 5, i32 0
%vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer
%v = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %va, <2 x i8> %vb)
ret <2 x i8> %v
}
declare <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8>, <4 x i8>)
define <4 x i8> @sadd_v4i8_vv(<4 x i8> %va, <4 x i8> %b) {
; CHECK-LABEL: sadd_v4i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> %va, <4 x i8> %b)
ret <4 x i8> %v
}
define <4 x i8> @sadd_v4i8_vx(<4 x i8> %va, i8 %b) {
; CHECK-LABEL: sadd_v4i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vsadd.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> undef, i8 %b, i32 0
%vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer
%v = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> %va, <4 x i8> %vb)
ret <4 x i8> %v
}
define <4 x i8> @sadd_v4i8_vi(<4 x i8> %va) {
; CHECK-LABEL: sadd_v4i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> undef, i8 5, i32 0
%vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer
%v = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> %va, <4 x i8> %vb)
ret <4 x i8> %v
}
declare <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8>, <8 x i8>)
define <8 x i8> @sadd_v8i8_vv(<8 x i8> %va, <8 x i8> %b) {
; CHECK-LABEL: sadd_v8i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> %va, <8 x i8> %b)
ret <8 x i8> %v
}
define <8 x i8> @sadd_v8i8_vx(<8 x i8> %va, i8 %b) {
; CHECK-LABEL: sadd_v8i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vsadd.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> undef, i8 %b, i32 0
%vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer
%v = call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> %va, <8 x i8> %vb)
ret <8 x i8> %v
}
define <8 x i8> @sadd_v8i8_vi(<8 x i8> %va) {
; CHECK-LABEL: sadd_v8i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> undef, i8 5, i32 0
%vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer
%v = call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> %va, <8 x i8> %vb)
ret <8 x i8> %v
}
declare <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8>, <16 x i8>)
define <16 x i8> @sadd_v16i8_vv(<16 x i8> %va, <16 x i8> %b) {
; CHECK-LABEL: sadd_v16i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %va, <16 x i8> %b)
ret <16 x i8> %v
}
define <16 x i8> @sadd_v16i8_vx(<16 x i8> %va, i8 %b) {
; CHECK-LABEL: sadd_v16i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vsadd.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> undef, i8 %b, i32 0
%vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer
%v = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %va, <16 x i8> %vb)
ret <16 x i8> %v
}
define <16 x i8> @sadd_v16i8_vi(<16 x i8> %va) {
; CHECK-LABEL: sadd_v16i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> undef, i8 5, i32 0
%vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer
%v = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %va, <16 x i8> %vb)
ret <16 x i8> %v
}
declare <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16>, <2 x i16>)
define <2 x i16> @sadd_v2i16_vv(<2 x i16> %va, <2 x i16> %b) {
; CHECK-LABEL: sadd_v2i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> %va, <2 x i16> %b)
ret <2 x i16> %v
}
define <2 x i16> @sadd_v2i16_vx(<2 x i16> %va, i16 %b) {
; CHECK-LABEL: sadd_v2i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vsadd.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> undef, i16 %b, i32 0
%vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer
%v = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> %va, <2 x i16> %vb)
ret <2 x i16> %v
}
define <2 x i16> @sadd_v2i16_vi(<2 x i16> %va) {
; CHECK-LABEL: sadd_v2i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> undef, i16 5, i32 0
%vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer
%v = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> %va, <2 x i16> %vb)
ret <2 x i16> %v
}
declare <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16>, <4 x i16>)
define <4 x i16> @sadd_v4i16_vv(<4 x i16> %va, <4 x i16> %b) {
; CHECK-LABEL: sadd_v4i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> %va, <4 x i16> %b)
ret <4 x i16> %v
}
define <4 x i16> @sadd_v4i16_vx(<4 x i16> %va, i16 %b) {
; CHECK-LABEL: sadd_v4i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vsadd.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> undef, i16 %b, i32 0
%vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer
%v = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> %va, <4 x i16> %vb)
ret <4 x i16> %v
}
define <4 x i16> @sadd_v4i16_vi(<4 x i16> %va) {
; CHECK-LABEL: sadd_v4i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> undef, i16 5, i32 0
%vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer
%v = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> %va, <4 x i16> %vb)
ret <4 x i16> %v
}
declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>)
define <8 x i16> @sadd_v8i16_vv(<8 x i16> %va, <8 x i16> %b) {
; CHECK-LABEL: sadd_v8i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %va, <8 x i16> %b)
ret <8 x i16> %v
}
define <8 x i16> @sadd_v8i16_vx(<8 x i16> %va, i16 %b) {
; CHECK-LABEL: sadd_v8i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vsadd.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> undef, i16 %b, i32 0
%vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer
%v = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %va, <8 x i16> %vb)
ret <8 x i16> %v
}
define <8 x i16> @sadd_v8i16_vi(<8 x i16> %va) {
; CHECK-LABEL: sadd_v8i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> undef, i16 5, i32 0
%vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer
%v = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %va, <8 x i16> %vb)
ret <8 x i16> %v
}
declare <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16>, <16 x i16>)
define <16 x i16> @sadd_v16i16_vv(<16 x i16> %va, <16 x i16> %b) {
; CHECK-LABEL: sadd_v16i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v10
; CHECK-NEXT: ret
%v = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %va, <16 x i16> %b)
ret <16 x i16> %v
}
define <16 x i16> @sadd_v16i16_vx(<16 x i16> %va, i16 %b) {
; CHECK-LABEL: sadd_v16i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT: vsadd.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> undef, i16 %b, i32 0
%vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer
%v = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %va, <16 x i16> %vb)
ret <16 x i16> %v
}
define <16 x i16> @sadd_v16i16_vi(<16 x i16> %va) {
; CHECK-LABEL: sadd_v16i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> undef, i16 5, i32 0
%vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer
%v = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %va, <16 x i16> %vb)
ret <16 x i16> %v
}
declare <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32>, <2 x i32>)
define <2 x i32> @sadd_v2i32_vv(<2 x i32> %va, <2 x i32> %b) {
; CHECK-LABEL: sadd_v2i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> %va, <2 x i32> %b)
ret <2 x i32> %v
}
define <2 x i32> @sadd_v2i32_vx(<2 x i32> %va, i32 %b) {
; CHECK-LABEL: sadd_v2i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT: vsadd.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> undef, i32 %b, i32 0
%vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer
%v = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> %va, <2 x i32> %vb)
ret <2 x i32> %v
}
define <2 x i32> @sadd_v2i32_vi(<2 x i32> %va) {
; CHECK-LABEL: sadd_v2i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> undef, i32 5, i32 0
%vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer
%v = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> %va, <2 x i32> %vb)
ret <2 x i32> %v
}
declare <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32>, <4 x i32>)
define <4 x i32> @sadd_v4i32_vv(<4 x i32> %va, <4 x i32> %b) {
; CHECK-LABEL: sadd_v4i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %va, <4 x i32> %b)
ret <4 x i32> %v
}
define <4 x i32> @sadd_v4i32_vx(<4 x i32> %va, i32 %b) {
; CHECK-LABEL: sadd_v4i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vsadd.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> undef, i32 %b, i32 0
%vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer
%v = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %va, <4 x i32> %vb)
ret <4 x i32> %v
}
define <4 x i32> @sadd_v4i32_vi(<4 x i32> %va) {
; CHECK-LABEL: sadd_v4i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> undef, i32 5, i32 0
%vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer
%v = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %va, <4 x i32> %vb)
ret <4 x i32> %v
}
declare <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32>, <8 x i32>)
define <8 x i32> @sadd_v8i32_vv(<8 x i32> %va, <8 x i32> %b) {
; CHECK-LABEL: sadd_v8i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v10
; CHECK-NEXT: ret
%v = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> %va, <8 x i32> %b)
ret <8 x i32> %v
}
define <8 x i32> @sadd_v8i32_vx(<8 x i32> %va, i32 %b) {
; CHECK-LABEL: sadd_v8i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT: vsadd.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> undef, i32 %b, i32 0
%vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer
%v = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> %va, <8 x i32> %vb)
ret <8 x i32> %v
}
define <8 x i32> @sadd_v8i32_vi(<8 x i32> %va) {
; CHECK-LABEL: sadd_v8i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> undef, i32 5, i32 0
%vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer
%v = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> %va, <8 x i32> %vb)
ret <8 x i32> %v
}
declare <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32>, <16 x i32>)
define <16 x i32> @sadd_v16i32_vv(<16 x i32> %va, <16 x i32> %b) {
; CHECK-LABEL: sadd_v16i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v12
; CHECK-NEXT: ret
%v = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> %va, <16 x i32> %b)
ret <16 x i32> %v
}
define <16 x i32> @sadd_v16i32_vx(<16 x i32> %va, i32 %b) {
; CHECK-LABEL: sadd_v16i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT: vsadd.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> undef, i32 %b, i32 0
%vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer
%v = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> %va, <16 x i32> %vb)
ret <16 x i32> %v
}
define <16 x i32> @sadd_v16i32_vi(<16 x i32> %va) {
; CHECK-LABEL: sadd_v16i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> undef, i32 5, i32 0
%vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer
%v = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> %va, <16 x i32> %vb)
ret <16 x i32> %v
}
declare <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64>, <2 x i64>)
define <2 x i64> @sadd_v2i64_vv(<2 x i64> %va, <2 x i64> %b) {
; CHECK-LABEL: sadd_v2i64_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> %va, <2 x i64> %b)
ret <2 x i64> %v
}
define <2 x i64> @sadd_v2i64_vx(<2 x i64> %va, i64 %b) {
; RV32-LABEL: sadd_v2i64_vx:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v25, (a0), zero
; RV32-NEXT: vsadd.vv v8, v8, v25
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: sadd_v2i64_vx:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV64-NEXT: vsadd.vx v8, v8, a0
; RV64-NEXT: ret
%elt.head = insertelement <2 x i64> undef, i64 %b, i32 0
%vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer
%v = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> %va, <2 x i64> %vb)
ret <2 x i64> %v
}
define <2 x i64> @sadd_v2i64_vi(<2 x i64> %va) {
; CHECK-LABEL: sadd_v2i64_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i64> undef, i64 5, i32 0
%vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer
%v = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> %va, <2 x i64> %vb)
ret <2 x i64> %v
}
declare <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64>, <4 x i64>)
define <4 x i64> @sadd_v4i64_vv(<4 x i64> %va, <4 x i64> %b) {
; CHECK-LABEL: sadd_v4i64_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v10
; CHECK-NEXT: ret
%v = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> %va, <4 x i64> %b)
ret <4 x i64> %v
}
define <4 x i64> @sadd_v4i64_vx(<4 x i64> %va, i64 %b) {
; RV32-LABEL: sadd_v4i64_vx:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v26, (a0), zero
; RV32-NEXT: vsadd.vv v8, v8, v26
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: sadd_v4i64_vx:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV64-NEXT: vsadd.vx v8, v8, a0
; RV64-NEXT: ret
%elt.head = insertelement <4 x i64> undef, i64 %b, i32 0
%vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer
%v = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> %va, <4 x i64> %vb)
ret <4 x i64> %v
}
define <4 x i64> @sadd_v4i64_vi(<4 x i64> %va) {
; CHECK-LABEL: sadd_v4i64_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i64> undef, i64 5, i32 0
%vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer
%v = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> %va, <4 x i64> %vb)
ret <4 x i64> %v
}
declare <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64>, <8 x i64>)
define <8 x i64> @sadd_v8i64_vv(<8 x i64> %va, <8 x i64> %b) {
; CHECK-LABEL: sadd_v8i64_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v12
; CHECK-NEXT: ret
%v = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> %va, <8 x i64> %b)
ret <8 x i64> %v
}
define <8 x i64> @sadd_v8i64_vx(<8 x i64> %va, i64 %b) {
; RV32-LABEL: sadd_v8i64_vx:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v28, (a0), zero
; RV32-NEXT: vsadd.vv v8, v8, v28
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: sadd_v8i64_vx:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT: vsadd.vx v8, v8, a0
; RV64-NEXT: ret
%elt.head = insertelement <8 x i64> undef, i64 %b, i32 0
%vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer
%v = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> %va, <8 x i64> %vb)
ret <8 x i64> %v
}
define <8 x i64> @sadd_v8i64_vi(<8 x i64> %va) {
; CHECK-LABEL: sadd_v8i64_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i64> undef, i64 5, i32 0
%vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer
%v = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> %va, <8 x i64> %vb)
ret <8 x i64> %v
}
declare <16 x i64> @llvm.sadd.sat.v16i64(<16 x i64>, <16 x i64>)
define <16 x i64> @sadd_v16i64_vv(<16 x i64> %va, <16 x i64> %b) {
; CHECK-LABEL: sadd_v16i64_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v16
; CHECK-NEXT: ret
%v = call <16 x i64> @llvm.sadd.sat.v16i64(<16 x i64> %va, <16 x i64> %b)
ret <16 x i64> %v
}
define <16 x i64> @sadd_v16i64_vx(<16 x i64> %va, i64 %b) {
; RV32-LABEL: sadd_v16i64_vx:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsadd.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: sadd_v16i64_vx:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT: vsadd.vx v8, v8, a0
; RV64-NEXT: ret
%elt.head = insertelement <16 x i64> undef, i64 %b, i32 0
%vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer
%v = call <16 x i64> @llvm.sadd.sat.v16i64(<16 x i64> %va, <16 x i64> %vb)
ret <16 x i64> %v
}
define <16 x i64> @sadd_v16i64_vi(<16 x i64> %va) {
; CHECK-LABEL: sadd_v16i64_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i64> undef, i64 5, i32 0
%vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer
%v = call <16 x i64> @llvm.sadd.sat.v16i64(<16 x i64> %va, <16 x i64> %vb)
ret <16 x i64> %v
}

View File

@ -0,0 +1,633 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=CHECK,RV64
; Unsigned saturating add (@llvm.uadd.sat) over fixed-length vectors of
; i8/i16/i32/i64, element counts 2..16. Every type is exercised in three
; operand forms, each following the same IR pattern:
;   _vv: vector + vector            -> vsaddu.vv
;   _vx: vector + scalar splat      -> vsaddu.vx (RV32 i64: stack spill +
;        zero-stride vlse64.v re-splat, then vsaddu.vv, since the scalar
;        is split across a0/a1)
;   _vi: vector + constant splat 8  -> vsaddu.vi
; The _vx/_vi splats are built with the canonical insertelement +
; shufflevector-to-zeroinitializer idiom.
declare <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8>, <2 x i8>)
define <2 x i8> @uadd_v2i8_vv(<2 x i8> %va, <2 x i8> %b) {
; CHECK-LABEL: uadd_v2i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %va, <2 x i8> %b)
ret <2 x i8> %v
}
define <2 x i8> @uadd_v2i8_vx(<2 x i8> %va, i8 %b) {
; CHECK-LABEL: uadd_v2i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vsaddu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> undef, i8 %b, i32 0
%vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer
%v = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %va, <2 x i8> %vb)
ret <2 x i8> %v
}
define <2 x i8> @uadd_v2i8_vi(<2 x i8> %va) {
; CHECK-LABEL: uadd_v2i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> undef, i8 8, i32 0
%vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer
%v = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %va, <2 x i8> %vb)
ret <2 x i8> %v
}
declare <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8>, <4 x i8>)
define <4 x i8> @uadd_v4i8_vv(<4 x i8> %va, <4 x i8> %b) {
; CHECK-LABEL: uadd_v4i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> %va, <4 x i8> %b)
ret <4 x i8> %v
}
define <4 x i8> @uadd_v4i8_vx(<4 x i8> %va, i8 %b) {
; CHECK-LABEL: uadd_v4i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vsaddu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> undef, i8 %b, i32 0
%vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer
%v = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> %va, <4 x i8> %vb)
ret <4 x i8> %v
}
define <4 x i8> @uadd_v4i8_vi(<4 x i8> %va) {
; CHECK-LABEL: uadd_v4i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> undef, i8 8, i32 0
%vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer
%v = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> %va, <4 x i8> %vb)
ret <4 x i8> %v
}
declare <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8>, <8 x i8>)
define <8 x i8> @uadd_v8i8_vv(<8 x i8> %va, <8 x i8> %b) {
; CHECK-LABEL: uadd_v8i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> %va, <8 x i8> %b)
ret <8 x i8> %v
}
define <8 x i8> @uadd_v8i8_vx(<8 x i8> %va, i8 %b) {
; CHECK-LABEL: uadd_v8i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vsaddu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> undef, i8 %b, i32 0
%vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer
%v = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> %va, <8 x i8> %vb)
ret <8 x i8> %v
}
define <8 x i8> @uadd_v8i8_vi(<8 x i8> %va) {
; CHECK-LABEL: uadd_v8i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> undef, i8 8, i32 0
%vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer
%v = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> %va, <8 x i8> %vb)
ret <8 x i8> %v
}
declare <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8>, <16 x i8>)
define <16 x i8> @uadd_v16i8_vv(<16 x i8> %va, <16 x i8> %b) {
; CHECK-LABEL: uadd_v16i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %va, <16 x i8> %b)
ret <16 x i8> %v
}
define <16 x i8> @uadd_v16i8_vx(<16 x i8> %va, i8 %b) {
; CHECK-LABEL: uadd_v16i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vsaddu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> undef, i8 %b, i32 0
%vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer
%v = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %va, <16 x i8> %vb)
ret <16 x i8> %v
}
define <16 x i8> @uadd_v16i8_vi(<16 x i8> %va) {
; CHECK-LABEL: uadd_v16i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> undef, i8 8, i32 0
%vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer
%v = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %va, <16 x i8> %vb)
ret <16 x i8> %v
}
; i16 element types: same three operand forms at e16.
declare <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16>, <2 x i16>)
define <2 x i16> @uadd_v2i16_vv(<2 x i16> %va, <2 x i16> %b) {
; CHECK-LABEL: uadd_v2i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> %va, <2 x i16> %b)
ret <2 x i16> %v
}
define <2 x i16> @uadd_v2i16_vx(<2 x i16> %va, i16 %b) {
; CHECK-LABEL: uadd_v2i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vsaddu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> undef, i16 %b, i32 0
%vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer
%v = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> %va, <2 x i16> %vb)
ret <2 x i16> %v
}
define <2 x i16> @uadd_v2i16_vi(<2 x i16> %va) {
; CHECK-LABEL: uadd_v2i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> undef, i16 8, i32 0
%vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer
%v = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> %va, <2 x i16> %vb)
ret <2 x i16> %v
}
declare <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16>, <4 x i16>)
define <4 x i16> @uadd_v4i16_vv(<4 x i16> %va, <4 x i16> %b) {
; CHECK-LABEL: uadd_v4i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> %va, <4 x i16> %b)
ret <4 x i16> %v
}
define <4 x i16> @uadd_v4i16_vx(<4 x i16> %va, i16 %b) {
; CHECK-LABEL: uadd_v4i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vsaddu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> undef, i16 %b, i32 0
%vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer
%v = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> %va, <4 x i16> %vb)
ret <4 x i16> %v
}
define <4 x i16> @uadd_v4i16_vi(<4 x i16> %va) {
; CHECK-LABEL: uadd_v4i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> undef, i16 8, i32 0
%vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer
%v = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> %va, <4 x i16> %vb)
ret <4 x i16> %v
}
declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>)
define <8 x i16> @uadd_v8i16_vv(<8 x i16> %va, <8 x i16> %b) {
; CHECK-LABEL: uadd_v8i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %va, <8 x i16> %b)
ret <8 x i16> %v
}
define <8 x i16> @uadd_v8i16_vx(<8 x i16> %va, i16 %b) {
; CHECK-LABEL: uadd_v8i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vsaddu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> undef, i16 %b, i32 0
%vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer
%v = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %va, <8 x i16> %vb)
ret <8 x i16> %v
}
define <8 x i16> @uadd_v8i16_vi(<8 x i16> %va) {
; CHECK-LABEL: uadd_v8i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> undef, i16 8, i32 0
%vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer
%v = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %va, <8 x i16> %vb)
ret <8 x i16> %v
}
declare <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16>, <16 x i16>)
define <16 x i16> @uadd_v16i16_vv(<16 x i16> %va, <16 x i16> %b) {
; CHECK-LABEL: uadd_v16i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v10
; CHECK-NEXT: ret
%v = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %va, <16 x i16> %b)
ret <16 x i16> %v
}
define <16 x i16> @uadd_v16i16_vx(<16 x i16> %va, i16 %b) {
; CHECK-LABEL: uadd_v16i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT: vsaddu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> undef, i16 %b, i32 0
%vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer
%v = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %va, <16 x i16> %vb)
ret <16 x i16> %v
}
define <16 x i16> @uadd_v16i16_vi(<16 x i16> %va) {
; CHECK-LABEL: uadd_v16i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> undef, i16 8, i32 0
%vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer
%v = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %va, <16 x i16> %vb)
ret <16 x i16> %v
}
; i32 element types: same three operand forms at e32.
declare <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32>, <2 x i32>)
define <2 x i32> @uadd_v2i32_vv(<2 x i32> %va, <2 x i32> %b) {
; CHECK-LABEL: uadd_v2i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> %va, <2 x i32> %b)
ret <2 x i32> %v
}
define <2 x i32> @uadd_v2i32_vx(<2 x i32> %va, i32 %b) {
; CHECK-LABEL: uadd_v2i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT: vsaddu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> undef, i32 %b, i32 0
%vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer
%v = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> %va, <2 x i32> %vb)
ret <2 x i32> %v
}
define <2 x i32> @uadd_v2i32_vi(<2 x i32> %va) {
; CHECK-LABEL: uadd_v2i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> undef, i32 8, i32 0
%vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer
%v = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> %va, <2 x i32> %vb)
ret <2 x i32> %v
}
declare <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32>, <4 x i32>)
define <4 x i32> @uadd_v4i32_vv(<4 x i32> %va, <4 x i32> %b) {
; CHECK-LABEL: uadd_v4i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %va, <4 x i32> %b)
ret <4 x i32> %v
}
define <4 x i32> @uadd_v4i32_vx(<4 x i32> %va, i32 %b) {
; CHECK-LABEL: uadd_v4i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vsaddu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> undef, i32 %b, i32 0
%vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer
%v = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %va, <4 x i32> %vb)
ret <4 x i32> %v
}
define <4 x i32> @uadd_v4i32_vi(<4 x i32> %va) {
; CHECK-LABEL: uadd_v4i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> undef, i32 8, i32 0
%vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer
%v = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %va, <4 x i32> %vb)
ret <4 x i32> %v
}
declare <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32>, <8 x i32>)
define <8 x i32> @uadd_v8i32_vv(<8 x i32> %va, <8 x i32> %b) {
; CHECK-LABEL: uadd_v8i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v10
; CHECK-NEXT: ret
%v = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> %va, <8 x i32> %b)
ret <8 x i32> %v
}
define <8 x i32> @uadd_v8i32_vx(<8 x i32> %va, i32 %b) {
; CHECK-LABEL: uadd_v8i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT: vsaddu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> undef, i32 %b, i32 0
%vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer
%v = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> %va, <8 x i32> %vb)
ret <8 x i32> %v
}
define <8 x i32> @uadd_v8i32_vi(<8 x i32> %va) {
; CHECK-LABEL: uadd_v8i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> undef, i32 8, i32 0
%vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer
%v = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> %va, <8 x i32> %vb)
ret <8 x i32> %v
}
declare <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32>, <16 x i32>)
define <16 x i32> @uadd_v16i32_vv(<16 x i32> %va, <16 x i32> %b) {
; CHECK-LABEL: uadd_v16i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v12
; CHECK-NEXT: ret
%v = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> %va, <16 x i32> %b)
ret <16 x i32> %v
}
define <16 x i32> @uadd_v16i32_vx(<16 x i32> %va, i32 %b) {
; CHECK-LABEL: uadd_v16i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT: vsaddu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> undef, i32 %b, i32 0
%vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer
%v = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> %va, <16 x i32> %vb)
ret <16 x i32> %v
}
define <16 x i32> @uadd_v16i32_vi(<16 x i32> %va) {
; CHECK-LABEL: uadd_v16i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> undef, i32 8, i32 0
%vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer
%v = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> %va, <16 x i32> %vb)
ret <16 x i32> %v
}
; i64 element types. The _vx cases diverge per target: RV64 keeps the i64
; scalar in a0 and selects vsaddu.vx, while RV32 receives it in a0/a1 and
; re-splats it through the stack with a zero-stride vlse64.v before a
; vsaddu.vv.
declare <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64>, <2 x i64>)
define <2 x i64> @uadd_v2i64_vv(<2 x i64> %va, <2 x i64> %b) {
; CHECK-LABEL: uadd_v2i64_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %va, <2 x i64> %b)
ret <2 x i64> %v
}
define <2 x i64> @uadd_v2i64_vx(<2 x i64> %va, i64 %b) {
; RV32-LABEL: uadd_v2i64_vx:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v25, (a0), zero
; RV32-NEXT: vsaddu.vv v8, v8, v25
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: uadd_v2i64_vx:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV64-NEXT: vsaddu.vx v8, v8, a0
; RV64-NEXT: ret
%elt.head = insertelement <2 x i64> undef, i64 %b, i32 0
%vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer
%v = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %va, <2 x i64> %vb)
ret <2 x i64> %v
}
define <2 x i64> @uadd_v2i64_vi(<2 x i64> %va) {
; CHECK-LABEL: uadd_v2i64_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i64> undef, i64 8, i32 0
%vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer
%v = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %va, <2 x i64> %vb)
ret <2 x i64> %v
}
declare <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64>, <4 x i64>)
define <4 x i64> @uadd_v4i64_vv(<4 x i64> %va, <4 x i64> %b) {
; CHECK-LABEL: uadd_v4i64_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v10
; CHECK-NEXT: ret
%v = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> %va, <4 x i64> %b)
ret <4 x i64> %v
}
define <4 x i64> @uadd_v4i64_vx(<4 x i64> %va, i64 %b) {
; RV32-LABEL: uadd_v4i64_vx:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v26, (a0), zero
; RV32-NEXT: vsaddu.vv v8, v8, v26
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: uadd_v4i64_vx:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV64-NEXT: vsaddu.vx v8, v8, a0
; RV64-NEXT: ret
%elt.head = insertelement <4 x i64> undef, i64 %b, i32 0
%vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer
%v = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> %va, <4 x i64> %vb)
ret <4 x i64> %v
}
define <4 x i64> @uadd_v4i64_vi(<4 x i64> %va) {
; CHECK-LABEL: uadd_v4i64_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i64> undef, i64 8, i32 0
%vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer
%v = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> %va, <4 x i64> %vb)
ret <4 x i64> %v
}
declare <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64>, <8 x i64>)
define <8 x i64> @uadd_v8i64_vv(<8 x i64> %va, <8 x i64> %b) {
; CHECK-LABEL: uadd_v8i64_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v12
; CHECK-NEXT: ret
%v = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> %va, <8 x i64> %b)
ret <8 x i64> %v
}
define <8 x i64> @uadd_v8i64_vx(<8 x i64> %va, i64 %b) {
; RV32-LABEL: uadd_v8i64_vx:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v28, (a0), zero
; RV32-NEXT: vsaddu.vv v8, v8, v28
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: uadd_v8i64_vx:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT: vsaddu.vx v8, v8, a0
; RV64-NEXT: ret
%elt.head = insertelement <8 x i64> undef, i64 %b, i32 0
%vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer
%v = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> %va, <8 x i64> %vb)
ret <8 x i64> %v
}
define <8 x i64> @uadd_v8i64_vi(<8 x i64> %va) {
; CHECK-LABEL: uadd_v8i64_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i64> undef, i64 8, i32 0
%vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer
%v = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> %va, <8 x i64> %vb)
ret <8 x i64> %v
}
declare <16 x i64> @llvm.uadd.sat.v16i64(<16 x i64>, <16 x i64>)
define <16 x i64> @uadd_v16i64_vv(<16 x i64> %va, <16 x i64> %b) {
; CHECK-LABEL: uadd_v16i64_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v16
; CHECK-NEXT: ret
%v = call <16 x i64> @llvm.uadd.sat.v16i64(<16 x i64> %va, <16 x i64> %b)
ret <16 x i64> %v
}
define <16 x i64> @uadd_v16i64_vx(<16 x i64> %va, i64 %b) {
; RV32-LABEL: uadd_v16i64_vx:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsaddu.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: uadd_v16i64_vx:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT: vsaddu.vx v8, v8, a0
; RV64-NEXT: ret
%elt.head = insertelement <16 x i64> undef, i64 %b, i32 0
%vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer
%v = call <16 x i64> @llvm.uadd.sat.v16i64(<16 x i64> %va, <16 x i64> %vb)
ret <16 x i64> %v
}
define <16 x i64> @uadd_v16i64_vi(<16 x i64> %va) {
; CHECK-LABEL: uadd_v16i64_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i64> undef, i64 8, i32 0
%vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer
%v = call <16 x i64> @llvm.uadd.sat.v16i64(<16 x i64> %va, <16 x i64> %vb)
ret <16 x i64> %v
}

View File

@ -0,0 +1,649 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=CHECK,RV64
declare <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8>, <2 x i8>)
define <2 x i8> @ssub_v2i8_vv(<2 x i8> %va, <2 x i8> %b) {
; CHECK-LABEL: ssub_v2i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %va, <2 x i8> %b)
ret <2 x i8> %v
}
define <2 x i8> @ssub_v2i8_vx(<2 x i8> %va, i8 %b) {
; CHECK-LABEL: ssub_v2i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> undef, i8 %b, i32 0
%vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer
%v = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %va, <2 x i8> %vb)
ret <2 x i8> %v
}
define <2 x i8> @ssub_v2i8_vi(<2 x i8> %va) {
; CHECK-LABEL: ssub_v2i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> undef, i8 1, i32 0
%vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer
%v = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %va, <2 x i8> %vb)
ret <2 x i8> %v
}
declare <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8>, <4 x i8>)
define <4 x i8> @ssub_v4i8_vv(<4 x i8> %va, <4 x i8> %b) {
; CHECK-LABEL: ssub_v4i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> %va, <4 x i8> %b)
ret <4 x i8> %v
}
define <4 x i8> @ssub_v4i8_vx(<4 x i8> %va, i8 %b) {
; CHECK-LABEL: ssub_v4i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> undef, i8 %b, i32 0
%vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer
%v = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> %va, <4 x i8> %vb)
ret <4 x i8> %v
}
define <4 x i8> @ssub_v4i8_vi(<4 x i8> %va) {
; CHECK-LABEL: ssub_v4i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> undef, i8 1, i32 0
%vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer
%v = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> %va, <4 x i8> %vb)
ret <4 x i8> %v
}
declare <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8>, <8 x i8>)
define <8 x i8> @ssub_v8i8_vv(<8 x i8> %va, <8 x i8> %b) {
; CHECK-LABEL: ssub_v8i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> %va, <8 x i8> %b)
ret <8 x i8> %v
}
define <8 x i8> @ssub_v8i8_vx(<8 x i8> %va, i8 %b) {
; CHECK-LABEL: ssub_v8i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> undef, i8 %b, i32 0
%vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer
%v = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> %va, <8 x i8> %vb)
ret <8 x i8> %v
}
define <8 x i8> @ssub_v8i8_vi(<8 x i8> %va) {
; CHECK-LABEL: ssub_v8i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> undef, i8 1, i32 0
%vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer
%v = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> %va, <8 x i8> %vb)
ret <8 x i8> %v
}
declare <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8>, <16 x i8>)
define <16 x i8> @ssub_v16i8_vv(<16 x i8> %va, <16 x i8> %b) {
; CHECK-LABEL: ssub_v16i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %va, <16 x i8> %b)
ret <16 x i8> %v
}
define <16 x i8> @ssub_v16i8_vx(<16 x i8> %va, i8 %b) {
; CHECK-LABEL: ssub_v16i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> undef, i8 %b, i32 0
%vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer
%v = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %va, <16 x i8> %vb)
ret <16 x i8> %v
}
define <16 x i8> @ssub_v16i8_vi(<16 x i8> %va) {
; CHECK-LABEL: ssub_v16i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> undef, i8 1, i32 0
%vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer
%v = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %va, <16 x i8> %vb)
ret <16 x i8> %v
}
declare <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16>, <2 x i16>)
define <2 x i16> @ssub_v2i16_vv(<2 x i16> %va, <2 x i16> %b) {
; CHECK-LABEL: ssub_v2i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> %va, <2 x i16> %b)
ret <2 x i16> %v
}
define <2 x i16> @ssub_v2i16_vx(<2 x i16> %va, i16 %b) {
; CHECK-LABEL: ssub_v2i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> undef, i16 %b, i32 0
%vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer
%v = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> %va, <2 x i16> %vb)
ret <2 x i16> %v
}
define <2 x i16> @ssub_v2i16_vi(<2 x i16> %va) {
; CHECK-LABEL: ssub_v2i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> undef, i16 1, i32 0
%vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer
%v = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> %va, <2 x i16> %vb)
ret <2 x i16> %v
}
declare <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16>, <4 x i16>)
define <4 x i16> @ssub_v4i16_vv(<4 x i16> %va, <4 x i16> %b) {
; CHECK-LABEL: ssub_v4i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> %va, <4 x i16> %b)
ret <4 x i16> %v
}
define <4 x i16> @ssub_v4i16_vx(<4 x i16> %va, i16 %b) {
; CHECK-LABEL: ssub_v4i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> undef, i16 %b, i32 0
%vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer
%v = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> %va, <4 x i16> %vb)
ret <4 x i16> %v
}
define <4 x i16> @ssub_v4i16_vi(<4 x i16> %va) {
; CHECK-LABEL: ssub_v4i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> undef, i16 1, i32 0
%vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer
%v = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> %va, <4 x i16> %vb)
ret <4 x i16> %v
}
declare <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16>, <8 x i16>)
define <8 x i16> @ssub_v8i16_vv(<8 x i16> %va, <8 x i16> %b) {
; CHECK-LABEL: ssub_v8i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %va, <8 x i16> %b)
ret <8 x i16> %v
}
define <8 x i16> @ssub_v8i16_vx(<8 x i16> %va, i16 %b) {
; CHECK-LABEL: ssub_v8i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> undef, i16 %b, i32 0
%vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer
%v = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %va, <8 x i16> %vb)
ret <8 x i16> %v
}
define <8 x i16> @ssub_v8i16_vi(<8 x i16> %va) {
; CHECK-LABEL: ssub_v8i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> undef, i16 1, i32 0
%vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer
%v = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %va, <8 x i16> %vb)
ret <8 x i16> %v
}
declare <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16>, <16 x i16>)
define <16 x i16> @ssub_v16i16_vv(<16 x i16> %va, <16 x i16> %b) {
; CHECK-LABEL: ssub_v16i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v10
; CHECK-NEXT: ret
%v = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %va, <16 x i16> %b)
ret <16 x i16> %v
}
define <16 x i16> @ssub_v16i16_vx(<16 x i16> %va, i16 %b) {
; CHECK-LABEL: ssub_v16i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> undef, i16 %b, i32 0
%vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer
%v = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %va, <16 x i16> %vb)
ret <16 x i16> %v
}
define <16 x i16> @ssub_v16i16_vi(<16 x i16> %va) {
; CHECK-LABEL: ssub_v16i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> undef, i16 1, i32 0
%vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer
%v = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %va, <16 x i16> %vb)
ret <16 x i16> %v
}
; ssub.sat on <2 x i32> (e32, LMUL=1/2): vssub.vv / vssub.vx; the constant
; splat goes through addi + vssub.vx.
declare <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32>, <2 x i32>)
define <2 x i32> @ssub_v2i32_vv(<2 x i32> %va, <2 x i32> %b) {
; CHECK-LABEL: ssub_v2i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> %va, <2 x i32> %b)
ret <2 x i32> %v
}
define <2 x i32> @ssub_v2i32_vx(<2 x i32> %va, i32 %b) {
; CHECK-LABEL: ssub_v2i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> undef, i32 %b, i32 0
%vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer
%v = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> %va, <2 x i32> %vb)
ret <2 x i32> %v
}
define <2 x i32> @ssub_v2i32_vi(<2 x i32> %va) {
; CHECK-LABEL: ssub_v2i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> undef, i32 1, i32 0
%vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer
%v = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> %va, <2 x i32> %vb)
ret <2 x i32> %v
}
; ssub.sat on <4 x i32> (e32, LMUL=1): vssub.vv / vssub.vx; the constant
; splat goes through addi + vssub.vx.
declare <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32>, <4 x i32>)
define <4 x i32> @ssub_v4i32_vv(<4 x i32> %va, <4 x i32> %b) {
; CHECK-LABEL: ssub_v4i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %va, <4 x i32> %b)
ret <4 x i32> %v
}
define <4 x i32> @ssub_v4i32_vx(<4 x i32> %va, i32 %b) {
; CHECK-LABEL: ssub_v4i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> undef, i32 %b, i32 0
%vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer
%v = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %va, <4 x i32> %vb)
ret <4 x i32> %v
}
define <4 x i32> @ssub_v4i32_vi(<4 x i32> %va) {
; CHECK-LABEL: ssub_v4i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> undef, i32 1, i32 0
%vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer
%v = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %va, <4 x i32> %vb)
ret <4 x i32> %v
}
; ssub.sat on <8 x i32> (e32, LMUL=2): vssub.vv / vssub.vx; the constant
; splat goes through addi + vssub.vx.
declare <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32>, <8 x i32>)
define <8 x i32> @ssub_v8i32_vv(<8 x i32> %va, <8 x i32> %b) {
; CHECK-LABEL: ssub_v8i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v10
; CHECK-NEXT: ret
%v = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> %va, <8 x i32> %b)
ret <8 x i32> %v
}
define <8 x i32> @ssub_v8i32_vx(<8 x i32> %va, i32 %b) {
; CHECK-LABEL: ssub_v8i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> undef, i32 %b, i32 0
%vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer
%v = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> %va, <8 x i32> %vb)
ret <8 x i32> %v
}
define <8 x i32> @ssub_v8i32_vi(<8 x i32> %va) {
; CHECK-LABEL: ssub_v8i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> undef, i32 1, i32 0
%vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer
%v = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> %va, <8 x i32> %vb)
ret <8 x i32> %v
}
; ssub.sat on <16 x i32> (e32, LMUL=4): vssub.vv / vssub.vx; the constant
; splat goes through addi + vssub.vx.
declare <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32>, <16 x i32>)
define <16 x i32> @ssub_v16i32_vv(<16 x i32> %va, <16 x i32> %b) {
; CHECK-LABEL: ssub_v16i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v12
; CHECK-NEXT: ret
%v = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> %va, <16 x i32> %b)
ret <16 x i32> %v
}
define <16 x i32> @ssub_v16i32_vx(<16 x i32> %va, i32 %b) {
; CHECK-LABEL: ssub_v16i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> undef, i32 %b, i32 0
%vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer
%v = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> %va, <16 x i32> %vb)
ret <16 x i32> %v
}
define <16 x i32> @ssub_v16i32_vi(<16 x i32> %va) {
; CHECK-LABEL: ssub_v16i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> undef, i32 1, i32 0
%vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer
%v = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> %va, <16 x i32> %vb)
ret <16 x i32> %v
}
; ssub.sat on <2 x i64> (e64, LMUL=1). The vx case diverges per target: on
; RV32 an i64 scalar does not fit in one GPR, so the value is stored to the
; stack and splatted with a zero-stride vlse64.v, then vssub.vv is used;
; RV64 can use vssub.vx directly.
declare <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64>, <2 x i64>)
define <2 x i64> @ssub_v2i64_vv(<2 x i64> %va, <2 x i64> %b) {
; CHECK-LABEL: ssub_v2i64_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> %va, <2 x i64> %b)
ret <2 x i64> %v
}
define <2 x i64> @ssub_v2i64_vx(<2 x i64> %va, i64 %b) {
; RV32-LABEL: ssub_v2i64_vx:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v25, (a0), zero
; RV32-NEXT: vssub.vv v8, v8, v25
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: ssub_v2i64_vx:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV64-NEXT: vssub.vx v8, v8, a0
; RV64-NEXT: ret
%elt.head = insertelement <2 x i64> undef, i64 %b, i32 0
%vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer
%v = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> %va, <2 x i64> %vb)
ret <2 x i64> %v
}
define <2 x i64> @ssub_v2i64_vi(<2 x i64> %va) {
; CHECK-LABEL: ssub_v2i64_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i64> undef, i64 1, i32 0
%vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer
%v = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> %va, <2 x i64> %vb)
ret <2 x i64> %v
}
; ssub.sat on <4 x i64> (e64, LMUL=2). RV32 splats the i64 scalar via the
; stack + vlse64.v and uses vssub.vv; RV64 uses vssub.vx directly.
declare <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64>, <4 x i64>)
define <4 x i64> @ssub_v4i64_vv(<4 x i64> %va, <4 x i64> %b) {
; CHECK-LABEL: ssub_v4i64_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v10
; CHECK-NEXT: ret
%v = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> %va, <4 x i64> %b)
ret <4 x i64> %v
}
define <4 x i64> @ssub_v4i64_vx(<4 x i64> %va, i64 %b) {
; RV32-LABEL: ssub_v4i64_vx:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v26, (a0), zero
; RV32-NEXT: vssub.vv v8, v8, v26
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: ssub_v4i64_vx:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV64-NEXT: vssub.vx v8, v8, a0
; RV64-NEXT: ret
%elt.head = insertelement <4 x i64> undef, i64 %b, i32 0
%vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer
%v = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> %va, <4 x i64> %vb)
ret <4 x i64> %v
}
define <4 x i64> @ssub_v4i64_vi(<4 x i64> %va) {
; CHECK-LABEL: ssub_v4i64_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i64> undef, i64 1, i32 0
%vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer
%v = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> %va, <4 x i64> %vb)
ret <4 x i64> %v
}
; ssub.sat on <8 x i64> (e64, LMUL=4). RV32 splats the i64 scalar via the
; stack + vlse64.v and uses vssub.vv; RV64 uses vssub.vx directly.
declare <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64>, <8 x i64>)
define <8 x i64> @ssub_v8i64_vv(<8 x i64> %va, <8 x i64> %b) {
; CHECK-LABEL: ssub_v8i64_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v12
; CHECK-NEXT: ret
%v = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> %va, <8 x i64> %b)
ret <8 x i64> %v
}
define <8 x i64> @ssub_v8i64_vx(<8 x i64> %va, i64 %b) {
; RV32-LABEL: ssub_v8i64_vx:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v28, (a0), zero
; RV32-NEXT: vssub.vv v8, v8, v28
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: ssub_v8i64_vx:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT: vssub.vx v8, v8, a0
; RV64-NEXT: ret
%elt.head = insertelement <8 x i64> undef, i64 %b, i32 0
%vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer
%v = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> %va, <8 x i64> %vb)
ret <8 x i64> %v
}
define <8 x i64> @ssub_v8i64_vi(<8 x i64> %va) {
; CHECK-LABEL: ssub_v8i64_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i64> undef, i64 1, i32 0
%vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer
%v = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> %va, <8 x i64> %vb)
ret <8 x i64> %v
}
; ssub.sat on <16 x i64> (e64, LMUL=8). RV32 splats the i64 scalar via the
; stack + vlse64.v and uses vssub.vv; RV64 uses vssub.vx directly.
declare <16 x i64> @llvm.ssub.sat.v16i64(<16 x i64>, <16 x i64>)
define <16 x i64> @ssub_v16i64_vv(<16 x i64> %va, <16 x i64> %b) {
; CHECK-LABEL: ssub_v16i64_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v16
; CHECK-NEXT: ret
%v = call <16 x i64> @llvm.ssub.sat.v16i64(<16 x i64> %va, <16 x i64> %b)
ret <16 x i64> %v
}
define <16 x i64> @ssub_v16i64_vx(<16 x i64> %va, i64 %b) {
; RV32-LABEL: ssub_v16i64_vx:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vssub.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: ssub_v16i64_vx:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT: vssub.vx v8, v8, a0
; RV64-NEXT: ret
%elt.head = insertelement <16 x i64> undef, i64 %b, i32 0
%vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer
%v = call <16 x i64> @llvm.ssub.sat.v16i64(<16 x i64> %va, <16 x i64> %vb)
ret <16 x i64> %v
}
define <16 x i64> @ssub_v16i64_vi(<16 x i64> %va) {
; CHECK-LABEL: ssub_v16i64_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i64> undef, i64 1, i32 0
%vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer
%v = call <16 x i64> @llvm.ssub.sat.v16i64(<16 x i64> %va, <16 x i64> %vb)
ret <16 x i64> %v
}

View File

@ -0,0 +1,649 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=CHECK,RV64
; usub.sat on <2 x i8> (e8, LMUL=1/8): vssubu.vv / vssubu.vx; the constant
; splat of 2 goes through addi + vssubu.vx (no immediate form).
declare <2 x i8> @llvm.usub.sat.v2i8(<2 x i8>, <2 x i8>)
define <2 x i8> @usub_v2i8_vv(<2 x i8> %va, <2 x i8> %b) {
; CHECK-LABEL: usub_v2i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %va, <2 x i8> %b)
ret <2 x i8> %v
}
define <2 x i8> @usub_v2i8_vx(<2 x i8> %va, i8 %b) {
; CHECK-LABEL: usub_v2i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> undef, i8 %b, i32 0
%vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer
%v = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %va, <2 x i8> %vb)
ret <2 x i8> %v
}
define <2 x i8> @usub_v2i8_vi(<2 x i8> %va) {
; CHECK-LABEL: usub_v2i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> undef, i8 2, i32 0
%vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer
%v = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %va, <2 x i8> %vb)
ret <2 x i8> %v
}
; usub.sat on <4 x i8> (e8, LMUL=1/4): vssubu.vv / vssubu.vx; constant splat
; via addi + vssubu.vx.
declare <4 x i8> @llvm.usub.sat.v4i8(<4 x i8>, <4 x i8>)
define <4 x i8> @usub_v4i8_vv(<4 x i8> %va, <4 x i8> %b) {
; CHECK-LABEL: usub_v4i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> %va, <4 x i8> %b)
ret <4 x i8> %v
}
define <4 x i8> @usub_v4i8_vx(<4 x i8> %va, i8 %b) {
; CHECK-LABEL: usub_v4i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> undef, i8 %b, i32 0
%vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer
%v = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> %va, <4 x i8> %vb)
ret <4 x i8> %v
}
define <4 x i8> @usub_v4i8_vi(<4 x i8> %va) {
; CHECK-LABEL: usub_v4i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> undef, i8 2, i32 0
%vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer
%v = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> %va, <4 x i8> %vb)
ret <4 x i8> %v
}
; usub.sat on <8 x i8> (e8, LMUL=1/2): vssubu.vv / vssubu.vx; constant splat
; via addi + vssubu.vx.
declare <8 x i8> @llvm.usub.sat.v8i8(<8 x i8>, <8 x i8>)
define <8 x i8> @usub_v8i8_vv(<8 x i8> %va, <8 x i8> %b) {
; CHECK-LABEL: usub_v8i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> %va, <8 x i8> %b)
ret <8 x i8> %v
}
define <8 x i8> @usub_v8i8_vx(<8 x i8> %va, i8 %b) {
; CHECK-LABEL: usub_v8i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> undef, i8 %b, i32 0
%vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer
%v = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> %va, <8 x i8> %vb)
ret <8 x i8> %v
}
define <8 x i8> @usub_v8i8_vi(<8 x i8> %va) {
; CHECK-LABEL: usub_v8i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> undef, i8 2, i32 0
%vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer
%v = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> %va, <8 x i8> %vb)
ret <8 x i8> %v
}
; usub.sat on <16 x i8> (e8, LMUL=1): vssubu.vv / vssubu.vx; constant splat
; via addi + vssubu.vx.
declare <16 x i8> @llvm.usub.sat.v16i8(<16 x i8>, <16 x i8>)
define <16 x i8> @usub_v16i8_vv(<16 x i8> %va, <16 x i8> %b) {
; CHECK-LABEL: usub_v16i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %va, <16 x i8> %b)
ret <16 x i8> %v
}
define <16 x i8> @usub_v16i8_vx(<16 x i8> %va, i8 %b) {
; CHECK-LABEL: usub_v16i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> undef, i8 %b, i32 0
%vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer
%v = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %va, <16 x i8> %vb)
ret <16 x i8> %v
}
define <16 x i8> @usub_v16i8_vi(<16 x i8> %va) {
; CHECK-LABEL: usub_v16i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> undef, i8 2, i32 0
%vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer
%v = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %va, <16 x i8> %vb)
ret <16 x i8> %v
}
; usub.sat on <2 x i16> (e16, LMUL=1/4): vssubu.vv / vssubu.vx; constant
; splat via addi + vssubu.vx.
declare <2 x i16> @llvm.usub.sat.v2i16(<2 x i16>, <2 x i16>)
define <2 x i16> @usub_v2i16_vv(<2 x i16> %va, <2 x i16> %b) {
; CHECK-LABEL: usub_v2i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> %va, <2 x i16> %b)
ret <2 x i16> %v
}
define <2 x i16> @usub_v2i16_vx(<2 x i16> %va, i16 %b) {
; CHECK-LABEL: usub_v2i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> undef, i16 %b, i32 0
%vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer
%v = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> %va, <2 x i16> %vb)
ret <2 x i16> %v
}
define <2 x i16> @usub_v2i16_vi(<2 x i16> %va) {
; CHECK-LABEL: usub_v2i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> undef, i16 2, i32 0
%vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer
%v = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> %va, <2 x i16> %vb)
ret <2 x i16> %v
}
; usub.sat on <4 x i16> (e16, LMUL=1/2): vssubu.vv / vssubu.vx; constant
; splat via addi + vssubu.vx.
declare <4 x i16> @llvm.usub.sat.v4i16(<4 x i16>, <4 x i16>)
define <4 x i16> @usub_v4i16_vv(<4 x i16> %va, <4 x i16> %b) {
; CHECK-LABEL: usub_v4i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> %va, <4 x i16> %b)
ret <4 x i16> %v
}
define <4 x i16> @usub_v4i16_vx(<4 x i16> %va, i16 %b) {
; CHECK-LABEL: usub_v4i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> undef, i16 %b, i32 0
%vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer
%v = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> %va, <4 x i16> %vb)
ret <4 x i16> %v
}
define <4 x i16> @usub_v4i16_vi(<4 x i16> %va) {
; CHECK-LABEL: usub_v4i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> undef, i16 2, i32 0
%vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer
%v = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> %va, <4 x i16> %vb)
ret <4 x i16> %v
}
; usub.sat on <8 x i16> (e16, LMUL=1): vssubu.vv / vssubu.vx; constant splat
; via addi + vssubu.vx.
declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>)
define <8 x i16> @usub_v8i16_vv(<8 x i16> %va, <8 x i16> %b) {
; CHECK-LABEL: usub_v8i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %va, <8 x i16> %b)
ret <8 x i16> %v
}
define <8 x i16> @usub_v8i16_vx(<8 x i16> %va, i16 %b) {
; CHECK-LABEL: usub_v8i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> undef, i16 %b, i32 0
%vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer
%v = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %va, <8 x i16> %vb)
ret <8 x i16> %v
}
define <8 x i16> @usub_v8i16_vi(<8 x i16> %va) {
; CHECK-LABEL: usub_v8i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> undef, i16 2, i32 0
%vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer
%v = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %va, <8 x i16> %vb)
ret <8 x i16> %v
}
; usub.sat on <16 x i16> (e16, LMUL=2): vssubu.vv / vssubu.vx; constant
; splat via addi + vssubu.vx.
declare <16 x i16> @llvm.usub.sat.v16i16(<16 x i16>, <16 x i16>)
define <16 x i16> @usub_v16i16_vv(<16 x i16> %va, <16 x i16> %b) {
; CHECK-LABEL: usub_v16i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v10
; CHECK-NEXT: ret
%v = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %va, <16 x i16> %b)
ret <16 x i16> %v
}
define <16 x i16> @usub_v16i16_vx(<16 x i16> %va, i16 %b) {
; CHECK-LABEL: usub_v16i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> undef, i16 %b, i32 0
%vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer
%v = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %va, <16 x i16> %vb)
ret <16 x i16> %v
}
define <16 x i16> @usub_v16i16_vi(<16 x i16> %va) {
; CHECK-LABEL: usub_v16i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> undef, i16 2, i32 0
%vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer
%v = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %va, <16 x i16> %vb)
ret <16 x i16> %v
}
; usub.sat on <2 x i32> (e32, LMUL=1/2): vssubu.vv / vssubu.vx; constant
; splat via addi + vssubu.vx.
declare <2 x i32> @llvm.usub.sat.v2i32(<2 x i32>, <2 x i32>)
define <2 x i32> @usub_v2i32_vv(<2 x i32> %va, <2 x i32> %b) {
; CHECK-LABEL: usub_v2i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> %va, <2 x i32> %b)
ret <2 x i32> %v
}
define <2 x i32> @usub_v2i32_vx(<2 x i32> %va, i32 %b) {
; CHECK-LABEL: usub_v2i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> undef, i32 %b, i32 0
%vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer
%v = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> %va, <2 x i32> %vb)
ret <2 x i32> %v
}
define <2 x i32> @usub_v2i32_vi(<2 x i32> %va) {
; CHECK-LABEL: usub_v2i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> undef, i32 2, i32 0
%vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer
%v = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> %va, <2 x i32> %vb)
ret <2 x i32> %v
}
; usub.sat on <4 x i32> (e32, LMUL=1): vssubu.vv / vssubu.vx; constant splat
; via addi + vssubu.vx.
declare <4 x i32> @llvm.usub.sat.v4i32(<4 x i32>, <4 x i32>)
define <4 x i32> @usub_v4i32_vv(<4 x i32> %va, <4 x i32> %b) {
; CHECK-LABEL: usub_v4i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %va, <4 x i32> %b)
ret <4 x i32> %v
}
define <4 x i32> @usub_v4i32_vx(<4 x i32> %va, i32 %b) {
; CHECK-LABEL: usub_v4i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> undef, i32 %b, i32 0
%vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer
%v = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %va, <4 x i32> %vb)
ret <4 x i32> %v
}
define <4 x i32> @usub_v4i32_vi(<4 x i32> %va) {
; CHECK-LABEL: usub_v4i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> undef, i32 2, i32 0
%vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer
%v = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %va, <4 x i32> %vb)
ret <4 x i32> %v
}
; usub.sat on <8 x i32> (e32, LMUL=2): vssubu.vv / vssubu.vx; constant splat
; via addi + vssubu.vx.
declare <8 x i32> @llvm.usub.sat.v8i32(<8 x i32>, <8 x i32>)
define <8 x i32> @usub_v8i32_vv(<8 x i32> %va, <8 x i32> %b) {
; CHECK-LABEL: usub_v8i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v10
; CHECK-NEXT: ret
%v = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> %va, <8 x i32> %b)
ret <8 x i32> %v
}
define <8 x i32> @usub_v8i32_vx(<8 x i32> %va, i32 %b) {
; CHECK-LABEL: usub_v8i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> undef, i32 %b, i32 0
%vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer
%v = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> %va, <8 x i32> %vb)
ret <8 x i32> %v
}
define <8 x i32> @usub_v8i32_vi(<8 x i32> %va) {
; CHECK-LABEL: usub_v8i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> undef, i32 2, i32 0
%vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer
%v = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> %va, <8 x i32> %vb)
ret <8 x i32> %v
}
; usub.sat on <16 x i32> (e32, LMUL=4): vssubu.vv / vssubu.vx; constant
; splat via addi + vssubu.vx.
declare <16 x i32> @llvm.usub.sat.v16i32(<16 x i32>, <16 x i32>)
define <16 x i32> @usub_v16i32_vv(<16 x i32> %va, <16 x i32> %b) {
; CHECK-LABEL: usub_v16i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v12
; CHECK-NEXT: ret
%v = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> %va, <16 x i32> %b)
ret <16 x i32> %v
}
define <16 x i32> @usub_v16i32_vx(<16 x i32> %va, i32 %b) {
; CHECK-LABEL: usub_v16i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> undef, i32 %b, i32 0
%vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer
%v = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> %va, <16 x i32> %vb)
ret <16 x i32> %v
}
define <16 x i32> @usub_v16i32_vi(<16 x i32> %va) {
; CHECK-LABEL: usub_v16i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> undef, i32 2, i32 0
%vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer
%v = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> %va, <16 x i32> %vb)
ret <16 x i32> %v
}
; usub.sat on <2 x i64> (e64, LMUL=1). On RV32 the i64 scalar is splatted
; through the stack with a zero-stride vlse64.v and vssubu.vv is used;
; RV64 uses vssubu.vx directly.
declare <2 x i64> @llvm.usub.sat.v2i64(<2 x i64>, <2 x i64>)
define <2 x i64> @usub_v2i64_vv(<2 x i64> %va, <2 x i64> %b) {
; CHECK-LABEL: usub_v2i64_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> %va, <2 x i64> %b)
ret <2 x i64> %v
}
define <2 x i64> @usub_v2i64_vx(<2 x i64> %va, i64 %b) {
; RV32-LABEL: usub_v2i64_vx:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v25, (a0), zero
; RV32-NEXT: vssubu.vv v8, v8, v25
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: usub_v2i64_vx:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV64-NEXT: vssubu.vx v8, v8, a0
; RV64-NEXT: ret
%elt.head = insertelement <2 x i64> undef, i64 %b, i32 0
%vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer
%v = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> %va, <2 x i64> %vb)
ret <2 x i64> %v
}
define <2 x i64> @usub_v2i64_vi(<2 x i64> %va) {
; CHECK-LABEL: usub_v2i64_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i64> undef, i64 2, i32 0
%vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer
%v = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> %va, <2 x i64> %vb)
ret <2 x i64> %v
}
; usub.sat on <4 x i64> (e64, LMUL=2). RV32 splats the i64 scalar via the
; stack + vlse64.v and uses vssubu.vv; RV64 uses vssubu.vx directly.
declare <4 x i64> @llvm.usub.sat.v4i64(<4 x i64>, <4 x i64>)
define <4 x i64> @usub_v4i64_vv(<4 x i64> %va, <4 x i64> %b) {
; CHECK-LABEL: usub_v4i64_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v10
; CHECK-NEXT: ret
%v = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> %va, <4 x i64> %b)
ret <4 x i64> %v
}
define <4 x i64> @usub_v4i64_vx(<4 x i64> %va, i64 %b) {
; RV32-LABEL: usub_v4i64_vx:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v26, (a0), zero
; RV32-NEXT: vssubu.vv v8, v8, v26
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: usub_v4i64_vx:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV64-NEXT: vssubu.vx v8, v8, a0
; RV64-NEXT: ret
%elt.head = insertelement <4 x i64> undef, i64 %b, i32 0
%vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer
%v = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> %va, <4 x i64> %vb)
ret <4 x i64> %v
}
define <4 x i64> @usub_v4i64_vi(<4 x i64> %va) {
; CHECK-LABEL: usub_v4i64_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i64> undef, i64 2, i32 0
%vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer
%v = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> %va, <4 x i64> %vb)
ret <4 x i64> %v
}
; usub.sat on <8 x i64> (e64, LMUL=4). RV32 splats the i64 scalar via the
; stack + vlse64.v and uses vssubu.vv; RV64 uses vssubu.vx directly.
declare <8 x i64> @llvm.usub.sat.v8i64(<8 x i64>, <8 x i64>)
define <8 x i64> @usub_v8i64_vv(<8 x i64> %va, <8 x i64> %b) {
; CHECK-LABEL: usub_v8i64_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v12
; CHECK-NEXT: ret
%v = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> %va, <8 x i64> %b)
ret <8 x i64> %v
}
define <8 x i64> @usub_v8i64_vx(<8 x i64> %va, i64 %b) {
; RV32-LABEL: usub_v8i64_vx:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v28, (a0), zero
; RV32-NEXT: vssubu.vv v8, v8, v28
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: usub_v8i64_vx:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT: vssubu.vx v8, v8, a0
; RV64-NEXT: ret
%elt.head = insertelement <8 x i64> undef, i64 %b, i32 0
%vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer
%v = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> %va, <8 x i64> %vb)
ret <8 x i64> %v
}
define <8 x i64> @usub_v8i64_vi(<8 x i64> %va) {
; CHECK-LABEL: usub_v8i64_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i64> undef, i64 2, i32 0
%vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer
%v = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> %va, <8 x i64> %vb)
ret <8 x i64> %v
}
; usub.sat on <16 x i64> (e64, LMUL=8). RV32 splats the i64 scalar via the
; stack + vlse64.v and uses vssubu.vv; RV64 uses vssubu.vx directly.
declare <16 x i64> @llvm.usub.sat.v16i64(<16 x i64>, <16 x i64>)
define <16 x i64> @usub_v16i64_vv(<16 x i64> %va, <16 x i64> %b) {
; CHECK-LABEL: usub_v16i64_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v16
; CHECK-NEXT: ret
%v = call <16 x i64> @llvm.usub.sat.v16i64(<16 x i64> %va, <16 x i64> %b)
ret <16 x i64> %v
}
define <16 x i64> @usub_v16i64_vx(<16 x i64> %va, i64 %b) {
; RV32-LABEL: usub_v16i64_vx:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vssubu.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: usub_v16i64_vx:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT: vssubu.vx v8, v8, a0
; RV64-NEXT: ret
%elt.head = insertelement <16 x i64> undef, i64 %b, i32 0
%vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer
%v = call <16 x i64> @llvm.usub.sat.v16i64(<16 x i64> %va, <16 x i64> %vb)
ret <16 x i64> %v
}
define <16 x i64> @usub_v16i64_vi(<16 x i64> %va) {
; CHECK-LABEL: usub_v16i64_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i64> undef, i64 2, i32 0
%vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer
%v = call <16 x i64> @llvm.usub.sat.v16i64(<16 x i64> %va, <16 x i64> %vb)
ret <16 x i64> %v
}

View File

@ -7,11 +7,10 @@ define <vscale x 2 x i32> @saddo_nvx2i32(<vscale x 2 x i32> %x, <vscale x 2 x i3
; CHECK-LABEL: saddo_nvx2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vadd.vv v25, v8, v9
; CHECK-NEXT: vmslt.vv v26, v25, v8
; CHECK-NEXT: vmsle.vi v27, v9, -1
; CHECK-NEXT: vmxor.mm v0, v27, v26
; CHECK-NEXT: vmerge.vim v8, v25, 0, v0
; CHECK-NEXT: vsadd.vv v25, v8, v9
; CHECK-NEXT: vadd.vv v26, v8, v9
; CHECK-NEXT: vmsne.vv v0, v26, v25
; CHECK-NEXT: vmerge.vim v8, v26, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 2 x i32>, <vscale x 2 x i1> } @llvm.sadd.with.overflow.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y)
%b = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i1> } %a, 0

View File

@ -0,0 +1,849 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=CHECK,RV64
; Scalable-vector signed saturating add (llvm.sadd.sat), i8 elements.
; Each element count is exercised three ways: vector-vector (vsadd.vv),
; splatted scalar operand (vsadd.vx), and splatted 5-bit immediate
; (vsadd.vi 5). Expected-codegen lines are autogenerated by
; utils/update_llc_test_checks.py; regenerate rather than hand-edit.
; nxv1i8 -> SEW=8, LMUL=mf8.
declare <vscale x 1 x i8> @llvm.sadd.sat.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>)
define <vscale x 1 x i8> @sadd_nxv1i8_vv(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b) {
; CHECK-LABEL: sadd_nxv1i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v9
; CHECK-NEXT: ret
  %v = call <vscale x 1 x i8> @llvm.sadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b)
  ret <vscale x 1 x i8> %v
}
define <vscale x 1 x i8> @sadd_nxv1i8_vx(<vscale x 1 x i8> %va, i8 %b) {
; CHECK-LABEL: sadd_nxv1i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vsadd.vx v8, v8, a0
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 1 x i8> undef, i8 %b, i32 0
  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i8> @llvm.sadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb)
  ret <vscale x 1 x i8> %v
}
define <vscale x 1 x i8> @sadd_nxv1i8_vi(<vscale x 1 x i8> %va) {
; CHECK-LABEL: sadd_nxv1i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 1 x i8> undef, i8 5, i32 0
  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i8> @llvm.sadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb)
  ret <vscale x 1 x i8> %v
}
; nxv2i8 -> SEW=8, LMUL=mf4.
declare <vscale x 2 x i8> @llvm.sadd.sat.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
define <vscale x 2 x i8> @sadd_nxv2i8_vv(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b) {
; CHECK-LABEL: sadd_nxv2i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v9
; CHECK-NEXT: ret
  %v = call <vscale x 2 x i8> @llvm.sadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b)
  ret <vscale x 2 x i8> %v
}
define <vscale x 2 x i8> @sadd_nxv2i8_vx(<vscale x 2 x i8> %va, i8 %b) {
; CHECK-LABEL: sadd_nxv2i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vsadd.vx v8, v8, a0
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 2 x i8> undef, i8 %b, i32 0
  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i8> @llvm.sadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb)
  ret <vscale x 2 x i8> %v
}
define <vscale x 2 x i8> @sadd_nxv2i8_vi(<vscale x 2 x i8> %va) {
; CHECK-LABEL: sadd_nxv2i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 2 x i8> undef, i8 5, i32 0
  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i8> @llvm.sadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb)
  ret <vscale x 2 x i8> %v
}
; nxv4i8 -> SEW=8, LMUL=mf2.
declare <vscale x 4 x i8> @llvm.sadd.sat.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
define <vscale x 4 x i8> @sadd_nxv4i8_vv(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b) {
; CHECK-LABEL: sadd_nxv4i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v9
; CHECK-NEXT: ret
  %v = call <vscale x 4 x i8> @llvm.sadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b)
  ret <vscale x 4 x i8> %v
}
define <vscale x 4 x i8> @sadd_nxv4i8_vx(<vscale x 4 x i8> %va, i8 %b) {
; CHECK-LABEL: sadd_nxv4i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vsadd.vx v8, v8, a0
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 4 x i8> undef, i8 %b, i32 0
  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i8> @llvm.sadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb)
  ret <vscale x 4 x i8> %v
}
define <vscale x 4 x i8> @sadd_nxv4i8_vi(<vscale x 4 x i8> %va) {
; CHECK-LABEL: sadd_nxv4i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 4 x i8> undef, i8 5, i32 0
  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i8> @llvm.sadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb)
  ret <vscale x 4 x i8> %v
}
; nxv8i8 -> SEW=8, LMUL=m1.
declare <vscale x 8 x i8> @llvm.sadd.sat.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
define <vscale x 8 x i8> @sadd_nxv8i8_vv(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b) {
; CHECK-LABEL: sadd_nxv8i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v9
; CHECK-NEXT: ret
  %v = call <vscale x 8 x i8> @llvm.sadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b)
  ret <vscale x 8 x i8> %v
}
define <vscale x 8 x i8> @sadd_nxv8i8_vx(<vscale x 8 x i8> %va, i8 %b) {
; CHECK-LABEL: sadd_nxv8i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vsadd.vx v8, v8, a0
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i8> @llvm.sadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb)
  ret <vscale x 8 x i8> %v
}
define <vscale x 8 x i8> @sadd_nxv8i8_vi(<vscale x 8 x i8> %va) {
; CHECK-LABEL: sadd_nxv8i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i8> @llvm.sadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb)
  ret <vscale x 8 x i8> %v
}
; nxv16i8 -> SEW=8, LMUL=m2 (second vv operand moves to v10).
declare <vscale x 16 x i8> @llvm.sadd.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
define <vscale x 16 x i8> @sadd_nxv16i8_vv(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b) {
; CHECK-LABEL: sadd_nxv16i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v10
; CHECK-NEXT: ret
  %v = call <vscale x 16 x i8> @llvm.sadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %v
}
define <vscale x 16 x i8> @sadd_nxv16i8_vx(<vscale x 16 x i8> %va, i8 %b) {
; CHECK-LABEL: sadd_nxv16i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vsadd.vx v8, v8, a0
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 16 x i8> undef, i8 %b, i32 0
  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i8> @llvm.sadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb)
  ret <vscale x 16 x i8> %v
}
define <vscale x 16 x i8> @sadd_nxv16i8_vi(<vscale x 16 x i8> %va) {
; CHECK-LABEL: sadd_nxv16i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 16 x i8> undef, i8 5, i32 0
  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i8> @llvm.sadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb)
  ret <vscale x 16 x i8> %v
}
; nxv32i8 -> SEW=8, LMUL=m4 (second vv operand in v12).
declare <vscale x 32 x i8> @llvm.sadd.sat.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>)
define <vscale x 32 x i8> @sadd_nxv32i8_vv(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b) {
; CHECK-LABEL: sadd_nxv32i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v12
; CHECK-NEXT: ret
  %v = call <vscale x 32 x i8> @llvm.sadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b)
  ret <vscale x 32 x i8> %v
}
define <vscale x 32 x i8> @sadd_nxv32i8_vx(<vscale x 32 x i8> %va, i8 %b) {
; CHECK-LABEL: sadd_nxv32i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vsadd.vx v8, v8, a0
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 32 x i8> undef, i8 %b, i32 0
  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i8> @llvm.sadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb)
  ret <vscale x 32 x i8> %v
}
define <vscale x 32 x i8> @sadd_nxv32i8_vi(<vscale x 32 x i8> %va) {
; CHECK-LABEL: sadd_nxv32i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 32 x i8> undef, i8 5, i32 0
  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i8> @llvm.sadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb)
  ret <vscale x 32 x i8> %v
}
; nxv64i8 -> SEW=8, LMUL=m8 (second vv operand in v16).
declare <vscale x 64 x i8> @llvm.sadd.sat.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>)
define <vscale x 64 x i8> @sadd_nxv64i8_vv(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b) {
; CHECK-LABEL: sadd_nxv64i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v16
; CHECK-NEXT: ret
  %v = call <vscale x 64 x i8> @llvm.sadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b)
  ret <vscale x 64 x i8> %v
}
define <vscale x 64 x i8> @sadd_nxv64i8_vx(<vscale x 64 x i8> %va, i8 %b) {
; CHECK-LABEL: sadd_nxv64i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vsadd.vx v8, v8, a0
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 64 x i8> undef, i8 %b, i32 0
  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
  %v = call <vscale x 64 x i8> @llvm.sadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb)
  ret <vscale x 64 x i8> %v
}
define <vscale x 64 x i8> @sadd_nxv64i8_vi(<vscale x 64 x i8> %va) {
; CHECK-LABEL: sadd_nxv64i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 64 x i8> undef, i8 5, i32 0
  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
  %v = call <vscale x 64 x i8> @llvm.sadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb)
  ret <vscale x 64 x i8> %v
}
; Scalable-vector signed saturating add (llvm.sadd.sat), i16 elements.
; Same three forms per element count as the i8 section: vsadd.vv,
; vsadd.vx (splatted scalar), vsadd.vi 5 (splatted immediate).
; Autogenerated assertions; regenerate with update_llc_test_checks.py.
; nxv1i16 -> SEW=16, LMUL=mf4.
declare <vscale x 1 x i16> @llvm.sadd.sat.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>)
define <vscale x 1 x i16> @sadd_nxv1i16_vv(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b) {
; CHECK-LABEL: sadd_nxv1i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v9
; CHECK-NEXT: ret
  %v = call <vscale x 1 x i16> @llvm.sadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b)
  ret <vscale x 1 x i16> %v
}
define <vscale x 1 x i16> @sadd_nxv1i16_vx(<vscale x 1 x i16> %va, i16 %b) {
; CHECK-LABEL: sadd_nxv1i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsadd.vx v8, v8, a0
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 1 x i16> undef, i16 %b, i32 0
  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i16> @llvm.sadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb)
  ret <vscale x 1 x i16> %v
}
define <vscale x 1 x i16> @sadd_nxv1i16_vi(<vscale x 1 x i16> %va) {
; CHECK-LABEL: sadd_nxv1i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 1 x i16> undef, i16 5, i32 0
  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i16> @llvm.sadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb)
  ret <vscale x 1 x i16> %v
}
; nxv2i16 -> SEW=16, LMUL=mf2.
declare <vscale x 2 x i16> @llvm.sadd.sat.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
define <vscale x 2 x i16> @sadd_nxv2i16_vv(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b) {
; CHECK-LABEL: sadd_nxv2i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v9
; CHECK-NEXT: ret
  %v = call <vscale x 2 x i16> @llvm.sadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b)
  ret <vscale x 2 x i16> %v
}
define <vscale x 2 x i16> @sadd_nxv2i16_vx(<vscale x 2 x i16> %va, i16 %b) {
; CHECK-LABEL: sadd_nxv2i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vsadd.vx v8, v8, a0
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 2 x i16> undef, i16 %b, i32 0
  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i16> @llvm.sadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb)
  ret <vscale x 2 x i16> %v
}
define <vscale x 2 x i16> @sadd_nxv2i16_vi(<vscale x 2 x i16> %va) {
; CHECK-LABEL: sadd_nxv2i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 2 x i16> undef, i16 5, i32 0
  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i16> @llvm.sadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb)
  ret <vscale x 2 x i16> %v
}
; nxv4i16 -> SEW=16, LMUL=m1.
declare <vscale x 4 x i16> @llvm.sadd.sat.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
define <vscale x 4 x i16> @sadd_nxv4i16_vv(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b) {
; CHECK-LABEL: sadd_nxv4i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v9
; CHECK-NEXT: ret
  %v = call <vscale x 4 x i16> @llvm.sadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b)
  ret <vscale x 4 x i16> %v
}
define <vscale x 4 x i16> @sadd_nxv4i16_vx(<vscale x 4 x i16> %va, i16 %b) {
; CHECK-LABEL: sadd_nxv4i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vsadd.vx v8, v8, a0
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 4 x i16> undef, i16 %b, i32 0
  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i16> @llvm.sadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb)
  ret <vscale x 4 x i16> %v
}
define <vscale x 4 x i16> @sadd_nxv4i16_vi(<vscale x 4 x i16> %va) {
; CHECK-LABEL: sadd_nxv4i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 4 x i16> undef, i16 5, i32 0
  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i16> @llvm.sadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb)
  ret <vscale x 4 x i16> %v
}
; nxv8i16 -> SEW=16, LMUL=m2 (second vv operand in v10).
declare <vscale x 8 x i16> @llvm.sadd.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
define <vscale x 8 x i16> @sadd_nxv8i16_vv(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b) {
; CHECK-LABEL: sadd_nxv8i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v10
; CHECK-NEXT: ret
  %v = call <vscale x 8 x i16> @llvm.sadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %v
}
define <vscale x 8 x i16> @sadd_nxv8i16_vx(<vscale x 8 x i16> %va, i16 %b) {
; CHECK-LABEL: sadd_nxv8i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vsadd.vx v8, v8, a0
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i16> @llvm.sadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb)
  ret <vscale x 8 x i16> %v
}
define <vscale x 8 x i16> @sadd_nxv8i16_vi(<vscale x 8 x i16> %va) {
; CHECK-LABEL: sadd_nxv8i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i16> @llvm.sadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb)
  ret <vscale x 8 x i16> %v
}
; nxv16i16 -> SEW=16, LMUL=m4 (second vv operand in v12).
declare <vscale x 16 x i16> @llvm.sadd.sat.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
define <vscale x 16 x i16> @sadd_nxv16i16_vv(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b) {
; CHECK-LABEL: sadd_nxv16i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v12
; CHECK-NEXT: ret
  %v = call <vscale x 16 x i16> @llvm.sadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b)
  ret <vscale x 16 x i16> %v
}
define <vscale x 16 x i16> @sadd_nxv16i16_vx(<vscale x 16 x i16> %va, i16 %b) {
; CHECK-LABEL: sadd_nxv16i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vsadd.vx v8, v8, a0
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 16 x i16> undef, i16 %b, i32 0
  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i16> @llvm.sadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb)
  ret <vscale x 16 x i16> %v
}
define <vscale x 16 x i16> @sadd_nxv16i16_vi(<vscale x 16 x i16> %va) {
; CHECK-LABEL: sadd_nxv16i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 16 x i16> undef, i16 5, i32 0
  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i16> @llvm.sadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb)
  ret <vscale x 16 x i16> %v
}
; nxv32i16 -> SEW=16, LMUL=m8 (second vv operand in v16).
declare <vscale x 32 x i16> @llvm.sadd.sat.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>)
define <vscale x 32 x i16> @sadd_nxv32i16_vv(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b) {
; CHECK-LABEL: sadd_nxv32i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v16
; CHECK-NEXT: ret
  %v = call <vscale x 32 x i16> @llvm.sadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b)
  ret <vscale x 32 x i16> %v
}
define <vscale x 32 x i16> @sadd_nxv32i16_vx(<vscale x 32 x i16> %va, i16 %b) {
; CHECK-LABEL: sadd_nxv32i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vsadd.vx v8, v8, a0
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 32 x i16> undef, i16 %b, i32 0
  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i16> @llvm.sadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb)
  ret <vscale x 32 x i16> %v
}
define <vscale x 32 x i16> @sadd_nxv32i16_vi(<vscale x 32 x i16> %va) {
; CHECK-LABEL: sadd_nxv32i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 32 x i16> undef, i16 5, i32 0
  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i16> @llvm.sadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb)
  ret <vscale x 32 x i16> %v
}
; Scalable-vector signed saturating add (llvm.sadd.sat), i32 elements.
; Same three forms per element count: vsadd.vv, vsadd.vx (splat of the
; scalar), vsadd.vi 5 (splat of an immediate). Autogenerated assertions;
; regenerate with update_llc_test_checks.py.
; nxv1i32 -> SEW=32, LMUL=mf2.
declare <vscale x 1 x i32> @llvm.sadd.sat.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>)
define <vscale x 1 x i32> @sadd_nxv1i32_vv(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b) {
; CHECK-LABEL: sadd_nxv1i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v9
; CHECK-NEXT: ret
  %v = call <vscale x 1 x i32> @llvm.sadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b)
  ret <vscale x 1 x i32> %v
}
define <vscale x 1 x i32> @sadd_nxv1i32_vx(<vscale x 1 x i32> %va, i32 %b) {
; CHECK-LABEL: sadd_nxv1i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsadd.vx v8, v8, a0
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i32> @llvm.sadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb)
  ret <vscale x 1 x i32> %v
}
define <vscale x 1 x i32> @sadd_nxv1i32_vi(<vscale x 1 x i32> %va) {
; CHECK-LABEL: sadd_nxv1i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 1 x i32> undef, i32 5, i32 0
  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i32> @llvm.sadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb)
  ret <vscale x 1 x i32> %v
}
; nxv2i32 -> SEW=32, LMUL=m1.
declare <vscale x 2 x i32> @llvm.sadd.sat.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
define <vscale x 2 x i32> @sadd_nxv2i32_vv(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b) {
; CHECK-LABEL: sadd_nxv2i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v9
; CHECK-NEXT: ret
  %v = call <vscale x 2 x i32> @llvm.sadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b)
  ret <vscale x 2 x i32> %v
}
define <vscale x 2 x i32> @sadd_nxv2i32_vx(<vscale x 2 x i32> %va, i32 %b) {
; CHECK-LABEL: sadd_nxv2i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vsadd.vx v8, v8, a0
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i32> @llvm.sadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb)
  ret <vscale x 2 x i32> %v
}
define <vscale x 2 x i32> @sadd_nxv2i32_vi(<vscale x 2 x i32> %va) {
; CHECK-LABEL: sadd_nxv2i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 2 x i32> undef, i32 5, i32 0
  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i32> @llvm.sadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb)
  ret <vscale x 2 x i32> %v
}
; nxv4i32 -> SEW=32, LMUL=m2 (second vv operand in v10).
declare <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
define <vscale x 4 x i32> @sadd_nxv4i32_vv(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b) {
; CHECK-LABEL: sadd_nxv4i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v10
; CHECK-NEXT: ret
  %v = call <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %v
}
define <vscale x 4 x i32> @sadd_nxv4i32_vx(<vscale x 4 x i32> %va, i32 %b) {
; CHECK-LABEL: sadd_nxv4i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vsadd.vx v8, v8, a0
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb)
  ret <vscale x 4 x i32> %v
}
define <vscale x 4 x i32> @sadd_nxv4i32_vi(<vscale x 4 x i32> %va) {
; CHECK-LABEL: sadd_nxv4i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 4 x i32> undef, i32 5, i32 0
  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb)
  ret <vscale x 4 x i32> %v
}
; nxv8i32 -> SEW=32, LMUL=m4 (second vv operand in v12).
declare <vscale x 8 x i32> @llvm.sadd.sat.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
define <vscale x 8 x i32> @sadd_nxv8i32_vv(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b) {
; CHECK-LABEL: sadd_nxv8i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v12
; CHECK-NEXT: ret
  %v = call <vscale x 8 x i32> @llvm.sadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b)
  ret <vscale x 8 x i32> %v
}
define <vscale x 8 x i32> @sadd_nxv8i32_vx(<vscale x 8 x i32> %va, i32 %b) {
; CHECK-LABEL: sadd_nxv8i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vsadd.vx v8, v8, a0
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i32> @llvm.sadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb)
  ret <vscale x 8 x i32> %v
}
define <vscale x 8 x i32> @sadd_nxv8i32_vi(<vscale x 8 x i32> %va) {
; CHECK-LABEL: sadd_nxv8i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 8 x i32> undef, i32 5, i32 0
  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i32> @llvm.sadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb)
  ret <vscale x 8 x i32> %v
}
; nxv16i32 -> SEW=32, LMUL=m8 (second vv operand in v16).
declare <vscale x 16 x i32> @llvm.sadd.sat.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
define <vscale x 16 x i32> @sadd_nxv16i32_vv(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b) {
; CHECK-LABEL: sadd_nxv16i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v16
; CHECK-NEXT: ret
  %v = call <vscale x 16 x i32> @llvm.sadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b)
  ret <vscale x 16 x i32> %v
}
define <vscale x 16 x i32> @sadd_nxv16i32_vx(<vscale x 16 x i32> %va, i32 %b) {
; CHECK-LABEL: sadd_nxv16i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vsadd.vx v8, v8, a0
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 16 x i32> undef, i32 %b, i32 0
  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i32> @llvm.sadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb)
  ret <vscale x 16 x i32> %v
}
define <vscale x 16 x i32> @sadd_nxv16i32_vi(<vscale x 16 x i32> %va) {
; CHECK-LABEL: sadd_nxv16i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 16 x i32> undef, i32 5, i32 0
  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i32> @llvm.sadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb)
  ret <vscale x 16 x i32> %v
}
; Scalable-vector signed saturating add (llvm.sadd.sat), i64 elements.
; The vx form splits per target: RV64 uses vsadd.vx directly, while RV32
; stores the i64 scalar as two 32-bit words and splats it back with a
; zero-stride vlse64.v before a vsadd.vv (no 64-bit GPR on RV32).
; Autogenerated assertions; regenerate with update_llc_test_checks.py.
; nxv1i64 -> SEW=64, LMUL=m1.
declare <vscale x 1 x i64> @llvm.sadd.sat.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>)
define <vscale x 1 x i64> @sadd_nxv1i64_vv(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b) {
; CHECK-LABEL: sadd_nxv1i64_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v9
; CHECK-NEXT: ret
  %v = call <vscale x 1 x i64> @llvm.sadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b)
  ret <vscale x 1 x i64> %v
}
define <vscale x 1 x i64> @sadd_nxv1i64_vx(<vscale x 1 x i64> %va, i64 %b) {
; RV32-LABEL: sadd_nxv1i64_vx:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v25, (a0), zero
; RV32-NEXT: vsadd.vv v8, v8, v25
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: sadd_nxv1i64_vx:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; RV64-NEXT: vsadd.vx v8, v8, a0
; RV64-NEXT: ret
  %elt.head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i64> @llvm.sadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb)
  ret <vscale x 1 x i64> %v
}
define <vscale x 1 x i64> @sadd_nxv1i64_vi(<vscale x 1 x i64> %va) {
; CHECK-LABEL: sadd_nxv1i64_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 1 x i64> undef, i64 5, i32 0
  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i64> @llvm.sadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb)
  ret <vscale x 1 x i64> %v
}
; nxv2i64 -> SEW=64, LMUL=m2 (vv operand in v10, RV32 splat in v26).
declare <vscale x 2 x i64> @llvm.sadd.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
define <vscale x 2 x i64> @sadd_nxv2i64_vv(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b) {
; CHECK-LABEL: sadd_nxv2i64_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v10
; CHECK-NEXT: ret
  %v = call <vscale x 2 x i64> @llvm.sadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %v
}
define <vscale x 2 x i64> @sadd_nxv2i64_vx(<vscale x 2 x i64> %va, i64 %b) {
; RV32-LABEL: sadd_nxv2i64_vx:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v26, (a0), zero
; RV32-NEXT: vsadd.vv v8, v8, v26
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: sadd_nxv2i64_vx:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; RV64-NEXT: vsadd.vx v8, v8, a0
; RV64-NEXT: ret
  %elt.head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i64> @llvm.sadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb)
  ret <vscale x 2 x i64> %v
}
define <vscale x 2 x i64> @sadd_nxv2i64_vi(<vscale x 2 x i64> %va) {
; CHECK-LABEL: sadd_nxv2i64_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 2 x i64> undef, i64 5, i32 0
  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i64> @llvm.sadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb)
  ret <vscale x 2 x i64> %v
}
; nxv4i64 -> SEW=64, LMUL=m4 (vv operand in v12, RV32 splat in v28).
declare <vscale x 4 x i64> @llvm.sadd.sat.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
define <vscale x 4 x i64> @sadd_nxv4i64_vv(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b) {
; CHECK-LABEL: sadd_nxv4i64_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v12
; CHECK-NEXT: ret
  %v = call <vscale x 4 x i64> @llvm.sadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b)
  ret <vscale x 4 x i64> %v
}
define <vscale x 4 x i64> @sadd_nxv4i64_vx(<vscale x 4 x i64> %va, i64 %b) {
; RV32-LABEL: sadd_nxv4i64_vx:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v28, (a0), zero
; RV32-NEXT: vsadd.vv v8, v8, v28
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: sadd_nxv4i64_vx:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; RV64-NEXT: vsadd.vx v8, v8, a0
; RV64-NEXT: ret
  %elt.head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i64> @llvm.sadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb)
  ret <vscale x 4 x i64> %v
}
define <vscale x 4 x i64> @sadd_nxv4i64_vi(<vscale x 4 x i64> %va) {
; CHECK-LABEL: sadd_nxv4i64_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 4 x i64> undef, i64 5, i32 0
  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i64> @llvm.sadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb)
  ret <vscale x 4 x i64> %v
}
; nxv8i64 -> SEW=64, LMUL=m8 (vv operand in v16; RV32 splat reuses v16).
declare <vscale x 8 x i64> @llvm.sadd.sat.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)
define <vscale x 8 x i64> @sadd_nxv8i64_vv(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b) {
; CHECK-LABEL: sadd_nxv8i64_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vsadd.vv v8, v8, v16
; CHECK-NEXT: ret
  %v = call <vscale x 8 x i64> @llvm.sadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b)
  ret <vscale x 8 x i64> %v
}
define <vscale x 8 x i64> @sadd_nxv8i64_vx(<vscale x 8 x i64> %va, i64 %b) {
; RV32-LABEL: sadd_nxv8i64_vx:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsadd.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: sadd_nxv8i64_vx:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsadd.vx v8, v8, a0
; RV64-NEXT: ret
  %elt.head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i64> @llvm.sadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb)
  ret <vscale x 8 x i64> %v
}
define <vscale x 8 x i64> @sadd_nxv8i64_vi(<vscale x 8 x i64> %va) {
; CHECK-LABEL: sadd_nxv8i64_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vsadd.vi v8, v8, 5
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 8 x i64> undef, i64 5, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i64> @llvm.sadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb)
  ret <vscale x 8 x i64> %v
}

View File

@ -0,0 +1,849 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=CHECK,RV64
; Saturating unsigned add (llvm.uadd.sat) tests, i8 element types.
; Each scalable type gets three tests: .vv (vector-vector), .vx (splatted
; scalar in a0), and .vi (splatted immediate 8, within the 5-bit simm range
; of vsaddu.vi). LMUL scales with the element count: mf8 through m8.
; --- <vscale x 1 x i8> (e8/mf8) ---
declare <vscale x 1 x i8> @llvm.uadd.sat.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>)
define <vscale x 1 x i8> @uadd_nxv1i8_vv(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b) {
; CHECK-LABEL: uadd_nxv1i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 1 x i8> @llvm.uadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b)
ret <vscale x 1 x i8> %v
}
define <vscale x 1 x i8> @uadd_nxv1i8_vx(<vscale x 1 x i8> %va, i8 %b) {
; CHECK-LABEL: uadd_nxv1i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vsaddu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> undef, i8 %b, i32 0
%vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x i8> @llvm.uadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb)
ret <vscale x 1 x i8> %v
}
define <vscale x 1 x i8> @uadd_nxv1i8_vi(<vscale x 1 x i8> %va) {
; CHECK-LABEL: uadd_nxv1i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> undef, i8 8, i32 0
%vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x i8> @llvm.uadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb)
ret <vscale x 1 x i8> %v
}
; --- <vscale x 2 x i8> (e8/mf4) ---
declare <vscale x 2 x i8> @llvm.uadd.sat.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
define <vscale x 2 x i8> @uadd_nxv2i8_vv(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b) {
; CHECK-LABEL: uadd_nxv2i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.uadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b)
ret <vscale x 2 x i8> %v
}
define <vscale x 2 x i8> @uadd_nxv2i8_vx(<vscale x 2 x i8> %va, i8 %b) {
; CHECK-LABEL: uadd_nxv2i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vsaddu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i8> undef, i8 %b, i32 0
%vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
%v = call <vscale x 2 x i8> @llvm.uadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb)
ret <vscale x 2 x i8> %v
}
define <vscale x 2 x i8> @uadd_nxv2i8_vi(<vscale x 2 x i8> %va) {
; CHECK-LABEL: uadd_nxv2i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i8> undef, i8 8, i32 0
%vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
%v = call <vscale x 2 x i8> @llvm.uadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb)
ret <vscale x 2 x i8> %v
}
; --- <vscale x 4 x i8> (e8/mf2) ---
declare <vscale x 4 x i8> @llvm.uadd.sat.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
define <vscale x 4 x i8> @uadd_nxv4i8_vv(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b) {
; CHECK-LABEL: uadd_nxv4i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 4 x i8> @llvm.uadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b)
ret <vscale x 4 x i8> %v
}
define <vscale x 4 x i8> @uadd_nxv4i8_vx(<vscale x 4 x i8> %va, i8 %b) {
; CHECK-LABEL: uadd_nxv4i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vsaddu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i8> undef, i8 %b, i32 0
%vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
%v = call <vscale x 4 x i8> @llvm.uadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb)
ret <vscale x 4 x i8> %v
}
define <vscale x 4 x i8> @uadd_nxv4i8_vi(<vscale x 4 x i8> %va) {
; CHECK-LABEL: uadd_nxv4i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i8> undef, i8 8, i32 0
%vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
%v = call <vscale x 4 x i8> @llvm.uadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb)
ret <vscale x 4 x i8> %v
}
; --- <vscale x 8 x i8> (e8/m1) ---
declare <vscale x 8 x i8> @llvm.uadd.sat.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
define <vscale x 8 x i8> @uadd_nxv8i8_vv(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b) {
; CHECK-LABEL: uadd_nxv8i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 8 x i8> @llvm.uadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b)
ret <vscale x 8 x i8> %v
}
define <vscale x 8 x i8> @uadd_nxv8i8_vx(<vscale x 8 x i8> %va, i8 %b) {
; CHECK-LABEL: uadd_nxv8i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vsaddu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
%vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
%v = call <vscale x 8 x i8> @llvm.uadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb)
ret <vscale x 8 x i8> %v
}
define <vscale x 8 x i8> @uadd_nxv8i8_vi(<vscale x 8 x i8> %va) {
; CHECK-LABEL: uadd_nxv8i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i8> undef, i8 8, i32 0
%vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
%v = call <vscale x 8 x i8> @llvm.uadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb)
ret <vscale x 8 x i8> %v
}
; --- <vscale x 16 x i8> (e8/m2; second operand moves to v10) ---
declare <vscale x 16 x i8> @llvm.uadd.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
define <vscale x 16 x i8> @uadd_nxv16i8_vv(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b) {
; CHECK-LABEL: uadd_nxv16i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v10
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.uadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b)
ret <vscale x 16 x i8> %v
}
define <vscale x 16 x i8> @uadd_nxv16i8_vx(<vscale x 16 x i8> %va, i8 %b) {
; CHECK-LABEL: uadd_nxv16i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vsaddu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i8> undef, i8 %b, i32 0
%vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%v = call <vscale x 16 x i8> @llvm.uadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb)
ret <vscale x 16 x i8> %v
}
define <vscale x 16 x i8> @uadd_nxv16i8_vi(<vscale x 16 x i8> %va) {
; CHECK-LABEL: uadd_nxv16i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i8> undef, i8 8, i32 0
%vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%v = call <vscale x 16 x i8> @llvm.uadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb)
ret <vscale x 16 x i8> %v
}
; --- <vscale x 32 x i8> (e8/m4; second operand in v12) ---
declare <vscale x 32 x i8> @llvm.uadd.sat.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>)
define <vscale x 32 x i8> @uadd_nxv32i8_vv(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b) {
; CHECK-LABEL: uadd_nxv32i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v12
; CHECK-NEXT: ret
%v = call <vscale x 32 x i8> @llvm.uadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b)
ret <vscale x 32 x i8> %v
}
define <vscale x 32 x i8> @uadd_nxv32i8_vx(<vscale x 32 x i8> %va, i8 %b) {
; CHECK-LABEL: uadd_nxv32i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vsaddu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i8> undef, i8 %b, i32 0
%vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
%v = call <vscale x 32 x i8> @llvm.uadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb)
ret <vscale x 32 x i8> %v
}
define <vscale x 32 x i8> @uadd_nxv32i8_vi(<vscale x 32 x i8> %va) {
; CHECK-LABEL: uadd_nxv32i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i8> undef, i8 8, i32 0
%vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
%v = call <vscale x 32 x i8> @llvm.uadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb)
ret <vscale x 32 x i8> %v
}
; --- <vscale x 64 x i8> (e8/m8; second operand in v16) ---
declare <vscale x 64 x i8> @llvm.uadd.sat.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>)
define <vscale x 64 x i8> @uadd_nxv64i8_vv(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b) {
; CHECK-LABEL: uadd_nxv64i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v16
; CHECK-NEXT: ret
%v = call <vscale x 64 x i8> @llvm.uadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b)
ret <vscale x 64 x i8> %v
}
define <vscale x 64 x i8> @uadd_nxv64i8_vx(<vscale x 64 x i8> %va, i8 %b) {
; CHECK-LABEL: uadd_nxv64i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vsaddu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 64 x i8> undef, i8 %b, i32 0
%vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
%v = call <vscale x 64 x i8> @llvm.uadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb)
ret <vscale x 64 x i8> %v
}
define <vscale x 64 x i8> @uadd_nxv64i8_vi(<vscale x 64 x i8> %va) {
; CHECK-LABEL: uadd_nxv64i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 64 x i8> undef, i8 8, i32 0
%vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
%v = call <vscale x 64 x i8> @llvm.uadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb)
ret <vscale x 64 x i8> %v
}
; Saturating unsigned add (llvm.uadd.sat) tests, i16 element types.
; Same .vv / .vx / .vi pattern as the i8 tests above; LMUL ranges mf4..m8.
; --- <vscale x 1 x i16> (e16/mf4) ---
declare <vscale x 1 x i16> @llvm.uadd.sat.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>)
define <vscale x 1 x i16> @uadd_nxv1i16_vv(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b) {
; CHECK-LABEL: uadd_nxv1i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 1 x i16> @llvm.uadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b)
ret <vscale x 1 x i16> %v
}
define <vscale x 1 x i16> @uadd_nxv1i16_vx(<vscale x 1 x i16> %va, i16 %b) {
; CHECK-LABEL: uadd_nxv1i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsaddu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i16> undef, i16 %b, i32 0
%vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x i16> @llvm.uadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb)
ret <vscale x 1 x i16> %v
}
define <vscale x 1 x i16> @uadd_nxv1i16_vi(<vscale x 1 x i16> %va) {
; CHECK-LABEL: uadd_nxv1i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i16> undef, i16 8, i32 0
%vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x i16> @llvm.uadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb)
ret <vscale x 1 x i16> %v
}
; --- <vscale x 2 x i16> (e16/mf2) ---
declare <vscale x 2 x i16> @llvm.uadd.sat.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
define <vscale x 2 x i16> @uadd_nxv2i16_vv(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b) {
; CHECK-LABEL: uadd_nxv2i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.uadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b)
ret <vscale x 2 x i16> %v
}
define <vscale x 2 x i16> @uadd_nxv2i16_vx(<vscale x 2 x i16> %va, i16 %b) {
; CHECK-LABEL: uadd_nxv2i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vsaddu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i16> undef, i16 %b, i32 0
%vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%v = call <vscale x 2 x i16> @llvm.uadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb)
ret <vscale x 2 x i16> %v
}
define <vscale x 2 x i16> @uadd_nxv2i16_vi(<vscale x 2 x i16> %va) {
; CHECK-LABEL: uadd_nxv2i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i16> undef, i16 8, i32 0
%vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%v = call <vscale x 2 x i16> @llvm.uadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb)
ret <vscale x 2 x i16> %v
}
; --- <vscale x 4 x i16> (e16/m1) ---
declare <vscale x 4 x i16> @llvm.uadd.sat.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
define <vscale x 4 x i16> @uadd_nxv4i16_vv(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b) {
; CHECK-LABEL: uadd_nxv4i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 4 x i16> @llvm.uadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b)
ret <vscale x 4 x i16> %v
}
define <vscale x 4 x i16> @uadd_nxv4i16_vx(<vscale x 4 x i16> %va, i16 %b) {
; CHECK-LABEL: uadd_nxv4i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vsaddu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i16> undef, i16 %b, i32 0
%vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%v = call <vscale x 4 x i16> @llvm.uadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb)
ret <vscale x 4 x i16> %v
}
define <vscale x 4 x i16> @uadd_nxv4i16_vi(<vscale x 4 x i16> %va) {
; CHECK-LABEL: uadd_nxv4i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i16> undef, i16 8, i32 0
%vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%v = call <vscale x 4 x i16> @llvm.uadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb)
ret <vscale x 4 x i16> %v
}
; --- <vscale x 8 x i16> (e16/m2; second operand in v10) ---
declare <vscale x 8 x i16> @llvm.uadd.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
define <vscale x 8 x i16> @uadd_nxv8i16_vv(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b) {
; CHECK-LABEL: uadd_nxv8i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v10
; CHECK-NEXT: ret
%v = call <vscale x 8 x i16> @llvm.uadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b)
ret <vscale x 8 x i16> %v
}
define <vscale x 8 x i16> @uadd_nxv8i16_vx(<vscale x 8 x i16> %va, i16 %b) {
; CHECK-LABEL: uadd_nxv8i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vsaddu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
%vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%v = call <vscale x 8 x i16> @llvm.uadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb)
ret <vscale x 8 x i16> %v
}
define <vscale x 8 x i16> @uadd_nxv8i16_vi(<vscale x 8 x i16> %va) {
; CHECK-LABEL: uadd_nxv8i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> undef, i16 8, i32 0
%vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%v = call <vscale x 8 x i16> @llvm.uadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb)
ret <vscale x 8 x i16> %v
}
; --- <vscale x 16 x i16> (e16/m4; second operand in v12) ---
declare <vscale x 16 x i16> @llvm.uadd.sat.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
define <vscale x 16 x i16> @uadd_nxv16i16_vv(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b) {
; CHECK-LABEL: uadd_nxv16i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v12
; CHECK-NEXT: ret
%v = call <vscale x 16 x i16> @llvm.uadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b)
ret <vscale x 16 x i16> %v
}
define <vscale x 16 x i16> @uadd_nxv16i16_vx(<vscale x 16 x i16> %va, i16 %b) {
; CHECK-LABEL: uadd_nxv16i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vsaddu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> undef, i16 %b, i32 0
%vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%v = call <vscale x 16 x i16> @llvm.uadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb)
ret <vscale x 16 x i16> %v
}
define <vscale x 16 x i16> @uadd_nxv16i16_vi(<vscale x 16 x i16> %va) {
; CHECK-LABEL: uadd_nxv16i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> undef, i16 8, i32 0
%vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%v = call <vscale x 16 x i16> @llvm.uadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb)
ret <vscale x 16 x i16> %v
}
; --- <vscale x 32 x i16> (e16/m8; second operand in v16) ---
declare <vscale x 32 x i16> @llvm.uadd.sat.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>)
define <vscale x 32 x i16> @uadd_nxv32i16_vv(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b) {
; CHECK-LABEL: uadd_nxv32i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v16
; CHECK-NEXT: ret
%v = call <vscale x 32 x i16> @llvm.uadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b)
ret <vscale x 32 x i16> %v
}
define <vscale x 32 x i16> @uadd_nxv32i16_vx(<vscale x 32 x i16> %va, i16 %b) {
; CHECK-LABEL: uadd_nxv32i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vsaddu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> undef, i16 %b, i32 0
%vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%v = call <vscale x 32 x i16> @llvm.uadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb)
ret <vscale x 32 x i16> %v
}
define <vscale x 32 x i16> @uadd_nxv32i16_vi(<vscale x 32 x i16> %va) {
; CHECK-LABEL: uadd_nxv32i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> undef, i16 8, i32 0
%vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%v = call <vscale x 32 x i16> @llvm.uadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb)
ret <vscale x 32 x i16> %v
}
; Saturating unsigned add (llvm.uadd.sat) tests, i32 element types.
; Same .vv / .vx / .vi pattern; LMUL ranges mf2..m8.
; --- <vscale x 1 x i32> (e32/mf2) ---
declare <vscale x 1 x i32> @llvm.uadd.sat.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>)
define <vscale x 1 x i32> @uadd_nxv1i32_vv(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b) {
; CHECK-LABEL: uadd_nxv1i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 1 x i32> @llvm.uadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b)
ret <vscale x 1 x i32> %v
}
define <vscale x 1 x i32> @uadd_nxv1i32_vx(<vscale x 1 x i32> %va, i32 %b) {
; CHECK-LABEL: uadd_nxv1i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsaddu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
%vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x i32> @llvm.uadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb)
ret <vscale x 1 x i32> %v
}
define <vscale x 1 x i32> @uadd_nxv1i32_vi(<vscale x 1 x i32> %va) {
; CHECK-LABEL: uadd_nxv1i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> undef, i32 8, i32 0
%vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x i32> @llvm.uadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb)
ret <vscale x 1 x i32> %v
}
; --- <vscale x 2 x i32> (e32/m1) ---
declare <vscale x 2 x i32> @llvm.uadd.sat.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
define <vscale x 2 x i32> @uadd_nxv2i32_vv(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b) {
; CHECK-LABEL: uadd_nxv2i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.uadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b)
ret <vscale x 2 x i32> %v
}
define <vscale x 2 x i32> @uadd_nxv2i32_vx(<vscale x 2 x i32> %va, i32 %b) {
; CHECK-LABEL: uadd_nxv2i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vsaddu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
%vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%v = call <vscale x 2 x i32> @llvm.uadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb)
ret <vscale x 2 x i32> %v
}
define <vscale x 2 x i32> @uadd_nxv2i32_vi(<vscale x 2 x i32> %va) {
; CHECK-LABEL: uadd_nxv2i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> undef, i32 8, i32 0
%vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%v = call <vscale x 2 x i32> @llvm.uadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb)
ret <vscale x 2 x i32> %v
}
; --- <vscale x 4 x i32> (e32/m2; second operand in v10) ---
declare <vscale x 4 x i32> @llvm.uadd.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
define <vscale x 4 x i32> @uadd_nxv4i32_vv(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b) {
; CHECK-LABEL: uadd_nxv4i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v10
; CHECK-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.uadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b)
ret <vscale x 4 x i32> %v
}
define <vscale x 4 x i32> @uadd_nxv4i32_vx(<vscale x 4 x i32> %va, i32 %b) {
; CHECK-LABEL: uadd_nxv4i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vsaddu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
%vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%v = call <vscale x 4 x i32> @llvm.uadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb)
ret <vscale x 4 x i32> %v
}
define <vscale x 4 x i32> @uadd_nxv4i32_vi(<vscale x 4 x i32> %va) {
; CHECK-LABEL: uadd_nxv4i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> undef, i32 8, i32 0
%vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%v = call <vscale x 4 x i32> @llvm.uadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb)
ret <vscale x 4 x i32> %v
}
; --- <vscale x 8 x i32> (e32/m4; second operand in v12) ---
declare <vscale x 8 x i32> @llvm.uadd.sat.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
define <vscale x 8 x i32> @uadd_nxv8i32_vv(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b) {
; CHECK-LABEL: uadd_nxv8i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v12
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.uadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b)
ret <vscale x 8 x i32> %v
}
define <vscale x 8 x i32> @uadd_nxv8i32_vx(<vscale x 8 x i32> %va, i32 %b) {
; CHECK-LABEL: uadd_nxv8i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vsaddu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
%vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%v = call <vscale x 8 x i32> @llvm.uadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb)
ret <vscale x 8 x i32> %v
}
define <vscale x 8 x i32> @uadd_nxv8i32_vi(<vscale x 8 x i32> %va) {
; CHECK-LABEL: uadd_nxv8i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> undef, i32 8, i32 0
%vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%v = call <vscale x 8 x i32> @llvm.uadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb)
ret <vscale x 8 x i32> %v
}
; --- <vscale x 16 x i32> (e32/m8; second operand in v16) ---
declare <vscale x 16 x i32> @llvm.uadd.sat.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
define <vscale x 16 x i32> @uadd_nxv16i32_vv(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b) {
; CHECK-LABEL: uadd_nxv16i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.uadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b)
ret <vscale x 16 x i32> %v
}
define <vscale x 16 x i32> @uadd_nxv16i32_vx(<vscale x 16 x i32> %va, i32 %b) {
; CHECK-LABEL: uadd_nxv16i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vsaddu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> undef, i32 %b, i32 0
%vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%v = call <vscale x 16 x i32> @llvm.uadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb)
ret <vscale x 16 x i32> %v
}
define <vscale x 16 x i32> @uadd_nxv16i32_vi(<vscale x 16 x i32> %va) {
; CHECK-LABEL: uadd_nxv16i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> undef, i32 8, i32 0
%vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%v = call <vscale x 16 x i32> @llvm.uadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb)
ret <vscale x 16 x i32> %v
}
; Saturating unsigned add (llvm.uadd.sat) tests, i64 element types.
; The .vx tests diverge per target: RV64 folds the GPR into vsaddu.vx, while
; RV32 has no single-GPR i64, so the splat is materialized by storing both
; halves to the stack and doing a zero-strided vlse64.v, then vsaddu.vv.
; --- <vscale x 1 x i64> (e64/m1) ---
declare <vscale x 1 x i64> @llvm.uadd.sat.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>)
define <vscale x 1 x i64> @uadd_nxv1i64_vv(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b) {
; CHECK-LABEL: uadd_nxv1i64_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 1 x i64> @llvm.uadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b)
ret <vscale x 1 x i64> %v
}
define <vscale x 1 x i64> @uadd_nxv1i64_vx(<vscale x 1 x i64> %va, i64 %b) {
; RV32-LABEL: uadd_nxv1i64_vx:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v25, (a0), zero
; RV32-NEXT: vsaddu.vv v8, v8, v25
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: uadd_nxv1i64_vx:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; RV64-NEXT: vsaddu.vx v8, v8, a0
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
%vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x i64> @llvm.uadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb)
ret <vscale x 1 x i64> %v
}
define <vscale x 1 x i64> @uadd_nxv1i64_vi(<vscale x 1 x i64> %va) {
; CHECK-LABEL: uadd_nxv1i64_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> undef, i64 8, i32 0
%vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x i64> @llvm.uadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb)
ret <vscale x 1 x i64> %v
}
; --- <vscale x 2 x i64> (e64/m2; second operand in v10, RV32 splat in v26) ---
declare <vscale x 2 x i64> @llvm.uadd.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
define <vscale x 2 x i64> @uadd_nxv2i64_vv(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b) {
; CHECK-LABEL: uadd_nxv2i64_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v10
; CHECK-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.uadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b)
ret <vscale x 2 x i64> %v
}
define <vscale x 2 x i64> @uadd_nxv2i64_vx(<vscale x 2 x i64> %va, i64 %b) {
; RV32-LABEL: uadd_nxv2i64_vx:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v26, (a0), zero
; RV32-NEXT: vsaddu.vv v8, v8, v26
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: uadd_nxv2i64_vx:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; RV64-NEXT: vsaddu.vx v8, v8, a0
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
%vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%v = call <vscale x 2 x i64> @llvm.uadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb)
ret <vscale x 2 x i64> %v
}
define <vscale x 2 x i64> @uadd_nxv2i64_vi(<vscale x 2 x i64> %va) {
; CHECK-LABEL: uadd_nxv2i64_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> undef, i64 8, i32 0
%vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%v = call <vscale x 2 x i64> @llvm.uadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb)
ret <vscale x 2 x i64> %v
}
; --- <vscale x 4 x i64> (e64/m4; second operand in v12, RV32 splat in v28) ---
declare <vscale x 4 x i64> @llvm.uadd.sat.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
define <vscale x 4 x i64> @uadd_nxv4i64_vv(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b) {
; CHECK-LABEL: uadd_nxv4i64_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v12
; CHECK-NEXT: ret
%v = call <vscale x 4 x i64> @llvm.uadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b)
ret <vscale x 4 x i64> %v
}
define <vscale x 4 x i64> @uadd_nxv4i64_vx(<vscale x 4 x i64> %va, i64 %b) {
; RV32-LABEL: uadd_nxv4i64_vx:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v28, (a0), zero
; RV32-NEXT: vsaddu.vv v8, v8, v28
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: uadd_nxv4i64_vx:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; RV64-NEXT: vsaddu.vx v8, v8, a0
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
%vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%v = call <vscale x 4 x i64> @llvm.uadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb)
ret <vscale x 4 x i64> %v
}
define <vscale x 4 x i64> @uadd_nxv4i64_vi(<vscale x 4 x i64> %va) {
; CHECK-LABEL: uadd_nxv4i64_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> undef, i64 8, i32 0
%vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%v = call <vscale x 4 x i64> @llvm.uadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb)
ret <vscale x 4 x i64> %v
}
; --- <vscale x 8 x i64> (e64/m8; second operand in v16, RV32 splat in v16) ---
declare <vscale x 8 x i64> @llvm.uadd.sat.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)
define <vscale x 8 x i64> @uadd_nxv8i64_vv(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b) {
; CHECK-LABEL: uadd_nxv8i64_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vsaddu.vv v8, v8, v16
; CHECK-NEXT: ret
%v = call <vscale x 8 x i64> @llvm.uadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b)
ret <vscale x 8 x i64> %v
}
define <vscale x 8 x i64> @uadd_nxv8i64_vx(<vscale x 8 x i64> %va, i64 %b) {
; RV32-LABEL: uadd_nxv8i64_vx:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsaddu.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: uadd_nxv8i64_vx:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsaddu.vx v8, v8, a0
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
%vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%v = call <vscale x 8 x i64> @llvm.uadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb)
ret <vscale x 8 x i64> %v
}
define <vscale x 8 x i64> @uadd_nxv8i64_vi(<vscale x 8 x i64> %va) {
; CHECK-LABEL: uadd_nxv8i64_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vsaddu.vi v8, v8, 8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> undef, i64 8, i32 0
%vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%v = call <vscale x 8 x i64> @llvm.uadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb)
ret <vscale x 8 x i64> %v
}

View File

@ -0,0 +1,871 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=CHECK,RV64
declare <vscale x 1 x i8> @llvm.ssub.sat.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>)
define <vscale x 1 x i8> @ssub_nxv1i8_vv(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b) {
; CHECK-LABEL: ssub_nxv1i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 1 x i8> @llvm.ssub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b)
ret <vscale x 1 x i8> %v
}
define <vscale x 1 x i8> @ssub_nxv1i8_vx(<vscale x 1 x i8> %va, i8 %b) {
; CHECK-LABEL: ssub_nxv1i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> undef, i8 %b, i32 0
%vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x i8> @llvm.ssub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb)
ret <vscale x 1 x i8> %v
}
define <vscale x 1 x i8> @ssub_nxv1i8_vi(<vscale x 1 x i8> %va) {
; CHECK-LABEL: ssub_nxv1i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> undef, i8 1, i32 0
%vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x i8> @llvm.ssub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb)
ret <vscale x 1 x i8> %v
}
declare <vscale x 2 x i8> @llvm.ssub.sat.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
define <vscale x 2 x i8> @ssub_nxv2i8_vv(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b) {
; CHECK-LABEL: ssub_nxv2i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.ssub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b)
ret <vscale x 2 x i8> %v
}
define <vscale x 2 x i8> @ssub_nxv2i8_vx(<vscale x 2 x i8> %va, i8 %b) {
; CHECK-LABEL: ssub_nxv2i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i8> undef, i8 %b, i32 0
%vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
%v = call <vscale x 2 x i8> @llvm.ssub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb)
ret <vscale x 2 x i8> %v
}
define <vscale x 2 x i8> @ssub_nxv2i8_vi(<vscale x 2 x i8> %va) {
; CHECK-LABEL: ssub_nxv2i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i8> undef, i8 1, i32 0
%vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
%v = call <vscale x 2 x i8> @llvm.ssub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb)
ret <vscale x 2 x i8> %v
}
declare <vscale x 4 x i8> @llvm.ssub.sat.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
define <vscale x 4 x i8> @ssub_nxv4i8_vv(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b) {
; CHECK-LABEL: ssub_nxv4i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 4 x i8> @llvm.ssub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b)
ret <vscale x 4 x i8> %v
}
define <vscale x 4 x i8> @ssub_nxv4i8_vx(<vscale x 4 x i8> %va, i8 %b) {
; CHECK-LABEL: ssub_nxv4i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i8> undef, i8 %b, i32 0
%vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
%v = call <vscale x 4 x i8> @llvm.ssub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb)
ret <vscale x 4 x i8> %v
}
define <vscale x 4 x i8> @ssub_nxv4i8_vi(<vscale x 4 x i8> %va) {
; CHECK-LABEL: ssub_nxv4i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i8> undef, i8 1, i32 0
%vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
%v = call <vscale x 4 x i8> @llvm.ssub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb)
ret <vscale x 4 x i8> %v
}
declare <vscale x 8 x i8> @llvm.ssub.sat.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
define <vscale x 8 x i8> @ssub_nxv8i8_vv(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b) {
; CHECK-LABEL: ssub_nxv8i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 8 x i8> @llvm.ssub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b)
ret <vscale x 8 x i8> %v
}
define <vscale x 8 x i8> @ssub_nxv8i8_vx(<vscale x 8 x i8> %va, i8 %b) {
; CHECK-LABEL: ssub_nxv8i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
%vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
%v = call <vscale x 8 x i8> @llvm.ssub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb)
ret <vscale x 8 x i8> %v
}
define <vscale x 8 x i8> @ssub_nxv8i8_vi(<vscale x 8 x i8> %va) {
; CHECK-LABEL: ssub_nxv8i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i8> undef, i8 1, i32 0
%vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
%v = call <vscale x 8 x i8> @llvm.ssub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb)
ret <vscale x 8 x i8> %v
}
declare <vscale x 16 x i8> @llvm.ssub.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
define <vscale x 16 x i8> @ssub_nxv16i8_vv(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b) {
; CHECK-LABEL: ssub_nxv16i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v10
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.ssub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b)
ret <vscale x 16 x i8> %v
}
define <vscale x 16 x i8> @ssub_nxv16i8_vx(<vscale x 16 x i8> %va, i8 %b) {
; CHECK-LABEL: ssub_nxv16i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i8> undef, i8 %b, i32 0
%vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%v = call <vscale x 16 x i8> @llvm.ssub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb)
ret <vscale x 16 x i8> %v
}
define <vscale x 16 x i8> @ssub_nxv16i8_vi(<vscale x 16 x i8> %va) {
; CHECK-LABEL: ssub_nxv16i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i8> undef, i8 1, i32 0
%vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%v = call <vscale x 16 x i8> @llvm.ssub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb)
ret <vscale x 16 x i8> %v
}
declare <vscale x 32 x i8> @llvm.ssub.sat.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>)
define <vscale x 32 x i8> @ssub_nxv32i8_vv(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b) {
; CHECK-LABEL: ssub_nxv32i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v12
; CHECK-NEXT: ret
%v = call <vscale x 32 x i8> @llvm.ssub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b)
ret <vscale x 32 x i8> %v
}
define <vscale x 32 x i8> @ssub_nxv32i8_vx(<vscale x 32 x i8> %va, i8 %b) {
; CHECK-LABEL: ssub_nxv32i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i8> undef, i8 %b, i32 0
%vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
%v = call <vscale x 32 x i8> @llvm.ssub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb)
ret <vscale x 32 x i8> %v
}
define <vscale x 32 x i8> @ssub_nxv32i8_vi(<vscale x 32 x i8> %va) {
; CHECK-LABEL: ssub_nxv32i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i8> undef, i8 1, i32 0
%vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
%v = call <vscale x 32 x i8> @llvm.ssub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb)
ret <vscale x 32 x i8> %v
}
declare <vscale x 64 x i8> @llvm.ssub.sat.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>)
define <vscale x 64 x i8> @ssub_nxv64i8_vv(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b) {
; CHECK-LABEL: ssub_nxv64i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v16
; CHECK-NEXT: ret
%v = call <vscale x 64 x i8> @llvm.ssub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b)
ret <vscale x 64 x i8> %v
}
define <vscale x 64 x i8> @ssub_nxv64i8_vx(<vscale x 64 x i8> %va, i8 %b) {
; CHECK-LABEL: ssub_nxv64i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 64 x i8> undef, i8 %b, i32 0
%vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
%v = call <vscale x 64 x i8> @llvm.ssub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb)
ret <vscale x 64 x i8> %v
}
define <vscale x 64 x i8> @ssub_nxv64i8_vi(<vscale x 64 x i8> %va) {
; CHECK-LABEL: ssub_nxv64i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 64 x i8> undef, i8 1, i32 0
%vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
%v = call <vscale x 64 x i8> @llvm.ssub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb)
ret <vscale x 64 x i8> %v
}
declare <vscale x 1 x i16> @llvm.ssub.sat.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>)
define <vscale x 1 x i16> @ssub_nxv1i16_vv(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b) {
; CHECK-LABEL: ssub_nxv1i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 1 x i16> @llvm.ssub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b)
ret <vscale x 1 x i16> %v
}
define <vscale x 1 x i16> @ssub_nxv1i16_vx(<vscale x 1 x i16> %va, i16 %b) {
; CHECK-LABEL: ssub_nxv1i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i16> undef, i16 %b, i32 0
%vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x i16> @llvm.ssub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb)
ret <vscale x 1 x i16> %v
}
define <vscale x 1 x i16> @ssub_nxv1i16_vi(<vscale x 1 x i16> %va) {
; CHECK-LABEL: ssub_nxv1i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i16> undef, i16 1, i32 0
%vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x i16> @llvm.ssub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb)
ret <vscale x 1 x i16> %v
}
declare <vscale x 2 x i16> @llvm.ssub.sat.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
define <vscale x 2 x i16> @ssub_nxv2i16_vv(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b) {
; CHECK-LABEL: ssub_nxv2i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.ssub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b)
ret <vscale x 2 x i16> %v
}
define <vscale x 2 x i16> @ssub_nxv2i16_vx(<vscale x 2 x i16> %va, i16 %b) {
; CHECK-LABEL: ssub_nxv2i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i16> undef, i16 %b, i32 0
%vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%v = call <vscale x 2 x i16> @llvm.ssub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb)
ret <vscale x 2 x i16> %v
}
define <vscale x 2 x i16> @ssub_nxv2i16_vi(<vscale x 2 x i16> %va) {
; CHECK-LABEL: ssub_nxv2i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i16> undef, i16 1, i32 0
%vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%v = call <vscale x 2 x i16> @llvm.ssub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb)
ret <vscale x 2 x i16> %v
}
declare <vscale x 4 x i16> @llvm.ssub.sat.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
define <vscale x 4 x i16> @ssub_nxv4i16_vv(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b) {
; CHECK-LABEL: ssub_nxv4i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 4 x i16> @llvm.ssub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b)
ret <vscale x 4 x i16> %v
}
define <vscale x 4 x i16> @ssub_nxv4i16_vx(<vscale x 4 x i16> %va, i16 %b) {
; CHECK-LABEL: ssub_nxv4i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i16> undef, i16 %b, i32 0
%vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%v = call <vscale x 4 x i16> @llvm.ssub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb)
ret <vscale x 4 x i16> %v
}
define <vscale x 4 x i16> @ssub_nxv4i16_vi(<vscale x 4 x i16> %va) {
; CHECK-LABEL: ssub_nxv4i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i16> undef, i16 1, i32 0
%vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%v = call <vscale x 4 x i16> @llvm.ssub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb)
ret <vscale x 4 x i16> %v
}
declare <vscale x 8 x i16> @llvm.ssub.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
define <vscale x 8 x i16> @ssub_nxv8i16_vv(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b) {
; CHECK-LABEL: ssub_nxv8i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v10
; CHECK-NEXT: ret
%v = call <vscale x 8 x i16> @llvm.ssub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b)
ret <vscale x 8 x i16> %v
}
define <vscale x 8 x i16> @ssub_nxv8i16_vx(<vscale x 8 x i16> %va, i16 %b) {
; CHECK-LABEL: ssub_nxv8i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
%vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%v = call <vscale x 8 x i16> @llvm.ssub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb)
ret <vscale x 8 x i16> %v
}
define <vscale x 8 x i16> @ssub_nxv8i16_vi(<vscale x 8 x i16> %va) {
; CHECK-LABEL: ssub_nxv8i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> undef, i16 1, i32 0
%vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%v = call <vscale x 8 x i16> @llvm.ssub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb)
ret <vscale x 8 x i16> %v
}
declare <vscale x 16 x i16> @llvm.ssub.sat.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
define <vscale x 16 x i16> @ssub_nxv16i16_vv(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b) {
; CHECK-LABEL: ssub_nxv16i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v12
; CHECK-NEXT: ret
%v = call <vscale x 16 x i16> @llvm.ssub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b)
ret <vscale x 16 x i16> %v
}
define <vscale x 16 x i16> @ssub_nxv16i16_vx(<vscale x 16 x i16> %va, i16 %b) {
; CHECK-LABEL: ssub_nxv16i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> undef, i16 %b, i32 0
%vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%v = call <vscale x 16 x i16> @llvm.ssub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb)
ret <vscale x 16 x i16> %v
}
define <vscale x 16 x i16> @ssub_nxv16i16_vi(<vscale x 16 x i16> %va) {
; CHECK-LABEL: ssub_nxv16i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> undef, i16 1, i32 0
%vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%v = call <vscale x 16 x i16> @llvm.ssub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb)
ret <vscale x 16 x i16> %v
}
declare <vscale x 32 x i16> @llvm.ssub.sat.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>)
define <vscale x 32 x i16> @ssub_nxv32i16_vv(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b) {
; CHECK-LABEL: ssub_nxv32i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v16
; CHECK-NEXT: ret
%v = call <vscale x 32 x i16> @llvm.ssub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b)
ret <vscale x 32 x i16> %v
}
define <vscale x 32 x i16> @ssub_nxv32i16_vx(<vscale x 32 x i16> %va, i16 %b) {
; CHECK-LABEL: ssub_nxv32i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> undef, i16 %b, i32 0
%vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%v = call <vscale x 32 x i16> @llvm.ssub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb)
ret <vscale x 32 x i16> %v
}
define <vscale x 32 x i16> @ssub_nxv32i16_vi(<vscale x 32 x i16> %va) {
; CHECK-LABEL: ssub_nxv32i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> undef, i16 1, i32 0
%vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%v = call <vscale x 32 x i16> @llvm.ssub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb)
ret <vscale x 32 x i16> %v
}
declare <vscale x 1 x i32> @llvm.ssub.sat.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>)
define <vscale x 1 x i32> @ssub_nxv1i32_vv(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b) {
; CHECK-LABEL: ssub_nxv1i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 1 x i32> @llvm.ssub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b)
ret <vscale x 1 x i32> %v
}
define <vscale x 1 x i32> @ssub_nxv1i32_vx(<vscale x 1 x i32> %va, i32 %b) {
; CHECK-LABEL: ssub_nxv1i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
%vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x i32> @llvm.ssub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb)
ret <vscale x 1 x i32> %v
}
define <vscale x 1 x i32> @ssub_nxv1i32_vi(<vscale x 1 x i32> %va) {
; CHECK-LABEL: ssub_nxv1i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> undef, i32 1, i32 0
%vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x i32> @llvm.ssub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb)
ret <vscale x 1 x i32> %v
}
declare <vscale x 2 x i32> @llvm.ssub.sat.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
define <vscale x 2 x i32> @ssub_nxv2i32_vv(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b) {
; CHECK-LABEL: ssub_nxv2i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.ssub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b)
ret <vscale x 2 x i32> %v
}
define <vscale x 2 x i32> @ssub_nxv2i32_vx(<vscale x 2 x i32> %va, i32 %b) {
; CHECK-LABEL: ssub_nxv2i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
%vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%v = call <vscale x 2 x i32> @llvm.ssub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb)
ret <vscale x 2 x i32> %v
}
define <vscale x 2 x i32> @ssub_nxv2i32_vi(<vscale x 2 x i32> %va) {
; CHECK-LABEL: ssub_nxv2i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> undef, i32 1, i32 0
%vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%v = call <vscale x 2 x i32> @llvm.ssub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb)
ret <vscale x 2 x i32> %v
}
declare <vscale x 4 x i32> @llvm.ssub.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
define <vscale x 4 x i32> @ssub_nxv4i32_vv(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b) {
; CHECK-LABEL: ssub_nxv4i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v10
; CHECK-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.ssub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b)
ret <vscale x 4 x i32> %v
}
define <vscale x 4 x i32> @ssub_nxv4i32_vx(<vscale x 4 x i32> %va, i32 %b) {
; CHECK-LABEL: ssub_nxv4i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
%vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%v = call <vscale x 4 x i32> @llvm.ssub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb)
ret <vscale x 4 x i32> %v
}
define <vscale x 4 x i32> @ssub_nxv4i32_vi(<vscale x 4 x i32> %va) {
; CHECK-LABEL: ssub_nxv4i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
%vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%v = call <vscale x 4 x i32> @llvm.ssub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb)
ret <vscale x 4 x i32> %v
}
declare <vscale x 8 x i32> @llvm.ssub.sat.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
define <vscale x 8 x i32> @ssub_nxv8i32_vv(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b) {
; CHECK-LABEL: ssub_nxv8i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v12
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.ssub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b)
ret <vscale x 8 x i32> %v
}
define <vscale x 8 x i32> @ssub_nxv8i32_vx(<vscale x 8 x i32> %va, i32 %b) {
; CHECK-LABEL: ssub_nxv8i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
%vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%v = call <vscale x 8 x i32> @llvm.ssub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb)
ret <vscale x 8 x i32> %v
}
define <vscale x 8 x i32> @ssub_nxv8i32_vi(<vscale x 8 x i32> %va) {
; CHECK-LABEL: ssub_nxv8i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> undef, i32 1, i32 0
%vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%v = call <vscale x 8 x i32> @llvm.ssub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb)
ret <vscale x 8 x i32> %v
}
declare <vscale x 16 x i32> @llvm.ssub.sat.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
define <vscale x 16 x i32> @ssub_nxv16i32_vv(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b) {
; CHECK-LABEL: ssub_nxv16i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.ssub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b)
ret <vscale x 16 x i32> %v
}
define <vscale x 16 x i32> @ssub_nxv16i32_vx(<vscale x 16 x i32> %va, i32 %b) {
; CHECK-LABEL: ssub_nxv16i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> undef, i32 %b, i32 0
%vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%v = call <vscale x 16 x i32> @llvm.ssub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb)
ret <vscale x 16 x i32> %v
}
define <vscale x 16 x i32> @ssub_nxv16i32_vi(<vscale x 16 x i32> %va) {
; CHECK-LABEL: ssub_nxv16i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> undef, i32 1, i32 0
%vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%v = call <vscale x 16 x i32> @llvm.ssub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb)
ret <vscale x 16 x i32> %v
}
declare <vscale x 1 x i64> @llvm.ssub.sat.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>)
define <vscale x 1 x i64> @ssub_nxv1i64_vv(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b) {
; CHECK-LABEL: ssub_nxv1i64_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 1 x i64> @llvm.ssub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b)
ret <vscale x 1 x i64> %v
}
define <vscale x 1 x i64> @ssub_nxv1i64_vx(<vscale x 1 x i64> %va, i64 %b) {
; RV32-LABEL: ssub_nxv1i64_vx:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v25, (a0), zero
; RV32-NEXT: vssub.vv v8, v8, v25
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: ssub_nxv1i64_vx:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; RV64-NEXT: vssub.vx v8, v8, a0
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
%vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x i64> @llvm.ssub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb)
ret <vscale x 1 x i64> %v
}
define <vscale x 1 x i64> @ssub_nxv1i64_vi(<vscale x 1 x i64> %va) {
; CHECK-LABEL: ssub_nxv1i64_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> undef, i64 1, i32 0
%vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x i64> @llvm.ssub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb)
ret <vscale x 1 x i64> %v
}
declare <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
define <vscale x 2 x i64> @ssub_nxv2i64_vv(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b) {
; CHECK-LABEL: ssub_nxv2i64_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v10
; CHECK-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b)
ret <vscale x 2 x i64> %v
}
define <vscale x 2 x i64> @ssub_nxv2i64_vx(<vscale x 2 x i64> %va, i64 %b) {
; RV32-LABEL: ssub_nxv2i64_vx:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v26, (a0), zero
; RV32-NEXT: vssub.vv v8, v8, v26
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: ssub_nxv2i64_vx:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; RV64-NEXT: vssub.vx v8, v8, a0
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
%vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%v = call <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb)
ret <vscale x 2 x i64> %v
}
define <vscale x 2 x i64> @ssub_nxv2i64_vi(<vscale x 2 x i64> %va) {
; CHECK-LABEL: ssub_nxv2i64_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> undef, i64 1, i32 0
%vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%v = call <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb)
ret <vscale x 2 x i64> %v
}
declare <vscale x 4 x i64> @llvm.ssub.sat.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
define <vscale x 4 x i64> @ssub_nxv4i64_vv(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b) {
; CHECK-LABEL: ssub_nxv4i64_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v12
; CHECK-NEXT: ret
%v = call <vscale x 4 x i64> @llvm.ssub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b)
ret <vscale x 4 x i64> %v
}
define <vscale x 4 x i64> @ssub_nxv4i64_vx(<vscale x 4 x i64> %va, i64 %b) {
; RV32-LABEL: ssub_nxv4i64_vx:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v28, (a0), zero
; RV32-NEXT: vssub.vv v8, v8, v28
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: ssub_nxv4i64_vx:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; RV64-NEXT: vssub.vx v8, v8, a0
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
%vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%v = call <vscale x 4 x i64> @llvm.ssub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb)
ret <vscale x 4 x i64> %v
}
define <vscale x 4 x i64> @ssub_nxv4i64_vi(<vscale x 4 x i64> %va) {
; CHECK-LABEL: ssub_nxv4i64_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> undef, i64 1, i32 0
%vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%v = call <vscale x 4 x i64> @llvm.ssub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb)
ret <vscale x 4 x i64> %v
}
declare <vscale x 8 x i64> @llvm.ssub.sat.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)
define <vscale x 8 x i64> @ssub_nxv8i64_vv(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b) {
; CHECK-LABEL: ssub_nxv8i64_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vssub.vv v8, v8, v16
; CHECK-NEXT: ret
%v = call <vscale x 8 x i64> @llvm.ssub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b)
ret <vscale x 8 x i64> %v
}
define <vscale x 8 x i64> @ssub_nxv8i64_vx(<vscale x 8 x i64> %va, i64 %b) {
; RV32-LABEL: ssub_nxv8i64_vx:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vssub.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: ssub_nxv8i64_vx:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vssub.vx v8, v8, a0
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
%vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%v = call <vscale x 8 x i64> @llvm.ssub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb)
ret <vscale x 8 x i64> %v
}
define <vscale x 8 x i64> @ssub_nxv8i64_vi(<vscale x 8 x i64> %va) {
; CHECK-LABEL: ssub_nxv8i64_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vssub.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> undef, i64 1, i32 0
%vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%v = call <vscale x 8 x i64> @llvm.ssub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb)
ret <vscale x 8 x i64> %v
}

View File

@ -0,0 +1,871 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=CHECK,RV64
; Tests for @llvm.usub.sat (unsigned saturating subtract) on scalable i8
; vectors, from <vscale x 1 x i8> (fractional LMUL mf8) up to
; <vscale x 64 x i8> (LMUL m8). Each element count is exercised in three
; forms, mirrored by the autogenerated check lines:
;   _vv: vector-vector operands          -> vssubu.vv
;   _vx: splatted scalar operand         -> vssubu.vx
;   _vi: splatted constant 2             -> constant materialized into a0,
;        then vssubu.vx (no .vi form is selected for vssubu here)
; ---- <vscale x 1 x i8> ----
declare <vscale x 1 x i8> @llvm.usub.sat.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>)
define <vscale x 1 x i8> @usub_nxv1i8_vv(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b) {
; CHECK-LABEL: usub_nxv1i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 1 x i8> @llvm.usub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b)
ret <vscale x 1 x i8> %v
}
define <vscale x 1 x i8> @usub_nxv1i8_vx(<vscale x 1 x i8> %va, i8 %b) {
; CHECK-LABEL: usub_nxv1i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> undef, i8 %b, i32 0
%vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x i8> @llvm.usub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb)
ret <vscale x 1 x i8> %v
}
define <vscale x 1 x i8> @usub_nxv1i8_vi(<vscale x 1 x i8> %va) {
; CHECK-LABEL: usub_nxv1i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> undef, i8 2, i32 0
%vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x i8> @llvm.usub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb)
ret <vscale x 1 x i8> %v
}
; ---- <vscale x 2 x i8> ----
declare <vscale x 2 x i8> @llvm.usub.sat.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
define <vscale x 2 x i8> @usub_nxv2i8_vv(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b) {
; CHECK-LABEL: usub_nxv2i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.usub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b)
ret <vscale x 2 x i8> %v
}
define <vscale x 2 x i8> @usub_nxv2i8_vx(<vscale x 2 x i8> %va, i8 %b) {
; CHECK-LABEL: usub_nxv2i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i8> undef, i8 %b, i32 0
%vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
%v = call <vscale x 2 x i8> @llvm.usub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb)
ret <vscale x 2 x i8> %v
}
define <vscale x 2 x i8> @usub_nxv2i8_vi(<vscale x 2 x i8> %va) {
; CHECK-LABEL: usub_nxv2i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i8> undef, i8 2, i32 0
%vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
%v = call <vscale x 2 x i8> @llvm.usub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb)
ret <vscale x 2 x i8> %v
}
; ---- <vscale x 4 x i8> ----
declare <vscale x 4 x i8> @llvm.usub.sat.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
define <vscale x 4 x i8> @usub_nxv4i8_vv(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b) {
; CHECK-LABEL: usub_nxv4i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 4 x i8> @llvm.usub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b)
ret <vscale x 4 x i8> %v
}
define <vscale x 4 x i8> @usub_nxv4i8_vx(<vscale x 4 x i8> %va, i8 %b) {
; CHECK-LABEL: usub_nxv4i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i8> undef, i8 %b, i32 0
%vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
%v = call <vscale x 4 x i8> @llvm.usub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb)
ret <vscale x 4 x i8> %v
}
define <vscale x 4 x i8> @usub_nxv4i8_vi(<vscale x 4 x i8> %va) {
; CHECK-LABEL: usub_nxv4i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i8> undef, i8 2, i32 0
%vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
%v = call <vscale x 4 x i8> @llvm.usub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb)
ret <vscale x 4 x i8> %v
}
; ---- <vscale x 8 x i8> ----
declare <vscale x 8 x i8> @llvm.usub.sat.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
define <vscale x 8 x i8> @usub_nxv8i8_vv(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b) {
; CHECK-LABEL: usub_nxv8i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 8 x i8> @llvm.usub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b)
ret <vscale x 8 x i8> %v
}
define <vscale x 8 x i8> @usub_nxv8i8_vx(<vscale x 8 x i8> %va, i8 %b) {
; CHECK-LABEL: usub_nxv8i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
%vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
%v = call <vscale x 8 x i8> @llvm.usub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb)
ret <vscale x 8 x i8> %v
}
define <vscale x 8 x i8> @usub_nxv8i8_vi(<vscale x 8 x i8> %va) {
; CHECK-LABEL: usub_nxv8i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i8> undef, i8 2, i32 0
%vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
%v = call <vscale x 8 x i8> @llvm.usub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb)
ret <vscale x 8 x i8> %v
}
; ---- <vscale x 16 x i8> (LMUL=2, so the second operand lives in v10) ----
declare <vscale x 16 x i8> @llvm.usub.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
define <vscale x 16 x i8> @usub_nxv16i8_vv(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b) {
; CHECK-LABEL: usub_nxv16i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v10
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.usub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b)
ret <vscale x 16 x i8> %v
}
define <vscale x 16 x i8> @usub_nxv16i8_vx(<vscale x 16 x i8> %va, i8 %b) {
; CHECK-LABEL: usub_nxv16i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i8> undef, i8 %b, i32 0
%vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%v = call <vscale x 16 x i8> @llvm.usub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb)
ret <vscale x 16 x i8> %v
}
define <vscale x 16 x i8> @usub_nxv16i8_vi(<vscale x 16 x i8> %va) {
; CHECK-LABEL: usub_nxv16i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i8> undef, i8 2, i32 0
%vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%v = call <vscale x 16 x i8> @llvm.usub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb)
ret <vscale x 16 x i8> %v
}
; ---- <vscale x 32 x i8> (LMUL=4, second operand in v12) ----
declare <vscale x 32 x i8> @llvm.usub.sat.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>)
define <vscale x 32 x i8> @usub_nxv32i8_vv(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b) {
; CHECK-LABEL: usub_nxv32i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v12
; CHECK-NEXT: ret
%v = call <vscale x 32 x i8> @llvm.usub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b)
ret <vscale x 32 x i8> %v
}
define <vscale x 32 x i8> @usub_nxv32i8_vx(<vscale x 32 x i8> %va, i8 %b) {
; CHECK-LABEL: usub_nxv32i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i8> undef, i8 %b, i32 0
%vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
%v = call <vscale x 32 x i8> @llvm.usub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb)
ret <vscale x 32 x i8> %v
}
define <vscale x 32 x i8> @usub_nxv32i8_vi(<vscale x 32 x i8> %va) {
; CHECK-LABEL: usub_nxv32i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i8> undef, i8 2, i32 0
%vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
%v = call <vscale x 32 x i8> @llvm.usub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb)
ret <vscale x 32 x i8> %v
}
; ---- <vscale x 64 x i8> (LMUL=8, second operand in v16) ----
declare <vscale x 64 x i8> @llvm.usub.sat.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>)
define <vscale x 64 x i8> @usub_nxv64i8_vv(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b) {
; CHECK-LABEL: usub_nxv64i8_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v16
; CHECK-NEXT: ret
%v = call <vscale x 64 x i8> @llvm.usub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b)
ret <vscale x 64 x i8> %v
}
define <vscale x 64 x i8> @usub_nxv64i8_vx(<vscale x 64 x i8> %va, i8 %b) {
; CHECK-LABEL: usub_nxv64i8_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 64 x i8> undef, i8 %b, i32 0
%vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
%v = call <vscale x 64 x i8> @llvm.usub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb)
ret <vscale x 64 x i8> %v
}
define <vscale x 64 x i8> @usub_nxv64i8_vi(<vscale x 64 x i8> %va) {
; CHECK-LABEL: usub_nxv64i8_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 64 x i8> undef, i8 2, i32 0
%vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
%v = call <vscale x 64 x i8> @llvm.usub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb)
ret <vscale x 64 x i8> %v
}
; Tests for @llvm.usub.sat on scalable i16 vectors, <vscale x 1 x i16>
; (mf4) through <vscale x 32 x i16> (m8). Same three-form pattern as the
; i8 tests: _vv -> vssubu.vv, _vx -> vssubu.vx, _vi -> constant 2 in a0
; followed by vssubu.vx. Check lines are autogenerated.
; ---- <vscale x 1 x i16> ----
declare <vscale x 1 x i16> @llvm.usub.sat.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>)
define <vscale x 1 x i16> @usub_nxv1i16_vv(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b) {
; CHECK-LABEL: usub_nxv1i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 1 x i16> @llvm.usub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b)
ret <vscale x 1 x i16> %v
}
define <vscale x 1 x i16> @usub_nxv1i16_vx(<vscale x 1 x i16> %va, i16 %b) {
; CHECK-LABEL: usub_nxv1i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i16> undef, i16 %b, i32 0
%vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x i16> @llvm.usub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb)
ret <vscale x 1 x i16> %v
}
define <vscale x 1 x i16> @usub_nxv1i16_vi(<vscale x 1 x i16> %va) {
; CHECK-LABEL: usub_nxv1i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i16> undef, i16 2, i32 0
%vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x i16> @llvm.usub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb)
ret <vscale x 1 x i16> %v
}
; ---- <vscale x 2 x i16> ----
declare <vscale x 2 x i16> @llvm.usub.sat.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
define <vscale x 2 x i16> @usub_nxv2i16_vv(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b) {
; CHECK-LABEL: usub_nxv2i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.usub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b)
ret <vscale x 2 x i16> %v
}
define <vscale x 2 x i16> @usub_nxv2i16_vx(<vscale x 2 x i16> %va, i16 %b) {
; CHECK-LABEL: usub_nxv2i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i16> undef, i16 %b, i32 0
%vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%v = call <vscale x 2 x i16> @llvm.usub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb)
ret <vscale x 2 x i16> %v
}
define <vscale x 2 x i16> @usub_nxv2i16_vi(<vscale x 2 x i16> %va) {
; CHECK-LABEL: usub_nxv2i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i16> undef, i16 2, i32 0
%vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%v = call <vscale x 2 x i16> @llvm.usub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb)
ret <vscale x 2 x i16> %v
}
; ---- <vscale x 4 x i16> ----
declare <vscale x 4 x i16> @llvm.usub.sat.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
define <vscale x 4 x i16> @usub_nxv4i16_vv(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b) {
; CHECK-LABEL: usub_nxv4i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 4 x i16> @llvm.usub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b)
ret <vscale x 4 x i16> %v
}
define <vscale x 4 x i16> @usub_nxv4i16_vx(<vscale x 4 x i16> %va, i16 %b) {
; CHECK-LABEL: usub_nxv4i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i16> undef, i16 %b, i32 0
%vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%v = call <vscale x 4 x i16> @llvm.usub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb)
ret <vscale x 4 x i16> %v
}
define <vscale x 4 x i16> @usub_nxv4i16_vi(<vscale x 4 x i16> %va) {
; CHECK-LABEL: usub_nxv4i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i16> undef, i16 2, i32 0
%vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%v = call <vscale x 4 x i16> @llvm.usub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb)
ret <vscale x 4 x i16> %v
}
; ---- <vscale x 8 x i16> (LMUL=2, second operand in v10) ----
declare <vscale x 8 x i16> @llvm.usub.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
define <vscale x 8 x i16> @usub_nxv8i16_vv(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b) {
; CHECK-LABEL: usub_nxv8i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v10
; CHECK-NEXT: ret
%v = call <vscale x 8 x i16> @llvm.usub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b)
ret <vscale x 8 x i16> %v
}
define <vscale x 8 x i16> @usub_nxv8i16_vx(<vscale x 8 x i16> %va, i16 %b) {
; CHECK-LABEL: usub_nxv8i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
%vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%v = call <vscale x 8 x i16> @llvm.usub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb)
ret <vscale x 8 x i16> %v
}
define <vscale x 8 x i16> @usub_nxv8i16_vi(<vscale x 8 x i16> %va) {
; CHECK-LABEL: usub_nxv8i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> undef, i16 2, i32 0
%vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%v = call <vscale x 8 x i16> @llvm.usub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb)
ret <vscale x 8 x i16> %v
}
; ---- <vscale x 16 x i16> (LMUL=4, second operand in v12) ----
declare <vscale x 16 x i16> @llvm.usub.sat.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
define <vscale x 16 x i16> @usub_nxv16i16_vv(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b) {
; CHECK-LABEL: usub_nxv16i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v12
; CHECK-NEXT: ret
%v = call <vscale x 16 x i16> @llvm.usub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b)
ret <vscale x 16 x i16> %v
}
define <vscale x 16 x i16> @usub_nxv16i16_vx(<vscale x 16 x i16> %va, i16 %b) {
; CHECK-LABEL: usub_nxv16i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> undef, i16 %b, i32 0
%vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%v = call <vscale x 16 x i16> @llvm.usub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb)
ret <vscale x 16 x i16> %v
}
define <vscale x 16 x i16> @usub_nxv16i16_vi(<vscale x 16 x i16> %va) {
; CHECK-LABEL: usub_nxv16i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> undef, i16 2, i32 0
%vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%v = call <vscale x 16 x i16> @llvm.usub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb)
ret <vscale x 16 x i16> %v
}
; ---- <vscale x 32 x i16> (LMUL=8, second operand in v16) ----
declare <vscale x 32 x i16> @llvm.usub.sat.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>)
define <vscale x 32 x i16> @usub_nxv32i16_vv(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b) {
; CHECK-LABEL: usub_nxv32i16_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v16
; CHECK-NEXT: ret
%v = call <vscale x 32 x i16> @llvm.usub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b)
ret <vscale x 32 x i16> %v
}
define <vscale x 32 x i16> @usub_nxv32i16_vx(<vscale x 32 x i16> %va, i16 %b) {
; CHECK-LABEL: usub_nxv32i16_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> undef, i16 %b, i32 0
%vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%v = call <vscale x 32 x i16> @llvm.usub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb)
ret <vscale x 32 x i16> %v
}
define <vscale x 32 x i16> @usub_nxv32i16_vi(<vscale x 32 x i16> %va) {
; CHECK-LABEL: usub_nxv32i16_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> undef, i16 2, i32 0
%vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%v = call <vscale x 32 x i16> @llvm.usub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb)
ret <vscale x 32 x i16> %v
}
; Tests for @llvm.usub.sat on scalable i32 vectors, <vscale x 1 x i32>
; (mf2) through <vscale x 16 x i32> (m8). Same three-form pattern:
; _vv -> vssubu.vv, _vx -> vssubu.vx, _vi -> constant 2 materialized in
; a0 then vssubu.vx. Check lines are autogenerated.
; ---- <vscale x 1 x i32> ----
declare <vscale x 1 x i32> @llvm.usub.sat.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>)
define <vscale x 1 x i32> @usub_nxv1i32_vv(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b) {
; CHECK-LABEL: usub_nxv1i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 1 x i32> @llvm.usub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b)
ret <vscale x 1 x i32> %v
}
define <vscale x 1 x i32> @usub_nxv1i32_vx(<vscale x 1 x i32> %va, i32 %b) {
; CHECK-LABEL: usub_nxv1i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
%vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x i32> @llvm.usub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb)
ret <vscale x 1 x i32> %v
}
define <vscale x 1 x i32> @usub_nxv1i32_vi(<vscale x 1 x i32> %va) {
; CHECK-LABEL: usub_nxv1i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> undef, i32 2, i32 0
%vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x i32> @llvm.usub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb)
ret <vscale x 1 x i32> %v
}
; ---- <vscale x 2 x i32> ----
declare <vscale x 2 x i32> @llvm.usub.sat.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
define <vscale x 2 x i32> @usub_nxv2i32_vv(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b) {
; CHECK-LABEL: usub_nxv2i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.usub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b)
ret <vscale x 2 x i32> %v
}
define <vscale x 2 x i32> @usub_nxv2i32_vx(<vscale x 2 x i32> %va, i32 %b) {
; CHECK-LABEL: usub_nxv2i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
%vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%v = call <vscale x 2 x i32> @llvm.usub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb)
ret <vscale x 2 x i32> %v
}
define <vscale x 2 x i32> @usub_nxv2i32_vi(<vscale x 2 x i32> %va) {
; CHECK-LABEL: usub_nxv2i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> undef, i32 2, i32 0
%vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%v = call <vscale x 2 x i32> @llvm.usub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb)
ret <vscale x 2 x i32> %v
}
; ---- <vscale x 4 x i32> (LMUL=2, second operand in v10) ----
declare <vscale x 4 x i32> @llvm.usub.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
define <vscale x 4 x i32> @usub_nxv4i32_vv(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b) {
; CHECK-LABEL: usub_nxv4i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v10
; CHECK-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.usub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b)
ret <vscale x 4 x i32> %v
}
define <vscale x 4 x i32> @usub_nxv4i32_vx(<vscale x 4 x i32> %va, i32 %b) {
; CHECK-LABEL: usub_nxv4i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
%vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%v = call <vscale x 4 x i32> @llvm.usub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb)
ret <vscale x 4 x i32> %v
}
define <vscale x 4 x i32> @usub_nxv4i32_vi(<vscale x 4 x i32> %va) {
; CHECK-LABEL: usub_nxv4i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> undef, i32 2, i32 0
%vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%v = call <vscale x 4 x i32> @llvm.usub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb)
ret <vscale x 4 x i32> %v
}
; ---- <vscale x 8 x i32> (LMUL=4, second operand in v12) ----
declare <vscale x 8 x i32> @llvm.usub.sat.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
define <vscale x 8 x i32> @usub_nxv8i32_vv(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b) {
; CHECK-LABEL: usub_nxv8i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v12
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.usub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b)
ret <vscale x 8 x i32> %v
}
define <vscale x 8 x i32> @usub_nxv8i32_vx(<vscale x 8 x i32> %va, i32 %b) {
; CHECK-LABEL: usub_nxv8i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
%vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%v = call <vscale x 8 x i32> @llvm.usub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb)
ret <vscale x 8 x i32> %v
}
define <vscale x 8 x i32> @usub_nxv8i32_vi(<vscale x 8 x i32> %va) {
; CHECK-LABEL: usub_nxv8i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> undef, i32 2, i32 0
%vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%v = call <vscale x 8 x i32> @llvm.usub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb)
ret <vscale x 8 x i32> %v
}
; ---- <vscale x 16 x i32> (LMUL=8, second operand in v16) ----
declare <vscale x 16 x i32> @llvm.usub.sat.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
define <vscale x 16 x i32> @usub_nxv16i32_vv(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b) {
; CHECK-LABEL: usub_nxv16i32_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.usub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b)
ret <vscale x 16 x i32> %v
}
define <vscale x 16 x i32> @usub_nxv16i32_vx(<vscale x 16 x i32> %va, i32 %b) {
; CHECK-LABEL: usub_nxv16i32_vx:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> undef, i32 %b, i32 0
%vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%v = call <vscale x 16 x i32> @llvm.usub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb)
ret <vscale x 16 x i32> %v
}
define <vscale x 16 x i32> @usub_nxv16i32_vi(<vscale x 16 x i32> %va) {
; CHECK-LABEL: usub_nxv16i32_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> undef, i32 2, i32 0
%vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%v = call <vscale x 16 x i32> @llvm.usub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb)
ret <vscale x 16 x i32> %v
}
declare <vscale x 1 x i64> @llvm.usub.sat.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>)
define <vscale x 1 x i64> @usub_nxv1i64_vv(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b) {
; CHECK-LABEL: usub_nxv1i64_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 1 x i64> @llvm.usub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b)
ret <vscale x 1 x i64> %v
}
define <vscale x 1 x i64> @usub_nxv1i64_vx(<vscale x 1 x i64> %va, i64 %b) {
; RV32-LABEL: usub_nxv1i64_vx:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v25, (a0), zero
; RV32-NEXT: vssubu.vv v8, v8, v25
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: usub_nxv1i64_vx:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; RV64-NEXT: vssubu.vx v8, v8, a0
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
%vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x i64> @llvm.usub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb)
ret <vscale x 1 x i64> %v
}
define <vscale x 1 x i64> @usub_nxv1i64_vi(<vscale x 1 x i64> %va) {
; CHECK-LABEL: usub_nxv1i64_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> undef, i64 2, i32 0
%vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x i64> @llvm.usub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb)
ret <vscale x 1 x i64> %v
}
declare <vscale x 2 x i64> @llvm.usub.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
define <vscale x 2 x i64> @usub_nxv2i64_vv(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b) {
; CHECK-LABEL: usub_nxv2i64_vv:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vssubu.vv v8, v8, v10
; CHECK-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.usub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b)
ret <vscale x 2 x i64> %v
}
define <vscale x 2 x i64> @usub_nxv2i64_vx(<vscale x 2 x i64> %va, i64 %b) {
; RV32-LABEL: usub_nxv2i64_vx:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v26, (a0), zero
; RV32-NEXT: vssubu.vv v8, v8, v26
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: usub_nxv2i64_vx:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; RV64-NEXT: vssubu.vx v8, v8, a0
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
%vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%v = call <vscale x 2 x i64> @llvm.usub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb)
ret <vscale x 2 x i64> %v
}
define <vscale x 2 x i64> @usub_nxv2i64_vi(<vscale x 2 x i64> %va) {
; CHECK-LABEL: usub_nxv2i64_vi:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 2
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vssubu.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> undef, i64 2, i32 0
%vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%v = call <vscale x 2 x i64> @llvm.usub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb)
ret <vscale x 2 x i64> %v
}
declare <vscale x 4 x i64> @llvm.usub.sat.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)

; Same vector-vector test at <vscale x 4 x i64> (LMUL=4); the second
; operand now occupies the v12 register group.
define <vscale x 4 x i64> @usub_nxv4i64_vv(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b) {
; CHECK-LABEL: usub_nxv4i64_vv:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT:    vssubu.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i64> @llvm.usub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b)
  ret <vscale x 4 x i64> %v
}
; Vector-scalar splat at LMUL=4. RV64: vssubu.vx with the scalar in a0.
; RV32: 64-bit splat built via stack slot + zero-stride vlse64.v, then
; vssubu.vv (same pattern as the nxv2i64 case, different register group).
define <vscale x 4 x i64> @usub_nxv4i64_vx(<vscale x 4 x i64> %va, i64 %b) {
; RV32-LABEL: usub_nxv4i64_vx:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vlse64.v v28, (a0), zero
; RV32-NEXT:    vssubu.vv v8, v8, v28
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: usub_nxv4i64_vx:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
; RV64-NEXT:    vssubu.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i64> @llvm.usub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb)
  ret <vscale x 4 x i64> %v
}
; Splat-of-constant-2 at LMUL=4: constant materialized into a0, then
; vssubu.vx (same lowering shape as the nxv2i64 immediate test).
define <vscale x 4 x i64> @usub_nxv4i64_vi(<vscale x 4 x i64> %va) {
; CHECK-LABEL: usub_nxv4i64_vi:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, 2
; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT:    vssubu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i64> undef, i64 2, i32 0
  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i64> @llvm.usub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb)
  ret <vscale x 4 x i64> %v
}
declare <vscale x 8 x i64> @llvm.usub.sat.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)

; Vector-vector test at the largest grouping, <vscale x 8 x i64>
; (LMUL=8); the second operand arrives in the v16 register group.
define <vscale x 8 x i64> @usub_nxv8i64_vv(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b) {
; CHECK-LABEL: usub_nxv8i64_vv:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT:    vssubu.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i64> @llvm.usub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b)
  ret <vscale x 8 x i64> %v
}
; Vector-scalar splat at LMUL=8. RV64: single vssubu.vx. RV32: the
; 64-bit scalar is round-tripped through an 8-byte stack slot and
; broadcast with a zero-stride vlse64.v into v16 before vssubu.vv.
define <vscale x 8 x i64> @usub_nxv8i64_vx(<vscale x 8 x i64> %va, i64 %b) {
; RV32-LABEL: usub_nxv8i64_vx:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vssubu.vv v8, v8, v16
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: usub_nxv8i64_vx:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT:    vssubu.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i64> @llvm.usub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb)
  ret <vscale x 8 x i64> %v
}
; Splat-of-constant-2 at LMUL=8: constant 2 materialized into a0, then
; vssubu.vx — matches the immediate-splat lowering at the other LMULs.
define <vscale x 8 x i64> @usub_nxv8i64_vi(<vscale x 8 x i64> %va) {
; CHECK-LABEL: usub_nxv8i64_vi:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, 2
; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT:    vssubu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i64> undef, i64 2, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i64> @llvm.usub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb)
  ret <vscale x 8 x i64> %v
}