diff --git a/include/llvm/IR/IntrinsicsARM.td b/include/llvm/IR/IntrinsicsARM.td
index 6e63022d4cf..578b22a3abb 100644
--- a/include/llvm/IR/IntrinsicsARM.td
+++ b/include/llvm/IR/IntrinsicsARM.td
@@ -984,7 +984,9 @@ multiclass MVEMXPredicated<list<LLVMType> rets, list<LLVMType> flags,
 }
 
 // The first two parameters are compile-time constants:
-// * Halving: is the a halving (vhcaddq) or non-halving (vcaddq) instruction
+// * Halving: 0 means halving (vhcaddq), 1 means non-halving (vcaddq)
+//   instruction. Note: the flag is inverted to match the corresponding
+//   bit in the instruction encoding
 // * Rotation angle: 0 mean 90 deg, 1 means 180 deg
 defm int_arm_mve_vcaddq : MVEMXPredicated<
   [llvm_anyvector_ty],
diff --git a/lib/Target/ARM/ARMISelDAGToDAG.cpp b/lib/Target/ARM/ARMISelDAGToDAG.cpp
index 93e1a8fd2a7..887fd947bf3 100644
--- a/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -233,22 +233,6 @@ private:
   void SelectMVE_VADCSBC(SDNode *N, uint16_t OpcodeWithCarry,
                          uint16_t OpcodeWithNoCarry, bool Add, bool Predicated);
 
-  /// Select MVE complex vector addition intrinsic
-  /// OpcodesInt are opcodes for non-halving addition of complex integer vectors
-  /// OpcodesHInt are opcodes for halving addition of complex integer vectors
-  /// OpcodesFP are opcodes for addition of complex floating point vectors
-  void SelectMVE_VCADD(SDNode *N, const uint16_t *OpcodesInt,
-                       const uint16_t *OpcodesHInt, const uint16_t *OpcodesFP,
-                       bool Predicated);
-
-  /// Select MVE complex vector multiplication intrinsic
-  void SelectMVE_VCMUL(SDNode *N, uint16_t OpcodeF16, uint16_t OpcodeF32,
-                       bool Predicated);
-
-  /// Select MVE complex vector multiply-add intrinsic
-  void SelectMVE_VCMLA(SDNode *N, uint16_t OpcodeF16, uint16_t OpcodeF32,
-                       bool Predicated);
-
   /// SelectMVE_VLD - Select MVE interleaving load intrinsics. NumVecs
   /// should be 2 or 4. The opcode array specifies the instructions
   /// used for 8, 16 and 32-bit lane sizes respectively, and each
@@ -2533,138 +2517,6 @@ void ARMDAGToDAGISel::SelectMVE_VADCSBC(SDNode *N, uint16_t OpcodeWithCarry,
   CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), makeArrayRef(Ops));
 }
 
-/// Convert an SDValue to a boolean value. SDVal must be a compile-time constant
-static bool SDValueToConstBool(SDValue SDVal) {
-  ConstantSDNode *SDValConstant = dyn_cast<ConstantSDNode>(SDVal);
-  assert(SDValConstant && "expected a compile-time constant");
-  uint64_t Value = SDValConstant->getZExtValue();
-  assert((Value == 0 || Value == 1) && "expected value 0 or 1");
-  return Value;
-}
-
-/// Select an opcode based on a floating point vector type. One opcode
-/// corresponds to 16-bit floating point element type, the other to 32-bit
-/// element type.
-/// Other types are not allowed
-static uint16_t SelectFPOpcode(EVT VT, uint16_t OpcodeF16, uint16_t OpcodeF32) {
-  assert(VT.isFloatingPoint() && VT.isVector() &&
-         "expected a floating-point vector");
-  switch (VT.getVectorElementType().getSizeInBits()) {
-  case 16:
-    return OpcodeF16;
-  case 32:
-    return OpcodeF32;
-  default:
-    llvm_unreachable("bad vector element size");
-  }
-}
-
-void ARMDAGToDAGISel::SelectMVE_VCADD(SDNode *N, const uint16_t *OpcodesInt,
-                                      const uint16_t *OpcodesHInt,
-                                      const uint16_t *OpcodesFP,
-                                      bool Predicated) {
-  EVT VT = N->getValueType(0);
-  SDLoc Loc(N);
-
-  bool IsHalved = SDValueToConstBool(N->getOperand(1));
-  bool IsAngle270 = SDValueToConstBool(N->getOperand(2));
-  bool IsFP = VT.isFloatingPoint();
-  if (IsHalved)
-    assert(!IsFP && "vhcaddq requires integer vector type");
-
-  uint16_t Opcode;
-  if (IsFP) {
-    Opcode = SelectFPOpcode(VT, OpcodesFP[0], OpcodesFP[1]);
-  } else {
-    const uint16_t *Opcodes = IsHalved ? OpcodesHInt : OpcodesInt;
-    switch (VT.getVectorElementType().getSizeInBits()) {
-    case 8:
-      Opcode = Opcodes[0];
-      break;
-    case 16:
-      Opcode = Opcodes[1];
-      break;
-    case 32:
-      Opcode = Opcodes[2];
-      break;
-    default:
-      llvm_unreachable("bad vector element size");
-    }
-  }
-
-  int FirstInputOp = Predicated ? 4 : 3;
-  SmallVector<SDValue, 8> Ops;
-  // Vectors
-  Ops.push_back(N->getOperand(FirstInputOp));
-  Ops.push_back(N->getOperand(FirstInputOp + 1));
-  // Rotation
-  Ops.push_back(CurDAG->getTargetConstant(IsAngle270, Loc, MVT::i32));
-
-  if (Predicated)
-    AddMVEPredicateToOps(Ops, Loc,
-                         N->getOperand(FirstInputOp + 2),  // predicate
-                         N->getOperand(FirstInputOp - 1)); // inactive
-  else
-    AddEmptyMVEPredicateToOps(Ops, Loc, VT);
-
-  CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), makeArrayRef(Ops));
-}
-
-static uint32_t GetCMulRotation(SDValue V) {
-  const ConstantSDNode *RotConstant = dyn_cast<ConstantSDNode>(V);
-  assert(RotConstant && "expected a compile-time constant");
-  uint64_t RotValue = RotConstant->getZExtValue();
-  assert(RotValue < 4 && "expected value in range [0, 3]");
-  return RotValue;
-}
-
-void ARMDAGToDAGISel::SelectMVE_VCMUL(SDNode *N, uint16_t OpcodeF16,
-                                      uint16_t OpcodeF32, bool Predicated) {
-  EVT VT = N->getValueType(0);
-  SDLoc Loc(N);
-
-  int FirstInputOp = Predicated ? 3 : 2;
-  SmallVector<SDValue, 8> Ops;
-  // Vectors
-  Ops.push_back(N->getOperand(FirstInputOp));
-  Ops.push_back(N->getOperand(FirstInputOp + 1));
-  // Rotation
-  uint32_t RotValue = GetCMulRotation(N->getOperand(1));
-  Ops.push_back(CurDAG->getTargetConstant(RotValue, Loc, MVT::i32));
-
-  if (Predicated)
-    AddMVEPredicateToOps(Ops, Loc,
-                         N->getOperand(FirstInputOp + 2),  // predicate
-                         N->getOperand(FirstInputOp - 1)); // inactive
-  else
-    AddEmptyMVEPredicateToOps(Ops, Loc, VT);
-
-  uint16_t Opcode = SelectFPOpcode(VT, OpcodeF16, OpcodeF32);
-  CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), makeArrayRef(Ops));
-}
-
-void ARMDAGToDAGISel::SelectMVE_VCMLA(SDNode *N, uint16_t OpcodeF16,
-                                      uint16_t OpcodeF32, bool Predicated) {
-  SDLoc Loc(N);
-
-  SmallVector<SDValue, 8> Ops;
-  // The 3 vector operands
-  for (int i = 2; i < 5; ++i)
-    Ops.push_back(N->getOperand(i));
-  // Rotation
-  uint32_t RotValue = GetCMulRotation(N->getOperand(1));
-  Ops.push_back(CurDAG->getTargetConstant(RotValue, Loc, MVT::i32));
-
-  if (Predicated)
-    AddMVEPredicateToOps(Ops, Loc, N->getOperand(5));
-  else
-    AddEmptyMVEPredicateToOps(Ops, Loc);
-
-  EVT VT = N->getValueType(0);
-  uint16_t Opcode = SelectFPOpcode(VT, OpcodeF16, OpcodeF32);
-  CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), makeArrayRef(Ops));
-}
-
 void ARMDAGToDAGISel::SelectMVE_VLD(SDNode *N, unsigned NumVecs,
                                     const uint16_t *const *Opcodes) {
   EVT VT = N->getValueType(0);
@@ -4510,35 +4362,6 @@ void ARMDAGToDAGISel::Select(SDNode *N) {
                        IntNo == Intrinsic::arm_mve_vadc_predicated);
       return;
 
-    case Intrinsic::arm_mve_vcaddq:
-    case Intrinsic::arm_mve_vcaddq_predicated: {
-      static const uint16_t OpcodesInt[] = {
-        ARM::MVE_VCADDi8, ARM::MVE_VCADDi16, ARM::MVE_VCADDi32,
-      };
-      static const uint16_t OpcodesHInt[] = {
-        ARM::MVE_VHCADDs8, ARM::MVE_VHCADDs16, ARM::MVE_VHCADDs32,
-      };
-      static const uint16_t OpcodesFP[] = {
-        ARM::MVE_VCADDf16, ARM::MVE_VCADDf32,
-      };
-
-      SelectMVE_VCADD(N, OpcodesInt, OpcodesHInt,
-                      OpcodesFP, IntNo == Intrinsic::arm_mve_vcaddq_predicated);
-      return;
-    }
-
-    case Intrinsic::arm_mve_vcmulq:
-    case Intrinsic::arm_mve_vcmulq_predicated:
-      SelectMVE_VCMUL(N, ARM::MVE_VCMULf16, ARM::MVE_VCMULf32,
-                      IntNo == Intrinsic::arm_mve_vcmulq_predicated);
-      return;
-
-    case Intrinsic::arm_mve_vcmlaq:
-    case Intrinsic::arm_mve_vcmlaq_predicated:
-      SelectMVE_VCMLA(N, ARM::MVE_VCMLAf16, ARM::MVE_VCMLAf32,
-                      IntNo == Intrinsic::arm_mve_vcmlaq_predicated);
-      return;
-
     }
     break;
   }
diff --git a/lib/Target/ARM/ARMInstrMVE.td b/lib/Target/ARM/ARMInstrMVE.td
index d351ae8905b..7bc067c25ef 100644
--- a/lib/Target/ARM/ARMInstrMVE.td
+++ b/lib/Target/ARM/ARMInstrMVE.td
@@ -2960,10 +2960,10 @@ multiclass MVE_VMUL_fp_m<MVEVectorVTInfo VTI>
 defm MVE_VMULf32 : MVE_VMUL_fp_m<MVE_v4f32>;
 defm MVE_VMULf16 : MVE_VMUL_fp_m<MVE_v8f16>;
 
-class MVE_VCMLA<string suffix, bit size, list<dag> pattern=[]>
+class MVE_VCMLA<string suffix, bit size>
   : MVEFloatArithNeon<"vcmla", suffix, size, (outs MQPR:$Qd),
                       (ins MQPR:$Qd_src, MQPR:$Qn, MQPR:$Qm, complexrotateop:$rot),
-                      "$Qd, $Qn, $Qm, $rot", vpred_n, "$Qd = $Qd_src", pattern> {
+                      "$Qd, $Qn, $Qm, $rot", vpred_n, "$Qd = $Qd_src", []> {
   bits<4> Qd;
   bits<4> Qn;
   bits<2> rot;
@@ -2980,8 +2980,32 @@ class MVE_VCMLA<string suffix, bit size, list<dag> pattern=[]>
   let Inst{4} = 0b0;
 }
 
-def MVE_VCMLAf16 : MVE_VCMLA<"f16", 0b0>;
-def MVE_VCMLAf32 : MVE_VCMLA<"f32", 0b1>;
+multiclass MVE_VCMLA_m<MVEVectorVTInfo VTI, bit size> {
+  def "" : MVE_VCMLA<VTI.Suffix, size>;
+
+  let Predicates = [HasMVEFloat] in {
+    def : Pat<(VTI.Vec (int_arm_mve_vcmlaq
+                            imm:$rot, (VTI.Vec MQPR:$Qd_src),
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm))),
+              (VTI.Vec (!cast<Instruction>(NAME)
+                            (VTI.Vec MQPR:$Qd_src),
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
+                            imm:$rot))>;
+
+    def : Pat<(VTI.Vec (int_arm_mve_vcmlaq_predicated
+                            imm:$rot, (VTI.Vec MQPR:$Qd_src),
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
+                            (VTI.Pred VCCR:$mask))),
+              (VTI.Vec (!cast<Instruction>(NAME)
+                            (VTI.Vec MQPR:$Qd_src), (VTI.Vec MQPR:$Qn),
+                            (VTI.Vec MQPR:$Qm), imm:$rot,
+                            ARMVCCThen, (VTI.Pred VCCR:$mask)))>;
+
+  }
+}
+
+defm MVE_VCMLAf16 : MVE_VCMLA_m<MVE_v8f16, 0b0>;
+defm MVE_VCMLAf32 : MVE_VCMLA_m<MVE_v4f32, 0b1>;
 
 class MVE_VADDSUBFMA_fp<string iname, string suffix, bit size, bit fms,
@@ -3057,10 +3081,10 @@
 defm MVE_VSUBf32 : MVE_VSUB_fp_m<MVE_v4f32>;
 defm MVE_VSUBf16 : MVE_VSUB_fp_m<MVE_v8f16>;
 
-class MVE_VCADD<string suffix, bit size, string cstr="", list<dag> pattern=[]>
+class MVE_VCADD<string suffix, bit size, string cstr="">
  : MVEFloatArithNeon<"vcadd", suffix, size, (outs MQPR:$Qd),
                      (ins MQPR:$Qn, MQPR:$Qm, complexrotateopodd:$rot),
-                     "$Qd, $Qn, $Qm, $rot", vpred_r, cstr, pattern> {
+                     "$Qd, $Qn, $Qm, $rot", vpred_r, cstr, []> {
   bits<4> Qd;
   bits<4> Qn;
   bit rot;
@@ -3077,8 +3101,31 @@ class MVE_VCADD<string suffix, bit size, string cstr="", list<dag> pattern=[]>
   let Inst{4} = 0b0;
 }
 
-def MVE_VCADDf16 : MVE_VCADD<"f16", 0b0>;
-def MVE_VCADDf32 : MVE_VCADD<"f32", 0b1, "@earlyclobber $Qd">;
+multiclass MVE_VCADD_m<MVEVectorVTInfo VTI, bit size, string cstr=""> {
+  def "" : MVE_VCADD<VTI.Suffix, size, cstr>;
+
+  let Predicates = [HasMVEFloat] in {
+    def : Pat<(VTI.Vec (int_arm_mve_vcaddq (i32 1),
+                            imm:$rot, (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm))),
+              (VTI.Vec (!cast<Instruction>(NAME)
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
+                            imm:$rot))>;
+
+    def : Pat<(VTI.Vec (int_arm_mve_vcaddq_predicated (i32 1),
+                            imm:$rot, (VTI.Vec MQPR:$inactive),
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
+                            (VTI.Pred VCCR:$mask))),
+              (VTI.Vec (!cast<Instruction>(NAME)
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
+                            imm:$rot,
+                            ARMVCCThen, (VTI.Pred VCCR:$mask),
+                            (VTI.Vec MQPR:$inactive)))>;
+
+  }
+}
+
+defm MVE_VCADDf16 : MVE_VCADD_m<MVE_v8f16, 0b0>;
+defm MVE_VCADDf32 : MVE_VCADD_m<MVE_v4f32, 0b1, "@earlyclobber $Qd">;
 
 class MVE_VABD_fp<string suffix>
   : MVE_float<"vabd", suffix, (outs MQPR:$Qd), (ins MQPR:$Qn, MQPR:$Qm),
@@ -3690,10 +3737,10 @@ defm MVE_VQDMLSDHX : MVE_VQxDMLxDH_multi<"vqdmlsdhx", 0b1, 0b0, 0b1>;
 defm MVE_VQRDMLSDH : MVE_VQxDMLxDH_multi<"vqrdmlsdh", 0b0, 0b1, 0b1>;
 defm MVE_VQRDMLSDHX : MVE_VQxDMLxDH_multi<"vqrdmlsdhx", 0b1, 0b1, 0b1>;
 
-class MVE_VCMUL<string iname, string suffix, bit size, string cstr="", list<dag> pattern=[]>
+class MVE_VCMUL<string iname, string suffix, bit size, string cstr="">
   : MVE_qDest_qSrc<iname, suffix, (outs MQPR:$Qd),
                    (ins MQPR:$Qn, MQPR:$Qm, complexrotateop:$rot),
-                   "$Qd, $Qn, $Qm, $rot", vpred_r, cstr, pattern> {
+                   "$Qd, $Qn, $Qm, $rot", vpred_r, cstr, []> {
   bits<4> Qn;
   bits<2> rot;
@@ -3709,8 +3756,33 @@ class MVE_VCMUL<string iname, string suffix, bit size, string cstr="",
   let Predicates = [HasMVEFloat];
 }
 
-def MVE_VCMULf16 : MVE_VCMUL<"vcmul", "f16", 0b0>;
-def MVE_VCMULf32 : MVE_VCMUL<"vcmul", "f32", 0b1, "@earlyclobber $Qd">;
+multiclass MVE_VCMUL_m<string iname, MVEVectorVTInfo VTI,
+                       bit size, string cstr=""> {
+  def "" : MVE_VCMUL<iname, VTI.Suffix, size, cstr>;
+
+
+  let Predicates = [HasMVEFloat] in {
+    def : Pat<(VTI.Vec (int_arm_mve_vcmulq
+                            imm:$rot, (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm))),
+              (VTI.Vec (!cast<Instruction>(NAME)
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
+                            imm:$rot))>;
+
+    def : Pat<(VTI.Vec (int_arm_mve_vcmulq_predicated
+                            imm:$rot, (VTI.Vec MQPR:$inactive),
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
+                            (VTI.Pred VCCR:$mask))),
+              (VTI.Vec (!cast<Instruction>(NAME)
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
+                            imm:$rot,
+                            ARMVCCThen, (VTI.Pred VCCR:$mask),
+                            (VTI.Vec MQPR:$inactive)))>;
+
+  }
+}
+
+defm MVE_VCMULf16 : MVE_VCMUL_m<"vcmul", MVE_v8f16, 0b0>;
+defm MVE_VCMULf32 : MVE_VCMUL_m<"vcmul", MVE_v4f32, 0b1, "@earlyclobber $Qd">;
 
 class MVE_VMULL<string iname, string suffix, bit bit_28, bits<2> bits_21_20,
                 bit T, string cstr, list<dag> pattern=[]>
@@ -3938,10 +4010,10 @@ defm MVE_VCVTf32f16bh : MVE_VCVT_h2f_m<"vcvtb", 0b0>;
 defm MVE_VCVTf32f16th : MVE_VCVT_h2f_m<"vcvtt", 0b1>;
 
 class MVE_VxCADD<string iname, string suffix, bits<2> size, bit halve,
-                 string cstr="", list<dag> pattern=[]>
+                 string cstr="">
   : MVE_qDest_qSrc<iname, suffix, (outs MQPR:$Qd),
                    (ins MQPR:$Qn, MQPR:$Qm, complexrotateopodd:$rot),
-                   "$Qd, $Qn, $Qm, $rot", vpred_r, cstr, pattern> {
+                   "$Qd, $Qn, $Qm, $rot", vpred_r, cstr, []> {
   bits<4> Qn;
   bit rot;
@@ -3955,13 +4027,37 @@ class MVE_VxCADD<string iname, string suffix, bits<2> size, bit halve,
   let Inst{0} = 0b0;
 }
 
-def MVE_VCADDi8  : MVE_VxCADD<"vcadd", "i8", 0b00, 0b1>;
-def MVE_VCADDi16 : MVE_VxCADD<"vcadd", "i16", 0b01, 0b1>;
-def MVE_VCADDi32 : MVE_VxCADD<"vcadd", "i32", 0b10, 0b1, "@earlyclobber $Qd">;
+multiclass MVE_VxCADD_m<string iname, MVEVectorVTInfo VTI,
+                        bit halve, string cstr=""> {
+  def "" : MVE_VxCADD<iname, VTI.Suffix, VTI.Size, halve, cstr>;
 
-def MVE_VHCADDs8  : MVE_VxCADD<"vhcadd", "s8", 0b00, 0b0>;
-def MVE_VHCADDs16 : MVE_VxCADD<"vhcadd", "s16", 0b01, 0b0>;
-def MVE_VHCADDs32 : MVE_VxCADD<"vhcadd", "s32", 0b10, 0b0, "@earlyclobber $Qd">;
+  let Predicates = [HasMVEInt] in {
+    def : Pat<(VTI.Vec (int_arm_mve_vcaddq halve,
+                            imm:$rot, (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm))),
+              (VTI.Vec (!cast<Instruction>(NAME)
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
+                            imm:$rot))>;
+
+    def : Pat<(VTI.Vec (int_arm_mve_vcaddq_predicated halve,
+                            imm:$rot, (VTI.Vec MQPR:$inactive),
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
+                            (VTI.Pred VCCR:$mask))),
+              (VTI.Vec (!cast<Instruction>(NAME)
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
+                            imm:$rot,
+                            ARMVCCThen, (VTI.Pred VCCR:$mask),
+                            (VTI.Vec MQPR:$inactive)))>;
+
+  }
+}
+
+defm MVE_VCADDi8  : MVE_VxCADD_m<"vcadd", MVE_v16i8, 0b1>;
+defm MVE_VCADDi16 : MVE_VxCADD_m<"vcadd", MVE_v8i16, 0b1>;
+defm MVE_VCADDi32 : MVE_VxCADD_m<"vcadd", MVE_v4i32, 0b1, "@earlyclobber $Qd">;
+
+defm MVE_VHCADDs8  : MVE_VxCADD_m<"vhcadd", MVE_v16s8, 0b0>;
+defm MVE_VHCADDs16 : MVE_VxCADD_m<"vhcadd", MVE_v8s16, 0b0>;
+defm MVE_VHCADDs32 : MVE_VxCADD_m<"vhcadd", MVE_v4s32, 0b0, "@earlyclobber $Qd">;
 
 class MVE_VADCSBC<string iname, bit I, bit subtract,
                   dag carryin, list<dag> pattern=[]>
diff --git a/test/CodeGen/Thumb2/mve-intrinsics/vcaddq.ll b/test/CodeGen/Thumb2/mve-intrinsics/vcaddq.ll
index 34fbfa4d7a4..9bb24fc61cc 100644
--- a/test/CodeGen/Thumb2/mve-intrinsics/vcaddq.ll
+++ b/test/CodeGen/Thumb2/mve-intrinsics/vcaddq.ll
@@ -23,7 +23,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vcaddq_rot90_u8(<16 x i8> %a, <16 x i8> %
 ; CHECK-NEXT:    vcadd.i8 q0, q0, q1, #90
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 0, <16 x i8> %a, <16 x i8> %b)
+  %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 1, i32 0, <16 x i8> %a, <16 x i8> %b)
   ret <16 x i8> %0
 }
 
@@ -33,7 +33,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vcaddq_rot90_u16(<8 x i16> %a, <8 x i16>
 ; CHECK-NEXT:    vcadd.i16 q0, q0, q1, #90
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 0, <8 x i16> %a, <8 x i16> %b)
+  %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 1, i32 0, <8 x i16> %a, <8 x i16> %b)
   ret <8 x i16> %0
 }
 
@@ -44,7 +44,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vcaddq_rot90_u32(<4 x i32> %a, <4 x i32>
 ; CHECK-NEXT:    vmov q0, q2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 0, i32 0, <4 x i32> %a, <4 x i32> %b)
+  %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 1, i32 0, <4 x i32> %a, <4 x i32> %b)
   ret <4 x i32> %0
 }
 
@@ -54,7 +54,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vcaddq_rot90_s8(<16 x i8> %a, <16 x i8> %
 ; CHECK-NEXT:    vcadd.i8 q0, q0, q1, #90
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 0, <16 x i8> %a, <16 x i8> %b)
+  %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 1, i32 0, <16 x i8> %a, <16 x i8> %b)
   ret <16 x i8> %0
 }
 
@@ -64,7 +64,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vcaddq_rot90_s16(<8 x i16> %a, <8 x i16>
 ; CHECK-NEXT:    vcadd.i16 q0, q0, q1, #90
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 0, <8 x i16> %a, <8 x i16> %b)
+  %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 1, i32 0, <8 x i16> %a, <8 x i16> %b)
   ret <8 x i16> %0
 }
 
@@ -75,7 +75,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vcaddq_rot90_s32(<4 x i32> %a, <4 x i32>
 ; CHECK-NEXT:    vmov q0, q2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 0, i32 0, <4 x i32> %a, <4 x i32> %b)
+  %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 1, i32 0, <4 x i32> %a, <4 x i32> %b)
   ret <4 x i32> %0
 }
 
@@ -85,7 +85,7 @@ define arm_aapcs_vfpcc <8 x half> @test_vcaddq_rot90_f16(<8 x half> %a, <8 x hal
 ; CHECK-NEXT:    vcadd.f16 q0, q0, q1, #90
 ; CHECK-NEXT:    bx lr
entry:
-  %0 = call <8 x half> @llvm.arm.mve.vcaddq.v8f16(i32 0, i32 0, <8 x half> %a, <8 x half> %b)
+  %0 = call <8 x half> @llvm.arm.mve.vcaddq.v8f16(i32 1, i32 0, <8 x half> %a, <8 x half> %b)
   ret <8 x half> %0
 }
 
@@ -96,7 +96,7 @@ define arm_aapcs_vfpcc <4 x float> @test_vcaddq_rot90_f32(<4 x float> %a, <4 x f
 ; CHECK-NEXT:    vmov q0, q2
 ; CHECK-NEXT:    bx lr
entry:
-  %0 = call <4 x float> @llvm.arm.mve.vcaddq.v4f32(i32 0, i32 0, <4 x float> %a, <4 x float> %b)
+  %0 = call <4 x float> @llvm.arm.mve.vcaddq.v4f32(i32 1, i32 0, <4 x float> %a, <4 x float> %b)
   ret <4 x float> %0
 }
 
@@ -106,7 +106,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vcaddq_rot270_u8(<16 x i8> %a, <16 x i8>
 ; CHECK-NEXT:    vcadd.i8 q0, q0, q1, #270
 ; CHECK-NEXT:    bx lr
entry:
-  %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 1, <16 x i8> %a, <16 x i8> %b)
+  %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 1, i32 1, <16 x i8> %a, <16 x i8> %b)
   ret <16 x i8> %0
 }
 
@@ -116,7 +116,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vcaddq_rot270_u16(<8 x i16> %a, <8 x i16>
 ; CHECK-NEXT:    vcadd.i16 q0, q0, q1, #270
 ; CHECK-NEXT:    bx lr
entry:
-  %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 1, <8 x i16> %a, <8 x i16> %b)
+  %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 1, i32 1, <8 x i16> %a, <8 x i16> %b)
   ret <8 x i16> %0
 }
 
@@ -127,7 +127,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vcaddq_rot270_u32(<4 x i32> %a, <4 x i32>
 ; CHECK-NEXT:    vmov q0, q2
 ; CHECK-NEXT:    bx lr
entry:
-  %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 0, i32 1, <4 x i32> %a, <4 x i32> %b)
+  %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 1, i32 1, <4 x i32> %a, <4 x i32> %b)
   ret <4 x i32> %0
 }
 
@@ -137,7 +137,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vcaddq_rot270_s8(<16 x i8> %a, <16 x i8>
 ; CHECK-NEXT:    vcadd.i8 q0, q0, q1, #270
 ; CHECK-NEXT:    bx lr
entry:
-  %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 1, <16 x i8> %a, <16 x i8> %b)
+  %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 1, i32 1, <16 x i8> %a, <16 x i8> %b)
   ret <16 x i8> %0
 }
 
@@ -147,7 +147,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vcaddq_rot270_s16(<8 x i16> %a, <8 x i16>
 ; CHECK-NEXT:    vcadd.i16 q0, q0, q1, #270
 ; CHECK-NEXT:    bx lr
entry:
-  %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 1, <8 x i16> %a, <8 x i16> %b)
+  %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 1, i32 1, <8 x i16> %a, <8 x i16> %b)
   ret <8 x i16> %0
 }
 
@@ -158,7 +158,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vcaddq_rot270_s32(<4 x i32> %a, <4 x i32>
 ; CHECK-NEXT:    vmov q0, q2
 ; CHECK-NEXT:    bx lr
entry:
-  %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 0, i32 1, <4 x i32> %a, <4 x i32> %b)
+  %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 1, i32 1, <4 x i32> %a, <4 x i32> %b)
   ret <4 x i32> %0
 }
 
@@ -168,7 +168,7 @@ define arm_aapcs_vfpcc <8 x half> @test_vcaddq_rot270_f16(<8 x half> %a, <8 x ha
 ; CHECK-NEXT:    vcadd.f16 q0, q0, q1, #270
 ; CHECK-NEXT:    bx lr
entry:
-  %0 = call <8 x half> @llvm.arm.mve.vcaddq.v8f16(i32 0, i32 1, <8 x half> %a, <8 x half> %b)
+  %0 = call <8 x half> @llvm.arm.mve.vcaddq.v8f16(i32 1, i32 1, <8 x half> %a, <8 x half> %b)
   ret <8 x half> %0
 }
 
@@ -179,7 +179,7 @@ define arm_aapcs_vfpcc <4 x float> @test_vcaddq_rot270_f32(<4 x float> %a, <4 x
 ; CHECK-NEXT:    vmov q0, q2
 ; CHECK-NEXT:    bx lr
entry:
-  %0 = call <4 x float> @llvm.arm.mve.vcaddq.v4f32(i32 0, i32 1, <4 x float> %a, <4 x float> %b)
+  %0 = call <4 x float> @llvm.arm.mve.vcaddq.v4f32(i32 1, i32 1, <4 x float> %a, <4 x float> %b)
   ret <4 x float> %0
 }
 
@@ -193,7 +193,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vcaddq_rot90_m_u8(<16 x i8> %inactive, <1
entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 0, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 0, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -207,7 +207,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vcaddq_rot90_m_u16(<8 x i16> %inactive, <
entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 0, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 0, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -221,7 +221,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vcaddq_rot90_m_u32(<4 x i32> %inactive, <
entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 0, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 0, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
@@ -235,7 +235,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vcaddq_rot90_m_s8(<16 x i8> %inactive, <1
entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 0, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 0, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -249,7 +249,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vcaddq_rot90_m_s16(<8 x i16> %inactive, <
entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 0, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 0, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -263,7 +263,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vcaddq_rot90_m_s32(<4 x i32> %inactive, <
entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 0, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 0, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
@@ -277,7 +277,7 @@ define arm_aapcs_vfpcc <8 x half> @test_vcaddq_rot90_m_f16(<8 x half> %inactive,
entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 0, i32 0, <8 x half> %inactive, <8 x half> %a, <8 x half> %b, <8 x i1> %1)
+  %2 = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 1, i32 0, <8 x half> %inactive, <8 x half> %a, <8 x half> %b, <8 x i1> %1)
   ret <8 x half> %2
 }
 
@@ -291,7 +291,7 @@ define arm_aapcs_vfpcc <4 x float> @test_vcaddq_rot90_m_f32(<4 x float> %inactiv
entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 0, i32 0, <4 x float> %inactive, <4 x float> %a, <4 x float> %b, <4 x i1> %1)
+  %2 = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 1, i32 0, <4 x float> %inactive, <4 x float> %a, <4 x float> %b, <4 x i1> %1)
   ret <4 x float> %2
 }
 
@@ -305,7 +305,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vcaddq_rot270_m_u8(<16 x i8> %inactive, <
entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 1, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 1, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -319,7 +319,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vcaddq_rot270_m_u16(<8 x i16> %inactive,
entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 1, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 1, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -333,7 +333,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vcaddq_rot270_m_u32(<4 x i32> %inactive,
entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 1, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 1, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
@@ -347,7 +347,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vcaddq_rot270_m_s8(<16 x i8> %inactive, <
entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 1, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 1, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -361,7 +361,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vcaddq_rot270_m_s16(<8 x i16> %inactive,
entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 1, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 1, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -375,7 +375,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vcaddq_rot270_m_s32(<4 x i32> %inactive,
entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 1, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 1, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
@@ -389,7 +389,7 @@ define arm_aapcs_vfpcc <8 x half> @test_vcaddq_rot270_m_f16(<8 x half> %inactive
entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 0, i32 1, <8 x half> %inactive, <8 x half> %a, <8 x half> %b, <8 x i1> %1)
+  %2 = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 1, i32 1, <8 x half> %inactive, <8 x half> %a, <8 x half> %b, <8 x i1> %1)
   ret <8 x half> %2
 }
 
@@ -403,7 +403,7 @@ define arm_aapcs_vfpcc <4 x float> @test_vcaddq_rot270_m_f32(<4 x float> %inacti
entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 0, i32 1, <4 x float> %inactive, <4 x float> %a, <4 x float> %b, <4 x i1> %1)
+  %2 = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 1, i32 1, <4 x float> %inactive, <4 x float> %a, <4 x float> %b, <4 x i1> %1)
   ret <4 x float> %2
 }
 
@@ -417,7 +417,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vcaddq_rot90_x_u8(<16 x i8> %a, <16 x i8>
entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 0, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 0, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -431,7 +431,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vcaddq_rot90_x_u16(<8 x i16> %a, <8 x i16
entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 0, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 0, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -446,7 +446,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vcaddq_rot90_x_u32(<4 x i32> %a, <4 x i32
entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 0, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 0, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
@@ -460,7 +460,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vcaddq_rot90_x_s8(<16 x i8> %a, <16 x i8>
entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 0, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 0, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -474,7 +474,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vcaddq_rot90_x_s16(<8 x i16> %a, <8 x i16
entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 0, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 0, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -489,7 +489,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vcaddq_rot90_x_s32(<4 x i32> %a, <4 x i32
entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 0, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 0, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
@@ -503,7 +503,7 @@ define arm_aapcs_vfpcc <8 x half> @test_vcaddq_rot90_x_f16(<8 x half> %a, <8 x h
entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 0, i32 0, <8 x half> undef, <8 x half> %a, <8 x half> %b, <8 x i1> %1)
+  %2 = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 1, i32 0, <8 x half> undef, <8 x half> %a, <8 x half> %b, <8 x i1> %1)
   ret <8 x half> %2
 }
 
@@ -518,7 +518,7 @@ define arm_aapcs_vfpcc <4 x float> @test_vcaddq_rot90_x_f32(<4 x float> %a, <4 x
entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 0, i32 0, <4 x float> undef, <4 x float> %a, <4 x float> %b, <4 x i1> %1)
+  %2 = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 1, i32 0, <4 x float> undef, <4 x float> %a, <4 x float> %b, <4 x i1> %1)
   ret <4 x float> %2
 }
 
@@ -532,7 +532,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vcaddq_rot270_x_u8(<16 x i8> %a, <16 x i8
entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 1, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 1, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -546,7 +546,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vcaddq_rot270_x_u16(<8 x i16> %a, <8 x i1
entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 1, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 1, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -561,7 +561,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vcaddq_rot270_x_u32(<4 x i32> %a, <4 x i3
entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 1, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 1, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
@@ -575,7 +575,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vcaddq_rot270_x_s8(<16 x i8> %a, <16 x i8
entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 1, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 1, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -589,7 +589,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vcaddq_rot270_x_s16(<8 x i16> %a, <8 x i1
entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 1, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 1, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -604,7 +604,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vcaddq_rot270_x_s32(<4 x i32> %a, <4 x i3
entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 1, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 1, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
@@ -618,7 +618,7 @@ define arm_aapcs_vfpcc <8 x half> @test_vcaddq_rot270_x_f16(<8 x half> %a, <8 x
entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 0, i32 1, <8 x half> undef, <8 x half> %a, <8 x half> %b, <8 x i1> %1)
+  %2 = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 1, i32 1, <8 x half> undef, <8 x half> %a, <8 x half> %b, <8 x i1> %1)
   ret <8 x half> %2
 }
 
@@ -633,7 +633,7 @@ define arm_aapcs_vfpcc <4 x float> @test_vcaddq_rot270_x_f32(<4 x float> %a, <4
entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 0, i32 1, <4 x float> undef, <4 x float> %a, <4 x float> %b, <4 x i1> %1)
+  %2 = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 1, i32 1, <4 x float> undef, <4 x float> %a, <4 x float> %b, <4 x i1> %1)
   ret <4 x float> %2
 }
 
@@ -643,7 +643,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vhcaddq_rot90_s8(<16 x i8> %a, <16 x i8>
 ; CHECK-NEXT:    vhcadd.s8 q0, q0, q1, #90
 ; CHECK-NEXT:    bx lr
entry:
-  %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 1, i32 0, <16 x i8> %a, <16 x i8> %b)
+  %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 0, <16 x i8> %a, <16 x i8> %b)
   ret <16 x i8> %0
 }
 
@@ -653,7 +653,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vhcaddq_rot90_s16(<8 x i16> %a, <8 x i16>
 ; CHECK-NEXT:    vhcadd.s16 q0, q0, q1, #90
 ; CHECK-NEXT:    bx lr
entry:
-  %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 1, i32 0, <8 x i16> %a, <8 x i16> %b)
+  %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 0, <8 x i16> %a, <8 x i16> %b)
   ret <8 x i16> %0
 }
 
@@ -664,7 +664,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vhcaddq_rot90_s32(<4 x i32> %a, <4 x i32>
 ; CHECK-NEXT:    vmov q0, q2
 ; CHECK-NEXT:    bx lr
entry:
-  %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 1, i32 0, <4 x i32> %a, <4 x i32> %b)
+  %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 0, i32 0, <4 x i32> %a, <4 x i32> %b)
   ret <4 x i32> %0
 }
 
@@ -674,7 +674,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vhcaddq_rot270_s8(<16 x i8> %a, <16 x i8>
 ; CHECK-NEXT:    vhcadd.s8 q0, q0, q1, #270
 ; CHECK-NEXT:    bx lr
entry:
-  %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 1, i32 1, <16 x i8> %a, <16 x i8> %b)
+  %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 1, <16 x i8> %a, <16 x i8> %b)
   ret <16 x i8> %0
 }
 
@@ -684,7 +684,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vhcaddq_rot270_s16(<8 x i16> %a, <8 x i16
 ; CHECK-NEXT:    vhcadd.s16 q0, q0, q1, #270
 ; CHECK-NEXT:    bx lr
entry:
-  %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 1, i32 1, <8 x i16> %a, <8 x i16> %b)
+  %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 1, <8 x i16> %a, <8 x i16> %b)
   ret <8 x i16> %0
 }
 
@@ -695,7 +695,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vhcaddq_rot270_s32(<4 x i32> %a, <4 x i32
 ; CHECK-NEXT:    vmov q0, q2
 ; CHECK-NEXT:    bx lr
entry:
-  %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 1, i32 1, <4 x i32> %a, <4 x i32> %b)
+  %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 0, i32 1, <4 x i32> %a, <4 x i32> %b)
   ret <4 x i32> %0
 }
 
@@ -709,7 +709,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vhcaddq_rot90_x_s8(<16 x i8> %a, <16 x i8
entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 0, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 0, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -723,7 +723,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vhcaddq_rot90_x_s16(<8 x i16> %a, <8 x i1
entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 0, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 0, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -738,7 +738,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vhcaddq_rot90_x_s32(<4 x i32> %a, <4 x i3
entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 0, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 0, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
@@ -752,7 +752,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vhcaddq_rot270_x_s8(<16 x i8> %a, <16 x i
entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 1, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 1, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -766,7 +766,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vhcaddq_rot270_x_s16(<8 x i16> %a, <8 x i
entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 1, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 1, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -781,7 +781,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vhcaddq_rot270_x_s32(<4 x i32> %a, <4 x i
entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 1, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 1, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
@@ -795,7 +795,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vhcaddq_rot90_m_s8(<16 x i8> %inactive, <
entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 0, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 0, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -809,7 +809,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vhcaddq_rot90_m_s16(<8 x i16> %inactive,
entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 0, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 0, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -823,7 +823,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vhcaddq_rot90_m_s32(<4 x i32> %inactive,
entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 0, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 0, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
@@ -837,7 +837,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vhcaddq_rot270_m_s8(<16 x i8> %inactive,
entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 1, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 1, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -851,7 +851,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vhcaddq_rot270_m_s16(<8 x i16> %inactive,
entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 1, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 1, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -865,6 +865,6 @@ define arm_aapcs_vfpcc <4 x i32> @test_vhcaddq_rot270_m_s32(<4 x i32> %inactive,
entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 1, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 1, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
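
Note on the inverted halving flag (an illustrative sketch appended for review, not part of the patch): the first immediate of @llvm.arm.mve.vcaddq selects the instruction, and it is deliberately inverted relative to the intuitive "is halving" reading so that it matches the corresponding encoding bit — i32 1 selects the plain VCADD, i32 0 selects the halving VHCADD. A minimal IR fragment modeled on the tests above (the function name is hypothetical; the intrinsic signature and expected selections are taken from the test file):

  declare <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32, i32, <8 x i16>, <8 x i16>)

  define arm_aapcs_vfpcc <8 x i16> @halving_flag_example(<8 x i16> %a, <8 x i16> %b) {
  entry:
    ; i32 1 = non-halving, rotation 0 = #90   ->  vcadd.i16 q0, q0, q1, #90
    %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 1, i32 0, <8 x i16> %a, <8 x i16> %b)
    ; i32 0 = halving, rotation 1 = #270      ->  vhcadd.s16 q0, q0, q1, #270
    %1 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 1, <8 x i16> %0, <8 x i16> %b)
    ret <8 x i16> %1
  }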