
[ARM] Add predicated add reduction patterns

Given a vecreduce.add(select(p, x, 0)), we can convert that to a
predicated vaddv, as the else value for the select is the identity
value, a zero. That is what this patch does for the vaddv, vaddva,
vaddlv and vaddlva instructions, copying the existing patterns to also
handle predication through a select.

Differential Revision: https://reviews.llvm.org/D84101
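
As an illustration, a minimal IR sketch of the shape this matches (the function name, value names and the experimental reduce-add intrinsic spelling are illustrative assumptions, not taken from this commit):

define arm_aapcs_vfpcc i32 @vaddv_pred_sketch(<8 x i16> %x, <8 x i16> %b) {
entry:
  %c = icmp eq <8 x i16> %b, zeroinitializer
  %xx = sext <8 x i16> %x to <8 x i32>
  ; the else value of the select is all zeros, the identity for the add reduction
  %s = select <8 x i1> %c, <8 x i32> %xx, <8 x i32> zeroinitializer
  %r = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %s)
  ret i32 %r
}
declare i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32>)

With these patterns this should now select to roughly a vpt.i16 eq, q1, zr followed by vaddvt.s16 r0, q0, rather than materializing a zero vector and a vpsel in front of an unpredicated vaddv.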
David Green 2020-07-22 17:30:02 +01:00
parent b4f9e0d70d
commit 030e640555
5 changed files with 193 additions and 1101 deletions

View File

@ -1718,6 +1718,8 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
case ARMISD::VMULLu: return "ARMISD::VMULLu";
case ARMISD::VADDVs: return "ARMISD::VADDVs";
case ARMISD::VADDVu: return "ARMISD::VADDVu";
case ARMISD::VADDVps: return "ARMISD::VADDVps";
case ARMISD::VADDVpu: return "ARMISD::VADDVpu";
case ARMISD::VADDLVs: return "ARMISD::VADDLVs";
case ARMISD::VADDLVu: return "ARMISD::VADDLVu";
case ARMISD::VADDLVAs: return "ARMISD::VADDLVAs";
@ -14729,6 +14731,20 @@ static SDValue PerformVECREDUCE_ADDCombine(SDNode *N, SelectionDAG &DAG,
return A;
return SDValue();
};
auto IsPredVADDV = [&](MVT RetTy, unsigned ExtendCode,
ArrayRef<MVT> ExtTypes, SDValue &Mask) {
if (ResVT != RetTy || N0->getOpcode() != ISD::VSELECT ||
!ISD::isBuildVectorAllZeros(N0->getOperand(2).getNode()))
return SDValue();
Mask = N0->getOperand(0);
SDValue Ext = N0->getOperand(1);
if (Ext->getOpcode() != ExtendCode)
return SDValue();
SDValue A = Ext->getOperand(0);
if (llvm::any_of(ExtTypes, [&A](MVT Ty) { return A.getValueType() == Ty; }))
return A;
return SDValue();
};
auto IsVMLAV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes,
SDValue &A, SDValue &B) {
if (ResVT != RetTy || N0->getOpcode() != ISD::MUL)
@ -14759,6 +14775,16 @@ static SDValue PerformVECREDUCE_ADDCombine(SDNode *N, SelectionDAG &DAG,
if (SDValue A = IsVADDV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v4i32}))
return Create64bitNode(ARMISD::VADDLVu, {A});
SDValue Mask;
if (SDValue A = IsPredVADDV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, Mask))
return DAG.getNode(ARMISD::VADDVps, dl, ResVT, A, Mask);
if (SDValue A = IsPredVADDV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, Mask))
return DAG.getNode(ARMISD::VADDVpu, dl, ResVT, A, Mask);
if (SDValue A = IsPredVADDV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v4i32}, Mask))
return Create64bitNode(ARMISD::VADDLVps, {A, Mask});
if (SDValue A = IsPredVADDV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v4i32}, Mask))
return Create64bitNode(ARMISD::VADDLVpu, {A, Mask});
SDValue A, B;
if (IsVMLAV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B))
return DAG.getNode(ARMISD::VMLAVs, dl, ResVT, A, B);
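
The i64-returning cases go through Create64bitNode, which rebuilds the result from the low and high halves that VADDLV returns. A hedged sketch of an input that should now hit the predicated long variant (names and the expected output are assumptions for illustration):

define arm_aapcs_vfpcc i64 @vaddlv_pred_sketch(<4 x i32> %x, <4 x i32> %b) {
entry:
  %c = icmp eq <4 x i32> %b, zeroinitializer
  %xx = sext <4 x i32> %x to <4 x i64>
  %s = select <4 x i1> %c, <4 x i64> %xx, <4 x i64> zeroinitializer
  %r = call i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64> %s)
  ret i64 %r
}
declare i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64>)

This should lower to roughly vpt.i32 eq, q1, zr and vaddlvt.s32 r0, r1, q0, with r0 and r1 holding the low and high 32-bit halves of the 64-bit sum.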

View File

@ -219,14 +219,16 @@ class VectorType;
// MVE reductions
VADDVs, // sign- or zero-extend the elements of a vector to i32,
VADDVu, // add them all together, and return an i32 of their sum
VADDVps, // Same as VADDV[su] but with a v4i1 predicate mask
VADDVpu,
VADDLVs, // sign- or zero-extend elements to i64 and sum, returning
VADDLVu, // the low and high 32-bit halves of the sum
VADDLVAs, // same as VADDLV[su] but also add an input accumulator
VADDLVAs, // Same as VADDLV[su] but also add an input accumulator
VADDLVAu, // provided as low and high halves
VADDLVps, // same as VADDLVs but with a v4i1 predicate mask
VADDLVpu, // same as VADDLVu but with a v4i1 predicate mask
VADDLVAps, // same as VADDLVps but with a v4i1 predicate mask
VADDLVApu, // same as VADDLVpu but with a v4i1 predicate mask
VADDLVps, // Same as VADDLV[su] but with a v4i1 predicate mask
VADDLVpu,
VADDLVAps, // Same as VADDLVp[su] but with a v4i1 predicate mask
VADDLVApu,
VMLAVs,
VMLAVu,
VMLALVs,

View File

@ -684,8 +684,13 @@ class MVE_VADDV<string iname, string suffix, dag iops, string cstr,
let validForTailPredication = 1;
}
def SDTVecReduceP : SDTypeProfile<1, 2, [ // VADDLVp
SDTCisInt<0>, SDTCisVec<1>, SDTCisVec<2>
]>;
def ARMVADDVs : SDNode<"ARMISD::VADDVs", SDTVecReduce>;
def ARMVADDVu : SDNode<"ARMISD::VADDVu", SDTVecReduce>;
def ARMVADDVps : SDNode<"ARMISD::VADDVps", SDTVecReduceP>;
def ARMVADDVpu : SDNode<"ARMISD::VADDVpu", SDTVecReduceP>;
multiclass MVE_VADDV_A<MVEVectorVTInfo VTI> {
def acc : MVE_VADDV<"vaddva", VTI.Suffix,
@ -702,20 +707,39 @@ multiclass MVE_VADDV_A<MVEVectorVTInfo VTI> {
if VTI.Unsigned then {
def : Pat<(i32 (vecreduce_add (VTI.Vec MQPR:$vec))),
(i32 (InstN $vec))>;
def : Pat<(i32 (vecreduce_add (VTI.Vec (vselect (VTI.Pred VCCR:$pred),
(VTI.Vec MQPR:$vec),
(VTI.Vec ARMimmAllZerosV))))),
(i32 (InstN $vec, ARMVCCThen, $pred))>;
def : Pat<(i32 (ARMVADDVu (VTI.Vec MQPR:$vec))),
(i32 (InstN $vec))>;
def : Pat<(i32 (ARMVADDVpu (VTI.Vec MQPR:$vec), (VTI.Pred VCCR:$pred))),
(i32 (InstN $vec, ARMVCCThen, $pred))>;
def : Pat<(i32 (add (i32 (vecreduce_add (VTI.Vec MQPR:$vec))),
(i32 tGPREven:$acc))),
(i32 (InstA $acc, $vec))>;
def : Pat<(i32 (add (i32 (vecreduce_add (VTI.Vec (vselect (VTI.Pred VCCR:$pred),
(VTI.Vec MQPR:$vec),
(VTI.Vec ARMimmAllZerosV))))),
(i32 tGPREven:$acc))),
(i32 (InstA $acc, $vec, ARMVCCThen, $pred))>;
def : Pat<(i32 (add (i32 (ARMVADDVu (VTI.Vec MQPR:$vec))),
(i32 tGPREven:$acc))),
(i32 (InstA $acc, $vec))>;
def : Pat<(i32 (add (i32 (ARMVADDVpu (VTI.Vec MQPR:$vec), (VTI.Pred VCCR:$pred))),
(i32 tGPREven:$acc))),
(i32 (InstA $acc, $vec, ARMVCCThen, $pred))>;
} else {
def : Pat<(i32 (ARMVADDVs (VTI.Vec MQPR:$vec))),
(i32 (InstN $vec))>;
def : Pat<(i32 (add (i32 (ARMVADDVs (VTI.Vec MQPR:$vec))),
(i32 tGPREven:$acc))),
(i32 (InstA $acc, $vec))>;
def : Pat<(i32 (ARMVADDVps (VTI.Vec MQPR:$vec), (VTI.Pred VCCR:$pred))),
(i32 (InstN $vec, ARMVCCThen, $pred))>;
def : Pat<(i32 (add (i32 (ARMVADDVps (VTI.Vec MQPR:$vec), (VTI.Pred VCCR:$pred))),
(i32 tGPREven:$acc))),
(i32 (InstA $acc, $vec, ARMVCCThen, $pred))>;
}
def : Pat<(i32 (int_arm_mve_addv_predicated (VTI.Vec MQPR:$vec),
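
The accumulating patterns above fold an outer add of an even GPR into the predicated VADDVA form. A hedged sketch of IR that should exercise them (names and the expected instruction are illustrative assumptions):

define arm_aapcs_vfpcc i32 @vaddva_pred_sketch(<8 x i16> %x, <8 x i16> %b, i32 %acc) {
entry:
  %c = icmp eq <8 x i16> %b, zeroinitializer
  %xx = zext <8 x i16> %x to <8 x i32>
  %s = select <8 x i1> %c, <8 x i32> %xx, <8 x i32> zeroinitializer
  %r = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %s)
  %a = add i32 %r, %acc
  ret i32 %a
}
declare i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32>)

The VECREDUCE_ADD combine first produces an ARMISD::VADDVpu node, and the (add (ARMVADDVpu ...), tGPREven:$acc) pattern then selects roughly vpt.i16 eq, q1, zr plus vaddvat.u16 r0, q0, with the accumulator already live in r0.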

File diff suppressed because it is too large.

View File

@ -4,10 +4,9 @@
define arm_aapcs_vfpcc i32 @add_v4i32_v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> %b) {
; CHECK-LABEL: add_v4i32_v4i32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.i32 q3, #0x0
; CHECK-NEXT: vmul.i32 q0, q0, q1
; CHECK-NEXT: vpt.i32 eq, q2, zr
; CHECK-NEXT: vmult.i32 q3, q0, q1
; CHECK-NEXT: vaddv.u32 r0, q3
; CHECK-NEXT: vaddvt.u32 r0, q0
; CHECK-NEXT: bx lr
entry:
%c = icmp eq <4 x i32> %b, zeroinitializer
@ -381,14 +380,10 @@ entry:
define arm_aapcs_vfpcc i32 @add_v4i16_v4i32_zext(<4 x i16> %x, <4 x i16> %y, <4 x i16> %b) {
; CHECK-LABEL: add_v4i16_v4i32_zext:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmovlb.u16 q2, q2
; CHECK-NEXT: vmovlb.u16 q1, q1
; CHECK-NEXT: vcmp.i32 eq, q2, zr
; CHECK-NEXT: vmov.i32 q2, #0x0
; CHECK-NEXT: vmovlb.u16 q0, q0
; CHECK-NEXT: vpst
; CHECK-NEXT: vmult.i32 q2, q0, q1
; CHECK-NEXT: vaddv.u32 r0, q2
; CHECK-NEXT: vmullb.u16 q0, q0, q1
; CHECK-NEXT: vmovlb.u16 q1, q2
; CHECK-NEXT: vpt.i32 eq, q1, zr
; CHECK-NEXT: vaddvt.u32 r0, q0
; CHECK-NEXT: bx lr
entry:
%c = icmp eq <4 x i16> %b, zeroinitializer
@ -403,14 +398,10 @@ entry:
define arm_aapcs_vfpcc i32 @add_v4i16_v4i32_sext(<4 x i16> %x, <4 x i16> %y, <4 x i16> %b) {
; CHECK-LABEL: add_v4i16_v4i32_sext:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmovlb.u16 q2, q2
; CHECK-NEXT: vmovlb.s16 q1, q1
; CHECK-NEXT: vcmp.i32 eq, q2, zr
; CHECK-NEXT: vmov.i32 q2, #0x0
; CHECK-NEXT: vmovlb.s16 q0, q0
; CHECK-NEXT: vpst
; CHECK-NEXT: vmult.i32 q2, q0, q1
; CHECK-NEXT: vaddv.u32 r0, q2
; CHECK-NEXT: vmullb.s16 q0, q0, q1
; CHECK-NEXT: vmovlb.u16 q1, q2
; CHECK-NEXT: vpt.i32 eq, q1, zr
; CHECK-NEXT: vaddvt.u32 r0, q0
; CHECK-NEXT: bx lr
entry:
%c = icmp eq <4 x i16> %b, zeroinitializer
@ -425,10 +416,9 @@ entry:
define arm_aapcs_vfpcc zeroext i16 @add_v8i16_v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %b) {
; CHECK-LABEL: add_v8i16_v8i16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.i32 q3, #0x0
; CHECK-NEXT: vmul.i16 q0, q0, q1
; CHECK-NEXT: vpt.i16 eq, q2, zr
; CHECK-NEXT: vmult.i16 q3, q0, q1
; CHECK-NEXT: vaddv.u16 r0, q3
; CHECK-NEXT: vaddvt.u16 r0, q0
; CHECK-NEXT: uxth r0, r0
; CHECK-NEXT: bx lr
entry:
@ -1290,14 +1280,12 @@ define arm_aapcs_vfpcc i32 @add_v4i8_v4i32_zext(<4 x i8> %x, <4 x i8> %y, <4 x i
; CHECK-LABEL: add_v4i8_v4i32_zext:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.i32 q3, #0xff
; CHECK-NEXT: vand q2, q2, q3
; CHECK-NEXT: vand q1, q1, q3
; CHECK-NEXT: vcmp.i32 eq, q2, zr
; CHECK-NEXT: vmov.i32 q2, #0x0
; CHECK-NEXT: vand q0, q0, q3
; CHECK-NEXT: vpst
; CHECK-NEXT: vmult.i32 q2, q0, q1
; CHECK-NEXT: vaddv.u32 r0, q2
; CHECK-NEXT: vmul.i32 q0, q0, q1
; CHECK-NEXT: vand q1, q2, q3
; CHECK-NEXT: vpt.i32 eq, q1, zr
; CHECK-NEXT: vaddvt.u32 r0, q0
; CHECK-NEXT: bx lr
entry:
%c = icmp eq <4 x i8> %b, zeroinitializer
@ -1312,17 +1300,15 @@ entry:
define arm_aapcs_vfpcc i32 @add_v4i8_v4i32_sext(<4 x i8> %x, <4 x i8> %y, <4 x i8> %b) {
; CHECK-LABEL: add_v4i8_v4i32_sext:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.i32 q3, #0xff
; CHECK-NEXT: vmovlb.s8 q1, q1
; CHECK-NEXT: vand q2, q2, q3
; CHECK-NEXT: vmovlb.s8 q0, q0
; CHECK-NEXT: vcmp.i32 eq, q2, zr
; CHECK-NEXT: vmov.i32 q2, #0x0
; CHECK-NEXT: vmovlb.s16 q1, q1
; CHECK-NEXT: vmovlb.s16 q0, q0
; CHECK-NEXT: vpst
; CHECK-NEXT: vmult.i32 q2, q0, q1
; CHECK-NEXT: vaddv.u32 r0, q2
; CHECK-NEXT: vmul.i32 q0, q0, q1
; CHECK-NEXT: vmov.i32 q1, #0xff
; CHECK-NEXT: vand q1, q2, q1
; CHECK-NEXT: vpt.i32 eq, q1, zr
; CHECK-NEXT: vaddvt.u32 r0, q0
; CHECK-NEXT: bx lr
entry:
%c = icmp eq <4 x i8> %b, zeroinitializer
@ -1593,14 +1579,10 @@ entry:
define arm_aapcs_vfpcc zeroext i16 @add_v8i8_v8i16_zext(<8 x i8> %x, <8 x i8> %y, <8 x i8> %b) {
; CHECK-LABEL: add_v8i8_v8i16_zext:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmovlb.u8 q2, q2
; CHECK-NEXT: vmovlb.u8 q1, q1
; CHECK-NEXT: vcmp.i16 eq, q2, zr
; CHECK-NEXT: vmov.i32 q2, #0x0
; CHECK-NEXT: vmovlb.u8 q0, q0
; CHECK-NEXT: vpst
; CHECK-NEXT: vmult.i16 q2, q0, q1
; CHECK-NEXT: vaddv.u16 r0, q2
; CHECK-NEXT: vmullb.u8 q0, q0, q1
; CHECK-NEXT: vmovlb.u8 q1, q2
; CHECK-NEXT: vpt.i16 eq, q1, zr
; CHECK-NEXT: vaddvt.u16 r0, q0
; CHECK-NEXT: uxth r0, r0
; CHECK-NEXT: bx lr
entry:
@ -1616,14 +1598,10 @@ entry:
define arm_aapcs_vfpcc signext i16 @add_v8i8_v8i16_sext(<8 x i8> %x, <8 x i8> %y, <8 x i8> %b) {
; CHECK-LABEL: add_v8i8_v8i16_sext:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmovlb.u8 q2, q2
; CHECK-NEXT: vmovlb.s8 q1, q1
; CHECK-NEXT: vcmp.i16 eq, q2, zr
; CHECK-NEXT: vmov.i32 q2, #0x0
; CHECK-NEXT: vmovlb.s8 q0, q0
; CHECK-NEXT: vpst
; CHECK-NEXT: vmult.i16 q2, q0, q1
; CHECK-NEXT: vaddv.u16 r0, q2
; CHECK-NEXT: vmullb.s8 q0, q0, q1
; CHECK-NEXT: vmovlb.u8 q1, q2
; CHECK-NEXT: vpt.i16 eq, q1, zr
; CHECK-NEXT: vaddvt.u16 r0, q0
; CHECK-NEXT: sxth r0, r0
; CHECK-NEXT: bx lr
entry:
@ -1639,10 +1617,9 @@ entry:
define arm_aapcs_vfpcc zeroext i8 @add_v16i8_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %b) {
; CHECK-LABEL: add_v16i8_v16i8:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.i32 q3, #0x0
; CHECK-NEXT: vmul.i8 q0, q0, q1
; CHECK-NEXT: vpt.i8 eq, q2, zr
; CHECK-NEXT: vmult.i8 q3, q0, q1
; CHECK-NEXT: vaddv.u8 r0, q3
; CHECK-NEXT: vaddvt.u8 r0, q0
; CHECK-NEXT: uxtb r0, r0
; CHECK-NEXT: bx lr
entry:
@ -2567,10 +2544,9 @@ entry:
define arm_aapcs_vfpcc i32 @add_v4i32_v4i32_acc(<4 x i32> %x, <4 x i32> %y, <4 x i32> %b, i32 %a) {
; CHECK-LABEL: add_v4i32_v4i32_acc:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.i32 q3, #0x0
; CHECK-NEXT: vmul.i32 q0, q0, q1
; CHECK-NEXT: vpt.i32 eq, q2, zr
; CHECK-NEXT: vmult.i32 q3, q0, q1
; CHECK-NEXT: vaddva.u32 r0, q3
; CHECK-NEXT: vaddvat.u32 r0, q0
; CHECK-NEXT: bx lr
entry:
%c = icmp eq <4 x i32> %b, zeroinitializer
@ -2967,14 +2943,10 @@ entry:
define arm_aapcs_vfpcc i32 @add_v4i16_v4i32_acc_zext(<4 x i16> %x, <4 x i16> %y, <4 x i16> %b, i32 %a) {
; CHECK-LABEL: add_v4i16_v4i32_acc_zext:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmovlb.u16 q2, q2
; CHECK-NEXT: vmovlb.u16 q1, q1
; CHECK-NEXT: vcmp.i32 eq, q2, zr
; CHECK-NEXT: vmov.i32 q2, #0x0
; CHECK-NEXT: vmovlb.u16 q0, q0
; CHECK-NEXT: vpst
; CHECK-NEXT: vmult.i32 q2, q0, q1
; CHECK-NEXT: vaddva.u32 r0, q2
; CHECK-NEXT: vmullb.u16 q0, q0, q1
; CHECK-NEXT: vmovlb.u16 q1, q2
; CHECK-NEXT: vpt.i32 eq, q1, zr
; CHECK-NEXT: vaddvat.u32 r0, q0
; CHECK-NEXT: bx lr
entry:
%c = icmp eq <4 x i16> %b, zeroinitializer
@ -2990,14 +2962,10 @@ entry:
define arm_aapcs_vfpcc i32 @add_v4i16_v4i32_acc_sext(<4 x i16> %x, <4 x i16> %y, <4 x i16> %b, i32 %a) {
; CHECK-LABEL: add_v4i16_v4i32_acc_sext:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmovlb.u16 q2, q2
; CHECK-NEXT: vmovlb.s16 q1, q1
; CHECK-NEXT: vcmp.i32 eq, q2, zr
; CHECK-NEXT: vmov.i32 q2, #0x0
; CHECK-NEXT: vmovlb.s16 q0, q0
; CHECK-NEXT: vpst
; CHECK-NEXT: vmult.i32 q2, q0, q1
; CHECK-NEXT: vaddva.u32 r0, q2
; CHECK-NEXT: vmullb.s16 q0, q0, q1
; CHECK-NEXT: vmovlb.u16 q1, q2
; CHECK-NEXT: vpt.i32 eq, q1, zr
; CHECK-NEXT: vaddvat.u32 r0, q0
; CHECK-NEXT: bx lr
entry:
%c = icmp eq <4 x i16> %b, zeroinitializer
@ -3013,10 +2981,9 @@ entry:
define arm_aapcs_vfpcc zeroext i16 @add_v8i16_v8i16_acc(<8 x i16> %x, <8 x i16> %y, <8 x i16> %b, i16 %a) {
; CHECK-LABEL: add_v8i16_v8i16_acc:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.i32 q3, #0x0
; CHECK-NEXT: vmul.i16 q0, q0, q1
; CHECK-NEXT: vpt.i16 eq, q2, zr
; CHECK-NEXT: vmult.i16 q3, q0, q1
; CHECK-NEXT: vaddva.u16 r0, q3
; CHECK-NEXT: vaddvat.u16 r0, q0
; CHECK-NEXT: uxth r0, r0
; CHECK-NEXT: bx lr
entry:
@ -3899,14 +3866,12 @@ define arm_aapcs_vfpcc i32 @add_v4i8_v4i32_acc_zext(<4 x i8> %x, <4 x i8> %y, <4
; CHECK-LABEL: add_v4i8_v4i32_acc_zext:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.i32 q3, #0xff
; CHECK-NEXT: vand q2, q2, q3
; CHECK-NEXT: vand q1, q1, q3
; CHECK-NEXT: vcmp.i32 eq, q2, zr
; CHECK-NEXT: vmov.i32 q2, #0x0
; CHECK-NEXT: vand q0, q0, q3
; CHECK-NEXT: vpst
; CHECK-NEXT: vmult.i32 q2, q0, q1
; CHECK-NEXT: vaddva.u32 r0, q2
; CHECK-NEXT: vmul.i32 q0, q0, q1
; CHECK-NEXT: vand q1, q2, q3
; CHECK-NEXT: vpt.i32 eq, q1, zr
; CHECK-NEXT: vaddvat.u32 r0, q0
; CHECK-NEXT: bx lr
entry:
%c = icmp eq <4 x i8> %b, zeroinitializer
@ -3922,17 +3887,15 @@ entry:
define arm_aapcs_vfpcc i32 @add_v4i8_v4i32_acc_sext(<4 x i8> %x, <4 x i8> %y, <4 x i8> %b, i32 %a) {
; CHECK-LABEL: add_v4i8_v4i32_acc_sext:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.i32 q3, #0xff
; CHECK-NEXT: vmovlb.s8 q1, q1
; CHECK-NEXT: vand q2, q2, q3
; CHECK-NEXT: vmovlb.s8 q0, q0
; CHECK-NEXT: vcmp.i32 eq, q2, zr
; CHECK-NEXT: vmov.i32 q2, #0x0
; CHECK-NEXT: vmovlb.s16 q1, q1
; CHECK-NEXT: vmovlb.s16 q0, q0
; CHECK-NEXT: vpst
; CHECK-NEXT: vmult.i32 q2, q0, q1
; CHECK-NEXT: vaddva.u32 r0, q2
; CHECK-NEXT: vmul.i32 q0, q0, q1
; CHECK-NEXT: vmov.i32 q1, #0xff
; CHECK-NEXT: vand q1, q2, q1
; CHECK-NEXT: vpt.i32 eq, q1, zr
; CHECK-NEXT: vaddvat.u32 r0, q0
; CHECK-NEXT: bx lr
entry:
%c = icmp eq <4 x i8> %b, zeroinitializer
@ -4206,14 +4169,10 @@ entry:
define arm_aapcs_vfpcc zeroext i16 @add_v8i8_v8i16_acc_zext(<8 x i8> %x, <8 x i8> %y, <8 x i8> %b, i16 %a) {
; CHECK-LABEL: add_v8i8_v8i16_acc_zext:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmovlb.u8 q2, q2
; CHECK-NEXT: vmovlb.u8 q1, q1
; CHECK-NEXT: vcmp.i16 eq, q2, zr
; CHECK-NEXT: vmov.i32 q2, #0x0
; CHECK-NEXT: vmovlb.u8 q0, q0
; CHECK-NEXT: vpst
; CHECK-NEXT: vmult.i16 q2, q0, q1
; CHECK-NEXT: vaddva.u16 r0, q2
; CHECK-NEXT: vmullb.u8 q0, q0, q1
; CHECK-NEXT: vmovlb.u8 q1, q2
; CHECK-NEXT: vpt.i16 eq, q1, zr
; CHECK-NEXT: vaddvat.u16 r0, q0
; CHECK-NEXT: uxth r0, r0
; CHECK-NEXT: bx lr
entry:
@ -4230,14 +4189,10 @@ entry:
define arm_aapcs_vfpcc signext i16 @add_v8i8_v8i16_acc_sext(<8 x i8> %x, <8 x i8> %y, <8 x i8> %b, i16 %a) {
; CHECK-LABEL: add_v8i8_v8i16_acc_sext:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmovlb.u8 q2, q2
; CHECK-NEXT: vmovlb.s8 q1, q1
; CHECK-NEXT: vcmp.i16 eq, q2, zr
; CHECK-NEXT: vmov.i32 q2, #0x0
; CHECK-NEXT: vmovlb.s8 q0, q0
; CHECK-NEXT: vpst
; CHECK-NEXT: vmult.i16 q2, q0, q1
; CHECK-NEXT: vaddva.u16 r0, q2
; CHECK-NEXT: vmullb.s8 q0, q0, q1
; CHECK-NEXT: vmovlb.u8 q1, q2
; CHECK-NEXT: vpt.i16 eq, q1, zr
; CHECK-NEXT: vaddvat.u16 r0, q0
; CHECK-NEXT: sxth r0, r0
; CHECK-NEXT: bx lr
entry:
@ -4254,10 +4209,9 @@ entry:
define arm_aapcs_vfpcc zeroext i8 @add_v16i8_v16i8_acc(<16 x i8> %x, <16 x i8> %y, <16 x i8> %b, i8 %a) {
; CHECK-LABEL: add_v16i8_v16i8_acc:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.i32 q3, #0x0
; CHECK-NEXT: vmul.i8 q0, q0, q1
; CHECK-NEXT: vpt.i8 eq, q2, zr
; CHECK-NEXT: vmult.i8 q3, q0, q1
; CHECK-NEXT: vaddva.u8 r0, q3
; CHECK-NEXT: vaddvat.u8 r0, q0
; CHECK-NEXT: uxtb r0, r0
; CHECK-NEXT: bx lr
entry: