Mirror of https://github.com/RPCS3/llvm-mirror.git
[X86][SSE] Move unpack(hop,hop) fold from foldShuffleOfHorizOp to combineTargetShuffle
By moving this fold until after more of the shuffle canonicalization has run, we reduce the demanded vector elements, avoiding a few unnecessary copies/moves.
Commit: 6c8dead54f
Parent: 8c4acc26ec
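As background for the diff below: the fold rewrites an unpack of two horizontal-op results into a single horizontal op followed by a lane permute. Here is a minimal scalar sketch of that identity on 4 x 32-bit lanes; the hadd, unpcklo, unpckhi and permute0213 helpers are illustrative stand-ins, not LLVM APIs.

// Scalar model of unpcklo(hop(x,y),hop(z,w)) -> permute(hop(x,z)) and the
// unpckhi(hop(x,y),hop(z,w)) -> permute(hop(y,w)) variant.
#include <array>
#include <cassert>

using V4 = std::array<int, 4>;

// hadd(a,b) = [a0+a1, a2+a3, b0+b1, b2+b3], matching PHADDD/HADDPS lane order.
static V4 hadd(V4 a, V4 b) {
  return {a[0] + a[1], a[2] + a[3], b[0] + b[1], b[2] + b[3]};
}
static V4 unpcklo(V4 a, V4 b) { return {a[0], b[0], a[1], b[1]}; }
static V4 unpckhi(V4 a, V4 b) { return {a[2], b[2], a[3], b[3]}; }
static V4 permute0213(V4 a) { return {a[0], a[2], a[1], a[3]}; }

int main() {
  V4 x{1, 2, 3, 4}, y{5, 6, 7, 8}, z{9, 10, 11, 12}, w{13, 14, 15, 16};
  // unpcklo(hop(x,y),hop(z,w)) == permute(hop(x,z)) with mask {0,2,1,3}.
  assert(unpcklo(hadd(x, y), hadd(z, w)) == permute0213(hadd(x, z)));
  // unpckhi(hop(x,y),hop(z,w)) == permute(hop(y,w)) with the same mask.
  assert(unpckhi(hadd(x, y), hadd(z, w)) == permute0213(hadd(y, w)));
  return 0;
}

The combine itself only fires for 32-bit elements and when the two hop operands differ, as the guards in the new UNPCKL/UNPCKH case below show.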
llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -37566,6 +37566,29 @@ static SDValue combineTargetShuffle(SDValue N, SelectionDAG &DAG,
    }
    return SDValue();
  }
  case X86ISD::UNPCKL:
  case X86ISD::UNPCKH: {
    // unpcklo(hop(x,y),hop(z,w)) -> permute(hop(x,z)).
    // unpckhi(hop(x,y),hop(z,w)) -> permute(hop(y,w)).
    // Don't fold if hop(x,y) == hop(z,w).
    // TODO: Merge this into canonicalizeShuffleMaskWithHorizOp?
    SDValue N0 = N.getOperand(0);
    SDValue N1 = N.getOperand(1);
    if (VT.getScalarSizeInBits() == 32 && N0 != N1 &&
        N0.getOpcode() == N1.getOpcode() && isHorizOp(N0.getOpcode())) {
      unsigned LoHi = Opcode == X86ISD::UNPCKL ? 0 : 1;
      SDValue Res = DAG.getNode(N0.getOpcode(), DL, VT, N0.getOperand(LoHi),
                                N1.getOperand(LoHi));
      // Use SHUFPS for the permute so this will work on SSE3 targets, shuffle
      // combining and domain handling will simplify this later on.
      EVT ShuffleVT = VT.changeVectorElementType(MVT::f32);
      Res = DAG.getBitcast(ShuffleVT, Res);
      Res = DAG.getNode(X86ISD::SHUFP, DL, ShuffleVT, Res, Res,
                        getV4X86ShuffleImm8ForMask({0, 2, 1, 3}, DL, DAG));
      return DAG.getBitcast(VT, Res);
    }
    return SDValue();
  }
  case X86ISD::VPERMI: {
    // vpermi(bitcast(x)) -> bitcast(vpermi(x)) for same number of elements.
    // TODO: Remove when we have preferred domains in combineX86ShuffleChain.
@@ -38071,9 +38094,8 @@ static SDValue combineShuffleOfConcatUndef(SDNode *N, SelectionDAG &DAG,
// TODO: Merge this into canonicalizeShuffleMaskWithHorizOp.
static SDValue foldShuffleOfHorizOp(SDNode *N, SelectionDAG &DAG) {
  unsigned Opcode = N->getOpcode();
  if (Opcode != X86ISD::UNPCKL && Opcode != X86ISD::UNPCKH)
    if (Opcode != X86ISD::SHUFP)
      return SDValue();
  if (Opcode != X86ISD::SHUFP)
    return SDValue();

  EVT VT = N->getValueType(0);
  SDValue HOp = N->getOperand(0);
@@ -38081,28 +38103,6 @@ static SDValue foldShuffleOfHorizOp(SDNode *N, SelectionDAG &DAG) {
      HOp.getOpcode() != X86ISD::HSUB && HOp.getOpcode() != X86ISD::FHSUB)
    return SDValue();

  // unpcklo(hop(x,y),hop(z,w)) -> permute(hop(x,z)).
  // unpckhi(hop(x,y),hop(z,w)) -> permute(hop(y,w)).
  // Don't fold if hop(x,y) == hop(z,w).
  if (Opcode == X86ISD::UNPCKL || Opcode == X86ISD::UNPCKH) {
    SDValue HOp2 = N->getOperand(1);
    if (HOp.getOpcode() != HOp2.getOpcode() || VT.getScalarSizeInBits() != 32)
      return SDValue();
    if (HOp == HOp2)
      return SDValue();
    SDLoc DL(HOp);
    unsigned LoHi = Opcode == X86ISD::UNPCKL ? 0 : 1;
    SDValue Res = DAG.getNode(HOp.getOpcode(), DL, VT, HOp.getOperand(LoHi),
                              HOp2.getOperand(LoHi));
    // Use SHUFPS for the permute so this will work on SSE3 targets, shuffle
    // combining and domain handling will simplify this later on.
    EVT ShuffleVT = VT.changeVectorElementType(MVT::f32);
    Res = DAG.getBitcast(ShuffleVT, Res);
    Res = DAG.getNode(X86ISD::SHUFP, DL, ShuffleVT, Res, Res,
                      getV4X86ShuffleImm8ForMask({0, 2, 1, 3}, DL, DAG));
    return DAG.getBitcast(VT, Res);
  }

  // shufps(hop(x,y),hop(z,w)) -> permute(hop(x,z)) etc.
  // Don't fold if hop(x,y) == hop(z,w).
  if (Opcode == X86ISD::SHUFP) {
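A small model of the permute that the combine emits; this is a sketch of the instruction semantics, not the SelectionDAG code, and shuffleImm8/shufps below are illustrative helpers. The {0, 2, 1, 3} mask becomes the immediate 0xD8, and because both SHUFP operands are the same hop result, the shufps acts as a single-register lane permute that later shuffle combining and domain fixing can simplify.

// Models the immediate built from a 4-element mask and SHUFPS with equal sources.
#include <array>
#include <cassert>
#include <cstdint>

using V4f = std::array<float, 4>;

// imm8 = m0 | m1<<2 | m2<<4 | m3<<6; {0,2,1,3} -> 0xD8.
static uint8_t shuffleImm8(std::array<int, 4> m) {
  return uint8_t(m[0] | (m[1] << 2) | (m[2] << 4) | (m[3] << 6));
}

// SHUFPS dst,src,imm: result lanes 0-1 select from dst, lanes 2-3 from src.
static V4f shufps(V4f dst, V4f src, uint8_t imm) {
  return {dst[imm & 3], dst[(imm >> 2) & 3], src[(imm >> 4) & 3],
          src[(imm >> 6) & 3]};
}

int main() {
  assert(shuffleImm8({0, 2, 1, 3}) == 0xD8);
  V4f r{10.f, 20.f, 30.f, 40.f};
  // With both operands equal this is a plain permute: [r0, r2, r1, r3].
  V4f p = shufps(r, r, shuffleImm8({0, 2, 1, 3}));
  assert((p == V4f{10.f, 30.f, 20.f, 40.f}));
  return 0;
}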
llvm/test/CodeGen/X86/horizontal-sum.ll
@@ -94,8 +94,8 @@ define <4 x i32> @pair_sum_v4i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
; SSSE3-SLOW-LABEL: pair_sum_v4i32_v4i32:
; SSSE3-SLOW: # %bb.0:
; SSSE3-SLOW-NEXT: phaddd %xmm1, %xmm0
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,1,3]
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,1,3]
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,1,3]
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSSE3-SLOW-NEXT: paddd %xmm1, %xmm0
; SSSE3-SLOW-NEXT: phaddd %xmm2, %xmm3
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,2,3]
@@ -115,9 +115,9 @@ define <4 x i32> @pair_sum_v4i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
; AVX1-SLOW-LABEL: pair_sum_v4i32_v4i32:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,2,1,3]
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,3,1,3]
; AVX1-SLOW-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,3,1,3]
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; AVX1-SLOW-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vphaddd %xmm2, %xmm2, %xmm1
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
; AVX1-SLOW-NEXT: vpaddd %xmm2, %xmm1, %xmm1
@@ -184,57 +184,55 @@ define <8 x float> @pair_sum_v8f32_v4f32(<4 x float> %0, <4 x float> %1, <4 x fl
; SSSE3-SLOW: # %bb.0:
; SSSE3-SLOW-NEXT: haddps %xmm1, %xmm0
; SSSE3-SLOW-NEXT: movaps %xmm0, %xmm1
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[1,3]
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3,1,3]
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm0[1,3]
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSSE3-SLOW-NEXT: addps %xmm1, %xmm0
; SSSE3-SLOW-NEXT: movaps %xmm2, %xmm1
; SSSE3-SLOW-NEXT: haddps %xmm3, %xmm1
; SSSE3-SLOW-NEXT: haddps %xmm2, %xmm3
; SSSE3-SLOW-NEXT: movaps %xmm3, %xmm1
; SSSE3-SLOW-NEXT: haddps %xmm2, %xmm1
; SSSE3-SLOW-NEXT: haddps %xmm4, %xmm5
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[2,0]
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm5[3,1]
; SSSE3-SLOW-NEXT: addps %xmm1, %xmm3
; SSSE3-SLOW-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm3[0]
; SSSE3-SLOW-NEXT: haddps %xmm3, %xmm2
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm5[2,0]
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm5[3,1]
; SSSE3-SLOW-NEXT: addps %xmm2, %xmm1
; SSSE3-SLOW-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSSE3-SLOW-NEXT: haddps %xmm7, %xmm6
; SSSE3-SLOW-NEXT: haddps %xmm6, %xmm6
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,3],xmm6[0,3]
; SSSE3-SLOW-NEXT: movaps %xmm3, %xmm1
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,3],xmm6[0,3]
; SSSE3-SLOW-NEXT: retq
;
; SSSE3-FAST-LABEL: pair_sum_v8f32_v4f32:
; SSSE3-FAST: # %bb.0:
; SSSE3-FAST-NEXT: haddps %xmm1, %xmm0
; SSSE3-FAST-NEXT: haddps %xmm0, %xmm0
; SSSE3-FAST-NEXT: movaps %xmm2, %xmm1
; SSSE3-FAST-NEXT: haddps %xmm3, %xmm1
; SSSE3-FAST-NEXT: haddps %xmm2, %xmm3
; SSSE3-FAST-NEXT: movaps %xmm3, %xmm1
; SSSE3-FAST-NEXT: haddps %xmm2, %xmm1
; SSSE3-FAST-NEXT: haddps %xmm4, %xmm5
; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[2,0]
; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm5[3,1]
; SSSE3-FAST-NEXT: addps %xmm1, %xmm3
; SSSE3-FAST-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm3[0]
; SSSE3-FAST-NEXT: haddps %xmm3, %xmm2
; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm5[2,0]
; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm5[3,1]
; SSSE3-FAST-NEXT: addps %xmm2, %xmm1
; SSSE3-FAST-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSSE3-FAST-NEXT: haddps %xmm6, %xmm6
; SSSE3-FAST-NEXT: haddps %xmm7, %xmm7
; SSSE3-FAST-NEXT: haddps %xmm7, %xmm6
; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,3],xmm6[0,2]
; SSSE3-FAST-NEXT: movaps %xmm3, %xmm1
; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,3],xmm6[0,2]
; SSSE3-FAST-NEXT: retq
;
; AVX1-SLOW-LABEL: pair_sum_v8f32_v4f32:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpermilps {{.*#+}} xmm8 = xmm0[0,2,1,3]
; AVX1-SLOW-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[0,1]
; AVX1-SLOW-NEXT: vaddps %xmm0, %xmm8, %xmm0
; AVX1-SLOW-NEXT: vhaddps %xmm3, %xmm2, %xmm1
; AVX1-SLOW-NEXT: vhaddps %xmm4, %xmm4, %xmm2
; AVX1-SLOW-NEXT: vhaddps %xmm5, %xmm5, %xmm3
; AVX1-SLOW-NEXT: vshufps {{.*#+}} xmm4 = xmm1[0,2],xmm2[0,1]
; AVX1-SLOW-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1,2],xmm3[0]
; AVX1-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,3],xmm2[1,1]
; AVX1-SLOW-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm3[1]
; AVX1-SLOW-NEXT: vaddps %xmm1, %xmm4, %xmm1
; AVX1-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm0[1,3],xmm1[0,1]
; AVX1-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; AVX1-SLOW-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vhaddps %xmm4, %xmm4, %xmm1
; AVX1-SLOW-NEXT: vhaddps %xmm5, %xmm5, %xmm4
; AVX1-SLOW-NEXT: vhaddps %xmm3, %xmm2, %xmm2
; AVX1-SLOW-NEXT: vshufps {{.*#+}} xmm3 = xmm2[0,2],xmm1[0,1]
; AVX1-SLOW-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0]
; AVX1-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,1]
; AVX1-SLOW-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[1]
; AVX1-SLOW-NEXT: vaddps %xmm1, %xmm3, %xmm1
; AVX1-SLOW-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX1-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
@@ -247,34 +245,34 @@ define <8 x float> @pair_sum_v8f32_v4f32(<4 x float> %0, <4 x float> %1, <4 x fl
; AVX1-FAST-LABEL: pair_sum_v8f32_v4f32:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: vhaddps %xmm3, %xmm2, %xmm1
; AVX1-FAST-NEXT: vhaddps %xmm2, %xmm2, %xmm2
; AVX1-FAST-NEXT: vhaddps %xmm3, %xmm3, %xmm3
; AVX1-FAST-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[1],xmm3[1],zero,zero
; AVX1-FAST-NEXT: vhaddps %xmm4, %xmm4, %xmm3
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm8
; AVX1-FAST-NEXT: vhaddps %xmm2, %xmm2, %xmm1
; AVX1-FAST-NEXT: vhaddps %xmm3, %xmm3, %xmm0
; AVX1-FAST-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[1],xmm0[1],zero,zero
; AVX1-FAST-NEXT: vhaddps %xmm4, %xmm4, %xmm1
; AVX1-FAST-NEXT: vhaddps %xmm5, %xmm5, %xmm4
; AVX1-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,1]
; AVX1-FAST-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0]
; AVX1-FAST-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,1],xmm3[1,3]
; AVX1-FAST-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm4[1]
; AVX1-FAST-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX1-FAST-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX1-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX1-FAST-NEXT: vhaddps %xmm3, %xmm2, %xmm2
; AVX1-FAST-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,1]
; AVX1-FAST-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm4[0]
; AVX1-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[1,3]
; AVX1-FAST-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[1]
; AVX1-FAST-NEXT: vaddps %xmm0, %xmm2, %xmm0
; AVX1-FAST-NEXT: vmovlhps {{.*#+}} xmm1 = xmm8[0],xmm0[0]
; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-FAST-NEXT: vhaddps %xmm7, %xmm6, %xmm2
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm2, %xmm2
; AVX1-FAST-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-FAST-NEXT: vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[2]
; AVX1-FAST-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-FAST-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[2]
; AVX1-FAST-NEXT: retq
;
; AVX2-SLOW-LABEL: pair_sum_v8f32_v4f32:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm8 = xmm0[0,2,1,3]
; AVX2-SLOW-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[0,1]
; AVX2-SLOW-NEXT: vaddps %xmm0, %xmm8, %xmm0
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm0[1,3],xmm1[0,1]
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; AVX2-SLOW-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX2-SLOW-NEXT: vhaddps %xmm4, %xmm4, %xmm1
; AVX2-SLOW-NEXT: vhaddps %xmm5, %xmm5, %xmm4
; AVX2-SLOW-NEXT: vhaddps %xmm3, %xmm2, %xmm2
@@ -364,17 +362,17 @@ define <8 x i32> @pair_sum_v8i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
; SSSE3-SLOW-LABEL: pair_sum_v8i32_v4i32:
; SSSE3-SLOW: # %bb.0:
; SSSE3-SLOW-NEXT: phaddd %xmm1, %xmm0
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,1,3]
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,1,3]
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,1,3]
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSSE3-SLOW-NEXT: paddd %xmm1, %xmm0
; SSSE3-SLOW-NEXT: phaddd %xmm3, %xmm2
; SSSE3-SLOW-NEXT: phaddd %xmm4, %xmm5
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm5[2,3,2,3]
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm3 = xmm5[0,1,0,1]
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm1[1,1]
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm4 = xmm5[0,1,0,1]
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm1[1,1]
; SSSE3-SLOW-NEXT: phaddd %xmm3, %xmm2
; SSSE3-SLOW-NEXT: movdqa %xmm2, %xmm1
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[2,0]
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[2,0]
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm4[2,0]
; SSSE3-SLOW-NEXT: paddd %xmm1, %xmm2
; SSSE3-SLOW-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSSE3-SLOW-NEXT: phaddd %xmm7, %xmm6
@@ -388,12 +386,12 @@ define <8 x i32> @pair_sum_v8i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
; SSSE3-FAST: # %bb.0:
; SSSE3-FAST-NEXT: phaddd %xmm1, %xmm0
; SSSE3-FAST-NEXT: phaddd %xmm0, %xmm0
; SSSE3-FAST-NEXT: phaddd %xmm3, %xmm2
; SSSE3-FAST-NEXT: movdqa %xmm5, %xmm1
; SSSE3-FAST-NEXT: phaddd %xmm4, %xmm5
; SSSE3-FAST-NEXT: phaddd %xmm4, %xmm4
; SSSE3-FAST-NEXT: phaddd %xmm1, %xmm1
; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm4[1,1]
; SSSE3-FAST-NEXT: phaddd %xmm3, %xmm2
; SSSE3-FAST-NEXT: movdqa %xmm2, %xmm3
; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm5[2,0]
; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm1[2,0]
@@ -409,20 +407,20 @@ define <8 x i32> @pair_sum_v8i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
; AVX1-SLOW-LABEL: pair_sum_v8i32_v4i32:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,2,1,3]
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,3,1,1]
; AVX1-SLOW-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX1-SLOW-NEXT: vphaddd %xmm3, %xmm2, %xmm1
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[0,2,1,3]
; AVX1-SLOW-NEXT: vphaddd %xmm4, %xmm4, %xmm3
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,3,1,1]
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; AVX1-SLOW-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vphaddd %xmm4, %xmm4, %xmm1
; AVX1-SLOW-NEXT: vphaddd %xmm5, %xmm5, %xmm4
; AVX1-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; AVX1-SLOW-NEXT: vphaddd %xmm3, %xmm2, %xmm2
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[0,2,1,3]
; AVX1-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm1[0]
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm4[0,0,0,0]
; AVX1-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5],xmm5[6,7]
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,3,1,1]
; AVX1-SLOW-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[1],zero
; AVX1-SLOW-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5],xmm5[6,7]
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,3,1,1]
; AVX1-SLOW-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[1],zero
; AVX1-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[3]
; AVX1-SLOW-NEXT: vpaddd %xmm1, %xmm2, %xmm1
; AVX1-SLOW-NEXT: vpaddd %xmm1, %xmm3, %xmm1
; AVX1-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; AVX1-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
@@ -436,20 +434,20 @@ define <8 x i32> @pair_sum_v8i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm8
; AVX1-FAST-NEXT: vphaddd %xmm3, %xmm2, %xmm1
; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
; AVX1-FAST-NEXT: vphaddd %xmm2, %xmm2, %xmm2
; AVX1-FAST-NEXT: vphaddd %xmm3, %xmm3, %xmm3
; AVX1-FAST-NEXT: vphaddd %xmm2, %xmm2, %xmm1
; AVX1-FAST-NEXT: vphaddd %xmm3, %xmm3, %xmm0
; AVX1-FAST-NEXT: vphaddd %xmm4, %xmm4, %xmm4
; AVX1-FAST-NEXT: vphaddd %xmm5, %xmm5, %xmm5
; AVX1-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm5[0,0,0,0]
; AVX1-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[1,1,1,1]
; AVX1-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5,6,7]
; AVX1-FAST-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm4[1],zero
; AVX1-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1,2],xmm5[3]
; AVX1-FAST-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vphaddd %xmm3, %xmm2, %xmm2
; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,2,1,3]
; AVX1-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm3 = xmm5[0,0,0,0]
; AVX1-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5],xmm3[6,7]
; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
; AVX1-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
; AVX1-FAST-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm4[1],zero
; AVX1-FAST-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm5[3]
; AVX1-FAST-NEXT: vpaddd %xmm0, %xmm2, %xmm0
; AVX1-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm8[0],xmm0[0]
; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; AVX1-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
@@ -462,9 +460,9 @@ define <8 x i32> @pair_sum_v8i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
; AVX2-SLOW-LABEL: pair_sum_v8i32_v4i32:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,2,1,3]
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,3,1,1]
; AVX2-SLOW-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,3,1,1]
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; AVX2-SLOW-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX2-SLOW-NEXT: vphaddd %xmm4, %xmm4, %xmm1
; AVX2-SLOW-NEXT: vphaddd %xmm5, %xmm5, %xmm4
; AVX2-SLOW-NEXT: vphaddd %xmm3, %xmm2, %xmm2
@@ -1147,13 +1145,13 @@ define <4 x i32> @reduction_sum_v4i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32
; AVX-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
; AVX-FAST-NEXT: vpaddd %xmm4, %xmm1, %xmm1
; AVX-FAST-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; AVX-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
; AVX-FAST-NEXT: vpaddd %xmm1, %xmm2, %xmm1
; AVX-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
; AVX-FAST-NEXT: vpaddd %xmm2, %xmm3, %xmm2
; AVX-FAST-NEXT: vphaddd %xmm2, %xmm1, %xmm1
; AVX-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
; AVX-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; AVX-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-FAST-NEXT: retq
%5 = call i32 @llvm.vector.reduce.add.i32.v4i32(<4 x i32> %0)