
[X86][SSE] Fold HOP(SHUFFLE(X),SHUFFLE(Y)) --> SHUFFLE(HOP(X,Y))

This is beginning to look like a canonicalization stage that could be performed as part of shuffle combining.

Another step towards PR41813
Simon Pilgrim 2020-08-12 12:11:08 +01:00
parent 88b9fc954b
commit 4c91fdf3fa
3 changed files with 37 additions and 20 deletions
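
For illustration, the identity behind the fold can be checked with plain scalar arithmetic. The sketch below is not part of the commit; haddps and shuffle64 are stand-in helpers modelling the SSE3 HADDPS semantics and a unary shuffle whose v4x32 mask scales to v2x64.

#include <array>
#include <cassert>

using V4 = std::array<float, 4>;

// Scalar model of HADDPS: pairwise sums of A fill the low half,
// pairwise sums of B fill the high half.
static V4 haddps(const V4 &A, const V4 &B) {
  return {A[0] + A[1], A[2] + A[3], B[0] + B[1], B[2] + B[3]};
}

// Unary shuffle of X by a v2x64 mask {M0,M1}: each 64-bit lane moves whole.
static V4 shuffle64(const V4 &X, int M0, int M1) {
  return {X[2 * M0], X[2 * M0 + 1], X[2 * M1], X[2 * M1 + 1]};
}

int main() {
  V4 X = {1, 2, 3, 4}, Y = {5, 6, 7, 8};
  // HOP(SHUFFLE(X),SHUFFLE(Y)) with v2x64 masks {1,0} and {0,1}...
  V4 Pre = haddps(shuffle64(X, 1, 0), shuffle64(Y, 0, 1));
  // ...equals a v4x32 shuffle of HOP(X,Y): 64-bit lane j of a shuffled
  // operand contributes the j'th pairwise sum of that operand.
  V4 Hop = haddps(X, Y);
  V4 Post = {Hop[1], Hop[0], Hop[2], Hop[3]};
  assert(Pre == Post);
  return 0;
}

Moving the shuffles after the horizontal op lets shuffle combining merge them into a single cheaper post shuffle, as the updated tests below show.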

@@ -42071,6 +42071,39 @@ static SDValue combineHorizOpWithShuffle(SDNode *N, SelectionDAG &DAG,
    }
  }

  // Attempt to fold HOP(SHUFFLE(X),SHUFFLE(Y)) -> SHUFFLE(HOP(X,Y)).
  // TODO: Merge with binary shuffle folds below.
  if (VT.is128BitVector() && SrcVT.getScalarSizeInBits() <= 32) {
    int PostShuffle[4] = {0, 1, 2, 3};

    // If the op is an unary shuffle that can scale to v2x64,
    // then we can perform this as a v4x32 post shuffle.
    auto AdjustOp = [&](SDValue V, int Offset) {
      auto *SVN = dyn_cast<ShuffleVectorSDNode>(V);
      SmallVector<int, 2> ScaledMask;
      if (!SVN || !SVN->getOperand(1).isUndef() ||
          !scaleShuffleElements(SVN->getMask(), 2, ScaledMask) ||
          !N->isOnlyUserOf(V.getNode()))
        return SDValue();
      PostShuffle[Offset + 0] = ScaledMask[0];
      PostShuffle[Offset + 1] = ScaledMask[1];
      return SVN->getOperand(0);
    };

    SDValue Src0 = AdjustOp(N0, 0);
    SDValue Src1 = AdjustOp(N1, 2);
    if (Src0 || Src1) {
      Src0 = Src0 ? Src0 : N0;
      Src1 = Src1 ? Src1 : N1;
      SDLoc DL(N);
      MVT ShufVT = VT.isFloatingPoint() ? MVT::v4f32 : MVT::v4i32;
      SDValue Res = DAG.getNode(Opcode, DL, VT, Src0, Src1);
      Res = DAG.getBitcast(ShufVT, Res);
      Res = DAG.getVectorShuffle(ShufVT, DL, Res, Res, PostShuffle);
      return DAG.getBitcast(VT, Res);
    }
  }

  // Attempt to fold HOP(SHUFFLE(X,Y),SHUFFLE(X,Y)) -> SHUFFLE(HOP(X,Y)).
  // TODO: Relax shuffle scaling to support sub-128-bit subvector shuffles.
  if (VT.is256BitVector() && Subtarget.hasInt256()) {

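The fold only fires when scaleShuffleElements can widen the 4-element mask to 2 elements. As a rough standalone model (an approximation, not LLVM's implementation; the real helper is more lenient, e.g. it can infer a lane when only one element of a pair is undef), the check amounts to:

#include <array>
#include <cassert>
#include <optional>

// A v4x32 unary mask scales to v2x64 iff each pair of mask elements reads
// one whole 64-bit lane in order, or is wholly undef (-1).
static std::optional<std::array<int, 2>> scaleV4MaskToV2(std::array<int, 4> M) {
  std::array<int, 2> Scaled{};
  for (int I = 0; I != 2; ++I) {
    int Lo = M[2 * I], Hi = M[2 * I + 1];
    if (Lo == -1 && Hi == -1) {
      Scaled[I] = -1; // whole 64-bit lane undef
      continue;
    }
    if (Lo < 0 || Hi != Lo + 1 || (Lo % 2) != 0)
      return std::nullopt; // pair straddles or reorders 64-bit lanes
    Scaled[I] = Lo / 2;
  }
  return Scaled;
}

int main() {
  // A movhlps-style mask <2,3,2,3> scales to <1,1>, so the fold applies...
  assert((scaleV4MaskToV2({2, 3, 2, 3}) == std::array<int, 2>{1, 1}));
  // ...but <1,0,3,2> swaps elements inside each 64-bit lane, so it does not.
  assert(!scaleV4MaskToV2({1, 0, 3, 2}));
  return 0;
}

The scaled mask then becomes the corresponding half of PostShuffle, applied once to the result of the horizontal op.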
@@ -879,55 +879,45 @@ declare <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float>, <4 x float>)
define <4 x float> @PR34724_1(<4 x float> %a, <4 x float> %b) {
; SSSE3_SLOW-LABEL: PR34724_1:
; SSSE3_SLOW: # %bb.0:
; SSSE3_SLOW-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSSE3_SLOW-NEXT: haddps %xmm1, %xmm0
; SSSE3_SLOW-NEXT: movsldup {{.*#+}} xmm2 = xmm1[0,0,2,2]
; SSSE3_SLOW-NEXT: addps %xmm1, %xmm2
; SSSE3_SLOW-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,0],xmm0[2,0]
; SSSE3_SLOW-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm2[2,0]
; SSSE3_SLOW-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,0]
; SSSE3_SLOW-NEXT: retq
;
; SSSE3_FAST-LABEL: PR34724_1:
; SSSE3_FAST: # %bb.0:
; SSSE3_FAST-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSSE3_FAST-NEXT: haddps %xmm1, %xmm0
; SSSE3_FAST-NEXT: haddps %xmm1, %xmm1
; SSSE3_FAST-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,0],xmm0[2,0]
; SSSE3_FAST-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[2,0]
; SSSE3_FAST-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
; SSSE3_FAST-NEXT: retq
;
; AVX1_SLOW-LABEL: PR34724_1:
; AVX1_SLOW: # %bb.0:
; AVX1_SLOW-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1_SLOW-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX1_SLOW-NEXT: vmovsldup {{.*#+}} xmm2 = xmm1[0,0,2,2]
; AVX1_SLOW-NEXT: vaddps %xmm1, %xmm2, %xmm1
; AVX1_SLOW-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
; AVX1_SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,2,3]
; AVX1_SLOW-NEXT: retq
;
; AVX1_FAST-LABEL: PR34724_1:
; AVX1_FAST: # %bb.0:
; AVX1_FAST-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1_FAST-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX1_FAST-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,2,3]
; AVX1_FAST-NEXT: retq
;
; AVX2_SLOW-LABEL: PR34724_1:
; AVX2_SLOW: # %bb.0:
; AVX2_SLOW-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2_SLOW-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX2_SLOW-NEXT: vmovsldup {{.*#+}} xmm2 = xmm1[0,0,2,2]
; AVX2_SLOW-NEXT: vaddps %xmm1, %xmm2, %xmm1
; AVX2_SLOW-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
; AVX2_SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,2,3]
; AVX2_SLOW-NEXT: retq
;
; AVX2_FAST-LABEL: PR34724_1:
; AVX2_FAST: # %bb.0:
; AVX2_FAST-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2_FAST-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX2_FAST-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,2,3]
; AVX2_FAST-NEXT: retq
%t0 = shufflevector <4 x float> %a, <4 x float> %b, <2 x i32> <i32 2, i32 4>
%t1 = shufflevector <4 x float> %a, <4 x float> %b, <2 x i32> <i32 3, i32 5>

@@ -955,38 +955,32 @@ define <4 x float> @PR45747_2(<4 x float> %a, <4 x float> %b) nounwind {
define <4 x float> @PR34724_add_v4f32_u123(<4 x float> %0, <4 x float> %1) {
; SSE-SLOW-LABEL: PR34724_add_v4f32_u123:
; SSE-SLOW: # %bb.0:
; SSE-SLOW-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-SLOW-NEXT: haddps %xmm1, %xmm0
; SSE-SLOW-NEXT: movsldup {{.*#+}} xmm2 = xmm1[0,0,2,2]
; SSE-SLOW-NEXT: addps %xmm1, %xmm2
; SSE-SLOW-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,0],xmm0[2,0]
; SSE-SLOW-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm2[2,0]
; SSE-SLOW-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,0]
; SSE-SLOW-NEXT: retq
;
; SSE-FAST-LABEL: PR34724_add_v4f32_u123:
; SSE-FAST: # %bb.0:
; SSE-FAST-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-FAST-NEXT: haddps %xmm1, %xmm0
; SSE-FAST-NEXT: haddps %xmm1, %xmm1
; SSE-FAST-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,0],xmm0[2,0]
; SSE-FAST-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[2,0]
; SSE-FAST-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
; SSE-FAST-NEXT: retq
;
; AVX-SLOW-LABEL: PR34724_add_v4f32_u123:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-SLOW-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT: vmovsldup {{.*#+}} xmm2 = xmm1[0,0,2,2]
; AVX-SLOW-NEXT: vaddps %xmm1, %xmm2, %xmm1
; AVX-SLOW-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
; AVX-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,2,3]
; AVX-SLOW-NEXT: retq
;
; AVX-FAST-LABEL: PR34724_add_v4f32_u123:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-FAST-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX-FAST-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,2,3]
; AVX-FAST-NEXT: retq
%3 = shufflevector <4 x float> %0, <4 x float> %1, <2 x i32> <i32 2, i32 4>
%4 = shufflevector <4 x float> %0, <4 x float> %1, <2 x i32> <i32 3, i32 5>