diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 091168336b0..fb939daab57 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -7986,9 +7986,17 @@ static SDValue lowerVectorShuffleWithSHUPFS(SDLoc DL, MVT VT,
   } else if (NumV2Elements == 2) {
     if (Mask[0] < 4 && Mask[1] < 4) {
       // Handle the easy case where we have V1 in the low lanes and V2 in the
-      // high lanes. We never see this reversed because we sort the shuffle.
+      // high lanes.
       NewMask[2] -= 4;
       NewMask[3] -= 4;
+    } else if (Mask[2] < 4 && Mask[3] < 4) {
+      // We also handle the reversed case because this utility may get called
+      // when we detect a SHUFPS pattern but can't easily commute the shuffle to
+      // arrange things in the right direction.
+      NewMask[0] -= 4;
+      NewMask[1] -= 4;
+      HighV = V1;
+      LowV = V2;
     } else {
       // We have a mixture of V1 and V2 in both low and high lanes. Rather than
       // trying to place elements directly, just blend them and set up the final
@@ -9392,7 +9400,7 @@ static SDValue lowerVectorShuffleAsLanePermuteAndBlend(SDLoc DL, MVT VT,
   SmallVector<int, 32> FlippedBlendMask;
   for (int i = 0, Size = Mask.size(); i < Size; ++i)
     FlippedBlendMask.push_back(
-        Mask[i] < 0 ? -1 : ((Mask[i] / LaneSize == i / LaneSize)
+        Mask[i] < 0 ? -1 : (((Mask[i] % Size) / LaneSize == i / LaneSize)
                                 ? Mask[i]
                                 : Mask[i] % LaneSize +
                                       (i / LaneSize) * LaneSize + Size));
@@ -9469,15 +9477,19 @@ static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
     return Blend;
 
   // Check if the blend happens to exactly fit that of SHUFPD.
-  if (Mask[0] < 4 && (Mask[1] == -1 || Mask[1] >= 4) &&
-      Mask[2] < 4 && (Mask[3] == -1 || Mask[3] >= 4)) {
+  if ((Mask[0] == -1 || Mask[0] < 2) &&
+      (Mask[1] == -1 || (Mask[1] >= 4 && Mask[1] < 6)) &&
+      (Mask[2] == -1 || (Mask[2] >= 2 && Mask[2] < 4)) &&
+      (Mask[3] == -1 || Mask[3] >= 6)) {
     unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 5) << 1) |
                           ((Mask[2] == 3) << 2) | ((Mask[3] == 7) << 3);
     return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V1, V2,
                        DAG.getConstant(SHUFPDMask, MVT::i8));
   }
-  if ((Mask[0] == -1 || Mask[0] >= 4) && Mask[1] < 4 &&
-      (Mask[2] == -1 || Mask[2] >= 4) && Mask[3] < 4) {
+  if ((Mask[0] == -1 || (Mask[0] >= 4 && Mask[0] < 6)) &&
+      (Mask[1] == -1 || Mask[1] < 2) &&
+      (Mask[2] == -1 || Mask[2] >= 6) &&
+      (Mask[3] == -1 || (Mask[3] >= 2 && Mask[3] < 4))) {
     unsigned SHUFPDMask = (Mask[0] == 5) | ((Mask[1] == 1) << 1) |
                           ((Mask[2] == 7) << 2) | ((Mask[3] == 3) << 3);
     return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V2, V1,
@@ -9564,8 +9576,10 @@ static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
 
   // If the shuffle mask is repeated in each 128-bit lane, we have many more
   // options to efficiently lower the shuffle.
-  SmallVector<int, 8> RepeatedMask;
+  SmallVector<int, 4> RepeatedMask;
   if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
+    assert(RepeatedMask.size() == 4 &&
+           "Repeated masks must be half the mask width!");
     if (isSingleInputShuffleMask(Mask))
       return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
                          getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
@@ -9577,12 +9591,12 @@ static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
       return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f32, V1, V2);
 
     // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
-    // have already handled any direct blends.
-    int SHUFPSMask[] = {Mask[0], Mask[1], Mask[2], Mask[3]};
-    for (int &M : SHUFPSMask)
-      if (M >= 8)
-        M -= 4;
-    return lowerVectorShuffleWithSHUPFS(DL, MVT::v8f32, SHUFPSMask, V1, V2, DAG);
+    // have already handled any direct blends. We also need to squash the
+    // repeated mask into a simulated v4f32 mask.
+    for (int i = 0; i < 4; ++i)
+      if (RepeatedMask[i] >= 8)
+        RepeatedMask[i] -= 4;
+    return lowerVectorShuffleWithSHUPFS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
   }
 
   // If we have a single input shuffle with different shuffle patterns in the
diff --git a/test/CodeGen/X86/vector-shuffle-256-v4.ll b/test/CodeGen/X86/vector-shuffle-256-v4.ll
index 423400c00a6..b7cfa0b1dab 100644
--- a/test/CodeGen/X86/vector-shuffle-256-v4.ll
+++ b/test/CodeGen/X86/vector-shuffle-256-v4.ll
@@ -566,6 +566,46 @@ define <4 x i64> @shuffle_v4i64_4015(<4 x i64> %a, <4 x i64> %b) {
   ret <4 x i64> %shuffle
 }
 
+define <4 x i64> @shuffle_v4i64_2u35(<4 x i64> %a, <4 x i64> %b) {
+; AVX1-LABEL: @shuffle_v4i64_2u35
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT:    vpermilpd {{.*}} # xmm2 = xmm0[1,0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vblendpd {{.*}} # ymm0 = ymm0[0,1,2],ymm1[3]
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: @shuffle_v4i64_2u35
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vpermq {{.*}} # ymm1 = ymm1[0,1,2,1]
+; AVX2-NEXT:    vpermq {{.*}} # ymm0 = ymm0[2,1,3,3]
+; AVX2-NEXT:    vpblendd {{.*}} # ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX2-NEXT:    retq
+  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 2, i32 undef, i32 3, i32 5>
+  ret <4 x i64> %shuffle
+}
+
+define <4 x i64> @shuffle_v4i64_1251(<4 x i64> %a, <4 x i64> %b) {
+; AVX1-LABEL: @shuffle_v4i64_1251
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vperm2f128 {{.*}} # ymm2 = ymm0[2,3,0,1]
+; AVX1-NEXT:    vshufpd {{.*}} # ymm0 = ymm0[1],ymm2[0],ymm0[2],ymm2[3]
+; AVX1-NEXT:    vpermilpd {{.*}} # xmm1 = xmm1[1,0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-NEXT:    vblendpd {{.*}} # ymm0 = ymm0[0,1],ymm1[2],ymm0[3]
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: @shuffle_v4i64_1251
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vpermq {{.*}} # ymm1 = ymm1[0,1,1,3]
+; AVX2-NEXT:    vpermq {{.*}} # ymm0 = ymm0[1,2,2,1]
+; AVX2-NEXT:    vpblendd {{.*}} # ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
+; AVX2-NEXT:    retq
+  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 1, i32 2, i32 5, i32 1>
+  ret <4 x i64> %shuffle
+}
+
 define <4 x i64> @stress_test1(<4 x i64> %a, <4 x i64> %b) {
 ; AVX1-LABEL: @stress_test1
 ; AVX1:       # BB#0:
diff --git a/test/CodeGen/X86/vector-shuffle-256-v8.ll b/test/CodeGen/X86/vector-shuffle-256-v8.ll
index 7e69ac5c537..fb5c993250e 100644
--- a/test/CodeGen/X86/vector-shuffle-256-v8.ll
+++ b/test/CodeGen/X86/vector-shuffle-256-v8.ll
@@ -646,6 +646,29 @@ define <8 x float> @shuffle_v8f32_uuu3uu66(<8 x float> %a, <8 x float> %b) {
   ret <8 x float> %shuffle
 }
 
+define <8 x float> @shuffle_v8f32_c348cda0(<8 x float> %a, <8 x float> %b) {
+; AVX1-LABEL: @shuffle_v8f32_c348cda0
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vperm2f128 {{.*}} # ymm2 = ymm0[2,3,0,1]
+; AVX1-NEXT:    vshufps {{.*}} # ymm0 = ymm0[0,3],ymm2[0,0],ymm0[4,7],ymm2[4,4]
+; AVX1-NEXT:    vperm2f128 {{.*}} # ymm2 = ymm1[2,3,0,1]
+; AVX1-NEXT:    vpermilps {{.*}} # ymm1 = ymm1[0,1,2,0,4,5,6,4]
+; AVX1-NEXT:    vblendps {{.*}} # ymm1 = ymm2[0],ymm1[1,2,3,4,5],ymm2[6],ymm1[7]
+; AVX1-NEXT:    vblendps {{.*}} # ymm0 = ymm1[0],ymm0[1,2],ymm1[3,4,5,6],ymm0[7]
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: @shuffle_v8f32_c348cda0
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovaps {{.*}} # ymm2 = <u,3,4,u,u,u,u,0>
+; AVX2-NEXT:    vpermps %ymm0, %ymm2, %ymm0
+; AVX2-NEXT:    vmovaps {{.*}} # ymm2 = <4,u,u,0,4,5,2,u>
+; AVX2-NEXT:    vpermps %ymm1, %ymm2, %ymm1
+; AVX2-NEXT:    vblendps {{.*}} # ymm0 = ymm1[0],ymm0[1,2],ymm1[3,4,5,6],ymm0[7]
+; AVX2-NEXT:    retq
+  %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 12, i32 3, i32 4, i32 8, i32 12, i32 13, i32 10, i32 0>
+  ret <8 x float> %shuffle
+}
+
 define <8 x i32> @shuffle_v8i32_00000000(<8 x i32> %a, <8 x i32> %b) {
 ; AVX1-LABEL: @shuffle_v8i32_00000000
 ; AVX1:       # BB#0:
@@ -1514,3 +1537,26 @@ define <8 x i32> @shuffle_v8i32_uuu3uu66(<8 x i32> %a, <8 x i32> %b) {
   %shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 3, i32 undef, i32 undef, i32 6, i32 6>
   ret <8 x i32> %shuffle
 }
+
+define <8 x i32> @shuffle_v8i32_6caa87e5(<8 x i32> %a, <8 x i32> %b) {
+; AVX1-LABEL: @shuffle_v8i32_6caa87e5
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vperm2f128 {{.*}} # ymm2 = ymm1[2,3,0,1]
+; AVX1-NEXT:    vshufps {{.*}} # ymm1 = ymm2[0,0],ymm1[2,2],ymm2[4,4],ymm1[6,6]
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT:    vpermilps {{.*}} # xmm2 = xmm0[2,1,2,3]
+; AVX1-NEXT:    vpermilps {{.*}} # xmm0 = xmm0[0,3,2,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT:    vblendps {{.*}} # ymm0 = ymm0[0],ymm1[1,2,3,4],ymm0[5],ymm1[6],ymm0[7]
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: @shuffle_v8i32_6caa87e5
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovdqa {{.*}} # ymm2 = <u,4,2,2,0,u,6,u>
+; AVX2-NEXT:    vpermd %ymm1, %ymm2, %ymm1
+; AVX2-NEXT:    vpermq {{.*}} # ymm0 = ymm0[3,1,3,2]
+; AVX2-NEXT:    vpblendd {{.*}} # ymm0 = ymm0[0],ymm1[1,2,3,4],ymm0[5],ymm1[6],ymm0[7]
+; AVX2-NEXT:    retq
+  %shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 6, i32 12, i32 10, i32 10, i32 8, i32 7, i32 14, i32 5>
+  ret <8 x i32> %shuffle
+}
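
Aside (illustration, not part of the patch): the v8f32 SHUFPS fallback above works because a mask that repeats in each 128-bit lane reduces to four entries, with V1 elements in [0,4) and V2 elements in [8,12); subtracting 4 from the V2 entries yields an ordinary two-input v4f32-style mask that lowerVectorShuffleWithSHUPFS already handles. The standalone C++ sketch below only demonstrates that mask arithmetic on a made-up, lane-repeated mask; it uses no LLVM APIs and omits the checks the in-tree is128BitLaneRepeatedShuffleMask performs on masks it cannot model.

#include <array>
#include <cassert>
#include <cstdio>

int main() {
  // Hypothetical v8f32 shuffle mask repeating the pattern {1, 3, 8, 10} in
  // both 128-bit lanes (indices >= 8 select from the second input, V2).
  const std::array<int, 8> Mask = {1, 3, 8, 10, 5, 7, 12, 14};

  // Reduce to the 4-entry repeated mask: V1 entries land in [0, 4), V2
  // entries in [8, 12). Lane-crossing and mismatch handling is simplified
  // to an assert for this illustration.
  std::array<int, 4> RepeatedMask = {-1, -1, -1, -1};
  for (int i = 0; i < 8; ++i) {
    int LocalM = Mask[i] < 8 ? Mask[i] % 4 : Mask[i] % 4 + 8;
    assert((RepeatedMask[i % 4] == -1 || RepeatedMask[i % 4] == LocalM) &&
           "Mask does not repeat per 128-bit lane");
    RepeatedMask[i % 4] = LocalM;
  }

  // Squash into a simulated v4f32 two-input mask, as the patch does:
  // V2 entries move from [8, 12) down to [4, 8).
  for (int &M : RepeatedMask)
    if (M >= 8)
      M -= 4;

  // Prints "1 3 4 6": low half from V1, high half from V2, i.e. the easy
  // case handled first in lowerVectorShuffleWithSHUPFS.
  std::printf("%d %d %d %d\n", RepeatedMask[0], RepeatedMask[1],
              RepeatedMask[2], RepeatedMask[3]);
  return 0;
}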