diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 25bd7609fc5..82459f50966 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -48146,10 +48146,11 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
                          DAG.getNode(ISD::CONCAT_VECTORS, DL, SrcVT, RHS));
       }
       break;
+    case X86ISD::HADD:
+    case X86ISD::HSUB:
     case X86ISD::PACKSS:
     case X86ISD::PACKUS:
-      if (!IsSplat && NumOps == 2 && VT.is256BitVector() &&
-          Subtarget.hasInt256()) {
+      if (!IsSplat && VT.is256BitVector() && Subtarget.hasInt256()) {
         SmallVector<SDValue, 2> LHS, RHS;
         for (unsigned i = 0; i != NumOps; ++i) {
           LHS.push_back(Ops[i].getOperand(0));
diff --git a/test/CodeGen/X86/haddsub-2.ll b/test/CodeGen/X86/haddsub-2.ll
index 256e1f460ba..f28ed59621e 100644
--- a/test/CodeGen/X86/haddsub-2.ll
+++ b/test/CodeGen/X86/haddsub-2.ll
@@ -579,11 +579,9 @@ define <8 x i32> @avx2_vphadd_d_test(<8 x i32> %A, <8 x i32> %B) {
 ;
 ; AVX2-LABEL: avx2_vphadd_d_test:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT:    vphaddd %xmm2, %xmm1, %xmm1
-; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX2-NEXT:    vphaddd %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
 ; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    vphaddd %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
   %vecext = extractelement <8 x i32> %A, i32 0
   %vecext1 = extractelement <8 x i32> %A, i32 1
@@ -737,11 +735,9 @@ define <16 x i16> @avx2_vphadd_w_test(<16 x i16> %a, <16 x i16> %b) nounwind {
 ;
 ; AVX2-LABEL: avx2_vphadd_w_test:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT:    vphaddw %xmm2, %xmm1, %xmm1
-; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX2-NEXT:    vphaddw %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
 ; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    vphaddw %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
   %vecext = extractelement <16 x i16> %a, i32 0
   %vecext1 = extractelement <16 x i16> %a, i32 1