; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck --check-prefixes=AVX2,AVX2-SLOW %s
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck --check-prefixes=AVX2,AVX2-FAST %s
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck --check-prefixes=AVX2,AVX2-FAST %s

; These patterns are produced by LoopVectorizer for interleaved loads.
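;
; For reference, a source loop of roughly the following shape (a sketch only;
; the C form and variable names are illustrative assumptions, not part of this
; test) is what LoopVectorizer turns into the wide load plus the two even/odd
; shufflevectors checked in each function below:
;
;   for (int i = 0; i < n; ++i) {
;     out0[i] = in[2 * i + 0]; // even elements -> %strided.vec0
;     out1[i] = in[2 * i + 1]; // odd elements  -> %strided.vec1
;   }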
define void @vf2(<4 x i16>* %in.vec, <2 x i16>* %out.vec0, <2 x i16>* %out.vec1) nounwind {
; AVX2-LABEL: vf2:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX2-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,2,2,3,4,5,6,7]
; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7]
; AVX2-NEXT: vmovd %xmm1, (%rsi)
; AVX2-NEXT: vmovd %xmm0, (%rdx)
; AVX2-NEXT: retq
  %wide.vec = load <4 x i16>, <4 x i16>* %in.vec, align 32

  %strided.vec0 = shufflevector <4 x i16> %wide.vec, <4 x i16> poison, <2 x i32> <i32 0, i32 2>
  %strided.vec1 = shufflevector <4 x i16> %wide.vec, <4 x i16> poison, <2 x i32> <i32 1, i32 3>

  store <2 x i16> %strided.vec0, <2 x i16>* %out.vec0, align 32
  store <2 x i16> %strided.vec1, <2 x i16>* %out.vec1, align 32

  ret void
}

define void @vf4(<8 x i16>* %in.vec, <4 x i16>* %out.vec0, <4 x i16>* %out.vec1) nounwind {
; AVX2-LABEL: vf4:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u]
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u]
; AVX2-NEXT: vmovq %xmm1, (%rsi)
; AVX2-NEXT: vmovq %xmm0, (%rdx)
; AVX2-NEXT: retq
  %wide.vec = load <8 x i16>, <8 x i16>* %in.vec, align 32

  %strided.vec0 = shufflevector <8 x i16> %wide.vec, <8 x i16> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %strided.vec1 = shufflevector <8 x i16> %wide.vec, <8 x i16> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>

  store <4 x i16> %strided.vec0, <4 x i16>* %out.vec0, align 32
  store <4 x i16> %strided.vec1, <4 x i16>* %out.vec1, align 32

  ret void
}

define void @vf8(<16 x i16>* %in.vec, <8 x i16>* %out.vec0, <8 x i16>* %out.vec1) nounwind {
; AVX2-LABEL: vf8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vmovdqa (%rdi), %xmm1
; AVX2-NEXT: vmovdqa 16(%rdi), %xmm2
; AVX2-NEXT: vpblendw {{.*#+}} xmm3 = xmm2[0],xmm0[1],xmm2[2],xmm0[3],xmm2[4],xmm0[5],xmm2[6],xmm0[7]
; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3],xmm1[4],xmm0[5],xmm1[6],xmm0[7]
; AVX2-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15]
; AVX2-NEXT: vpshufb %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; AVX2-NEXT: vmovdqa %xmm0, (%rsi)
; AVX2-NEXT: vmovdqa %xmm1, (%rdx)
; AVX2-NEXT: retq
  %wide.vec = load <16 x i16>, <16 x i16>* %in.vec, align 32

  %strided.vec0 = shufflevector <16 x i16> %wide.vec, <16 x i16> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
  %strided.vec1 = shufflevector <16 x i16> %wide.vec, <16 x i16> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>

  store <8 x i16> %strided.vec0, <8 x i16>* %out.vec0, align 32
  store <8 x i16> %strided.vec1, <8 x i16>* %out.vec1, align 32

  ret void
}

define void @vf16(<32 x i16>* %in.vec, <16 x i16>* %out.vec0, <16 x i16>* %out.vec1) nounwind {
; AVX2-SLOW-LABEL: vf16:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-SLOW-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm2 = ymm1[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm3 = ymm0[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm3[0,2],ymm2[0,2],ymm3[4,6],ymm2[4,6]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,1,3]
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31]
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u]
; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-SLOW-NEXT: vmovaps %ymm2, (%rsi)
; AVX2-SLOW-NEXT: vmovdqa %ymm0, (%rdx)
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: vf16:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-FAST-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = <0,1,4,5,u,u,u,u,8,9,12,13,u,u,u,u,16,17,20,21,u,u,u,u,24,25,28,29,u,u,u,u>
; AVX2-FAST-NEXT: vpshufb %ymm2, %ymm1, %ymm3
; AVX2-FAST-NEXT: vpshufb %ymm2, %ymm0, %ymm2
; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,2],ymm3[0,2],ymm2[4,6],ymm3[4,6]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,1,3]
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31]
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u]
; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-FAST-NEXT: vmovaps %ymm2, (%rsi)
; AVX2-FAST-NEXT: vmovdqa %ymm0, (%rdx)
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
  %wide.vec = load <32 x i16>, <32 x i16>* %in.vec, align 32

  %strided.vec0 = shufflevector <32 x i16> %wide.vec, <32 x i16> poison, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
  %strided.vec1 = shufflevector <32 x i16> %wide.vec, <32 x i16> poison, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>

  store <16 x i16> %strided.vec0, <16 x i16>* %out.vec0, align 32
  store <16 x i16> %strided.vec1, <16 x i16>* %out.vec1, align 32

  ret void
}

define void @vf32(<64 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.vec1) nounwind {
; AVX2-SLOW-LABEL: vf32:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-SLOW-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX2-SLOW-NEXT: vmovdqa 64(%rdi), %ymm2
; AVX2-SLOW-NEXT: vmovdqa 96(%rdi), %ymm3
; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm4 = ymm1[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm4 = ymm4[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm5 = ymm0[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm5 = ymm5[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm4 = ymm5[0,2],ymm4[0,2],ymm5[4,6],ymm4[4,6]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,2,1,3]
; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm5 = ymm3[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm5 = ymm5[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm6 = ymm2[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm5 = ymm6[0,2],ymm5[0,2],ymm6[4,6],ymm5[4,6]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,2,1,3]
; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm6 = <u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31>
; AVX2-SLOW-NEXT: vpshufb %ymm6, %ymm1, %ymm1
; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm7 = <2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u>
; AVX2-SLOW-NEXT: vpshufb %ymm7, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-SLOW-NEXT: vpshufb %ymm6, %ymm3, %ymm1
; AVX2-SLOW-NEXT: vpshufb %ymm7, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5],ymm1[6,7]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
; AVX2-SLOW-NEXT: vmovaps %ymm5, 32(%rsi)
; AVX2-SLOW-NEXT: vmovaps %ymm4, (%rsi)
; AVX2-SLOW-NEXT: vmovdqa %ymm1, 32(%rdx)
; AVX2-SLOW-NEXT: vmovdqa %ymm0, (%rdx)
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: vf32:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-FAST-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX2-FAST-NEXT: vmovdqa 64(%rdi), %ymm2
; AVX2-FAST-NEXT: vmovdqa 96(%rdi), %ymm3
; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = <0,1,4,5,u,u,u,u,8,9,12,13,u,u,u,u,16,17,20,21,u,u,u,u,24,25,28,29,u,u,u,u>
; AVX2-FAST-NEXT: vpshufb %ymm4, %ymm1, %ymm5
; AVX2-FAST-NEXT: vpshufb %ymm4, %ymm0, %ymm6
; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm5 = ymm6[0,2],ymm5[0,2],ymm6[4,6],ymm5[4,6]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,2,1,3]
; AVX2-FAST-NEXT: vpshufb %ymm4, %ymm3, %ymm6
; AVX2-FAST-NEXT: vpshufb %ymm4, %ymm2, %ymm4
; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm4 = ymm4[0,2],ymm6[0,2],ymm4[4,6],ymm6[4,6]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,2,1,3]
; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm6 = <u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31>
; AVX2-FAST-NEXT: vpshufb %ymm6, %ymm1, %ymm1
; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm7 = <2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u>
; AVX2-FAST-NEXT: vpshufb %ymm7, %ymm0, %ymm0
; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-FAST-NEXT: vpshufb %ymm6, %ymm3, %ymm1
; AVX2-FAST-NEXT: vpshufb %ymm7, %ymm2, %ymm2
; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5],ymm1[6,7]
; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
; AVX2-FAST-NEXT: vmovaps %ymm4, 32(%rsi)
; AVX2-FAST-NEXT: vmovaps %ymm5, (%rsi)
; AVX2-FAST-NEXT: vmovdqa %ymm1, 32(%rdx)
; AVX2-FAST-NEXT: vmovdqa %ymm0, (%rdx)
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
  %wide.vec = load <64 x i16>, <64 x i16>* %in.vec, align 32

  %strided.vec0 = shufflevector <64 x i16> %wide.vec, <64 x i16> poison, <32 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62>
  %strided.vec1 = shufflevector <64 x i16> %wide.vec, <64 x i16> poison, <32 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63>

  store <32 x i16> %strided.vec0, <32 x i16>* %out.vec0, align 32
  store <32 x i16> %strided.vec1, <32 x i16>* %out.vec1, align 32

  ret void
}