From 592fba5b4db731c3638e1521b074a610eed0e2bb Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Mon, 15 Jul 2019 15:43:04 +0000
Subject: [PATCH] [X86][SSE] Add PACKSS with zero shuffle masks.

This is an example of expansion due to D61129 - it should combine back to
a PACKSS with a zero operand.

llvm-svn: 366077
---
 test/CodeGen/X86/packss.ll | 61 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 61 insertions(+)

diff --git a/test/CodeGen/X86/packss.ll b/test/CodeGen/X86/packss.ll
index 2e22501a986..5ac486281e7 100644
--- a/test/CodeGen/X86/packss.ll
+++ b/test/CodeGen/X86/packss.ll
@@ -264,3 +264,64 @@ define <8 x i16> @trunc_ashr_v4i64_demandedelts(<4 x i64> %a0) {
   %5 = trunc <8 x i32> %4 to <8 x i16>
   ret <8 x i16> %5
 }
+
+define <16 x i8> @packsswb_icmp_zero_128(<8 x i16> %a0) {
+; SSE-LABEL: packsswb_icmp_zero_128:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pxor %xmm1, %xmm1
+; SSE-NEXT:    pcmpeqw %xmm0, %xmm1
+; SSE-NEXT:    packsswb %xmm0, %xmm1
+; SSE-NEXT:    movq {{.*#+}} xmm0 = xmm1[0],zero
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: packsswb_icmp_zero_128:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX-NEXT:    ret{{[l|q]}}
+  %1 = icmp eq <8 x i16> %a0, zeroinitializer
+  %2 = sext <8 x i1> %1 to <8 x i8>
+  %3 = shufflevector <8 x i8> %2, <8 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  ret <16 x i8> %3
+}
+
+define <32 x i8> @packsswb_icmp_zero_256(<16 x i16> %a0) {
+; SSE-LABEL: packsswb_icmp_zero_256:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pxor %xmm2, %xmm2
+; SSE-NEXT:    pcmpeqw %xmm2, %xmm1
+; SSE-NEXT:    pcmpeqw %xmm2, %xmm0
+; SSE-NEXT:    packsswb %xmm1, %xmm0
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,3]
+; SSE-NEXT:    pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
+; SSE-NEXT:    movaps %xmm2, %xmm1
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: packsswb_icmp_zero_256:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpcmpeqw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpcmpeqw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpacksswb %xmm1, %xmm0, %xmm1
+; AVX1-NEXT:    vpacksswb %xmm0, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX2-LABEL: packsswb_icmp_zero_256:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT:    vpcmpeqw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT:    vpacksswb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,2,1]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
+; AVX2-NEXT:    ret{{[l|q]}}
+  %1 = icmp eq <16 x i16> %a0, zeroinitializer
+  %2 = sext <16 x i1> %1 to <16 x i8>
+  %3 = shufflevector <16 x i8> zeroinitializer, <16 x i8> %2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  ret <32 x i8> %3
+}
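
Note: for context on the commit message, the fold this test anticipates would replace the shuffle expansion with a single pack whose second operand is the zero vector. A minimal sketch of the hoped-for 128-bit codegen (illustrative only; this patch adds the tests and does not implement the combine, so the exact instruction sequence below is an assumption, not output from this change):

    pxor     %xmm1, %xmm1        # xmm1 = zero vector
    pcmpeqw  %xmm1, %xmm0        # xmm0 = word compare of %a0 against zero
    packsswb %xmm1, %xmm0        # pack: low 8 bytes from the compare, high 8 bytes from the zero operand
    retq

Packing the all-ones/all-zeros compare words through signed saturation yields the same bytes as the sext+trunc, and the zero operand supplies the zeroed upper half, eliminating the movq/vpshufb shuffle the current output needs.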