Mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2024-11-26 04:32:44 +01:00)
[X86][SSE] Combine v16i8 SHL by constants to multiplies

Pre-AVX512 (which can perform a quick extend/shift/truncate), extending to two v8i16 vectors for the PMULLW and then truncating is more performant than relying on the generic PBLENDVB vXi8 shift path, and it uses a similar amount of mask constant pool data.

Differential Revision: https://reviews.llvm.org/D48963

llvm-svn: 336513
parent d9072865e2
commit 424dd446d9
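Roughly, the transform works like this (an illustrative scalar sketch, not the LLVM code itself; the function name and loop are invented for this example): a left shift by a constant is a multiply by a power of two per lane, so a v16i8 shift by constants can be done as two widened 8 x i16 multiplies followed by a truncating pack.

#include <cstddef>
#include <cstdint>

// Scalar model of the new lowering: widen each byte to 16 bits, multiply by
// (1 << amount) -- which is what PMULLW computes per 16-bit lane -- then keep
// only the low 8 bits, matching the PAND-with-255 plus PACKUSWB pair in the
// generated code. Shift amounts are assumed to be in the range 0..7.
void shl_v16i8_by_constants(uint8_t out[16], const uint8_t in[16],
                            const uint8_t amounts[16]) {
  for (std::size_t i = 0; i != 16; ++i) {
    uint16_t widened = in[i];                                        // PMOVZXBW / PUNPCKHBW
    uint16_t scaled =
        static_cast<uint16_t>(widened * (uint16_t(1) << amounts[i])); // PMULLW
    out[i] = static_cast<uint8_t>(scaled & 0xFF);                     // PAND + PACKUSWB
  }
}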
@@ -23306,7 +23306,8 @@ static SDValue convertShiftLeftToScale(SDValue Amt, const SDLoc &dl,
                                         SelectionDAG &DAG) {
   MVT VT = Amt.getSimpleValueType();
   if (!(VT == MVT::v8i16 || VT == MVT::v4i32 ||
-        (Subtarget.hasInt256() && VT == MVT::v16i16)))
+        (Subtarget.hasInt256() && VT == MVT::v16i16) ||
+        (!Subtarget.hasAVX512() && VT == MVT::v16i8)))
     return SDValue();
 
   if (ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
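For context, a hedged sketch of what convertShiftLeftToScale does for constant amounts (the helper name below is hypothetical and the real code builds SelectionDAG nodes, not arrays): each shift amount becomes a per-lane scale factor of 1 << amount, and the caller then emits a vector multiply instead of a variable shift; the hunk above simply allows v16i8 to take this path when AVX512 is unavailable.

#include <array>
#include <cstdint>

// Stand-in for the BUILD_VECTOR of scale factors produced for constant
// shift amounts, here modeled on the widened 8 x i16 lanes.
std::array<uint16_t, 8> shiftAmountsToScales(const std::array<uint16_t, 8> &amounts) {
  std::array<uint16_t, 8> scales{};
  for (std::size_t i = 0; i != amounts.size(); ++i)
    scales[i] = static_cast<uint16_t>(1u << amounts[i]); // x << c == x * (1 << c)
  return scales;
}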
@@ -174,68 +174,55 @@ define <8 x i16> @mul_v8i16_1_2_4_8_16_32_64_128(<8 x i16> %a0) nounwind {
ret <8 x i16> %1
}

define <16 x i8> @mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8(<16 x i8> %a0) nounwind {
; X86-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
; X86: # %bb.0:
; X86-NEXT: movdqa %xmm0, %xmm1
; X86-NEXT: movdqa %xmm0, %xmm2
; X86-NEXT: psllw $4, %xmm2
; X86-NEXT: pand {{\.LCPI.*}}, %xmm2
; X86-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,8192,24640,8192,24640,8192,24640]
; X86-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; X86-NEXT: movdqa %xmm1, %xmm2
; X86-NEXT: psllw $2, %xmm2
; X86-NEXT: pand {{\.LCPI.*}}, %xmm2
; X86-NEXT: paddb %xmm0, %xmm0
; X86-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; X86-NEXT: movdqa %xmm1, %xmm2
; X86-NEXT: paddb %xmm1, %xmm2
; X86-NEXT: paddb %xmm0, %xmm0
; X86-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; X86-NEXT: movdqa %xmm1, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
; X64: # %bb.0:
; X64-NEXT: movdqa %xmm0, %xmm1
; X64-NEXT: movdqa %xmm0, %xmm2
; X64-NEXT: psllw $4, %xmm2
; X64-NEXT: pand {{.*}}(%rip), %xmm2
; X64-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,8192,24640,8192,24640,8192,24640]
; X64-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; X64-NEXT: movdqa %xmm1, %xmm2
; X64-NEXT: psllw $2, %xmm2
; X64-NEXT: pand {{.*}}(%rip), %xmm2
; X64-NEXT: paddb %xmm0, %xmm0
; X64-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; X64-NEXT: movdqa %xmm1, %xmm2
; X64-NEXT: paddb %xmm1, %xmm2
; X64-NEXT: paddb %xmm0, %xmm0
; X64-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; X64-NEXT: movdqa %xmm1, %xmm0
; X64-NEXT: retq
;
define <16 x i8> @mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8(<16 x i8> %a0) nounwind {
; X86-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
; X86: # %bb.0:
; X86-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8,1,2,4,8,1,2,4,8,1,2,4,8]
; X86-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; X86-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X86-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; X86-NEXT: pmullw %xmm2, %xmm0
; X86-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; X86-NEXT: pand %xmm2, %xmm0
; X86-NEXT: pmullw {{\.LCPI.*}}, %xmm1
; X86-NEXT: pand %xmm2, %xmm1
; X86-NEXT: packuswb %xmm0, %xmm1
; X86-NEXT: movdqa %xmm1, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
; X64: # %bb.0:
; X64-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8,1,2,4,8,1,2,4,8,1,2,4,8]
; X64-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; X64-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; X64-NEXT: pmullw %xmm2, %xmm0
; X64-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; X64-NEXT: pand %xmm2, %xmm0
; X64-NEXT: pmullw {{.*}}(%rip), %xmm1
; X64-NEXT: pand %xmm2, %xmm1
; X64-NEXT: packuswb %xmm0, %xmm1
; X64-NEXT: movdqa %xmm1, %xmm0
; X64-NEXT: retq
;
; X64-XOP-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
; X64-XOP: # %bb.0:
; X64-XOP-NEXT: vpshlb {{.*}}(%rip), %xmm0, %xmm0
; X64-XOP-NEXT: retq
;
; X64-AVX2-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpsllw $4, %xmm0, %xmm1
; X64-AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; X64-AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [8192,24640,8192,24640,8192,24640,8192,24640]
; X64-AVX2-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vpsllw $2, %xmm0, %xmm1
; X64-AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; X64-AVX2-NEXT: vpaddb %xmm2, %xmm2, %xmm2
; X64-AVX2-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vpaddb %xmm0, %xmm0, %xmm1
; X64-AVX2-NEXT: vpaddb %xmm2, %xmm2, %xmm2
; X64-AVX2-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: retq
%1 = mul <16 x i8> %a0, <i8 1, i8 2, i8 4, i8 8, i8 1, i8 2, i8 4, i8 8, i8 1, i8 2, i8 4, i8 8, i8 1, i8 2, i8 4, i8 8>
ret <16 x i8> %1
;
; X64-AVX2-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; X64-AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; X64-AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; X64-AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; X64-AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
%1 = mul <16 x i8> %a0, <i8 1, i8 2, i8 4, i8 8, i8 1, i8 2, i8 4, i8 8, i8 1, i8 2, i8 4, i8 8, i8 1, i8 2, i8 4, i8 8>
ret <16 x i8> %1
}

;
@@ -925,74 +925,67 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
ret <8 x i16> %shift
}

define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: constant_shift_v16i8:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [8192,24640,41088,57536,49376,32928,16480,32]
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: pcmpgtb %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pandn %xmm0, %xmm4
; SSE2-NEXT: psllw $4, %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: pand %xmm3, %xmm0
; SSE2-NEXT: por %xmm4, %xmm0
; SSE2-NEXT: paddb %xmm2, %xmm2
; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: pcmpgtb %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pandn %xmm0, %xmm4
; SSE2-NEXT: psllw $2, %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: pand %xmm3, %xmm0
; SSE2-NEXT: por %xmm4, %xmm0
; SSE2-NEXT: paddb %xmm2, %xmm2
; SSE2-NEXT: pcmpgtb %xmm2, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pandn %xmm0, %xmm2
; SSE2-NEXT: paddb %xmm0, %xmm0
; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_shift_v16i8:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psllw $4, %xmm2
; SSE41-NEXT: pand {{.*}}(%rip), %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,41088,57536,49376,32928,16480,32]
; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: psllw $2, %xmm2
; SSE41-NEXT: pand {{.*}}(%rip), %xmm2
; SSE41-NEXT: paddb %xmm0, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: paddb %xmm1, %xmm2
; SSE41-NEXT: paddb %xmm0, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: constant_shift_v16i8:
; AVX: # %bb.0:
; AVX-NEXT: vpsllw $4, %xmm0, %xmm1
; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [8192,24640,41088,57536,49376,32928,16480,32]
; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsllw $2, %xmm0, %xmm1
; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT: vpaddb %xmm2, %xmm2, %xmm2
; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm1
; AVX-NEXT: vpaddb %xmm2, %xmm2, %xmm2
; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: constant_shift_v16i8:
; XOP: # %bb.0:
define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: constant_shift_v16i8:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1]
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
; SSE2-NEXT: pmullw %xmm2, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm2, %xmm3
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pmullw %xmm1, %xmm0
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: packuswb %xmm3, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_shift_v16i8:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1]
; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmullw %xmm2, %xmm0
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT: pand %xmm2, %xmm0
; SSE41-NEXT: pmullw {{.*}}(%rip), %xmm1
; SSE41-NEXT: pand %xmm2, %xmm1
; SSE41-NEXT: packuswb %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: constant_shift_v16i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_shift_v16i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; XOP-LABEL: constant_shift_v16i8:
; XOP: # %bb.0:
; XOP-NEXT: vpshlb {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: retq
;
@@ -1029,38 +1022,25 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
;
; X32-SSE-LABEL: constant_shift_v16i8:
; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [8192,24640,41088,57536,49376,32928,16480,32]
; X32-SSE-NEXT: pxor %xmm1, %xmm1
; X32-SSE-NEXT: pxor %xmm3, %xmm3
; X32-SSE-NEXT: pcmpgtb %xmm2, %xmm3
; X32-SSE-NEXT: movdqa %xmm3, %xmm4
; X32-SSE-NEXT: pandn %xmm0, %xmm4
; X32-SSE-NEXT: psllw $4, %xmm0
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT: pand %xmm3, %xmm0
; X32-SSE-NEXT: por %xmm4, %xmm0
; X32-SSE-NEXT: paddb %xmm2, %xmm2
; X32-SSE-NEXT: pxor %xmm3, %xmm3
; X32-SSE-NEXT: pcmpgtb %xmm2, %xmm3
; X32-SSE-NEXT: movdqa %xmm3, %xmm4
; X32-SSE-NEXT: pandn %xmm0, %xmm4
; X32-SSE-NEXT: psllw $2, %xmm0
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT: pand %xmm3, %xmm0
; X32-SSE-NEXT: por %xmm4, %xmm0
; X32-SSE-NEXT: paddb %xmm2, %xmm2
; X32-SSE-NEXT: pcmpgtb %xmm2, %xmm1
; X32-SSE-NEXT: movdqa %xmm1, %xmm2
; X32-SSE-NEXT: pandn %xmm0, %xmm2
; X32-SSE-NEXT: paddb %xmm0, %xmm0
; X32-SSE-NEXT: pand %xmm1, %xmm0
; X32-SSE-NEXT: por %xmm2, %xmm0
; X32-SSE-NEXT: retl
%shift = shl <16 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
ret <16 x i8> %shift
;
; X32-SSE-LABEL: constant_shift_v16i8:
; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1]
; X32-SSE-NEXT: movdqa %xmm1, %xmm2
; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; X32-SSE-NEXT: movdqa %xmm0, %xmm3
; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
; X32-SSE-NEXT: pmullw %xmm2, %xmm3
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; X32-SSE-NEXT: pand %xmm2, %xmm3
; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X32-SSE-NEXT: pmullw %xmm1, %xmm0
; X32-SSE-NEXT: pand %xmm2, %xmm0
; X32-SSE-NEXT: packuswb %xmm3, %xmm0
; X32-SSE-NEXT: retl
%shift = shl <16 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
ret <16 x i8> %shift
}

;
@@ -986,35 +986,32 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
ret <16 x i16> %shift
}

define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX1-LABEL: constant_shift_v32i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsllw $4, %xmm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [8192,24640,41088,57536,49376,32928,16480,32]
; AVX1-NEXT: vpblendvb %xmm4, %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsllw $2, %xmm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpaddb %xmm4, %xmm4, %xmm6
; AVX1-NEXT: vpblendvb %xmm6, %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm2
; AVX1-NEXT: vpaddb %xmm6, %xmm6, %xmm7
; AVX1-NEXT: vpblendvb %xmm7, %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsllw $4, %xmm0, %xmm2
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpblendvb %xmm4, %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpsllw $2, %xmm0, %xmm2
; AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpblendvb %xmm6, %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpaddb %xmm0, %xmm0, %xmm2
; AVX1-NEXT: vpblendvb %xmm7, %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_shift_v32i8:
define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX1-LABEL: constant_shift_v32i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [1,2,4,8,16,32,64,128]
; AVX1-NEXT: vpmullw %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpmullw %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_shift_v32i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsllw $4, %ymm0, %ymm1
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
@@ -1090,35 +1087,32 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX512BWVL-NEXT: vpsllvw {{.*}}(%rip), %zmm0, %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWVL-NEXT: retq
;
; X32-AVX1-LABEL: constant_shift_v32i8:
; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X32-AVX1-NEXT: vpsllw $4, %xmm1, %xmm2
; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; X32-AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [8192,24640,41088,57536,49376,32928,16480,32]
; X32-AVX1-NEXT: vpblendvb %xmm4, %xmm2, %xmm1, %xmm1
; X32-AVX1-NEXT: vpsllw $2, %xmm1, %xmm2
; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; X32-AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2
; X32-AVX1-NEXT: vpaddb %xmm4, %xmm4, %xmm6
; X32-AVX1-NEXT: vpblendvb %xmm6, %xmm2, %xmm1, %xmm1
; X32-AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm2
; X32-AVX1-NEXT: vpaddb %xmm6, %xmm6, %xmm7
; X32-AVX1-NEXT: vpblendvb %xmm7, %xmm2, %xmm1, %xmm1
; X32-AVX1-NEXT: vpsllw $4, %xmm0, %xmm2
; X32-AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
; X32-AVX1-NEXT: vpblendvb %xmm4, %xmm2, %xmm0, %xmm0
; X32-AVX1-NEXT: vpsllw $2, %xmm0, %xmm2
; X32-AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2
; X32-AVX1-NEXT: vpblendvb %xmm6, %xmm2, %xmm0, %xmm0
; X32-AVX1-NEXT: vpaddb %xmm0, %xmm0, %xmm2
; X32-AVX1-NEXT: vpblendvb %xmm7, %xmm2, %xmm0, %xmm0
; X32-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: constant_shift_v32i8:
;
; X32-AVX1-LABEL: constant_shift_v32i8:
; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1]
; X32-AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; X32-AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; X32-AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm3
; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; X32-AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
; X32-AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [1,2,4,8,16,32,64,128]
; X32-AVX1-NEXT: vpmullw %xmm5, %xmm2, %xmm2
; X32-AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
; X32-AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
; X32-AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; X32-AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm1
; X32-AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
; X32-AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-AVX1-NEXT: vpmullw %xmm5, %xmm0, %xmm0
; X32-AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
; X32-AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; X32-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: constant_shift_v32i8:
; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpsllw $4, %ymm0, %ymm1
; X32-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm1, %ymm1