[X86][AVX] Enable ISD::SRL -> ISD::MULHU for v16i16

Now that rL340913 has landed with improved v16i16 selects as shuffles.

llvm-svn: 342349
commit ee6557f858
parent 3ea496a526
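
To see why the rewrite is sound: for a 16-bit lane and a constant amount Amt in [1,15], an unsigned multiply-high by the scale factor 1 << (16 - Amt) computes exactly the logical right shift, since (x * 2^(16-Amt)) >> 16 == x >> Amt. A minimal scalar sketch of the identity (illustrative C++, not the lowering code itself):

#include <cassert>
#include <cstdint>

// mulhu(x, 1 << (NumEltBits - Amt)) == x >> Amt for Amt in [1, 15].
uint16_t lshr_via_mulhu(uint16_t x, unsigned Amt) {
  assert(Amt >= 1 && Amt <= 15); // Amt == 0 needs a special case (see below)
  uint16_t Scale = uint16_t(1u << (16 - Amt));
  return uint16_t((uint32_t(x) * Scale) >> 16); // unsigned multiply-high
}

int main() {
  // Exhaustively check the identity over all 16-bit inputs and amounts.
  for (unsigned Amt = 1; Amt <= 15; ++Amt)
    for (uint32_t x = 0; x <= 0xFFFF; ++x)
      assert(lshr_via_mulhu(uint16_t(x), Amt) == uint16_t(x) >> Amt);
}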
@@ -23896,10 +23896,8 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
   // Constant ISD::SRL can be performed efficiently on vXi16 vectors as we
   // can replace with ISD::MULHU, creating scale factor from (NumEltBits - Amt).
   // TODO: Improve support for the shift by zero special case.
   if (Opc == ISD::SRL && ConstantAmt &&
-      ((Subtarget.hasSSE41() && VT == MVT::v8i16) ||
-       DAG.isKnownNeverZero(Amt)) &&
+      (Subtarget.hasSSE41() || DAG.isKnownNeverZero(Amt)) &&
+      (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
     SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
     SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
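Amt == 0 is the special case flagged in the TODO above: its scale factor would be 1 << 16, which is unrepresentable in an i16 lane, so zero-shift lanes must keep the source value. In the updated test checks below this shows up as a vpblendw/vpblendd pair that selects the unshifted input back over the vpmulhuw result; that two-instruction blend appears to be the improved v16i16 select-as-shuffle from rL340913 that the commit message cites. A rough per-lane model under that reading (lshr_v16i16_model is a hypothetical helper, not LLVM API):

#include <cstdint>

// Model of the lowered v16i16 sequence: one unsigned multiply-high per lane,
// then lanes whose constant amount is zero are "blended" back from the source.
void lshr_v16i16_model(const uint16_t Src[16], const uint16_t Amt[16],
                       uint16_t Dst[16]) {
  for (int i = 0; i < 16; ++i) {
    if (Amt[i] == 0) {
      Dst[i] = Src[i]; // vpblendw/vpblendd keep this lane from the input
      continue;
    }
    uint16_t Scale = uint16_t(1u << (16 - Amt[i])); // (NumEltBits - Amt)
    Dst[i] = uint16_t((uint32_t(Src[i]) * Scale) >> 16); // vpmulhuw
  }
}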
@@ -974,17 +974,9 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
 ;
 ; AVX2-LABEL: constant_shift_v16i16:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX2-NEXT:    vpunpckhwd {{.*#+}} ymm3 = ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[12],ymm1[12],ymm2[13],ymm1[13],ymm2[14],ymm1[14],ymm2[15],ymm1[15]
-; AVX2-NEXT:    vpunpckhwd {{.*#+}} ymm4 = ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15]
-; AVX2-NEXT:    vpsrlvd %ymm3, %ymm4, %ymm3
-; AVX2-NEXT:    vpsrld $16, %ymm3, %ymm3
-; AVX2-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11]
-; AVX2-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11]
-; AVX2-NEXT:    vpsrlvd %ymm2, %ymm0, %ymm0
-; AVX2-NEXT:    vpsrld $16, %ymm0, %ymm0
-; AVX2-NEXT:    vpackusdw %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    vpmulhuw {{.*}}(%rip), %ymm0, %ymm1
+; AVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7],ymm0[8],ymm1[9,10,11,12,13,14,15]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; AVX2-NEXT:    retq
 ;
 ; XOPAVX1-LABEL: constant_shift_v16i16:
@@ -997,17 +989,16 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
 ;
 ; XOPAVX2-LABEL: constant_shift_v16i16:
 ; XOPAVX2:       # %bb.0:
-; XOPAVX2-NEXT:    vpshlw {{.*}}(%rip), %xmm0, %xmm1
-; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; XOPAVX2-NEXT:    vpshlw {{.*}}(%rip), %xmm0, %xmm0
-; XOPAVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX2-NEXT:    vpmulhuw {{.*}}(%rip), %ymm0, %ymm1
+; XOPAVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7],ymm0[8],ymm1[9,10,11,12,13,14,15]
+; XOPAVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; XOPAVX2-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: constant_shift_v16i16:
 ; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
-; AVX512DQ-NEXT:    vpsrlvd {{.*}}(%rip), %zmm0, %zmm0
-; AVX512DQ-NEXT:    vpmovdw %zmm0, %ymm0
+; AVX512DQ-NEXT:    vpmulhuw {{.*}}(%rip), %ymm0, %ymm1
+; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7],ymm0[8],ymm1[9,10,11,12,13,14,15]
+; AVX512DQ-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; AVX512DQ-NEXT:    retq
 ;
 ; AVX512BW-LABEL: constant_shift_v16i16:
@@ -1020,9 +1011,9 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
 ;
 ; AVX512DQVL-LABEL: constant_shift_v16i16:
 ; AVX512DQVL:       # %bb.0:
-; AVX512DQVL-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
-; AVX512DQVL-NEXT:    vpsrlvd {{.*}}(%rip), %zmm0, %zmm0
-; AVX512DQVL-NEXT:    vpmovdw %zmm0, %ymm0
+; AVX512DQVL-NEXT:    vpmulhuw {{.*}}(%rip), %ymm0, %ymm1
+; AVX512DQVL-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7],ymm0[8],ymm1[9,10,11,12,13,14,15]
+; AVX512DQVL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; AVX512DQVL-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: constant_shift_v16i16:
@@ -1041,17 +1032,9 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
 ;
 ; X32-AVX2-LABEL: constant_shift_v16i16:
 ; X32-AVX2:       # %bb.0:
-; X32-AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; X32-AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
-; X32-AVX2-NEXT:    vpunpckhwd {{.*#+}} ymm3 = ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[12],ymm1[12],ymm2[13],ymm1[13],ymm2[14],ymm1[14],ymm2[15],ymm1[15]
-; X32-AVX2-NEXT:    vpunpckhwd {{.*#+}} ymm4 = ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15]
-; X32-AVX2-NEXT:    vpsrlvd %ymm3, %ymm4, %ymm3
-; X32-AVX2-NEXT:    vpsrld $16, %ymm3, %ymm3
-; X32-AVX2-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11]
-; X32-AVX2-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11]
-; X32-AVX2-NEXT:    vpsrlvd %ymm2, %ymm0, %ymm0
-; X32-AVX2-NEXT:    vpsrld $16, %ymm0, %ymm0
-; X32-AVX2-NEXT:    vpackusdw %ymm3, %ymm0, %ymm0
+; X32-AVX2-NEXT:    vpmulhuw {{\.LCPI.*}}, %ymm0, %ymm1
+; X32-AVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7],ymm0[8],ymm1[9,10,11,12,13,14,15]
+; X32-AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; X32-AVX2-NEXT:    retl
   %shift = lshr <16 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
   ret <16 x i16> %shift
@@ -196,13 +196,13 @@ define <16 x i32> @constant_shift_v16i32(<16 x i32> %a) nounwind {
 define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) nounwind {
 ; AVX512DQ-LABEL: constant_shift_v32i16:
 ; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
-; AVX512DQ-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512DQ-NEXT:    vpsrlvd %zmm2, %zmm0, %zmm0
-; AVX512DQ-NEXT:    vpmovdw %zmm0, %ymm0
-; AVX512DQ-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
-; AVX512DQ-NEXT:    vpsrlvd %zmm2, %zmm1, %zmm1
-; AVX512DQ-NEXT:    vpmovdw %zmm1, %ymm1
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,32768,16384,8192,4096,2048,1024,512,256,128,64,32,16,8,4,2>
+; AVX512DQ-NEXT:    vpmulhuw %ymm2, %ymm0, %ymm3
+; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm3[1,2,3,4,5,6,7],ymm0[8],ymm3[9,10,11,12,13,14,15]
+; AVX512DQ-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
+; AVX512DQ-NEXT:    vpmulhuw %ymm2, %ymm1, %ymm2
+; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3,4,5,6,7],ymm1[8],ymm2[9,10,11,12,13,14,15]
+; AVX512DQ-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
 ; AVX512DQ-NEXT:    retq
 ;
 ; AVX512BW-LABEL: constant_shift_v32i16:
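As a sanity check on the AVX512DQ constant above: <u,32768,16384,...,4,2> is precisely the scale table 1 << (16 - Amt) for the test's amounts <0,1,...,15>, with lane 0 left undef ('u') because the zero shift is repaired by the blends instead. A small verification sketch (illustrative only):

#include <cassert>
#include <cstdint>

int main() {
  // Per-lane scale factors matching the vmovdqa constant; lane 0 is undef.
  const uint16_t Expected[16] = {0 /* u */, 32768, 16384, 8192, 4096, 2048,
                                 1024, 512, 256, 128, 64, 32, 16, 8, 4, 2};
  for (unsigned Amt = 1; Amt <= 15; ++Amt)
    assert(uint16_t(1u << (16 - Amt)) == Expected[Amt]);
}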