
[X86][AVX] getTargetShuffleMask - don't decode VBROADCAST(EXTRACT_SUBVECTOR(X,0)) patterns.

getTargetShuffleMask is used by the various "SimplifyDemanded" folds, so we can't assume that the bypassed extract_subvector can be safely simplified. getFauxShuffleMask performs a more general decode that lets us catch many of these cases more safely, so the impact is minimal.
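
The broadcast-of-extract pattern typically arises when splatting lane 0 of a 256-bit vector: X86ISD::VBROADCAST reads a 128-bit source, so lowering first extracts the low subvector. A minimal IR reproducer of that shape (an illustration, not a test from this commit):

define <8 x i32> @splat_lane0(<8 x i32> %x) {
  ; splat element 0 of %x across all eight lanes
  %s = shufflevector <8 x i32> %x, <8 x i32> undef, <8 x i32> zeroinitializer
  ret <8 x i32> %s
}

On AVX2 this becomes a single broadcast from the low xmm, i.e. VBROADCAST(EXTRACT_SUBVECTOR(%x, 0)) in the DAG.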

(cherry picked from commit 5b5dc2442ac7a574a3b7d17c15ebeeb9eb3bec26)
Simon Pilgrim 2020-07-21 21:50:38 +01:00 committed by Hans Wennborg
parent c505dd41ad
commit dea959b256
8 changed files with 244 additions and 259 deletions


@ -6916,25 +6916,16 @@ static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
     DecodeZeroMoveLowMask(NumElems, Mask);
     IsUnary = true;
     break;
-  case X86ISD::VBROADCAST: {
-    SDValue N0 = N->getOperand(0);
-    // See if we're broadcasting from index 0 of an EXTRACT_SUBVECTOR. If so,
-    // add the pre-extracted value to the Ops vector.
-    if (N0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
-        N0.getOperand(0).getValueType() == VT &&
-        N0.getConstantOperandVal(1) == 0)
-      Ops.push_back(N0.getOperand(0));
-    // We only decode broadcasts of same-sized vectors, unless the broadcast
-    // came from an extract from the original width. If we found one, we
-    // pushed it the Ops vector above.
-    if (N0.getValueType() == VT || !Ops.empty()) {
+  case X86ISD::VBROADCAST:
+    // We only decode broadcasts of same-sized vectors, peeking through to
+    // extracted subvectors is likely to cause hasOneUse issues with
+    // SimplifyDemandedBits etc.
+    if (N->getOperand(0).getValueType() == VT) {
       DecodeVectorBroadcast(NumElems, Mask);
       IsUnary = true;
       break;
     }
     return false;
-  }
   case X86ISD::VPERMILPV: {
     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
     IsUnary = true;

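The remaining changes in this commit are regenerated assembly CHECK lines. The affected tests exercise the funnel-shift intrinsics with a splatted variable amount; their shape is roughly the following (a sketch matching the test naming, not copied verbatim from the test files):

declare <8 x i32> @llvm.fshl.v8i32(<8 x i32>, <8 x i32>, <8 x i32>)

define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %amt) nounwind {
  ; splat lane 0 of the amount, then funnel-shift x:y left by it
  %splat = shufflevector <8 x i32> %amt, <8 x i32> undef, <8 x i32> zeroinitializer
  %res = call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %splat)
  ret <8 x i32> %res
}

In most hunks below the instruction count is unchanged; the visible difference is that the vpbroadcast of the splatted amount is now issued earlier, before the subtracted (inverse) amount is computed, rather than just before the zero-compare, and a few register assignments shift accordingly.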

@ -1092,12 +1092,12 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %
; AVX2-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
; AVX2-NEXT: vpslld %xmm3, %ymm0, %ymm3
; AVX2-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
; AVX2-NEXT: vpsubd %xmm2, %xmm4, %xmm4
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; AVX2-NEXT: vpsrld %xmm4, %ymm1, %ymm1
; AVX2-NEXT: vpor %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpcmpeqd %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vblendvps %ymm2, %ymm0, %ymm1, %ymm0
@ -1110,12 +1110,12 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %
; AVX512F-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX512F-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
; AVX512F-NEXT: vpslld %xmm3, %ymm0, %ymm3
; AVX512F-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX512F-NEXT: vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
; AVX512F-NEXT: vpsubd %xmm2, %xmm4, %xmm4
; AVX512F-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; AVX512F-NEXT: vpsrld %xmm4, %ymm1, %ymm1
; AVX512F-NEXT: vpor %ymm1, %ymm3, %ymm1
; AVX512F-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX512F-NEXT: vptestnmd %zmm2, %zmm2, %k1
; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
; AVX512F-NEXT: vmovdqa %ymm1, %ymm0
@ -1126,12 +1126,12 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %
; AVX512VL-NEXT: vpandd {{.*}}(%rip){1to4}, %xmm2, %xmm2
; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
; AVX512VL-NEXT: vpslld %xmm3, %ymm0, %ymm3
; AVX512VL-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
; AVX512VL-NEXT: vpsubd %xmm2, %xmm4, %xmm4
; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; AVX512VL-NEXT: vpsrld %xmm4, %ymm1, %ymm1
; AVX512VL-NEXT: vpor %ymm1, %ymm3, %ymm1
; AVX512VL-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX512VL-NEXT: vptestnmd %ymm2, %ymm2, %k1
; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
; AVX512VL-NEXT: vmovdqa %ymm1, %ymm0
@ -1144,12 +1144,12 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %
; AVX512BW-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX512BW-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
; AVX512BW-NEXT: vpslld %xmm3, %ymm0, %ymm3
; AVX512BW-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX512BW-NEXT: vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
; AVX512BW-NEXT: vpsubd %xmm2, %xmm4, %xmm4
; AVX512BW-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; AVX512BW-NEXT: vpsrld %xmm4, %ymm1, %ymm1
; AVX512BW-NEXT: vpor %ymm1, %ymm3, %ymm1
; AVX512BW-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX512BW-NEXT: vptestnmd %zmm2, %zmm2, %k1
; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
; AVX512BW-NEXT: vmovdqa %ymm1, %ymm0
@ -1162,12 +1162,12 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %
; AVX512VBMI2-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX512VBMI2-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
; AVX512VBMI2-NEXT: vpslld %xmm3, %ymm0, %ymm3
; AVX512VBMI2-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX512VBMI2-NEXT: vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
; AVX512VBMI2-NEXT: vpsubd %xmm2, %xmm4, %xmm4
; AVX512VBMI2-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; AVX512VBMI2-NEXT: vpsrld %xmm4, %ymm1, %ymm1
; AVX512VBMI2-NEXT: vpor %ymm1, %ymm3, %ymm1
; AVX512VBMI2-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX512VBMI2-NEXT: vptestnmd %zmm2, %zmm2, %k1
; AVX512VBMI2-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
; AVX512VBMI2-NEXT: vmovdqa %ymm1, %ymm0
@ -1178,12 +1178,12 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %
; AVX512VLBW-NEXT: vpandd {{.*}}(%rip){1to4}, %xmm2, %xmm2
; AVX512VLBW-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
; AVX512VLBW-NEXT: vpslld %xmm3, %ymm0, %ymm3
; AVX512VLBW-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX512VLBW-NEXT: vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
; AVX512VLBW-NEXT: vpsubd %xmm2, %xmm4, %xmm4
; AVX512VLBW-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; AVX512VLBW-NEXT: vpsrld %xmm4, %ymm1, %ymm1
; AVX512VLBW-NEXT: vpor %ymm1, %ymm3, %ymm1
; AVX512VLBW-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX512VLBW-NEXT: vptestnmd %ymm2, %ymm2, %k1
; AVX512VLBW-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
; AVX512VLBW-NEXT: vmovdqa %ymm1, %ymm0
@ -1224,12 +1224,12 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %
; XOPAVX2-NEXT: vpand %xmm3, %xmm2, %xmm2
; XOPAVX2-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
; XOPAVX2-NEXT: vpslld %xmm3, %ymm0, %ymm3
; XOPAVX2-NEXT: vpbroadcastd %xmm2, %ymm2
; XOPAVX2-NEXT: vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
; XOPAVX2-NEXT: vpsubd %xmm2, %xmm4, %xmm4
; XOPAVX2-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; XOPAVX2-NEXT: vpsrld %xmm4, %ymm1, %ymm1
; XOPAVX2-NEXT: vpor %ymm1, %ymm3, %ymm1
; XOPAVX2-NEXT: vpbroadcastd %xmm2, %ymm2
; XOPAVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX2-NEXT: vpcmpeqd %ymm3, %ymm2, %ymm2
; XOPAVX2-NEXT: vblendvps %ymm2, %ymm0, %ymm1, %ymm0
@ -1271,12 +1271,12 @@ define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX2-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX2-NEXT: vpsllw %xmm3, %ymm0, %ymm3
; AVX2-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
; AVX2-NEXT: vpsubw %xmm2, %xmm4, %xmm4
; AVX2-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
; AVX2-NEXT: vpsrlw %xmm4, %ymm1, %ymm1
; AVX2-NEXT: vpor %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpcmpeqw %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@ -1287,12 +1287,12 @@ define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i
; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512F-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX512F-NEXT: vpsllw %xmm3, %ymm0, %ymm3
; AVX512F-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
; AVX512F-NEXT: vpsubw %xmm2, %xmm4, %xmm4
; AVX512F-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
; AVX512F-NEXT: vpsrlw %xmm4, %ymm1, %ymm1
; AVX512F-NEXT: vpor %ymm1, %ymm3, %ymm1
; AVX512F-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX512F-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512F-NEXT: vpcmpeqw %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@ -1303,12 +1303,12 @@ define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i
; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX512VL-NEXT: vpsllw %xmm3, %ymm0, %ymm3
; AVX512VL-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
; AVX512VL-NEXT: vpsubw %xmm2, %xmm4, %xmm4
; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
; AVX512VL-NEXT: vpsrlw %xmm4, %ymm1, %ymm1
; AVX512VL-NEXT: vpor %ymm1, %ymm3, %ymm1
; AVX512VL-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512VL-NEXT: vpcmpeqw %ymm3, %ymm2, %ymm2
; AVX512VL-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@ -1320,12 +1320,12 @@ define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i
; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512BW-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX512BW-NEXT: vpsllw %xmm3, %ymm0, %ymm3
; AVX512BW-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
; AVX512BW-NEXT: vpsubw %xmm2, %xmm4, %xmm4
; AVX512BW-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
; AVX512BW-NEXT: vpsrlw %xmm4, %ymm1, %ymm1
; AVX512BW-NEXT: vpor %ymm1, %ymm3, %ymm1
; AVX512BW-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX512BW-NEXT: vptestnmw %zmm2, %zmm2, %k1
; AVX512BW-NEXT: vmovdqu16 %zmm0, %zmm1 {%k1}
; AVX512BW-NEXT: vmovdqa %ymm1, %ymm0
@ -1337,12 +1337,12 @@ define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i
; AVX512VBMI2-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512VBMI2-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX512VBMI2-NEXT: vpsllw %xmm3, %ymm0, %ymm3
; AVX512VBMI2-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
; AVX512VBMI2-NEXT: vpsubw %xmm2, %xmm4, %xmm4
; AVX512VBMI2-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
; AVX512VBMI2-NEXT: vpsrlw %xmm4, %ymm1, %ymm1
; AVX512VBMI2-NEXT: vpor %ymm1, %ymm3, %ymm1
; AVX512VBMI2-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX512VBMI2-NEXT: vptestnmw %zmm2, %zmm2, %k1
; AVX512VBMI2-NEXT: vmovdqu16 %zmm0, %zmm1 {%k1}
; AVX512VBMI2-NEXT: vmovdqa %ymm1, %ymm0
@ -1353,12 +1353,12 @@ define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i
; AVX512VLBW-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512VLBW-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX512VLBW-NEXT: vpsllw %xmm3, %ymm0, %ymm3
; AVX512VLBW-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
; AVX512VLBW-NEXT: vpsubw %xmm2, %xmm4, %xmm4
; AVX512VLBW-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
; AVX512VLBW-NEXT: vpsrlw %xmm4, %ymm1, %ymm1
; AVX512VLBW-NEXT: vpor %ymm1, %ymm3, %ymm1
; AVX512VLBW-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX512VLBW-NEXT: vptestnmw %ymm2, %ymm2, %k1
; AVX512VLBW-NEXT: vmovdqu16 %ymm0, %ymm1 {%k1}
; AVX512VLBW-NEXT: vmovdqa %ymm1, %ymm0
@ -1399,12 +1399,12 @@ define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i
; XOPAVX2-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; XOPAVX2-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; XOPAVX2-NEXT: vpsllw %xmm3, %ymm0, %ymm3
; XOPAVX2-NEXT: vpbroadcastw %xmm2, %ymm2
; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
; XOPAVX2-NEXT: vpsubw %xmm2, %xmm4, %xmm4
; XOPAVX2-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
; XOPAVX2-NEXT: vpsrlw %xmm4, %ymm1, %ymm1
; XOPAVX2-NEXT: vpor %ymm1, %ymm3, %ymm1
; XOPAVX2-NEXT: vpbroadcastw %xmm2, %ymm2
; XOPAVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX2-NEXT: vpcmpeqw %ymm3, %ymm2, %ymm2
; XOPAVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@ -1458,6 +1458,7 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %
; AVX2-NEXT: vpsllw %xmm3, %xmm5, %xmm3
; AVX2-NEXT: vpbroadcastb %xmm3, %ymm3
; AVX2-NEXT: vpand %ymm3, %ymm4, %ymm3
; AVX2-NEXT: vpbroadcastb %xmm2, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX2-NEXT: vpsubb %xmm2, %xmm4, %xmm4
; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
@ -1467,7 +1468,6 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %
; AVX2-NEXT: vpbroadcastb %xmm4, %ymm4
; AVX2-NEXT: vpand %ymm4, %ymm1, %ymm1
; AVX2-NEXT: vpor %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vpbroadcastb %xmm2, %ymm2
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpcmpeqb %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@ -1482,6 +1482,7 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %
; AVX512F-NEXT: vpsllw %xmm3, %xmm5, %xmm3
; AVX512F-NEXT: vpbroadcastb %xmm3, %ymm3
; AVX512F-NEXT: vpand %ymm3, %ymm4, %ymm3
; AVX512F-NEXT: vpbroadcastb %xmm2, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512F-NEXT: vpsubb %xmm2, %xmm4, %xmm4
; AVX512F-NEXT: vpmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
@ -1491,7 +1492,6 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %
; AVX512F-NEXT: vpbroadcastb %xmm4, %ymm4
; AVX512F-NEXT: vpand %ymm4, %ymm1, %ymm1
; AVX512F-NEXT: vpor %ymm1, %ymm3, %ymm1
; AVX512F-NEXT: vpbroadcastb %xmm2, %ymm2
; AVX512F-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512F-NEXT: vpcmpeqb %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@ -1506,6 +1506,7 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %
; AVX512VL-NEXT: vpsllw %xmm3, %xmm5, %xmm3
; AVX512VL-NEXT: vpbroadcastb %xmm3, %ymm3
; AVX512VL-NEXT: vpand %ymm3, %ymm4, %ymm3
; AVX512VL-NEXT: vpbroadcastb %xmm2, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512VL-NEXT: vpsubb %xmm2, %xmm4, %xmm4
; AVX512VL-NEXT: vpmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
@ -1514,9 +1515,8 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %
; AVX512VL-NEXT: vpsrlw $8, %xmm4, %xmm4
; AVX512VL-NEXT: vpbroadcastb %xmm4, %ymm4
; AVX512VL-NEXT: vpternlogq $236, %ymm1, %ymm3, %ymm4
; AVX512VL-NEXT: vpbroadcastb %xmm2, %ymm1
; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vpcmpeqb %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vpcmpeqb %ymm1, %ymm2, %ymm1
; AVX512VL-NEXT: vpblendvb %ymm1, %ymm0, %ymm4, %ymm0
; AVX512VL-NEXT: retq
;


@ -643,12 +643,12 @@ define <16 x i32> @splatvar_funnnel_v16i32(<16 x i32> %x, <16 x i32> %y, <16 x i
; AVX512F-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX512F-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
; AVX512F-NEXT: vpslld %xmm3, %zmm0, %zmm3
; AVX512F-NEXT: vpbroadcastd %xmm2, %zmm2
; AVX512F-NEXT: vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
; AVX512F-NEXT: vpsubd %xmm2, %xmm4, %xmm4
; AVX512F-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; AVX512F-NEXT: vpsrld %xmm4, %zmm1, %zmm1
; AVX512F-NEXT: vpord %zmm1, %zmm3, %zmm1
; AVX512F-NEXT: vpbroadcastd %xmm2, %zmm2
; AVX512F-NEXT: vptestnmd %zmm2, %zmm2, %k1
; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm0
@ -659,12 +659,12 @@ define <16 x i32> @splatvar_funnnel_v16i32(<16 x i32> %x, <16 x i32> %y, <16 x i
; AVX512VL-NEXT: vpandd {{.*}}(%rip){1to4}, %xmm2, %xmm2
; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
; AVX512VL-NEXT: vpslld %xmm3, %zmm0, %zmm3
; AVX512VL-NEXT: vpbroadcastd %xmm2, %zmm2
; AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
; AVX512VL-NEXT: vpsubd %xmm2, %xmm4, %xmm4
; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; AVX512VL-NEXT: vpsrld %xmm4, %zmm1, %zmm1
; AVX512VL-NEXT: vpord %zmm1, %zmm3, %zmm1
; AVX512VL-NEXT: vpbroadcastd %xmm2, %zmm2
; AVX512VL-NEXT: vptestnmd %zmm2, %zmm2, %k1
; AVX512VL-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
; AVX512VL-NEXT: vmovdqa64 %zmm1, %zmm0
@ -676,12 +676,12 @@ define <16 x i32> @splatvar_funnnel_v16i32(<16 x i32> %x, <16 x i32> %y, <16 x i
; AVX512BW-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX512BW-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
; AVX512BW-NEXT: vpslld %xmm3, %zmm0, %zmm3
; AVX512BW-NEXT: vpbroadcastd %xmm2, %zmm2
; AVX512BW-NEXT: vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
; AVX512BW-NEXT: vpsubd %xmm2, %xmm4, %xmm4
; AVX512BW-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; AVX512BW-NEXT: vpsrld %xmm4, %zmm1, %zmm1
; AVX512BW-NEXT: vpord %zmm1, %zmm3, %zmm1
; AVX512BW-NEXT: vpbroadcastd %xmm2, %zmm2
; AVX512BW-NEXT: vptestnmd %zmm2, %zmm2, %k1
; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
@ -698,12 +698,12 @@ define <16 x i32> @splatvar_funnnel_v16i32(<16 x i32> %x, <16 x i32> %y, <16 x i
; AVX512VLBW-NEXT: vpandd {{.*}}(%rip){1to4}, %xmm2, %xmm2
; AVX512VLBW-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
; AVX512VLBW-NEXT: vpslld %xmm3, %zmm0, %zmm3
; AVX512VLBW-NEXT: vpbroadcastd %xmm2, %zmm2
; AVX512VLBW-NEXT: vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
; AVX512VLBW-NEXT: vpsubd %xmm2, %xmm4, %xmm4
; AVX512VLBW-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; AVX512VLBW-NEXT: vpsrld %xmm4, %zmm1, %zmm1
; AVX512VLBW-NEXT: vpord %zmm1, %zmm3, %zmm1
; AVX512VLBW-NEXT: vpbroadcastd %xmm2, %zmm2
; AVX512VLBW-NEXT: vptestnmd %zmm2, %zmm2, %k1
; AVX512VLBW-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
; AVX512VLBW-NEXT: vmovdqa64 %zmm1, %zmm0
@ -771,12 +771,12 @@ define <32 x i16> @splatvar_funnnel_v32i16(<32 x i16> %x, <32 x i16> %y, <32 x i
; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512BW-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX512BW-NEXT: vpsllw %xmm3, %zmm0, %zmm3
; AVX512BW-NEXT: vpbroadcastw %xmm2, %zmm2
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
; AVX512BW-NEXT: vpsubw %xmm2, %xmm4, %xmm4
; AVX512BW-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
; AVX512BW-NEXT: vpsrlw %xmm4, %zmm1, %zmm1
; AVX512BW-NEXT: vporq %zmm1, %zmm3, %zmm1
; AVX512BW-NEXT: vpbroadcastw %xmm2, %zmm2
; AVX512BW-NEXT: vptestnmw %zmm2, %zmm2, %k1
; AVX512BW-NEXT: vmovdqu16 %zmm0, %zmm1 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
@ -793,12 +793,12 @@ define <32 x i16> @splatvar_funnnel_v32i16(<32 x i16> %x, <32 x i16> %y, <32 x i
; AVX512VLBW-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512VLBW-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX512VLBW-NEXT: vpsllw %xmm3, %zmm0, %zmm3
; AVX512VLBW-NEXT: vpbroadcastw %xmm2, %zmm2
; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
; AVX512VLBW-NEXT: vpsubw %xmm2, %xmm4, %xmm4
; AVX512VLBW-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
; AVX512VLBW-NEXT: vpsrlw %xmm4, %zmm1, %zmm1
; AVX512VLBW-NEXT: vporq %zmm1, %zmm3, %zmm1
; AVX512VLBW-NEXT: vpbroadcastw %xmm2, %zmm2
; AVX512VLBW-NEXT: vptestnmw %zmm2, %zmm2, %k1
; AVX512VLBW-NEXT: vmovdqu16 %zmm0, %zmm1 {%k1}
; AVX512VLBW-NEXT: vmovdqa64 %zmm1, %zmm0
@ -890,6 +890,7 @@ define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %
; AVX512BW-NEXT: vpsllw %xmm3, %xmm5, %xmm3
; AVX512BW-NEXT: vpbroadcastb %xmm3, %zmm3
; AVX512BW-NEXT: vpandq %zmm3, %zmm4, %zmm3
; AVX512BW-NEXT: vpbroadcastb %xmm2, %zmm2
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512BW-NEXT: vpsubb %xmm2, %xmm4, %xmm4
; AVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
@ -898,7 +899,6 @@ define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %
; AVX512BW-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX512BW-NEXT: vpbroadcastb %xmm1, %zmm1
; AVX512BW-NEXT: vpternlogq $236, %zmm6, %zmm3, %zmm1
; AVX512BW-NEXT: vpbroadcastb %xmm2, %zmm2
; AVX512BW-NEXT: vptestnmb %zmm2, %zmm2, %k1
; AVX512BW-NEXT: vmovdqu8 %zmm0, %zmm1 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
@ -913,6 +913,7 @@ define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %
; AVX512VBMI2-NEXT: vpsllw %xmm3, %xmm5, %xmm3
; AVX512VBMI2-NEXT: vpbroadcastb %xmm3, %zmm3
; AVX512VBMI2-NEXT: vpandq %zmm3, %zmm4, %zmm3
; AVX512VBMI2-NEXT: vpbroadcastb %xmm2, %zmm2
; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512VBMI2-NEXT: vpsubb %xmm2, %xmm4, %xmm4
; AVX512VBMI2-NEXT: vpmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
@ -921,7 +922,6 @@ define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %
; AVX512VBMI2-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX512VBMI2-NEXT: vpbroadcastb %xmm1, %zmm1
; AVX512VBMI2-NEXT: vpternlogq $236, %zmm6, %zmm3, %zmm1
; AVX512VBMI2-NEXT: vpbroadcastb %xmm2, %zmm2
; AVX512VBMI2-NEXT: vptestnmb %zmm2, %zmm2, %k1
; AVX512VBMI2-NEXT: vmovdqu8 %zmm0, %zmm1 {%k1}
; AVX512VBMI2-NEXT: vmovdqa64 %zmm1, %zmm0
@ -936,6 +936,7 @@ define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %
; AVX512VLBW-NEXT: vpsllw %xmm3, %xmm5, %xmm3
; AVX512VLBW-NEXT: vpbroadcastb %xmm3, %zmm3
; AVX512VLBW-NEXT: vpandq %zmm3, %zmm4, %zmm3
; AVX512VLBW-NEXT: vpbroadcastb %xmm2, %zmm2
; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512VLBW-NEXT: vpsubb %xmm2, %xmm4, %xmm4
; AVX512VLBW-NEXT: vpmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
@ -944,7 +945,6 @@ define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %
; AVX512VLBW-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpbroadcastb %xmm1, %zmm1
; AVX512VLBW-NEXT: vpternlogq $236, %zmm6, %zmm3, %zmm1
; AVX512VLBW-NEXT: vpbroadcastb %xmm2, %zmm2
; AVX512VLBW-NEXT: vptestnmb %zmm2, %zmm2, %k1
; AVX512VLBW-NEXT: vmovdqu8 %zmm0, %zmm1 {%k1}
; AVX512VLBW-NEXT: vmovdqa64 %zmm1, %zmm0
@ -959,6 +959,7 @@ define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %
; AVX512VLVBMI2-NEXT: vpsllw %xmm3, %xmm5, %xmm3
; AVX512VLVBMI2-NEXT: vpbroadcastb %xmm3, %zmm3
; AVX512VLVBMI2-NEXT: vpandq %zmm3, %zmm4, %zmm3
; AVX512VLVBMI2-NEXT: vpbroadcastb %xmm2, %zmm2
; AVX512VLVBMI2-NEXT: vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512VLVBMI2-NEXT: vpsubb %xmm2, %xmm4, %xmm4
; AVX512VLVBMI2-NEXT: vpmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
@ -967,7 +968,6 @@ define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %
; AVX512VLVBMI2-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX512VLVBMI2-NEXT: vpbroadcastb %xmm1, %zmm1
; AVX512VLVBMI2-NEXT: vpternlogq $236, %zmm6, %zmm3, %zmm1
; AVX512VLVBMI2-NEXT: vpbroadcastb %xmm2, %zmm2
; AVX512VLVBMI2-NEXT: vptestnmb %zmm2, %zmm2, %k1
; AVX512VLVBMI2-NEXT: vmovdqu8 %zmm0, %zmm1 {%k1}
; AVX512VLVBMI2-NEXT: vmovdqa64 %zmm1, %zmm0

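The two-operand tests in this file are the rotate flavour of the same lowering, with both funnel inputs being %x; again a sketch of the shape, not a copy from the test file:

declare <32 x i16> @llvm.fshl.v32i16(<32 x i16>, <32 x i16>, <32 x i16>)

define <32 x i16> @splatvar_funnnel_v32i16(<32 x i16> %x, <32 x i16> %amt) nounwind {
  ; fshl(x, x, amt) is a rotate-left of x by amt
  %splat = shufflevector <32 x i16> %amt, <32 x i16> undef, <32 x i32> zeroinitializer
  %rot = call <32 x i16> @llvm.fshl.v32i16(<32 x i16> %x, <32 x i16> %x, <32 x i16> %splat)
  ret <32 x i16> %rot
}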

@ -366,13 +366,14 @@ define <16 x i32> @splatvar_funnnel_v16i32(<16 x i32> %x, <16 x i32> %amt) nounw
define <32 x i16> @splatvar_funnnel_v32i16(<32 x i16> %x, <32 x i16> %amt) nounwind {
; AVX512F-LABEL: splatvar_funnnel_v32i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm3
; AVX512F-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
; AVX512F-NEXT: vpsllw %xmm3, %ymm2, %ymm4
; AVX512F-NEXT: vpsllw %xmm3, %ymm0, %ymm3
; AVX512F-NEXT: vinserti64x4 $1, %ymm4, %zmm3, %zmm3
; AVX512F-NEXT: vpbroadcastw %xmm1, %ymm1
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm1, %zmm1
; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm2
; AVX512F-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512F-NEXT: vpsllw %xmm2, %ymm3, %ymm4
; AVX512F-NEXT: vpsllw %xmm2, %ymm0, %ymm2
; AVX512F-NEXT: vinserti64x4 $1, %ymm4, %zmm2, %zmm2
; AVX512F-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX512F-NEXT: vpsubw %ymm1, %ymm4, %ymm1
; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
@ -380,22 +381,23 @@ define <32 x i16> @splatvar_funnnel_v32i16(<32 x i16> %x, <32 x i16> %amt) nounw
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512F-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
; AVX512F-NEXT: vpsrlvd %zmm1, %zmm2, %zmm1
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
; AVX512F-NEXT: vpsrlvd %zmm1, %zmm3, %zmm1
; AVX512F-NEXT: vpmovdw %zmm1, %ymm1
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: vporq %zmm0, %zmm3, %zmm0
; AVX512F-NEXT: vporq %zmm0, %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatvar_funnnel_v32i16:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm3
; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
; AVX512VL-NEXT: vpsllw %xmm3, %ymm2, %ymm4
; AVX512VL-NEXT: vpsllw %xmm3, %ymm0, %ymm3
; AVX512VL-NEXT: vinserti64x4 $1, %ymm4, %zmm3, %zmm3
; AVX512VL-NEXT: vpbroadcastw %xmm1, %ymm1
; AVX512VL-NEXT: vinserti64x4 $1, %ymm1, %zmm1, %zmm1
; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm2
; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512VL-NEXT: vpsllw %xmm2, %ymm3, %ymm4
; AVX512VL-NEXT: vpsllw %xmm2, %ymm0, %ymm2
; AVX512VL-NEXT: vinserti64x4 $1, %ymm4, %zmm2, %zmm2
; AVX512VL-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX512VL-NEXT: vpsubw %ymm1, %ymm4, %ymm1
; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
@ -403,11 +405,11 @@ define <32 x i16> @splatvar_funnnel_v32i16(<32 x i16> %x, <32 x i16> %amt) nounw
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512VL-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0
; AVX512VL-NEXT: vpmovdw %zmm0, %ymm0
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
; AVX512VL-NEXT: vpsrlvd %zmm1, %zmm2, %zmm1
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
; AVX512VL-NEXT: vpsrlvd %zmm1, %zmm3, %zmm1
; AVX512VL-NEXT: vpmovdw %zmm1, %ymm1
; AVX512VL-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512VL-NEXT: vporq %zmm0, %zmm3, %zmm0
; AVX512VL-NEXT: vporq %zmm0, %zmm2, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: splatvar_funnnel_v32i16:
@ -447,16 +449,17 @@ define <32 x i16> @splatvar_funnnel_v32i16(<32 x i16> %x, <32 x i16> %amt) nounw
define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %amt) nounwind {
; AVX512F-LABEL: splatvar_funnnel_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; AVX512F-NEXT: vpsrlw $4, %ymm2, %ymm3
; AVX512F-NEXT: vpbroadcastb %xmm1, %ymm1
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm1, %zmm2
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm3
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512F-NEXT: vpand %ymm4, %ymm3, %ymm3
; AVX512F-NEXT: vpbroadcastb %xmm1, %ymm5
; AVX512F-NEXT: vpxor %xmm6, %xmm6, %xmm6
; AVX512F-NEXT: vpsubb %ymm5, %ymm6, %ymm5
; AVX512F-NEXT: vpxor %xmm5, %xmm5, %xmm5
; AVX512F-NEXT: vpsubb %ymm2, %ymm5, %ymm5
; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm5, %ymm5
; AVX512F-NEXT: vpsllw $5, %ymm5, %ymm5
; AVX512F-NEXT: vpblendvb %ymm5, %ymm3, %ymm2, %ymm3
; AVX512F-NEXT: vpblendvb %ymm5, %ymm3, %ymm1, %ymm3
; AVX512F-NEXT: vpsrlw $2, %ymm3, %ymm6
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
; AVX512F-NEXT: vpand %ymm7, %ymm6, %ymm6
@ -477,31 +480,32 @@ define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %amt) nounwind
; AVX512F-NEXT: vpand %ymm5, %ymm9, %ymm5
; AVX512F-NEXT: vpblendvb %ymm10, %ymm5, %ymm4, %ymm4
; AVX512F-NEXT: vinserti64x4 $1, %ymm3, %zmm4, %zmm3
; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX512F-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512F-NEXT: vpsllw %xmm1, %ymm2, %ymm2
; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512F-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512F-NEXT: vpsllw %xmm2, %ymm1, %ymm1
; AVX512F-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX512F-NEXT: vpsllw %xmm1, %xmm4, %xmm4
; AVX512F-NEXT: vpsllw %xmm2, %xmm4, %xmm4
; AVX512F-NEXT: vpbroadcastb %xmm4, %ymm4
; AVX512F-NEXT: vpand %ymm4, %ymm2, %ymm2
; AVX512F-NEXT: vpsllw %xmm1, %ymm0, %ymm0
; AVX512F-NEXT: vpand %ymm4, %ymm1, %ymm1
; AVX512F-NEXT: vpsllw %xmm2, %ymm0, %ymm0
; AVX512F-NEXT: vpand %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: vporq %zmm3, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatvar_funnnel_v64i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; AVX512VL-NEXT: vpsrlw $4, %ymm2, %ymm3
; AVX512VL-NEXT: vpbroadcastb %xmm1, %ymm1
; AVX512VL-NEXT: vinserti64x4 $1, %ymm1, %zmm1, %zmm2
; AVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm3
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VL-NEXT: vpand %ymm4, %ymm3, %ymm3
; AVX512VL-NEXT: vpbroadcastb %xmm1, %ymm5
; AVX512VL-NEXT: vpxor %xmm6, %xmm6, %xmm6
; AVX512VL-NEXT: vpsubb %ymm5, %ymm6, %ymm5
; AVX512VL-NEXT: vpxor %xmm5, %xmm5, %xmm5
; AVX512VL-NEXT: vpsubb %ymm2, %ymm5, %ymm5
; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm5, %ymm5
; AVX512VL-NEXT: vpsllw $5, %ymm5, %ymm5
; AVX512VL-NEXT: vpblendvb %ymm5, %ymm3, %ymm2, %ymm3
; AVX512VL-NEXT: vpblendvb %ymm5, %ymm3, %ymm1, %ymm3
; AVX512VL-NEXT: vpsrlw $2, %ymm3, %ymm6
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm7 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
; AVX512VL-NEXT: vpand %ymm7, %ymm6, %ymm6
@ -522,16 +526,16 @@ define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %amt) nounwind
; AVX512VL-NEXT: vpand %ymm5, %ymm9, %ymm5
; AVX512VL-NEXT: vpblendvb %ymm10, %ymm5, %ymm4, %ymm4
; AVX512VL-NEXT: vinserti64x4 $1, %ymm3, %zmm4, %zmm3
; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX512VL-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VL-NEXT: vpsllw %xmm1, %ymm2, %ymm2
; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512VL-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VL-NEXT: vpsllw %xmm2, %ymm1, %ymm1
; AVX512VL-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX512VL-NEXT: vpsllw %xmm1, %xmm4, %xmm4
; AVX512VL-NEXT: vpsllw %xmm2, %xmm4, %xmm4
; AVX512VL-NEXT: vpbroadcastb %xmm4, %ymm4
; AVX512VL-NEXT: vpand %ymm4, %ymm2, %ymm2
; AVX512VL-NEXT: vpsllw %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT: vpand %ymm4, %ymm1, %ymm1
; AVX512VL-NEXT: vpsllw %xmm2, %ymm0, %ymm0
; AVX512VL-NEXT: vpand %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512VL-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512VL-NEXT: vporq %zmm3, %zmm0, %zmm0
; AVX512VL-NEXT: retq
;


@ -1095,12 +1095,12 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %
; AVX2-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
; AVX2-NEXT: vpsrld %xmm3, %ymm1, %ymm3
; AVX2-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
; AVX2-NEXT: vpsubd %xmm2, %xmm4, %xmm4
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; AVX2-NEXT: vpslld %xmm4, %ymm0, %ymm0
; AVX2-NEXT: vpor %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpcmpeqd %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0
@ -1113,12 +1113,12 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %
; AVX512F-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX512F-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
; AVX512F-NEXT: vpsrld %xmm3, %ymm1, %ymm3
; AVX512F-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX512F-NEXT: vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
; AVX512F-NEXT: vpsubd %xmm2, %xmm4, %xmm4
; AVX512F-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; AVX512F-NEXT: vpslld %xmm4, %ymm0, %ymm0
; AVX512F-NEXT: vpor %ymm3, %ymm0, %ymm0
; AVX512F-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX512F-NEXT: vptestnmd %zmm2, %zmm2, %k1
; AVX512F-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
@ -1129,12 +1129,12 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %
; AVX512VL-NEXT: vpandd {{.*}}(%rip){1to4}, %xmm2, %xmm2
; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
; AVX512VL-NEXT: vpsrld %xmm3, %ymm1, %ymm3
; AVX512VL-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
; AVX512VL-NEXT: vpsubd %xmm2, %xmm4, %xmm4
; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; AVX512VL-NEXT: vpslld %xmm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpor %ymm3, %ymm0, %ymm0
; AVX512VL-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX512VL-NEXT: vptestnmd %ymm2, %ymm2, %k1
; AVX512VL-NEXT: vmovdqa32 %ymm1, %ymm0 {%k1}
; AVX512VL-NEXT: retq
@ -1146,12 +1146,12 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %
; AVX512BW-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX512BW-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
; AVX512BW-NEXT: vpsrld %xmm3, %ymm1, %ymm3
; AVX512BW-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX512BW-NEXT: vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
; AVX512BW-NEXT: vpsubd %xmm2, %xmm4, %xmm4
; AVX512BW-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; AVX512BW-NEXT: vpslld %xmm4, %ymm0, %ymm0
; AVX512BW-NEXT: vpor %ymm3, %ymm0, %ymm0
; AVX512BW-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX512BW-NEXT: vptestnmd %zmm2, %zmm2, %k1
; AVX512BW-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
@ -1164,12 +1164,12 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %
; AVX512VBMI2-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX512VBMI2-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
; AVX512VBMI2-NEXT: vpsrld %xmm3, %ymm1, %ymm3
; AVX512VBMI2-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX512VBMI2-NEXT: vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
; AVX512VBMI2-NEXT: vpsubd %xmm2, %xmm4, %xmm4
; AVX512VBMI2-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; AVX512VBMI2-NEXT: vpslld %xmm4, %ymm0, %ymm0
; AVX512VBMI2-NEXT: vpor %ymm3, %ymm0, %ymm0
; AVX512VBMI2-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX512VBMI2-NEXT: vptestnmd %zmm2, %zmm2, %k1
; AVX512VBMI2-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
; AVX512VBMI2-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
@ -1180,12 +1180,12 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %
; AVX512VLBW-NEXT: vpandd {{.*}}(%rip){1to4}, %xmm2, %xmm2
; AVX512VLBW-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
; AVX512VLBW-NEXT: vpsrld %xmm3, %ymm1, %ymm3
; AVX512VLBW-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX512VLBW-NEXT: vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
; AVX512VLBW-NEXT: vpsubd %xmm2, %xmm4, %xmm4
; AVX512VLBW-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; AVX512VLBW-NEXT: vpslld %xmm4, %ymm0, %ymm0
; AVX512VLBW-NEXT: vpor %ymm3, %ymm0, %ymm0
; AVX512VLBW-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX512VLBW-NEXT: vptestnmd %ymm2, %ymm2, %k1
; AVX512VLBW-NEXT: vmovdqa32 %ymm1, %ymm0 {%k1}
; AVX512VLBW-NEXT: retq
@ -1226,12 +1226,12 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %
; XOPAVX2-NEXT: vpand %xmm3, %xmm2, %xmm2
; XOPAVX2-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
; XOPAVX2-NEXT: vpsrld %xmm3, %ymm1, %ymm3
; XOPAVX2-NEXT: vpbroadcastd %xmm2, %ymm2
; XOPAVX2-NEXT: vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
; XOPAVX2-NEXT: vpsubd %xmm2, %xmm4, %xmm4
; XOPAVX2-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; XOPAVX2-NEXT: vpslld %xmm4, %ymm0, %ymm0
; XOPAVX2-NEXT: vpor %ymm3, %ymm0, %ymm0
; XOPAVX2-NEXT: vpbroadcastd %xmm2, %ymm2
; XOPAVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX2-NEXT: vpcmpeqd %ymm3, %ymm2, %ymm2
; XOPAVX2-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0
@ -1273,12 +1273,12 @@ define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX2-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX2-NEXT: vpsrlw %xmm3, %ymm1, %ymm3
; AVX2-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
; AVX2-NEXT: vpsubw %xmm2, %xmm4, %xmm4
; AVX2-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
; AVX2-NEXT: vpsllw %xmm4, %ymm0, %ymm0
; AVX2-NEXT: vpor %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpcmpeqw %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
@ -1289,12 +1289,12 @@ define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i
; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512F-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX512F-NEXT: vpsrlw %xmm3, %ymm1, %ymm3
; AVX512F-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
; AVX512F-NEXT: vpsubw %xmm2, %xmm4, %xmm4
; AVX512F-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
; AVX512F-NEXT: vpsllw %xmm4, %ymm0, %ymm0
; AVX512F-NEXT: vpor %ymm3, %ymm0, %ymm0
; AVX512F-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX512F-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512F-NEXT: vpcmpeqw %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
@ -1305,12 +1305,12 @@ define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i
; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX512VL-NEXT: vpsrlw %xmm3, %ymm1, %ymm3
; AVX512VL-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
; AVX512VL-NEXT: vpsubw %xmm2, %xmm4, %xmm4
; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
; AVX512VL-NEXT: vpsllw %xmm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpor %ymm3, %ymm0, %ymm0
; AVX512VL-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512VL-NEXT: vpcmpeqw %ymm3, %ymm2, %ymm2
; AVX512VL-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
@ -1322,12 +1322,12 @@ define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i
; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512BW-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX512BW-NEXT: vpsrlw %xmm3, %ymm1, %ymm3
; AVX512BW-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
; AVX512BW-NEXT: vpsubw %xmm2, %xmm4, %xmm4
; AVX512BW-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
; AVX512BW-NEXT: vpsllw %xmm4, %ymm0, %ymm0
; AVX512BW-NEXT: vpor %ymm3, %ymm0, %ymm0
; AVX512BW-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX512BW-NEXT: vptestnmw %zmm2, %zmm2, %k1
; AVX512BW-NEXT: vmovdqu16 %zmm1, %zmm0 {%k1}
; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
@ -1339,12 +1339,12 @@ define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i
; AVX512VBMI2-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512VBMI2-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX512VBMI2-NEXT: vpsrlw %xmm3, %ymm1, %ymm3
; AVX512VBMI2-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
; AVX512VBMI2-NEXT: vpsubw %xmm2, %xmm4, %xmm4
; AVX512VBMI2-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
; AVX512VBMI2-NEXT: vpsllw %xmm4, %ymm0, %ymm0
; AVX512VBMI2-NEXT: vpor %ymm3, %ymm0, %ymm0
; AVX512VBMI2-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX512VBMI2-NEXT: vptestnmw %zmm2, %zmm2, %k1
; AVX512VBMI2-NEXT: vmovdqu16 %zmm1, %zmm0 {%k1}
; AVX512VBMI2-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
@ -1355,12 +1355,12 @@ define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i
; AVX512VLBW-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512VLBW-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX512VLBW-NEXT: vpsrlw %xmm3, %ymm1, %ymm3
; AVX512VLBW-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
; AVX512VLBW-NEXT: vpsubw %xmm2, %xmm4, %xmm4
; AVX512VLBW-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
; AVX512VLBW-NEXT: vpsllw %xmm4, %ymm0, %ymm0
; AVX512VLBW-NEXT: vpor %ymm3, %ymm0, %ymm0
; AVX512VLBW-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX512VLBW-NEXT: vptestnmw %ymm2, %ymm2, %k1
; AVX512VLBW-NEXT: vmovdqu16 %ymm1, %ymm0 {%k1}
; AVX512VLBW-NEXT: retq
@ -1401,12 +1401,12 @@ define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i
; XOPAVX2-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; XOPAVX2-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; XOPAVX2-NEXT: vpsrlw %xmm3, %ymm1, %ymm3
; XOPAVX2-NEXT: vpbroadcastw %xmm2, %ymm2
; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
; XOPAVX2-NEXT: vpsubw %xmm2, %xmm4, %xmm4
; XOPAVX2-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
; XOPAVX2-NEXT: vpsllw %xmm4, %ymm0, %ymm0
; XOPAVX2-NEXT: vpor %ymm3, %ymm0, %ymm0
; XOPAVX2-NEXT: vpbroadcastw %xmm2, %ymm2
; XOPAVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX2-NEXT: vpcmpeqw %ymm3, %ymm2, %ymm2
; XOPAVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
@ -1461,6 +1461,7 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %
; AVX2-NEXT: vpsrlw $8, %xmm3, %xmm3
; AVX2-NEXT: vpbroadcastb %xmm3, %ymm3
; AVX2-NEXT: vpand %ymm3, %ymm4, %ymm3
; AVX2-NEXT: vpbroadcastb %xmm2, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX2-NEXT: vpsubb %xmm2, %xmm4, %xmm4
; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
@ -1469,7 +1470,6 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %
; AVX2-NEXT: vpbroadcastb %xmm4, %ymm4
; AVX2-NEXT: vpand %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vpor %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vpbroadcastb %xmm2, %ymm2
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpcmpeqb %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
@ -1485,6 +1485,7 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %
; AVX512F-NEXT: vpsrlw $8, %xmm3, %xmm3
; AVX512F-NEXT: vpbroadcastb %xmm3, %ymm3
; AVX512F-NEXT: vpand %ymm3, %ymm4, %ymm3
; AVX512F-NEXT: vpbroadcastb %xmm2, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512F-NEXT: vpsubb %xmm2, %xmm4, %xmm4
; AVX512F-NEXT: vpmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
@ -1493,7 +1494,6 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %
; AVX512F-NEXT: vpbroadcastb %xmm4, %ymm4
; AVX512F-NEXT: vpand %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpor %ymm3, %ymm0, %ymm0
; AVX512F-NEXT: vpbroadcastb %xmm2, %ymm2
; AVX512F-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512F-NEXT: vpcmpeqb %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
@ -1502,24 +1502,24 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %
; AVX512VL-LABEL: splatvar_funnnel_v32i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512VL-NEXT: vpsubb %xmm2, %xmm3, %xmm3
; AVX512VL-NEXT: vpmovzxbq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,zero,zero,zero,zero,xmm3[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VL-NEXT: vpsllw %xmm3, %ymm0, %ymm0
; AVX512VL-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX512VL-NEXT: vpsllw %xmm3, %xmm4, %xmm3
; AVX512VL-NEXT: vpbroadcastb %xmm3, %ymm3
; AVX512VL-NEXT: vpand %ymm3, %ymm0, %ymm0
; AVX512VL-NEXT: vpmovzxbq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VL-NEXT: vpsrlw %xmm3, %ymm1, %ymm5
; AVX512VL-NEXT: vpsrlw %xmm3, %xmm4, %xmm3
; AVX512VL-NEXT: vpsrlw $8, %xmm3, %xmm3
; AVX512VL-NEXT: vpbroadcastb %xmm3, %ymm3
; AVX512VL-NEXT: vpternlogq $236, %ymm5, %ymm0, %ymm3
; AVX512VL-NEXT: vpbroadcastb %xmm2, %ymm0
; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vpcmpeqb %ymm2, %ymm0, %ymm0
; AVX512VL-NEXT: vpblendvb %ymm0, %ymm1, %ymm3, %ymm0
; AVX512VL-NEXT: vpbroadcastb %xmm2, %ymm3
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512VL-NEXT: vpsubb %xmm3, %xmm4, %xmm4
; AVX512VL-NEXT: vpmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VL-NEXT: vpsllw %xmm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpcmpeqd %xmm5, %xmm5, %xmm5
; AVX512VL-NEXT: vpsllw %xmm4, %xmm5, %xmm4
; AVX512VL-NEXT: vpbroadcastb %xmm4, %ymm4
; AVX512VL-NEXT: vpand %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VL-NEXT: vpsrlw %xmm2, %ymm1, %ymm4
; AVX512VL-NEXT: vpsrlw %xmm2, %xmm5, %xmm2
; AVX512VL-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX512VL-NEXT: vpbroadcastb %xmm2, %ymm2
; AVX512VL-NEXT: vpternlogq $236, %ymm4, %ymm0, %ymm2
; AVX512VL-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX512VL-NEXT: vpcmpeqb %ymm0, %ymm3, %ymm0
; AVX512VL-NEXT: vpblendvb %ymm0, %ymm1, %ymm2, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: splatvar_funnnel_v32i8:


@ -633,12 +633,12 @@ define <16 x i32> @splatvar_funnnel_v16i32(<16 x i32> %x, <16 x i32> %y, <16 x i
; AVX512F-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX512F-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
; AVX512F-NEXT: vpsrld %xmm3, %zmm1, %zmm3
; AVX512F-NEXT: vpbroadcastd %xmm2, %zmm2
; AVX512F-NEXT: vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
; AVX512F-NEXT: vpsubd %xmm2, %xmm4, %xmm4
; AVX512F-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; AVX512F-NEXT: vpslld %xmm4, %zmm0, %zmm0
; AVX512F-NEXT: vpord %zmm3, %zmm0, %zmm0
; AVX512F-NEXT: vpbroadcastd %xmm2, %zmm2
; AVX512F-NEXT: vptestnmd %zmm2, %zmm2, %k1
; AVX512F-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
; AVX512F-NEXT: retq
@ -648,12 +648,12 @@ define <16 x i32> @splatvar_funnnel_v16i32(<16 x i32> %x, <16 x i32> %y, <16 x i
; AVX512VL-NEXT: vpandd {{.*}}(%rip){1to4}, %xmm2, %xmm2
; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
; AVX512VL-NEXT: vpsrld %xmm3, %zmm1, %zmm3
; AVX512VL-NEXT: vpbroadcastd %xmm2, %zmm2
; AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
; AVX512VL-NEXT: vpsubd %xmm2, %xmm4, %xmm4
; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; AVX512VL-NEXT: vpslld %xmm4, %zmm0, %zmm0
; AVX512VL-NEXT: vpord %zmm3, %zmm0, %zmm0
; AVX512VL-NEXT: vpbroadcastd %xmm2, %zmm2
; AVX512VL-NEXT: vptestnmd %zmm2, %zmm2, %k1
; AVX512VL-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
; AVX512VL-NEXT: retq
@ -664,12 +664,12 @@ define <16 x i32> @splatvar_funnnel_v16i32(<16 x i32> %x, <16 x i32> %y, <16 x i
; AVX512BW-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX512BW-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
; AVX512BW-NEXT: vpsrld %xmm3, %zmm1, %zmm3
; AVX512BW-NEXT: vpbroadcastd %xmm2, %zmm2
; AVX512BW-NEXT: vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
; AVX512BW-NEXT: vpsubd %xmm2, %xmm4, %xmm4
; AVX512BW-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; AVX512BW-NEXT: vpslld %xmm4, %zmm0, %zmm0
; AVX512BW-NEXT: vpord %zmm3, %zmm0, %zmm0
; AVX512BW-NEXT: vpbroadcastd %xmm2, %zmm2
; AVX512BW-NEXT: vptestnmd %zmm2, %zmm2, %k1
; AVX512BW-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
; AVX512BW-NEXT: retq
@ -686,12 +686,12 @@ define <16 x i32> @splatvar_funnnel_v16i32(<16 x i32> %x, <16 x i32> %y, <16 x i
; AVX512VLBW-NEXT: vpandd {{.*}}(%rip){1to4}, %xmm2, %xmm2
; AVX512VLBW-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
; AVX512VLBW-NEXT: vpsrld %xmm3, %zmm1, %zmm3
; AVX512VLBW-NEXT: vpbroadcastd %xmm2, %zmm2
; AVX512VLBW-NEXT: vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
; AVX512VLBW-NEXT: vpsubd %xmm2, %xmm4, %xmm4
; AVX512VLBW-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; AVX512VLBW-NEXT: vpslld %xmm4, %zmm0, %zmm0
; AVX512VLBW-NEXT: vpord %zmm3, %zmm0, %zmm0
; AVX512VLBW-NEXT: vpbroadcastd %xmm2, %zmm2
; AVX512VLBW-NEXT: vptestnmd %zmm2, %zmm2, %k1
; AVX512VLBW-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
; AVX512VLBW-NEXT: retq
@ -759,12 +759,12 @@ define <32 x i16> @splatvar_funnnel_v32i16(<32 x i16> %x, <32 x i16> %y, <32 x i
; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512BW-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX512BW-NEXT: vpsrlw %xmm3, %zmm1, %zmm3
; AVX512BW-NEXT: vpbroadcastw %xmm2, %zmm2
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
; AVX512BW-NEXT: vpsubw %xmm2, %xmm4, %xmm4
; AVX512BW-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
; AVX512BW-NEXT: vpsllw %xmm4, %zmm0, %zmm0
; AVX512BW-NEXT: vporq %zmm3, %zmm0, %zmm0
; AVX512BW-NEXT: vpbroadcastw %xmm2, %zmm2
; AVX512BW-NEXT: vptestnmw %zmm2, %zmm2, %k1
; AVX512BW-NEXT: vmovdqu16 %zmm1, %zmm0 {%k1}
; AVX512BW-NEXT: retq
@ -781,12 +781,12 @@ define <32 x i16> @splatvar_funnnel_v32i16(<32 x i16> %x, <32 x i16> %y, <32 x i
; AVX512VLBW-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512VLBW-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX512VLBW-NEXT: vpsrlw %xmm3, %zmm1, %zmm3
; AVX512VLBW-NEXT: vpbroadcastw %xmm2, %zmm2
; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
; AVX512VLBW-NEXT: vpsubw %xmm2, %xmm4, %xmm4
; AVX512VLBW-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
; AVX512VLBW-NEXT: vpsllw %xmm4, %zmm0, %zmm0
; AVX512VLBW-NEXT: vporq %zmm3, %zmm0, %zmm0
; AVX512VLBW-NEXT: vpbroadcastw %xmm2, %zmm2
; AVX512VLBW-NEXT: vptestnmw %zmm2, %zmm2, %k1
; AVX512VLBW-NEXT: vmovdqu16 %zmm1, %zmm0 {%k1}
; AVX512VLBW-NEXT: retq
@ -872,88 +872,88 @@ define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %
; AVX512BW-LABEL: splatvar_funnnel_v64i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512BW-NEXT: vpsubb %xmm2, %xmm3, %xmm3
; AVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,zero,zero,zero,zero,xmm3[1],zero,zero,zero,zero,zero,zero,zero
; AVX512BW-NEXT: vpsllw %xmm3, %zmm0, %zmm0
; AVX512BW-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX512BW-NEXT: vpsllw %xmm3, %xmm4, %xmm3
; AVX512BW-NEXT: vpbroadcastb %xmm3, %zmm3
; AVX512BW-NEXT: vpandq %zmm3, %zmm0, %zmm3
; AVX512BW-NEXT: vpbroadcastb %xmm2, %zmm3
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512BW-NEXT: vpsubb %xmm3, %xmm4, %xmm4
; AVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
; AVX512BW-NEXT: vpsllw %xmm4, %zmm0, %zmm0
; AVX512BW-NEXT: vpcmpeqd %xmm5, %xmm5, %xmm5
; AVX512BW-NEXT: vpsllw %xmm4, %xmm5, %xmm4
; AVX512BW-NEXT: vpbroadcastb %xmm4, %zmm4
; AVX512BW-NEXT: vpandq %zmm4, %zmm0, %zmm4
; AVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512BW-NEXT: vpsrlw %xmm0, %zmm1, %zmm5
; AVX512BW-NEXT: vpsrlw %xmm0, %xmm4, %xmm0
; AVX512BW-NEXT: vpsrlw %xmm0, %zmm1, %zmm2
; AVX512BW-NEXT: vpsrlw %xmm0, %xmm5, %xmm0
; AVX512BW-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX512BW-NEXT: vpbroadcastb %xmm0, %zmm0
; AVX512BW-NEXT: vpternlogq $236, %zmm5, %zmm3, %zmm0
; AVX512BW-NEXT: vpbroadcastb %xmm2, %zmm2
; AVX512BW-NEXT: vptestnmb %zmm2, %zmm2, %k1
; AVX512BW-NEXT: vpternlogq $236, %zmm2, %zmm4, %zmm0
; AVX512BW-NEXT: vptestnmb %zmm3, %zmm3, %k1
; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1}
; AVX512BW-NEXT: retq
;
; AVX512VBMI2-LABEL: splatvar_funnnel_v64i8:
; AVX512VBMI2: # %bb.0:
; AVX512VBMI2-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512VBMI2-NEXT: vpsubb %xmm2, %xmm3, %xmm3
; AVX512VBMI2-NEXT: vpmovzxbq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,zero,zero,zero,zero,xmm3[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VBMI2-NEXT: vpsllw %xmm3, %zmm0, %zmm0
; AVX512VBMI2-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX512VBMI2-NEXT: vpsllw %xmm3, %xmm4, %xmm3
; AVX512VBMI2-NEXT: vpbroadcastb %xmm3, %zmm3
; AVX512VBMI2-NEXT: vpandq %zmm3, %zmm0, %zmm3
; AVX512VBMI2-NEXT: vpbroadcastb %xmm2, %zmm3
; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512VBMI2-NEXT: vpsubb %xmm3, %xmm4, %xmm4
; AVX512VBMI2-NEXT: vpmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VBMI2-NEXT: vpsllw %xmm4, %zmm0, %zmm0
; AVX512VBMI2-NEXT: vpcmpeqd %xmm5, %xmm5, %xmm5
; AVX512VBMI2-NEXT: vpsllw %xmm4, %xmm5, %xmm4
; AVX512VBMI2-NEXT: vpbroadcastb %xmm4, %zmm4
; AVX512VBMI2-NEXT: vpandq %zmm4, %zmm0, %zmm4
; AVX512VBMI2-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VBMI2-NEXT: vpsrlw %xmm0, %zmm1, %zmm5
; AVX512VBMI2-NEXT: vpsrlw %xmm0, %xmm4, %xmm0
; AVX512VBMI2-NEXT: vpsrlw %xmm0, %zmm1, %zmm2
; AVX512VBMI2-NEXT: vpsrlw %xmm0, %xmm5, %xmm0
; AVX512VBMI2-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX512VBMI2-NEXT: vpbroadcastb %xmm0, %zmm0
; AVX512VBMI2-NEXT: vpternlogq $236, %zmm5, %zmm3, %zmm0
; AVX512VBMI2-NEXT: vpbroadcastb %xmm2, %zmm2
; AVX512VBMI2-NEXT: vptestnmb %zmm2, %zmm2, %k1
; AVX512VBMI2-NEXT: vpternlogq $236, %zmm2, %zmm4, %zmm0
; AVX512VBMI2-NEXT: vptestnmb %zmm3, %zmm3, %k1
; AVX512VBMI2-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1}
; AVX512VBMI2-NEXT: retq
;
; AVX512VLBW-LABEL: splatvar_funnnel_v64i8:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512VLBW-NEXT: vpsubb %xmm2, %xmm3, %xmm3
; AVX512VLBW-NEXT: vpmovzxbq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,zero,zero,zero,zero,xmm3[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VLBW-NEXT: vpsllw %xmm3, %zmm0, %zmm0
; AVX512VLBW-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX512VLBW-NEXT: vpsllw %xmm3, %xmm4, %xmm3
; AVX512VLBW-NEXT: vpbroadcastb %xmm3, %zmm3
; AVX512VLBW-NEXT: vpandq %zmm3, %zmm0, %zmm3
; AVX512VLBW-NEXT: vpbroadcastb %xmm2, %zmm3
; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512VLBW-NEXT: vpsubb %xmm3, %xmm4, %xmm4
; AVX512VLBW-NEXT: vpmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VLBW-NEXT: vpsllw %xmm4, %zmm0, %zmm0
; AVX512VLBW-NEXT: vpcmpeqd %xmm5, %xmm5, %xmm5
; AVX512VLBW-NEXT: vpsllw %xmm4, %xmm5, %xmm4
; AVX512VLBW-NEXT: vpbroadcastb %xmm4, %zmm4
; AVX512VLBW-NEXT: vpandq %zmm4, %zmm0, %zmm4
; AVX512VLBW-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VLBW-NEXT: vpsrlw %xmm0, %zmm1, %zmm5
; AVX512VLBW-NEXT: vpsrlw %xmm0, %xmm4, %xmm0
; AVX512VLBW-NEXT: vpsrlw %xmm0, %zmm1, %zmm2
; AVX512VLBW-NEXT: vpsrlw %xmm0, %xmm5, %xmm0
; AVX512VLBW-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX512VLBW-NEXT: vpbroadcastb %xmm0, %zmm0
; AVX512VLBW-NEXT: vpternlogq $236, %zmm5, %zmm3, %zmm0
; AVX512VLBW-NEXT: vpbroadcastb %xmm2, %zmm2
; AVX512VLBW-NEXT: vptestnmb %zmm2, %zmm2, %k1
; AVX512VLBW-NEXT: vpternlogq $236, %zmm2, %zmm4, %zmm0
; AVX512VLBW-NEXT: vptestnmb %zmm3, %zmm3, %k1
; AVX512VLBW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1}
; AVX512VLBW-NEXT: retq
;
; AVX512VLVBMI2-LABEL: splatvar_funnnel_v64i8:
; AVX512VLVBMI2: # %bb.0:
; AVX512VLVBMI2-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512VLVBMI2-NEXT: vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512VLVBMI2-NEXT: vpsubb %xmm2, %xmm3, %xmm3
; AVX512VLVBMI2-NEXT: vpmovzxbq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,zero,zero,zero,zero,xmm3[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VLVBMI2-NEXT: vpsllw %xmm3, %zmm0, %zmm0
; AVX512VLVBMI2-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX512VLVBMI2-NEXT: vpsllw %xmm3, %xmm4, %xmm3
; AVX512VLVBMI2-NEXT: vpbroadcastb %xmm3, %zmm3
; AVX512VLVBMI2-NEXT: vpandq %zmm3, %zmm0, %zmm3
; AVX512VLVBMI2-NEXT: vpbroadcastb %xmm2, %zmm3
; AVX512VLVBMI2-NEXT: vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512VLVBMI2-NEXT: vpsubb %xmm3, %xmm4, %xmm4
; AVX512VLVBMI2-NEXT: vpmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VLVBMI2-NEXT: vpsllw %xmm4, %zmm0, %zmm0
; AVX512VLVBMI2-NEXT: vpcmpeqd %xmm5, %xmm5, %xmm5
; AVX512VLVBMI2-NEXT: vpsllw %xmm4, %xmm5, %xmm4
; AVX512VLVBMI2-NEXT: vpbroadcastb %xmm4, %zmm4
; AVX512VLVBMI2-NEXT: vpandq %zmm4, %zmm0, %zmm4
; AVX512VLVBMI2-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VLVBMI2-NEXT: vpsrlw %xmm0, %zmm1, %zmm5
; AVX512VLVBMI2-NEXT: vpsrlw %xmm0, %xmm4, %xmm0
; AVX512VLVBMI2-NEXT: vpsrlw %xmm0, %zmm1, %zmm2
; AVX512VLVBMI2-NEXT: vpsrlw %xmm0, %xmm5, %xmm0
; AVX512VLVBMI2-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX512VLVBMI2-NEXT: vpbroadcastb %xmm0, %zmm0
; AVX512VLVBMI2-NEXT: vpternlogq $236, %zmm5, %zmm3, %zmm0
; AVX512VLVBMI2-NEXT: vpbroadcastb %xmm2, %zmm2
; AVX512VLVBMI2-NEXT: vptestnmb %zmm2, %zmm2, %k1
; AVX512VLVBMI2-NEXT: vpternlogq $236, %zmm2, %zmm4, %zmm0
; AVX512VLVBMI2-NEXT: vptestnmb %zmm3, %zmm3, %k1
; AVX512VLVBMI2-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1}
; AVX512VLVBMI2-NEXT: retq
%splat = shufflevector <64 x i8> %amt, <64 x i8> undef, <64 x i32> zeroinitializer
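For context, the CHECK lines above come from a splatted-amount funnel-shift test; below is a minimal sketch of the IR shape it exercises, with the three-operand signature assumed from the test name and the %splat line rather than copied from this diff:

declare <64 x i8> @llvm.fshl.v64i8(<64 x i8>, <64 x i8>, <64 x i8>)

define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %amt) nounwind {
  ; Broadcast element 0 of %amt to all 64 lanes, then funnel-shift left.
  %splat = shufflevector <64 x i8> %amt, <64 x i8> undef, <64 x i32> zeroinitializer
  %res = call <64 x i8> @llvm.fshl.v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %splat)
  ret <64 x i8> %res
}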


@@ -366,13 +366,14 @@ define <16 x i32> @splatvar_funnnel_v16i32(<16 x i32> %x, <16 x i32> %amt) nounw
define <32 x i16> @splatvar_funnnel_v32i16(<32 x i16> %x, <32 x i16> %amt) nounwind {
; AVX512F-LABEL: splatvar_funnnel_v32i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm3
; AVX512F-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
; AVX512F-NEXT: vpsrlw %xmm3, %ymm2, %ymm4
; AVX512F-NEXT: vpsrlw %xmm3, %ymm0, %ymm3
; AVX512F-NEXT: vinserti64x4 $1, %ymm4, %zmm3, %zmm3
; AVX512F-NEXT: vpbroadcastw %xmm1, %ymm1
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm1, %zmm1
; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm2
; AVX512F-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512F-NEXT: vpsrlw %xmm2, %ymm3, %ymm4
; AVX512F-NEXT: vpsrlw %xmm2, %ymm0, %ymm2
; AVX512F-NEXT: vinserti64x4 $1, %ymm4, %zmm2, %zmm2
; AVX512F-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX512F-NEXT: vpsubw %ymm1, %ymm4, %ymm1
; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
@@ -380,22 +381,23 @@ define <32 x i16> @splatvar_funnnel_v32i16(<32 x i16> %x, <32 x i16> %amt) nounw
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512F-NEXT: vpsllvd %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
; AVX512F-NEXT: vpsllvd %zmm1, %zmm2, %zmm1
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
; AVX512F-NEXT: vpsllvd %zmm1, %zmm3, %zmm1
; AVX512F-NEXT: vpmovdw %zmm1, %ymm1
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: vporq %zmm3, %zmm0, %zmm0
; AVX512F-NEXT: vporq %zmm2, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatvar_funnnel_v32i16:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm3
; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
; AVX512VL-NEXT: vpsrlw %xmm3, %ymm2, %ymm4
; AVX512VL-NEXT: vpsrlw %xmm3, %ymm0, %ymm3
; AVX512VL-NEXT: vinserti64x4 $1, %ymm4, %zmm3, %zmm3
; AVX512VL-NEXT: vpbroadcastw %xmm1, %ymm1
; AVX512VL-NEXT: vinserti64x4 $1, %ymm1, %zmm1, %zmm1
; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm2
; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512VL-NEXT: vpsrlw %xmm2, %ymm3, %ymm4
; AVX512VL-NEXT: vpsrlw %xmm2, %ymm0, %ymm2
; AVX512VL-NEXT: vinserti64x4 $1, %ymm4, %zmm2, %zmm2
; AVX512VL-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX512VL-NEXT: vpsubw %ymm1, %ymm4, %ymm1
; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
@@ -403,11 +405,11 @@ define <32 x i16> @splatvar_funnnel_v32i16(<32 x i16> %x, <32 x i16> %amt) nounw
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512VL-NEXT: vpsllvd %zmm1, %zmm0, %zmm0
; AVX512VL-NEXT: vpmovdw %zmm0, %ymm0
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
; AVX512VL-NEXT: vpsllvd %zmm1, %zmm2, %zmm1
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
; AVX512VL-NEXT: vpsllvd %zmm1, %zmm3, %zmm1
; AVX512VL-NEXT: vpmovdw %zmm1, %ymm1
; AVX512VL-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512VL-NEXT: vporq %zmm3, %zmm0, %zmm0
; AVX512VL-NEXT: vporq %zmm2, %zmm0, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: splatvar_funnnel_v32i16:
@@ -447,13 +449,14 @@ define <32 x i16> @splatvar_funnnel_v32i16(<32 x i16> %x, <32 x i16> %amt) nounw
define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %amt) nounwind {
; AVX512F-LABEL: splatvar_funnnel_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpbroadcastb %xmm1, %ymm1
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm1, %zmm1
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm2, %ymm3
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512F-NEXT: vpand %ymm4, %ymm3, %ymm3
; AVX512F-NEXT: vpbroadcastb %xmm1, %ymm5
; AVX512F-NEXT: vpxor %xmm6, %xmm6, %xmm6
; AVX512F-NEXT: vpsubb %ymm5, %ymm6, %ymm5
; AVX512F-NEXT: vpxor %xmm5, %xmm5, %xmm5
; AVX512F-NEXT: vpsubb %ymm1, %ymm5, %ymm5
; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm5, %ymm5
; AVX512F-NEXT: vpsllw $5, %ymm5, %ymm5
; AVX512F-NEXT: vpblendvb %ymm5, %ymm3, %ymm2, %ymm3
@@ -490,13 +493,14 @@ define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %amt) nounwind
;
; AVX512VL-LABEL: splatvar_funnnel_v64i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpbroadcastb %xmm1, %ymm1
; AVX512VL-NEXT: vinserti64x4 $1, %ymm1, %zmm1, %zmm1
; AVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm2, %ymm3
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512VL-NEXT: vpand %ymm4, %ymm3, %ymm3
; AVX512VL-NEXT: vpbroadcastb %xmm1, %ymm5
; AVX512VL-NEXT: vpxor %xmm6, %xmm6, %xmm6
; AVX512VL-NEXT: vpsubb %ymm5, %ymm6, %ymm5
; AVX512VL-NEXT: vpxor %xmm5, %xmm5, %xmm5
; AVX512VL-NEXT: vpsubb %ymm1, %ymm5, %ymm5
; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm5, %ymm5
; AVX512VL-NEXT: vpsllw $5, %ymm5, %ymm5
; AVX512VL-NEXT: vpblendvb %ymm5, %ymm3, %ymm2, %ymm3
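This file covers the rotate-through-funnel-shift variants, where both data operands are the same value, so the define above takes only %x and %amt; a sketch under that assumption:

declare <64 x i8> @llvm.fshl.v64i8(<64 x i8>, <64 x i8>, <64 x i8>)

define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %amt) nounwind {
  ; fshl(x, x, n) is a rotate left by n in each lane (amount taken modulo 8 for i8).
  %splat = shufflevector <64 x i8> %amt, <64 x i8> undef, <64 x i32> zeroinitializer
  %res = call <64 x i8> @llvm.fshl.v64i8(<64 x i8> %x, <64 x i8> %x, <64 x i8> %splat)
  ret <64 x i8> %res
}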


@@ -348,38 +348,34 @@ define <32 x i16> @splatvar_rotate_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind
; AVX512F-LABEL: splatvar_rotate_v32i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
; AVX512F-NEXT: vpsubw %xmm1, %xmm3, %xmm4
; AVX512F-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
; AVX512F-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
; AVX512F-NEXT: vpsubw %xmm1, %xmm3, %xmm1
; AVX512F-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512F-NEXT: vpsllw %xmm2, %ymm3, %ymm5
; AVX512F-NEXT: vpsllw %xmm2, %ymm3, %ymm4
; AVX512F-NEXT: vpsllw %xmm2, %ymm0, %ymm2
; AVX512F-NEXT: vinserti64x4 $1, %ymm5, %zmm2, %zmm2
; AVX512F-NEXT: vpsrlw %xmm1, %ymm3, %ymm1
; AVX512F-NEXT: vpsrlw %xmm4, %ymm0, %ymm0
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: vinserti64x4 $1, %ymm4, %zmm2, %zmm2
; AVX512F-NEXT: vpsrlw %xmm1, %ymm3, %ymm3
; AVX512F-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
; AVX512F-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm0
; AVX512F-NEXT: vporq %zmm0, %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatvar_rotate_v32i16:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
; AVX512VL-NEXT: vpsubw %xmm1, %xmm3, %xmm4
; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
; AVX512VL-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
; AVX512VL-NEXT: vpsubw %xmm1, %xmm3, %xmm1
; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512VL-NEXT: vpsllw %xmm2, %ymm3, %ymm5
; AVX512VL-NEXT: vpsllw %xmm2, %ymm3, %ymm4
; AVX512VL-NEXT: vpsllw %xmm2, %ymm0, %ymm2
; AVX512VL-NEXT: vinserti64x4 $1, %ymm5, %zmm2, %zmm2
; AVX512VL-NEXT: vpsrlw %xmm1, %ymm3, %ymm1
; AVX512VL-NEXT: vpsrlw %xmm4, %ymm0, %ymm0
; AVX512VL-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512VL-NEXT: vinserti64x4 $1, %ymm4, %zmm2, %zmm2
; AVX512VL-NEXT: vpsrlw %xmm1, %ymm3, %ymm3
; AVX512VL-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm0
; AVX512VL-NEXT: vporq %zmm0, %zmm2, %zmm0
; AVX512VL-NEXT: retq
;
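The rotate tests build the rotate out of explicit shifts, which is why the asm above subtracts the splatted amount from a vector of 16s before the vpsrlw; a sketch of that IR shape, assuming the usual shl/lshr/or pattern:

define <32 x i16> @splatvar_rotate_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
  %splat = shufflevector <32 x i16> %b, <32 x i16> undef, <32 x i32> zeroinitializer
  ; rot(a, n) = (a << n) | (a >> (16 - n)), matching the vpsubw/vpsllw/vpsrlw/vporq sequence above.
  %splat16 = sub <32 x i16> <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>, %splat
  %shl = shl <32 x i16> %a, %splat
  %lshr = lshr <32 x i16> %a, %splat16
  %or = or <32 x i16> %shl, %lshr
  ret <32 x i16> %or
}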
@@ -416,64 +412,54 @@ define <64 x i8> @splatvar_rotate_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512F-LABEL: splatvar_rotate_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512F-NEXT: vpsubb %xmm1, %xmm3, %xmm4
; AVX512F-NEXT: vpmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
; AVX512F-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512F-NEXT: vpsubb %xmm1, %xmm3, %xmm1
; AVX512F-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512F-NEXT: vpsllw %xmm2, %ymm3, %ymm5
; AVX512F-NEXT: vpcmpeqd %xmm6, %xmm6, %xmm6
; AVX512F-NEXT: vpsllw %xmm2, %xmm6, %xmm7
; AVX512F-NEXT: vpbroadcastb %xmm7, %ymm7
; AVX512F-NEXT: vpand %ymm7, %ymm5, %ymm5
; AVX512F-NEXT: vpsllw %xmm2, %ymm3, %ymm4
; AVX512F-NEXT: vpcmpeqd %xmm5, %xmm5, %xmm5
; AVX512F-NEXT: vpsllw %xmm2, %xmm5, %xmm6
; AVX512F-NEXT: vpbroadcastb %xmm6, %ymm6
; AVX512F-NEXT: vpand %ymm6, %ymm4, %ymm4
; AVX512F-NEXT: vpsllw %xmm2, %ymm0, %ymm2
; AVX512F-NEXT: vpand %ymm7, %ymm2, %ymm2
; AVX512F-NEXT: vinserti64x4 $1, %ymm5, %zmm2, %zmm2
; AVX512F-NEXT: vpand %ymm6, %ymm2, %ymm2
; AVX512F-NEXT: vinserti64x4 $1, %ymm4, %zmm2, %zmm2
; AVX512F-NEXT: vpsrlw %xmm1, %ymm3, %ymm3
; AVX512F-NEXT: vpsrlw %xmm1, %xmm6, %xmm1
; AVX512F-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX512F-NEXT: vpbroadcastb %xmm1, %ymm1
; AVX512F-NEXT: vpand %ymm1, %ymm3, %ymm1
; AVX512F-NEXT: vpsrlw %xmm4, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw %xmm4, %xmm6, %xmm3
; AVX512F-NEXT: vpsrlw $8, %xmm3, %xmm3
; AVX512F-NEXT: vpbroadcastb %xmm3, %ymm3
; AVX512F-NEXT: vpand %ymm3, %ymm0, %ymm0
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: vpsrlw %xmm1, %xmm5, %xmm4
; AVX512F-NEXT: vpsrlw $8, %xmm4, %xmm4
; AVX512F-NEXT: vpbroadcastb %xmm4, %ymm4
; AVX512F-NEXT: vpand %ymm4, %ymm3, %ymm3
; AVX512F-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
; AVX512F-NEXT: vpand %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm0
; AVX512F-NEXT: vporq %zmm0, %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatvar_rotate_v64i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512VL-NEXT: vpsubb %xmm1, %xmm3, %xmm4
; AVX512VL-NEXT: vpmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VL-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512VL-NEXT: vpsubb %xmm1, %xmm3, %xmm1
; AVX512VL-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512VL-NEXT: vpsllw %xmm2, %ymm3, %ymm5
; AVX512VL-NEXT: vpcmpeqd %xmm6, %xmm6, %xmm6
; AVX512VL-NEXT: vpsllw %xmm2, %xmm6, %xmm7
; AVX512VL-NEXT: vpbroadcastb %xmm7, %ymm7
; AVX512VL-NEXT: vpand %ymm7, %ymm5, %ymm5
; AVX512VL-NEXT: vpsllw %xmm2, %ymm3, %ymm4
; AVX512VL-NEXT: vpcmpeqd %xmm5, %xmm5, %xmm5
; AVX512VL-NEXT: vpsllw %xmm2, %xmm5, %xmm6
; AVX512VL-NEXT: vpbroadcastb %xmm6, %ymm6
; AVX512VL-NEXT: vpand %ymm6, %ymm4, %ymm4
; AVX512VL-NEXT: vpsllw %xmm2, %ymm0, %ymm2
; AVX512VL-NEXT: vpand %ymm7, %ymm2, %ymm2
; AVX512VL-NEXT: vinserti64x4 $1, %ymm5, %zmm2, %zmm2
; AVX512VL-NEXT: vpand %ymm6, %ymm2, %ymm2
; AVX512VL-NEXT: vinserti64x4 $1, %ymm4, %zmm2, %zmm2
; AVX512VL-NEXT: vpsrlw %xmm1, %ymm3, %ymm3
; AVX512VL-NEXT: vpsrlw %xmm1, %xmm6, %xmm1
; AVX512VL-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX512VL-NEXT: vpbroadcastb %xmm1, %ymm1
; AVX512VL-NEXT: vpand %ymm1, %ymm3, %ymm1
; AVX512VL-NEXT: vpsrlw %xmm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw %xmm4, %xmm6, %xmm3
; AVX512VL-NEXT: vpsrlw $8, %xmm3, %xmm3
; AVX512VL-NEXT: vpbroadcastb %xmm3, %ymm3
; AVX512VL-NEXT: vpand %ymm3, %ymm0, %ymm0
; AVX512VL-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512VL-NEXT: vpsrlw %xmm1, %xmm5, %xmm4
; AVX512VL-NEXT: vpsrlw $8, %xmm4, %xmm4
; AVX512VL-NEXT: vpbroadcastb %xmm4, %ymm4
; AVX512VL-NEXT: vpand %ymm4, %ymm3, %ymm3
; AVX512VL-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT: vpand %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm0
; AVX512VL-NEXT: vporq %zmm0, %zmm2, %zmm0
; AVX512VL-NEXT: retq
;