
[DAGCombiner] narrow truncated vector binops when legal

This is the smallest vector enhancement I could find to D54640.
Here, we're allowing narrowing to only legal vector ops because we'll see
regressions without that. All of the test diffs are wins from what I can tell.
With AVX/AVX512, we can shrink ymm/zmm ops to xmm.

x86 vector multiplies are the problem case that we're avoiding due to the
patchwork ISA, and it's not clear to me if we can dance around those
regressions using TLI hooks or if we need preliminary patches to plug those
holes.

Differential Revision: https://reviews.llvm.org/D55126

llvm-svn: 348195
Sanjay Patel 2018-12-03 21:57:35 +00:00
parent e15678c1c5
commit efd7aca81b
4 changed files with 181 additions and 311 deletions
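
To make the transform concrete: for the patterns exercised by the tests below, trunc(binop(X, C)) is rewritten to binop(trunc(X), trunc(C)) so the binop executes in the narrow type. The following is a minimal C++ sketch only, assuming the usual SelectionDAG APIs; the helper name is illustrative, and the one-use, constant-operand, and legality guards of the real hunk below are elided.

// Minimal sketch of the rewrite, not the patch itself. The real logic lives
// in DAGCombiner::visitTRUNCATE (see the hunk below) and is additionally
// guarded by one-use, constant-operand, and legality checks.
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

static SDValue narrowTruncatedBinOp(SelectionDAG &DAG, SDNode *Trunc) {
  SDValue BinOp = Trunc->getOperand(0);      // e.g. a v4i64 ISD::SUB
  EVT NarrowVT = Trunc->getValueType(0);     // e.g. v4i32
  SDLoc DL(Trunc);
  // Truncate both operands first; a truncated constant operand folds away.
  SDValue L = DAG.getNode(ISD::TRUNCATE, DL, NarrowVT, BinOp.getOperand(0));
  SDValue R = DAG.getNode(ISD::TRUNCATE, DL, NarrowVT, BinOp.getOperand(1));
  // Re-create the binop in the narrow type so it can run on xmm registers
  // instead of ymm/zmm.
  return DAG.getNode(BinOp.getOpcode(), DL, NarrowVT, L, R);
}

In the trunc_sub_const_* tests below, this is why the wide vpsubq/vpsubd operations on ymm/zmm disappear and a single narrow psubd/psubw/psubb on xmm is emitted after the truncating shuffles.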

@@ -9758,14 +9758,18 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
   case ISD::AND:
   case ISD::OR:
   case ISD::XOR:
-    // TODO: This should allow vector constants/types too.
     if (!LegalOperations && N0.hasOneUse() &&
-        (isa<ConstantSDNode>(N0.getOperand(0)) ||
-         isa<ConstantSDNode>(N0.getOperand(1)))) {
-      SDLoc DL(N);
-      SDValue NarrowL = DAG.getNode(ISD::TRUNCATE, DL, VT, N0.getOperand(0));
-      SDValue NarrowR = DAG.getNode(ISD::TRUNCATE, DL, VT, N0.getOperand(1));
-      return DAG.getNode(N0.getOpcode(), DL, VT, NarrowL, NarrowR);
+        (isConstantOrConstantVector(N0.getOperand(0)) ||
+         isConstantOrConstantVector(N0.getOperand(1)))) {
+      // TODO: We already restricted this to pre-legalization, but for vectors
+      // we are extra cautious to not create an unsupported operation.
+      // Target-specific changes are likely needed to avoid regressions here.
+      if (VT.isScalarInteger() || TLI.isOperationLegal(N0.getOpcode(), VT)) {
+        SDLoc DL(N);
+        SDValue NarrowL = DAG.getNode(ISD::TRUNCATE, DL, VT, N0.getOperand(0));
+        SDValue NarrowR = DAG.getNode(ISD::TRUNCATE, DL, VT, N0.getOperand(1));
+        return DAG.getNode(N0.getOpcode(), DL, VT, NarrowL, NarrowR);
+      }
     }
   }
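
The TLI.isOperationLegal() guard added above is what keeps this narrowing away from the patchwork x86 multiply support called out in the commit message. A minimal sketch of just that gate; the helper name is hypothetical and the x86 legality examples in the comments are assumptions based on the commit message and the tests below.

// Sketch of the legality gate only, extracted for clarity.
#include "llvm/CodeGen/TargetLowering.h"
using namespace llvm;

static bool shouldNarrowBinOp(const TargetLowering &TLI, unsigned Opcode,
                              EVT NarrowVT) {
  // Scalar integers were already narrowed before this patch. Vectors only
  // narrow when the target can select the binop directly in the small type:
  // ISD::SUB on v4i32 is legal on any SSE2-capable x86 target, so the sub
  // tests shrink, while ISD::MUL on types such as v16i8 is not legal on x86,
  // so those multiplies stay wide and the regressions noted above are avoided.
  return NarrowVT.isScalarInteger() || TLI.isOperationLegal(Opcode, NarrowVT);
}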

@@ -37805,6 +37805,8 @@ static SDValue combineFaddFsub(SDNode *N, SelectionDAG &DAG,
 /// Attempt to pre-truncate inputs to arithmetic ops if it will simplify
 /// the codegen.
 /// e.g. TRUNC( BINOP( X, Y ) ) --> BINOP( TRUNC( X ), TRUNC( Y ) )
+/// TODO: This overlaps with the generic combiner's visitTRUNCATE. Remove
+/// anything that is guaranteed to be transformed by DAGCombiner.
 static SDValue combineTruncatedArithmetic(SDNode *N, SelectionDAG &DAG,
                                           const X86Subtarget &Subtarget,
                                           const SDLoc &DL) {

@ -1340,49 +1340,39 @@ define <16 x i8> @trunc_ext_sub_v16i16_v16i8(<16 x i8> %x, <16 x i8> %y) {
define <4 x i32> @trunc_sub_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; SSE-LABEL: trunc_sub_const_v4i64_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: movl $1, %eax
; SSE-NEXT: movq %rax, %xmm2
; SSE-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5,6,7]
; SSE-NEXT: psubq %xmm2, %xmm0
; SSE-NEXT: psubq {{.*}}(%rip), %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT: psubd {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_sub_const_v4i64_v4i32:
; AVX1: # %bb.0:
; AVX1-NEXT: movl $1, %eax
; AVX1-NEXT: vmovq %rax, %xmm1
; AVX1-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm1[0,2],xmm0[0,2]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX1-NEXT: vpsubd {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: trunc_sub_const_v4i64_v4i32:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vpsubd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: trunc_sub_const_v4i64_v4i32:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
; AVX2-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm0
; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vpsubd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
; AVX512-LABEL: trunc_sub_const_v4i64_v4i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vpsubd {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <4 x i64> %a0, <i64 0, i64 1, i64 2, i64 3>
@ -1393,53 +1383,38 @@ define <4 x i32> @trunc_sub_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
define <8 x i16> @trunc_sub_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; SSE-LABEL: trunc_sub_const_v8i64_v8i16:
; SSE: # %bb.0:
; SSE-NEXT: movl $1, %eax
; SSE-NEXT: movq %rax, %xmm4
; SSE-NEXT: pslldq {{.*#+}} xmm4 = zero,zero,zero,zero,zero,zero,zero,zero,xmm4[0,1,2,3,4,5,6,7]
; SSE-NEXT: psubq %xmm4, %xmm0
; SSE-NEXT: psubq {{.*}}(%rip), %xmm1
; SSE-NEXT: psubq {{.*}}(%rip), %xmm2
; SSE-NEXT: psubq {{.*}}(%rip), %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,0,2,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,0,2,4,5,6,7]
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm0[0,2,2,3,4,5,6,7]
; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,1,0,2,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,2,4,5,6,7]
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
; SSE-NEXT: movapd %xmm2, %xmm0
; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm4[0],xmm0[1]
; SSE-NEXT: psubw {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_sub_const_v8i64_v8i16:
; AVX1: # %bb.0:
; AVX1-NEXT: movl $1, %eax
; AVX1-NEXT: vmovq %rax, %xmm2
; AVX1-NEXT: vpslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5,6,7]
; AVX1-NEXT: vpsubq %xmm2, %xmm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm1, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1,2,3],xmm1[4],xmm4[5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1,2,3],xmm3[4],xmm4[5,6,7]
; AVX1-NEXT: vpackusdw %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1,2,3],xmm2[4],xmm4[5,6,7]
; AVX1-NEXT: vpackusdw %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1,2,3],xmm1[4],xmm3[5,6,7]
; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1,2,3],xmm0[4],xmm3[5,6,7]
; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: trunc_sub_const_v8i64_v8i16:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vpsubq {{.*}}(%rip), %ymm1, %ymm1
; AVX2-SLOW-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
@ -1447,28 +1422,26 @@ define <8 x i16> @trunc_sub_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: trunc_sub_const_v8i64_v8i16:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vpsubq {{.*}}(%rip), %ymm1, %ymm1
; AVX2-FAST-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2,4,6,4,6,6,7]
; AVX2-FAST-NEXT: vpermd %ymm0, %ymm2, %ymm0
; AVX2-FAST-NEXT: vpermd %ymm1, %ymm2, %ymm1
; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
; AVX512-LABEL: trunc_sub_const_v8i64_v8i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsubq {{.*}}(%rip), %zmm0, %zmm0
; AVX512-NEXT: vpmovqw %zmm0, %xmm0
; AVX512-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <8 x i64> %a0, <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>
@ -1479,41 +1452,38 @@ define <8 x i16> @trunc_sub_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
define <8 x i16> @trunc_sub_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; SSE-LABEL: trunc_sub_const_v8i32_v8i16:
; SSE: # %bb.0:
; SSE-NEXT: psubd {{.*}}(%rip), %xmm0
; SSE-NEXT: psubd {{.*}}(%rip), %xmm1
; SSE-NEXT: pslld $16, %xmm1
; SSE-NEXT: psrad $16, %xmm1
; SSE-NEXT: pslld $16, %xmm0
; SSE-NEXT: psrad $16, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: psubw {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_sub_const_v8i32_v8i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsubd {{.*}}(%rip), %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsubd {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX1-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_sub_const_v8i32_v8i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsubd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_sub_const_v8i32_v8i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsubd {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <8 x i32> %a0, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@ -1524,17 +1494,6 @@ define <8 x i16> @trunc_sub_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
define <16 x i8> @trunc_sub_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; SSE-LABEL: trunc_sub_const_v16i64_v16i8:
; SSE: # %bb.0:
; SSE-NEXT: movl $1, %eax
; SSE-NEXT: movq %rax, %xmm8
; SSE-NEXT: pslldq {{.*#+}} xmm8 = zero,zero,zero,zero,zero,zero,zero,zero,xmm8[0,1,2,3,4,5,6,7]
; SSE-NEXT: psubq %xmm8, %xmm0
; SSE-NEXT: psubq {{.*}}(%rip), %xmm1
; SSE-NEXT: psubq {{.*}}(%rip), %xmm2
; SSE-NEXT: psubq {{.*}}(%rip), %xmm3
; SSE-NEXT: psubq {{.*}}(%rip), %xmm4
; SSE-NEXT: psubq {{.*}}(%rip), %xmm5
; SSE-NEXT: psubq {{.*}}(%rip), %xmm6
; SSE-NEXT: psubq {{.*}}(%rip), %xmm7
; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
; SSE-NEXT: pand %xmm8, %xmm7
; SSE-NEXT: pand %xmm8, %xmm6
@ -1551,51 +1510,38 @@ define <16 x i8> @trunc_sub_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; SSE-NEXT: packuswb %xmm1, %xmm0
; SSE-NEXT: packuswb %xmm2, %xmm0
; SSE-NEXT: packuswb %xmm4, %xmm0
; SSE-NEXT: psubb {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_sub_const_v16i64_v16i8:
; AVX1: # %bb.0:
; AVX1-NEXT: movl $1, %eax
; AVX1-NEXT: vmovq %rax, %xmm4
; AVX1-NEXT: vpslldq {{.*#+}} xmm4 = zero,zero,zero,zero,zero,zero,zero,zero,xmm4[0,1,2,3,4,5,6,7]
; AVX1-NEXT: vpsubq %xmm4, %xmm0, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm1, %xmm5
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm2, %xmm6
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm3, %xmm7
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm3, %xmm3
; AVX1-NEXT: vmovddup {{.*#+}} xmm4 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm4 = mem[0,0]
; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpand %xmm4, %xmm7, %xmm7
; AVX1-NEXT: vpackusdw %xmm3, %xmm7, %xmm3
; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpand %xmm4, %xmm6, %xmm6
; AVX1-NEXT: vpackusdw %xmm2, %xmm6, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm5 = mem[0,0]
; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm3
; AVX1-NEXT: vpackusdw %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm4, %xmm8, %xmm3
; AVX1-NEXT: vpackusdw %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: trunc_sub_const_v16i64_v16i8:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vpsubq {{.*}}(%rip), %ymm1, %ymm1
; AVX2-SLOW-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpsubq {{.*}}(%rip), %ymm3, %ymm3
; AVX2-SLOW-NEXT: vpsubq {{.*}}(%rip), %ymm2, %ymm2
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
@ -1615,15 +1561,12 @@ define <16 x i8> @trunc_sub_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-SLOW-NEXT: vpand %xmm4, %xmm0, %xmm0
; AVX2-SLOW-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX2-SLOW-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: trunc_sub_const_v16i64_v16i8:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vpsubq {{.*}}(%rip), %ymm1, %ymm1
; AVX2-FAST-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-FAST-NEXT: vpsubq {{.*}}(%rip), %ymm3, %ymm3
; AVX2-FAST-NEXT: vpsubq {{.*}}(%rip), %ymm2, %ymm2
; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [0,2,4,6,4,6,6,7]
; AVX2-FAST-NEXT: vpermd %ymm2, %ymm4, %ymm2
; AVX2-FAST-NEXT: vpermd %ymm3, %ymm4, %ymm3
@ -1640,17 +1583,17 @@ define <16 x i8> @trunc_sub_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-FAST-NEXT: vpand %xmm5, %xmm0, %xmm0
; AVX2-FAST-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX2-FAST-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
; AVX512-LABEL: trunc_sub_const_v16i64_v16i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsubq {{.*}}(%rip), %zmm1, %zmm1
; AVX512-NEXT: vpsubq {{.*}}(%rip), %zmm0, %zmm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm1, %ymm1
; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
; AVX512-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <16 x i64> %a0, <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15>
@ -1661,10 +1604,6 @@ define <16 x i8> @trunc_sub_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
define <16 x i8> @trunc_sub_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; SSE-LABEL: trunc_sub_const_v16i32_v16i8:
; SSE: # %bb.0:
; SSE-NEXT: psubd {{.*}}(%rip), %xmm0
; SSE-NEXT: psubd {{.*}}(%rip), %xmm1
; SSE-NEXT: psubd {{.*}}(%rip), %xmm2
; SSE-NEXT: psubd {{.*}}(%rip), %xmm3
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; SSE-NEXT: pand %xmm4, %xmm3
; SSE-NEXT: pand %xmm4, %xmm2
@ -1673,31 +1612,27 @@ define <16 x i8> @trunc_sub_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; SSE-NEXT: pand %xmm4, %xmm0
; SSE-NEXT: packuswb %xmm1, %xmm0
; SSE-NEXT: packuswb %xmm2, %xmm0
; SSE-NEXT: psubb {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_sub_const_v16i32_v16i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsubd {{.*}}(%rip), %xmm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsubd {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vpsubd {{.*}}(%rip), %xmm1, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpsubd {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [3.57331108E-43,3.57331108E-43,3.57331108E-43,3.57331108E-43]
; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [3.57331108E-43,3.57331108E-43,3.57331108E-43,3.57331108E-43]
; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vandps %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vandps %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_sub_const_v16i32_v16i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsubd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpsubd {{.*}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
@ -1707,13 +1642,14 @@ define <16 x i8> @trunc_sub_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpand %xmm3, %xmm0, %xmm0
; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_sub_const_v16i32_v16i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsubd {{.*}}(%rip), %zmm0, %zmm0
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
; AVX512-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <16 x i32> %a0, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@ -1724,56 +1660,52 @@ define <16 x i8> @trunc_sub_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
define <16 x i8> @trunc_sub_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; SSE-LABEL: trunc_sub_const_v16i16_v16i8:
; SSE: # %bb.0:
; SSE-NEXT: psubw {{.*}}(%rip), %xmm0
; SSE-NEXT: psubw {{.*}}(%rip), %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: packuswb %xmm1, %xmm0
; SSE-NEXT: psubb {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_sub_const_v16i16_v16i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpackuswb %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_sub_const_v16i16_v16i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc_sub_const_v16i16_v16i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_sub_const_v16i16_v16i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_sub_const_v16i16_v16i8:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%1 = sub <16 x i16> %a0, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>

@ -1340,49 +1340,39 @@ define <16 x i8> @trunc_ext_sub_v16i16_v16i8(<16 x i8> %x, <16 x i8> %y) {
define <4 x i32> @trunc_sub_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; SSE-LABEL: trunc_sub_const_v4i64_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: movl $1, %eax
; SSE-NEXT: movq %rax, %xmm2
; SSE-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5,6,7]
; SSE-NEXT: psubq %xmm2, %xmm0
; SSE-NEXT: psubq {{.*}}(%rip), %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT: psubd {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_sub_const_v4i64_v4i32:
; AVX1: # %bb.0:
; AVX1-NEXT: movl $1, %eax
; AVX1-NEXT: vmovq %rax, %xmm1
; AVX1-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm1[0,2],xmm0[0,2]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX1-NEXT: vpsubd {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: trunc_sub_const_v4i64_v4i32:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vpsubd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: trunc_sub_const_v4i64_v4i32:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
; AVX2-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm0
; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vpsubd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
; AVX512-LABEL: trunc_sub_const_v4i64_v4i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vpsubd {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <4 x i64> %a0, <i64 0, i64 1, i64 2, i64 3>
@ -1393,53 +1383,38 @@ define <4 x i32> @trunc_sub_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
define <8 x i16> @trunc_sub_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; SSE-LABEL: trunc_sub_const_v8i64_v8i16:
; SSE: # %bb.0:
; SSE-NEXT: movl $1, %eax
; SSE-NEXT: movq %rax, %xmm4
; SSE-NEXT: pslldq {{.*#+}} xmm4 = zero,zero,zero,zero,zero,zero,zero,zero,xmm4[0,1,2,3,4,5,6,7]
; SSE-NEXT: psubq %xmm4, %xmm0
; SSE-NEXT: psubq {{.*}}(%rip), %xmm1
; SSE-NEXT: psubq {{.*}}(%rip), %xmm2
; SSE-NEXT: psubq {{.*}}(%rip), %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,0,2,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,0,2,4,5,6,7]
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm0[0,2,2,3,4,5,6,7]
; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,1,0,2,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,2,4,5,6,7]
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
; SSE-NEXT: movapd %xmm2, %xmm0
; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm4[0],xmm0[1]
; SSE-NEXT: psubw {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_sub_const_v8i64_v8i16:
; AVX1: # %bb.0:
; AVX1-NEXT: movl $1, %eax
; AVX1-NEXT: vmovq %rax, %xmm2
; AVX1-NEXT: vpslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5,6,7]
; AVX1-NEXT: vpsubq %xmm2, %xmm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm1, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1,2,3],xmm1[4],xmm4[5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1,2,3],xmm3[4],xmm4[5,6,7]
; AVX1-NEXT: vpackusdw %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1,2,3],xmm2[4],xmm4[5,6,7]
; AVX1-NEXT: vpackusdw %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1,2,3],xmm1[4],xmm3[5,6,7]
; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1,2,3],xmm0[4],xmm3[5,6,7]
; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: trunc_sub_const_v8i64_v8i16:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vpsubq {{.*}}(%rip), %ymm1, %ymm1
; AVX2-SLOW-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
@ -1447,28 +1422,26 @@ define <8 x i16> @trunc_sub_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: trunc_sub_const_v8i64_v8i16:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vpsubq {{.*}}(%rip), %ymm1, %ymm1
; AVX2-FAST-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2,4,6,4,6,6,7]
; AVX2-FAST-NEXT: vpermd %ymm0, %ymm2, %ymm0
; AVX2-FAST-NEXT: vpermd %ymm1, %ymm2, %ymm1
; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
; AVX512-LABEL: trunc_sub_const_v8i64_v8i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsubq {{.*}}(%rip), %zmm0, %zmm0
; AVX512-NEXT: vpmovqw %zmm0, %xmm0
; AVX512-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <8 x i64> %a0, <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>
@ -1479,41 +1452,38 @@ define <8 x i16> @trunc_sub_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
define <8 x i16> @trunc_sub_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; SSE-LABEL: trunc_sub_const_v8i32_v8i16:
; SSE: # %bb.0:
; SSE-NEXT: psubd {{.*}}(%rip), %xmm0
; SSE-NEXT: psubd {{.*}}(%rip), %xmm1
; SSE-NEXT: pslld $16, %xmm1
; SSE-NEXT: psrad $16, %xmm1
; SSE-NEXT: pslld $16, %xmm0
; SSE-NEXT: psrad $16, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: psubw {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_sub_const_v8i32_v8i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsubd {{.*}}(%rip), %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsubd {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX1-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_sub_const_v8i32_v8i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsubd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_sub_const_v8i32_v8i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsubd {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <8 x i32> %a0, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@ -1524,17 +1494,6 @@ define <8 x i16> @trunc_sub_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
define <16 x i8> @trunc_sub_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; SSE-LABEL: trunc_sub_const_v16i64_v16i8:
; SSE: # %bb.0:
; SSE-NEXT: movl $1, %eax
; SSE-NEXT: movq %rax, %xmm8
; SSE-NEXT: pslldq {{.*#+}} xmm8 = zero,zero,zero,zero,zero,zero,zero,zero,xmm8[0,1,2,3,4,5,6,7]
; SSE-NEXT: psubq %xmm8, %xmm0
; SSE-NEXT: psubq {{.*}}(%rip), %xmm1
; SSE-NEXT: psubq {{.*}}(%rip), %xmm2
; SSE-NEXT: psubq {{.*}}(%rip), %xmm3
; SSE-NEXT: psubq {{.*}}(%rip), %xmm4
; SSE-NEXT: psubq {{.*}}(%rip), %xmm5
; SSE-NEXT: psubq {{.*}}(%rip), %xmm6
; SSE-NEXT: psubq {{.*}}(%rip), %xmm7
; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
; SSE-NEXT: pand %xmm8, %xmm7
; SSE-NEXT: pand %xmm8, %xmm6
@ -1551,51 +1510,38 @@ define <16 x i8> @trunc_sub_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; SSE-NEXT: packuswb %xmm1, %xmm0
; SSE-NEXT: packuswb %xmm2, %xmm0
; SSE-NEXT: packuswb %xmm4, %xmm0
; SSE-NEXT: psubb {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_sub_const_v16i64_v16i8:
; AVX1: # %bb.0:
; AVX1-NEXT: movl $1, %eax
; AVX1-NEXT: vmovq %rax, %xmm4
; AVX1-NEXT: vpslldq {{.*#+}} xmm4 = zero,zero,zero,zero,zero,zero,zero,zero,xmm4[0,1,2,3,4,5,6,7]
; AVX1-NEXT: vpsubq %xmm4, %xmm0, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm1, %xmm5
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm2, %xmm6
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm3, %xmm7
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm3, %xmm3
; AVX1-NEXT: vmovddup {{.*#+}} xmm4 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm4 = mem[0,0]
; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpand %xmm4, %xmm7, %xmm7
; AVX1-NEXT: vpackusdw %xmm3, %xmm7, %xmm3
; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpand %xmm4, %xmm6, %xmm6
; AVX1-NEXT: vpackusdw %xmm2, %xmm6, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
; AVX1-NEXT: # xmm5 = mem[0,0]
; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm3
; AVX1-NEXT: vpackusdw %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm4, %xmm8, %xmm3
; AVX1-NEXT: vpackusdw %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: trunc_sub_const_v16i64_v16i8:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vpsubq {{.*}}(%rip), %ymm1, %ymm1
; AVX2-SLOW-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpsubq {{.*}}(%rip), %ymm3, %ymm3
; AVX2-SLOW-NEXT: vpsubq {{.*}}(%rip), %ymm2, %ymm2
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
@ -1615,15 +1561,12 @@ define <16 x i8> @trunc_sub_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-SLOW-NEXT: vpand %xmm4, %xmm0, %xmm0
; AVX2-SLOW-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX2-SLOW-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: trunc_sub_const_v16i64_v16i8:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vpsubq {{.*}}(%rip), %ymm1, %ymm1
; AVX2-FAST-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-FAST-NEXT: vpsubq {{.*}}(%rip), %ymm3, %ymm3
; AVX2-FAST-NEXT: vpsubq {{.*}}(%rip), %ymm2, %ymm2
; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [0,2,4,6,4,6,6,7]
; AVX2-FAST-NEXT: vpermd %ymm2, %ymm4, %ymm2
; AVX2-FAST-NEXT: vpermd %ymm3, %ymm4, %ymm3
@ -1640,17 +1583,17 @@ define <16 x i8> @trunc_sub_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-FAST-NEXT: vpand %xmm5, %xmm0, %xmm0
; AVX2-FAST-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX2-FAST-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
; AVX512-LABEL: trunc_sub_const_v16i64_v16i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsubq {{.*}}(%rip), %zmm1, %zmm1
; AVX512-NEXT: vpsubq {{.*}}(%rip), %zmm0, %zmm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm1, %ymm1
; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
; AVX512-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <16 x i64> %a0, <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15>
@ -1661,10 +1604,6 @@ define <16 x i8> @trunc_sub_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
define <16 x i8> @trunc_sub_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; SSE-LABEL: trunc_sub_const_v16i32_v16i8:
; SSE: # %bb.0:
; SSE-NEXT: psubd {{.*}}(%rip), %xmm0
; SSE-NEXT: psubd {{.*}}(%rip), %xmm1
; SSE-NEXT: psubd {{.*}}(%rip), %xmm2
; SSE-NEXT: psubd {{.*}}(%rip), %xmm3
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; SSE-NEXT: pand %xmm4, %xmm3
; SSE-NEXT: pand %xmm4, %xmm2
@ -1673,31 +1612,27 @@ define <16 x i8> @trunc_sub_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; SSE-NEXT: pand %xmm4, %xmm0
; SSE-NEXT: packuswb %xmm1, %xmm0
; SSE-NEXT: packuswb %xmm2, %xmm0
; SSE-NEXT: psubb {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_sub_const_v16i32_v16i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsubd {{.*}}(%rip), %xmm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsubd {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vpsubd {{.*}}(%rip), %xmm1, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpsubd {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [3.57331108E-43,3.57331108E-43,3.57331108E-43,3.57331108E-43]
; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpackusdw %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpackusdw %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [3.57331108E-43,3.57331108E-43,3.57331108E-43,3.57331108E-43]
; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vandps %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vandps %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_sub_const_v16i32_v16i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsubd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpsubd {{.*}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
@ -1707,13 +1642,14 @@ define <16 x i8> @trunc_sub_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpand %xmm3, %xmm0, %xmm0
; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_sub_const_v16i32_v16i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsubd {{.*}}(%rip), %zmm0, %zmm0
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
; AVX512-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <16 x i32> %a0, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@ -1724,56 +1660,52 @@ define <16 x i8> @trunc_sub_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
define <16 x i8> @trunc_sub_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; SSE-LABEL: trunc_sub_const_v16i16_v16i8:
; SSE: # %bb.0:
; SSE-NEXT: psubw {{.*}}(%rip), %xmm0
; SSE-NEXT: psubw {{.*}}(%rip), %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: packuswb %xmm1, %xmm0
; SSE-NEXT: psubb {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_sub_const_v16i16_v16i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpackuswb %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_sub_const_v16i16_v16i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc_sub_const_v16i16_v16i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: trunc_sub_const_v16i16_v16i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: trunc_sub_const_v16i16_v16i8:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
; AVX512DQ-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%1 = sub <16 x i16> %a0, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>