; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2ORLATER,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,AVX,AVX2ORLATER,AVX512,AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVX,AVX2ORLATER,AVX512,AVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+xop | FileCheck %s --check-prefixes=CHECK,AVX,XOP
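; The prefixes form a hierarchy: CHECK lines must hold for every RUN above,
; while the SSE/AVX1/AVX2/AVX512F/AVX512BW/XOP lines capture where the
; lowering diverges by ISA level.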
; fold (sdiv x, 1) -> x
define i32 @combine_sdiv_by_one(i32 %x) {
; CHECK-LABEL: combine_sdiv_by_one:
; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
%1 = sdiv i32 %x, 1
ret i32 %1
}
define <4 x i32> @combine_vec_sdiv_by_one(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_sdiv_by_one:
; CHECK: # %bb.0:
; CHECK-NEXT: retq
%1 = sdiv <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
ret <4 x i32> %1
}
; fold (sdiv x, -1) -> 0 - x
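; Plain negation is enough here: the one value whose negation would wrap,
; INT_MIN, only arises from sdiv INT_MIN, -1, which overflows and is
; undefined in LLVM IR.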
define i32 @combine_sdiv_by_negone(i32 %x) {
; CHECK-LABEL: combine_sdiv_by_negone:
; CHECK: # %bb.0:
; CHECK-NEXT: negl %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
%1 = sdiv i32 %x, -1
ret i32 %1
}
define <4 x i32> @combine_vec_sdiv_by_negone(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_by_negone:
; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: psubd %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_sdiv_by_negone:
; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpsubd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = sdiv <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
ret <4 x i32> %1
}
; fold (sdiv x, INT_MIN) -> select((icmp eq x, INT_MIN), 1, 0)
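; Only INT_MIN / INT_MIN yields 1; every other x satisfies |x| < 2^31, so the
; quotient truncates to 0. The vector form exploits that pcmpeqd writes
; all-ones (-1) into matching lanes: a logical right shift by 31 turns that
; mask directly into the 0/1 result.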
define i32 @combine_sdiv_by_minsigned(i32 %x) {
; CHECK-LABEL: combine_sdiv_by_minsigned:
; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: cmpl $-2147483648, %edi # imm = 0x80000000
; CHECK-NEXT: sete %al
; CHECK-NEXT: retq
%1 = sdiv i32 %x, -2147483648
ret i32 %1
}
define <4 x i32> @combine_vec_sdiv_by_minsigned(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_by_minsigned:
; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd {{.*}}(%rip), %xmm0
; SSE-NEXT: psrld $31, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_vec_sdiv_by_minsigned:
; AVX1: # %bb.0:
; AVX1-NEXT: vpcmpeqd {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vpsrld $31, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_vec_sdiv_by_minsigned:
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2147483648,2147483648,2147483648,2147483648]
; AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpsrld $31, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: combine_vec_sdiv_by_minsigned:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2147483648,2147483648,2147483648,2147483648]
; AVX512F-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: vpsrld $31, %xmm0, %xmm0
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: combine_vec_sdiv_by_minsigned:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpeqd {{.*}}(%rip){1to4}, %xmm0, %k1
; AVX512BW-NEXT: vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z}
; AVX512BW-NEXT: retq
;
; XOP-LABEL: combine_vec_sdiv_by_minsigned:
; XOP: # %bb.0:
; XOP-NEXT: vpcomeqd {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: vpsrld $31, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = sdiv <4 x i32> %x, <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
ret <4 x i32> %1
}
; TODO fold (sdiv x, x) -> 1
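; Division by zero is undefined in LLVM IR, so (sdiv x, x) could in principle
; be folded to 1 unconditionally; until that combine exists, the scalar and
; vector tests below still expand to real idivl instructions.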
define i32 @combine_sdiv_dupe(i32 %x) {
; CHECK-LABEL: combine_sdiv_dupe:
; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: cltd
; CHECK-NEXT: idivl %edi
; CHECK-NEXT: retq
%1 = sdiv i32 %x, %x
ret i32 %1
}
define <4 x i32> @combine_vec_sdiv_dupe(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_dupe:
; SSE: # %bb.0:
; SSE-NEXT: pextrd $1, %xmm0, %ecx
; SSE-NEXT: movl %ecx, %eax
; SSE-NEXT: cltd
; SSE-NEXT: idivl %ecx
; SSE-NEXT: movl %eax, %ecx
; SSE-NEXT: movd %xmm0, %esi
; SSE-NEXT: movl %esi, %eax
; SSE-NEXT: cltd
; SSE-NEXT: idivl %esi
; SSE-NEXT: movd %eax, %xmm1
; SSE-NEXT: pinsrd $1, %ecx, %xmm1
; SSE-NEXT: pextrd $2, %xmm0, %ecx
; SSE-NEXT: movl %ecx, %eax
; SSE-NEXT: cltd
; SSE-NEXT: idivl %ecx
; SSE-NEXT: pinsrd $2, %eax, %xmm1
; SSE-NEXT: pextrd $3, %xmm0, %ecx
; SSE-NEXT: movl %ecx, %eax
; SSE-NEXT: cltd
; SSE-NEXT: idivl %ecx
; SSE-NEXT: pinsrd $3, %eax, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_sdiv_dupe:
; AVX: # %bb.0:
; AVX-NEXT: vpextrd $1, %xmm0, %ecx
; AVX-NEXT: movl %ecx, %eax
; AVX-NEXT: cltd
; AVX-NEXT: idivl %ecx
; AVX-NEXT: movl %eax, %ecx
; AVX-NEXT: vmovd %xmm0, %esi
; AVX-NEXT: movl %esi, %eax
; AVX-NEXT: cltd
; AVX-NEXT: idivl %esi
; AVX-NEXT: vmovd %eax, %xmm1
; AVX-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm1
; AVX-NEXT: vpextrd $2, %xmm0, %ecx
; AVX-NEXT: movl %ecx, %eax
; AVX-NEXT: cltd
; AVX-NEXT: idivl %ecx
; AVX-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
; AVX-NEXT: vpextrd $3, %xmm0, %ecx
; AVX-NEXT: movl %ecx, %eax
; AVX-NEXT: cltd
; AVX-NEXT: idivl %ecx
; AVX-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = sdiv <4 x i32> %x, %x
ret <4 x i32> %1
}
; fold (sdiv x, y) -> (udiv x, y) iff x and y are positive
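; The mask with 255 clears the sign bit, so signed and unsigned division
; agree, and udiv by a power of two is a plain logical shift: the whole
; division collapses to pand + psrld.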
define <4 x i32> @combine_vec_sdiv_by_pos0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_by_pos0:
; SSE: # %bb.0:
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: psrld $2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_sdiv_by_pos0:
; AVX: # %bb.0:
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpsrld $2, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
%2 = sdiv <4 x i32> %1, <i32 4, i32 4, i32 4, i32 4>
ret <4 x i32> %2
}
define <4 x i32> @combine_vec_sdiv_by_pos1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_by_pos1:
; SSE: # %bb.0:
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $3, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: psrld $4, %xmm0
; SSE-NEXT: psrld $2, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_vec_sdiv_by_pos1:
; AVX1: # %bb.0:
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vpsrld $4, %xmm0, %xmm1
; AVX1-NEXT: vpsrld $2, %xmm0, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: vpsrld $3, %xmm0, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; AVX1-NEXT: retq
;
; AVX2ORLATER-LABEL: combine_vec_sdiv_by_pos1:
; AVX2ORLATER: # %bb.0:
; AVX2ORLATER-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX2ORLATER-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX2ORLATER-NEXT: retq
;
; XOP-LABEL: combine_vec_sdiv_by_pos1:
; XOP: # %bb.0:
; XOP-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: retq
%1 = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
%2 = sdiv <4 x i32> %1, <i32 1, i32 4, i32 8, i32 16>
ret <4 x i32> %2
}
; fold (sdiv x, (1 << c)) -> (x + bias) >>s c, with bias = (x >>s (bw-1)) >>u (bw-c)
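; A sketch of that expansion for the <4 x i32> divide-by-4 case below (c = 2):
;   sign = x >>s 31          ; 0 for x >= 0, -1 for x < 0
;   bias = sign >>u 30       ; 0 or 3, i.e. 2^c - 1
;   res  = (x + bias) >>s 2
; e.g. x = -7: bias = 3 and (-7 + 3) >>s 2 = -1 = trunc(-7/4), whereas a bare
; arithmetic shift would round toward -infinity and give -2.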
define <4 x i32> @combine_vec_sdiv_by_pow2a(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_by_pow2a:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrad $31, %xmm1
; SSE-NEXT: psrld $30, %xmm1
; SSE-NEXT: paddd %xmm0, %xmm1
; SSE-NEXT: psrad $2, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_sdiv_by_pow2a:
; AVX: # %bb.0:
; AVX-NEXT: vpsrad $31, %xmm0, %xmm1
; AVX-NEXT: vpsrld $30, %xmm1, %xmm1
; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrad $2, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = sdiv <4 x i32> %x, <i32 4, i32 4, i32 4, i32 4>
ret <4 x i32> %1
}
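; A negative power-of-two divisor runs the same expansion for |divisor| and
; then negates the quotient (the trailing pxor/psubd pair below).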
define <4 x i32> @combine_vec_sdiv_by_pow2a_neg(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_by_pow2a_neg:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrad $31, %xmm1
; SSE-NEXT: psrld $30, %xmm1
; SSE-NEXT: paddd %xmm0, %xmm1
; SSE-NEXT: psrad $2, %xmm1
; SSE-NEXT: pxor %xmm0, %xmm0
; SSE-NEXT: psubd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_sdiv_by_pow2a_neg:
; AVX: # %bb.0:
; AVX-NEXT: vpsrad $31, %xmm0, %xmm1
; AVX-NEXT: vpsrld $30, %xmm1, %xmm1
; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrad $2, %xmm0, %xmm0
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpsubd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = sdiv <4 x i32> %x, <i32 -4, i32 -4, i32 -4, i32 -4>
ret <4 x i32> %1
}
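; The pow2b tests use a different power of two per lane. x86 has no per-byte
; shift instructions, so without XOP both the bias and the final arithmetic
; shift are emulated with a shift-by-4/2/1 ladder whose steps are enabled per
; byte via pblendvb; AVX512 widens the bytes to words or dwords for
; vpsrlv*/vpsrav*, and XOP uses vpshlb/vpshab directly.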
define <16 x i8> @combine_vec_sdiv_by_pow2b_v16i8(<16 x i8> %x) {
; SSE-LABEL: combine_vec_sdiv_by_pow2b_v16i8:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: pxor %xmm2, %xmm2
; SSE-NEXT: pcmpgtb %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: psrlw $4, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [49408,32992,24736,57408,49408,32992,24736,57408]
; SSE-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: psrlw $2, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: paddb %xmm0, %xmm0
; SSE-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: psrlw $1, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: paddb %xmm0, %xmm0
; SSE-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; SSE-NEXT: paddb %xmm1, %xmm2
; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
; SSE-NEXT: movdqa %xmm3, %xmm4
; SSE-NEXT: psraw $4, %xmm4
; SSE-NEXT: movdqa {{.*#+}} xmm5 = [16384,32800,41056,8384,16384,32800,41056,8384]
; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm5[8],xmm0[9],xmm5[9],xmm0[10],xmm5[10],xmm0[11],xmm5[11],xmm0[12],xmm5[12],xmm0[13],xmm5[13],xmm0[14],xmm5[14],xmm0[15],xmm5[15]
; SSE-NEXT: pblendvb %xmm0, %xmm4, %xmm3
; SSE-NEXT: movdqa %xmm3, %xmm4
; SSE-NEXT: psraw $2, %xmm4
; SSE-NEXT: paddw %xmm0, %xmm0
; SSE-NEXT: pblendvb %xmm0, %xmm4, %xmm3
; SSE-NEXT: movdqa %xmm3, %xmm4
; SSE-NEXT: psraw $1, %xmm4
; SSE-NEXT: paddw %xmm0, %xmm0
; SSE-NEXT: pblendvb %xmm0, %xmm4, %xmm3
; SSE-NEXT: psrlw $8, %xmm3
; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: movdqa %xmm2, %xmm4
; SSE-NEXT: psraw $4, %xmm4
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
; SSE-NEXT: pblendvb %xmm0, %xmm4, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm4
; SSE-NEXT: psraw $2, %xmm4
; SSE-NEXT: paddw %xmm0, %xmm0
; SSE-NEXT: pblendvb %xmm0, %xmm4, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm4
; SSE-NEXT: psraw $1, %xmm4
; SSE-NEXT: paddw %xmm0, %xmm0
; SSE-NEXT: pblendvb %xmm0, %xmm4, %xmm2
; SSE-NEXT: psrlw $8, %xmm2
; SSE-NEXT: packuswb %xmm3, %xmm2
; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
; SSE-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_vec_sdiv_by_pow2b_v16i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm2
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [49408,32992,24736,57408,49408,32992,24736,57408]
; AVX1-NEXT: vpblendvb %xmm3, %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm2
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vpaddb %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpblendvb %xmm3, %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm2
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vpaddb %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpblendvb %xmm3, %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX1-NEXT: vpsraw $4, %xmm2, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [16384,32800,41056,8384,16384,32800,41056,8384]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm0[8],xmm4[8],xmm0[9],xmm4[9],xmm0[10],xmm4[10],xmm0[11],xmm4[11],xmm0[12],xmm4[12],xmm0[13],xmm4[13],xmm0[14],xmm4[14],xmm0[15],xmm4[15]
; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsraw $2, %xmm2, %xmm3
; AVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm5
; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsraw $1, %xmm2, %xmm3
; AVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm5
; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX1-NEXT: vpsraw $4, %xmm1, %xmm3
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpsraw $2, %xmm1, %xmm3
; AVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm4
; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpsraw $1, %xmm1, %xmm3
; AVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm4
; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_vec_sdiv_by_pow2b_v16i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm1
; AVX2-NEXT: vpsrlw $4, %xmm1, %xmm2
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [49408,32992,24736,57408,49408,32992,24736,57408]
; AVX2-NEXT: vpblendvb %xmm3, %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpsrlw $2, %xmm1, %xmm2
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX2-NEXT: vpaddb %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpblendvb %xmm3, %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpsrlw $1, %xmm1, %xmm2
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX2-NEXT: vpaddb %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpblendvb %xmm3, %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX2-NEXT: vpsraw $4, %xmm2, %xmm3
; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = [16384,32800,41056,8384,16384,32800,41056,8384]
; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm0[8],xmm4[8],xmm0[9],xmm4[9],xmm0[10],xmm4[10],xmm0[11],xmm4[11],xmm0[12],xmm4[12],xmm0[13],xmm4[13],xmm0[14],xmm4[14],xmm0[15],xmm4[15]
; AVX2-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpsraw $2, %xmm2, %xmm3
; AVX2-NEXT: vpaddw %xmm5, %xmm5, %xmm5
; AVX2-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpsraw $1, %xmm2, %xmm3
; AVX2-NEXT: vpaddw %xmm5, %xmm5, %xmm5
; AVX2-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX2-NEXT: vpsraw $4, %xmm1, %xmm3
; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
; AVX2-NEXT: vpblendvb %xmm4, %xmm3, %xmm1, %xmm1
; AVX2-NEXT: vpsraw $2, %xmm1, %xmm3
; AVX2-NEXT: vpaddw %xmm4, %xmm4, %xmm4
; AVX2-NEXT: vpblendvb %xmm4, %xmm3, %xmm1, %xmm1
; AVX2-NEXT: vpsraw $1, %xmm1, %xmm3
; AVX2-NEXT: vpaddw %xmm4, %xmm4, %xmm4
; AVX2-NEXT: vpblendvb %xmm4, %xmm3, %xmm1, %xmm1
; AVX2-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
; AVX2-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: combine_vec_sdiv_by_pow2b_v16i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm1
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
; AVX512F-NEXT: vpsrlvd {{.*}}(%rip), %zmm1, %zmm1
; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
; AVX512F-NEXT: vpaddb %xmm1, %xmm0, %xmm1
; AVX512F-NEXT: vpmovsxbd %xmm1, %zmm1
; AVX512F-NEXT: vpsravd {{.*}}(%rip), %zmm1, %zmm1
; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
; AVX512F-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: combine_vec_sdiv_by_pow2b_v16i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm1
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512BW-NEXT: vpsrlvw {{.*}}(%rip), %ymm1, %ymm1
; AVX512BW-NEXT: vpmovwb %ymm1, %xmm1
; AVX512BW-NEXT: vpaddb %xmm1, %xmm0, %xmm1
; AVX512BW-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX512BW-NEXT: vpsravw {{.*}}(%rip), %ymm1, %ymm1
; AVX512BW-NEXT: vpmovwb %ymm1, %xmm1
; AVX512BW-NEXT: movw $257, %ax # imm = 0x101
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vmovdqu8 %xmm0, %xmm1 {%k1}
; AVX512BW-NEXT: vmovdqa %xmm1, %xmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; XOP-LABEL: combine_vec_sdiv_by_pow2b_v16i8:
; XOP: # %bb.0:
; XOP-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOP-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm2
; XOP-NEXT: vpsubb {{.*}}(%rip), %xmm1, %xmm3
; XOP-NEXT: vpshlb %xmm3, %xmm2, %xmm2
; XOP-NEXT: vpaddb %xmm2, %xmm0, %xmm2
; XOP-NEXT: vpsubb {{.*}}(%rip), %xmm1, %xmm1
; XOP-NEXT: vpshab %xmm1, %xmm2, %xmm1
; XOP-NEXT: vmovdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
; XOP-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = sdiv <16 x i8> %x, <i8 1, i8 4, i8 2, i8 16, i8 8, i8 32, i8 64, i8 2, i8 1, i8 4, i8 2, i8 16, i8 8, i8 32, i8 64, i8 2>
ret <16 x i8> %1
}
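; For i16 lanes the bias is cheap: psraw $15 yields 0 or -1 per element, and
; shifting that mask right logically by (16 - c) produces the 2^c - 1 bias
; without any compare.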
define <8 x i16> @combine_vec_sdiv_by_pow2b_v8i16(<8 x i16> %x) {
; SSE-LABEL: combine_vec_sdiv_by_pow2b_v8i16:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psraw $15, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrlw $8, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: psrlw $4, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2,3,4],xmm2[5,6],xmm1[7]
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrlw $2, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0],xmm2[1,2],xmm1[3,4],xmm2[5,6,7]
; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: psrlw $1, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2],xmm2[3],xmm1[4,5],xmm2[6],xmm1[7]
; SSE-NEXT: paddw %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psraw $4, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1,2],xmm2[3],xmm1[4],xmm2[5,6],xmm1[7]
; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: psraw $2, %xmm3
; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5],xmm3[6],xmm2[7]
; SSE-NEXT: movdqa %xmm3, %xmm1
; SSE-NEXT: psraw $1, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm3[0,1],xmm1[2],xmm3[3],xmm1[4,5],xmm3[6],xmm1[7]
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_vec_sdiv_by_pow2b_v8i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsraw $15, %xmm0, %xmm1
; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5,6,7]
; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4],xmm1[5,6],xmm2[7]
; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2],xmm1[3,4],xmm2[5,6,7]
; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpsraw $4, %xmm1, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3],xmm1[4],xmm2[5,6],xmm1[7]
; AVX1-NEXT: vpsraw $2, %xmm1, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5],xmm2[6],xmm1[7]
; AVX1-NEXT: vpsraw $1, %xmm1, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_vec_sdiv_by_pow2b_v8i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsraw $15, %xmm0, %xmm1
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vpmovsxwd %xmm1, %ymm1
; AVX2-NEXT: vpsravd {{.*}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: combine_vec_sdiv_by_pow2b_v8i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsraw $15, %xmm0, %xmm1
; AVX512F-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX512F-NEXT: vpsrlvd {{.*}}(%rip), %ymm1, %ymm1
; AVX512F-NEXT: vpmovdw %zmm1, %ymm1
; AVX512F-NEXT: vpaddw %xmm1, %xmm0, %xmm1
; AVX512F-NEXT: vpmovsxwd %xmm1, %ymm1
; AVX512F-NEXT: vpsravd {{.*}}(%rip), %ymm1, %ymm1
; AVX512F-NEXT: vpmovdw %zmm1, %ymm1
; AVX512F-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: combine_vec_sdiv_by_pow2b_v8i16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsraw $15, %xmm0, %xmm1
; AVX512BW-NEXT: vpsrlvw {{.*}}(%rip), %xmm1, %xmm1
; AVX512BW-NEXT: vpaddw %xmm1, %xmm0, %xmm1
; AVX512BW-NEXT: vpsravw {{.*}}(%rip), %xmm1, %xmm1
; AVX512BW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
; AVX512BW-NEXT: retq
;
; XOP-LABEL: combine_vec_sdiv_by_pow2b_v8i16:
; XOP: # %bb.0:
; XOP-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOP-NEXT: vpsubw {{.*}}(%rip), %xmm1, %xmm2
; XOP-NEXT: vpsraw $15, %xmm0, %xmm3
; XOP-NEXT: vpshlw %xmm2, %xmm3, %xmm2
; XOP-NEXT: vpaddw %xmm2, %xmm0, %xmm2
; XOP-NEXT: vpsubw {{.*}}(%rip), %xmm1, %xmm1
; XOP-NEXT: vpshaw %xmm1, %xmm2, %xmm1
; XOP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
; XOP-NEXT: retq
%1 = sdiv <8 x i16> %x, <i16 1, i16 4, i16 2, i16 16, i16 8, i16 32, i16 64, i16 2>
ret <8 x i16> %1
}
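; No variable 16-bit shift exists before AVX512BW, so the wider i16 tests
; either blend fixed-amount shifts per lane (SSE/AVX1), widen to 32-bit lanes
; for vpsrlvd/vpsravd (AVX2/AVX512F), or use vpsrlvw/vpsravw directly
; (AVX512BW); XOP stays in 16-bit via vpshlw/vpshaw.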
define <16 x i16> @combine_vec_sdiv_by_pow2b_v16i16(<16 x i16> %x) {
; SSE-LABEL: combine_vec_sdiv_by_pow2b_v16i16:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psraw $15, %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrlw $8, %xmm3
; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm0[0],xmm3[1,2,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm3, %xmm0
; SSE-NEXT: psrlw $4, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0],xmm0[1,2,3,4],xmm3[5,6],xmm0[7]
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrlw $2, %xmm3
; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm0[0],xmm3[1,2],xmm0[3,4],xmm3[5,6,7]
; SSE-NEXT: movdqa %xmm3, %xmm0
; SSE-NEXT: psrlw $1, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1],xmm0[2],xmm3[3],xmm0[4,5],xmm3[6],xmm0[7]
; SSE-NEXT: paddw %xmm2, %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psraw $4, %xmm3
; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm0[0,1,2],xmm3[3],xmm0[4],xmm3[5,6],xmm0[7]
; SSE-NEXT: movdqa %xmm3, %xmm4
; SSE-NEXT: psraw $2, %xmm4
; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm3[0],xmm4[1],xmm3[2,3],xmm4[4],xmm3[5],xmm4[6],xmm3[7]
; SSE-NEXT: movdqa %xmm4, %xmm0
; SSE-NEXT: psraw $1, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm4[0,1],xmm0[2],xmm4[3],xmm0[4,5],xmm4[6],xmm0[7]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psraw $15, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: psrlw $8, %xmm3
; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm2[0],xmm3[1,2,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm3, %xmm2
; SSE-NEXT: psrlw $4, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1,2,3,4],xmm3[5,6],xmm2[7]
; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: psrlw $2, %xmm3
; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm2[0],xmm3[1,2],xmm2[3,4],xmm3[5,6,7]
; SSE-NEXT: movdqa %xmm3, %xmm2
; SSE-NEXT: psrlw $1, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2],xmm3[3],xmm2[4,5],xmm3[6],xmm2[7]
; SSE-NEXT: paddw %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: psraw $4, %xmm3
; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm2[0,1,2],xmm3[3],xmm2[4],xmm3[5,6],xmm2[7]
; SSE-NEXT: movdqa %xmm3, %xmm4
; SSE-NEXT: psraw $2, %xmm4
; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm3[0],xmm4[1],xmm3[2,3],xmm4[4],xmm3[5],xmm4[6],xmm3[7]
; SSE-NEXT: movdqa %xmm4, %xmm2
; SSE-NEXT: psraw $1, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm4[0,1],xmm2[2],xmm4[3],xmm2[4,5],xmm4[6],xmm2[7]
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_vec_sdiv_by_pow2b_v16i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsraw $15, %xmm1, %xmm2
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3,4,5,6,7]
; AVX1-NEXT: vpsrlw $4, %xmm2, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3,4],xmm2[5,6],xmm3[7]
; AVX1-NEXT: vpsrlw $2, %xmm2, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2],xmm2[3,4],xmm3[5,6,7]
; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3],xmm3[4,5],xmm2[6],xmm3[7]
; AVX1-NEXT: vpaddw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsraw $4, %xmm1, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3],xmm1[4],xmm2[5,6],xmm1[7]
; AVX1-NEXT: vpsraw $2, %xmm1, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5],xmm2[6],xmm1[7]
; AVX1-NEXT: vpsraw $1, %xmm1, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
; AVX1-NEXT: vpsraw $15, %xmm0, %xmm2
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3,4,5,6,7]
; AVX1-NEXT: vpsrlw $4, %xmm2, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3,4],xmm2[5,6],xmm3[7]
; AVX1-NEXT: vpsrlw $2, %xmm2, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2],xmm2[3,4],xmm3[5,6,7]
; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3],xmm3[4,5],xmm2[6],xmm3[7]
; AVX1-NEXT: vpaddw %xmm2, %xmm0, %xmm2
; AVX1-NEXT: vpsraw $4, %xmm2, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[3],xmm2[4],xmm3[5,6],xmm2[7]
; AVX1-NEXT: vpsraw $2, %xmm2, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5],xmm3[6],xmm2[7]
; AVX1-NEXT: vpsraw $1, %xmm2, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3],xmm3[4,5],xmm2[6],xmm3[7]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [0,65535,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,65535]
; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
; AVX1-NEXT: vandnps %ymm0, %ymm2, %ymm0
; AVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_vec_sdiv_by_pow2b_v16i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [16,14,15,12,13,11,10,15,16,14,15,12,13,11,10,15]
; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[12],ymm1[12],ymm2[13],ymm1[13],ymm2[14],ymm1[14],ymm2[15],ymm1[15]
; AVX2-NEXT: vpsraw $15, %ymm0, %ymm4
; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm1[4],ymm4[4],ymm1[5],ymm4[5],ymm1[6],ymm4[6],ymm1[7],ymm4[7],ymm1[12],ymm4[12],ymm1[13],ymm4[13],ymm1[14],ymm4[14],ymm1[15],ymm4[15]
; AVX2-NEXT: vpsrlvd %ymm3, %ymm5, %ymm3
; AVX2-NEXT: vpsrld $16, %ymm3, %ymm3
; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11]
; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm4 = ymm1[0],ymm4[0],ymm1[1],ymm4[1],ymm1[2],ymm4[2],ymm1[3],ymm4[3],ymm1[8],ymm4[8],ymm1[9],ymm4[9],ymm1[10],ymm4[10],ymm1[11],ymm4[11]
; AVX2-NEXT: vpsrlvd %ymm2, %ymm4, %ymm2
; AVX2-NEXT: vpsrld $16, %ymm2, %ymm2
; AVX2-NEXT: vpackusdw %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpaddw %ymm2, %ymm0, %ymm2
; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [0,2,1,4,3,5,6,1,0,2,1,4,3,5,6,1]
; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm4[4],ymm1[4],ymm4[5],ymm1[5],ymm4[6],ymm1[6],ymm4[7],ymm1[7],ymm4[12],ymm1[12],ymm4[13],ymm1[13],ymm4[14],ymm1[14],ymm4[15],ymm1[15]
; AVX2-NEXT: vpsravd %ymm5, %ymm3, %ymm3
; AVX2-NEXT: vpsrld $16, %ymm3, %ymm3
; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11]
; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[8],ymm1[8],ymm4[9],ymm1[9],ymm4[10],ymm1[10],ymm4[11],ymm1[11]
; AVX2-NEXT: vpsravd %ymm1, %ymm2, %ymm1
; AVX2-NEXT: vpsrld $16, %ymm1, %ymm1
; AVX2-NEXT: vpackusdw %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7],ymm0[8],ymm1[9,10,11,12,13,14,15]
; AVX2-NEXT: retq
;
; AVX512F-LABEL: combine_vec_sdiv_by_pow2b_v16i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsraw $15, %ymm0, %ymm1
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512F-NEXT: vpsrlvd {{.*}}(%rip), %zmm1, %zmm1
; AVX512F-NEXT: vpmovdw %zmm1, %ymm1
; AVX512F-NEXT: vpaddw %ymm1, %ymm0, %ymm1
; AVX512F-NEXT: vpmovsxwd %ymm1, %zmm1
; AVX512F-NEXT: vpsravd {{.*}}(%rip), %zmm1, %zmm1
; AVX512F-NEXT: vpmovdw %zmm1, %ymm1
; AVX512F-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7],ymm0[8],ymm1[9,10,11,12,13,14,15]
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: combine_vec_sdiv_by_pow2b_v16i16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsraw $15, %ymm0, %ymm1
; AVX512BW-NEXT: vpsrlvw {{.*}}(%rip), %ymm1, %ymm1
; AVX512BW-NEXT: vpaddw %ymm1, %ymm0, %ymm1
; AVX512BW-NEXT: vpsravw {{.*}}(%rip), %ymm1, %ymm1
; AVX512BW-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7],ymm0[8],ymm1[9,10,11,12,13,14,15]
; AVX512BW-NEXT: retq
;
; XOP-LABEL: combine_vec_sdiv_by_pow2b_v16i16:
; XOP: # %bb.0:
; XOP-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOP-NEXT: vpsubw {{.*}}(%rip), %xmm1, %xmm2
; XOP-NEXT: vextractf128 $1, %ymm0, %xmm3
; XOP-NEXT: vpsraw $15, %xmm3, %xmm4
; XOP-NEXT: vpshlw %xmm2, %xmm4, %xmm4
; XOP-NEXT: vpaddw %xmm4, %xmm3, %xmm3
; XOP-NEXT: vpsubw {{.*}}(%rip), %xmm1, %xmm1
; XOP-NEXT: vpshaw %xmm1, %xmm3, %xmm3
; XOP-NEXT: vpsraw $15, %xmm0, %xmm4
; XOP-NEXT: vpshlw %xmm2, %xmm4, %xmm2
; XOP-NEXT: vpaddw %xmm2, %xmm0, %xmm2
; XOP-NEXT: vpshaw %xmm1, %xmm2, %xmm1
; XOP-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; XOP-NEXT: vpcmov {{.*}}(%rip), %ymm0, %ymm1, %ymm0
; XOP-NEXT: retq
%1 = sdiv <16 x i16> %x, <i16 1, i16 4, i16 2, i16 16, i16 8, i16 32, i16 64, i16 2, i16 1, i16 4, i16 2, i16 16, i16 8, i16 32, i16 64, i16 2>
ret <16 x i16> %1
}
define <32 x i16> @combine_vec_sdiv_by_pow2b_v32i16(<32 x i16> %x) {
; SSE-LABEL: combine_vec_sdiv_by_pow2b_v32i16:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm1, %xmm4
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psraw $15, %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm5
; SSE-NEXT: psrlw $8, %xmm5
; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm0[0],xmm5[1,2,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm5, %xmm0
; SSE-NEXT: psrlw $4, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm5[0],xmm0[1,2,3,4],xmm5[5,6],xmm0[7]
; SSE-NEXT: movdqa %xmm0, %xmm5
; SSE-NEXT: psrlw $2, %xmm5
; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm0[0],xmm5[1,2],xmm0[3,4],xmm5[5,6,7]
; SSE-NEXT: movdqa %xmm5, %xmm0
; SSE-NEXT: psrlw $1, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm5[0,1],xmm0[2],xmm5[3],xmm0[4,5],xmm5[6],xmm0[7]
; SSE-NEXT: paddw %xmm1, %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm5
; SSE-NEXT: psraw $4, %xmm5
; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm0[0,1,2],xmm5[3],xmm0[4],xmm5[5,6],xmm0[7]
; SSE-NEXT: movdqa %xmm5, %xmm6
; SSE-NEXT: psraw $2, %xmm6
; SSE-NEXT: pblendw {{.*#+}} xmm6 = xmm5[0],xmm6[1],xmm5[2,3],xmm6[4],xmm5[5],xmm6[6],xmm5[7]
; SSE-NEXT: movdqa %xmm6, %xmm0
; SSE-NEXT: psraw $1, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm6[0,1],xmm0[2],xmm6[3],xmm0[4,5],xmm6[6],xmm0[7]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm4, %xmm1
; SSE-NEXT: psraw $15, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm5
; SSE-NEXT: psrlw $8, %xmm5
; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm1[0],xmm5[1,2,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm5, %xmm1
; SSE-NEXT: psrlw $4, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm5[0],xmm1[1,2,3,4],xmm5[5,6],xmm1[7]
; SSE-NEXT: movdqa %xmm1, %xmm5
; SSE-NEXT: psrlw $2, %xmm5
; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm1[0],xmm5[1,2],xmm1[3,4],xmm5[5,6,7]
; SSE-NEXT: movdqa %xmm5, %xmm1
; SSE-NEXT: psrlw $1, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm5[0,1],xmm1[2],xmm5[3],xmm1[4,5],xmm5[6],xmm1[7]
; SSE-NEXT: paddw %xmm4, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm5
; SSE-NEXT: psraw $4, %xmm5
; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm1[0,1,2],xmm5[3],xmm1[4],xmm5[5,6],xmm1[7]
; SSE-NEXT: movdqa %xmm5, %xmm6
; SSE-NEXT: psraw $2, %xmm6
; SSE-NEXT: pblendw {{.*#+}} xmm6 = xmm5[0],xmm6[1],xmm5[2,3],xmm6[4],xmm5[5],xmm6[6],xmm5[7]
; SSE-NEXT: movdqa %xmm6, %xmm1
; SSE-NEXT: psraw $1, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm6[0,1],xmm1[2],xmm6[3],xmm1[4,5],xmm6[6],xmm1[7]
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm2, %xmm4
; SSE-NEXT: psraw $15, %xmm4
; SSE-NEXT: movdqa %xmm4, %xmm5
; SSE-NEXT: psrlw $8, %xmm5
; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm4[0],xmm5[1,2,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm5, %xmm4
; SSE-NEXT: psrlw $4, %xmm4
; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm5[0],xmm4[1,2,3,4],xmm5[5,6],xmm4[7]
; SSE-NEXT: movdqa %xmm4, %xmm5
; SSE-NEXT: psrlw $2, %xmm5
; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm4[0],xmm5[1,2],xmm4[3,4],xmm5[5,6,7]
; SSE-NEXT: movdqa %xmm5, %xmm4
; SSE-NEXT: psrlw $1, %xmm4
; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm5[0,1],xmm4[2],xmm5[3],xmm4[4,5],xmm5[6],xmm4[7]
; SSE-NEXT: paddw %xmm2, %xmm4
; SSE-NEXT: movdqa %xmm4, %xmm5
; SSE-NEXT: psraw $4, %xmm5
; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm4[0,1,2],xmm5[3],xmm4[4],xmm5[5,6],xmm4[7]
; SSE-NEXT: movdqa %xmm5, %xmm6
; SSE-NEXT: psraw $2, %xmm6
; SSE-NEXT: pblendw {{.*#+}} xmm6 = xmm5[0],xmm6[1],xmm5[2,3],xmm6[4],xmm5[5],xmm6[6],xmm5[7]
; SSE-NEXT: movdqa %xmm6, %xmm4
; SSE-NEXT: psraw $1, %xmm4
; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm6[0,1],xmm4[2],xmm6[3],xmm4[4,5],xmm6[6],xmm4[7]
; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm2[0],xmm4[1,2,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm3, %xmm2
; SSE-NEXT: psraw $15, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm5
; SSE-NEXT: psrlw $8, %xmm5
; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm2[0],xmm5[1,2,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm5, %xmm2
; SSE-NEXT: psrlw $4, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm5[0],xmm2[1,2,3,4],xmm5[5,6],xmm2[7]
; SSE-NEXT: movdqa %xmm2, %xmm5
; SSE-NEXT: psrlw $2, %xmm5
; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm2[0],xmm5[1,2],xmm2[3,4],xmm5[5,6,7]
; SSE-NEXT: movdqa %xmm5, %xmm2
; SSE-NEXT: psrlw $1, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm5[0,1],xmm2[2],xmm5[3],xmm2[4,5],xmm5[6],xmm2[7]
; SSE-NEXT: paddw %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm5
; SSE-NEXT: psraw $4, %xmm5
; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm2[0,1,2],xmm5[3],xmm2[4],xmm5[5,6],xmm2[7]
; SSE-NEXT: movdqa %xmm5, %xmm2
; SSE-NEXT: psraw $2, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm5[0],xmm2[1],xmm5[2,3],xmm2[4],xmm5[5],xmm2[6],xmm5[7]
; SSE-NEXT: movdqa %xmm2, %xmm5
; SSE-NEXT: psraw $1, %xmm5
; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm2[0,1],xmm5[2],xmm2[3],xmm5[4,5],xmm2[6],xmm5[7]
; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0],xmm5[1,2,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm4, %xmm2
; SSE-NEXT: movdqa %xmm5, %xmm3
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_vec_sdiv_by_pow2b_v32i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsraw $15, %xmm2, %xmm3
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1,2,3,4,5,6,7]
; AVX1-NEXT: vpsrlw $4, %xmm3, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1,2,3,4],xmm3[5,6],xmm4[7]
; AVX1-NEXT: vpsrlw $2, %xmm3, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1,2],xmm3[3,4],xmm4[5,6,7]
; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2],xmm3[3],xmm4[4,5],xmm3[6],xmm4[7]
; AVX1-NEXT: vpaddw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsraw $4, %xmm2, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[3],xmm2[4],xmm3[5,6],xmm2[7]
; AVX1-NEXT: vpsraw $2, %xmm2, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5],xmm3[6],xmm2[7]
; AVX1-NEXT: vpsraw $1, %xmm2, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3],xmm3[4,5],xmm2[6],xmm3[7]
; AVX1-NEXT: vpsraw $15, %xmm0, %xmm3
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1,2,3,4,5,6,7]
; AVX1-NEXT: vpsrlw $4, %xmm3, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1,2,3,4],xmm3[5,6],xmm4[7]
; AVX1-NEXT: vpsrlw $2, %xmm3, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1,2],xmm3[3,4],xmm4[5,6,7]
; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2],xmm3[3],xmm4[4,5],xmm3[6],xmm4[7]
; AVX1-NEXT: vpaddw %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpsraw $4, %xmm3, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[3],xmm3[4],xmm4[5,6],xmm3[7]
; AVX1-NEXT: vpsraw $2, %xmm3, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3],xmm4[4],xmm3[5],xmm4[6],xmm3[7]
; AVX1-NEXT: vpsraw $1, %xmm3, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2],xmm3[3],xmm4[4,5],xmm3[6],xmm4[7]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm3
; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [0,65535,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,65535]
; AVX1-NEXT: vandps %ymm2, %ymm3, %ymm3
; AVX1-NEXT: vandnps %ymm0, %ymm2, %ymm0
; AVX1-NEXT: vorps %ymm0, %ymm3, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpsraw $15, %xmm3, %xmm4
; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm5
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1,2,3,4,5,6,7]
; AVX1-NEXT: vpsrlw $4, %xmm4, %xmm5
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1,2,3,4],xmm4[5,6],xmm5[7]
; AVX1-NEXT: vpsrlw $2, %xmm4, %xmm5
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1,2],xmm4[3,4],xmm5[5,6,7]
; AVX1-NEXT: vpsrlw $1, %xmm4, %xmm5
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2],xmm4[3],xmm5[4,5],xmm4[6],xmm5[7]
; AVX1-NEXT: vpaddw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpsraw $4, %xmm3, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[3],xmm3[4],xmm4[5,6],xmm3[7]
; AVX1-NEXT: vpsraw $2, %xmm3, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3],xmm4[4],xmm3[5],xmm4[6],xmm3[7]
; AVX1-NEXT: vpsraw $1, %xmm3, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2],xmm3[3],xmm4[4,5],xmm3[6],xmm4[7]
; AVX1-NEXT: vpsraw $15, %xmm1, %xmm4
; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm5
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1,2,3,4,5,6,7]
; AVX1-NEXT: vpsrlw $4, %xmm4, %xmm5
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1,2,3,4],xmm4[5,6],xmm5[7]
; AVX1-NEXT: vpsrlw $2, %xmm4, %xmm5
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1,2],xmm4[3,4],xmm5[5,6,7]
; AVX1-NEXT: vpsrlw $1, %xmm4, %xmm5
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2],xmm4[3],xmm5[4,5],xmm4[6],xmm5[7]
; AVX1-NEXT: vpaddw %xmm4, %xmm1, %xmm4
; AVX1-NEXT: vpsraw $4, %xmm4, %xmm5
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2],xmm5[3],xmm4[4],xmm5[5,6],xmm4[7]
; AVX1-NEXT: vpsraw $2, %xmm4, %xmm5
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1],xmm4[2,3],xmm5[4],xmm4[5],xmm5[6],xmm4[7]
; AVX1-NEXT: vpsraw $1, %xmm4, %xmm5
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2],xmm4[3],xmm5[4,5],xmm4[6],xmm5[7]
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
; AVX1-NEXT: vandps %ymm2, %ymm3, %ymm3
; AVX1-NEXT: vandnps %ymm1, %ymm2, %ymm1
; AVX1-NEXT: vorps %ymm1, %ymm3, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_vec_sdiv_by_pow2b_v32i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [16,14,15,12,13,11,10,15,16,14,15,12,13,11,10,15]
; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm3[4],ymm2[4],ymm3[5],ymm2[5],ymm3[6],ymm2[6],ymm3[7],ymm2[7],ymm3[12],ymm2[12],ymm3[13],ymm2[13],ymm3[14],ymm2[14],ymm3[15],ymm2[15]
; AVX2-NEXT: vpsraw $15, %ymm0, %ymm5
; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm6 = ymm2[4],ymm5[4],ymm2[5],ymm5[5],ymm2[6],ymm5[6],ymm2[7],ymm5[7],ymm2[12],ymm5[12],ymm2[13],ymm5[13],ymm2[14],ymm5[14],ymm2[15],ymm5[15]
; AVX2-NEXT: vpsrlvd %ymm4, %ymm6, %ymm6
; AVX2-NEXT: vpsrld $16, %ymm6, %ymm6
; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11]
; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm5 = ymm2[0],ymm5[0],ymm2[1],ymm5[1],ymm2[2],ymm5[2],ymm2[3],ymm5[3],ymm2[8],ymm5[8],ymm2[9],ymm5[9],ymm2[10],ymm5[10],ymm2[11],ymm5[11]
; AVX2-NEXT: vpsrlvd %ymm3, %ymm5, %ymm5
; AVX2-NEXT: vpsrld $16, %ymm5, %ymm5
; AVX2-NEXT: vpackusdw %ymm6, %ymm5, %ymm5
; AVX2-NEXT: vpaddw %ymm5, %ymm0, %ymm5
; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm6 = ymm2[4],ymm5[4],ymm2[5],ymm5[5],ymm2[6],ymm5[6],ymm2[7],ymm5[7],ymm2[12],ymm5[12],ymm2[13],ymm5[13],ymm2[14],ymm5[14],ymm2[15],ymm5[15]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm7 = [0,2,1,4,3,5,6,1,0,2,1,4,3,5,6,1]
; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm8 = ymm7[4],ymm2[4],ymm7[5],ymm2[5],ymm7[6],ymm2[6],ymm7[7],ymm2[7],ymm7[12],ymm2[12],ymm7[13],ymm2[13],ymm7[14],ymm2[14],ymm7[15],ymm2[15]
; AVX2-NEXT: vpsravd %ymm8, %ymm6, %ymm6
; AVX2-NEXT: vpsrld $16, %ymm6, %ymm6
; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm5 = ymm2[0],ymm5[0],ymm2[1],ymm5[1],ymm2[2],ymm5[2],ymm2[3],ymm5[3],ymm2[8],ymm5[8],ymm2[9],ymm5[9],ymm2[10],ymm5[10],ymm2[11],ymm5[11]
; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm7 = ymm7[0],ymm2[0],ymm7[1],ymm2[1],ymm7[2],ymm2[2],ymm7[3],ymm2[3],ymm7[8],ymm2[8],ymm7[9],ymm2[9],ymm7[10],ymm2[10],ymm7[11],ymm2[11]
; AVX2-NEXT: vpsravd %ymm7, %ymm5, %ymm5
; AVX2-NEXT: vpsrld $16, %ymm5, %ymm5
; AVX2-NEXT: vpackusdw %ymm6, %ymm5, %ymm5
; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm5[1,2,3,4,5,6,7],ymm0[8],ymm5[9,10,11,12,13,14,15]
; AVX2-NEXT: vpsraw $15, %ymm1, %ymm5
; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm6 = ymm2[4],ymm5[4],ymm2[5],ymm5[5],ymm2[6],ymm5[6],ymm2[7],ymm5[7],ymm2[12],ymm5[12],ymm2[13],ymm5[13],ymm2[14],ymm5[14],ymm2[15],ymm5[15]
; AVX2-NEXT: vpsrlvd %ymm4, %ymm6, %ymm4
; AVX2-NEXT: vpsrld $16, %ymm4, %ymm4
; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm5 = ymm2[0],ymm5[0],ymm2[1],ymm5[1],ymm2[2],ymm5[2],ymm2[3],ymm5[3],ymm2[8],ymm5[8],ymm2[9],ymm5[9],ymm2[10],ymm5[10],ymm2[11],ymm5[11]
; AVX2-NEXT: vpsrlvd %ymm3, %ymm5, %ymm3
; AVX2-NEXT: vpsrld $16, %ymm3, %ymm3
; AVX2-NEXT: vpackusdw %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpaddw %ymm3, %ymm1, %ymm3
; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15]
; AVX2-NEXT: vpsravd %ymm8, %ymm4, %ymm4
; AVX2-NEXT: vpsrld $16, %ymm4, %ymm4
; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11]
; AVX2-NEXT: vpsravd %ymm7, %ymm2, %ymm2
; AVX2-NEXT: vpsrld $16, %ymm2, %ymm2
; AVX2-NEXT: vpackusdw %ymm4, %ymm2, %ymm2
; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3,4,5,6,7],ymm1[8],ymm2[9,10,11,12,13,14,15]
; AVX2-NEXT: retq
;
; AVX512F-LABEL: combine_vec_sdiv_by_pow2b_v32i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsraw $15, %ymm0, %ymm2
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm3 = [16,14,15,12,13,11,10,15,16,14,15,12,13,11,10,15]
; AVX512F-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3]
; AVX512F-NEXT: vpsrlvd %zmm3, %zmm2, %zmm2
; AVX512F-NEXT: vpmovdw %zmm2, %ymm2
; AVX512F-NEXT: vpaddw %ymm2, %ymm0, %ymm2
; AVX512F-NEXT: vpmovsxwd %ymm2, %zmm2
; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm4 = [0,2,1,4,3,5,6,1,0,2,1,4,3,5,6,1]
; AVX512F-NEXT: # zmm4 = mem[0,1,2,3,0,1,2,3]
; AVX512F-NEXT: vpsravd %zmm4, %zmm2, %zmm2
; AVX512F-NEXT: vpmovdw %zmm2, %ymm2
; AVX512F-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3,4,5,6,7],ymm0[8],ymm2[9,10,11,12,13,14,15]
; AVX512F-NEXT: vpsraw $15, %ymm1, %ymm2
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
; AVX512F-NEXT: vpsrlvd %zmm3, %zmm2, %zmm2
; AVX512F-NEXT: vpmovdw %zmm2, %ymm2
; AVX512F-NEXT: vpaddw %ymm2, %ymm1, %ymm2
; AVX512F-NEXT: vpmovsxwd %ymm2, %zmm2
; AVX512F-NEXT: vpsravd %zmm4, %zmm2, %zmm2
; AVX512F-NEXT: vpmovdw %zmm2, %ymm2
; AVX512F-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3,4,5,6,7],ymm1[8],ymm2[9,10,11,12,13,14,15]
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: combine_vec_sdiv_by_pow2b_v32i16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsraw $15, %zmm0, %zmm1
; AVX512BW-NEXT: vpsrlvw {{.*}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: vpaddw %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpsravw {{.*}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: movl $16843009, %eax # imm = 0x1010101
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vmovdqu16 %zmm0, %zmm1 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; XOP-LABEL: combine_vec_sdiv_by_pow2b_v32i16:
; XOP: # %bb.0:
; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOP-NEXT: vpsubw {{.*}}(%rip), %xmm2, %xmm3
; XOP-NEXT: vextractf128 $1, %ymm0, %xmm4
; XOP-NEXT: vpsraw $15, %xmm4, %xmm5
; XOP-NEXT: vpshlw %xmm3, %xmm5, %xmm5
; XOP-NEXT: vpaddw %xmm5, %xmm4, %xmm4
; XOP-NEXT: vpsubw {{.*}}(%rip), %xmm2, %xmm2
; XOP-NEXT: vpshaw %xmm2, %xmm4, %xmm4
; XOP-NEXT: vpsraw $15, %xmm0, %xmm5
; XOP-NEXT: vpshlw %xmm3, %xmm5, %xmm5
; XOP-NEXT: vpaddw %xmm5, %xmm0, %xmm5
; XOP-NEXT: vpshaw %xmm2, %xmm5, %xmm5
; XOP-NEXT: vinsertf128 $1, %xmm4, %ymm5, %ymm4
; XOP-NEXT: vmovdqa {{.*#+}} ymm5 = [0,65535,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,65535]
; XOP-NEXT: vpcmov %ymm5, %ymm0, %ymm4, %ymm0
; XOP-NEXT: vextractf128 $1, %ymm1, %xmm4
; XOP-NEXT: vpsraw $15, %xmm4, %xmm6
; XOP-NEXT: vpshlw %xmm3, %xmm6, %xmm6
; XOP-NEXT: vpaddw %xmm6, %xmm4, %xmm4
; XOP-NEXT: vpshaw %xmm2, %xmm4, %xmm4
; XOP-NEXT: vpsraw $15, %xmm1, %xmm6
; XOP-NEXT: vpshlw %xmm3, %xmm6, %xmm3
; XOP-NEXT: vpaddw %xmm3, %xmm1, %xmm3
; XOP-NEXT: vpshaw %xmm2, %xmm3, %xmm2
; XOP-NEXT: vinsertf128 $1, %xmm4, %ymm2, %ymm2
; XOP-NEXT: vpcmov %ymm5, %ymm1, %ymm2, %ymm1
; XOP-NEXT: retq
%1 = sdiv <32 x i16> %x, <i16 1, i16 4, i16 2, i16 16, i16 8, i16 32, i16 64, i16 2, i16 1, i16 4, i16 2, i16 16, i16 8, i16 32, i16 64, i16 2, i16 1, i16 4, i16 2, i16 16, i16 8, i16 32, i16 64, i16 2, i16 1, i16 4, i16 2, i16 16, i16 8, i16 32, i16 64, i16 2>
ret <32 x i16> %1
}
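; i32 lanes are the easy case once AVX2 is available: vpsrlvd/vpsravd supply
; the per-lane bias and arithmetic shifts directly, leaving only a vpblendd
; to preserve the divide-by-1 lane.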
define <4 x i32> @combine_vec_sdiv_by_pow2b_v4i32(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_by_pow2b_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrad $31, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrld $28, %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: psrld $30, %xmm3
; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: psrld $29, %xmm1
; SSE-NEXT: pxor %xmm2, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
; SSE-NEXT: paddd %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: psrad $3, %xmm3
; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm2[0,1,2,3],xmm3[4,5,6,7]
; SSE-NEXT: psrad $4, %xmm2
; SSE-NEXT: psrad $2, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm1[2,3],xmm3[4,5],xmm1[6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3,4,5,6,7]
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_vec_sdiv_by_pow2b_v4i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsrad $31, %xmm0, %xmm1
; AVX1-NEXT: vpsrld $28, %xmm1, %xmm2
; AVX1-NEXT: vpsrld $30, %xmm1, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
; AVX1-NEXT: vpsrld $29, %xmm1, %xmm1
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpsrad $4, %xmm1, %xmm2
; AVX1-NEXT: vpsrad $2, %xmm1, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
; AVX1-NEXT: vpsrad $3, %xmm1, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2ORLATER-LABEL: combine_vec_sdiv_by_pow2b_v4i32:
; AVX2ORLATER: # %bb.0:
; AVX2ORLATER-NEXT: vpsrad $31, %xmm0, %xmm1
; AVX2ORLATER-NEXT: vpsrlvd {{.*}}(%rip), %xmm1, %xmm1
; AVX2ORLATER-NEXT: vpaddd %xmm1, %xmm0, %xmm1
; AVX2ORLATER-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1
; AVX2ORLATER-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX2ORLATER-NEXT: retq
;
; XOP-LABEL: combine_vec_sdiv_by_pow2b_v4i32:
; XOP: # %bb.0:
; XOP-NEXT: vpsrad $31, %xmm0, %xmm1
; XOP-NEXT: vpshld {{.*}}(%rip), %xmm1, %xmm1
; XOP-NEXT: vpaddd %xmm1, %xmm0, %xmm1
; XOP-NEXT: vpshad {{.*}}(%rip), %xmm1, %xmm1
; XOP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; XOP-NEXT: retq
%1 = sdiv <4 x i32> %x, <i32 1, i32 4, i32 8, i32 16>
ret <4 x i32> %1
}
define <8 x i32> @combine_vec_sdiv_by_pow2b_v8i32(<8 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_by_pow2b_v8i32:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrad $31, %xmm3
; SSE-NEXT: movdqa %xmm3, %xmm0
; SSE-NEXT: psrld $28, %xmm0
; SSE-NEXT: movdqa %xmm3, %xmm4
; SSE-NEXT: psrld $30, %xmm4
; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm0[4,5,6,7]
; SSE-NEXT: psrld $29, %xmm3
; SSE-NEXT: pxor %xmm5, %xmm5
; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm5[0,1,2,3],xmm3[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5],xmm4[6,7]
; SSE-NEXT: paddd %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm3, %xmm4
; SSE-NEXT: movdqa %xmm3, %xmm0
; SSE-NEXT: psrad $3, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
; SSE-NEXT: psrad $4, %xmm3
; SSE-NEXT: psrad $2, %xmm4
; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm3[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: psrad $31, %xmm3
; SSE-NEXT: movdqa %xmm3, %xmm2
; SSE-NEXT: psrld $28, %xmm2
; SSE-NEXT: movdqa %xmm3, %xmm4
; SSE-NEXT: psrld $30, %xmm4
; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: psrld $29, %xmm3
; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm5[0,1,2,3],xmm3[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5],xmm4[6,7]
; SSE-NEXT: paddd %xmm1, %xmm3
; SSE-NEXT: movdqa %xmm3, %xmm4
; SSE-NEXT: movdqa %xmm3, %xmm2
; SSE-NEXT: psrad $3, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: psrad $4, %xmm3
; SSE-NEXT: psrad $2, %xmm4
; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm3[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1],xmm2[2,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_vec_sdiv_by_pow2b_v8i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsrad $31, %xmm1, %xmm2
; AVX1-NEXT: vpsrld $28, %xmm2, %xmm3
; AVX1-NEXT: vpsrld $30, %xmm2, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
; AVX1-NEXT: vpsrld $29, %xmm2, %xmm2
; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsrad $4, %xmm1, %xmm2
; AVX1-NEXT: vpsrad $2, %xmm1, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
; AVX1-NEXT: vpsrad $3, %xmm1, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; AVX1-NEXT: vpsrad $31, %xmm0, %xmm2
; AVX1-NEXT: vpsrld $28, %xmm2, %xmm3
; AVX1-NEXT: vpsrld $30, %xmm2, %xmm5
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm5[0,1,2,3],xmm3[4,5,6,7]
; AVX1-NEXT: vpsrld $29, %xmm2, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
; AVX1-NEXT: vpaddd %xmm2, %xmm0, %xmm2
; AVX1-NEXT: vpsrad $4, %xmm2, %xmm3
; AVX1-NEXT: vpsrad $2, %xmm2, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
; AVX1-NEXT: vpsrad $3, %xmm2, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
; AVX1-NEXT: retq
;
; AVX2ORLATER-LABEL: combine_vec_sdiv_by_pow2b_v8i32:
; AVX2ORLATER: # %bb.0:
; AVX2ORLATER-NEXT: vpsrad $31, %ymm0, %ymm1
; AVX2ORLATER-NEXT: vpsrlvd {{.*}}(%rip), %ymm1, %ymm1
; AVX2ORLATER-NEXT: vpaddd %ymm1, %ymm0, %ymm1
; AVX2ORLATER-NEXT: vpsravd {{.*}}(%rip), %ymm1, %ymm1
; AVX2ORLATER-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
; AVX2ORLATER-NEXT: retq
;
; XOP-LABEL: combine_vec_sdiv_by_pow2b_v8i32:
; XOP: # %bb.0:
; XOP-NEXT: vextractf128 $1, %ymm0, %xmm1
; XOP-NEXT: vpsrad $31, %xmm1, %xmm2
; XOP-NEXT: vmovdqa {{.*#+}} xmm3 = [4294967264,4294967266,4294967267,4294967268]
; XOP-NEXT: vpshld %xmm3, %xmm2, %xmm2
; XOP-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; XOP-NEXT: vmovdqa {{.*#+}} xmm2 = [0,4294967294,4294967293,4294967292]
; XOP-NEXT: vpshad %xmm2, %xmm1, %xmm1
; XOP-NEXT: vpsrad $31, %xmm0, %xmm4
; XOP-NEXT: vpshld %xmm3, %xmm4, %xmm3
; XOP-NEXT: vpaddd %xmm3, %xmm0, %xmm3
; XOP-NEXT: vpshad %xmm2, %xmm3, %xmm2
; XOP-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; XOP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
; XOP-NEXT: retq
%1 = sdiv <8 x i32> %x, <i32 1, i32 4, i32 8, i32 16, i32 1, i32 4, i32 8, i32 16>
ret <8 x i32> %1
}
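; Same <1,4,8,16> pow2 divisor pattern widened to 512 bits. The expected
; lowering is fold (sdiv x, 2^k) -> (x + ((x s>> 31) u>> (32-k))) s>> k per
; lane, with the divisor-1 lanes (mask 0x1111) blended back from the source;
; SSE/AVX1 repeat the 128-bit sequence per chunk.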
define <16 x i32> @combine_vec_sdiv_by_pow2b_v16i32(<16 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_by_pow2b_v16i32:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm1, %xmm4
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm6
; SSE-NEXT: psrad $31, %xmm6
; SSE-NEXT: movdqa %xmm6, %xmm0
; SSE-NEXT: psrld $28, %xmm0
; SSE-NEXT: movdqa %xmm6, %xmm7
; SSE-NEXT: psrld $30, %xmm7
; SSE-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm0[4,5,6,7]
; SSE-NEXT: psrld $29, %xmm6
; SSE-NEXT: pxor %xmm5, %xmm5
; SSE-NEXT: pblendw {{.*#+}} xmm6 = xmm5[0,1,2,3],xmm6[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0,1],xmm7[2,3],xmm6[4,5],xmm7[6,7]
; SSE-NEXT: paddd %xmm1, %xmm6
; SSE-NEXT: movdqa %xmm6, %xmm7
; SSE-NEXT: movdqa %xmm6, %xmm0
; SSE-NEXT: psrad $3, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm6[0,1,2,3],xmm0[4,5,6,7]
; SSE-NEXT: psrad $4, %xmm6
; SSE-NEXT: psrad $2, %xmm7
; SSE-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm6[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm7[2,3],xmm0[4,5],xmm7[6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm4, %xmm6
; SSE-NEXT: psrad $31, %xmm6
; SSE-NEXT: movdqa %xmm6, %xmm1
; SSE-NEXT: psrld $28, %xmm1
; SSE-NEXT: movdqa %xmm6, %xmm7
; SSE-NEXT: psrld $30, %xmm7
; SSE-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: psrld $29, %xmm6
; SSE-NEXT: pblendw {{.*#+}} xmm6 = xmm5[0,1,2,3],xmm6[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0,1],xmm7[2,3],xmm6[4,5],xmm7[6,7]
; SSE-NEXT: paddd %xmm4, %xmm6
; SSE-NEXT: movdqa %xmm6, %xmm7
; SSE-NEXT: movdqa %xmm6, %xmm1
; SSE-NEXT: psrad $3, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm6[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: psrad $4, %xmm6
; SSE-NEXT: psrad $2, %xmm7
; SSE-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm6[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm7[2,3],xmm1[4,5],xmm7[6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm4[0,1],xmm1[2,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm2, %xmm6
; SSE-NEXT: psrad $31, %xmm6
; SSE-NEXT: movdqa %xmm6, %xmm4
; SSE-NEXT: psrld $28, %xmm4
; SSE-NEXT: movdqa %xmm6, %xmm7
; SSE-NEXT: psrld $30, %xmm7
; SSE-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm4[4,5,6,7]
; SSE-NEXT: psrld $29, %xmm6
; SSE-NEXT: pblendw {{.*#+}} xmm6 = xmm5[0,1,2,3],xmm6[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0,1],xmm7[2,3],xmm6[4,5],xmm7[6,7]
; SSE-NEXT: paddd %xmm2, %xmm6
; SSE-NEXT: movdqa %xmm6, %xmm7
; SSE-NEXT: movdqa %xmm6, %xmm4
; SSE-NEXT: psrad $3, %xmm4
; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm6[0,1,2,3],xmm4[4,5,6,7]
; SSE-NEXT: psrad $4, %xmm6
; SSE-NEXT: psrad $2, %xmm7
; SSE-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm6[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm7[2,3],xmm4[4,5],xmm7[6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm2[0,1],xmm4[2,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm3, %xmm2
; SSE-NEXT: psrad $31, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm6
; SSE-NEXT: psrld $28, %xmm6
; SSE-NEXT: movdqa %xmm2, %xmm7
; SSE-NEXT: psrld $30, %xmm7
; SSE-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm6[4,5,6,7]
; SSE-NEXT: psrld $29, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm5[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm7[2,3],xmm2[4,5],xmm7[6,7]
; SSE-NEXT: paddd %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm6
; SSE-NEXT: movdqa %xmm2, %xmm5
; SSE-NEXT: psrad $3, %xmm5
; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm2[0,1,2,3],xmm5[4,5,6,7]
; SSE-NEXT: psrad $4, %xmm2
; SSE-NEXT: psrad $2, %xmm6
; SSE-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1],xmm6[2,3],xmm5[4,5],xmm6[6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1],xmm5[2,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm4, %xmm2
; SSE-NEXT: movdqa %xmm5, %xmm3
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_vec_sdiv_by_pow2b_v16i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpsrad $31, %xmm3, %xmm2
; AVX1-NEXT: vpsrld $28, %xmm2, %xmm4
; AVX1-NEXT: vpsrld $30, %xmm2, %xmm5
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5,6,7]
; AVX1-NEXT: vpsrld $29, %xmm2, %xmm5
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm2[0,1,2,3],xmm5[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1],xmm4[2,3],xmm5[4,5],xmm4[6,7]
; AVX1-NEXT: vpaddd %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpsrad $4, %xmm3, %xmm4
; AVX1-NEXT: vpsrad $2, %xmm3, %xmm5
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5,6,7]
; AVX1-NEXT: vpsrad $3, %xmm3, %xmm5
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm5[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5],xmm4[6,7]
; AVX1-NEXT: vpsrad $31, %xmm0, %xmm4
; AVX1-NEXT: vpsrld $28, %xmm4, %xmm5
; AVX1-NEXT: vpsrld $30, %xmm4, %xmm6
; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1,2,3],xmm5[4,5,6,7]
; AVX1-NEXT: vpsrld $29, %xmm4, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm2[0,1,2,3],xmm4[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7]
; AVX1-NEXT: vpaddd %xmm4, %xmm0, %xmm4
; AVX1-NEXT: vpsrad $4, %xmm4, %xmm5
; AVX1-NEXT: vpsrad $2, %xmm4, %xmm6
; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1,2,3],xmm5[4,5,6,7]
; AVX1-NEXT: vpsrad $3, %xmm4, %xmm6
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm6[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7]
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm3[1,2,3],ymm0[4],ymm3[5,6,7]
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpsrad $31, %xmm3, %xmm4
; AVX1-NEXT: vpsrld $28, %xmm4, %xmm5
; AVX1-NEXT: vpsrld $30, %xmm4, %xmm6
; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1,2,3],xmm5[4,5,6,7]
; AVX1-NEXT: vpsrld $29, %xmm4, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm2[0,1,2,3],xmm4[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7]
; AVX1-NEXT: vpaddd %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpsrad $4, %xmm3, %xmm4
; AVX1-NEXT: vpsrad $2, %xmm3, %xmm5
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5,6,7]
; AVX1-NEXT: vpsrad $3, %xmm3, %xmm5
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm5[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5],xmm4[6,7]
; AVX1-NEXT: vpsrad $31, %xmm1, %xmm4
; AVX1-NEXT: vpsrld $28, %xmm4, %xmm5
; AVX1-NEXT: vpsrld $30, %xmm4, %xmm6
; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1,2,3],xmm5[4,5,6,7]
; AVX1-NEXT: vpsrld $29, %xmm4, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm5[2,3],xmm2[4,5],xmm5[6,7]
; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm2
; AVX1-NEXT: vpsrad $4, %xmm2, %xmm4
; AVX1-NEXT: vpsrad $2, %xmm2, %xmm5
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5,6,7]
; AVX1-NEXT: vpsrad $3, %xmm2, %xmm5
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm5[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4],ymm2[5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_vec_sdiv_by_pow2b_v16i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsrad $31, %ymm0, %ymm2
; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [32,30,29,28,32,30,29,28]
; AVX2-NEXT: # ymm3 = mem[0,1,0,1]
; AVX2-NEXT: vpsrlvd %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm2
; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [0,2,3,4,0,2,3,4]
; AVX2-NEXT: # ymm4 = mem[0,1,0,1]
; AVX2-NEXT: vpsravd %ymm4, %ymm2, %ymm2
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3],ymm0[4],ymm2[5,6,7]
; AVX2-NEXT: vpsrad $31, %ymm1, %ymm2
; AVX2-NEXT: vpsrlvd %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpaddd %ymm2, %ymm1, %ymm2
; AVX2-NEXT: vpsravd %ymm4, %ymm2, %ymm2
; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4],ymm2[5,6,7]
; AVX2-NEXT: retq
;
; AVX512F-LABEL: combine_vec_sdiv_by_pow2b_v16i32:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrad $31, %zmm0, %zmm1
; AVX512F-NEXT: vpsrlvd {{.*}}(%rip), %zmm1, %zmm1
; AVX512F-NEXT: vpaddd %zmm1, %zmm0, %zmm1
; AVX512F-NEXT: vpsravd {{.*}}(%rip), %zmm1, %zmm1
; AVX512F-NEXT: movw $4369, %ax # imm = 0x1111
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: combine_vec_sdiv_by_pow2b_v16i32:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsrad $31, %zmm0, %zmm1
; AVX512BW-NEXT: vpsrlvd {{.*}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: vpaddd %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpsravd {{.*}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: movw $4369, %ax # imm = 0x1111
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; XOP-LABEL: combine_vec_sdiv_by_pow2b_v16i32:
; XOP: # %bb.0:
; XOP-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOP-NEXT: vpsrad $31, %xmm2, %xmm3
; XOP-NEXT: vmovdqa {{.*#+}} xmm4 = [4294967264,4294967266,4294967267,4294967268]
; XOP-NEXT: vpshld %xmm4, %xmm3, %xmm3
; XOP-NEXT: vpaddd %xmm3, %xmm2, %xmm2
; XOP-NEXT: vmovdqa {{.*#+}} xmm3 = [0,4294967294,4294967293,4294967292]
; XOP-NEXT: vpshad %xmm3, %xmm2, %xmm2
; XOP-NEXT: vpsrad $31, %xmm0, %xmm5
; XOP-NEXT: vpshld %xmm4, %xmm5, %xmm5
; XOP-NEXT: vpaddd %xmm5, %xmm0, %xmm5
; XOP-NEXT: vpshad %xmm3, %xmm5, %xmm5
; XOP-NEXT: vinsertf128 $1, %xmm2, %ymm5, %ymm2
; XOP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3],ymm0[4],ymm2[5,6,7]
; XOP-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOP-NEXT: vpsrad $31, %xmm2, %xmm5
; XOP-NEXT: vpshld %xmm4, %xmm5, %xmm5
; XOP-NEXT: vpaddd %xmm5, %xmm2, %xmm2
; XOP-NEXT: vpshad %xmm3, %xmm2, %xmm2
; XOP-NEXT: vpsrad $31, %xmm1, %xmm5
; XOP-NEXT: vpshld %xmm4, %xmm5, %xmm4
; XOP-NEXT: vpaddd %xmm4, %xmm1, %xmm4
; XOP-NEXT: vpshad %xmm3, %xmm4, %xmm3
; XOP-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; XOP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4],ymm2[5,6,7]
; XOP-NEXT: retq
%1 = sdiv <16 x i32> %x, <i32 1, i32 4, i32 8, i32 16, i32 1, i32 4, i32 8, i32 16, i32 1, i32 4, i32 8, i32 16, i32 1, i32 4, i32 8, i32 16>
ret <16 x i32> %1
}
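; i64 variant: pre-AVX512 there is no 64-bit arithmetic shift, so the final
; s>> k is emulated as ((x u>> k) ^ m) - m with m = 1 << (63-k) (the xor/sub
; constants below); AVX512 uses vpsraq directly and XOP uses vpshaq with
; negated shift counts.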
define <2 x i64> @combine_vec_sdiv_by_pow2b_v2i64(<2 x i64> %x) {
; SSE-LABEL: combine_vec_sdiv_by_pow2b_v2i64:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrad $31, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE-NEXT: psrlq $62, %xmm1
; SSE-NEXT: pxor %xmm2, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: paddq %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: psrlq $2, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,2305843009213693952]
; SSE-NEXT: pxor %xmm2, %xmm1
; SSE-NEXT: psubq %xmm2, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_vec_sdiv_by_pow2b_v2i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
; AVX1-NEXT: vpsrlq $62, %xmm2, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpsrlq $2, %xmm1, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,2305843009213693952]
; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_vec_sdiv_by_pow2b_v2i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm1
; AVX2-NEXT: vpsrlvq {{.*}}(%rip), %xmm1, %xmm1
; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm1
; AVX2-NEXT: movl $2, %eax
; AVX2-NEXT: vmovq %rax, %xmm2
; AVX2-NEXT: vpslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5,6,7]
; AVX2-NEXT: vpsrlvq %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,2305843009213693952]
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX2-NEXT: retq
;
; AVX512F-LABEL: combine_vec_sdiv_by_pow2b_v2i64:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: movl $2, %eax
; AVX512F-NEXT: vmovq %rax, %xmm1
; AVX512F-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
; AVX512F-NEXT: vpsraq $63, %zmm0, %zmm2
; AVX512F-NEXT: vpsrlvq {{.*}}(%rip), %xmm2, %xmm2
; AVX512F-NEXT: vpaddq %xmm2, %xmm0, %xmm2
; AVX512F-NEXT: vpsravq %zmm1, %zmm2, %zmm1
; AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: combine_vec_sdiv_by_pow2b_v2i64:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsraq $63, %xmm0, %xmm1
; AVX512BW-NEXT: vpsrlvq {{.*}}(%rip), %xmm1, %xmm1
; AVX512BW-NEXT: vpaddq %xmm1, %xmm0, %xmm1
; AVX512BW-NEXT: movl $2, %eax
; AVX512BW-NEXT: vmovq %rax, %xmm2
; AVX512BW-NEXT: vpslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5,6,7]
; AVX512BW-NEXT: vpsravq %xmm2, %xmm1, %xmm1
; AVX512BW-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX512BW-NEXT: retq
;
; XOP-LABEL: combine_vec_sdiv_by_pow2b_v2i64:
; XOP: # %bb.0:
; XOP-NEXT: movl $2, %eax
; XOP-NEXT: vmovq %rax, %xmm1
; XOP-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOP-NEXT: vpsubq %xmm1, %xmm2, %xmm1
; XOP-NEXT: vpsubq {{.*}}(%rip), %xmm2, %xmm3
; XOP-NEXT: vpshaq %xmm3, %xmm0, %xmm3
; XOP-NEXT: vpsubq {{.*}}(%rip), %xmm2, %xmm2
; XOP-NEXT: vpshlq %xmm2, %xmm3, %xmm2
; XOP-NEXT: vpaddq %xmm2, %xmm0, %xmm2
; XOP-NEXT: vpshaq %xmm1, %xmm2, %xmm1
; XOP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; XOP-NEXT: retq
%1 = sdiv <2 x i64> %x, <i64 1, i64 4>
ret <2 x i64> %1
}
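; 256-bit i64 case: divisors <1,4,8,16> need different emulated-sra sign
; constants per 128-bit half (m = 2^63,2^61 for the low half and 2^60,2^59
; for the high half).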
define <4 x i64> @combine_vec_sdiv_by_pow2b_v4i64(<4 x i64> %x) {
; SSE-LABEL: combine_vec_sdiv_by_pow2b_v4i64:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrad $31, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: psrlq $60, %xmm3
; SSE-NEXT: psrlq $61, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
; SSE-NEXT: paddq %xmm2, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrlq $4, %xmm2
; SSE-NEXT: psrlq $3, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [1152921504606846976,576460752303423488]
; SSE-NEXT: pxor %xmm2, %xmm1
; SSE-NEXT: psubq %xmm2, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrad $31, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE-NEXT: psrlq $62, %xmm2
; SSE-NEXT: pxor %xmm3, %xmm3
; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: paddq %xmm0, %xmm3
; SSE-NEXT: movdqa %xmm3, %xmm2
; SSE-NEXT: psrlq $2, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: movdqa {{.*#+}} xmm3 = [9223372036854775808,2305843009213693952]
; SSE-NEXT: pxor %xmm3, %xmm2
; SSE-NEXT: psubq %xmm3, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_vec_sdiv_by_pow2b_v4i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm2, %xmm3
; AVX1-NEXT: vpsrlq $60, %xmm3, %xmm4
; AVX1-NEXT: vpsrlq $61, %xmm3, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4,5,6,7]
; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpsrlq $4, %xmm1, %xmm3
; AVX1-NEXT: vpsrlq $3, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1152921504606846976,576460752303423488]
; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpsubq %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm3
; AVX1-NEXT: vpsrlq $62, %xmm3, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4,5,6,7]
; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm2
; AVX1-NEXT: vpsrlq $2, %xmm2, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4,5,6,7]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,2305843009213693952]
; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsubq %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_vec_sdiv_by_pow2b_v4i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm1
; AVX2-NEXT: vpsrlvq {{.*}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm1
; AVX2-NEXT: vpsrlvq {{.*}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [9223372036854775808,2305843009213693952,1152921504606846976,576460752303423488]
; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpsubq %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
; AVX2-NEXT: retq
;
; AVX512F-LABEL: combine_vec_sdiv_by_pow2b_v4i64:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm1 = [0,2,3,4]
; AVX512F-NEXT: vpsraq $63, %zmm0, %zmm2
; AVX512F-NEXT: vpsrlvq {{.*}}(%rip), %ymm2, %ymm2
; AVX512F-NEXT: vpaddq %ymm2, %ymm0, %ymm2
; AVX512F-NEXT: vpsravq %zmm1, %zmm2, %zmm1
; AVX512F-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: combine_vec_sdiv_by_pow2b_v4i64:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsraq $63, %ymm0, %ymm1
; AVX512BW-NEXT: vpsrlvq {{.*}}(%rip), %ymm1, %ymm1
; AVX512BW-NEXT: vpaddq %ymm1, %ymm0, %ymm1
; AVX512BW-NEXT: vpsravq {{.*}}(%rip), %ymm1, %ymm1
; AVX512BW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
; AVX512BW-NEXT: retq
;
; XOP-LABEL: combine_vec_sdiv_by_pow2b_v4i64:
; XOP: # %bb.0:
; XOP-NEXT: movl $2, %eax
; XOP-NEXT: vmovq %rax, %xmm1
; XOP-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOP-NEXT: vpsubq %xmm1, %xmm2, %xmm1
; XOP-NEXT: vpsubq {{.*}}(%rip), %xmm2, %xmm3
; XOP-NEXT: vpshaq %xmm3, %xmm0, %xmm4
; XOP-NEXT: vpsubq {{.*}}(%rip), %xmm2, %xmm5
; XOP-NEXT: vpshlq %xmm5, %xmm4, %xmm4
; XOP-NEXT: vpaddq %xmm4, %xmm0, %xmm4
; XOP-NEXT: vpshaq %xmm1, %xmm4, %xmm1
; XOP-NEXT: vextractf128 $1, %ymm0, %xmm4
; XOP-NEXT: vpshaq %xmm3, %xmm4, %xmm3
; XOP-NEXT: vpsubq {{.*}}(%rip), %xmm2, %xmm5
; XOP-NEXT: vpshlq %xmm5, %xmm3, %xmm3
; XOP-NEXT: vpaddq %xmm3, %xmm4, %xmm3
; XOP-NEXT: vpsubq {{.*}}(%rip), %xmm2, %xmm2
; XOP-NEXT: vpshaq %xmm2, %xmm3, %xmm2
; XOP-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; XOP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
; XOP-NEXT: retq
%1 = sdiv <4 x i64> %x, <i64 1, i64 4, i64 8, i64 16>
ret <4 x i64> %1
}
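; 512-bit i64 case: AVX512 keeps the whole fold in zmm registers and merges
; the divisor-1 lanes (mask 17 = 0b00010001) back with a masked move;
; SSE/AVX targets repeat the narrower patterns per chunk.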
define <8 x i64> @combine_vec_sdiv_by_pow2b_v8i64(<8 x i64> %x) {
; SSE-LABEL: combine_vec_sdiv_by_pow2b_v8i64:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm3, %xmm4
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: psrad $31, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE-NEXT: movdqa %xmm1, %xmm5
; SSE-NEXT: psrlq $60, %xmm5
; SSE-NEXT: psrlq $61, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm5[4,5,6,7]
; SSE-NEXT: paddq %xmm3, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: psrlq $4, %xmm3
; SSE-NEXT: psrlq $3, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
; SSE-NEXT: movdqa {{.*#+}} xmm5 = [1152921504606846976,576460752303423488]
; SSE-NEXT: pxor %xmm5, %xmm1
; SSE-NEXT: psubq %xmm5, %xmm1
; SSE-NEXT: movdqa %xmm4, %xmm3
; SSE-NEXT: psrad $31, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE-NEXT: movdqa %xmm3, %xmm6
; SSE-NEXT: psrlq $60, %xmm6
; SSE-NEXT: psrlq $61, %xmm3
; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm6[4,5,6,7]
; SSE-NEXT: paddq %xmm4, %xmm3
; SSE-NEXT: movdqa %xmm3, %xmm4
; SSE-NEXT: psrlq $4, %xmm4
; SSE-NEXT: psrlq $3, %xmm3
; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4,5,6,7]
; SSE-NEXT: pxor %xmm5, %xmm3
; SSE-NEXT: psubq %xmm5, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm4
; SSE-NEXT: psrad $31, %xmm4
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
; SSE-NEXT: psrlq $62, %xmm4
; SSE-NEXT: pxor %xmm5, %xmm5
; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5,6,7]
; SSE-NEXT: paddq %xmm0, %xmm4
; SSE-NEXT: movdqa %xmm4, %xmm6
; SSE-NEXT: psrlq $2, %xmm6
; SSE-NEXT: pblendw {{.*#+}} xmm6 = xmm4[0,1,2,3],xmm6[4,5,6,7]
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [9223372036854775808,2305843009213693952]
; SSE-NEXT: pxor %xmm4, %xmm6
; SSE-NEXT: psubq %xmm4, %xmm6
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm6[4,5,6,7]
; SSE-NEXT: movdqa %xmm2, %xmm6
; SSE-NEXT: psrad $31, %xmm6
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
; SSE-NEXT: psrlq $62, %xmm6
; SSE-NEXT: pblendw {{.*#+}} xmm6 = xmm5[0,1,2,3],xmm6[4,5,6,7]
; SSE-NEXT: paddq %xmm2, %xmm6
; SSE-NEXT: movdqa %xmm6, %xmm5
; SSE-NEXT: psrlq $2, %xmm5
; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm6[0,1,2,3],xmm5[4,5,6,7]
; SSE-NEXT: pxor %xmm4, %xmm5
; SSE-NEXT: psubq %xmm4, %xmm5
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm5[4,5,6,7]
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_vec_sdiv_by_pow2b_v8i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm4
; AVX1-NEXT: vpsrlq $60, %xmm4, %xmm5
; AVX1-NEXT: vpsrlq $61, %xmm4, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm5[4,5,6,7]
; AVX1-NEXT: vpaddq %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpsrlq $4, %xmm3, %xmm4
; AVX1-NEXT: vpsrlq $3, %xmm3, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4,5,6,7]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [1152921504606846976,576460752303423488]
; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpsubq %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm5
; AVX1-NEXT: vpsrlq $62, %xmm5, %xmm5
; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm2[0,1,2,3],xmm5[4,5,6,7]
; AVX1-NEXT: vpaddq %xmm5, %xmm0, %xmm5
; AVX1-NEXT: vpsrlq $2, %xmm5, %xmm6
; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm6[4,5,6,7]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [9223372036854775808,2305843009213693952]
; AVX1-NEXT: vpxor %xmm6, %xmm5, %xmm5
; AVX1-NEXT: vpsubq %xmm6, %xmm5, %xmm5
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm5, %ymm3
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3,4,5,6,7]
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm5
; AVX1-NEXT: vpsrlq $60, %xmm5, %xmm7
; AVX1-NEXT: vpsrlq $61, %xmm5, %xmm5
; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm7[4,5,6,7]
; AVX1-NEXT: vpaddq %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpsrlq $4, %xmm3, %xmm5
; AVX1-NEXT: vpsrlq $3, %xmm3, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm5[4,5,6,7]
; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpsubq %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm2, %xmm4
; AVX1-NEXT: vpsrlq $62, %xmm4, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4,5,6,7]
; AVX1-NEXT: vpaddq %xmm2, %xmm1, %xmm2
; AVX1-NEXT: vpsrlq $2, %xmm2, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4,5,6,7]
; AVX1-NEXT: vpxor %xmm6, %xmm2, %xmm2
; AVX1-NEXT: vpsubq %xmm6, %xmm2, %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3,4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_vec_sdiv_by_pow2b_v8i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpcmpgtq %ymm0, %ymm2, %ymm3
; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [64,62,61,60]
; AVX2-NEXT: vpsrlvq %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpaddq %ymm3, %ymm0, %ymm3
; AVX2-NEXT: vmovdqa {{.*#+}} ymm5 = [0,2,3,4]
; AVX2-NEXT: vpsrlvq %ymm5, %ymm3, %ymm3
; AVX2-NEXT: vmovdqa {{.*#+}} ymm6 = [9223372036854775808,2305843009213693952,1152921504606846976,576460752303423488]
; AVX2-NEXT: vpxor %ymm6, %ymm3, %ymm3
; AVX2-NEXT: vpsubq %ymm6, %ymm3, %ymm3
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3,4,5,6,7]
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm2, %ymm2
; AVX2-NEXT: vpsrlvq %ymm4, %ymm2, %ymm2
; AVX2-NEXT: vpaddq %ymm2, %ymm1, %ymm2
; AVX2-NEXT: vpsrlvq %ymm5, %ymm2, %ymm2
; AVX2-NEXT: vpxor %ymm6, %ymm2, %ymm2
; AVX2-NEXT: vpsubq %ymm6, %ymm2, %ymm2
; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3,4,5,6,7]
; AVX2-NEXT: retq
;
; AVX512F-LABEL: combine_vec_sdiv_by_pow2b_v8i64:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsraq $63, %zmm0, %zmm1
; AVX512F-NEXT: vpsrlvq {{.*}}(%rip), %zmm1, %zmm1
; AVX512F-NEXT: vpaddq %zmm1, %zmm0, %zmm1
; AVX512F-NEXT: vpsravq {{.*}}(%rip), %zmm1, %zmm1
; AVX512F-NEXT: movb $17, %al
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1}
; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: combine_vec_sdiv_by_pow2b_v8i64:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsraq $63, %zmm0, %zmm1
; AVX512BW-NEXT: vpsrlvq {{.*}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: vpaddq %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpsravq {{.*}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: movb $17, %al
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; XOP-LABEL: combine_vec_sdiv_by_pow2b_v8i64:
; XOP: # %bb.0:
; XOP-NEXT: movl $2, %eax
; XOP-NEXT: vmovq %rax, %xmm2
; XOP-NEXT: vpslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5,6,7]
; XOP-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOP-NEXT: vpsubq %xmm2, %xmm3, %xmm9
; XOP-NEXT: vpsubq {{.*}}(%rip), %xmm3, %xmm4
; XOP-NEXT: vpshaq %xmm4, %xmm0, %xmm5
; XOP-NEXT: vpsubq {{.*}}(%rip), %xmm3, %xmm6
; XOP-NEXT: vpshlq %xmm6, %xmm5, %xmm5
; XOP-NEXT: vpaddq %xmm5, %xmm0, %xmm5
; XOP-NEXT: vpshaq %xmm9, %xmm5, %xmm8
; XOP-NEXT: vextractf128 $1, %ymm0, %xmm7
; XOP-NEXT: vpshaq %xmm4, %xmm7, %xmm5
; XOP-NEXT: vpsubq {{.*}}(%rip), %xmm3, %xmm2
; XOP-NEXT: vpshlq %xmm2, %xmm5, %xmm5
; XOP-NEXT: vpaddq %xmm5, %xmm7, %xmm5
; XOP-NEXT: vpsubq {{.*}}(%rip), %xmm3, %xmm3
; XOP-NEXT: vpshaq %xmm3, %xmm5, %xmm5
; XOP-NEXT: vinsertf128 $1, %xmm5, %ymm8, %ymm5
; XOP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm5[2,3,4,5,6,7]
; XOP-NEXT: vpshaq %xmm4, %xmm1, %xmm5
; XOP-NEXT: vpshlq %xmm6, %xmm5, %xmm5
; XOP-NEXT: vpaddq %xmm5, %xmm1, %xmm5
; XOP-NEXT: vpshaq %xmm9, %xmm5, %xmm5
; XOP-NEXT: vextractf128 $1, %ymm1, %xmm6
; XOP-NEXT: vpshaq %xmm4, %xmm6, %xmm4
; XOP-NEXT: vpshlq %xmm2, %xmm4, %xmm2
; XOP-NEXT: vpaddq %xmm2, %xmm6, %xmm2
; XOP-NEXT: vpshaq %xmm3, %xmm2, %xmm2
; XOP-NEXT: vinsertf128 $1, %xmm2, %ymm5, %ymm2
; XOP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3,4,5,6,7]
; XOP-NEXT: retq
%1 = sdiv <8 x i64> %x, <i64 1, i64 4, i64 8, i64 16, i64 1, i64 4, i64 8, i64 16>
ret <8 x i64> %1
}
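; Mixed-sign pow2 divisors <1,-4,8,-16>: divide by the absolute value first,
; then negate the lanes with negative divisors (the psubd-from-zero + blend).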
define <4 x i32> @combine_vec_sdiv_by_pow2b_PosAndNeg(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_by_pow2b_PosAndNeg:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrad $31, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: psrld $28, %xmm1
; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: psrld $30, %xmm3
; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: psrld $29, %xmm2
; SSE-NEXT: pxor %xmm4, %xmm4
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
; SSE-NEXT: paddd %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: psrad $3, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: psrad $4, %xmm2
; SSE-NEXT: psrad $2, %xmm3
; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
; SSE-NEXT: psubd %xmm1, %xmm4
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm4[0,1,2,3],xmm1[4,5],xmm4[6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_vec_sdiv_by_pow2b_PosAndNeg:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsrad $31, %xmm0, %xmm1
; AVX1-NEXT: vpsrld $28, %xmm1, %xmm2
; AVX1-NEXT: vpsrld $30, %xmm1, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
; AVX1-NEXT: vpsrld $29, %xmm1, %xmm1
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpsrad $4, %xmm1, %xmm2
; AVX1-NEXT: vpsrad $2, %xmm1, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4,5,6,7]
; AVX1-NEXT: vpsrad $3, %xmm1, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm4[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; AVX1-NEXT: vpsubd %xmm1, %xmm3, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5],xmm2[6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2ORLATER-LABEL: combine_vec_sdiv_by_pow2b_PosAndNeg:
; AVX2ORLATER: # %bb.0:
; AVX2ORLATER-NEXT: vpsrad $31, %xmm0, %xmm1
; AVX2ORLATER-NEXT: vpsrlvd {{.*}}(%rip), %xmm1, %xmm1
; AVX2ORLATER-NEXT: vpaddd %xmm1, %xmm0, %xmm1
; AVX2ORLATER-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1
; AVX2ORLATER-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2ORLATER-NEXT: vpsubd %xmm1, %xmm2, %xmm2
; AVX2ORLATER-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2],xmm2[3]
; AVX2ORLATER-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX2ORLATER-NEXT: retq
;
; XOP-LABEL: combine_vec_sdiv_by_pow2b_PosAndNeg:
; XOP: # %bb.0:
; XOP-NEXT: vpsrad $31, %xmm0, %xmm1
; XOP-NEXT: vpshld {{.*}}(%rip), %xmm1, %xmm1
; XOP-NEXT: vpaddd %xmm1, %xmm0, %xmm1
; XOP-NEXT: vpshad {{.*}}(%rip), %xmm1, %xmm1
; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOP-NEXT: vpsubd %xmm1, %xmm2, %xmm2
; XOP-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5],xmm2[6,7]
; XOP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; XOP-NEXT: retq
%1 = sdiv <4 x i32> %x, <i32 1, i32 -4, i32 8, i32 -16>
ret <4 x i32> %1
}
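; If any divisor element is undef, the whole sdiv folds to undef and no code
; is emitted.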
define <4 x i32> @combine_vec_sdiv_by_pow2b_undef1(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_sdiv_by_pow2b_undef1:
; CHECK: # %bb.0:
; CHECK-NEXT: retq
%1 = sdiv <4 x i32> %x, <i32 undef, i32 -4, i32 undef, i32 -16>
ret <4 x i32> %1
}
define <4 x i32> @combine_vec_sdiv_by_pow2b_undef2(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_sdiv_by_pow2b_undef2:
; CHECK: # %bb.0:
; CHECK-NEXT: retq
%1 = sdiv <4 x i32> %x, <i32 undef, i32 4, i32 undef, i32 16>
ret <4 x i32> %1
}
define <4 x i32> @combine_vec_sdiv_by_pow2b_undef3(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_sdiv_by_pow2b_undef3:
; CHECK: # %bb.0:
; CHECK-NEXT: retq
%1 = sdiv <4 x i32> %x, <i32 undef, i32 -4, i32 undef, i32 16>
ret <4 x i32> %1
}
; PR37119
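; Lanes dividing by -1 must become 0 - x (the negb sequences below); lanes
; dividing by 1 pass through untouched.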
define <16 x i8> @non_splat_minus_one_divisor_0(<16 x i8> %A) {
; SSE-LABEL: non_splat_minus_one_divisor_0:
; SSE: # %bb.0:
; SSE-NEXT: pextrb $1, %xmm0, %eax
; SSE-NEXT: negb %al
; SSE-NEXT: movzbl %al, %eax
; SSE-NEXT: pextrb $0, %xmm0, %ecx
; SSE-NEXT: negb %cl
; SSE-NEXT: movzbl %cl, %ecx
; SSE-NEXT: movd %ecx, %xmm1
; SSE-NEXT: pinsrb $1, %eax, %xmm1
; SSE-NEXT: pextrb $2, %xmm0, %eax
; SSE-NEXT: pinsrb $2, %eax, %xmm1
; SSE-NEXT: pextrb $3, %xmm0, %eax
; SSE-NEXT: negb %al
; SSE-NEXT: movzbl %al, %eax
; SSE-NEXT: pinsrb $3, %eax, %xmm1
; SSE-NEXT: pextrb $4, %xmm0, %eax
; SSE-NEXT: negb %al
; SSE-NEXT: movzbl %al, %eax
; SSE-NEXT: pinsrb $4, %eax, %xmm1
; SSE-NEXT: pextrb $5, %xmm0, %eax
; SSE-NEXT: negb %al
; SSE-NEXT: movzbl %al, %eax
; SSE-NEXT: pinsrb $5, %eax, %xmm1
; SSE-NEXT: pextrb $6, %xmm0, %eax
; SSE-NEXT: pinsrb $6, %eax, %xmm1
; SSE-NEXT: pextrb $7, %xmm0, %eax
; SSE-NEXT: negb %al
; SSE-NEXT: movzbl %al, %eax
; SSE-NEXT: pinsrb $7, %eax, %xmm1
; SSE-NEXT: pextrb $8, %xmm0, %eax
; SSE-NEXT: negb %al
; SSE-NEXT: movzbl %al, %eax
; SSE-NEXT: pinsrb $8, %eax, %xmm1
; SSE-NEXT: pextrb $9, %xmm0, %eax
; SSE-NEXT: pinsrb $9, %eax, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4],xmm0[5,6,7]
; SSE-NEXT: retq
;
; AVX1-LABEL: non_splat_minus_one_divisor_0:
; AVX1: # %bb.0:
; AVX1-NEXT: vpextrb $1, %xmm0, %eax
; AVX1-NEXT: negb %al
; AVX1-NEXT: movzbl %al, %eax
; AVX1-NEXT: vpextrb $0, %xmm0, %ecx
; AVX1-NEXT: negb %cl
; AVX1-NEXT: movzbl %cl, %ecx
; AVX1-NEXT: vmovd %ecx, %xmm1
; AVX1-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
; AVX1-NEXT: vpextrb $2, %xmm0, %eax
; AVX1-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
; AVX1-NEXT: vpextrb $3, %xmm0, %eax
; AVX1-NEXT: negb %al
; AVX1-NEXT: movzbl %al, %eax
; AVX1-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
; AVX1-NEXT: vpextrb $4, %xmm0, %eax
; AVX1-NEXT: negb %al
; AVX1-NEXT: movzbl %al, %eax
; AVX1-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
; AVX1-NEXT: vpextrb $5, %xmm0, %eax
; AVX1-NEXT: negb %al
; AVX1-NEXT: movzbl %al, %eax
; AVX1-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
; AVX1-NEXT: vpextrb $6, %xmm0, %eax
; AVX1-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
; AVX1-NEXT: vpextrb $7, %xmm0, %eax
; AVX1-NEXT: negb %al
; AVX1-NEXT: movzbl %al, %eax
; AVX1-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
; AVX1-NEXT: vpextrb $8, %xmm0, %eax
; AVX1-NEXT: negb %al
; AVX1-NEXT: movzbl %al, %eax
; AVX1-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
; AVX1-NEXT: vpextrb $9, %xmm0, %eax
; AVX1-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4],xmm0[5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: non_splat_minus_one_divisor_0:
; AVX2: # %bb.0:
; AVX2-NEXT: vpextrb $1, %xmm0, %eax
; AVX2-NEXT: negb %al
; AVX2-NEXT: movzbl %al, %eax
; AVX2-NEXT: vpextrb $0, %xmm0, %ecx
; AVX2-NEXT: negb %cl
; AVX2-NEXT: movzbl %cl, %ecx
; AVX2-NEXT: vmovd %ecx, %xmm1
; AVX2-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
; AVX2-NEXT: vpextrb $2, %xmm0, %eax
; AVX2-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
; AVX2-NEXT: vpextrb $3, %xmm0, %eax
; AVX2-NEXT: negb %al
; AVX2-NEXT: movzbl %al, %eax
; AVX2-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
; AVX2-NEXT: vpextrb $4, %xmm0, %eax
; AVX2-NEXT: negb %al
; AVX2-NEXT: movzbl %al, %eax
; AVX2-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
; AVX2-NEXT: vpextrb $5, %xmm0, %eax
; AVX2-NEXT: negb %al
; AVX2-NEXT: movzbl %al, %eax
; AVX2-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
; AVX2-NEXT: vpextrb $6, %xmm0, %eax
; AVX2-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
; AVX2-NEXT: vpextrb $7, %xmm0, %eax
; AVX2-NEXT: negb %al
; AVX2-NEXT: movzbl %al, %eax
; AVX2-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
; AVX2-NEXT: vpextrb $8, %xmm0, %eax
; AVX2-NEXT: negb %al
; AVX2-NEXT: movzbl %al, %eax
; AVX2-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
; AVX2-NEXT: vpextrb $9, %xmm0, %eax
; AVX2-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4],xmm0[5,6,7]
; AVX2-NEXT: retq
;
; AVX512F-LABEL: non_splat_minus_one_divisor_0:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpextrb $1, %xmm0, %eax
; AVX512F-NEXT: negb %al
; AVX512F-NEXT: movzbl %al, %eax
; AVX512F-NEXT: vpextrb $0, %xmm0, %ecx
; AVX512F-NEXT: negb %cl
; AVX512F-NEXT: movzbl %cl, %ecx
; AVX512F-NEXT: vmovd %ecx, %xmm1
; AVX512F-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
; AVX512F-NEXT: vpextrb $2, %xmm0, %eax
; AVX512F-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
; AVX512F-NEXT: vpextrb $3, %xmm0, %eax
; AVX512F-NEXT: negb %al
; AVX512F-NEXT: movzbl %al, %eax
; AVX512F-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
; AVX512F-NEXT: vpextrb $4, %xmm0, %eax
; AVX512F-NEXT: negb %al
; AVX512F-NEXT: movzbl %al, %eax
; AVX512F-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
; AVX512F-NEXT: vpextrb $5, %xmm0, %eax
; AVX512F-NEXT: negb %al
; AVX512F-NEXT: movzbl %al, %eax
; AVX512F-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
; AVX512F-NEXT: vpextrb $6, %xmm0, %eax
; AVX512F-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
; AVX512F-NEXT: vpextrb $7, %xmm0, %eax
; AVX512F-NEXT: negb %al
; AVX512F-NEXT: movzbl %al, %eax
; AVX512F-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
; AVX512F-NEXT: vpextrb $8, %xmm0, %eax
; AVX512F-NEXT: negb %al
; AVX512F-NEXT: movzbl %al, %eax
; AVX512F-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
; AVX512F-NEXT: vpextrb $9, %xmm0, %eax
; AVX512F-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
; AVX512F-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4],xmm0[5,6,7]
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: non_splat_minus_one_divisor_0:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpextrb $1, %xmm0, %eax
; AVX512BW-NEXT: negb %al
; AVX512BW-NEXT: movzbl %al, %eax
; AVX512BW-NEXT: vpextrb $0, %xmm0, %ecx
; AVX512BW-NEXT: negb %cl
; AVX512BW-NEXT: movzbl %cl, %ecx
; AVX512BW-NEXT: vmovd %ecx, %xmm1
; AVX512BW-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
; AVX512BW-NEXT: vpextrb $2, %xmm0, %eax
; AVX512BW-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
; AVX512BW-NEXT: vpextrb $3, %xmm0, %eax
; AVX512BW-NEXT: negb %al
; AVX512BW-NEXT: movzbl %al, %eax
; AVX512BW-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
; AVX512BW-NEXT: vpextrb $4, %xmm0, %eax
; AVX512BW-NEXT: negb %al
; AVX512BW-NEXT: movzbl %al, %eax
; AVX512BW-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
; AVX512BW-NEXT: vpextrb $5, %xmm0, %eax
; AVX512BW-NEXT: negb %al
; AVX512BW-NEXT: movzbl %al, %eax
; AVX512BW-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
; AVX512BW-NEXT: vpextrb $6, %xmm0, %eax
; AVX512BW-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
; AVX512BW-NEXT: vpextrb $7, %xmm0, %eax
; AVX512BW-NEXT: negb %al
; AVX512BW-NEXT: movzbl %al, %eax
; AVX512BW-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
; AVX512BW-NEXT: vpextrb $8, %xmm0, %eax
; AVX512BW-NEXT: negb %al
; AVX512BW-NEXT: movzbl %al, %eax
; AVX512BW-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
; AVX512BW-NEXT: vpextrb $9, %xmm0, %eax
; AVX512BW-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
; AVX512BW-NEXT: vpextrb $10, %xmm0, %eax
; AVX512BW-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
; AVX512BW-NEXT: vpextrb $11, %xmm0, %eax
; AVX512BW-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
; AVX512BW-NEXT: vpextrb $12, %xmm0, %eax
; AVX512BW-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
; AVX512BW-NEXT: vpextrb $13, %xmm0, %eax
; AVX512BW-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
; AVX512BW-NEXT: vpextrb $14, %xmm0, %eax
; AVX512BW-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
; AVX512BW-NEXT: vpextrb $15, %xmm0, %eax
; AVX512BW-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0
; AVX512BW-NEXT: retq
;
; XOP-LABEL: non_splat_minus_one_divisor_0:
; XOP: # %bb.0:
; XOP-NEXT: vpextrb $1, %xmm0, %eax
; XOP-NEXT: negb %al
; XOP-NEXT: movzbl %al, %eax
; XOP-NEXT: vpextrb $0, %xmm0, %ecx
; XOP-NEXT: negb %cl
; XOP-NEXT: movzbl %cl, %ecx
; XOP-NEXT: vmovd %ecx, %xmm1
; XOP-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
; XOP-NEXT: vpextrb $2, %xmm0, %eax
; XOP-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
; XOP-NEXT: vpextrb $3, %xmm0, %eax
; XOP-NEXT: negb %al
; XOP-NEXT: movzbl %al, %eax
; XOP-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
; XOP-NEXT: vpextrb $4, %xmm0, %eax
; XOP-NEXT: negb %al
; XOP-NEXT: movzbl %al, %eax
; XOP-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
; XOP-NEXT: vpextrb $5, %xmm0, %eax
; XOP-NEXT: negb %al
; XOP-NEXT: movzbl %al, %eax
; XOP-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
; XOP-NEXT: vpextrb $6, %xmm0, %eax
; XOP-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
; XOP-NEXT: vpextrb $7, %xmm0, %eax
; XOP-NEXT: negb %al
; XOP-NEXT: movzbl %al, %eax
; XOP-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
; XOP-NEXT: vpextrb $8, %xmm0, %eax
; XOP-NEXT: negb %al
; XOP-NEXT: movzbl %al, %eax
; XOP-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
; XOP-NEXT: vpperm {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5,6,7,8],xmm0[9,10,11,12,13,14,15]
; XOP-NEXT: retq
%div = sdiv <16 x i8> %A, <i8 -1, i8 -1, i8 1, i8 -1, i8 -1, i8 -1, i8 1, i8 -1, i8 -1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
ret <16 x i8> %div
}
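; As above but with 2 and -128 divisors mixed in: divisor-2 lanes use the
; scalar (x + (x u>> 7)) s>> 1 sequence, and divisor -128 (INT8_MIN) lanes
; fold to a setcc, per the INT_MIN fold.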
define <16 x i8> @non_splat_minus_one_divisor_1(<16 x i8> %A) {
; SSE-LABEL: non_splat_minus_one_divisor_1:
; SSE: # %bb.0:
; SSE-NEXT: pextrb $1, %xmm0, %eax
; SSE-NEXT: negb %al
; SSE-NEXT: movzbl %al, %eax
; SSE-NEXT: pextrb $0, %xmm0, %ecx
; SSE-NEXT: negb %cl
; SSE-NEXT: movzbl %cl, %ecx
; SSE-NEXT: movd %ecx, %xmm1
; SSE-NEXT: pinsrb $1, %eax, %xmm1
; SSE-NEXT: pextrb $2, %xmm0, %eax
; SSE-NEXT: movl %eax, %ecx
; SSE-NEXT: shrb $7, %cl
; SSE-NEXT: addb %al, %cl
; SSE-NEXT: sarb %cl
; SSE-NEXT: movzbl %cl, %eax
; SSE-NEXT: pinsrb $2, %eax, %xmm1
; SSE-NEXT: pextrb $3, %xmm0, %eax
; SSE-NEXT: negb %al
; SSE-NEXT: movzbl %al, %eax
; SSE-NEXT: pinsrb $3, %eax, %xmm1
; SSE-NEXT: pextrb $4, %xmm0, %eax
; SSE-NEXT: negb %al
; SSE-NEXT: movzbl %al, %eax
; SSE-NEXT: pinsrb $4, %eax, %xmm1
; SSE-NEXT: pextrb $5, %xmm0, %eax
; SSE-NEXT: negb %al
; SSE-NEXT: movzbl %al, %eax
; SSE-NEXT: pinsrb $5, %eax, %xmm1
; SSE-NEXT: pextrb $6, %xmm0, %eax
; SSE-NEXT: movl %eax, %ecx
; SSE-NEXT: shrb $7, %cl
; SSE-NEXT: addb %al, %cl
; SSE-NEXT: sarb %cl
; SSE-NEXT: movzbl %cl, %eax
; SSE-NEXT: pinsrb $6, %eax, %xmm1
; SSE-NEXT: pextrb $7, %xmm0, %eax
; SSE-NEXT: negb %al
; SSE-NEXT: movzbl %al, %eax
; SSE-NEXT: pinsrb $7, %eax, %xmm1
; SSE-NEXT: pextrb $8, %xmm0, %eax
; SSE-NEXT: negb %al
; SSE-NEXT: movzbl %al, %eax
; SSE-NEXT: pinsrb $8, %eax, %xmm1
; SSE-NEXT: pextrb $9, %xmm0, %eax
; SSE-NEXT: movl %eax, %ecx
; SSE-NEXT: shrb $7, %cl
; SSE-NEXT: addb %al, %cl
; SSE-NEXT: sarb %cl
; SSE-NEXT: movzbl %cl, %eax
; SSE-NEXT: pinsrb $9, %eax, %xmm1
; SSE-NEXT: pextrb $10, %xmm0, %eax
; SSE-NEXT: movl %eax, %ecx
; SSE-NEXT: shrb $7, %cl
; SSE-NEXT: addb %al, %cl
; SSE-NEXT: sarb %cl
; SSE-NEXT: movzbl %cl, %eax
; SSE-NEXT: pinsrb $10, %eax, %xmm1
; SSE-NEXT: pextrb $11, %xmm0, %eax
; SSE-NEXT: movl %eax, %ecx
; SSE-NEXT: shrb $7, %cl
; SSE-NEXT: addb %al, %cl
; SSE-NEXT: sarb %cl
; SSE-NEXT: movzbl %cl, %eax
; SSE-NEXT: pinsrb $11, %eax, %xmm1
; SSE-NEXT: pextrb $12, %xmm0, %eax
; SSE-NEXT: movl %eax, %ecx
; SSE-NEXT: shrb $7, %cl
; SSE-NEXT: addb %al, %cl
; SSE-NEXT: sarb %cl
; SSE-NEXT: movzbl %cl, %eax
; SSE-NEXT: pinsrb $12, %eax, %xmm1
; SSE-NEXT: pextrb $13, %xmm0, %eax
; SSE-NEXT: xorl %ecx, %ecx
; SSE-NEXT: cmpb $-128, %al
; SSE-NEXT: sete %cl
; SSE-NEXT: pinsrb $13, %ecx, %xmm1
; SSE-NEXT: pextrb $14, %xmm0, %eax
; SSE-NEXT: movl %eax, %ecx
; SSE-NEXT: shrb $7, %cl
; SSE-NEXT: addb %al, %cl
; SSE-NEXT: sarb %cl
; SSE-NEXT: movzbl %cl, %eax
; SSE-NEXT: pinsrb $14, %eax, %xmm1
; SSE-NEXT: pextrb $15, %xmm0, %eax
; SSE-NEXT: xorl %ecx, %ecx
; SSE-NEXT: cmpb $-128, %al
; SSE-NEXT: sete %cl
; SSE-NEXT: pinsrb $15, %ecx, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: non_splat_minus_one_divisor_1:
; AVX: # %bb.0:
; AVX-NEXT: vpextrb $1, %xmm0, %eax
; AVX-NEXT: negb %al
; AVX-NEXT: movzbl %al, %eax
; AVX-NEXT: vpextrb $0, %xmm0, %ecx
; AVX-NEXT: negb %cl
; AVX-NEXT: movzbl %cl, %ecx
; AVX-NEXT: vmovd %ecx, %xmm1
; AVX-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
; AVX-NEXT: vpextrb $2, %xmm0, %eax
; AVX-NEXT: movl %eax, %ecx
; AVX-NEXT: shrb $7, %cl
; AVX-NEXT: addb %al, %cl
; AVX-NEXT: sarb %cl
; AVX-NEXT: movzbl %cl, %eax
; AVX-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
; AVX-NEXT: vpextrb $3, %xmm0, %eax
; AVX-NEXT: negb %al
; AVX-NEXT: movzbl %al, %eax
; AVX-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
; AVX-NEXT: vpextrb $4, %xmm0, %eax
; AVX-NEXT: negb %al
; AVX-NEXT: movzbl %al, %eax
; AVX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
; AVX-NEXT: vpextrb $5, %xmm0, %eax
; AVX-NEXT: negb %al
; AVX-NEXT: movzbl %al, %eax
; AVX-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
; AVX-NEXT: vpextrb $6, %xmm0, %eax
; AVX-NEXT: movl %eax, %ecx
; AVX-NEXT: shrb $7, %cl
; AVX-NEXT: addb %al, %cl
; AVX-NEXT: sarb %cl
; AVX-NEXT: movzbl %cl, %eax
; AVX-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
; AVX-NEXT: vpextrb $7, %xmm0, %eax
; AVX-NEXT: negb %al
; AVX-NEXT: movzbl %al, %eax
; AVX-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
; AVX-NEXT: vpextrb $8, %xmm0, %eax
; AVX-NEXT: negb %al
; AVX-NEXT: movzbl %al, %eax
; AVX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
; AVX-NEXT: vpextrb $9, %xmm0, %eax
; AVX-NEXT: movl %eax, %ecx
; AVX-NEXT: shrb $7, %cl
; AVX-NEXT: addb %al, %cl
; AVX-NEXT: sarb %cl
; AVX-NEXT: movzbl %cl, %eax
; AVX-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
; AVX-NEXT: vpextrb $10, %xmm0, %eax
; AVX-NEXT: movl %eax, %ecx
; AVX-NEXT: shrb $7, %cl
; AVX-NEXT: addb %al, %cl
; AVX-NEXT: sarb %cl
; AVX-NEXT: movzbl %cl, %eax
; AVX-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
; AVX-NEXT: vpextrb $11, %xmm0, %eax
; AVX-NEXT: movl %eax, %ecx
; AVX-NEXT: shrb $7, %cl
; AVX-NEXT: addb %al, %cl
; AVX-NEXT: sarb %cl
; AVX-NEXT: movzbl %cl, %eax
; AVX-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
; AVX-NEXT: vpextrb $12, %xmm0, %eax
; AVX-NEXT: movl %eax, %ecx
; AVX-NEXT: shrb $7, %cl
; AVX-NEXT: addb %al, %cl
; AVX-NEXT: sarb %cl
; AVX-NEXT: movzbl %cl, %eax
; AVX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
; AVX-NEXT: vpextrb $13, %xmm0, %eax
; AVX-NEXT: xorl %ecx, %ecx
; AVX-NEXT: cmpb $-128, %al
; AVX-NEXT: sete %cl
; AVX-NEXT: vpinsrb $13, %ecx, %xmm1, %xmm1
; AVX-NEXT: vpextrb $14, %xmm0, %eax
; AVX-NEXT: movl %eax, %ecx
; AVX-NEXT: shrb $7, %cl
; AVX-NEXT: addb %al, %cl
; AVX-NEXT: sarb %cl
; AVX-NEXT: movzbl %cl, %eax
; AVX-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
; AVX-NEXT: vpextrb $15, %xmm0, %eax
; AVX-NEXT: xorl %ecx, %ecx
; AVX-NEXT: cmpb $-128, %al
; AVX-NEXT: sete %cl
; AVX-NEXT: vpinsrb $15, %ecx, %xmm1, %xmm0
; AVX-NEXT: retq
%div = sdiv <16 x i8> %A, <i8 -1, i8 -1, i8 2, i8 -1, i8 -1, i8 -1, i8 2, i8 -1, i8 -1, i8 2, i8 2, i8 2, i8 2, i8 -128, i8 2, i8 -128>
ret <16 x i8> %div
}
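; i32 variant mixing -1, 1, 2 and -2: note the trailing negl that negates
; the divide-by-2 result for the -2 lane.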
define <4 x i32> @non_splat_minus_one_divisor_2(<4 x i32> %A) {
; SSE-LABEL: non_splat_minus_one_divisor_2:
; SSE: # %bb.0:
; SSE-NEXT: pextrd $1, %xmm0, %eax
; SSE-NEXT: movd %xmm0, %ecx
; SSE-NEXT: negl %ecx
; SSE-NEXT: movd %ecx, %xmm1
; SSE-NEXT: pinsrd $1, %eax, %xmm1
; SSE-NEXT: pextrd $2, %xmm0, %eax
; SSE-NEXT: movl %eax, %ecx
; SSE-NEXT: shrl $31, %ecx
; SSE-NEXT: addl %eax, %ecx
; SSE-NEXT: sarl %ecx
; SSE-NEXT: pinsrd $2, %ecx, %xmm1
; SSE-NEXT: pextrd $3, %xmm0, %eax
; SSE-NEXT: movl %eax, %ecx
; SSE-NEXT: shrl $31, %ecx
; SSE-NEXT: addl %eax, %ecx
; SSE-NEXT: sarl %ecx
; SSE-NEXT: negl %ecx
; SSE-NEXT: pinsrd $3, %ecx, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: non_splat_minus_one_divisor_2:
; AVX: # %bb.0:
; AVX-NEXT: vpextrd $1, %xmm0, %eax
; AVX-NEXT: vmovd %xmm0, %ecx
; AVX-NEXT: negl %ecx
; AVX-NEXT: vmovd %ecx, %xmm1
; AVX-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
; AVX-NEXT: vpextrd $2, %xmm0, %eax
; AVX-NEXT: movl %eax, %ecx
; AVX-NEXT: shrl $31, %ecx
; AVX-NEXT: addl %eax, %ecx
; AVX-NEXT: sarl %ecx
; AVX-NEXT: vpinsrd $2, %ecx, %xmm1, %xmm1
; AVX-NEXT: vpextrd $3, %xmm0, %eax
; AVX-NEXT: movl %eax, %ecx
; AVX-NEXT: shrl $31, %ecx
; AVX-NEXT: addl %eax, %ecx
; AVX-NEXT: sarl %ecx
; AVX-NEXT: negl %ecx
; AVX-NEXT: vpinsrd $3, %ecx, %xmm1, %xmm0
; AVX-NEXT: retq
%div = sdiv <4 x i32> %A, <i32 -1, i32 1, i32 2, i32 -2>
ret <4 x i32> %div
}