; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW

;
; Just one 32-bit run to make sure we do reasonable things for i64 shifts.
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=X32-SSE --check-prefix=X32-SSE2

;
; Variable Shifts
;

define <2 x i64> @var_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: var_shift_v2i64:
; SSE2: # BB#0:
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: psrlq %xmm3, %xmm4
; SSE2-NEXT: psrlq %xmm1, %xmm2
; SSE2-NEXT: movsd {{.*#+}} xmm4 = xmm2[0],xmm4[1]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psrlq %xmm3, %xmm2
; SSE2-NEXT: psrlq %xmm1, %xmm0
; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
; SSE2-NEXT: xorpd %xmm4, %xmm2
; SSE2-NEXT: psubq %xmm4, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: var_shift_v2i64:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; SSE41-NEXT: movdqa %xmm2, %xmm3
; SSE41-NEXT: psrlq %xmm1, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
; SSE41-NEXT: psrlq %xmm4, %xmm2
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: psrlq %xmm1, %xmm3
; SSE41-NEXT: psrlq %xmm4, %xmm0
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
; SSE41-NEXT: pxor %xmm2, %xmm0
; SSE41-NEXT: psubq %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: var_shift_v2i64:
; AVX1: # BB#0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpsrlq %xmm1, %xmm2, %xmm3
; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
; AVX1-NEXT: vpsrlq %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpsrlq %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpsubq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v2i64:
; AVX2: # BB#0:
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpsrlvq %xmm1, %xmm2, %xmm3
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpsubq %xmm3, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; XOP-LABEL: var_shift_v2i64:
; XOP: # BB#0:
; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOP-NEXT: vpsubq %xmm1, %xmm2, %xmm1
; XOP-NEXT: vpshaq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: var_shift_v2i64:
; AVX512: ## BB#0:
; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX512-NEXT: vpsrlvq %xmm1, %xmm2, %xmm3
; AVX512-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpsubq %xmm3, %xmm0, %xmm0
; AVX512-NEXT: retq
;
; X32-SSE-LABEL: var_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [0,2147483648,0,2147483648]
; X32-SSE-NEXT: movdqa %xmm3, %xmm4
; X32-SSE-NEXT: psrlq %xmm2, %xmm4
; X32-SSE-NEXT: movq {{.*#+}} xmm5 = xmm1[0],zero
; X32-SSE-NEXT: psrlq %xmm5, %xmm3
; X32-SSE-NEXT: movsd {{.*#+}} xmm4 = xmm3[0],xmm4[1]
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psrlq %xmm2, %xmm1
; X32-SSE-NEXT: psrlq %xmm5, %xmm0
; X32-SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; X32-SSE-NEXT: xorpd %xmm4, %xmm1
; X32-SSE-NEXT: psubq %xmm4, %xmm1
; X32-SSE-NEXT: movdqa %xmm1, %xmm0
; X32-SSE-NEXT: retl
  %shift = ashr <2 x i64> %a, %b
  ret <2 x i64> %shift
}
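
; Note (reader annotation, not autogenerated): x86 has no variable-count
; arithmetic shift for i64 vector elements before AVX512, so the ashr is
; synthesized from logical shifts. With m = signbit >> n, we have
; ashr(x, n) == (lshr(x, n) ^ m) - m, which is the psrlq/xor/psubq
; sequence checked above. XOP instead negates the amounts and uses its
; native vpshaq.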
define <4 x i32> @var_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-LABEL: var_shift_v4i32:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: psrad %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: psrlq $32, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: psrad %xmm2, %xmm4
; SSE2-NEXT: movsd {{.*#+}} xmm3 = xmm4[0],xmm3[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,3,2,3]
; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: psrad %xmm4, %xmm5
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
; SSE2-NEXT: psrad %xmm1, %xmm0
; SSE2-NEXT: movsd {{.*#+}} xmm5 = xmm0[0],xmm5[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: var_shift_v4i32:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: psrad %xmm2, %xmm3
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: psrlq $32, %xmm2
; SSE41-NEXT: movdqa %xmm0, %xmm4
; SSE41-NEXT: psrad %xmm2, %xmm4
; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm3[4,5,6,7]
; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
; SSE41-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psrad %xmm1, %xmm2
; SSE41-NEXT: psrad %xmm3, %xmm0
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: var_shift_v4i32:
; AVX1: # BB#0:
; AVX1-NEXT: vpsrldq {{.*#+}} xmm2 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vpsrad %xmm2, %xmm0, %xmm2
; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm3
; AVX1-NEXT: vpsrad %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
; AVX1-NEXT: vpsrad %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT: vpsrad %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v4i32:
; AVX2: # BB#0:
; AVX2-NEXT: vpsravd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v4i32:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1
; XOPAVX1-NEXT: vpshad %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v4i32:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsravd %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: var_shift_v4i32:
; AVX512: ## BB#0:
; AVX512-NEXT: vpsravd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
;
; X32-SSE-LABEL: var_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa %xmm1, %xmm2
; X32-SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; X32-SSE-NEXT: movdqa %xmm0, %xmm3
; X32-SSE-NEXT: psrad %xmm2, %xmm3
; X32-SSE-NEXT: movdqa %xmm1, %xmm2
; X32-SSE-NEXT: psrlq $32, %xmm2
; X32-SSE-NEXT: movdqa %xmm0, %xmm4
; X32-SSE-NEXT: psrad %xmm2, %xmm4
; X32-SSE-NEXT: movsd {{.*#+}} xmm3 = xmm4[0],xmm3[1]
; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,3,2,3]
; X32-SSE-NEXT: pxor %xmm3, %xmm3
; X32-SSE-NEXT: movdqa %xmm1, %xmm4
; X32-SSE-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
; X32-SSE-NEXT: movdqa %xmm0, %xmm5
; X32-SSE-NEXT: psrad %xmm4, %xmm5
; X32-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
; X32-SSE-NEXT: psrad %xmm1, %xmm0
; X32-SSE-NEXT: movsd {{.*#+}} xmm5 = xmm0[0],xmm5[1]
; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,2,2,3]
; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; X32-SSE-NEXT: retl
  %shift = ashr <4 x i32> %a, %b
  ret <4 x i32> %shift
}
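
; Note (reader annotation, not autogenerated): before AVX2's per-lane
; vpsravd, SSE/AVX1 psrad shifts every i32 lane by one count taken from
; the low qword of the count operand, so each of the four amounts is
; zero-extended in turn, the source is shifted four times, and the
; results are blended back together.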
define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE2-LABEL: var_shift_v8i16:
; SSE2: # BB#0:
; SSE2-NEXT: psllw $12, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: psraw $15, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: pandn %xmm0, %xmm3
; SSE2-NEXT: psraw $8, %xmm0
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: paddw %xmm1, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: psraw $15, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: pandn %xmm0, %xmm3
; SSE2-NEXT: psraw $4, %xmm0
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: paddw %xmm1, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: psraw $15, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: pandn %xmm0, %xmm3
; SSE2-NEXT: psraw $2, %xmm0
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: paddw %xmm1, %xmm1
; SSE2-NEXT: psraw $15, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pandn %xmm0, %xmm2
; SSE2-NEXT: psraw $1, %xmm0
; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: var_shift_v8i16:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: psllw $12, %xmm0
; SSE41-NEXT: psllw $4, %xmm1
; SSE41-NEXT: por %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: paddw %xmm3, %xmm3
; SSE41-NEXT: movdqa %xmm2, %xmm4
; SSE41-NEXT: psraw $8, %xmm4
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pblendvb %xmm4, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm1
; SSE41-NEXT: psraw $4, %xmm1
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: pblendvb %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm1
; SSE41-NEXT: psraw $2, %xmm1
; SSE41-NEXT: paddw %xmm3, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: pblendvb %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm1
; SSE41-NEXT: psraw $1, %xmm1
; SSE41-NEXT: paddw %xmm3, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: pblendvb %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: var_shift_v8i16:
; AVX1: # BB#0:
; AVX1-NEXT: vpsllw $12, %xmm1, %xmm2
; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm2
; AVX1-NEXT: vpsraw $8, %xmm0, %xmm3
; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpsraw $4, %xmm0, %xmm1
; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsraw $2, %xmm0, %xmm1
; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsraw $1, %xmm0, %xmm1
; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v8i16:
; AVX2: # BB#0:
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; XOP-LABEL: var_shift_v8i16:
; XOP: # BB#0:
; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOP-NEXT: vpsubw %xmm1, %xmm2, %xmm1
; XOP-NEXT: vpshaw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: var_shift_v8i16:
; AVX512: ## BB#0:
; AVX512-NEXT: ## kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
; AVX512-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
; AVX512-NEXT: vpsravw %zmm1, %zmm0, %zmm0
; AVX512-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
; AVX512-NEXT: retq
;
; X32-SSE-LABEL: var_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psllw $12, %xmm1
; X32-SSE-NEXT: movdqa %xmm1, %xmm2
; X32-SSE-NEXT: psraw $15, %xmm2
; X32-SSE-NEXT: movdqa %xmm2, %xmm3
; X32-SSE-NEXT: pandn %xmm0, %xmm3
; X32-SSE-NEXT: psraw $8, %xmm0
; X32-SSE-NEXT: pand %xmm2, %xmm0
; X32-SSE-NEXT: por %xmm3, %xmm0
; X32-SSE-NEXT: paddw %xmm1, %xmm1
; X32-SSE-NEXT: movdqa %xmm1, %xmm2
; X32-SSE-NEXT: psraw $15, %xmm2
; X32-SSE-NEXT: movdqa %xmm2, %xmm3
; X32-SSE-NEXT: pandn %xmm0, %xmm3
; X32-SSE-NEXT: psraw $4, %xmm0
; X32-SSE-NEXT: pand %xmm2, %xmm0
; X32-SSE-NEXT: por %xmm3, %xmm0
; X32-SSE-NEXT: paddw %xmm1, %xmm1
; X32-SSE-NEXT: movdqa %xmm1, %xmm2
; X32-SSE-NEXT: psraw $15, %xmm2
; X32-SSE-NEXT: movdqa %xmm2, %xmm3
; X32-SSE-NEXT: pandn %xmm0, %xmm3
; X32-SSE-NEXT: psraw $2, %xmm0
; X32-SSE-NEXT: pand %xmm2, %xmm0
; X32-SSE-NEXT: por %xmm3, %xmm0
; X32-SSE-NEXT: paddw %xmm1, %xmm1
; X32-SSE-NEXT: psraw $15, %xmm1
; X32-SSE-NEXT: movdqa %xmm1, %xmm2
; X32-SSE-NEXT: pandn %xmm0, %xmm2
; X32-SSE-NEXT: psraw $1, %xmm0
; X32-SSE-NEXT: pand %xmm1, %xmm0
; X32-SSE-NEXT: por %xmm2, %xmm0
; X32-SSE-NEXT: retl
  %shift = ashr <8 x i16> %a, %b
  ret <8 x i16> %shift
}
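
; Note (reader annotation, not autogenerated): the v8i16 lowering moves
; the shift-amount bits into each lane's sign bit (psllw $12, then paddw
; doubling) and uses sign-mask blends to conditionally apply psraw by 8,
; 4, 2 and 1, i.e. a select ladder over the four amount bits.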
define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE2-LABEL: var_shift_v16i8:
; SSE2: # BB#0:
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; SSE2-NEXT: psllw $5, %xmm1
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15]
; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: pxor %xmm5, %xmm5
; SSE2-NEXT: pcmpgtw %xmm4, %xmm5
; SSE2-NEXT: movdqa %xmm5, %xmm6
; SSE2-NEXT: pandn %xmm2, %xmm6
; SSE2-NEXT: psraw $4, %xmm2
; SSE2-NEXT: pand %xmm5, %xmm2
; SSE2-NEXT: por %xmm6, %xmm2
; SSE2-NEXT: paddw %xmm4, %xmm4
; SSE2-NEXT: pxor %xmm5, %xmm5
; SSE2-NEXT: pcmpgtw %xmm4, %xmm5
; SSE2-NEXT: movdqa %xmm5, %xmm6
; SSE2-NEXT: pandn %xmm2, %xmm6
; SSE2-NEXT: psraw $2, %xmm2
; SSE2-NEXT: pand %xmm5, %xmm2
; SSE2-NEXT: por %xmm6, %xmm2
; SSE2-NEXT: paddw %xmm4, %xmm4
; SSE2-NEXT: pxor %xmm5, %xmm5
; SSE2-NEXT: pcmpgtw %xmm4, %xmm5
; SSE2-NEXT: movdqa %xmm5, %xmm4
; SSE2-NEXT: pandn %xmm2, %xmm4
; SSE2-NEXT: psraw $1, %xmm2
; SSE2-NEXT: pand %xmm5, %xmm2
; SSE2-NEXT: por %xmm4, %xmm2
; SSE2-NEXT: psrlw $8, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: pcmpgtw %xmm1, %xmm4
; SSE2-NEXT: movdqa %xmm4, %xmm5
; SSE2-NEXT: pandn %xmm0, %xmm5
; SSE2-NEXT: psraw $4, %xmm0
; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: por %xmm5, %xmm0
; SSE2-NEXT: paddw %xmm1, %xmm1
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: pcmpgtw %xmm1, %xmm4
; SSE2-NEXT: movdqa %xmm4, %xmm5
; SSE2-NEXT: pandn %xmm0, %xmm5
; SSE2-NEXT: psraw $2, %xmm0
; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: por %xmm5, %xmm0
; SSE2-NEXT: paddw %xmm1, %xmm1
; SSE2-NEXT: pcmpgtw %xmm1, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm1
; SSE2-NEXT: pandn %xmm0, %xmm1
; SSE2-NEXT: psraw $1, %xmm0
; SSE2-NEXT: pand %xmm3, %xmm0
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: psrlw $8, %xmm0
; SSE2-NEXT: packuswb %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: var_shift_v16i8:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psllw $5, %xmm1
; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
; SSE41-NEXT: movdqa %xmm3, %xmm4
; SSE41-NEXT: psraw $4, %xmm4
; SSE41-NEXT: pblendvb %xmm4, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm4
; SSE41-NEXT: psraw $2, %xmm4
; SSE41-NEXT: paddw %xmm0, %xmm0
; SSE41-NEXT: pblendvb %xmm4, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm4
; SSE41-NEXT: psraw $1, %xmm4
; SSE41-NEXT: paddw %xmm0, %xmm0
; SSE41-NEXT: pblendvb %xmm4, %xmm3
; SSE41-NEXT: psrlw $8, %xmm3
; SSE41-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE41-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: psraw $4, %xmm2
; SSE41-NEXT: pblendvb %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: psraw $2, %xmm2
; SSE41-NEXT: paddw %xmm0, %xmm0
; SSE41-NEXT: pblendvb %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: psraw $1, %xmm2
; SSE41-NEXT: paddw %xmm0, %xmm0
; SSE41-NEXT: pblendvb %xmm2, %xmm1
; SSE41-NEXT: psrlw $8, %xmm1
; SSE41-NEXT: packuswb %xmm3, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: var_shift_v16i8:
; AVX: # BB#0:
; AVX-NEXT: vpsllw $5, %xmm1, %xmm1
; AVX-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX-NEXT: vpsraw $4, %xmm3, %xmm4
; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
; AVX-NEXT: vpsraw $2, %xmm3, %xmm4
; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2
; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
; AVX-NEXT: vpsraw $1, %xmm3, %xmm4
; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2
; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
; AVX-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX-NEXT: vpsraw $4, %xmm0, %xmm3
; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX-NEXT: vpsraw $2, %xmm0, %xmm3
; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX-NEXT: vpsraw $1, %xmm0, %xmm3
; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: var_shift_v16i8:
; XOP: # BB#0:
; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOP-NEXT: vpsubb %xmm1, %xmm2, %xmm1
; XOP-NEXT: vpshab %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: var_shift_v16i8:
; AVX512: ## BB#0:
; AVX512-NEXT: vpsllw $5, %xmm1, %xmm1
; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX512-NEXT: vpsraw $4, %xmm3, %xmm4
; AVX512-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
; AVX512-NEXT: vpsraw $2, %xmm3, %xmm4
; AVX512-NEXT: vpaddw %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
; AVX512-NEXT: vpsraw $1, %xmm3, %xmm4
; AVX512-NEXT: vpaddw %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
; AVX512-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX512-NEXT: vpsraw $4, %xmm0, %xmm3
; AVX512-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX512-NEXT: vpsraw $2, %xmm0, %xmm3
; AVX512-NEXT: vpaddw %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX512-NEXT: vpsraw $1, %xmm0, %xmm3
; AVX512-NEXT: vpaddw %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX512-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX512-NEXT: retq
;
; X32-SSE-LABEL: var_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; X32-SSE-NEXT: psllw $5, %xmm1
; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15]
; X32-SSE-NEXT: pxor %xmm3, %xmm3
; X32-SSE-NEXT: pxor %xmm5, %xmm5
; X32-SSE-NEXT: pcmpgtw %xmm4, %xmm5
; X32-SSE-NEXT: movdqa %xmm5, %xmm6
; X32-SSE-NEXT: pandn %xmm2, %xmm6
; X32-SSE-NEXT: psraw $4, %xmm2
; X32-SSE-NEXT: pand %xmm5, %xmm2
; X32-SSE-NEXT: por %xmm6, %xmm2
; X32-SSE-NEXT: paddw %xmm4, %xmm4
; X32-SSE-NEXT: pxor %xmm5, %xmm5
; X32-SSE-NEXT: pcmpgtw %xmm4, %xmm5
; X32-SSE-NEXT: movdqa %xmm5, %xmm6
; X32-SSE-NEXT: pandn %xmm2, %xmm6
; X32-SSE-NEXT: psraw $2, %xmm2
; X32-SSE-NEXT: pand %xmm5, %xmm2
; X32-SSE-NEXT: por %xmm6, %xmm2
; X32-SSE-NEXT: paddw %xmm4, %xmm4
; X32-SSE-NEXT: pxor %xmm5, %xmm5
; X32-SSE-NEXT: pcmpgtw %xmm4, %xmm5
; X32-SSE-NEXT: movdqa %xmm5, %xmm4
; X32-SSE-NEXT: pandn %xmm2, %xmm4
; X32-SSE-NEXT: psraw $1, %xmm2
; X32-SSE-NEXT: pand %xmm5, %xmm2
; X32-SSE-NEXT: por %xmm4, %xmm2
; X32-SSE-NEXT: psrlw $8, %xmm2
; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X32-SSE-NEXT: pxor %xmm4, %xmm4
; X32-SSE-NEXT: pcmpgtw %xmm1, %xmm4
; X32-SSE-NEXT: movdqa %xmm4, %xmm5
; X32-SSE-NEXT: pandn %xmm0, %xmm5
; X32-SSE-NEXT: psraw $4, %xmm0
; X32-SSE-NEXT: pand %xmm4, %xmm0
; X32-SSE-NEXT: por %xmm5, %xmm0
; X32-SSE-NEXT: paddw %xmm1, %xmm1
; X32-SSE-NEXT: pxor %xmm4, %xmm4
; X32-SSE-NEXT: pcmpgtw %xmm1, %xmm4
; X32-SSE-NEXT: movdqa %xmm4, %xmm5
; X32-SSE-NEXT: pandn %xmm0, %xmm5
; X32-SSE-NEXT: psraw $2, %xmm0
; X32-SSE-NEXT: pand %xmm4, %xmm0
; X32-SSE-NEXT: por %xmm5, %xmm0
; X32-SSE-NEXT: paddw %xmm1, %xmm1
; X32-SSE-NEXT: pcmpgtw %xmm1, %xmm3
; X32-SSE-NEXT: movdqa %xmm3, %xmm1
; X32-SSE-NEXT: pandn %xmm0, %xmm1
; X32-SSE-NEXT: psraw $1, %xmm0
; X32-SSE-NEXT: pand %xmm3, %xmm0
; X32-SSE-NEXT: por %xmm1, %xmm0
; X32-SSE-NEXT: psrlw $8, %xmm0
; X32-SSE-NEXT: packuswb %xmm2, %xmm0
; X32-SSE-NEXT: retl
  %shift = ashr <16 x i8> %a, %b
  ret <16 x i8> %shift
}
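
; Note (reader annotation, not autogenerated): there is no native i8
; shift, so each half of the vector is widened to i16 (interleaving the
; bytes into the high half of each word), run through the same psraw
; blend ladder, shifted back down with psrlw $8 and repacked with
; packuswb. XOP handles this directly with vpshab on negated amounts.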
;
; Uniform Variable Shifts
;

define <2 x i64> @splatvar_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE-LABEL: splatvar_shift_v2i64:
; SSE: # BB#0:
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; SSE-NEXT: psrlq %xmm1, %xmm2
; SSE-NEXT: psrlq %xmm1, %xmm0
; SSE-NEXT: pxor %xmm2, %xmm0
; SSE-NEXT: psubq %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: splatvar_shift_v2i64:
; AVX: # BB#0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX-NEXT: vpsrlq %xmm1, %xmm2, %xmm2
; AVX-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpsubq %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v2i64:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm1
; XOPAVX1-NEXT: vpshaq %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v2i64:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpbroadcastq %xmm1, %xmm1
; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX2-NEXT: vpsubq %xmm1, %xmm2, %xmm1
; XOPAVX2-NEXT: vpshaq %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: splatvar_shift_v2i64:
; AVX512: ## BB#0:
; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX512-NEXT: vpsrlq %xmm1, %xmm2, %xmm2
; AVX512-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpsubq %xmm2, %xmm0, %xmm0
; AVX512-NEXT: retq
;
; X32-SSE-LABEL: splatvar_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movq {{.*#+}} xmm1 = xmm1[0],zero
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
; X32-SSE-NEXT: psrlq %xmm1, %xmm2
; X32-SSE-NEXT: psrlq %xmm1, %xmm0
; X32-SSE-NEXT: pxor %xmm2, %xmm0
; X32-SSE-NEXT: psubq %xmm2, %xmm0
; X32-SSE-NEXT: retl
  %splat = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer
  %shift = ashr <2 x i64> %a, %splat
  ret <2 x i64> %shift
}
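
; Note (reader annotation, not autogenerated): with a splatted amount the
; whole-vector form of psrlq (count taken from the low 64 bits of the
; operand) suffices, leaving only the single sign-bit xor/psubq fixup.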
define <4 x i32> @splatvar_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-LABEL: splatvar_shift_v4i32:
; SSE2: # BB#0:
; SSE2-NEXT: xorps %xmm2, %xmm2
; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
; SSE2-NEXT: psrad %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: splatvar_shift_v4i32:
; SSE41: # BB#0:
; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1],xmm2[2,3,4,5,6,7]
; SSE41-NEXT: psrad %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: splatvar_shift_v4i32:
; AVX: # BB#0:
; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
; AVX-NEXT: vpsrad %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatvar_shift_v4i32:
; XOP: # BB#0:
; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
; XOP-NEXT: vpsrad %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: splatvar_shift_v4i32:
; AVX512: ## BB#0:
; AVX512-NEXT: vxorps %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vmovss {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3]
; AVX512-NEXT: vpsrad %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
;
; X32-SSE-LABEL: splatvar_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: xorps %xmm2, %xmm2
; X32-SSE-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
; X32-SSE-NEXT: psrad %xmm2, %xmm0
; X32-SSE-NEXT: retl
  %splat = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
  %shift = ashr <4 x i32> %a, %splat
  ret <4 x i32> %shift
}
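
; Note (reader annotation, not autogenerated): psrad takes its count from
; the low 64 bits of the register, so the splatted i32 amount is first
; zero-extended (movss/pblendw against a zeroed register) to keep the
; upper bits from corrupting the count.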
define <8 x i16> @splatvar_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE2-LABEL: splatvar_shift_v8i16:
; SSE2: # BB#0:
; SSE2-NEXT: pextrw $0, %xmm1, %eax
; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: psraw %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: splatvar_shift_v8i16:
; SSE41: # BB#0:
; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3,4,5,6,7]
; SSE41-NEXT: psraw %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: splatvar_shift_v8i16:
; AVX: # BB#0:
; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5,6,7]
; AVX-NEXT: vpsraw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatvar_shift_v8i16:
; XOP: # BB#0:
; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5,6,7]
; XOP-NEXT: vpsraw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: splatvar_shift_v8i16:
; AVX512: ## BB#0:
; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5,6,7]
; AVX512-NEXT: vpsraw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
;
; X32-SSE-LABEL: splatvar_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: pextrw $0, %xmm1, %eax
; X32-SSE-NEXT: movd %eax, %xmm1
; X32-SSE-NEXT: psraw %xmm1, %xmm0
; X32-SSE-NEXT: retl
  %splat = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
  %shift = ashr <8 x i16> %a, %splat
  ret <8 x i16> %shift
}
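
; Note (reader annotation, not autogenerated): the i16 amount likewise
; has to be zero-extended for psraw; SSE2 does this with pextrw $0 plus
; movd (pextrw implicitly zero-extends to the i32 register), while
; SSE4.1 and later blend against a zeroed register instead.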
define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE2-LABEL: splatvar_shift_v16i8:
; SSE2: # BB#0:
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,1,1]
; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; SSE2-NEXT: psllw $5, %xmm3
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: pxor %xmm5, %xmm5
; SSE2-NEXT: pcmpgtw %xmm4, %xmm5
; SSE2-NEXT: movdqa %xmm5, %xmm6
; SSE2-NEXT: pandn %xmm1, %xmm6
; SSE2-NEXT: psraw $4, %xmm1
; SSE2-NEXT: pand %xmm5, %xmm1
; SSE2-NEXT: por %xmm6, %xmm1
; SSE2-NEXT: paddw %xmm4, %xmm4
; SSE2-NEXT: pxor %xmm5, %xmm5
; SSE2-NEXT: pcmpgtw %xmm4, %xmm5
; SSE2-NEXT: movdqa %xmm5, %xmm6
; SSE2-NEXT: pandn %xmm1, %xmm6
; SSE2-NEXT: psraw $2, %xmm1
; SSE2-NEXT: pand %xmm5, %xmm1
; SSE2-NEXT: por %xmm6, %xmm1
; SSE2-NEXT: paddw %xmm4, %xmm4
; SSE2-NEXT: pxor %xmm5, %xmm5
; SSE2-NEXT: pcmpgtw %xmm4, %xmm5
; SSE2-NEXT: movdqa %xmm5, %xmm4
; SSE2-NEXT: pandn %xmm1, %xmm4
; SSE2-NEXT: psraw $1, %xmm1
; SSE2-NEXT: pand %xmm5, %xmm1
; SSE2-NEXT: por %xmm4, %xmm1
; SSE2-NEXT: psrlw $8, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: pcmpgtw %xmm3, %xmm4
; SSE2-NEXT: movdqa %xmm4, %xmm5
; SSE2-NEXT: pandn %xmm0, %xmm5
; SSE2-NEXT: psraw $4, %xmm0
; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: por %xmm5, %xmm0
; SSE2-NEXT: paddw %xmm3, %xmm3
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: pcmpgtw %xmm3, %xmm4
; SSE2-NEXT: movdqa %xmm4, %xmm5
; SSE2-NEXT: pandn %xmm0, %xmm5
; SSE2-NEXT: psraw $2, %xmm0
; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: por %xmm5, %xmm0
; SSE2-NEXT: paddw %xmm3, %xmm3
; SSE2-NEXT: pcmpgtw %xmm3, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: pandn %xmm0, %xmm3
; SSE2-NEXT: psraw $1, %xmm0
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: psrlw $8, %xmm0
; SSE2-NEXT: packuswb %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: splatvar_shift_v16i8:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: pshufb %xmm0, %xmm1
; SSE41-NEXT: psllw $5, %xmm1
; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
; SSE41-NEXT: movdqa %xmm3, %xmm4
; SSE41-NEXT: psraw $4, %xmm4
; SSE41-NEXT: pblendvb %xmm4, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm4
; SSE41-NEXT: psraw $2, %xmm4
; SSE41-NEXT: paddw %xmm0, %xmm0
; SSE41-NEXT: pblendvb %xmm4, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm4
; SSE41-NEXT: psraw $1, %xmm4
; SSE41-NEXT: paddw %xmm0, %xmm0
; SSE41-NEXT: pblendvb %xmm4, %xmm3
; SSE41-NEXT: psrlw $8, %xmm3
; SSE41-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE41-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: psraw $4, %xmm2
; SSE41-NEXT: pblendvb %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: psraw $2, %xmm2
; SSE41-NEXT: paddw %xmm0, %xmm0
; SSE41-NEXT: pblendvb %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: psraw $1, %xmm2
; SSE41-NEXT: paddw %xmm0, %xmm0
; SSE41-NEXT: pblendvb %xmm2, %xmm1
; SSE41-NEXT: psrlw $8, %xmm1
; SSE41-NEXT: packuswb %xmm3, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: splatvar_shift_v16i8:
; AVX1: # BB#0:
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpsraw $4, %xmm3, %xmm4
; AVX1-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpsraw $2, %xmm3, %xmm4
; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpsraw $1, %xmm3, %xmm4
; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX1-NEXT: vpsraw $4, %xmm0, %xmm3
; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpsraw $2, %xmm0, %xmm3
; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpsraw $1, %xmm0, %xmm3
; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_shift_v16i8:
; AVX2: # BB#0:
; AVX2-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX2-NEXT: vpsllw $5, %xmm1, %xmm1
; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX2-NEXT: vpsraw $4, %xmm3, %xmm4
; AVX2-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpsraw $2, %xmm3, %xmm4
; AVX2-NEXT: vpaddw %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpsraw $1, %xmm3, %xmm4
; AVX2-NEXT: vpaddw %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
; AVX2-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX2-NEXT: vpsraw $4, %xmm0, %xmm3
; AVX2-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX2-NEXT: vpsraw $2, %xmm0, %xmm3
; AVX2-NEXT: vpaddw %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX2-NEXT: vpsraw $1, %xmm0, %xmm3
; AVX2-NEXT: vpaddw %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX2-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v16i8:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm1
; XOPAVX1-NEXT: vpshab %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v16i8:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpbroadcastb %xmm1, %xmm1
; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX2-NEXT: vpsubb %xmm1, %xmm2, %xmm1
; XOPAVX2-NEXT: vpshab %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: splatvar_shift_v16i8:
; AVX512: ## BB#0:
; AVX512-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX512-NEXT: vpsllw $5, %xmm1, %xmm1
; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX512-NEXT: vpsraw $4, %xmm3, %xmm4
; AVX512-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
; AVX512-NEXT: vpsraw $2, %xmm3, %xmm4
; AVX512-NEXT: vpaddw %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
; AVX512-NEXT: vpsraw $1, %xmm3, %xmm4
; AVX512-NEXT: vpaddw %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
; AVX512-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX512-NEXT: vpsraw $4, %xmm0, %xmm3
; AVX512-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX512-NEXT: vpsraw $2, %xmm0, %xmm3
; AVX512-NEXT: vpaddw %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX512-NEXT: vpsraw $1, %xmm0, %xmm3
; AVX512-NEXT: vpaddw %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX512-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX512-NEXT: retq
;
; X32-SSE-LABEL: splatvar_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
; X32-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,1,1]
; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; X32-SSE-NEXT: psllw $5, %xmm3
; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
; X32-SSE-NEXT: pxor %xmm2, %xmm2
; X32-SSE-NEXT: pxor %xmm5, %xmm5
; X32-SSE-NEXT: pcmpgtw %xmm4, %xmm5
; X32-SSE-NEXT: movdqa %xmm5, %xmm6
; X32-SSE-NEXT: pandn %xmm1, %xmm6
; X32-SSE-NEXT: psraw $4, %xmm1
; X32-SSE-NEXT: pand %xmm5, %xmm1
; X32-SSE-NEXT: por %xmm6, %xmm1
; X32-SSE-NEXT: paddw %xmm4, %xmm4
; X32-SSE-NEXT: pxor %xmm5, %xmm5
; X32-SSE-NEXT: pcmpgtw %xmm4, %xmm5
; X32-SSE-NEXT: movdqa %xmm5, %xmm6
; X32-SSE-NEXT: pandn %xmm1, %xmm6
; X32-SSE-NEXT: psraw $2, %xmm1
; X32-SSE-NEXT: pand %xmm5, %xmm1
; X32-SSE-NEXT: por %xmm6, %xmm1
; X32-SSE-NEXT: paddw %xmm4, %xmm4
; X32-SSE-NEXT: pxor %xmm5, %xmm5
; X32-SSE-NEXT: pcmpgtw %xmm4, %xmm5
; X32-SSE-NEXT: movdqa %xmm5, %xmm4
; X32-SSE-NEXT: pandn %xmm1, %xmm4
; X32-SSE-NEXT: psraw $1, %xmm1
; X32-SSE-NEXT: pand %xmm5, %xmm1
; X32-SSE-NEXT: por %xmm4, %xmm1
; X32-SSE-NEXT: psrlw $8, %xmm1
; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X32-SSE-NEXT: pxor %xmm4, %xmm4
; X32-SSE-NEXT: pcmpgtw %xmm3, %xmm4
; X32-SSE-NEXT: movdqa %xmm4, %xmm5
; X32-SSE-NEXT: pandn %xmm0, %xmm5
; X32-SSE-NEXT: psraw $4, %xmm0
; X32-SSE-NEXT: pand %xmm4, %xmm0
; X32-SSE-NEXT: por %xmm5, %xmm0
; X32-SSE-NEXT: paddw %xmm3, %xmm3
; X32-SSE-NEXT: pxor %xmm4, %xmm4
; X32-SSE-NEXT: pcmpgtw %xmm3, %xmm4
; X32-SSE-NEXT: movdqa %xmm4, %xmm5
; X32-SSE-NEXT: pandn %xmm0, %xmm5
; X32-SSE-NEXT: psraw $2, %xmm0
; X32-SSE-NEXT: pand %xmm4, %xmm0
; X32-SSE-NEXT: por %xmm5, %xmm0
; X32-SSE-NEXT: paddw %xmm3, %xmm3
; X32-SSE-NEXT: pcmpgtw %xmm3, %xmm2
; X32-SSE-NEXT: movdqa %xmm2, %xmm3
; X32-SSE-NEXT: pandn %xmm0, %xmm3
; X32-SSE-NEXT: psraw $1, %xmm0
; X32-SSE-NEXT: pand %xmm2, %xmm0
; X32-SSE-NEXT: por %xmm3, %xmm0
; X32-SSE-NEXT: psrlw $8, %xmm0
; X32-SSE-NEXT: packuswb %xmm1, %xmm0
; X32-SSE-NEXT: retl
  %splat = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
  %shift = ashr <16 x i8> %a, %splat
  ret <16 x i8> %shift
}
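
; Note (reader annotation, not autogenerated): a splatted byte amount
; does not simplify the non-XOP lowering: the amount is broadcast
; (pshufb/vpbroadcastb) and then fed into the same widen/blend-ladder/
; repack sequence as the fully variable case.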
;
; Constant Shifts
;

define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind {
; SSE2-LABEL: constant_shift_v2i64:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrlq $7, %xmm1
; SSE2-NEXT: psrlq $1, %xmm0
; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE2-NEXT: movapd {{.*#+}} xmm0 = [4611686018427387904,72057594037927936]
; SSE2-NEXT: xorpd %xmm0, %xmm1
; SSE2-NEXT: psubq %xmm0, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_shift_v2i64:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psrlq $7, %xmm1
; SSE41-NEXT: psrlq $1, %xmm0
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [4611686018427387904,72057594037927936]
; SSE41-NEXT: pxor %xmm1, %xmm0
; SSE41-NEXT: psubq %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: constant_shift_v2i64:
; AVX1: # BB#0:
; AVX1-NEXT: vpsrlq $7, %xmm0, %xmm1
; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [4611686018427387904,72057594037927936]
; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_shift_v2i64:
; AVX2: # BB#0:
; AVX2-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [4611686018427387904,72057594037927936]
; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; XOP-LABEL: constant_shift_v2i64:
; XOP: # BB#0:
; XOP-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOP-NEXT: vpsubq {{.*}}(%rip), %xmm1, %xmm1
; XOP-NEXT: vpshaq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: constant_shift_v2i64:
; AVX512: ## BB#0:
; AVX512-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [4611686018427387904,72057594037927936]
; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpsubq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
;
; X32-SSE-LABEL: constant_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,2147483648,0,2147483648]
; X32-SSE-NEXT: movdqa %xmm1, %xmm2
; X32-SSE-NEXT: psrlq $7, %xmm2
; X32-SSE-NEXT: psrlq $1, %xmm1
; X32-SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psrlq $7, %xmm1
; X32-SSE-NEXT: psrlq $1, %xmm0
; X32-SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; X32-SSE-NEXT: xorpd %xmm2, %xmm1
; X32-SSE-NEXT: psubq %xmm2, %xmm1
; X32-SSE-NEXT: movdqa %xmm1, %xmm0
; X32-SSE-NEXT: retl
  %shift = ashr <2 x i64> %a, <i64 1, i64 7>
  ret <2 x i64> %shift
}
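
; Note (reader annotation, not autogenerated): for the constant amounts
; <i64 1, i64 7> the sign-bias masks are known at compile time:
; [4611686018427387904,72057594037927936] is [2^63 >> 1, 2^63 >> 7], so
; only the immediate psrlq shifts and the final xor/psubq remain.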
define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) nounwind {
; SSE2-LABEL: constant_shift_v4i32:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrad $7, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psrad $5, %xmm2
; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psrad $6, %xmm2
; SSE2-NEXT: psrad $4, %xmm0
; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_shift_v4i32:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psrad $7, %xmm1
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psrad $5, %xmm2
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psrad $6, %xmm1
; SSE41-NEXT: psrad $4, %xmm0
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: constant_shift_v4i32:
; AVX1: # BB#0:
; AVX1-NEXT: vpsrad $7, %xmm0, %xmm1
; AVX1-NEXT: vpsrad $5, %xmm0, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: vpsrad $6, %xmm0, %xmm2
; AVX1-NEXT: vpsrad $4, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_shift_v4i32:
; AVX2: # BB#0:
; AVX2-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v4i32:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpshad {{.*}}(%rip), %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v4i32:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: constant_shift_v4i32:
; AVX512: ## BB#0:
; AVX512-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: retq
;
; X32-SSE-LABEL: constant_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psrad $7, %xmm1
; X32-SSE-NEXT: movdqa %xmm0, %xmm2
; X32-SSE-NEXT: psrad $5, %xmm2
; X32-SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
; X32-SSE-NEXT: movdqa %xmm0, %xmm2
; X32-SSE-NEXT: psrad $6, %xmm2
; X32-SSE-NEXT: psrad $4, %xmm0
; X32-SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X32-SSE-NEXT: retl
  %shift = ashr <4 x i32> %a, <i32 4, i32 5, i32 6, i32 7>
  ret <4 x i32> %shift
}
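
; Note (reader annotation, not autogenerated): constant i32 amounts
; become one immediate psrad per distinct count plus blends on SSE/AVX1;
; AVX2 and XOP load the amounts from the constant pool for a single
; vpsravd/vpshad.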
define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
; SSE2-LABEL: constant_shift_v8i16:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psraw $4, %xmm1
; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,2,2,3]
; SSE2-NEXT: psraw $2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [65535,0,65535,0,65535,0,65535,0]
; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: pand %xmm0, %xmm1
; SSE2-NEXT: psraw $1, %xmm2
; SSE2-NEXT: pandn %xmm2, %xmm0
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_shift_v8i16:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psraw $4, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: psraw $2, %xmm2
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: psraw $1, %xmm0
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3],xmm2[4],xmm0[5],xmm2[6],xmm0[7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: constant_shift_v8i16:
; AVX1: # BB#0:
; AVX1-NEXT: vpsraw $4, %xmm0, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: vpsraw $2, %xmm0, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; AVX1-NEXT: vpsraw $1, %xmm0, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_shift_v8i16:
; AVX2: # BB#0:
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; XOP-LABEL: constant_shift_v8i16:
; XOP: # BB#0:
; XOP-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOP-NEXT: vpsubw {{.*}}(%rip), %xmm1, %xmm1
; XOP-NEXT: vpshaw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: constant_shift_v8i16:
; AVX512: ## BB#0:
; AVX512-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7]
; AVX512-NEXT: vpsravw %zmm1, %zmm0, %zmm0
; AVX512-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
; AVX512-NEXT: retq
;
; X32-SSE-LABEL: constant_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psraw $4, %xmm1
; X32-SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,2,2,3]
; X32-SSE-NEXT: psraw $2, %xmm1
; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3]
; X32-SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; X32-SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,0,65535,0,65535,0,65535,0]
; X32-SSE-NEXT: movdqa %xmm2, %xmm1
; X32-SSE-NEXT: pand %xmm0, %xmm1
; X32-SSE-NEXT: psraw $1, %xmm2
; X32-SSE-NEXT: pandn %xmm2, %xmm0
; X32-SSE-NEXT: por %xmm1, %xmm0
; X32-SSE-NEXT: retl
  %shift = ashr <8 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>
  ret <8 x i16> %shift
}
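
; Note (reader annotation, not autogenerated): for the constant amounts
; 0 through 7 the blend masks are known at compile time, so the
; compare/paddw ladder of the variable case disappears and only the
; psraw $4/$2/$1 steps with constant blends remain.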
define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: constant_shift_v16i8:
; SSE2:       # BB#0:
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
; SSE2-NEXT:    psllw $5, %xmm3
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
; SSE2-NEXT:    pxor %xmm2, %xmm2
; SSE2-NEXT:    pxor %xmm5, %xmm5
; SSE2-NEXT:    pcmpgtw %xmm4, %xmm5
; SSE2-NEXT:    movdqa %xmm5, %xmm6
; SSE2-NEXT:    pandn %xmm1, %xmm6
; SSE2-NEXT:    psraw $4, %xmm1
; SSE2-NEXT:    pand %xmm5, %xmm1
; SSE2-NEXT:    por %xmm6, %xmm1
; SSE2-NEXT:    paddw %xmm4, %xmm4
; SSE2-NEXT:    pxor %xmm5, %xmm5
; SSE2-NEXT:    pcmpgtw %xmm4, %xmm5
; SSE2-NEXT:    movdqa %xmm5, %xmm6
; SSE2-NEXT:    pandn %xmm1, %xmm6
; SSE2-NEXT:    psraw $2, %xmm1
; SSE2-NEXT:    pand %xmm5, %xmm1
; SSE2-NEXT:    por %xmm6, %xmm1
; SSE2-NEXT:    paddw %xmm4, %xmm4
; SSE2-NEXT:    pxor %xmm5, %xmm5
; SSE2-NEXT:    pcmpgtw %xmm4, %xmm5
; SSE2-NEXT:    movdqa %xmm5, %xmm4
; SSE2-NEXT:    pandn %xmm1, %xmm4
; SSE2-NEXT:    psraw $1, %xmm1
; SSE2-NEXT:    pand %xmm5, %xmm1
; SSE2-NEXT:    por %xmm4, %xmm1
; SSE2-NEXT:    psrlw $8, %xmm1
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    pxor %xmm4, %xmm4
; SSE2-NEXT:    pcmpgtw %xmm3, %xmm4
; SSE2-NEXT:    movdqa %xmm4, %xmm5
; SSE2-NEXT:    pandn %xmm0, %xmm5
; SSE2-NEXT:    psraw $4, %xmm0
; SSE2-NEXT:    pand %xmm4, %xmm0
; SSE2-NEXT:    por %xmm5, %xmm0
; SSE2-NEXT:    paddw %xmm3, %xmm3
; SSE2-NEXT:    pxor %xmm4, %xmm4
; SSE2-NEXT:    pcmpgtw %xmm3, %xmm4
; SSE2-NEXT:    movdqa %xmm4, %xmm5
; SSE2-NEXT:    pandn %xmm0, %xmm5
; SSE2-NEXT:    psraw $2, %xmm0
; SSE2-NEXT:    pand %xmm4, %xmm0
; SSE2-NEXT:    por %xmm5, %xmm0
; SSE2-NEXT:    paddw %xmm3, %xmm3
; SSE2-NEXT:    pcmpgtw %xmm3, %xmm2
; SSE2-NEXT:    movdqa %xmm2, %xmm3
; SSE2-NEXT:    pandn %xmm0, %xmm3
; SSE2-NEXT:    psraw $1, %xmm0
; SSE2-NEXT:    pand %xmm2, %xmm0
; SSE2-NEXT:    por %xmm3, %xmm0
; SSE2-NEXT:    psrlw $8, %xmm0
; SSE2-NEXT:    packuswb %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: constant_shift_v16i8:
; SSE41:       # BB#0:
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
; SSE41-NEXT:    psllw $5, %xmm3
; SSE41-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15]
; SSE41-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; SSE41-NEXT:    movdqa %xmm2, %xmm4
; SSE41-NEXT:    psraw $4, %xmm4
; SSE41-NEXT:    pblendvb %xmm4, %xmm2
; SSE41-NEXT:    movdqa %xmm2, %xmm4
; SSE41-NEXT:    psraw $2, %xmm4
; SSE41-NEXT:    paddw %xmm0, %xmm0
; SSE41-NEXT:    pblendvb %xmm4, %xmm2
; SSE41-NEXT:    movdqa %xmm2, %xmm4
; SSE41-NEXT:    psraw $1, %xmm4
; SSE41-NEXT:    paddw %xmm0, %xmm0
; SSE41-NEXT:    pblendvb %xmm4, %xmm2
; SSE41-NEXT:    psrlw $8, %xmm2
; SSE41-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
; SSE41-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE41-NEXT:    movdqa %xmm1, %xmm3
; SSE41-NEXT:    psraw $4, %xmm3
; SSE41-NEXT:    pblendvb %xmm3, %xmm1
; SSE41-NEXT:    movdqa %xmm1, %xmm3
; SSE41-NEXT:    psraw $2, %xmm3
; SSE41-NEXT:    paddw %xmm0, %xmm0
; SSE41-NEXT:    pblendvb %xmm3, %xmm1
; SSE41-NEXT:    movdqa %xmm1, %xmm3
; SSE41-NEXT:    psraw $1, %xmm3
; SSE41-NEXT:    paddw %xmm0, %xmm0
; SSE41-NEXT:    pblendvb %xmm3, %xmm1
; SSE41-NEXT:    psrlw $8, %xmm1
; SSE41-NEXT:    packuswb %xmm2, %xmm1
; SSE41-NEXT:    movdqa %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: constant_shift_v16i8:
; AVX:       # BB#0:
; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
; AVX-NEXT:    vpsllw $5, %xmm1, %xmm1
; AVX-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX-NEXT:    vpsraw $4, %xmm3, %xmm4
; AVX-NEXT:    vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
; AVX-NEXT:    vpsraw $2, %xmm3, %xmm4
; AVX-NEXT:    vpaddw %xmm2, %xmm2, %xmm2
; AVX-NEXT:    vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
; AVX-NEXT:    vpsraw $1, %xmm3, %xmm4
; AVX-NEXT:    vpaddw %xmm2, %xmm2, %xmm2
; AVX-NEXT:    vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
; AVX-NEXT:    vpsrlw $8, %xmm2, %xmm2
; AVX-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX-NEXT:    vpsraw $4, %xmm0, %xmm3
; AVX-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX-NEXT:    vpsraw $2, %xmm0, %xmm3
; AVX-NEXT:    vpaddw %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX-NEXT:    vpsraw $1, %xmm0, %xmm3
; AVX-NEXT:    vpaddw %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX-NEXT:    vpsrlw $8, %xmm0, %xmm0
; AVX-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; XOP-LABEL: constant_shift_v16i8:
; XOP:       # BB#0:
; XOP-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; XOP-NEXT:    vpsubb {{.*}}(%rip), %xmm1, %xmm1
; XOP-NEXT:    vpshab %xmm1, %xmm0, %xmm0
; XOP-NEXT:    retq
;
; AVX512-LABEL: constant_shift_v16i8:
; AVX512:       ## BB#0:
; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
; AVX512-NEXT:    vpsllw $5, %xmm1, %xmm1
; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX512-NEXT:    vpsraw $4, %xmm3, %xmm4
; AVX512-NEXT:    vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
; AVX512-NEXT:    vpsraw $2, %xmm3, %xmm4
; AVX512-NEXT:    vpaddw %xmm2, %xmm2, %xmm2
; AVX512-NEXT:    vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
; AVX512-NEXT:    vpsraw $1, %xmm3, %xmm4
; AVX512-NEXT:    vpaddw %xmm2, %xmm2, %xmm2
; AVX512-NEXT:    vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
; AVX512-NEXT:    vpsrlw $8, %xmm2, %xmm2
; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX512-NEXT:    vpsraw $4, %xmm0, %xmm3
; AVX512-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX512-NEXT:    vpsraw $2, %xmm0, %xmm3
; AVX512-NEXT:    vpaddw %xmm1, %xmm1, %xmm1
; AVX512-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX512-NEXT:    vpsraw $1, %xmm0, %xmm3
; AVX512-NEXT:    vpaddw %xmm1, %xmm1, %xmm1
; AVX512-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm0
; AVX512-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    retq
;
; X32-SSE-LABEL: constant_shift_v16i8:
; X32-SSE:       # BB#0:
; X32-SSE-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; X32-SSE-NEXT:    movdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
; X32-SSE-NEXT:    psllw $5, %xmm3
; X32-SSE-NEXT:    punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
; X32-SSE-NEXT:    pxor %xmm2, %xmm2
; X32-SSE-NEXT:    pxor %xmm5, %xmm5
; X32-SSE-NEXT:    pcmpgtw %xmm4, %xmm5
; X32-SSE-NEXT:    movdqa %xmm5, %xmm6
; X32-SSE-NEXT:    pandn %xmm1, %xmm6
; X32-SSE-NEXT:    psraw $4, %xmm1
; X32-SSE-NEXT:    pand %xmm5, %xmm1
; X32-SSE-NEXT:    por %xmm6, %xmm1
; X32-SSE-NEXT:    paddw %xmm4, %xmm4
; X32-SSE-NEXT:    pxor %xmm5, %xmm5
; X32-SSE-NEXT:    pcmpgtw %xmm4, %xmm5
; X32-SSE-NEXT:    movdqa %xmm5, %xmm6
; X32-SSE-NEXT:    pandn %xmm1, %xmm6
; X32-SSE-NEXT:    psraw $2, %xmm1
; X32-SSE-NEXT:    pand %xmm5, %xmm1
; X32-SSE-NEXT:    por %xmm6, %xmm1
; X32-SSE-NEXT:    paddw %xmm4, %xmm4
; X32-SSE-NEXT:    pxor %xmm5, %xmm5
; X32-SSE-NEXT:    pcmpgtw %xmm4, %xmm5
; X32-SSE-NEXT:    movdqa %xmm5, %xmm4
; X32-SSE-NEXT:    pandn %xmm1, %xmm4
; X32-SSE-NEXT:    psraw $1, %xmm1
; X32-SSE-NEXT:    pand %xmm5, %xmm1
; X32-SSE-NEXT:    por %xmm4, %xmm1
; X32-SSE-NEXT:    psrlw $8, %xmm1
; X32-SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X32-SSE-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X32-SSE-NEXT:    pxor %xmm4, %xmm4
; X32-SSE-NEXT:    pcmpgtw %xmm3, %xmm4
; X32-SSE-NEXT:    movdqa %xmm4, %xmm5
; X32-SSE-NEXT:    pandn %xmm0, %xmm5
; X32-SSE-NEXT:    psraw $4, %xmm0
; X32-SSE-NEXT:    pand %xmm4, %xmm0
; X32-SSE-NEXT:    por %xmm5, %xmm0
; X32-SSE-NEXT:    paddw %xmm3, %xmm3
; X32-SSE-NEXT:    pxor %xmm4, %xmm4
; X32-SSE-NEXT:    pcmpgtw %xmm3, %xmm4
; X32-SSE-NEXT:    movdqa %xmm4, %xmm5
; X32-SSE-NEXT:    pandn %xmm0, %xmm5
; X32-SSE-NEXT:    psraw $2, %xmm0
; X32-SSE-NEXT:    pand %xmm4, %xmm0
; X32-SSE-NEXT:    por %xmm5, %xmm0
; X32-SSE-NEXT:    paddw %xmm3, %xmm3
; X32-SSE-NEXT:    pcmpgtw %xmm3, %xmm2
; X32-SSE-NEXT:    movdqa %xmm2, %xmm3
; X32-SSE-NEXT:    pandn %xmm0, %xmm3
; X32-SSE-NEXT:    psraw $1, %xmm0
; X32-SSE-NEXT:    pand %xmm2, %xmm0
; X32-SSE-NEXT:    por %xmm3, %xmm0
; X32-SSE-NEXT:    psrlw $8, %xmm0
; X32-SSE-NEXT:    packuswb %xmm1, %xmm0
; X32-SSE-NEXT:    retl
  %shift = ashr <16 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
  ret <16 x i8> %shift
}

;
; Uniform Constant Shifts
;

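; Uniform ashr of v2i64 by 7. There is no 64-bit arithmetic shift before
; AVX-512, so the checks expect the sign halves from psrad and the low halves
; from psrlq, recombined by shuffle (SSE2) or blend (SSE4.1/AVX).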
define <2 x i64> @splatconstant_shift_v2i64(<2 x i64> %a) nounwind {
; SSE2-LABEL: splatconstant_shift_v2i64:
; SSE2:       # BB#0:
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psrad $7, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
; SSE2-NEXT:    psrlq $7, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    retq
;
; SSE41-LABEL: splatconstant_shift_v2i64:
; SSE41:       # BB#0:
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    psrad $7, %xmm1
; SSE41-NEXT:    psrlq $7, %xmm0
; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; SSE41-NEXT:    retq
;
; AVX1-LABEL: splatconstant_shift_v2i64:
; AVX1:       # BB#0:
; AVX1-NEXT:    vpsrad $7, %xmm0, %xmm1
; AVX1-NEXT:    vpsrlq $7, %xmm0, %xmm0
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; AVX1-NEXT:    retq
;
; AVX2-LABEL: splatconstant_shift_v2i64:
; AVX2:       # BB#0:
; AVX2-NEXT:    vpsrad $7, %xmm0, %xmm1
; AVX2-NEXT:    vpsrlq $7, %xmm0, %xmm0
; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX2-NEXT:    retq
;
; XOP-LABEL: splatconstant_shift_v2i64:
; XOP:       # BB#0:
; XOP-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; XOP-NEXT:    vpsubq {{.*}}(%rip), %xmm1, %xmm1
; XOP-NEXT:    vpshaq %xmm1, %xmm0, %xmm0
; XOP-NEXT:    retq
;
; AVX512-LABEL: splatconstant_shift_v2i64:
; AVX512:       ## BB#0:
; AVX512-NEXT:    vpsrad $7, %xmm0, %xmm1
; AVX512-NEXT:    vpsrlq $7, %xmm0, %xmm0
; AVX512-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX512-NEXT:    retq
;
; X32-SSE-LABEL: splatconstant_shift_v2i64:
; X32-SSE:       # BB#0:
; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
; X32-SSE-NEXT:    psrad $7, %xmm1
; X32-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
; X32-SSE-NEXT:    psrlq $7, %xmm0
; X32-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X32-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X32-SSE-NEXT:    retl
  %shift = ashr <2 x i64> %a, <i64 7, i64 7>
  ret <2 x i64> %shift
}

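; Uniform ashr of v4i32 by 5: legal on every subtarget, so a single
; psrad/vpsrad is expected.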
define <4 x i32> @splatconstant_shift_v4i32(<4 x i32> %a) nounwind {
; SSE-LABEL: splatconstant_shift_v4i32:
; SSE:       # BB#0:
; SSE-NEXT:    psrad $5, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: splatconstant_shift_v4i32:
; AVX:       # BB#0:
; AVX-NEXT:    vpsrad $5, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; XOP-LABEL: splatconstant_shift_v4i32:
; XOP:       # BB#0:
; XOP-NEXT:    vpsrad $5, %xmm0, %xmm0
; XOP-NEXT:    retq
;
; AVX512-LABEL: splatconstant_shift_v4i32:
; AVX512:       ## BB#0:
; AVX512-NEXT:    vpsrad $5, %xmm0, %xmm0
; AVX512-NEXT:    retq
;
; X32-SSE-LABEL: splatconstant_shift_v4i32:
; X32-SSE:       # BB#0:
; X32-SSE-NEXT:    psrad $5, %xmm0
; X32-SSE-NEXT:    retl
  %shift = ashr <4 x i32> %a, <i32 5, i32 5, i32 5, i32 5>
  ret <4 x i32> %shift
}

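; Uniform ashr of v8i16 by 3: likewise legal everywhere, lowered to a single
; psraw/vpsraw.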
define <8 x i16> @splatconstant_shift_v8i16(<8 x i16> %a) nounwind {
; SSE-LABEL: splatconstant_shift_v8i16:
; SSE:       # BB#0:
; SSE-NEXT:    psraw $3, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: splatconstant_shift_v8i16:
; AVX:       # BB#0:
; AVX-NEXT:    vpsraw $3, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; XOP-LABEL: splatconstant_shift_v8i16:
; XOP:       # BB#0:
; XOP-NEXT:    vpsraw $3, %xmm0, %xmm0
; XOP-NEXT:    retq
;
; AVX512-LABEL: splatconstant_shift_v8i16:
; AVX512:       ## BB#0:
; AVX512-NEXT:    vpsraw $3, %xmm0, %xmm0
; AVX512-NEXT:    retq
;
; X32-SSE-LABEL: splatconstant_shift_v8i16:
; X32-SSE:       # BB#0:
; X32-SSE-NEXT:    psraw $3, %xmm0
; X32-SSE-NEXT:    retl
  %shift = ashr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
  ret <8 x i16> %shift
}

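; Uniform ashr of v16i8 by 3. With no i8 vector shift available, the expected
; lowering is an unsigned psrlw/pand followed by a sign fixup: xor/sub against
; the splatted constant 16 (0x10, the sign bit after shifting right by 3).
; XOP instead negates the shift amount and uses its native vpshab.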
define <16 x i8> @splatconstant_shift_v16i8(<16 x i8> %a) nounwind {
; SSE-LABEL: splatconstant_shift_v16i8:
; SSE:       # BB#0:
; SSE-NEXT:    psrlw $3, %xmm0
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; SSE-NEXT:    pxor %xmm1, %xmm0
; SSE-NEXT:    psubb %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: splatconstant_shift_v16i8:
; AVX:       # BB#0:
; AVX-NEXT:    vpsrlw $3, %xmm0, %xmm0
; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpsubb %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; XOP-LABEL: splatconstant_shift_v16i8:
; XOP:       # BB#0:
; XOP-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; XOP-NEXT:    vpsubb {{.*}}(%rip), %xmm1, %xmm1
; XOP-NEXT:    vpshab %xmm1, %xmm0, %xmm0
; XOP-NEXT:    retq
;
; AVX512-LABEL: splatconstant_shift_v16i8:
; AVX512:       ## BB#0:
; AVX512-NEXT:    vpsrlw $3, %xmm0, %xmm0
; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpsubb %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    retq
;
; X32-SSE-LABEL: splatconstant_shift_v16i8:
; X32-SSE:       # BB#0:
; X32-SSE-NEXT:    psrlw $3, %xmm0
; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT:    movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; X32-SSE-NEXT:    pxor %xmm1, %xmm0
; X32-SSE-NEXT:    psubb %xmm1, %xmm0
; X32-SSE-NEXT:    retl
  %shift = ashr <16 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
  ret <16 x i8> %shift
}