[X86][SSE] Fix tests for vector rotates by splat variable.

We weren't correctly splatting the offset shift.

llvm-svn: 332435

This commit is contained in:
parent a95b850aca
commit 370f3ae49c
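
For context: each splatvar test below materializes a rotate from a shift pair, rotl(x, s) = (x << s) | (x >> (bits - s)), where s is the splat of element 0 of %b. The bug being fixed is that the offset amount (bits - s) was computed from the raw %b operand rather than from %splat, so the right-shift amount was not itself a splat. A minimal sketch of the corrected v4i32 pattern, reassembled from the diff below (the trailing ret is assumed, since the hunks cut off after the or):

define <4 x i32> @splatvar_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
  ; Broadcast element 0 of %b to every lane.
  %splat = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
  ; Offset shift amount: 32 - splat (previously 32 - %b, which was the bug).
  %splat32 = sub <4 x i32> <i32 32, i32 32, i32 32, i32 32>, %splat
  %shl = shl <4 x i32> %a, %splat
  %lshr = lshr <4 x i32> %a, %splat32
  %or = or <4 x i32> %shl, %lshr
  ret <4 x i32> %or
}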
@@ -913,6 +913,7 @@ define <4 x i32> @splatvar_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: xorps %xmm3, %xmm3
; SSE2-NEXT: movss {{.*#+}} xmm3 = xmm1[0],xmm3[1,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [32,32,32,32]
; SSE2-NEXT: psubd %xmm1, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm1
@@ -943,6 +944,7 @@ define <4 x i32> @splatvar_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE41-LABEL: splatvar_rotate_v4i32:
; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [32,32,32,32]
; SSE41-NEXT: psubd %xmm1, %xmm3
; SSE41-NEXT: movdqa %xmm0, %xmm1
@@ -970,6 +972,7 @@ define <4 x i32> @splatvar_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; AVX1-LABEL: splatvar_rotate_v4i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [32,32,32,32]
; AVX1-NEXT: vpsubd %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpslld %xmm2, %xmm0, %xmm2
@@ -991,6 +994,7 @@ define <4 x i32> @splatvar_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; AVX2-LABEL: splatvar_rotate_v4i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
; AVX2-NEXT: vpbroadcastd %xmm1, %xmm1
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
; AVX2-NEXT: vpsubd %xmm1, %xmm3, %xmm1
; AVX2-NEXT: vpslld %xmm2, %xmm0, %xmm2
@@ -998,36 +1002,31 @@ define <4 x i32> @splatvar_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; AVX2-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: splatvar_rotate_v4i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
; AVX512-NEXT: vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
; AVX512-NEXT: vpsubd %xmm1, %xmm3, %xmm1
; AVX512-NEXT: vpslld %xmm2, %xmm0, %xmm2
; AVX512-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX512-NEXT: retq
; AVX512BW-LABEL: splatvar_rotate_v4i32:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-NEXT: vpbroadcastd %xmm1, %xmm1
; AVX512BW-NEXT: vprolvd %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: splatvar_rotate_v4i32:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpbroadcastd %xmm1, %xmm1
; AVX512VL-NEXT: vprolvd %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_rotate_v4i32:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [32,32,32,32]
; XOPAVX1-NEXT: vpsubd %xmm1, %xmm3, %xmm1
; XOPAVX1-NEXT: vpslld %xmm2, %xmm0, %xmm2
; XOPAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX1-NEXT: vpsubd %xmm1, %xmm3, %xmm1
; XOPAVX1-NEXT: vpshld %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vpor %xmm0, %xmm2, %xmm0
; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; XOPAVX1-NEXT: vprotd %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_rotate_v4i32:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
; XOPAVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
; XOPAVX2-NEXT: vpsubd %xmm1, %xmm3, %xmm1
; XOPAVX2-NEXT: vpslld %xmm2, %xmm0, %xmm2
; XOPAVX2-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vpor %xmm0, %xmm2, %xmm0
; XOPAVX2-NEXT: vpbroadcastd %xmm1, %xmm1
; XOPAVX2-NEXT: vprotd %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; X32-SSE-LABEL: splatvar_rotate_v4i32:
@@ -1035,6 +1034,7 @@ define <4 x i32> @splatvar_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; X32-SSE-NEXT: pxor %xmm2, %xmm2
; X32-SSE-NEXT: xorps %xmm3, %xmm3
; X32-SSE-NEXT: movss {{.*#+}} xmm3 = xmm1[0],xmm3[1,2,3]
; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; X32-SSE-NEXT: movdqa {{.*#+}} xmm4 = [32,32,32,32]
; X32-SSE-NEXT: psubd %xmm1, %xmm4
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
@@ -1062,7 +1062,7 @@ define <4 x i32> @splatvar_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; X32-SSE-NEXT: movdqa %xmm1, %xmm0
; X32-SSE-NEXT: retl
%splat = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
%splat32 = sub <4 x i32> <i32 32, i32 32, i32 32, i32 32>, %b
%splat32 = sub <4 x i32> <i32 32, i32 32, i32 32, i32 32>, %splat
%shl = shl <4 x i32> %a, %splat
%lshr = lshr <4 x i32> %a, %splat32
%or = or <4 x i32> %shl, %lshr
@@ -1072,8 +1072,10 @@ define <4 x i32> @splatvar_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
define <8 x i16> @splatvar_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE2-LABEL: splatvar_rotate_v8i16:
; SSE2: # %bb.0:
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[0,0,2,3,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,0,0]
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
; SSE2-NEXT: psubw %xmm1, %xmm2
; SSE2-NEXT: psubw %xmm3, %xmm2
; SSE2-NEXT: pextrw $0, %xmm1, %eax
; SSE2-NEXT: movd %eax, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm1
@@ -1116,6 +1118,8 @@ define <8 x i16> @splatvar_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm4 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; SSE41-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,0,2,3,4,5,6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [16,16,16,16,16,16,16,16]
; SSE41-NEXT: psubw %xmm1, %xmm0
; SSE41-NEXT: movdqa %xmm2, %xmm3
@@ -1150,6 +1154,8 @@ define <8 x i16> @splatvar_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX1-LABEL: splatvar_rotate_v8i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
; AVX1-NEXT: vpsubw %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpsllw %xmm2, %xmm0, %xmm2
@@ -1173,6 +1179,7 @@ define <8 x i16> @splatvar_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX2-LABEL: splatvar_rotate_v8i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX2-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
; AVX2-NEXT: vpsubw %xmm1, %xmm3, %xmm1
; AVX2-NEXT: vpsllw %xmm2, %xmm0, %xmm2
@@ -1189,6 +1196,7 @@ define <8 x i16> @splatvar_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512BW-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
; AVX512BW-NEXT: vpsubw %xmm1, %xmm3, %xmm1
; AVX512BW-NEXT: vpsllw %xmm2, %xmm0, %xmm2
@@ -1200,6 +1208,7 @@ define <8 x i16> @splatvar_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX512VL-LABEL: splatvar_rotate_v8i16:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512VL-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
; AVX512VL-NEXT: vpsubw %xmm1, %xmm3, %xmm1
; AVX512VL-NEXT: vpsllw %xmm2, %xmm0, %xmm2
@@ -1207,22 +1216,25 @@ define <8 x i16> @splatvar_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX512VL-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX512VL-NEXT: retq
;
; XOP-LABEL: splatvar_rotate_v8i16:
; XOP: # %bb.0:
; XOP-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; XOP-NEXT: vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
; XOP-NEXT: vpsubw %xmm1, %xmm3, %xmm1
; XOP-NEXT: vpsllw %xmm2, %xmm0, %xmm2
; XOP-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOP-NEXT: vpsubw %xmm1, %xmm3, %xmm1
; XOP-NEXT: vpshlw %xmm1, %xmm0, %xmm0
; XOP-NEXT: vpor %xmm0, %xmm2, %xmm0
; XOP-NEXT: retq
; XOPAVX1-LABEL: splatvar_rotate_v8i16:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; XOPAVX1-NEXT: vprotw %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_rotate_v8i16:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpbroadcastw %xmm1, %xmm1
; XOPAVX2-NEXT: vprotw %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; X32-SSE-LABEL: splatvar_rotate_v8i16:
; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[0,0,2,3,4,5,6,7]
; X32-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,0,0]
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
; X32-SSE-NEXT: psubw %xmm1, %xmm2
; X32-SSE-NEXT: psubw %xmm3, %xmm2
; X32-SSE-NEXT: pextrw $0, %xmm1, %eax
; X32-SSE-NEXT: movd %eax, %xmm3
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
@@ -1261,7 +1273,7 @@ define <8 x i16> @splatvar_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; X32-SSE-NEXT: por %xmm3, %xmm0
; X32-SSE-NEXT: retl
%splat = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
%splat16 = sub <8 x i16> <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>, %b
%splat16 = sub <8 x i16> <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>, %splat
%shl = shl <8 x i16> %a, %splat
%lshr = lshr <8 x i16> %a, %splat16
%or = or <8 x i16> %shl, %lshr
@@ -1271,11 +1283,11 @@ define <8 x i16> @splatvar_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
define <16 x i8> @splatvar_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE2-LABEL: splatvar_rotate_v16i8:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; SSE2-NEXT: psubb %xmm1, %xmm3
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,0,0,0]
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; SSE2-NEXT: psubb %xmm4, %xmm3
; SSE2-NEXT: psllw $5, %xmm4
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: pxor %xmm1, %xmm1
@@ -1336,9 +1348,9 @@ define <16 x i8> @splatvar_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: pshufb %xmm0, %xmm1
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; SSE41-NEXT: psubb %xmm1, %xmm2
; SSE41-NEXT: pshufb %xmm0, %xmm1
; SSE41-NEXT: psllw $5, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm4
; SSE41-NEXT: paddb %xmm1, %xmm4
@@ -1384,129 +1396,118 @@ define <16 x i8> @splatvar_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX1-LABEL: splatvar_rotate_v16i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX1-NEXT: vpsubb %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpsllw $5, %xmm2, %xmm2
; AVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm3
; AVX1-NEXT: vpsllw $4, %xmm0, %xmm4
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm4, %xmm4
; AVX1-NEXT: vpblendvb %xmm2, %xmm4, %xmm0, %xmm2
; AVX1-NEXT: vpsllw $2, %xmm2, %xmm4
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm4, %xmm4
; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm4
; AVX1-NEXT: vpaddb %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm2
; AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm3
; AVX1-NEXT: vpsllw $4, %xmm0, %xmm4
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm4, %xmm4
; AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm1
; AVX1-NEXT: vpsllw $2, %xmm1, %xmm4
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm4, %xmm4
; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm4
; AVX1-NEXT: vpaddb %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpsllw $5, %xmm2, %xmm2
; AVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm3
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm4
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm4, %xmm4
; AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm1
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpblendvb %xmm2, %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm2
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm2
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vpaddb %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_rotate_v16i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastb %xmm1, %xmm2
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX2-NEXT: vpsubb %xmm1, %xmm3, %xmm1
; AVX2-NEXT: vpsllw $5, %xmm2, %xmm2
; AVX2-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX2-NEXT: vpsubb %xmm1, %xmm2, %xmm2
; AVX2-NEXT: vpsllw $5, %xmm1, %xmm1
; AVX2-NEXT: vpsllw $4, %xmm0, %xmm3
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX2-NEXT: vpblendvb %xmm2, %xmm3, %xmm0, %xmm3
; AVX2-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm3
; AVX2-NEXT: vpsllw $2, %xmm3, %xmm4
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm4, %xmm4
; AVX2-NEXT: vpaddb %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpaddb %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpblendvb %xmm1, %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpaddb %xmm3, %xmm3, %xmm4
; AVX2-NEXT: vpaddb %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
; AVX2-NEXT: vpsllw $5, %xmm1, %xmm1
; AVX2-NEXT: vpaddb %xmm1, %xmm1, %xmm3
; AVX2-NEXT: vpaddb %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpblendvb %xmm1, %xmm4, %xmm3, %xmm1
; AVX2-NEXT: vpsllw $5, %xmm2, %xmm2
; AVX2-NEXT: vpaddb %xmm2, %xmm2, %xmm3
; AVX2-NEXT: vpsrlw $4, %xmm0, %xmm4
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm4, %xmm4
; AVX2-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
; AVX2-NEXT: vpsrlw $2, %xmm0, %xmm1
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX2-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpsrlw $1, %xmm0, %xmm1
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX2-NEXT: vpblendvb %xmm2, %xmm4, %xmm0, %xmm0
; AVX2-NEXT: vpsrlw $2, %xmm0, %xmm2
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX2-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpsrlw $1, %xmm0, %xmm2
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX2-NEXT: vpaddb %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX2-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: splatvar_rotate_v16i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastb %xmm1, %xmm2
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512BW-NEXT: vpsubb %xmm1, %xmm3, %xmm1
; AVX512BW-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512BW-NEXT: vpsubb %xmm1, %xmm2, %xmm2
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX512BW-NEXT: vpsllvw %zmm2, %zmm0, %zmm2
; AVX512BW-NEXT: vpmovwb %zmm2, %ymm2
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX512BW-NEXT: vpsrlvw %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX512BW-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: splatvar_rotate_v16i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpbroadcastb %xmm1, %xmm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512VL-NEXT: vpsubb %xmm1, %xmm3, %xmm1
; AVX512VL-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512VL-NEXT: vpsubb %xmm1, %xmm2, %xmm2
; AVX512VL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512VL-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX512VL-NEXT: vpsllvw %ymm2, %ymm0, %ymm2
; AVX512VL-NEXT: vpmovwb %ymm2, %xmm2
; AVX512VL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512VL-NEXT: vpsrlvw %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: vpsllvw %ymm1, %ymm0, %ymm1
; AVX512VL-NEXT: vpmovwb %ymm1, %xmm1
; AVX512VL-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX512VL-NEXT: vpsrlvw %ymm2, %ymm0, %ymm0
; AVX512VL-NEXT: vpmovwb %ymm0, %xmm0
; AVX512VL-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX512VL-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_rotate_v16i8:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm3
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; XOPAVX1-NEXT: vpsubb %xmm1, %xmm4, %xmm1
; XOPAVX1-NEXT: vpshlb %xmm3, %xmm0, %xmm3
; XOPAVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm1
; XOPAVX1-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vpor %xmm0, %xmm3, %xmm0
; XOPAVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; XOPAVX1-NEXT: vprotb %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_rotate_v16i8:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpbroadcastb %xmm1, %xmm2
; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; XOPAVX2-NEXT: vpsubb %xmm1, %xmm3, %xmm1
; XOPAVX2-NEXT: vpshlb %xmm2, %xmm0, %xmm2
; XOPAVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX2-NEXT: vpsubb %xmm1, %xmm3, %xmm1
; XOPAVX2-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vpor %xmm0, %xmm2, %xmm0
; XOPAVX2-NEXT: vpbroadcastb %xmm1, %xmm1
; XOPAVX2-NEXT: vprotb %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; X32-SSE-LABEL: splatvar_rotate_v16i8:
; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; X32-SSE-NEXT: psubb %xmm1, %xmm3
; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
; X32-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,0,0,0]
; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; X32-SSE-NEXT: psubb %xmm4, %xmm3
; X32-SSE-NEXT: psllw $5, %xmm4
; X32-SSE-NEXT: pxor %xmm2, %xmm2
; X32-SSE-NEXT: pxor %xmm1, %xmm1
@@ -1563,7 +1564,7 @@ define <16 x i8> @splatvar_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; X32-SSE-NEXT: por %xmm1, %xmm0
; X32-SSE-NEXT: retl
%splat = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
%splat8 = sub <16 x i8> <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>, %b
%splat8 = sub <16 x i8> <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>, %splat
%shl = shl <16 x i8> %a, %splat
%lshr = lshr <16 x i8> %a, %splat8
%or = or <16 x i8> %shl, %lshr

@@ -523,93 +523,82 @@ define <4 x i64> @splatvar_rotate_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
define <8 x i32> @splatvar_rotate_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX1-LABEL: splatvar_rotate_v8i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [32,32,32,32]
; AVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
; AVX1-NEXT: vpsubd %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[0,0,0,0]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [32,32,32,32]
; AVX1-NEXT: vpsubd %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT: vpslld %xmm1, %xmm4, %xmm5
; AVX1-NEXT: vpslld %xmm1, %xmm3, %xmm4
; AVX1-NEXT: vpslld %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm1, %ymm1
; AVX1-NEXT: vpsrldq {{.*#+}} xmm5 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vpsrld %xmm5, %xmm4, %xmm5
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm8
; AVX1-NEXT: vpsrldq {{.*#+}} xmm4 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vpsrld %xmm4, %xmm3, %xmm5
; AVX1-NEXT: vpsrlq $32, %xmm2, %xmm6
; AVX1-NEXT: vpsrld %xmm6, %xmm4, %xmm6
; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1,2,3],xmm5[4,5,6,7]
; AVX1-NEXT: vpxor %xmm6, %xmm6, %xmm6
; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm7 = xmm2[2],xmm6[2],xmm2[3],xmm6[3]
; AVX1-NEXT: vpsrld %xmm7, %xmm4, %xmm7
; AVX1-NEXT: vpsrld %xmm6, %xmm3, %xmm7
; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm7[0,1,2,3],xmm5[4,5,6,7]
; AVX1-NEXT: vpxor %xmm7, %xmm7, %xmm7
; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm7 = xmm2[2],xmm7[2],xmm2[3],xmm7[3]
; AVX1-NEXT: vpsrld %xmm7, %xmm3, %xmm1
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
; AVX1-NEXT: vpsrld %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm7[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm5[2,3],xmm2[4,5],xmm5[6,7]
; AVX1-NEXT: vpsrldq {{.*#+}} xmm4 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vpsrld %xmm4, %xmm0, %xmm4
; AVX1-NEXT: vpsrlq $32, %xmm3, %xmm5
; AVX1-NEXT: vpsrld %xmm5, %xmm0, %xmm5
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5,6,7]
; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm5 = xmm3[2],xmm6[2],xmm3[3],xmm6[3]
; AVX1-NEXT: vpsrld %xmm5, %xmm0, %xmm5
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero
; AVX1-NEXT: vpsrld %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm5[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
; AVX1-NEXT: vpsrld %xmm2, %xmm3, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm5[2,3],xmm1[4,5],xmm5[6,7]
; AVX1-NEXT: vpsrld %xmm4, %xmm0, %xmm3
; AVX1-NEXT: vpsrld %xmm6, %xmm0, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
; AVX1-NEXT: vpsrld %xmm7, %xmm0, %xmm4
; AVX1-NEXT: vpsrld %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vorps %ymm0, %ymm8, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_rotate_v8i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [32,32,32,32,32,32,32,32]
; AVX2-NEXT: vpsubd %ymm1, %ymm2, %ymm2
; AVX2-NEXT: vpbroadcastd %xmm1, %ymm2
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm3 = [32,32,32,32,32,32,32,32]
; AVX2-NEXT: vpsubd %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX2-NEXT: vpslld %xmm1, %ymm0, %ymm1
; AVX2-NEXT: vpsrlvd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: splatvar_rotate_v8i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastd {{.*#+}} ymm2 = [32,32,32,32,32,32,32,32]
; AVX512-NEXT: vpsubd %ymm1, %ymm2, %ymm2
; AVX512-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX512-NEXT: vpslld %xmm1, %ymm0, %ymm1
; AVX512-NEXT: vpsrlvd %ymm2, %ymm0, %ymm0
; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512-NEXT: retq
; AVX512BW-LABEL: splatvar_rotate_v8i32:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpbroadcastd %xmm1, %ymm1
; AVX512BW-NEXT: vprolvd %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: splatvar_rotate_v8i32:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpbroadcastd %xmm1, %ymm1
; AVX512VL-NEXT: vprolvd %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_rotate_v8i32:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [32,32,32,32]
; XOPAVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm3
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
; XOPAVX1-NEXT: vpsubd %xmm4, %xmm2, %xmm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; XOPAVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; XOPAVX1-NEXT: vpslld %xmm1, %xmm4, %xmm5
; XOPAVX1-NEXT: vpslld %xmm1, %xmm0, %xmm1
; XOPAVX1-NEXT: vinsertf128 $1, %xmm5, %ymm1, %ymm1
; XOPAVX1-NEXT: vpxor %xmm5, %xmm5, %xmm5
; XOPAVX1-NEXT: vpsubd %xmm2, %xmm5, %xmm2
; XOPAVX1-NEXT: vpshld %xmm2, %xmm4, %xmm2
; XOPAVX1-NEXT: vpsubd %xmm3, %xmm5, %xmm3
; XOPAVX1-NEXT: vpshld %xmm3, %xmm0, %xmm0
; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vprotd %xmm1, %xmm2, %xmm2
; XOPAVX1-NEXT: vprotd %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_rotate_v8i32:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [32,32,32,32,32,32,32,32]
; XOPAVX2-NEXT: vpsubd %ymm1, %ymm2, %ymm2
; XOPAVX2-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; XOPAVX2-NEXT: vpslld %xmm1, %ymm0, %ymm1
; XOPAVX2-NEXT: vpsrlvd %ymm2, %ymm0, %ymm0
; XOPAVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; XOPAVX2-NEXT: vpbroadcastd %xmm1, %ymm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
; XOPAVX2-NEXT: vprotd %xmm3, %xmm2, %xmm2
; XOPAVX2-NEXT: vprotd %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%splat = shufflevector <8 x i32> %b, <8 x i32> undef, <8 x i32> zeroinitializer
%splat32 = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, %b
%splat32 = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, %splat
%shl = shl <8 x i32> %a, %splat
%lshr = lshr <8 x i32> %a, %splat32
%or = or <8 x i32> %shl, %lshr
@@ -619,51 +608,46 @@ define <8 x i32> @splatvar_rotate_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
define <16 x i16> @splatvar_rotate_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX1-LABEL: splatvar_rotate_v16i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
; AVX1-NEXT: vpsubw %xmm1, %xmm2, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
; AVX1-NEXT: vpsubw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm1[0,0,2,3,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
; AVX1-NEXT: vpsubw %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX1-NEXT: vpsllw %xmm1, %xmm4, %xmm5
; AVX1-NEXT: vpsllw %xmm1, %xmm3, %xmm4
; AVX1-NEXT: vpsllw %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm1, %ymm1
; AVX1-NEXT: vpsllw $12, %xmm2, %xmm5
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1
; AVX1-NEXT: vpsllw $12, %xmm2, %xmm4
; AVX1-NEXT: vpsllw $4, %xmm2, %xmm2
; AVX1-NEXT: vpor %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm5
; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm6
; AVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm4, %xmm2
; AVX1-NEXT: vpsrlw $4, %xmm2, %xmm4
; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $2, %xmm2, %xmm4
; AVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm5
; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm4
; AVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm5
; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpsllw $12, %xmm3, %xmm4
; AVX1-NEXT: vpsllw $4, %xmm3, %xmm3
; AVX1-NEXT: vpor %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm4
; AVX1-NEXT: vpor %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm4
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm5
; AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpsrlw $4, %xmm3, %xmm5
; AVX1-NEXT: vpblendvb %xmm4, %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpsrlw $2, %xmm3, %xmm5
; AVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm6
; AVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm5
; AVX1-NEXT: vpaddw %xmm6, %xmm6, %xmm7
; AVX1-NEXT: vpblendvb %xmm7, %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm5
; AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm3
; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm3
; AVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm4
; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm3
; AVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm4
; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm2
; AVX1-NEXT: vpblendvb %xmm4, %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm2
; AVX1-NEXT: vpblendvb %xmm6, %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm2
; AVX1-NEXT: vpblendvb %xmm7, %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
; AVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_rotate_v16i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX2-NEXT: vpsubw %ymm1, %ymm2, %ymm2
; AVX2-NEXT: vpbroadcastw %xmm1, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX2-NEXT: vpsubw %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX2-NEXT: vpsllw %xmm1, %ymm0, %ymm1
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
@@ -682,8 +666,9 @@ define <16 x i16> @splatvar_rotate_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind
; AVX512BW-LABEL: splatvar_rotate_v16i16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512BW-NEXT: vpsubw %ymm1, %ymm2, %ymm2
; AVX512BW-NEXT: vpbroadcastw %xmm1, %ymm2
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm3 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512BW-NEXT: vpsubw %ymm2, %ymm3, %ymm2
; AVX512BW-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512BW-NEXT: vpsllw %xmm1, %ymm0, %ymm1
; AVX512BW-NEXT: vpsrlvw %zmm2, %zmm0, %zmm0
@@ -692,8 +677,9 @@ define <16 x i16> @splatvar_rotate_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind
;
; AVX512VL-LABEL: splatvar_rotate_v16i16:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512VL-NEXT: vpsubw %ymm1, %ymm2, %ymm2
; AVX512VL-NEXT: vpbroadcastw %xmm1, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512VL-NEXT: vpsubw %ymm2, %ymm3, %ymm2
; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512VL-NEXT: vpsllw %xmm1, %ymm0, %ymm1
; AVX512VL-NEXT: vpsrlvw %ymm2, %ymm0, %ymm0
@@ -702,42 +688,25 @@ define <16 x i16> @splatvar_rotate_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind
;
; XOPAVX1-LABEL: splatvar_rotate_v16i16:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
; XOPAVX1-NEXT: vpsubw %xmm1, %xmm2, %xmm3
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
; XOPAVX1-NEXT: vpsubw %xmm4, %xmm2, %xmm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; XOPAVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; XOPAVX1-NEXT: vpsllw %xmm1, %xmm4, %xmm5
; XOPAVX1-NEXT: vpsllw %xmm1, %xmm0, %xmm1
; XOPAVX1-NEXT: vinsertf128 $1, %xmm5, %ymm1, %ymm1
; XOPAVX1-NEXT: vpxor %xmm5, %xmm5, %xmm5
; XOPAVX1-NEXT: vpsubw %xmm2, %xmm5, %xmm2
; XOPAVX1-NEXT: vpshlw %xmm2, %xmm4, %xmm2
; XOPAVX1-NEXT: vpsubw %xmm3, %xmm5, %xmm3
; XOPAVX1-NEXT: vpshlw %xmm3, %xmm0, %xmm0
; XOPAVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vprotw %xmm1, %xmm2, %xmm2
; XOPAVX1-NEXT: vprotw %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_rotate_v16i16:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; XOPAVX2-NEXT: vpsubw %ymm1, %ymm2, %ymm2
; XOPAVX2-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; XOPAVX2-NEXT: vpsllw %xmm1, %ymm0, %ymm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
; XOPAVX2-NEXT: vpxor %xmm4, %xmm4, %xmm4
; XOPAVX2-NEXT: vpsubw %xmm3, %xmm4, %xmm3
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm5
; XOPAVX2-NEXT: vpshlw %xmm3, %xmm5, %xmm3
; XOPAVX2-NEXT: vpsubw %xmm2, %xmm4, %xmm2
; XOPAVX2-NEXT: vpshlw %xmm2, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm0
; XOPAVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; XOPAVX2-NEXT: vpbroadcastw %xmm1, %ymm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
; XOPAVX2-NEXT: vprotw %xmm3, %xmm2, %xmm2
; XOPAVX2-NEXT: vprotw %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%splat = shufflevector <16 x i16> %b, <16 x i16> undef, <16 x i32> zeroinitializer
%splat16 = sub <16 x i16> <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>, %b
%splat16 = sub <16 x i16> <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>, %splat
%shl = shl <16 x i16> %a, %splat
%lshr = lshr <16 x i16> %a, %splat16
%or = or <16 x i16> %shl, %lshr
@@ -748,152 +717,127 @@ define <32 x i8> @splatvar_rotate_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX1-LABEL: splatvar_rotate_v32i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX1-NEXT: vpsubb %xmm1, %xmm4, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpsubb %xmm1, %xmm4, %xmm9
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT: vpsllw $4, %xmm4, %xmm5
; AVX1-NEXT: vmovdqa {{.*#+}} xmm10 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX1-NEXT: vpand %xmm10, %xmm5, %xmm5
; AVX1-NEXT: vpsllw $5, %xmm3, %xmm3
; AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm5
; AVX1-NEXT: vpsllw $2, %xmm5, %xmm7
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; AVX1-NEXT: vpand %xmm2, %xmm7, %xmm7
; AVX1-NEXT: vpaddb %xmm3, %xmm3, %xmm1
; AVX1-NEXT: vpblendvb %xmm1, %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm7
; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm6
; AVX1-NEXT: vpblendvb %xmm6, %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vpsllw $4, %xmm0, %xmm7
; AVX1-NEXT: vpand %xmm10, %xmm7, %xmm7
; AVX1-NEXT: vpblendvb %xmm3, %xmm7, %xmm0, %xmm3
; AVX1-NEXT: vpsllw $2, %xmm3, %xmm7
; AVX1-NEXT: vpand %xmm2, %xmm7, %xmm2
; AVX1-NEXT: vpblendvb %xmm1, %xmm2, %xmm3, %xmm1
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpsllw $4, %xmm3, %xmm4
; AVX1-NEXT: vmovdqa {{.*#+}} xmm9 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX1-NEXT: vpand %xmm9, %xmm4, %xmm4
; AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
; AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm3, %xmm4
; AVX1-NEXT: vpsllw $2, %xmm4, %xmm6
; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; AVX1-NEXT: vpand %xmm7, %xmm6, %xmm6
; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm2
; AVX1-NEXT: vpblendvb %xmm6, %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm1, %ymm1
; AVX1-NEXT: vpsrlw $4, %xmm4, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsllw $5, %xmm9, %xmm5
; AVX1-NEXT: vpblendvb %xmm5, %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpsrlw $2, %xmm2, %xmm4
; AVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm4, %xmm4
; AVX1-NEXT: vpaddb %xmm4, %xmm4, %xmm6
; AVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm5
; AVX1-NEXT: vpblendvb %xmm5, %xmm6, %xmm4, %xmm4
; AVX1-NEXT: vpsllw $4, %xmm0, %xmm6
; AVX1-NEXT: vpand %xmm9, %xmm6, %xmm6
; AVX1-NEXT: vpblendvb %xmm1, %xmm6, %xmm0, %xmm1
; AVX1-NEXT: vpsllw $2, %xmm1, %xmm6
; AVX1-NEXT: vpand %xmm7, %xmm6, %xmm6
; AVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm1, %xmm1
; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm2
; AVX1-NEXT: vpblendvb %xmm5, %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm10
; AVX1-NEXT: vpsrlw $4, %xmm3, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm9 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm9, %xmm2, %xmm2
; AVX1-NEXT: vpsllw $5, %xmm8, %xmm5
; AVX1-NEXT: vpblendvb %xmm5, %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsrlw $2, %xmm2, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
; AVX1-NEXT: vpand %xmm6, %xmm4, %xmm4
; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm5
; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm4
; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX1-NEXT: vpand %xmm7, %xmm4, %xmm4
; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm5
; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm4
; AVX1-NEXT: vpand %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsllw $5, %xmm8, %xmm4
; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3
; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm7
; AVX1-NEXT: vpblendvb %xmm7, %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpaddb %xmm7, %xmm7, %xmm1
; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm3
; AVX1-NEXT: vpand %xmm9, %xmm3, %xmm3
; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm3
; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3
; AVX1-NEXT: vpaddb %xmm4, %xmm4, %xmm4
; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpblendvb %xmm7, %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm3
; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3
; AVX1-NEXT: vpaddb %xmm4, %xmm4, %xmm4
; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
; AVX1-NEXT: vorps %ymm0, %ymm10, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_rotate_v32i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastb %xmm1, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX2-NEXT: vpsubb %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vpbroadcastb %xmm1, %ymm1
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX2-NEXT: vpsubb %ymm1, %ymm2, %ymm2
; AVX2-NEXT: vpsllw $4, %ymm0, %ymm3
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
; AVX2-NEXT: vpsllw $5, %ymm2, %ymm2
; AVX2-NEXT: vpblendvb %ymm2, %ymm3, %ymm0, %ymm3
; AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
; AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm3
; AVX2-NEXT: vpsllw $2, %ymm3, %ymm4
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm4, %ymm4
; AVX2-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpblendvb %ymm1, %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpaddb %ymm3, %ymm3, %ymm4
; AVX2-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
; AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm3
; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpblendvb %ymm1, %ymm4, %ymm3, %ymm1
; AVX2-NEXT: vpsllw $5, %ymm2, %ymm2
; AVX2-NEXT: vpaddb %ymm2, %ymm2, %ymm3
; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm4
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm4, %ymm4
; AVX2-NEXT: vpblendvb %ymm1, %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vpsrlw $2, %ymm0, %ymm1
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vpblendvb %ymm3, %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm1
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vpsrlw $2, %ymm0, %ymm2
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
; AVX2-NEXT: vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm2
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
; AVX2-NEXT: vpaddb %ymm3, %ymm3, %ymm3
; AVX2-NEXT: vpblendvb %ymm3, %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpor %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: splatvar_rotate_v32i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastb %xmm1, %ymm2
; AVX512-NEXT: vmovdqa {{.*#+}} ymm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512-NEXT: vpsubb %ymm1, %ymm3, %ymm1
; AVX512-NEXT: vpbroadcastb %xmm1, %ymm1
; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512-NEXT: vpsubb %ymm1, %ymm2, %ymm2
; AVX512-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512-NEXT: vpmovzxbw {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero,ymm2[16],zero,ymm2[17],zero,ymm2[18],zero,ymm2[19],zero,ymm2[20],zero,ymm2[21],zero,ymm2[22],zero,ymm2[23],zero,ymm2[24],zero,ymm2[25],zero,ymm2[26],zero,ymm2[27],zero,ymm2[28],zero,ymm2[29],zero,ymm2[30],zero,ymm2[31],zero
; AVX512-NEXT: vpsllvw %zmm2, %zmm0, %zmm2
; AVX512-NEXT: vpmovwb %zmm2, %ymm2
; AVX512-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
; AVX512-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
; AVX512-NEXT: vpmovwb %zmm1, %ymm1
; AVX512-NEXT: vpmovzxbw {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero,ymm2[16],zero,ymm2[17],zero,ymm2[18],zero,ymm2[19],zero,ymm2[20],zero,ymm2[21],zero,ymm2[22],zero,ymm2[23],zero,ymm2[24],zero,ymm2[25],zero,ymm2[26],zero,ymm2[27],zero,ymm2[28],zero,ymm2[29],zero,ymm2[30],zero,ymm2[31],zero
; AVX512-NEXT: vpsrlvw %zmm2, %zmm0, %zmm0
; AVX512-NEXT: vpmovwb %zmm0, %ymm0
; AVX512-NEXT: vpor %ymm0, %ymm2, %ymm0
; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_rotate_v32i8:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm3
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; XOPAVX1-NEXT: vpsubb %xmm1, %xmm4, %xmm5
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; XOPAVX1-NEXT: vpsubb %xmm1, %xmm4, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; XOPAVX1-NEXT: vpshlb %xmm3, %xmm4, %xmm6
; XOPAVX1-NEXT: vpshlb %xmm3, %xmm0, %xmm3
; XOPAVX1-NEXT: vinsertf128 $1, %xmm6, %ymm3, %ymm3
; XOPAVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm1
; XOPAVX1-NEXT: vpshlb %xmm1, %xmm4, %xmm1
; XOPAVX1-NEXT: vpsubb %xmm5, %xmm2, %xmm2
; XOPAVX1-NEXT: vpshlb %xmm2, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; XOPAVX1-NEXT: vorps %ymm0, %ymm3, %ymm0
; XOPAVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vprotb %xmm1, %xmm2, %xmm2
; XOPAVX1-NEXT: vprotb %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_rotate_v32i8:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpbroadcastb %xmm1, %ymm2
; XOPAVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; XOPAVX2-NEXT: vpsubb %ymm1, %ymm3, %ymm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
; XOPAVX2-NEXT: vextracti128 $1, %ymm2, %xmm4
; XOPAVX2-NEXT: vpshlb %xmm4, %xmm3, %xmm4
; XOPAVX2-NEXT: vpshlb %xmm2, %xmm0, %xmm2
; XOPAVX2-NEXT: vinserti128 $1, %xmm4, %ymm2, %ymm2
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm4
; XOPAVX2-NEXT: vpxor %xmm5, %xmm5, %xmm5
; XOPAVX2-NEXT: vpsubb %xmm4, %xmm5, %xmm4
; XOPAVX2-NEXT: vpshlb %xmm4, %xmm3, %xmm3
; XOPAVX2-NEXT: vpsubb %xmm1, %xmm5, %xmm1
; XOPAVX2-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm0
; XOPAVX2-NEXT: vpor %ymm0, %ymm2, %ymm0
; XOPAVX2-NEXT: vpbroadcastb %xmm1, %ymm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
; XOPAVX2-NEXT: vprotb %xmm3, %xmm2, %xmm2
; XOPAVX2-NEXT: vprotb %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%splat = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
%splat8 = sub <32 x i8> <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>, %b
%splat8 = sub <32 x i8> <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>, %splat
%shl = shl <32 x i8> %a, %splat
%lshr = lshr <32 x i8> %a, %splat8
%or = or <32 x i8> %shl, %lshr

@ -304,8 +304,8 @@ define <64 x i8> @var_rotate_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; Uniform Variable Rotates
;

define <8 x i64> @splatvar_rotate_v2i64(<8 x i64> %a, <8 x i64> %b) nounwind {
; AVX512-LABEL: splatvar_rotate_v2i64:
define <8 x i64> @splatvar_rotate_v8i64(<8 x i64> %a, <8 x i64> %b) nounwind {
; AVX512-LABEL: splatvar_rotate_v8i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastq %xmm1, %zmm1
; AVX512-NEXT: vprolvq %zmm1, %zmm0, %zmm0
@ -321,15 +321,11 @@ define <8 x i64> @splatvar_rotate_v2i64(<8 x i64> %a, <8 x i64> %b) nounwind {
define <16 x i32> @splatvar_rotate_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind {
; AVX512-LABEL: splatvar_rotate_v16i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastd {{.*#+}} zmm2 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32]
; AVX512-NEXT: vpsubd %zmm1, %zmm2, %zmm2
; AVX512-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX512-NEXT: vpslld %xmm1, %zmm0, %zmm1
; AVX512-NEXT: vpsrlvd %zmm2, %zmm0, %zmm0
; AVX512-NEXT: vporq %zmm0, %zmm1, %zmm0
; AVX512-NEXT: vpbroadcastd %xmm1, %zmm1
; AVX512-NEXT: vprolvd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: retq
%splat = shufflevector <16 x i32> %b, <16 x i32> undef, <16 x i32> zeroinitializer
%splat32 = sub <16 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, %b
%splat32 = sub <16 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, %splat
%shl = shl <16 x i32> %a, %splat
%lshr = lshr <16 x i32> %a, %splat32
%or = or <16 x i32> %shl, %lshr
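Once both shift amounts derive from the same splat, the AVX512 lowering collapses to a single vpbroadcastq/vprolvq (or vpbroadcastd/vprolvd) pair, as the checks above show. These files are autogenerated; a RUN line in their usual style, given here as an assumption about the test setup rather than a quote from it:

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX512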
@ -339,8 +335,8 @@ define <16 x i32> @splatvar_rotate_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind
define <32 x i16> @splatvar_rotate_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
; AVX512F-LABEL: splatvar_rotate_v32i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpbroadcastw %xmm2, %ymm3
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512F-NEXT: vpsubw %ymm2, %ymm4, %ymm5
; AVX512F-NEXT: vpsubw %ymm3, %ymm4, %ymm3
; AVX512F-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX512F-NEXT: vpsllw %xmm2, %ymm1, %ymm4
@ -350,7 +346,6 @@ define <32 x i16> @splatvar_rotate_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind
; AVX512F-NEXT: vpsrlvd %zmm3, %zmm1, %zmm1
; AVX512F-NEXT: vpmovdw %zmm1, %ymm1
; AVX512F-NEXT: vpor %ymm1, %ymm4, %ymm1
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm3 = ymm5[0],zero,ymm5[1],zero,ymm5[2],zero,ymm5[3],zero,ymm5[4],zero,ymm5[5],zero,ymm5[6],zero,ymm5[7],zero,ymm5[8],zero,ymm5[9],zero,ymm5[10],zero,ymm5[11],zero,ymm5[12],zero,ymm5[13],zero,ymm5[14],zero,ymm5[15],zero
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512F-NEXT: vpsrlvd %zmm3, %zmm0, %zmm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
@ -359,8 +354,8 @@ define <32 x i16> @splatvar_rotate_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind
;
; AVX512VL-LABEL: splatvar_rotate_v32i16:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpbroadcastw %xmm2, %ymm3
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512VL-NEXT: vpsubw %ymm2, %ymm4, %ymm5
; AVX512VL-NEXT: vpsubw %ymm3, %ymm4, %ymm3
; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX512VL-NEXT: vpsllw %xmm2, %ymm1, %ymm4
@ -369,7 +364,6 @@ define <32 x i16> @splatvar_rotate_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512VL-NEXT: vpsrlvd %zmm3, %zmm1, %zmm1
; AVX512VL-NEXT: vpmovdw %zmm1, %ymm1
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm3 = ymm5[0],zero,ymm5[1],zero,ymm5[2],zero,ymm5[3],zero,ymm5[4],zero,ymm5[5],zero,ymm5[6],zero,ymm5[7],zero,ymm5[8],zero,ymm5[9],zero,ymm5[10],zero,ymm5[11],zero,ymm5[12],zero,ymm5[13],zero,ymm5[14],zero,ymm5[15],zero
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512VL-NEXT: vpsrlvd %zmm3, %zmm0, %zmm0
; AVX512VL-NEXT: vpmovdw %zmm0, %ymm0
@ -379,8 +373,9 @@ define <32 x i16> @splatvar_rotate_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind
;
; AVX512BW-LABEL: splatvar_rotate_v32i16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512BW-NEXT: vpsubw %zmm1, %zmm2, %zmm2
; AVX512BW-NEXT: vpbroadcastw %xmm1, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512BW-NEXT: vpsubw %zmm2, %zmm3, %zmm2
; AVX512BW-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512BW-NEXT: vpsllw %xmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpsrlvw %zmm2, %zmm0, %zmm0
@ -389,15 +384,16 @@ define <32 x i16> @splatvar_rotate_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind
;
; AVX512VLBW-LABEL: splatvar_rotate_v32i16:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512VLBW-NEXT: vpsubw %zmm1, %zmm2, %zmm2
; AVX512VLBW-NEXT: vpbroadcastw %xmm1, %zmm2
; AVX512VLBW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512VLBW-NEXT: vpsubw %zmm2, %zmm3, %zmm2
; AVX512VLBW-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512VLBW-NEXT: vpsllw %xmm1, %zmm0, %zmm1
; AVX512VLBW-NEXT: vpsrlvw %zmm2, %zmm0, %zmm0
; AVX512VLBW-NEXT: vporq %zmm0, %zmm1, %zmm0
; AVX512VLBW-NEXT: retq
%splat = shufflevector <32 x i16> %b, <32 x i16> undef, <32 x i32> zeroinitializer
%splat16 = sub <32 x i16> <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>, %b
%splat16 = sub <32 x i16> <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>, %splat
%shl = shl <32 x i16> %a, %splat
%lshr = lshr <32 x i16> %a, %splat16
%or = or <32 x i16> %shl, %lshr
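AVX512F has no variable 16-bit shifts at 512-bit width (vpsrlvw requires AVX512BW), which is why the AVX512F and AVX512VL checks above emulate the right shift by widening each i16 lane to i32, shifting, and narrowing back: the vpmovzxwd / vpsrlvd / vpmovdw sequence. A scalar sketch of that idiom for a single lane (function name hypothetical):

define i16 @lshr_i16_via_i32(i16 %x, i16 %amt) nounwind {
  ; widen, shift in a 32-bit lane, narrow back
  %x32 = zext i16 %x to i32
  %a32 = zext i16 %amt to i32
  %sh = lshr i32 %x32, %a32
  %r = trunc i32 %sh to i16
  ret i16 %r
}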
@ -407,139 +403,131 @@ define <32 x i16> @splatvar_rotate_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind
define <64 x i8> @splatvar_rotate_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512F-LABEL: splatvar_rotate_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpbroadcastb %xmm2, %ymm4
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512F-NEXT: vpsubb %ymm2, %ymm5, %ymm2
; AVX512F-NEXT: vpsubb %ymm3, %ymm5, %ymm3
; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm5
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512F-NEXT: vpand %ymm6, %ymm5, %ymm5
; AVX512F-NEXT: vpsllw $5, %ymm4, %ymm4
; AVX512F-NEXT: vpblendvb %ymm4, %ymm5, %ymm1, %ymm5
; AVX512F-NEXT: vpsllw $2, %ymm5, %ymm7
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; AVX512F-NEXT: vpand %ymm8, %ymm7, %ymm7
; AVX512F-NEXT: vpaddb %ymm4, %ymm4, %ymm9
; AVX512F-NEXT: vpblendvb %ymm9, %ymm7, %ymm5, %ymm5
; AVX512F-NEXT: vpaddb %ymm5, %ymm5, %ymm7
; AVX512F-NEXT: vpaddb %ymm9, %ymm9, %ymm10
; AVX512F-NEXT: vpblendvb %ymm10, %ymm7, %ymm5, %ymm5
; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm7
; AVX512F-NEXT: vpand %ymm6, %ymm7, %ymm6
; AVX512F-NEXT: vpblendvb %ymm4, %ymm6, %ymm0, %ymm4
; AVX512F-NEXT: vpsllw $2, %ymm4, %ymm6
; AVX512F-NEXT: vpand %ymm8, %ymm6, %ymm6
; AVX512F-NEXT: vpblendvb %ymm9, %ymm6, %ymm4, %ymm4
; AVX512F-NEXT: vpaddb %ymm4, %ymm4, %ymm6
; AVX512F-NEXT: vpblendvb %ymm10, %ymm6, %ymm4, %ymm4
; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm6
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512F-NEXT: vpand %ymm7, %ymm6, %ymm6
; AVX512F-NEXT: vpsllw $5, %ymm3, %ymm3
; AVX512F-NEXT: vpblendvb %ymm3, %ymm6, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $2, %ymm1, %ymm6
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
; AVX512F-NEXT: vpand %ymm8, %ymm6, %ymm6
; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm3
; AVX512F-NEXT: vpblendvb %ymm3, %ymm6, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm6
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm9 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX512F-NEXT: vpand %ymm9, %ymm6, %ymm6
; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm3
; AVX512F-NEXT: vpblendvb %ymm3, %ymm6, %ymm1, %ymm1
; AVX512F-NEXT: vpor %ymm1, %ymm5, %ymm1
; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm3
; AVX512F-NEXT: vpand %ymm7, %ymm3, %ymm3
; AVX512F-NEXT: vpbroadcastb %xmm2, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512F-NEXT: vpsubb %ymm2, %ymm3, %ymm3
; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm4
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4
; AVX512F-NEXT: vpsllw $5, %ymm2, %ymm2
; AVX512F-NEXT: vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm4
; AVX512F-NEXT: vpsllw $2, %ymm4, %ymm6
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; AVX512F-NEXT: vpand %ymm7, %ymm6, %ymm6
; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm8
; AVX512F-NEXT: vpblendvb %ymm8, %ymm6, %ymm4, %ymm4
; AVX512F-NEXT: vpaddb %ymm4, %ymm4, %ymm6
; AVX512F-NEXT: vpaddb %ymm8, %ymm8, %ymm9
; AVX512F-NEXT: vpblendvb %ymm9, %ymm6, %ymm4, %ymm4
; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm6
; AVX512F-NEXT: vpand %ymm5, %ymm6, %ymm5
; AVX512F-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm2
; AVX512F-NEXT: vpsllw $2, %ymm2, %ymm5
; AVX512F-NEXT: vpand %ymm7, %ymm5, %ymm5
; AVX512F-NEXT: vpblendvb %ymm8, %ymm5, %ymm2, %ymm2
; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm5
; AVX512F-NEXT: vpblendvb %ymm9, %ymm5, %ymm2, %ymm2
; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm5
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512F-NEXT: vpand %ymm6, %ymm5, %ymm5
; AVX512F-NEXT: vpsllw $5, %ymm3, %ymm3
; AVX512F-NEXT: vpblendvb %ymm3, %ymm5, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $2, %ymm1, %ymm5
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
; AVX512F-NEXT: vpand %ymm7, %ymm5, %ymm5
; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm8
; AVX512F-NEXT: vpblendvb %ymm8, %ymm5, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm5
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm9 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX512F-NEXT: vpand %ymm9, %ymm5, %ymm5
; AVX512F-NEXT: vpaddb %ymm8, %ymm8, %ymm10
; AVX512F-NEXT: vpblendvb %ymm10, %ymm5, %ymm1, %ymm1
; AVX512F-NEXT: vpor %ymm1, %ymm4, %ymm1
; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm4
; AVX512F-NEXT: vpand %ymm6, %ymm4, %ymm4
; AVX512F-NEXT: vpblendvb %ymm3, %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $2, %ymm0, %ymm3
; AVX512F-NEXT: vpand %ymm8, %ymm3, %ymm3
; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX512F-NEXT: vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
; AVX512F-NEXT: vpand %ymm7, %ymm3, %ymm3
; AVX512F-NEXT: vpblendvb %ymm8, %ymm3, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $1, %ymm0, %ymm3
; AVX512F-NEXT: vpand %ymm9, %ymm3, %ymm3
; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX512F-NEXT: vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
; AVX512F-NEXT: vpor %ymm0, %ymm4, %ymm0
; AVX512F-NEXT: vpblendvb %ymm10, %ymm3, %ymm0, %ymm0
; AVX512F-NEXT: vpor %ymm0, %ymm2, %ymm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatvar_rotate_v64i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpbroadcastb %xmm2, %ymm4
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512VL-NEXT: vpsubb %ymm2, %ymm5, %ymm2
; AVX512VL-NEXT: vpsubb %ymm3, %ymm5, %ymm5
; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm3
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm6 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512VL-NEXT: vpand %ymm6, %ymm3, %ymm3
; AVX512VL-NEXT: vpsllw $5, %ymm4, %ymm4
; AVX512VL-NEXT: vpblendvb %ymm4, %ymm3, %ymm1, %ymm3
; AVX512VL-NEXT: vpsllw $2, %ymm3, %ymm7
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; AVX512VL-NEXT: vpand %ymm8, %ymm7, %ymm7
; AVX512VL-NEXT: vpaddb %ymm4, %ymm4, %ymm9
; AVX512VL-NEXT: vpblendvb %ymm9, %ymm7, %ymm3, %ymm3
; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm7
; AVX512VL-NEXT: vpaddb %ymm9, %ymm9, %ymm10
; AVX512VL-NEXT: vpblendvb %ymm10, %ymm7, %ymm3, %ymm3
; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm7
; AVX512VL-NEXT: vpand %ymm6, %ymm7, %ymm6
; AVX512VL-NEXT: vpblendvb %ymm4, %ymm6, %ymm0, %ymm4
; AVX512VL-NEXT: vpsllw $2, %ymm4, %ymm6
; AVX512VL-NEXT: vpand %ymm8, %ymm6, %ymm6
; AVX512VL-NEXT: vpblendvb %ymm9, %ymm6, %ymm4, %ymm4
; AVX512VL-NEXT: vpaddb %ymm4, %ymm4, %ymm6
; AVX512VL-NEXT: vpblendvb %ymm10, %ymm6, %ymm4, %ymm4
; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm6
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm7 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VL-NEXT: vpand %ymm7, %ymm6, %ymm6
; AVX512VL-NEXT: vpsllw $5, %ymm5, %ymm5
; AVX512VL-NEXT: vpblendvb %ymm5, %ymm6, %ymm1, %ymm1
; AVX512VL-NEXT: vpsrlw $2, %ymm1, %ymm6
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm8 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
; AVX512VL-NEXT: vpand %ymm8, %ymm6, %ymm6
; AVX512VL-NEXT: vpaddb %ymm5, %ymm5, %ymm5
; AVX512VL-NEXT: vpblendvb %ymm5, %ymm6, %ymm1, %ymm1
; AVX512VL-NEXT: vpsrlw $1, %ymm1, %ymm6
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm9 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX512VL-NEXT: vpand %ymm9, %ymm6, %ymm6
; AVX512VL-NEXT: vpaddb %ymm5, %ymm5, %ymm5
; AVX512VL-NEXT: vpblendvb %ymm5, %ymm6, %ymm1, %ymm1
; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm5
; AVX512VL-NEXT: vpbroadcastb %xmm2, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512VL-NEXT: vpsubb %ymm2, %ymm3, %ymm3
; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm4
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512VL-NEXT: vpand %ymm5, %ymm4, %ymm4
; AVX512VL-NEXT: vpsllw $5, %ymm2, %ymm6
; AVX512VL-NEXT: vpblendvb %ymm6, %ymm4, %ymm1, %ymm2
; AVX512VL-NEXT: vpsllw $2, %ymm2, %ymm4
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm7 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; AVX512VL-NEXT: vpand %ymm7, %ymm4, %ymm4
; AVX512VL-NEXT: vpaddb %ymm6, %ymm6, %ymm8
; AVX512VL-NEXT: vpblendvb %ymm8, %ymm4, %ymm2, %ymm2
; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm4
; AVX512VL-NEXT: vpaddb %ymm8, %ymm8, %ymm9
; AVX512VL-NEXT: vpblendvb %ymm9, %ymm4, %ymm2, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm4
; AVX512VL-NEXT: vpand %ymm5, %ymm4, %ymm4
; AVX512VL-NEXT: vpblendvb %ymm6, %ymm4, %ymm0, %ymm4
; AVX512VL-NEXT: vpsllw $2, %ymm4, %ymm5
; AVX512VL-NEXT: vpand %ymm7, %ymm5, %ymm5
; AVX512VL-NEXT: vpsllw $5, %ymm2, %ymm2
; AVX512VL-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $2, %ymm0, %ymm5
; AVX512VL-NEXT: vpand %ymm8, %ymm5, %ymm5
; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX512VL-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $1, %ymm0, %ymm5
; AVX512VL-NEXT: vpblendvb %ymm8, %ymm5, %ymm4, %ymm4
; AVX512VL-NEXT: vpaddb %ymm4, %ymm4, %ymm5
; AVX512VL-NEXT: vpblendvb %ymm9, %ymm5, %ymm4, %ymm4
; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm5
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm6 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VL-NEXT: vpand %ymm6, %ymm5, %ymm5
; AVX512VL-NEXT: vpsllw $5, %ymm3, %ymm3
; AVX512VL-NEXT: vpblendvb %ymm3, %ymm5, %ymm1, %ymm1
; AVX512VL-NEXT: vpsrlw $2, %ymm1, %ymm5
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm7 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
; AVX512VL-NEXT: vpand %ymm7, %ymm5, %ymm5
; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm8
; AVX512VL-NEXT: vpblendvb %ymm8, %ymm5, %ymm1, %ymm1
; AVX512VL-NEXT: vpsrlw $1, %ymm1, %ymm5
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm9 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX512VL-NEXT: vpand %ymm9, %ymm5, %ymm5
; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX512VL-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0
; AVX512VL-NEXT: vpaddb %ymm8, %ymm8, %ymm10
; AVX512VL-NEXT: vpblendvb %ymm10, %ymm5, %ymm1, %ymm1
; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm5
; AVX512VL-NEXT: vpand %ymm6, %ymm5, %ymm5
; AVX512VL-NEXT: vpblendvb %ymm3, %ymm5, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $2, %ymm0, %ymm3
; AVX512VL-NEXT: vpand %ymm7, %ymm3, %ymm3
; AVX512VL-NEXT: vpblendvb %ymm8, %ymm3, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $1, %ymm0, %ymm3
; AVX512VL-NEXT: vpand %ymm9, %ymm3, %ymm3
; AVX512VL-NEXT: vpblendvb %ymm10, %ymm3, %ymm0, %ymm0
; AVX512VL-NEXT: vpor %ymm0, %ymm4, %ymm0
; AVX512VL-NEXT: vpor %ymm1, %ymm3, %ymm1
; AVX512VL-NEXT: vpor %ymm1, %ymm2, %ymm1
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: splatvar_rotate_v64i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastb %xmm1, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512BW-NEXT: vpsubb %zmm1, %zmm3, %zmm1
; AVX512BW-NEXT: vpbroadcastb %xmm1, %zmm1
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512BW-NEXT: vpsubb %zmm1, %zmm2, %zmm2
; AVX512BW-NEXT: vpsllw $4, %zmm0, %zmm3
; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm3, %zmm3
; AVX512BW-NEXT: vpsllw $5, %zmm2, %zmm2
; AVX512BW-NEXT: vpmovb2m %zmm2, %k1
; AVX512BW-NEXT: vpsllw $5, %zmm1, %zmm1
; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
; AVX512BW-NEXT: vpblendmb %zmm3, %zmm0, %zmm3 {%k1}
; AVX512BW-NEXT: vpsllw $2, %zmm3, %zmm4
; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm4, %zmm4
; AVX512BW-NEXT: vpaddb %zmm2, %zmm2, %zmm2
; AVX512BW-NEXT: vpmovb2m %zmm2, %k1
; AVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
; AVX512BW-NEXT: vmovdqu8 %zmm4, %zmm3 {%k1}
; AVX512BW-NEXT: vpaddb %zmm2, %zmm2, %zmm2
; AVX512BW-NEXT: vpmovb2m %zmm2, %k1
; AVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
; AVX512BW-NEXT: vpaddb %zmm3, %zmm3, %zmm3 {%k1}
; AVX512BW-NEXT: vpsllw $5, %zmm1, %zmm1
; AVX512BW-NEXT: vpsllw $5, %zmm2, %zmm1
; AVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm2
; AVX512BW-NEXT: vpmovb2m %zmm2, %k1
; AVX512BW-NEXT: vpmovb2m %zmm1, %k2
@ -559,23 +547,23 @@ define <64 x i8> @splatvar_rotate_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
;
; AVX512VLBW-LABEL: splatvar_rotate_v64i8:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpbroadcastb %xmm1, %zmm2
; AVX512VLBW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512VLBW-NEXT: vpsubb %zmm1, %zmm3, %zmm1
; AVX512VLBW-NEXT: vpbroadcastb %xmm1, %zmm1
; AVX512VLBW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512VLBW-NEXT: vpsubb %zmm1, %zmm2, %zmm2
; AVX512VLBW-NEXT: vpsllw $4, %zmm0, %zmm3
; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm3, %zmm3
; AVX512VLBW-NEXT: vpsllw $5, %zmm2, %zmm2
; AVX512VLBW-NEXT: vpmovb2m %zmm2, %k1
; AVX512VLBW-NEXT: vpsllw $5, %zmm1, %zmm1
; AVX512VLBW-NEXT: vpmovb2m %zmm1, %k1
; AVX512VLBW-NEXT: vpblendmb %zmm3, %zmm0, %zmm3 {%k1}
; AVX512VLBW-NEXT: vpsllw $2, %zmm3, %zmm4
; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm4, %zmm4
; AVX512VLBW-NEXT: vpaddb %zmm2, %zmm2, %zmm2
; AVX512VLBW-NEXT: vpmovb2m %zmm2, %k1
; AVX512VLBW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
; AVX512VLBW-NEXT: vpmovb2m %zmm1, %k1
; AVX512VLBW-NEXT: vmovdqu8 %zmm4, %zmm3 {%k1}
; AVX512VLBW-NEXT: vpaddb %zmm2, %zmm2, %zmm2
; AVX512VLBW-NEXT: vpmovb2m %zmm2, %k1
; AVX512VLBW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
; AVX512VLBW-NEXT: vpmovb2m %zmm1, %k1
; AVX512VLBW-NEXT: vpaddb %zmm3, %zmm3, %zmm3 {%k1}
; AVX512VLBW-NEXT: vpsllw $5, %zmm1, %zmm1
; AVX512VLBW-NEXT: vpsllw $5, %zmm2, %zmm1
; AVX512VLBW-NEXT: vpaddb %zmm1, %zmm1, %zmm2
; AVX512VLBW-NEXT: vpmovb2m %zmm2, %k1
; AVX512VLBW-NEXT: vpmovb2m %zmm1, %k2
@ -593,7 +581,7 @@ define <64 x i8> @splatvar_rotate_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512VLBW-NEXT: vporq %zmm0, %zmm3, %zmm0
; AVX512VLBW-NEXT: retq
%splat = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
%splat8 = sub <64 x i8> <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>, %b
%splat8 = sub <64 x i8> <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>, %splat
%shl = shl <64 x i8> %a, %splat
%lshr = lshr <64 x i8> %a, %splat8
%or = or <64 x i8> %shl, %lshr
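The v64i8 case above is the expensive one: x86 has no variable per-byte shift, so outside of XOP's vprotb the backend expands each byte shift into the vpsllw/vpsrlw-plus-vpblendvb ladder (or the vpmovb2m masked form on AVX512BW) seen in the checks, which is why the tests care that both shifts consume one genuinely splatted amount. A byte-width sketch of the same IR pattern for a single 128-bit vector (function name hypothetical):

define <16 x i8> @rotl_splat_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
  %splat = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
  ; 8 = bit width of an i8 lane
  %amt = sub <16 x i8> <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>, %splat
  %shl = shl <16 x i8> %a, %splat
  %lshr = lshr <16 x i8> %a, %amt
  %or = or <16 x i8> %shl, %lshr
  ret <16 x i8> %or
}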