
[NFC][X86][Codegen] Add shuffle test that would benefit from sorting in reduceBuildVecToShuffle()

Roman Lebedev 2021-06-13 00:00:28 +03:00
parent 78819ccd55
commit 209d27cb4a
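
For readers skimming the diff below: the new test loads a <24 x i16> vector, reverses it, and deinterleaves it into three strided <8 x i16> outputs. The generated AVX1/AVX2 checks show the current lowering falling back to long vpextrw/vpinsrw chains, which is the codegen the commit title suggests sorting in reduceBuildVecToShuffle() could improve. As an illustration only (not part of the commit, using a smaller hypothetical <12 x i16> type), the same reverse-then-deinterleave pattern looks like this:

    ; Reduced sketch of the pattern exercised by interleave_24i16_out_reverse,
    ; shrunk to <12 x i16> -> three strided <4 x i16> results.
    define void @illustrative_reverse_deinterleave(<12 x i16>* %p, <4 x i16>* %q1, <4 x i16>* %q2, <4 x i16>* %q3) {
      %v = load <12 x i16>, <12 x i16>* %p, align 4
      ; reverse the whole vector
      %rev = shufflevector <12 x i16> %v, <12 x i16> undef, <12 x i32> <i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
      ; pick out every third element, at offsets 0, 1 and 2
      %s1 = shufflevector <12 x i16> %rev, <12 x i16> undef, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
      %s2 = shufflevector <12 x i16> %rev, <12 x i16> undef, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
      %s3 = shufflevector <12 x i16> %rev, <12 x i16> undef, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
      store <4 x i16> %s1, <4 x i16>* %q1, align 4
      store <4 x i16> %s2, <4 x i16>* %q2, align 4
      store <4 x i16> %s3, <4 x i16>* %q3, align 4
      ret void
    }

The full-size test as added by the commit follows in the hunk below.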


@@ -1059,6 +1059,260 @@ define void @interleave_24i16_out(<24 x i16>* %p, <8 x i16>* %q1, <8 x i16>* %q2
ret void
}
define void @interleave_24i16_out_reverse(<24 x i16>* %p, <8 x i16>* %q1, <8 x i16>* %q2, <8 x i16>* %q3) nounwind {
; SSE2-LABEL: interleave_24i16_out_reverse:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqu (%rdi), %xmm8
; SSE2-NEXT: movdqu 16(%rdi), %xmm1
; SSE2-NEXT: movdqu 32(%rdi), %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,0,65535,65535,0,65535,65535,0]
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pand %xmm2, %xmm4
; SSE2-NEXT: pandn %xmm3, %xmm2
; SSE2-NEXT: por %xmm4, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,1,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,0,3,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,1]
; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm2[0,1,2,3,5,6,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm8[0,1,2,1]
; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,5,6]
; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[3,0],xmm4[2,0]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,0,1,2,4,5,6,7]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm5[2,0]
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,0,65535,65535,0,65535,65535]
; SSE2-NEXT: movdqa %xmm4, %xmm5
; SSE2-NEXT: pandn %xmm1, %xmm5
; SSE2-NEXT: movdqa %xmm3, %xmm6
; SSE2-NEXT: pand %xmm4, %xmm6
; SSE2-NEXT: por %xmm5, %xmm6
; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm6[0,3,2,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[2,1,0,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,6,6,6]
; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,65535,65535,65535,0,0,0]
; SSE2-NEXT: pand %xmm6, %xmm5
; SSE2-NEXT: pshufhw {{.*#+}} xmm7 = xmm8[0,1,2,3,4,7,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,1,2,0]
; SSE2-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,5,4,7]
; SSE2-NEXT: movdqa %xmm6, %xmm0
; SSE2-NEXT: pandn %xmm7, %xmm0
; SSE2-NEXT: por %xmm5, %xmm0
; SSE2-NEXT: pand %xmm4, %xmm1
; SSE2-NEXT: pandn %xmm3, %xmm4
; SSE2-NEXT: por %xmm1, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm4[3,1,2,0]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,0,1,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,7,7,7]
; SSE2-NEXT: pand %xmm6, %xmm1
; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm8[0,3,2,3,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,3]
; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,5,4]
; SSE2-NEXT: pandn %xmm3, %xmm6
; SSE2-NEXT: por %xmm1, %xmm6
; SSE2-NEXT: movups %xmm2, (%rsi)
; SSE2-NEXT: movdqu %xmm0, (%rdx)
; SSE2-NEXT: movdqu %xmm6, (%rcx)
; SSE2-NEXT: retq
;
; SSE42-LABEL: interleave_24i16_out_reverse:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqu (%rdi), %xmm0
; SSE42-NEXT: movdqu 16(%rdi), %xmm1
; SSE42-NEXT: movdqu 32(%rdi), %xmm2
; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,1,2,1]
; SSE42-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,5,6]
; SSE42-NEXT: movdqa %xmm1, %xmm4
; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0],xmm2[1],xmm4[2,3],xmm2[4],xmm4[5,6],xmm2[7]
; SSE42-NEXT: pshufb {{.*#+}} xmm4 = xmm4[14,15,8,9,2,3,12,13,6,7,0,1,u,u,u,u]
; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5],xmm3[6,7]
; SSE42-NEXT: movdqa %xmm0, %xmm3
; SSE42-NEXT: pshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u,u,u,u,14,15,8,9,2,3]
; SSE42-NEXT: movdqa %xmm2, %xmm5
; SSE42-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1],xmm1[2],xmm5[3,4],xmm1[5],xmm5[6,7]
; SSE42-NEXT: pshufb {{.*#+}} xmm5 = xmm5[12,13,6,7,0,1,10,11,4,5,u,u,u,u,u,u]
; SSE42-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm3[5,6,7]
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u,12,13,6,7,0,1]
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3,4],xmm2[5],xmm1[6,7]
; SSE42-NEXT: pshufb {{.*#+}} xmm1 = xmm1[10,11,4,5,14,15,8,9,2,3,u,u,u,u,u,u]
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm0[5,6,7]
; SSE42-NEXT: movdqu %xmm4, (%rsi)
; SSE42-NEXT: movdqu %xmm5, (%rdx)
; SSE42-NEXT: movdqu %xmm1, (%rcx)
; SSE42-NEXT: retq
;
; AVX1-LABEL: interleave_24i16_out_reverse:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqu (%rdi), %xmm0
; AVX1-NEXT: vmovdqu 16(%rdi), %xmm1
; AVX1-NEXT: vmovdqu 32(%rdi), %xmm2
; AVX1-NEXT: vpextrw $6, %xmm1, %eax
; AVX1-NEXT: vpshufb {{.*#+}} xmm3 = xmm2[14,15,8,9,2,3,u,u,u,u,u,u,u,u,u,u]
; AVX1-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
; AVX1-NEXT: vpextrw $3, %xmm1, %eax
; AVX1-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
; AVX1-NEXT: vmovd %xmm1, %eax
; AVX1-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
; AVX1-NEXT: vpextrw $5, %xmm0, %eax
; AVX1-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3
; AVX1-NEXT: vpextrw $2, %xmm0, %eax
; AVX1-NEXT: vpinsrw $7, %eax, %xmm3, %xmm3
; AVX1-NEXT: vpextrw $5, %xmm1, %eax
; AVX1-NEXT: vpshufb {{.*#+}} xmm4 = xmm2[12,13,6,7,0,1,u,u,u,u,u,u,u,u,u,u]
; AVX1-NEXT: vpinsrw $3, %eax, %xmm4, %xmm4
; AVX1-NEXT: vpextrw $2, %xmm1, %eax
; AVX1-NEXT: vpinsrw $4, %eax, %xmm4, %xmm4
; AVX1-NEXT: vpextrw $7, %xmm0, %eax
; AVX1-NEXT: vpinsrw $5, %eax, %xmm4, %xmm4
; AVX1-NEXT: vpextrw $4, %xmm0, %eax
; AVX1-NEXT: vpinsrw $6, %eax, %xmm4, %xmm4
; AVX1-NEXT: vpextrw $1, %xmm0, %eax
; AVX1-NEXT: vpinsrw $7, %eax, %xmm4, %xmm4
; AVX1-NEXT: vpextrw $2, %xmm2, %eax
; AVX1-NEXT: vpextrw $5, %xmm2, %edi
; AVX1-NEXT: vmovd %edi, %xmm2
; AVX1-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
; AVX1-NEXT: vpextrw $7, %xmm1, %eax
; AVX1-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
; AVX1-NEXT: vpextrw $4, %xmm1, %eax
; AVX1-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
; AVX1-NEXT: vpextrw $1, %xmm1, %eax
; AVX1-NEXT: vpinsrw $4, %eax, %xmm2, %xmm1
; AVX1-NEXT: vpextrw $6, %xmm0, %eax
; AVX1-NEXT: vpinsrw $5, %eax, %xmm1, %xmm1
; AVX1-NEXT: vpextrw $3, %xmm0, %eax
; AVX1-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0
; AVX1-NEXT: vmovdqu %xmm3, (%rsi)
; AVX1-NEXT: vmovdqu %xmm4, (%rdx)
; AVX1-NEXT: vmovdqu %xmm0, (%rcx)
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: interleave_24i16_out_reverse:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vmovdqu (%rdi), %xmm0
; AVX2-SLOW-NEXT: vmovdqu 16(%rdi), %xmm1
; AVX2-SLOW-NEXT: vmovdqu 32(%rdi), %xmm2
; AVX2-SLOW-NEXT: vpextrw $6, %xmm1, %eax
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm3 = xmm2[14,15,8,9,2,3,u,u,u,u,u,u,u,u,u,u]
; AVX2-SLOW-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
; AVX2-SLOW-NEXT: vpextrw $3, %xmm1, %eax
; AVX2-SLOW-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
; AVX2-SLOW-NEXT: vmovd %xmm1, %eax
; AVX2-SLOW-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
; AVX2-SLOW-NEXT: vpextrw $5, %xmm0, %eax
; AVX2-SLOW-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3
; AVX2-SLOW-NEXT: vpextrw $2, %xmm0, %eax
; AVX2-SLOW-NEXT: vpinsrw $7, %eax, %xmm3, %xmm3
; AVX2-SLOW-NEXT: vpextrw $5, %xmm1, %eax
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm4 = xmm2[12,13,6,7,0,1,u,u,u,u,u,u,u,u,u,u]
; AVX2-SLOW-NEXT: vpinsrw $3, %eax, %xmm4, %xmm4
; AVX2-SLOW-NEXT: vpextrw $2, %xmm1, %eax
; AVX2-SLOW-NEXT: vpinsrw $4, %eax, %xmm4, %xmm4
; AVX2-SLOW-NEXT: vpextrw $7, %xmm0, %eax
; AVX2-SLOW-NEXT: vpinsrw $5, %eax, %xmm4, %xmm4
; AVX2-SLOW-NEXT: vpextrw $4, %xmm0, %eax
; AVX2-SLOW-NEXT: vpinsrw $6, %eax, %xmm4, %xmm4
; AVX2-SLOW-NEXT: vpextrw $1, %xmm0, %eax
; AVX2-SLOW-NEXT: vpinsrw $7, %eax, %xmm4, %xmm4
; AVX2-SLOW-NEXT: vpextrw $2, %xmm2, %eax
; AVX2-SLOW-NEXT: vpextrw $5, %xmm2, %edi
; AVX2-SLOW-NEXT: vmovd %edi, %xmm2
; AVX2-SLOW-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
; AVX2-SLOW-NEXT: vpextrw $7, %xmm1, %eax
; AVX2-SLOW-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
; AVX2-SLOW-NEXT: vpextrw $4, %xmm1, %eax
; AVX2-SLOW-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
; AVX2-SLOW-NEXT: vpextrw $1, %xmm1, %eax
; AVX2-SLOW-NEXT: vpinsrw $4, %eax, %xmm2, %xmm1
; AVX2-SLOW-NEXT: vpextrw $6, %xmm0, %eax
; AVX2-SLOW-NEXT: vpinsrw $5, %eax, %xmm1, %xmm1
; AVX2-SLOW-NEXT: vpextrw $3, %xmm0, %eax
; AVX2-SLOW-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
; AVX2-SLOW-NEXT: vmovd %xmm0, %eax
; AVX2-SLOW-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0
; AVX2-SLOW-NEXT: vmovdqu %xmm3, (%rsi)
; AVX2-SLOW-NEXT: vmovdqu %xmm4, (%rdx)
; AVX2-SLOW-NEXT: vmovdqu %xmm0, (%rcx)
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: interleave_24i16_out_reverse:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vmovdqu (%rdi), %xmm0
; AVX2-FAST-NEXT: vmovdqu 16(%rdi), %xmm1
; AVX2-FAST-NEXT: vmovdqu 32(%rdi), %xmm2
; AVX2-FAST-NEXT: vpextrw $6, %xmm1, %eax
; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm3 = xmm2[14,15,8,9,2,3,u,u,u,u,u,u,u,u,u,u]
; AVX2-FAST-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
; AVX2-FAST-NEXT: vpextrw $3, %xmm1, %eax
; AVX2-FAST-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
; AVX2-FAST-NEXT: vmovd %xmm1, %eax
; AVX2-FAST-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
; AVX2-FAST-NEXT: vpextrw $5, %xmm0, %eax
; AVX2-FAST-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3
; AVX2-FAST-NEXT: vpextrw $2, %xmm0, %eax
; AVX2-FAST-NEXT: vpinsrw $7, %eax, %xmm3, %xmm3
; AVX2-FAST-NEXT: vpextrw $5, %xmm1, %eax
; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm4 = xmm2[12,13,6,7,0,1,u,u,u,u,u,u,u,u,u,u]
; AVX2-FAST-NEXT: vpinsrw $3, %eax, %xmm4, %xmm4
; AVX2-FAST-NEXT: vpextrw $2, %xmm1, %eax
; AVX2-FAST-NEXT: vpinsrw $4, %eax, %xmm4, %xmm4
; AVX2-FAST-NEXT: vpextrw $7, %xmm0, %eax
; AVX2-FAST-NEXT: vpinsrw $5, %eax, %xmm4, %xmm4
; AVX2-FAST-NEXT: vpextrw $4, %xmm0, %eax
; AVX2-FAST-NEXT: vpinsrw $6, %eax, %xmm4, %xmm4
; AVX2-FAST-NEXT: vpextrw $1, %xmm0, %eax
; AVX2-FAST-NEXT: vpinsrw $7, %eax, %xmm4, %xmm4
; AVX2-FAST-NEXT: vpextrw $7, %xmm1, %eax
; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[10,11,4,5,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX2-FAST-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
; AVX2-FAST-NEXT: vpextrw $4, %xmm1, %eax
; AVX2-FAST-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
; AVX2-FAST-NEXT: vpextrw $1, %xmm1, %eax
; AVX2-FAST-NEXT: vpinsrw $4, %eax, %xmm2, %xmm1
; AVX2-FAST-NEXT: vpextrw $6, %xmm0, %eax
; AVX2-FAST-NEXT: vpinsrw $5, %eax, %xmm1, %xmm1
; AVX2-FAST-NEXT: vpextrw $3, %xmm0, %eax
; AVX2-FAST-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
; AVX2-FAST-NEXT: vmovd %xmm0, %eax
; AVX2-FAST-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0
; AVX2-FAST-NEXT: vmovdqu %xmm3, (%rsi)
; AVX2-FAST-NEXT: vmovdqu %xmm4, (%rdx)
; AVX2-FAST-NEXT: vmovdqu %xmm0, (%rcx)
; AVX2-FAST-NEXT: retq
;
; XOP-LABEL: interleave_24i16_out_reverse:
; XOP: # %bb.0:
; XOP-NEXT: vmovdqu (%rdi), %xmm0
; XOP-NEXT: vmovdqu 16(%rdi), %xmm1
; XOP-NEXT: vmovdqu 32(%rdi), %xmm2
; XOP-NEXT: vpextrw $5, %xmm0, %eax
; XOP-NEXT: vpperm {{.*#+}} xmm3 = xmm2[14,15,8,9,2,3],xmm1[12,13,6,7,0,1],xmm2[u,u,u,u]
; XOP-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3
; XOP-NEXT: vpextrw $2, %xmm0, %eax
; XOP-NEXT: vpinsrw $7, %eax, %xmm3, %xmm3
; XOP-NEXT: vpperm {{.*#+}} xmm4 = xmm2[12,13,6,7,0,1],xmm1[10,11,4,5],xmm2[u,u,u,u,u,u]
; XOP-NEXT: vpperm {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,6,7,8,9],xmm0[14,15,8,9,2,3]
; XOP-NEXT: vpperm {{.*#+}} xmm1 = xmm2[10,11,4,5],xmm1[14,15,8,9,2,3,u,u,u,u,u,u]
; XOP-NEXT: vpperm {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5,6,7,8,9],xmm0[12,13,6,7,0,1]
; XOP-NEXT: vmovdqu %xmm3, (%rsi)
; XOP-NEXT: vmovdqu %xmm4, (%rdx)
; XOP-NEXT: vmovdqu %xmm0, (%rcx)
; XOP-NEXT: retq
%wide.vec.reverse = load <24 x i16>, <24 x i16>* %p, align 4
%wide.vec = shufflevector <24 x i16> %wide.vec.reverse, <24 x i16> undef, <24 x i32> <i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
%s1 = shufflevector <24 x i16> %wide.vec, <24 x i16> undef, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
%s2 = shufflevector <24 x i16> %wide.vec, <24 x i16> undef, <8 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22>
%s3 = shufflevector <24 x i16> %wide.vec, <24 x i16> undef, <8 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23>
store <8 x i16> %s1, <8 x i16>* %q1, align 4
store <8 x i16> %s2, <8 x i16>* %q2, align 4
store <8 x i16> %s3, <8 x i16>* %q3, align 4
ret void
}
define void @interleave_24i16_in(<24 x i16>* %p, <8 x i16>* %q1, <8 x i16>* %q2, <8 x i16>* %q3) nounwind {
; SSE2-LABEL: interleave_24i16_in:
; SSE2: # %bb.0: