
[X86][AVX] Insert zero byte elements into 256/512-bit vectors using shuffle/and

Avoid extracting/inserting subvectors, which makes it more difficult for shuffle combining to merge them back together.
Simon Pilgrim 2021-03-12 14:42:20 +00:00
parent ddbea9a58f
commit 1833b432fd
3 changed files with 6 additions and 36 deletions


@@ -18854,7 +18854,7 @@ SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
   // a blend shuffle with a rematerializable vector than a costly integer
   // insertion.
   if ((IsZeroElt || IsAllOnesElt) && Subtarget.hasSSE41() &&
-      16 <= EltSizeInBits) {
+      (16 <= EltSizeInBits || (IsZeroElt && !VT.is128BitVector()))) {
     SmallVector<int, 8> BlendMask;
     for (unsigned i = 0; i != NumElts; ++i)
       BlendMask.push_back(i == IdxVal ? i + NumElts : i);
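
The functional change is the widened condition: previously the blend-with-constant path required elements of at least 16 bits; now a zero insertion into any non-128-bit (i.e. 256/512-bit) vector also qualifies, so byte inserts no longer fall back to extracting and reinserting 128-bit subvectors. A minimal standalone C++ sketch of the predicate and the mask construction, with hypothetical free functions and plain bools standing in for the Subtarget/VT queries (this is not LLVM's API):

#include <cstdio>
#include <vector>

// Mirrors the condition in the hunk above: the blend-with-constant lowering
// fires for zero/all-ones inserts when SSE4.1 is available and either the
// element is >= 16 bits wide, or a zero is inserted into a non-128-bit vector.
static bool lowersAsBlend(bool IsZeroElt, bool IsAllOnesElt, bool HasSSE41,
                          unsigned EltSizeInBits, bool Is128BitVector) {
  return (IsZeroElt || IsAllOnesElt) && HasSSE41 &&
         (16 <= EltSizeInBits || (IsZeroElt && !Is128BitVector));
}

// Mirrors the BlendMask loop: lane IdxVal selects from the second operand
// (the rematerializable zeros/ones vector); every other lane keeps its own.
static std::vector<int> buildBlendMask(unsigned NumElts, unsigned IdxVal) {
  std::vector<int> Mask;
  for (unsigned i = 0; i != NumElts; ++i)
    Mask.push_back(i == IdxVal ? int(i + NumElts) : int(i));
  return Mask;
}

int main() {
  // A v32i8 zero insert (8-bit elements, 256-bit vector) now takes this path.
  std::printf("%d\n", lowersAsBlend(true, false, true, 8, false)); // 1
  for (int M : buildBlendMask(8, 3))
    std::printf("%d ", M); // 0 1 2 11 4 5 6 7
  std::printf("\n");
}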


@@ -665,11 +665,7 @@ define <16 x i16> @_clearupper16xi16b(<16 x i16>) nounwind {
 ;
 ; AVX-LABEL: _clearupper16xi16b:
 ; AVX: # %bb.0:
-; AVX-NEXT: vmovaps {{.*#+}} xmm1 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
-; AVX-NEXT: vandps %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
 ; AVX-NEXT: retq
 %x8 = bitcast <16 x i16> %0 to <32 x i8>
 %r0 = insertelement <32 x i8> %x8, i8 zeroinitializer, i32 1
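
With the widened predicate, clearing the upper byte of every i16 lane becomes a single 256-bit AND against a constant-pool mask, instead of splitting the ymm register into two xmm halves, ANDing each, and recombining. A scalar model of the equivalence (hypothetical demo code, not taken from the test file):

#include <cstdint>
#include <cstdio>

// Scalar model of what the single 256-bit vandps computes: clearing the
// upper byte of each 16-bit lane is an AND with 0x00FF per lane. The old
// codegen did the same work as two 128-bit ANDs plus an extract/insert pair.
static void clearUpper16xi16(uint16_t V[16]) {
  for (int i = 0; i != 16; ++i)
    V[i] &= 0x00FF;
}

int main() {
  uint16_t V[16];
  for (int i = 0; i != 16; ++i)
    V[i] = uint16_t(0xABCD + i);
  clearUpper16xi16(V);
  std::printf("0x%04X 0x%04X\n", V[0], V[1]); // 0x00CD 0x00CE
}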


@@ -463,36 +463,10 @@ define <32 x i8> @insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz(<32 x i8> %a) {
 ; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6],xmm2[7]
 ; SSE41-NEXT: retq
 ;
-; AVX1-LABEL: insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz:
-; AVX1: # %bb.0:
-; AVX1-NEXT: xorl %eax, %eax
-; AVX1-NEXT: vpinsrb $0, %eax, %xmm0, %xmm1
-; AVX1-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm2[7]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-SLOW-LABEL: insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz:
-; AVX2-SLOW: # %bb.0:
-; AVX2-SLOW-NEXT: xorl %eax, %eax
-; AVX2-SLOW-NEXT: vpinsrb $0, %eax, %xmm0, %xmm1
-; AVX2-SLOW-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
-; AVX2-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX2-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm2[7]
-; AVX2-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-SLOW-NEXT: retq
-;
-; AVX2-FAST-LABEL: insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz:
-; AVX2-FAST: # %bb.0:
-; AVX2-FAST-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm1
-; AVX2-FAST-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX2-FAST-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm2[7]
-; AVX2-FAST-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-FAST-NEXT: retq
+; AVX-LABEL: insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz:
+; AVX: # %bb.0:
+; AVX-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX-NEXT: retq
 %1 = insertelement <32 x i8> %a, i8 0, i32 0
 %2 = insertelement <32 x i8> %1, i8 0, i32 15
 %3 = insertelement <32 x i8> %2, i8 0, i32 30
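
The three AVX check prefixes collapse into one: since every inserted element is zero, the whole sequence folds to a single vandps with a mask that is 0x00 at lanes 0, 15, and 30 and 0xFF everywhere else. A scalar sketch checking that equivalence (hypothetical demo code, not part of the test file):

#include <cassert>
#include <cstdint>
#include <cstring>

// Scalar model of the new AVX codegen: inserting i8 0 at lanes 0, 15, and 30
// of a <32 x i8> value equals ANDing with a constant mask that is 0x00 at
// those lanes and 0xFF elsewhere -- one vandps against a constant-pool mask
// instead of pinsrb/pblendw plus an extract/insert pair.
int main() {
  uint8_t A[32], ByInsert[32], ByAnd[32];
  for (int i = 0; i != 32; ++i)
    A[i] = uint8_t(i + 1);

  std::memcpy(ByInsert, A, 32);
  ByInsert[0] = ByInsert[15] = ByInsert[30] = 0; // the three insertelements

  for (int i = 0; i != 32; ++i) {
    uint8_t MaskByte = (i == 0 || i == 15 || i == 30) ? 0x00 : 0xFF;
    ByAnd[i] = uint8_t(A[i] & MaskByte);
  }

  assert(std::memcmp(ByInsert, ByAnd, 32) == 0); // identical results
}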