[X86][SSE] Add support for combining PINSRB into a target shuffle.

llvm-svn: 293637
Simon Pilgrim 2017-01-31 14:59:44 +00:00
parent ebbe5ba42e
commit c81337daf9
3 changed files with 17 additions and 49 deletions

@@ -5770,13 +5770,14 @@ static bool getFauxShuffleMask(SDValue N, SmallVectorImpl<int> &Mask,
     Ops.push_back(IsAndN ? N1 : N0);
     return true;
   }
+  case X86ISD::PINSRB:
   case X86ISD::PINSRW: {
     SDValue InVec = N.getOperand(0);
     SDValue InScl = N.getOperand(1);
     uint64_t InIdx = N.getConstantOperandVal(2);
     assert(InIdx < NumElts && "Illegal insertion index");
-    // Attempt to recognise a PINSRW(VEC, 0, Idx) shuffle pattern.
+    // Attempt to recognise a PINSR*(VEC, 0, Idx) shuffle pattern.
     if (X86::isZeroNode(InScl)) {
       Ops.push_back(InVec);
       for (unsigned i = 0; i != NumElts; ++i)
@@ -5784,10 +5785,12 @@ static bool getFauxShuffleMask(SDValue N, SmallVectorImpl<int> &Mask,
       return true;
     }
-    // Attempt to recognise a PINSRW(ASSERTZEXT(PEXTRW)) shuffle pattern.
-    // TODO: Expand this to support PINSRB/INSERT_VECTOR_ELT/etc.
+    // Attempt to recognise a PINSR*(ASSERTZEXT(PEXTR*)) shuffle pattern.
+    // TODO: Expand this to support INSERT_VECTOR_ELT/etc.
+    unsigned ExOp =
+        (X86ISD::PINSRB == Opcode ? X86ISD::PEXTRB : X86ISD::PEXTRW);
     if (InScl.getOpcode() != ISD::AssertZext ||
-        InScl.getOperand(0).getOpcode() != X86ISD::PEXTRW)
+        InScl.getOperand(0).getOpcode() != ExOp)
       return false;
     SDValue ExVec = InScl.getOperand(0).getOperand(0);

@@ -184,37 +184,10 @@ define <16 x i8> @_clearupper16xi8a(<16 x i8>) nounwind {
 ; AVX-LABEL: _clearupper16xi8a:
 ; AVX: # BB#0:
 ; AVX-NEXT: vpextrb $0, %xmm0, %eax
+; AVX-NEXT: vpextrb $1, %xmm0, %ecx
 ; AVX-NEXT: vmovd %eax, %xmm1
-; AVX-NEXT: vpextrb $1, %xmm0, %eax
-; AVX-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $2, %xmm0, %eax
-; AVX-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $3, %xmm0, %eax
-; AVX-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $4, %xmm0, %eax
-; AVX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $5, %xmm0, %eax
-; AVX-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $6, %xmm0, %eax
-; AVX-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $7, %xmm0, %eax
-; AVX-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $8, %xmm0, %eax
-; AVX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $9, %xmm0, %eax
-; AVX-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $10, %xmm0, %eax
-; AVX-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $11, %xmm0, %eax
-; AVX-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $12, %xmm0, %eax
-; AVX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $13, %xmm0, %eax
-; AVX-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $14, %xmm0, %eax
-; AVX-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $15, %xmm0, %eax
-; AVX-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0
+; AVX-NEXT: vpinsrb $1, %ecx, %xmm1, %xmm1
+; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5,6,7]
 ; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT: retq
 %x0 = extractelement <16 x i8> %0, i32 0
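Reviewer's note: the long chain above collapses because each vpinsrb re-inserts the byte that vpextrb just pulled from the same lane, so from byte 2 upward the rebuilt vector is an identity shuffle of %xmm0 and one word blend suffices. A quick standalone check of that observation, with plain byte arrays standing in for v16i8 and an arbitrary (hypothetical) test pattern:

#include <array>
#include <cstdint>
#include <cstdio>

int main() {
  std::array<uint8_t, 16> x;
  for (int i = 0; i != 16; ++i)
    x[i] = uint8_t(17 * i + 3); // arbitrary test pattern

  // Old codegen: rebuild every lane via a vpextrb/vpinsrb round trip.
  std::array<uint8_t, 16> chain;
  for (int i = 0; i != 16; ++i)
    chain[i] = x[i];

  // New codegen: build only bytes 0-1 in xmm1, then
  // vpblendw xmm0 = xmm1[0],xmm0[1,2,3,4,5,6,7] takes 16-bit word 0
  // (bytes 0-1) from xmm1 and words 1-7 (bytes 2-15) from the source.
  std::array<uint8_t, 16> blend;
  blend[0] = x[0];
  blend[1] = x[1]; // word 0 from the rebuilt register
  for (int i = 2; i != 16; ++i)
    blend[i] = x[i]; // words 1-7 unchanged from the source

  std::printf("equivalent: %s\n", chain == blend ? "true" : "false");
}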
@@ -342,15 +315,7 @@ define <8 x i16> @_clearupper8xi16b(<8 x i16>) nounwind {
 ;
 ; AVX-LABEL: _clearupper8xi16b:
 ; AVX: # BB#0:
-; AVX-NEXT: xorl %eax, %eax
-; AVX-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
 ; AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT: retq
 %x8 = bitcast <8 x i16> %0 to <16 x i8>
 %r0 = insertelement <16 x i8> %x8, i8 zeroinitializer, i32 1
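Reviewer's note: this hunk exercises the zero-scalar case from the first code change. Every removed vpinsrb writes 0 into an odd byte lane, which is exactly what the surviving vandps mask already does, so the inserts fold away entirely. A standalone sketch of that equivalence (mask values inferred from the test's semantics; little-endian byte lanes assumed):

#include <array>
#include <cstdint>
#include <cstdio>

int main() {
  std::array<uint8_t, 16> v;
  for (int i = 0; i != 16; ++i)
    v[i] = uint8_t(0xF0 | i);

  // Old codegen: eight vpinsrb of zero into the odd byte lanes.
  std::array<uint8_t, 16> inserts = v;
  for (int i = 1; i != 16; i += 2)
    inserts[i] = 0;

  // New codegen: one vandps with a constant mask -- 0x00FF per 16-bit
  // word keeps the low byte of each word and clears the high byte.
  std::array<uint8_t, 16> anded = v;
  for (int i = 0; i != 16; ++i)
    anded[i] &= (i % 2 == 0) ? 0xFF : 0x00;

  std::printf("equivalent: %s\n", inserts == anded ? "true" : "false");
}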

@@ -492,8 +492,8 @@ define <32 x i8> @insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz(<32 x i8> %a) {
 ; SSE41-NEXT: xorl %eax, %eax
 ; SSE41-NEXT: pinsrb $0, %eax, %xmm0
 ; SSE41-NEXT: pinsrb $15, %eax, %xmm0
-; SSE41-NEXT: pinsrb $14, %eax, %xmm1
-; SSE41-NEXT: pinsrb $15, %eax, %xmm1
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6],xmm2[7]
 ; SSE41-NEXT: retq
 ;
 ; AVX1-LABEL: insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz:
@@ -504,8 +504,8 @@ define <32 x i8> @insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz(<32 x i8> %a) {
 ; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm1
 ; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX1-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6],xmm2[7]
 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT: retq
 ;
@@ -517,8 +517,8 @@ define <32 x i8> @insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz(<32 x i8> %a) {
 ; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm1
 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX2-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6],xmm2[7]
 ; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX2-NEXT: retq
 %1 = insertelement <32 x i8> %a, i8 0, i32 0
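Reviewer's note: in all three variants above, the pair of byte-zero inserts at lanes 14 and 15 lands exactly on 16-bit word 7 (bytes 2*7 and 2*7+1), so the combiner can express both as a single pblendw from a zeroed register. A standalone check of that lane arithmetic, using hypothetical register contents:

#include <array>
#include <cstdint>
#include <cstdio>

int main() {
  std::array<uint8_t, 16> v;
  for (int i = 0; i != 16; ++i)
    v[i] = uint8_t(0xA0 + i);

  // Old codegen: pinsrb $14 and pinsrb $15 with a zeroed GPR.
  std::array<uint8_t, 16> inserts = v;
  inserts[14] = 0;
  inserts[15] = 0;

  // New codegen: pxor a zero register, then
  // pblendw xmm1 = xmm1[0,1,2,3,4,5,6],xmm2[7] -- word 7 covers bytes
  // 14 and 15, so one word blend replaces both byte inserts.
  std::array<uint8_t, 16> blended = v;
  const std::array<uint8_t, 16> zero{};
  blended[2 * 7] = zero[2 * 7];
  blended[2 * 7 + 1] = zero[2 * 7 + 1];

  std::printf("equivalent: %s\n", inserts == blended ? "true" : "false");
}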