
[SelectionDAG] Add knownbits support for CONCAT_VECTOR opcode

llvm-svn: 287387
Simon Pilgrim 2016-11-18 22:21:22 +00:00
parent 422b019be0
commit 391b06023c
2 changed files with 18 additions and 14 deletions

lib/CodeGen/SelectionDAG/SelectionDAG.cpp

@@ -2113,6 +2113,24 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
     }
     break;
   }
+  case ISD::CONCAT_VECTORS: {
+    // Split DemandedElts and test each of the demanded subvectors.
+    KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth);
+    EVT SubVectorVT = Op.getOperand(0).getValueType();
+    unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
+    unsigned NumSubVectors = Op.getNumOperands();
+    for (unsigned i = 0; i != NumSubVectors; ++i) {
+      APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
+      DemandedSub = DemandedSub.trunc(NumSubVectorElts);
+      if (!!DemandedSub) {
+        SDValue Sub = Op.getOperand(i);
+        computeKnownBits(Sub, KnownZero2, KnownOne2, DemandedSub, Depth + 1);
+        KnownOne &= KnownOne2;
+        KnownZero &= KnownZero2;
+      }
+    }
+    break;
+  }
   case ISD::EXTRACT_SUBVECTOR: {
     // If we know the element index, just demand that subvector elements,
     // otherwise demand them all.
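The new case reads as a shift-and-truncate slicing of the demanded-elements mask followed by an intersection of the per-subvector known bits. The standalone sketch below mirrors that logic using plain fixed-width masks; the names and vector shapes are illustrative, not LLVM's APInt or SelectionDAG API:

#include <cstdint>
#include <cstdio>
#include <vector>

// Illustrative sketch, not LLVM API: combine known-zero/known-one bit
// masks across the subvectors of a concatenation. Bit i of DemandedElts
// set means lane i of the concatenated vector is demanded. Assumes
// NumSubVectorElts < 32 so each slice mask fits in a uint32_t.
struct Known {
  uint32_t Zero; // bits known to be 0 in every demanded lane
  uint32_t One;  // bits known to be 1 in every demanded lane
};

Known knownBitsOfConcat(const std::vector<Known> &SubKnown,
                        unsigned NumSubVectorElts, uint32_t DemandedElts) {
  Known Result = {~0u, ~0u}; // start "all known", then intersect
  for (size_t i = 0; i != SubKnown.size(); ++i) {
    // The lshr + trunc of DemandedElts from the DAG code, on plain ints:
    // take subvector i's slice of the demanded-elements mask.
    uint32_t DemandedSub = (DemandedElts >> (i * NumSubVectorElts)) &
                           ((1u << NumSubVectorElts) - 1);
    if (DemandedSub) { // only demanded subvectors constrain the result
      Result.Zero &= SubKnown[i].Zero;
      Result.One &= SubKnown[i].One;
    }
  }
  return Result;
}

int main() {
  // Concat of two v4i32 subvectors: lanes of the first are known to fit
  // in 17 bits (top 15 bits zero); the second is fully unknown. Demanding
  // only lanes 0..3 keeps the unknown subvector from diluting the result.
  std::vector<Known> Subs = {{0xFFFE0000u, 0u}, {0u, 0u}};
  Known K = knownBitsOfConcat(Subs, 4, 0x0Fu);
  std::printf("KnownZero = %#x\n", (unsigned)K.Zero); // 0xfffe0000
}

Starting from the all-ones "everything known" state and intersecting only with demanded subvectors is what lets a fully unknown subvector be ignored when none of its lanes are demanded; the test change below relies on exactly that per-lane reasoning.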

test/CodeGen/X86/known-bits-vector.ll

@@ -331,15 +331,8 @@ define <8 x float> @knownbits_mask_concat_uitofp(<4 x i32> %a0, <4 x i32> %a1) n
 ; X32-NEXT:    vpand {{\.LCPI.*}}, %xmm1, %xmm1
 ; X32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,0,2]
 ; X32-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,3,1,3]
-; X32-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm2
-; X32-NEXT:    vandps {{\.LCPI.*}}, %ymm2, %ymm2
-; X32-NEXT:    vcvtdq2ps %ymm2, %ymm2
-; X32-NEXT:    vpsrld $16, %xmm0, %xmm0
-; X32-NEXT:    vpsrld $16, %xmm1, %xmm1
 ; X32-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; X32-NEXT:    vcvtdq2ps %ymm0, %ymm0
-; X32-NEXT:    vmulps {{\.LCPI.*}}, %ymm0, %ymm0
-; X32-NEXT:    vaddps %ymm2, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_mask_concat_uitofp:
@@ -348,15 +341,8 @@ define <8 x float> @knownbits_mask_concat_uitofp(<4 x i32> %a0, <4 x i32> %a1) n
 ; X64-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
 ; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,0,2]
 ; X64-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,3,1,3]
-; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm2
-; X64-NEXT:    vandps {{.*}}(%rip), %ymm2, %ymm2
-; X64-NEXT:    vcvtdq2ps %ymm2, %ymm2
-; X64-NEXT:    vpsrld $16, %xmm0, %xmm0
-; X64-NEXT:    vpsrld $16, %xmm1, %xmm1
 ; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; X64-NEXT:    vcvtdq2ps %ymm0, %ymm0
-; X64-NEXT:    vmulps {{.*}}(%rip), %ymm0, %ymm0
-; X64-NEXT:    vaddps %ymm2, %ymm0, %ymm0
 ; X64-NEXT:    retq
   %1 = and <4 x i32> %a0, <i32 131071, i32 -1, i32 131071, i32 -1>
   %2 = and <4 x i32> %a1, <i32 -1, i32 131071, i32 -1, i32 131071>
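The test delta shows the payoff: every lane of the concatenated vector was masked with 131071 (0x1FFFF), so knownbits can now prove the top 15 bits, including the sign bit, are zero in all eight lanes. That lets the backend drop the multi-instruction unsigned-to-float expansion (the vpsrld/vcvtdq2ps/vmulps/vaddps sequence) in favor of a single signed vcvtdq2ps, since signed and unsigned conversion agree whenever the sign bit is clear. A minimal scalar check of that identity (illustrative, not from the commit):

#include <cassert>
#include <cstdint>

int main() {
  // When the sign bit of a 32-bit lane is known zero, converting it as a
  // signed integer gives the same float as the unsigned conversion, so a
  // plain (signed) cvtdq2ps is a valid lowering of uitofp for such lanes.
  for (uint32_t x : {0u, 1u, 65536u, 131071u}) { // all <= 0x1FFFF
    float viaSigned = static_cast<float>(static_cast<int32_t>(x));
    float viaUnsigned = static_cast<float>(x);
    assert(viaSigned == viaUnsigned);
  }
}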