
[X86] Add X86ISD::VTRUNC to computeKnownBitsForTargetNode.

We have to take special care to avoid the cases where the result of the truncate would be padded with zero elements.

Ideally we'd just use ISD::TRUNCATE for these cases instead.

llvm-svn: 322454
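
To see why the padding matters: known bits for a vector node are the intersection across all of its elements, so appended zero elements invalidate every known-one bit (while known-zero bits stay valid). A self-contained illustration with made-up element values (demo code, not from this commit):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Four narrowed elements, all odd, followed by twelve zero pad
      // elements -- the shape X86ISD::VTRUNC produces when the narrowed
      // payload no longer fills the 128-bit result register.
      uint8_t Elts[16] = {1, 3, 5, 7}; // remaining entries default to 0
      uint8_t One = 0xFF, Zero = 0xFF; // bitwise known-one / known-zero masks
      for (uint8_t E : Elts) {
        One &= E;   // a bit stays known-one only if set in every element
        Zero &= ~E; // a bit stays known-zero only if clear in every element
      }
      // The padding clears every known-one bit (One == 0), which is why the
      // handler below must either skip such nodes or drop the 1s it found.
      std::printf("KnownOne=%#x KnownZero=%#x\n", One, Zero);
      return 0;
    }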
Craig Topper 2018-01-14 08:11:33 +00:00
parent 767d0f3bfe
commit f1f9a8c7f7
2 changed files with 14 additions and 2 deletions


@@ -27837,6 +27837,18 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
     Known.Zero.setBitsFrom(InBitWidth);
     break;
   }
+  case X86ISD::VTRUNC: {
+    // TODO: Add DemandedElts support.
+    SDValue N0 = Op.getOperand(0);
+    // We can only handle cases with the same number of elements. Otherwise
+    // the truncate fills with zero elements.
+    // TODO: Maybe we could just discard any 1s we found instead of skipping?
+    if (VT.getVectorNumElements() != N0.getValueType().getVectorNumElements())
+      break;
+    DAG.computeKnownBits(N0, Known, Depth+1);
+    Known = Known.trunc(BitWidth);
+    break;
+  }
   case X86ISD::CMOV: {
     DAG.computeKnownBits(Op.getOperand(1), Known, Depth+1);
     // If we don't know any bits, early out.
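
When the element counts do match, the patch simply truncates the computed known bits to the narrower element width. A minimal standalone sketch of what Known.trunc does, against llvm/Support/KnownBits.h (demo code, not part of the patch; the bit range mirrors the umin-with-255 in the test below):

    #include "llvm/Support/KnownBits.h"
    #include <cassert>

    int main() {
      // Model one v8i32 element after `umin(x, 255)`: bits [8,32) known zero.
      llvm::KnownBits Known(32);
      Known.Zero.setBitsFrom(8);
      // Narrow to the i16 element type of the VTRUNC result; the low bits
      // survive, so bits [8,16) are still known zero.
      llvm::KnownBits Narrow = Known.trunc(16);
      assert(Narrow.getBitWidth() == 16);
      assert(Narrow.Zero.countTrailingZeros() == 8); // bits [0,8) unknown
      return 0;
    }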


@@ -748,7 +748,7 @@ define <16 x i8> @usat_trunc_db_256(<8 x i32> %x) {
 ; KNL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [255,255,255,255,255,255,255,255]
 ; KNL-NEXT:    vpminud %ymm1, %ymm0, %ymm0
 ; KNL-NEXT:    vpmovdw %zmm0, %ymm0
-; KNL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; KNL-NEXT:    vpackuswb %xmm0, %xmm0, %xmm0
 ; KNL-NEXT:    vzeroupper
 ; KNL-NEXT:    retq
 ;
@@ -756,7 +756,7 @@ define <16 x i8> @usat_trunc_db_256(<8 x i32> %x) {
 ; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpminud {{.*}}(%rip){1to8}, %ymm0, %ymm0
 ; SKX-NEXT:    vpmovdw %ymm0, %xmm0
-; SKX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; SKX-NEXT:    vpackuswb %xmm0, %xmm0, %xmm0
 ; SKX-NEXT:    vzeroupper
 ; SKX-NEXT:    retq
   %tmp1 = icmp ult <8 x i32> %x, <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
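
This test delta is the codegen payoff: with known bits flowing through the vpmovdw (an X86ISD::VTRUNC), the backend can prove every word is already in [0, 255] after the vpminud clamp, so the unsigned-saturating pack behaves as a plain truncation and the vpshufb plus its constant mask are no longer needed. A scalar model of a single vpackuswb lane (hypothetical demo, not code from the LLVM tree):

    #include <cstdint>
    #include <cstdio>

    // One lane of vpackuswb: saturate a signed 16-bit word to an unsigned byte.
    static uint8_t packuswb_lane(int16_t W) {
      if (W < 0)
        return 0;
      if (W > 255)
        return 255;
      return static_cast<uint8_t>(W);
    }

    int main() {
      // After the umin-with-255 clamp every word is in [0, 255], so
      // saturation never fires and the pack is exactly a truncation.
      for (int16_t W : {0, 7, 255})
        std::printf("%d -> %d\n", W, packuswb_lane(W));
      return 0;
    }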