[SelectionDAG] Add partial sign-bit support to ComputeNumSignBits for BITCAST nodes
This only adds support for the existing 'large element' scalar/vector to 'small element' vector bitcast handling, covering the case where the sign bit extends into only part of the small elements.

llvm-svn: 340169
parent 7eabd0b16b
commit 156400eb42
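To illustrate what "the sign bit extends into only part of the small elements" means, here is a standalone sketch of the slow-case arithmetic added by this patch. It is plain C++, not the SelectionDAG code itself; the helper name signBitsForLane and the numbers in main are made up for the example (they mirror the ashr-by-49 test case below, where each i64 has at least 50 known sign bits and is viewed as four i16 lanes on a little-endian target).

#include <algorithm>
#include <cstdio>

// Hypothetical standalone model of the "slow case" in this patch: a wide
// SrcBits element whose top SrcSignBits bits are known sign bits is viewed
// as SrcBits / VTBits narrow lanes of VTBits each.  Returns how many sign
// bits are known for lane SubElt (lane 0 holds the lowest bits on LE).
unsigned signBitsForLane(unsigned SrcBits, unsigned VTBits,
                         unsigned SrcSignBits, unsigned SubElt,
                         bool IsLittleEndian) {
  unsigned Scale = SrcBits / VTBits;
  // Distance (in lanes, then bits) from the top of the wide element.
  unsigned SubOffset = IsLittleEndian ? (Scale - 1) - SubElt : SubElt;
  SubOffset *= VTBits;
  if (SrcSignBits <= SubOffset)
    return 1; // The sign-bit run does not reach this lane at all.
  return std::min(VTBits, SrcSignBits - SubOffset);
}

int main() {
  // ashr i64 by 49 leaves at least 50 known sign bits; view it as 4 x i16.
  for (unsigned Lane = 0; Lane != 4; ++Lane)
    std::printf("i16 lane %u: %u sign bits\n", Lane,
                signBitsForLane(64, 16, 50, Lane, /*IsLittleEndian=*/true));
  // Prints: lane 0 -> 2 sign bits, lanes 1..3 -> 16 sign bits.
  return 0;
}

The patched ComputeNumSignBits then takes the minimum of this per-lane count over all demanded lanes, so demanding only the upper lanes of each i64 can still report a full 16 sign bits per i16.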
@@ -3240,9 +3240,9 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
    if (VTBits == SrcBits)
      return ComputeNumSignBits(N0, DemandedElts, Depth + 1);

    bool IsLE = getDataLayout().isLittleEndian();

    // Bitcast 'large element' scalar/vector to 'small element' vector.
    // TODO: Handle cases other than 'sign splat' when we have a use case.
    // Requires handling of DemandedElts and Endianness.
    if ((SrcBits % VTBits) == 0) {
      assert(Op.getValueType().isVector() && "Expected bitcast to vector");
@@ -3252,9 +3252,23 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
        if (DemandedElts[i])
          SrcDemandedElts.setBit(i / Scale);

      // Fast case - sign splat can be simply split across the small elements.
      Tmp = ComputeNumSignBits(N0, SrcDemandedElts, Depth + 1);
      if (Tmp == SrcBits)
        return VTBits;

      // Slow case - determine how far the sign extends into each sub-element.
      Tmp2 = VTBits;
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i]) {
          unsigned SubOffset = i % Scale;
          SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset);
          SubOffset = SubOffset * VTBits;
          if (Tmp <= SubOffset)
            return 1;
          Tmp2 = std::min(Tmp2, Tmp - SubOffset);
        }
      return Tmp2;
    }
    break;
  }
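Before that per-lane arithmetic runs, the patch collapses the demanded narrow elements onto the wide source elements and tries the fast path first: if a source element is already a full sign splat, every narrow lane carved out of it is all sign bits. A minimal sketch of that step, again outside of SelectionDAG and with made-up names:

#include <cstddef>
#include <vector>

// Model of the demanded-elements folding: narrow element i of the bitcast
// result comes from wide source element i / Scale, so a wide element is
// demanded if any of its narrow lanes is demanded (this mirrors
// SrcDemandedElts.setBit(i / Scale) in the patch).
std::vector<bool> foldDemandedToSource(const std::vector<bool> &Demanded,
                                       unsigned Scale) {
  std::vector<bool> SrcDemanded(Demanded.size() / Scale, false);
  for (std::size_t I = 0; I != Demanded.size(); ++I)
    if (Demanded[I])
      SrcDemanded[I / Scale] = true;
  return SrcDemanded;
}

// Fast path: a source element whose known sign-bit count equals its full
// width is a pure sign splat, so each narrow lane is entirely sign bits and
// the bitcast result has VTBits sign bits per lane.
bool sourceIsSignSplat(unsigned SrcSignBits, unsigned SrcBits) {
  return SrcSignBits == SrcBits;
}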
@@ -42,40 +42,33 @@ define <4 x i32> @trunc_ashr_v4i64(<4 x i64> %a) nounwind {
define <8 x i16> @trunc_ashr_v4i64_bitcast(<4 x i64> %a0) {
; SSE-LABEL: trunc_ashr_v4i64_bitcast:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrad $31, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
; SSE-NEXT: psrad $17, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrad $31, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
; SSE-NEXT: psrad $17, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE-NEXT: pslld $16, %xmm1
; SSE-NEXT: psrad $16, %xmm1
; SSE-NEXT: pslld $16, %xmm0
; SSE-NEXT: psrad $16, %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrad $31, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
; SSE-NEXT: psrad $17, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: trunc_ashr_v4i64_bitcast:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsrad $31, %xmm0, %xmm1
; AVX1-NEXT: vpsrad $17, %xmm0, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsrad $31, %xmm1, %xmm2
; AVX1-NEXT: vpsrad $17, %xmm1, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; AVX1-NEXT: vpsrad $31, %xmm0, %xmm2
; AVX1-NEXT: vpsrad $17, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: ret{{[l|q]}}
;
@@ -85,9 +78,8 @@ define <8 x i16> @trunc_ashr_v4i64_bitcast(<4 x i64> %a0) {
; AVX2-NEXT: vpsrad $17, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
; AVX2-NEXT: vpackssdw %ymm0, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: ret{{[l|q]}}
 %1 = ashr <4 x i64> %a0, <i64 49, i64 49, i64 49, i64 49>