
[DAGCombiner] Add general constant vector support to (shl (sra x, c1), c1) -> (and x, (shl -1, c1))

We already supported a scalar constant / splatted constant vector; this now accepts any (non-opaque) constant scalar / vector.

llvm-svn: 284608
Simon Pilgrim 2016-10-19 16:15:30 +00:00
parent 4b04830ee0
commit ff0b5288c1
2 changed files with 8 additions and 21 deletions
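
For context, the identity the fold relies on: an arithmetic shift right by c1 followed by a shift left by c1 only clears the low c1 bits, which is exactly an AND with -1 << c1. A minimal standalone check of the scalar case (plain C++, not part of the commit; the sample values are arbitrary):

// Sanity check: (shl (sra x, c1), c1) == (and x, (shl -1, c1)) for scalars.
#include <cassert>
#include <cstdint>

int main() {
  const int32_t Samples[] = {0, 1, -1, 123456789, -123456789, INT32_MIN};
  for (int32_t X : Samples)
    for (uint32_t C = 0; C != 32; ++C) {
      // x >> c is an arithmetic shift for signed x on mainstream compilers;
      // cast through uint32_t so shifting the negative intermediate left
      // stays well defined.
      int32_t SraShl = (int32_t)((uint32_t)(X >> C) << C);
      int32_t Masked = (int32_t)((uint32_t)X & (~0u << C));
      assert(SraShl == Masked);
    }
  return 0;
}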


@@ -4653,16 +4653,15 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
       }
     }
   }
 
   // fold (shl (sra x, c1), c1) -> (and x, (shl -1, c1))
-  if (N1C && N0.getOpcode() == ISD::SRA && N1 == N0.getOperand(1)) {
+  if (N0.getOpcode() == ISD::SRA && N1 == N0.getOperand(1) &&
+      isConstantOrConstantVector(N1, /* No Opaques */ true)) {
     unsigned BitSize = VT.getScalarSizeInBits();
     SDLoc DL(N);
-    SDValue HiBitsMask =
-      DAG.getConstant(APInt::getHighBitsSet(BitSize,
-                                            BitSize - N1C->getZExtValue()),
-                      DL, VT);
-    return DAG.getNode(ISD::AND, DL, VT, N0.getOperand(0),
-                       HiBitsMask);
+    SDValue AllBits = DAG.getConstant(APInt::getAllOnesValue(BitSize), DL, VT);
+    SDValue HiBitsMask = DAG.getNode(ISD::SHL, DL, VT, AllBits, N1);
+    return DAG.getNode(ISD::AND, DL, VT, N0.getOperand(0), HiBitsMask);
   }
 
   // fold (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
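
The functional change here is small but important: the old code read the shift amount through N1C->getZExtValue(), i.e. through a single splatted scalar, so a vector like <5,6,7,8> never matched; building the mask as (shl -1, c1) out of DAG nodes instead lets constant folding compute it lane by lane. A lane-wise model of the resulting mask, as a plain standalone C++ sketch (the sample amounts are the ones from the test below):

// Model of the per-lane mask that (shl -1, c1) constant-folds to for a
// non-splat c1. For a single scalar C this is exactly the value the old
// APInt::getHighBitsSet(BitSize, BitSize - C) call produced.
#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t ShiftAmts[] = {5, 6, 7, 8}; // c1 lanes used by the test
  for (uint32_t C : ShiftAmts)
    std::printf("c1 = %u -> mask = 0x%08X\n", C, ~0u << C);
  // Prints 0xFFFFFFE0, 0xFFFFFFC0, 0xFFFFFF80, 0xFFFFFF00.
  return 0;
}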


@@ -486,24 +486,12 @@ define <4 x i32> @combine_vec_shl_ashr0(<4 x i32> %x) {
 define <4 x i32> @combine_vec_shl_ashr1(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_shl_ashr1:
 ; SSE:       # BB#0:
-; SSE-NEXT:    movdqa %xmm0, %xmm1
-; SSE-NEXT:    psrad $8, %xmm1
-; SSE-NEXT:    movdqa %xmm0, %xmm2
-; SSE-NEXT:    psrad $6, %xmm2
-; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT:    movdqa %xmm0, %xmm1
-; SSE-NEXT:    psrad $7, %xmm1
-; SSE-NEXT:    psrad $5, %xmm0
-; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
-; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm0
+; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_ashr1:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [5,6,7,8]
-; AVX-NEXT:    vpsravd %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vpsllvd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = ashr <4 x i32> %x, <i32 5, i32 6, i32 7, i32 8>
   %2 = shl <4 x i32> %1, <i32 5, i32 6, i32 7, i32 8>
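
With the fold in place, both codegen paths collapse to a single AND against a constant-pool vector of per-lane masks. The long pre-fold SSE sequence exists only because SSE4.1 has no per-lane variable shifts, so it emulated the variable ashr with psrad/pblendw and the variable shl with pmulld. A quick lane-by-lane equivalence check for the exact constants in this test (plain C++; the input lanes are arbitrary):

// Check (shl (ashr x, <5,6,7,8>), <5,6,7,8>) == (and x, mask) on each lane,
// where mask = <-32, -64, -128, -256>, i.e. <0xFFFFFFE0, ..., 0xFFFFFF00>.
#include <cassert>
#include <cstdint>

int main() {
  const uint32_t C[4] = {5, 6, 7, 8};
  const int32_t X[4] = {-1000, 777, -42, 1 << 20}; // arbitrary sample lanes
  for (int I = 0; I != 4; ++I) {
    int32_t AshrShl = (int32_t)((uint32_t)(X[I] >> C[I]) << C[I]);
    int32_t Masked = (int32_t)((uint32_t)X[I] & (~0u << C[I]));
    assert(AshrShl == Masked);
  }
  return 0;
}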