
[DAGCombine] Improve (sra (sra x, c1), c2) -> (sra x, (add c1, c2)) folding

Add support for cases where only some of the c1+c2 results exceed the maximum shift amount, clamping those elements accordingly.

Differential Revision: https://reviews.llvm.org/D35722

llvm-svn: 340010
Simon Pilgrim 2018-08-17 10:52:49 +00:00
parent 30f1f36fe8
commit 8f8f1d8f69
2 changed files with 18 additions and 28 deletions
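
For illustration only (not part of the commit): a minimal standalone C++ sketch of the per-element clamping rule this change implements, applied to the shift amounts from the vector test updated below. The helper foldShiftSum and the driver are invented for this sketch and do not exist in LLVM.

#include <cstdint>
#include <cstdio>

// Per-element rule behind the fold: (sra (sra x, c1), c2) -> (sra x, s)
// where s = c1 + c2, clamped to BitWidth - 1 once the sum reaches BitWidth.
static unsigned foldShiftSum(unsigned C1, unsigned C2, unsigned BitWidth) {
  std::uint64_t Sum = static_cast<std::uint64_t>(C1) + C2; // widen so the sum can't overflow
  return Sum >= BitWidth ? BitWidth - 1 : static_cast<unsigned>(Sum);
}

int main() {
  // Shift amounts taken from the <4 x i32> test updated below:
  // first ashr by <1, 5, 50, 27>, second ashr by <33, 10, 33, 0>.
  unsigned C1[] = {1, 5, 50, 27};
  unsigned C2[] = {33, 10, 33, 0};
  for (int I = 0; I != 4; ++I)
    std::printf("lane %d -> shift %u\n", I, foldShiftSum(C1[I], C2[I], 32));
  // Prints 31, 15, 31, 27 -- the single clamped ashr the test now expects.
  return 0;
}

Lanes whose sum reaches the 32-bit width (1+33 and 50+33 here) clamp to 31, while the in-range lanes fold to the plain sum, which is why the AVX check in the test below collapses to a single vpsravd by the clamped amounts <31, 15, 31, 27>.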

@@ -6591,31 +6591,30 @@ SDValue DAGCombiner::visitSRA(SDNode *N) {
   }
 
   // fold (sra (sra x, c1), c2) -> (sra x, (add c1, c2))
+  // clamp (add c1, c2) to max shift.
   if (N0.getOpcode() == ISD::SRA) {
     SDLoc DL(N);
     EVT ShiftVT = N1.getValueType();
+    EVT ShiftSVT = ShiftVT.getScalarType();
+    SmallVector<SDValue, 16> ShiftValues;
 
-    auto MatchOutOfRange = [OpSizeInBits](ConstantSDNode *LHS,
-                                          ConstantSDNode *RHS) {
+    auto SumOfShifts = [&](ConstantSDNode *LHS, ConstantSDNode *RHS) {
       APInt c1 = LHS->getAPIntValue();
       APInt c2 = RHS->getAPIntValue();
       zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
-      return (c1 + c2).uge(OpSizeInBits);
+      APInt Sum = c1 + c2;
+      unsigned ShiftSum =
+          Sum.uge(OpSizeInBits) ? (OpSizeInBits - 1) : Sum.getZExtValue();
+      ShiftValues.push_back(DAG.getConstant(ShiftSum, DL, ShiftSVT));
+      return true;
     };
-    if (ISD::matchBinaryPredicate(N1, N0.getOperand(1), MatchOutOfRange))
-      return DAG.getNode(ISD::SRA, DL, VT, N0.getOperand(0),
-                         DAG.getConstant(OpSizeInBits - 1, DL, ShiftVT));
-
-    auto MatchInRange = [OpSizeInBits](ConstantSDNode *LHS,
-                                       ConstantSDNode *RHS) {
-      APInt c1 = LHS->getAPIntValue();
-      APInt c2 = RHS->getAPIntValue();
-      zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
-      return (c1 + c2).ult(OpSizeInBits);
-    };
-    if (ISD::matchBinaryPredicate(N1, N0.getOperand(1), MatchInRange)) {
-      SDValue Sum = DAG.getNode(ISD::ADD, DL, ShiftVT, N1, N0.getOperand(1));
-      return DAG.getNode(ISD::SRA, DL, VT, N0.getOperand(0), Sum);
+    if (ISD::matchBinaryPredicate(N1, N0.getOperand(1), SumOfShifts)) {
+      SDValue ShiftValue;
+      if (VT.isVector())
+        ShiftValue = DAG.getBuildVector(ShiftVT, DL, ShiftValues);
+      else
+        ShiftValue = ShiftValues[0];
+      return DAG.getNode(ISD::SRA, DL, VT, N0.getOperand(0), ShiftValue);
     }
   }
 

@@ -120,24 +120,15 @@ define <4 x i32> @combine_vec_ashr_ashr3(<4 x i32> %x) {
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    psrad $27, %xmm1
 ; SSE-NEXT:    movdqa %xmm0, %xmm2
-; SSE-NEXT:    psrad $5, %xmm2
+; SSE-NEXT:    psrad $15, %xmm2
 ; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT:    movdqa %xmm0, %xmm1
-; SSE-NEXT:    psrad $31, %xmm1
-; SSE-NEXT:    psrad $1, %xmm0
-; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
-; SSE-NEXT:    movdqa %xmm0, %xmm1
-; SSE-NEXT:    psrad $10, %xmm1
-; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; SSE-NEXT:    psrad $31, %xmm0
-; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_ashr_ashr3:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = ashr <4 x i32> %x, <i32 1, i32 5, i32 50, i32 27>
   %2 = ashr <4 x i32> %1, <i32 33, i32 10, i32 33, i32 0>