
[TargetLowering] x s% C == 0 fold: vector divisor with INT_MIN handling

Summary:
The general fold is only valid for positive divisors.
This effectively means it is invalid for `INT_MIN` divisors,
since negating `INT_MIN` wraps back around to `INT_MIN`,
and we currently bail out if we see them.
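
For reference, the wraparound itself is easy to demonstrate; here is a
minimal standalone sketch (illustrative, not part of the patch), modeling
`APInt`'s modular negation with unsigned arithmetic to avoid
signed-overflow UB:
```cpp
#include <cassert>
#include <cstdint>

int main() {
  // Every negative divisor except INT_MIN has a positive twin to fold with,
  // but negating INT_MIN = 0x80000000 wraps back to itself modulo 2^32,
  // so there is no positive divisor to hand to the general fold.
  uint32_t IntMin = 0x80000000u;
  assert(0u - IntMin == IntMin); // -INT_MIN == INT_MIN (mod 2^32)
  assert(0u - (0u - 5u) == 5u);  // whereas e.g. -5 round-trips to 5
}
```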

But that is too strict: we can just fix up the results.
For that, let's do a second computation 'in parallel':
```
Name: srem -> and
Pre: isPowerOf2(C)
%o = srem i8 %X, C
%r = icmp eq %o, 0
  =>
%n = and i8 %X, C-1
%r = icmp eq %n, 0
```
https://rise4fun.com/Alive/Sup
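
The same equivalence is cheap to spot-check exhaustively at 8 bits;
a small standalone sketch (the harness is hypothetical, not from the patch):
```cpp
#include <cassert>
#include <cstdint>

int main() {
  // For every power-of-two divisor C (Shift == 7 yields INT8_MIN, whose
  // bit pattern 0x80 is also a power of two), verify that
  //   (X s% C == 0)  ==  ((X & (C - 1)) == 0)
  // holds for all 256 possible values of X.
  for (int Shift = 0; Shift < 8; ++Shift) {
    int8_t C = (int8_t)(uint8_t)(1u << Shift);
    for (int X = -128; X <= 127; ++X) {
      bool SRemIsZero = (X % (int)C) == 0;
      bool AndIsZero = ((uint8_t)X & (uint8_t)(C - 1)) == 0;
      assert(SRemIsZero == AndIsZero);
    }
  }
}
```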

And then just blend the results: if the divisor was `INT_MIN`,
pick the value we got via the bit-test,
else pick the value from the general fold.
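
In scalar form, the whole blended fold then looks roughly as follows for
8-bit lanes. This is only a sketch for illustration; the names deliberately
mirror `P`, `A`, `K` and `Q` from `prepareSREMEqFold` below, and the `main`
is a hypothetical exhaustive self-test:
```cpp
#include <cassert>
#include <cstdint>

// Rotate an 8-bit value right by K bits.
static uint8_t rotr8(uint8_t V, unsigned K) {
  return K == 0 ? V : (uint8_t)((V >> K) | (V << (8 - K)));
}

// Scalar model of one lane: is X s% C == 0, computed via the fold?
static bool foldSRemIsZero(int8_t X, int8_t C) {
  uint8_t D = (uint8_t)(C < 0 ? -C : C); // |C| (mod 2^8)
  if (D == 0x80)                         // INT_MIN lane: the bit-test path.
    return ((uint8_t)X & 0x7F) == 0;
  if (D == 1)                            // Divisor 1: lane constant-folds.
    return true;
  unsigned K = 0;
  while (!(D & 1)) { D >>= 1; ++K; }     // Now D holds the odd part D0.
  uint8_t P = 1;                         // P = D0^-1 (mod 2^8), computed
  for (int I = 0; I < 3; ++I)            // by Newton's iteration.
    P *= (uint8_t)(2 - D * P);
  uint8_t A = (uint8_t)(127 / D);        // floor(INT8_MAX / D0)
  A &= (uint8_t)~((1u << K) - 1);        // A.clearLowBits(K)
  uint8_t Q = (uint8_t)((2 * A) >> K);   // floor((2 * A) / 2^K)
  // SREM: (setule (rotr (add (mul X, P), A), K), Q)
  return rotr8((uint8_t)((uint8_t)X * P + A), K) <= Q;
}

int main() {
  const int8_t Divisors[] = {1, 3, 5, -5, 6, 14, 100, -128};
  for (int8_t C : Divisors)
    for (int X = -128; X <= 127; ++X)
      assert(foldSRemIsZero((int8_t)X, C) == (X % (int)C == 0));
}
```
The `D == 0x80` branch is exactly the 'second computation', and picking
between the two branches is what the constant `VSELECT` mask does
lane-wise in the vector code.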

There's an interesting observation: `ISD::ROTR` is set to
`LegalizeAction::Expand` before AVX512, so we should not
treat an `INT_MIN` divisor as even; and as can be seen,
while `@test_srem_odd_even_one` improves on all run-lines,
`@test_srem_odd_even_INT_MIN` only improves for AVX512.

Reviewers: RKSimon, craig.topper, spatel

Reviewed By: RKSimon

Subscribers: hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D66300

llvm-svn: 369268
Roman Lebedev 2019-08-19 15:01:42 +00:00
parent d9db87717d
commit 120ce0f066
2 changed files with 128 additions and 133 deletions

lib/CodeGen/SelectionDAG/TargetLowering.cpp

@@ -5019,9 +5019,10 @@ SDValue TargetLowering::buildSREMEqFold(EVT SETCCVT, SDValue REMNode,
                                         ISD::CondCode Cond,
                                         DAGCombinerInfo &DCI,
                                         const SDLoc &DL) const {
-  SmallVector<SDNode *, 3> Built;
+  SmallVector<SDNode *, 7> Built;
   if (SDValue Folded = prepareSREMEqFold(SETCCVT, REMNode, CompTargetNode, Cond,
                                          DCI, DL, Built)) {
+    assert(Built.size() <= 7 && "Max size prediction failed.");
     for (SDNode *N : Built)
       DCI.AddToWorklist(N);
     return Folded;
@@ -5064,6 +5065,7 @@ TargetLowering::prepareSREMEqFold(EVT SETCCVT, SDValue REMNode,
   if (!CompTarget || !CompTarget->isNullValue())
     return SDValue();

+  bool HadIntMinDivisor = false;
   bool HadOneDivisor = false;
   bool AllDivisorsAreOnes = true;
   bool HadEvenDivisor = false;
@@ -5080,12 +5082,10 @@ TargetLowering::prepareSREMEqFold(EVT SETCCVT, SDValue REMNode,
     // WARNING: this fold is only valid for positive divisors!
     APInt D = C->getAPIntValue();
-    if (D.isMinSignedValue())
-      return false; // We can't negate INT_MIN.
     if (D.isNegative())
       D.negate(); // `rem %X, -C` is equivalent to `rem %X, C`
-    assert(!D.isNegative() && "The fold is only valid for positive divisors!");
+    HadIntMinDivisor |= D.isMinSignedValue();

     // If all divisors are ones, we will prefer to avoid the fold.
     HadOneDivisor |= D.isOneValue();
@@ -5096,9 +5096,13 @@ TargetLowering::prepareSREMEqFold(EVT SETCCVT, SDValue REMNode,
     assert((!D.isOneValue() || (K == 0)) && "For divisor '1' we won't rotate.");
     APInt D0 = D.lshr(K);

-    // D is even if it has trailing zeros.
-    HadEvenDivisor |= (K != 0);
-    // D is a power-of-two if D0 is one.
+    if (!D.isMinSignedValue()) {
+      // D is even if it has trailing zeros; unless it's INT_MIN, in which case
+      // we don't care about this lane in this fold, we'll special-handle it.
+      HadEvenDivisor |= (K != 0);
+    }
+
+    // D is a power-of-two if D0 is one. This includes INT_MIN.
     // If all divisors are power-of-two, we will prefer to avoid the fold.
     AllDivisorsArePowerOfTwo &= D0.isOneValue();
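
Aside: the reason `INT_MIN` naturally lands in the power-of-two bucket here
is that its unsigned bit pattern is `2^(W-1)`, so the entire magnitude sits
in the trailing zeros. A tiny standalone sketch (C++20 `<bit>`, illustrative
only, not patch code):
```cpp
#include <bit>
#include <cassert>
#include <cstdint>

int main() {
  uint32_t D = 0x80000000u;         // INT_MIN's 32-bit pattern, i.e. 2^31
  unsigned K = std::countr_zero(D); // the trailing-zero count from above
  uint32_t D0 = D >> K;             // odd part of D
  assert(K == 31 && D0 == 1);       // D0 == 1: the power-of-two shape
}
```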
@@ -5115,7 +5119,11 @@ TargetLowering::prepareSREMEqFold(EVT SETCCVT, SDValue REMNode,
     APInt A = APInt::getSignedMaxValue(W).udiv(D0);
     A.clearLowBits(K);

-    NeedToApplyOffset |= A != 0;
+    if (!D.isMinSignedValue()) {
+      // If divisor INT_MIN, then we don't care about this lane in this fold,
+      // we'll special-handle it.
+      NeedToApplyOffset |= A != 0;
+    }

     // Q = floor((2 * A) / (2^K))
     APInt Q = (2 * A).udiv(APInt::getOneBitSet(W, K));
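
A worked instance of these `A`/`Q` formulas, for a 32-bit lane with divisor
`D = 6` (so `D0 = 3`, `K = 1`); a sketch to make the constants concrete:
```cpp
#include <cassert>
#include <cstdint>

int main() {
  // A = floor(INT32_MAX / D0), with the low K bits cleared.
  uint32_t A = UINT32_C(0x7FFFFFFF) / 3; // 715827882 == 0x2AAAAAAA
  A &= ~UINT32_C(1);                     // clearLowBits(1): no-op, A is even
  // Q = floor((2 * A) / 2^K)
  uint32_t Q = (2 * A) >> 1;             // 0x55555554 >> 1 == 0x2AAAAAAA
  assert(A == UINT32_C(0x2AAAAAAA) && Q == UINT32_C(0x2AAAAAAA));
}
```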
@@ -5125,7 +5133,8 @@ TargetLowering::prepareSREMEqFold(EVT SETCCVT, SDValue REMNode,
     assert(APInt::getAllOnesValue(ShSVT.getSizeInBits()).ugt(K) &&
            "We are expecting that K is always less than all-ones for ShSVT");

-    // If the divisor is 1 the result can be constant-folded.
+    // If the divisor is 1 the result can be constant-folded. Likewise, we
+    // don't care about INT_MIN lanes, those can be set to undef if appropriate.
     if (D.isOneValue()) {
       // Set P, A and K to a bogus values so we can try to splat them.
       P = 0;
@@ -5155,8 +5164,8 @@ TargetLowering::prepareSREMEqFold(EVT SETCCVT, SDValue REMNode,
   if (AllDivisorsAreOnes)
     return SDValue();

-  // If this is a srem by a powers-of-two, avoid the fold since it can be
-  // best implemented as a bit test.
+  // If this is a srem by a powers-of-two (including INT_MIN), avoid the fold
+  // since it can be best implemented as a bit test.
   if (AllDivisorsArePowerOfTwo)
     return SDValue();

@@ -5215,8 +5224,52 @@ TargetLowering::prepareSREMEqFold(EVT SETCCVT, SDValue REMNode,
   }

   // SREM: (setule/setugt (rotr (add (mul N, P), A), K), Q)
-  return DAG.getSetCC(DL, SETCCVT, Op0, QVal,
-                      ((Cond == ISD::SETEQ) ? ISD::SETULE : ISD::SETUGT));
+  SDValue Fold =
+      DAG.getSetCC(DL, SETCCVT, Op0, QVal,
+                   ((Cond == ISD::SETEQ) ? ISD::SETULE : ISD::SETUGT));
+
+  // If we didn't have lanes with INT_MIN divisor, then we're done.
+  if (!HadIntMinDivisor)
+    return Fold;
+
+  // That fold is only valid for positive divisors. Which effectively means,
+  // it is invalid for INT_MIN divisors. So if we have such a lane,
+  // we must fix-up results for said lanes.
+  assert(VT.isVector() && "Can/should only get here for vectors.");
+
+  if (!isOperationLegalOrCustom(ISD::SETEQ, VT) ||
+      !isOperationLegalOrCustom(ISD::AND, VT) ||
+      !isOperationLegalOrCustom(Cond, VT) ||
+      !isOperationLegalOrCustom(ISD::VSELECT, VT))
+    return SDValue();
+
+  Created.push_back(Fold.getNode());
+
+  SDValue IntMin = DAG.getConstant(
+      APInt::getSignedMinValue(SVT.getScalarSizeInBits()), DL, VT);
+  SDValue IntMax = DAG.getConstant(
+      APInt::getSignedMaxValue(SVT.getScalarSizeInBits()), DL, VT);
+  SDValue Zero =
+      DAG.getConstant(APInt::getNullValue(SVT.getScalarSizeInBits()), DL, VT);
+
+  // Which lanes had INT_MIN divisors? Divisor is constant, so const-folded.
+  SDValue DivisorIsIntMin = DAG.getSetCC(DL, SETCCVT, D, IntMin, ISD::SETEQ);
+  Created.push_back(DivisorIsIntMin.getNode());
+
+  // (N s% INT_MIN) ==/!= 0  <-->  (N & INT_MAX) ==/!= 0
+  SDValue Masked = DAG.getNode(ISD::AND, DL, VT, N, IntMax);
+  Created.push_back(Masked.getNode());
+  SDValue MaskedIsZero = DAG.getSetCC(DL, SETCCVT, Masked, Zero, Cond);
+  Created.push_back(MaskedIsZero.getNode());
+
+  // To produce final result we need to blend 2 vectors: 'SetCC' and
+  // 'MaskedIsZero'. If the divisor for channel was *NOT* INT_MIN, we pick
+  // from 'Fold', else pick from 'MaskedIsZero'. Since 'DivisorIsIntMin' is
+  // constant-folded, select can get lowered to a shuffle with constant mask.
+  SDValue Blended =
+      DAG.getNode(ISD::VSELECT, DL, VT, DivisorIsIntMin, MaskedIsZero, Fold);
+
+  return Blended;
 }

 bool TargetLowering::

test/CodeGen/X86/srem-seteq-vec-nonsplat.ll

@@ -1560,123 +1560,77 @@ define <4 x i32> @test_srem_odd_INT_MIN(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_srem_odd_INT_MIN:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pxor %xmm1, %xmm1
; CHECK-SSE2-NEXT: pxor %xmm2, %xmm2
; CHECK-SSE2-NEXT: pcmpgtd %xmm0, %xmm2
; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1717986919,1717986919,2147483647,1717986919]
; CHECK-SSE2-NEXT: pand %xmm3, %xmm2
; CHECK-SSE2-NEXT: pmuludq %xmm0, %xmm3
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
; CHECK-SSE2-NEXT: pmuludq {{.*}}(%rip), %xmm4
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,3,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
; CHECK-SSE2-NEXT: psubd %xmm2, %xmm3
; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm2 = <0,u,4294967295,u>
; CHECK-SSE2-NEXT: pmuludq %xmm0, %xmm2
; CHECK-SSE2-NEXT: pand {{.*}}(%rip), %xmm2
; CHECK-SSE2-NEXT: paddd %xmm3, %xmm2
; CHECK-SSE2-NEXT: movdqa %xmm2, %xmm3
; CHECK-SSE2-NEXT: psrad $1, %xmm3
; CHECK-SSE2-NEXT: movdqa %xmm2, %xmm4
; CHECK-SSE2-NEXT: psrad $30, %xmm4
; CHECK-SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm3[3,0]
; CHECK-SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0,2]
; CHECK-SSE2-NEXT: psrld $31, %xmm2
; CHECK-SSE2-NEXT: paddd %xmm3, %xmm2
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
; CHECK-SSE2-NEXT: pmuludq {{.*}}(%rip), %xmm2
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; CHECK-SSE2-NEXT: pmuludq {{.*}}(%rip), %xmm3
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; CHECK-SSE2-NEXT: psubd %xmm2, %xmm0
; CHECK-SSE2-NEXT: pcmpeqd %xmm1, %xmm0
; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483647,2147483647,2147483647,2147483647]
; CHECK-SSE2-NEXT: pand %xmm0, %xmm2
; CHECK-SSE2-NEXT: pcmpeqd %xmm1, %xmm2
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; CHECK-SSE2-NEXT: pmuludq {{.*}}(%rip), %xmm0
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,2,2,3]
; CHECK-SSE2-NEXT: pmuludq {{.*}}(%rip), %xmm1
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
; CHECK-SSE2-NEXT: paddd {{.*}}(%rip), %xmm3
; CHECK-SSE2-NEXT: pxor {{.*}}(%rip), %xmm3
; CHECK-SSE2-NEXT: pcmpgtd {{.*}}(%rip), %xmm3
; CHECK-SSE2-NEXT: pcmpeqd %xmm0, %xmm0
; CHECK-SSE2-NEXT: pxor %xmm3, %xmm0
; CHECK-SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm0[3,0]
; CHECK-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0,2]
; CHECK-SSE2-NEXT: psrld $31, %xmm0
; CHECK-SSE2-NEXT: retq
;
; CHECK-SSE41-LABEL: test_srem_odd_INT_MIN:
; CHECK-SSE41: # %bb.0:
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; CHECK-SSE41-NEXT: pmuldq {{.*}}(%rip), %xmm1
; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm2 = <1717986919,u,2147483647,u>
; CHECK-SSE41-NEXT: pmuldq %xmm0, %xmm2
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [0,0,4294967295,0]
; CHECK-SSE41-NEXT: pmulld %xmm0, %xmm1
; CHECK-SSE41-NEXT: paddd %xmm2, %xmm1
; CHECK-SSE41-NEXT: movdqa %xmm1, %xmm2
; CHECK-SSE41-NEXT: psrad $30, %xmm2
; CHECK-SSE41-NEXT: movdqa %xmm1, %xmm3
; CHECK-SSE41-NEXT: psrad $1, %xmm3
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm2[4,5],xmm3[6,7]
; CHECK-SSE41-NEXT: psrld $31, %xmm1
; CHECK-SSE41-NEXT: paddd %xmm3, %xmm1
; CHECK-SSE41-NEXT: pmulld {{.*}}(%rip), %xmm1
; CHECK-SSE41-NEXT: psubd %xmm1, %xmm0
; CHECK-SSE41-NEXT: pxor %xmm1, %xmm1
; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2147483647,2147483647,2147483647,2147483647]
; CHECK-SSE41-NEXT: pand %xmm0, %xmm2
; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm2
; CHECK-SSE41-NEXT: pmulld {{.*}}(%rip), %xmm0
; CHECK-SSE41-NEXT: paddd {{.*}}(%rip), %xmm0
; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [858993458,858993458,0,858993458]
; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1
; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5],xmm0[6,7]
; CHECK-SSE41-NEXT: psrld $31, %xmm0
; CHECK-SSE41-NEXT: retq
;
; CHECK-AVX1-LABEL: test_srem_odd_INT_MIN:
; CHECK-AVX1: # %bb.0:
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; CHECK-AVX1-NEXT: vpmuldq {{.*}}(%rip), %xmm1, %xmm1
; CHECK-AVX1-NEXT: vpmuldq {{.*}}(%rip), %xmm0, %xmm2
; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2
; CHECK-AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; CHECK-AVX1-NEXT: vpsrad $30, %xmm1, %xmm2
; CHECK-AVX1-NEXT: vpsrad $1, %xmm1, %xmm3
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5],xmm3[6,7]
; CHECK-AVX1-NEXT: vpsrld $31, %xmm1, %xmm1
; CHECK-AVX1-NEXT: vpaddd %xmm1, %xmm2, %xmm1
; CHECK-AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
; CHECK-AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm2
; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm2, %xmm1
; CHECK-AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm2
; CHECK-AVX1-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
; CHECK-AVX1-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX1-NEXT: retq
;
; CHECK-AVX2-LABEL: test_srem_odd_INT_MIN:
; CHECK-AVX2: # %bb.0:
; CHECK-AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1717986919,1717986919,1717986919,1717986919]
; CHECK-AVX2-NEXT: vpmuldq %xmm2, %xmm1, %xmm1
; CHECK-AVX2-NEXT: vpmuldq {{.*}}(%rip), %xmm0, %xmm2
; CHECK-AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; CHECK-AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
; CHECK-AVX2-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2
; CHECK-AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; CHECK-AVX2-NEXT: vpsrld $31, %xmm1, %xmm2
; CHECK-AVX2-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1
; CHECK-AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; CHECK-AVX2-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
; CHECK-AVX2-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483647,2147483647,2147483647,2147483647]
; CHECK-AVX2-NEXT: vpand %xmm2, %xmm0, %xmm2
; CHECK-AVX2-NEXT: vpcmpeqd %xmm1, %xmm2, %xmm1
; CHECK-AVX2-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm2
; CHECK-AVX2-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
; CHECK-AVX2-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX2-NEXT: retq
;
; CHECK-AVX512VL-LABEL: test_srem_odd_INT_MIN:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; CHECK-AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1717986919,1717986919,1717986919,1717986919]
; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm1, %xmm1
; CHECK-AVX512VL-NEXT: vpmuldq {{.*}}(%rip), %xmm0, %xmm2
; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2
; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2
; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1
; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpandd {{.*}}(%rip){1to4}, %xmm0, %xmm2
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm2, %xmm1
; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm2
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: retq
%srem = srem <4 x i32> %X, <i32 5, i32 5, i32 2147483648, i32 5>
@@ -1799,21 +1753,16 @@ define <4 x i32> @test_srem_even_INT_MIN(<4 x i32> %X) nounwind {
;
; CHECK-AVX512VL-LABEL: test_srem_even_INT_MIN:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; CHECK-AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2454267027,2454267027,2454267027,2454267027]
; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm1, %xmm1
; CHECK-AVX512VL-NEXT: vpmuldq {{.*}}(%rip), %xmm0, %xmm2
; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2
; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2
; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1
; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2
; CHECK-AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = [306783378,306783378,0,306783378]
; CHECK-AVX512VL-NEXT: vpaddd %xmm3, %xmm2, %xmm2
; CHECK-AVX512VL-NEXT: vprorvd {{.*}}(%rip), %xmm2, %xmm2
; CHECK-AVX512VL-NEXT: vpminud %xmm3, %xmm2, %xmm3
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
; CHECK-AVX512VL-NEXT: vpandd {{.*}}(%rip){1to4}, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3]
; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: retq
%srem = srem <4 x i32> %X, <i32 14, i32 14, i32 2147483648, i32 14>
@@ -1960,22 +1909,15 @@ define <4 x i32> @test_srem_odd_even_INT_MIN(<4 x i32> %X) nounwind {
;
; CHECK-AVX512VL-LABEL: test_srem_odd_even_INT_MIN:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [1717986919,2454267027,2147483647,1374389535]
; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
; CHECK-AVX512VL-NEXT: vpmuldq %xmm1, %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2
; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2
; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1
; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpandd {{.*}}(%rip){1to4}, %xmm0, %xmm2
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm2, %xmm1
; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vprorvd {{.*}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm2
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: retq
%srem = srem <4 x i32> %X, <i32 5, i32 14, i32 2147483648, i32 100>