
[DAGCombiner] Try to use SelectionDAG::isKnownToBeAPowerOfTwo instead of just APInt::isPowerOf2

Generalize the sdiv/udiv/srem/urem combines that use APInt::isPowerOf2, which only works for constant/splat-constant values, to call SelectionDAG::isKnownToBeAPowerOfTwo instead, which recognises many more cases.

Added a DAGCombiner::BuildLogBase2 helper, since power-of-two combines often involve taking the log2 of such a value.

Differential Revision: https://reviews.llvm.org/D27714

llvm-svn: 289654
Simon Pilgrim 2016-12-14 15:08:13 +00:00
parent 2aaf4b3071
commit f05854a8a9
7 changed files with 163 additions and 268 deletions
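
The combines in this patch rest on two scalar identities: an unsigned divide by a power of two is a logical right shift by its log2, and that log2 can be computed as (BitWidth - 1) - ctlz(value), which is exactly what the new BuildLogBase2 helper emits. A minimal standalone C++ sketch that checks these identities for the divisors used in the vector tests (illustration only, not part of the patch; __builtin_clz/__builtin_ctz are GCC/Clang intrinsics):

#include <cassert>
#include <cstdint>

// log2 of a non-zero power of two via count-leading-zeros, mirroring
// BuildLogBase2: LogBase2(V) = (EltBits - 1) - ctlz(V).
static unsigned logBase2(uint32_t PowerOf2) {
  return 31u - static_cast<unsigned>(__builtin_clz(PowerOf2));
}

int main() {
  const uint32_t X = 1234567u;
  const uint32_t Divisors[] = {1u, 4u, 8u, 16u}; // divisors from the vector tests
  for (uint32_t D : Divisors) {
    assert(logBase2(D) == static_cast<unsigned>(__builtin_ctz(D)));
    assert(X / D == X >> logBase2(D)); // udiv x, pow2  ->  x >>u log2(pow2)
    assert(X % D == (X & (D - 1u)));   // urem x, pow2  ->  and x, pow2-1
  }
  return 0;
}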


@@ -360,6 +360,7 @@ namespace {
SDValue BuildSDIV(SDNode *N);
SDValue BuildSDIVPow2(SDNode *N);
SDValue BuildUDIV(SDNode *N);
SDValue BuildLogBase2(SDValue Op, const SDLoc &DL);
SDValue BuildReciprocalEstimate(SDValue Op, SDNodeFlags *Flags);
SDValue buildRsqrtEstimate(SDValue Op, SDNodeFlags *Flags);
SDValue buildSqrtEstimate(SDValue Op, SDNodeFlags *Flags);
@@ -2415,24 +2416,31 @@ SDValue DAGCombiner::visitUDIV(SDNode *N) {
return Folded;
// fold (udiv x, (1 << c)) -> x >>u c
if (N1C && !N1C->isOpaque() && N1C->getAPIntValue().isPowerOf2())
return DAG.getNode(ISD::SRL, DL, VT, N0,
DAG.getConstant(N1C->getAPIntValue().logBase2(), DL,
getShiftAmountTy(N0.getValueType())));
if (isConstantOrConstantVector(N1, /*NoOpaques*/ true) &&
DAG.isKnownToBeAPowerOfTwo(N1)) {
SDValue LogBase2 = BuildLogBase2(N1, DL);
AddToWorklist(LogBase2.getNode());
EVT ShiftVT = getShiftAmountTy(N0.getValueType());
SDValue Trunc = DAG.getZExtOrTrunc(LogBase2, DL, ShiftVT);
AddToWorklist(Trunc.getNode());
return DAG.getNode(ISD::SRL, DL, VT, N0, Trunc);
}
// fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2
if (N1.getOpcode() == ISD::SHL) {
if (ConstantSDNode *SHC = isConstOrConstSplat(N1.getOperand(0))) {
if (!SHC->isOpaque() && SHC->getAPIntValue().isPowerOf2()) {
EVT ADDVT = N1.getOperand(1).getValueType();
SDValue Add = DAG.getNode(ISD::ADD, DL, ADDVT,
N1.getOperand(1),
DAG.getConstant(SHC->getAPIntValue()
.logBase2(),
DL, ADDVT));
AddToWorklist(Add.getNode());
return DAG.getNode(ISD::SRL, DL, VT, N0, Add);
}
SDValue N10 = N1.getOperand(0);
if (isConstantOrConstantVector(N10, /*NoOpaques*/ true) &&
DAG.isKnownToBeAPowerOfTwo(N10)) {
SDValue LogBase2 = BuildLogBase2(N10, DL);
AddToWorklist(LogBase2.getNode());
EVT ADDVT = N1.getOperand(1).getValueType();
SDValue Trunc = DAG.getZExtOrTrunc(LogBase2, DL, ADDVT);
AddToWorklist(Trunc.getNode());
SDValue Add = DAG.getNode(ISD::ADD, DL, ADDVT, N1.getOperand(1), Trunc);
AddToWorklist(Add.getNode());
return DAG.getNode(ISD::SRL, DL, VT, N0, Add);
}
}
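
A worked instance of the (udiv x, (shl c, y)) fold above, sketched in plain C++ under the same preconditions (c is a power of two and the shift stays in range); this is an illustration, not patch code. With c = 4 and y = 3 the divisor is 32, so the divide becomes a right shift by log2(4) + 3 = 5:

#include <cassert>
#include <cstdint>

int main() {
  // udiv x, (shl c, y) -> x >>u (log2(c) + y) when c is a power of two and
  // the shift does not overflow.
  const uint32_t X = 0xDEADBEEFu;
  const uint32_t C = 4u; // log2(C) == 2
  const uint32_t Y = 3u; // divisor = 4 << 3 = 32
  assert(X / (C << Y) == X >> (2u + Y));
  return 0;
}
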
@@ -2482,21 +2490,21 @@ SDValue DAGCombiner::visitREM(SDNode *N) {
return DAG.getNode(ISD::UREM, DL, VT, N0, N1);
} else {
// fold (urem x, pow2) -> (and x, pow2-1)
if (N1C && !N1C->isNullValue() && !N1C->isOpaque() &&
N1C->getAPIntValue().isPowerOf2()) {
return DAG.getNode(ISD::AND, DL, VT, N0,
DAG.getConstant(N1C->getAPIntValue() - 1, DL, VT));
if (DAG.isKnownToBeAPowerOfTwo(N1)) {
APInt NegOne = APInt::getAllOnesValue(VT.getScalarSizeInBits());
SDValue Add =
DAG.getNode(ISD::ADD, DL, VT, N1, DAG.getConstant(NegOne, DL, VT));
AddToWorklist(Add.getNode());
return DAG.getNode(ISD::AND, DL, VT, N0, Add);
}
// fold (urem x, (shl pow2, y)) -> (and x, (add (shl pow2, y), -1))
if (N1.getOpcode() == ISD::SHL) {
ConstantSDNode *SHC = isConstOrConstSplat(N1.getOperand(0));
if (SHC && !SHC->isOpaque() && SHC->getAPIntValue().isPowerOf2()) {
APInt NegOne = APInt::getAllOnesValue(VT.getScalarSizeInBits());
SDValue Add =
DAG.getNode(ISD::ADD, DL, VT, N1, DAG.getConstant(NegOne, DL, VT));
AddToWorklist(Add.getNode());
return DAG.getNode(ISD::AND, DL, VT, N0, Add);
}
if (N1.getOpcode() == ISD::SHL &&
DAG.isKnownToBeAPowerOfTwo(N1.getOperand(0))) {
APInt NegOne = APInt::getAllOnesValue(VT.getScalarSizeInBits());
SDValue Add =
DAG.getNode(ISD::ADD, DL, VT, N1, DAG.getConstant(NegOne, DL, VT));
AddToWorklist(Add.getNode());
return DAG.getNode(ISD::AND, DL, VT, N0, Add);
}
}
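
The urem folds above build the pow2 - 1 mask by adding an all-ones constant rather than subtracting one; the two are equivalent modulo 2^BitWidth, and the add form applies lane-wise to non-uniform vector constants. A scalar C++ sketch of the equivalence (illustration only):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t AllOnes = 0xFFFFFFFFu; // like APInt::getAllOnesValue(32)
  const uint32_t X = 0x12345678u;
  const uint32_t Pow2s[] = {1u, 4u, 8u, 16u};
  for (uint32_t P : Pow2s) {
    assert(P + AllOnes == P - 1u);        // adding -1 == subtracting 1 (mod 2^32)
    assert(X % P == (X & (P + AllOnes))); // urem x, pow2 -> and x, (pow2 + -1)
  }
  return 0;
}
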
@@ -15238,6 +15246,17 @@ SDValue DAGCombiner::BuildUDIV(SDNode *N) {
return S;
}
/// Determines the LogBase2 value for a non-null input value using the
/// transform: LogBase2(V) = (EltBits - 1) - ctlz(V).
SDValue DAGCombiner::BuildLogBase2(SDValue V, const SDLoc &DL) {
EVT VT = V.getValueType();
unsigned EltBits = VT.getScalarSizeInBits();
SDValue Ctlz = DAG.getNode(ISD::CTLZ, DL, VT, V);
SDValue Base = DAG.getConstant(EltBits - 1, DL, VT);
SDValue LogBase2 = DAG.getNode(ISD::SUB, DL, VT, Base, Ctlz);
return LogBase2;
}
/// Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i)
/// For the reciprocal, we need to find the zero of the function:
/// F(X) = A X - 1 [which has a zero at X = 1/A]
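
Applied to the divisor vector <i32 1, i32 4, i32 8, i32 16> used throughout the tests, BuildLogBase2 produces ctlz(<1,4,8,16>) = <31,29,28,27> and then <31,31,31,31> - <31,29,28,27> = <0,2,3,4>, exactly the per-lane shift amounts visible in the updated SSE/AVX checks below (psrld $2/$3/$4, or a single variable shift on AVX2).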


@@ -2727,6 +2727,13 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
}
bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const {
EVT OpVT = Val.getValueType();
unsigned BitWidth = OpVT.getScalarSizeInBits();
// Is the constant a known power of 2?
if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val))
return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
// A left-shift of a constant one will have exactly one bit set because
// shifting the bit off the end is undefined.
if (Val.getOpcode() == ISD::SHL) {
@@ -2743,12 +2750,19 @@ bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const {
return true;
}
// Are all operands of a build vector constant powers of two?
if (Val.getOpcode() == ISD::BUILD_VECTOR)
if (llvm::all_of(Val->ops(), [this, BitWidth](SDValue E) {
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E))
return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
return false;
}))
return true;
// More could be done here, though the above checks are enough
// to handle some common cases.
// Fall back to computeKnownBits to catch other known cases.
EVT OpVT = Val.getValueType();
unsigned BitWidth = OpVT.getScalarSizeInBits();
APInt KnownZero, KnownOne;
computeKnownBits(Val, KnownZero, KnownOne);
return (KnownZero.countPopulation() == BitWidth - 1) &&
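
The visible part of the final return requires that BitWidth - 1 bits of the value are known zero; for the value to be provably a power of two, the one remaining bit must also be known set. The C++ sketch below models that full predicate as an assumption about the continuation cut off by the rendering, not a quote of the DAG implementation:

#include <cassert>
#include <cstdint>

// If BitWidth - 1 bits are known zero and one bit is known one, the value can
// only be that single set bit, i.e. a power of two.
static bool knownBitsSayPowerOfTwo(uint32_t KnownZero, uint32_t KnownOne,
                                   unsigned BitWidth) {
  return __builtin_popcount(KnownZero) == static_cast<int>(BitWidth - 1) &&
         __builtin_popcount(KnownOne) == 1;
}

int main() {
  // All bits but bit 3 known zero, and bit 3 known one: the value must be 8.
  assert(knownBitsSayPowerOfTwo(0xFFFFFFF7u, 0x8u, 32));
  // Bit 3 merely possible but not known one: the value could be 0, so reject.
  assert(!knownBitsSayPowerOfTwo(0xFFFFFFF7u, 0x0u, 32));
  return 0;
}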


@@ -82,30 +82,33 @@ define <4 x i32> @combine_vec_sdiv_by_pos1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_by_pos1:
; SSE: # BB#0:
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: pextrd $1, %xmm0, %eax
; SSE-NEXT: shrl $2, %eax
; SSE-NEXT: pextrd $2, %xmm0, %ecx
; SSE-NEXT: pextrd $3, %xmm0, %edx
; SSE-NEXT: pinsrd $1, %eax, %xmm0
; SSE-NEXT: shrl $3, %ecx
; SSE-NEXT: pinsrd $2, %ecx, %xmm0
; SSE-NEXT: shrl $4, %edx
; SSE-NEXT: pinsrd $3, %edx, %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $3, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: psrld $4, %xmm0
; SSE-NEXT: psrld $2, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_sdiv_by_pos1:
; AVX: # BB#0:
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpextrd $1, %xmm0, %eax
; AVX-NEXT: shrl $2, %eax
; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm1
; AVX-NEXT: vpextrd $2, %xmm0, %eax
; AVX-NEXT: shrl $3, %eax
; AVX-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
; AVX-NEXT: vpextrd $3, %xmm0, %eax
; AVX-NEXT: shrl $4, %eax
; AVX-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
; AVX-NEXT: retq
; AVX1-LABEL: combine_vec_sdiv_by_pos1:
; AVX1: # BB#0:
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vpsrld $4, %xmm0, %xmm1
; AVX1-NEXT: vpsrld $2, %xmm0, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: vpsrld $3, %xmm0, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_vec_sdiv_by_pos1:
; AVX2: # BB#0:
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: retq
%1 = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
%2 = sdiv <4 x i32> %1, <i32 1, i32 4, i32 8, i32 16>
ret <4 x i32> %2


@@ -54,36 +54,12 @@ define <4 x i32> @combine_vec_srem_by_pos0(<4 x i32> %x) {
define <4 x i32> @combine_vec_srem_by_pos1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_srem_by_pos1:
; SSE: # BB#0:
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: pextrd $3, %xmm0, %eax
; SSE-NEXT: andl $15, %eax
; SSE-NEXT: movd %eax, %xmm1
; SSE-NEXT: pextrd $2, %xmm0, %eax
; SSE-NEXT: andl $7, %eax
; SSE-NEXT: movd %eax, %xmm2
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE-NEXT: pextrd $1, %xmm0, %eax
; SSE-NEXT: andl $3, %eax
; SSE-NEXT: movd %eax, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,2,3]
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_srem_by_pos1:
; AVX: # BB#0:
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpextrd $3, %xmm0, %eax
; AVX-NEXT: andl $15, %eax
; AVX-NEXT: vmovd %eax, %xmm1
; AVX-NEXT: vpextrd $2, %xmm0, %eax
; AVX-NEXT: andl $7, %eax
; AVX-NEXT: vmovd %eax, %xmm2
; AVX-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; AVX-NEXT: vpextrd $1, %xmm0, %eax
; AVX-NEXT: andl $3, %eax
; AVX-NEXT: vmovd %eax, %xmm0
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,2,3]
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%1 = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
%2 = srem <4 x i32> %1, <i32 1, i32 4, i32 8, i32 16>


@@ -47,29 +47,31 @@ define <4 x i32> @combine_vec_udiv_by_pow2a(<4 x i32> %x) {
define <4 x i32> @combine_vec_udiv_by_pow2b(<4 x i32> %x) {
; SSE-LABEL: combine_vec_udiv_by_pow2b:
; SSE: # BB#0:
; SSE-NEXT: pextrd $1, %xmm0, %eax
; SSE-NEXT: shrl $2, %eax
; SSE-NEXT: pextrd $2, %xmm0, %ecx
; SSE-NEXT: pextrd $3, %xmm0, %edx
; SSE-NEXT: pinsrd $1, %eax, %xmm0
; SSE-NEXT: shrl $3, %ecx
; SSE-NEXT: pinsrd $2, %ecx, %xmm0
; SSE-NEXT: shrl $4, %edx
; SSE-NEXT: pinsrd $3, %edx, %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $3, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: psrld $4, %xmm0
; SSE-NEXT: psrld $2, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_udiv_by_pow2b:
; AVX: # BB#0:
; AVX-NEXT: vpextrd $1, %xmm0, %eax
; AVX-NEXT: shrl $2, %eax
; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm1
; AVX-NEXT: vpextrd $2, %xmm0, %eax
; AVX-NEXT: shrl $3, %eax
; AVX-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
; AVX-NEXT: vpextrd $3, %xmm0, %eax
; AVX-NEXT: shrl $4, %eax
; AVX-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
; AVX-NEXT: retq
; AVX1-LABEL: combine_vec_udiv_by_pow2b:
; AVX1: # BB#0:
; AVX1-NEXT: vpsrld $4, %xmm0, %xmm1
; AVX1-NEXT: vpsrld $2, %xmm0, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: vpsrld $3, %xmm0, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_vec_udiv_by_pow2b:
; AVX2: # BB#0:
; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: retq
%1 = udiv <4 x i32> %x, <i32 1, i32 4, i32 8, i32 16>
ret <4 x i32> %1
}
@@ -129,88 +131,47 @@ define <4 x i32> @combine_vec_udiv_by_shl_pow2a(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @combine_vec_udiv_by_shl_pow2b(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_udiv_by_shl_pow2b:
; SSE: # BB#0:
; SSE-NEXT: pslld $23, %xmm1
; SSE-NEXT: paddd {{.*}}(%rip), %xmm1
; SSE-NEXT: cvttps2dq %xmm1, %xmm2
; SSE-NEXT: pmulld {{.*}}(%rip), %xmm2
; SSE-NEXT: pextrd $1, %xmm0, %eax
; SSE-NEXT: pextrd $1, %xmm2, %ecx
; SSE-NEXT: xorl %edx, %edx
; SSE-NEXT: divl %ecx
; SSE-NEXT: movl %eax, %ecx
; SSE-NEXT: movd %xmm0, %eax
; SSE-NEXT: movd %xmm2, %esi
; SSE-NEXT: xorl %edx, %edx
; SSE-NEXT: divl %esi
; SSE-NEXT: movd %eax, %xmm1
; SSE-NEXT: pinsrd $1, %ecx, %xmm1
; SSE-NEXT: pextrd $2, %xmm0, %eax
; SSE-NEXT: pextrd $2, %xmm2, %ecx
; SSE-NEXT: xorl %edx, %edx
; SSE-NEXT: divl %ecx
; SSE-NEXT: pinsrd $2, %eax, %xmm1
; SSE-NEXT: pextrd $3, %xmm0, %eax
; SSE-NEXT: pextrd $3, %xmm2, %ecx
; SSE-NEXT: xorl %edx, %edx
; SSE-NEXT: divl %ecx
; SSE-NEXT: pinsrd $3, %eax, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrld %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrlq $32, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm4
; SSE-NEXT: psrld %xmm2, %xmm4
; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm3[4,5,6,7]
; SSE-NEXT: pxor %xmm2, %xmm2
; SSE-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrld %xmm1, %xmm2
; SSE-NEXT: psrld %xmm3, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_vec_udiv_by_shl_pow2b:
; AVX1: # BB#0:
; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpextrd $1, %xmm0, %eax
; AVX1-NEXT: vpextrd $1, %xmm1, %ecx
; AVX1-NEXT: xorl %edx, %edx
; AVX1-NEXT: divl %ecx
; AVX1-NEXT: movl %eax, %ecx
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: vmovd %xmm1, %esi
; AVX1-NEXT: xorl %edx, %edx
; AVX1-NEXT: divl %esi
; AVX1-NEXT: vmovd %eax, %xmm2
; AVX1-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
; AVX1-NEXT: vpextrd $2, %xmm0, %eax
; AVX1-NEXT: vpextrd $2, %xmm1, %ecx
; AVX1-NEXT: xorl %edx, %edx
; AVX1-NEXT: divl %ecx
; AVX1-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
; AVX1-NEXT: vpextrd $3, %xmm0, %eax
; AVX1-NEXT: vpextrd $3, %xmm1, %ecx
; AVX1-NEXT: xorl %edx, %edx
; AVX1-NEXT: divl %ecx
; AVX1-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0
; AVX1-NEXT: vpsrldq {{.*#+}} xmm2 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vpsrld %xmm2, %xmm0, %xmm2
; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm3
; AVX1-NEXT: vpsrld %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
; AVX1-NEXT: vpsrld %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT: vpsrld %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_vec_udiv_by_shl_pow2b:
; AVX2: # BB#0:
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [1,4,8,16]
; AVX2-NEXT: vpsllvd %xmm1, %xmm2, %xmm1
; AVX2-NEXT: vpextrd $1, %xmm1, %ecx
; AVX2-NEXT: vpextrd $1, %xmm0, %eax
; AVX2-NEXT: xorl %edx, %edx
; AVX2-NEXT: divl %ecx
; AVX2-NEXT: movl %eax, %ecx
; AVX2-NEXT: vmovd %xmm1, %esi
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: xorl %edx, %edx
; AVX2-NEXT: divl %esi
; AVX2-NEXT: vmovd %eax, %xmm2
; AVX2-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
; AVX2-NEXT: vpextrd $2, %xmm1, %ecx
; AVX2-NEXT: vpextrd $2, %xmm0, %eax
; AVX2-NEXT: xorl %edx, %edx
; AVX2-NEXT: divl %ecx
; AVX2-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
; AVX2-NEXT: vpextrd $3, %xmm1, %ecx
; AVX2-NEXT: vpextrd $3, %xmm0, %eax
; AVX2-NEXT: xorl %edx, %edx
; AVX2-NEXT: divl %ecx
; AVX2-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0
; AVX2-NEXT: vpaddd {{.*}}(%rip), %xmm1, %xmm1
; AVX2-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
%1 = shl <4 x i32> <i32 1, i32 4, i32 8, i32 16>, %y
%2 = udiv <4 x i32> %x, %1


@@ -53,34 +53,12 @@ define <4 x i32> @combine_vec_urem_by_pow2a(<4 x i32> %x) {
define <4 x i32> @combine_vec_urem_by_pow2b(<4 x i32> %x) {
; SSE-LABEL: combine_vec_urem_by_pow2b:
; SSE: # BB#0:
; SSE-NEXT: pextrd $3, %xmm0, %eax
; SSE-NEXT: andl $15, %eax
; SSE-NEXT: movd %eax, %xmm1
; SSE-NEXT: pextrd $2, %xmm0, %eax
; SSE-NEXT: andl $7, %eax
; SSE-NEXT: movd %eax, %xmm2
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE-NEXT: pextrd $1, %xmm0, %eax
; SSE-NEXT: andl $3, %eax
; SSE-NEXT: movd %eax, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,2,3]
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_urem_by_pow2b:
; AVX: # BB#0:
; AVX-NEXT: vpextrd $3, %xmm0, %eax
; AVX-NEXT: andl $15, %eax
; AVX-NEXT: vmovd %eax, %xmm1
; AVX-NEXT: vpextrd $2, %xmm0, %eax
; AVX-NEXT: andl $7, %eax
; AVX-NEXT: vmovd %eax, %xmm2
; AVX-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; AVX-NEXT: vpextrd $1, %xmm0, %eax
; AVX-NEXT: andl $3, %eax
; AVX-NEXT: vmovd %eax, %xmm0
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,2,3]
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%1 = urem <4 x i32> %x, <i32 1, i32 4, i32 8, i32 16>
ret <4 x i32> %1
@@ -128,30 +106,11 @@ define <4 x i32> @combine_vec_urem_by_shl_pow2b(<4 x i32> %x, <4 x i32> %y) {
; SSE: # BB#0:
; SSE-NEXT: pslld $23, %xmm1
; SSE-NEXT: paddd {{.*}}(%rip), %xmm1
; SSE-NEXT: cvttps2dq %xmm1, %xmm2
; SSE-NEXT: pmulld {{.*}}(%rip), %xmm2
; SSE-NEXT: pextrd $1, %xmm0, %eax
; SSE-NEXT: pextrd $1, %xmm2, %ecx
; SSE-NEXT: xorl %edx, %edx
; SSE-NEXT: divl %ecx
; SSE-NEXT: movl %edx, %ecx
; SSE-NEXT: movd %xmm0, %eax
; SSE-NEXT: movd %xmm2, %esi
; SSE-NEXT: xorl %edx, %edx
; SSE-NEXT: divl %esi
; SSE-NEXT: movd %edx, %xmm1
; SSE-NEXT: pinsrd $1, %ecx, %xmm1
; SSE-NEXT: pextrd $2, %xmm0, %eax
; SSE-NEXT: pextrd $2, %xmm2, %ecx
; SSE-NEXT: xorl %edx, %edx
; SSE-NEXT: divl %ecx
; SSE-NEXT: pinsrd $2, %edx, %xmm1
; SSE-NEXT: pextrd $3, %xmm0, %eax
; SSE-NEXT: pextrd $3, %xmm2, %ecx
; SSE-NEXT: xorl %edx, %edx
; SSE-NEXT: divl %ecx
; SSE-NEXT: pinsrd $3, %edx, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: cvttps2dq %xmm1, %xmm1
; SSE-NEXT: pmulld {{.*}}(%rip), %xmm1
; SSE-NEXT: pcmpeqd %xmm2, %xmm2
; SSE-NEXT: paddd %xmm1, %xmm2
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_vec_urem_by_shl_pow2b:
@@ -160,54 +119,18 @@ define <4 x i32> @combine_vec_urem_by_shl_pow2b(<4 x i32> %x, <4 x i32> %y) {
; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpextrd $1, %xmm0, %eax
; AVX1-NEXT: vpextrd $1, %xmm1, %ecx
; AVX1-NEXT: xorl %edx, %edx
; AVX1-NEXT: divl %ecx
; AVX1-NEXT: movl %edx, %ecx
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: vmovd %xmm1, %esi
; AVX1-NEXT: xorl %edx, %edx
; AVX1-NEXT: divl %esi
; AVX1-NEXT: vmovd %edx, %xmm2
; AVX1-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
; AVX1-NEXT: vpextrd $2, %xmm0, %eax
; AVX1-NEXT: vpextrd $2, %xmm1, %ecx
; AVX1-NEXT: xorl %edx, %edx
; AVX1-NEXT: divl %ecx
; AVX1-NEXT: vpinsrd $2, %edx, %xmm2, %xmm2
; AVX1-NEXT: vpextrd $3, %xmm0, %eax
; AVX1-NEXT: vpextrd $3, %xmm1, %ecx
; AVX1-NEXT: xorl %edx, %edx
; AVX1-NEXT: divl %ecx
; AVX1-NEXT: vpinsrd $3, %edx, %xmm2, %xmm0
; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_vec_urem_by_shl_pow2b:
; AVX2: # BB#0:
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [1,4,8,16]
; AVX2-NEXT: vpsllvd %xmm1, %xmm2, %xmm1
; AVX2-NEXT: vpextrd $1, %xmm1, %ecx
; AVX2-NEXT: vpextrd $1, %xmm0, %eax
; AVX2-NEXT: xorl %edx, %edx
; AVX2-NEXT: divl %ecx
; AVX2-NEXT: movl %edx, %ecx
; AVX2-NEXT: vmovd %xmm1, %esi
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: xorl %edx, %edx
; AVX2-NEXT: divl %esi
; AVX2-NEXT: vmovd %edx, %xmm2
; AVX2-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
; AVX2-NEXT: vpextrd $2, %xmm1, %ecx
; AVX2-NEXT: vpextrd $2, %xmm0, %eax
; AVX2-NEXT: xorl %edx, %edx
; AVX2-NEXT: divl %ecx
; AVX2-NEXT: vpinsrd $2, %edx, %xmm2, %xmm2
; AVX2-NEXT: vpextrd $3, %xmm1, %ecx
; AVX2-NEXT: vpextrd $3, %xmm0, %eax
; AVX2-NEXT: xorl %edx, %edx
; AVX2-NEXT: divl %ecx
; AVX2-NEXT: vpinsrd $3, %edx, %xmm2, %xmm0
; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
%1 = shl <4 x i32> <i32 1, i32 4, i32 8, i32 16>, %y
%2 = urem <4 x i32> %x, %1


@@ -31,18 +31,17 @@ define i25 @shift_left_pow_2(i25 %x, i25 %y) {
ret i25 %urem
}
; FIXME: A logically right-shifted sign bit is a power-of-2 or UB.
; A logically right-shifted sign bit is a power-of-2 or UB.
define i16 @shift_right_pow_2(i16 %x, i16 %y) {
; CHECK-LABEL: shift_right_pow_2:
; CHECK: # BB#0:
; CHECK-NEXT: movl $32768, %r8d # imm = 0x8000
; CHECK-NEXT: movl $32768, %eax # imm = 0x8000
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: shrl %cl, %r8d
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: divw %r8w
; CHECK-NEXT: movl %edx, %eax
; CHECK-NEXT: shrl %cl, %eax
; CHECK-NEXT: decl %eax
; CHECK-NEXT: andl %edi, %eax
; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; CHECK-NEXT: retq
;
%shr = lshr i16 -32768, %y