
[X86] Add a DAG combine to simplify PMULDQ/PMULUDQ nodes

These nodes only use the lower 32 bits of their inputs, so we can use SimplifyDemandedBits to simplify them.

Differential Revision: https://reviews.llvm.org/D44375

llvm-svn: 328405
Craig Topper 2018-03-24 01:52:01 +00:00
parent c9e40d03d4
commit 86429c61a4
2 changed files with 45 additions and 18 deletions
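For context, PMULDQ and PMULUDQ multiply the low 32 bits of each 64-bit vector element (sign-extended and zero-extended, respectively) into a full 64-bit product, so bits [63:32] of either input are dead. A minimal scalar model of one lane, as standalone C++ (illustrative only, not LLVM API):

#include <cassert>
#include <cstdint>

// One 64-bit lane of PMULUDQ: zero-extend the low 32 bits of each input,
// then multiply. Bits [63:32] of a and b never influence the result.
uint64_t pmuludq_lane(uint64_t a, uint64_t b) {
  return uint64_t(uint32_t(a)) * uint64_t(uint32_t(b));
}

// One 64-bit lane of PMULDQ: same shape, but the low halves are sign-extended.
int64_t pmuldq_lane(uint64_t a, uint64_t b) {
  return int64_t(int32_t(a)) * int64_t(int32_t(b));
}

int main() {
  // The garbage in the high halves is ignored entirely.
  assert(pmuludq_lane(0xdeadbeef00000007ULL, 0xcafef00d00000006ULL) == 42);
  assert(pmuldq_lane(0x00000000ffffffffULL, 6) == -6); // low half reads as -1
  return 0;
}

This is why asking SimplifyDemandedBits for only the low 32 bits of each operand is safe.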


@@ -38534,6 +38534,33 @@ static SDValue combineScalarToVector(SDNode *N, SelectionDAG &DAG) {
   return SDValue();
 }
 
+// Simplify PMULDQ and PMULUDQ operations.
+static SDValue combinePMULDQ(SDNode *N, SelectionDAG &DAG,
+                             TargetLowering::DAGCombinerInfo &DCI) {
+  SDValue LHS = N->getOperand(0);
+  SDValue RHS = N->getOperand(1);
+
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+  TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
+                                        !DCI.isBeforeLegalizeOps());
+  APInt DemandedMask(APInt::getLowBitsSet(64, 32));
+
+  // PMULDQ/PMULUDQ only use the lower 32 bits of each vector element.
+  KnownBits LHSKnown;
+  if (TLI.SimplifyDemandedBits(LHS, DemandedMask, LHSKnown, TLO)) {
+    DCI.CommitTargetLoweringOpt(TLO);
+    return SDValue(N, 0);
+  }
+
+  KnownBits RHSKnown;
+  if (TLI.SimplifyDemandedBits(RHS, DemandedMask, RHSKnown, TLO)) {
+    DCI.CommitTargetLoweringOpt(TLO);
+    return SDValue(N, 0);
+  }
+
+  return SDValue();
+}
+
 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
   SelectionDAG &DAG = DCI.DAG;
@@ -38655,6 +38682,8 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
   case ISD::MSCATTER:       return combineGatherScatter(N, DAG, DCI, Subtarget);
   case X86ISD::PCMPEQ:
   case X86ISD::PCMPGT:      return combineVectorCompare(N, DAG, Subtarget);
+  case X86ISD::PMULDQ:
+  case X86ISD::PMULUDQ:     return combinePMULDQ(N, DAG, DCI);
   }
 
   return SDValue();

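The X86 assembly changes below show the payoff. A v2i64 multiply by a constant C is expanded into three 32x32->64 partial products per lane; once the combiner knows pmuludq ignores the high halves, the runtime psrlq $32 of the constant vector disappears in favor of a second constant-pool entry that folds directly into a pmuludq memory operand ({{\.LCPI.*}}), leaving each function one instruction shorter. A standalone C++ sanity check of the identity the expansion relies on (arbitrary sample values; a sketch, not LLVM code):

#include <cassert>
#include <cstdint>

static uint64_t lo(uint64_t v) { return v & 0xffffffffULL; }
static uint64_t hi(uint64_t v) { return v >> 32; }

int main() {
  uint64_t a = 0x0123456789abcdefULL; // arbitrary input lane
  uint64_t c = 0xfffffffffffffff1ULL; // -15, lane 0 of mul_v2i64_neg_15_63
  // Three pmuludq-style partial products; the hi(a)*hi(c) term shifts
  // entirely out of range mod 2^64, so it is never computed.
  uint64_t got = lo(a) * lo(c) + ((hi(a) * lo(c) + lo(a) * hi(c)) << 32);
  assert(got == a * c);
  return 0;
}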

@@ -692,17 +692,16 @@ define <2 x i64> @mul_v2i64_15_63(<2 x i64> %a0) nounwind {
 define <2 x i64> @mul_v2i64_neg_15_63(<2 x i64> %a0) nounwind {
 ; X86-LABEL: mul_v2i64_neg_15_63:
 ; X86:       # %bb.0:
-; X86-NEXT:    movdqa %xmm0, %xmm1
-; X86-NEXT:    psrlq $32, %xmm1
-; X86-NEXT:    movdqa {{.*#+}} xmm2 = [4294967281,4294967295,4294967233,4294967295]
-; X86-NEXT:    pmuludq %xmm2, %xmm1
-; X86-NEXT:    movdqa %xmm2, %xmm3
+; X86-NEXT:    movdqa {{.*#+}} xmm1 = [4294967281,4294967295,4294967233,4294967295]
+; X86-NEXT:    movdqa %xmm0, %xmm2
+; X86-NEXT:    pmuludq %xmm1, %xmm2
+; X86-NEXT:    movdqa %xmm0, %xmm3
 ; X86-NEXT:    psrlq $32, %xmm3
-; X86-NEXT:    pmuludq %xmm0, %xmm3
-; X86-NEXT:    paddq %xmm1, %xmm3
-; X86-NEXT:    psllq $32, %xmm3
-; X86-NEXT:    pmuludq %xmm2, %xmm0
+; X86-NEXT:    pmuludq %xmm1, %xmm3
+; X86-NEXT:    pmuludq {{\.LCPI.*}}, %xmm0
 ; X86-NEXT:    paddq %xmm3, %xmm0
+; X86-NEXT:    psllq $32, %xmm0
+; X86-NEXT:    paddq %xmm2, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_v2i64_neg_15_63:
@@ -737,17 +736,16 @@ define <2 x i64> @mul_v2i64_neg_15_63(<2 x i64> %a0) nounwind {
 define <2 x i64> @mul_v2i64_neg_17_65(<2 x i64> %a0) nounwind {
 ; X86-LABEL: mul_v2i64_neg_17_65:
 ; X86:       # %bb.0:
-; X86-NEXT:    movdqa %xmm0, %xmm1
-; X86-NEXT:    psrlq $32, %xmm1
-; X86-NEXT:    movdqa {{.*#+}} xmm2 = [4294967279,4294967295,4294967231,4294967295]
-; X86-NEXT:    pmuludq %xmm2, %xmm1
-; X86-NEXT:    movdqa %xmm2, %xmm3
+; X86-NEXT:    movdqa {{.*#+}} xmm1 = [4294967279,4294967295,4294967231,4294967295]
+; X86-NEXT:    movdqa %xmm0, %xmm2
+; X86-NEXT:    pmuludq %xmm1, %xmm2
+; X86-NEXT:    movdqa %xmm0, %xmm3
 ; X86-NEXT:    psrlq $32, %xmm3
-; X86-NEXT:    pmuludq %xmm0, %xmm3
-; X86-NEXT:    paddq %xmm1, %xmm3
-; X86-NEXT:    psllq $32, %xmm3
-; X86-NEXT:    pmuludq %xmm2, %xmm0
+; X86-NEXT:    pmuludq %xmm1, %xmm3
+; X86-NEXT:    pmuludq {{\.LCPI.*}}, %xmm0
 ; X86-NEXT:    paddq %xmm3, %xmm0
+; X86-NEXT:    psllq $32, %xmm0
+; X86-NEXT:    paddq %xmm2, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_v2i64_neg_17_65: