
[X86] Prefer reduced width multiplication over pmulld on Silvermont

Summary:
Prefer expansions such as pmullw, pmulhw, punpcklwd, punpckhwd over pmulld.
On Silvermont [source: Intel Optimization Reference Manual]:
PMULLD has a throughput of 1/11 [instructions/cycle].
PMULHUW/PMULHW/PMULLW have a throughput of 1/2 [instructions/cycle].
Even counting the extra unpack instructions, the expanded sequence therefore
occupies the multiplier for far fewer cycles than a single pmulld on this core.

Fixes PR31202.

Analysis of this issue was done by Fahana Aleen.
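For illustration, a rough intrinsics-level model of the two lowerings (a sketch only,
not code from the patch; the function names are made up, and it assumes the 32-bit
operands are already known to fit in unsigned 16 bits):

#include <immintrin.h>

// SSE4.1 lowering: a single pmulld on four 32-bit lanes.
__m128i mul_v4i32_pmulld(__m128i a32, __m128i b32) {
  return _mm_mullo_epi32(a32, b32);          // pmulld
}

// Reduced-width lowering: operands packed into the low four 16-bit lanes.
// pmullw yields the low 16 bits of each product, pmulhuw the high 16 bits,
// and punpcklwd interleaves them back into four 32-bit results (punpckhwd
// would recover the upper lanes of a full 8-element input).
__m128i mul_v4i32_reduced(__m128i a16, __m128i b16) {
  __m128i Lo = _mm_mullo_epi16(a16, b16);    // pmullw
  __m128i Hi = _mm_mulhi_epu16(a16, b16);    // pmulhuw
  return _mm_unpacklo_epi16(Lo, Hi);         // punpcklwd
}

On Silvermont the second form keeps the multiplies on the fast pmullw/pmulhuw path,
which is what the DAG combine below now selects once PMULLD is marked slow.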

Reviewers: wmi, delena, mkuper

Subscribers: RKSimon, llvm-commits

Differential Revision: https://reviews.llvm.org/D27203

llvm-svn: 288844
Zvi Rackover 2016-12-06 19:35:20 +00:00
parent 09a65b6f28
commit 3eb819344b
5 changed files with 93 additions and 3 deletions

lib/Target/X86/X86.td

@@ -99,6 +99,8 @@ def FeatureSlowBTMem : SubtargetFeature<"slow-bt-mem", "IsBTMemSlow", "true",
"Bit testing of memory is slow">;
def FeatureSlowSHLD : SubtargetFeature<"slow-shld", "IsSHLDSlow", "true",
"SHLD instruction is slow">;
def FeatureSlowPMULLD : SubtargetFeature<"slow-pmulld", "IsPMULLDSlow", "true",
"PMULLD instruction is slow">;
// FIXME: This should not apply to CPUs that do not have SSE.
def FeatureSlowUAMem16 : SubtargetFeature<"slow-unaligned-mem-16",
"IsUAMem16Slow", "true",
@@ -403,6 +405,7 @@ class SilvermontProc<string Name> : ProcessorModel<Name, SLMModel, [
FeatureSlowLEA,
FeatureSlowIncDec,
FeatureSlowBTMem,
FeatureSlowPMULLD,
FeatureLAHFSAHF
]>;
def : SilvermontProc<"silvermont">;

lib/Target/X86/X86ISelLowering.cpp

@@ -29302,10 +29302,17 @@ static bool canReduceVMulWidth(SDNode *N, SelectionDAG &DAG, ShrinkMode &Mode) {
 /// generate pmullw+pmulhuw for it (MULU16 mode).
 static SDValue reduceVMULWidth(SDNode *N, SelectionDAG &DAG,
                                const X86Subtarget &Subtarget) {
-  // pmulld is supported since SSE41. It is better to use pmulld
-  // instead of pmullw+pmulhw.
-  if (Subtarget.hasSSE41() || !Subtarget.hasSSE2())
+  // Check for legality
+  // pmullw/pmulhw are not supported by SSE.
+  if (!Subtarget.hasSSE2())
     return SDValue();
+
+  // Check for profitability
+  // pmulld is supported since SSE41. It is better to use pmulld
+  // instead of pmullw+pmulhw, except for subtargets where pmulld is slower than
+  // the expansion.
+  bool OptForMinSize = DAG.getMachineFunction().getFunction()->optForMinSize();
+  if (Subtarget.hasSSE41() && (OptForMinSize || !Subtarget.isPMULLDSlow()))
+    return SDValue();

   ShrinkMode Mode;
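Read as a predicate, the gating logic above amounts to the following restatement
(illustrative only; this helper and its parameters are not part of the patch):

// Returns true when the pmullw/pmulhw expansion should be attempted.
static bool shouldTryReduceVMULWidth(bool HasSSE2, bool HasSSE41,
                                     bool IsPMULLDSlow, bool OptForMinSize) {
  // Legality: pmullw/pmulhw require SSE2.
  if (!HasSSE2)
    return false;
  // Profitability: with SSE4.1 a single pmulld is preferred unless the
  // subtarget reports it as slow; when optimizing for minimum size pmulld
  // always wins, since the expansion needs more instructions.
  if (HasSSE41 && (OptForMinSize || !IsPMULLDSlow))
    return false;
  return true;
}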

lib/Target/X86/X86Subtarget.cpp

@@ -228,6 +228,9 @@ void X86Subtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
else if (isTargetDarwin() || isTargetLinux() || isTargetSolaris() ||
isTargetKFreeBSD() || In64BitMode)
stackAlignment = 16;
assert((!isPMULLDSlow() || hasSSE41()) &&
"Feature Slow PMULLD can only be set on a subtarget with SSE4.1");
}
void X86Subtarget::initializeEnvironment() {
@@ -275,6 +278,7 @@ void X86Subtarget::initializeEnvironment() {
HasMWAITX = false;
HasMPX = false;
IsBTMemSlow = false;
IsPMULLDSlow = false;
IsSHLDSlow = false;
IsUAMem16Slow = false;
IsUAMem32Slow = false;

lib/Target/X86/X86Subtarget.h

@@ -178,6 +178,10 @@ protected:
/// True if SHLD instructions are slow.
bool IsSHLDSlow;
/// True if the PMULLD instruction is slow compared to PMULLW/PMULHW and
/// PMULUDQ.
bool IsPMULLDSlow;
/// True if unaligned memory accesses of 16-bytes are slow.
bool IsUAMem16Slow;
@@ -452,6 +456,7 @@ public:
bool hasMWAITX() const { return HasMWAITX; }
bool isBTMemSlow() const { return IsBTMemSlow; }
bool isSHLDSlow() const { return IsSHLDSlow; }
bool isPMULLDSlow() const { return IsPMULLDSlow; }
bool isUnalignedMem16Slow() const { return IsUAMem16Slow; }
bool isUnalignedMem32Slow() const { return IsUAMem32Slow; }
bool hasSSEUnalignedMem() const { return HasSSEUnalignedMem; }

test/CodeGen/X86/slow-pmulld.ll (new file)

@@ -0,0 +1,71 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i386-unknown-unknown -mcpu=silvermont | FileCheck %s --check-prefix=CHECK32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=silvermont | FileCheck %s --check-prefix=CHECK64
; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE4-32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE4-64
define <4 x i32> @foo(<4 x i8> %A) {
; CHECK32-LABEL: foo:
; CHECK32: # BB#0:
; CHECK32-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,xmm0[4],zero,xmm0[8],zero,xmm0[12],zero,xmm0[u,u,u,u,u,u,u,u]
; CHECK32-NEXT: movdqa {{.*#+}} xmm1 = <18778,18778,18778,18778,u,u,u,u>
; CHECK32-NEXT: movdqa %xmm0, %xmm2
; CHECK32-NEXT: pmullw %xmm1, %xmm0
; CHECK32-NEXT: pmulhw %xmm1, %xmm2
; CHECK32-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; CHECK32-NEXT: retl
;
; CHECK64-LABEL: foo:
; CHECK64: # BB#0:
; CHECK64-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,xmm0[4],zero,xmm0[8],zero,xmm0[12],zero,xmm0[u,u,u,u,u,u,u,u]
; CHECK64-NEXT: movdqa {{.*#+}} xmm1 = <18778,18778,18778,18778,u,u,u,u>
; CHECK64-NEXT: movdqa %xmm0, %xmm2
; CHECK64-NEXT: pmullw %xmm1, %xmm0
; CHECK64-NEXT: pmulhw %xmm1, %xmm2
; CHECK64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; CHECK64-NEXT: retq
;
; SSE4-32-LABEL: foo:
; SSE4-32: # BB#0:
; SSE4-32-NEXT: pand {{\.LCPI.*}}, %xmm0
; SSE4-32-NEXT: pmulld {{\.LCPI.*}}, %xmm0
; SSE4-32-NEXT: retl
;
; SSE4-64-LABEL: foo:
; SSE4-64: # BB#0:
; SSE4-64-NEXT: pand {{.*}}(%rip), %xmm0
; SSE4-64-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE4-64-NEXT: retq
%z = zext <4 x i8> %A to <4 x i32>
%m = mul nuw nsw <4 x i32> %z, <i32 18778, i32 18778, i32 18778, i32 18778>
ret <4 x i32> %m
}
define <4 x i32> @foo_os(<4 x i8> %A) minsize {
; CHECK32-LABEL: foo_os:
; CHECK32: # BB#0:
; CHECK32-NEXT: pand {{\.LCPI.*}}, %xmm0
; CHECK32-NEXT: pmulld {{\.LCPI.*}}, %xmm0
; CHECK32-NEXT: retl
;
; CHECK64-LABEL: foo_os:
; CHECK64: # BB#0:
; CHECK64-NEXT: pand {{.*}}(%rip), %xmm0
; CHECK64-NEXT: pmulld {{.*}}(%rip), %xmm0
; CHECK64-NEXT: retq
;
; SSE4-32-LABEL: foo_os:
; SSE4-32: # BB#0:
; SSE4-32-NEXT: pand {{\.LCPI.*}}, %xmm0
; SSE4-32-NEXT: pmulld {{\.LCPI.*}}, %xmm0
; SSE4-32-NEXT: retl
;
; SSE4-64-LABEL: foo_os:
; SSE4-64: # BB#0:
; SSE4-64-NEXT: pand {{.*}}(%rip), %xmm0
; SSE4-64-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE4-64-NEXT: retq
%z = zext <4 x i8> %A to <4 x i32>
%m = mul nuw nsw <4 x i32> %z, <i32 18778, i32 18778, i32 18778, i32 18778>
ret <4 x i32> %m
}