
[PGO][PGSO] Add profile guided size optimization to X86 ISel Lowering.

Author: Hiroshi Yamauchi
Date:   2020-07-09 10:19:00 -07:00
parent 8717bd1898
commit 45213c9ada
3 changed files with 16 additions and 34 deletions
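
The functional change is small: the two size checks in the X86 lowering code below now ask SelectionDAG::shouldOptForSize() instead of only Function::hasOptSize(), so code that a profile marks as cold is lowered as if it carried optsize. As a rough sketch of what such a profile-guided size optimization (PGSO) query boils down to (the helper name optForSizePGSO is hypothetical, and the exact shouldOptimizeForSize overload used here is an assumption rather than the patch's code):

// Illustrative sketch only; not the actual SelectionDAG::shouldOptForSize().
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Transforms/Utils/SizeOpts.h"

using namespace llvm;

// Hypothetical helper: should this function be lowered for size?
static bool optForSizePGSO(const Function &F, ProfileSummaryInfo *PSI,
                           BlockFrequencyInfo *BFI) {
  // Previous behavior: only the optsize/minsize attributes count.
  if (F.hasOptSize())
    return true;
  // PGSO extension: with profile data available, also treat profile-cold
  // code as size-optimized, even in an otherwise speed-optimized build.
  return PSI && BFI && shouldOptimizeForSize(&F, PSI, BFI);
}

Attribute-driven builds behave exactly as before; the query only adds cases where the profile says the code is cold.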

@@ -34448,7 +34448,7 @@ static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
     return DAG.getBitcast(RootVT, V1);
   }
-  bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
+  bool OptForSize = DAG.shouldOptForSize();
   unsigned RootSizeInBits = RootVT.getSizeInBits();
   unsigned NumRootElts = RootVT.getVectorNumElements();
   unsigned BaseMaskEltSizeInBits = RootSizeInBits / NumBaseMaskElts;
@@ -39290,7 +39290,7 @@ static SDValue combineReductionToHorizontal(SDNode *ExtElt, SelectionDAG &DAG,
   }
   // Only use (F)HADD opcodes if they aren't microcoded or minimizes codesize.
-  bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
+  bool OptForSize = DAG.shouldOptForSize();
   if (!Subtarget.hasFastHorizontalOps() && !OptForSize)
     return SDValue();
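
The comment at the second call site captures the trade-off: (F)HADD opcodes are often microcoded and slow, so they are normally gated on Subtarget.hasFastHorizontalOps(), but they are compact, which makes them worthwhile whenever the code is being optimized for size. Written out as a tiny standalone helper (an illustrative rewrite of that condition, not LLVM's code):

// Illustrative rewrite of the gating condition in combineReductionToHorizontal:
// emit the horizontal op when the target executes it quickly, or when the code
// is size-optimized (by attribute or, with this patch, by profile) and the
// smaller encoding wins.
static bool useHorizontalOp(bool HasFastHorizontalOps, bool OptForSize) {
  // Equivalent to the patched check:
  //   if (!Subtarget.hasFastHorizontalOps() && !OptForSize)
  //     return SDValue();
  return HasFastHorizontalOps || OptForSize;
}

Because OptForSize now comes from shouldOptForSize(), the hadd32_4_pgso test further below no longer needs separate SLOW/FAST check prefixes: both paths produce the compact horizontal-add sequence.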

@@ -397,8 +397,7 @@ define <4 x double> @shuffle_v4f64_zz23_optsize(<4 x double> %a) optsize {
 define <4 x double> @shuffle_v4f64_zz23_pgso(<4 x double> %a) !prof !14 {
 ; ALL-LABEL: shuffle_v4f64_zz23_pgso:
 ; ALL:       # %bb.0:
-; ALL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; ALL-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[2,3]
 ; ALL-NEXT:    retq
   %s = shufflevector <4 x double> %a, <4 x double> <double 0.0, double 0.0, double undef, double undef>, <4 x i32> <i32 4, i32 5, i32 2, i32 3>
   ret <4 x double> %s
@@ -441,8 +440,7 @@ define <4 x double> @shuffle_v4f64_zz67_optsize(<4 x double> %a) optsize {
 define <4 x double> @shuffle_v4f64_zz67_pgso(<4 x double> %a) !prof !14 {
 ; ALL-LABEL: shuffle_v4f64_zz67_pgso:
 ; ALL:       # %bb.0:
-; ALL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; ALL-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[2,3]
 ; ALL-NEXT:    retq
   %s = shufflevector <4 x double> <double 0.0, double 0.0, double undef, double undef>, <4 x double> %a, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
   ret <4 x double> %s

@@ -2095,35 +2095,19 @@ define i32 @hadd32_4_optsize(<4 x i32> %x225) optsize {
 }
 define i32 @hadd32_4_pgso(<4 x i32> %x225) !prof !14 {
-; SSE3-SLOW-LABEL: hadd32_4_pgso:
-; SSE3-SLOW:       # %bb.0:
-; SSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE3-SLOW-NEXT:    paddd %xmm0, %xmm1
-; SSE3-SLOW-NEXT:    phaddd %xmm1, %xmm1
-; SSE3-SLOW-NEXT:    movd %xmm1, %eax
-; SSE3-SLOW-NEXT:    retq
+; SSE3-LABEL: hadd32_4_pgso:
+; SSE3:       # %bb.0:
+; SSE3-NEXT:    phaddd %xmm0, %xmm0
+; SSE3-NEXT:    phaddd %xmm0, %xmm0
+; SSE3-NEXT:    movd %xmm0, %eax
+; SSE3-NEXT:    retq
 ;
-; SSE3-FAST-LABEL: hadd32_4_pgso:
-; SSE3-FAST:       # %bb.0:
-; SSE3-FAST-NEXT:    phaddd %xmm0, %xmm0
-; SSE3-FAST-NEXT:    phaddd %xmm0, %xmm0
-; SSE3-FAST-NEXT:    movd %xmm0, %eax
-; SSE3-FAST-NEXT:    retq
-;
-; AVX-SLOW-LABEL: hadd32_4_pgso:
-; AVX-SLOW:       # %bb.0:
-; AVX-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX-SLOW-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
-; AVX-SLOW-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
-; AVX-SLOW-NEXT:    vmovd %xmm0, %eax
-; AVX-SLOW-NEXT:    retq
-;
-; AVX-FAST-LABEL: hadd32_4_pgso:
-; AVX-FAST:       # %bb.0:
-; AVX-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
-; AVX-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
-; AVX-FAST-NEXT:    vmovd %xmm0, %eax
-; AVX-FAST-NEXT:    retq
+; AVX-LABEL: hadd32_4_pgso:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vmovd %xmm0, %eax
+; AVX-NEXT:    retq
   %x226 = shufflevector <4 x i32> %x225, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
   %x227 = add <4 x i32> %x225, %x226
   %x228 = shufflevector <4 x i32> %x227, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>