1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2025-01-31 20:51:52 +01:00

[ARM] Use isFMAFasterThanFMulAndFAdd for scalars as well as MVE vectors

This adds extra scalar handling to isFMAFasterThanFMulAndFAdd, allowing
the target independent code to handle more folds in more situations (for
example if the fast math flags are present, but the global
AllowFPOpFusion option isn't). It also splits apart the HasSlowFPVMLx
into HasSlowFPVFMx, to allow VFMA and VMLA to be controlled separately
if needed.

Differential Revision: https://reviews.llvm.org/D72139
This commit is contained in:
David Green 2020-01-05 10:59:21 +00:00
parent e74c6af49a
commit b3dd67f4cd
10 changed files with 52 additions and 25 deletions

View File

@ -303,6 +303,10 @@ def FeatureNonpipelinedVFP : SubtargetFeature<"nonpipelined-vfp",
def FeatureHasSlowFPVMLx : SubtargetFeature<"slowfpvmlx", "SlowFPVMLx", "true", def FeatureHasSlowFPVMLx : SubtargetFeature<"slowfpvmlx", "SlowFPVMLx", "true",
"Disable VFP / NEON MAC instructions">; "Disable VFP / NEON MAC instructions">;
// VFPv4 added VFMA instructions that can similar be fast or slow.
def FeatureHasSlowFPVFMx : SubtargetFeature<"slowfpvfmx", "SlowFPVFMx", "true",
"Disable VFP / NEON FMA instructions">;
// Cortex-A8 / A9 Advanced SIMD has multiplier accumulator forwarding. // Cortex-A8 / A9 Advanced SIMD has multiplier accumulator forwarding.
def FeatureVMLxForwarding : SubtargetFeature<"vmlx-forwarding", def FeatureVMLxForwarding : SubtargetFeature<"vmlx-forwarding",
"HasVMLxForwarding", "true", "HasVMLxForwarding", "true",
@ -588,6 +592,7 @@ def ProcExynos : SubtargetFeature<"exynos", "ARMProcFamily", "Exynos",
FeatureHWDivThumb, FeatureHWDivThumb,
FeatureHWDivARM, FeatureHWDivARM,
FeatureHasSlowFPVMLx, FeatureHasSlowFPVMLx,
FeatureHasSlowFPVFMx,
FeatureHasRetAddrStack, FeatureHasRetAddrStack,
FeatureFuseLiterals, FeatureFuseLiterals,
FeatureFuseAES, FeatureFuseAES,
@ -918,6 +923,7 @@ def : ProcessorModel<"cortex-a5", CortexA8Model, [ARMv7a, ProcA5,
FeatureTrustZone, FeatureTrustZone,
FeatureSlowFPBrcc, FeatureSlowFPBrcc,
FeatureHasSlowFPVMLx, FeatureHasSlowFPVMLx,
FeatureHasSlowFPVFMx,
FeatureVMLxForwarding, FeatureVMLxForwarding,
FeatureMP, FeatureMP,
FeatureVFP4]>; FeatureVFP4]>;
@ -928,6 +934,7 @@ def : ProcessorModel<"cortex-a7", CortexA8Model, [ARMv7a, ProcA7,
FeatureSlowFPBrcc, FeatureSlowFPBrcc,
FeatureHasVMLxHazards, FeatureHasVMLxHazards,
FeatureHasSlowFPVMLx, FeatureHasSlowFPVMLx,
FeatureHasSlowFPVFMx,
FeatureVMLxForwarding, FeatureVMLxForwarding,
FeatureMP, FeatureMP,
FeatureVFP4, FeatureVFP4,
@ -940,6 +947,7 @@ def : ProcessorModel<"cortex-a8", CortexA8Model, [ARMv7a, ProcA8,
FeatureSlowFPBrcc, FeatureSlowFPBrcc,
FeatureHasVMLxHazards, FeatureHasVMLxHazards,
FeatureHasSlowFPVMLx, FeatureHasSlowFPVMLx,
FeatureHasSlowFPVFMx,
FeatureVMLxForwarding]>; FeatureVMLxForwarding]>;
def : ProcessorModel<"cortex-a9", CortexA9Model, [ARMv7a, ProcA9, def : ProcessorModel<"cortex-a9", CortexA9Model, [ARMv7a, ProcA9,
@ -1009,6 +1017,7 @@ def : ProcessorModel<"swift", SwiftModel, [ARMv7a, ProcSwift,
FeatureAvoidPartialCPSR, FeatureAvoidPartialCPSR,
FeatureAvoidMOVsShOp, FeatureAvoidMOVsShOp,
FeatureHasSlowFPVMLx, FeatureHasSlowFPVMLx,
FeatureHasSlowFPVFMx,
FeatureHasVMLxHazards, FeatureHasVMLxHazards,
FeatureProfUnpredicate, FeatureProfUnpredicate,
FeaturePrefISHSTBarrier, FeaturePrefISHSTBarrier,
@ -1027,6 +1036,7 @@ def : ProcessorModel<"cortex-r4f", CortexA8Model, [ARMv7r, ProcR4,
FeatureHasRetAddrStack, FeatureHasRetAddrStack,
FeatureSlowFPBrcc, FeatureSlowFPBrcc,
FeatureHasSlowFPVMLx, FeatureHasSlowFPVMLx,
FeatureHasSlowFPVFMx,
FeatureVFP3_D16, FeatureVFP3_D16,
FeatureAvoidPartialCPSR]>; FeatureAvoidPartialCPSR]>;
@ -1036,6 +1046,7 @@ def : ProcessorModel<"cortex-r5", CortexA8Model, [ARMv7r, ProcR5,
FeatureSlowFPBrcc, FeatureSlowFPBrcc,
FeatureHWDivARM, FeatureHWDivARM,
FeatureHasSlowFPVMLx, FeatureHasSlowFPVMLx,
FeatureHasSlowFPVFMx,
FeatureAvoidPartialCPSR]>; FeatureAvoidPartialCPSR]>;
def : ProcessorModel<"cortex-r7", CortexA8Model, [ARMv7r, ProcR7, def : ProcessorModel<"cortex-r7", CortexA8Model, [ARMv7r, ProcR7,
@ -1046,6 +1057,7 @@ def : ProcessorModel<"cortex-r7", CortexA8Model, [ARMv7r, ProcR7,
FeatureSlowFPBrcc, FeatureSlowFPBrcc,
FeatureHWDivARM, FeatureHWDivARM,
FeatureHasSlowFPVMLx, FeatureHasSlowFPVMLx,
FeatureHasSlowFPVFMx,
FeatureAvoidPartialCPSR]>; FeatureAvoidPartialCPSR]>;
def : ProcessorModel<"cortex-r8", CortexA8Model, [ARMv7r, def : ProcessorModel<"cortex-r8", CortexA8Model, [ARMv7r,
@ -1056,6 +1068,7 @@ def : ProcessorModel<"cortex-r8", CortexA8Model, [ARMv7r,
FeatureSlowFPBrcc, FeatureSlowFPBrcc,
FeatureHWDivARM, FeatureHWDivARM,
FeatureHasSlowFPVMLx, FeatureHasSlowFPVMLx,
FeatureHasSlowFPVFMx,
FeatureAvoidPartialCPSR]>; FeatureAvoidPartialCPSR]>;
def : ProcessorModel<"cortex-m3", CortexM4Model, [ARMv7m, def : ProcessorModel<"cortex-m3", CortexM4Model, [ARMv7m,
@ -1073,6 +1086,7 @@ def : ProcessorModel<"cortex-m4", CortexM4Model, [ARMv7em,
FeatureVFP4_D16_SP, FeatureVFP4_D16_SP,
FeaturePrefLoopAlign32, FeaturePrefLoopAlign32,
FeatureHasSlowFPVMLx, FeatureHasSlowFPVMLx,
FeatureHasSlowFPVFMx,
FeatureUseMISched, FeatureUseMISched,
FeatureHasNoBranchPredictor]>; FeatureHasNoBranchPredictor]>;
@ -1087,6 +1101,7 @@ def : ProcessorModel<"cortex-m33", CortexM4Model, [ARMv8mMainline,
FeatureFPARMv8_D16_SP, FeatureFPARMv8_D16_SP,
FeaturePrefLoopAlign32, FeaturePrefLoopAlign32,
FeatureHasSlowFPVMLx, FeatureHasSlowFPVMLx,
FeatureHasSlowFPVFMx,
FeatureUseMISched, FeatureUseMISched,
FeatureHasNoBranchPredictor]>; FeatureHasNoBranchPredictor]>;
@ -1095,6 +1110,7 @@ def : ProcessorModel<"cortex-m35p", CortexM4Model, [ARMv8mMainline,
FeatureFPARMv8_D16_SP, FeatureFPARMv8_D16_SP,
FeaturePrefLoopAlign32, FeaturePrefLoopAlign32,
FeatureHasSlowFPVMLx, FeatureHasSlowFPVMLx,
FeatureHasSlowFPVFMx,
FeatureUseMISched, FeatureUseMISched,
FeatureHasNoBranchPredictor]>; FeatureHasNoBranchPredictor]>;
@ -1182,6 +1198,7 @@ def : ProcessorModel<"cyclone", SwiftModel, [ARMv8a, ProcSwift,
FeatureAvoidPartialCPSR, FeatureAvoidPartialCPSR,
FeatureAvoidMOVsShOp, FeatureAvoidMOVsShOp,
FeatureHasSlowFPVMLx, FeatureHasSlowFPVMLx,
FeatureHasSlowFPVFMx,
FeatureCrypto, FeatureCrypto,
FeatureUseMISched, FeatureUseMISched,
FeatureZCZeroing, FeatureZCZeroing,

View File

@ -15018,16 +15018,19 @@ int ARMTargetLowering::getScalingFactorCost(const DataLayout &DL,
/// patterns (and we don't have the non-fused floating point instruction). /// patterns (and we don't have the non-fused floating point instruction).
bool ARMTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, bool ARMTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
EVT VT) const { EVT VT) const {
if (!Subtarget->hasMVEFloatOps())
return false;
if (!VT.isSimple()) if (!VT.isSimple())
return false; return false;
switch (VT.getSimpleVT().SimpleTy) { switch (VT.getSimpleVT().SimpleTy) {
case MVT::v4f32: case MVT::v4f32:
case MVT::v8f16: case MVT::v8f16:
return true; return Subtarget->hasMVEFloatOps();
case MVT::f16:
return Subtarget->useFPVFMx16();
case MVT::f32:
return Subtarget->useFPVFMx();
case MVT::f64:
return Subtarget->useFPVFMx64();
default: default:
break; break;
} }

View File

@ -182,11 +182,9 @@ def UseMulOps : Predicate<"Subtarget->useMulOps()">;
// But only select them if more precision in FP computation is allowed, and when // But only select them if more precision in FP computation is allowed, and when
// they are not slower than a mul + add sequence. // they are not slower than a mul + add sequence.
// Do not use them for Darwin platforms. // Do not use them for Darwin platforms.
def UseFusedMAC : Predicate<"(TM.Options.AllowFPOpFusion ==" def UseFusedMAC : Predicate<"TM.Options.AllowFPOpFusion =="
" FPOpFusion::Fast && " " FPOpFusion::Fast && "
" Subtarget->hasVFP4Base()) && " "Subtarget->useFPVFMx()">;
"!Subtarget->isTargetDarwin() &&"
"Subtarget->useFPVMLx()">;
def HasFastVGETLNi32 : Predicate<"!Subtarget->hasSlowVGETLNi32()">; def HasFastVGETLNi32 : Predicate<"!Subtarget->hasSlowVGETLNi32()">;
def HasSlowVGETLNi32 : Predicate<"Subtarget->hasSlowVGETLNi32()">; def HasSlowVGETLNi32 : Predicate<"Subtarget->hasSlowVGETLNi32()">;

View File

@ -203,6 +203,10 @@ protected:
/// whether the FP VML[AS] instructions are slow (if so, don't use them). /// whether the FP VML[AS] instructions are slow (if so, don't use them).
bool SlowFPVMLx = false; bool SlowFPVMLx = false;
/// SlowFPVFMx - If the VFP4 / NEON instructions are available, indicates
/// whether the FP VFM[AS] instructions are slow (if so, don't use them).
bool SlowFPVFMx = false;
/// HasVMLxForwarding - If true, NEON has special multiplier accumulator /// HasVMLxForwarding - If true, NEON has special multiplier accumulator
/// forwarding to allow mul + mla being issued back to back. /// forwarding to allow mul + mla being issued back to back.
bool HasVMLxForwarding = false; bool HasVMLxForwarding = false;
@ -632,6 +636,11 @@ public:
bool useMulOps() const { return UseMulOps; } bool useMulOps() const { return UseMulOps; }
bool useFPVMLx() const { return !SlowFPVMLx; } bool useFPVMLx() const { return !SlowFPVMLx; }
bool useFPVFMx() const {
return !isTargetDarwin() && hasVFP4Base() && !SlowFPVFMx;
}
bool useFPVFMx16() const { return useFPVFMx() && hasFullFP16(); }
bool useFPVFMx64() const { return useFPVFMx() && hasFP64(); }
bool hasVMLxForwarding() const { return HasVMLxForwarding; } bool hasVMLxForwarding() const { return HasVMLxForwarding; }
bool isFPBrccSlow() const { return SlowFPBrcc; } bool isFPBrccSlow() const { return SlowFPBrcc; }
bool hasFP64() const { return HasFP64; } bool hasFP64() const { return HasFP64; }

View File

@ -69,15 +69,15 @@ class ARMTTIImpl : public BasicTTIImplBase<ARMTTIImpl> {
ARM::FeatureDontWidenVMOVS, ARM::FeatureExpandMLx, ARM::FeatureDontWidenVMOVS, ARM::FeatureExpandMLx,
ARM::FeatureHasVMLxHazards, ARM::FeatureNEONForFPMovs, ARM::FeatureHasVMLxHazards, ARM::FeatureNEONForFPMovs,
ARM::FeatureNEONForFP, ARM::FeatureCheckVLDnAlign, ARM::FeatureNEONForFP, ARM::FeatureCheckVLDnAlign,
ARM::FeatureHasSlowFPVMLx, ARM::FeatureVMLxForwarding, ARM::FeatureHasSlowFPVMLx, ARM::FeatureHasSlowFPVFMx,
ARM::FeaturePref32BitThumb, ARM::FeatureAvoidPartialCPSR, ARM::FeatureVMLxForwarding, ARM::FeaturePref32BitThumb,
ARM::FeatureCheapPredicableCPSR, ARM::FeatureAvoidMOVsShOp, ARM::FeatureAvoidPartialCPSR, ARM::FeatureCheapPredicableCPSR,
ARM::FeatureHasRetAddrStack, ARM::FeatureHasNoBranchPredictor, ARM::FeatureAvoidMOVsShOp, ARM::FeatureHasRetAddrStack,
ARM::FeatureDSP, ARM::FeatureMP, ARM::FeatureVirtualization, ARM::FeatureHasNoBranchPredictor, ARM::FeatureDSP, ARM::FeatureMP,
ARM::FeatureMClass, ARM::FeatureRClass, ARM::FeatureAClass, ARM::FeatureVirtualization, ARM::FeatureMClass, ARM::FeatureRClass,
ARM::FeatureNaClTrap, ARM::FeatureStrictAlign, ARM::FeatureLongCalls, ARM::FeatureAClass, ARM::FeatureNaClTrap, ARM::FeatureStrictAlign,
ARM::FeatureExecuteOnly, ARM::FeatureReserveR9, ARM::FeatureNoMovt, ARM::FeatureLongCalls, ARM::FeatureExecuteOnly, ARM::FeatureReserveR9,
ARM::FeatureNoNegativeImmediates ARM::FeatureNoMovt, ARM::FeatureNoNegativeImmediates
}; };
const ARMSubtarget *getST() const { return ST; } const ARMSubtarget *getST() const { return ST; }

View File

@ -93,12 +93,12 @@ define arm_aapcs_vfpcc float @Test3(float %f1, float %f2, float %f3, float %f4,
; CHECK-SAME: Latency=0 ; CHECK-SAME: Latency=0
; CHECK-DEFAULT: VMLSS ; CHECK-DEFAULT: VMLSS
; CHECK-FAST: VFMSS ; CHECK-FAST: VFNMSS
; > VMLSS common latency = 9 ; > VFNMSS common latency = 9
; CHECK: Latency : 9 ; CHECK: Latency : 9
; CHECK: Successors: ; CHECK: Successors:
; CHECK: Data ; CHECK: Data
; > VMLSS read-advanced latency to the next VMLSS = 4 ; > VFNMSS read-advanced latency to the next VMLSS = 4
; CHECK-SAME: Latency=4 ; CHECK-SAME: Latency=4
; CHECK-DEFAULT: VMLSS ; CHECK-DEFAULT: VMLSS

View File

@ -571,7 +571,7 @@ define void @test_fmuladd(half* %p, half* %q, half* %r) {
; CHECK: vldr.16 s0, [r1] ; CHECK: vldr.16 s0, [r1]
; CHECK-NEXT: vldr.16 s2, [r0] ; CHECK-NEXT: vldr.16 s2, [r0]
; CHECK-NEXT: vldr.16 s4, [r2] ; CHECK-NEXT: vldr.16 s4, [r2]
; CHECK-NEXT: vmla.f16 s4, s2, s0 ; CHECK-NEXT: vfma.f16 s4, s2, s0
; CHECK-NEXT: vstr.16 s4, [r0] ; CHECK-NEXT: vstr.16 s4, [r0]
; CHECK-NEXT: bx lr ; CHECK-NEXT: bx lr
%a = load half, half* %p, align 2 %a = load half, half* %p, align 2

View File

@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=thumbv8.1-m-none-eabi -mattr=+fullfp16 -fp-contract=fast | FileCheck %s ; RUN: llc < %s -mtriple=thumbv8.1-m-none-eabi -mattr=+fullfp16 -fp-contract=fast | FileCheck %s
; RUN: llc < %s -mtriple=thumbv8.1-m-none-eabi -mattr=+fullfp16,+slowfpvmlx -fp-contract=fast | FileCheck %s -check-prefix=DONT-FUSE ; RUN: llc < %s -mtriple=thumbv8.1-m-none-eabi -mattr=+fullfp16,+slowfpvfmx -fp-contract=fast | FileCheck %s -check-prefix=DONT-FUSE
; Check generated fp16 fused MAC and MLS. ; Check generated fp16 fused MAC and MLS.

View File

@ -201,7 +201,7 @@ define double @fmuladd_d(double %a, double %b, double %c) {
; SOFT: bl __aeabi_dadd ; SOFT: bl __aeabi_dadd
; VFP4: vmul.f64 ; VFP4: vmul.f64
; VFP4: vadd.f64 ; VFP4: vadd.f64
; FP-ARMv8: vmla.f64 ; FP-ARMv8: vfma.f64
%1 = call double @llvm.fmuladd.f64(double %a, double %b, double %c) %1 = call double @llvm.fmuladd.f64(double %a, double %b, double %c)
ret double %1 ret double %1
} }

View File

@ -194,7 +194,7 @@ define float @fmuladd_f(float %a, float %b, float %c) {
; CHECK-LABEL: fmuladd_f: ; CHECK-LABEL: fmuladd_f:
; SOFT: bl __aeabi_fmul ; SOFT: bl __aeabi_fmul
; SOFT: bl __aeabi_fadd ; SOFT: bl __aeabi_fadd
; VMLA: vmla.f32 ; VMLA: vfma.f32
; NO-VMLA: vmul.f32 ; NO-VMLA: vmul.f32
; NO-VMLA: vadd.f32 ; NO-VMLA: vadd.f32
%1 = call float @llvm.fmuladd.f32(float %a, float %b, float %c) %1 = call float @llvm.fmuladd.f32(float %a, float %b, float %c)