
[ARM] Always enable UseAA in the arm backend

This feature controls whether alias analysis (AA) is used in the backend, and
was previously turned on only for certain subtargets to help create less
constrained scheduling graphs. This patch turns it on for all subtargets, so
that they can all make use of the extra information to produce better code.

Differential Revision: https://reviews.llvm.org/D69796
commit eebf5e9394 (parent 1643bee451)
Author: David Green
Date:   2019-11-05 10:46:56 +00:00

6 changed files with 47 additions and 36 deletions
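For context, the hook at the heart of this change is the virtual
`TargetSubtargetInfo::useAA()`. The sketch below contrasts the before and
after shapes of the ARM override shown in the diffs that follow; only
`useAA()` itself is real LLVM API surface, the surrounding types are
illustrative scaffolding, not the actual sources:

```cpp
#include <cassert>

// Minimal sketch of the subtarget hook this patch changes.
struct SubtargetSketch {
  virtual ~SubtargetSketch() = default;
  // Default in TargetSubtargetInfo: no alias analysis during codegen.
  virtual bool useAA() const { return false; }
};

// Before: the override returned a field that TableGen set to true only for
// subtargets listing FeatureUseAA (cortex-m3, cortex-m4, exynos, ...).
struct ARMSubtargetBefore : SubtargetSketch {
  bool UseAA = false; // toggled by the "use-aa" subtarget feature
  bool useAA() const override { return UseAA; }
};

// After: every ARM subtarget opts in, so FeatureUseAA, the UseAA field,
// and the per-CPU feature-list entries below can all be deleted.
struct ARMSubtargetAfter : SubtargetSketch {
  bool useAA() const override { return true; }
};

int main() {
  ARMSubtargetBefore OldGeneric; // e.g. a CPU without FeatureUseAA
  ARMSubtargetAfter NewGeneric;
  assert(!OldGeneric.useAA() && NewGeneric.useAA());
}
```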

diff --git a/lib/Target/ARM/ARM.td b/lib/Target/ARM/ARM.td

@@ -415,10 +415,6 @@ def FeatureNoPostRASched : SubtargetFeature<"disable-postra-scheduler",
                                             "DisablePostRAScheduler", "true",
                                             "Don't schedule again after register allocation">;
 
-// Enable use of alias analysis during code generation
-def FeatureUseAA : SubtargetFeature<"use-aa", "UseAA", "true",
-                                    "Use alias analysis during codegen">;
-
 // Armv8.5-A extensions
 def FeatureSB : SubtargetFeature<"sb", "HasSB", "true",
@@ -584,7 +580,6 @@ def ProcExynos : SubtargetFeature<"exynos", "ARMProcFamily", "Exynos",
                                   "Samsung Exynos processors",
                                   [FeatureZCZeroing,
                                    FeatureUseWideStrideVFP,
-                                   FeatureUseAA,
                                    FeatureSplatVFPToNeon,
                                    FeatureSlowVGETLNi32,
                                    FeatureSlowVDUP32,
@@ -1067,13 +1062,11 @@ def : ProcessorModel<"cortex-m3", CortexM4Model, [ARMv7m,
                                                   ProcM3,
                                                   FeaturePrefLoopAlign32,
                                                   FeatureUseMISched,
-                                                  FeatureUseAA,
                                                   FeatureHasNoBranchPredictor]>;
 
 def : ProcessorModel<"sc300", CortexM4Model, [ARMv7m,
                                               ProcM3,
                                               FeatureUseMISched,
-                                              FeatureUseAA,
                                               FeatureHasNoBranchPredictor]>;
 
 def : ProcessorModel<"cortex-m4", CortexM4Model, [ARMv7em,
@@ -1081,7 +1074,6 @@ def : ProcessorModel<"cortex-m4", CortexM4Model, [ARMv7em,
                                                   FeaturePrefLoopAlign32,
                                                   FeatureHasSlowFPVMLx,
                                                   FeatureUseMISched,
-                                                  FeatureUseAA,
                                                   FeatureHasNoBranchPredictor]>;
 
 def : ProcNoItin<"cortex-m7", [ARMv7em,
@@ -1096,7 +1088,6 @@ def : ProcessorModel<"cortex-m33", CortexM4Model, [ARMv8mMainline,
                                                    FeaturePrefLoopAlign32,
                                                    FeatureHasSlowFPVMLx,
                                                    FeatureUseMISched,
-                                                   FeatureUseAA,
                                                    FeatureHasNoBranchPredictor]>;
 
 def : ProcessorModel<"cortex-m35p", CortexM4Model, [ARMv8mMainline,
@@ -1105,7 +1096,6 @@ def : ProcessorModel<"cortex-m35p", CortexM4Model, [ARMv8mMainline,
                                                     FeaturePrefLoopAlign32,
                                                     FeatureHasSlowFPVMLx,
                                                     FeatureUseMISched,
-                                                    FeatureUseAA,
                                                     FeatureHasNoBranchPredictor]>;
@@ -1213,8 +1203,7 @@ def : ProcNoItin<"kryo", [ARMv8a, ProcKryo,
 def : ProcessorModel<"cortex-r52", CortexR52Model, [ARMv8r, ProcR52,
                                                     FeatureUseMISched,
-                                                    FeatureFPAO,
-                                                    FeatureUseAA]>;
+                                                    FeatureFPAO]>;
 
 //===----------------------------------------------------------------------===//
 // Register File Description

diff --git a/lib/Target/ARM/ARMSubtarget.h b/lib/Target/ARM/ARMSubtarget.h

@@ -223,9 +223,6 @@ protected:
   /// register allocation.
   bool DisablePostRAScheduler = false;
 
-  /// UseAA - True if using AA during codegen (DAGCombine, MISched, etc)
-  bool UseAA = false;
-
   /// HasThumb2 - True if Thumb2 instructions are supported.
   bool HasThumb2 = false;
@@ -811,7 +808,7 @@ public:
   /// Enable use of alias analysis during code generation (during MI
   /// scheduling, DAGCombine, etc.).
-  bool useAA() const override { return UseAA; }
+  bool useAA() const override { return true; }
 
   // enableAtomicExpand- True if we need to expand our atomics.
   bool enableAtomicExpand() const override;
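The reason this one-line header change matters is how the hook is consumed:
dependence construction during scheduling only receives alias information
when the subtarget asks for it, and a null AA forces conservative chains
between memory operations. A hedged sketch of that gating pattern (the
function and type names here are hypothetical, not LLVM's actual
signatures):

```cpp
// Hypothetical consumer showing the gating pattern: passes ask the
// subtarget whether to use AA, and otherwise pass a null pointer, which
// makes every load/store pair look dependent.
struct AAResultsSketch {};              // stand-in for llvm::AAResults
struct SubtargetView { bool UsesAA; };  // stand-in for the subtarget query

const AAResultsSketch *aaForCodegen(const SubtargetView &ST,
                                    const AAResultsSketch *AA) {
  // With ARM's useAA() now returning true unconditionally, this returns
  // the real analysis for every ARM subtarget, so independent memory
  // accesses can be reordered instead of being chained pessimistically.
  return ST.UsesAA ? AA : nullptr;
}
```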

diff --git a/test/CodeGen/ARM/memcpy-ldm-stm.ll b/test/CodeGen/ARM/memcpy-ldm-stm.ll

@@ -36,10 +36,10 @@ entry:
 ; CHECKV6-NEXT: ldr [[SB:r[0-7]]],
 ; CHECKV6-NEXT: ldm{{(\.w)?}} [[LB]]!,
 ; CHECKV6-NEXT: stm{{(\.w)?}} [[SB]]!,
-; CHECKV6-NEXT: ldrh{{(\.w)?}} {{.*}}, {{\[}}[[LB]]]
-; CHECKV6-NEXT: ldrb{{(\.w)?}} {{.*}}, {{\[}}[[LB]], #2]
-; CHECKV6-NEXT: strb{{(\.w)?}} {{.*}}, {{\[}}[[SB]], #2]
-; CHECKV6-NEXT: strh{{(\.w)?}} {{.*}}, {{\[}}[[SB]]]
+; CHECKV6-DAG: ldrh{{(\.w)?}} {{.*}}, {{\[}}[[LB]]]
+; CHECKV6-DAG: ldrb{{(\.w)?}} {{.*}}, {{\[}}[[LB]], #2]
+; CHECKV6-DAG: strb{{(\.w)?}} {{.*}}, {{\[}}[[SB]], #2]
+; CHECKV6-DAG: strh{{(\.w)?}} {{.*}}, {{\[}}[[SB]]]
 ; CHECKV7: movt [[LB:[rl0-9]+]], :upper16:d
 ; CHECKV7-NEXT: movt [[SB:[rl0-9]+]], :upper16:s
 ; CHECKV7: ldr{{(\.w)?}} {{.*}}, {{\[}}[[LB]], #11]

diff --git a/test/CodeGen/Thumb/thumb1_return_sequence.ll b/test/CodeGen/Thumb/thumb1_return_sequence.ll

@@ -57,14 +57,14 @@ entry:
 ; Epilogue
 ; --------
-; CHECK-V4T: ldr [[POP:r[4567]]], [sp, #16]
+; CHECK-V4T: ldr [[POP:r[4567]]], [sp, #12]
 ; CHECK-V4T-NEXT: mov lr, [[POP]]
 ; CHECK-V4T-NEXT: pop {[[SAVED]]}
 ; CHECK-V4T-NEXT: add sp, #16
 ; CHECK-V4T-NEXT: bx lr
 ; CHECK-V5T: lsls r4
 ; CHECK-V5T-NEXT: mov sp, r4
-; CHECK-V5T: ldr [[POP:r[4567]]], [sp, #16]
+; CHECK-V5T: ldr [[POP:r[4567]]], [sp, #12]
 ; CHECK-V5T-NEXT: mov lr, [[POP]]
 ; CHECK-V5T-NEXT: pop {[[SAVED]]}
 ; CHECK-V5T-NEXT: add sp, #16

diff --git a/test/CodeGen/ARM/useaa.ll b/test/CodeGen/ARM/useaa.ll

@@ -7,9 +7,9 @@
 ; CHECK-LABEL: test
 ; GENERIC: ldr
-; GENERIC: str
 ; GENERIC: ldr
 ; GENERIC: str
+; GENERIC: str
 ; USEAA: ldr
 ; USEAA: ldr
 ; USEAA: str
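The GENERIC prefix covers a configuration that previously lacked
FeatureUseAA, so the scheduler had to keep each store ordered before the
next load; with AA always on, it now produces the same load-load-store-store
pattern as the USEAA run. In C++ terms the test's memory behaviour is
roughly the following (a reconstruction for illustration; the test's actual
IR body is not shown on this page):

```cpp
// Roughly the shape of the test: two pointers the compiler can prove
// distinct (noalias in the IR, __restrict here). With alias analysis
// available during scheduling, the load of *b may be hoisted above the
// store to *a, yielding ldr/ldr/str/str instead of ldr/str/ldr/str.
void test(int *__restrict a, int *__restrict b) {
  *a += 10; // ldr ... str on a
  *b += 10; // ldr ... str on b, now schedulable across the first store
}
```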

diff --git a/test/CodeGen/ARM/va_arg.ll b/test/CodeGen/ARM/va_arg.ll

@@ -1,13 +1,27 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -pre-RA-sched=source | FileCheck %s
 ; Test that we correctly align elements when using va_arg
 
-; CHECK-LABEL: test1:
-; CHECK-NOT: bfc
-; CHECK: add [[REG:(r[0-9]+)|(lr)]], {{(r[0-9]+)|(lr)}}, #7
-; CHECK: bic {{(r[0-9]+)|(lr)}}, [[REG]], #7
-; CHECK-NOT: bic
-
 define i64 @test1(i32 %i, ...) nounwind optsize {
+; CHECK-LABEL: test1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .pad #12
+; CHECK-NEXT:    sub sp, sp, #12
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, sp, #4
+; CHECK-NEXT:    add r0, sp, #4
+; CHECK-NEXT:    stmib sp, {r1, r2, r3}
+; CHECK-NEXT:    add r0, r0, #7
+; CHECK-NEXT:    bic r1, r0, #7
+; CHECK-NEXT:    orr r2, r1, #4
+; CHECK-NEXT:    str r2, [sp]
+; CHECK-NEXT:    ldr r0, [r1]
+; CHECK-NEXT:    add r2, r2, #4
+; CHECK-NEXT:    str r2, [sp]
+; CHECK-NEXT:    ldr r1, [r1, #4]
+; CHECK-NEXT:    add sp, sp, #4
+; CHECK-NEXT:    add sp, sp, #12
+; CHECK-NEXT:    bx lr
 entry:
   %g = alloca i8*, align 4
   %g1 = bitcast i8** %g to i8*
@@ -17,14 +31,25 @@ entry:
   ret i64 %0
 }
 
-; CHECK-LABEL: test2:
-; CHECK-NOT: bfc
-; CHECK: add [[REG:(r[0-9]+)|(lr)]], {{(r[0-9]+)|(lr)}}, #7
-; CHECK: bic {{(r[0-9]+)|(lr)}}, [[REG]], #7
-; CHECK-NOT: bic
-; CHECK: bx lr
-
 define double @test2(i32 %a, i32* %b, ...) nounwind optsize {
+; CHECK-LABEL: test2:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .pad #8
+; CHECK-NEXT:    sub sp, sp, #8
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, sp, #4
+; CHECK-NEXT:    add r0, sp, #4
+; CHECK-NEXT:    stmib sp, {r2, r3}
+; CHECK-NEXT:    add r0, r0, #11
+; CHECK-NEXT:    bic r0, r0, #3
+; CHECK-NEXT:    str r2, [r1]
+; CHECK-NEXT:    add r1, r0, #8
+; CHECK-NEXT:    str r1, [sp]
+; CHECK-NEXT:    vldr d16, [r0]
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    add sp, sp, #4
+; CHECK-NEXT:    add sp, sp, #8
+; CHECK-NEXT:    bx lr
 entry:
   %ap = alloca i8*, align 4                       ; <i8**> [#uses=3]
   %ap1 = bitcast i8** %ap to i8*                  ; <i8*> [#uses=2]