1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-11-26 04:32:44 +01:00

[GVN LoadPRE] Add an option to disable splitting backedge

GVN Load PRE can split the backedge, breaking the canonical loop structure in which the latch
contains the conditional branch on, for example, the induction variable.

Different optimizations expect this form of the loop, so it is better to preserve it for some time.
This CL adds an option to control the ability to split the backedge.

The default value is true, so technically this is NFC and the current behavior is unchanged.

Reviewers: fedor.sergeev, mkazantsev, nikic, reames, fhahn
Reviewed By: mkazantsev
Subscribers: llvm-commits
Differential Revision: https://reviews.llvm.org/D89854
This commit is contained in:
Serguei Katkov 2020-10-20 17:32:08 +07:00
parent e22ba25d54
commit fd4f36fcb9
4 changed files with 85 additions and 0 deletions

View File

@ -73,6 +73,7 @@ struct GVNOptions {
Optional<bool> AllowPRE = None;
Optional<bool> AllowLoadPRE = None;
Optional<bool> AllowLoadInLoopPRE = None;
Optional<bool> AllowLoadPRESplitBackedge = None;
Optional<bool> AllowMemDep = None;
GVNOptions() = default;
@ -94,6 +95,12 @@ struct GVNOptions {
return *this;
}
/// Enables or disables PRE of loads that would require splitting a loop
/// backedge in GVN (copy-paste of the load-PRE comment corrected: this
/// setter controls AllowLoadPRESplitBackedge, not plain load PRE).
GVNOptions &setLoadPRESplitBackedge(bool LoadPRESplitBackedge) {
AllowLoadPRESplitBackedge = LoadPRESplitBackedge;
return *this;
}
/// Enables or disables use of MemDepAnalysis.
GVNOptions &setMemDep(bool MemDep) {
AllowMemDep = MemDep;
@ -130,6 +137,7 @@ public:
bool isPREEnabled() const;
bool isLoadPREEnabled() const;
bool isLoadInLoopPREEnabled() const;
bool isLoadPRESplitBackedgeEnabled() const;
bool isMemDepEnabled() const;
/// This class holds the mapping between values and value numbers. It is used

View File

@ -1926,6 +1926,8 @@ Expected<GVNOptions> parseGVNOptions(StringRef Params) {
Result.setPRE(Enable);
} else if (ParamName == "load-pre") {
Result.setLoadPRE(Enable);
} else if (ParamName == "split-backedge-load-pre") {
Result.setLoadPRESplitBackedge(Enable);
} else if (ParamName == "memdep") {
Result.setMemDep(Enable);
} else {

View File

@ -110,6 +110,9 @@ static cl::opt<bool> GVNEnablePRE("enable-pre", cl::init(true), cl::Hidden);
static cl::opt<bool> GVNEnableLoadPRE("enable-load-pre", cl::init(true));
static cl::opt<bool> GVNEnableLoadInLoopPRE("enable-load-in-loop-pre",
cl::init(true));
static cl::opt<bool>
GVNEnableSplitBackedgeInLoadPRE("enable-split-backedge-in-load-pre",
cl::init(true));
static cl::opt<bool> GVNEnableMemDep("enable-gvn-memdep", cl::init(true));
static cl::opt<uint32_t> MaxNumDeps(
@ -639,6 +642,11 @@ bool GVN::isLoadInLoopPREEnabled() const {
return Options.AllowLoadInLoopPRE.getValueOr(GVNEnableLoadInLoopPRE);
}
// Whether load PRE may split a loop backedge. An explicit GVNOptions
// setting takes precedence; otherwise this falls back to the
// -enable-split-backedge-in-load-pre command-line flag (default: true).
bool GVN::isLoadPRESplitBackedgeEnabled() const {
return Options.AllowLoadPRESplitBackedge.getValueOr(
GVNEnableSplitBackedgeInLoadPRE);
}
// Whether MemDepAnalysis is used. An explicit GVNOptions setting takes
// precedence; otherwise this falls back to the -enable-gvn-memdep flag.
bool GVN::isMemDepEnabled() const {
return Options.AllowMemDep.getValueOr(GVNEnableMemDep);
}
@ -1222,6 +1230,16 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
return false;
}
// Do not split backedge as it will break the canonical loop form.
if (!isLoadPRESplitBackedgeEnabled())
if (DT->dominates(LoadBB, Pred)) {
LLVM_DEBUG(
dbgs()
<< "COULD NOT PRE LOAD BECAUSE OF A BACKEDGE CRITICAL EDGE '"
<< Pred->getName() << "': " << *LI << '\n');
return false;
}
CriticalEdgePred.push_back(Pred);
} else {
// Only add the predecessors that will not be split for now.

View File

@ -0,0 +1,57 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -basic-aa -gvn -enable-split-backedge-in-load-pre=true < %s | FileCheck %s --check-prefix=ON
; RUN: opt -S -basic-aa -gvn -enable-split-backedge-in-load-pre=false < %s | FileCheck %s --check-prefix=OFF
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32"
; The backedge skip->header is a critical edge (skip also exits to 'exit').
; With -enable-split-backedge-in-load-pre=true, load PRE splits it
; (creating skip.header_crit_edge) and reloads %p there, turning %y into a
; phi. With the option off, the backedge is left intact and the load of %p
; stays in the header, preserving the loop's shape.
define i32 @test(i1 %b, i1 %c, i32* noalias %p, i32* noalias %q) {
; ON-LABEL: @test(
; ON-NEXT: entry:
; ON-NEXT: [[Y1:%.*]] = load i32, i32* [[P:%.*]], align 4
; ON-NEXT: call void @use(i32 [[Y1]])
; ON-NEXT: br label [[HEADER:%.*]]
; ON: header:
; ON-NEXT: [[Y:%.*]] = phi i32 [ [[Y_PRE:%.*]], [[SKIP_HEADER_CRIT_EDGE:%.*]] ], [ [[Y]], [[HEADER]] ], [ [[Y1]], [[ENTRY:%.*]] ]
; ON-NEXT: call void @use(i32 [[Y]])
; ON-NEXT: br i1 [[B:%.*]], label [[SKIP:%.*]], label [[HEADER]]
; ON: skip:
; ON-NEXT: call void @clobber(i32* [[P]], i32* [[Q:%.*]])
; ON-NEXT: br i1 [[C:%.*]], label [[SKIP_HEADER_CRIT_EDGE]], label [[EXIT:%.*]]
; ON: skip.header_crit_edge:
; ON-NEXT: [[Y_PRE]] = load i32, i32* [[P]], align 4
; ON-NEXT: br label [[HEADER]]
; ON: exit:
; ON-NEXT: ret i32 [[Y]]
;
; OFF-LABEL: @test(
; OFF-NEXT: entry:
; OFF-NEXT: [[Y1:%.*]] = load i32, i32* [[P:%.*]], align 4
; OFF-NEXT: call void @use(i32 [[Y1]])
; OFF-NEXT: br label [[HEADER:%.*]]
; OFF: header:
; OFF-NEXT: [[Y:%.*]] = load i32, i32* [[P]], align 4
; OFF-NEXT: call void @use(i32 [[Y]])
; OFF-NEXT: br i1 [[B:%.*]], label [[SKIP:%.*]], label [[HEADER]]
; OFF: skip:
; OFF-NEXT: call void @clobber(i32* [[P]], i32* [[Q:%.*]])
; OFF-NEXT: br i1 [[C:%.*]], label [[HEADER]], label [[EXIT:%.*]]
; OFF: exit:
; OFF-NEXT: ret i32 [[Y]]
;
entry:
%y1 = load i32, i32* %p
call void @use(i32 %y1)
br label %header
header:
%y = load i32, i32* %p
call void @use(i32 %y)
br i1 %b, label %skip, label %header
skip:
call void @clobber(i32* %p, i32* %q)
br i1 %c, label %header, label %exit
exit:
ret i32 %y
}
declare void @use(i32) readonly
declare void @clobber(i32* %p, i32* %q)