//===- llvm/InitializePasses.h - Initialize All Passes ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declarations for the pass initialization routines
// for the entire LLVM project.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_INITIALIZEPASSES_H
#define LLVM_INITIALIZEPASSES_H

namespace llvm {

class PassRegistry;
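// Illustrative usage sketch (not part of the original header): a standalone
// tool that uses the legacy pass infrastructure typically initializes the
// libraries it links against once, early in main(), before looking passes up
// by name or constructing them. Which of the initializeXxx() calls declared
// below are needed depends on the libraries the tool actually links.
//
// \code
//   llvm::PassRegistry &Registry = *llvm::PassRegistry::getPassRegistry();
//   llvm::initializeCore(Registry);
//   llvm::initializeAnalysis(Registry);
//   llvm::initializeScalarOpts(Registry);
//   llvm::initializeTarget(Registry);
// \endcode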
/// Initialize all passes linked into the Core library.
void initializeCore(PassRegistry&);
/// Initialize all passes linked into the TransformUtils library.
void initializeTransformUtils(PassRegistry&);

/// Initialize all passes linked into the ScalarOpts library.
void initializeScalarOpts(PassRegistry&);

/// Initialize all passes linked into the ObjCARCOpts library.
void initializeObjCARCOpts(PassRegistry&);

/// Initialize all passes linked into the Vectorize library.
void initializeVectorization(PassRegistry&);

/// Initialize all passes linked into the InstCombine library.
void initializeInstCombine(PassRegistry&);

/// Initialize all passes linked into the AggressiveInstCombine library.
void initializeAggressiveInstCombine(PassRegistry&);

/// Initialize all passes linked into the IPO library.
void initializeIPO(PassRegistry&);

/// Initialize all passes linked into the Instrumentation library.
void initializeInstrumentation(PassRegistry&);

/// Initialize all passes linked into the Analysis library.
void initializeAnalysis(PassRegistry&);

/// Initialize all passes linked into the Coroutines library.
void initializeCoroutines(PassRegistry&);

/// Initialize all passes linked into the CodeGen library.
void initializeCodeGen(PassRegistry&);

/// Initialize all passes linked into the GlobalISel library.
void initializeGlobalISel(PassRegistry&);
/// Initialize all passes linked into the Target library.
void initializeTarget(PassRegistry&);
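// The remaining declarations are the per-pass initialization routines. Each is
// normally defined in the pass's implementation file by the INITIALIZE_PASS
// family of macros (see llvm/PassSupport.h), and the library-level
// initializers above call the per-pass routines for their library. An
// illustrative sketch for a hypothetical legacy pass named MyHackPass:
//
// \code
//   INITIALIZE_PASS_BEGIN(MyHackPass, "my-hack", "My hack pass", false, false)
//   INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
//   INITIALIZE_PASS_END(MyHackPass, "my-hack", "My hack pass", false, false)
// \endcode
//
// which provides the corresponding llvm::initializeMyHackPassPass(PassRegistry&).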
void initializeAAEvalLegacyPassPass(PassRegistry&);
void initializeAAResultsWrapperPassPass(PassRegistry&);
void initializeADCELegacyPassPass(PassRegistry&);
void initializeAddDiscriminatorsLegacyPassPass(PassRegistry&);
void initializeModuleAddressSanitizerLegacyPassPass(PassRegistry &);
void initializeASanGlobalsMetadataWrapperPassPass(PassRegistry &);
void initializeAddressSanitizerLegacyPassPass(PassRegistry &);
void initializeAggressiveInstCombinerLegacyPassPass(PassRegistry&);
void initializeAliasSetPrinterPass(PassRegistry&);
void initializeAlignmentFromAssumptionsPass(PassRegistry&);
void initializeAlwaysInlinerLegacyPassPass(PassRegistry&);
void initializeOpenMPOptLegacyPassPass(PassRegistry &);
void initializeArgPromotionPass(PassRegistry&);
void initializeAssumptionCacheTrackerPass(PassRegistry&);
void initializeAtomicExpandPass(PassRegistry&);
void initializeAttributorLegacyPassPass(PassRegistry&);
void initializeAttributorCGSCCLegacyPassPass(PassRegistry &);
void initializeBBSectionsPreparePass(PassRegistry &);
void initializeBDCELegacyPassPass(PassRegistry&);
void initializeBarrierNoopPass(PassRegistry&);
void initializeBasicAAWrapperPassPass(PassRegistry&);
void initializeBlockExtractorPass(PassRegistry &);
void initializeBlockFrequencyInfoWrapperPassPass(PassRegistry&);
void initializeBoundsCheckingLegacyPassPass(PassRegistry&);
void initializeBranchFolderPassPass(PassRegistry&);
void initializeBranchProbabilityInfoWrapperPassPass(PassRegistry&);
void initializeBranchRelaxationPass(PassRegistry&);
void initializeBreakCriticalEdgesPass(PassRegistry&);
void initializeBreakFalseDepsPass(PassRegistry&);
void initializeCanonicalizeAliasesLegacyPassPass(PassRegistry &);
void initializeCFGOnlyPrinterLegacyPassPass(PassRegistry&);
void initializeCFGOnlyViewerLegacyPassPass(PassRegistry&);
void initializeCFGPrinterLegacyPassPass(PassRegistry&);
void initializeCFGSimplifyPassPass(PassRegistry&);
void initializeCFGuardPass(PassRegistry&);
void initializeCFGuardLongjmpPass(PassRegistry&);
void initializeCFGViewerLegacyPassPass(PassRegistry&);
void initializeCFIInstrInserterPass(PassRegistry&);
void initializeCFLAndersAAWrapperPassPass(PassRegistry&);
void initializeCFLSteensAAWrapperPassPass(PassRegistry&);
void initializeCallGraphDOTPrinterPass(PassRegistry&);
void initializeCallGraphPrinterLegacyPassPass(PassRegistry&);
void initializeCallGraphViewerPass(PassRegistry&);
void initializeCallGraphWrapperPassPass(PassRegistry&);
void initializeCallSiteSplittingLegacyPassPass(PassRegistry&);
void initializeCalledValuePropagationLegacyPassPass(PassRegistry &);
void initializeCodeGenPreparePass(PassRegistry&);
void initializeConstantHoistingLegacyPassPass(PassRegistry&);
void initializeConstantMergeLegacyPassPass(PassRegistry&);
void initializeConstantPropagationPass(PassRegistry&);
void initializeControlHeightReductionLegacyPassPass(PassRegistry&);
void initializeCorrelatedValuePropagationPass(PassRegistry&);
void initializeCostModelAnalysisPass(PassRegistry&);
void initializeCrossDSOCFIPass(PassRegistry&);
void initializeDAEPass(PassRegistry&);
void initializeDAHPass(PassRegistry&);
void initializeDCELegacyPassPass(PassRegistry&);
void initializeDSELegacyPassPass(PassRegistry&);
void initializeDataFlowSanitizerPass(PassRegistry&);
void initializeDeadInstEliminationPass(PassRegistry&);
void initializeDeadMachineInstructionElimPass(PassRegistry&);
void initializeDebugifyMachineModulePass(PassRegistry &);
void initializeDelinearizationPass(PassRegistry&);
void initializeDemandedBitsWrapperPassPass(PassRegistry&);
void initializeDependenceAnalysisPass(PassRegistry&);
void initializeDependenceAnalysisWrapperPassPass(PassRegistry&);
void initializeDetectDeadLanesPass(PassRegistry&);
void initializeDivRemPairsLegacyPassPass(PassRegistry&);
void initializeDomOnlyPrinterPass(PassRegistry&);
void initializeDomOnlyViewerPass(PassRegistry&);
void initializeDomPrinterPass(PassRegistry&);
void initializeDomViewerPass(PassRegistry&);
void initializeDominanceFrontierWrapperPassPass(PassRegistry&);
void initializeDominatorTreeWrapperPassPass(PassRegistry&);
void initializeDwarfEHPreparePass(PassRegistry&);
void initializeEarlyCSELegacyPassPass(PassRegistry&);
void initializeEarlyCSEMemSSALegacyPassPass(PassRegistry&);
void initializeEarlyIfConverterPass(PassRegistry&);
void initializeEarlyIfPredicatorPass(PassRegistry &);
void initializeEarlyMachineLICMPass(PassRegistry&);
void initializeEarlyTailDuplicatePass(PassRegistry&);
void initializeEdgeBundlesPass(PassRegistry&);
void initializeEliminateAvailableExternallyLegacyPassPass(PassRegistry&);
void initializeEntryExitInstrumenterPass(PassRegistry&);
void initializeExpandMemCmpPassPass(PassRegistry&);
void initializeExpandPostRAPass(PassRegistry&);
void initializeExpandReductionsPass(PassRegistry&);
void initializeMakeGuardsExplicitLegacyPassPass(PassRegistry&);
void initializeExternalAAWrapperPassPass(PassRegistry&);
void initializeFEntryInserterPass(PassRegistry&);
void initializeFinalizeISelPass(PassRegistry&);
void initializeFinalizeMachineBundlesPass(PassRegistry&);
void initializeFixIrreduciblePass(PassRegistry &);
void initializeFixupStatepointCallerSavedPass(PassRegistry&);
void initializeFlattenCFGPassPass(PassRegistry&);
void initializeFloat2IntLegacyPassPass(PassRegistry&);
void initializeForceFunctionAttrsLegacyPassPass(PassRegistry&);
void initializeForwardControlFlowIntegrityPass(PassRegistry&);
void initializeFuncletLayoutPass(PassRegistry&);
void initializeFunctionImportLegacyPassPass(PassRegistry&);
void initializeGCMachineCodeAnalysisPass(PassRegistry&);
void initializeGCModuleInfoPass(PassRegistry&);
void initializeGCOVProfilerLegacyPassPass(PassRegistry&);
void initializeGVNHoistLegacyPassPass(PassRegistry&);
void initializeGVNLegacyPassPass(PassRegistry&);
void initializeGVNSinkLegacyPassPass(PassRegistry&);
void initializeGlobalDCELegacyPassPass(PassRegistry&);
void initializeGlobalMergePass(PassRegistry&);
void initializeGlobalOptLegacyPassPass(PassRegistry&);
void initializeGlobalSplitPass(PassRegistry&);
void initializeGlobalsAAWrapperPassPass(PassRegistry&);
void initializeGuardWideningLegacyPassPass(PassRegistry&);
void initializeHardwareLoopsPass(PassRegistry&);
void initializeHotColdSplittingLegacyPassPass(PassRegistry&);
void initializeHWAddressSanitizerLegacyPassPass(PassRegistry &);
void initializeIPCPPass(PassRegistry&);
void initializeIPSCCPLegacyPassPass(PassRegistry&);
void initializeIRCELegacyPassPass(PassRegistry&);
void initializeIRTranslatorPass(PassRegistry&);
void initializeIVUsersWrapperPassPass(PassRegistry&);
void initializeIfConverterPass(PassRegistry&);
void initializeImplicitNullChecksPass(PassRegistry&);
void initializeIndVarSimplifyLegacyPassPass(PassRegistry&);
void initializeIndirectBrExpandPassPass(PassRegistry&);
void initializeInferAddressSpacesPass(PassRegistry&);
void initializeInferFunctionAttrsLegacyPassPass(PassRegistry&);
void initializeInjectTLIMappingsLegacyPass(PassRegistry &);
void initializeInlineCostAnalysisPass(PassRegistry&);
void initializeInstCountPass(PassRegistry&);
void initializeInstNamerPass(PassRegistry&);
void initializeInstSimplifyLegacyPassPass(PassRegistry &);
void initializeInstrProfilingLegacyPassPass(PassRegistry&);
void initializeInstrOrderFileLegacyPassPass(PassRegistry&);
void initializeInstructionCombiningPassPass(PassRegistry&);
void initializeInstructionSelectPass(PassRegistry&);
void initializeInterleavedAccessPass(PassRegistry&);
void initializeInterleavedLoadCombinePass(PassRegistry &);
void initializeInternalizeLegacyPassPass(PassRegistry&);
void initializeIntervalPartitionPass(PassRegistry&);
void initializeJumpThreadingPass(PassRegistry&);
void initializeLCSSAVerificationPassPass(PassRegistry&);
void initializeLCSSAWrapperPassPass(PassRegistry&);
void initializeLazyBlockFrequencyInfoPassPass(PassRegistry&);
void initializeLazyBranchProbabilityInfoPassPass(PassRegistry&);
void initializeLazyMachineBlockFrequencyInfoPassPass(PassRegistry&);
void initializeLazyValueInfoPrinterPass(PassRegistry&);
void initializeLazyValueInfoWrapperPassPass(PassRegistry&);
void initializeLegacyDivergenceAnalysisPass(PassRegistry&);
void initializeLegacyLICMPassPass(PassRegistry&);
void initializeLegacyLoopSinkPassPass(PassRegistry&);
void initializeLegalizerPass(PassRegistry&);
void initializeGISelCSEAnalysisWrapperPassPass(PassRegistry &);
void initializeGISelKnownBitsAnalysisPass(PassRegistry &);
void initializeLibCallsShrinkWrapLegacyPassPass(PassRegistry&);
void initializeLintPass(PassRegistry&);
void initializeLiveDebugValuesPass(PassRegistry&);
void initializeLiveDebugVariablesPass(PassRegistry&);
void initializeLiveIntervalsPass(PassRegistry&);
void initializeLiveRangeShrinkPass(PassRegistry&);
void initializeLiveRegMatrixPass(PassRegistry&);
void initializeLiveStacksPass(PassRegistry&);
void initializeLiveVariablesPass(PassRegistry&);
void initializeLoadStoreVectorizerLegacyPassPass(PassRegistry&);
void initializeLoaderPassPass(PassRegistry&);
void initializeLocalStackSlotPassPass(PassRegistry&);
void initializeLocalizerPass(PassRegistry&);
void initializeLoopAccessLegacyAnalysisPass(PassRegistry&);
void initializeLoopDataPrefetchLegacyPassPass(PassRegistry&);
void initializeLoopDeletionLegacyPassPass(PassRegistry&);
void initializeLoopDistributeLegacyPass(PassRegistry&);
void initializeLoopExtractorPass(PassRegistry&);
void initializeLoopGuardWideningLegacyPassPass(PassRegistry&);
void initializeLoopFuseLegacyPass(PassRegistry&);
void initializeLoopIdiomRecognizeLegacyPassPass(PassRegistry&);
void initializeLoopInfoWrapperPassPass(PassRegistry&);
void initializeLoopInstSimplifyLegacyPassPass(PassRegistry&);
void initializeLoopInterchangePass(PassRegistry&);
void initializeLoopLoadEliminationPass(PassRegistry&);
void initializeLoopPassPass(PassRegistry&);
void initializeLoopPredicationLegacyPassPass(PassRegistry&);
void initializeLoopRerollPass(PassRegistry&);
void initializeLoopRotateLegacyPassPass(PassRegistry&);
void initializeLoopSimplifyCFGLegacyPassPass(PassRegistry&);
void initializeLoopSimplifyPass(PassRegistry&);
void initializeLoopStrengthReducePass(PassRegistry&);
void initializeLoopUnrollAndJamPass(PassRegistry&);
void initializeLoopUnrollPass(PassRegistry&);
void initializeLoopUnswitchPass(PassRegistry&);
void initializeLoopVectorizePass(PassRegistry&);
void initializeLoopVersioningLICMPass(PassRegistry&);
void initializeLoopVersioningPassPass(PassRegistry&);
void initializeLowerAtomicLegacyPassPass(PassRegistry&);
void initializeLowerConstantIntrinsicsPass(PassRegistry&);
void initializeLowerEmuTLSPass(PassRegistry&);
void initializeLowerExpectIntrinsicPass(PassRegistry&);
void initializeLowerGuardIntrinsicLegacyPassPass(PassRegistry&);
void initializeLowerWidenableConditionLegacyPassPass(PassRegistry&);
void initializeLowerIntrinsicsPass(PassRegistry&);
void initializeLowerInvokeLegacyPassPass(PassRegistry&);
void initializeLowerSwitchPass(PassRegistry&);
void initializeLowerTypeTestsPass(PassRegistry&);
void initializeLowerMatrixIntrinsicsLegacyPassPass(PassRegistry &);
void initializeMIRCanonicalizerPass(PassRegistry &);
void initializeMIRNamerPass(PassRegistry &);
void initializeMIRPrintingPassPass(PassRegistry&);
void initializeMachineBlockFrequencyInfoPass(PassRegistry&);
Implement a block placement pass based on the branch probability and
block frequency analyses. This differs substantially from the existing
block-placement pass in LLVM:
1) It operates on the Machine-IR in the CodeGen layer. This exposes much
more (and more precise) information and opportunities. Also, the
results are more stable due to fewer transforms ocurring after the
pass runs.
2) It uses the generalized probability and frequency analyses. These can
model static heuristics, code annotation derived heuristics as well
as eventual profile loading. By basing the optimization on the
analysis interface it can work from any (or a combination) of these
inputs.
3) It uses a more aggressive algorithm, both building chains from tho
bottom up to maximize benefit, and using an SCC-based walk to layout
chains of blocks in a profitable ordering without O(N^2) iterations
which the old pass involves.
The pass is currently gated behind a flag, and not enabled by default
because it still needs to grow some important features. Most notably, it
needs to support loop alignment and careful layout of loop structures
much as is currently done by hand in CodePlacementOpt. Once it supports
these, and has sufficient testing and quality tuning, it should replace
both of these passes.
Thanks to Nick Lewycky and Richard Smith for help authoring & debugging
this, and to Jakob, Andy, Eric, Jim, and probably a few others I'm
forgetting for reviewing and answering all my questions. Writing
a backend pass is *sooo* much better now than it used to be. =D
llvm-svn: 142641
2011-10-21 08:46:38 +02:00
|
|
|
void initializeMachineBlockPlacementPass(PassRegistry&);
|
2011-11-02 08:17:12 +01:00
|
|
|
void initializeMachineBlockPlacementStatsPass(PassRegistry&);
|
2011-06-16 22:22:37 +02:00
|
|
|
void initializeMachineBranchProbabilityInfoPass(PassRegistry&);
|
2010-10-07 06:17:38 +02:00
|
|
|
void initializeMachineCSEPass(PassRegistry&);
|
2017-03-18 06:05:29 +01:00
|
|
|
void initializeMachineCombinerPass(PassRegistry&);
|
2016-06-09 21:58:30 +02:00
|
|
|
void initializeMachineCopyPropagationPass(PassRegistry&);
|
2014-07-12 23:59:52 +02:00
|
|
|
void initializeMachineDominanceFrontierPass(PassRegistry&);
|
2016-06-09 21:58:30 +02:00
|
|
|
void initializeMachineDominatorTreePass(PassRegistry&);
|
|
|
|
void initializeMachineFunctionPrinterPassPass(PassRegistry&);
|
2010-10-07 06:17:38 +02:00
|
|
|
void initializeMachineLICMPass(PassRegistry&);
|
|
|
|
void initializeMachineLoopInfoPass(PassRegistry&);
|
2019-09-30 19:54:50 +02:00
|
|
|
void initializeMachineModuleInfoWrapperPassPass(PassRegistry &);
|
2017-01-26 00:20:33 +01:00
|
|
|
void initializeMachineOptimizationRemarkEmitterPassPass(PassRegistry&);
|
2017-03-06 22:31:18 +01:00
|
|
|
void initializeMachineOutlinerPass(PassRegistry&);
|
2016-07-29 18:44:44 +02:00
|
|
|
void initializeMachinePipelinerPass(PassRegistry&);
|
2016-06-09 21:58:30 +02:00
|
|
|
void initializeMachinePostDominatorTreePass(PassRegistry&);
|
2014-07-19 20:29:29 +02:00
|
|
|
void initializeMachineRegionInfoPassPass(PassRegistry&);
|
2012-01-17 07:55:03 +01:00
|
|
|
void initializeMachineSchedulerPass(PassRegistry&);
|
2010-10-07 06:17:38 +02:00
|
|
|
void initializeMachineSinkingPass(PassRegistry&);
|
2012-07-26 20:38:11 +02:00
|
|
|
void initializeMachineTraceMetricsPass(PassRegistry&);
|
2010-10-07 06:17:38 +02:00
|
|
|
void initializeMachineVerifierPassPass(PassRegistry&);
|
2016-06-14 04:44:55 +02:00
|
|
|
void initializeMemCpyOptLegacyPassPass(PassRegistry&);
|
2010-10-07 06:17:38 +02:00
|
|
|
void initializeMemDepPrinterPass(PassRegistry&);
|
2015-02-06 02:46:42 +01:00
|
|
|
void initializeMemDerefPrinterPass(PassRegistry&);
|
2016-03-10 01:55:30 +01:00
|
|
|
void initializeMemoryDependenceWrapperPassPass(PassRegistry&);
|
2017-03-18 06:05:29 +01:00
|
|
|
void initializeMemorySSAPrinterLegacyPassPass(PassRegistry&);
|
2016-06-01 23:30:40 +02:00
|
|
|
void initializeMemorySSAWrapperPassPass(PassRegistry&);
|
[NewPM] Port Msan
Summary:
Keeping msan a function pass requires replacing the module-level initialization:
instead of defining a ctor function which calls __msan_init, just declare the
init function at the first access and add that declaration to the global ctors
list (sketched below).
Changes:
- Pull the actual sanitizer and the wrapper pass apart.
- Add a newpm msan pass. The function pass inserts calls to runtime
library functions, for which it inserts declarations as necessary.
- Update tests.
Caveats:
- There is one test that I dropped, because it specifically tested the
definition of the ctor.
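A minimal sketch of that replacement (the helper name is made up and this is
not the exact code of the patch): declare void __msan_init() and register the
declaration itself in llvm.global_ctors via the existing ModuleUtils helper.
```
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"

using namespace llvm;

static void registerMsanInitCtor(Module &M) {
  LLVMContext &C = M.getContext();
  // Declare (not define) void __msan_init(); assumes no conflicting
  // declaration already exists in the module.
  Function *MsanInit = cast<Function>(
      M.getOrInsertFunction("__msan_init",
                            FunctionType::get(Type::getVoidTy(C), false))
          .getCallee());
  // Add the declaration itself to llvm.global_ctors; no wrapper ctor body.
  appendToGlobalCtors(M, MsanInit, /*Priority=*/0);
}
```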
Reviewers: chandlerc, fedor.sergeev, leonardchan, vitalybuka
Subscribers: sdardis, nemanjai, javed.absar, hiraditya, kbarton, bollu, atanasyan, jsji
Differential Revision: https://reviews.llvm.org/D55647
llvm-svn: 350305
2019-01-03 14:42:44 +01:00
|
|
|
void initializeMemorySanitizerLegacyPassPass(PassRegistry&);
|
2020-01-10 21:52:19 +01:00
|
|
|
void initializeMergeFunctionsLegacyPassPass(PassRegistry&);
|
2019-05-23 14:35:26 +02:00
|
|
|
void initializeMergeICmpsLegacyPassPass(PassRegistry &);
|
2017-03-18 06:05:29 +01:00
|
|
|
void initializeMergedLoadStoreMotionLegacyPassPass(PassRegistry&);
|
2012-09-11 04:46:18 +02:00
|
|
|
void initializeMetaRenamerPass(PassRegistry&);
|
2010-10-07 06:17:38 +02:00
|
|
|
void initializeModuleDebugInfoPrinterPass(PassRegistry&);
|
2017-03-18 06:05:29 +01:00
|
|
|
void initializeModuleSummaryIndexWrapperPassPass(PassRegistry&);
|
2019-09-03 10:20:31 +02:00
|
|
|
void initializeModuloScheduleTestPass(PassRegistry&);
|
2018-03-20 18:09:21 +01:00
|
|
|
void initializeMustExecutePrinterPass(PassRegistry&);
|
2019-08-23 17:17:27 +02:00
|
|
|
void initializeMustBeExecutedContextPrinterPass(PassRegistry&);
|
2017-03-18 06:05:29 +01:00
|
|
|
void initializeNameAnonGlobalLegacyPassPass(PassRegistry&);
|
2020-04-20 23:41:30 +02:00
|
|
|
void initializeUniqueInternalLinkageNamesLegacyPassPass(PassRegistry &);
|
2017-03-18 06:05:29 +01:00
|
|
|
void initializeNaryReassociateLegacyPassPass(PassRegistry&);
|
2017-03-12 05:46:45 +01:00
|
|
|
void initializeNewGVNLegacyPassPass(PassRegistry&);
|
[PM/AA] Rebuild LLVM's alias analysis infrastructure in a way compatible
with the new pass manager, and no longer relying on analysis groups.
This builds essentially a ground-up new AA infrastructure stack for
LLVM. The core ideas are the same that are used throughout the new pass
manager: type erased polymorphism and direct composition. The design is
as follows:
- FunctionAAResults is a type-erasing alias analysis results aggregation
interface to walk a single query across a range of results from
different alias analyses. Currently this is function-specific as we
always assume that aliasing queries are *within* a function.
- AAResultBase is a CRTP utility providing stub implementations of
various parts of the alias analysis result concept, notably in several
cases in terms of other more general parts of the interface. This can
be used to implement only a narrow part of the interface rather than
the entire interface. This isn't really ideal; this logic should be
hoisted into FunctionAAResults as currently it will cause
a significant amount of redundant work, but it faithfully models the
behavior of the prior infrastructure.
- All the alias analysis passes are ported to be wrapper passes for the
legacy PM and new-style analysis passes for the new PM with a shared
result object. In some cases (most notably CFL), this is an extremely
naive approach that we should revisit when we can specialize for the
new pass manager.
- BasicAA has been restructured to reflect that it is much more
fundamentally a function analysis because it uses dominator trees and
loop info that need to be constructed for each function.
All of the references to getting alias analysis results have been
updated to use the new aggregation interface. All the preservation and
other pass management code has been updated accordingly.
The way the FunctionAAResultsWrapperPass works is to detect the
available alias analyses when run, and add them to the results object.
This means that we should be able to continue to respect when various
passes are added to the pipeline, for example adding CFL or adding TBAA
passes should just cause their results to be available and to get folded
into this. The exception to this rule is BasicAA which really needs to
be a function pass due to using dominator trees and loop info. As
a consequence, the FunctionAAResultsWrapperPass directly depends on
BasicAA and always includes it in the aggregation.
This has significant implications for preserving analyses. Generally,
most passes shouldn't bother preserving FunctionAAResultsWrapperPass
because rebuilding the results just updates the set of known AA passes.
The exception to this rule is LoopPass instances, which need to preserve
all the function analyses that the loop pass manager will end up
needing. This means preserving both BasicAAWrapperPass and the
aggregating FunctionAAResultsWrapperPass.
Now, when preserving an alias analysis, you do so by directly preserving
that analysis. This is only necessary for non-immutable-pass-provided
alias analyses though, and there are only three of interest: BasicAA,
GlobalsAA (formerly GlobalsModRef), and SCEVAA. Usually BasicAA is
preserved when needed because it (like DominatorTree and LoopInfo) is
marked as a CFG-only pass. I've expanded GlobalsAA into the preserved
set everywhere we previously were preserving all of AliasAnalysis, and
I've added SCEVAA in the intersection of that with where we preserve
SCEV itself.
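Concretely, for a legacy loop pass the preservation rule above might look like
the following getAnalysisUsage sketch (the pass class here is hypothetical; the
aggregation wrapper is spelled AAResultsWrapperPass in the tree):
```
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"

using namespace llvm;

namespace {
struct ExampleLoopTransform : public LoopPass { // hypothetical example pass
  static char ID;
  ExampleLoopTransform() : LoopPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AAResultsWrapperPass>();
    // Loop passes must keep the function-level AA stack alive for the loop
    // pass manager: preserve BasicAA and the aggregating wrapper.
    AU.addPreserved<BasicAAWrapperPass>();
    AU.addPreserved<AAResultsWrapperPass>();
    // The non-immutable-pass-provided AAs called out above.
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<SCEVAAWrapperPass>();
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override { return false; }
};
} // namespace

char ExampleLoopTransform::ID = 0;
```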
One significant challenge to all of this is that the CGSCC passes were
actually using the alias analysis implementations by taking advantage of
a pretty amazing set of loopholes in the old pass manager's analysis
management code which allowed analysis groups to slide through in many
cases. Moving away from analysis groups makes this problem much more
obvious. To fix it, I've leveraged the flexibility the design of the new
PM components provides to just directly construct the relevant alias
analyses for the relevant functions in the IPO passes that need them.
This is a bit hacky, but should go away with the new pass manager, and
is already in many ways cleaner than the prior state.
Another significant challenge is that various facilities of the old
alias analysis infrastructure just don't fit any more. The most
significant of these is the alias analysis 'counter' pass. That pass
relied on the ability to snoop on AA queries at different points in the
analysis group chain. Instead, I'm planning to build printing
functionality directly into the aggregation layer. I've not included
that in this patch merely to keep it smaller.
Note that all of this needs a nearly complete rewrite of the AA
documentation. I'm planning to do that, but I'd like to make sure the
new design settles, and to flesh out a bit more of what it looks like in
the new pass manager first.
Differential Revision: http://reviews.llvm.org/D12080
llvm-svn: 247167
2015-09-09 19:55:00 +02:00
|
|
|
void initializeObjCARCAAWrapperPassPass(PassRegistry&);
|
2012-01-17 21:52:24 +01:00
|
|
|
void initializeObjCARCAPElimPass(PassRegistry&);
|
2011-06-16 01:37:01 +02:00
|
|
|
void initializeObjCARCContractPass(PassRegistry&);
|
2016-06-09 21:58:30 +02:00
|
|
|
void initializeObjCARCExpandPass(PassRegistry&);
|
2011-06-16 01:37:01 +02:00
|
|
|
void initializeObjCARCOptPass(PassRegistry&);
|
2016-07-18 18:29:21 +02:00
|
|
|
void initializeOptimizationRemarkEmitterWrapperPassPass(PassRegistry&);
|
2010-10-07 06:17:38 +02:00
|
|
|
void initializeOptimizePHIsPass(PassRegistry&);
|
2017-03-18 06:05:29 +01:00
|
|
|
void initializePAEvalPass(PassRegistry&);
|
2010-10-07 06:17:38 +02:00
|
|
|
void initializePEIPass(PassRegistry&);
|
2016-06-09 21:58:30 +02:00
|
|
|
void initializePGOIndirectCallPromotionLegacyPassPass(PassRegistry&);
|
|
|
|
void initializePGOInstrumentationGenLegacyPassPass(PassRegistry&);
|
|
|
|
void initializePGOInstrumentationUseLegacyPassPass(PassRegistry&);
|
2019-02-27 18:24:33 +01:00
|
|
|
void initializePGOInstrumentationGenCreateVarLegacyPassPass(PassRegistry&);
|
2017-04-04 18:42:20 +02:00
|
|
|
void initializePGOMemOPSizeOptLegacyPassPass(PassRegistry&);
|
2010-10-07 06:17:38 +02:00
|
|
|
void initializePHIEliminationPass(PassRegistry&);
|
2017-03-18 06:05:29 +01:00
|
|
|
void initializePartialInlinerLegacyPassPass(PassRegistry&);
|
|
|
|
void initializePartiallyInlineLibCallsLegacyPassPass(PassRegistry&);
|
|
|
|
void initializePatchableFunctionPass(PassRegistry&);
|
2010-10-07 06:17:38 +02:00
|
|
|
void initializePeepholeOptimizerPass(PassRegistry&);
|
2018-06-28 16:13:06 +02:00
|
|
|
void initializePhiValuesWrapperPassPass(PassRegistry&);
|
2017-03-18 06:05:29 +01:00
|
|
|
void initializePhysicalRegisterUsageInfoPass(PassRegistry&);
|
2016-06-09 21:58:30 +02:00
|
|
|
void initializePlaceBackedgeSafepointsImplPass(PassRegistry&);
|
|
|
|
void initializePlaceSafepointsPass(PassRegistry&);
|
2010-10-07 06:17:38 +02:00
|
|
|
void initializePostDomOnlyPrinterPass(PassRegistry&);
|
|
|
|
void initializePostDomOnlyViewerPass(PassRegistry&);
|
|
|
|
void initializePostDomPrinterPass(PassRegistry&);
|
|
|
|
void initializePostDomViewerPass(PassRegistry&);
|
2016-02-25 18:54:07 +01:00
|
|
|
void initializePostDominatorTreeWrapperPassPass(PassRegistry&);
|
2018-07-26 02:27:48 +02:00
|
|
|
void initializePostInlineEntryExitInstrumenterPass(PassRegistry&);
|
2016-06-09 21:58:30 +02:00
|
|
|
void initializePostMachineSchedulerPass(PassRegistry&);
|
2016-02-18 12:03:11 +01:00
|
|
|
void initializePostOrderFunctionAttrsLegacyPassPass(PassRegistry&);
|
2016-04-22 16:43:50 +02:00
|
|
|
void initializePostRAHazardRecognizerPass(PassRegistry&);
|
[CodeGen] Add a new pass for PostRA sink
Summary:
This pass sinks COPY instructions into a successor block, if the COPY is not
used in the current block and the COPY is live-in to a single successor
(i.e., doesn't require the COPY to be duplicated). This avoids executing
the copy on paths where its result isn't needed. This also exposes
additional opportunities for dead copy elimination and shrink wrapping.
These copies were either not handled by the MachineSink pass or are inserted
after it. As an example of the former case, the MachineSink pass cannot sink
COPY instructions with allocatable source registers; for AArch64 these types
of copy instructions are frequently used to move function parameters (PhyReg)
into virtual registers in the entry block.
For the machine IR below, this pass will sink %w19 in the entry block into its
successor (%bb.1) because %w19 is only live-in to %bb.1.
```
%bb.0:
%wzr = SUBSWri %w1, 1
%w19 = COPY %w0
Bcc 11, %bb.2
%bb.1:
Live Ins: %w19
BL @fun
%w0 = ADDWrr %w0, %w19
RET %w0
%bb.2:
%w0 = COPY %wzr
RET %w0
```
As we sink %w19 (CSR in AArch64) into %bb.1, the shrink-wrapping pass will be
able to see %bb.0 as a candidate.
With this change I observed 12% more shrink-wrapping candidates and 13% more dead copies deleted in spec2000/2006/2017 on AArch64.
Reviewers: qcolombet, MatzeB, thegameg, mcrosier, gberry, hfinkel, john.brawn, twoh, RKSimon, sebpop, kparzysz
Reviewed By: sebpop
Subscribers: evandro, sebpop, sfertile, aemerson, mgorny, javed.absar, kristof.beyls, llvm-commits
Differential Revision: https://reviews.llvm.org/D41463
llvm-svn: 328237
2018-03-22 21:06:47 +01:00
|
|
|
void initializePostRAMachineSinkingPass(PassRegistry&);
|
2012-02-08 22:23:13 +01:00
|
|
|
void initializePostRASchedulerPass(PassRegistry&);
|
2016-06-24 22:13:42 +02:00
|
|
|
void initializePreISelIntrinsicLoweringLegacyPassPass(PassRegistry&);
|
2017-03-18 06:05:29 +01:00
|
|
|
void initializePredicateInfoPrinterLegacyPassPass(PassRegistry&);
|
2014-01-12 13:15:39 +01:00
|
|
|
void initializePrintFunctionPassWrapperPass(PassRegistry&);
|
|
|
|
void initializePrintModulePassWrapperPass(PassRegistry&);
|
2010-10-07 06:17:38 +02:00
|
|
|
void initializeProcessImplicitDefsPass(PassRegistry&);
|
2017-03-18 06:05:29 +01:00
|
|
|
void initializeProfileSummaryInfoWrapperPassPass(PassRegistry&);
|
|
|
|
void initializePromoteLegacyPassPass(PassRegistry&);
|
2010-10-07 06:17:38 +02:00
|
|
|
void initializePruneEHPass(PassRegistry&);
|
2017-06-03 01:07:58 +02:00
|
|
|
void initializeRABasicPass(PassRegistry&);
|
2017-03-18 06:05:29 +01:00
|
|
|
void initializeRAGreedyPass(PassRegistry&);
|
2018-07-26 02:27:48 +02:00
|
|
|
void initializeReachingDefAnalysisPass(PassRegistry&);
|
2016-04-27 01:39:29 +02:00
|
|
|
void initializeReassociateLegacyPassPass(PassRegistry&);
|
[BasicBlockUtils] Add utility to remove redundant dbg.value instrs
Summary:
Add a RemoveRedundantDbgInstrs utility to BasicBlockUtils with the
goal of removing redundant dbg intrinsics from a basic block.
This can be useful after various transforms, as it might
be simpler to filter dbg intrinsics after the transform
than during it.
One primary use case would be to replace the overly aggressive
removal done by MergeBlockIntoPredecessor, as seen in loop
rotation (not done in this patch).
The elimination algorithm currently focuses on dbg.value
intrinsics and does two iterations over the BB.
First we iterate backward, starting at the last instruction
in the BB. Whenever a consecutive sequence of dbg.value
instructions is found, we keep the last dbg.value for
each variable found (variable fragments are identified
using the {DILocalVariable, FragmentInfo, inlinedAt}
triple as given by the DebugVariable helper class).
Next we iterate forward starting at the first instruction
in the BB. Whenever we find a dbg.value describing a
DebugVariable (identified by {DILocalVariable, inlinedAt})
we save the {DIValue, DIExpression} that describes that
variable's value. But if the variable was already mapped
to the same {DIValue, DIExpression} pair, we instead drop
the second dbg.value.
To ease the process of making lit tests for this utility, a
new pass called RedundantDbgInstElimination is introduced.
It can be executed by opt using -redundant-dbg-inst-elim.
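A rough sketch of the backward scan (this is not the BasicBlockUtils
implementation, just an illustration of the DebugVariable-keyed dedup within
each consecutive run of dbg.values):
```
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/IntrinsicInst.h"

using namespace llvm;

static bool removeBackwardRedundantDbgValues(BasicBlock &BB) {
  SmallVector<DbgValueInst *, 8> ToRemove;
  SmallDenseSet<DebugVariable> VariableSet;
  for (Instruction &I : reverse(BB)) {
    if (auto *DVI = dyn_cast<DbgValueInst>(&I)) {
      // Key on the {DILocalVariable, FragmentInfo, inlinedAt} triple.
      DebugVariable Key(DVI->getVariable(),
                        DVI->getExpression()->getFragmentInfo(),
                        DVI->getDebugLoc().getInlinedAt());
      // A later dbg.value in this consecutive run already describes this
      // variable (fragment), so this earlier one is redundant.
      if (!VariableSet.insert(Key).second)
        ToRemove.push_back(DVI);
      continue;
    }
    // Any other instruction ends the consecutive run of dbg.values.
    VariableSet.clear();
  }
  for (DbgValueInst *DVI : ToRemove)
    DVI->eraseFromParent();
  return !ToRemove.empty();
}
```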
Reviewers: aprantl, jmorse, vsk
Subscribers: hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D71478
2019-12-12 20:51:13 +01:00
|
|
|
void initializeRedundantDbgInstEliminationPass(PassRegistry&);
|
2018-07-26 02:27:48 +02:00
|
|
|
void initializeRegAllocFastPass(PassRegistry&);
|
2017-03-18 06:05:29 +01:00
|
|
|
void initializeRegBankSelectPass(PassRegistry&);
|
2010-10-07 06:17:38 +02:00
|
|
|
void initializeRegToMemPass(PassRegistry&);
|
2018-07-26 02:27:51 +02:00
|
|
|
void initializeRegUsageInfoCollectorPass(PassRegistry&);
|
|
|
|
void initializeRegUsageInfoPropagationPass(PassRegistry&);
|
2014-07-19 20:29:29 +02:00
|
|
|
void initializeRegionInfoPassPass(PassRegistry&);
|
2010-10-07 06:17:38 +02:00
|
|
|
void initializeRegionOnlyPrinterPass(PassRegistry&);
|
|
|
|
void initializeRegionOnlyViewerPass(PassRegistry&);
|
|
|
|
void initializeRegionPrinterPass(PassRegistry&);
|
|
|
|
void initializeRegionViewerPass(PassRegistry&);
|
2016-06-09 21:58:30 +02:00
|
|
|
void initializeRegisterCoalescerPass(PassRegistry&);
|
2016-06-01 00:38:06 +02:00
|
|
|
void initializeRenameIndependentSubregsPass(PassRegistry&);
|
2017-03-18 06:05:29 +01:00
|
|
|
void initializeResetMachineFunctionPass(PassRegistry&);
|
[PM] Port ReversePostOrderFunctionAttrs to the new PM
Below are my super rough notes when porting. They can probably serve as
a basic guide for porting other passes to the new PM. As I port more
passes I'll expand and generalize this and make a proper
docs/HowToPortToNewPassManager.rst document. There is also missing
documentation for general concepts and APIs in the new PM, which will
need to be written.
Once there is proper documentation in place we can put up a list of
passes that have to be ported and game-ify/crowdsource the rest of the
porting (at least of the middle end; the backend is still unclear).
I will however be taking personal responsibility for ensuring that the
LLD/ELF LTO pipeline is ported in a timely fashion. The remaining passes
to be ported are (do something like
`git grep "<the string in the bullet point below>"` to find the pass):
General Scalar:
[ ] Simplify the CFG
[ ] Jump Threading
[ ] MemCpy Optimization
[ ] Promote Memory to Register
[ ] MergedLoadStoreMotion
[ ] Lazy Value Information Analysis
General IPO:
[ ] Dead Argument Elimination
[ ] Deduce function attributes in RPO
Loop stuff / vectorization stuff:
[ ] Alignment from assumptions
[ ] Canonicalize natural loops
[ ] Delete dead loops
[ ] Loop Access Analysis
[ ] Loop Invariant Code Motion
[ ] Loop Vectorization
[ ] SLP Vectorizer
[ ] Unroll loops
Devirtualization / CFI:
[ ] Cross-DSO CFI
[ ] Whole program devirtualization
[ ] Lower bitset metadata
CGSCC passes:
[ ] Function Integration/Inlining
[ ] Remove unused exception handling info
[ ] Promote 'by reference' arguments to scalars
Please let me know if you are interested in working on any of the passes
in the above list (e.g. reply to the post-commit thread for this patch).
I'll probably be tackling "General Scalar" and "General IPO" first FWIW.
Steps as I port "Deduce function attributes in RPO"
---------------------------------------------------
(note: if you are doing any work based on these notes, please leave a
note in the post-commit review thread for this commit with any
improvements / suggestions / incompleteness you ran into!)
Note: "Deduce function attributes in RPO" is a module pass.
1. Do preparatory refactoring.
In this case all I had to do was pull out a static helper (r272503).
(TODO: give more advice here e.g. if pass holds state or something)
2. Rename the old pass class.
llvm/lib/Transforms/IPO/FunctionAttrs.cpp
Rename class ReversePostOrderFunctionAttrs -> ReversePostOrderFunctionAttrsLegacyPass
in preparation for adding a class ReversePostOrderFunctionAttrs as the pass in the new PM.
(edit: actually wait what? The new class name will be
ReversePostOrderFunctionAttrsPass, so it doesn't conflict. So this step is
sort of useless churn).
llvm/include/llvm/InitializePasses.h
llvm/lib/LTO/LTOCodeGenerator.cpp
llvm/lib/Transforms/IPO/IPO.cpp
llvm/lib/Transforms/IPO/FunctionAttrs.cpp
Rename initializeReversePostOrderFunctionAttrsPass -> initializeReversePostOrderFunctionAttrsLegacyPassPass
(note that the "PassPass" thing falls out of `s/ReversePostOrderFunctionAttrs/ReversePostOrderFunctionAttrsLegacyPass/`)
Note that the INITIALIZE_PASS macro is what creates this identifier name, so renaming the class requires this renaming too.
Note that createReversePostOrderFunctionAttrsPass does not need to be
renamed since its name is not generated from the class name.
3. Add the new PM pass class.
In the new PM all passes need to have their
declaration in a header somewhere, so you will often need to add a header.
In this case
llvm/include/llvm/Transforms/IPO/FunctionAttrs.h is already there because
PostOrderFunctionAttrsPass was already ported.
The file-level comment from the .cpp file can be used as the file-level
comment for the new header. You may want to tweak the wording slightly
from "this file implements" to "this file provides" or similar.
Add declaration for the new PM pass in this header:
class ReversePostOrderFunctionAttrsPass
: public PassInfoMixin<ReversePostOrderFunctionAttrsPass> {
public:
PreservedAnalyses run(Module &M, AnalysisManager<Module> &AM);
};
Its name should end with `Pass` for consistency (note that this doesn't
collide with the names of most old PM passes). E.g. call it
`<name of the old PM pass>Pass`.
Also, move the doxygen comment from the old PM pass to the declaration of
this class in the header.
Also, include the declaration for the new PM class
`llvm/Transforms/IPO/FunctionAttrs.h` at the top of the file (in this case,
it was already done when the other pass in this file was ported).
Now define the `run` method for the new class.
The main things here are:
a) Use AM.getResult<...>(M) to get results instead of `getAnalysis<...>()`
b) If the old PM pass would have returned "false" (i.e. `Changed ==
false`), then you should return PreservedAnalyses::all();
c) In the old PM getAnalysisUsage method, observe the calls
`AU.addPreserved<...>();`.
In the case `Changed == true`, for each preserved analysis you should
call `PA.preserve<...>()` on a PreservedAnalyses object and return it
(see the condensed run() sketch at the end of these notes).
E.g.:
PreservedAnalyses PA;
PA.preserve<CallGraphAnalysis>();
return PA;
Note that calls to skipModule/skipFunction are not supported in the new PM
currently, so optnone and optimization bisect support do not work. You can
just drop those calls for now.
4. Add the pass to the new PM pass registry to make it available in opt.
In llvm/lib/Passes/PassBuilder.cpp add a #include for your header.
`#include "llvm/Transforms/IPO/FunctionAttrs.h"`
In this case there is already an include (from when
PostOrderFunctionAttrsPass was ported).
Add your pass to llvm/lib/Passes/PassRegistry.def
In this case, I added
`MODULE_PASS("rpo-functionattrs", ReversePostOrderFunctionAttrsPass())`
The string is from the `INITIALIZE_PASS*` macros used in the old pass
manager.
Then choose a test that uses the pass and use the new PM `-passes=...` to
run it.
E.g. in this case there is a test that does:
; RUN: opt < %s -basicaa -functionattrs -rpo-functionattrs -S | FileCheck %s
I have added the line:
; RUN: opt < %s -aa-pipeline=basic-aa -passes='require<targetlibinfo>,cgscc(function-attrs),rpo-functionattrs' -S | FileCheck %s
The `-aa-pipeline=basic-aa` and
`require<targetlibinfo>,cgscc(function-attrs)` are what is needed to run
functionattrs in the new PM (note that in the new PM "functionattrs"
becomes "function-attrs" for some reason). This is just pulled from
`readattrs.ll` which contains the change from when functionattrs was ported
to the new PM.
Adding rpo-functionattrs causes the pass that was just ported to run.
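Putting steps 3(a)-(c) together, the new run method ends up shaped roughly like
the following condensed sketch (`deduceFunctionAttrsInRPO` is a placeholder for
the pass's existing static helper, not its real name):
```
#include "llvm/Analysis/CallGraph.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/IPO/FunctionAttrs.h"

using namespace llvm;

// Stand-in for the existing static helper that walks functions in RPO and
// returns whether anything changed.
static bool deduceFunctionAttrsInRPO(Module &M, CallGraph &CG) { return false; }

PreservedAnalyses
ReversePostOrderFunctionAttrsPass::run(Module &M, AnalysisManager<Module> &AM) {
  auto &CG = AM.getResult<CallGraphAnalysis>(M); // (a) AM.getResult<> not getAnalysis<>

  if (!deduceFunctionAttrsInRPO(M, CG))          // (b) Changed == false
    return PreservedAnalyses::all();

  PreservedAnalyses PA;                          // (c) mirror the old addPreserved<> calls
  PA.preserve<CallGraphAnalysis>();
  return PA;
}
```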
llvm-svn: 272505
2016-06-12 09:48:51 +02:00
|
|
|
void initializeReversePostOrderFunctionAttrsLegacyPassPass(PassRegistry&);
|
2017-12-15 10:32:11 +01:00
|
|
|
void initializeRewriteStatepointsForGCLegacyPassPass(PassRegistry &);
|
2016-07-25 22:52:00 +02:00
|
|
|
void initializeRewriteSymbolsLegacyPassPass(PassRegistry&);
|
2017-03-18 06:05:29 +01:00
|
|
|
void initializeSCCPLegacyPassPass(PassRegistry&);
|
2016-06-09 21:58:30 +02:00
|
|
|
void initializeSCEVAAWrapperPassPass(PassRegistry&);
|
|
|
|
void initializeSLPVectorizerPass(PassRegistry&);
|
2015-09-12 11:09:14 +02:00
|
|
|
void initializeSROALegacyPassPass(PassRegistry&);
|
2017-05-10 02:39:22 +02:00
|
|
|
void initializeSafeStackLegacyPassPass(PassRegistry&);
|
2018-07-26 02:27:48 +02:00
|
|
|
void initializeSafepointIRVerifierPass(PassRegistry&);
|
2016-06-09 21:58:30 +02:00
|
|
|
void initializeSampleProfileLoaderLegacyPassPass(PassRegistry&);
|
2019-07-25 22:53:15 +02:00
|
|
|
void initializeModuleSanitizerCoverageLegacyPassPass(PassRegistry &);
|
[PM] Port ScalarEvolution to the new pass manager.
This change makes ScalarEvolution a stand-alone object and just produces
one from a pass as needed. Making this work well requires making the
object movable, using references instead of overwritten pointers in
a number of places, and other refactorings.
I've also wired it up to the new pass manager and added a RUN line to
a test to exercise it under the new pass manager. This includes basic
printing support much like with other analyses.
But there is a big and somewhat scary change here. Prior to this patch
ScalarEvolution was never *actually* invalidated!!! Re-running the pass
just re-wired up the various other analyses and didn't remove any of the
existing entries in the SCEV caches or clear out anything at all. This
might seem OK, as everything in SCEV that can do so uses ValueHandles to track
updates to the values that serve as SCEV keys. However, this still means
that as we ran SCEV over each function in the module, we kept
accumulating more and more SCEVs into the cache. At the end, we would
have a SCEV cache with every value that we ever needed a SCEV for in the
entire module!!! Yowzers. The releaseMemory routine would dump all of
this, but that isn't really called during normal runs of the pipeline as
far as I can see.
To make matters worse, there *is* actually a key that we don't update
with value handles -- there is a map keyed off of Loop*s. Because
LoopInfo *does* release its memory from run to run, it is entirely
possible to run SCEV over one function, then over another function, and
then look up a Loop* from the second function but find an entry inserted
for the first function! Ouch.
To make matters still worse, there are plenty of updates that *don't*
trip a value handle. It seems incredibly unlikely that today GVN or
another pass that invalidates SCEV can update values in *just* such
a way that a subsequent run of SCEV will incorrectly find lookups in
a cache, but it is theoretically possible and would be a nightmare to
debug.
With this refactoring, I've fixed all this by actually destroying and
recreating the ScalarEvolution object from run to run. Technically, this
could increase the amount of malloc traffic we see, but then again it is
also technically correct. ;] I don't actually think we're suffering from
tons of malloc traffic from SCEV because if we were, the fact that we
never clear the memory would seem more likely to have come up as an
actual problem before now. So, I've made the simple fix here. If in fact
there are serious issues with too much allocation and deallocation,
I can work on a clever fix that preserves the allocations (while
clearing the data) between each run, but I'd prefer to do that kind of
optimization with a test case / benchmark that shows why we need such
cleverness (and that can test that we actually make it faster). It's
possible that this will make some things faster by making the SCEV
caches have higher locality (due to being significantly smaller) so
until there is a clear benchmark, I think the simple change is best.
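From a consumer's point of view the new-PM wiring looks like any other
function analysis; a tiny illustrative sketch (the pass here is hypothetical):
```
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

struct SCEVConsumerSketch : PassInfoMixin<SCEVConsumerSketch> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
    // The ScalarEvolution result is produced per function and invalidated
    // like any other analysis, rather than accumulating module-wide state.
    ScalarEvolution &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
    SE.print(errs()); // basic printing, as with other analyses
    return PreservedAnalyses::all();
  }
};
```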
Differential Revision: http://reviews.llvm.org/D12063
llvm-svn: 245193
2015-08-17 04:08:17 +02:00
|
|
|
void initializeScalarEvolutionWrapperPassPass(PassRegistry&);
|
2017-05-15 13:30:54 +02:00
|
|
|
void initializeScalarizeMaskedMemIntrinPass(PassRegistry&);
|
2018-11-21 15:00:17 +01:00
|
|
|
void initializeScalarizerLegacyPassPass(PassRegistry&);
|
2017-06-03 01:01:42 +02:00
|
|
|
void initializeScavengerTestPass(PassRegistry&);
|
2016-06-09 21:58:30 +02:00
|
|
|
void initializeScopedNoAliasAAWrapperPassPass(PassRegistry&);
|
2017-03-18 06:05:29 +01:00
|
|
|
void initializeSeparateConstOffsetFromGEPPass(PassRegistry&);
|
2016-06-09 21:58:30 +02:00
|
|
|
void initializeShadowStackGCLoweringPass(PassRegistry&);
|
2017-03-18 06:05:29 +01:00
|
|
|
void initializeShrinkWrapPass(PassRegistry&);
|
2010-10-07 06:17:38 +02:00
|
|
|
void initializeSimpleInlinerPass(PassRegistry&);
|
[PM/LoopUnswitch] Introduce a new, simpler loop unswitch pass.
Currently, this pass focuses only on *trivial* loop unswitching. Even for that
reduced problem, it remains significantly better than the current loop
unswitch:
- Old pass is worse than cubic complexity. New pass is (I think) linear.
- New pass is much simpler in its design by focusing on full unswitching. (See
below for details on this).
- New pass doesn't carry state for thresholds between pass iterations.
- New pass doesn't carry state for correctness (both miscompile and
infloop) between pass iterations.
- New pass produces substantially better code after unswitching.
- New pass can handle more trivial unswitch cases.
- New pass doesn't recompute the dominator tree for the entire function
and instead incrementally updates it.
I've ported all of the trivial unswitching test cases from the old pass
to the new one to make sure that major functionality isn't lost in the
process. For several of the test cases I've worked to improve the
precision and rigor of the CHECKs, but for many I've just updated them
to handle the new IR produced.
My initial motivation was the fact that the old pass carried state in
very unreliable ways between pass iterations, and these mechanisms were
incompatible with the new pass manager. However, I discovered many more
improvements to make along the way.
This pass makes two very significant assumptions that enable most of these
improvements:
1) Focus on *full* unswitching -- that is, completely removing whatever
control flow construct is being unswitched from the loop. In the case
of trivial unswitching, this means removing the trivial (exiting)
edge. In non-trivial unswitching, this means removing the branch or
switch itself. This is in opposition to *partial* unswitching where
some part of the unswitched control flow remains in the loop. Partial
unswitching only really applies to switches and to folded branches.
These are very similar to full unrolling and partial unrolling. The
full form is an effective canonicalization; the partial form needs
a complex cost model, cannot be iterated, isn't canonicalizing, and
should be a separate pass that runs very late (much like unrolling).
2) Leverage LLVM's Loop machinery to the fullest. The original unswitch
dates from a time when a great deal of LLVM's loop infrastructure was
missing, ineffective, and/or unreliable. As a consequence, a lot of
complexity was added which we no longer need.
With these two overarching principles, I think we can build a fast and
effective unswitcher that fits in well in the new PM and in the
canonicalization pipeline. Some of the remaining functionality around
partial unswitching may not be relevant today (not many test cases or
benchmarks I can find) but if they are I'd like to add support for them
as a separate layer that runs very late in the pipeline.
Purely to make reviewing and introducing this code more manageable, I've
split this into first a trivial-unswitch-only pass and in the next patch
I'll add support for full non-trivial unswitching against a *fixed*
threshold, exactly like full unrolling. I even plan to re-use the
unrolling thresholds, as these are incredibly similar cost tradeoffs:
we're cloning a loop body in order to end up with simplified control
flow. We should only do that when the total growth is reasonably small.
One of the biggest changes with this pass compared to the previous one
is that previously, each individual trivial exiting edge from a switch
was unswitched separately as a branch. Now, we unswitch the entire
switch at once, with cases going to the various destinations. This lets
us unswitch multiple exiting edges in a single operation and also avoids
numerous extremely bad behaviors, where we would introduce 1000s of
branches to test for thousands of possible values, all of which would
take the exact same exit path bypassing the loop. Now we will use
a switch with 1000s of cases that can be efficiently lowered into
a jumptable. This avoids relying on somehow forming a switch out of the
branches or getting horrible code if that fails for any reason.
Another significant change is that this pass actively updates the CFG
based on unswitching. For trivial unswitching, this is actually very
easy because of the definition of loop simplified form. Doing this makes
the code coming out of loop unswitch dramatically more friendly. We
still should run loop-simplifycfg (at the least) after this to clean up,
but it will have to do a lot less work.
Finally, this pass makes far fewer attempts to simplify instructions
based on the unswitch. Something like loop-instsimplify, instcombine, or
GVN can be used to do increasingly powerful simplifications based on the
now dominating predicate. The old simplifications are things that
something like loop-instsimplify should get today or a very, very basic
loop-instcombine could get. Keeping that logic separate is a big
simplifying technique.
Most of the code in this pass that isn't in the old one has to do with
achieving specific goals:
- Updating the dominator tree as we go
- Unswitching all cases in a switch in a single step.
I think it is still shorter than just the trivial unswitching code in
the old pass despite having this functionality.
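For reference, one way to schedule the new pass under the new PM, with
loop-simplifycfg queued after it for the cleanup mentioned above (a usage
sketch, not part of this patch):
```
#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
#include "llvm/Transforms/Scalar/LoopSimplifyCFG.h"
#include "llvm/Transforms/Scalar/SimpleLoopUnswitch.h"

#include <utility>

using namespace llvm;

static FunctionPassManager buildUnswitchPipeline() {
  LoopPassManager LPM;
  LPM.addPass(SimpleLoopUnswitchPass());
  LPM.addPass(LoopSimplifyCFGPass()); // clean up after unswitching
  FunctionPassManager FPM;
  FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM)));
  return FPM;
}
```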
Differential Revision: https://reviews.llvm.org/D32409
llvm-svn: 301576
2017-04-27 20:45:20 +02:00
|
|
|
void initializeSimpleLoopUnswitchLegacyPassPass(PassRegistry&);
|
2010-10-07 06:17:38 +02:00
|
|
|
void initializeSingleLoopExtractorPass(PassRegistry&);
|
2016-04-22 21:54:10 +02:00
|
|
|
void initializeSinkingLegacyPassPass(PassRegistry&);
|
2016-06-09 21:58:30 +02:00
|
|
|
void initializeSjLjEHPreparePass(PassRegistry&);
|
2010-10-07 06:17:38 +02:00
|
|
|
void initializeSlotIndexesPass(PassRegistry&);
|
2016-08-01 23:48:33 +02:00
|
|
|
void initializeSpeculativeExecutionLegacyPassPass(PassRegistry&);
|
2016-06-09 21:58:30 +02:00
|
|
|
void initializeSpillPlacementPass(PassRegistry&);
|
2012-09-06 11:17:37 +02:00
|
|
|
void initializeStackColoringPass(PassRegistry&);
|
2016-06-09 21:58:30 +02:00
|
|
|
void initializeStackMapLivenessPass(PassRegistry&);
|
|
|
|
void initializeStackProtectorPass(PassRegistry&);
|
2018-11-27 00:05:48 +01:00
|
|
|
void initializeStackSafetyGlobalInfoWrapperPassPass(PassRegistry &);
|
2018-11-26 22:57:47 +01:00
|
|
|
void initializeStackSafetyInfoWrapperPassPass(PassRegistry &);
|
2010-10-07 06:17:38 +02:00
|
|
|
void initializeStackSlotColoringPass(PassRegistry&);
|
2017-03-18 06:05:29 +01:00
|
|
|
void initializeStraightLineStrengthReducePass(PassRegistry&);
|
2010-10-07 06:17:38 +02:00
|
|
|
void initializeStripDeadDebugInfoPass(PassRegistry&);
|
2015-10-31 00:28:12 +01:00
|
|
|
void initializeStripDeadPrototypesLegacyPassPass(PassRegistry&);
|
2010-10-07 06:17:38 +02:00
|
|
|
void initializeStripDebugDeclarePass(PassRegistry&);
|
2020-04-08 19:27:17 +02:00
|
|
|
void initializeStripDebugMachineModulePass(PassRegistry &);
|
2017-03-18 06:05:29 +01:00
|
|
|
void initializeStripGCRelocatesPass(PassRegistry&);
|
2010-10-07 06:17:38 +02:00
|
|
|
void initializeStripNonDebugSymbolsPass(PassRegistry&);
|
2016-10-25 20:44:13 +02:00
|
|
|
void initializeStripNonLineTableDebugInfoPass(PassRegistry&);
|
2010-10-07 06:17:38 +02:00
|
|
|
void initializeStripSymbolsPass(PassRegistry&);
|
2016-06-09 21:58:30 +02:00
|
|
|
void initializeStructurizeCFGPass(PassRegistry&);
|
2010-10-07 06:17:38 +02:00
|
|
|
void initializeTailCallElimPass(PassRegistry&);
|
2018-01-19 07:08:17 +01:00
|
|
|
void initializeTailDuplicatePass(PassRegistry&);
|
2017-03-18 06:05:29 +01:00
|
|
|
void initializeTargetLibraryInfoWrapperPassPass(PassRegistry&);
|
2012-02-04 03:56:45 +01:00
|
|
|
void initializeTargetPassConfigPass(PassRegistry&);
|
2017-03-18 06:05:29 +01:00
|
|
|
void initializeTargetTransformInfoWrapperPassPass(PassRegistry&);
|
2019-01-16 10:28:01 +01:00
|
|
|
void initializeThreadSanitizerLegacyPassPass(PassRegistry&);
|
2015-02-19 20:14:34 +01:00
|
|
|
void initializeTwoAddressInstructionPassPass(PassRegistry&);
|
2015-09-09 19:55:00 +02:00
|
|
|
void initializeTypeBasedAAWrapperPassPass(PassRegistry&);
|
2019-12-03 12:00:32 +01:00
|
|
|
void initializeTypePromotionPass(PassRegistry&);
|
2015-02-19 20:14:34 +01:00
|
|
|
void initializeUnifyFunctionExitNodesPass(PassRegistry&);
|
2020-03-28 12:13:35 +01:00
|
|
|
void initializeUnifyLoopExitsPass(PassRegistry &);
|
2016-06-09 21:58:30 +02:00
|
|
|
void initializeUnpackMachineBundlesPass(PassRegistry&);
|
2016-07-08 05:32:49 +02:00
|
|
|
void initializeUnreachableBlockElimLegacyPassPass(PassRegistry&);
|
2015-02-19 20:14:34 +01:00
|
|
|
void initializeUnreachableMachineBlockElimPass(PassRegistry&);
|
[VectorCombine] new IR transform pass for partial vector ops
We have several bug reports that could be characterized as "reducing scalarization",
and this topic was also raised on llvm-dev recently:
http://lists.llvm.org/pipermail/llvm-dev/2020-January/138157.html
...so I'm proposing that we deal with these patterns in a new, lightweight IR vector
pass that runs before/after other vectorization passes.
There are 4 alternate options that I can think of to deal with this kind of problem
(and we've seen various attempts at all of these), but they all have flaws:
InstCombine - can't happen without TTI, but we don't want target-specific
folds there.
SDAG - too late to assist other vectorization passes; TLI is not equipped
for these kinds of cost queries; limited to a single basic block.
CGP - too late to assist other vectorization passes; would need to re-implement
basic cleanups like CSE/instcombine.
SLP - doesn't fit with existing transforms; limited to a single basic block.
This initial patch/transform is based on existing code in AggressiveInstCombine:
we walk backwards through the function looking for a pattern match. But we diverge
from that cost-independent IR canonicalization pass by using TTI to decide if the
vector alternative is profitable.
We probably have at least 10 similar bug reports/patterns (binops, constants,
inserts, cheap shuffles, etc) that would fit in this pass as follow-up enhancements.
It's possible that we could iterate on a worklist to fix-point like InstCombine does,
but it's safer to start with the most basic case and evolve from there, so I didn't
try to do anything fancy with this initial implementation.
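To make the shape of that concrete, here is a rough, self-contained sketch of
the approach (illustrative only, not the pass's code): walk each block
bottom-up, match a binop of two extracts from the same lane, and rewrite it as
a vector binop plus a single extract only when TTI reports the vector form as
cheaper. Corner cases are deliberately skipped (integer div/rem is rejected,
fast-math flags are dropped), and the cost API is used as it existed around
the time of this patch.
```
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

static bool foldExtractExtractBinOp(Instruction &I,
                                    const TargetTransformInfo &TTI) {
  auto *BinOp = dyn_cast<BinaryOperator>(&I);
  if (!BinOp || BinOp->isIntDivRem()) // dividing other lanes could introduce UB
    return false;
  auto *Ext0 = dyn_cast<ExtractElementInst>(BinOp->getOperand(0));
  auto *Ext1 = dyn_cast<ExtractElementInst>(BinOp->getOperand(1));
  if (!Ext0 || !Ext1 ||
      Ext0->getVectorOperandType() != Ext1->getVectorOperandType() ||
      Ext0->getIndexOperand() != Ext1->getIndexOperand())
    return false;
  auto *Index = dyn_cast<ConstantInt>(Ext0->getIndexOperand());
  if (!Index)
    return false;

  Type *VecTy = Ext0->getVectorOperandType();
  unsigned Opcode = BinOp->getOpcode();
  unsigned Lane = Index->getZExtValue();
  // Scalar form: two extracts + scalar binop. Vector form: vector binop + one
  // extract. Only transform when the vector form is strictly cheaper.
  auto ScalarCost =
      2 * TTI.getVectorInstrCost(Instruction::ExtractElement, VecTy, Lane) +
      TTI.getArithmeticInstrCost(Opcode, BinOp->getType());
  auto VectorCost =
      TTI.getArithmeticInstrCost(Opcode, VecTy) +
      TTI.getVectorInstrCost(Instruction::ExtractElement, VecTy, Lane);
  if (VectorCost >= ScalarCost)
    return false;

  IRBuilder<> Builder(BinOp);
  Value *VecOp = Builder.CreateBinOp(BinOp->getOpcode(),
                                     Ext0->getVectorOperand(),
                                     Ext1->getVectorOperand());
  BinOp->replaceAllUsesWith(Builder.CreateExtractElement(VecOp, Index));
  // Leave the now-dead scalar ops for later cleanup (DCE/instcombine).
  return true;
}

static bool runVectorCombineSketch(Function &F,
                                   const TargetTransformInfo &TTI) {
  bool Changed = false;
  for (BasicBlock &BB : F)
    for (Instruction &I : reverse(BB)) // bottom-up walk, as described above
      Changed |= foldExtractExtractBinOp(I, TTI);
  return Changed;
}
```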
Differential Revision: https://reviews.llvm.org/D73480
2020-02-09 16:04:41 +01:00
|
|
|
void initializeVectorCombineLegacyPassPass(PassRegistry&);
|
2015-02-19 20:14:34 +01:00
|
|
|
void initializeVerifierLegacyPassPass(PassRegistry&);
|
|
|
|
void initializeVirtRegMapPass(PassRegistry&);
|
|
|
|
void initializeVirtRegRewriterPass(PassRegistry&);
|
[Unroll/UnrollAndJam/Vectorizer/Distribute] Add followup loop attributes.
When multiple loop transformations are defined in a loop's metadata, their order of execution is defined by the order of their respective passes in the pass pipeline. For instance,
#pragma clang loop unroll_and_jam(enable)
#pragma clang loop distribute(enable)
is the same as
#pragma clang loop distribute(enable)
#pragma clang loop unroll_and_jam(enable)
and will try to loop-distribute before Unroll-And-Jam because the LoopDistribute pass is scheduled after the UnrollAndJam pass. UnrollAndJamPass only supports one inner loop, i.e. it will necessarily fail after loop distribution. It is not possible to specify another execution order. Also, the order of passes in the pipeline is subject to change between versions of LLVM, optimization options and which pass manager is used.
This patch adds 'followup' attributes to various loop transformation passes. These attributes define which attributes the resulting loop of a transformation should have. For instance,
!0 = !{!0, !1, !2}
!1 = !{!"llvm.loop.unroll_and_jam.enable"}
!2 = !{!"llvm.loop.unroll_and_jam.followup_inner", !3}
!3 = !{!"llvm.loop.distribute.enable"}
defines a loop ID (!0) to be unrolled-and-jammed (!1) and then the attribute !3 to be added to the jammed inner loop, which contains the instruction to distribute the inner loop.
Currently, in both pass managers, pass execution is in a fixed order and UnrollAndJamPass will not execute again after LoopDistribute. We hope to fix this in the future by allowing pass managers to run passes until a fixpoint is reached, by using Polly to perform these transformations, or by adding a loop transformation pass which takes the order issue into account.
For mandatory/forced transformations (e.g. by having been declared by #pragma omp simd), the user must be notified when a transformation could not be performed. The responsible pass itself cannot emit such a warning because the transformation might be 'hidden' in a followup attribute when it is executed, or it might not be present in the pipeline at all. For this reason, this patch introduces a WarnMissedTransformations pass to warn about orphaned transformations.
Since this changes the user-visible diagnostic message when a transformation is applied, two test cases in the clang repository need to be updated.
To ensure that no other transformation is executed before the intended one, the attribute `llvm.loop.disable_nonforced` can be added which should disable transformation heuristics before the intended transformation is applied. E.g. it would be surprising if a loop is distributed before a #pragma unroll_and_jam is applied.
With more supported code transformations (loop fusion, interchange, stripmining, offloading, etc.), transformations can be used as building blocks for more complex transformations (e.g. stripmining+stripmining+interchange -> tiling).
Reviewed By: hfinkel, dmgreen
Differential Revision: https://reviews.llvm.org/D49281
Differential Revision: https://reviews.llvm.org/D55288
llvm-svn: 348944
2018-12-12 18:32:52 +01:00
|
|
|
void initializeWarnMissedTransformationsLegacyPass(PassRegistry &);
|
2018-06-01 00:02:34 +02:00
|
|
|
void initializeWasmEHPreparePass(PassRegistry&);
|
2017-03-18 06:05:29 +01:00
|
|
|
void initializeWholeProgramDevirtPass(PassRegistry&);
|
2015-01-29 01:41:44 +01:00
|
|
|
void initializeWinEHPreparePass(PassRegistry&);
|
2017-03-18 06:05:29 +01:00
|
|
|
void initializeWriteBitcodePassPass(PassRegistry&);
|
|
|
|
void initializeWriteThinLTOBitcodePass(PassRegistry&);
|
|
|
|
void initializeXRayInstrumentationPass(PassRegistry&);
|
2010-10-07 06:17:38 +02:00
|
|
|
|
2017-09-07 01:05:38 +02:00
|
|
|
} // end namespace llvm
|
|
|
|
|
|
|
|
#endif // LLVM_INITIALIZEPASSES_H
|