2017-10-09 23:19:02 +00:00
|
|
|
//===- OptimizationRemarkEmitter.h - Optimization Diagnostic ----*- C++ -*-===//
|
2016-07-15 17:23:20 +00:00
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// Optimization diagnostic interfaces. It's packaged as an analysis pass so
|
|
|
|
// that by using this service passes become dependent on BFI as well. BFI is
|
|
|
|
// used to compute the "hotness" of the diagnostic message.
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#ifndef LLVM_IR_OPTIMIZATIONDIAGNOSTICINFO_H
|
|
|
|
#define LLVM_IR_OPTIMIZATIONDIAGNOSTICINFO_H
|
|
|
|
|
|
|
|
#include "llvm/ADT/Optional.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include <memory>
#include <utility>
|
|
|
|
|
|
|
|
namespace llvm {
|
|
|
|
class DebugLoc;
|
|
|
|
class Loop;
|
|
|
|
class Pass;
|
|
|
|
class Twine;
|
|
|
|
class Value;
|
|
|
|
|
2016-08-10 00:44:56 +00:00
|
|
|
/// The optimization diagnostic interface.
|
|
|
|
///
|
|
|
|
/// It allows reporting when optimizations are performed and when they are not
|
|
|
|
/// along with the reasons for it. Hotness information of the corresponding
|
[ORE] Unify spelling as "diagnostics hotness"
Summary:
To enable profile hotness information in diagnostics output, Clang takes
the option `-fdiagnostics-show-hotness` -- that's "diagnostics", with an
"s" at the end. Clang also defines `CodeGenOptions::DiagnosticsWithHotness`.
LLVM, on the other hand, defines
`LLVMContext::getDiagnosticHotnessRequested` -- that's "diagnostic", not
"diagnostics". It's a small difference, but it's confusing, typo-inducing, and
frustrating.
Add a new method with the spelling "diagnostics", and "deprecate" the
old spelling.
Reviewers: anemet, davidxl
Reviewed By: anemet
Subscribers: llvm-commits, mehdi_amini
Differential Revision: https://reviews.llvm.org/D34864
llvm-svn: 306848
2017-06-30 18:13:59 +00:00
|
|
|
/// code region can be included in the remark if DiagnosticsHotnessRequested is
|
2016-08-10 00:44:56 +00:00
|
|
|
/// enabled in the LLVM context.
|
2016-07-18 16:29:21 +00:00
|
|
|
class OptimizationRemarkEmitter {
|
2016-07-15 17:23:20 +00:00
|
|
|
public:
|
2017-02-22 07:38:17 +00:00
|
|
|
OptimizationRemarkEmitter(const Function *F, BlockFrequencyInfo *BFI)
|
2016-07-18 16:29:21 +00:00
|
|
|
: F(F), BFI(BFI) {}
|
|
|
|
|
2018-05-01 15:54:18 +00:00
|
|
|
/// This variant can be used to generate ORE on demand (without the
|
2016-08-10 00:44:44 +00:00
|
|
|
/// analysis pass).
|
|
|
|
///
|
|
|
|
/// Note that this ctor has a very different cost depending on whether
|
[ORE] Unify spelling as "diagnostics hotness"
Summary:
To enable profile hotness information in diagnostics output, Clang takes
the option `-fdiagnostics-show-hotness` -- that's "diagnostics", with an
"s" at the end. Clang also defines `CodeGenOptions::DiagnosticsWithHotness`.
LLVM, on the other hand, defines
`LLVMContext::getDiagnosticHotnessRequested` -- that's "diagnostic", not
"diagnostics". It's a small difference, but it's confusing, typo-inducing, and
frustrating.
Add a new method with the spelling "diagnostics", and "deprecate" the
old spelling.
Reviewers: anemet, davidxl
Reviewed By: anemet
Subscribers: llvm-commits, mehdi_amini
Differential Revision: https://reviews.llvm.org/D34864
llvm-svn: 306848
2017-06-30 18:13:59 +00:00
|
|
|
/// F->getContext().getDiagnosticsHotnessRequested() is on or not. If it's off
|
2016-08-10 00:44:44 +00:00
|
|
|
/// the operation is free.
|
|
|
|
///
|
[ORE] Unify spelling as "diagnostics hotness"
Summary:
To enable profile hotness information in diagnostics output, Clang takes
the option `-fdiagnostics-show-hotness` -- that's "diagnostics", with an
"s" at the end. Clang also defines `CodeGenOptions::DiagnosticsWithHotness`.
LLVM, on the other hand, defines
`LLVMContext::getDiagnosticHotnessRequested` -- that's "diagnostic", not
"diagnostics". It's a small difference, but it's confusing, typo-inducing, and
frustrating.
Add a new method with the spelling "diagnostics", and "deprecate" the
old spelling.
Reviewers: anemet, davidxl
Reviewed By: anemet
Subscribers: llvm-commits, mehdi_amini
Differential Revision: https://reviews.llvm.org/D34864
llvm-svn: 306848
2017-06-30 18:13:59 +00:00
|
|
|
/// Whereas if DiagnosticsHotnessRequested is on, it is fairly expensive
|
2016-08-10 00:44:44 +00:00
|
|
|
/// operation since BFI and all its required analyses are computed. This is
|
|
|
|
/// for example useful for CGSCC passes that can't use function analyses
|
|
|
|
/// passes in the old PM.
|
2017-02-22 07:38:17 +00:00
|
|
|
OptimizationRemarkEmitter(const Function *F);
|
2016-08-10 00:44:44 +00:00
|
|
|
|
2016-07-18 16:29:21 +00:00
|
|
|
OptimizationRemarkEmitter(OptimizationRemarkEmitter &&Arg)
|
|
|
|
: F(Arg.F), BFI(Arg.BFI) {}
|
|
|
|
|
|
|
|
OptimizationRemarkEmitter &operator=(OptimizationRemarkEmitter &&RHS) {
|
|
|
|
F = RHS.F;
|
|
|
|
BFI = RHS.BFI;
|
|
|
|
return *this;
|
|
|
|
}
|
2016-07-15 17:23:20 +00:00
|
|
|
|
2017-01-15 08:20:50 +00:00
|
|
|
/// Handle invalidation events in the new pass manager.
|
|
|
|
bool invalidate(Function &F, const PreservedAnalyses &PA,
|
|
|
|
FunctionAnalysisManager::Invalidator &Inv);
|
|
|
|
|
2018-05-01 15:54:18 +00:00
|
|
|
/// Output the remark via the diagnostic handler and to the
|
2017-09-15 20:10:09 +00:00
|
|
|
/// optimization record file.
|
Output optimization remarks in YAML
(Re-committed after moving the template specialization under the yaml
namespace. GCC was complaining about this.)
This allows various presentation of this data using an external tool.
This was first recommended here[1].
As an example, consider this module:
1 int foo();
2 int bar();
3
4 int baz() {
5 return foo() + bar();
6 }
The inliner generates these missed-optimization remarks today (the
hotness information is pulled from PGO):
remark: /tmp/s.c:5:10: foo will not be inlined into baz (hotness: 30)
remark: /tmp/s.c:5:18: bar will not be inlined into baz (hotness: 30)
Now with -pass-remarks-output=<yaml-file>, we generate this YAML file:
--- !Missed
Pass: inline
Name: NotInlined
DebugLoc: { File: /tmp/s.c, Line: 5, Column: 10 }
Function: baz
Hotness: 30
Args:
- Callee: foo
- String: will not be inlined into
- Caller: baz
...
--- !Missed
Pass: inline
Name: NotInlined
DebugLoc: { File: /tmp/s.c, Line: 5, Column: 18 }
Function: baz
Hotness: 30
Args:
- Callee: bar
- String: will not be inlined into
- Caller: baz
...
This is a summary of the high-level decisions:
* There is a new streaming interface to emit optimization remarks.
E.g. for the inliner remark above:
ORE.emit(DiagnosticInfoOptimizationRemarkMissed(
DEBUG_TYPE, "NotInlined", &I)
<< NV("Callee", Callee) << " will not be inlined into "
<< NV("Caller", CS.getCaller()) << setIsVerbose());
NV stands for named value and allows the YAML client to process a remark
using its name (NotInlined) and the named arguments (Callee and Caller)
without parsing the text of the message.
Subsequent patches will update ORE users to use the new streaming API.
* I am using YAML I/O for writing the YAML file. YAML I/O requires you
to specify reading and writing at once but reading is highly non-trivial
for some of the more complex LLVM types. Since it's not clear that we
(ever) want to use LLVM to parse this YAML file, the code supports and
asserts that we're writing only.
On the other hand, I did experiment that the class hierarchy starting at
DiagnosticInfoOptimizationBase can be mapped back from YAML generated
here (see D24479).
* The YAML stream is stored in the LLVM context.
* In the example, we can probably further specify the IR value used,
i.e. print "Function" rather than "Value".
* As before hotness is computed in the analysis pass instead of
DiganosticInfo. This avoids the layering problem since BFI is in
Analysis while DiagnosticInfo is in IR.
[1] https://reviews.llvm.org/D19678#419445
Differential Revision: https://reviews.llvm.org/D24587
llvm-svn: 282539
2016-09-27 20:55:07 +00:00
|
|
|
void emit(DiagnosticInfoOptimizationBase &OptDiag);
|
|
|
|
|
2018-05-01 15:54:18 +00:00
|
|
|
/// Take a lambda that returns a remark which will be emitted. Second
|
2017-09-19 23:00:55 +00:00
|
|
|
/// argument is only used to restrict this to functions.
|
|
|
|
template <typename T>
|
|
|
|
void emit(T RemarkBuilder, decltype(RemarkBuilder()) * = nullptr) {
|
|
|
|
// Avoid building the remark unless we know there are at least *some*
|
|
|
|
// remarks enabled. We can't currently check whether remarks are requested
|
|
|
|
// for the calling pass since that requires actually building the remark.
|
|
|
|
|
|
|
|
if (F->getContext().getDiagnosticsOutputFile() ||
|
|
|
|
F->getContext().getDiagHandlerPtr()->isAnyRemarkEnabled()) {
|
|
|
|
auto R = RemarkBuilder();
|
2017-09-20 04:39:02 +00:00
|
|
|
emit((DiagnosticInfoOptimizationBase &)R);
|
2017-09-19 23:00:55 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-01 15:54:18 +00:00
|
|
|
/// Whether we allow for extra compile-time budget to perform more
|
2016-12-01 17:34:50 +00:00
|
|
|
/// analysis to produce fewer false positives.
|
|
|
|
///
|
|
|
|
/// This is useful when reporting missed optimizations. In this case we can
|
|
|
|
/// use the extra analysis (1) to filter trivial false positives or (2) to
|
|
|
|
/// provide more context so that non-trivial false positives can be quickly
|
|
|
|
/// detected by the user.
|
2017-09-15 20:10:09 +00:00
|
|
|
bool allowExtraAnalysis(StringRef PassName) const {
|
|
|
|
return (F->getContext().getDiagnosticsOutputFile() ||
|
|
|
|
F->getContext().getDiagHandlerPtr()->isAnyRemarkEnabled(PassName));
|
2016-12-01 17:34:50 +00:00
|
|
|
}
|
|
|
|
|
2016-07-18 16:29:21 +00:00
|
|
|
private:
|
2017-02-22 07:38:17 +00:00
|
|
|
const Function *F;
|
2016-07-18 16:29:21 +00:00
|
|
|
|
|
|
|
BlockFrequencyInfo *BFI;
|
|
|
|
|
2016-08-10 00:44:44 +00:00
|
|
|
/// If we generate BFI on demand, we need to free it when ORE is freed.
|
|
|
|
std::unique_ptr<BlockFrequencyInfo> OwnedBFI;
|
|
|
|
|
Output optimization remarks in YAML
(Re-committed after moving the template specialization under the yaml
namespace. GCC was complaining about this.)
This allows various presentation of this data using an external tool.
This was first recommended here[1].
As an example, consider this module:
1 int foo();
2 int bar();
3
4 int baz() {
5 return foo() + bar();
6 }
The inliner generates these missed-optimization remarks today (the
hotness information is pulled from PGO):
remark: /tmp/s.c:5:10: foo will not be inlined into baz (hotness: 30)
remark: /tmp/s.c:5:18: bar will not be inlined into baz (hotness: 30)
Now with -pass-remarks-output=<yaml-file>, we generate this YAML file:
--- !Missed
Pass: inline
Name: NotInlined
DebugLoc: { File: /tmp/s.c, Line: 5, Column: 10 }
Function: baz
Hotness: 30
Args:
- Callee: foo
- String: will not be inlined into
- Caller: baz
...
--- !Missed
Pass: inline
Name: NotInlined
DebugLoc: { File: /tmp/s.c, Line: 5, Column: 18 }
Function: baz
Hotness: 30
Args:
- Callee: bar
- String: will not be inlined into
- Caller: baz
...
This is a summary of the high-level decisions:
* There is a new streaming interface to emit optimization remarks.
E.g. for the inliner remark above:
ORE.emit(DiagnosticInfoOptimizationRemarkMissed(
DEBUG_TYPE, "NotInlined", &I)
<< NV("Callee", Callee) << " will not be inlined into "
<< NV("Caller", CS.getCaller()) << setIsVerbose());
NV stands for named value and allows the YAML client to process a remark
using its name (NotInlined) and the named arguments (Callee and Caller)
without parsing the text of the message.
Subsequent patches will update ORE users to use the new streaming API.
* I am using YAML I/O for writing the YAML file. YAML I/O requires you
to specify reading and writing at once but reading is highly non-trivial
for some of the more complex LLVM types. Since it's not clear that we
(ever) want to use LLVM to parse this YAML file, the code supports and
asserts that we're writing only.
On the other hand, I did experiment that the class hierarchy starting at
DiagnosticInfoOptimizationBase can be mapped back from YAML generated
here (see D24479).
* The YAML stream is stored in the LLVM context.
* In the example, we can probably further specify the IR value used,
i.e. print "Function" rather than "Value".
* As before hotness is computed in the analysis pass instead of
DiganosticInfo. This avoids the layering problem since BFI is in
Analysis while DiagnosticInfo is in IR.
[1] https://reviews.llvm.org/D19678#419445
Differential Revision: https://reviews.llvm.org/D24587
llvm-svn: 282539
2016-09-27 20:55:07 +00:00
|
|
|
/// Compute hotness from IR value (currently assumed to be a block) if PGO is
|
|
|
|
/// available.
|
2016-07-20 21:44:22 +00:00
|
|
|
Optional<uint64_t> computeHotness(const Value *V);
|
2016-07-18 16:29:21 +00:00
|
|
|
|
Output optimization remarks in YAML
(Re-committed after moving the template specialization under the yaml
namespace. GCC was complaining about this.)
This allows various presentation of this data using an external tool.
This was first recommended here[1].
As an example, consider this module:
1 int foo();
2 int bar();
3
4 int baz() {
5 return foo() + bar();
6 }
The inliner generates these missed-optimization remarks today (the
hotness information is pulled from PGO):
remark: /tmp/s.c:5:10: foo will not be inlined into baz (hotness: 30)
remark: /tmp/s.c:5:18: bar will not be inlined into baz (hotness: 30)
Now with -pass-remarks-output=<yaml-file>, we generate this YAML file:
--- !Missed
Pass: inline
Name: NotInlined
DebugLoc: { File: /tmp/s.c, Line: 5, Column: 10 }
Function: baz
Hotness: 30
Args:
- Callee: foo
- String: will not be inlined into
- Caller: baz
...
--- !Missed
Pass: inline
Name: NotInlined
DebugLoc: { File: /tmp/s.c, Line: 5, Column: 18 }
Function: baz
Hotness: 30
Args:
- Callee: bar
- String: will not be inlined into
- Caller: baz
...
This is a summary of the high-level decisions:
* There is a new streaming interface to emit optimization remarks.
E.g. for the inliner remark above:
ORE.emit(DiagnosticInfoOptimizationRemarkMissed(
DEBUG_TYPE, "NotInlined", &I)
<< NV("Callee", Callee) << " will not be inlined into "
<< NV("Caller", CS.getCaller()) << setIsVerbose());
NV stands for named value and allows the YAML client to process a remark
using its name (NotInlined) and the named arguments (Callee and Caller)
without parsing the text of the message.
Subsequent patches will update ORE users to use the new streaming API.
* I am using YAML I/O for writing the YAML file. YAML I/O requires you
to specify reading and writing at once but reading is highly non-trivial
for some of the more complex LLVM types. Since it's not clear that we
(ever) want to use LLVM to parse this YAML file, the code supports and
asserts that we're writing only.
On the other hand, I did experiment that the class hierarchy starting at
DiagnosticInfoOptimizationBase can be mapped back from YAML generated
here (see D24479).
* The YAML stream is stored in the LLVM context.
* In the example, we can probably further specify the IR value used,
i.e. print "Function" rather than "Value".
* As before hotness is computed in the analysis pass instead of
DiganosticInfo. This avoids the layering problem since BFI is in
Analysis while DiagnosticInfo is in IR.
[1] https://reviews.llvm.org/D19678#419445
Differential Revision: https://reviews.llvm.org/D24587
llvm-svn: 282539
2016-09-27 20:55:07 +00:00
|
|
|
/// Similar but use value from \p OptDiag and update hotness there.
|
2017-01-25 23:20:25 +00:00
|
|
|
void computeHotness(DiagnosticInfoIROptimization &OptDiag);
|
Output optimization remarks in YAML
(Re-committed after moving the template specialization under the yaml
namespace. GCC was complaining about this.)
This allows various presentation of this data using an external tool.
This was first recommended here[1].
As an example, consider this module:
1 int foo();
2 int bar();
3
4 int baz() {
5 return foo() + bar();
6 }
The inliner generates these missed-optimization remarks today (the
hotness information is pulled from PGO):
remark: /tmp/s.c:5:10: foo will not be inlined into baz (hotness: 30)
remark: /tmp/s.c:5:18: bar will not be inlined into baz (hotness: 30)
Now with -pass-remarks-output=<yaml-file>, we generate this YAML file:
--- !Missed
Pass: inline
Name: NotInlined
DebugLoc: { File: /tmp/s.c, Line: 5, Column: 10 }
Function: baz
Hotness: 30
Args:
- Callee: foo
- String: will not be inlined into
- Caller: baz
...
--- !Missed
Pass: inline
Name: NotInlined
DebugLoc: { File: /tmp/s.c, Line: 5, Column: 18 }
Function: baz
Hotness: 30
Args:
- Callee: bar
- String: will not be inlined into
- Caller: baz
...
This is a summary of the high-level decisions:
* There is a new streaming interface to emit optimization remarks.
E.g. for the inliner remark above:
ORE.emit(DiagnosticInfoOptimizationRemarkMissed(
DEBUG_TYPE, "NotInlined", &I)
<< NV("Callee", Callee) << " will not be inlined into "
<< NV("Caller", CS.getCaller()) << setIsVerbose());
NV stands for named value and allows the YAML client to process a remark
using its name (NotInlined) and the named arguments (Callee and Caller)
without parsing the text of the message.
Subsequent patches will update ORE users to use the new streaming API.
* I am using YAML I/O for writing the YAML file. YAML I/O requires you
to specify reading and writing at once but reading is highly non-trivial
for some of the more complex LLVM types. Since it's not clear that we
(ever) want to use LLVM to parse this YAML file, the code supports and
asserts that we're writing only.
On the other hand, I did experiment that the class hierarchy starting at
DiagnosticInfoOptimizationBase can be mapped back from YAML generated
here (see D24479).
* The YAML stream is stored in the LLVM context.
* In the example, we can probably further specify the IR value used,
i.e. print "Function" rather than "Value".
* As before hotness is computed in the analysis pass instead of
DiganosticInfo. This avoids the layering problem since BFI is in
Analysis while DiagnosticInfo is in IR.
[1] https://reviews.llvm.org/D19678#419445
Differential Revision: https://reviews.llvm.org/D24587
llvm-svn: 282539
2016-09-27 20:55:07 +00:00
|
|
|
|
2018-05-01 15:54:18 +00:00
|
|
|
/// Only allow verbose messages if we know we're filtering by hotness
|
2016-08-26 20:21:05 +00:00
|
|
|
/// (BFI is only set in this case).
|
|
|
|
bool shouldEmitVerbose() { return BFI != nullptr; }
|
|
|
|
|
2016-07-18 16:29:21 +00:00
|
|
|
OptimizationRemarkEmitter(const OptimizationRemarkEmitter &) = delete;
|
|
|
|
void operator=(const OptimizationRemarkEmitter &) = delete;
|
|
|
|
};
|
|
|
|
|
2018-05-01 15:54:18 +00:00
|
|
|
/// Add a small namespace to avoid name clashes with the classes used in
|
Output optimization remarks in YAML
(Re-committed after moving the template specialization under the yaml
namespace. GCC was complaining about this.)
This allows various presentation of this data using an external tool.
This was first recommended here[1].
As an example, consider this module:
1 int foo();
2 int bar();
3
4 int baz() {
5 return foo() + bar();
6 }
The inliner generates these missed-optimization remarks today (the
hotness information is pulled from PGO):
remark: /tmp/s.c:5:10: foo will not be inlined into baz (hotness: 30)
remark: /tmp/s.c:5:18: bar will not be inlined into baz (hotness: 30)
Now with -pass-remarks-output=<yaml-file>, we generate this YAML file:
--- !Missed
Pass: inline
Name: NotInlined
DebugLoc: { File: /tmp/s.c, Line: 5, Column: 10 }
Function: baz
Hotness: 30
Args:
- Callee: foo
- String: will not be inlined into
- Caller: baz
...
--- !Missed
Pass: inline
Name: NotInlined
DebugLoc: { File: /tmp/s.c, Line: 5, Column: 18 }
Function: baz
Hotness: 30
Args:
- Callee: bar
- String: will not be inlined into
- Caller: baz
...
This is a summary of the high-level decisions:
* There is a new streaming interface to emit optimization remarks.
E.g. for the inliner remark above:
ORE.emit(DiagnosticInfoOptimizationRemarkMissed(
DEBUG_TYPE, "NotInlined", &I)
<< NV("Callee", Callee) << " will not be inlined into "
<< NV("Caller", CS.getCaller()) << setIsVerbose());
NV stands for named value and allows the YAML client to process a remark
using its name (NotInlined) and the named arguments (Callee and Caller)
without parsing the text of the message.
Subsequent patches will update ORE users to use the new streaming API.
* I am using YAML I/O for writing the YAML file. YAML I/O requires you
to specify reading and writing at once but reading is highly non-trivial
for some of the more complex LLVM types. Since it's not clear that we
(ever) want to use LLVM to parse this YAML file, the code supports and
asserts that we're writing only.
On the other hand, I did experiment that the class hierarchy starting at
DiagnosticInfoOptimizationBase can be mapped back from YAML generated
here (see D24479).
* The YAML stream is stored in the LLVM context.
* In the example, we can probably further specify the IR value used,
i.e. print "Function" rather than "Value".
* As before hotness is computed in the analysis pass instead of
DiganosticInfo. This avoids the layering problem since BFI is in
Analysis while DiagnosticInfo is in IR.
[1] https://reviews.llvm.org/D19678#419445
Differential Revision: https://reviews.llvm.org/D24587
llvm-svn: 282539
2016-09-27 20:55:07 +00:00
|
|
|
/// the streaming interface. We want these to be short for better
|
|
|
|
/// write/readability.
|
|
|
|
namespace ore {
// Short aliases for the streaming remark interface, e.g.:
//   ORE.emit(OptimizationRemarkMissed(...) << NV("Callee", Callee));
// NV ("named value") attaches a name to each argument so the YAML output can
// be processed by key without parsing the rendered message text.
using NV = DiagnosticInfoOptimizationBase::Argument;
// Marks a remark as verbose; verbose remarks are only emitted when filtering
// by hotness is active (see OptimizationRemarkEmitter::shouldEmitVerbose).
using setIsVerbose = DiagnosticInfoOptimizationBase::setIsVerbose;
// Everything streamed after this marker is recorded in the YAML output but
// omitted from the rendered diagnostic message.
using setExtraArgs = DiagnosticInfoOptimizationBase::setExtraArgs;
} // namespace ore
|
|
|
|
|
2016-08-26 15:58:34 +00:00
|
|
|
/// OptimizationRemarkEmitter legacy analysis pass
|
|
|
|
///
|
|
|
|
/// Note that this pass shouldn't generally be marked as preserved by other
|
|
|
|
/// passes. It's holding onto BFI, so if the pass does not preserve BFI, BFI
|
|
|
|
/// could be freed.
|
2016-07-18 16:29:21 +00:00
|
|
|
class OptimizationRemarkEmitterWrapperPass : public FunctionPass {
  // Result object, rebuilt by runOnFunction for each function; null until the
  // pass has run at least once.
  std::unique_ptr<OptimizationRemarkEmitter> ORE;

public:
  OptimizationRemarkEmitterWrapperPass();

  // Creates the OptimizationRemarkEmitter for \p F (defined out-of-line).
  bool runOnFunction(Function &F) override;

  // Declares the analyses this pass depends on (defined out-of-line).
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// Access the emitter produced by the last runOnFunction invocation.
  /// Asserts if the pass has not been run yet.
  OptimizationRemarkEmitter &getORE() {
    assert(ORE && "pass not run yet");
    return *ORE;
  }

  // Pass identification; address is used as the unique pass ID.
  static char ID;
};
|
2016-07-15 17:23:20 +00:00
|
|
|
|
2016-07-18 16:29:21 +00:00
|
|
|
class OptimizationRemarkEmitterAnalysis
|
|
|
|
: public AnalysisInfoMixin<OptimizationRemarkEmitterAnalysis> {
|
|
|
|
friend AnalysisInfoMixin<OptimizationRemarkEmitterAnalysis>;
|
2016-11-23 17:53:26 +00:00
|
|
|
static AnalysisKey Key;
|
2016-07-15 17:23:20 +00:00
|
|
|
|
2016-07-18 16:29:21 +00:00
|
|
|
public:
|
2018-05-01 15:54:18 +00:00
|
|
|
/// Provide the result typedef for this analysis pass.
|
2016-07-18 16:29:21 +00:00
|
|
|
typedef OptimizationRemarkEmitter Result;
|
2016-07-15 17:23:20 +00:00
|
|
|
|
2018-05-01 15:54:18 +00:00
|
|
|
/// Run the analysis pass over a function and produce BFI.
|
2016-08-09 00:28:15 +00:00
|
|
|
Result run(Function &F, FunctionAnalysisManager &AM);
|
2016-07-15 17:23:20 +00:00
|
|
|
};
|
|
|
|
}
|
|
|
|
#endif // LLVM_IR_OPTIMIZATIONDIAGNOSTICINFO_H
|