//===- llvm/LLVMContext.h - Class for managing "global" state ---*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares LLVMContext, a container of "global" state in LLVM, such
// as the global type and constant uniquing tables.
//
//===----------------------------------------------------------------------===//

|
|
|
#ifndef LLVM_IR_LLVMCONTEXT_H
|
|
|
|
#define LLVM_IR_LLVMCONTEXT_H
|
2009-06-30 02:48:55 +02:00
|
|
|
|
2016-12-06 23:00:57 +01:00
|
|
|
#include "llvm-c/Types.h"
|
2013-05-01 22:59:00 +02:00
|
|
|
#include "llvm/Support/CBindingWrapping.h"
|
2014-10-15 23:54:35 +02:00
|
|
|
#include "llvm/Support/Options.h"
|
2016-12-06 23:00:57 +01:00
|
|
|
#include <cstdint>
|
|
|
|
#include <memory>
|
|
|
|
#include <string>
|
2012-09-17 09:16:40 +02:00
|
|
|
|
2009-06-30 02:48:55 +02:00
|
|
|
namespace llvm {
|
|
|
|
|
2014-05-15 19:49:20 +02:00
|
|
|
class DiagnosticInfo;
|
2016-05-16 16:28:02 +02:00
|
|
|
enum DiagnosticSeverity : char;
|
2014-05-15 19:49:20 +02:00
|
|
|
class Function;
|
2016-12-06 23:00:57 +01:00
|
|
|
class Instruction;
|
|
|
|
class LLVMContextImpl;
|
|
|
|
class Module;
|
2016-04-23 00:06:11 +02:00
|
|
|
class OptBisect;
|
2016-12-06 23:00:57 +01:00
|
|
|
template <typename T> class SmallVectorImpl;
|
|
|
|
class SMDiagnostic;
|
|
|
|
class StringRef;
|
|
|
|
class Twine;
|
|
|
|
|
|
|
|
namespace yaml {

// Forward declaration of the YAML output stream type used for serialized
// optimization diagnostics (see get/setDiagnosticsOutputFile below).
class Output;

} // end namespace yaml
|
2009-10-06 19:43:57 +02:00
|
|
|
|
2009-06-30 19:06:46 +02:00
|
|
|
/// This is an important class for using LLVM in a threaded context. It
|
2013-05-25 00:53:06 +02:00
|
|
|
/// (opaquely) owns and manages the core "global" data of LLVM's core
|
2009-06-30 19:06:46 +02:00
|
|
|
/// infrastructure, including the type and constant uniquing tables.
|
|
|
|
/// LLVMContext itself provides no locking guarantees, so you should be careful
|
|
|
|
/// to have one context per thread.
|
2009-08-11 19:45:13 +02:00
|
|
|
class LLVMContext {
|
2010-09-08 20:22:11 +02:00
|
|
|
public:
|
|
|
|
LLVMContextImpl *const pImpl;
|
|
|
|
LLVMContext();
|
2016-12-06 23:00:57 +01:00
|
|
|
LLVMContext(LLVMContext &) = delete;
|
|
|
|
LLVMContext &operator=(const LLVMContext &) = delete;
|
2010-09-08 20:22:11 +02:00
|
|
|
~LLVMContext();
|
2013-05-25 00:53:06 +02:00
|
|
|
|
2010-03-31 01:03:27 +02:00
|
|
|
// Pinned metadata names, which always have the same value. This is a
|
|
|
|
// compile-time performance optimization, not a correctness optimization.
|
|
|
|
enum {
|
2016-03-25 01:35:38 +01:00
|
|
|
MD_dbg = 0, // "dbg"
|
|
|
|
MD_tbaa = 1, // "tbaa"
|
|
|
|
MD_prof = 2, // "prof"
|
|
|
|
MD_fpmath = 3, // "fpmath"
|
|
|
|
MD_range = 4, // "range"
|
|
|
|
MD_tbaa_struct = 5, // "tbaa.struct"
|
|
|
|
MD_invariant_load = 6, // "invariant.load"
|
|
|
|
MD_alias_scope = 7, // "alias.scope"
|
|
|
|
MD_noalias = 8, // "noalias",
|
|
|
|
MD_nontemporal = 9, // "nontemporal"
|
2014-10-21 02:13:20 +02:00
|
|
|
MD_mem_parallel_loop_access = 10, // "llvm.mem.parallel_loop_access"
|
2016-03-25 01:35:38 +01:00
|
|
|
MD_nonnull = 11, // "nonnull"
|
|
|
|
MD_dereferenceable = 12, // "dereferenceable"
|
|
|
|
MD_dereferenceable_or_null = 13, // "dereferenceable_or_null"
|
|
|
|
MD_make_implicit = 14, // "make.implicit"
|
|
|
|
MD_unpredictable = 15, // "unpredictable"
|
|
|
|
MD_invariant_group = 16, // "invariant.group"
|
|
|
|
MD_align = 17, // "align"
|
|
|
|
MD_loop = 18, // "llvm.loop"
|
IR: New representation for CFI and virtual call optimization pass metadata.
The bitset metadata currently used in LLVM has a few problems:
1. It has the wrong name. The name "bitset" refers to an implementation
detail of one use of the metadata (i.e. its original use case, CFI).
This makes it harder to understand, as the name makes no sense in the
context of virtual call optimization.
2. It is represented using a global named metadata node, rather than
being directly associated with a global. This makes it harder to
manipulate the metadata when rebuilding global variables, summarise it
as part of ThinLTO and drop unused metadata when associated globals are
dropped. For this reason, CFI does not currently work correctly when
both CFI and vcall opt are enabled, as vcall opt needs to rebuild vtable
globals, and fails to associate metadata with the rebuilt globals. As I
understand it, the same problem could also affect ASan, which rebuilds
globals with a red zone.
This patch solves both of those problems in the following way:
1. Rename the metadata to "type metadata". This new name reflects how
the metadata is currently being used (i.e. to represent type information
for CFI and vtable opt). The new name is reflected in the name for the
associated intrinsic (llvm.type.test) and pass (LowerTypeTests).
2. Attach metadata directly to the globals that it pertains to, rather
than using the "llvm.bitsets" global metadata node as we are doing now.
This is done using the newly introduced capability to attach
metadata to global variables (r271348 and r271358).
See also: http://lists.llvm.org/pipermail/llvm-dev/2016-June/100462.html
Differential Revision: http://reviews.llvm.org/D21053
llvm-svn: 273729
2016-06-24 23:21:32 +02:00
|
|
|
MD_type = 19, // "type"
|
2016-10-18 22:42:47 +02:00
|
|
|
MD_section_prefix = 20, // "section_prefix"
|
2016-12-08 20:01:00 +01:00
|
|
|
MD_absolute_symbol = 21, // "absolute_symbol"
|
2017-03-17 23:17:24 +01:00
|
|
|
MD_associated = 22, // "associated"
|
2010-03-31 01:03:27 +02:00
|
|
|
};
|
2013-05-25 00:53:06 +02:00
|
|
|
|
2015-11-11 22:38:02 +01:00
|
|
|
/// Known operand bundle tag IDs, which always have the same value. All
|
|
|
|
/// operand bundle tags that LLVM has special knowledge of are listed here.
|
|
|
|
/// Additionally, this scheme allows LLVM to efficiently check for specific
|
|
|
|
/// operand bundle tags without comparing strings.
|
|
|
|
enum {
|
2016-01-20 20:50:25 +01:00
|
|
|
OB_deopt = 0, // "deopt"
|
|
|
|
OB_funclet = 1, // "funclet"
|
|
|
|
OB_gc_transition = 2, // "gc-transition"
|
2015-11-11 22:38:02 +01:00
|
|
|
};
|
|
|
|
|
2009-12-29 10:01:33 +01:00
|
|
|
/// getMDKindID - Return a unique non-zero ID for the specified metadata kind.
|
|
|
|
/// This ID is uniqued across modules in the current LLVMContext.
|
|
|
|
unsigned getMDKindID(StringRef Name) const;
|
2013-05-25 00:53:06 +02:00
|
|
|
|
2009-12-29 10:01:33 +01:00
|
|
|
/// getMDKindNames - Populate client supplied SmallVector with the name for
|
2010-07-20 23:45:17 +02:00
|
|
|
/// custom metadata IDs registered in this LLVMContext.
|
2009-12-29 10:01:33 +01:00
|
|
|
void getMDKindNames(SmallVectorImpl<StringRef> &Result) const;
|
2013-05-25 00:53:06 +02:00
|
|
|
|
2015-09-24 21:14:18 +02:00
|
|
|
/// getOperandBundleTags - Populate client supplied SmallVector with the
|
|
|
|
/// bundle tags registered in this LLVMContext. The bundle tags are ordered
|
|
|
|
/// by increasing bundle IDs.
|
|
|
|
/// \see LLVMContext::getOperandBundleTagID
|
|
|
|
void getOperandBundleTags(SmallVectorImpl<StringRef> &Result) const;
|
|
|
|
|
|
|
|
/// getOperandBundleTagID - Maps a bundle tag to an integer ID. Every bundle
|
|
|
|
/// tag registered with an LLVMContext has an unique ID.
|
|
|
|
uint32_t getOperandBundleTagID(StringRef Tag) const;
|
2013-05-25 00:53:06 +02:00
|
|
|
|
2016-01-08 03:28:20 +01:00
|
|
|
/// Define the GC for a function
|
|
|
|
void setGC(const Function &Fn, std::string GCName);
|
|
|
|
|
|
|
|
/// Return the GC for a function
|
|
|
|
const std::string &getGC(const Function &Fn);
|
|
|
|
|
|
|
|
/// Remove the GC for a function
|
|
|
|
void deleteGC(const Function &Fn);
|
|
|
|
|
Add a flag to the LLVMContext to disable name for Value other than GlobalValue
Summary:
This is intended to be a performance flag, on the same level as clang
cc1 option "--disable-free". LLVM will never initialize it by default,
it will be up to the client creating the LLVMContext to request this
behavior. Clang will do it by default in Release build (just like
--disable-free).
"opt" and "llc" can opt-in using -disable-named-value command line
option.
When performing LTO on llvm-tblgen, the initial merging of IR peaks
at 92MB without this patch, and 86MB after this patch,setNameImpl()
drops from 6.5MB to 0.5MB.
The total link time goes from ~29.5s to ~27.8s.
Compared to a compile-time flag (like the IRBuilder one), it performs
very close. I profiled on SROA and obtain these results:
420ms with IRBuilder that preserve name
372ms with IRBuilder that strip name
375ms with IRBuilder that preserve name, and a runtime flag to strip
Reviewers: chandlerc, dexonsmith, bogner
Subscribers: joker.eph, llvm-commits
Differential Revision: http://reviews.llvm.org/D17946
From: Mehdi Amini <mehdi.amini@apple.com>
llvm-svn: 263086
2016-03-10 02:28:54 +01:00
|
|
|
/// Return true if the Context runtime configuration is set to discard all
|
|
|
|
/// value names. When true, only GlobalValue names will be available in the
|
|
|
|
/// IR.
|
2016-04-02 05:46:17 +02:00
|
|
|
bool shouldDiscardValueNames() const;
|
Add a flag to the LLVMContext to disable name for Value other than GlobalValue
Summary:
This is intended to be a performance flag, on the same level as clang
cc1 option "--disable-free". LLVM will never initialize it by default,
it will be up to the client creating the LLVMContext to request this
behavior. Clang will do it by default in Release build (just like
--disable-free).
"opt" and "llc" can opt-in using -disable-named-value command line
option.
When performing LTO on llvm-tblgen, the initial merging of IR peaks
at 92MB without this patch, and 86MB after this patch,setNameImpl()
drops from 6.5MB to 0.5MB.
The total link time goes from ~29.5s to ~27.8s.
Compared to a compile-time flag (like the IRBuilder one), it performs
very close. I profiled on SROA and obtain these results:
420ms with IRBuilder that preserve name
372ms with IRBuilder that strip name
375ms with IRBuilder that preserve name, and a runtime flag to strip
Reviewers: chandlerc, dexonsmith, bogner
Subscribers: joker.eph, llvm-commits
Differential Revision: http://reviews.llvm.org/D17946
From: Mehdi Amini <mehdi.amini@apple.com>
llvm-svn: 263086
2016-03-10 02:28:54 +01:00
|
|
|
|
|
|
|
/// Set the Context runtime configuration to discard all value name (but
|
|
|
|
/// GlobalValue). Clients can use this flag to save memory and runtime,
|
|
|
|
/// especially in release mode.
|
|
|
|
void setDiscardValueNames(bool Discard);
|
2016-01-08 03:28:20 +01:00
|
|
|
|
2016-04-19 06:55:25 +02:00
|
|
|
/// Whether there is a string map for uniquing debug info
|
2016-04-17 05:58:21 +02:00
|
|
|
/// identifiers across the context. Off by default.
|
2016-04-19 06:55:25 +02:00
|
|
|
bool isODRUniquingDebugTypes() const;
|
|
|
|
void enableDebugTypeODRUniquing();
|
|
|
|
void disableDebugTypeODRUniquing();
|
2016-04-17 05:58:21 +02:00
|
|
|
|
2017-05-13 00:25:07 +02:00
|
|
|
using InlineAsmDiagHandlerTy = void (*)(const SMDiagnostic&, void *Context,
|
|
|
|
unsigned LocCookie);
|
2013-05-25 00:53:06 +02:00
|
|
|
|
2013-12-17 18:47:22 +01:00
|
|
|
/// Defines the type of a diagnostic handler.
|
|
|
|
/// \see LLVMContext::setDiagnosticHandler.
|
|
|
|
/// \see LLVMContext::diagnose.
|
2017-05-13 00:25:07 +02:00
|
|
|
using DiagnosticHandlerTy = void (*)(const DiagnosticInfo &DI, void *Context);
|
2013-12-17 18:47:22 +01:00
|
|
|
|
2014-05-16 04:33:15 +02:00
|
|
|
/// Defines the type of a yield callback.
|
|
|
|
/// \see LLVMContext::setYieldCallback.
|
2017-05-13 00:25:07 +02:00
|
|
|
using YieldCallbackTy = void (*)(LLVMContext *Context, void *OpaqueHandle);
|
2014-05-16 04:33:15 +02:00
|
|
|
|
2013-02-11 06:37:07 +01:00
|
|
|
/// setInlineAsmDiagnosticHandler - This method sets a handler that is invoked
|
|
|
|
/// when problems with inline asm are detected by the backend. The first
|
|
|
|
/// argument is a function pointer and the second is a context pointer that
|
|
|
|
/// gets passed into the DiagHandler.
|
2010-04-06 02:44:45 +02:00
|
|
|
///
|
2010-11-17 09:13:01 +01:00
|
|
|
/// LLVMContext doesn't take ownership or interpret either of these
|
2010-04-06 02:44:45 +02:00
|
|
|
/// pointers.
|
2013-02-11 06:37:07 +01:00
|
|
|
void setInlineAsmDiagnosticHandler(InlineAsmDiagHandlerTy DiagHandler,
|
2014-04-09 08:08:46 +02:00
|
|
|
void *DiagContext = nullptr);
|
2010-04-06 02:44:45 +02:00
|
|
|
|
2013-02-11 06:37:07 +01:00
|
|
|
/// getInlineAsmDiagnosticHandler - Return the diagnostic handler set by
|
|
|
|
/// setInlineAsmDiagnosticHandler.
|
|
|
|
InlineAsmDiagHandlerTy getInlineAsmDiagnosticHandler() const;
|
2010-04-06 02:44:45 +02:00
|
|
|
|
2013-02-11 06:37:07 +01:00
|
|
|
/// getInlineAsmDiagnosticContext - Return the diagnostic context set by
|
|
|
|
/// setInlineAsmDiagnosticHandler.
|
|
|
|
void *getInlineAsmDiagnosticContext() const;
|
2013-05-25 00:53:06 +02:00
|
|
|
|
2013-12-17 18:47:22 +01:00
|
|
|
/// setDiagnosticHandler - This method sets a handler that is invoked
|
|
|
|
/// when the backend needs to report anything to the user. The first
|
|
|
|
/// argument is a function pointer and the second is a context pointer that
|
2014-10-01 20:36:03 +02:00
|
|
|
/// gets passed into the DiagHandler. The third argument should be set to
|
|
|
|
/// true if the handler only expects enabled diagnostics.
|
2013-12-17 18:47:22 +01:00
|
|
|
///
|
|
|
|
/// LLVMContext doesn't take ownership or interpret either of these
|
|
|
|
/// pointers.
|
|
|
|
void setDiagnosticHandler(DiagnosticHandlerTy DiagHandler,
|
2014-10-01 20:36:03 +02:00
|
|
|
void *DiagContext = nullptr,
|
|
|
|
bool RespectFilters = false);
|
2013-12-17 18:47:22 +01:00
|
|
|
|
|
|
|
/// getDiagnosticHandler - Return the diagnostic handler set by
|
|
|
|
/// setDiagnosticHandler.
|
|
|
|
DiagnosticHandlerTy getDiagnosticHandler() const;
|
|
|
|
|
|
|
|
/// getDiagnosticContext - Return the diagnostic context set by
|
|
|
|
/// setDiagnosticContext.
|
|
|
|
void *getDiagnosticContext() const;
|
|
|
|
|
2016-07-15 19:23:20 +02:00
|
|
|
/// \brief Return if a code hotness metric should be included in optimization
|
|
|
|
/// diagnostics.
|
|
|
|
bool getDiagnosticHotnessRequested() const;
|
|
|
|
/// \brief Set if a code hotness metric should be included in optimization
|
|
|
|
/// diagnostics.
|
|
|
|
void setDiagnosticHotnessRequested(bool Requested);
|
|
|
|
|
Output optimization remarks in YAML
(Re-committed after moving the template specialization under the yaml
namespace. GCC was complaining about this.)
This allows various presentation of this data using an external tool.
This was first recommended here[1].
As an example, consider this module:
1 int foo();
2 int bar();
3
4 int baz() {
5 return foo() + bar();
6 }
The inliner generates these missed-optimization remarks today (the
hotness information is pulled from PGO):
remark: /tmp/s.c:5:10: foo will not be inlined into baz (hotness: 30)
remark: /tmp/s.c:5:18: bar will not be inlined into baz (hotness: 30)
Now with -pass-remarks-output=<yaml-file>, we generate this YAML file:
--- !Missed
Pass: inline
Name: NotInlined
DebugLoc: { File: /tmp/s.c, Line: 5, Column: 10 }
Function: baz
Hotness: 30
Args:
- Callee: foo
- String: will not be inlined into
- Caller: baz
...
--- !Missed
Pass: inline
Name: NotInlined
DebugLoc: { File: /tmp/s.c, Line: 5, Column: 18 }
Function: baz
Hotness: 30
Args:
- Callee: bar
- String: will not be inlined into
- Caller: baz
...
This is a summary of the high-level decisions:
* There is a new streaming interface to emit optimization remarks.
E.g. for the inliner remark above:
ORE.emit(DiagnosticInfoOptimizationRemarkMissed(
DEBUG_TYPE, "NotInlined", &I)
<< NV("Callee", Callee) << " will not be inlined into "
<< NV("Caller", CS.getCaller()) << setIsVerbose());
NV stands for named value and allows the YAML client to process a remark
using its name (NotInlined) and the named arguments (Callee and Caller)
without parsing the text of the message.
Subsequent patches will update ORE users to use the new streaming API.
* I am using YAML I/O for writing the YAML file. YAML I/O requires you
to specify reading and writing at once but reading is highly non-trivial
for some of the more complex LLVM types. Since it's not clear that we
(ever) want to use LLVM to parse this YAML file, the code supports and
asserts that we're writing only.
On the other hand, I did experiment that the class hierarchy starting at
DiagnosticInfoOptimizationBase can be mapped back from YAML generated
here (see D24479).
* The YAML stream is stored in the LLVM context.
* In the example, we can probably further specify the IR value used,
i.e. print "Function" rather than "Value".
* As before hotness is computed in the analysis pass instead of
DiganosticInfo. This avoids the layering problem since BFI is in
Analysis while DiagnosticInfo is in IR.
[1] https://reviews.llvm.org/D19678#419445
Differential Revision: https://reviews.llvm.org/D24587
llvm-svn: 282539
2016-09-27 22:55:07 +02:00
|
|
|
/// \brief Return the YAML file used by the backend to save optimization
|
|
|
|
/// diagnostics. If null, diagnostics are not saved in a file but only
|
|
|
|
/// emitted via the diagnostic handler.
|
|
|
|
yaml::Output *getDiagnosticsOutputFile();
|
|
|
|
/// Set the diagnostics output file used for optimization diagnostics.
|
|
|
|
///
|
|
|
|
/// By default or if invoked with null, diagnostics are not saved in a file
|
|
|
|
/// but only emitted via the diagnostic handler. Even if an output file is
|
|
|
|
/// set, the handler is invoked for each diagnostic message.
|
2016-11-19 19:19:41 +01:00
|
|
|
void setDiagnosticsOutputFile(std::unique_ptr<yaml::Output> F);
|
Output optimization remarks in YAML
(Re-committed after moving the template specialization under the yaml
namespace. GCC was complaining about this.)
This allows various presentation of this data using an external tool.
This was first recommended here[1].
As an example, consider this module:
1 int foo();
2 int bar();
3
4 int baz() {
5 return foo() + bar();
6 }
The inliner generates these missed-optimization remarks today (the
hotness information is pulled from PGO):
remark: /tmp/s.c:5:10: foo will not be inlined into baz (hotness: 30)
remark: /tmp/s.c:5:18: bar will not be inlined into baz (hotness: 30)
Now with -pass-remarks-output=<yaml-file>, we generate this YAML file:
--- !Missed
Pass: inline
Name: NotInlined
DebugLoc: { File: /tmp/s.c, Line: 5, Column: 10 }
Function: baz
Hotness: 30
Args:
- Callee: foo
- String: will not be inlined into
- Caller: baz
...
--- !Missed
Pass: inline
Name: NotInlined
DebugLoc: { File: /tmp/s.c, Line: 5, Column: 18 }
Function: baz
Hotness: 30
Args:
- Callee: bar
- String: will not be inlined into
- Caller: baz
...
This is a summary of the high-level decisions:
* There is a new streaming interface to emit optimization remarks.
E.g. for the inliner remark above:
ORE.emit(DiagnosticInfoOptimizationRemarkMissed(
DEBUG_TYPE, "NotInlined", &I)
<< NV("Callee", Callee) << " will not be inlined into "
<< NV("Caller", CS.getCaller()) << setIsVerbose());
NV stands for named value and allows the YAML client to process a remark
using its name (NotInlined) and the named arguments (Callee and Caller)
without parsing the text of the message.
Subsequent patches will update ORE users to use the new streaming API.
* I am using YAML I/O for writing the YAML file. YAML I/O requires you
to specify reading and writing at once but reading is highly non-trivial
for some of the more complex LLVM types. Since it's not clear that we
(ever) want to use LLVM to parse this YAML file, the code supports and
asserts that we're writing only.
On the other hand, I did experiment that the class hierarchy starting at
DiagnosticInfoOptimizationBase can be mapped back from YAML generated
here (see D24479).
* The YAML stream is stored in the LLVM context.
* In the example, we can probably further specify the IR value used,
i.e. print "Function" rather than "Value".
* As before hotness is computed in the analysis pass instead of
DiganosticInfo. This avoids the layering problem since BFI is in
Analysis while DiagnosticInfo is in IR.
[1] https://reviews.llvm.org/D19678#419445
Differential Revision: https://reviews.llvm.org/D24587
llvm-svn: 282539
2016-09-27 22:55:07 +02:00
|
|
|
|
2016-05-16 16:28:02 +02:00
|
|
|
/// \brief Get the prefix that should be printed in front of a diagnostic of
|
|
|
|
/// the given \p Severity
|
|
|
|
static const char *getDiagnosticMessagePrefix(DiagnosticSeverity Severity);
|
|
|
|
|
2014-08-04 23:49:15 +02:00
|
|
|
/// \brief Report a message to the currently installed diagnostic handler.
|
|
|
|
///
|
2013-12-17 18:47:22 +01:00
|
|
|
/// This function returns, in particular in the case of error reporting
|
2014-08-04 23:49:15 +02:00
|
|
|
/// (DI.Severity == \a DS_Error), so the caller should leave the compilation
|
2013-12-17 18:47:22 +01:00
|
|
|
/// process in a self-consistent state, even though the generated code
|
|
|
|
/// need not be correct.
|
2014-08-04 23:49:15 +02:00
|
|
|
///
|
|
|
|
/// The diagnostic message will be implicitly prefixed with a severity keyword
|
|
|
|
/// according to \p DI.getSeverity(), i.e., "error: " for \a DS_Error,
|
|
|
|
/// "warning: " for \a DS_Warning, and "note: " for \a DS_Note.
|
2013-12-17 18:47:22 +01:00
|
|
|
void diagnose(const DiagnosticInfo &DI);
|
2013-05-25 00:53:06 +02:00
|
|
|
|
2014-05-16 04:33:15 +02:00
|
|
|
/// \brief Registers a yield callback with the given context.
|
|
|
|
///
|
|
|
|
/// The yield callback function may be called by LLVM to transfer control back
|
|
|
|
/// to the client that invoked the LLVM compilation. This can be used to yield
|
|
|
|
/// control of the thread, or perform periodic work needed by the client.
|
|
|
|
/// There is no guaranteed frequency at which callbacks must occur; in fact,
|
|
|
|
/// the client is not guaranteed to ever receive this callback. It is at the
|
|
|
|
/// sole discretion of LLVM to do so and only if it can guarantee that
|
|
|
|
/// suspending the thread won't block any forward progress in other LLVM
|
|
|
|
/// contexts in the same process.
|
|
|
|
///
|
|
|
|
/// At a suspend point, the state of the current LLVM context is intentionally
|
|
|
|
/// undefined. No assumptions about it can or should be made. Only LLVM
|
|
|
|
/// context API calls that explicitly state that they can be used during a
|
|
|
|
/// yield callback are allowed to be used. Any other API calls into the
|
|
|
|
/// context are not supported until the yield callback function returns
|
|
|
|
/// control to LLVM. Other LLVM contexts are unaffected by this restriction.
|
|
|
|
void setYieldCallback(YieldCallbackTy Callback, void *OpaqueHandle);
|
|
|
|
|
|
|
|
/// \brief Calls the yield callback (if applicable).
|
|
|
|
///
|
|
|
|
/// This transfers control of the current thread back to the client, which may
|
|
|
|
/// suspend the current thread. Only call this method when LLVM doesn't hold
|
|
|
|
/// any global mutex or cannot block the execution in another LLVM context.
|
|
|
|
void yield();
|
|
|
|
|
introduce a new recoverable error handling API to LLVMContext
and use it in one place in inline asm handling stuff. Before
we'd generate this for an invalid modifier letter:
$ clang asm.c -c -o t.o
fatal error: error in backend: Invalid operand found in inline asm: 'abc incl ${0:Z}'
INLINEASM <es:abc incl ${0:Z}>, 10, %EAX<def>, 2147483657, %EAX, 14, %EFLAGS<earlyclobber,def,dead>, <!-1>
Now we generate this:
$ clang asm.c -c -o t.o
error: invalid operand in inline asm: 'incl ${0:Z}'
asm.c:3:12: note: generated from here
__asm__ ("incl %Z0" : "+r" (X));
^
1 error generated.
This is much better but still admittedly not great ("why" is the operand
invalid??), codegen should try harder with its diagnostics :)
llvm-svn: 100723
2010-04-08 01:40:44 +02:00
|
|
|
/// emitError - Emit an error message to the currently installed error handler
|
|
|
|
/// with optional location information. This function returns, so code should
|
|
|
|
/// be prepared to drop the erroneous construct on the floor and "not crash".
|
|
|
|
/// The generated code need not be correct. The error message will be
|
|
|
|
/// implicitly prefixed with "error: " and should not end with a ".".
|
2012-01-04 00:47:05 +01:00
|
|
|
void emitError(unsigned LocCookie, const Twine &ErrorStr);
|
|
|
|
void emitError(const Instruction *I, const Twine &ErrorStr);
|
|
|
|
void emitError(const Twine &ErrorStr);
|
2010-09-08 20:41:07 +02:00
|
|
|
|
2014-10-15 23:54:35 +02:00
|
|
|
/// \brief Query for a debug option's value.
|
|
|
|
///
|
|
|
|
/// This function returns typed data populated from command line parsing.
|
|
|
|
template <typename ValT, typename Base, ValT(Base::*Mem)>
|
|
|
|
ValT getOption() const {
|
|
|
|
return OptionRegistry::instance().template get<ValT, Base, Mem>();
|
|
|
|
}
|
|
|
|
|
2016-04-23 00:06:11 +02:00
|
|
|
/// \brief Access the object which manages optimization bisection for failure
|
|
|
|
/// analysis.
|
|
|
|
OptBisect &getOptBisect();
|
2010-09-08 20:41:07 +02:00
|
|
|
private:
|
2016-12-06 23:00:57 +01:00
|
|
|
// Module needs access to the add/removeModule methods.
|
|
|
|
friend class Module;
|
2010-09-08 20:41:07 +02:00
|
|
|
|
|
|
|
/// addModule - Register a module as being instantiated in this context. If
|
|
|
|
/// the context is deleted, the module will be deleted as well.
|
|
|
|
void addModule(Module*);
|
2013-05-25 00:53:06 +02:00
|
|
|
|
2010-09-08 20:41:07 +02:00
|
|
|
/// removeModule - Unregister a module from this context.
|
|
|
|
void removeModule(Module*);
|
2009-06-30 02:48:55 +02:00
|
|
|
};
|
|
|
|
|
2013-05-01 22:59:00 +02:00
|
|
|
// Create wrappers for C Binding types (see CBindingWrapping.h).
|
|
|
|
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(LLVMContext, LLVMContextRef)
|
|
|
|
|
|
|
|
/* Specialized opaque context conversions.
|
|
|
|
*/
|
|
|
|
inline LLVMContext **unwrap(LLVMContextRef* Tys) {
|
|
|
|
return reinterpret_cast<LLVMContext**>(Tys);
|
|
|
|
}
|
|
|
|
|
|
|
|
inline LLVMContextRef *wrap(const LLVMContext **Tys) {
|
|
|
|
return reinterpret_cast<LLVMContextRef*>(const_cast<LLVMContext**>(Tys));
|
|
|
|
}
|
|
|
|
|
2016-12-06 23:00:57 +01:00
|
|
|
} // end namespace llvm
|
2009-06-30 02:48:55 +02:00
|
|
|
|
2016-12-06 23:00:57 +01:00
|
|
|
#endif // LLVM_IR_LLVMCONTEXT_H
|