//===-LTO.h - LLVM Link Time Optimizer ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares functions and classes used to support LTO. It is intended
// to be used both by LTO classes and by clients (such as the gold plugin) that
// don't use the LTO code generator interfaces.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LTO_LTO_H
#define LLVM_LTO_LTO_H
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/LTO/Config.h"
#include "llvm/Object/IRSymtab.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/thread.h"
#include "llvm/Transforms/IPO/FunctionImport.h"
namespace llvm {
class Error;
class IRMover;
class LLVMContext;
class MemoryBufferRef;
class Module;
class raw_pwrite_stream;
class Target;
class ToolOutputFile;
/// Resolve linkage for prevailing symbols in the \p Index. Linkage changes are
/// recorded in the index, and the ThinLTO backends must apply them to the
/// module via thinLTOResolvePrevailingInModule.
///
/// This is done both for correctness (if a value is exported, ensure we always
/// emit a copy) and for compile-time optimization (allowing duplicates to be
/// dropped).
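///
/// Example (illustrative sketch): a ThinLTO client might pass callbacks along
/// these lines, where Conf, CombinedIndex, GUIDPreservedSymbols, and the
/// PrevailingCopy / ResolvedODR maps are hypothetical client-side state
/// matching the parameter types below.
/// \code
///   auto IsPrevailing = [&](GlobalValue::GUID GUID,
///                           const GlobalValueSummary *S) {
///     return PrevailingCopy[GUID] == S;
///   };
///   auto RecordNewLinkage = [&](StringRef ModulePath, GlobalValue::GUID GUID,
///                               GlobalValue::LinkageTypes NewLinkage) {
///     ResolvedODR[ModulePath][GUID] = NewLinkage;
///   };
///   thinLTOResolvePrevailingInIndex(Conf, CombinedIndex, IsPrevailing,
///                                   RecordNewLinkage, GUIDPreservedSymbols);
/// \endcode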
void thinLTOResolvePrevailingInIndex(
const lto::Config &C, ModuleSummaryIndex &Index,
function_ref<bool(GlobalValue::GUID, const GlobalValueSummary *)>
isPrevailing,
function_ref<void(StringRef, GlobalValue::GUID, GlobalValue::LinkageTypes)>
recordNewLinkage,
const DenseSet<GlobalValue::GUID> &GUIDPreservedSymbols);
/// Update the linkages in the given \p Index to mark exported values
/// as external and non-exported values as internal. The ThinLTO backends
/// must apply the changes to the Module via thinLTOInternalizeModule.
void thinLTOInternalizeAndPromoteInIndex(
ModuleSummaryIndex &Index,
function_ref<bool(StringRef, ValueInfo)> isExported,
function_ref<bool(GlobalValue::GUID, const GlobalValueSummary *)>
isPrevailing);
/// Computes a unique hash for the Module, considering the current lists of
/// exports/imports and other global analysis results.
/// The hash is produced in \p Key.
void computeLTOCacheKey(
SmallString<40> &Key, const lto::Config &Conf,
const ModuleSummaryIndex &Index, StringRef ModuleID,
const FunctionImporter::ImportMapTy &ImportList,
const FunctionImporter::ExportSetTy &ExportList,
const std::map<GlobalValue::GUID, GlobalValue::LinkageTypes> &ResolvedODR,
const GVSummaryMapTy &DefinedGlobals,
const std::set<GlobalValue::GUID> &CfiFunctionDefs = {},
const std::set<GlobalValue::GUID> &CfiFunctionDecls = {});
namespace lto {
/// Given the original \p Path to an output file, replace any path
/// prefix matching \p OldPrefix with \p NewPrefix. Also, create the
/// resulting directory if it does not yet exist.
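///
/// Example (illustrative sketch; the paths are hypothetical): replacing the
/// prefix "old" with "new" maps "old/thinlto/a.o" to "new/thinlto/a.o" and
/// creates "new/thinlto" if it does not yet exist.
/// \code
///   std::string Out = getThinLTOOutputFile("old/thinlto/a.o", "old", "new");
/// \endcode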
std::string getThinLTOOutputFile(const std::string &Path,
const std::string &OldPrefix,
const std::string &NewPrefix);
/// Setup optimization remarks.
Expected<std::unique_ptr<ToolOutputFile>> setupLLVMOptimizationRemarks(
LLVMContext &Context, StringRef RemarksFilename, StringRef RemarksPasses,
StringRef RemarksFormat, bool RemarksWithHotness,
Optional<uint64_t> RemarksHotnessThreshold = 0, int Count = -1);
/// Sets up the output file for saving statistics.
Expected<std::unique_ptr<ToolOutputFile>>
setupStatsFile(StringRef StatsFilename);
/// Produces a container ordering for optimal multi-threaded processing. Returns
/// ordered indices to elements in the input array.
std::vector<int> generateModulesOrdering(ArrayRef<BitcodeModule *> R);
class LTO;
struct SymbolResolution;
class ThinBackendProc;
/// An input file. This is a symbol table wrapper that only exposes the
/// information that an LTO client should need in order to do symbol resolution.
class InputFile {
public:
class Symbol;
private:
// FIXME: Remove LTO class friendship once we have bitcode symbol tables.
friend LTO;
InputFile() = default;
std::vector<BitcodeModule> Mods;
SmallVector<char, 0> Strtab;
std::vector<Symbol> Symbols;
// [begin, end) for each module
std::vector<std::pair<size_t, size_t>> ModuleSymIndices;
StringRef TargetTriple, SourceFileName, COFFLinkerOpts;
std::vector<StringRef> DependentLibraries;
std::vector<std::pair<StringRef, Comdat::SelectionKind>> ComdatTable;
public:
~InputFile();
/// Create an InputFile.
static Expected<std::unique_ptr<InputFile>> create(MemoryBufferRef Object);
/// The purpose of this class is to only expose the symbol information that an
/// LTO client should need in order to do symbol resolution.
class Symbol : irsymtab::Symbol {
friend LTO;
public:
Symbol(const irsymtab::Symbol &S) : irsymtab::Symbol(S) {}
using irsymtab::Symbol::isUndefined;
using irsymtab::Symbol::isCommon;
using irsymtab::Symbol::isWeak;
using irsymtab::Symbol::isIndirect;
using irsymtab::Symbol::getName;
using irsymtab::Symbol::getIRName;
using irsymtab::Symbol::getVisibility;
using irsymtab::Symbol::canBeOmittedFromSymbolTable;
using irsymtab::Symbol::isTLS;
using irsymtab::Symbol::getComdatIndex;
using irsymtab::Symbol::getCommonSize;
using irsymtab::Symbol::getCommonAlignment;
using irsymtab::Symbol::getCOFFWeakExternalFallback;
using irsymtab::Symbol::getSectionName;
using irsymtab::Symbol::isExecutable;
using irsymtab::Symbol::isUsed;
};
/// A range over the symbols in this InputFile.
ArrayRef<Symbol> symbols() const { return Symbols; }
/// Returns linker options specified in the input file.
StringRef getCOFFLinkerOpts() const { return COFFLinkerOpts; }
/// Returns dependent library specifiers from the input file.
ArrayRef<StringRef> getDependentLibraries() const { return DependentLibraries; }
/// Returns the path to the InputFile.
StringRef getName() const;
/// Returns the input file's target triple.
StringRef getTargetTriple() const { return TargetTriple; }
/// Returns the source file path specified at compile time.
StringRef getSourceFileName() const { return SourceFileName; }
// Returns a table with all the comdats used by this file.
ArrayRef<std::pair<StringRef, Comdat::SelectionKind>> getComdatTable() const {
return ComdatTable;
}
// Returns the only BitcodeModule from InputFile.
BitcodeModule &getSingleBitcodeModule();
private:
ArrayRef<Symbol> module_symbols(unsigned I) const {
const auto &Indices = ModuleSymIndices[I];
return {Symbols.data() + Indices.first, Symbols.data() + Indices.second};
}
};
/// This class wraps an output stream for a native object. Most clients should
/// just be able to return an instance of this base class from the stream
/// callback, but if a client needs to perform some action after the stream is
/// written to, that can be done by deriving from this class and overriding the
/// destructor.
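///
/// Example (illustrative sketch): a hypothetical client that must rename a
/// temporary file once the native object has been fully written could derive
/// from this class and do the work in the destructor (assumes
/// llvm/Support/FileSystem.h for sys::fs::rename; error handling elided).
/// \code
///   class RenamingStream : public NativeObjectStream {
///     std::string TmpPath, FinalPath;
///   public:
///     RenamingStream(std::unique_ptr<raw_pwrite_stream> OS, std::string Tmp,
///                    std::string Final)
///         : NativeObjectStream(std::move(OS)), TmpPath(std::move(Tmp)),
///           FinalPath(std::move(Final)) {}
///     ~RenamingStream() override {
///       OS.reset(); // close the stream before renaming
///       sys::fs::rename(TmpPath, FinalPath);
///     }
///   };
/// \endcode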
class NativeObjectStream {
public:
NativeObjectStream(std::unique_ptr<raw_pwrite_stream> OS) : OS(std::move(OS)) {}
std::unique_ptr<raw_pwrite_stream> OS;
virtual ~NativeObjectStream() = default;
};
/// This type defines the callback to add a native object that is generated on
/// the fly.
///
/// Stream callbacks must be thread safe.
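///
/// Example (illustrative sketch, assuming llvm/Support/raw_ostream.h and
/// llvm/Support/FileSystem.h; the "task.<N>.o" naming scheme is hypothetical):
/// \code
///   AddStreamFn AddStream = [&](unsigned Task) {
///     std::error_code EC;
///     auto OS = std::make_unique<raw_fd_ostream>(
///         "task." + std::to_string(Task) + ".o", EC, sys::fs::OF_None);
///     // Real clients should check EC here.
///     return std::make_unique<NativeObjectStream>(std::move(OS));
///   };
/// \endcode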
using AddStreamFn =
std::function<std::unique_ptr<NativeObjectStream>(unsigned Task)>;
/// This is the type of a native object cache. To request an item from the
/// cache, pass a unique string as the Key. For hits, the cached file will be
/// added to the link and this function will return AddStreamFn(). For misses,
/// the cache will return a stream callback which must be called at most once to
/// produce content for the stream. The native object stream produced by the
/// stream callback will add the file to the link after the stream is written
/// to.
///
/// Clients generally look like this:
///
/// if (AddStreamFn AddStream = Cache(Task, Key))
/// ProduceContent(AddStream);
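///
/// A hypothetical cache could be sketched as follows, where isInCache,
/// addCachedFileToLink and MyCacheStream are illustrative client helpers (the
/// latter a stream type derived from NativeObjectStream that stores the
/// produced data under Key and adds it to the link in its destructor), not
/// part of this API; real clients may instead use the helpers in
/// llvm/LTO/Caching.h.
/// \code
///   NativeObjectCache Cache = [&](unsigned Task, StringRef Key) -> AddStreamFn {
///     if (isInCache(Key)) {
///       addCachedFileToLink(Task, Key); // hit: file added directly
///       return AddStreamFn();           // nothing for the client to produce
///     }
///     std::string KeyStr = Key.str();
///     return [=](unsigned ProducerTask) { // miss: produce, then store by key
///       return std::make_unique<MyCacheStream>(ProducerTask, KeyStr);
///     };
///   };
/// \endcode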
using NativeObjectCache =
std::function<AddStreamFn(unsigned Task, StringRef Key)>;
/// A ThinBackend defines what happens after the thin-link phase during ThinLTO.
/// The details of this type definition aren't important; clients can only
/// create a ThinBackend using one of the create*ThinBackend() functions below.
using ThinBackend = std::function<std::unique_ptr<ThinBackendProc>(
const Config &C, ModuleSummaryIndex &CombinedIndex,
StringMap<GVSummaryMapTy> &ModuleToDefinedGVSummaries,
AddStreamFn AddStream, NativeObjectCache Cache)>;
/// This ThinBackend runs the individual backend jobs in-process.
/// The default value means to use one job per hardware core (not hyper-thread).
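///
/// Example (illustrative sketch, assuming llvm/Support/Threading.h for the
/// ThreadPoolStrategy helpers):
/// \code
///   ThinBackend Backend =
///       createInProcessThinBackend(heavyweight_hardware_concurrency());
/// \endcode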
ThinBackend createInProcessThinBackend(ThreadPoolStrategy Parallelism);
/// This ThinBackend writes individual module indexes to files, instead of
/// running the individual backend jobs. This backend is for distributed builds
/// where separate processes will invoke the real backends.
///
/// To find the path to write the index to, the backend checks if the path has a
/// prefix of OldPrefix; if so, it replaces that prefix with NewPrefix. It then
/// appends ".thinlto.bc" and writes the index to that path. If
/// ShouldEmitImportsFiles is true it also writes a list of imported files to a
/// similar path with ".imports" appended instead.
/// LinkedObjectsFile is an output stream to write the list of object files for
/// the final ThinLTO linking. Can be nullptr.
/// OnWrite is a callback that receives the module identifier and notifies the
/// LTO user that the index file for the module (and optionally the imports
/// file) has been created.
using IndexWriteCallback = std::function<void(const std::string &)>;
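///
/// Example (illustrative sketch; the empty prefixes and the callback body are
/// placeholders):
/// \code
///   ThinBackend Backend = createWriteIndexesThinBackend(
///       /*OldPrefix=*/"", /*NewPrefix=*/"", /*ShouldEmitImportsFiles=*/true,
///       /*LinkedObjectsFile=*/nullptr,
///       /*OnWrite=*/[](const std::string &Identifier) {
///         errs() << "wrote index for " << Identifier << "\n";
///       });
/// \endcode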
ThinBackend createWriteIndexesThinBackend(std::string OldPrefix,
std::string NewPrefix,
bool ShouldEmitImportsFiles,
raw_fd_ostream *LinkedObjectsFile,
IndexWriteCallback OnWrite);
/// This class implements a resolution-based interface to LLVM's LTO
/// functionality. It supports regular LTO, parallel LTO code generation and
/// ThinLTO. You can use it from a linker in the following way:
/// - Set hooks and code generation options (see lto::Config struct defined in
/// Config.h), and use the lto::Config object to create an lto::LTO object.
/// - Create lto::InputFile objects using lto::InputFile::create(), then use
/// the symbols() function to enumerate its symbols and compute a resolution
/// for each symbol (see SymbolResolution below).
/// - After the linker has visited each input file (and each regular object
/// file) and computed a resolution for each symbol, take each lto::InputFile
/// and pass it and an array of symbol resolutions to the add() function.
/// - Call the getMaxTasks() function to get an upper bound on the number of
/// native object files that LTO may add to the link.
/// - Call the run() function. This function will use the supplied AddStream
/// and Cache functions to add up to getMaxTasks() native object files to
/// the link.
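///
/// A condensed, illustrative sketch of that flow (error handling is elided;
/// InputBuffers, computeResolution and openStreamForTask are hypothetical
/// client helpers, and heavyweight_hardware_concurrency() is assumed to come
/// from llvm/Support/Threading.h):
/// \code
///   Config Conf;
///   Conf.DiagHandler = [](const DiagnosticInfo &DI) { /* report */ };
///   LTO Lto(std::move(Conf),
///           createInProcessThinBackend(heavyweight_hardware_concurrency()));
///   for (MemoryBufferRef MB : InputBuffers) {
///     std::unique_ptr<InputFile> F = cantFail(InputFile::create(MB));
///     std::vector<SymbolResolution> Res;
///     for (const InputFile::Symbol &Sym : F->symbols())
///       Res.push_back(computeResolution(Sym)); // fill in the bitfields below
///     cantFail(Lto.add(std::move(F), Res));
///   }
///   unsigned MaxTasks = Lto.getMaxTasks(); // upper bound on native outputs
///   AddStreamFn AddStream = [&](unsigned Task) {
///     return std::make_unique<NativeObjectStream>(openStreamForTask(Task));
///   };
///   cantFail(Lto.run(AddStream));
/// \endcode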
class LTO {
friend InputFile;
public:
/// Create an LTO object. A default constructed LTO object has a reasonable
/// production configuration, but you can customize it by passing arguments to
/// this constructor.
/// FIXME: We do currently require the DiagHandler field to be set in Conf.
/// Until that is fixed, a Config argument is required.
LTO(Config Conf, ThinBackend Backend = nullptr,
unsigned ParallelCodeGenParallelismLevel = 1);
~LTO();
/// Add an input file to the LTO link, using the provided symbol resolutions.
/// The symbol resolutions must appear in the enumeration order given by
/// InputFile::symbols().
Error add(std::unique_ptr<InputFile> Obj, ArrayRef<SymbolResolution> Res);
/// Returns an upper bound on the number of tasks that the client may expect.
/// This may only be called after all IR object files have been added. For a
/// full description of tasks see LTOBackend.h.
unsigned getMaxTasks() const;
/// Runs the LTO pipeline. This function calls the supplied AddStream
/// function to add native object files to the link.
///
/// The Cache parameter is optional. If supplied, it will be used to cache
/// native object files and add them to the link.
///
/// The client will receive at most one callback (via either AddStream or
/// Cache) for each task identifier.
Error run(AddStreamFn AddStream, NativeObjectCache Cache = nullptr);
/// Static method that returns a list of libcall symbols that can be generated
/// by LTO but might not be visible in the bitcode symbol table.
static ArrayRef<const char*> getRuntimeLibcallSymbols();
private:
Config Conf;
struct RegularLTOState {
RegularLTOState(unsigned ParallelCodeGenParallelismLevel,
const Config &Conf);
struct CommonResolution {
uint64_t Size = 0;
MaybeAlign Align;
/// Record whether at least one instance of the common symbol was marked as
/// prevailing.
bool Prevailing = false;
};
std::map<std::string, CommonResolution> Commons;
unsigned ParallelCodeGenParallelismLevel;
LTOLLVMContext Ctx;
std::unique_ptr<Module> CombinedModule;
std::unique_ptr<IRMover> Mover;
// This stores the information about a regular LTO module that we have added
// to the link. It will either be linked immediately (for modules without
// summaries) or after summary-based dead stripping (for modules with
// summaries).
struct AddedModule {
std::unique_ptr<Module> M;
std::vector<GlobalValue *> Keep;
};
std::vector<AddedModule> ModsWithSummaries;
bool EmptyCombinedModule = true;
} RegularLTO;
using ModuleMapType = MapVector<StringRef, BitcodeModule>;
struct ThinLTOState {
ThinLTOState(ThinBackend Backend);
ThinBackend Backend;
ModuleSummaryIndex CombinedIndex;
// The full set of bitcode modules in input order.
ModuleMapType ModuleMap;
// The bitcode modules to compile, if specified by the LTO Config.
Optional<ModuleMapType> ModulesToCompile;
DenseMap<GlobalValue::GUID, StringRef> PrevailingModuleForGUID;
} ThinLTO;
// The global resolution for a particular (mangled) symbol name. This is in
// particular necessary to track whether each symbol can be internalized.
// Because any input file may introduce a new cross-partition reference, we
// cannot make any final internalization decisions until all input files have
// been added and the client has called run(). During run() we apply
// internalization decisions either directly to the module (for regular LTO)
// or to the combined index (for ThinLTO).
struct GlobalResolution {
/// The unmangled name of the global.
std::string IRName;
/// Keep track of whether the symbol is visible outside of a module with a
/// summary (i.e. in either a regular object or a regular LTO module without
/// a summary).
bool VisibleOutsideSummary = false;
/// The symbol was exported dynamically, and therefore could be referenced
/// by a shared library not visible to the linker.
bool ExportDynamic = false;
bool UnnamedAddr = true;
/// True if module contains the prevailing definition.
bool Prevailing = false;
/// Returns true if the module contains the prevailing definition and the
/// symbol is an IR symbol. For example, when a module-level inline asm block
/// is used, a symbol can be prevailing in the module but have no IR name.
bool isPrevailingIRSymbol() const { return Prevailing && !IRName.empty(); }
/// This field keeps track of the partition number of this global. The
/// regular LTO object is partition 0, while each ThinLTO object has its own
/// partition number from 1 onwards.
///
/// Any global that is defined or used by more than one partition, or that
/// is referenced externally, may not be internalized.
///
/// Partitions generally have a one-to-one correspondence with tasks, except
/// that we use partition 0 for all parallel LTO code generation partitions.
/// Any partitioning of the combined LTO object is done internally by the
/// LTO backend.
unsigned Partition = Unknown;
/// Special partition numbers.
enum : unsigned {
/// A partition number has not yet been assigned to this global.
Unknown = -1u,
/// This global is either used by more than one partition or has an
/// external reference, and therefore cannot be internalized.
External = -2u,
/// The RegularLTO partition
RegularLTO = 0,
};
};
// Global mapping from mangled symbol names to resolutions.
StringMap<GlobalResolution> GlobalResolutions;
void addModuleToGlobalRes(ArrayRef<InputFile::Symbol> Syms,
ArrayRef<SymbolResolution> Res, unsigned Partition,
bool InSummary);
// These functions take a range of symbol resolutions [ResI, ResE) and consume
// the resolutions used by a single input module by incrementing ResI. After
// these functions return, [ResI, ResE) will refer to the resolution range for
// the remaining modules in the InputFile.
Error addModule(InputFile &Input, unsigned ModI,
const SymbolResolution *&ResI, const SymbolResolution *ResE);
Expected<RegularLTOState::AddedModule>
addRegularLTO(BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
const SymbolResolution *&ResI, const SymbolResolution *ResE);
Error linkRegularLTO(RegularLTOState::AddedModule Mod,
bool LivenessFromIndex);
Error addThinLTO(BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
const SymbolResolution *&ResI, const SymbolResolution *ResE);
Error runRegularLTO(AddStreamFn AddStream);
Error runThinLTO(AddStreamFn AddStream, NativeObjectCache Cache,
const DenseSet<GlobalValue::GUID> &GUIDPreservedSymbols);
Error checkPartiallySplit();
mutable bool CalledGetMaxTasks = false;
// Use Optional to distinguish false from not yet initialized.
Optional<bool> EnableSplitLTOUnit;
// Identify symbols exported dynamically, and that therefore could be
// referenced by a shared library not visible to the linker.
DenseSet<GlobalValue::GUID> DynamicExportSymbols;
};
/// The resolution for a symbol. The linker must provide a SymbolResolution for
/// each global symbol based on its internal resolution of that symbol.
struct SymbolResolution {
SymbolResolution()
: Prevailing(0), FinalDefinitionInLinkageUnit(0), VisibleToRegularObj(0),
ExportDynamic(0), LinkerRedefined(0) {}
/// The linker has chosen this definition of the symbol.
unsigned Prevailing : 1;
/// The definition of this symbol is unpreemptable at runtime and is known to
/// be in this linkage unit.
unsigned FinalDefinitionInLinkageUnit : 1;
/// The definition of this symbol is visible outside of the LTO unit.
unsigned VisibleToRegularObj : 1;
/// The symbol was exported dynamically, and therefore could be referenced
/// by a shared library not visible to the linker.
unsigned ExportDynamic : 1;
/// The symbol is a linker-redefined version of the symbol, which appeared in
/// a -wrap or -defsym linker option.
unsigned LinkerRedefined : 1;
};
} // namespace lto
} // namespace llvm
#endif