mirror of
https://github.com/RPCS3/llvm-mirror.git
synced 2024-10-19 11:02:59 +02:00
[ThinLTO] Re-order modules for optimal multi-threaded processing
[ThinLTO] Re-use an optimization from the old LTO API (used by ld64). This sorts modules in descending order of bitcode size, so that larger modules are processed first. This allows smaller modules to be processed last, better filling free thread 'slots', and thus allows for better multi-threaded load balancing. In our case (on dual Intel Xeon Gold 6140, Windows 10 version 2004, two-stage build), this saves 15 sec when linking `clang.exe` with LLD & `-flto=thin`, `/opt:lldltojobs=all`, no ThinLTO cache, -DLLVM_INTEGRATED_CRT_ALLOC=d:\git\rpmalloc. Before patch: 102 sec. After patch: 85 sec. Inspired by the work done by David Callahan in D60495. Differential Revision: https://reviews.llvm.org/D87966
This commit is contained in:
parent
ac65fcae49
commit
09fd7108ad
@ -91,6 +91,10 @@ setupLLVMOptimizationRemarks(LLVMContext &Context, StringRef RemarksFilename,
|
||||
Expected<std::unique_ptr<ToolOutputFile>>
|
||||
setupStatsFile(StringRef StatsFilename);
|
||||
|
||||
/// Produces a container ordering for optimal multi-threaded processing. Returns
|
||||
/// ordered indices to elements in the input array.
|
||||
std::vector<int> generateModulesOrdering(ArrayRef<BitcodeModule *> R);
|
||||
|
||||
class LTO;
|
||||
struct SymbolResolution;
|
||||
class ThinBackendProc;
|
||||
|
@ -1443,15 +1443,21 @@ Error LTO::runThinLTO(AddStreamFn AddStream, NativeObjectCache Cache,
|
||||
auto &ModuleMap =
|
||||
ThinLTO.ModulesToCompile ? *ThinLTO.ModulesToCompile : ThinLTO.ModuleMap;
|
||||
|
||||
std::vector<BitcodeModule *> ModulesVec;
|
||||
ModulesVec.reserve(ModuleMap.size());
|
||||
for (auto &Mod : ModuleMap)
|
||||
ModulesVec.push_back(&Mod.second);
|
||||
std::vector<int> ModulesOrdering = generateModulesOrdering(ModulesVec);
|
||||
|
||||
// Tasks 0 through ParallelCodeGenParallelismLevel-1 are reserved for combined
|
||||
// module and parallel code generation partitions.
|
||||
unsigned Task = RegularLTO.ParallelCodeGenParallelismLevel;
|
||||
for (auto &Mod : ModuleMap) {
|
||||
if (Error E = BackendProc->start(Task, Mod.second, ImportLists[Mod.first],
|
||||
ExportLists[Mod.first],
|
||||
ResolvedODR[Mod.first], ThinLTO.ModuleMap))
|
||||
for (auto IndexCount : ModulesOrdering) {
|
||||
auto &Mod = *(ModuleMap.begin() + IndexCount);
|
||||
if (Error E = BackendProc->start(
|
||||
RegularLTO.ParallelCodeGenParallelismLevel + IndexCount, Mod.second,
|
||||
ImportLists[Mod.first], ExportLists[Mod.first],
|
||||
ResolvedODR[Mod.first], ThinLTO.ModuleMap))
|
||||
return E;
|
||||
++Task;
|
||||
}
|
||||
|
||||
return BackendProc->wait();
|
||||
@ -1495,3 +1501,18 @@ lto::setupStatsFile(StringRef StatsFilename) {
|
||||
StatsFile->keep();
|
||||
return std::move(StatsFile);
|
||||
}
|
||||
|
||||
// Compute the ordering we will process the inputs: the rough heuristic here
|
||||
// is to sort them per size so that the largest module get schedule as soon as
|
||||
// possible. This is purely a compile-time optimization.
|
||||
std::vector<int> lto::generateModulesOrdering(ArrayRef<BitcodeModule *> R) {
|
||||
std::vector<int> ModulesOrdering;
|
||||
ModulesOrdering.resize(R.size());
|
||||
std::iota(ModulesOrdering.begin(), ModulesOrdering.end(), 0);
|
||||
llvm::sort(ModulesOrdering, [&](int LeftIndex, int RightIndex) {
|
||||
auto LSize = R[LeftIndex]->getBuffer().size();
|
||||
auto RSize = R[RightIndex]->getBuffer().size();
|
||||
return LSize > RSize;
|
||||
});
|
||||
return ModulesOrdering;
|
||||
}
|
||||
|
@ -1054,19 +1054,11 @@ void ThinLTOCodeGenerator::run() {
|
||||
ModuleToDefinedGVSummaries[ModuleIdentifier];
|
||||
}
|
||||
|
||||
// Compute the ordering we will process the inputs: the rough heuristic here
|
||||
// is to sort them per size so that the largest module get schedule as soon as
|
||||
// possible. This is purely a compile-time optimization.
|
||||
std::vector<int> ModulesOrdering;
|
||||
ModulesOrdering.resize(Modules.size());
|
||||
std::iota(ModulesOrdering.begin(), ModulesOrdering.end(), 0);
|
||||
llvm::sort(ModulesOrdering, [&](int LeftIndex, int RightIndex) {
|
||||
auto LSize =
|
||||
Modules[LeftIndex]->getSingleBitcodeModule().getBuffer().size();
|
||||
auto RSize =
|
||||
Modules[RightIndex]->getSingleBitcodeModule().getBuffer().size();
|
||||
return LSize > RSize;
|
||||
});
|
||||
std::vector<BitcodeModule *> ModulesVec;
|
||||
ModulesVec.reserve(Modules.size());
|
||||
for (auto &Mod : Modules)
|
||||
ModulesVec.push_back(&Mod->getSingleBitcodeModule());
|
||||
std::vector<int> ModulesOrdering = lto::generateModulesOrdering(ModulesVec);
|
||||
|
||||
// Parallel optimizer + codegen
|
||||
{
|
||||
|
Loading…
Reference in New Issue
Block a user