1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-11-22 18:54:02 +01:00

[llvm][Inliner] Add an optional PriorityInlineOrder

This patch adds an optional PriorityInlineOrder, which uses the heap to order inlining.
The call site whose size is smaller would have a higher priority.

Reviewed By: mtrofin

Differential Revision: https://reviews.llvm.org/D104028
This commit is contained in:
Liqiang Tao 2021-06-19 10:17:19 +08:00
parent b6ca0c60bb
commit d5e843fe26
4 changed files with 82 additions and 17 deletions

View File

@ -99,6 +99,10 @@ static cl::opt<std::string> CGSCCInlineReplayFile(
"by inlining from cgscc inline remarks."),
cl::Hidden);
static cl::opt<bool> InlineEnablePriorityOrder(
"inline-enable-priority-order", cl::Hidden, cl::init(false),
cl::desc("Enable the priority inline order for the inliner"));
LegacyInlinerBase::LegacyInlinerBase(char &ID) : CallGraphSCCPass(ID) {}
LegacyInlinerBase::LegacyInlinerBase(char &ID, bool InsertLifetime)
@ -673,6 +677,7 @@ InlinerPass::getAdvisor(const ModuleAnalysisManagerCGSCCProxy::Result &MAM,
template <typename T> class InlineOrder {
public:
using reference = T &;
using const_reference = const T &;
virtual ~InlineOrder() {}
@ -680,9 +685,9 @@ public:
virtual void push(const T &Elt) = 0;
virtual void pop() = 0;
virtual T pop() = 0;
virtual reference front() = 0;
virtual const_reference front() = 0;
virtual void erase_if(function_ref<bool(T)> Pred) = 0;
@ -692,18 +697,19 @@ public:
template <typename T, typename Container = SmallVector<T, 16>>
class DefaultInlineOrder : public InlineOrder<T> {
using reference = T &;
using const_reference = const T &;
public:
size_t size() override { return Calls.size() - FirstIndex; }
void push(const T &Elt) override { Calls.push_back(Elt); }
void pop() override {
T pop() override {
assert(size() > 0);
FirstIndex++;
return Calls[FirstIndex++];
}
reference front() override {
const_reference front() override {
assert(size() > 0);
return Calls[FirstIndex];
}
@ -718,6 +724,57 @@ private:
size_t FirstIndex = 0;
};
class PriorityInlineOrder : public InlineOrder<std::pair<CallBase *, int>> {
  using T = std::pair<CallBase *, int>;
  using reference = T &;
  using const_reference = const T &;

  // Heap comparator: a smaller goodness value (smaller callee) is the better
  // candidate, so larger values are ordered after smaller ones, making the
  // smallest-callee call site surface at the front of the heap.
  static bool cmp(const T &P1, const T &P2) { return P1.second > P2.second; }

  // Goodness of a call site: the instruction count of its callee. Assumes the
  // call site has a known direct callee (callers only push such call sites).
  int evaluate(CallBase *CB) {
    Function *Callee = CB->getCalledFunction();
    return (int)Callee->getInstructionCount();
  }

public:
  size_t size() override { return Heap.size(); }

  // Insert a {call site, inline history id} pair. The heap element stores the
  // goodness in place of the history id; the history id is kept separately in
  // InlineHistoryMap, keyed by the call site.
  void push(const T &Elt) override {
    CallBase *CB = Elt.first;
    const int InlineHistoryID = Elt.second;
    const int Goodness = evaluate(CB);

    Heap.push_back({CB, Goodness});
    std::push_heap(Heap.begin(), Heap.end(), cmp);
    InlineHistoryMap[CB] = InlineHistoryID;
  }

  // Remove and return the best candidate as {call site, inline history id},
  // dropping its InlineHistoryMap entry.
  T pop() override {
    assert(size() > 0);
    CallBase *CB = Heap.front().first;
    T Result = std::make_pair(CB, InlineHistoryMap[CB]);
    InlineHistoryMap.erase(CB);

    std::pop_heap(Heap.begin(), Heap.end(), cmp);
    Heap.pop_back();
    return Result;
  }

  // Peek at the best candidate as {call site, inline history id}. Returns a
  // reference into InlineHistoryMap (DenseMapPair publicly derives from
  // std::pair, so it converts to const T &); the reference is only valid
  // until the map is next mutated.
  const_reference front() override {
    assert(size() > 0);
    CallBase *CB = Heap.front().first;
    return *InlineHistoryMap.find(CB);
  }

  // Remove every element matching Pred, and erase the matching
  // InlineHistoryMap entries as well — otherwise removed call sites would
  // leave stale CallBase* keys behind in the map.
  // NOTE: Pred sees the heap representation, i.e. .second is the goodness,
  // not the inline history id. Pred may be evaluated more than once per
  // element and must therefore be side-effect free.
  void erase_if(function_ref<bool(T)> Pred) override {
    for (const T &Elt : Heap)
      if (Pred(Elt))
        InlineHistoryMap.erase(Elt.first);
    Heap.erase(std::remove_if(Heap.begin(), Heap.end(), Pred), Heap.end());
    std::make_heap(Heap.begin(), Heap.end(), cmp);
  }

private:
  SmallVector<T, 16> Heap;
  DenseMap<CallBase *, int> InlineHistoryMap;
};
PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC,
CGSCCAnalysisManager &AM, LazyCallGraph &CG,
CGSCCUpdateResult &UR) {
@ -740,7 +797,8 @@ PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC,
// We use a single common worklist for calls across the entire SCC. We
// process these in-order and append new calls introduced during inlining to
// the end.
// the end. The PriorityInlineOrder is optional here, in which the smaller
// callee would have a higher priority to inline.
//
// Note that this particular order of processing is actually critical to
// avoid very bad behaviors. Consider *highly connected* call graphs where
@ -762,7 +820,12 @@ PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC,
// this model, but it is uniformly spread across all the functions in the SCC
// and eventually they all become too large to inline, rather than
// incrementally maknig a single function grow in a super linear fashion.
DefaultInlineOrder<std::pair<CallBase *, int>> Calls;
std::unique_ptr<InlineOrder<std::pair<CallBase *, int>>> Calls;
if (InlineEnablePriorityOrder)
Calls = std::make_unique<PriorityInlineOrder>();
else
Calls = std::make_unique<DefaultInlineOrder<std::pair<CallBase *, int>>>();
assert(Calls != nullptr && "Expected an initialized InlineOrder");
// Populate the initial list of calls in this SCC.
for (auto &N : InitialC) {
@ -777,7 +840,7 @@ PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC,
if (auto *CB = dyn_cast<CallBase>(&I))
if (Function *Callee = CB->getCalledFunction()) {
if (!Callee->isDeclaration())
Calls.push({CB, -1});
Calls->push({CB, -1});
else if (!isa<IntrinsicInst>(I)) {
using namespace ore;
setInlineRemark(*CB, "unavailable definition");
@ -791,7 +854,7 @@ PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC,
}
}
}
if (Calls.empty())
if (Calls->empty())
return PreservedAnalyses::all();
// Capture updatable variable for the current SCC.
@ -813,15 +876,15 @@ PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC,
SmallVector<Function *, 4> DeadFunctions;
// Loop forward over all of the calls.
while (!Calls.empty()) {
while (!Calls->empty()) {
// We expect the calls to typically be batched with sequences of calls that
// have the same caller, so we first set up some shared infrastructure for
// this caller. We also do any pruning we can at this layer on the caller
// alone.
Function &F = *Calls.front().first->getCaller();
Function &F = *Calls->front().first->getCaller();
LazyCallGraph::Node &N = *CG.lookup(F);
if (CG.lookupSCC(N) != C) {
Calls.pop();
Calls->pop();
continue;
}
@ -837,9 +900,8 @@ PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC,
// We bail out as soon as the caller has to change so we can update the
// call graph and prepare the context of that new caller.
bool DidInline = false;
while (!Calls.empty() && Calls.front().first->getCaller() == &F) {
auto &P = Calls.front();
Calls.pop();
while (!Calls->empty() && Calls->front().first->getCaller() == &F) {
auto P = Calls->pop();
CallBase *CB = P.first;
const int InlineHistoryID = P.second;
Function &Callee = *CB->getCalledFunction();
@ -909,7 +971,7 @@ PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC,
}
if (NewCallee)
if (!NewCallee->isDeclaration())
Calls.push({ICB, NewHistoryID});
Calls->push({ICB, NewHistoryID});
}
}
@ -926,7 +988,7 @@ PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC,
// made dead by this operation on other functions).
Callee.removeDeadConstantUsers();
if (Callee.use_empty() && !CG.isLibFunction(Callee)) {
Calls.erase_if([&](const std::pair<CallBase *, int> &Call) {
Calls->erase_if([&](const std::pair<CallBase *, int> &Call) {
return Call.first->getCaller() == &Callee;
});
// Clear the body and queue the function itself for deletion when we

View File

@ -1,5 +1,6 @@
; Check the optimizer doesn't crash at inlining the function top and all of its callees are inlined.
; RUN: opt < %s -O3 -S | FileCheck %s
; RUN: opt < %s -O3 -inline-enable-priority-order=true -S | FileCheck %s
define dso_local void (...)* @second(i8** %p) {
entry:

View File

@ -1,5 +1,6 @@
; RUN: opt < %s -inline -S | FileCheck %s
; RUN: opt < %s -passes='cgscc(inline)' -S | FileCheck %s
; RUN: opt < %s -passes='cgscc(inline)' -inline-enable-priority-order=true -S | FileCheck %s
; Test that the inliner correctly handles inlining into invoke sites
; by appending selectors and forwarding _Unwind_Resume directly to the

View File

@ -1,4 +1,5 @@
; RUN: opt < %s -passes='cgscc(inline)' -inline-threshold=0 -S | FileCheck %s
; RUN: opt < %s -passes='cgscc(inline)' -inline-threshold=0 -inline-enable-priority-order=true -S | FileCheck %s
; The 'test1_' prefixed functions test the basic 'last callsite' inline
; threshold adjustment where we specifically inline the last call site of an