
Cleanup: Reformat PartialInliner.cpp to have current LLVM style conventions

Modify the variable names and code style to be that of modern LLVM.

Patch by River Riddle!

Differential Revision: https://reviews.llvm.org/D22743

llvm-svn: 276610
Author: Sean Silva
Date:   2016-07-25 05:57:59 +00:00
parent 4efed6f1fe
commit abda103a0a
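The substance of the change below is purely stylistic: local variables move to the current LLVM convention of CamelCase names starting with an upper-case letter, the pointer star attaches to the variable name rather than the type, single-statement branch bodies get their own line, and long declarations are rewrapped (consistent with what clang-format's LLVM style produces). A before/after illustration using lines taken from the patch itself:

    // Old style:
    BasicBlock* entryBlock = &F->front();
    if (currFunc->use_empty()) continue;

    // Current LLVM style:
    BasicBlock *EntryBlock = &F->front();
    if (CurrFunc->use_empty())
      continue;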

lib/Transforms/IPO/PartialInliner.cpp

@@ -51,94 +51,95 @@ struct PartialInlinerLegacyPass : public ModulePass {
       return false;

     AssumptionCacheTracker *ACT = &getAnalysis<AssumptionCacheTracker>();
-    std::function<AssumptionCache &(Function &)> GetAssumptionCache = [&ACT](
-        Function &F) -> AssumptionCache & {
+    std::function<AssumptionCache &(Function &)> GetAssumptionCache =
+        [&ACT](Function &F) -> AssumptionCache & {
       return ACT->getAssumptionCache(F);
     };
     InlineFunctionInfo IFI(nullptr, &GetAssumptionCache);
     return PartialInlinerImpl(IFI).run(M);
   }
 };
 }

 Function *PartialInlinerImpl::unswitchFunction(Function *F) {
   // First, verify that this function is an unswitching candidate...
-  BasicBlock *entryBlock = &F->front();
-  BranchInst *BR = dyn_cast<BranchInst>(entryBlock->getTerminator());
+  BasicBlock *EntryBlock = &F->front();
+  BranchInst *BR = dyn_cast<BranchInst>(EntryBlock->getTerminator());
   if (!BR || BR->isUnconditional())
     return nullptr;

-  BasicBlock* returnBlock = nullptr;
-  BasicBlock* nonReturnBlock = nullptr;
-  unsigned returnCount = 0;
-  for (BasicBlock *BB : successors(entryBlock)) {
+  BasicBlock *ReturnBlock = nullptr;
+  BasicBlock *NonReturnBlock = nullptr;
+  unsigned ReturnCount = 0;
+  for (BasicBlock *BB : successors(EntryBlock)) {
     if (isa<ReturnInst>(BB->getTerminator())) {
-      returnBlock = BB;
-      returnCount++;
+      ReturnBlock = BB;
+      ReturnCount++;
     } else
-      nonReturnBlock = BB;
+      NonReturnBlock = BB;
   }

-  if (returnCount != 1)
+  if (ReturnCount != 1)
     return nullptr;

   // Clone the function, so that we can hack away on it.
   ValueToValueMapTy VMap;
-  Function* duplicateFunction = CloneFunction(F, VMap);
-  duplicateFunction->setLinkage(GlobalValue::InternalLinkage);
-  BasicBlock* newEntryBlock = cast<BasicBlock>(VMap[entryBlock]);
-  BasicBlock* newReturnBlock = cast<BasicBlock>(VMap[returnBlock]);
-  BasicBlock* newNonReturnBlock = cast<BasicBlock>(VMap[nonReturnBlock]);
+  Function *DuplicateFunction = CloneFunction(F, VMap);
+  DuplicateFunction->setLinkage(GlobalValue::InternalLinkage);
+  BasicBlock *NewEntryBlock = cast<BasicBlock>(VMap[EntryBlock]);
+  BasicBlock *NewReturnBlock = cast<BasicBlock>(VMap[ReturnBlock]);
+  BasicBlock *NewNonReturnBlock = cast<BasicBlock>(VMap[NonReturnBlock]);

   // Go ahead and update all uses to the duplicate, so that we can just
   // use the inliner functionality when we're done hacking.
-  F->replaceAllUsesWith(duplicateFunction);
+  F->replaceAllUsesWith(DuplicateFunction);

   // Special hackery is needed with PHI nodes that have inputs from more than
   // one extracted block. For simplicity, just split the PHIs into a two-level
   // sequence of PHIs, some of which will go in the extracted region, and some
   // of which will go outside.
-  BasicBlock* preReturn = newReturnBlock;
-  newReturnBlock = newReturnBlock->splitBasicBlock(
-      newReturnBlock->getFirstNonPHI()->getIterator());
-  BasicBlock::iterator I = preReturn->begin();
-  Instruction *Ins = &newReturnBlock->front();
-  while (I != preReturn->end()) {
-    PHINode* OldPhi = dyn_cast<PHINode>(I);
-    if (!OldPhi) break;
+  BasicBlock *PreReturn = NewReturnBlock;
+  NewReturnBlock = NewReturnBlock->splitBasicBlock(
+      NewReturnBlock->getFirstNonPHI()->getIterator());
+  BasicBlock::iterator I = PreReturn->begin();
+  Instruction *Ins = &NewReturnBlock->front();
+  while (I != PreReturn->end()) {
+    PHINode *OldPhi = dyn_cast<PHINode>(I);
+    if (!OldPhi)
+      break;
-    PHINode *retPhi = PHINode::Create(OldPhi->getType(), 2, "", Ins);
-    OldPhi->replaceAllUsesWith(retPhi);
-    Ins = newReturnBlock->getFirstNonPHI();
+    PHINode *RetPhi = PHINode::Create(OldPhi->getType(), 2, "", Ins);
+    OldPhi->replaceAllUsesWith(RetPhi);
+    Ins = NewReturnBlock->getFirstNonPHI();
-    retPhi->addIncoming(&*I, preReturn);
-    retPhi->addIncoming(OldPhi->getIncomingValueForBlock(newEntryBlock),
-                        newEntryBlock);
-    OldPhi->removeIncomingValue(newEntryBlock);
+    RetPhi->addIncoming(&*I, PreReturn);
+    RetPhi->addIncoming(OldPhi->getIncomingValueForBlock(NewEntryBlock),
+                        NewEntryBlock);
+    OldPhi->removeIncomingValue(NewEntryBlock);
     ++I;
   }
-  newEntryBlock->getTerminator()->replaceUsesOfWith(preReturn, newReturnBlock);
+  NewEntryBlock->getTerminator()->replaceUsesOfWith(PreReturn, NewReturnBlock);

   // Gather up the blocks that we're going to extract.
-  std::vector<BasicBlock*> toExtract;
-  toExtract.push_back(newNonReturnBlock);
-  for (BasicBlock &BB : *duplicateFunction)
-    if (&BB != newEntryBlock && &BB != newReturnBlock &&
-        &BB != newNonReturnBlock)
-      toExtract.push_back(&BB);
+  std::vector<BasicBlock *> ToExtract;
+  ToExtract.push_back(NewNonReturnBlock);
+  for (BasicBlock &BB : *DuplicateFunction)
+    if (&BB != NewEntryBlock && &BB != NewReturnBlock &&
+        &BB != NewNonReturnBlock)
+      ToExtract.push_back(&BB);

   // The CodeExtractor needs a dominator tree.
   DominatorTree DT;
-  DT.recalculate(*duplicateFunction);
+  DT.recalculate(*DuplicateFunction);

   // Extract the body of the if.
-  Function* extractedFunction
-      = CodeExtractor(toExtract, &DT).extractCodeRegion();
+  Function *ExtractedFunction =
+      CodeExtractor(ToExtract, &DT).extractCodeRegion();

   // Inline the top-level if test into all callers.
-  std::vector<User *> Users(duplicateFunction->user_begin(),
-                            duplicateFunction->user_end());
+  std::vector<User *> Users(DuplicateFunction->user_begin(),
+                            DuplicateFunction->user_end());
   for (User *User : Users)
     if (CallInst *CI = dyn_cast<CallInst>(User))
       InlineFunction(CI, IFI);
@@ -147,44 +148,46 @@ Function *PartialInlinerImpl::unswitchFunction(Function *F) {
   // Ditch the duplicate, since we're done with it, and rewrite all remaining
   // users (function pointers, etc.) back to the original function.
-  duplicateFunction->replaceAllUsesWith(F);
-  duplicateFunction->eraseFromParent();
+  DuplicateFunction->replaceAllUsesWith(F);
+  DuplicateFunction->eraseFromParent();

   ++NumPartialInlined;

-  return extractedFunction;
+  return ExtractedFunction;
 }

 bool PartialInlinerImpl::run(Module &M) {
-  std::vector<Function*> worklist;
-  worklist.reserve(M.size());
+  std::vector<Function *> Worklist;
+  Worklist.reserve(M.size());
   for (Function &F : M)
     if (!F.use_empty() && !F.isDeclaration())
-      worklist.push_back(&F);
+      Worklist.push_back(&F);

-  bool changed = false;
-  while (!worklist.empty()) {
-    Function* currFunc = worklist.back();
-    worklist.pop_back();
+  bool Changed = false;
+  while (!Worklist.empty()) {
+    Function *CurrFunc = Worklist.back();
+    Worklist.pop_back();

-    if (currFunc->use_empty()) continue;
+    if (CurrFunc->use_empty())
+      continue;

-    bool recursive = false;
-    for (User *U : currFunc->users())
-      if (Instruction* I = dyn_cast<Instruction>(U))
-        if (I->getParent()->getParent() == currFunc) {
-          recursive = true;
+    bool Recursive = false;
+    for (User *U : CurrFunc->users())
+      if (Instruction *I = dyn_cast<Instruction>(U))
+        if (I->getParent()->getParent() == CurrFunc) {
+          Recursive = true;
           break;
         }
-    if (recursive) continue;
+    if (Recursive)
+      continue;

-    if (Function* newFunc = unswitchFunction(currFunc)) {
-      worklist.push_back(newFunc);
-      changed = true;
+    if (Function *newFunc = unswitchFunction(CurrFunc)) {
+      Worklist.push_back(newFunc);
+      Changed = true;
     }
   }

-  return changed;
+  return Changed;
 }

 char PartialInlinerLegacyPass::ID = 0;
@@ -201,8 +204,8 @@ ModulePass *llvm::createPartialInliningPass() {
 PreservedAnalyses PartialInlinerPass::run(Module &M,
                                           ModuleAnalysisManager &AM) {
   auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
-  std::function<AssumptionCache &(Function &)> GetAssumptionCache = [&FAM](
-      Function &F) -> AssumptionCache & {
+  std::function<AssumptionCache &(Function &)> GetAssumptionCache =
+      [&FAM](Function &F) -> AssumptionCache & {
     return FAM.getResult<AssumptionAnalysis>(F);
   };
   InlineFunctionInfo IFI(nullptr, &GetAssumptionCache);
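As background on what this purely cosmetic patch is reformatting (the pass logic itself is unchanged): unswitchFunction only considers functions whose entry block ends in a conditional branch where exactly one successor does nothing but return. It clones the function, splits the PHI nodes in the return block into two levels, hands every block except the entry and return blocks to CodeExtractor, and then inlines the now-tiny entry test into all callers. A rough source-level sketch of the kind of function this targets; the code and names below are hypothetical, and whether the pass actually fires depends on how earlier passes shape the CFG:

    // Hypothetical example; not from LLVM's sources.
    int sum(int N) {
      if (N <= 0)                   // entry block: conditional branch
        return 0;                   // return block: the cheap, inlinable part
      int Total = 0;
      for (int I = 1; I <= N; ++I)  // the loop keeps this successor from ending in a
        Total += I;                 //   bare return, so it becomes the extracted region
      return Total;
    }

After the transform, callers can have the N <= 0 test and the early return inlined, while the loop lives in the new function produced by CodeExtractor.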