
[Transforms] Use llvm::erase_if (NFC)

Kazu Hirata 2020-12-17 19:53:09 -08:00
parent d53504a97c
commit 4088b88d51
8 changed files with 60 additions and 92 deletions
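
Every hunk in this commit replaces the C++ erase-remove idiom, C.erase(remove_if(C.begin(), C.end(), Pred), C.end()), with a single call to llvm::erase_if from llvm/ADT/STLExtras.h; as the NFC tag says, there is no functional change. The following minimal standalone sketch shows why the two forms are equivalent (the sketch::erase_if template and the sample vectors are illustrative stand-ins, not LLVM code):

#include <algorithm>
#include <cassert>
#include <vector>

namespace sketch {
// Simplified stand-in for llvm::erase_if; the real helper in
// llvm/ADT/STLExtras.h is implemented essentially this way.
template <typename Container, typename UnaryPredicate>
void erase_if(Container &C, UnaryPredicate P) {
  C.erase(std::remove_if(C.begin(), C.end(), P), C.end());
}
} // namespace sketch

int main() {
  auto IsEven = [](int N) { return N % 2 == 0; };

  // Before: the two-step erase-remove idiom, as on the removed side of
  // each hunk below. std::remove_if only shuffles the kept elements to
  // the front; the trailing erase actually shrinks the container.
  std::vector<int> V = {1, 2, 3, 4, 5, 6};
  V.erase(std::remove_if(V.begin(), V.end(), IsEven), V.end());

  // After: the same removal as one self-describing call.
  std::vector<int> W = {1, 2, 3, 4, 5, 6};
  sketch::erase_if(W, IsEven);

  assert(V == W && V.size() == 3); // both now hold {1, 3, 5}
  return 0;
}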


@@ -950,10 +950,9 @@ void CHR::checkScopeHoistable(CHRScope *Scope) {
                  << "Dropped select due to unhoistable branch";
         });
       }
-      Selects.erase(std::remove_if(Selects.begin(), Selects.end(),
-                                   [EntryBB](SelectInst *SI) {
-                                     return SI->getParent() == EntryBB;
-                                   }), Selects.end());
+      llvm::erase_if(Selects, [EntryBB](SelectInst *SI) {
+        return SI->getParent() == EntryBB;
+      });
       Unhoistables.clear();
       InsertPoint = Branch;
     }


@@ -666,7 +666,7 @@ bool GuardWideningImpl::combineRangeChecks(
     };

     copy_if(Checks, std::back_inserter(CurrentChecks), IsCurrentCheck);
-    Checks.erase(remove_if(Checks, IsCurrentCheck), Checks.end());
+    erase_if(Checks, IsCurrentCheck);

     assert(CurrentChecks.size() != 0 && "We know we have at least one!");


@@ -1399,7 +1399,7 @@ bool IndVarSimplify::optimizeLoopExits(Loop *L, SCEVExpander &Rewriter) {

   // Remove all exits which aren't both rewriteable and execute on every
   // iteration.
-  auto NewEnd = llvm::remove_if(ExitingBlocks, [&](BasicBlock *ExitingBB) {
+  llvm::erase_if(ExitingBlocks, [&](BasicBlock *ExitingBB) {
     // If our exitting block exits multiple loops, we can only rewrite the
     // innermost one. Otherwise, we're changing how many times the innermost
     // loop runs before it exits.
@@ -1421,7 +1421,6 @@ bool IndVarSimplify::optimizeLoopExits(Loop *L, SCEVExpander &Rewriter) {

     return false;
   });
-  ExitingBlocks.erase(NewEnd, ExitingBlocks.end());

   if (ExitingBlocks.empty())
     return false;


@@ -2038,8 +2038,7 @@ static void relocationViaAlloca(
 /// tests in ways which make them less useful in testing fused safepoints.
 template <typename T> static void unique_unsorted(SmallVectorImpl<T> &Vec) {
   SmallSet<T, 8> Seen;
-  Vec.erase(remove_if(Vec, [&](const T &V) { return !Seen.insert(V).second; }),
-            Vec.end());
+  erase_if(Vec, [&](const T &V) { return !Seen.insert(V).second; });
 }

 /// Insert holders so that each Value is obviously live through the entire


@@ -467,12 +467,8 @@ class AllocaSlices::partition_iterator
         // Remove the uses which have ended in the prior partition. This
         // cannot change the max split slice end because we just checked that
         // the prior partition ended prior to that max.
-        P.SplitTails.erase(llvm::remove_if(P.SplitTails,
-                                           [&](Slice *S) {
-                                             return S->endOffset() <=
-                                                    P.EndOffset;
-                                           }),
-                           P.SplitTails.end());
+        llvm::erase_if(P.SplitTails,
+                       [&](Slice *S) { return S->endOffset() <= P.EndOffset; });
         assert(llvm::any_of(P.SplitTails,
                             [&](Slice *S) {
                               return S->endOffset() == MaxSplitSliceEndOffset;
@@ -1076,9 +1072,7 @@ AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI)
     return;
   }

-  Slices.erase(
-      llvm::remove_if(Slices, [](const Slice &S) { return S.isDead(); }),
-      Slices.end());
+  llvm::erase_if(Slices, [](const Slice &S) { return S.isDead(); });

   // Sort the uses. This arranges for the offsets to be in ascending order,
   // and the sizes to be in descending order.
@@ -1932,12 +1926,9 @@ static VectorType *isVectorPromotionViable(Partition &P, const DataLayout &DL) {
   // do that until all the backends are known to produce good code for all
   // integer vector types.
   if (!HaveCommonEltTy) {
-    CandidateTys.erase(
-        llvm::remove_if(CandidateTys,
-                        [](VectorType *VTy) {
-                          return !VTy->getElementType()->isIntegerTy();
-                        }),
-        CandidateTys.end());
+    llvm::erase_if(CandidateTys, [](VectorType *VTy) {
+      return !VTy->getElementType()->isIntegerTy();
+    });

     // If there were no integer vector types, give up.
     if (CandidateTys.empty())
@@ -3902,63 +3893,53 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
   // such loads and stores, we can only pre-split them if their splits exactly
   // match relative to their starting offset. We have to verify this prior to
   // any rewriting.
-  Stores.erase(
-      llvm::remove_if(Stores,
-                      [&UnsplittableLoads, &SplitOffsetsMap](StoreInst *SI) {
-                        // Lookup the load we are storing in our map of split
-                        // offsets.
-                        auto *LI = cast<LoadInst>(SI->getValueOperand());
-                        // If it was completely unsplittable, then we're done,
-                        // and this store can't be pre-split.
-                        if (UnsplittableLoads.count(LI))
-                          return true;
+  llvm::erase_if(Stores, [&UnsplittableLoads, &SplitOffsetsMap](StoreInst *SI) {
+    // Lookup the load we are storing in our map of split
+    // offsets.
+    auto *LI = cast<LoadInst>(SI->getValueOperand());
+    // If it was completely unsplittable, then we're done,
+    // and this store can't be pre-split.
+    if (UnsplittableLoads.count(LI))
+      return true;

-                        auto LoadOffsetsI = SplitOffsetsMap.find(LI);
-                        if (LoadOffsetsI == SplitOffsetsMap.end())
-                          return false; // Unrelated loads are definitely safe.
-                        auto &LoadOffsets = LoadOffsetsI->second;
+    auto LoadOffsetsI = SplitOffsetsMap.find(LI);
+    if (LoadOffsetsI == SplitOffsetsMap.end())
+      return false; // Unrelated loads are definitely safe.
+    auto &LoadOffsets = LoadOffsetsI->second;

-                        // Now lookup the store's offsets.
-                        auto &StoreOffsets = SplitOffsetsMap[SI];
+    // Now lookup the store's offsets.
+    auto &StoreOffsets = SplitOffsetsMap[SI];

-                        // If the relative offsets of each split in the load and
-                        // store match exactly, then we can split them and we
-                        // don't need to remove them here.
-                        if (LoadOffsets.Splits == StoreOffsets.Splits)
-                          return false;
+    // If the relative offsets of each split in the load and
+    // store match exactly, then we can split them and we
+    // don't need to remove them here.
+    if (LoadOffsets.Splits == StoreOffsets.Splits)
+      return false;

-                        LLVM_DEBUG(
-                            dbgs()
-                            << "    Mismatched splits for load and store:\n"
-                            << "      " << *LI << "\n"
-                            << "      " << *SI << "\n");
+    LLVM_DEBUG(dbgs() << "    Mismatched splits for load and store:\n"
+                      << "      " << *LI << "\n"
+                      << "      " << *SI << "\n");

-                        // We've found a store and load that we need to split
-                        // with mismatched relative splits. Just give up on them
-                        // and remove both instructions from our list of
-                        // candidates.
-                        UnsplittableLoads.insert(LI);
-                        return true;
-                      }),
-      Stores.end());
+    // We've found a store and load that we need to split
+    // with mismatched relative splits. Just give up on them
+    // and remove both instructions from our list of
+    // candidates.
+    UnsplittableLoads.insert(LI);
+    return true;
+  });
   // Now we have to go *back* through all the stores, because a later store may
   // have caused an earlier store's load to become unsplittable and if it is
   // unsplittable for the later store, then we can't rely on it being split in
   // the earlier store either.
-  Stores.erase(llvm::remove_if(Stores,
-                               [&UnsplittableLoads](StoreInst *SI) {
-                                 auto *LI =
-                                     cast<LoadInst>(SI->getValueOperand());
-                                 return UnsplittableLoads.count(LI);
-                               }),
-               Stores.end());
+  llvm::erase_if(Stores, [&UnsplittableLoads](StoreInst *SI) {
+    auto *LI = cast<LoadInst>(SI->getValueOperand());
+    return UnsplittableLoads.count(LI);
+  });
   // Once we've established all the loads that can't be split for some reason,
   // filter any that made it into our list out.
-  Loads.erase(llvm::remove_if(Loads,
-                              [&UnsplittableLoads](LoadInst *LI) {
-                                return UnsplittableLoads.count(LI);
-                              }),
-              Loads.end());
+  llvm::erase_if(Loads, [&UnsplittableLoads](LoadInst *LI) {
+    return UnsplittableLoads.count(LI);
+  });

   // If no loads or stores are left, there is no pre-splitting to be done for
   // this alloca.
@@ -4232,8 +4213,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
   }

   // Remove the killed slices that have ben pre-split.
-  AS.erase(llvm::remove_if(AS, [](const Slice &S) { return S.isDead(); }),
-           AS.end());
+  llvm::erase_if(AS, [](const Slice &S) { return S.isDead(); });

   // Insert our new slices. This will sort and merge them into the sorted
   // sequence.
@@ -4247,11 +4227,9 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {

   // Finally, don't try to promote any allocas that new require re-splitting.
   // They have already been added to the worklist above.
-  PromotableAllocas.erase(
-      llvm::remove_if(
-          PromotableAllocas,
-          [&](AllocaInst *AI) { return ResplitPromotableAllocas.count(AI); }),
-      PromotableAllocas.end());
+  llvm::erase_if(PromotableAllocas, [&](AllocaInst *AI) {
+    return ResplitPromotableAllocas.count(AI);
+  });

   return true;
 }
@@ -4768,8 +4746,7 @@ PreservedAnalyses SROA::runImpl(Function &F, DominatorTree &RunDT,
       auto IsInSet = [&](AllocaInst *AI) { return DeletedAllocas.count(AI); };
       Worklist.remove_if(IsInSet);
       PostPromotionWorklist.remove_if(IsInSet);
-      PromotableAllocas.erase(llvm::remove_if(PromotableAllocas, IsInSet),
-                              PromotableAllocas.end());
+      llvm::erase_if(PromotableAllocas, IsInSet);
       DeletedAllocas.clear();
     }
   }


@@ -756,13 +756,10 @@ static bool tryToSpeculatePHIs(SmallVectorImpl<PHINode *> &PNs,
   // For each PHI node in this block, check whether there are immediate folding
   // opportunities from speculation, and whether that speculation will be
   // valid. This determise the set of safe PHIs to speculate.
-  PNs.erase(llvm::remove_if(PNs,
-                            [&](PHINode *PN) {
-                              return !isSafeAndProfitableToSpeculateAroundPHI(
-                                  *PN, CostSavingsMap, PotentialSpecSet,
-                                  UnsafeSet, DT, TTI);
-                            }),
-            PNs.end());
+  llvm::erase_if(PNs, [&](PHINode *PN) {
+    return !isSafeAndProfitableToSpeculateAroundPHI(
+        *PN, CostSavingsMap, PotentialSpecSet, UnsafeSet, DT, TTI);
+  });
   // If no PHIs were profitable, skip.
   if (PNs.empty()) {
     LLVM_DEBUG(dbgs() << "  No safe and profitable PHIs found!\n");


@@ -2194,10 +2194,9 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
     // match the callee's return type, we also need to change the return type of
     // the intrinsic.
     if (Caller->getReturnType() == CB.getType()) {
-      auto NewEnd = llvm::remove_if(Returns, [](ReturnInst *RI) {
+      llvm::erase_if(Returns, [](ReturnInst *RI) {
         return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
       });
-      Returns.erase(NewEnd, Returns.end());
     } else {
       SmallVector<ReturnInst *, 8> NormalReturns;
       Function *NewDeoptIntrinsic = Intrinsic::getDeclaration(


@@ -474,10 +474,8 @@ void ProcessSwitchInst(SwitchInst *SI,
   // cases.
   assert(MaxPop > 0 && PopSucc);
   Default = PopSucc;
-  Cases.erase(
-      llvm::remove_if(
-          Cases, [PopSucc](const CaseRange &R) { return R.BB == PopSucc; }),
-      Cases.end());
+  llvm::erase_if(Cases,
+                 [PopSucc](const CaseRange &R) { return R.BB == PopSucc; });

   // If there are no cases left, just branch.
   if (Cases.empty()) {