
Now that we have C++11, turn simple functors into lambdas and remove a ton of boilerplate.

No intended functionality change.

llvm-svn: 202588
Benjamin Kramer 2014-03-01 11:47:00 +00:00
parent ae85177b38
commit 803ba41365
20 changed files with 114 additions and 312 deletions
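Every hunk below follows the same mechanical pattern: a named comparator or predicate struct — often an anonymous-namespace class whose constructor exists only to smuggle state into operator() — is replaced by a lambda whose capture list carries that state directly. A minimal before/after sketch of the shape (hypothetical names, not code from this commit):

#include <algorithm>
#include <cstdlib>
#include <vector>

// Pre-C++11 shape: a functor struct declared away from the call site.
struct AbsLess {
  bool operator()(int L, int R) const { return std::abs(L) < std::abs(R); }
};

void sortByMagnitude(std::vector<int> &V) {
  // Before: std::sort(V.begin(), V.end(), AbsLess());
  // After: the comparison is stated inline where it is used.
  std::sort(V.begin(), V.end(),
            [](int L, int R) { return std::abs(L) < std::abs(R); });
}

When the comparison needs member state (a DominatorTree, a MachineLoopInfo, the enclosing object), the hunks capture it with [this] or [&] instead of routing it through a constructor.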


@@ -173,29 +173,19 @@ void ValueEnumerator::print(raw_ostream &OS, const ValueMapType &Map,
}
}
// Optimize constant ordering.
namespace {
struct CstSortPredicate {
ValueEnumerator &VE;
explicit CstSortPredicate(ValueEnumerator &ve) : VE(ve) {}
bool operator()(const std::pair<const Value*, unsigned> &LHS,
const std::pair<const Value*, unsigned> &RHS) {
// Sort by plane.
if (LHS.first->getType() != RHS.first->getType())
return VE.getTypeID(LHS.first->getType()) <
VE.getTypeID(RHS.first->getType());
// Then by frequency.
return LHS.second > RHS.second;
}
};
}
/// OptimizeConstants - Reorder constant pool for denser encoding.
void ValueEnumerator::OptimizeConstants(unsigned CstStart, unsigned CstEnd) {
if (CstStart == CstEnd || CstStart+1 == CstEnd) return;
CstSortPredicate P(*this);
std::stable_sort(Values.begin()+CstStart, Values.begin()+CstEnd, P);
std::stable_sort(Values.begin() + CstStart, Values.begin() + CstEnd,
[this](const std::pair<const Value *, unsigned> &LHS,
const std::pair<const Value *, unsigned> &RHS) {
// Sort by plane.
if (LHS.first->getType() != RHS.first->getType())
return getTypeID(LHS.first->getType()) < getTypeID(RHS.first->getType());
// Then by frequency.
return LHS.second > RHS.second;
});
// Ensure that integer and vector of integer constants are at the start of the
// constant pool. This is important so that GEP structure indices come before


@@ -409,23 +409,6 @@ MachineBasicBlock *MachineBlockPlacement::selectBestSuccessor(
return BestSucc;
}
namespace {
/// \brief Predicate struct to detect blocks already placed.
class IsBlockPlaced {
const BlockChain &PlacedChain;
const BlockToChainMapType &BlockToChain;
public:
IsBlockPlaced(const BlockChain &PlacedChain,
const BlockToChainMapType &BlockToChain)
: PlacedChain(PlacedChain), BlockToChain(BlockToChain) {}
bool operator()(MachineBasicBlock *BB) const {
return BlockToChain.lookup(BB) == &PlacedChain;
}
};
}
/// \brief Select the best block from a worklist.
///
/// This looks through the provided worklist as a list of candidate basic
@@ -444,7 +427,9 @@ MachineBasicBlock *MachineBlockPlacement::selectBestCandidateBlock(
// FIXME: If this shows up on profiles, it could be folded (at the cost of
// some code complexity) into the loop below.
WorkList.erase(std::remove_if(WorkList.begin(), WorkList.end(),
IsBlockPlaced(Chain, BlockToChain)),
[&](MachineBasicBlock *BB) {
return BlockToChain.lookup(BB) == &Chain;
}),
WorkList.end());
MachineBasicBlock *BestBlock = 0;


@@ -98,16 +98,6 @@ namespace {
bool PerformTrivialForwardCoalescing(MachineInstr *MI,
MachineBasicBlock *MBB);
};
// SuccessorSorter - Sort Successors according to their loop depth.
struct SuccessorSorter {
SuccessorSorter(MachineLoopInfo *LoopInfo) : LI(LoopInfo) {}
bool operator()(const MachineBasicBlock *LHS,
const MachineBasicBlock *RHS) const {
return LI->getLoopDepth(LHS) < LI->getLoopDepth(RHS);
}
MachineLoopInfo *LI;
};
} // end anonymous namespace
char MachineSinking::ID = 0;
@@ -553,7 +543,12 @@ MachineBasicBlock *MachineSinking::FindSuccToSinkTo(MachineInstr *MI,
// we should sink to.
// We give successors with smaller loop depth higher priority.
SmallVector<MachineBasicBlock*, 4> Succs(MBB->succ_begin(), MBB->succ_end());
std::stable_sort(Succs.begin(), Succs.end(), SuccessorSorter(LI));
// Sort Successors according to their loop depth.
std::stable_sort(
Succs.begin(), Succs.end(),
[this](const MachineBasicBlock *LHS, const MachineBasicBlock *RHS) {
return LI->getLoopDepth(LHS) < LI->getLoopDepth(RHS);
});
for (SmallVectorImpl<MachineBasicBlock *>::iterator SI = Succs.begin(),
E = Succs.end(); SI != E; ++SI) {
MachineBasicBlock *SuccBlock = *SI;


@@ -8197,14 +8197,6 @@ struct LoadedSlice {
};
}
/// \brief Sorts LoadedSlices according to their offset.
struct LoadedSliceSorter {
bool operator()(const LoadedSlice &LHS, const LoadedSlice &RHS) {
assert(LHS.Origin == RHS.Origin && "Different bases not implemented.");
return LHS.getOffsetFromBase() < RHS.getOffsetFromBase();
}
};
/// \brief Check that all bits set in \p UsedBits form a dense region, i.e.,
/// \p UsedBits looks like 0..0 1..1 0..0.
static bool areUsedBitsDense(const APInt &UsedBits) {
@@ -8248,7 +8240,11 @@ static void adjustCostForPairing(SmallVectorImpl<LoadedSlice> &LoadedSlices,
// Sort the slices so that elements that are likely to be next to each
// other in memory are next to each other in the list.
std::sort(LoadedSlices.begin(), LoadedSlices.end(), LoadedSliceSorter());
std::sort(LoadedSlices.begin(), LoadedSlices.end(),
[](const LoadedSlice &LHS, const LoadedSlice &RHS) {
assert(LHS.Origin == RHS.Origin && "Different bases not implemented.");
return LHS.getOffsetFromBase() < RHS.getOffsetFromBase();
});
const TargetLowering &TLI = LoadedSlices[0].DAG->getTargetLoweringInfo();
// First (resp. Second) is the first (resp. Second) potentially candidate
// to be placed in a paired load.
@@ -8852,17 +8848,6 @@ struct MemOpLink {
unsigned SequenceNum;
};
/// Sorts store nodes in a link according to their offset from a shared
/// base ptr.
struct ConsecutiveMemoryChainSorter {
bool operator()(MemOpLink LHS, MemOpLink RHS) {
return
LHS.OffsetFromBase < RHS.OffsetFromBase ||
(LHS.OffsetFromBase == RHS.OffsetFromBase &&
LHS.SequenceNum > RHS.SequenceNum);
}
};
bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
EVT MemVT = St->getMemoryVT();
int64_t ElementSizeBytes = MemVT.getSizeInBits()/8;
@@ -8981,7 +8966,11 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
// Sort the memory operands according to their distance from the base pointer.
std::sort(StoreNodes.begin(), StoreNodes.end(),
ConsecutiveMemoryChainSorter());
[](MemOpLink LHS, MemOpLink RHS) {
return LHS.OffsetFromBase < RHS.OffsetFromBase ||
(LHS.OffsetFromBase == RHS.OffsetFromBase &&
LHS.SequenceNum > RHS.SequenceNum);
});
// Scan the memory operations on the chain and find the first non-consecutive
// store memory address.


@@ -125,20 +125,6 @@ class StackColoring : public MachineFunctionPass {
/// once the coloring is done.
SmallVector<MachineInstr*, 8> Markers;
/// SlotSizeSorter - A Sort utility for arranging stack slots according
/// to their size.
struct SlotSizeSorter {
MachineFrameInfo *MFI;
SlotSizeSorter(MachineFrameInfo *mfi) : MFI(mfi) { }
bool operator()(int LHS, int RHS) {
// We use -1 to denote an uninteresting slot. Place these slots at the end.
if (LHS == -1) return false;
if (RHS == -1) return true;
// Sort according to size.
return MFI->getObjectSize(LHS) > MFI->getObjectSize(RHS);
}
};
public:
static char ID;
StackColoring() : MachineFunctionPass(ID) {
@@ -767,7 +753,13 @@ bool StackColoring::runOnMachineFunction(MachineFunction &Func) {
// Sort the slots according to their size. Place unused slots at the end.
// Use stable sort to guarantee deterministic code generation.
std::stable_sort(SortedSlots.begin(), SortedSlots.end(),
SlotSizeSorter(MFI));
[this](int LHS, int RHS) {
// We use -1 to denote an uninteresting slot. Place these slots at the end.
if (LHS == -1) return false;
if (RHS == -1) return true;
// Sort according to size.
return MFI->getObjectSize(LHS) > MFI->getObjectSize(RHS);
});
bool Changed = true;
while (Changed) {
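Note that this hunk keeps std::stable_sort rather than std::sort, and the comment above spells out why: when two slots compare equal (two -1 slots, or two slots of the same size), stable_sort preserves their original relative order, so the generated code does not depend on the sort implementation's tie-breaking. A small self-contained illustration with made-up slot data:

#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

int main() {
  // (slot index, size); slots 0 and 2 tie on size.
  std::vector<std::pair<int, int>> Slots = {{0, 8}, {1, 16}, {2, 8}};
  // Descending size; stable_sort guarantees slot 0 stays ahead of slot 2.
  std::stable_sort(Slots.begin(), Slots.end(),
                   [](const std::pair<int, int> &L,
                      const std::pair<int, int> &R) {
                     return L.second > R.second;
                   });
  for (const auto &P : Slots)
    std::printf("slot %d size %d\n", P.first, P.second);
  // Always prints: slot 1, then slot 0, then slot 2.
}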


@@ -125,20 +125,12 @@ static ManagedStatic<OptionCatSet> RegisteredOptionCategories;
// Initialise the general option category.
OptionCategory llvm::cl::GeneralCategory("General options");
struct HasName {
HasName(StringRef Name) : Name(Name) {}
bool operator()(const OptionCategory *Category) const {
return Name == Category->getName();
}
StringRef Name;
};
void OptionCategory::registerCategory()
{
void OptionCategory::registerCategory() {
assert(std::count_if(RegisteredOptionCategories->begin(),
RegisteredOptionCategories->end(),
HasName(getName())) == 0 &&
"Duplicate option categories");
[this](const OptionCategory *Category) {
return getName() == Category->getName();
}) == 0 && "Duplicate option categories");
RegisteredOptionCategories->insert(this);
}
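The [this] capture here is what lets the lambda call getName() on the enclosing OptionCategory with no explicit object, where the deleted HasName struct needed a StringRef member and a constructor to carry the same information. A minimal sketch of the idiom with a hypothetical class:

#include <algorithm>
#include <vector>

class Registry {
  std::vector<int> Items;
  int Threshold = 10;

public:
  long countOverThreshold() const {
    // Capturing `this` gives the lambda body access to members
    // (Items, Threshold) as if it were written inside the function.
    return std::count_if(Items.begin(), Items.end(),
                         [this](int I) { return I > Threshold; });
  }
};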


@@ -84,20 +84,6 @@ void Statistic::RegisterStatistic() {
}
}
namespace {
struct NameCompare {
bool operator()(const Statistic *LHS, const Statistic *RHS) const {
int Cmp = std::strcmp(LHS->getName(), RHS->getName());
if (Cmp != 0) return Cmp < 0;
// Secondary key is the description.
return std::strcmp(LHS->getDesc(), RHS->getDesc()) < 0;
}
};
}
// Print information when destroyed, iff command line option is specified.
StatisticInfo::~StatisticInfo() {
llvm::PrintStatistics();
@@ -124,7 +110,14 @@ void llvm::PrintStatistics(raw_ostream &OS) {
}
// Sort the fields by name.
std::stable_sort(Stats.Stats.begin(), Stats.Stats.end(), NameCompare());
std::stable_sort(Stats.Stats.begin(), Stats.Stats.end(),
[](const Statistic *LHS, const Statistic *RHS) {
if (int Cmp = std::strcmp(LHS->getName(), RHS->getName()))
return Cmp < 0;
// Secondary key is the description.
return std::strcmp(LHS->getDesc(), RHS->getDesc()) < 0;
});
// Print out the statistics header...
OS << "===" << std::string(73, '-') << "===\n"


@@ -1724,17 +1724,6 @@ ARMPreAllocLoadStoreOpt::CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1,
return true;
}
namespace {
struct OffsetCompare {
bool operator()(const MachineInstr *LHS, const MachineInstr *RHS) const {
int LOffset = getMemoryOpOffset(LHS);
int ROffset = getMemoryOpOffset(RHS);
assert(LHS == RHS || LOffset != ROffset);
return LOffset > ROffset;
}
};
}
bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
SmallVectorImpl<MachineInstr *> &Ops,
unsigned Base, bool isLd,
@@ -1742,7 +1731,13 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
bool RetVal = false;
// Sort by offset (in reverse order).
std::sort(Ops.begin(), Ops.end(), OffsetCompare());
std::sort(Ops.begin(), Ops.end(),
[](const MachineInstr *LHS, const MachineInstr *RHS) {
int LOffset = getMemoryOpOffset(LHS);
int ROffset = getMemoryOpOffset(RHS);
assert(LHS == RHS || LOffset != ROffset);
return LOffset > ROffset;
});
// The loads / stores of the same base are in order. Scan them from first to
// last and check for the following:


@@ -65,20 +65,6 @@ namespace {
typedef MachineBasicBlock::reverse_iterator ReverseIter;
typedef SmallDenseMap<MachineBasicBlock*, MachineInstr*, 2> BB2BrMap;
/// \brief A functor comparing edge weight of two blocks.
struct CmpWeight {
CmpWeight(const MachineBasicBlock &S,
const MachineBranchProbabilityInfo &P) : Src(S), Prob(P) {}
bool operator()(const MachineBasicBlock *Dst0,
const MachineBasicBlock *Dst1) const {
return Prob.getEdgeWeight(&Src, Dst0) < Prob.getEdgeWeight(&Src, Dst1);
}
const MachineBasicBlock &Src;
const MachineBranchProbabilityInfo &Prob;
};
class RegDefsUses {
public:
RegDefsUses(TargetMachine &TM);
@@ -640,8 +626,12 @@ MachineBasicBlock *Filler::selectSuccBB(MachineBasicBlock &B) const {
return NULL;
// Select the successor with the largest edge weight.
CmpWeight Cmp(B, getAnalysis<MachineBranchProbabilityInfo>());
MachineBasicBlock *S = *std::max_element(B.succ_begin(), B.succ_end(), Cmp);
auto &Prob = getAnalysis<MachineBranchProbabilityInfo>();
MachineBasicBlock *S = *std::max_element(B.succ_begin(), B.succ_end(),
[&](const MachineBasicBlock *Dst0,
const MachineBasicBlock *Dst1) {
return Prob.getEdgeWeight(&B, Dst0) < Prob.getEdgeWeight(&B, Dst1);
});
return S->isLandingPad() ? NULL : S;
}
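std::max_element returns an iterator to the element that is maximal under the supplied less-than comparator, so the lambda above ranks successors by edge weight and the call picks the heaviest. A tiny runnable sketch with hypothetical weights:

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> Weights = {10, 40, 25};  // one weight per successor
  // The comparator defines "less"; max_element returns the maximum under it.
  auto It = std::max_element(Weights.begin(), Weights.end(),
                             [](int A, int B) { return A < B; });
  std::printf("heaviest successor: index %td, weight %d\n",
              It - Weights.begin(), *It);
}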


@@ -239,14 +239,6 @@ namespace {
};
// Sorting function for deterministic behaviour in GCOVBlock::writeOut.
struct StringKeySort {
bool operator()(StringMapEntry<GCOVLines *> *LHS,
StringMapEntry<GCOVLines *> *RHS) const {
return LHS->getKey() < RHS->getKey();
}
};
// Represent a basic block in GCOV. Each block has a unique number in the
// function, number of lines belonging to each block, and a set of edges to
// other blocks.
@@ -277,8 +269,11 @@ namespace {
write(Len);
write(Number);
StringKeySort Sorter;
std::sort(SortedLinesByFile.begin(), SortedLinesByFile.end(), Sorter);
std::sort(SortedLinesByFile.begin(), SortedLinesByFile.end(),
[](StringMapEntry<GCOVLines *> *LHS,
StringMapEntry<GCOVLines *> *RHS) {
return LHS->getKey() < RHS->getKey();
});
for (SmallVectorImpl<StringMapEntry<GCOVLines *> *>::iterator
I = SortedLinesByFile.begin(), E = SortedLinesByFile.end();
I != E; ++I)


@@ -702,22 +702,6 @@ bool DSE::HandleFree(CallInst *F) {
return MadeChange;
}
namespace {
struct CouldRef {
typedef Value *argument_type;
const CallSite CS;
AliasAnalysis *AA;
bool operator()(Value *I) {
// See if the call site touches the value.
AliasAnalysis::ModRefResult A =
AA->getModRefInfo(CS, I, getPointerSize(I, *AA));
return A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref;
}
};
}
/// handleEndBlock - Remove dead stores to stack-allocated locations in the
/// function end block. Ex:
/// %A = alloca i32
@@ -819,7 +803,13 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
// If the call might load from any of our allocas, then any store above
// the call is live.
CouldRef Pred = { CS, AA };
std::function<bool(Value *)> Pred = [&](Value *I) {
// See if the call site touches the value.
AliasAnalysis::ModRefResult A =
AA->getModRefInfo(CS, I, getPointerSize(I, *AA));
return A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref;
};
DeadStackObjects.remove_if(Pred);
// If all of the allocas were clobbered by the call then we're not going
@@ -863,20 +853,6 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
return MadeChange;
}
namespace {
struct CouldAlias {
typedef Value *argument_type;
const AliasAnalysis::Location &LoadedLoc;
AliasAnalysis *AA;
bool operator()(Value *I) {
// See if the loaded location could alias the stack location.
AliasAnalysis::Location StackLoc(I, getPointerSize(I, *AA));
return !AA->isNoAlias(StackLoc, LoadedLoc);
}
};
}
/// RemoveAccessedObjects - Check to see if the specified location may alias any
/// of the stack objects in the DeadStackObjects set. If so, they become live
/// because the location is being loaded.
@@ -896,6 +872,10 @@ void DSE::RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
}
// Remove objects that could alias LoadedLoc.
CouldAlias Pred = { LoadedLoc, AA };
std::function<bool(Value *)> Pred = [&](Value *I) {
// See if the loaded location could alias the stack location.
AliasAnalysis::Location StackLoc(I, getPointerSize(I, *AA));
return !AA->isNoAlias(StackLoc, LoadedLoc);
};
DeadStackObjects.remove_if(Pred);
}
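Two conversions in this file wrap the lambda in std::function<bool(Value *)> instead of passing it directly, and the deleted structs hint at why: each carried a typedef Value *argument_type, which suggests remove_if here is written against the old adaptor convention of reading a nested argument_type from its predicate. A closure type provides no such typedef, but C++11 std::function does. A sketch of that constraint (hypothetical template, not LLVM's remove_if; argument_type was later removed from std::function in C++20, so this assumes the C++11 dialect of the commit's era):

#include <cstdio>
#include <functional>

// Stand-in for a routine that consumes Pred::argument_type.
// A raw lambda passed here would fail to compile.
template <typename Pred> void probe(Pred P) {
  typename Pred::argument_type Arg = 42;
  std::printf("P(42) = %d\n", static_cast<int>(P(Arg)));
}

int main() {
  int Threshold = 10;
  std::function<bool(int)> F = [&](int I) { return I > Threshold; };
  probe(F);  // std::function<bool(int)>::argument_type is int
}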


@@ -124,19 +124,6 @@ namespace {
AU.setPreservesCFG();
FunctionPass::getAnalysisUsage(AU);
}
struct GlobalCmp {
const DataLayout *DL;
GlobalCmp(const DataLayout *DL) : DL(DL) { }
bool operator()(const GlobalVariable *GV1, const GlobalVariable *GV2) {
Type *Ty1 = cast<PointerType>(GV1->getType())->getElementType();
Type *Ty2 = cast<PointerType>(GV2->getType())->getElementType();
return (DL->getTypeAllocSize(Ty1) < DL->getTypeAllocSize(Ty2));
}
};
};
} // end anonymous namespace
@@ -156,7 +143,13 @@ bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
unsigned MaxOffset = TLI->getMaximalGlobalOffset();
// FIXME: Find better heuristics
std::stable_sort(Globals.begin(), Globals.end(), GlobalCmp(DL));
std::stable_sort(Globals.begin(), Globals.end(),
[DL](const GlobalVariable *GV1, const GlobalVariable *GV2) {
Type *Ty1 = cast<PointerType>(GV1->getType())->getElementType();
Type *Ty2 = cast<PointerType>(GV2->getType())->getElementType();
return (DL->getTypeAllocSize(Ty1) < DL->getTypeAllocSize(Ty2));
});
Type *Int32Ty = Type::getInt32Ty(M.getContext());


@@ -1547,19 +1547,6 @@ Value *Reassociate::OptimizeAdd(Instruction *I,
return 0;
}
namespace {
/// \brief Predicate tests whether a ValueEntry's op is in a map.
struct IsValueInMap {
const DenseMap<Value *, unsigned> &Map;
IsValueInMap(const DenseMap<Value *, unsigned> &Map) : Map(Map) {}
bool operator()(const ValueEntry &Entry) {
return Map.find(Entry.Op) != Map.end();
}
};
}
/// \brief Build up a vector of value/power pairs factoring a product.
///
/// Given a series of multiplication operands, build a vector of factors and


@@ -3255,18 +3255,6 @@ bool SROA::rewritePartition(AllocaInst &AI, AllocaSlices &S,
return true;
}
namespace {
struct IsSliceEndLessOrEqualTo {
uint64_t UpperBound;
IsSliceEndLessOrEqualTo(uint64_t UpperBound) : UpperBound(UpperBound) {}
bool operator()(const AllocaSlices::iterator &I) {
return I->endOffset() <= UpperBound;
}
};
}
static void
removeFinishedSplitUses(SmallVectorImpl<AllocaSlices::iterator> &SplitUses,
uint64_t &MaxSplitUseEndOffset, uint64_t Offset) {
@@ -3278,7 +3266,9 @@ removeFinishedSplitUses(SmallVectorImpl<AllocaSlices::iterator> &SplitUses,
size_t SplitUsesOldSize = SplitUses.size();
SplitUses.erase(std::remove_if(SplitUses.begin(), SplitUses.end(),
IsSliceEndLessOrEqualTo(Offset)),
[Offset](const AllocaSlices::iterator &I) {
return I->endOffset() <= Offset;
}),
SplitUses.end());
if (SplitUsesOldSize == SplitUses.size())
return;
@@ -3616,20 +3606,6 @@ bool SROA::promoteAllocas(Function &F) {
return true;
}
namespace {
/// \brief A predicate to test whether an alloca belongs to a set.
class IsAllocaInSet {
typedef SmallPtrSet<AllocaInst *, 4> SetType;
const SetType &Set;
public:
typedef AllocaInst *argument_type;
IsAllocaInSet(const SetType &Set) : Set(Set) {}
bool operator()(AllocaInst *AI) const { return Set.count(AI); }
};
}
bool SROA::runOnFunction(Function &F) {
if (skipOptnoneFunction(F))
return false;
@@ -3665,11 +3641,14 @@ bool SROA::runOnFunction(Function &F) {
// Remove the deleted allocas from various lists so that we don't try to
// continue processing them.
if (!DeletedAllocas.empty()) {
Worklist.remove_if(IsAllocaInSet(DeletedAllocas));
PostPromotionWorklist.remove_if(IsAllocaInSet(DeletedAllocas));
std::function<bool(AllocaInst *)> IsInSet = [&](AllocaInst *AI) {
return DeletedAllocas.count(AI);
};
Worklist.remove_if(IsInSet);
PostPromotionWorklist.remove_if(IsInSet);
PromotableAllocas.erase(std::remove_if(PromotableAllocas.begin(),
PromotableAllocas.end(),
IsAllocaInSet(DeletedAllocas)),
IsInSet),
PromotableAllocas.end());
DeletedAllocas.clear();
}
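As in the DeadStoreElimination hunks above, the predicate lands in a named std::function rather than an inline lambda; here that also lets one IsInSet definition serve three call sites (Worklist, PostPromotionWorklist, and the erase/remove_if over PromotableAllocas), so the set-membership test is written once.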


@@ -1682,16 +1682,6 @@ Value *BoUpSLP::vectorizeTree() {
return VectorizableTree[0].VectorizedValue;
}
class DTCmp {
const DominatorTree *DT;
public:
DTCmp(const DominatorTree *DT) : DT(DT) {}
bool operator()(const BasicBlock *A, const BasicBlock *B) const {
return DT->properlyDominates(A, B);
}
};
void BoUpSLP::optimizeGatherSequence() {
DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size()
<< " gather sequences instructions.\n");
@@ -1730,7 +1720,10 @@ void BoUpSLP::optimizeGatherSequence() {
// Sort blocks by domination. This ensures we visit a block after all blocks
// dominating it are visited.
SmallVector<BasicBlock *, 8> CSEWorkList(CSEBlocks.begin(), CSEBlocks.end());
std::stable_sort(CSEWorkList.begin(), CSEWorkList.end(), DTCmp(DT));
std::stable_sort(CSEWorkList.begin(), CSEWorkList.end(),
[this](const BasicBlock *A, const BasicBlock *B) {
return DT->properlyDominates(A, B);
});
// Perform O(N^2) search over the gather sequences and merge identical
// instructions. TODO: We can further optimize this scan if we split the


@@ -475,16 +475,6 @@ void addMember(std::vector<NewArchiveIterator> &Members, T I, StringRef Name,
Members[Pos] = NI;
}
namespace {
class HasName {
StringRef Name;
public:
HasName(StringRef Name) : Name(Name) {}
bool operator()(StringRef Path) { return Name == sys::path::filename(Path); }
};
}
enum InsertAction {
IA_AddOldMember,
IA_AddNewMeber,
@@ -500,8 +490,9 @@ computeInsertAction(ArchiveOperation Operation,
if (Operation == QuickAppend || Members.empty())
return IA_AddOldMember;
std::vector<std::string>::iterator MI =
std::find_if(Members.begin(), Members.end(), HasName(Name));
std::vector<std::string>::iterator MI = std::find_if(
Members.begin(), Members.end(),
[Name](StringRef Path) { return Name == sys::path::filename(Path); });
if (MI == Members.end())
return IA_AddOldMember;


@@ -288,15 +288,6 @@ public:
}
};
namespace {
/// Sort ClassInfo pointers independently of pointer value.
struct LessClassInfoPtr {
bool operator()(const ClassInfo *LHS, const ClassInfo *RHS) const {
return *LHS < *RHS;
}
};
}
/// MatchableInfo - Helper class for storing the necessary information for an
/// instruction or alias which is capable of being matched.
struct MatchableInfo {
@@ -1288,7 +1279,7 @@ void AsmMatcherInfo::buildOperandMatchInfo() {
/// Map containing a mask with all operand indices that can be found for
/// that class inside an instruction.
typedef std::map<ClassInfo*, unsigned, LessClassInfoPtr> OpClassMaskTy;
typedef std::map<ClassInfo *, unsigned, less_ptr<ClassInfo>> OpClassMaskTy;
OpClassMaskTy OpClassMask;
for (std::vector<MatchableInfo*>::const_iterator it =


@@ -1705,16 +1705,6 @@ void CodeGenRegBank::computeRegUnitSets() {
}
}
struct LessUnits {
const CodeGenRegBank &RegBank;
LessUnits(const CodeGenRegBank &RB): RegBank(RB) {}
bool operator()(unsigned ID1, unsigned ID2) {
return RegBank.getRegPressureSet(ID1).Units.size()
< RegBank.getRegPressureSet(ID2).Units.size();
}
};
void CodeGenRegBank::computeDerivedInfo() {
computeComposites();
computeSubRegIndexLaneMasks();
@@ -1737,7 +1727,10 @@ void CodeGenRegBank::computeDerivedInfo() {
RegUnitSetOrder.push_back(Idx);
std::stable_sort(RegUnitSetOrder.begin(), RegUnitSetOrder.end(),
LessUnits(*this));
[this](unsigned ID1, unsigned ID2) {
return getRegPressureSet(ID1).Units.size() <
getRegPressureSet(ID2).Units.size();
});
for (unsigned Idx = 0, EndIdx = RegUnitSets.size(); Idx != EndIdx; ++Idx) {
RegUnitSets[RegUnitSetOrder[Idx]].Order = Idx;
}


@@ -289,17 +289,6 @@ GetInstByName(const char *Name,
return I->second;
}
namespace {
/// SortInstByName - Sorting predicate to sort instructions by name.
///
struct SortInstByName {
bool operator()(const CodeGenInstruction *Rec1,
const CodeGenInstruction *Rec2) const {
return Rec1->TheDef->getName() < Rec2->TheDef->getName();
}
};
}
/// \brief Return all of the instructions defined by the target, ordered by
/// their enum value.
void CodeGenTarget::ComputeInstrsByEnum() const {
@@ -346,8 +335,10 @@ void CodeGenTarget::ComputeInstrsByEnum() const {
// All of the instructions are now in random order based on the map iteration.
// Sort them by name.
std::sort(InstrsByEnum.begin()+EndOfPredefines, InstrsByEnum.end(),
SortInstByName());
std::sort(InstrsByEnum.begin() + EndOfPredefines, InstrsByEnum.end(),
[](const CodeGenInstruction *Rec1, const CodeGenInstruction *Rec2) {
return Rec1->TheDef->getName() < Rec2->TheDef->getName();
});
}


@@ -131,20 +131,6 @@ void IntrinsicEmitter::EmitEnumInfo(const std::vector<CodeGenIntrinsic> &Ints,
OS << "#endif\n\n";
}
struct IntrinsicNameSorter {
IntrinsicNameSorter(const std::vector<CodeGenIntrinsic> &I)
: Ints(I) {}
// Sort in reverse order of intrinsic name so "abc.def" appears after
// "abd.def.ghi" in the overridden name matcher
bool operator()(unsigned i, unsigned j) {
return Ints[i].Name > Ints[j].Name;
}
private:
const std::vector<CodeGenIntrinsic> &Ints;
};
void IntrinsicEmitter::
EmitFnNameRecognizer(const std::vector<CodeGenIntrinsic> &Ints,
raw_ostream &OS) {
@@ -158,15 +144,17 @@ EmitFnNameRecognizer(const std::vector<CodeGenIntrinsic> &Ints,
OS << " StringRef NameR(Name+6, Len-6); // Skip over 'llvm.'\n";
OS << " switch (Name[5]) { // Dispatch on first letter.\n";
OS << " default: break;\n";
IntrinsicNameSorter Sorter(Ints);
// Emit the intrinsic matching stuff by first letter.
for (std::map<char, std::vector<unsigned> >::iterator I = IntMapping.begin(),
E = IntMapping.end(); I != E; ++I) {
OS << " case '" << I->first << "':\n";
std::vector<unsigned> &IntList = I->second;
// Sort intrinsics in reverse order of their names
std::sort(IntList.begin(), IntList.end(), Sorter);
// Sort in reverse order of intrinsic name so "abc.def" appears after
// "abd.def.ghi" in the overridden name matcher
std::sort(IntList.begin(), IntList.end(), [&](unsigned i, unsigned j) {
return Ints[i].Name > Ints[j].Name;
});
// Emit all the overloaded intrinsics first, build a table of the
// non-overloaded ones.