
Avoid overly large SmallPtrSet/SmallSet

These sets perform a linear search in small mode, so it is never a good
idea to use a SmallSize/N bigger than 32.

llvm-svn: 259283
Matthias Braun 2016-01-30 01:24:31 +00:00
parent a0a04f68a4
commit 882ae69776
22 changed files with 25 additions and 25 deletions
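
For context: SmallPtrSet and SmallSet keep their first N elements in an inline buffer and answer insert/lookup queries there by linear scan, only switching to a hashed "big" representation once that buffer overflows, so an oversized N inflates both the object's footprint and the worst-case scan per query. The sketch below illustrates the visited-set pattern that recurs throughout this diff; the helper function and its arguments are made up for illustration, only the SmallPtrSet API itself is from LLVM's ADT library.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/IR/Instruction.h"

// Hypothetical helper: process each instruction at most once.
static void visitOnce(llvm::ArrayRef<llvm::Instruction *> Work) {
  // 32 is the largest small size this commit settles on; in small mode the
  // set stores roughly that many pointers inline and scans them linearly.
  llvm::SmallPtrSet<llvm::Instruction *, 32> Visited;
  for (llvm::Instruction *I : Work) {
    if (!Visited.insert(I).second)
      continue; // insert().second is false when I was already in the set
    // ... real code would process I here ...
  }
}

Shrinking the small size from 128 or 64 down to 32 or less does not change behavior once a set grows past its inline buffer; it only trims the inline array and the linear scan done before the set switches to hashing.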

View File

@@ -63,7 +63,7 @@ private:
 bool Analyzed;
 // The set of visited instructions (non-integer-typed only).
-SmallPtrSet<Instruction*, 128> Visited;
+SmallPtrSet<Instruction*, 32> Visited;
 DenseMap<Instruction *, APInt> AliveBits;
 };

View File

@@ -136,7 +136,7 @@ private:
 SmallVector<DIGlobalVariable *, 8> GVs;
 SmallVector<DIType *, 8> TYs;
 SmallVector<DIScope *, 8> Scopes;
-SmallPtrSet<const MDNode *, 64> NodesSeen;
+SmallPtrSet<const MDNode *, 32> NodesSeen;
 DITypeIdentifierMap TypeIdentifierMap;
 /// \brief Specify if TypeIdentifierMap is initialized.

View File

@@ -123,7 +123,7 @@ private:
 // here. Maybe when the relocation stuff moves to target specific,
 // this can go with it? The streamer would need some target specific
 // refactoring too.
-mutable SmallPtrSet<const MCSymbol *, 64> ThumbFuncs;
+mutable SmallPtrSet<const MCSymbol *, 32> ThumbFuncs;
 /// \brief The bundle alignment size currently set in the assembler.
 ///

View File

@@ -138,7 +138,7 @@ bool llvm::isPotentiallyReachableFromMany(
 // Limit the number of blocks we visit. The goal is to avoid run-away compile
 // times on large CFGs without hampering sensible code. Arbitrarily chosen.
 unsigned Limit = 32;
-SmallSet<const BasicBlock*, 64> Visited;
+SmallPtrSet<const BasicBlock*, 32> Visited;
 do {
 BasicBlock *BB = Worklist.pop_back_val();
 if (!Visited.insert(BB).second)

View File

@@ -269,7 +269,7 @@ GlobalsAAResult::getFunctionInfo(const Function *F) {
 /// (really, their address passed to something nontrivial), record this fact,
 /// and record the functions that they are used directly in.
 void GlobalsAAResult::AnalyzeGlobals(Module &M) {
-SmallPtrSet<Function *, 64> TrackedFunctions;
+SmallPtrSet<Function *, 32> TrackedFunctions;
 for (Function &F : M)
 if (F.hasLocalLinkage())
 if (!AnalyzeUsesOfPointer(&F)) {
@@ -281,7 +281,7 @@ void GlobalsAAResult::AnalyzeGlobals(Module &M) {
 ++NumNonAddrTakenFunctions;
 }
-SmallPtrSet<Function *, 64> Readers, Writers;
+SmallPtrSet<Function *, 16> Readers, Writers;
 for (GlobalVariable &GV : M.globals())
 if (GV.hasLocalLinkage()) {
 if (!AnalyzeUsesOfPointer(&GV, &Readers,

View File

@@ -854,7 +854,7 @@ MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
 // isReadonlyCall - If this is a read-only call, we can be more aggressive.
 bool isReadonlyCall = AA->onlyReadsMemory(QueryCS);
-SmallPtrSet<BasicBlock*, 64> Visited;
+SmallPtrSet<BasicBlock*, 32> Visited;
 unsigned NumSortedEntries = Cache.size();
 DEBUG(AssertSorted(Cache));

View File

@@ -112,7 +112,7 @@ namespace {
 ///
 /// This is used to allow us to reliably add any operands of a DAG node
 /// which have not yet been combined to the worklist.
-SmallPtrSet<SDNode *, 64> CombinedNodes;
+SmallPtrSet<SDNode *, 32> CombinedNodes;
 // AA - Used for DAG load/store alias analysis.
 AliasAnalysis &AA;

View File

@@ -321,7 +321,7 @@ void ScheduleDAGSDNodes::BuildSchedUnits() {
 // Add all nodes in depth first order.
 SmallVector<SDNode*, 64> Worklist;
-SmallPtrSet<SDNode*, 64> Visited;
+SmallPtrSet<SDNode*, 32> Visited;
 Worklist.push_back(DAG->getRoot().getNode());
 Visited.insert(DAG->getRoot().getNode());

View File

@@ -630,7 +630,7 @@ static bool printOperand(raw_ostream &OS, const SelectionDAG *G,
 }
 }
-typedef SmallPtrSet<const SDNode *, 128> VisitedSDNodeSet;
+typedef SmallPtrSet<const SDNode *, 32> VisitedSDNodeSet;
 static void DumpNodesr(raw_ostream &OS, const SDNode *N, unsigned indent,
 const SelectionDAG *G, VisitedSDNodeSet &once) {
 if (!once.insert(N).second) // If we've been here before, return now.

View File

@@ -669,7 +669,7 @@ void SelectionDAGISel::SelectBasicBlock(BasicBlock::const_iterator Begin,
 }
 void SelectionDAGISel::ComputeLiveOutVRegInfo() {
-SmallPtrSet<SDNode*, 128> VisitedNodes;
+SmallPtrSet<SDNode*, 16> VisitedNodes;
 SmallVector<SDNode*, 128> Worklist;
 Worklist.push_back(CurDAG->getRoot().getNode());

View File

@@ -258,7 +258,7 @@ static void removeDuplicatesGCPtrs(SmallVectorImpl<const Value *> &Bases,
 SelectionDAGBuilder &Builder) {
 // This is horribly inefficient, but I don't care right now
-SmallSet<SDValue, 64> Seen;
+SmallSet<SDValue, 32> Seen;
 SmallVector<const Value *, 64> NewBases, NewPtrs, NewRelocs;
 for (size_t i = 0; i < Ptrs.size(); i++) {

View File

@@ -303,7 +303,7 @@ void SjLjEHPrepare::lowerAcrossUnwindEdges(Function &F,
 }
 // Find all of the blocks that this value is live in.
-SmallPtrSet<BasicBlock *, 64> LiveBBs;
+SmallPtrSet<BasicBlock *, 32> LiveBBs;
 LiveBBs.insert(Inst->getParent());
 while (!Users.empty()) {
 Instruction *U = Users.back();

View File

@@ -1449,7 +1449,7 @@ static int OptNameCompare(const std::pair<const char *, Option *> *LHS,
 static void sortOpts(StringMap<Option *> &OptMap,
 SmallVectorImpl<std::pair<const char *, Option *>> &Opts,
 bool ShowHidden) {
-SmallPtrSet<Option *, 128> OptionSet; // Duplicate option detection.
+SmallPtrSet<Option *, 32> OptionSet; // Duplicate option detection.
 for (StringMap<Option *>::iterator I = OptMap.begin(), E = OptMap.end();
 I != E; ++I) {

View File

@@ -7184,7 +7184,7 @@ void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr *MI,
 // Get an ordered list of the machine basic blocks for the jump table.
 std::vector<MachineBasicBlock*> LPadList;
-SmallPtrSet<MachineBasicBlock*, 64> InvokeBBs;
+SmallPtrSet<MachineBasicBlock*, 32> InvokeBBs;
 LPadList.reserve(CallSiteNumToLPad.size());
 for (unsigned I = 1; I <= MaxCSNum; ++I) {
 SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I];

View File

@@ -1249,7 +1249,7 @@ int FunctionComparator::compare() {
 // functions, then takes each block from each terminator in order. As an
 // artifact, this also means that unreachable blocks are ignored.
 SmallVector<const BasicBlock *, 8> FnLBBs, FnRBBs;
-SmallSet<const BasicBlock *, 128> VisitedBBs; // in terms of F1.
+SmallPtrSet<const BasicBlock *, 32> VisitedBBs; // in terms of F1.
 FnLBBs.push_back(&FnL->getEntryBlock());
 FnRBBs.push_back(&FnR->getEntryBlock());

View File

@@ -163,10 +163,10 @@ protected:
 EdgeWeightMap EdgeWeights;
 /// \brief Set of visited blocks during propagation.
-SmallPtrSet<const BasicBlock *, 128> VisitedBlocks;
+SmallPtrSet<const BasicBlock *, 32> VisitedBlocks;
 /// \brief Set of visited edges during propagation.
-SmallSet<Edge, 128> VisitedEdges;
+SmallSet<Edge, 32> VisitedEdges;
 /// \brief Equivalence classes for block weights.
 ///

View File

@@ -3001,7 +3001,7 @@ static bool prepareICWorklistFromFunction(Function &F, const DataLayout &DL,
 // Do a depth-first traversal of the function, populate the worklist with
 // the reachable instructions. Ignore blocks that are not reachable. Keep
 // track of which blocks we visit.
-SmallPtrSet<BasicBlock *, 64> Visited;
+SmallPtrSet<BasicBlock *, 32> Visited;
 MadeIRChange |=
 AddReachableCodeToWorklist(&F.front(), DL, Visited, ICWorklist, TLI);

View File

@@ -34,7 +34,7 @@ using namespace llvm;
 STATISTIC(NumRemoved, "Number of instructions removed");
 static bool aggressiveDCE(Function& F) {
-SmallPtrSet<Instruction*, 128> Alive;
+SmallPtrSet<Instruction*, 32> Alive;
 SmallVector<Instruction*, 128> Worklist;
 // Collect the set of "root" instructions that are known live.

View File

@@ -2718,7 +2718,7 @@ static void checkBasicSSA(DominatorTree &DT, GCPtrLivenessData &Data,
 static void computeLiveInValues(DominatorTree &DT, Function &F,
 GCPtrLivenessData &Data) {
-SmallSetVector<BasicBlock *, 200> Worklist;
+SmallSetVector<BasicBlock *, 32> Worklist;
 auto AddPredsToWorklist = [&](BasicBlock *BB) {
 // We use a SetVector so that we don't have duplicates in the worklist.
 Worklist.insert(pred_begin(BB), pred_end(BB));

View File

@@ -1465,7 +1465,7 @@ void llvm::removeUnwindEdge(BasicBlock *BB) {
 /// if they are in a dead cycle. Return true if a change was made, false
 /// otherwise.
 bool llvm::removeUnreachableBlocks(Function &F, LazyValueInfo *LVI) {
-SmallPtrSet<BasicBlock*, 128> Reachable;
+SmallPtrSet<BasicBlock*, 16> Reachable;
 bool Changed = markAliveBlocks(F, Reachable);
 // If there are unreachable blocks in the CFG...

View File

@@ -138,7 +138,7 @@ static void findPartitions(Module *M, ClusterIDMapType &ClusterIDMap,
 typedef std::pair<unsigned, ClusterMapType::iterator> SortType;
 SmallVector<SortType, 64> Sets;
-SmallPtrSet<const GlobalValue *, 64> Visited;
+SmallPtrSet<const GlobalValue *, 32> Visited;
 // To guarantee determinism, we have to sort SCC according to size.
 // When size is the same, use leader's name.

View File

@@ -459,7 +459,7 @@ bool ReduceCrashingInstructions::TestInsts(std::vector<const Instruction*>
 Module *M = CloneModule(BD.getProgram(), VMap).release();
 // Convert list to set for fast lookup...
-SmallPtrSet<Instruction*, 64> Instructions;
+SmallPtrSet<Instruction*, 32> Instructions;
 for (unsigned i = 0, e = Insts.size(); i != e; ++i) {
 assert(!isa<TerminatorInst>(Insts[i]));
 Instructions.insert(cast<Instruction>(VMap[Insts[i]]));
@@ -600,7 +600,7 @@ public:
 bool ReduceCrashingNamedMDOps::TestNamedMDOps(
 std::vector<const MDNode *> &NamedMDOps) {
 // Convert list to set for fast lookup...
-SmallPtrSet<const MDNode *, 64> OldMDNodeOps;
+SmallPtrSet<const MDNode *, 32> OldMDNodeOps;
 for (unsigned i = 0, e = NamedMDOps.size(); i != e; ++i) {
 OldMDNodeOps.insert(NamedMDOps[i]);
 }