
Fix more spelling mistakes in comments of LLVM Analysis passes

Patch by Reshabh Sharma!

Differential Revision: https://reviews.llvm.org/D43939

llvm-svn: 326601
Vedant Kumar 2018-03-02 18:57:02 +00:00
parent 306e5cccd6
commit 1268108615
8 changed files with 15 additions and 15 deletions


@@ -13,7 +13,7 @@
/// Summary-based analysis, also known as bottom-up analysis, is a style of
/// interprocedrual static analysis that tries to analyze the callees before the
/// callers get analyzed. The key idea of summary-based analysis is to first
-/// process each function indepedently, outline its behavior in a condensed
+/// process each function independently, outline its behavior in a condensed
/// summary, and then instantiate the summary at the callsite when the said
/// function is called elsewhere. This is often in contrast to another style
/// called top-down analysis, in which callers are always analyzed first before

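For readers of this hunk, a minimal generic sketch of the bottom-up pattern the comment describes: summarize each function once, then reuse that summary wherever it is called. The toy Function type, the string-keyed call graph, and the "may write memory" property are illustrative assumptions, not LLVM code, and the sketch assumes an acyclic call graph.

#include <iostream>
#include <map>
#include <string>
#include <vector>

struct Function {
  std::string Name;
  bool WritesMemoryDirectly;        // effect of this function's own body
  std::vector<std::string> Callees; // names of functions it calls
};

// Summary: may this function (transitively) write to memory?
using SummaryMap = std::map<std::string, bool>;

bool summarize(const Function &F, const std::map<std::string, Function> &CG,
               SummaryMap &Summaries) {
  auto It = Summaries.find(F.Name);
  if (It != Summaries.end())
    return It->second;                  // callee already summarized
  bool MayWrite = F.WritesMemoryDirectly;
  for (const std::string &Callee : F.Callees)
    MayWrite |= summarize(CG.at(Callee), CG, Summaries); // instantiate the callee's summary
  return Summaries[F.Name] = MayWrite;
}

int main() {
  std::map<std::string, Function> CG = {
      {"leaf", {"leaf", true, {}}},
      {"mid", {"mid", false, {"leaf"}}},
      {"top", {"top", false, {"mid"}}}};
  SummaryMap Summaries;
  for (const auto &KV : CG)
    summarize(KV.second, CG, Summaries);
  for (const auto &KV : Summaries)
    std::cout << KV.first << " may write memory: " << KV.second << "\n";
}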

@@ -392,7 +392,7 @@ bool BranchProbabilityInfo::calcColdCallHeuristics(const BasicBlock *BB) {
return true;
}
-// Calculate Edge Weights using "Pointer Heuristics". Predict a comparsion
+// Calculate Edge Weights using "Pointer Heuristics". Predict a comparison
// between two pointer or pointer and NULL will fail.
bool BranchProbabilityInfo::calcPointerHeuristics(const BasicBlock *BB) {
const BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());

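As a rough illustration of the pointer heuristic named here: a branch on a pointer equality test is predicted to fail (not taken), and one on an inequality test to succeed. The weights below are made-up placeholders; BranchProbabilityInfo uses its own constants.

#include <cstdint>
#include <iostream>
#include <utility>

// {taken, not-taken} weights for a branch guarded by a pointer comparison.
std::pair<uint32_t, uint32_t> pointerCompareWeights(bool IsEquality) {
  const uint32_t Unlikely = 1, Likely = 63; // hypothetical 1:63 split
  return IsEquality ? std::make_pair(Unlikely, Likely)  // p == q or p == NULL: expect false
                    : std::make_pair(Likely, Unlikely); // p != q: expect true
}

int main() {
  auto W = pointerCompareWeights(/*IsEquality=*/true);
  std::cout << "taken=" << W.first << " not-taken=" << W.second << "\n";
}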

@@ -18,7 +18,7 @@
//
// The algorithm used here is based on recursive state machine matching scheme
// proposed in "Demand-driven alias analysis for C" by Xin Zheng and Radu
-// Rugina. The general idea is to extend the tranditional transitive closure
+// Rugina. The general idea is to extend the traditional transitive closure
// algorithm to perform CFL matching along the way: instead of recording
// "whether X is reachable from Y", we keep track of "whether X is reachable
// from Y at state Z", where the "state" field indicates where we are in the CFL
@@ -645,7 +645,7 @@ static void processWorkListItem(const WorkListItem &Item, const CFLGraph &Graph,
// relations that are symmetric, we could actually cut the storage by half by
// sorting FromNode and ToNode before insertion happens.
-// The newly added value alias pair may pontentially generate more memory
+// The newly added value alias pair may potentially generate more memory
// alias pairs. Check for them here.
auto FromNodeBelow = getNodeBelow(Graph, FromNode);
auto ToNodeBelow = getNodeBelow(Graph, ToNode);

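The storage remark in the second hunk (sorting FromNode and ToNode before insertion) boils down to canonicalizing an unordered pair so a symmetric relation is stored once. A stand-alone sketch with a placeholder Node type:

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <set>
#include <utility>

using Node = uint32_t; // stand-in for the graph's node index type

std::set<std::pair<Node, Node>> AliasPairs;

// Because aliasing is symmetric, store each pair once in (min, max) order.
void insertSymmetric(Node From, Node To) {
  if (To < From)
    std::swap(From, To);
  AliasPairs.emplace(From, To);
}

int main() {
  insertSymmetric(7, 3);
  insertSymmetric(3, 7); // same unordered pair, no second entry
  std::cout << "stored pairs: " << AliasPairs.size() << "\n"; // prints 1
}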

@@ -427,7 +427,7 @@ bool LazyCallGraph::RefSCC::isAncestorOf(const RefSCC &RC) const {
/// source to target.
///
/// This helper routine, in addition to updating the postorder sequence itself
-/// will also update a map from SCCs to indices within that sequecne.
+/// will also update a map from SCCs to indices within that sequence.
///
/// The sequence and the map must operate on pointers to the SCC type.
///
@@ -713,7 +713,7 @@ LazyCallGraph::RefSCC::switchInternalEdgeToRef(Node &SourceN, Node &TargetN) {
//
// However, we specially handle the target node. The target node is known to
// reach all other nodes in the original SCC by definition. This means that
-// we want the old SCC to be replaced with an SCC contaning that node as it
+// we want the old SCC to be replaced with an SCC containing that node as it
// will be the root of whatever SCC DAG results from the DFS. Assumptions
// about an SCC such as the set of functions called will continue to hold,
// etc.
@@ -822,7 +822,7 @@ LazyCallGraph::RefSCC::switchInternalEdgeToRef(Node &SourceN, Node &TargetN) {
// Cleared the DFS early, start another round.
break;
-// We've finished processing N and its descendents, put it on our pending
+// We've finished processing N and its descendants, put it on our pending
// SCC stack to eventually get merged into an SCC of nodes.
PendingSCCStack.push_back(N);
@@ -1234,7 +1234,7 @@ LazyCallGraph::RefSCC::removeInternalRefEdge(Node &SourceN,
++I;
}
-// We've finished processing N and its descendents, put it on our pending
+// We've finished processing N and its descendants, put it on our pending
// stack to eventually get merged into a RefSCC.
PendingRefSCCStack.push_back(N);
@@ -1617,7 +1617,7 @@ void LazyCallGraph::buildGenericSCCs(RootsT &&Roots, GetBeginT &&GetBegin,
++I;
}
-// We've finished processing N and its descendents, put it on our pending
+// We've finished processing N and its descendants, put it on our pending
// SCC stack to eventually get merged into an SCC of nodes.
PendingSCCStack.push_back(N);

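The recurring comment about finishing N and its descendants and pushing N onto a pending SCC stack follows the classic Tarjan-style pattern. A compact generic sketch, using plain integers and adjacency lists rather than LazyCallGraph's node types:

#include <algorithm>
#include <iostream>
#include <vector>

// Generic Tarjan-style SCC computation illustrating the "pending stack" idea.
struct SCCFinder {
  const std::vector<std::vector<int>> &Adj;
  std::vector<int> Index, LowLink;
  std::vector<bool> OnPending;
  std::vector<int> PendingStack;          // finished nodes awaiting their SCC root
  std::vector<std::vector<int>> SCCs;
  int NextIndex = 0;

  explicit SCCFinder(const std::vector<std::vector<int>> &G)
      : Adj(G), Index(G.size(), -1), LowLink(G.size(), 0),
        OnPending(G.size(), false) {}

  void visit(int N) {
    Index[N] = LowLink[N] = NextIndex++;
    PendingStack.push_back(N);
    OnPending[N] = true;
    for (int Succ : Adj[N]) {
      if (Index[Succ] == -1) {            // tree edge: recurse first
        visit(Succ);
        LowLink[N] = std::min(LowLink[N], LowLink[Succ]);
      } else if (OnPending[Succ]) {       // back edge into the current DFS
        LowLink[N] = std::min(LowLink[N], Index[Succ]);
      }
    }
    // N and all of its descendants are finished; if N is the root of its SCC,
    // pop the pending nodes that belong to that SCC off as one unit.
    if (LowLink[N] == Index[N]) {
      std::vector<int> SCC;
      int M;
      do {
        M = PendingStack.back();
        PendingStack.pop_back();
        OnPending[M] = false;
        SCC.push_back(M);
      } while (M != N);
      SCCs.push_back(SCC);
    }
  }
};

int main() {
  std::vector<std::vector<int>> G = {{1}, {2}, {0}, {1}}; // 0->1->2->0 and 3->1
  SCCFinder F(G);
  for (int N = 0; N < (int)G.size(); ++N)
    if (F.Index[N] == -1)
      F.visit(N);
  std::cout << "found " << F.SCCs.size() << " SCCs\n"; // {0,1,2} and {3}
}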

@@ -238,7 +238,7 @@ bool llvm::isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
}
/// \brief Tests if a value is a call or invoke to a library function that
-/// allocates memory similiar to malloc or calloc.
+/// allocates memory similar to malloc or calloc.
bool llvm::isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast) {
return getAllocationData(V, MallocOrCallocLike, TLI,

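A short usage sketch for the helper documented above, written against the signature visible in this hunk. The headers pulled in and the way the Function and TargetLibraryInfo are obtained are assumptions of the sketch, not part of the patch.

#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"

// Count instructions in F that allocate memory like malloc or calloc.
static unsigned countMallocOrCallocLikeCalls(const llvm::Function &F,
                                             const llvm::TargetLibraryInfo &TLI) {
  unsigned Count = 0;
  for (const llvm::Instruction &I : llvm::instructions(F))
    if (llvm::isMallocOrCallocLikeFn(&I, &TLI, /*LookThroughBitCast=*/true))
      ++Count;
  return Count;
}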

@@ -1387,7 +1387,7 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
// IVUsers tries to prevent this case, so it is rare. However, it can
// happen when an IVUser outside the loop is not dominated by the latch
// block. Adjusting IVIncInsertPos before expansion begins cannot handle
-// all cases. Consider a phi outide whose operand is replaced during
+// all cases. Consider a phi outside whose operand is replaced during
// expansion with the value of the postinc user. Without fundamentally
// changing the way postinc users are tracked, the only remedy is
// inserting an extra IV increment. StepV might fold into PostLoopOffset,
@@ -1407,7 +1407,7 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
}
// We have decided to reuse an induction variable of a dominating loop. Apply
-// truncation and/or invertion of the step.
+// truncation and/or inversion of the step.
if (TruncTy) {
Type *ResTy = Result->getType();
// Normalize the result type.
@@ -2209,7 +2209,7 @@ Value *SCEVExpander::generateOverflowCheck(const SCEVAddRecExpr *AR,
// If the backedge taken count type is larger than the AR type,
// check that we don't drop any bits by truncating it. If we are
-// droping bits, then we have overflow (unless the step is zero).
+// dropping bits, then we have overflow (unless the step is zero).
if (SE.getTypeSizeInBits(CountTy) > SE.getTypeSizeInBits(Ty)) {
auto MaxVal = APInt::getMaxValue(DstBits).zext(SrcBits);
auto *BackedgeCheck =

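The "dropping bits" condition in the last hunk can be phrased the same way generateOverflowCheck does, by comparing the wide count against the maximum value of the narrower type. A stand-alone sketch using LLVM's APInt (building it requires linking LLVMSupport):

#include "llvm/ADT/APInt.h"
#include <iostream>

// A wide count drops bits when truncated to DstBits exactly when it exceeds
// the narrower type's maximum value.
static bool dropsBitsWhenTruncated(const llvm::APInt &Count, unsigned DstBits) {
  unsigned SrcBits = Count.getBitWidth();
  if (DstBits >= SrcBits)
    return false; // truncation cannot lose bits
  llvm::APInt MaxVal = llvm::APInt::getMaxValue(DstBits).zext(SrcBits);
  return Count.ugt(MaxVal);
}

int main() {
  llvm::APInt Count(/*numBits=*/64, /*val=*/300);
  std::cout << dropsBitsWhenTruncated(Count, 8) << "\n";  // 1: 300 > 255
  std::cout << dropsBitsWhenTruncated(Count, 16) << "\n"; // 0: fits in i16
}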

@@ -636,7 +636,7 @@ static bool mayBeAccessToSubobjectOf(TBAAStructTagNode BaseTag,
// If the base object has a direct or indirect field of the subobject's type,
// then this may be an access to that field. We need this to check now that
-// we support aggreagtes as access types.
+// we support aggregates as access types.
if (NewFormat) {
// TBAAStructTypeNode BaseAccessType(BaseTag.getAccessType());
TBAAStructTypeNode FieldType(SubobjectTag.getBaseType());


@@ -4508,7 +4508,7 @@ static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
///
/// The function processes the case when type of true and false values of a
/// select instruction differs from type of the cmp instruction operands because
-/// of a cast instructon. The function checks if it is legal to move the cast
+/// of a cast instruction. The function checks if it is legal to move the cast
/// operation after "select". If yes, it returns the new second value of
/// "select" (with the assumption that cast is moved):
/// 1. As operand of cast instruction when both values of "select" are same cast