
Fixed spelling mistake in comments of LLVM Analysis passes

Patch by Reshabh Sharma!

Differential Revision: https://reviews.llvm.org/D43861

llvm-svn: 326352
Author: Vedant Kumar 2018-02-28 19:08:52 +00:00
parent 76fc38039a
commit 6758a0cd13
4 changed files with 14 additions and 14 deletions

lib/Analysis/CGSCCPassManager.cpp

@@ -32,7 +32,7 @@
using namespace llvm;
-// Explicit template instantiations and specialization defininitions for core
+// Explicit template instantiations and specialization definitions for core
// template typedefs.
namespace llvm {
@@ -96,7 +96,7 @@ PassManager<LazyCallGraph::SCC, CGSCCAnalysisManager, LazyCallGraph &,
// ...getContext().yield();
}
-// Invaliadtion was handled after each pass in the above loop for the current
+// Invalidation was handled after each pass in the above loop for the current
// SCC. Therefore, the remaining analysis results in the AnalysisManager are
// preserved. We mark this with a set so that we don't need to inspect each
// one individually.
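
The "mark this with a set" idiom the hunk above refers to is the PreservedAnalyses set API. A minimal sketch, assuming a made-up no-op function pass (NoopExamplePass is illustrative, not part of this patch):

    #include "llvm/IR/PassManager.h"
    using namespace llvm;

    // Hypothetical pass: nothing was mutated, so preserve every analysis
    // on this IR unit with one set instead of naming each one.
    struct NoopExamplePass : PassInfoMixin<NoopExamplePass> {
      PreservedAnalyses run(Function &F, FunctionAnalysisManager &) {
        PreservedAnalyses PA;
        PA.preserveSet<AllAnalysesOn<Function>>();
        return PA;
      }
    };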
@@ -372,7 +372,7 @@ incorporateNewSCCRange(const SCCRangeT &NewSCCRange, LazyCallGraph &G,
// We need to propagate an invalidation call to all but the newly current SCC
// because the outer pass manager won't do that for us after splitting them.
// FIXME: We should accept a PreservedAnalysis from the CG updater so that if
-// there are preserved ananalyses we can avoid invalidating them here for
+// there are preserved analysis we can avoid invalidating them here for
// split-off SCCs.
// We know however that this will preserve any FAM proxy so go ahead and mark
// that.
@@ -635,7 +635,7 @@ LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
// If one of the invalidated SCCs had a cached proxy to a function
// analysis manager, we need to create a proxy in the new current SCC as
-// the invaliadted SCCs had their functions moved.
+// the invalidated SCCs had their functions moved.
if (HasFunctionAnalysisProxy)
AM.getResult<FunctionAnalysisManagerCGSCCProxy>(*C, G);

lib/Analysis/LoopAnalysisManager.cpp

@@ -24,7 +24,7 @@ cl::opt<bool> EnableMSSALoopDependency(
"enable-mssa-loop-dependency", cl::Hidden, cl::init(false),
cl::desc("Enable MemorySSA dependency for loop pass manager"));
-// Explicit template instantiations and specialization defininitions for core
+// Explicit template instantiations and specialization definitions for core
// template typedefs.
template class AllAnalysesOn<Loop>;
template class AnalysisManager<Loop, LoopStandardAnalysisResults &>;
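
The hunk above also shows LLVM's standard pattern for exposing such a switch. A minimal sketch of the same cl::opt shape, with a made-up flag name:

    #include "llvm/Support/CommandLine.h"
    using namespace llvm;

    // Illustrative only: a hidden boolean flag, default off, in the same
    // style as enable-mssa-loop-dependency.
    static cl::opt<bool> EnableExampleCheck(
        "enable-example-check", cl::Hidden, cl::init(false),
        cl::desc("Enable a hypothetical extra check"));

Because the flag is cl::Hidden it is omitted from -help (it shows up under -help-hidden), but it is still accepted on the command line, e.g. opt -enable-mssa-loop-dependency.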

lib/Analysis/ValueTracking.cpp

@@ -530,7 +530,7 @@ bool llvm::isValidAssumeForContext(const Instruction *Inv,
if (Inv->getParent() != CxtI->getParent())
return false;
-// If we have a dom tree, then we now know that the assume doens't dominate
+// If we have a dom tree, then we now know that the assume doesn't dominate
// the other instruction. If we don't have a dom tree then we can check if
// the assume is first in the BB.
if (!DT) {
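
The fallback this comment describes (no DominatorTree, so fall back to block-local ordering) can be sketched as a standalone helper. This is a simplified illustration, not the code that actually follows in the file:

    #include <cassert>
    #include "llvm/IR/BasicBlock.h"
    #include "llvm/IR/Instruction.h"

    // Returns true iff Assume appears strictly before Cxt in their shared
    // basic block, scanning from the top of the block.
    static bool assumeComesFirst(const llvm::Instruction *Assume,
                                 const llvm::Instruction *Cxt) {
      assert(Assume->getParent() == Cxt->getParent() && "same block expected");
      for (const llvm::Instruction &I : *Assume->getParent()) {
        if (&I == Assume)
          return true;
        if (&I == Cxt)
          return false;
      }
      return false;
    }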
@@ -574,7 +574,7 @@ static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
if (Q.isExcluded(I))
continue;
-// Warning: This loop can end up being somewhat performance sensetive.
+// Warning: This loop can end up being somewhat performance sensitive.
// We're running this loop for once for each value queried resulting in a
// runtime of ~O(#assumes * #values).
@@ -856,7 +856,7 @@ static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
/// Compute known bits from a shift operator, including those with a
/// non-constant shift amount. Known is the output of this function. Known2 is a
/// pre-allocated temporary with the same bit width as Known. KZF and KOF are
-/// operator-specific functors that, given the known-zero or known-one bits
+/// operator-specific functions that, given the known-zero or known-one bits
/// respectively, and a shift amount, compute the implied known-zero or
/// known-one bits of the shift operator's result respectively for that shift
/// amount. The results from calling KZF and KOF are conservatively combined for
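
The KZF/KOF protocol described above takes the operand's known bits plus a shift amount and returns the implied known bits of the result. A hedged sketch of what such a callback could look like for shl (not the exact functor from this file):

    #include "llvm/ADT/APInt.h"
    using llvm::APInt;

    // Bits known zero in the operand stay known zero after the shift,
    // and the vacated low bits of a shl result are always zero.
    static APInt exampleShlKZF(const APInt &KnownZero, unsigned ShiftAmt) {
      return (KnownZero << ShiftAmt) |
             APInt::getLowBitsSet(KnownZero.getBitWidth(), ShiftAmt);
    }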
@@ -2192,7 +2192,7 @@ static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
/// (itself), but other cases can give us information. For example, immediately
/// after an "ashr X, 2", we know that the top 3 bits are all equal to each
/// other, so we return 3. For vectors, return the number of sign bits for the
-/// vector element with the mininum number of known sign bits.
+/// vector element with the minimum number of known sign bits.
static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
const Query &Q) {
assert(Depth <= MaxDepth && "Limit Search Depth");
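
The ashr example in this comment can be made concrete: for %y = ashr i8 %x, 2, if %x has S known sign bits then %y has min(S + 2, 8), so even with only the sign bit known the query reports at least 3. A hedged sketch against the public entry point (the wrapper name is made up):

    #include "llvm/Analysis/ValueTracking.h"
    #include "llvm/IR/DataLayout.h"

    // Reports how many high bits of Y are known to equal its sign bit.
    static unsigned signBitsOf(const llvm::Value *Y,
                               const llvm::DataLayout &DL) {
      return llvm::ComputeNumSignBits(Y, DL);
    }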
@@ -3003,7 +3003,7 @@ static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
if (!V)
return nullptr;
-// Insert the value in the new (sub) aggregrate
+// Insert the value in the new (sub) aggregate
return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
"tmp", InsertBefore);
}
@@ -3032,9 +3032,9 @@ static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
}
-/// Given an aggregrate and an sequence of indices, see if
-/// the scalar value indexed is already around as a register, for example if it
-/// were inserted directly into the aggregrate.
+/// Given an aggregate and a sequence of indices, see if the scalar value
+/// indexed is already around as a register, for example if it was inserted
+/// directly into the aggregate.
///
/// If InsertBefore is not null, this function will duplicate (modified)
/// insertvalues when a part of a nested struct is extracted.
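
The query these doc comments describe is exposed as llvm::FindInsertedValue. A minimal usage sketch (the wrapper name is made up):

    #include "llvm/Analysis/ValueTracking.h"

    // If Agg was assembled by insertvalue instructions, return the scalar
    // already live at index Idx, or nullptr when it cannot be recovered.
    static llvm::Value *scalarAtIndex(llvm::Value *Agg, unsigned Idx) {
      return llvm::FindInsertedValue(Agg, Idx);
    }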

lib/Analysis/VectorUtils.cpp

@@ -163,7 +163,7 @@ Value *llvm::getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
return nullptr;
// Try to remove a gep instruction to make the pointer (actually index at this
-// point) easier analyzable. If OrigPtr is equal to Ptr we are analzying the
+// point) easier analyzable. If OrigPtr is equal to Ptr we are analyzing the
// pointer, otherwise, we are analyzing the index.
Value *OrigPtr = Ptr;
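
The pointer-versus-index distinction this comment draws is internal to the analysis; callers only see the resulting stride. A hedged usage sketch (the wrapper name is made up; SE and Lp are assumed to come from the usual analysis managers, and the header is where the declaration lives in current trees):

    #include "llvm/Analysis/VectorUtils.h"

    // Returns the loop-invariant stride if Ptr advances by a fixed amount
    // per iteration of Lp, or nullptr otherwise.
    static llvm::Value *strideOf(llvm::Value *Ptr, llvm::ScalarEvolution *SE,
                                 llvm::Loop *Lp) {
      return llvm::getStrideFromPointer(Ptr, SE, Lp);
    }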