
[NFC] fix trivial typos in comments

"the the" -> "the"

llvm-svn: 323176
Hiroshi Inoue 2018-01-23 05:49:30 +00:00
parent 4281c34f62
commit 2f37cdd743
17 changed files with 21 additions and 21 deletions


@@ -7,7 +7,7 @@
|* *|
|*===----------------------------------------------------------------------===*|
|* *|
-|* This file defines types used by the the C interface to LLVM. *|
+|* This file defines types used by the C interface to LLVM. *|
|* *|
\*===----------------------------------------------------------------------===*/


@@ -793,7 +793,7 @@ extern void thinlto_codegen_set_cache_pruning_interval(thinlto_code_gen_t cg,
/**
* Sets the maximum cache size that can be persistent across build, in terms of
-* percentage of the available space on the the disk. Set to 100 to indicate
+* percentage of the available space on the disk. Set to 100 to indicate
* no limit, 50 to indicate that the cache size will not be left over half the
* available space. A value over 100 will be reduced to 100, a value of 0 will
* be ignored. An unspecified default value will be applied.
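
A hedged usage sketch for this C API: only thinlto_codegen_set_cache_pruning_interval is named in the hunk above, so the codegen handle (assumed to come from thinlto_create_codegen()) and the thinlto_codegen_set_cache_dir call are assumptions, not part of this change.

    #include "llvm-c/lto.h"

    /* Minimal sketch: enable the ThinLTO cache and prune it at most once
     * every 20 minutes (the interval is expressed in seconds). */
    static void configure_cache(thinlto_code_gen_t cg) {
      thinlto_codegen_set_cache_dir(cg, "/tmp/thinlto.cache"); /* assumed setter */
      thinlto_codegen_set_cache_pruning_interval(cg, 1200);
    }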


@@ -397,7 +397,7 @@ public:
DEBUG(dbgs() << "Running an SCC pass across the RefSCC: " << *RC
<< "\n");
-// Push the initial SCCs in reverse post-order as we'll pop off the the
+// Push the initial SCCs in reverse post-order as we'll pop off the
// back and so see this in post-order.
for (LazyCallGraph::SCC &C : llvm::reverse(*RC))
CWorklist.insert(&C);


@@ -170,7 +170,7 @@ InlineParams getInlineParams(int Threshold);
/// line options. If -inline-threshold option is not explicitly passed,
/// the default threshold is computed from \p OptLevel and \p SizeOptLevel.
/// An \p OptLevel value above 3 is considered an aggressive optimization mode.
-/// \p SizeOptLevel of 1 corresponds to the the -Os flag and 2 corresponds to
+/// \p SizeOptLevel of 1 corresponds to the -Os flag and 2 corresponds to
/// the -Oz flag.
InlineParams getInlineParams(unsigned OptLevel, unsigned SizeOptLevel);
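
A minimal sketch of calling the declaration above, assuming it lives in "llvm/Analysis/InlineCost.h": it derives inliner thresholds for an -O2 build with -Os style size optimization (SizeOptLevel 1, per the comment; 2 would mean -Oz).

    #include "llvm/Analysis/InlineCost.h"

    // Thresholds for an optimization level of 2 combined with -Os.
    llvm::InlineParams getSizeAwareInlineParams() {
      return llvm::getInlineParams(/*OptLevel=*/2, /*SizeOptLevel=*/1);
    }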


@@ -391,7 +391,7 @@ public:
/// Access to memory operands of the instruction
mmo_iterator memoperands_begin() const { return MemRefs; }
mmo_iterator memoperands_end() const { return MemRefs + NumMemRefs; }
-/// Return true if we don't have any memory operands which described the the
+/// Return true if we don't have any memory operands which described the
/// memory access done by this instruction. If this is true, calling code
/// must be conservative.
bool memoperands_empty() const { return NumMemRefs == 0; }
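
A hedged sketch of the conservative pattern the comment above asks for, using the accessors in this hunk plus MachineInstr's mayLoad()/mayStore() queries (assumed available from "llvm/CodeGen/MachineInstr.h"):

    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/MachineMemOperand.h"

    // True when nothing precise is known about MI's memory behaviour: the
    // instruction may access memory but carries no memory operands, or one
    // of its accesses is volatile.
    static bool hasUnknownOrVolatileAccess(const llvm::MachineInstr &MI) {
      if (!MI.mayLoad() && !MI.mayStore())
        return false;                     // no memory access at all
      if (MI.memoperands_empty())
        return true;                      // undescribed access: be conservative
      for (auto It = MI.memoperands_begin(), E = MI.memoperands_end(); It != E; ++It)
        if ((*It)->isVolatile())
          return true;
      return false;
    }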


@@ -635,8 +635,8 @@
return true;
}
-/// Generate code to reduce the loop iteration by one and check if the loop is
-/// finished. Return the value/register of the the new loop count. We need
+/// Generate code to reduce the loop iteration by one and check if the loop
+/// is finished. Return the value/register of the new loop count. We need
/// this function when peeling off one or more iterations of a loop. This
/// function assumes the nth iteration is peeled first.
virtual unsigned reduceLoopCount(MachineBasicBlock &MBB, MachineInstr *IndVar,


@@ -820,7 +820,7 @@ public:
/// Return true if lowering to a jump table is suitable for a set of case
/// clusters which may contain \p NumCases cases, \p Range range of values.
/// FIXME: This function check the maximum table size and density, but the
-/// minimum size is not checked. It would be nice if the the minimum size is
+/// minimum size is not checked. It would be nice if the minimum size is
/// also combined within this function. Currently, the minimum size check is
/// performed in findJumpTable() in SelectionDAGBuiler and
/// getEstimatedNumberOfCaseClusters() in BasicTTIImpl.


@@ -20,7 +20,7 @@ class TypeCollection;
class TypeVisitorCallbacks;
enum VisitorDataSource {
-VDS_BytesPresent, // The record bytes are passed into the the visitation
+VDS_BytesPresent, // The record bytes are passed into the visitation
// function. The algorithm should first deserialize them
// before passing them on through the pipeline.
VDS_BytesExternal // The record bytes are not present, and it is the
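
A hedged sketch of how the data source reaches a visitation entry point; visitTypeRecord and its parameter order are assumptions about the surrounding CodeView visitor API, while the enumerators are the ones defined in this hunk:

    #include "llvm/DebugInfo/CodeView/CVTypeVisitor.h"
    #include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"
    #include "llvm/Support/Error.h"

    // With VDS_BytesPresent the record bytes are deserialized first and then
    // fed through the callback pipeline; VDS_BytesExternal would leave
    // deserialization to the callbacks themselves.
    llvm::Error visitOneRecord(llvm::codeview::CVType &Record,
                               llvm::codeview::TypeVisitorCallbacks &Callbacks) {
      return llvm::codeview::visitTypeRecord(Record, Callbacks,
                                             llvm::codeview::VDS_BytesPresent);
    }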


@@ -531,7 +531,7 @@ enum LineFlags : uint16_t {
LF_HaveColumns = 1, // CV_LINES_HAVE_COLUMNS
};
-/// Data in the the SUBSEC_FRAMEDATA subection.
+/// Data in the SUBSEC_FRAMEDATA subection.
struct FrameData {
support::ulittle32_t RvaStart;
support::ulittle32_t CodeSize;


@@ -200,7 +200,7 @@ private:
/// references for the .debug_info section
unsigned verifyDebugInfoReferences();
-/// Verify the the DW_AT_stmt_list encoding and value and ensure that no
+/// Verify the DW_AT_stmt_list encoding and value and ensure that no
/// compile units that have the same DW_AT_stmt_list value.
void verifyDebugLineStmtOffsets();


@@ -1384,7 +1384,7 @@ public:
///
/// The above 3 components are encoded into a 32bit unsigned integer in
/// order. If the lowest bit is 1, the current component is empty, and the
-/// next component will start in the next bit. Otherwise, the the current
+/// next component will start in the next bit. Otherwise, the current
/// component is non-empty, and its content starts in the next bit. The
/// length of each components is either 5 bit or 12 bit: if the 7th bit
/// is 0, the bit 2~6 (5 bits) are used to represent the component; if the


@@ -465,7 +465,7 @@ struct StatepointDirectives {
/// AS.
StatepointDirectives parseStatepointDirectivesFromAttrs(AttributeList AS);
-/// Return \c true if the the \p Attr is an attribute that is a statepoint
+/// Return \c true if the \p Attr is an attribute that is a statepoint
/// directive.
bool isStatepointDirectiveAttr(Attribute Attr);
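
A hedged sketch exercising the predicate declared above, assuming these declarations live in "llvm/IR/Statepoint.h"; the "statepoint-id" string attribute is only an illustrative guess at what counts as a directive:

    #include "llvm/IR/Attributes.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Statepoint.h"

    // Build a string attribute and ask whether it is a statepoint directive.
    bool isIdAttrADirective(llvm::LLVMContext &Ctx) {
      llvm::Attribute A = llvm::Attribute::get(Ctx, "statepoint-id", "42");
      return llvm::isStatepointDirectiveAttr(A);
    }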


@@ -168,8 +168,8 @@ public:
/**
* Sets the maximum cache size that can be persistent across build, in terms
-* of percentage of the available space on the the disk. Set to 100 to
-* indicate no limit, 50 to indicate that the cache size will not be left over
+* of percentage of the available space on the disk. Set to 100 to indicate
+* no limit, 50 to indicate that the cache size will not be left over
* half the available space. A value over 100 will be reduced to 100, and a
* value of 0 will be ignored.
*


@@ -174,7 +174,7 @@ public:
AnalysisImpls.push_back(pir);
}
-/// Clear cache that is used to connect a pass to the the analysis (PassInfo).
+/// Clear cache that is used to connect a pass to the analysis (PassInfo).
void clearAnalysisImpls() {
AnalysisImpls.clear();
}


@@ -178,7 +178,7 @@ VALUE_PROF_FUNC_PARAM(uint64_t, LargeValue, Type::getInt64Ty(Ctx))
* functions are profiled by the instrumented code. The target addresses are
* written in the raw profile data and converted to target function name's MD5
* hash by the profile reader during deserialization. Typically, this happens
-* when the the raw profile data is read during profile merging.
+* when the raw profile data is read during profile merging.
*
* For this remapping the ProfData is used. ProfData contains both the function
* name hash and the function address.


@@ -56,7 +56,7 @@ public:
/// otherwise returns an appropriate error code.
Error writeBytes(ArrayRef<uint8_t> Buffer);
-/// Write the the integer \p Value to the underlying stream in the
+/// Write the integer \p Value to the underlying stream in the
/// specified endianness. On success, updates the offset so that
/// subsequent writes occur at the next unwritten position.
///
@@ -80,7 +80,7 @@ public:
return writeInteger<U>(static_cast<U>(Num));
}
-/// Write the the string \p Str to the underlying stream followed by a null
+/// Write the string \p Str to the underlying stream followed by a null
/// terminator. On success, updates the offset so that subsequent writes
/// occur at the next unwritten position. \p Str need not be null terminated
/// on input.
@@ -89,7 +89,7 @@
/// otherwise returns an appropriate error code.
Error writeCString(StringRef Str);
-/// Write the the string \p Str to the underlying stream without a null
+/// Write the string \p Str to the underlying stream without a null
/// terminator. On success, updates the offset so that subsequent writes
/// occur at the next unwritten position.
///
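
A hedged sketch combining the write helpers documented above; the AppendingBinaryByteStream backing store is an assumption, and any writable stream would do:

    #include "llvm/Support/BinaryByteStream.h"
    #include "llvm/Support/BinaryStreamWriter.h"
    #include "llvm/Support/Error.h"
    #include <cstdint>

    // Write a 32-bit field followed by a null-terminated name; each
    // successful call advances the writer's offset.
    llvm::Error writeSampleRecord(llvm::AppendingBinaryByteStream &Stream) {
      llvm::BinaryStreamWriter Writer(Stream);
      if (llvm::Error E = Writer.writeInteger<uint32_t>(5))
        return E;
      return Writer.writeCString("hello"); // terminator appended by the writer
    }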


@@ -37,7 +37,7 @@ struct CachePruningPolicy {
std::chrono::seconds Expiration = std::chrono::hours(7 * 24); // 1w
/// The maximum size for the cache directory, in terms of percentage of the
-/// available space on the the disk. Set to 100 to indicate no limit, 50 to
+/// available space on the disk. Set to 100 to indicate no limit, 50 to
/// indicate that the cache size will not be left over half the available disk
/// space. A value over 100 will be reduced to 100. A value of 0 disables the
/// percentage size-based pruning.
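
A hedged sketch of using the policy; Expiration is the field shown above, while pruneCache and the defaults of the other fields are assumptions about the surrounding "llvm/Support/CachePruning.h":

    #include "llvm/Support/CachePruning.h"

    // Expire cache entries after one day instead of the one-week default,
    // then prune a (hypothetical) cache directory with the otherwise
    // default policy.
    void pruneDaily() {
      llvm::CachePruningPolicy Policy;
      Policy.Expiration = std::chrono::hours(24);
      llvm::pruneCache("/tmp/my.cache", Policy); // assumed helper
    }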