
Fix "the the" in comments.

llvm-svn: 240112
Eric Christopher 2015-06-19 01:53:21 +00:00
parent 30d23ac69f
commit 0b2dfae3ba
32 changed files with 40 additions and 40 deletions


@@ -161,7 +161,7 @@ ADDITIONAL OPTIONS
 .. option:: --show-tests
-List all of the the discovered tests and exit.
+List all of the discovered tests and exit.
 EXIT STATUS
 -----------


@@ -112,7 +112,7 @@ Here we show how to use lib/Fuzzer on something real, yet simple: pcre2_::
 (cd pcre; ./autogen.sh; CC="clang -fsanitize=address $COV_FLAGS" ./configure --prefix=`pwd`/../inst && make -j && make install)
 # Build lib/Fuzzer files.
 clang -c -g -O2 -std=c++11 Fuzzer/*.cpp -IFuzzer
-# Build the the actual function that does something interesting with PCRE2.
+# Build the actual function that does something interesting with PCRE2.
 cat << EOF > pcre_fuzzer.cc
 #include <string.h>
 #include "pcre2posix.h"

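The heredoc that defines pcre_fuzzer.cc is cut off by the diff context above and is not reproduced here. Purely for orientation, a minimal fuzz target of the kind that section builds could look like the sketch below; it assumes the LLVMFuzzerTestOneInput entry point and the POSIX wrapper API declared in pcre2posix.h, and it is not the contents of the actual file.

#include <stdint.h>
#include <string.h>
#include <string>
#include "pcre2posix.h"

// Hypothetical sketch only: treat the fuzz input as a regular expression and
// check whether the PCRE2 POSIX wrapper can compile it.
extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
  if (Size == 0)
    return 0;
  std::string Pattern(reinterpret_cast<const char *>(Data), Size);
  regex_t Preg;
  if (regcomp(&Preg, Pattern.c_str(), 0) == 0)
    regfree(&Preg);   // only release state that was actually created
  return 0;           // fuzz targets conventionally return 0
}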

@@ -873,7 +873,7 @@ template <class BT> class BlockFrequencyInfoImpl : BlockFrequencyInfoImplBase {
 ///
 /// \pre \a computeMassInLoop() has been called for each subloop of \c
 /// OuterLoop.
-/// \pre \c Insert points at the the last loop successfully processed by \a
+/// \pre \c Insert points at the last loop successfully processed by \a
 /// computeMassInLoop().
 /// \pre \c OuterLoop has irreducible SCCs.
 void computeIrreducibleMass(LoopData *OuterLoop,


@@ -124,7 +124,7 @@ public:
 static ErrorOr<std::unique_ptr<MemoryBuffer>>
 getFileOrSTDIN(const Twine &Filename, int64_t FileSize = -1);
-/// Map a subrange of the the specified file as a MemoryBuffer.
+/// Map a subrange of the specified file as a MemoryBuffer.
 static ErrorOr<std::unique_ptr<MemoryBuffer>>
 getFileSlice(const Twine &Filename, uint64_t MapSize, uint64_t Offset);

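Not part of the commit; a hedged usage sketch of the getFileSlice API documented above, with an invented path and sizes:

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/MemoryBuffer.h"
#include <memory>

// Map the first 4 KiB of a (hypothetical) file and report whether it worked.
static bool mapHeader(const char *Path) {
  llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> BufOrErr =
      llvm::MemoryBuffer::getFileSlice(Path, /*MapSize=*/4096, /*Offset=*/0);
  if (BufOrErr.getError())
    return false;                                  // open or map failed
  llvm::StringRef Contents = (*BufOrErr)->getBuffer();
  return !Contents.empty();
}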

@@ -2394,7 +2394,7 @@ public:
 /// outgoing token chain. It calls LowerCall to do the actual lowering.
 std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;
-/// This hook must be implemented to lower calls into the the specified
+/// This hook must be implemented to lower calls into the specified
 /// DAG. The outgoing arguments to the call are described by the Outs array,
 /// and the values to be returned by the call are described by the Ins
 /// array. The implementation should fill in the InVals array with legal-type


@@ -1702,7 +1702,7 @@ unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
 unsigned NumElim = 0;
 DenseMap<const SCEV *, PHINode *> ExprToIVMap;
-// Process phis from wide to narrow. Mapping wide phis to the their truncation
+// Process phis from wide to narrow. Map wide phis to their truncation
 // so narrow phis can reuse them.
 for (SmallVectorImpl<PHINode*>::const_iterator PIter = Phis.begin(),
 PEnd = Phis.end(); PIter != PEnd; ++PIter) {


@@ -464,7 +464,7 @@ bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
 Value *ShouldStore =
 Builder.CreateICmpEQ(Loaded, CI->getCompareOperand(), "should_store");
-// If the the cmpxchg doesn't actually need any ordering when it fails, we can
+// If the cmpxchg doesn't actually need any ordering when it fails, we can
 // jump straight past that fence instruction (if it exists).
 Builder.CreateCondBr(ShouldStore, TryStoreBB, FailureBB);

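Not part of the commit; the failure-ordering idea in the comment above, restated with standard C++11 atomics (the function and names are invented for illustration):

#include <atomic>

// When the compare-exchange needs no ordering on failure, a weaker failure
// ordering can be requested, so no extra fence is needed on that path.
static bool tryPublish(std::atomic<int> &Slot, int &Expected, int Desired) {
  return Slot.compare_exchange_strong(Expected, Desired,
                                      std::memory_order_acq_rel,   // ordering when the store happens
                                      std::memory_order_relaxed);  // weaker ordering when it fails
}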

@@ -61,10 +61,10 @@ class ImplicitNullChecks : public MachineFunctionPass {
 // The block the check resides in.
 MachineBasicBlock *CheckBlock;
-// The block branched to if the the pointer is non-null.
+// The block branched to if the pointer is non-null.
 MachineBasicBlock *NotNullSucc;
-// The block branched to if the the pointer is null.
+// The block branched to if the pointer is null.
 MachineBasicBlock *NullSucc;
 NullCheck()

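Not part of the commit; a hedged source-level illustration of the control flow those three fields describe (the function is invented):

static int deref(int *P) {
  if (P == nullptr)   // CheckBlock: the explicit null check
    return -1;        // NullSucc: reached when the pointer is null
  return *P;          // NotNullSucc: reached when the pointer is non-null
}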

@@ -2150,7 +2150,7 @@ void GenericSchedulerBase::setPolicy(CandPolicy &Policy,
 bool IsPostRA,
 SchedBoundary &CurrZone,
 SchedBoundary *OtherZone) {
-// Apply preemptive heuristics based on the the total latency and resources
+// Apply preemptive heuristics based on the total latency and resources
 // inside and outside this zone. Potential stalls should be considered before
 // following this policy.


@@ -2296,7 +2296,7 @@ void WinEHPrepare::findCleanupHandlers(LandingPadActions &Actions,
 // value for this block but the value is a nullptr. This means that
 // we have previously analyzed the block and determined that it did
 // not contain any cleanup code. Based on the earlier analysis, we
-// know the the block must end in either an unconditional branch, a
+// know the block must end in either an unconditional branch, a
 // resume or a conditional branch that is predicated on a comparison
 // with a selector. Either the resume or the selector dispatch
 // would terminate the search for cleanup code, so the unconditional


@@ -15,7 +15,7 @@ int columnWidth(StringRef Text) {
 bool isPrint(int UCS) {
 #if LLVM_ON_WIN32
-// Restrict characters that we'll try to print to the the lower part of ASCII
+// Restrict characters that we'll try to print to the lower part of ASCII
 // except for the control characters (0x20 - 0x7E). In general one can not
 // reliably output code points U+0080 and higher using narrow character C/C++
 // output functions in Windows, because the meaning of the upper 128 codes is

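Not part of the commit; the range described in the comment above, restated as a hedged one-line predicate (not the actual body of isPrint):

// Printable ASCII only: 0x20 (space) through 0x7E ('~').
static bool isPrintableNarrowASCII(int UCS) { return UCS >= 0x20 && UCS <= 0x7E; }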

@@ -1424,7 +1424,7 @@ static SDValue LowerXOR(SDValue Op, SelectionDAG &DAG) {
 ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FVal);
 ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TVal);
-// The the values aren't constants, this isn't the pattern we're looking for.
+// The values aren't constants, this isn't the pattern we're looking for.
 if (!CFVal || !CTVal)
 return Op;
@@ -3420,7 +3420,7 @@ SDValue AArch64TargetLowering::LowerFCOPYSIGN(SDValue Op,
 EltVT = MVT::i64;
 VecVT = MVT::v2i64;
-// We want to materialize a mask with the the high bit set, but the AdvSIMD
+// We want to materialize a mask with the high bit set, but the AdvSIMD
 // immediate moves cannot materialize that in a single instruction for
 // 64-bit elements. Instead, materialize zero and then negate it.
 EltMask = 0;


@@ -52,7 +52,7 @@ getVariant(uint64_t LLVMDisassembler_VariantKind) {
 /// returns zero and isBranch is Success then a symbol look up for
 /// Address + Value is done and if a symbol is found an MCExpr is created with
 /// that, else an MCExpr with Address + Value is created. If GetOpInfo()
-/// returns zero and isBranch is Fail then the the Opcode of the MCInst is
+/// returns zero and isBranch is Fail then the Opcode of the MCInst is
 /// tested and for ADRP an other instructions that help to load of pointers
 /// a symbol look up is done to see it is returns a specific reference type
 /// to add to the comment stream. This function returns Success if it adds


@@ -132,7 +132,7 @@ enum amd_code_property_mask_t {
 /// private memory do not exceed this size. For example, if the
 /// element size is 4 (32-bits or dword) and a 64-bit value must be
 /// loaded, the finalizer will generate two 32-bit loads. This
-/// ensures that the interleaving will get the the work-item
+/// ensures that the interleaving will get the work-item
 /// specific dword for both halves of the 64-bit value. If it just
 /// did a 64-bit load then it would get one dword which belonged to
 /// its own work-item, but the second dword would belong to the


@@ -1806,7 +1806,7 @@ void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
 }
 MachineBasicBlock &MBB = *MI->getParent();
-// Extract the the ptr from the resource descriptor.
+// Extract the ptr from the resource descriptor.
 // SRsrcPtrLo = srsrc:sub0
 unsigned SRsrcPtrLo = buildExtractSubReg(MI, MRI, *SRsrc,


@@ -5841,7 +5841,7 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
 // do and don't have a cc_out optional-def operand. With some spot-checks
 // of the operand list, we can figure out which variant we're trying to
 // parse and adjust accordingly before actually matching. We shouldn't ever
-// try to remove a cc_out operand that was explicitly set on the the
+// try to remove a cc_out operand that was explicitly set on the
 // mnemonic, of course (CarrySetting == true). Reason number #317 the
 // table driven matcher doesn't fit well with the ARM instruction set.
 if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands))


@@ -1065,7 +1065,7 @@ ARMMCCodeEmitter::getHiLo16ImmOpValue(const MCInst &MI, unsigned OpIdx,
 // it's just a plain immediate expression, previously those evaluated to
 // the lower 16 bits of the expression regardless of whether
 // we have a movt or a movw, but that led to misleadingly results.
-// This is now disallowed in the the AsmParser in validateInstruction()
+// This is disallowed in the AsmParser in validateInstruction()
 // so this should never happen.
 llvm_unreachable("expression without :upper16: or :lower16:");
 }


@@ -271,7 +271,7 @@ namespace X86II {
 /// register DI/EDI/ESI.
 RawFrmDst = 9,
-/// RawFrmSrc - This form is for instructions that use the the source index
+/// RawFrmSrc - This form is for instructions that use the source index
 /// register SI/ESI/ERI with a possible segment override, and also the
 /// destination index register DI/ESI/RDI.
 RawFrmDstSrc = 10,


@@ -44,7 +44,7 @@ class FixupLEAPass : public MachineFunctionPass {
 /// \brief Given a machine register, look for the instruction
 /// which writes it in the current basic block. If found,
 /// try to replace it with an equivalent LEA instruction.
-/// If replacement succeeds, then also process the the newly created
+/// If replacement succeeds, then also process the newly created
 /// instruction.
 void seekLEAFixup(MachineOperand &p, MachineBasicBlock::iterator &I,
 MachineFunction::iterator MFI);


@@ -5446,7 +5446,7 @@ static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
 ///
 /// Otherwise, the first horizontal binop dag node takes as input the lower
 /// 128-bit of V0 and the lower 128-bit of V1, and the second horizontal binop
-/// dag node takes the the upper 128-bit of V0 and the upper 128-bit of V1.
+/// dag node takes the upper 128-bit of V0 and the upper 128-bit of V1.
 /// Example:
 /// HADD V0_LO, V1_LO
 /// HADD V0_HI, V1_HI


@@ -212,7 +212,7 @@ static StoreInst *findSafeStoreForStoreStrongContraction(LoadInst *Load,
 break;
 // Now we know that we have not seen either the store or the release. If I
-// is the the release, mark that we saw the release and continue.
+// is the release, mark that we saw the release and continue.
 Instruction *Inst = &*I;
 if (Inst == Release) {
 SawRelease = true;


@@ -759,7 +759,7 @@ bool JumpThreading::ProcessBlock(BasicBlock *BB) {
 if (CmpInst *CondCmp = dyn_cast<CmpInst>(CondInst)) {
 // If we're branching on a conditional, LVI might be able to determine
-// it's value at the the branch instruction. We only handle comparisons
+// it's value at the branch instruction. We only handle comparisons
 // against a constant at this time.
 // TODO: This should be extended to handle switches as well.
 BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator());

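Not part of the commit; a hedged source-level example of the case the comment describes, with an invented function:

// On the true edge of the outer branch, lazy value information knows X > 7,
// so the comparison against the constant 0 is provably true and the inner
// branch can be threaded away.
static int classify(int X) {
  if (X > 7) {
    if (X != 0)
      return 1;
  }
  return 0;
}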

@@ -282,7 +282,7 @@ bool SampleProfileLoader::computeBlockWeights(Function &F) {
 /// \brief Find equivalence classes for the given block.
 ///
 /// This finds all the blocks that are guaranteed to execute the same
-/// number of times as \p BB1. To do this, it traverses all the the
+/// number of times as \p BB1. To do this, it traverses all the
 /// descendants of \p BB1 in the dominator or post-dominator tree.
 ///
 /// A block BB2 will be in the same equivalence class as \p BB1 if


@@ -4058,7 +4058,7 @@ static bool SwitchToLookupTable(SwitchInst *SI, IRBuilder<> &Builder,
 return false;
 // Figure out the corresponding result for each case value and phi node in the
-// common destination, as well as the the min and max case values.
+// common destination, as well as the min and max case values.
 assert(SI->case_begin() != SI->case_end());
 SwitchInst::CaseIt CI = SI->case_begin();
 ConstantInt *MinCaseVal = CI.getCaseValue();

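Not part of the commit; a hedged before/after sketch of the switch-to-lookup-table rewrite this function performs, with invented case values:

// Before: a dense switch whose cases each yield one constant result.
static int widthSwitch(unsigned Kind) {
  switch (Kind) {
  case 2: return 16;
  case 3: return 32;
  case 4: return 64;
  default: return 0;
  }
}

// After (conceptually): index a constant table by (value - min case value),
// which is why the min and max case values mentioned above are needed.
static int widthTable(unsigned Kind) {
  static const int Table[3] = {16, 32, 64};
  return (Kind >= 2 && Kind <= 4) ? Table[Kind - 2] : 0;
}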

@@ -24,7 +24,7 @@ false:
 }
 ; Check that we manage to form a zextload is an operation with only one
-; argument to explicitly extend is in the the way.
+; argument to explicitly extend is in the way.
 ; OPTALL-LABEL: @promoteOneArg
 ; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
 ; OPT-NEXT: [[ZEXT:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
@@ -49,7 +49,7 @@ false:
 }
 ; Check that we manage to form a sextload is an operation with only one
-; argument to explicitly extend is in the the way.
+; argument to explicitly extend is in the way.
 ; Version with sext.
 ; OPTALL-LABEL: @promoteOneArgSExt
 ; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
@@ -74,7 +74,7 @@ false:
 }
 ; Check that we manage to form a zextload is an operation with two
-; arguments to explicitly extend is in the the way.
+; arguments to explicitly extend is in the way.
 ; Extending %add will create two extensions:
 ; 1. One for %b.
 ; 2. One for %t.
@@ -113,7 +113,7 @@ false:
 }
 ; Check that we manage to form a sextload is an operation with two
-; arguments to explicitly extend is in the the way.
+; arguments to explicitly extend is in the way.
 ; Version with sext.
 ; OPTALL-LABEL: @promoteTwoArgSExt
 ; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p


@@ -14,7 +14,7 @@
 ; EG: {{^}}local_memory_two_objects:
-; We would like to check the the lds writes are using different
+; We would like to check the lds writes are using different
 ; addresses, but due to variations in the scheduler, we can't do
 ; this consistently on evergreen GPUs.
 ; EG: LDS_WRITE


@@ -4,7 +4,7 @@
 ; RUN: llc -mtriple thumb-unknown-linux-gnueabi -filetype asm -o - %s -disable-fp-elim | FileCheck %s --check-prefix=CHECK-THUMB-FP-ELIM
 ; Tests that the initial space allocated to the varargs on the stack is
-; taken into account in the the .cfi_ directives.
+; taken into account in the .cfi_ directives.
 ; Generated from the C program:
 ; #include <stdarg.h>


@@ -18,7 +18,7 @@
 ; RUN: llc -march=mips64 -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64-INV %s
 ; RUN: llc -march=mips64el -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64-INV %s
-; Test the the callee-saved registers are callee-saved as specified by section
+; Test the callee-saved registers are callee-saved as specified by section
 ; 2 of the MIPSpro N32 Handbook and section 3 of the SYSV ABI spec.
 define void @gpr_clobber() nounwind {


@@ -30,7 +30,7 @@ false:
 }
 ; Check that we manage to form a zextload is an operation with only one
-; argument to explicitly extend is in the the way.
+; argument to explicitly extend is in the way.
 ; OPTALL-LABEL: @promoteOneArg
 ; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
 ; OPT-NEXT: [[ZEXT:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
@@ -55,7 +55,7 @@ false:
 }
 ; Check that we manage to form a sextload is an operation with only one
-; argument to explicitly extend is in the the way.
+; argument to explicitly extend is in the way.
 ; Version with sext.
 ; OPTALL-LABEL: @promoteOneArgSExt
 ; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
@@ -80,7 +80,7 @@ false:
 }
 ; Check that we manage to form a zextload is an operation with two
-; arguments to explicitly extend is in the the way.
+; arguments to explicitly extend is in the way.
 ; Extending %add will create two extensions:
 ; 1. One for %b.
 ; 2. One for %t.
@@ -119,7 +119,7 @@ false:
 }
 ; Check that we manage to form a sextload is an operation with two
-; arguments to explicitly extend is in the the way.
+; arguments to explicitly extend is in the way.
 ; Version with sext.
 ; OPTALL-LABEL: @promoteTwoArgSExt
 ; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p


@@ -788,7 +788,7 @@ static void DumpLiteralPointerSection(MachOObjectFile *O,
 // Set the size of the literal pointer.
 uint32_t lp_size = O->is64Bit() ? 8 : 4;
-// Collect the external relocation symbols for the the literal pointers.
+// Collect the external relocation symbols for the literal pointers.
 std::vector<std::pair<uint64_t, SymbolRef>> Relocs;
 for (const RelocationRef &Reloc : Section.relocations()) {
 DataRefImpl Rel;


@@ -97,7 +97,7 @@ static size_t getNumLengthAsString(uint64_t num) {
 return result.size();
 }
-/// @brief Return the the printing format for the Radix.
+/// @brief Return the printing format for the Radix.
 static const char *getRadixFmt(void) {
 switch (Radix) {
 case octal:

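Not part of the commit; a hedged sketch of what a radix-to-format helper like getRadixFmt typically returns. Only the octal enumerator appears in the hunk above; the other names and the exact format strings are assumptions:

#include <cinttypes>

enum RadixTy { octal, decimal, hexadecimal };

// Map a radix to a printf-style format for 64-bit unsigned values.
static const char *radixFormat(RadixTy R) {
  switch (R) {
  case octal:       return "%" PRIo64;
  case decimal:     return "%" PRIu64;
  case hexadecimal: return "%" PRIx64;
  }
  return "%" PRIu64;
}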

@@ -129,7 +129,7 @@ TEST(AllocatorTest, TestAlignmentPastSlab) {
 // Aligning the current slab pointer is likely to move it past the end of the
 // slab, which would confuse any unsigned comparisons with the difference of
-// the the end pointer and the aligned pointer.
+// the end pointer and the aligned pointer.
 Alloc.Allocate(1024, 8192);
 EXPECT_EQ(2U, Alloc.GetNumSlabs());