mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-11-22 02:33:06 +01:00

[NFC] Remove trailing space

sed -Ei 's/[[:space:]]+$//' include/**/*.{def,h,td} lib/**/*.{cpp,h,td}
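The ** patterns assume a shell with recursive globbing (zsh, or bash with globstar enabled); -E selects extended regular expressions and -i rewrites the matched files in place.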
Jim Lin 2020-02-18 10:48:38 +08:00
parent dfbf17f1a2
commit 0596dad096
21 changed files with 81 additions and 81 deletions


@@ -2690,7 +2690,7 @@ LLVMValueRef LLVMGetNextGlobalIFunc(LLVMValueRef IFunc);
* no previous global aliases.
*/
LLVMValueRef LLVMGetPreviousGlobalIFunc(LLVMValueRef IFunc);
/**
* Retrieves the resolver function associated with this indirect function, or
* NULL if it doesn't not exist.
@@ -2944,7 +2944,7 @@ void LLVMInsertExistingBasicBlockAfterInsertBlock(LLVMBuilderRef Builder,
*/
void LLVMAppendExistingBasicBlock(LLVMValueRef Fn,
LLVMBasicBlockRef BB);
/**
* Create a new basic block without inserting it into a function.
*
@@ -3755,7 +3755,7 @@ LLVMValueRef LLVMBuildArrayMalloc(LLVMBuilderRef, LLVMTypeRef Ty,
LLVMValueRef Val, const char *Name);
/**
-* Creates and inserts a memset to the specified pointer and the
+* Creates and inserts a memset to the specified pointer and the
* specified value.
*
* @see llvm::IRRBuilder::CreateMemSet()
@@ -3768,7 +3768,7 @@ LLVMValueRef LLVMBuildMemSet(LLVMBuilderRef B, LLVMValueRef Ptr,
*
* @see llvm::IRRBuilder::CreateMemCpy()
*/
-LLVMValueRef LLVMBuildMemCpy(LLVMBuilderRef B,
+LLVMValueRef LLVMBuildMemCpy(LLVMBuilderRef B,
LLVMValueRef Dst, unsigned DstAlign,
LLVMValueRef Src, unsigned SrcAlign,
LLVMValueRef Size);
@@ -3777,7 +3777,7 @@ LLVMValueRef LLVMBuildMemCpy(LLVMBuilderRef B,
*
* @see llvm::IRRBuilder::CreateMemMove()
*/
-LLVMValueRef LLVMBuildMemMove(LLVMBuilderRef B,
+LLVMValueRef LLVMBuildMemMove(LLVMBuilderRef B,
LLVMValueRef Dst, unsigned DstAlign,
LLVMValueRef Src, unsigned SrcAlign,
LLVMValueRef Size);
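As a rough illustration of how these builder routines are driven from the C API (a sketch only, not part of this commit; the module and function names below are invented), a caller might emit a 64-byte copy like this:

#include "llvm-c/Core.h"

/* Sketch: build a function that copies 64 bytes from its second argument to its first. */
static void emitCopy64(void) {
  LLVMContextRef Ctx = LLVMContextCreate();
  LLVMModuleRef M = LLVMModuleCreateWithNameInContext("demo", Ctx);
  LLVMBuilderRef B = LLVMCreateBuilderInContext(Ctx);

  LLVMTypeRef I8Ptr = LLVMPointerType(LLVMInt8TypeInContext(Ctx), 0);
  LLVMTypeRef Params[] = {I8Ptr, I8Ptr};
  LLVMValueRef Fn = LLVMAddFunction(
      M, "copy64", LLVMFunctionType(LLVMVoidTypeInContext(Ctx), Params, 2, 0));
  LLVMPositionBuilderAtEnd(B, LLVMAppendBasicBlockInContext(Ctx, Fn, "entry"));

  LLVMValueRef Size = LLVMConstInt(LLVMInt64TypeInContext(Ctx), 64, 0);
  /* DstAlign and SrcAlign are plain byte counts in the C API. */
  LLVMBuildMemCpy(B, LLVMGetParam(Fn, 0), 8, LLVMGetParam(Fn, 1), 8, Size);
  LLVMBuildRetVoid(B);

  LLVMDisposeBuilder(B);
  LLVMDisposeModule(M);
  LLVMContextDispose(Ctx);
}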


@@ -677,7 +677,7 @@ void AliasSet::print(raw_ostream &OS) const {
I.getPointer()->printAsOperand(OS << "(");
if (I.getSize() == LocationSize::unknown())
OS << ", unknown)";
-else
+else
OS << ", " << I.getSize() << ")";
}
}


@@ -47,7 +47,7 @@ bool llvm::parseWidenableBranch(const User *U, Value *&Condition,
Use *C, *WC;
if (parseWidenableBranch(const_cast<User*>(U), C, WC, IfTrueBB, IfFalseBB)) {
-if (C)
+if (C)
Condition = C->get();
else
Condition = ConstantInt::getTrue(IfTrueBB->getContext());
@@ -66,10 +66,10 @@ bool llvm::parseWidenableBranch(User *U, Use *&C,Use *&WC,
auto *Cond = BI->getCondition();
if (!Cond->hasOneUse())
return false;
IfTrueBB = BI->getSuccessor(0);
IfFalseBB = BI->getSuccessor(1);
if (match(Cond, m_Intrinsic<Intrinsic::experimental_widenable_condition>())) {
WC = &BI->getOperandUse(0);
C = nullptr;
@@ -88,7 +88,7 @@ bool llvm::parseWidenableBranch(User *U, Use *&C,Use *&WC,
if (!And)
// Could be a constexpr
return false;
if (match(A, m_Intrinsic<Intrinsic::experimental_widenable_condition>()) &&
A->hasOneUse()) {
WC = &And->getOperandUse(0);
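For context, a hedged sketch of how a client could call the Value-based overload shown above (the wrapper function is invented; the out parameters follow the definition in this file):

#include "llvm/Analysis/GuardUtils.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Sketch: decompose `br i1 (and %cond, %wc), %guarded, %deopt`, where %wc is a
// call to llvm.experimental.widenable.condition, into its pieces.
static bool isWidenableBranchOnConstant(const BranchInst *BI) {
  Value *Cond = nullptr, *WC = nullptr;
  BasicBlock *IfTrueBB = nullptr, *IfFalseBB = nullptr;
  if (!parseWidenableBranch(BI, Cond, WC, IfTrueBB, IfFalseBB))
    return false;
  // When the branch is guarded only by the widenable condition, Cond comes
  // back as a true constant (see the else branch above).
  return isa<Constant>(Cond);
}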


@@ -78,7 +78,7 @@ static bool isDereferenceableAndAlignedPointer(
if (!CheckForNonNull || isKnownNonZero(V, DL, 0, nullptr, CtxI, DT)) {
// As we recursed through GEPs to get here, we've incrementally checked
// that each step advanced by a multiple of the alignment. If our base is
-// properly aligned, then the original offset accessed must also be.
+// properly aligned, then the original offset accessed must also be.
Type *Ty = V->getType();
assert(Ty->isSized() && "must be sized");
APInt Offset(DL.getTypeStoreSizeInBits(Ty), 0);
@@ -150,7 +150,7 @@ bool llvm::isDereferenceableAndAlignedPointer(const Value *V, Type *Ty,
// are dereferenced, so bail out.
if (!Ty->isSized() || (Ty->isVectorTy() && Ty->getVectorIsScalable()))
return false;
// When dereferenceability information is provided by a dereferenceable
// attribute, we know exactly how many bytes are dereferenceable. If we can
// determine the exact offset to the attributed variable, we can use that


@@ -83,7 +83,7 @@ cl::opt<std::string> ModuleSummaryDotFile(
// to know when computing summary for global var, because if global variable
// references basic block address we can't import it separately from function
// containing that basic block. For simplicity we currently don't import such
-// global vars at all. When importing function we aren't interested if any
+// global vars at all. When importing function we aren't interested if any
// instruction in it takes an address of any basic block, because instruction
// can only take an address of basic block located in the same function.
static bool findRefEdges(ModuleSummaryIndex &Index, const User *CurUser,


@@ -6640,7 +6640,7 @@ const SCEV *ScalarEvolution::getExitCount(const Loop *L,
BasicBlock *ExitingBlock,
ExitCountKind Kind) {
switch (Kind) {
-case Exact:
+case Exact:
return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
case ConstantMaximum:
return getBackedgeTakenInfo(L).getMax(ExitingBlock, this);
@@ -6657,7 +6657,7 @@ ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L,
const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L,
ExitCountKind Kind) {
switch (Kind) {
-case Exact:
+case Exact:
return getBackedgeTakenInfo(L).getExact(L, this);
case ConstantMaximum:
return getBackedgeTakenInfo(L).getMax(this);
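To make the two exit-count kinds concrete, a minimal sketch (it assumes a caller that already has a ScalarEvolution analysis and a Loop; the helper name is invented):

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
using namespace llvm;

static bool hasUsableTripCount(ScalarEvolution &SE, const Loop *L) {
  // Exact: the precise backedge-taken count, or SCEVCouldNotCompute.
  const SCEV *Exact = SE.getBackedgeTakenCount(L, ScalarEvolution::Exact);
  // ConstantMaximum: a constant upper bound, often available when Exact is not.
  const SCEV *Max = SE.getBackedgeTakenCount(L, ScalarEvolution::ConstantMaximum);
  return !isa<SCEVCouldNotCompute>(Exact) || !isa<SCEVCouldNotCompute>(Max);
}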


@@ -1488,9 +1488,9 @@ bool TargetLibraryInfoImpl::getLibFunc(const Function &FDecl,
LibFunc &F) const {
// Intrinsics don't overlap w/libcalls; if our module has a large number of
// intrinsics, this ends up being an interesting compile time win since we
-// avoid string normalization and comparison.
+// avoid string normalization and comparison.
if (FDecl.isIntrinsic()) return false;
const DataLayout *DL =
FDecl.getParent() ? &FDecl.getParent()->getDataLayout() : nullptr;
return getLibFunc(FDecl.getName(), F) &&


@@ -47,7 +47,7 @@ struct NoTTIImpl : TargetTransformInfoImplCRTPBase<NoTTIImpl> {
bool HardwareLoopInfo::canAnalyze(LoopInfo &LI) {
// If the loop has irreducible control flow, it can not be converted to
// Hardware loop.
-LoopBlocksRPO RPOT(L);
+LoopBlocksRPO RPOT(L);
RPOT.perform(&LI);
if (containsIrreducibleCFG<const BasicBlock *>(RPOT, LI))
return false;


@@ -684,7 +684,7 @@ llvm::createBitMaskForGaps(IRBuilder<> &Builder, unsigned VF,
return ConstantVector::get(Mask);
}
-Constant *llvm::createReplicatedMask(IRBuilder<> &Builder,
+Constant *llvm::createReplicatedMask(IRBuilder<> &Builder,
unsigned ReplicationFactor, unsigned VF) {
SmallVector<Constant *, 16> MaskVec;
for (unsigned i = 0; i < VF; i++)
@@ -951,7 +951,7 @@ void InterleavedAccessInfo::analyzeInterleaving(
// create a group for B, we continue with the bottom-up algorithm to ensure
// we don't break any of B's dependences.
InterleaveGroup<Instruction> *Group = nullptr;
-if (isStrided(DesB.Stride) &&
+if (isStrided(DesB.Stride) &&
(!isPredicated(B->getParent()) || EnablePredicatedInterleavedMemAccesses)) {
Group = getInterleaveGroup(B);
if (!Group) {
@@ -1052,8 +1052,8 @@ void InterleavedAccessInfo::analyzeInterleaving(
// All members of a predicated interleave-group must have the same predicate,
// and currently must reside in the same BB.
-BasicBlock *BlockA = A->getParent();
-BasicBlock *BlockB = B->getParent();
+BasicBlock *BlockA = A->getParent();
+BasicBlock *BlockB = B->getParent();
if ((isPredicated(BlockA) || isPredicated(BlockB)) &&
(!EnablePredicatedInterleavedMemAccesses || BlockA != BlockB))
continue;
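To make createReplicatedMask concrete (a hedged example; the wrapper below is invented): with ReplicationFactor 3 and VF 4 it builds the shuffle mask <0,0,0,1,1,1,2,2,2,3,3,3>, i.e. each of the four source lanes repeated three times.

#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Sketch: replicate every lane of a 4-element vector V three times.
static Value *replicateLanes(IRBuilder<> &Builder, Value *V) {
  Constant *Mask = createReplicatedMask(Builder, /*ReplicationFactor=*/3, /*VF=*/4);
  return Builder.CreateShuffleVector(V, UndefValue::get(V->getType()), Mask, "rep");
}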


@@ -3416,7 +3416,7 @@ bool LLParser::ParseValID(ValID &ID, PerFunctionState *PFS) {
ID.Kind = ValID::t_Constant;
return false;
}
// Unary Operators.
case lltok::kw_fneg: {
unsigned Opc = Lex.getUIntVal();
@@ -3426,7 +3426,7 @@ bool LLParser::ParseValID(ValID &ID, PerFunctionState *PFS) {
ParseGlobalTypeAndValue(Val) ||
ParseToken(lltok::rparen, "expected ')' in unary constantexpr"))
return true;
// Check that the type is valid for the operator.
switch (Opc) {
case Instruction::FNeg:
@@ -4764,7 +4764,7 @@ bool LLParser::ParseDICommonBlock(MDNode *&Result, bool IsDistinct) {
OPTIONAL(declaration, MDField, ); \
OPTIONAL(name, MDStringField, ); \
OPTIONAL(file, MDField, ); \
-OPTIONAL(line, LineField, );
+OPTIONAL(line, LineField, );
PARSE_MD_FIELDS();
#undef VISIT_MD_FIELDS


@@ -1953,7 +1953,7 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) {
case Intrinsic::experimental_widenable_condition: {
// Give up on future widening oppurtunties so that we can fold away dead
// paths and merge blocks before going into block-local instruction
-// selection.
+// selection.
if (II->use_empty()) {
II->eraseFromParent();
return true;


@@ -189,12 +189,12 @@ bool LowerIntrinsics::runOnFunction(Function &F) {
/// need to be able to ensure each root has been initialized by the point the
/// first safepoint is reached. This really should have been done by the
/// frontend, but the old API made this non-obvious, so we do a potentially
-/// redundant store just in case.
+/// redundant store just in case.
bool LowerIntrinsics::DoLowering(Function &F, GCStrategy &S) {
SmallVector<AllocaInst *, 32> Roots;
bool MadeChange = false;
-for (BasicBlock &BB : F)
+for (BasicBlock &BB : F)
for (BasicBlock::iterator II = BB.begin(), E = BB.end(); II != E;) {
IntrinsicInst *CI = dyn_cast<IntrinsicInst>(II++);
if (!CI)


@@ -300,7 +300,7 @@ void StackMaps::recordStackMapOpers(const MCSymbol &MILabel,
MachineInstr::const_mop_iterator MOE,
bool recordResult) {
MCContext &OutContext = AP.OutStreamer->getContext();
LocationVec Locations;
LiveOutVec LiveOuts;


@@ -1066,7 +1066,7 @@ TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
MF.getDataLayout().getPointerSize(), MFI.getObjectAlignment(FI));
MIB->addMemOperand(MF, MMO);
}
// Replace the instruction and update the operand index.
MBB->insert(MachineBasicBlock::iterator(MI), MIB);
OperIdx += (MIB->getNumOperands() - MI->getNumOperands()) - 1;


@@ -230,89 +230,89 @@ Type *EVT::getTypeForEVT(LLVMContext &Context) const {
case MVT::v2f64: return VectorType::get(Type::getDoubleTy(Context), 2);
case MVT::v4f64: return VectorType::get(Type::getDoubleTy(Context), 4);
case MVT::v8f64: return VectorType::get(Type::getDoubleTy(Context), 8);
-case MVT::nxv1i1:
+case MVT::nxv1i1:
return VectorType::get(Type::getInt1Ty(Context), 1, /*Scalable=*/ true);
-case MVT::nxv2i1:
+case MVT::nxv2i1:
return VectorType::get(Type::getInt1Ty(Context), 2, /*Scalable=*/ true);
-case MVT::nxv4i1:
+case MVT::nxv4i1:
return VectorType::get(Type::getInt1Ty(Context), 4, /*Scalable=*/ true);
-case MVT::nxv8i1:
+case MVT::nxv8i1:
return VectorType::get(Type::getInt1Ty(Context), 8, /*Scalable=*/ true);
-case MVT::nxv16i1:
+case MVT::nxv16i1:
return VectorType::get(Type::getInt1Ty(Context), 16, /*Scalable=*/ true);
-case MVT::nxv32i1:
+case MVT::nxv32i1:
return VectorType::get(Type::getInt1Ty(Context), 32, /*Scalable=*/ true);
-case MVT::nxv1i8:
+case MVT::nxv1i8:
return VectorType::get(Type::getInt8Ty(Context), 1, /*Scalable=*/ true);
-case MVT::nxv2i8:
+case MVT::nxv2i8:
return VectorType::get(Type::getInt8Ty(Context), 2, /*Scalable=*/ true);
-case MVT::nxv4i8:
+case MVT::nxv4i8:
return VectorType::get(Type::getInt8Ty(Context), 4, /*Scalable=*/ true);
-case MVT::nxv8i8:
+case MVT::nxv8i8:
return VectorType::get(Type::getInt8Ty(Context), 8, /*Scalable=*/ true);
-case MVT::nxv16i8:
+case MVT::nxv16i8:
return VectorType::get(Type::getInt8Ty(Context), 16, /*Scalable=*/ true);
-case MVT::nxv32i8:
+case MVT::nxv32i8:
return VectorType::get(Type::getInt8Ty(Context), 32, /*Scalable=*/ true);
-case MVT::nxv1i16:
+case MVT::nxv1i16:
return VectorType::get(Type::getInt16Ty(Context), 1, /*Scalable=*/ true);
-case MVT::nxv2i16:
+case MVT::nxv2i16:
return VectorType::get(Type::getInt16Ty(Context), 2, /*Scalable=*/ true);
-case MVT::nxv4i16:
+case MVT::nxv4i16:
return VectorType::get(Type::getInt16Ty(Context), 4, /*Scalable=*/ true);
-case MVT::nxv8i16:
+case MVT::nxv8i16:
return VectorType::get(Type::getInt16Ty(Context), 8, /*Scalable=*/ true);
case MVT::nxv16i16:
return VectorType::get(Type::getInt16Ty(Context), 16, /*Scalable=*/ true);
case MVT::nxv32i16:
return VectorType::get(Type::getInt16Ty(Context), 32, /*Scalable=*/ true);
-case MVT::nxv1i32:
+case MVT::nxv1i32:
return VectorType::get(Type::getInt32Ty(Context), 1, /*Scalable=*/ true);
-case MVT::nxv2i32:
+case MVT::nxv2i32:
return VectorType::get(Type::getInt32Ty(Context), 2, /*Scalable=*/ true);
-case MVT::nxv4i32:
+case MVT::nxv4i32:
return VectorType::get(Type::getInt32Ty(Context), 4, /*Scalable=*/ true);
-case MVT::nxv8i32:
+case MVT::nxv8i32:
return VectorType::get(Type::getInt32Ty(Context), 8, /*Scalable=*/ true);
case MVT::nxv16i32:
return VectorType::get(Type::getInt32Ty(Context), 16,/*Scalable=*/ true);
case MVT::nxv32i32:
return VectorType::get(Type::getInt32Ty(Context), 32,/*Scalable=*/ true);
-case MVT::nxv1i64:
+case MVT::nxv1i64:
return VectorType::get(Type::getInt64Ty(Context), 1, /*Scalable=*/ true);
-case MVT::nxv2i64:
+case MVT::nxv2i64:
return VectorType::get(Type::getInt64Ty(Context), 2, /*Scalable=*/ true);
-case MVT::nxv4i64:
+case MVT::nxv4i64:
return VectorType::get(Type::getInt64Ty(Context), 4, /*Scalable=*/ true);
-case MVT::nxv8i64:
+case MVT::nxv8i64:
return VectorType::get(Type::getInt64Ty(Context), 8, /*Scalable=*/ true);
case MVT::nxv16i64:
return VectorType::get(Type::getInt64Ty(Context), 16, /*Scalable=*/ true);
case MVT::nxv32i64:
return VectorType::get(Type::getInt64Ty(Context), 32, /*Scalable=*/ true);
-case MVT::nxv2f16:
+case MVT::nxv2f16:
return VectorType::get(Type::getHalfTy(Context), 2, /*Scalable=*/ true);
-case MVT::nxv4f16:
+case MVT::nxv4f16:
return VectorType::get(Type::getHalfTy(Context), 4, /*Scalable=*/ true);
-case MVT::nxv8f16:
+case MVT::nxv8f16:
return VectorType::get(Type::getHalfTy(Context), 8, /*Scalable=*/ true);
-case MVT::nxv1f32:
+case MVT::nxv1f32:
return VectorType::get(Type::getFloatTy(Context), 1, /*Scalable=*/ true);
-case MVT::nxv2f32:
+case MVT::nxv2f32:
return VectorType::get(Type::getFloatTy(Context), 2, /*Scalable=*/ true);
-case MVT::nxv4f32:
+case MVT::nxv4f32:
return VectorType::get(Type::getFloatTy(Context), 4, /*Scalable=*/ true);
-case MVT::nxv8f32:
+case MVT::nxv8f32:
return VectorType::get(Type::getFloatTy(Context), 8, /*Scalable=*/ true);
case MVT::nxv16f32:
return VectorType::get(Type::getFloatTy(Context), 16, /*Scalable=*/ true);
-case MVT::nxv1f64:
+case MVT::nxv1f64:
return VectorType::get(Type::getDoubleTy(Context), 1, /*Scalable=*/ true);
-case MVT::nxv2f64:
+case MVT::nxv2f64:
return VectorType::get(Type::getDoubleTy(Context), 2, /*Scalable=*/ true);
-case MVT::nxv4f64:
+case MVT::nxv4f64:
return VectorType::get(Type::getDoubleTy(Context), 4, /*Scalable=*/ true);
-case MVT::nxv8f64:
+case MVT::nxv8f64:
return VectorType::get(Type::getDoubleTy(Context), 8, /*Scalable=*/ true);
case MVT::Metadata: return Type::getMetadataTy(Context);
}
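For illustration, the scalable cases above map a machine value type such as nxv4i32 onto a scalable IR vector type; a minimal sketch (the helper is invented):

#include "llvm/CodeGen/ValueTypes.h"
using namespace llvm;

// EVT(MVT::nxv4i32).getTypeForEVT(...) yields <vscale x 4 x i32>, i.e. the same
// VectorType::get(Type::getInt32Ty(Context), 4, /*Scalable=*/true) seen above.
static Type *getScalableInt32x4(LLVMContext &Context) {
  return EVT(MVT::nxv4i32).getTypeForEVT(Context);
}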


@@ -826,10 +826,10 @@ Constant *ConstantFP::getQNaN(Type *Ty, bool Negative, APInt *Payload) {
const fltSemantics &Semantics = *TypeToFloatSemantics(Ty->getScalarType());
APFloat NaN = APFloat::getQNaN(Semantics, Negative, Payload);
Constant *C = get(Ty->getContext(), NaN);
if (VectorType *VTy = dyn_cast<VectorType>(Ty))
return ConstantVector::getSplat(VTy->getNumElements(), C);
return C;
}
@@ -837,10 +837,10 @@ Constant *ConstantFP::getSNaN(Type *Ty, bool Negative, APInt *Payload) {
const fltSemantics &Semantics = *TypeToFloatSemantics(Ty->getScalarType());
APFloat NaN = APFloat::getSNaN(Semantics, Negative, Payload);
Constant *C = get(Ty->getContext(), NaN);
if (VectorType *VTy = dyn_cast<VectorType>(Ty))
return ConstantVector::getSplat(VTy->getNumElements(), C);
return C;
}
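A small usage sketch for the NaN helpers above (hedged; the wrapper name is invented): handed a vector type, they return a splat of the scalar NaN, as the getSplat calls show.

#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
using namespace llvm;

// For <4 x float> this returns a ConstantVector splat whose lanes are the quiet NaN.
static Constant *getQuietNaNVector(LLVMContext &Ctx) {
  Type *VecTy = VectorType::get(Type::getFloatTy(Ctx), 4);
  return ConstantFP::getQNaN(VecTy); // Negative and Payload are left at their defaults
}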
@@ -1908,7 +1908,7 @@ Constant *ConstantExpr::getAddrSpaceCast(Constant *C, Type *DstTy,
return getFoldedCast(Instruction::AddrSpaceCast, C, DstTy, OnlyIfReduced);
}
-Constant *ConstantExpr::get(unsigned Opcode, Constant *C, unsigned Flags,
+Constant *ConstantExpr::get(unsigned Opcode, Constant *C, unsigned Flags,
Type *OnlyIfReducedTy) {
// Check the operands for consistency first.
assert(Instruction::isUnaryOp(Opcode) &&


@@ -3436,14 +3436,14 @@ LLVMValueRef LLVMBuildArrayMalloc(LLVMBuilderRef B, LLVMTypeRef Ty,
return wrap(unwrap(B)->Insert(Malloc, Twine(Name)));
}
-LLVMValueRef LLVMBuildMemSet(LLVMBuilderRef B, LLVMValueRef Ptr,
+LLVMValueRef LLVMBuildMemSet(LLVMBuilderRef B, LLVMValueRef Ptr,
LLVMValueRef Val, LLVMValueRef Len,
unsigned Align) {
return wrap(unwrap(B)->CreateMemSet(unwrap(Ptr), unwrap(Val), unwrap(Len),
MaybeAlign(Align)));
}
-LLVMValueRef LLVMBuildMemCpy(LLVMBuilderRef B,
+LLVMValueRef LLVMBuildMemCpy(LLVMBuilderRef B,
LLVMValueRef Dst, unsigned DstAlign,
LLVMValueRef Src, unsigned SrcAlign,
LLVMValueRef Size) {
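For reference, a hedged sketch of roughly what the binding above forwards to on the C++ side (its body is not shown in this hunk; the names and the alignment of 8 are illustrative): the unsigned alignments from the C API become MaybeAlign values for IRBuilder.

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Approximately what LLVMBuildMemCpy does once its arguments are unwrapped.
static Value *emitMemCpy(IRBuilder<> &Builder, Value *Dst, Value *Src, Value *Size) {
  return Builder.CreateMemCpy(Dst, MaybeAlign(8), Src, MaybeAlign(8), Size);
}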


@@ -119,7 +119,7 @@ DiagnosticLocation::DiagnosticLocation(const DebugLoc &DL) {
DiagnosticLocation::DiagnosticLocation(const DISubprogram *SP) {
if (!SP)
return;
File = SP->getFile();
Line = SP->getScopeLine();
Column = 0;


@@ -3174,7 +3174,7 @@ void Verifier::visitInvokeInst(InvokeInst &II) {
/// visitUnaryOperator - Check the argument to the unary operator.
///
void Verifier::visitUnaryOperator(UnaryOperator &U) {
-Assert(U.getType() == U.getOperand(0)->getType(),
+Assert(U.getType() == U.getOperand(0)->getType(),
"Unary operators must have same type for"
"operands and result!",
&U);
@@ -4813,7 +4813,7 @@ void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
Type *ResultTy = FPI.getType();
Assert(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
"Intrinsic does not support vectors", &FPI);
-}
+}
break;
case Intrinsic::experimental_constrained_lround:
@@ -4823,7 +4823,7 @@ void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
Assert(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
"Intrinsic does not support vectors", &FPI);
break;
-}
+}
case Intrinsic::experimental_constrained_fcmp:
case Intrinsic::experimental_constrained_fcmps: {
@@ -4834,7 +4834,7 @@ void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
}
case Intrinsic::experimental_constrained_fptosi:
-case Intrinsic::experimental_constrained_fptoui: {
+case Intrinsic::experimental_constrained_fptoui: {
Value *Operand = FPI.getArgOperand(0);
uint64_t NumSrcElem = 0;
Assert(Operand->getType()->isFPOrFPVectorTy(),
@@ -4906,7 +4906,7 @@ void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
"Intrinsic first argument's type must be smaller than result type",
&FPI);
}
-}
+}
break;
default:
@@ -5172,7 +5172,7 @@ struct VerifierLegacyPass : public FunctionPass {
bool runOnFunction(Function &F) override {
if (!V->verify(F) && FatalErrors) {
errs() << "in function " << F.getName() << '\n';
errs() << "in function " << F.getName() << '\n';
report_fatal_error("Broken function found, compilation aborted!");
}
return false;


@@ -764,7 +764,7 @@ void XCOFFObjectWriter::assignAddressesAndIndices(const MCAsmLayout &Layout) {
SymbolIndexMap[MCSec->getQualNameSymbol()] = Csect.SymbolTableIndex;
// 1 main and 1 auxiliary symbol table entry for the csect.
SymbolTableIndex += 2;
for (auto &Sym : Csect.Syms) {
Sym.SymbolTableIndex = SymbolTableIndex;
SymbolIndexMap[Sym.MCSym] = Sym.SymbolTableIndex;


@@ -1255,7 +1255,7 @@ StringRef sys::getHostCPUName() {
return "swift";
default:;
}
return "generic";
}
#else