diff --git a/include/llvm-c/Core.h b/include/llvm-c/Core.h
index 7a39731d3e0..852e16400f3 100644
--- a/include/llvm-c/Core.h
+++ b/include/llvm-c/Core.h
@@ -2690,7 +2690,7 @@ LLVMValueRef LLVMGetNextGlobalIFunc(LLVMValueRef IFunc);
  * no previous global aliases.
  */
 LLVMValueRef LLVMGetPreviousGlobalIFunc(LLVMValueRef IFunc);
- 
+
 /**
  * Retrieves the resolver function associated with this indirect function, or
  * NULL if it doesn't not exist.
@@ -2944,7 +2944,7 @@ void LLVMInsertExistingBasicBlockAfterInsertBlock(LLVMBuilderRef Builder,
  */
 void LLVMAppendExistingBasicBlock(LLVMValueRef Fn,
                                   LLVMBasicBlockRef BB);
- 
+
 /**
  * Create a new basic block without inserting it into a function.
  *
@@ -3755,7 +3755,7 @@ LLVMValueRef LLVMBuildArrayMalloc(LLVMBuilderRef, LLVMTypeRef Ty,
                                   LLVMValueRef Val, const char *Name);
 
 /**
- * Creates and inserts a memset to the specified pointer and the 
+ * Creates and inserts a memset to the specified pointer and the
  * specified value.
  *
  * @see llvm::IRRBuilder::CreateMemSet()
@@ -3768,7 +3768,7 @@ LLVMValueRef LLVMBuildMemSet(LLVMBuilderRef B, LLVMValueRef Ptr,
  *
  * @see llvm::IRRBuilder::CreateMemCpy()
  */
-LLVMValueRef LLVMBuildMemCpy(LLVMBuilderRef B, 
+LLVMValueRef LLVMBuildMemCpy(LLVMBuilderRef B,
                              LLVMValueRef Dst, unsigned DstAlign,
                              LLVMValueRef Src, unsigned SrcAlign,
                              LLVMValueRef Size);
@@ -3777,7 +3777,7 @@ LLVMValueRef LLVMBuildMemCpy(LLVMBuilderRef B,
  *
  * @see llvm::IRRBuilder::CreateMemMove()
  */
-LLVMValueRef LLVMBuildMemMove(LLVMBuilderRef B, 
+LLVMValueRef LLVMBuildMemMove(LLVMBuilderRef B,
                               LLVMValueRef Dst, unsigned DstAlign,
                               LLVMValueRef Src, unsigned SrcAlign,
                               LLVMValueRef Size);
diff --git a/lib/Analysis/AliasSetTracker.cpp b/lib/Analysis/AliasSetTracker.cpp
index 5cc5ab597ef..5cc68f05dc0 100644
--- a/lib/Analysis/AliasSetTracker.cpp
+++ b/lib/Analysis/AliasSetTracker.cpp
@@ -677,7 +677,7 @@ void AliasSet::print(raw_ostream &OS) const {
       I.getPointer()->printAsOperand(OS << "(");
       if (I.getSize() == LocationSize::unknown())
         OS << ", unknown)";
-      else 
+      else
         OS << ", " << I.getSize() << ")";
     }
   }
diff --git a/lib/Analysis/GuardUtils.cpp b/lib/Analysis/GuardUtils.cpp
index d4828327985..cd132c56991 100644
--- a/lib/Analysis/GuardUtils.cpp
+++ b/lib/Analysis/GuardUtils.cpp
@@ -47,7 +47,7 @@ bool llvm::parseWidenableBranch(const User *U, Value *&Condition,
   Use *C, *WC;
   if (parseWidenableBranch(const_cast<User *>(U), C, WC, IfTrueBB,
                            IfFalseBB)) {
-    if (C) 
+    if (C)
       Condition = C->get();
     else
      Condition = ConstantInt::getTrue(IfTrueBB->getContext());
@@ -66,10 +66,10 @@ bool llvm::parseWidenableBranch(User *U, Use *&C,Use *&WC,
   auto *Cond = BI->getCondition();
   if (!Cond->hasOneUse())
     return false;
- 
+
   IfTrueBB = BI->getSuccessor(0);
   IfFalseBB = BI->getSuccessor(1);
- 
+
   if (match(Cond, m_Intrinsic<Intrinsic::experimental_widenable_condition>())) {
     WC = &BI->getOperandUse(0);
     C = nullptr;
@@ -88,7 +88,7 @@ bool llvm::parseWidenableBranch(User *U, Use *&C,Use *&WC,
   if (!And)
     // Could be a constexpr
     return false;
- 
+
   if (match(A, m_Intrinsic<Intrinsic::experimental_widenable_condition>()) &&
       A->hasOneUse()) {
     WC = &And->getOperandUse(0);
diff --git a/lib/Analysis/Loads.cpp b/lib/Analysis/Loads.cpp
index c2e9b8b882e..b2d20cef04a 100644
--- a/lib/Analysis/Loads.cpp
+++ b/lib/Analysis/Loads.cpp
@@ -78,7 +78,7 @@ static bool isDereferenceableAndAlignedPointer(
   if (!CheckForNonNull || isKnownNonZero(V, DL, 0, nullptr, CtxI, DT)) {
     // As we recursed through GEPs to get here, we've incrementally checked
     // that each step advanced by a multiple of the alignment. If our base is
-    // properly aligned, then the original offset accessed must also be. 
+    // properly aligned, then the original offset accessed must also be.
     Type *Ty = V->getType();
     assert(Ty->isSized() && "must be sized");
     APInt Offset(DL.getTypeStoreSizeInBits(Ty), 0);
@@ -150,7 +150,7 @@ bool llvm::isDereferenceableAndAlignedPointer(const Value *V, Type *Ty,
   // are dereferenced, so bail out.
   if (!Ty->isSized() || (Ty->isVectorTy() && Ty->getVectorIsScalable()))
     return false;
- 
+
   // When dereferenceability information is provided by a dereferenceable
   // attribute, we know exactly how many bytes are dereferenceable. If we can
   // determine the exact offset to the attributed variable, we can use that
diff --git a/lib/Analysis/ModuleSummaryAnalysis.cpp b/lib/Analysis/ModuleSummaryAnalysis.cpp
index 3dddf4b7d60..1ff47e10bd9 100644
--- a/lib/Analysis/ModuleSummaryAnalysis.cpp
+++ b/lib/Analysis/ModuleSummaryAnalysis.cpp
@@ -83,7 +83,7 @@ cl::opt<std::string> ModuleSummaryDotFile(
 // to know when computing summary for global var, because if global variable
 // references basic block address we can't import it separately from function
 // containing that basic block. For simplicity we currently don't import such
-// global vars at all. When importing function we aren't interested if any 
+// global vars at all. When importing function we aren't interested if any
 // instruction in it takes an address of any basic block, because instruction
 // can only take an address of basic block located in the same function.
 static bool findRefEdges(ModuleSummaryIndex &Index, const User *CurUser,
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index 56c1c514ed8..cd74815a895 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -6640,7 +6640,7 @@ const SCEV *ScalarEvolution::getExitCount(const Loop *L,
                                           BasicBlock *ExitingBlock,
                                           ExitCountKind Kind) {
   switch (Kind) {
-  case Exact: 
+  case Exact:
     return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
   case ConstantMaximum:
     return getBackedgeTakenInfo(L).getMax(ExitingBlock, this);
@@ -6657,7 +6657,7 @@ ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L,
 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L,
                                                    ExitCountKind Kind) {
   switch (Kind) {
-  case Exact: 
+  case Exact:
     return getBackedgeTakenInfo(L).getExact(L, this);
   case ConstantMaximum:
     return getBackedgeTakenInfo(L).getMax(this);
diff --git a/lib/Analysis/TargetLibraryInfo.cpp b/lib/Analysis/TargetLibraryInfo.cpp
index 9f6ed75cd8d..2c4809b201e 100644
--- a/lib/Analysis/TargetLibraryInfo.cpp
+++ b/lib/Analysis/TargetLibraryInfo.cpp
@@ -1488,9 +1488,9 @@ bool TargetLibraryInfoImpl::getLibFunc(const Function &FDecl,
                                        LibFunc &F) const {
   // Intrinsics don't overlap w/libcalls; if our module has a large number of
   // intrinsics, this ends up being an interesting compile time win since we
-  // avoid string normalization and comparison. 
+  // avoid string normalization and comparison.
   if (FDecl.isIntrinsic()) return false;
- 
+
   const DataLayout *DL =
       FDecl.getParent() ? &FDecl.getParent()->getDataLayout() : nullptr;
   return getLibFunc(FDecl.getName(), F) &&
diff --git a/lib/Analysis/TargetTransformInfo.cpp b/lib/Analysis/TargetTransformInfo.cpp
index 62c021435b3..865f8975825 100644
--- a/lib/Analysis/TargetTransformInfo.cpp
+++ b/lib/Analysis/TargetTransformInfo.cpp
@@ -47,7 +47,7 @@ struct NoTTIImpl : TargetTransformInfoImplCRTPBase<NoTTIImpl> {
 bool HardwareLoopInfo::canAnalyze(LoopInfo &LI) {
   // If the loop has irreducible control flow, it can not be converted to
   // Hardware loop.
-  LoopBlocksRPO RPOT(L); 
+  LoopBlocksRPO RPOT(L);
   RPOT.perform(&LI);
   if (containsIrreducibleCFG<const BasicBlock *>(RPOT, LI))
     return false;
diff --git a/lib/Analysis/VectorUtils.cpp b/lib/Analysis/VectorUtils.cpp
index d2c521ac9c9..9bdf0f334d2 100644
--- a/lib/Analysis/VectorUtils.cpp
+++ b/lib/Analysis/VectorUtils.cpp
@@ -684,7 +684,7 @@ llvm::createBitMaskForGaps(IRBuilder<> &Builder, unsigned VF,
   return ConstantVector::get(Mask);
 }
 
-Constant *llvm::createReplicatedMask(IRBuilder<> &Builder, 
+Constant *llvm::createReplicatedMask(IRBuilder<> &Builder,
                                      unsigned ReplicationFactor, unsigned VF) {
   SmallVector<Constant *, 16> MaskVec;
   for (unsigned i = 0; i < VF; i++)
@@ -951,7 +951,7 @@ void InterleavedAccessInfo::analyzeInterleaving(
     // create a group for B, we continue with the bottom-up algorithm to ensure
     // we don't break any of B's dependences.
     InterleaveGroup<Instruction> *Group = nullptr;
-    if (isStrided(DesB.Stride) && 
+    if (isStrided(DesB.Stride) &&
         (!isPredicated(B->getParent()) || EnablePredicatedInterleavedMemAccesses)) {
       Group = getInterleaveGroup(B);
       if (!Group) {
@@ -1052,8 +1052,8 @@ void InterleavedAccessInfo::analyzeInterleaving(
 
     // All members of a predicated interleave-group must have the same predicate,
     // and currently must reside in the same BB.
-    BasicBlock *BlockA = A->getParent(); 
-    BasicBlock *BlockB = B->getParent(); 
+    BasicBlock *BlockA = A->getParent();
+    BasicBlock *BlockB = B->getParent();
     if ((isPredicated(BlockA) || isPredicated(BlockB)) &&
         (!EnablePredicatedInterleavedMemAccesses || BlockA != BlockB))
       continue;
diff --git a/lib/AsmParser/LLParser.cpp b/lib/AsmParser/LLParser.cpp
index b22e7cb4f58..d8e514112aa 100644
--- a/lib/AsmParser/LLParser.cpp
+++ b/lib/AsmParser/LLParser.cpp
@@ -3416,7 +3416,7 @@ bool LLParser::ParseValID(ValID &ID, PerFunctionState *PFS) {
     ID.Kind = ValID::t_Constant;
     return false;
   }
- 
+
   // Unary Operators.
   case lltok::kw_fneg: {
     unsigned Opc = Lex.getUIntVal();
@@ -3426,7 +3426,7 @@ bool LLParser::ParseValID(ValID &ID, PerFunctionState *PFS) {
         ParseGlobalTypeAndValue(Val) ||
         ParseToken(lltok::rparen, "expected ')' in unary constantexpr"))
       return true;
- 
+
     // Check that the type is valid for the operator.
     switch (Opc) {
     case Instruction::FNeg:
@@ -4764,7 +4764,7 @@ bool LLParser::ParseDICommonBlock(MDNode *&Result, bool IsDistinct) {
   OPTIONAL(declaration, MDField, );                                            \
   OPTIONAL(name, MDStringField, );                                             \
   OPTIONAL(file, MDField, );                                                   \
-  OPTIONAL(line, LineField, ); 
+  OPTIONAL(line, LineField, );
   PARSE_MD_FIELDS();
 #undef VISIT_MD_FIELDS
 
diff --git a/lib/CodeGen/CodeGenPrepare.cpp b/lib/CodeGen/CodeGenPrepare.cpp
index ce758d698c9..306f9dcd91c 100644
--- a/lib/CodeGen/CodeGenPrepare.cpp
+++ b/lib/CodeGen/CodeGenPrepare.cpp
@@ -1953,7 +1953,7 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) {
   case Intrinsic::experimental_widenable_condition: {
     // Give up on future widening oppurtunties so that we can fold away dead
     // paths and merge blocks before going into block-local instruction
-    // selection. 
+    // selection.
     if (II->use_empty()) {
       II->eraseFromParent();
       return true;
diff --git a/lib/CodeGen/GCRootLowering.cpp b/lib/CodeGen/GCRootLowering.cpp
index 90e5f32f53b..2a85048cc97 100644
--- a/lib/CodeGen/GCRootLowering.cpp
+++ b/lib/CodeGen/GCRootLowering.cpp
@@ -189,12 +189,12 @@ bool LowerIntrinsics::runOnFunction(Function &F) {
 /// need to be able to ensure each root has been initialized by the point the
 /// first safepoint is reached.  This really should have been done by the
 /// frontend, but the old API made this non-obvious, so we do a potentially
-/// redundant store just in case. 
+/// redundant store just in case.
 bool LowerIntrinsics::DoLowering(Function &F, GCStrategy &S) {
   SmallVector<AllocaInst *, 32> Roots;
 
   bool MadeChange = false;
-  for (BasicBlock &BB : F) 
+  for (BasicBlock &BB : F)
     for (BasicBlock::iterator II = BB.begin(), E = BB.end(); II != E;) {
       IntrinsicInst *CI = dyn_cast<IntrinsicInst>(II++);
       if (!CI)
diff --git a/lib/CodeGen/StackMaps.cpp b/lib/CodeGen/StackMaps.cpp
index 2ef71d6f59f..daf3fd36c00 100644
--- a/lib/CodeGen/StackMaps.cpp
+++ b/lib/CodeGen/StackMaps.cpp
@@ -300,7 +300,7 @@ void StackMaps::recordStackMapOpers(const MCSymbol &MILabel,
                                     MachineInstr::const_mop_iterator MOE,
                                     bool recordResult) {
   MCContext &OutContext = AP.OutStreamer->getContext();
- 
+
   LocationVec Locations;
   LiveOutVec LiveOuts;
 
diff --git a/lib/CodeGen/TargetLoweringBase.cpp b/lib/CodeGen/TargetLoweringBase.cpp
index 3a3bb8cd240..bd717a8585e 100644
--- a/lib/CodeGen/TargetLoweringBase.cpp
+++ b/lib/CodeGen/TargetLoweringBase.cpp
@@ -1066,7 +1066,7 @@ TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
           MF.getDataLayout().getPointerSize(), MFI.getObjectAlignment(FI));
       MIB->addMemOperand(MF, MMO);
     }
- 
+
     // Replace the instruction and update the operand index.
     MBB->insert(MachineBasicBlock::iterator(MI), MIB);
     OperIdx += (MIB->getNumOperands() - MI->getNumOperands()) - 1;
diff --git a/lib/CodeGen/ValueTypes.cpp b/lib/CodeGen/ValueTypes.cpp
index 41cbdf03555..264982983fc 100644
--- a/lib/CodeGen/ValueTypes.cpp
+++ b/lib/CodeGen/ValueTypes.cpp
@@ -230,89 +230,89 @@ Type *EVT::getTypeForEVT(LLVMContext &Context) const {
   case MVT::v2f64: return VectorType::get(Type::getDoubleTy(Context), 2);
   case MVT::v4f64: return VectorType::get(Type::getDoubleTy(Context), 4);
   case MVT::v8f64: return VectorType::get(Type::getDoubleTy(Context), 8);
-  case MVT::nxv1i1: 
+  case MVT::nxv1i1:
     return VectorType::get(Type::getInt1Ty(Context), 1, /*Scalable=*/ true);
-  case MVT::nxv2i1: 
+  case MVT::nxv2i1:
     return VectorType::get(Type::getInt1Ty(Context), 2, /*Scalable=*/ true);
-  case MVT::nxv4i1: 
+  case MVT::nxv4i1:
     return VectorType::get(Type::getInt1Ty(Context), 4, /*Scalable=*/ true);
-  case MVT::nxv8i1: 
+  case MVT::nxv8i1:
     return VectorType::get(Type::getInt1Ty(Context), 8, /*Scalable=*/ true);
-  case MVT::nxv16i1: 
+  case MVT::nxv16i1:
     return VectorType::get(Type::getInt1Ty(Context), 16, /*Scalable=*/ true);
-  case MVT::nxv32i1: 
+  case MVT::nxv32i1:
     return VectorType::get(Type::getInt1Ty(Context), 32, /*Scalable=*/ true);
-  case MVT::nxv1i8: 
+  case MVT::nxv1i8:
     return VectorType::get(Type::getInt8Ty(Context), 1, /*Scalable=*/ true);
-  case MVT::nxv2i8: 
+  case MVT::nxv2i8:
    return VectorType::get(Type::getInt8Ty(Context), 2, /*Scalable=*/ true);
-  case MVT::nxv4i8: 
+  case MVT::nxv4i8:
     return VectorType::get(Type::getInt8Ty(Context), 4, /*Scalable=*/ true);
-  case MVT::nxv8i8: 
+  case MVT::nxv8i8:
     return VectorType::get(Type::getInt8Ty(Context), 8, /*Scalable=*/ true);
-  case MVT::nxv16i8: 
+  case MVT::nxv16i8:
     return VectorType::get(Type::getInt8Ty(Context), 16, /*Scalable=*/ true);
-  case MVT::nxv32i8: 
+  case MVT::nxv32i8:
     return VectorType::get(Type::getInt8Ty(Context), 32, /*Scalable=*/ true);
-  case MVT::nxv1i16: 
+  case MVT::nxv1i16:
     return VectorType::get(Type::getInt16Ty(Context), 1, /*Scalable=*/ true);
-  case MVT::nxv2i16: 
+  case MVT::nxv2i16:
     return VectorType::get(Type::getInt16Ty(Context), 2, /*Scalable=*/ true);
-  case MVT::nxv4i16: 
+  case MVT::nxv4i16:
     return VectorType::get(Type::getInt16Ty(Context), 4, /*Scalable=*/ true);
-  case MVT::nxv8i16: 
+  case MVT::nxv8i16:
     return VectorType::get(Type::getInt16Ty(Context), 8, /*Scalable=*/ true);
   case MVT::nxv16i16:
     return VectorType::get(Type::getInt16Ty(Context), 16, /*Scalable=*/ true);
   case MVT::nxv32i16:
     return VectorType::get(Type::getInt16Ty(Context), 32, /*Scalable=*/ true);
-  case MVT::nxv1i32: 
+  case MVT::nxv1i32:
     return VectorType::get(Type::getInt32Ty(Context), 1, /*Scalable=*/ true);
-  case MVT::nxv2i32: 
+  case MVT::nxv2i32:
     return VectorType::get(Type::getInt32Ty(Context), 2, /*Scalable=*/ true);
-  case MVT::nxv4i32: 
+  case MVT::nxv4i32:
     return VectorType::get(Type::getInt32Ty(Context), 4, /*Scalable=*/ true);
-  case MVT::nxv8i32: 
+  case MVT::nxv8i32:
     return VectorType::get(Type::getInt32Ty(Context), 8, /*Scalable=*/ true);
   case MVT::nxv16i32:
     return VectorType::get(Type::getInt32Ty(Context), 16,/*Scalable=*/ true);
   case MVT::nxv32i32:
     return VectorType::get(Type::getInt32Ty(Context), 32,/*Scalable=*/ true);
-  case MVT::nxv1i64: 
+  case MVT::nxv1i64:
     return VectorType::get(Type::getInt64Ty(Context), 1, /*Scalable=*/ true);
-  case MVT::nxv2i64: 
+  case MVT::nxv2i64:
     return VectorType::get(Type::getInt64Ty(Context), 2, /*Scalable=*/ true);
-  case MVT::nxv4i64: 
+  case MVT::nxv4i64:
     return VectorType::get(Type::getInt64Ty(Context), 4, /*Scalable=*/ true);
-  case MVT::nxv8i64: 
+  case MVT::nxv8i64:
     return VectorType::get(Type::getInt64Ty(Context), 8, /*Scalable=*/ true);
   case MVT::nxv16i64:
     return VectorType::get(Type::getInt64Ty(Context), 16, /*Scalable=*/ true);
   case MVT::nxv32i64:
     return VectorType::get(Type::getInt64Ty(Context), 32, /*Scalable=*/ true);
-  case MVT::nxv2f16: 
+  case MVT::nxv2f16:
     return VectorType::get(Type::getHalfTy(Context), 2, /*Scalable=*/ true);
-  case MVT::nxv4f16: 
+  case MVT::nxv4f16:
     return VectorType::get(Type::getHalfTy(Context), 4, /*Scalable=*/ true);
-  case MVT::nxv8f16: 
+  case MVT::nxv8f16:
     return VectorType::get(Type::getHalfTy(Context), 8, /*Scalable=*/ true);
-  case MVT::nxv1f32: 
+  case MVT::nxv1f32:
     return VectorType::get(Type::getFloatTy(Context), 1, /*Scalable=*/ true);
-  case MVT::nxv2f32: 
+  case MVT::nxv2f32:
     return VectorType::get(Type::getFloatTy(Context), 2, /*Scalable=*/ true);
-  case MVT::nxv4f32: 
+  case MVT::nxv4f32:
     return VectorType::get(Type::getFloatTy(Context), 4, /*Scalable=*/ true);
-  case MVT::nxv8f32: 
+  case MVT::nxv8f32:
     return VectorType::get(Type::getFloatTy(Context), 8, /*Scalable=*/ true);
   case MVT::nxv16f32:
     return VectorType::get(Type::getFloatTy(Context), 16, /*Scalable=*/ true);
-  case MVT::nxv1f64: 
+  case MVT::nxv1f64:
     return VectorType::get(Type::getDoubleTy(Context), 1, /*Scalable=*/ true);
-  case MVT::nxv2f64: 
+  case MVT::nxv2f64:
     return VectorType::get(Type::getDoubleTy(Context), 2, /*Scalable=*/ true);
-  case MVT::nxv4f64: 
+  case MVT::nxv4f64:
     return VectorType::get(Type::getDoubleTy(Context), 4, /*Scalable=*/ true);
-  case MVT::nxv8f64: 
+  case MVT::nxv8f64:
     return VectorType::get(Type::getDoubleTy(Context), 8, /*Scalable=*/ true);
   case MVT::Metadata: return Type::getMetadataTy(Context);
   }
diff --git a/lib/IR/Constants.cpp b/lib/IR/Constants.cpp
index 79c30286736..399bd41c82b 100644
--- a/lib/IR/Constants.cpp
+++ b/lib/IR/Constants.cpp
@@ -826,10 +826,10 @@ Constant *ConstantFP::getQNaN(Type *Ty, bool Negative, APInt *Payload) {
   const fltSemantics &Semantics = *TypeToFloatSemantics(Ty->getScalarType());
   APFloat NaN = APFloat::getQNaN(Semantics, Negative, Payload);
   Constant *C = get(Ty->getContext(), NaN);
- 
+
   if (VectorType *VTy = dyn_cast<VectorType>(Ty))
     return ConstantVector::getSplat(VTy->getNumElements(), C);
- 
+
   return C;
 }
 
@@ -837,10 +837,10 @@ Constant *ConstantFP::getSNaN(Type *Ty, bool Negative, APInt *Payload) {
   const fltSemantics &Semantics = *TypeToFloatSemantics(Ty->getScalarType());
   APFloat NaN = APFloat::getSNaN(Semantics, Negative, Payload);
   Constant *C = get(Ty->getContext(), NaN);
- 
+
   if (VectorType *VTy = dyn_cast<VectorType>(Ty))
     return ConstantVector::getSplat(VTy->getNumElements(), C);
- 
+
   return C;
 }
 
@@ -1908,7 +1908,7 @@ Constant *ConstantExpr::getAddrSpaceCast(Constant *C, Type *DstTy,
   return getFoldedCast(Instruction::AddrSpaceCast, C, DstTy, OnlyIfReduced);
 }
 
-Constant *ConstantExpr::get(unsigned Opcode, Constant *C, unsigned Flags, 
+Constant *ConstantExpr::get(unsigned Opcode, Constant *C, unsigned Flags,
                             Type *OnlyIfReducedTy) {
   // Check the operands for consistency first.
   assert(Instruction::isUnaryOp(Opcode) &&
diff --git a/lib/IR/Core.cpp b/lib/IR/Core.cpp
index 3b4224d78a2..313bc65e775 100644
--- a/lib/IR/Core.cpp
+++ b/lib/IR/Core.cpp
@@ -3436,14 +3436,14 @@ LLVMValueRef LLVMBuildArrayMalloc(LLVMBuilderRef B, LLVMTypeRef Ty,
   return wrap(unwrap(B)->Insert(Malloc, Twine(Name)));
 }
 
-LLVMValueRef LLVMBuildMemSet(LLVMBuilderRef B, LLVMValueRef Ptr, 
+LLVMValueRef LLVMBuildMemSet(LLVMBuilderRef B, LLVMValueRef Ptr,
                              LLVMValueRef Val, LLVMValueRef Len,
                              unsigned Align) {
   return wrap(unwrap(B)->CreateMemSet(unwrap(Ptr), unwrap(Val), unwrap(Len),
                                       MaybeAlign(Align)));
 }
 
-LLVMValueRef LLVMBuildMemCpy(LLVMBuilderRef B, 
+LLVMValueRef LLVMBuildMemCpy(LLVMBuilderRef B,
                              LLVMValueRef Dst, unsigned DstAlign,
                              LLVMValueRef Src, unsigned SrcAlign,
                              LLVMValueRef Size) {
diff --git a/lib/IR/DiagnosticInfo.cpp b/lib/IR/DiagnosticInfo.cpp
index fda541a296a..6528c723fbf 100644
--- a/lib/IR/DiagnosticInfo.cpp
+++ b/lib/IR/DiagnosticInfo.cpp
@@ -119,7 +119,7 @@ DiagnosticLocation::DiagnosticLocation(const DebugLoc &DL) {
 DiagnosticLocation::DiagnosticLocation(const DISubprogram *SP) {
   if (!SP)
     return;
- 
+
   File = SP->getFile();
   Line = SP->getScopeLine();
   Column = 0;
diff --git a/lib/IR/Verifier.cpp b/lib/IR/Verifier.cpp
index cf8e73f3022..6af581b178d 100644
--- a/lib/IR/Verifier.cpp
+++ b/lib/IR/Verifier.cpp
@@ -3174,7 +3174,7 @@ void Verifier::visitInvokeInst(InvokeInst &II) {
 /// visitUnaryOperator - Check the argument to the unary operator.
 ///
 void Verifier::visitUnaryOperator(UnaryOperator &U) {
-  Assert(U.getType() == U.getOperand(0)->getType(), 
+  Assert(U.getType() == U.getOperand(0)->getType(),
          "Unary operators must have same type for"
         "operands and result!",
         &U);
@@ -4813,7 +4813,7 @@ void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
     Type *ResultTy = FPI.getType();
     Assert(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
            "Intrinsic does not support vectors", &FPI);
-  } 
+  }
   break;
 
   case Intrinsic::experimental_constrained_lround:
@@ -4823,7 +4823,7 @@ void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
     Assert(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
            "Intrinsic does not support vectors", &FPI);
     break;
-  } 
+  }
 
   case Intrinsic::experimental_constrained_fcmp:
   case Intrinsic::experimental_constrained_fcmps: {
@@ -4834,7 +4834,7 @@ void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
   }
 
   case Intrinsic::experimental_constrained_fptosi:
-  case Intrinsic::experimental_constrained_fptoui: { 
+  case Intrinsic::experimental_constrained_fptoui: {
     Value *Operand = FPI.getArgOperand(0);
     uint64_t NumSrcElem = 0;
     Assert(Operand->getType()->isFPOrFPVectorTy(),
@@ -4906,7 +4906,7 @@ void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
            "Intrinsic first argument's type must be smaller than result type",
            &FPI);
   }
-  } 
+  }
   break;
 
   default:
@@ -5172,7 +5172,7 @@ struct VerifierLegacyPass : public FunctionPass {
 
   bool runOnFunction(Function &F) override {
     if (!V->verify(F) && FatalErrors) {
-      errs() << "in function " << F.getName() << '\n'; 
+      errs() << "in function " << F.getName() << '\n';
       report_fatal_error("Broken function found, compilation aborted!");
     }
     return false;
diff --git a/lib/MC/XCOFFObjectWriter.cpp b/lib/MC/XCOFFObjectWriter.cpp
index acb0e77807d..67202833cfb 100644
--- a/lib/MC/XCOFFObjectWriter.cpp
+++ b/lib/MC/XCOFFObjectWriter.cpp
@@ -764,7 +764,7 @@ void XCOFFObjectWriter::assignAddressesAndIndices(const MCAsmLayout &Layout) {
     SymbolIndexMap[MCSec->getQualNameSymbol()] = Csect.SymbolTableIndex;
     // 1 main and 1 auxiliary symbol table entry for the csect.
     SymbolTableIndex += 2;
- 
+
     for (auto &Sym : Csect.Syms) {
       Sym.SymbolTableIndex = SymbolTableIndex;
       SymbolIndexMap[Sym.MCSym] = Sym.SymbolTableIndex;
diff --git a/lib/Support/Host.cpp b/lib/Support/Host.cpp
index cafdc2ff380..955c1b30629 100644
--- a/lib/Support/Host.cpp
+++ b/lib/Support/Host.cpp
@@ -1255,7 +1255,7 @@ StringRef sys::getHostCPUName() {
       return "swift";
     default:;
     }
- 
+
   return "generic";
 }
 #else