diff --git a/lib/Target/AArch64/GISel/AArch64PostSelectOptimize.cpp b/lib/Target/AArch64/GISel/AArch64PostSelectOptimize.cpp
index 74f417994a0..cc45c6642ac 100644
--- a/lib/Target/AArch64/GISel/AArch64PostSelectOptimize.cpp
+++ b/lib/Target/AArch64/GISel/AArch64PostSelectOptimize.cpp
@@ -180,7 +180,7 @@ bool AArch64PostSelectOptimize::runOnMachineFunction(MachineFunction &MF) {
   bool Changed = false;
   for (auto &BB : MF)
     Changed |= optimizeNZCVDefs(BB);
-  return true;
+  return Changed;
 }
 
 char AArch64PostSelectOptimize::ID = 0;
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 8a9d02901dc..8dd013d5a0a 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -6861,12 +6861,10 @@ static SDValue isVMOVModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
       return SDValue();
     // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff.
     uint64_t BitMask = 0xff;
-    uint64_t Val = 0;
     unsigned ImmMask = 1;
     Imm = 0;
     for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
       if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
-        Val |= BitMask;
         Imm |= ImmMask;
       } else if ((SplatBits & BitMask) != 0) {
         return SDValue();
diff --git a/lib/Target/ARM/ARMLowOverheadLoops.cpp b/lib/Target/ARM/ARMLowOverheadLoops.cpp
index d40b01927c6..e1d77a585d2 100644
--- a/lib/Target/ARM/ARMLowOverheadLoops.cpp
+++ b/lib/Target/ARM/ARMLowOverheadLoops.cpp
@@ -1227,7 +1227,7 @@ bool ARMLowOverheadLoops::ProcessLoop(MachineLoop *ML) {
   if (LoLoop.Preheader)
     LoLoop.Start = SearchForStart(LoLoop.Preheader);
   else
-    return false;
+    return Changed;
 
   // Find the low-overhead loop components and decide whether or not to fall
   // back to a normal loop. Also look for a vctp instructions and decide
@@ -1261,7 +1261,7 @@ bool ARMLowOverheadLoops::ProcessLoop(MachineLoop *ML) {
   LLVM_DEBUG(LoLoop.dump());
   if (!LoLoop.FoundAllComponents()) {
     LLVM_DEBUG(dbgs() << "ARM Loops: Didn't find loop start, update, end\n");
-    return false;
+    return Changed;
   }
 
   assert(LoLoop.Start->getOpcode() != ARM::t2WhileLoopStart &&
diff --git a/lib/Target/PowerPC/PPCBranchSelector.cpp b/lib/Target/PowerPC/PPCBranchSelector.cpp
index 47b9e97f0d6..fa6713dcca8 100644
--- a/lib/Target/PowerPC/PPCBranchSelector.cpp
+++ b/lib/Target/PowerPC/PPCBranchSelector.cpp
@@ -415,5 +415,5 @@ bool PPCBSel::runOnMachineFunction(MachineFunction &Fn) {
   }
 
   BlockSizes.clear();
-  return true;
+  return EverMadeChange;
 }
diff --git a/lib/Target/X86/X86FloatingPoint.cpp b/lib/Target/X86/X86FloatingPoint.cpp
index 90c34272cec..1268d15fc33 100644
--- a/lib/Target/X86/X86FloatingPoint.cpp
+++ b/lib/Target/X86/X86FloatingPoint.cpp
@@ -1526,7 +1526,7 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
 
     // Scan the assembly for ST registers used, defined and clobbered. We can
    // only tell clobbers from defs by looking at the asm descriptor.
-    unsigned STUses = 0, STDefs = 0, STClobbers = 0, STDeadDefs = 0;
+    unsigned STUses = 0, STDefs = 0, STClobbers = 0;
     unsigned NumOps = 0;
     SmallSet<unsigned, 1> FRegIdx;
     unsigned RCID;
@@ -1559,8 +1559,6 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
       case InlineAsm::Kind_RegDef:
       case InlineAsm::Kind_RegDefEarlyClobber:
         STDefs |= (1u << STReg);
-        if (MO.isDead())
-          STDeadDefs |= (1u << STReg);
         break;
       case InlineAsm::Kind_Clobber:
         STClobbers |= (1u << STReg);
diff --git a/utils/benchmark/src/complexity.cc b/utils/benchmark/src/complexity.cc
index 97bf6e09b30..97cb0a88271 100644
--- a/utils/benchmark/src/complexity.cc
+++ b/utils/benchmark/src/complexity.cc
@@ -76,7 +76,6 @@ std::string GetBigOString(BigO complexity) {
 LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
                        const std::vector<double>& time,
                        BigOFunc* fitting_curve) {
-  double sigma_gn = 0.0;
   double sigma_gn_squared = 0.0;
   double sigma_time = 0.0;
   double sigma_time_gn = 0.0;
@@ -84,7 +83,6 @@ LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
   // Calculate least square fitting parameter
   for (size_t i = 0; i < n.size(); ++i) {
     double gn_i = fitting_curve(n[i]);
-    sigma_gn += gn_i;
     sigma_gn_squared += gn_i * gn_i;
     sigma_time += time[i];
     sigma_time_gn += time[i] * gn_i;