From f8a414589edbc01554016ecd1b452396058099b2 Mon Sep 17 00:00:00 2001
From: Daniel Sanders
Date: Thu, 15 Aug 2019 19:22:08 +0000
Subject: [PATCH] Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM

Summary:
This clang-tidy check looks for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).

Partial reverts in:
    X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
    X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
    HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
    MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
    PPCFastISel.cpp - No Register::operator-=()
    PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned&
    MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor

Manual fixups in:
    ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
    HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register
    HexagonConstExtenders.cpp - Has a local class named Register, used llvm::Register instead of Register.
    PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&

Depends on D65919

Reviewers: arsenm, bogner, craig.topper, RKSimon

Reviewed By: arsenm

Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D65962

llvm-svn: 369041
---
 lib/CodeGen/AggressiveAntiDepBreaker.cpp | 14 +-
 lib/CodeGen/AsmPrinter/AsmPrinter.cpp | 2 +-
 .../AsmPrinter/DbgEntityHistoryCalculator.cpp | 6 +-
 lib/CodeGen/AsmPrinter/DwarfDebug.cpp | 4 +-
 lib/CodeGen/BranchFolding.cpp | 12 +-
 lib/CodeGen/BreakFalseDeps.cpp | 4 +-
 lib/CodeGen/CalcSpillWeights.cpp | 2 +-
 lib/CodeGen/CriticalAntiDepBreaker.cpp | 8 +-
 lib/CodeGen/DeadMachineInstructionElim.cpp | 6 +-
 lib/CodeGen/DetectDeadLanes.cpp | 18 +-
 lib/CodeGen/EarlyIfConversion.cpp | 8 +-
 lib/CodeGen/ExpandPostRAPseudos.cpp | 6 +-
 lib/CodeGen/GlobalISel/CSEInfo.cpp | 2 +-
 lib/CodeGen/GlobalISel/CombinerHelper.cpp | 14 +-
 lib/CodeGen/GlobalISel/InstructionSelect.cpp | 4 +-
 lib/CodeGen/GlobalISel/Localizer.cpp | 8 +-
 lib/CodeGen/GlobalISel/Utils.cpp | 10 +-
 lib/CodeGen/IfConversion.cpp | 4 +-
 lib/CodeGen/ImplicitNullChecks.cpp | 8 +-
 lib/CodeGen/InlineSpiller.cpp | 4 +-
 lib/CodeGen/LiveDebugValues.cpp | 6 +-
 lib/CodeGen/LiveDebugVariables.cpp | 8 +-
 lib/CodeGen/LiveIntervals.cpp | 4 +-
 lib/CodeGen/LivePhysRegs.cpp | 10 +-
 lib/CodeGen/LiveRangeEdit.cpp | 6 +-
 lib/CodeGen/LiveRangeShrink.cpp | 2 +-
 lib/CodeGen/LiveRegMatrix.cpp | 2 +-
 lib/CodeGen/LiveRegUnits.cpp | 6 +-
 lib/CodeGen/LiveVariables.cpp | 6 +-
 lib/CodeGen/MIRCanonicalizerPass.cpp | 8 +-
 lib/CodeGen/MachineBasicBlock.cpp | 10 +-
 lib/CodeGen/MachineCSE.cpp | 18 +-
 lib/CodeGen/MachineCopyPropagation.cpp | 34 +--
 lib/CodeGen/MachineInstrBundle.cpp | 6 +-
lib/CodeGen/MachineLICM.cpp | 34 +-- lib/CodeGen/MachineOperand.cpp | 2 +- lib/CodeGen/MachinePipeliner.cpp | 38 +-- lib/CodeGen/MachineSSAUpdater.cpp | 4 +- lib/CodeGen/MachineScheduler.cpp | 6 +- lib/CodeGen/MachineSink.cpp | 18 +- lib/CodeGen/MachineTraceMetrics.cpp | 10 +- lib/CodeGen/MachineVerifier.cpp | 4 +- lib/CodeGen/OptimizePHIs.cpp | 8 +- lib/CodeGen/PHIElimination.cpp | 8 +- lib/CodeGen/PeepholeOptimizer.cpp | 26 +- lib/CodeGen/ProcessImplicitDefs.cpp | 4 +- lib/CodeGen/RegAllocFast.cpp | 30 +-- lib/CodeGen/RegAllocGreedy.cpp | 4 +- lib/CodeGen/RegisterCoalescer.cpp | 12 +- lib/CodeGen/RegisterPressure.cpp | 4 +- lib/CodeGen/RegisterScavenging.cpp | 8 +- lib/CodeGen/RenameIndependentSubregs.cpp | 2 +- lib/CodeGen/ScheduleDAGInstrs.cpp | 18 +- lib/CodeGen/SelectionDAG/InstrEmitter.cpp | 12 +- .../SelectionDAG/ScheduleDAGSDNodes.cpp | 2 +- .../SelectionDAG/SelectionDAGBuilder.cpp | 7 +- lib/CodeGen/SelectionDAG/TargetLowering.cpp | 2 +- lib/CodeGen/ShrinkWrap.cpp | 2 +- lib/CodeGen/SplitKit.cpp | 4 +- lib/CodeGen/StackMaps.cpp | 4 +- lib/CodeGen/TailDuplicator.cpp | 20 +- lib/CodeGen/TargetInstrInfo.cpp | 20 +- lib/CodeGen/TargetSchedule.cpp | 2 +- lib/CodeGen/TwoAddressInstructionPass.cpp | 42 ++-- lib/CodeGen/UnreachableBlockElim.cpp | 4 +- lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp | 2 +- .../AMDGPU/AMDGPUInstructionSelector.cpp | 40 ++- .../AMDGPU/AMDGPUMachineCFGStructurizer.cpp | 26 +- lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp | 10 +- lib/Target/AMDGPU/AMDILCFGStructurizer.cpp | 8 +- lib/Target/AMDGPU/GCNHazardRecognizer.cpp | 20 +- lib/Target/AMDGPU/GCNNSAReassign.cpp | 6 +- lib/Target/AMDGPU/GCNRegBankReassign.cpp | 6 +- lib/Target/AMDGPU/GCNRegPressure.cpp | 2 +- .../AMDGPU/R600ControlFlowFinalizer.cpp | 4 +- lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp | 22 +- lib/Target/AMDGPU/R600ISelLowering.cpp | 2 +- lib/Target/AMDGPU/R600InstrInfo.cpp | 12 +- lib/Target/AMDGPU/R600MachineScheduler.cpp | 4 +- .../AMDGPU/R600OptimizeVectorRegisters.cpp | 10 +- lib/Target/AMDGPU/R600Packetizer.cpp | 4 +- lib/Target/AMDGPU/SIAddIMGInit.cpp | 4 +- lib/Target/AMDGPU/SIFixSGPRCopies.cpp | 24 +- lib/Target/AMDGPU/SIFoldOperands.cpp | 12 +- lib/Target/AMDGPU/SIFormMemoryClauses.cpp | 6 +- lib/Target/AMDGPU/SIFrameLowering.cpp | 28 +-- lib/Target/AMDGPU/SIISelLowering.cpp | 62 ++--- lib/Target/AMDGPU/SIInstrInfo.cpp | 234 +++++++++--------- lib/Target/AMDGPU/SILoadStoreOptimizer.cpp | 28 +-- lib/Target/AMDGPU/SILowerControlFlow.cpp | 6 +- lib/Target/AMDGPU/SILowerI1Copies.cpp | 12 +- lib/Target/AMDGPU/SILowerSGPRSpills.cpp | 4 +- lib/Target/AMDGPU/SIOptimizeExecMasking.cpp | 2 +- .../AMDGPU/SIOptimizeExecMaskingPreRA.cpp | 8 +- lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 2 +- lib/Target/AMDGPU/SIPreAllocateWWMRegs.cpp | 8 +- lib/Target/AMDGPU/SIRegisterInfo.cpp | 35 +-- lib/Target/AMDGPU/SIShrinkInstructions.cpp | 12 +- lib/Target/AMDGPU/SIWholeQuadMode.cpp | 12 +- lib/Target/ARC/ARCISelLowering.cpp | 2 +- lib/Target/ARC/ARCOptAddrMode.cpp | 10 +- lib/Target/ARC/ARCRegisterInfo.cpp | 2 +- lib/Target/ARM/A15SDOptimizer.cpp | 32 +-- lib/Target/ARM/ARMAsmPrinter.cpp | 50 ++-- lib/Target/ARM/ARMBaseInstrInfo.cpp | 84 +++---- lib/Target/ARM/ARMCallLowering.cpp | 2 +- lib/Target/ARM/ARMConstantIslandPass.cpp | 14 +- lib/Target/ARM/ARMExpandPseudoInsts.cpp | 58 ++--- lib/Target/ARM/ARMFastISel.cpp | 16 +- lib/Target/ARM/ARMFrameLowering.cpp | 6 +- lib/Target/ARM/ARMISelDAGToDAG.cpp | 4 +- lib/Target/ARM/ARMISelLowering.cpp | 114 ++++----- lib/Target/ARM/ARMInstrInfo.cpp | 2 +- 
lib/Target/ARM/ARMInstructionSelector.cpp | 30 +-- lib/Target/ARM/ARMLoadStoreOptimizer.cpp | 32 +-- lib/Target/ARM/MLxExpansionPass.cpp | 22 +- lib/Target/ARM/Thumb1FrameLowering.cpp | 8 +- lib/Target/ARM/Thumb2ITBlockPass.cpp | 6 +- lib/Target/ARM/Thumb2InstrInfo.cpp | 2 +- lib/Target/ARM/Thumb2SizeReduction.cpp | 28 +-- lib/Target/ARM/ThumbRegisterInfo.cpp | 4 +- lib/Target/AVR/AVRAsmPrinter.cpp | 2 +- lib/Target/AVR/AVRExpandPseudoInsts.cpp | 10 +- lib/Target/AVR/AVRFrameLowering.cpp | 2 +- lib/Target/AVR/AVRISelLowering.cpp | 12 +- lib/Target/AVR/AVRRegisterInfo.cpp | 2 +- lib/Target/BPF/BPFISelDAGToDAG.cpp | 2 +- lib/Target/BPF/BPFISelLowering.cpp | 15 +- lib/Target/BPF/BPFInstrInfo.cpp | 6 +- lib/Target/BPF/BPFMIPeephole.cpp | 14 +- lib/Target/BPF/BPFMISimplifyPatchable.cpp | 4 +- lib/Target/BPF/BPFRegisterInfo.cpp | 6 +- lib/Target/Hexagon/HexagonAsmPrinter.cpp | 2 +- lib/Target/Hexagon/HexagonBitSimplify.cpp | 38 +-- lib/Target/Hexagon/HexagonBitTracker.cpp | 2 +- lib/Target/Hexagon/HexagonConstExtenders.cpp | 2 +- .../Hexagon/HexagonConstPropagation.cpp | 10 +- lib/Target/Hexagon/HexagonCopyToCombine.cpp | 30 +-- lib/Target/Hexagon/HexagonEarlyIfConv.cpp | 18 +- lib/Target/Hexagon/HexagonExpandCondsets.cpp | 8 +- lib/Target/Hexagon/HexagonFrameLowering.cpp | 50 ++-- lib/Target/Hexagon/HexagonGenInsert.cpp | 10 +- lib/Target/Hexagon/HexagonGenMux.cpp | 6 +- lib/Target/Hexagon/HexagonGenPredicate.cpp | 4 +- lib/Target/Hexagon/HexagonHardwareLoops.cpp | 52 ++-- lib/Target/Hexagon/HexagonISelLowering.cpp | 4 +- lib/Target/Hexagon/HexagonInstrInfo.cpp | 104 ++++---- lib/Target/Hexagon/HexagonNewValueJump.cpp | 6 +- lib/Target/Hexagon/HexagonOptAddrMode.cpp | 10 +- lib/Target/Hexagon/HexagonPeephole.cpp | 24 +- lib/Target/Hexagon/HexagonRegisterInfo.cpp | 6 +- .../Hexagon/HexagonSplitConst32AndConst64.cpp | 8 +- lib/Target/Hexagon/HexagonSplitDouble.cpp | 40 +-- lib/Target/Hexagon/HexagonStoreWidening.cpp | 2 +- lib/Target/Hexagon/HexagonSubtarget.cpp | 2 +- lib/Target/Hexagon/HexagonVExtract.cpp | 12 +- lib/Target/Hexagon/HexagonVLIWPacketizer.cpp | 12 +- lib/Target/Hexagon/RDFGraph.cpp | 8 +- lib/Target/Hexagon/RDFLiveness.cpp | 4 +- lib/Target/Lanai/LanaiAsmPrinter.cpp | 2 +- lib/Target/Lanai/LanaiFrameLowering.cpp | 4 +- lib/Target/Lanai/LanaiISelLowering.cpp | 2 +- lib/Target/Lanai/LanaiInstrInfo.cpp | 2 +- lib/Target/Lanai/LanaiRegisterInfo.cpp | 2 +- lib/Target/MSP430/MSP430ISelLowering.cpp | 20 +- lib/Target/MSP430/MSP430RegisterInfo.cpp | 2 +- lib/Target/Mips/MicroMipsSizeReduction.cpp | 18 +- lib/Target/Mips/Mips16ISelDAGToDAG.cpp | 2 +- lib/Target/Mips/Mips16ISelLowering.cpp | 16 +- lib/Target/Mips/MipsAsmPrinter.cpp | 6 +- lib/Target/Mips/MipsExpandPseudo.cpp | 54 ++-- lib/Target/Mips/MipsFastISel.cpp | 2 +- lib/Target/Mips/MipsISelDAGToDAG.cpp | 2 +- lib/Target/Mips/MipsISelLowering.cpp | 102 ++++---- lib/Target/Mips/MipsInstructionSelector.cpp | 4 +- lib/Target/Mips/MipsOptimizePICCall.cpp | 2 +- lib/Target/Mips/MipsSEFrameLowering.cpp | 48 ++-- lib/Target/Mips/MipsSEISelLowering.cpp | 116 ++++----- lib/Target/Mips/MipsSEInstrInfo.cpp | 20 +- lib/Target/Mips/MipsSERegisterInfo.cpp | 2 +- lib/Target/NVPTX/NVPTXAsmPrinter.cpp | 2 +- lib/Target/PowerPC/PPCAsmPrinter.cpp | 6 +- lib/Target/PowerPC/PPCBranchSelector.cpp | 6 +- lib/Target/PowerPC/PPCFastISel.cpp | 14 +- lib/Target/PowerPC/PPCFrameLowering.cpp | 10 +- lib/Target/PowerPC/PPCISelDAGToDAG.cpp | 6 +- lib/Target/PowerPC/PPCISelLowering.cpp | 84 +++---- lib/Target/PowerPC/PPCInstrInfo.cpp | 54 ++-- 
lib/Target/PowerPC/PPCMIPeephole.cpp | 40 +-- lib/Target/PowerPC/PPCPreEmitPeephole.cpp | 4 +- lib/Target/PowerPC/PPCQPXLoadSplat.cpp | 6 +- lib/Target/PowerPC/PPCReduceCRLogicals.cpp | 2 +- lib/Target/PowerPC/PPCRegisterInfo.cpp | 32 +-- lib/Target/PowerPC/PPCTLSDynamicCall.cpp | 4 +- lib/Target/PowerPC/PPCVSXCopy.cpp | 4 +- lib/Target/PowerPC/PPCVSXFMAMutate.cpp | 12 +- lib/Target/PowerPC/PPCVSXSwapRemoval.cpp | 26 +- lib/Target/Sparc/DelaySlotFiller.cpp | 10 +- lib/Target/Sparc/SparcISelDAGToDAG.cpp | 4 +- lib/Target/Sparc/SparcISelLowering.cpp | 6 +- lib/Target/Sparc/SparcInstrInfo.cpp | 4 +- lib/Target/Sparc/SparcRegisterInfo.cpp | 12 +- lib/Target/SystemZ/SystemZElimCompare.cpp | 2 +- lib/Target/SystemZ/SystemZExpandPseudo.cpp | 4 +- lib/Target/SystemZ/SystemZFrameLowering.cpp | 2 +- lib/Target/SystemZ/SystemZISelLowering.cpp | 78 +++--- lib/Target/SystemZ/SystemZInstrInfo.cpp | 30 +-- lib/Target/SystemZ/SystemZPostRewrite.cpp | 2 +- lib/Target/SystemZ/SystemZRegisterInfo.cpp | 14 +- lib/Target/SystemZ/SystemZShortenInst.cpp | 4 +- lib/Target/X86/X86AsmPrinter.cpp | 4 +- .../X86/X86AvoidStoreForwardingBlocks.cpp | 2 +- lib/Target/X86/X86CallFrameOptimization.cpp | 10 +- lib/Target/X86/X86CallLowering.cpp | 2 +- lib/Target/X86/X86CmovConversion.cpp | 14 +- lib/Target/X86/X86DomainReassignment.cpp | 8 +- lib/Target/X86/X86EvexToVex.cpp | 2 +- lib/Target/X86/X86ExpandPseudo.cpp | 4 +- lib/Target/X86/X86FastISel.cpp | 4 +- lib/Target/X86/X86FixupBWInsts.cpp | 2 +- lib/Target/X86/X86FixupLEAs.cpp | 16 +- lib/Target/X86/X86FixupSetCC.cpp | 4 +- lib/Target/X86/X86FlagsCopyLowering.cpp | 8 +- lib/Target/X86/X86FloatingPoint.cpp | 6 +- lib/Target/X86/X86FrameLowering.cpp | 16 +- lib/Target/X86/X86ISelLowering.cpp | 114 +++++---- lib/Target/X86/X86InsertPrefetch.cpp | 4 +- lib/Target/X86/X86InstrInfo.cpp | 69 +++--- lib/Target/X86/X86InstructionSelector.cpp | 96 +++---- lib/Target/X86/X86MCInstLower.cpp | 18 +- lib/Target/X86/X86OptimizeLEAs.cpp | 4 +- lib/Target/X86/X86RegisterInfo.cpp | 8 +- lib/Target/X86/X86SelectionDAGInfo.cpp | 2 +- .../X86/X86SpeculativeLoadHardening.cpp | 48 ++-- lib/Target/X86/X86WinAllocaExpander.cpp | 4 +- lib/Target/XCore/XCoreFrameLowering.cpp | 4 +- .../XCore/XCoreFrameToArgsOffsetElim.cpp | 2 +- lib/Target/XCore/XCoreISelLowering.cpp | 4 +- lib/Target/XCore/XCoreRegisterInfo.cpp | 2 +- 239 files changed, 1887 insertions(+), 1891 deletions(-) diff --git a/lib/CodeGen/AggressiveAntiDepBreaker.cpp b/lib/CodeGen/AggressiveAntiDepBreaker.cpp index 444f618d8b8..18fcee6239e 100644 --- a/lib/CodeGen/AggressiveAntiDepBreaker.cpp +++ b/lib/CodeGen/AggressiveAntiDepBreaker.cpp @@ -232,7 +232,7 @@ bool AggressiveAntiDepBreaker::IsImplicitDefUse(MachineInstr &MI, if (!MO.isReg() || !MO.isImplicit()) return false; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Reg == 0) return false; @@ -252,7 +252,7 @@ void AggressiveAntiDepBreaker::GetPassthruRegs( if (!MO.isReg()) continue; if ((MO.isDef() && MI.isRegTiedToUseOperand(i)) || IsImplicitDefUse(MI, MO)) { - const unsigned Reg = MO.getReg(); + const Register Reg = MO.getReg(); for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true); SubRegs.isValid(); ++SubRegs) PassthruRegs.insert(*SubRegs); @@ -365,7 +365,7 @@ void AggressiveAntiDepBreaker::PrescanInstruction( for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { MachineOperand &MO = MI.getOperand(i); if (!MO.isReg() || !MO.isDef()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Reg == 0) continue; HandleLastUse(Reg, 
Count + 1, "", "\tDead Def: ", "\n"); @@ -375,7 +375,7 @@ void AggressiveAntiDepBreaker::PrescanInstruction( for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { MachineOperand &MO = MI.getOperand(i); if (!MO.isReg() || !MO.isDef()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Reg == 0) continue; LLVM_DEBUG(dbgs() << " " << printReg(Reg, TRI) << "=g" @@ -418,7 +418,7 @@ void AggressiveAntiDepBreaker::PrescanInstruction( for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { MachineOperand &MO = MI.getOperand(i); if (!MO.isReg() || !MO.isDef()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Reg == 0) continue; // Ignore KILLs and passthru registers for liveness... if (MI.isKill() || (PassthruRegs.count(Reg) != 0)) @@ -471,7 +471,7 @@ void AggressiveAntiDepBreaker::ScanInstruction(MachineInstr &MI, for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { MachineOperand &MO = MI.getOperand(i); if (!MO.isReg() || !MO.isUse()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Reg == 0) continue; LLVM_DEBUG(dbgs() << " " << printReg(Reg, TRI) << "=g" @@ -506,7 +506,7 @@ void AggressiveAntiDepBreaker::ScanInstruction(MachineInstr &MI, for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { MachineOperand &MO = MI.getOperand(i); if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Reg == 0) continue; if (FirstReg != 0) { diff --git a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp index cc1566929e3..2daad1f2292 100644 --- a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp +++ b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp @@ -783,7 +783,7 @@ static void emitComments(const MachineInstr &MI, raw_ostream &CommentOS) { /// emitImplicitDef - This method emits the specified machine instruction /// that is an implicit def. void AsmPrinter::emitImplicitDef(const MachineInstr *MI) const { - unsigned RegNo = MI->getOperand(0).getReg(); + Register RegNo = MI->getOperand(0).getReg(); SmallString<128> Str; raw_svector_ostream OS(Str); diff --git a/lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp b/lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp index cc5effcdee2..9d05cfe9bdc 100644 --- a/lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp +++ b/lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp @@ -177,13 +177,13 @@ static void handleNewDebugValue(InlinedEntity Var, const MachineInstr &DV, IndicesToErase.push_back(Index); Entry.endEntry(NewIndex); } - if (unsigned Reg = isDescribedByReg(DV)) + if (Register Reg = isDescribedByReg(DV)) TrackedRegs[Reg] |= !Overlaps; } // If the new debug value is described by a register, add tracking of // that register if it is not already tracked. 
- if (unsigned NewReg = isDescribedByReg(DV)) { + if (Register NewReg = isDescribedByReg(DV)) { if (!TrackedRegs.count(NewReg)) addRegDescribedVar(RegVars, NewReg, Var); LiveEntries[Var].insert(NewIndex); @@ -234,7 +234,7 @@ void llvm::calculateDbgEntityHistory(const MachineFunction *MF, DbgLabelInstrMap &DbgLabels) { const TargetLowering *TLI = MF->getSubtarget().getTargetLowering(); unsigned SP = TLI->getStackPointerRegisterToSaveRestore(); - unsigned FrameReg = TRI->getFrameRegister(*MF); + Register FrameReg = TRI->getFrameRegister(*MF); RegDescribedVarsMap RegVars; DbgValueEntriesMap LiveEntries; for (const auto &MBB : *MF) { diff --git a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp index 364119a6ca1..136c0e90163 100644 --- a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp +++ b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp @@ -660,9 +660,9 @@ static void collectCallSiteParameters(const MachineInstr *CallMI, DbgValueLoc DbgLocVal(ParamValue->second, Val); finishCallSiteParam(DbgLocVal, Reg); } else if (ParamValue->first->isReg()) { - unsigned RegLoc = ParamValue->first->getReg(); + Register RegLoc = ParamValue->first->getReg(); unsigned SP = TLI->getStackPointerRegisterToSaveRestore(); - unsigned FP = TRI->getFrameRegister(*MF); + Register FP = TRI->getFrameRegister(*MF); bool IsSPorFP = (RegLoc == SP) || (RegLoc == FP); if (TRI->isCalleeSavedPhysReg(RegLoc, *MF) || IsSPorFP) { DbgValueLoc DbgLocVal(ParamValue->second, diff --git a/lib/CodeGen/BranchFolding.cpp b/lib/CodeGen/BranchFolding.cpp index ecd6ac44e2e..a0a072be868 100644 --- a/lib/CodeGen/BranchFolding.cpp +++ b/lib/CodeGen/BranchFolding.cpp @@ -1871,7 +1871,7 @@ MachineBasicBlock::iterator findHoistingInsertPosAndDeps(MachineBasicBlock *MBB, for (const MachineOperand &MO : Loc->operands()) { if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Reg) continue; if (MO.isUse()) { @@ -1909,7 +1909,7 @@ MachineBasicBlock::iterator findHoistingInsertPosAndDeps(MachineBasicBlock *MBB, return Loc; if (!MO.isReg() || MO.isUse()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Reg) continue; if (Uses.count(Reg)) { @@ -1937,7 +1937,7 @@ MachineBasicBlock::iterator findHoistingInsertPosAndDeps(MachineBasicBlock *MBB, for (const MachineOperand &MO : PI->operands()) { if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Reg) continue; if (MO.isUse()) { @@ -2010,7 +2010,7 @@ bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) { } if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Reg) continue; if (MO.isDef()) { @@ -2060,7 +2060,7 @@ bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) { for (const MachineOperand &MO : TIB->operands()) { if (!MO.isReg() || !MO.isUse() || !MO.isKill()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Reg) continue; if (!AllDefsSet.count(Reg)) { @@ -2078,7 +2078,7 @@ bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) { for (const MachineOperand &MO : TIB->operands()) { if (!MO.isReg() || !MO.isDef() || MO.isDead()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Reg || Register::isVirtualRegister(Reg)) continue; addRegAndItsAliases(Reg, TRI, ActiveDefsSet); diff --git a/lib/CodeGen/BreakFalseDeps.cpp b/lib/CodeGen/BreakFalseDeps.cpp index cc4b2caa9be..a65b3447bab 100644 --- a/lib/CodeGen/BreakFalseDeps.cpp +++ b/lib/CodeGen/BreakFalseDeps.cpp @@ -109,7 +109,7 
@@ bool BreakFalseDeps::pickBestRegisterForUndef(MachineInstr *MI, unsigned OpIdx, MachineOperand &MO = MI->getOperand(OpIdx); assert(MO.isUndef() && "Expected undef machine operand"); - unsigned OriginalReg = MO.getReg(); + Register OriginalReg = MO.getReg(); // Update only undef operands that have reg units that are mapped to one root. for (MCRegUnitIterator Unit(OriginalReg, TRI); Unit.isValid(); ++Unit) { @@ -162,7 +162,7 @@ bool BreakFalseDeps::pickBestRegisterForUndef(MachineInstr *MI, unsigned OpIdx, bool BreakFalseDeps::shouldBreakDependence(MachineInstr *MI, unsigned OpIdx, unsigned Pref) { - unsigned reg = MI->getOperand(OpIdx).getReg(); + Register reg = MI->getOperand(OpIdx).getReg(); unsigned Clearance = RDA->getClearance(MI, reg); LLVM_DEBUG(dbgs() << "Clearance: " << Clearance << ", want " << Pref); diff --git a/lib/CodeGen/CalcSpillWeights.cpp b/lib/CodeGen/CalcSpillWeights.cpp index 6d0a02f99db..bf97aaee366 100644 --- a/lib/CodeGen/CalcSpillWeights.cpp +++ b/lib/CodeGen/CalcSpillWeights.cpp @@ -244,7 +244,7 @@ float VirtRegAuxInfo::weightCalcHelper(LiveInterval &li, SlotIndex *start, // Get allocation hints from copies. if (!mi->isCopy()) continue; - unsigned hint = copyHint(mi, li.reg, tri, mri); + Register hint = copyHint(mi, li.reg, tri, mri); if (!hint) continue; // Force hweight onto the stack so that x86 doesn't add hidden precision, diff --git a/lib/CodeGen/CriticalAntiDepBreaker.cpp b/lib/CodeGen/CriticalAntiDepBreaker.cpp index 4144c243a34..22575d526b3 100644 --- a/lib/CodeGen/CriticalAntiDepBreaker.cpp +++ b/lib/CodeGen/CriticalAntiDepBreaker.cpp @@ -187,7 +187,7 @@ void CriticalAntiDepBreaker::PrescanInstruction(MachineInstr &MI) { for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { MachineOperand &MO = MI.getOperand(i); if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Reg == 0) continue; const TargetRegisterClass *NewRC = nullptr; @@ -272,7 +272,7 @@ void CriticalAntiDepBreaker::ScanInstruction(MachineInstr &MI, unsigned Count) { } if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Reg == 0) continue; if (!MO.isDef()) continue; @@ -303,7 +303,7 @@ void CriticalAntiDepBreaker::ScanInstruction(MachineInstr &MI, unsigned Count) { for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { MachineOperand &MO = MI.getOperand(i); if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Reg == 0) continue; if (!MO.isUse()) continue; @@ -612,7 +612,7 @@ BreakAntiDependencies(const std::vector &SUnits, for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { MachineOperand &MO = MI.getOperand(i); if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Reg == 0) continue; if (MO.isUse() && TRI->regsOverlap(AntiDepReg, Reg)) { AntiDepReg = 0; diff --git a/lib/CodeGen/DeadMachineInstructionElim.cpp b/lib/CodeGen/DeadMachineInstructionElim.cpp index 4fe1112f1b4..9a537c859a6 100644 --- a/lib/CodeGen/DeadMachineInstructionElim.cpp +++ b/lib/CodeGen/DeadMachineInstructionElim.cpp @@ -75,7 +75,7 @@ bool DeadMachineInstructionElim::isDead(const MachineInstr *MI) const { for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { const MachineOperand &MO = MI->getOperand(i); if (MO.isReg() && MO.isDef()) { - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Register::isPhysicalRegister(Reg)) { // Don't delete live physreg defs, or any reserved register defs. 
if (LivePhysRegs.test(Reg) || MRI->isReserved(Reg)) @@ -140,7 +140,7 @@ bool DeadMachineInstructionElim::runOnMachineFunction(MachineFunction &MF) { for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { const MachineOperand &MO = MI->getOperand(i); if (MO.isReg() && MO.isDef()) { - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Register::isPhysicalRegister(Reg)) { // Check the subreg set, not the alias set, because a def // of a super-register may still be partially live after @@ -159,7 +159,7 @@ bool DeadMachineInstructionElim::runOnMachineFunction(MachineFunction &MF) { for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { const MachineOperand &MO = MI->getOperand(i); if (MO.isReg() && MO.isUse()) { - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Register::isPhysicalRegister(Reg)) { for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) LivePhysRegs.set(*AI); diff --git a/lib/CodeGen/DetectDeadLanes.cpp b/lib/CodeGen/DetectDeadLanes.cpp index 31dea0984e2..6d5306c1dc0 100644 --- a/lib/CodeGen/DetectDeadLanes.cpp +++ b/lib/CodeGen/DetectDeadLanes.cpp @@ -154,7 +154,7 @@ static bool isCrossCopy(const MachineRegisterInfo &MRI, const TargetRegisterClass *DstRC, const MachineOperand &MO) { assert(lowersToCopies(MI)); - unsigned SrcReg = MO.getReg(); + Register SrcReg = MO.getReg(); const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg); if (DstRC == SrcRC) return false; @@ -194,7 +194,7 @@ void DetectDeadLanes::addUsedLanesOnOperand(const MachineOperand &MO, LaneBitmask UsedLanes) { if (!MO.readsReg()) return; - unsigned MOReg = MO.getReg(); + Register MOReg = MO.getReg(); if (!Register::isVirtualRegister(MOReg)) return; @@ -250,7 +250,7 @@ LaneBitmask DetectDeadLanes::transferUsedLanes(const MachineInstr &MI, return MO2UsedLanes; const MachineOperand &Def = MI.getOperand(0); - unsigned DefReg = Def.getReg(); + Register DefReg = Def.getReg(); const TargetRegisterClass *RC = MRI->getRegClass(DefReg); LaneBitmask MO1UsedLanes; if (RC->CoveredBySubRegs) @@ -285,7 +285,7 @@ void DetectDeadLanes::transferDefinedLanesStep(const MachineOperand &Use, if (MI.getOpcode() == TargetOpcode::PATCHPOINT) return; const MachineOperand &Def = *MI.defs().begin(); - unsigned DefReg = Def.getReg(); + Register DefReg = Def.getReg(); if (!Register::isVirtualRegister(DefReg)) return; unsigned DefRegIdx = Register::virtReg2Index(DefReg); @@ -377,7 +377,7 @@ LaneBitmask DetectDeadLanes::determineInitialDefinedLanes(unsigned Reg) { for (const MachineOperand &MO : DefMI.uses()) { if (!MO.isReg() || !MO.readsReg()) continue; - unsigned MOReg = MO.getReg(); + Register MOReg = MO.getReg(); if (!MOReg) continue; @@ -428,7 +428,7 @@ LaneBitmask DetectDeadLanes::determineInitialUsedLanes(unsigned Reg) { if (lowersToCopies(UseMI)) { assert(UseMI.getDesc().getNumDefs() == 1); const MachineOperand &Def = *UseMI.defs().begin(); - unsigned DefReg = Def.getReg(); + Register DefReg = Def.getReg(); // The used lanes of COPY-like instruction operands are determined by the // following dataflow analysis. 
if (Register::isVirtualRegister(DefReg)) { @@ -470,7 +470,7 @@ bool DetectDeadLanes::isUndefInput(const MachineOperand &MO, if (!lowersToCopies(MI)) return false; const MachineOperand &Def = MI.getOperand(0); - unsigned DefReg = Def.getReg(); + Register DefReg = Def.getReg(); if (!Register::isVirtualRegister(DefReg)) return false; unsigned DefRegIdx = Register::virtReg2Index(DefReg); @@ -482,7 +482,7 @@ bool DetectDeadLanes::isUndefInput(const MachineOperand &MO, if (UsedLanes.any()) return false; - unsigned MOReg = MO.getReg(); + Register MOReg = MO.getReg(); if (Register::isVirtualRegister(MOReg)) { const TargetRegisterClass *DstRC = MRI->getRegClass(DefReg); *CrossCopy = isCrossCopy(*MRI, MI, DstRC, MO); @@ -536,7 +536,7 @@ bool DetectDeadLanes::runOnce(MachineFunction &MF) { for (MachineOperand &MO : MI.operands()) { if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Register::isVirtualRegister(Reg)) continue; unsigned RegIdx = Register::virtReg2Index(Reg); diff --git a/lib/CodeGen/EarlyIfConversion.cpp b/lib/CodeGen/EarlyIfConversion.cpp index 00a5ce2ae25..61e994ec077 100644 --- a/lib/CodeGen/EarlyIfConversion.cpp +++ b/lib/CodeGen/EarlyIfConversion.cpp @@ -232,7 +232,7 @@ bool SSAIfConv::canSpeculateInstrs(MachineBasicBlock *MBB) { } if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); // Remember clobbered regunits. if (MO.isDef() && Register::isPhysicalRegister(Reg)) @@ -288,7 +288,7 @@ bool SSAIfConv::findInsertionPoint() { // We're ignoring regmask operands. That is conservatively correct. if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Register::isPhysicalRegister(Reg)) continue; // I clobbers Reg, so it isn't live before I. @@ -467,7 +467,7 @@ void SSAIfConv::replacePHIInstrs() { for (unsigned i = 0, e = PHIs.size(); i != e; ++i) { PHIInfo &PI = PHIs[i]; LLVM_DEBUG(dbgs() << "If-converting " << *PI.PHI); - unsigned DstReg = PI.PHI->getOperand(0).getReg(); + Register DstReg = PI.PHI->getOperand(0).getReg(); TII->insertSelect(*Head, FirstTerm, HeadDL, DstReg, Cond, PI.TReg, PI.FReg); LLVM_DEBUG(dbgs() << " --> " << *std::prev(FirstTerm)); PI.PHI->eraseFromParent(); @@ -494,7 +494,7 @@ void SSAIfConv::rewritePHIOperands() { // equal. 
DstReg = PI.TReg; } else { - unsigned PHIDst = PI.PHI->getOperand(0).getReg(); + Register PHIDst = PI.PHI->getOperand(0).getReg(); DstReg = MRI->createVirtualRegister(MRI->getRegClass(PHIDst)); TII->insertSelect(*Head, FirstTerm, HeadDL, DstReg, Cond, PI.TReg, PI.FReg); diff --git a/lib/CodeGen/ExpandPostRAPseudos.cpp b/lib/CodeGen/ExpandPostRAPseudos.cpp index 381cefed4ca..1fc57fac148 100644 --- a/lib/CodeGen/ExpandPostRAPseudos.cpp +++ b/lib/CodeGen/ExpandPostRAPseudos.cpp @@ -79,13 +79,13 @@ bool ExpandPostRA::LowerSubregToReg(MachineInstr *MI) { (MI->getOperand(2).isReg() && MI->getOperand(2).isUse()) && MI->getOperand(3).isImm() && "Invalid subreg_to_reg"); - unsigned DstReg = MI->getOperand(0).getReg(); - unsigned InsReg = MI->getOperand(2).getReg(); + Register DstReg = MI->getOperand(0).getReg(); + Register InsReg = MI->getOperand(2).getReg(); assert(!MI->getOperand(2).getSubReg() && "SubIdx on physreg?"); unsigned SubIdx = MI->getOperand(3).getImm(); assert(SubIdx != 0 && "Invalid index for insert_subreg"); - unsigned DstSubReg = TRI->getSubReg(DstReg, SubIdx); + Register DstSubReg = TRI->getSubReg(DstReg, SubIdx); assert(Register::isPhysicalRegister(DstReg) && "Insert destination must be in a physical register"); diff --git a/lib/CodeGen/GlobalISel/CSEInfo.cpp b/lib/CodeGen/GlobalISel/CSEInfo.cpp index 78939057e8a..9f81b0af483 100644 --- a/lib/CodeGen/GlobalISel/CSEInfo.cpp +++ b/lib/CodeGen/GlobalISel/CSEInfo.cpp @@ -332,7 +332,7 @@ GISelInstProfileBuilder::addNodeIDFlag(unsigned Flag) const { const GISelInstProfileBuilder &GISelInstProfileBuilder::addNodeIDMachineOperand( const MachineOperand &MO) const { if (MO.isReg()) { - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!MO.isDef()) addNodeIDRegNum(Reg); LLT Ty = MRI.getType(Reg); diff --git a/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/lib/CodeGen/GlobalISel/CombinerHelper.cpp index 18ea45ef99c..b1bf8258119 100644 --- a/lib/CodeGen/GlobalISel/CombinerHelper.cpp +++ b/lib/CodeGen/GlobalISel/CombinerHelper.cpp @@ -62,8 +62,8 @@ bool CombinerHelper::tryCombineCopy(MachineInstr &MI) { bool CombinerHelper::matchCombineCopy(MachineInstr &MI) { if (MI.getOpcode() != TargetOpcode::COPY) return false; - unsigned DstReg = MI.getOperand(0).getReg(); - unsigned SrcReg = MI.getOperand(1).getReg(); + Register DstReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); LLT DstTy = MRI.getType(DstReg); LLT SrcTy = MRI.getType(SrcReg); // Simple Copy Propagation. @@ -73,8 +73,8 @@ bool CombinerHelper::matchCombineCopy(MachineInstr &MI) { return false; } void CombinerHelper::applyCombineCopy(MachineInstr &MI) { - unsigned DstReg = MI.getOperand(0).getReg(); - unsigned SrcReg = MI.getOperand(1).getReg(); + Register DstReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); MI.eraseFromParent(); replaceRegWith(MRI, DstReg, SrcReg); } @@ -286,7 +286,7 @@ void CombinerHelper::applyCombineExtendingLoads(MachineInstr &MI, // up the type and extend so that it uses the preferred use. 
if (UseMI->getOpcode() == Preferred.ExtendOpcode || UseMI->getOpcode() == TargetOpcode::G_ANYEXT) { - unsigned UseDstReg = UseMI->getOperand(0).getReg(); + Register UseDstReg = UseMI->getOperand(0).getReg(); MachineOperand &UseSrcMO = UseMI->getOperand(1); const LLT &UseDstTy = MRI.getType(UseDstReg); if (UseDstReg != ChosenDstReg) { @@ -883,8 +883,8 @@ bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) { unsigned DstAlign = MemOp->getBaseAlignment(); unsigned SrcAlign = 0; - unsigned Dst = MI.getOperand(1).getReg(); - unsigned Src = MI.getOperand(2).getReg(); + Register Dst = MI.getOperand(1).getReg(); + Register Src = MI.getOperand(2).getReg(); Register Len = MI.getOperand(3).getReg(); if (ID != Intrinsic::memset) { diff --git a/lib/CodeGen/GlobalISel/InstructionSelect.cpp b/lib/CodeGen/GlobalISel/InstructionSelect.cpp index 517f13c40a4..d699b4d2d42 100644 --- a/lib/CodeGen/GlobalISel/InstructionSelect.cpp +++ b/lib/CodeGen/GlobalISel/InstructionSelect.cpp @@ -160,8 +160,8 @@ bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) { --MII; if (MI.getOpcode() != TargetOpcode::COPY) continue; - unsigned SrcReg = MI.getOperand(1).getReg(); - unsigned DstReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); + Register DstReg = MI.getOperand(0).getReg(); if (Register::isVirtualRegister(SrcReg) && Register::isVirtualRegister(DstReg)) { auto SrcRC = MRI.getRegClass(SrcReg); diff --git a/lib/CodeGen/GlobalISel/Localizer.cpp b/lib/CodeGen/GlobalISel/Localizer.cpp index 3592409710a..8dd99692128 100644 --- a/lib/CodeGen/GlobalISel/Localizer.cpp +++ b/lib/CodeGen/GlobalISel/Localizer.cpp @@ -79,7 +79,7 @@ bool Localizer::shouldLocalize(const MachineInstr &MI) { return true; case TargetOpcode::G_GLOBAL_VALUE: { unsigned RematCost = TTI->getGISelRematGlobalCost(); - unsigned Reg = MI.getOperand(0).getReg(); + Register Reg = MI.getOperand(0).getReg(); unsigned MaxUses = maxUses(RematCost); if (MaxUses == UINT_MAX) return true; // Remats are "free" so always localize. @@ -121,7 +121,7 @@ bool Localizer::localizeInterBlock(MachineFunction &MF, LLVM_DEBUG(dbgs() << "Should localize: " << MI); assert(MI.getDesc().getNumDefs() == 1 && "More than one definition not supported yet"); - unsigned Reg = MI.getOperand(0).getReg(); + Register Reg = MI.getOperand(0).getReg(); // Check if all the users of MI are local. // We are going to invalidation the list of use operands, so we // can't use range iterator. @@ -151,7 +151,7 @@ bool Localizer::localizeInterBlock(MachineFunction &MF, LocalizedMI); // Set a new register for the definition. - unsigned NewReg = MRI->createGenericVirtualRegister(MRI->getType(Reg)); + Register NewReg = MRI->createGenericVirtualRegister(MRI->getType(Reg)); MRI->setRegClassOrRegBank(NewReg, MRI->getRegClassOrRegBank(Reg)); LocalizedMI->getOperand(0).setReg(NewReg); NewVRegIt = @@ -177,7 +177,7 @@ bool Localizer::localizeIntraBlock(LocalizedSetVecT &LocalizedInstrs) { // many users, but this case may be better served by regalloc improvements. for (MachineInstr *MI : LocalizedInstrs) { - unsigned Reg = MI->getOperand(0).getReg(); + Register Reg = MI->getOperand(0).getReg(); MachineBasicBlock &MBB = *MI->getParent(); // All of the user MIs of this reg. 
SmallPtrSet Users; diff --git a/lib/CodeGen/GlobalISel/Utils.cpp b/lib/CodeGen/GlobalISel/Utils.cpp index 907cb67f0db..be09db16d83 100644 --- a/lib/CodeGen/GlobalISel/Utils.cpp +++ b/lib/CodeGen/GlobalISel/Utils.cpp @@ -43,7 +43,7 @@ unsigned llvm::constrainOperandRegClass( const RegisterBankInfo &RBI, MachineInstr &InsertPt, const TargetRegisterClass &RegClass, const MachineOperand &RegMO, unsigned OpIdx) { - unsigned Reg = RegMO.getReg(); + Register Reg = RegMO.getReg(); // Assume physical registers are properly constrained. assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented"); @@ -72,7 +72,7 @@ unsigned llvm::constrainOperandRegClass( MachineRegisterInfo &MRI, const TargetInstrInfo &TII, const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II, const MachineOperand &RegMO, unsigned OpIdx) { - unsigned Reg = RegMO.getReg(); + Register Reg = RegMO.getReg(); // Assume physical registers are properly constrained. assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented"); @@ -128,7 +128,7 @@ bool llvm::constrainSelectedInstRegOperands(MachineInstr &I, LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n'); assert(MO.isReg() && "Unsupported non-reg operand"); - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); // Physical registers don't need to be constrained. if (Register::isPhysicalRegister(Reg)) continue; @@ -168,7 +168,7 @@ bool llvm::isTriviallyDead(const MachineInstr &MI, if (!MO.isReg() || !MO.isDef()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Register::isPhysicalRegister(Reg) || !MRI.use_nodbg_empty(Reg)) return false; } @@ -288,7 +288,7 @@ llvm::MachineInstr *llvm::getDefIgnoringCopies(Register Reg, if (!DstTy.isValid()) return nullptr; while (DefMI->getOpcode() == TargetOpcode::COPY) { - unsigned SrcReg = DefMI->getOperand(1).getReg(); + Register SrcReg = DefMI->getOperand(1).getReg(); auto SrcTy = MRI.getType(SrcReg); if (!SrcTy.isValid() || SrcTy != DstTy) break; diff --git a/lib/CodeGen/IfConversion.cpp b/lib/CodeGen/IfConversion.cpp index 6f6581e360c..83317194b74 100644 --- a/lib/CodeGen/IfConversion.cpp +++ b/lib/CodeGen/IfConversion.cpp @@ -1815,7 +1815,7 @@ bool IfConverter::IfConvertDiamondCommon( for (const MachineOperand &MO : FI.operands()) { if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Reg) continue; if (MO.isDef()) { @@ -1983,7 +1983,7 @@ static bool MaySpeculate(const MachineInstr &MI, for (const MachineOperand &MO : MI.operands()) { if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Reg) continue; if (MO.isDef() && !LaterRedefs.count(Reg)) diff --git a/lib/CodeGen/ImplicitNullChecks.cpp b/lib/CodeGen/ImplicitNullChecks.cpp index 1e82ea65961..b7dcaec9010 100644 --- a/lib/CodeGen/ImplicitNullChecks.cpp +++ b/lib/CodeGen/ImplicitNullChecks.cpp @@ -278,12 +278,12 @@ bool ImplicitNullChecks::canReorder(const MachineInstr *A, if (!(MOA.isReg() && MOA.getReg())) continue; - unsigned RegA = MOA.getReg(); + Register RegA = MOA.getReg(); for (auto MOB : B->operands()) { if (!(MOB.isReg() && MOB.getReg())) continue; - unsigned RegB = MOB.getReg(); + Register RegB = MOB.getReg(); if (TRI->regsOverlap(RegA, RegB) && (MOA.isDef() || MOB.isDef())) return false; @@ -517,7 +517,7 @@ bool ImplicitNullChecks::analyzeBlockForNullChecks( // // we must ensure that there are no instructions between the 'test' and // conditional jump that modify %rax. 
- const unsigned PointerReg = MBP.LHS.getReg(); + const Register PointerReg = MBP.LHS.getReg(); assert(MBP.ConditionDef->getParent() == &MBB && "Should be in basic block"); @@ -689,7 +689,7 @@ void ImplicitNullChecks::rewriteNullChecks( for (const MachineOperand &MO : FaultingInstr->operands()) { if (!MO.isReg() || !MO.isDef()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Reg || MBB->isLiveIn(Reg)) continue; MBB->addLiveIn(Reg); diff --git a/lib/CodeGen/InlineSpiller.cpp b/lib/CodeGen/InlineSpiller.cpp index 7fb3fff5cb6..0a10f917924 100644 --- a/lib/CodeGen/InlineSpiller.cpp +++ b/lib/CodeGen/InlineSpiller.cpp @@ -376,7 +376,7 @@ bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI, assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy"); #endif - unsigned SrcReg = CopyMI.getOperand(1).getReg(); + Register SrcReg = CopyMI.getOperand(1).getReg(); LiveInterval &SrcLI = LIS.getInterval(SrcReg); VNInfo *SrcVNI = SrcLI.getVNInfoAt(Idx); LiveQueryResult SrcQ = SrcLI.Query(Idx); @@ -844,7 +844,7 @@ foldMemoryOperand(ArrayRef> Ops, for (MIBundleOperands MO(*MI); MO.isValid(); ++MO) { if (!MO->isReg()) continue; - unsigned Reg = MO->getReg(); + Register Reg = MO->getReg(); if (!Reg || Register::isVirtualRegister(Reg) || MRI.isReserved(Reg)) { continue; } diff --git a/lib/CodeGen/LiveDebugValues.cpp b/lib/CodeGen/LiveDebugValues.cpp index 82231a0cc02..b82cef90589 100644 --- a/lib/CodeGen/LiveDebugValues.cpp +++ b/lib/CodeGen/LiveDebugValues.cpp @@ -898,8 +898,8 @@ void LiveDebugValues::transferRegisterCopy(MachineInstr &MI, return false; }; - unsigned SrcReg = SrcRegOp->getReg(); - unsigned DestReg = DestRegOp->getReg(); + Register SrcReg = SrcRegOp->getReg(); + Register DestReg = DestRegOp->getReg(); // We want to recognize instructions where destination register is callee // saved register. If register that could be clobbered by the call is @@ -1182,7 +1182,7 @@ bool LiveDebugValues::ExtendRanges(MachineFunction &MF) { const TargetLowering *TLI = MF.getSubtarget().getTargetLowering(); unsigned SP = TLI->getStackPointerRegisterToSaveRestore(); - unsigned FP = TRI->getFrameRegister(MF); + Register FP = TRI->getFrameRegister(MF); auto IsRegOtherThanSPAndFP = [&](const MachineOperand &Op) -> bool { return Op.isReg() && Op.getReg() != SP && Op.getReg() != FP; }; diff --git a/lib/CodeGen/LiveDebugVariables.cpp b/lib/CodeGen/LiveDebugVariables.cpp index 6ba8c48fcee..31b38c14ecf 100644 --- a/lib/CodeGen/LiveDebugVariables.cpp +++ b/lib/CodeGen/LiveDebugVariables.cpp @@ -607,7 +607,7 @@ bool LDVImpl::handleDebugValue(MachineInstr &MI, SlotIndex Idx) { bool Discard = false; if (MI.getOperand(0).isReg() && Register::isVirtualRegister(MI.getOperand(0).getReg())) { - const unsigned Reg = MI.getOperand(0).getReg(); + const Register Reg = MI.getOperand(0).getReg(); if (!LIS->hasInterval(Reg)) { // The DBG_VALUE is described by a virtual register that does not have a // live interval. Discard the DBG_VALUE. @@ -768,7 +768,7 @@ void UserValue::addDefsFromCopies( // Copies of the full value. if (MO.getSubReg() || !MI->isCopy()) continue; - unsigned DstReg = MI->getOperand(0).getReg(); + Register DstReg = MI->getOperand(0).getReg(); // Don't follow copies to physregs. These are usually setting up call // arguments, and the argument registers are always call clobbered. We are @@ -1162,7 +1162,7 @@ void UserValue::rewriteLocations(VirtRegMap &VRM, const MachineFunction &MF, // Only virtual registers are rewritten. 
if (Loc.isReg() && Loc.getReg() && Register::isVirtualRegister(Loc.getReg())) { - unsigned VirtReg = Loc.getReg(); + Register VirtReg = Loc.getReg(); if (VRM.isAssignedReg(VirtReg) && Register::isPhysicalRegister(VRM.getPhys(VirtReg))) { // This can create a %noreg operand in rare cases when the sub-register @@ -1258,7 +1258,7 @@ findNextInsertLocation(MachineBasicBlock *MBB, const TargetRegisterInfo &TRI) { if (!LocMO.isReg()) return MBB->instr_end(); - unsigned Reg = LocMO.getReg(); + Register Reg = LocMO.getReg(); // Find the next instruction in the MBB that define the register Reg. while (I != MBB->end() && !I->isTerminator()) { diff --git a/lib/CodeGen/LiveIntervals.cpp b/lib/CodeGen/LiveIntervals.cpp index cc161e6ed53..0781a0b121b 100644 --- a/lib/CodeGen/LiveIntervals.cpp +++ b/lib/CodeGen/LiveIntervals.cpp @@ -986,7 +986,7 @@ public: MO.setIsKill(false); } - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Reg) continue; if (Register::isVirtualRegister(Reg)) { @@ -1644,7 +1644,7 @@ void LiveIntervals::splitSeparateComponents(LiveInterval &LI, unsigned Reg = LI.reg; const TargetRegisterClass *RegClass = MRI->getRegClass(Reg); for (unsigned I = 1; I < NumComp; ++I) { - unsigned NewVReg = MRI->createVirtualRegister(RegClass); + Register NewVReg = MRI->createVirtualRegister(RegClass); LiveInterval &NewLI = createEmptyInterval(NewVReg); SplitLIs.push_back(&NewLI); } diff --git a/lib/CodeGen/LivePhysRegs.cpp b/lib/CodeGen/LivePhysRegs.cpp index 9836d3dc112..c2a1cc7c649 100644 --- a/lib/CodeGen/LivePhysRegs.cpp +++ b/lib/CodeGen/LivePhysRegs.cpp @@ -46,7 +46,7 @@ void LivePhysRegs::removeDefs(const MachineInstr &MI) { if (O->isReg()) { if (!O->isDef() || O->isDebug()) continue; - unsigned Reg = O->getReg(); + Register Reg = O->getReg(); if (!Register::isPhysicalRegister(Reg)) continue; removeReg(Reg); @@ -60,7 +60,7 @@ void LivePhysRegs::addUses(const MachineInstr &MI) { for (ConstMIBundleOperands O(MI); O.isValid(); ++O) { if (!O->isReg() || !O->readsReg() || O->isDebug()) continue; - unsigned Reg = O->getReg(); + Register Reg = O->getReg(); if (!Register::isPhysicalRegister(Reg)) continue; addReg(Reg); @@ -86,7 +86,7 @@ void LivePhysRegs::stepForward(const MachineInstr &MI, // Remove killed registers from the set. 
for (ConstMIBundleOperands O(MI); O.isValid(); ++O) { if (O->isReg() && !O->isDebug()) { - unsigned Reg = O->getReg(); + Register Reg = O->getReg(); if (!Register::isPhysicalRegister(Reg)) continue; if (O->isDef()) { @@ -292,7 +292,7 @@ void llvm::recomputeLivenessFlags(MachineBasicBlock &MBB) { if (!MO->isReg() || !MO->isDef() || MO->isDebug()) continue; - unsigned Reg = MO->getReg(); + Register Reg = MO->getReg(); if (Reg == 0) continue; assert(Register::isPhysicalRegister(Reg)); @@ -309,7 +309,7 @@ void llvm::recomputeLivenessFlags(MachineBasicBlock &MBB) { if (!MO->isReg() || !MO->readsReg() || MO->isDebug()) continue; - unsigned Reg = MO->getReg(); + Register Reg = MO->getReg(); if (Reg == 0) continue; assert(Register::isPhysicalRegister(Reg)); diff --git a/lib/CodeGen/LiveRangeEdit.cpp b/lib/CodeGen/LiveRangeEdit.cpp index d1291290fe4..d03e11fae1b 100644 --- a/lib/CodeGen/LiveRangeEdit.cpp +++ b/lib/CodeGen/LiveRangeEdit.cpp @@ -32,7 +32,7 @@ void LiveRangeEdit::Delegate::anchor() { } LiveInterval &LiveRangeEdit::createEmptyIntervalFrom(unsigned OldReg, bool createSubRanges) { - unsigned VReg = MRI.createVirtualRegister(MRI.getRegClass(OldReg)); + Register VReg = MRI.createVirtualRegister(MRI.getRegClass(OldReg)); if (VRM) VRM->setIsSplitFromReg(VReg, VRM->getOriginal(OldReg)); @@ -52,7 +52,7 @@ LiveInterval &LiveRangeEdit::createEmptyIntervalFrom(unsigned OldReg, } unsigned LiveRangeEdit::createFrom(unsigned OldReg) { - unsigned VReg = MRI.createVirtualRegister(MRI.getRegClass(OldReg)); + Register VReg = MRI.createVirtualRegister(MRI.getRegClass(OldReg)); if (VRM) { VRM->setIsSplitFromReg(VReg, VRM->getOriginal(OldReg)); } @@ -308,7 +308,7 @@ void LiveRangeEdit::eliminateDeadDef(MachineInstr *MI, ToShrinkSet &ToShrink, MOE = MI->operands_end(); MOI != MOE; ++MOI) { if (!MOI->isReg()) continue; - unsigned Reg = MOI->getReg(); + Register Reg = MOI->getReg(); if (!Register::isVirtualRegister(Reg)) { // Check if MI reads any unreserved physregs. if (Reg && MOI->readsReg() && !MRI.isReserved(Reg)) diff --git a/lib/CodeGen/LiveRangeShrink.cpp b/lib/CodeGen/LiveRangeShrink.cpp index 08f82e00848..cbf112ee2bd 100644 --- a/lib/CodeGen/LiveRangeShrink.cpp +++ b/lib/CodeGen/LiveRangeShrink.cpp @@ -172,7 +172,7 @@ bool LiveRangeShrink::runOnMachineFunction(MachineFunction &MF) { for (const MachineOperand &MO : MI.operands()) { if (!MO.isReg() || MO.isDead() || MO.isDebug()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); // Do not move the instruction if it def/uses a physical register, // unless it is a constant physical register or a noreg. 
if (!Register::isVirtualRegister(Reg)) { diff --git a/lib/CodeGen/LiveRegMatrix.cpp b/lib/CodeGen/LiveRegMatrix.cpp index ce99e5535c2..72c79e5f8a7 100644 --- a/lib/CodeGen/LiveRegMatrix.cpp +++ b/lib/CodeGen/LiveRegMatrix.cpp @@ -118,7 +118,7 @@ void LiveRegMatrix::assign(LiveInterval &VirtReg, unsigned PhysReg) { } void LiveRegMatrix::unassign(LiveInterval &VirtReg) { - unsigned PhysReg = VRM->getPhys(VirtReg.reg); + Register PhysReg = VRM->getPhys(VirtReg.reg); LLVM_DEBUG(dbgs() << "unassigning " << printReg(VirtReg.reg, TRI) << " from " << printReg(PhysReg, TRI) << ':'); VRM->clearVirt(VirtReg.reg); diff --git a/lib/CodeGen/LiveRegUnits.cpp b/lib/CodeGen/LiveRegUnits.cpp index 0c1518b862e..97763def1f4 100644 --- a/lib/CodeGen/LiveRegUnits.cpp +++ b/lib/CodeGen/LiveRegUnits.cpp @@ -47,7 +47,7 @@ void LiveRegUnits::stepBackward(const MachineInstr &MI) { if (O->isReg()) { if (!O->isDef() || O->isDebug()) continue; - unsigned Reg = O->getReg(); + Register Reg = O->getReg(); if (!Register::isPhysicalRegister(Reg)) continue; removeReg(Reg); @@ -59,7 +59,7 @@ void LiveRegUnits::stepBackward(const MachineInstr &MI) { for (ConstMIBundleOperands O(MI); O.isValid(); ++O) { if (!O->isReg() || !O->readsReg() || O->isDebug()) continue; - unsigned Reg = O->getReg(); + Register Reg = O->getReg(); if (!Register::isPhysicalRegister(Reg)) continue; addReg(Reg); @@ -70,7 +70,7 @@ void LiveRegUnits::accumulate(const MachineInstr &MI) { // Add defs, uses and regmask clobbers to the set. for (ConstMIBundleOperands O(MI); O.isValid(); ++O) { if (O->isReg()) { - unsigned Reg = O->getReg(); + Register Reg = O->getReg(); if (!Register::isPhysicalRegister(Reg)) continue; if (!O->isDef() && !O->readsReg()) diff --git a/lib/CodeGen/LiveVariables.cpp b/lib/CodeGen/LiveVariables.cpp index 861f8eaeba2..955792808d8 100644 --- a/lib/CodeGen/LiveVariables.cpp +++ b/lib/CodeGen/LiveVariables.cpp @@ -214,7 +214,7 @@ MachineInstr *LiveVariables::FindLastPartialDef(unsigned Reg, MachineOperand &MO = LastDef->getOperand(i); if (!MO.isReg() || !MO.isDef() || MO.getReg() == 0) continue; - unsigned DefReg = MO.getReg(); + Register DefReg = MO.getReg(); if (TRI->isSubRegister(Reg, DefReg)) { for (MCSubRegIterator SubRegs(DefReg, TRI, /*IncludeSelf=*/true); SubRegs.isValid(); ++SubRegs) @@ -519,7 +519,7 @@ void LiveVariables::runOnInstr(MachineInstr &MI, } if (!MO.isReg() || MO.getReg() == 0) continue; - unsigned MOReg = MO.getReg(); + Register MOReg = MO.getReg(); if (MO.isUse()) { if (!(Register::isPhysicalRegister(MOReg) && MRI->isReserved(MOReg))) MO.setIsKill(false); @@ -690,7 +690,7 @@ void LiveVariables::removeVirtualRegistersKilled(MachineInstr &MI) { MachineOperand &MO = MI.getOperand(i); if (MO.isReg() && MO.isKill()) { MO.setIsKill(false); - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Register::isVirtualRegister(Reg)) { bool removed = getVarInfo(Reg).removeKill(MI); assert(removed && "kill not in register's VarInfo?"); diff --git a/lib/CodeGen/MIRCanonicalizerPass.cpp b/lib/CodeGen/MIRCanonicalizerPass.cpp index 222432d0acb..a9b49457bad 100644 --- a/lib/CodeGen/MIRCanonicalizerPass.cpp +++ b/lib/CodeGen/MIRCanonicalizerPass.cpp @@ -340,8 +340,8 @@ static bool propagateLocalCopies(MachineBasicBlock *MBB) { if (!MI->getOperand(1).isReg()) continue; - const unsigned Dst = MI->getOperand(0).getReg(); - const unsigned Src = MI->getOperand(1).getReg(); + const Register Dst = MI->getOperand(0).getReg(); + const Register Src = MI->getOperand(1).getReg(); if (!Register::isVirtualRegister(Dst)) continue; @@ 
-386,7 +386,7 @@ static std::vector populateCandidates(MachineBasicBlock *MBB) {
     bool DoesMISideEffect = false;
 
     if (MI->getNumOperands() > 0 && MI->getOperand(0).isReg()) {
-      const unsigned Dst = MI->getOperand(0).getReg();
+      const Register Dst = MI->getOperand(0).getReg();
       DoesMISideEffect |= !Register::isVirtualRegister(Dst);
 
       for (auto UI = MRI.use_begin(Dst); UI != MRI.use_end(); ++UI) {
@@ -754,7 +754,7 @@ static bool runOnBasicBlock(MachineBasicBlock *MBB,
   for (unsigned i = 0; i < IdempotentInstCount && MII != MBB->end(); ++i) {
     MachineInstr &MI = *MII++;
     Changed = true;
-    unsigned vRegToRename = MI.getOperand(0).getReg();
+    Register vRegToRename = MI.getOperand(0).getReg();
     auto Rename = NVC.createVirtualRegister(vRegToRename);
 
     std::vector RenameMOs;
diff --git a/lib/CodeGen/MachineBasicBlock.cpp b/lib/CodeGen/MachineBasicBlock.cpp
index de0e63aefb2..f57a0424d33 100644
--- a/lib/CodeGen/MachineBasicBlock.cpp
+++ b/lib/CodeGen/MachineBasicBlock.cpp
@@ -500,14 +500,14 @@ MachineBasicBlock::addLiveIn(MCRegister PhysReg, const TargetRegisterClass *RC)
   if (LiveIn)
     for (;I != E && I->isCopy(); ++I)
       if (I->getOperand(1).getReg() == PhysReg) {
-        unsigned VirtReg = I->getOperand(0).getReg();
+        Register VirtReg = I->getOperand(0).getReg();
         if (!MRI.constrainRegClass(VirtReg, RC))
           llvm_unreachable("Incompatible live-in register class.");
         return VirtReg;
       }
 
   // No luck, create a virtual register.
-  unsigned VirtReg = MRI.createVirtualRegister(RC);
+  Register VirtReg = MRI.createVirtualRegister(RC);
   BuildMI(*this, I, DebugLoc(), TII.get(TargetOpcode::COPY), VirtReg)
     .addReg(PhysReg, RegState::Kill);
   if (!LiveIn)
@@ -907,7 +907,7 @@ MachineBasicBlock *MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ,
       if (!OI->isReg() || OI->getReg() == 0 ||
          !OI->isUse() || !OI->isKill() || OI->isUndef())
        continue;
-      unsigned Reg = OI->getReg();
+      Register Reg = OI->getReg();
       if (Register::isPhysicalRegister(Reg) ||
          LV->getVarInfo(Reg).removeKill(*MI)) {
        KilledRegs.push_back(Reg);
@@ -928,7 +928,7 @@ MachineBasicBlock *MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ,
       if (!OI->isReg() || OI->getReg() == 0)
        continue;
-      unsigned Reg = OI->getReg();
+      Register Reg = OI->getReg();
       if (!is_contained(UsedRegs, Reg))
        UsedRegs.push_back(Reg);
     }
@@ -1033,7 +1033,7 @@ MachineBasicBlock *MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ,
     for (unsigned ni = 1, ne = I->getNumOperands(); ni != ne; ni += 2) {
       if (I->getOperand(ni+1).getMBB() == NMBB) {
         MachineOperand &MO = I->getOperand(ni);
-        unsigned Reg = MO.getReg();
+        Register Reg = MO.getReg();
         PHISrcRegs.insert(Reg);
         if (MO.isUndef())
           continue;
diff --git a/lib/CodeGen/MachineCSE.cpp b/lib/CodeGen/MachineCSE.cpp
index 8f5778a51ac..58d73f06599 100644
--- a/lib/CodeGen/MachineCSE.cpp
+++ b/lib/CodeGen/MachineCSE.cpp
@@ -167,14 +167,14 @@ bool MachineCSE::PerformTrivialCopyPropagation(MachineInstr *MI,
   for (MachineOperand &MO : MI->operands()) {
     if (!MO.isReg() || !MO.isUse())
       continue;
-    unsigned Reg = MO.getReg();
+    Register Reg = MO.getReg();
     if (!Register::isVirtualRegister(Reg))
       continue;
     bool OnlyOneUse = MRI->hasOneNonDBGUse(Reg);
     MachineInstr *DefMI = MRI->getVRegDef(Reg);
     if (!DefMI->isCopy())
       continue;
-    unsigned SrcReg = DefMI->getOperand(1).getReg();
+    Register SrcReg = DefMI->getOperand(1).getReg();
     if (!Register::isVirtualRegister(SrcReg))
       continue;
     if (DefMI->getOperand(0).getSubReg())
@@ -280,7 +280,7 @@ bool MachineCSE::hasLivePhysRegDefUses(const MachineInstr *MI,
   for (const MachineOperand &MO : MI->operands()) {
     if (!MO.isReg() || MO.isDef())
       continue;
-    unsigned Reg = MO.getReg();
+    Register Reg = MO.getReg();
     if (!Reg)
       continue;
     if (Register::isVirtualRegister(Reg))
@@ -299,7 +299,7 @@ bool MachineCSE::hasLivePhysRegDefUses(const MachineInstr *MI,
     const MachineOperand &MO = MOP.value();
     if (!MO.isReg() || !MO.isDef())
       continue;
-    unsigned Reg = MO.getReg();
+    Register Reg = MO.getReg();
     if (!Reg)
       continue;
     if (Register::isVirtualRegister(Reg))
@@ -376,7 +376,7 @@ bool MachineCSE::PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI,
       return false;
     if (!MO.isReg() || !MO.isDef())
       continue;
-    unsigned MOReg = MO.getReg();
+    Register MOReg = MO.getReg();
     if (Register::isVirtualRegister(MOReg))
       continue;
     if (PhysRefs.count(MOReg))
@@ -593,8 +593,8 @@ bool MachineCSE::ProcessBlockCSE(MachineBasicBlock *MBB) {
         MachineOperand &MO = MI->getOperand(i);
         if (!MO.isReg() || !MO.isDef())
           continue;
-        unsigned OldReg = MO.getReg();
-        unsigned NewReg = CSMI->getOperand(i).getReg();
+        Register OldReg = MO.getReg();
+        Register NewReg = CSMI->getOperand(i).getReg();
 
         // Go through implicit defs of CSMI and MI, if a def is not dead at MI,
         // we should make sure it is not dead at CSMI.
@@ -822,8 +822,8 @@ bool MachineCSE::ProcessBlockPRE(MachineDominatorTree *DT,
       assert(MI->getOperand(0).isDef() &&
              "First operand of instr with one explicit def must be this def");
-      unsigned VReg = MI->getOperand(0).getReg();
-      unsigned NewReg = MRI->cloneVirtualRegister(VReg);
+      Register VReg = MI->getOperand(0).getReg();
+      Register NewReg = MRI->cloneVirtualRegister(VReg);
       if (!isProfitableToCSE(NewReg, VReg, CMBB, MI))
         continue;
       MachineInstr &NewMI =
diff --git a/lib/CodeGen/MachineCopyPropagation.cpp b/lib/CodeGen/MachineCopyPropagation.cpp
index f3946b8403b..ebe76e31dca 100644
--- a/lib/CodeGen/MachineCopyPropagation.cpp
+++ b/lib/CodeGen/MachineCopyPropagation.cpp
@@ -119,8 +119,8 @@ public:
   void trackCopy(MachineInstr *MI, const TargetRegisterInfo &TRI) {
     assert(MI->isCopy() && "Tracking non-copy?");
-    unsigned Def = MI->getOperand(0).getReg();
-    unsigned Src = MI->getOperand(1).getReg();
+    Register Def = MI->getOperand(0).getReg();
+    Register Src = MI->getOperand(1).getReg();
 
     // Remember Def is defined by the copy.
     for (MCRegUnitIterator RUI(Def, &TRI); RUI.isValid(); ++RUI)
@@ -163,8 +163,8 @@ public:
     // Check that the available copy isn't clobbered by any regmasks between
     // itself and the destination.
-    unsigned AvailSrc = AvailCopy->getOperand(1).getReg();
-    unsigned AvailDef = AvailCopy->getOperand(0).getReg();
+    Register AvailSrc = AvailCopy->getOperand(1).getReg();
+    Register AvailDef = AvailCopy->getOperand(0).getReg();
     for (const MachineInstr &MI :
          make_range(AvailCopy->getIterator(), DestCopy.getIterator()))
       for (const MachineOperand &MO : MI.operands())
@@ -262,8 +262,8 @@ void MachineCopyPropagation::ReadRegister(unsigned Reg, MachineInstr &Reader,
 /// isNopCopy("ecx = COPY eax", AH, CL) == false
 static bool isNopCopy(const MachineInstr &PreviousCopy, unsigned Src,
                       unsigned Def, const TargetRegisterInfo *TRI) {
-  unsigned PreviousSrc = PreviousCopy.getOperand(1).getReg();
-  unsigned PreviousDef = PreviousCopy.getOperand(0).getReg();
+  Register PreviousSrc = PreviousCopy.getOperand(1).getReg();
+  Register PreviousDef = PreviousCopy.getOperand(0).getReg();
   if (Src == PreviousSrc) {
     assert(Def == PreviousDef);
     return true;
   }
@@ -300,7 +300,7 @@ bool MachineCopyPropagation::eraseIfRedundant(MachineInstr &Copy, unsigned Src,
   // Copy was redundantly redefining either Src or Def. Remove earlier kill
   // flags between Copy and PrevCopy because the value will be reused now.
   assert(Copy.isCopy());
-  unsigned CopyDef = Copy.getOperand(0).getReg();
+  Register CopyDef = Copy.getOperand(0).getReg();
   assert(CopyDef == Src || CopyDef == Def);
   for (MachineInstr &MI :
        make_range(PrevCopy->getIterator(), Copy.getIterator()))
@@ -319,7 +319,7 @@ bool MachineCopyPropagation::isForwardableRegClassCopy(const MachineInstr &Copy,
                                                        const MachineInstr &UseI,
                                                        unsigned UseIdx) {
-  unsigned CopySrcReg = Copy.getOperand(1).getReg();
+  Register CopySrcReg = Copy.getOperand(1).getReg();
 
   // If the new register meets the opcode register constraints, then allow
   // forwarding.
@@ -410,9 +410,9 @@ void MachineCopyPropagation::forwardUses(MachineInstr &MI) {
     if (!Copy)
       continue;
 
-    unsigned CopyDstReg = Copy->getOperand(0).getReg();
+    Register CopyDstReg = Copy->getOperand(0).getReg();
     const MachineOperand &CopySrc = Copy->getOperand(1);
-    unsigned CopySrcReg = CopySrc.getReg();
+    Register CopySrcReg = CopySrc.getReg();
 
     // FIXME: Don't handle partial uses of wider COPYs yet.
     if (MOUse.getReg() != CopyDstReg) {
@@ -468,8 +468,8 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
     // Analyze copies (which don't overlap themselves).
     if (MI->isCopy() && !TRI->regsOverlap(MI->getOperand(0).getReg(),
                                           MI->getOperand(1).getReg())) {
-      unsigned Def = MI->getOperand(0).getReg();
-      unsigned Src = MI->getOperand(1).getReg();
+      Register Def = MI->getOperand(0).getReg();
+      Register Src = MI->getOperand(1).getReg();
 
       assert(!Register::isVirtualRegister(Def) &&
              !Register::isVirtualRegister(Src) &&
@@ -504,7 +504,7 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
       for (const MachineOperand &MO : MI->implicit_operands()) {
         if (!MO.isReg() || !MO.readsReg())
           continue;
-        unsigned Reg = MO.getReg();
+        Register Reg = MO.getReg();
         if (!Reg)
           continue;
         ReadRegister(Reg, *MI, RegularUse);
@@ -527,7 +527,7 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
       for (const MachineOperand &MO : MI->implicit_operands()) {
         if (!MO.isReg() || !MO.isDef())
           continue;
-        unsigned Reg = MO.getReg();
+        Register Reg = MO.getReg();
         if (!Reg)
           continue;
         Tracker.clobberRegister(Reg, *TRI);
@@ -541,7 +541,7 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
     // Clobber any earlyclobber regs first.
     for (const MachineOperand &MO : MI->operands())
       if (MO.isReg() && MO.isEarlyClobber()) {
-        unsigned Reg = MO.getReg();
+        Register Reg = MO.getReg();
         // If we have a tied earlyclobber, that means it is also read by this
         // instruction, so we need to make sure we don't remove it as dead
        // later.
@@ -560,7 +560,7 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) { RegMask = &MO; if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Reg) continue; @@ -583,7 +583,7 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) { MaybeDeadCopies.begin(); DI != MaybeDeadCopies.end();) { MachineInstr *MaybeDead = *DI; - unsigned Reg = MaybeDead->getOperand(0).getReg(); + Register Reg = MaybeDead->getOperand(0).getReg(); assert(!MRI->isReserved(Reg)); if (!RegMask->clobbersPhysReg(Reg)) { diff --git a/lib/CodeGen/MachineInstrBundle.cpp b/lib/CodeGen/MachineInstrBundle.cpp index 1bd7021f2b6..feb849ced35 100644 --- a/lib/CodeGen/MachineInstrBundle.cpp +++ b/lib/CodeGen/MachineInstrBundle.cpp @@ -154,7 +154,7 @@ void llvm::finalizeBundle(MachineBasicBlock &MBB, continue; } - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Reg) continue; @@ -177,7 +177,7 @@ void llvm::finalizeBundle(MachineBasicBlock &MBB, for (unsigned i = 0, e = Defs.size(); i != e; ++i) { MachineOperand &MO = *Defs[i]; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Reg) continue; @@ -329,7 +329,7 @@ MachineOperandIteratorBase::analyzePhysReg(unsigned Reg, if (!MO.isReg()) continue; - unsigned MOReg = MO.getReg(); + Register MOReg = MO.getReg(); if (!MOReg || !Register::isPhysicalRegister(MOReg)) continue; diff --git a/lib/CodeGen/MachineLICM.cpp b/lib/CodeGen/MachineLICM.cpp index 3e80e8ba06e..15c38da2fb9 100644 --- a/lib/CodeGen/MachineLICM.cpp +++ b/lib/CodeGen/MachineLICM.cpp @@ -424,7 +424,7 @@ void MachineLICMBase::ProcessMI(MachineInstr *MI, if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Reg) continue; assert(Register::isPhysicalRegister(Reg) && @@ -526,7 +526,7 @@ void MachineLICMBase::HoistRegionPostRA() { for (const MachineOperand &MO : TI->operands()) { if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Reg) continue; for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) @@ -554,7 +554,7 @@ void MachineLICMBase::HoistRegionPostRA() { for (const MachineOperand &MO : MI->operands()) { if (!MO.isReg() || MO.isDef() || !MO.getReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (PhysRegDefs.test(Reg) || PhysRegClobbers.test(Reg)) { // If it's using a non-loop-invariant register, then it's obviously @@ -852,7 +852,7 @@ MachineLICMBase::calcRegisterCost(const MachineInstr *MI, bool ConsiderSeen, const MachineOperand &MO = MI->getOperand(i); if (!MO.isReg() || MO.isImplicit()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Register::isVirtualRegister(Reg)) continue; @@ -922,7 +922,7 @@ static bool isInvariantStore(const MachineInstr &MI, // Check that all register operands are caller-preserved physical registers. for (const MachineOperand &MO : MI.operands()) { if (MO.isReg()) { - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); // If operand is a virtual register, check if it comes from a copy of a // physical register. if (Register::isVirtualRegister(Reg)) @@ -955,14 +955,14 @@ static bool isCopyFeedingInvariantStore(const MachineInstr &MI, const MachineFunction *MF = MI.getMF(); // Check that we are copying a constant physical register. 
- unsigned CopySrcReg = MI.getOperand(1).getReg(); + Register CopySrcReg = MI.getOperand(1).getReg(); if (Register::isVirtualRegister(CopySrcReg)) return false; if (!TRI->isCallerPreservedPhysReg(CopySrcReg, *MF)) return false; - unsigned CopyDstReg = MI.getOperand(0).getReg(); + Register CopyDstReg = MI.getOperand(0).getReg(); // Check if any of the uses of the copy are invariant stores. assert(Register::isVirtualRegister(CopyDstReg) && "copy dst is not a virtual reg"); @@ -1010,7 +1010,7 @@ bool MachineLICMBase::IsLoopInvariantInst(MachineInstr &I) { if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Reg == 0) continue; // Don't hoist an instruction that uses or defines a physical register. @@ -1061,7 +1061,7 @@ bool MachineLICMBase::HasLoopPHIUse(const MachineInstr *MI) const { for (const MachineOperand &MO : MI->operands()) { if (!MO.isReg() || !MO.isDef()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Register::isVirtualRegister(Reg)) continue; for (MachineInstr &UseMI : MRI->use_instructions(Reg)) { @@ -1104,7 +1104,7 @@ bool MachineLICMBase::HasHighOperandLatency(MachineInstr &MI, const MachineOperand &MO = UseMI.getOperand(i); if (!MO.isReg() || !MO.isUse()) continue; - unsigned MOReg = MO.getReg(); + Register MOReg = MO.getReg(); if (MOReg != Reg) continue; @@ -1132,7 +1132,7 @@ bool MachineLICMBase::IsCheapInstruction(MachineInstr &MI) const { if (!DefMO.isReg() || !DefMO.isDef()) continue; --NumDefs; - unsigned Reg = DefMO.getReg(); + Register Reg = DefMO.getReg(); if (Register::isPhysicalRegister(Reg)) continue; @@ -1225,7 +1225,7 @@ bool MachineLICMBase::IsProfitableToHoist(MachineInstr &MI) { const MachineOperand &MO = MI.getOperand(i); if (!MO.isReg() || MO.isImplicit()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Register::isVirtualRegister(Reg)) continue; if (MO.isDef() && HasHighOperandLatency(MI, i, Reg)) { @@ -1304,7 +1304,7 @@ MachineInstr *MachineLICMBase::ExtractHoistableLoad(MachineInstr *MI) { MachineFunction &MF = *MI->getMF(); const TargetRegisterClass *RC = TII->getRegClass(MID, LoadRegIndex, TRI, MF); // Ok, we're unfolding. Create a temporary register and do the unfold. 
- unsigned Reg = MRI->createVirtualRegister(RC); + Register Reg = MRI->createVirtualRegister(RC); SmallVector NewMIs; bool Success = TII->unfoldMemoryOperand(MF, *MI, Reg, @@ -1390,8 +1390,8 @@ bool MachineLICMBase::EliminateCSE(MachineInstr *MI, SmallVector OrigRCs; for (unsigned i = 0, e = Defs.size(); i != e; ++i) { unsigned Idx = Defs[i]; - unsigned Reg = MI->getOperand(Idx).getReg(); - unsigned DupReg = Dup->getOperand(Idx).getReg(); + Register Reg = MI->getOperand(Idx).getReg(); + Register DupReg = Dup->getOperand(Idx).getReg(); OrigRCs.push_back(MRI->getRegClass(DupReg)); if (!MRI->constrainRegClass(DupReg, MRI->getRegClass(Reg))) { @@ -1403,8 +1403,8 @@ bool MachineLICMBase::EliminateCSE(MachineInstr *MI, } for (unsigned Idx : Defs) { - unsigned Reg = MI->getOperand(Idx).getReg(); - unsigned DupReg = Dup->getOperand(Idx).getReg(); + Register Reg = MI->getOperand(Idx).getReg(); + Register DupReg = Dup->getOperand(Idx).getReg(); MRI->replaceRegWith(Reg, DupReg); MRI->clearKillFlags(DupReg); } diff --git a/lib/CodeGen/MachineOperand.cpp b/lib/CodeGen/MachineOperand.cpp index 43081ccb329..c7e804f6260 100644 --- a/lib/CodeGen/MachineOperand.cpp +++ b/lib/CodeGen/MachineOperand.cpp @@ -750,7 +750,7 @@ void MachineOperand::print(raw_ostream &OS, ModuleSlotTracker &MST, printTargetFlags(OS, *this); switch (getType()) { case MachineOperand::MO_Register: { - unsigned Reg = getReg(); + Register Reg = getReg(); if (isImplicit()) OS << (isDef() ? "implicit-def " : "implicit "); else if (PrintDef && isDef()) diff --git a/lib/CodeGen/MachinePipeliner.cpp b/lib/CodeGen/MachinePipeliner.cpp index ca7d694db7f..584b2a1eaec 100644 --- a/lib/CodeGen/MachinePipeliner.cpp +++ b/lib/CodeGen/MachinePipeliner.cpp @@ -349,7 +349,7 @@ void MachinePipeliner::preprocessPhiNodes(MachineBasicBlock &B) { // If the operand uses a subregister, replace it with a new register // without subregisters, and generate a copy to the new register. - unsigned NewReg = MRI.createVirtualRegister(RC); + Register NewReg = MRI.createVirtualRegister(RC); MachineBasicBlock &PredB = *PI.getOperand(i+1).getMBB(); MachineBasicBlock::iterator At = PredB.getFirstTerminator(); const DebugLoc &DL = PredB.findDebugLoc(At); @@ -730,7 +730,7 @@ void SwingSchedulerDAG::updatePhiDependences() { MOI != MOE; ++MOI) { if (!MOI->isReg()) continue; - unsigned Reg = MOI->getReg(); + Register Reg = MOI->getReg(); if (MOI->isDef()) { // If the register is used by a Phi, then create an anti dependence. for (MachineRegisterInfo::use_instr_iterator @@ -809,7 +809,7 @@ void SwingSchedulerDAG::changeDependences() { continue; // Get the MI and SUnit for the instruction that defines the original base. 
- unsigned OrigBase = I.getInstr()->getOperand(BasePos).getReg(); + Register OrigBase = I.getInstr()->getOperand(BasePos).getReg(); MachineInstr *DefMI = MRI.getUniqueVRegDef(OrigBase); if (!DefMI) continue; @@ -1514,7 +1514,7 @@ static void computeLiveOuts(MachineFunction &MF, RegPressureTracker &RPTracker, continue; for (const MachineOperand &MO : MI->operands()) if (MO.isReg() && MO.isUse()) { - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Register::isVirtualRegister(Reg)) Uses.insert(Reg); else if (MRI.isAllocatable(Reg)) @@ -1525,7 +1525,7 @@ static void computeLiveOuts(MachineFunction &MF, RegPressureTracker &RPTracker, for (SUnit *SU : NS) for (const MachineOperand &MO : SU->getInstr()->operands()) if (MO.isReg() && MO.isDef() && !MO.isDead()) { - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Register::isVirtualRegister(Reg)) { if (!Uses.count(Reg)) LiveOutRegs.push_back(RegisterMaskPair(Reg, @@ -2311,7 +2311,7 @@ void SwingSchedulerDAG::generateExistingPhis( for (MachineBasicBlock::iterator BBI = BB->instr_begin(), BBE = BB->getFirstNonPHI(); BBI != BBE; ++BBI) { - unsigned Def = BBI->getOperand(0).getReg(); + Register Def = BBI->getOperand(0).getReg(); unsigned InitVal = 0; unsigned LoopVal = 0; @@ -2558,7 +2558,7 @@ void SwingSchedulerDAG::generatePhis( int StageScheduled = Schedule.stageScheduled(getSUnit(&*BBI)); assert(StageScheduled != -1 && "Expecting scheduled instruction."); - unsigned Def = MO.getReg(); + Register Def = MO.getReg(); unsigned NumPhis = Schedule.getStagesForReg(Def, CurStageNum); // An instruction scheduled in stage 0 and is used after the loop // requires a phi in the epilog for the last definition from either @@ -2591,7 +2591,7 @@ void SwingSchedulerDAG::generatePhis( PhiOp2 = VRMap[PrevStage - np][Def]; const TargetRegisterClass *RC = MRI.getRegClass(Def); - unsigned NewReg = MRI.createVirtualRegister(RC); + Register NewReg = MRI.createVirtualRegister(RC); MachineInstrBuilder NewPhi = BuildMI(*NewBB, NewBB->getFirstNonPHI(), DebugLoc(), @@ -2656,7 +2656,7 @@ void SwingSchedulerDAG::removeDeadInstructions(MachineBasicBlock *KernelBB, MOI != MOE; ++MOI) { if (!MOI->isReg() || !MOI->isDef()) continue; - unsigned reg = MOI->getReg(); + Register reg = MOI->getReg(); // Assume physical registers are used, unless they are marked dead. if (Register::isPhysicalRegister(reg)) { used = !MOI->isDead(); @@ -2694,7 +2694,7 @@ void SwingSchedulerDAG::removeDeadInstructions(MachineBasicBlock *KernelBB, BBI != BBE;) { MachineInstr *MI = &*BBI; ++BBI; - unsigned reg = MI->getOperand(0).getReg(); + Register reg = MI->getOperand(0).getReg(); if (MRI.use_begin(reg) == MRI.use_end()) { LIS.RemoveMachineInstrFromMaps(*MI); MI->eraseFromParent(); @@ -2717,7 +2717,7 @@ void SwingSchedulerDAG::splitLifetimes(MachineBasicBlock *KernelBB, SMSchedule &Schedule) { const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); for (auto &PHI : KernelBB->phis()) { - unsigned Def = PHI.getOperand(0).getReg(); + Register Def = PHI.getOperand(0).getReg(); // Check for any Phi definition that used as an operand of another Phi // in the same block. for (MachineRegisterInfo::use_instr_iterator I = MRI.use_instr_begin(Def), @@ -2854,7 +2854,7 @@ bool SwingSchedulerDAG::computeDelta(MachineInstr &MI, unsigned &Delta) { if (!BaseOp->isReg()) return false; - unsigned BaseReg = BaseOp->getReg(); + Register BaseReg = BaseOp->getReg(); MachineRegisterInfo &MRI = MF.getRegInfo(); // Check if there is a Phi. If so, get the definition in the loop. 
@@ -2964,11 +2964,11 @@ void SwingSchedulerDAG::updateInstruction(MachineInstr *NewMI, bool LastDef, MachineOperand &MO = NewMI->getOperand(i); if (!MO.isReg() || !Register::isVirtualRegister(MO.getReg())) continue; - unsigned reg = MO.getReg(); + Register reg = MO.getReg(); if (MO.isDef()) { // Create a new virtual register for the definition. const TargetRegisterClass *RC = MRI.getRegClass(reg); - unsigned NewReg = MRI.createVirtualRegister(RC); + Register NewReg = MRI.createVirtualRegister(RC); MO.setReg(NewReg); VRMap[CurStageNum][reg] = NewReg; if (LastDef) @@ -3051,7 +3051,7 @@ void SwingSchedulerDAG::rewritePhiValues(MachineBasicBlock *NewBB, unsigned InitVal = 0; unsigned LoopVal = 0; getPhiRegs(PHI, BB, InitVal, LoopVal); - unsigned PhiDef = PHI.getOperand(0).getReg(); + Register PhiDef = PHI.getOperand(0).getReg(); unsigned PhiStage = (unsigned)Schedule.stageScheduled(getSUnit(MRI.getVRegDef(PhiDef))); @@ -3147,7 +3147,7 @@ bool SwingSchedulerDAG::canUseLastOffsetValue(MachineInstr *MI, unsigned BasePosLd, OffsetPosLd; if (!TII->getBaseAndOffsetPosition(*MI, BasePosLd, OffsetPosLd)) return false; - unsigned BaseReg = MI->getOperand(BasePosLd).getReg(); + Register BaseReg = MI->getOperand(BasePosLd).getReg(); // Look for the Phi instruction. MachineRegisterInfo &MRI = MI->getMF()->getRegInfo(); @@ -3202,7 +3202,7 @@ void SwingSchedulerDAG::applyInstrChange(MachineInstr *MI, unsigned BasePos, OffsetPos; if (!TII->getBaseAndOffsetPosition(*MI, BasePos, OffsetPos)) return; - unsigned BaseReg = MI->getOperand(BasePos).getReg(); + Register BaseReg = MI->getOperand(BasePos).getReg(); MachineInstr *LoopDef = findDefInLoop(BaseReg); int DefStageNum = Schedule.stageScheduled(getSUnit(LoopDef)); int DefCycleNum = Schedule.cycleScheduled(getSUnit(LoopDef)); @@ -3502,7 +3502,7 @@ void SMSchedule::orderDependence(SwingSchedulerDAG *SSD, SUnit *SU, if (!MO.isReg() || !Register::isVirtualRegister(MO.getReg())) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); unsigned BasePos, OffsetPos; if (ST.getInstrInfo()->getBaseAndOffsetPosition(*MI, BasePos, OffsetPos)) if (MI->getOperand(BasePos).getReg() == Reg) @@ -3857,7 +3857,7 @@ void SMSchedule::finalizeSchedule(SwingSchedulerDAG *SSD) { if (!Op.isReg() || !Op.isDef()) continue; - unsigned Reg = Op.getReg(); + Register Reg = Op.getReg(); unsigned MaxDiff = 0; bool PhiIsSwapped = false; for (MachineRegisterInfo::use_iterator UI = MRI.use_begin(Reg), diff --git a/lib/CodeGen/MachineSSAUpdater.cpp b/lib/CodeGen/MachineSSAUpdater.cpp index e8b42047b49..3370b52ede4 100644 --- a/lib/CodeGen/MachineSSAUpdater.cpp +++ b/lib/CodeGen/MachineSSAUpdater.cpp @@ -95,7 +95,7 @@ unsigned LookForIdenticalPHI(MachineBasicBlock *BB, while (I != BB->end() && I->isPHI()) { bool Same = true; for (unsigned i = 1, e = I->getNumOperands(); i != e; i += 2) { - unsigned SrcReg = I->getOperand(i).getReg(); + Register SrcReg = I->getOperand(i).getReg(); MachineBasicBlock *SrcBB = I->getOperand(i+1).getMBB(); if (AVals[SrcBB] != SrcReg) { Same = false; @@ -118,7 +118,7 @@ MachineInstrBuilder InsertNewDef(unsigned Opcode, const TargetRegisterClass *RC, MachineRegisterInfo *MRI, const TargetInstrInfo *TII) { - unsigned NewVR = MRI->createVirtualRegister(RC); + Register NewVR = MRI->createVirtualRegister(RC); return BuildMI(*BB, I, DebugLoc(), TII->get(Opcode), NewVR); } diff --git a/lib/CodeGen/MachineScheduler.cpp b/lib/CodeGen/MachineScheduler.cpp index ea322bf0d2d..a46ded954af 100644 --- a/lib/CodeGen/MachineScheduler.cpp +++ 
b/lib/CodeGen/MachineScheduler.cpp @@ -933,7 +933,7 @@ void ScheduleDAGMILive::collectVRegUses(SUnit &SU) { if (TrackLaneMasks && !MO.isUse()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Register::isVirtualRegister(Reg)) continue; @@ -1687,12 +1687,12 @@ void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) { // Check for pure vreg copies. const MachineOperand &SrcOp = Copy->getOperand(1); - unsigned SrcReg = SrcOp.getReg(); + Register SrcReg = SrcOp.getReg(); if (!Register::isVirtualRegister(SrcReg) || !SrcOp.readsReg()) return; const MachineOperand &DstOp = Copy->getOperand(0); - unsigned DstReg = DstOp.getReg(); + Register DstReg = DstOp.getReg(); if (!Register::isVirtualRegister(DstReg) || DstOp.isDead()) return; diff --git a/lib/CodeGen/MachineSink.cpp b/lib/CodeGen/MachineSink.cpp index 3ebb76d5f20..3d5530d4b8d 100644 --- a/lib/CodeGen/MachineSink.cpp +++ b/lib/CodeGen/MachineSink.cpp @@ -195,8 +195,8 @@ bool MachineSinking::PerformTrivialForwardCoalescing(MachineInstr &MI, if (!MI.isCopy()) return false; - unsigned SrcReg = MI.getOperand(1).getReg(); - unsigned DstReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); + Register DstReg = MI.getOperand(0).getReg(); if (!Register::isVirtualRegister(SrcReg) || !Register::isVirtualRegister(DstReg) || !MRI->hasOneNonDBGUse(SrcReg)) return false; @@ -414,7 +414,7 @@ bool MachineSinking::isWorthBreakingCriticalEdge(MachineInstr &MI, const MachineOperand &MO = MI.getOperand(i); if (!MO.isReg() || !MO.isUse()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Reg == 0) continue; @@ -613,7 +613,7 @@ MachineSinking::FindSuccToSinkTo(MachineInstr &MI, MachineBasicBlock *MBB, const MachineOperand &MO = MI.getOperand(i); if (!MO.isReg()) continue; // Ignore non-register operands. 
- unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Reg == 0) continue; if (Register::isPhysicalRegister(Reg)) { @@ -815,7 +815,7 @@ bool MachineSinking::SinkInstruction(MachineInstr &MI, bool &SawStore, for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) { const MachineOperand &MO = MI.getOperand(I); if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Reg == 0 || !Register::isPhysicalRegister(Reg)) continue; if (SuccToSinkTo->isLiveIn(Reg)) @@ -1029,7 +1029,7 @@ static void clearKillFlags(MachineInstr *MI, MachineBasicBlock &CurBB, const TargetRegisterInfo *TRI) { for (auto U : UsedOpsInCopy) { MachineOperand &MO = MI->getOperand(U); - unsigned SrcReg = MO.getReg(); + Register SrcReg = MO.getReg(); if (!UsedRegUnits.available(SrcReg)) { MachineBasicBlock::iterator NI = std::next(MI->getIterator()); for (MachineInstr &UI : make_range(NI, CurBB.end())) { @@ -1052,7 +1052,7 @@ static void updateLiveIn(MachineInstr *MI, MachineBasicBlock *SuccBB, for (MCSubRegIterator S(DefReg, TRI, true); S.isValid(); ++S) SuccBB->removeLiveIn(*S); for (auto U : UsedOpsInCopy) { - unsigned Reg = MI->getOperand(U).getReg(); + Register Reg = MI->getOperand(U).getReg(); if (!SuccBB->isLiveIn(Reg)) SuccBB->addLiveIn(Reg); } @@ -1068,7 +1068,7 @@ static bool hasRegisterDependency(MachineInstr *MI, MachineOperand &MO = MI->getOperand(i); if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Reg) continue; if (MO.isDef()) { @@ -1181,7 +1181,7 @@ bool PostRAMachineSinking::tryToSinkCopy(MachineBasicBlock &CurBB, for (auto &MO : MI->operands()) { if (!MO.isReg() || !MO.isDef()) continue; - unsigned reg = MO.getReg(); + Register reg = MO.getReg(); for (auto *MI : SeenDbgInstrs.lookup(reg)) DbgValsToSink.push_back(MI); } diff --git a/lib/CodeGen/MachineTraceMetrics.cpp b/lib/CodeGen/MachineTraceMetrics.cpp index 8f382fd2f26..66a3bc2f8cc 100644 --- a/lib/CodeGen/MachineTraceMetrics.cpp +++ b/lib/CodeGen/MachineTraceMetrics.cpp @@ -660,7 +660,7 @@ static bool getDataDeps(const MachineInstr &UseMI, const MachineOperand &MO = *I; if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Reg) continue; if (Register::isPhysicalRegister(Reg)) { @@ -687,7 +687,7 @@ static void getPHIDeps(const MachineInstr &UseMI, assert(UseMI.isPHI() && UseMI.getNumOperands() % 2 && "Bad PHI"); for (unsigned i = 1; i != UseMI.getNumOperands(); i += 2) { if (UseMI.getOperand(i + 1).getMBB() == Pred) { - unsigned Reg = UseMI.getOperand(i).getReg(); + Register Reg = UseMI.getOperand(i).getReg(); Deps.push_back(DataDep(MRI, Reg, i)); return; } @@ -708,7 +708,7 @@ static void updatePhysDepsDownwards(const MachineInstr *UseMI, const MachineOperand &MO = *MI; if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Register::isPhysicalRegister(Reg)) continue; // Track live defs and kills for updating RegUnits. @@ -902,7 +902,7 @@ static unsigned updatePhysDepsUpwards(const MachineInstr &MI, unsigned Height, const MachineOperand &MO = *MOI; if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Register::isPhysicalRegister(Reg)) continue; if (MO.readsReg()) @@ -930,7 +930,7 @@ static unsigned updatePhysDepsUpwards(const MachineInstr &MI, unsigned Height, // Now we know the height of MI. Update any regunits read. 
for (unsigned i = 0, e = ReadOps.size(); i != e; ++i) { - unsigned Reg = MI.getOperand(ReadOps[i]).getReg(); + Register Reg = MI.getOperand(ReadOps[i]).getReg(); for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units) { LiveRegUnit &LRU = RegUnits[*Units]; // Set the height to the highest reader of the unit. diff --git a/lib/CodeGen/MachineVerifier.cpp b/lib/CodeGen/MachineVerifier.cpp index 33d22469075..c0c5a70fac8 100644 --- a/lib/CodeGen/MachineVerifier.cpp +++ b/lib/CodeGen/MachineVerifier.cpp @@ -1609,7 +1609,7 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) { switch (MO->getType()) { case MachineOperand::MO_Register: { - const unsigned Reg = MO->getReg(); + const Register Reg = MO->getReg(); if (!Reg) return; if (MRI->tracksLiveness() && !MI->isDebugValue()) @@ -2184,7 +2184,7 @@ void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) { if (MODef.isTied() || MODef.isImplicit() || MODef.isInternalRead() || MODef.isEarlyClobber() || MODef.isDebug()) report("Unexpected flag on PHI operand", &MODef, 0); - unsigned DefReg = MODef.getReg(); + Register DefReg = MODef.getReg(); if (!Register::isVirtualRegister(DefReg)) report("Expected first PHI operand to be a virtual register", &MODef, 0); diff --git a/lib/CodeGen/OptimizePHIs.cpp b/lib/CodeGen/OptimizePHIs.cpp index 3a8c71166d4..1a493964e67 100644 --- a/lib/CodeGen/OptimizePHIs.cpp +++ b/lib/CodeGen/OptimizePHIs.cpp @@ -97,7 +97,7 @@ bool OptimizePHIs::IsSingleValuePHICycle(MachineInstr *MI, unsigned &SingleValReg, InstrSet &PHIsInCycle) { assert(MI->isPHI() && "IsSingleValuePHICycle expects a PHI instruction"); - unsigned DstReg = MI->getOperand(0).getReg(); + Register DstReg = MI->getOperand(0).getReg(); // See if we already saw this register. if (!PHIsInCycle.insert(MI).second) @@ -109,7 +109,7 @@ bool OptimizePHIs::IsSingleValuePHICycle(MachineInstr *MI, // Scan the PHI operands. for (unsigned i = 1; i != MI->getNumOperands(); i += 2) { - unsigned SrcReg = MI->getOperand(i).getReg(); + Register SrcReg = MI->getOperand(i).getReg(); if (SrcReg == DstReg) continue; MachineInstr *SrcMI = MRI->getVRegDef(SrcReg); @@ -141,7 +141,7 @@ bool OptimizePHIs::IsSingleValuePHICycle(MachineInstr *MI, /// other PHIs in a cycle. bool OptimizePHIs::IsDeadPHICycle(MachineInstr *MI, InstrSet &PHIsInCycle) { assert(MI->isPHI() && "IsDeadPHICycle expects a PHI instruction"); - unsigned DstReg = MI->getOperand(0).getReg(); + Register DstReg = MI->getOperand(0).getReg(); assert(Register::isVirtualRegister(DstReg) && "PHI destination is not a virtual register"); @@ -176,7 +176,7 @@ bool OptimizePHIs::OptimizeBB(MachineBasicBlock &MBB) { InstrSet PHIsInCycle; if (IsSingleValuePHICycle(MI, SingleValReg, PHIsInCycle) && SingleValReg != 0) { - unsigned OldReg = MI->getOperand(0).getReg(); + Register OldReg = MI->getOperand(0).getReg(); if (!MRI->constrainRegClass(SingleValReg, MRI->getRegClass(OldReg))) continue; diff --git a/lib/CodeGen/PHIElimination.cpp b/lib/CodeGen/PHIElimination.cpp index eea32bd216e..2b3ce890d98 100644 --- a/lib/CodeGen/PHIElimination.cpp +++ b/lib/CodeGen/PHIElimination.cpp @@ -168,7 +168,7 @@ bool PHIElimination::runOnMachineFunction(MachineFunction &MF) { // Remove dead IMPLICIT_DEF instructions. 
for (MachineInstr *DefMI : ImpDefs) { - unsigned DefReg = DefMI->getOperand(0).getReg(); + Register DefReg = DefMI->getOperand(0).getReg(); if (MRI->use_nodbg_empty(DefReg)) { if (LIS) LIS->RemoveMachineInstrFromMaps(*DefMI); @@ -240,7 +240,7 @@ void PHIElimination::LowerPHINode(MachineBasicBlock &MBB, MachineInstr *MPhi = MBB.remove(&*MBB.begin()); unsigned NumSrcs = (MPhi->getNumOperands() - 1) / 2; - unsigned DestReg = MPhi->getOperand(0).getReg(); + Register DestReg = MPhi->getOperand(0).getReg(); assert(MPhi->getOperand(0).getSubReg() == 0 && "Can't handle sub-reg PHIs"); bool isDead = MPhi->getOperand(0).isDead(); @@ -368,7 +368,7 @@ void PHIElimination::LowerPHINode(MachineBasicBlock &MBB, // IncomingReg register in the corresponding predecessor basic block. SmallPtrSet MBBsInsertedInto; for (int i = NumSrcs - 1; i >= 0; --i) { - unsigned SrcReg = MPhi->getOperand(i*2+1).getReg(); + Register SrcReg = MPhi->getOperand(i * 2 + 1).getReg(); unsigned SrcSubReg = MPhi->getOperand(i*2+1).getSubReg(); bool SrcUndef = MPhi->getOperand(i*2+1).isUndef() || isImplicitlyDefined(SrcReg, *MRI); @@ -567,7 +567,7 @@ bool PHIElimination::SplitPHIEdges(MachineFunction &MF, for (MachineBasicBlock::iterator BBI = MBB.begin(), BBE = MBB.end(); BBI != BBE && BBI->isPHI(); ++BBI) { for (unsigned i = 1, e = BBI->getNumOperands(); i != e; i += 2) { - unsigned Reg = BBI->getOperand(i).getReg(); + Register Reg = BBI->getOperand(i).getReg(); MachineBasicBlock *PreMBB = BBI->getOperand(i+1).getMBB(); // Is there a critical edge from PreMBB to MBB? if (PreMBB->succ_size() == 1) diff --git a/lib/CodeGen/PeepholeOptimizer.cpp b/lib/CodeGen/PeepholeOptimizer.cpp index a41ac73326d..e613d074bfc 100644 --- a/lib/CodeGen/PeepholeOptimizer.cpp +++ b/lib/CodeGen/PeepholeOptimizer.cpp @@ -581,7 +581,7 @@ optimizeExtInstr(MachineInstr &MI, MachineBasicBlock &MBB, MRI->constrainRegClass(DstReg, DstRC); } - unsigned NewVR = MRI->createVirtualRegister(RC); + Register NewVR = MRI->createVirtualRegister(RC); MachineInstr *Copy = BuildMI(*UseMBB, UseMI, UseMI->getDebugLoc(), TII->get(TargetOpcode::COPY), NewVR) .addReg(DstReg, 0, SubIdx); @@ -761,7 +761,7 @@ insertPHI(MachineRegisterInfo &MRI, const TargetInstrInfo &TII, // NewRC is only correct if no subregisters are involved. findNextSource() // should have rejected those cases already. assert(SrcRegs[0].SubReg == 0 && "should not have subreg operand"); - unsigned NewVR = MRI.createVirtualRegister(NewRC); + Register NewVR = MRI.createVirtualRegister(NewRC); MachineBasicBlock *MBB = OrigPHI.getParent(); MachineInstrBuilder MIB = BuildMI(*MBB, &OrigPHI, OrigPHI.getDebugLoc(), TII.get(TargetOpcode::PHI), NewVR); @@ -1229,7 +1229,7 @@ PeepholeOptimizer::rewriteSource(MachineInstr &CopyLike, // Insert the COPY. const TargetRegisterClass *DefRC = MRI->getRegClass(Def.Reg); - unsigned NewVReg = MRI->createVirtualRegister(DefRC); + Register NewVReg = MRI->createVirtualRegister(DefRC); MachineInstr *NewCopy = BuildMI(*CopyLike.getParent(), &CopyLike, CopyLike.getDebugLoc(), @@ -1315,7 +1315,7 @@ bool PeepholeOptimizer::isLoadFoldable( if (MCID.getNumDefs() != 1) return false; - unsigned Reg = MI.getOperand(0).getReg(); + Register Reg = MI.getOperand(0).getReg(); // To reduce compilation time, we check MRI->hasOneNonDBGUser when inserting // loads. It should be checked when processing uses of the load, since // uses can be removed during peephole. 
@@ -1335,7 +1335,7 @@ bool PeepholeOptimizer::isMoveImmediate( return false; if (MCID.getNumDefs() != 1) return false; - unsigned Reg = MI.getOperand(0).getReg(); + Register Reg = MI.getOperand(0).getReg(); if (Register::isVirtualRegister(Reg)) { ImmDefMIs.insert(std::make_pair(Reg, &MI)); ImmDefRegs.insert(Reg); @@ -1358,7 +1358,7 @@ bool PeepholeOptimizer::foldImmediate(MachineInstr &MI, // Ignore dead implicit defs. if (MO.isImplicit() && MO.isDead()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Register::isVirtualRegister(Reg)) continue; if (ImmDefRegs.count(Reg) == 0) @@ -1392,11 +1392,11 @@ bool PeepholeOptimizer::foldRedundantCopy(MachineInstr &MI, DenseMap &CopyMIs) { assert(MI.isCopy() && "expected a COPY machine instruction"); - unsigned SrcReg = MI.getOperand(1).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); if (!Register::isVirtualRegister(SrcReg)) return false; - unsigned DstReg = MI.getOperand(0).getReg(); + Register DstReg = MI.getOperand(0).getReg(); if (!Register::isVirtualRegister(DstReg)) return false; @@ -1415,7 +1415,7 @@ bool PeepholeOptimizer::foldRedundantCopy(MachineInstr &MI, if (SrcSubReg != PrevSrcSubReg) return false; - unsigned PrevDstReg = PrevCopy->getOperand(0).getReg(); + Register PrevDstReg = PrevCopy->getOperand(0).getReg(); // Only replace if the copy register class is the same. // @@ -1442,8 +1442,8 @@ bool PeepholeOptimizer::foldRedundantNAPhysCopy( if (DisableNAPhysCopyOpt) return false; - unsigned DstReg = MI.getOperand(0).getReg(); - unsigned SrcReg = MI.getOperand(1).getReg(); + Register DstReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); if (isNAPhysCopy(SrcReg) && Register::isVirtualRegister(DstReg)) { // %vreg = COPY %physreg // Avoid using a datastructure which can track multiple live non-allocatable @@ -1465,7 +1465,7 @@ bool PeepholeOptimizer::foldRedundantNAPhysCopy( return false; } - unsigned PrevDstReg = PrevCopy->second->getOperand(0).getReg(); + Register PrevDstReg = PrevCopy->second->getOperand(0).getReg(); if (PrevDstReg == SrcReg) { // Remove the virt->phys copy: we saw the virtual register definition, and // the non-allocatable physical register's state hasn't changed since then. @@ -1660,7 +1660,7 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) { for (const MachineOperand &MO : MI->operands()) { // Visit all operands: definitions can be implicit or explicit. 
if (MO.isReg()) { - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (MO.isDef() && isNAPhysCopy(Reg)) { const auto &Def = NAPhysToVirtMIs.find(Reg); if (Def != NAPhysToVirtMIs.end()) { diff --git a/lib/CodeGen/ProcessImplicitDefs.cpp b/lib/CodeGen/ProcessImplicitDefs.cpp index 1fd055c6bad..11bff45f9ad 100644 --- a/lib/CodeGen/ProcessImplicitDefs.cpp +++ b/lib/CodeGen/ProcessImplicitDefs.cpp @@ -73,7 +73,7 @@ bool ProcessImplicitDefs::canTurnIntoImplicitDef(MachineInstr *MI) { void ProcessImplicitDefs::processImplicitDef(MachineInstr *MI) { LLVM_DEBUG(dbgs() << "Processing " << *MI); - unsigned Reg = MI->getOperand(0).getReg(); + Register Reg = MI->getOperand(0).getReg(); if (Register::isVirtualRegister(Reg)) { // For virtual registers, mark all uses as , and convert users to @@ -100,7 +100,7 @@ void ProcessImplicitDefs::processImplicitDef(MachineInstr *MI) { for (MachineOperand &MO : UserMI->operands()) { if (!MO.isReg()) continue; - unsigned UserReg = MO.getReg(); + Register UserReg = MO.getReg(); if (!Register::isPhysicalRegister(UserReg) || !TRI->regsOverlap(Reg, UserReg)) continue; diff --git a/lib/CodeGen/RegAllocFast.cpp b/lib/CodeGen/RegAllocFast.cpp index fbf81c25881..44d0233604e 100644 --- a/lib/CodeGen/RegAllocFast.cpp +++ b/lib/CodeGen/RegAllocFast.cpp @@ -455,7 +455,7 @@ void RegAllocFast::usePhysReg(MachineOperand &MO) { if (MO.isUndef()) return; - unsigned PhysReg = MO.getReg(); + Register PhysReg = MO.getReg(); assert(Register::isPhysicalRegister(PhysReg) && "Bad usePhysReg operand"); markRegUsedInInstr(PhysReg); @@ -645,7 +645,7 @@ unsigned RegAllocFast::traceCopies(unsigned VirtReg) const { unsigned C = 0; for (const MachineInstr &MI : MRI->def_instructions(VirtReg)) { if (isCoalescable(MI)) { - unsigned Reg = MI.getOperand(1).getReg(); + Register Reg = MI.getOperand(1).getReg(); Reg = traceCopyChain(Reg); if (Reg != 0) return Reg; @@ -750,7 +750,7 @@ void RegAllocFast::allocVirtReg(MachineInstr &MI, LiveReg &LR, unsigned Hint0) { void RegAllocFast::allocVirtRegUndef(MachineOperand &MO) { assert(MO.isUndef() && "expected undef use"); - unsigned VirtReg = MO.getReg(); + Register VirtReg = MO.getReg(); assert(Register::isVirtualRegister(VirtReg) && "Expected virtreg"); LiveRegMap::const_iterator LRI = findLiveVirtReg(VirtReg); @@ -889,7 +889,7 @@ void RegAllocFast::handleThroughOperands(MachineInstr &MI, SmallSet ThroughRegs; for (const MachineOperand &MO : MI.operands()) { if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Register::isVirtualRegister(Reg)) continue; if (MO.isEarlyClobber() || (MO.isUse() && MO.isTied()) || @@ -904,7 +904,7 @@ void RegAllocFast::handleThroughOperands(MachineInstr &MI, LLVM_DEBUG(dbgs() << "\nChecking for physdef collisions.\n"); for (const MachineOperand &MO : MI.operands()) { if (!MO.isReg() || !MO.isDef()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Reg || !Register::isPhysicalRegister(Reg)) continue; markRegUsedInInstr(Reg); @@ -919,7 +919,7 @@ void RegAllocFast::handleThroughOperands(MachineInstr &MI, for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) { MachineOperand &MO = MI.getOperand(I); if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Register::isVirtualRegister(Reg)) continue; if (MO.isUse()) { @@ -945,7 +945,7 @@ void RegAllocFast::handleThroughOperands(MachineInstr &MI, for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) { const MachineOperand &MO = MI.getOperand(I); if (!MO.isReg()) 
continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Register::isVirtualRegister(Reg)) continue; if (!MO.isEarlyClobber()) @@ -960,7 +960,7 @@ void RegAllocFast::handleThroughOperands(MachineInstr &MI, UsedInInstr.clear(); for (const MachineOperand &MO : MI.operands()) { if (!MO.isReg() || (MO.isDef() && !MO.isEarlyClobber())) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Reg || !Register::isPhysicalRegister(Reg)) continue; LLVM_DEBUG(dbgs() << "\tSetting " << printReg(Reg, TRI) @@ -1043,7 +1043,7 @@ void RegAllocFast::allocateInstruction(MachineInstr &MI) { continue; } if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Reg) continue; if (Register::isVirtualRegister(Reg)) { VirtOpEnd = i+1; @@ -1094,7 +1094,7 @@ void RegAllocFast::allocateInstruction(MachineInstr &MI) { for (unsigned I = 0; I != VirtOpEnd; ++I) { MachineOperand &MO = MI.getOperand(I); if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Register::isVirtualRegister(Reg)) continue; if (MO.isUse()) { @@ -1123,7 +1123,7 @@ void RegAllocFast::allocateInstruction(MachineInstr &MI) { for (MachineOperand &MO : MI.uses()) { if (!MO.isReg() || !MO.isUse()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Register::isVirtualRegister(Reg)) continue; @@ -1138,7 +1138,7 @@ void RegAllocFast::allocateInstruction(MachineInstr &MI) { if (hasEarlyClobbers) { for (const MachineOperand &MO : MI.operands()) { if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Reg || !Register::isPhysicalRegister(Reg)) continue; // Look for physreg defs and tied uses. @@ -1166,7 +1166,7 @@ void RegAllocFast::allocateInstruction(MachineInstr &MI) { const MachineOperand &MO = MI.getOperand(I); if (!MO.isReg() || !MO.isDef() || !MO.getReg() || MO.isEarlyClobber()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Reg || !Register::isPhysicalRegister(Reg) || !MRI->isAllocatable(Reg)) continue; @@ -1179,7 +1179,7 @@ void RegAllocFast::allocateInstruction(MachineInstr &MI) { const MachineOperand &MO = MI.getOperand(I); if (!MO.isReg() || !MO.isDef() || !MO.getReg() || MO.isEarlyClobber()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); // We have already dealt with phys regs in the previous scan. if (Register::isPhysicalRegister(Reg)) @@ -1214,7 +1214,7 @@ void RegAllocFast::handleDebugValue(MachineInstr &MI) { // mostly constants and frame indices. if (!MO.isReg()) return; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Register::isVirtualRegister(Reg)) return; diff --git a/lib/CodeGen/RegAllocGreedy.cpp b/lib/CodeGen/RegAllocGreedy.cpp index 10289efefd5..d27db678f02 100644 --- a/lib/CodeGen/RegAllocGreedy.cpp +++ b/lib/CodeGen/RegAllocGreedy.cpp @@ -2919,7 +2919,7 @@ void RAGreedy::tryHintRecoloring(LiveInterval &VirtReg) { SmallVector RecoloringCandidates; HintsInfo Info; unsigned Reg = VirtReg.reg; - unsigned PhysReg = VRM->getPhys(Reg); + Register PhysReg = VRM->getPhys(Reg); // Start the recoloring algorithm from the input live-interval, then // it will propagate to the ones that are copy-related with it. Visited.insert(Reg); @@ -2940,7 +2940,7 @@ void RAGreedy::tryHintRecoloring(LiveInterval &VirtReg) { // Get the live interval mapped with this virtual register to be able // to check for the interference with the new color. 
LiveInterval &LI = LIS->getInterval(Reg); - unsigned CurrPhys = VRM->getPhys(Reg); + Register CurrPhys = VRM->getPhys(Reg); // Check that the new color matches the register class constraints and // that it is free for this live range. if (CurrPhys != PhysReg && (!MRI->getRegClass(Reg)->contains(PhysReg) || diff --git a/lib/CodeGen/RegisterCoalescer.cpp b/lib/CodeGen/RegisterCoalescer.cpp index b91b495c4da..6ff5ddbc023 100644 --- a/lib/CodeGen/RegisterCoalescer.cpp +++ b/lib/CodeGen/RegisterCoalescer.cpp @@ -802,7 +802,7 @@ RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP, return { false, false }; MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx); - unsigned NewReg = NewDstMO.getReg(); + Register NewReg = NewDstMO.getReg(); if (NewReg != IntB.reg || !IntB.Query(AValNo->def).isKill()) return { false, false }; @@ -1240,7 +1240,7 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP, return false; // Only support subregister destinations when the def is read-undef. MachineOperand &DstOperand = CopyMI->getOperand(0); - unsigned CopyDstReg = DstOperand.getReg(); + Register CopyDstReg = DstOperand.getReg(); if (DstOperand.getSubReg() && !DstOperand.isUndef()) return false; @@ -2411,7 +2411,7 @@ std::pair JoinVals::followCopyChain( assert(MI && "No defining instruction"); if (!MI->isFullCopy()) return std::make_pair(VNI, TrackReg); - unsigned SrcReg = MI->getOperand(1).getReg(); + Register SrcReg = MI->getOperand(1).getReg(); if (!Register::isVirtualRegister(SrcReg)) return std::make_pair(VNI, TrackReg); @@ -3189,7 +3189,7 @@ void JoinVals::eraseInstrs(SmallPtrSetImpl &ErasedInstrs, MachineInstr *MI = Indexes->getInstructionFromIndex(Def); assert(MI && "No instruction to erase"); if (MI->isCopy()) { - unsigned Reg = MI->getOperand(1).getReg(); + Register Reg = MI->getOperand(1).getReg(); if (Register::isVirtualRegister(Reg) && Reg != CP.getSrcReg() && Reg != CP.getDstReg()) ShrinkRegs.push_back(Reg); @@ -3463,8 +3463,8 @@ static bool isLocalCopy(MachineInstr *Copy, const LiveIntervals *LIS) { if (Copy->getOperand(1).isUndef()) return false; - unsigned SrcReg = Copy->getOperand(1).getReg(); - unsigned DstReg = Copy->getOperand(0).getReg(); + Register SrcReg = Copy->getOperand(1).getReg(); + Register DstReg = Copy->getOperand(0).getReg(); if (Register::isPhysicalRegister(SrcReg) || Register::isPhysicalRegister(DstReg)) return false; diff --git a/lib/CodeGen/RegisterPressure.cpp b/lib/CodeGen/RegisterPressure.cpp index 8a287635c81..bf192d1c530 100644 --- a/lib/CodeGen/RegisterPressure.cpp +++ b/lib/CodeGen/RegisterPressure.cpp @@ -499,7 +499,7 @@ class RegisterOperandsCollector { void collectOperand(const MachineOperand &MO) const { if (!MO.isReg() || !MO.getReg()) return; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (MO.isUse()) { if (!MO.isUndef() && !MO.isInternalRead()) pushReg(Reg, RegOpers.Uses); @@ -530,7 +530,7 @@ class RegisterOperandsCollector { void collectOperandLanes(const MachineOperand &MO) const { if (!MO.isReg() || !MO.getReg()) return; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); unsigned SubRegIdx = MO.getSubReg(); if (MO.isUse()) { if (!MO.isUndef() && !MO.isInternalRead()) diff --git a/lib/CodeGen/RegisterScavenging.cpp b/lib/CodeGen/RegisterScavenging.cpp index 8ddd51141c6..3f48b1de854 100644 --- a/lib/CodeGen/RegisterScavenging.cpp +++ b/lib/CodeGen/RegisterScavenging.cpp @@ -133,7 +133,7 @@ void RegScavenger::determineKillsAndDefs() { } if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + 
Register Reg = MO.getReg(); if (!Register::isPhysicalRegister(Reg) || isReserved(Reg)) continue; @@ -204,7 +204,7 @@ void RegScavenger::forward() { for (const MachineOperand &MO : MI.operands()) { if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Register::isPhysicalRegister(Reg) || isReserved(Reg)) continue; if (MO.isUse()) { @@ -694,7 +694,7 @@ static bool scavengeFrameVirtualRegsInBlock(MachineRegisterInfo &MRI, for (const MachineOperand &MO : NMI.operands()) { if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); // We only care about virtual registers and ignore virtual registers // created by the target callbacks in the process (those will be handled // in a scavenging round). @@ -716,7 +716,7 @@ static bool scavengeFrameVirtualRegsInBlock(MachineRegisterInfo &MRI, for (const MachineOperand &MO : MI.operands()) { if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); // Only vregs, no newly created vregs (see above). if (!Register::isVirtualRegister(Reg) || Register::virtReg2Index(Reg) >= InitialNumVirtRegs) diff --git a/lib/CodeGen/RenameIndependentSubregs.cpp b/lib/CodeGen/RenameIndependentSubregs.cpp index 35635ad064f..e3f5abb6301 100644 --- a/lib/CodeGen/RenameIndependentSubregs.cpp +++ b/lib/CodeGen/RenameIndependentSubregs.cpp @@ -138,7 +138,7 @@ bool RenameIndependentSubregs::renameComponents(LiveInterval &LI) const { LLVM_DEBUG(dbgs() << printReg(Reg) << ": Splitting into newly created:"); for (unsigned I = 1, NumClasses = Classes.getNumClasses(); I < NumClasses; ++I) { - unsigned NewVReg = MRI->createVirtualRegister(RegClass); + Register NewVReg = MRI->createVirtualRegister(RegClass); LiveInterval &NewLI = LIS->createEmptyInterval(NewVReg); Intervals.push_back(&NewLI); LLVM_DEBUG(dbgs() << ' ' << printReg(NewVReg)); diff --git a/lib/CodeGen/ScheduleDAGInstrs.cpp b/lib/CodeGen/ScheduleDAGInstrs.cpp index 9d4e00daba3..af07f8f948f 100644 --- a/lib/CodeGen/ScheduleDAGInstrs.cpp +++ b/lib/CodeGen/ScheduleDAGInstrs.cpp @@ -205,7 +205,7 @@ void ScheduleDAGInstrs::addSchedBarrierDeps() { if (ExitMI) { for (const MachineOperand &MO : ExitMI->operands()) { if (!MO.isReg() || MO.isDef()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Register::isPhysicalRegister(Reg)) { Uses.insert(PhysRegSUOper(&ExitSU, -1, Reg)); } else if (Register::isVirtualRegister(Reg) && MO.readsReg()) { @@ -285,7 +285,7 @@ void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU, unsigned OperIdx) { void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) { MachineInstr *MI = SU->getInstr(); MachineOperand &MO = MI->getOperand(OperIdx); - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); // We do not need to track any dependencies for constant registers. if (MRI.isConstantPhysReg(Reg)) return; @@ -361,7 +361,7 @@ void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) { LaneBitmask ScheduleDAGInstrs::getLaneMaskForMO(const MachineOperand &MO) const { - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); // No point in tracking lanemasks if we don't have interesting subregisters. 
const TargetRegisterClass &RC = *MRI.getRegClass(Reg); if (!RC.HasDisjunctSubRegs) @@ -382,7 +382,7 @@ LaneBitmask ScheduleDAGInstrs::getLaneMaskForMO(const MachineOperand &MO) const void ScheduleDAGInstrs::addVRegDefDeps(SUnit *SU, unsigned OperIdx) { MachineInstr *MI = SU->getInstr(); MachineOperand &MO = MI->getOperand(OperIdx); - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); LaneBitmask DefLaneMask; LaneBitmask KillLaneMask; @@ -491,7 +491,7 @@ void ScheduleDAGInstrs::addVRegDefDeps(SUnit *SU, unsigned OperIdx) { void ScheduleDAGInstrs::addVRegUseDeps(SUnit *SU, unsigned OperIdx) { const MachineInstr *MI = SU->getInstr(); const MachineOperand &MO = MI->getOperand(OperIdx); - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); // Remember the use. Data dependencies will be added when we find the def. LaneBitmask LaneMask = TrackLaneMasks ? getLaneMaskForMO(MO) @@ -821,7 +821,7 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA, const MachineOperand &MO = MI.getOperand(j); if (!MO.isReg() || !MO.isDef()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Register::isPhysicalRegister(Reg)) { addPhysRegDeps(SU, j); } else if (Register::isVirtualRegister(Reg)) { @@ -838,7 +838,7 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA, // additional use dependencies. if (!MO.isReg() || !MO.isUse()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Register::isPhysicalRegister(Reg)) { addPhysRegDeps(SU, j); } else if (Register::isVirtualRegister(Reg) && MO.readsReg()) { @@ -1071,7 +1071,7 @@ static void toggleKills(const MachineRegisterInfo &MRI, LivePhysRegs &LiveRegs, for (MachineOperand &MO : MI.operands()) { if (!MO.isReg() || !MO.readsReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Reg) continue; @@ -1102,7 +1102,7 @@ void ScheduleDAGInstrs::fixupKills(MachineBasicBlock &MBB) { if (MO.isReg()) { if (!MO.isDef()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Reg) continue; LiveRegs.removeReg(Reg); diff --git a/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/lib/CodeGen/SelectionDAG/InstrEmitter.cpp index d42ce273fc3..3ad9c41fe9f 100644 --- a/lib/CodeGen/SelectionDAG/InstrEmitter.cpp +++ b/lib/CodeGen/SelectionDAG/InstrEmitter.cpp @@ -272,7 +272,7 @@ unsigned InstrEmitter::getVR(SDValue Op, // does not include operand register class info. 
const TargetRegisterClass *RC = TLI->getRegClassFor( Op.getSimpleValueType(), Op.getNode()->isDivergent()); - unsigned VReg = MRI->createVirtualRegister(RC); + Register VReg = MRI->createVirtualRegister(RC); BuildMI(*MBB, InsertPos, Op.getDebugLoc(), TII->get(TargetOpcode::IMPLICIT_DEF), VReg); return VReg; @@ -319,7 +319,7 @@ InstrEmitter::AddRegisterOperand(MachineInstrBuilder &MIB, if (!ConstrainedRC) { OpRC = TRI->getAllocatableClass(OpRC); assert(OpRC && "Constraints cannot be fulfilled for allocation"); - unsigned NewVReg = MRI->createVirtualRegister(OpRC); + Register NewVReg = MRI->createVirtualRegister(OpRC); BuildMI(*MBB, InsertPos, Op.getNode()->getDebugLoc(), TII->get(TargetOpcode::COPY), NewVReg).addReg(VReg); VReg = NewVReg; @@ -386,7 +386,7 @@ void InstrEmitter::AddOperand(MachineInstrBuilder &MIB, : nullptr; if (OpRC && IIRC && OpRC != IIRC && Register::isVirtualRegister(VReg)) { - unsigned NewVReg = MRI->createVirtualRegister(IIRC); + Register NewVReg = MRI->createVirtualRegister(IIRC); BuildMI(*MBB, InsertPos, Op.getNode()->getDebugLoc(), TII->get(TargetOpcode::COPY), NewVReg).addReg(VReg); VReg = NewVReg; @@ -464,7 +464,7 @@ unsigned InstrEmitter::ConstrainForSubReg(unsigned VReg, unsigned SubIdx, // register instead. RC = TRI->getSubClassWithSubReg(TLI->getRegClassFor(VT, isDivergent), SubIdx); assert(RC && "No legal register class for VT supports that SubIdx"); - unsigned NewReg = MRI->createVirtualRegister(RC); + Register NewReg = MRI->createVirtualRegister(RC); BuildMI(*MBB, InsertPos, DL, TII->get(TargetOpcode::COPY), NewReg) .addReg(VReg); return NewReg; @@ -613,7 +613,7 @@ InstrEmitter::EmitCopyToRegClassNode(SDNode *Node, unsigned DstRCIdx = cast(Node->getOperand(1))->getZExtValue(); const TargetRegisterClass *DstRC = TRI->getAllocatableClass(TRI->getRegClass(DstRCIdx)); - unsigned NewVReg = MRI->createVirtualRegister(DstRC); + Register NewVReg = MRI->createVirtualRegister(DstRC); BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY), NewVReg).addReg(VReg); @@ -630,7 +630,7 @@ void InstrEmitter::EmitRegSequence(SDNode *Node, bool IsClone, bool IsCloned) { unsigned DstRCIdx = cast(Node->getOperand(0))->getZExtValue(); const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx); - unsigned NewVReg = MRI->createVirtualRegister(TRI->getAllocatableClass(RC)); + Register NewVReg = MRI->createVirtualRegister(TRI->getAllocatableClass(RC)); const MCInstrDesc &II = TII->get(TargetOpcode::REG_SEQUENCE); MachineInstrBuilder MIB = BuildMI(*MF, Node->getDebugLoc(), II, NewVReg); unsigned NumOps = Node->getNumOperands(); diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp index 9718a130381..7468ba51151 100644 --- a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp +++ b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp @@ -808,7 +808,7 @@ EmitPhysRegCopy(SUnit *SU, DenseMap &VRBaseMap, } else { // Copy from physical register. assert(I->getReg() && "Unknown physical register!"); - unsigned VRBase = MRI.createVirtualRegister(SU->CopyDstRC); + Register VRBase = MRI.createVirtualRegister(SU->CopyDstRC); bool isNew = VRBaseMap.insert(std::make_pair(SU, VRBase)).second; (void)isNew; // Silence compiler warning. 
assert(isNew && "Node emitted out of order - early"); diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index 71d0022a3f9..df553cc4924 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -4117,7 +4117,7 @@ void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) { SDValue Src = getValue(SrcV); // Create a virtual register, then update the virtual register. - unsigned VReg = + Register VReg = SwiftError.getOrCreateVRegDefAt(&I, FuncInfo.MBB, I.getPointerOperand()); // Chain, DL, Reg, N or Chain, DL, Reg, N, Glue // Chain can be getRoot or getControlRoot. @@ -7154,7 +7154,7 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee, if (SwiftErrorVal && TLI.supportSwiftError()) { // Get the last element of InVals. SDValue Src = CLI.InVals.back(); - unsigned VReg = SwiftError.getOrCreateVRegDefAt( + Register VReg = SwiftError.getOrCreateVRegDefAt( CS.getInstruction(), FuncInfo.MBB, SwiftErrorVal); SDValue CopyNode = CLI.DAG.getCopyToReg(Result.second, CLI.DL, VReg, Src); DAG.setRoot(CopyNode); @@ -9725,7 +9725,8 @@ void SelectionDAGISel::LowerArguments(const Function &F) { MachineFunction& MF = SDB->DAG.getMachineFunction(); MachineRegisterInfo& RegInfo = MF.getRegInfo(); - unsigned SRetReg = RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT)); + Register SRetReg = + RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT)); FuncInfo->DemoteRegister = SRetReg; NewRoot = SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue); diff --git a/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/lib/CodeGen/SelectionDAG/TargetLowering.cpp index b03c3884acc..b1d6b69abb8 100644 --- a/lib/CodeGen/SelectionDAG/TargetLowering.cpp +++ b/lib/CodeGen/SelectionDAG/TargetLowering.cpp @@ -79,7 +79,7 @@ bool TargetLowering::parametersInCSRMatch(const MachineRegisterInfo &MRI, const CCValAssign &ArgLoc = ArgLocs[I]; if (!ArgLoc.isRegLoc()) continue; - unsigned Reg = ArgLoc.getLocReg(); + Register Reg = ArgLoc.getLocReg(); // Only look at callee saved registers. if (MachineOperand::clobbersPhysReg(CallerPreservedMask, Reg)) continue; diff --git a/lib/CodeGen/ShrinkWrap.cpp b/lib/CodeGen/ShrinkWrap.cpp index 36f2da20d1e..412a00095b9 100644 --- a/lib/CodeGen/ShrinkWrap.cpp +++ b/lib/CodeGen/ShrinkWrap.cpp @@ -278,7 +278,7 @@ bool ShrinkWrap::useOrDefCSROrFI(const MachineInstr &MI, // Ignore instructions like DBG_VALUE which don't read/def the register. if (!MO.isDef() && !MO.readsReg()) continue; - unsigned PhysReg = MO.getReg(); + Register PhysReg = MO.getReg(); if (!PhysReg) continue; assert(Register::isPhysicalRegister(PhysReg) && "Unallocated register?!"); diff --git a/lib/CodeGen/SplitKit.cpp b/lib/CodeGen/SplitKit.cpp index 5c944fe3f6b..4e4971d435f 100644 --- a/lib/CodeGen/SplitKit.cpp +++ b/lib/CodeGen/SplitKit.cpp @@ -437,7 +437,7 @@ void SplitEditor::addDeadDef(LiveInterval &LI, VNInfo *VNI, bool Original) { assert(DefMI != nullptr); LaneBitmask LM; for (const MachineOperand &DefOp : DefMI->defs()) { - unsigned R = DefOp.getReg(); + Register R = DefOp.getReg(); if (R != LI.reg) continue; if (unsigned SR = DefOp.getSubReg()) @@ -1373,7 +1373,7 @@ void SplitEditor::rewriteAssigned(bool ExtendRanges) { assert(LI.hasSubRanges()); LiveRangeCalc SubLRC; - unsigned Reg = EP.MO.getReg(), Sub = EP.MO.getSubReg(); + Register Reg = EP.MO.getReg(), Sub = EP.MO.getSubReg(); LaneBitmask LM = Sub != 0 ? 
TRI.getSubRegIndexLaneMask(Sub) : MRI.getMaxLaneMaskForVReg(Reg); for (LiveInterval::SubRange &S : LI.subranges()) { diff --git a/lib/CodeGen/StackMaps.cpp b/lib/CodeGen/StackMaps.cpp index 6e1b86fa609..f4e6aa2471c 100644 --- a/lib/CodeGen/StackMaps.cpp +++ b/lib/CodeGen/StackMaps.cpp @@ -113,7 +113,7 @@ StackMaps::parseOperand(MachineInstr::const_mop_iterator MOI, unsigned Size = DL.getPointerSizeInBits(); assert((Size % 8) == 0 && "Need pointer size in bytes."); Size /= 8; - unsigned Reg = (++MOI)->getReg(); + Register Reg = (++MOI)->getReg(); int64_t Imm = (++MOI)->getImm(); Locs.emplace_back(StackMaps::Location::Direct, Size, getDwarfRegNum(Reg, TRI), Imm); @@ -122,7 +122,7 @@ StackMaps::parseOperand(MachineInstr::const_mop_iterator MOI, case StackMaps::IndirectMemRefOp: { int64_t Size = (++MOI)->getImm(); assert(Size > 0 && "Need a valid size for indirect memory locations."); - unsigned Reg = (++MOI)->getReg(); + Register Reg = (++MOI)->getReg(); int64_t Imm = (++MOI)->getImm(); Locs.emplace_back(StackMaps::Location::Indirect, Size, getDwarfRegNum(Reg, TRI), Imm); diff --git a/lib/CodeGen/TailDuplicator.cpp b/lib/CodeGen/TailDuplicator.cpp index 6e3a5cca2de..03c68a37e45 100644 --- a/lib/CodeGen/TailDuplicator.cpp +++ b/lib/CodeGen/TailDuplicator.cpp @@ -235,8 +235,8 @@ bool TailDuplicator::tailDuplicateAndUpdate( MachineInstr *Copy = Copies[i]; if (!Copy->isCopy()) continue; - unsigned Dst = Copy->getOperand(0).getReg(); - unsigned Src = Copy->getOperand(1).getReg(); + Register Dst = Copy->getOperand(0).getReg(); + Register Src = Copy->getOperand(1).getReg(); if (MRI->hasOneNonDBGUse(Src) && MRI->constrainRegClass(Src, MRI->getRegClass(Dst))) { // Copy is the only use. Do trivial copy propagation here. @@ -312,7 +312,7 @@ static void getRegsUsedByPHIs(const MachineBasicBlock &BB, if (!MI.isPHI()) break; for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) { - unsigned SrcReg = MI.getOperand(i).getReg(); + Register SrcReg = MI.getOperand(i).getReg(); UsedByPhi->insert(SrcReg); } } @@ -340,17 +340,17 @@ void TailDuplicator::processPHI( DenseMap &LocalVRMap, SmallVectorImpl> &Copies, const DenseSet &RegsUsedByPhi, bool Remove) { - unsigned DefReg = MI->getOperand(0).getReg(); + Register DefReg = MI->getOperand(0).getReg(); unsigned SrcOpIdx = getPHISrcRegOpIdx(MI, PredBB); assert(SrcOpIdx && "Unable to find matching PHI source?"); - unsigned SrcReg = MI->getOperand(SrcOpIdx).getReg(); + Register SrcReg = MI->getOperand(SrcOpIdx).getReg(); unsigned SrcSubReg = MI->getOperand(SrcOpIdx).getSubReg(); const TargetRegisterClass *RC = MRI->getRegClass(DefReg); LocalVRMap.insert(std::make_pair(DefReg, RegSubRegPair(SrcReg, SrcSubReg))); // Insert a copy from source to the end of the block. The def register is the // available value liveout of the block. 
- unsigned NewDef = MRI->createVirtualRegister(RC); + Register NewDef = MRI->createVirtualRegister(RC); Copies.push_back(std::make_pair(NewDef, RegSubRegPair(SrcReg, SrcSubReg))); if (isDefLiveOut(DefReg, TailBB, MRI) || RegsUsedByPhi.count(DefReg)) addSSAUpdateEntry(DefReg, NewDef, PredBB); @@ -384,12 +384,12 @@ void TailDuplicator::duplicateInstruction( MachineOperand &MO = NewMI.getOperand(i); if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Register::isVirtualRegister(Reg)) continue; if (MO.isDef()) { const TargetRegisterClass *RC = MRI->getRegClass(Reg); - unsigned NewReg = MRI->createVirtualRegister(RC); + Register NewReg = MRI->createVirtualRegister(RC); MO.setReg(NewReg); LocalVRMap.insert(std::make_pair(Reg, RegSubRegPair(NewReg, 0))); if (isDefLiveOut(Reg, TailBB, MRI) || UsedByPhi.count(Reg)) @@ -433,7 +433,7 @@ void TailDuplicator::duplicateInstruction( auto *NewRC = MI->getRegClassConstraint(i, TII, TRI); if (NewRC == nullptr) NewRC = OrigRC; - unsigned NewReg = MRI->createVirtualRegister(NewRC); + Register NewReg = MRI->createVirtualRegister(NewRC); BuildMI(*PredBB, NewMI, NewMI.getDebugLoc(), TII->get(TargetOpcode::COPY), NewReg) .addReg(VI->second.Reg, 0, VI->second.SubReg); @@ -477,7 +477,7 @@ void TailDuplicator::updateSuccessorsPHIs( assert(Idx != 0); MachineOperand &MO0 = MI.getOperand(Idx); - unsigned Reg = MO0.getReg(); + Register Reg = MO0.getReg(); if (isDead) { // Folded into the previous BB. // There could be duplicate phi source entries. FIXME: Should sdisel diff --git a/lib/CodeGen/TargetInstrInfo.cpp b/lib/CodeGen/TargetInstrInfo.cpp index 1cc22876472..e64c67f6240 100644 --- a/lib/CodeGen/TargetInstrInfo.cpp +++ b/lib/CodeGen/TargetInstrInfo.cpp @@ -443,8 +443,8 @@ static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI, if (FoldOp.getSubReg() || LiveOp.getSubReg()) return nullptr; - unsigned FoldReg = FoldOp.getReg(); - unsigned LiveReg = LiveOp.getReg(); + Register FoldReg = FoldOp.getReg(); + Register LiveReg = LiveOp.getReg(); assert(Register::isVirtualRegister(FoldReg) && "Cannot fold physregs"); @@ -805,11 +805,11 @@ void TargetInstrInfo::reassociateOps( MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]); MachineOperand &OpC = Root.getOperand(0); - unsigned RegA = OpA.getReg(); - unsigned RegB = OpB.getReg(); - unsigned RegX = OpX.getReg(); - unsigned RegY = OpY.getReg(); - unsigned RegC = OpC.getReg(); + Register RegA = OpA.getReg(); + Register RegB = OpB.getReg(); + Register RegX = OpX.getReg(); + Register RegY = OpY.getReg(); + Register RegC = OpC.getReg(); if (Register::isVirtualRegister(RegA)) MRI.constrainRegClass(RegA, RC); @@ -825,7 +825,7 @@ void TargetInstrInfo::reassociateOps( // Create a new virtual register for the result of (X op Y) instead of // recycling RegB because the MachineCombiner's computation of the critical // path requires a new register definition rather than an existing one. - unsigned NewVR = MRI.createVirtualRegister(RC); + Register NewVR = MRI.createVirtualRegister(RC); InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0)); unsigned Opcode = Root.getOpcode(); @@ -887,7 +887,7 @@ bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric( // Remat clients assume operand 0 is the defined register. 
if (!MI.getNumOperands() || !MI.getOperand(0).isReg()) return false; - unsigned DefReg = MI.getOperand(0).getReg(); + Register DefReg = MI.getOperand(0).getReg(); // A sub-register definition can only be rematerialized if the instruction // doesn't read the other parts of the register. Otherwise it is really a @@ -924,7 +924,7 @@ bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric( for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { const MachineOperand &MO = MI.getOperand(i); if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Reg == 0) continue; diff --git a/lib/CodeGen/TargetSchedule.cpp b/lib/CodeGen/TargetSchedule.cpp index 195279719ad..ce59452fd1b 100644 --- a/lib/CodeGen/TargetSchedule.cpp +++ b/lib/CodeGen/TargetSchedule.cpp @@ -300,7 +300,7 @@ computeOutputLatency(const MachineInstr *DefMI, unsigned DefOperIdx, // TODO: The following hack exists because predication passes do not // correctly append imp-use operands, and readsReg() strangely returns false // for predicated defs. - unsigned Reg = DefMI->getOperand(DefOperIdx).getReg(); + Register Reg = DefMI->getOperand(DefOperIdx).getReg(); const MachineFunction &MF = *DefMI->getMF(); const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); if (!DepMI->readsRegister(Reg, TRI) && TII->isPredicated(*DepMI)) diff --git a/lib/CodeGen/TwoAddressInstructionPass.cpp b/lib/CodeGen/TwoAddressInstructionPass.cpp index 07ffa426fc4..ea971809d4e 100644 --- a/lib/CodeGen/TwoAddressInstructionPass.cpp +++ b/lib/CodeGen/TwoAddressInstructionPass.cpp @@ -230,7 +230,7 @@ sink3AddrInstruction(MachineInstr *MI, unsigned SavedReg, for (const MachineOperand &MO : MI->operands()) { if (!MO.isReg()) continue; - unsigned MOReg = MO.getReg(); + Register MOReg = MO.getReg(); if (!MOReg) continue; if (MO.isUse() && MOReg != SavedReg) @@ -299,7 +299,7 @@ sink3AddrInstruction(MachineInstr *MI, unsigned SavedReg, MachineOperand &MO = OtherMI.getOperand(i); if (!MO.isReg()) continue; - unsigned MOReg = MO.getReg(); + Register MOReg = MO.getReg(); if (!MOReg) continue; if (DefReg == MOReg) @@ -682,7 +682,7 @@ bool TwoAddressInstructionPass::commuteInstruction(MachineInstr *MI, unsigned RegBIdx, unsigned RegCIdx, unsigned Dist) { - unsigned RegC = MI->getOperand(RegCIdx).getReg(); + Register RegC = MI->getOperand(RegCIdx).getReg(); LLVM_DEBUG(dbgs() << "2addr: COMMUTING : " << *MI); MachineInstr *NewMI = TII->commuteInstruction(*MI, false, RegBIdx, RegCIdx); @@ -699,7 +699,7 @@ bool TwoAddressInstructionPass::commuteInstruction(MachineInstr *MI, // Update source register map. 
unsigned FromRegC = getMappedReg(RegC, SrcRegMap); if (FromRegC) { - unsigned RegA = MI->getOperand(DstIdx).getReg(); + Register RegA = MI->getOperand(DstIdx).getReg(); SrcRegMap[RegA] = FromRegC; } @@ -910,7 +910,7 @@ rescheduleMIBelowKill(MachineBasicBlock::iterator &mi, for (const MachineOperand &MO : MI->operands()) { if (!MO.isReg()) continue; - unsigned MOReg = MO.getReg(); + Register MOReg = MO.getReg(); if (!MOReg) continue; if (MO.isDef()) @@ -954,7 +954,7 @@ rescheduleMIBelowKill(MachineBasicBlock::iterator &mi, for (const MachineOperand &MO : OtherMI.operands()) { if (!MO.isReg()) continue; - unsigned MOReg = MO.getReg(); + Register MOReg = MO.getReg(); if (!MOReg) continue; if (MO.isDef()) { @@ -1092,7 +1092,7 @@ rescheduleKillAboveMI(MachineBasicBlock::iterator &mi, for (const MachineOperand &MO : KillMI->operands()) { if (!MO.isReg()) continue; - unsigned MOReg = MO.getReg(); + Register MOReg = MO.getReg(); if (MO.isUse()) { if (!MOReg) continue; @@ -1129,7 +1129,7 @@ rescheduleKillAboveMI(MachineBasicBlock::iterator &mi, for (const MachineOperand &MO : OtherMI.operands()) { if (!MO.isReg()) continue; - unsigned MOReg = MO.getReg(); + Register MOReg = MO.getReg(); if (!MOReg) continue; if (MO.isUse()) { @@ -1206,8 +1206,8 @@ bool TwoAddressInstructionPass::tryInstructionCommute(MachineInstr *MI, return false; bool MadeChange = false; - unsigned DstOpReg = MI->getOperand(DstOpIdx).getReg(); - unsigned BaseOpReg = MI->getOperand(BaseOpIdx).getReg(); + Register DstOpReg = MI->getOperand(DstOpIdx).getReg(); + Register BaseOpReg = MI->getOperand(BaseOpIdx).getReg(); unsigned OpsNum = MI->getDesc().getNumOperands(); unsigned OtherOpIdx = MI->getDesc().getNumDefs(); for (; OtherOpIdx < OpsNum; OtherOpIdx++) { @@ -1219,7 +1219,7 @@ bool TwoAddressInstructionPass::tryInstructionCommute(MachineInstr *MI, !TII->findCommutedOpIndices(*MI, BaseOpIdx, OtherOpIdx)) continue; - unsigned OtherOpReg = MI->getOperand(OtherOpIdx).getReg(); + Register OtherOpReg = MI->getOperand(OtherOpIdx).getReg(); bool AggressiveCommute = false; // If OtherOp dies but BaseOp does not, swap the OtherOp and BaseOp @@ -1274,8 +1274,8 @@ tryInstructionTransform(MachineBasicBlock::iterator &mi, return false; MachineInstr &MI = *mi; - unsigned regA = MI.getOperand(DstIdx).getReg(); - unsigned regB = MI.getOperand(SrcIdx).getReg(); + Register regA = MI.getOperand(DstIdx).getReg(); + Register regB = MI.getOperand(SrcIdx).getReg(); assert(Register::isVirtualRegister(regB) && "cannot make instruction into two-address form"); @@ -1361,7 +1361,7 @@ tryInstructionTransform(MachineBasicBlock::iterator &mi, const TargetRegisterClass *RC = TRI->getAllocatableClass( TII->getRegClass(UnfoldMCID, LoadRegIndex, TRI, *MF)); - unsigned Reg = MRI->createVirtualRegister(RC); + Register Reg = MRI->createVirtualRegister(RC); SmallVector NewMIs; if (!TII->unfoldMemoryOperand(*MF, MI, Reg, /*UnfoldLoad=*/true, @@ -1471,8 +1471,8 @@ collectTiedOperands(MachineInstr *MI, TiedOperandMap &TiedOperands) { AnyOps = true; MachineOperand &SrcMO = MI->getOperand(SrcIdx); MachineOperand &DstMO = MI->getOperand(DstIdx); - unsigned SrcReg = SrcMO.getReg(); - unsigned DstReg = DstMO.getReg(); + Register SrcReg = SrcMO.getReg(); + Register DstReg = DstMO.getReg(); // Tied constraint already satisfied? 
if (SrcReg == DstReg) continue; @@ -1519,7 +1519,7 @@ TwoAddressInstructionPass::processTiedPairs(MachineInstr *MI, unsigned DstIdx = TiedPairs[tpi].second; const MachineOperand &DstMO = MI->getOperand(DstIdx); - unsigned RegA = DstMO.getReg(); + Register RegA = DstMO.getReg(); // Grab RegB from the instruction because it may have changed if the // instruction was commuted. @@ -1739,8 +1739,8 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &Func) { if (TiedPairs.size() == 1) { unsigned SrcIdx = TiedPairs[0].first; unsigned DstIdx = TiedPairs[0].second; - unsigned SrcReg = mi->getOperand(SrcIdx).getReg(); - unsigned DstReg = mi->getOperand(DstIdx).getReg(); + Register SrcReg = mi->getOperand(SrcIdx).getReg(); + Register DstReg = mi->getOperand(DstIdx).getReg(); if (SrcReg != DstReg && tryInstructionTransform(mi, nmi, SrcIdx, DstIdx, Dist, false)) { // The tied operands have been eliminated or shifted further down @@ -1798,7 +1798,7 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &Func) { void TwoAddressInstructionPass:: eliminateRegSequence(MachineBasicBlock::iterator &MBBI) { MachineInstr &MI = *MBBI; - unsigned DstReg = MI.getOperand(0).getReg(); + Register DstReg = MI.getOperand(0).getReg(); if (MI.getOperand(0).getSubReg() || Register::isPhysicalRegister(DstReg) || !(MI.getNumOperands() & 1)) { LLVM_DEBUG(dbgs() << "Illegal REG_SEQUENCE instruction:" << MI); @@ -1815,7 +1815,7 @@ eliminateRegSequence(MachineBasicBlock::iterator &MBBI) { bool DefEmitted = false; for (unsigned i = 1, e = MI.getNumOperands(); i < e; i += 2) { MachineOperand &UseMO = MI.getOperand(i); - unsigned SrcReg = UseMO.getReg(); + Register SrcReg = UseMO.getReg(); unsigned SubIdx = MI.getOperand(i+1).getImm(); // Nothing needs to be inserted for undef operands. 
if (UseMO.isUndef()) diff --git a/lib/CodeGen/UnreachableBlockElim.cpp b/lib/CodeGen/UnreachableBlockElim.cpp index c5343690329..de15e6c814b 100644 --- a/lib/CodeGen/UnreachableBlockElim.cpp +++ b/lib/CodeGen/UnreachableBlockElim.cpp @@ -173,8 +173,8 @@ bool UnreachableMachineBlockElim::runOnMachineFunction(MachineFunction &F) { if (phi->getNumOperands() == 3) { const MachineOperand &Input = phi->getOperand(1); const MachineOperand &Output = phi->getOperand(0); - unsigned InputReg = Input.getReg(); - unsigned OutputReg = Output.getReg(); + Register InputReg = Input.getReg(); + Register OutputReg = Output.getReg(); assert(Output.getSubReg() == 0 && "Cannot have output subregister"); ModifiedPHI = true; diff --git a/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp b/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp index a429c7cd833..cd6c359d37d 100644 --- a/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp +++ b/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp @@ -677,7 +677,7 @@ AMDGPUAsmPrinter::SIFunctionResourceInfo AMDGPUAsmPrinter::analyzeResourceUsage( if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); switch (Reg) { case AMDGPU::EXEC: case AMDGPU::EXEC_LO: diff --git a/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp index e18c0ff6507..2421f294b12 100644 --- a/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp +++ b/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp @@ -220,7 +220,7 @@ AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO, if (MO.isReg()) { unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx); - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg) .addReg(Reg, 0, ComposedSubIdx); @@ -676,12 +676,12 @@ bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const { MachineRegisterInfo &MRI = MF->getRegInfo(); const DebugLoc &DL = I.getDebugLoc(); - unsigned SrcReg = I.getOperand(2).getReg(); + Register SrcReg = I.getOperand(2).getReg(); unsigned Size = RBI.getSizeInBits(SrcReg, MRI, TRI); auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate(); - unsigned CCReg = I.getOperand(0).getReg(); + Register CCReg = I.getOperand(0).getReg(); if (isSCC(CCReg, MRI)) { int Opcode = getS_CMPOpcode(Pred, Size); if (Opcode == -1) @@ -758,9 +758,9 @@ bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS( const DebugLoc &DL = I.getDebugLoc(); int64_t Tgt = getConstant(MRI.getVRegDef(I.getOperand(1).getReg())); int64_t Enabled = getConstant(MRI.getVRegDef(I.getOperand(2).getReg())); - unsigned Reg0 = I.getOperand(3).getReg(); - unsigned Reg1 = I.getOperand(4).getReg(); - unsigned Undef = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); + Register Reg0 = I.getOperand(3).getReg(); + Register Reg1 = I.getOperand(4).getReg(); + Register Undef = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); int64_t Done = getConstant(MRI.getVRegDef(I.getOperand(5).getReg())); int64_t VM = getConstant(MRI.getVRegDef(I.getOperand(6).getReg())); @@ -796,11 +796,11 @@ bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const { MachineRegisterInfo &MRI = MF->getRegInfo(); const DebugLoc &DL = I.getDebugLoc(); - unsigned DstReg = I.getOperand(0).getReg(); + Register DstReg = I.getOperand(0).getReg(); unsigned Size = RBI.getSizeInBits(DstReg, MRI, TRI); assert(Size <= 32 || Size == 64); const MachineOperand &CCOp = I.getOperand(1); - unsigned CCReg = CCOp.getReg(); + Register CCReg = CCOp.getReg(); if (isSCC(CCReg, MRI)) { 
unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 : AMDGPU::S_CSELECT_B32; @@ -870,8 +870,8 @@ bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const { MachineFunction *MF = BB->getParent(); MachineRegisterInfo &MRI = MF->getRegInfo(); - unsigned DstReg = I.getOperand(0).getReg(); - unsigned SrcReg = I.getOperand(1).getReg(); + Register DstReg = I.getOperand(0).getReg(); + Register SrcReg = I.getOperand(1).getReg(); const LLT DstTy = MRI.getType(DstReg); const LLT SrcTy = MRI.getType(SrcReg); if (!DstTy.isScalar()) @@ -927,8 +927,8 @@ bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const { MachineBasicBlock &MBB = *I.getParent(); MachineFunction &MF = *MBB.getParent(); MachineRegisterInfo &MRI = MF.getRegInfo(); - const unsigned DstReg = I.getOperand(0).getReg(); - const unsigned SrcReg = I.getOperand(1).getReg(); + const Register DstReg = I.getOperand(0).getReg(); + const Register SrcReg = I.getOperand(1).getReg(); const LLT DstTy = MRI.getType(DstReg); const LLT SrcTy = MRI.getType(SrcReg); @@ -951,7 +951,7 @@ bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const { // FIXME: Create an extra copy to avoid incorrectly constraining the result // of the scc producer. - unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); + Register TmpReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); BuildMI(MBB, I, DL, TII.get(AMDGPU::COPY), TmpReg) .addReg(SrcReg); BuildMI(MBB, I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC) @@ -1026,10 +1026,8 @@ bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const { // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16]= width. if (DstSize > 32 && SrcSize <= 32) { // We need a 64-bit register source, but the high bits don't matter. - unsigned ExtReg - = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); - unsigned UndefReg - = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); + Register ExtReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); + Register UndefReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg); BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg) .addReg(SrcReg) @@ -1077,7 +1075,7 @@ bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const { ImmOp.ChangeToImmediate(ImmOp.getCImm()->getZExtValue()); } - unsigned DstReg = I.getOperand(0).getReg(); + Register DstReg = I.getOperand(0).getReg(); unsigned Size; bool IsSgpr; const RegisterBank *RB = MRI.getRegBankOrNull(I.getOperand(0).getReg()); @@ -1103,8 +1101,8 @@ bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const { DebugLoc DL = I.getDebugLoc(); const TargetRegisterClass *RC = IsSgpr ? &AMDGPU::SReg_32_XM0RegClass : &AMDGPU::VGPR_32RegClass; - unsigned LoReg = MRI.createVirtualRegister(RC); - unsigned HiReg = MRI.createVirtualRegister(RC); + Register LoReg = MRI.createVirtualRegister(RC); + Register HiReg = MRI.createVirtualRegister(RC); const APInt &Imm = APInt(Size, I.getOperand(1).getImm()); BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg) @@ -1516,7 +1514,7 @@ AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const { // failed trying to select this load into one of the _IMM variants since // the _IMM Patterns are considered before the _SGPR patterns. 
unsigned PtrReg = GEPInfo.SgprParts[0]; - unsigned OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); + Register OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg) .addImm(GEPInfo.Imm); return {{ diff --git a/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp b/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp index 6d9cecbfb7d..ba72f71f432 100644 --- a/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp +++ b/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp @@ -1023,7 +1023,7 @@ void LinearizedRegion::removeFalseRegisterKills(MachineRegisterInfo *MRI) { for (auto &II : *MBB) { for (auto &RI : II.uses()) { if (RI.isReg()) { - unsigned Reg = RI.getReg(); + Register Reg = RI.getReg(); if (Register::isVirtualRegister(Reg)) { if (hasNoDef(Reg, MRI)) continue; @@ -1404,7 +1404,7 @@ void AMDGPUMachineCFGStructurizer::storePHILinearizationInfoDest( unsigned AMDGPUMachineCFGStructurizer::storePHILinearizationInfo( MachineInstr &PHI, SmallVector *RegionIndices) { unsigned DestReg = getPHIDestReg(PHI); - unsigned LinearizeDestReg = + Register LinearizeDestReg = MRI->createVirtualRegister(MRI->getRegClass(DestReg)); PHIInfo.addDest(LinearizeDestReg, PHI.getDebugLoc()); storePHILinearizationInfoDest(LinearizeDestReg, PHI, RegionIndices); @@ -1892,7 +1892,7 @@ void AMDGPUMachineCFGStructurizer::ensureCondIsNotKilled( if (!Cond[0].isReg()) return; - unsigned CondReg = Cond[0].getReg(); + Register CondReg = Cond[0].getReg(); for (auto UI = MRI->use_begin(CondReg), E = MRI->use_end(); UI != E; ++UI) { (*UI).setIsKill(false); } @@ -1931,8 +1931,8 @@ void AMDGPUMachineCFGStructurizer::rewriteCodeBBTerminator(MachineBasicBlock *Co BBSelectReg, TrueBB->getNumber()); } else { const TargetRegisterClass *RegClass = MRI->getRegClass(BBSelectReg); - unsigned TrueBBReg = MRI->createVirtualRegister(RegClass); - unsigned FalseBBReg = MRI->createVirtualRegister(RegClass); + Register TrueBBReg = MRI->createVirtualRegister(RegClass); + Register FalseBBReg = MRI->createVirtualRegister(RegClass); TII->materializeImmediate(*CodeBB, CodeBB->getFirstTerminator(), DL, TrueBBReg, TrueBB->getNumber()); TII->materializeImmediate(*CodeBB, CodeBB->getFirstTerminator(), DL, @@ -1998,7 +1998,7 @@ void AMDGPUMachineCFGStructurizer::insertChainedPHI(MachineBasicBlock *IfBB, InnerRegion->replaceRegisterOutsideRegion(SourceReg, DestReg, false, MRI); } const TargetRegisterClass *RegClass = MRI->getRegClass(DestReg); - unsigned NextDestReg = MRI->createVirtualRegister(RegClass); + Register NextDestReg = MRI->createVirtualRegister(RegClass); bool IsLastDef = PHIInfo.getNumSources(DestReg) == 1; LLVM_DEBUG(dbgs() << "Insert Chained PHI\n"); insertMergePHI(IfBB, InnerRegion->getExit(), MergeBB, DestReg, NextDestReg, @@ -2058,8 +2058,8 @@ void AMDGPUMachineCFGStructurizer::rewriteLiveOutRegs(MachineBasicBlock *IfBB, // register, unless it is the outgoing BB select register. We have // already creaed phi nodes for these. const TargetRegisterClass *RegClass = MRI->getRegClass(Reg); - unsigned PHIDestReg = MRI->createVirtualRegister(RegClass); - unsigned IfSourceReg = MRI->createVirtualRegister(RegClass); + Register PHIDestReg = MRI->createVirtualRegister(RegClass); + Register IfSourceReg = MRI->createVirtualRegister(RegClass); // Create initializer, this value is never used, but is needed // to satisfy SSA. 
LLVM_DEBUG(dbgs() << "Initializer for reg: " << printReg(Reg) << "\n"); @@ -2174,7 +2174,7 @@ void AMDGPUMachineCFGStructurizer::createEntryPHI(LinearizedRegion *CurrentRegio MachineBasicBlock *PHIDefMBB = PHIDefInstr->getParent(); const TargetRegisterClass *RegClass = MRI->getRegClass(CurrentBackedgeReg); - unsigned NewBackedgeReg = MRI->createVirtualRegister(RegClass); + Register NewBackedgeReg = MRI->createVirtualRegister(RegClass); MachineInstrBuilder BackedgePHI = BuildMI(*PHIDefMBB, PHIDefMBB->instr_begin(), DL, TII->get(TargetOpcode::PHI), NewBackedgeReg); @@ -2311,7 +2311,7 @@ MachineBasicBlock *AMDGPUMachineCFGStructurizer::createIfRegion( } else { // Handle internal block. const TargetRegisterClass *RegClass = MRI->getRegClass(BBSelectRegIn); - unsigned CodeBBSelectReg = MRI->createVirtualRegister(RegClass); + Register CodeBBSelectReg = MRI->createVirtualRegister(RegClass); rewriteCodeBBTerminator(CodeBB, MergeBB, CodeBBSelectReg); bool IsRegionEntryBB = CurrentRegion->getEntry() == CodeBB; MachineBasicBlock *IfBB = createIfBlock(MergeBB, CodeBB, CodeBB, CodeBB, @@ -2448,7 +2448,7 @@ void AMDGPUMachineCFGStructurizer::splitLoopPHI(MachineInstr &PHI, } const TargetRegisterClass *RegClass = MRI->getRegClass(PHIDest); - unsigned NewDestReg = MRI->createVirtualRegister(RegClass); + Register NewDestReg = MRI->createVirtualRegister(RegClass); LRegion->replaceRegisterInsideRegion(PHIDest, NewDestReg, false, MRI); MachineInstrBuilder MIB = BuildMI(*EntrySucc, EntrySucc->instr_begin(), PHI.getDebugLoc(), @@ -2736,9 +2736,9 @@ bool AMDGPUMachineCFGStructurizer::structurizeComplexRegion(RegionMRT *Region) { } const DebugLoc &DL = NewSucc->findDebugLoc(NewSucc->getFirstNonPHI()); unsigned InReg = LRegion->getBBSelectRegIn(); - unsigned InnerSelectReg = + Register InnerSelectReg = MRI->createVirtualRegister(MRI->getRegClass(InReg)); - unsigned NewInReg = MRI->createVirtualRegister(MRI->getRegClass(InReg)); + Register NewInReg = MRI->createVirtualRegister(MRI->getRegClass(InReg)); TII->materializeImmediate(*(LRegion->getEntry()), LRegion->getEntry()->getFirstTerminator(), DL, NewInReg, Region->getEntry()->getNumber()); diff --git a/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp index efba7f28938..361bf7ab968 100644 --- a/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp +++ b/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp @@ -797,7 +797,7 @@ void AMDGPURegisterBankInfo::executeInWaterfallLoop( unsigned NumPieces = Unmerge->getNumOperands() - 1; for (unsigned PieceIdx = 0; PieceIdx != NumPieces; ++PieceIdx) { - unsigned UnmergePiece = Unmerge.getReg(PieceIdx); + Register UnmergePiece = Unmerge.getReg(PieceIdx); Register CurrentLaneOpReg; if (Is64) { @@ -1548,7 +1548,7 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const { int ResultBank = -1; for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) { - unsigned Reg = MI.getOperand(I).getReg(); + Register Reg = MI.getOperand(I).getReg(); const RegisterBank *Bank = getRegBank(Reg, MRI, *TRI); // FIXME: Assuming VGPR for any undetermined inputs. @@ -2053,7 +2053,7 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const { } case Intrinsic::amdgcn_readlane: { // This must be an SGPR, but accept a VGPR. 
- unsigned IdxReg = MI.getOperand(3).getReg(); + Register IdxReg = MI.getOperand(3).getReg(); unsigned IdxSize = MRI.getType(IdxReg).getSizeInBits(); unsigned IdxBank = getRegBankID(IdxReg, MRI, *TRI, AMDGPU::SGPRRegBankID); OpdsMapping[3] = AMDGPU::getValueMapping(IdxBank, IdxSize); @@ -2068,10 +2068,10 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const { } case Intrinsic::amdgcn_writelane: { unsigned DstSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); - unsigned SrcReg = MI.getOperand(2).getReg(); + Register SrcReg = MI.getOperand(2).getReg(); unsigned SrcSize = MRI.getType(SrcReg).getSizeInBits(); unsigned SrcBank = getRegBankID(SrcReg, MRI, *TRI, AMDGPU::SGPRRegBankID); - unsigned IdxReg = MI.getOperand(3).getReg(); + Register IdxReg = MI.getOperand(3).getReg(); unsigned IdxSize = MRI.getType(IdxReg).getSizeInBits(); unsigned IdxBank = getRegBankID(IdxReg, MRI, *TRI, AMDGPU::SGPRRegBankID); OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, DstSize); diff --git a/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp b/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp index 12f2e9519c9..101ecfc0c87 100644 --- a/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp +++ b/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp @@ -1307,8 +1307,8 @@ int AMDGPUCFGStructurizer::improveSimpleJumpintoIf(MachineBasicBlock *HeadMBB, if (LandBlkHasOtherPred) { report_fatal_error("Extra register needed to handle CFG"); - unsigned CmpResReg = - HeadMBB->getParent()->getRegInfo().createVirtualRegister(I32RC); + Register CmpResReg = + HeadMBB->getParent()->getRegInfo().createVirtualRegister(I32RC); report_fatal_error("Extra compare instruction needed to handle CFG"); insertCondBranchBefore(LandBlk, I, R600::IF_PREDICATE_SET, CmpResReg, DebugLoc()); @@ -1316,8 +1316,8 @@ int AMDGPUCFGStructurizer::improveSimpleJumpintoIf(MachineBasicBlock *HeadMBB, // XXX: We are running this after RA, so creating virtual registers will // cause an assertion failure in the PostRA scheduling pass. - unsigned InitReg = - HeadMBB->getParent()->getRegInfo().createVirtualRegister(I32RC); + Register InitReg = + HeadMBB->getParent()->getRegInfo().createVirtualRegister(I32RC); insertCondBranchBefore(LandBlk, I, R600::IF_PREDICATE_SET, InitReg, DebugLoc()); diff --git a/lib/Target/AMDGPU/GCNHazardRecognizer.cpp b/lib/Target/AMDGPU/GCNHazardRecognizer.cpp index 885239e2fae..958b8019c72 100644 --- a/lib/Target/AMDGPU/GCNHazardRecognizer.cpp +++ b/lib/Target/AMDGPU/GCNHazardRecognizer.cpp @@ -726,7 +726,7 @@ int GCNHazardRecognizer::checkVALUHazardsHelper(const MachineOperand &Def, if (!TRI->isVGPR(MRI, Def.getReg())) return WaitStatesNeeded; - unsigned Reg = Def.getReg(); + Register Reg = Def.getReg(); auto IsHazardFn = [this, Reg, TRI] (MachineInstr *MI) { int DataIdx = createsVALUHazard(*MI); return DataIdx >= 0 && @@ -792,7 +792,7 @@ int GCNHazardRecognizer::checkRWLaneHazards(MachineInstr *RWLane) { if (!LaneSelectOp->isReg() || !TRI->isSGPRReg(MRI, LaneSelectOp->getReg())) return 0; - unsigned LaneSelectReg = LaneSelectOp->getReg(); + Register LaneSelectReg = LaneSelectOp->getReg(); auto IsHazardFn = [TII] (MachineInstr *MI) { return TII->isVALU(*MI); }; @@ -891,7 +891,7 @@ bool GCNHazardRecognizer::fixVcmpxPermlaneHazards(MachineInstr *MI) { // Use V_MOB_B32 v?, v?. Register must be alive so use src0 of V_PERMLANE* // which is always a VGPR and available. 
auto *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0); - unsigned Reg = Src0->getReg(); + Register Reg = Src0->getReg(); bool IsUndef = Src0->isUndef(); BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII->get(AMDGPU::V_MOV_B32_e32)) @@ -976,7 +976,7 @@ bool GCNHazardRecognizer::fixSMEMtoVectorWriteHazards(MachineInstr *MI) { if (!SDST) return false; - const unsigned SDSTReg = SDST->getReg(); + const Register SDSTReg = SDST->getReg(); auto IsHazardFn = [SDSTReg, TRI] (MachineInstr *I) { return SIInstrInfo::isSMRD(*I) && I->readsRegister(SDSTReg, TRI); }; @@ -1251,14 +1251,14 @@ int GCNHazardRecognizer::checkMAIHazards(MachineInstr *MI) { const int MFMA16x16WritesAGPRAccVgprWriteWaitStates = 7; const int MFMA32x32WritesAGPRAccVgprWriteWaitStates = 15; const int MaxWaitStates = 18; - unsigned Reg = Op.getReg(); + Register Reg = Op.getReg(); unsigned HazardDefLatency = 0; auto IsOverlappedMFMAFn = [Reg, &IsMFMAFn, &HazardDefLatency, this] (MachineInstr *MI) { if (!IsMFMAFn(MI)) return false; - unsigned DstReg = MI->getOperand(0).getReg(); + Register DstReg = MI->getOperand(0).getReg(); if (DstReg == Reg) return false; HazardDefLatency = std::max(HazardDefLatency, @@ -1304,7 +1304,7 @@ int GCNHazardRecognizer::checkMAIHazards(MachineInstr *MI) { auto IsAccVgprWriteFn = [Reg, this] (MachineInstr *MI) { if (MI->getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32) return false; - unsigned DstReg = MI->getOperand(0).getReg(); + Register DstReg = MI->getOperand(0).getReg(); return TRI.regsOverlap(Reg, DstReg); }; @@ -1330,14 +1330,14 @@ int GCNHazardRecognizer::checkMAIHazards(MachineInstr *MI) { const int MFMA16x16ReadSrcCAccVgprWriteWaitStates = 5; const int MFMA32x32ReadSrcCAccVgprWriteWaitStates = 13; const int MaxWaitStates = 13; - unsigned DstReg = MI->getOperand(0).getReg(); + Register DstReg = MI->getOperand(0).getReg(); unsigned HazardDefLatency = 0; auto IsSrcCMFMAFn = [DstReg, &IsMFMAFn, &HazardDefLatency, this] (MachineInstr *MI) { if (!IsMFMAFn(MI)) return false; - unsigned Reg = TII.getNamedOperand(*MI, AMDGPU::OpName::src2)->getReg(); + Register Reg = TII.getNamedOperand(*MI, AMDGPU::OpName::src2)->getReg(); HazardDefLatency = std::max(HazardDefLatency, TSchedModel.computeInstrLatency(MI)); return TRI.regsOverlap(Reg, DstReg); @@ -1376,7 +1376,7 @@ int GCNHazardRecognizer::checkMAILdStHazards(MachineInstr *MI) { if (!Op.isReg() || !TRI.isVGPR(MF.getRegInfo(), Op.getReg())) continue; - unsigned Reg = Op.getReg(); + Register Reg = Op.getReg(); const int AccVgprReadLdStWaitStates = 2; const int VALUWriteAccVgprReadLdStDepVALUWaitStates = 1; diff --git a/lib/Target/AMDGPU/GCNNSAReassign.cpp b/lib/Target/AMDGPU/GCNNSAReassign.cpp index c9446015aeb..36a8f74150f 100644 --- a/lib/Target/AMDGPU/GCNNSAReassign.cpp +++ b/lib/Target/AMDGPU/GCNNSAReassign.cpp @@ -173,11 +173,11 @@ GCNNSAReassign::CheckNSA(const MachineInstr &MI, bool Fast) const { bool NSA = false; for (unsigned I = 0; I < Info->VAddrDwords; ++I) { const MachineOperand &Op = MI.getOperand(VAddr0Idx + I); - unsigned Reg = Op.getReg(); + Register Reg = Op.getReg(); if (Register::isPhysicalRegister(Reg) || !VRM->isAssignedReg(Reg)) return NSA_Status::FIXED; - unsigned PhysReg = VRM->getPhys(Reg); + Register PhysReg = VRM->getPhys(Reg); if (!Fast) { if (!PhysReg) @@ -276,7 +276,7 @@ bool GCNNSAReassign::runOnMachineFunction(MachineFunction &MF) { SlotIndex MinInd, MaxInd; for (unsigned I = 0; I < Info->VAddrDwords; ++I) { const MachineOperand &Op = MI->getOperand(VAddr0Idx + I); - unsigned Reg = Op.getReg(); + Register Reg = 
Op.getReg(); LiveInterval *LI = &LIS->getInterval(Reg); if (llvm::find(Intervals, LI) != Intervals.end()) { // Same register used, unable to make sequential diff --git a/lib/Target/AMDGPU/GCNRegBankReassign.cpp b/lib/Target/AMDGPU/GCNRegBankReassign.cpp index 2e5b130cf9f..2927d4eb745 100644 --- a/lib/Target/AMDGPU/GCNRegBankReassign.cpp +++ b/lib/Target/AMDGPU/GCNRegBankReassign.cpp @@ -364,7 +364,7 @@ unsigned GCNRegBankReassign::analyzeInst(const MachineInstr& MI, if (!Op.isReg() || Op.isUndef()) continue; - unsigned R = Op.getReg(); + Register R = Op.getReg(); if (TRI->hasAGPRs(TRI->getRegClassForReg(*MRI, R))) continue; @@ -425,7 +425,7 @@ bool GCNRegBankReassign::isReassignable(unsigned Reg) const { const MachineInstr *Def = MRI->getUniqueVRegDef(Reg); - unsigned PhysReg = VRM->getPhys(Reg); + Register PhysReg = VRM->getPhys(Reg); if (Def && Def->isCopy() && Def->getOperand(1).getReg() == PhysReg) return false; @@ -654,7 +654,7 @@ unsigned GCNRegBankReassign::tryReassign(Candidate &C) { } std::sort(BankStalls.begin(), BankStalls.end()); - unsigned OrigReg = VRM->getPhys(C.Reg); + Register OrigReg = VRM->getPhys(C.Reg); LRM->unassign(LI); while (!BankStalls.empty()) { BankStall BS = BankStalls.pop_back_val(); diff --git a/lib/Target/AMDGPU/GCNRegPressure.cpp b/lib/Target/AMDGPU/GCNRegPressure.cpp index 4abbb8537fb..d593204cba0 100644 --- a/lib/Target/AMDGPU/GCNRegPressure.cpp +++ b/lib/Target/AMDGPU/GCNRegPressure.cpp @@ -406,7 +406,7 @@ void GCNDownwardRPTracker::advanceToNext() { for (const auto &MO : LastTrackedMI->defs()) { if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Register::isVirtualRegister(Reg)) continue; auto &LiveMask = LiveRegs[Reg]; diff --git a/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp b/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp index 8098b81d1ea..e4160ac11c8 100644 --- a/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp +++ b/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp @@ -303,7 +303,7 @@ private: if (!MO.isReg()) continue; if (MO.isDef()) { - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (R600::R600_Reg128RegClass.contains(Reg)) DstMI = Reg; else @@ -312,7 +312,7 @@ private: &R600::R600_Reg128RegClass); } if (MO.isUse()) { - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (R600::R600_Reg128RegClass.contains(Reg)) SrcMI = Reg; else diff --git a/lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp b/lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp index c6e8a060d8a..fd75c41040e 100644 --- a/lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp +++ b/lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp @@ -135,7 +135,7 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) { const R600RegisterInfo &TRI = TII->getRegisterInfo(); - unsigned DstReg = MI.getOperand(0).getReg(); + Register DstReg = MI.getOperand(0).getReg(); unsigned DstBase = TRI.getEncodingValue(DstReg) & HW_REG_MASK; for (unsigned Chan = 0; Chan < 4; ++Chan) { @@ -155,12 +155,12 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) { unsigned Opcode = BMI->getOpcode(); // While not strictly necessary from hw point of view, we force // all src operands of a dot4 inst to belong to the same slot. 
- unsigned Src0 = BMI->getOperand( - TII->getOperandIdx(Opcode, R600::OpName::src0)) - .getReg(); - unsigned Src1 = BMI->getOperand( - TII->getOperandIdx(Opcode, R600::OpName::src1)) - .getReg(); + Register Src0 = + BMI->getOperand(TII->getOperandIdx(Opcode, R600::OpName::src0)) + .getReg(); + Register Src1 = + BMI->getOperand(TII->getOperandIdx(Opcode, R600::OpName::src1)) + .getReg(); (void) Src0; (void) Src1; if ((TRI.getEncodingValue(Src0) & 0xff) < 127 && @@ -205,10 +205,10 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) { // T0_Z = CUBE T1_X, T1_Z // T0_W = CUBE T1_Y, T1_Z for (unsigned Chan = 0; Chan < 4; Chan++) { - unsigned DstReg = MI.getOperand( - TII->getOperandIdx(MI, R600::OpName::dst)).getReg(); - unsigned Src0 = MI.getOperand( - TII->getOperandIdx(MI, R600::OpName::src0)).getReg(); + Register DstReg = + MI.getOperand(TII->getOperandIdx(MI, R600::OpName::dst)).getReg(); + Register Src0 = + MI.getOperand(TII->getOperandIdx(MI, R600::OpName::src0)).getReg(); unsigned Src1 = 0; // Determine the correct source registers diff --git a/lib/Target/AMDGPU/R600ISelLowering.cpp b/lib/Target/AMDGPU/R600ISelLowering.cpp index 29b4d5559d9..fa1565148d1 100644 --- a/lib/Target/AMDGPU/R600ISelLowering.cpp +++ b/lib/Target/AMDGPU/R600ISelLowering.cpp @@ -334,7 +334,7 @@ R600TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, } case R600::MASK_WRITE: { - unsigned maskedRegister = MI.getOperand(0).getReg(); + Register maskedRegister = MI.getOperand(0).getReg(); assert(Register::isVirtualRegister(maskedRegister)); MachineInstr * defInstr = MRI.getVRegDef(maskedRegister); TII->addFlag(*defInstr, 0, MO_FLAG_MASK); diff --git a/lib/Target/AMDGPU/R600InstrInfo.cpp b/lib/Target/AMDGPU/R600InstrInfo.cpp index 79e36b71e0f..04a5e93f621 100644 --- a/lib/Target/AMDGPU/R600InstrInfo.cpp +++ b/lib/Target/AMDGPU/R600InstrInfo.cpp @@ -293,7 +293,7 @@ R600InstrInfo::getSrcs(MachineInstr &MI) const { for (unsigned j = 0; j < 8; j++) { MachineOperand &MO = MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][0])); - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Reg == R600::ALU_CONST) { MachineOperand &Sel = MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][1])); @@ -316,7 +316,7 @@ R600InstrInfo::getSrcs(MachineInstr &MI) const { if (SrcIdx < 0) break; MachineOperand &MO = MI.getOperand(SrcIdx); - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Reg == R600::ALU_CONST) { MachineOperand &Sel = MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][1])); @@ -347,7 +347,7 @@ R600InstrInfo::ExtractSrcs(MachineInstr &MI, unsigned i = 0; for (const auto &Src : getSrcs(MI)) { ++i; - unsigned Reg = Src.first->getReg(); + Register Reg = Src.first->getReg(); int Index = RI.getEncodingValue(Reg) & 0xff; if (Reg == R600::OQAP) { Result.push_back(std::make_pair(Index, 0U)); @@ -864,7 +864,7 @@ bool R600InstrInfo::isPredicated(const MachineInstr &MI) const { if (idx < 0) return false; - unsigned Reg = MI.getOperand(idx).getReg(); + Register Reg = MI.getOperand(idx).getReg(); switch (Reg) { default: return false; case R600::PRED_SEL_ONE: @@ -1037,7 +1037,7 @@ bool R600InstrInfo::expandPostRAPseudo(MachineInstr &MI) const { unsigned RegIndex = MI.getOperand(RegOpIdx).getImm(); unsigned Channel = MI.getOperand(ChanOpIdx).getImm(); unsigned Address = calculateIndirectAddress(RegIndex, Channel); - unsigned OffsetReg = MI.getOperand(OffsetOpIdx).getReg(); + Register OffsetReg = MI.getOperand(OffsetOpIdx).getReg(); if (OffsetReg == 
R600::INDIRECT_BASE_ADDR) { buildMovInstr(MBB, MI, MI.getOperand(DstOpIdx).getReg(), getIndirectAddrRegClass()->getRegister(Address)); @@ -1051,7 +1051,7 @@ bool R600InstrInfo::expandPostRAPseudo(MachineInstr &MI) const { unsigned RegIndex = MI.getOperand(RegOpIdx).getImm(); unsigned Channel = MI.getOperand(ChanOpIdx).getImm(); unsigned Address = calculateIndirectAddress(RegIndex, Channel); - unsigned OffsetReg = MI.getOperand(OffsetOpIdx).getReg(); + Register OffsetReg = MI.getOperand(OffsetOpIdx).getReg(); if (OffsetReg == R600::INDIRECT_BASE_ADDR) { buildMovInstr(MBB, MI, getIndirectAddrRegClass()->getRegister(Address), MI.getOperand(ValOpIdx).getReg()); diff --git a/lib/Target/AMDGPU/R600MachineScheduler.cpp b/lib/Target/AMDGPU/R600MachineScheduler.cpp index e1abdb78452..7569a262953 100644 --- a/lib/Target/AMDGPU/R600MachineScheduler.cpp +++ b/lib/Target/AMDGPU/R600MachineScheduler.cpp @@ -270,7 +270,7 @@ R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const { } // Is the result already member of a X/Y/Z/W class ? - unsigned DestReg = MI->getOperand(0).getReg(); + Register DestReg = MI->getOperand(0).getReg(); if (regBelongsToClass(DestReg, &R600::R600_TReg32_XRegClass) || regBelongsToClass(DestReg, &R600::R600_AddrRegClass)) return AluT_X; @@ -357,7 +357,7 @@ void R600SchedStrategy::AssignSlot(MachineInstr* MI, unsigned Slot) { if (DstIndex == -1) { return; } - unsigned DestReg = MI->getOperand(DstIndex).getReg(); + Register DestReg = MI->getOperand(DstIndex).getReg(); // PressureRegister crashes if an operand is def and used in the same inst // and we try to constraint its regclass for (MachineInstr::mop_iterator It = MI->operands_begin(), diff --git a/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp b/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp index d34d30fcdd7..cec7f563f48 100644 --- a/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp +++ b/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp @@ -197,17 +197,17 @@ unsigned getReassignedChan( MachineInstr *R600VectorRegMerger::RebuildVector( RegSeqInfo *RSI, const RegSeqInfo *BaseRSI, const std::vector> &RemapChan) const { - unsigned Reg = RSI->Instr->getOperand(0).getReg(); + Register Reg = RSI->Instr->getOperand(0).getReg(); MachineBasicBlock::iterator Pos = RSI->Instr; MachineBasicBlock &MBB = *Pos->getParent(); DebugLoc DL = Pos->getDebugLoc(); - unsigned SrcVec = BaseRSI->Instr->getOperand(0).getReg(); + Register SrcVec = BaseRSI->Instr->getOperand(0).getReg(); DenseMap UpdatedRegToChan = BaseRSI->RegToChan; std::vector UpdatedUndef = BaseRSI->UndefReg; for (DenseMap::iterator It = RSI->RegToChan.begin(), E = RSI->RegToChan.end(); It != E; ++It) { - unsigned DstReg = MRI->createVirtualRegister(&R600::R600_Reg128RegClass); + Register DstReg = MRI->createVirtualRegister(&R600::R600_Reg128RegClass); unsigned SubReg = (*It).first; unsigned Swizzle = (*It).second; unsigned Chan = getReassignedChan(RemapChan, Swizzle); @@ -350,7 +350,7 @@ bool R600VectorRegMerger::runOnMachineFunction(MachineFunction &Fn) { MachineInstr &MI = *MII; if (MI.getOpcode() != R600::REG_SEQUENCE) { if (TII->get(MI.getOpcode()).TSFlags & R600_InstFlag::TEX_INST) { - unsigned Reg = MI.getOperand(1).getReg(); + Register Reg = MI.getOperand(1).getReg(); for (MachineRegisterInfo::def_instr_iterator It = MRI->def_instr_begin(Reg), E = MRI->def_instr_end(); It != E; ++It) { @@ -363,7 +363,7 @@ bool R600VectorRegMerger::runOnMachineFunction(MachineFunction &Fn) { RegSeqInfo RSI(*MRI, &MI); // All uses of MI are swizzeable ? 
- unsigned Reg = MI.getOperand(0).getReg(); + Register Reg = MI.getOperand(0).getReg(); if (!areAllUsesSwizzeable(Reg)) continue; diff --git a/lib/Target/AMDGPU/R600Packetizer.cpp b/lib/Target/AMDGPU/R600Packetizer.cpp index df200baf11c..176269f9b68 100644 --- a/lib/Target/AMDGPU/R600Packetizer.cpp +++ b/lib/Target/AMDGPU/R600Packetizer.cpp @@ -90,7 +90,7 @@ private: if (DstIdx == -1) { continue; } - unsigned Dst = BI->getOperand(DstIdx).getReg(); + Register Dst = BI->getOperand(DstIdx).getReg(); if (isTrans || TII->isTransOnly(*BI)) { Result[Dst] = R600::PS; continue; @@ -136,7 +136,7 @@ private: int OperandIdx = TII->getOperandIdx(MI.getOpcode(), Ops[i]); if (OperandIdx < 0) continue; - unsigned Src = MI.getOperand(OperandIdx).getReg(); + Register Src = MI.getOperand(OperandIdx).getReg(); const DenseMap::const_iterator It = PVs.find(Src); if (It != PVs.end()) MI.getOperand(OperandIdx).setReg(It->second); diff --git a/lib/Target/AMDGPU/SIAddIMGInit.cpp b/lib/Target/AMDGPU/SIAddIMGInit.cpp index f8094e35816..ee011286b8f 100644 --- a/lib/Target/AMDGPU/SIAddIMGInit.cpp +++ b/lib/Target/AMDGPU/SIAddIMGInit.cpp @@ -129,7 +129,7 @@ bool SIAddIMGInit::runOnMachineFunction(MachineFunction &MF) { continue; // Create a register for the intialization value. - unsigned PrevDst = + Register PrevDst = MRI.createVirtualRegister(TII->getOpRegClass(MI, DstIdx)); unsigned NewDst = 0; // Final initialized value will be in here @@ -150,7 +150,7 @@ bool SIAddIMGInit::runOnMachineFunction(MachineFunction &MF) { NewDst = MRI.createVirtualRegister(TII->getOpRegClass(MI, DstIdx)); // Initialize dword - unsigned SubReg = + Register SubReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), SubReg) .addImm(0); diff --git a/lib/Target/AMDGPU/SIFixSGPRCopies.cpp b/lib/Target/AMDGPU/SIFixSGPRCopies.cpp index a169133a6ec..4570ebf0569 100644 --- a/lib/Target/AMDGPU/SIFixSGPRCopies.cpp +++ b/lib/Target/AMDGPU/SIFixSGPRCopies.cpp @@ -161,8 +161,8 @@ static std::pair getCopyRegClasses(const MachineInstr &Copy, const SIRegisterInfo &TRI, const MachineRegisterInfo &MRI) { - unsigned DstReg = Copy.getOperand(0).getReg(); - unsigned SrcReg = Copy.getOperand(1).getReg(); + Register DstReg = Copy.getOperand(0).getReg(); + Register SrcReg = Copy.getOperand(1).getReg(); const TargetRegisterClass *SrcRC = Register::isVirtualRegister(SrcReg) ? 
MRI.getRegClass(SrcReg) @@ -197,8 +197,8 @@ static bool tryChangeVGPRtoSGPRinCopy(MachineInstr &MI, const SIInstrInfo *TII) { MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); auto &Src = MI.getOperand(1); - unsigned DstReg = MI.getOperand(0).getReg(); - unsigned SrcReg = Src.getReg(); + Register DstReg = MI.getOperand(0).getReg(); + Register SrcReg = Src.getReg(); if (!Register::isVirtualRegister(SrcReg) || !Register::isVirtualRegister(DstReg)) return false; @@ -236,7 +236,7 @@ static bool foldVGPRCopyIntoRegSequence(MachineInstr &MI, MachineRegisterInfo &MRI) { assert(MI.isRegSequence()); - unsigned DstReg = MI.getOperand(0).getReg(); + Register DstReg = MI.getOperand(0).getReg(); if (!TRI->isSGPRClass(MRI.getRegClass(DstReg))) return false; @@ -279,7 +279,7 @@ static bool foldVGPRCopyIntoRegSequence(MachineInstr &MI, bool IsAGPR = TRI->hasAGPRs(DstRC); for (unsigned I = 1, N = MI.getNumOperands(); I != N; I += 2) { - unsigned SrcReg = MI.getOperand(I).getReg(); + Register SrcReg = MI.getOperand(I).getReg(); unsigned SrcSubReg = MI.getOperand(I).getSubReg(); const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg); @@ -289,7 +289,7 @@ static bool foldVGPRCopyIntoRegSequence(MachineInstr &MI, SrcRC = TRI->getSubRegClass(SrcRC, SrcSubReg); const TargetRegisterClass *NewSrcRC = TRI->getEquivalentVGPRClass(SrcRC); - unsigned TmpReg = MRI.createVirtualRegister(NewSrcRC); + Register TmpReg = MRI.createVirtualRegister(NewSrcRC); BuildMI(*MI.getParent(), &MI, MI.getDebugLoc(), TII->get(AMDGPU::COPY), TmpReg) @@ -297,7 +297,7 @@ static bool foldVGPRCopyIntoRegSequence(MachineInstr &MI, if (IsAGPR) { const TargetRegisterClass *NewSrcRC = TRI->getEquivalentAGPRClass(SrcRC); - unsigned TmpAReg = MRI.createVirtualRegister(NewSrcRC); + Register TmpAReg = MRI.createVirtualRegister(NewSrcRC); unsigned Opc = NewSrcRC == &AMDGPU::AGPR_32RegClass ? 
AMDGPU::V_ACCVGPR_WRITE_B32 : AMDGPU::COPY; BuildMI(*MI.getParent(), &MI, MI.getDebugLoc(), TII->get(Opc), @@ -318,7 +318,7 @@ static bool phiHasVGPROperands(const MachineInstr &PHI, const SIRegisterInfo *TRI, const SIInstrInfo *TII) { for (unsigned i = 1; i < PHI.getNumOperands(); i += 2) { - unsigned Reg = PHI.getOperand(i).getReg(); + Register Reg = PHI.getOperand(i).getReg(); if (TRI->hasVGPRs(MRI.getRegClass(Reg))) return true; } @@ -329,7 +329,7 @@ static bool phiHasBreakDef(const MachineInstr &PHI, const MachineRegisterInfo &MRI, SmallSet &Visited) { for (unsigned i = 1; i < PHI.getNumOperands(); i += 2) { - unsigned Reg = PHI.getOperand(i).getReg(); + Register Reg = PHI.getOperand(i).getReg(); if (Visited.count(Reg)) continue; @@ -641,7 +641,7 @@ bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) { } if (isVGPRToSGPRCopy(SrcRC, DstRC, *TRI)) { - unsigned SrcReg = MI.getOperand(1).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); if (!Register::isVirtualRegister(SrcReg)) { TII->moveToVALU(MI, MDT); break; @@ -666,7 +666,7 @@ bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) { break; } case AMDGPU::PHI: { - unsigned Reg = MI.getOperand(0).getReg(); + Register Reg = MI.getOperand(0).getReg(); if (!TRI->isSGPRClass(MRI.getRegClass(Reg))) break; diff --git a/lib/Target/AMDGPU/SIFoldOperands.cpp b/lib/Target/AMDGPU/SIFoldOperands.cpp index ca8448ab674..e33cf1d9008 100644 --- a/lib/Target/AMDGPU/SIFoldOperands.cpp +++ b/lib/Target/AMDGPU/SIFoldOperands.cpp @@ -248,7 +248,7 @@ static bool updateOperand(FoldCandidate &Fold, bool HaveNonDbgCarryUse = !MRI.use_nodbg_empty(Dst1.getReg()); const TargetRegisterClass *Dst0RC = MRI.getRegClass(Dst0.getReg()); - unsigned NewReg0 = MRI.createVirtualRegister(Dst0RC); + Register NewReg0 = MRI.createVirtualRegister(Dst0RC); MachineInstr *Inst32 = TII.buildShrunkInst(*MI, Op32); @@ -443,7 +443,7 @@ static bool tryToFoldACImm(const SIInstrInfo *TII, if (!OpToFold.isReg()) return false; - unsigned UseReg = OpToFold.getReg(); + Register UseReg = OpToFold.getReg(); if (!Register::isVirtualRegister(UseReg)) return false; @@ -518,7 +518,7 @@ void SIFoldOperands::foldOperand( // REG_SEQUENCE instructions, so we have to fold them into the // uses of REG_SEQUENCE. if (UseMI->isRegSequence()) { - unsigned RegSeqDstReg = UseMI->getOperand(0).getReg(); + Register RegSeqDstReg = UseMI->getOperand(0).getReg(); unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm(); MachineRegisterInfo::use_iterator Next; @@ -569,12 +569,12 @@ void SIFoldOperands::foldOperand( OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal(); if (FoldingImmLike && UseMI->isCopy()) { - unsigned DestReg = UseMI->getOperand(0).getReg(); + Register DestReg = UseMI->getOperand(0).getReg(); const TargetRegisterClass *DestRC = Register::isVirtualRegister(DestReg) ? MRI->getRegClass(DestReg) : TRI->getPhysRegClass(DestReg); - unsigned SrcReg = UseMI->getOperand(1).getReg(); + Register SrcReg = UseMI->getOperand(1).getReg(); if (Register::isVirtualRegister(DestReg) && Register::isVirtualRegister(SrcReg)) { const TargetRegisterClass * SrcRC = MRI->getRegClass(SrcReg); @@ -710,7 +710,7 @@ void SIFoldOperands::foldOperand( // Split 64-bit constants into 32-bits for folding. 
if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) { - unsigned UseReg = UseOp.getReg(); + Register UseReg = UseOp.getReg(); const TargetRegisterClass *UseRC = MRI->getRegClass(UseReg); if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64) diff --git a/lib/Target/AMDGPU/SIFormMemoryClauses.cpp b/lib/Target/AMDGPU/SIFormMemoryClauses.cpp index 1fcf8fbeb35..26bae5734df 100644 --- a/lib/Target/AMDGPU/SIFormMemoryClauses.cpp +++ b/lib/Target/AMDGPU/SIFormMemoryClauses.cpp @@ -120,7 +120,7 @@ static bool isValidClauseInst(const MachineInstr &MI, bool IsVMEMClause) { return false; // If this is a load instruction where the result has been coalesced with an operand, then we cannot clause it. for (const MachineOperand &ResMO : MI.defs()) { - unsigned ResReg = ResMO.getReg(); + Register ResReg = ResMO.getReg(); for (const MachineOperand &MO : MI.uses()) { if (!MO.isReg() || MO.isDef()) continue; @@ -216,7 +216,7 @@ bool SIFormMemoryClauses::canBundle(const MachineInstr &MI, if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); // If it is tied we will need to write same register as we read. if (MO.isTied()) @@ -265,7 +265,7 @@ void SIFormMemoryClauses::collectRegUses(const MachineInstr &MI, for (const MachineOperand &MO : MI.operands()) { if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Reg) continue; diff --git a/lib/Target/AMDGPU/SIFrameLowering.cpp b/lib/Target/AMDGPU/SIFrameLowering.cpp index feab6bed260..45c06ebb547 100644 --- a/lib/Target/AMDGPU/SIFrameLowering.cpp +++ b/lib/Target/AMDGPU/SIFrameLowering.cpp @@ -202,15 +202,15 @@ void SIFrameLowering::emitFlatScratchInit(const GCNSubtarget &ST, DebugLoc DL; MachineBasicBlock::iterator I = MBB.begin(); - unsigned FlatScratchInitReg - = MFI->getPreloadedReg(AMDGPUFunctionArgInfo::FLAT_SCRATCH_INIT); + Register FlatScratchInitReg = + MFI->getPreloadedReg(AMDGPUFunctionArgInfo::FLAT_SCRATCH_INIT); MachineRegisterInfo &MRI = MF.getRegInfo(); MRI.addLiveIn(FlatScratchInitReg); MBB.addLiveIn(FlatScratchInitReg); - unsigned FlatScrInitLo = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub0); - unsigned FlatScrInitHi = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub1); + Register FlatScrInitLo = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub0); + Register FlatScrInitHi = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub1); unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg(); @@ -424,8 +424,8 @@ void SIFrameLowering::emitEntryFunctionPrologue(MachineFunction &MF, getReservedPrivateSegmentWaveByteOffsetReg(ST, TII, TRI, MFI, MF); // We need to insert initialization of the scratch resource descriptor. 
- unsigned PreloadedScratchWaveOffsetReg = MFI->getPreloadedReg( - AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET); + Register PreloadedScratchWaveOffsetReg = MFI->getPreloadedReg( + AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET); unsigned PreloadedPrivateBufferReg = AMDGPU::NoRegister; if (ST.isAmdHsaOrMesa(F)) { @@ -539,9 +539,9 @@ void SIFrameLowering::emitEntryFunctionScratchSetup(const GCNSubtarget &ST, if (ST.isAmdPalOS()) { // The pointer to the GIT is formed from the offset passed in and either // the amdgpu-git-ptr-high function attribute or the top part of the PC - unsigned RsrcLo = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0); - unsigned RsrcHi = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1); - unsigned Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1); + Register RsrcLo = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0); + Register RsrcHi = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1); + Register Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1); const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32); @@ -601,14 +601,14 @@ void SIFrameLowering::emitEntryFunctionScratchSetup(const GCNSubtarget &ST, assert(!ST.isAmdHsaOrMesa(Fn)); const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32); - unsigned Rsrc2 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub2); - unsigned Rsrc3 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub3); + Register Rsrc2 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub2); + Register Rsrc3 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub3); // Use relocations to get the pointer, and setup the other bits manually. uint64_t Rsrc23 = TII->getScratchRsrcWords23(); if (MFI->hasImplicitBufferPtr()) { - unsigned Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1); + Register Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1); if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) { const MCInstrDesc &Mov64 = TII->get(AMDGPU::S_MOV_B64); @@ -640,8 +640,8 @@ void SIFrameLowering::emitEntryFunctionScratchSetup(const GCNSubtarget &ST, MBB.addLiveIn(MFI->getImplicitBufferPtrUserSGPR()); } } else { - unsigned Rsrc0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0); - unsigned Rsrc1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1); + Register Rsrc0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0); + Register Rsrc1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1); BuildMI(MBB, I, DL, SMovB32, Rsrc0) .addExternalSymbol("SCRATCH_RSRC_DWORD0") diff --git a/lib/Target/AMDGPU/SIISelLowering.cpp b/lib/Target/AMDGPU/SIISelLowering.cpp index 0485d22e08d..ae746a0e27b 100644 --- a/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/lib/Target/AMDGPU/SIISelLowering.cpp @@ -1883,7 +1883,7 @@ static void reservePrivateMemoryRegs(const TargetMachine &TM, // resource. For the Code Object V2 ABI, this will be the first 4 user // SGPR inputs. We can reserve those and use them directly. - unsigned PrivateSegmentBufferReg = + Register PrivateSegmentBufferReg = Info.getPreloadedReg(AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER); Info.setScratchRSrcReg(PrivateSegmentBufferReg); } else { @@ -1944,7 +1944,7 @@ static void reservePrivateMemoryRegs(const TargetMachine &TM, // // FIXME: Should not do this if inline asm is reading/writing these // registers. 
- unsigned PreloadedSP = Info.getPreloadedReg( + Register PreloadedSP = Info.getPreloadedReg( AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET); Info.setStackPtrOffsetReg(PreloadedSP); @@ -1994,7 +1994,7 @@ void SITargetLowering::insertCopiesSplitCSR( else llvm_unreachable("Unexpected register class in CSRsViaCopy!"); - unsigned NewVR = MRI->createVirtualRegister(RC); + Register NewVR = MRI->createVirtualRegister(RC); // Create copy from CSR to a virtual register. Entry->addLiveIn(*I); BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR) @@ -2157,7 +2157,7 @@ SDValue SITargetLowering::LowerFormalArguments( assert(VA.isRegLoc() && "Parameter must be in a register!"); - unsigned Reg = VA.getLocReg(); + Register Reg = VA.getLocReg(); const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT); EVT ValVT = VA.getValVT(); @@ -3121,7 +3121,7 @@ SITargetLowering::emitGWSMemViolTestLoop(MachineInstr &MI, bundleInstWithWaitcnt(MI); - unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); + Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); // Load and check TRAP_STS.MEM_VIOL BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_GETREG_B32), Reg) @@ -3162,10 +3162,10 @@ static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop( MachineBasicBlock::iterator I = LoopBB.begin(); const TargetRegisterClass *BoolRC = TRI->getBoolRC(); - unsigned PhiExec = MRI.createVirtualRegister(BoolRC); - unsigned NewExec = MRI.createVirtualRegister(BoolRC); - unsigned CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); - unsigned CondReg = MRI.createVirtualRegister(BoolRC); + Register PhiExec = MRI.createVirtualRegister(BoolRC); + Register NewExec = MRI.createVirtualRegister(BoolRC); + Register CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); + Register CondReg = MRI.createVirtualRegister(BoolRC); BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg) .addReg(InitReg) @@ -3264,9 +3264,9 @@ static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII, MachineBasicBlock::iterator I(&MI); const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); - unsigned DstReg = MI.getOperand(0).getReg(); - unsigned SaveExec = MRI.createVirtualRegister(BoolXExecRC); - unsigned TmpExec = MRI.createVirtualRegister(BoolXExecRC); + Register DstReg = MI.getOperand(0).getReg(); + Register SaveExec = MRI.createVirtualRegister(BoolXExecRC); + Register TmpExec = MRI.createVirtualRegister(BoolXExecRC); unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; unsigned MovExecOpc = ST.isWave32() ? 
AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; @@ -3339,7 +3339,7 @@ static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII, SetOn->getOperand(3).setIsUndef(); } else { - unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); + Register Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp) .add(*Idx) .addImm(Offset); @@ -3375,8 +3375,8 @@ static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI, MachineFunction *MF = MBB.getParent(); MachineRegisterInfo &MRI = MF->getRegInfo(); - unsigned Dst = MI.getOperand(0).getReg(); - unsigned SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg(); + Register Dst = MI.getOperand(0).getReg(); + Register SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg(); int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm(); const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg); @@ -3414,8 +3414,8 @@ static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI, const DebugLoc &DL = MI.getDebugLoc(); MachineBasicBlock::iterator I(&MI); - unsigned PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); - unsigned InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); + Register PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); + Register InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg); @@ -3466,7 +3466,7 @@ static MachineBasicBlock *emitIndirectDst(MachineInstr &MI, MachineFunction *MF = MBB.getParent(); MachineRegisterInfo &MRI = MF->getRegInfo(); - unsigned Dst = MI.getOperand(0).getReg(); + Register Dst = MI.getOperand(0).getReg(); const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src); const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val); @@ -3529,7 +3529,7 @@ static MachineBasicBlock *emitIndirectDst(MachineInstr &MI, const DebugLoc &DL = MI.getDebugLoc(); - unsigned PhiReg = MRI.createVirtualRegister(VecRC); + Register PhiReg = MRI.createVirtualRegister(VecRC); auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg, Offset, UseGPRIdxMode, false); @@ -3588,8 +3588,8 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter( MachineOperand &Src0 = MI.getOperand(1); MachineOperand &Src1 = MI.getOperand(2); - unsigned DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); - unsigned DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); + Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); + Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm(MI, MRI, Src0, BoolRC, AMDGPU::sub0, @@ -3656,8 +3656,8 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter( // S_CMOV_B64 exec, -1 MachineInstr *FirstMI = &*BB->begin(); MachineRegisterInfo &MRI = MF->getRegInfo(); - unsigned InputReg = MI.getOperand(0).getReg(); - unsigned CountReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); + Register InputReg = MI.getOperand(0).getReg(); + Register CountReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); bool Found = false; // Move the COPY of the input reg to the beginning, so that we can use it. 
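In the insertCopiesSplitCSR and custom-inserter hunks above, only the declared type of each local changes while every surrounding call stays the same; that works because MachineRegisterInfo::createVirtualRegister() and MachineOperand::getReg() already hand back a Register at this revision. Below is a hedged sketch of the recurring shape, written against LLVM headers of roughly this vintage; the helper name copyToNewVReg and its plumbing are illustrative only and not part of the patch.

  #include "llvm/CodeGen/MachineFunction.h"
  #include "llvm/CodeGen/MachineInstrBuilder.h"
  #include "llvm/CodeGen/MachineRegisterInfo.h"
  #include "llvm/CodeGen/Register.h"
  #include "llvm/CodeGen/TargetInstrInfo.h"

  using namespace llvm;

  // Copies the first operand of MI into a fresh virtual register of class RC.
  // createVirtualRegister() and getReg() both return Register, so declaring
  // the locals as Register (rather than unsigned) drops two implicit casts.
  static Register copyToNewVReg(MachineInstr &MI, const TargetRegisterClass *RC,
                                const TargetInstrInfo &TII) {
    MachineBasicBlock &MBB = *MI.getParent();
    MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
    Register SrcReg = MI.getOperand(0).getReg();      // was: unsigned
    Register NewReg = MRI.createVirtualRegister(RC);  // was: unsigned
    BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(TargetOpcode::COPY), NewReg)
        .addReg(SrcReg);
    return NewReg;
  }

The emitted COPY is identical either way; the change is purely in the type carried by the locals.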
@@ -3731,16 +3731,16 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter( const GCNSubtarget &ST = MF->getSubtarget(); const SIRegisterInfo *TRI = ST.getRegisterInfo(); - unsigned Dst = MI.getOperand(0).getReg(); - unsigned Src0 = MI.getOperand(1).getReg(); - unsigned Src1 = MI.getOperand(2).getReg(); + Register Dst = MI.getOperand(0).getReg(); + Register Src0 = MI.getOperand(1).getReg(); + Register Src1 = MI.getOperand(2).getReg(); const DebugLoc &DL = MI.getDebugLoc(); - unsigned SrcCond = MI.getOperand(3).getReg(); + Register SrcCond = MI.getOperand(3).getReg(); - unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); - unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); + Register DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); + Register DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); const auto *CondRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); - unsigned SrcCondCopy = MRI.createVirtualRegister(CondRC); + Register SrcCondCopy = MRI.createVirtualRegister(CondRC); BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy) .addReg(SrcCond); @@ -10377,7 +10377,7 @@ void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI, Node->use_begin()->isMachineOpcode() && Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG && !Node->use_begin()->hasAnyUseOfValue(0))) { - unsigned Def = MI.getOperand(0).getReg(); + Register Def = MI.getOperand(0).getReg(); // Change this into a noret atomic. MI.setDesc(TII->get(NoRetAtomicOp)); diff --git a/lib/Target/AMDGPU/SIInstrInfo.cpp b/lib/Target/AMDGPU/SIInstrInfo.cpp index 77dbd239ede..f4f858fa483 100644 --- a/lib/Target/AMDGPU/SIInstrInfo.cpp +++ b/lib/Target/AMDGPU/SIInstrInfo.cpp @@ -458,7 +458,7 @@ bool SIInstrInfo::shouldClusterMemOps(const MachineOperand &BaseOp1, const MachineRegisterInfo &MRI = FirstLdSt.getParent()->getParent()->getRegInfo(); - const unsigned Reg = FirstDst->getReg(); + const Register Reg = FirstDst->getReg(); const TargetRegisterClass *DstRC = Register::isVirtualRegister(Reg) ? MRI.getRegClass(Reg) @@ -807,7 +807,7 @@ void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB, "Not a VGPR32 reg"); if (Cond.size() == 1) { - unsigned SReg = MRI.createVirtualRegister(BoolXExecRC); + Register SReg = MRI.createVirtualRegister(BoolXExecRC); BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg) .add(Cond[0]); BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) @@ -820,7 +820,7 @@ void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB, assert(Cond[0].isImm() && "Cond[0] is not an immediate"); switch (Cond[0].getImm()) { case SIInstrInfo::SCC_TRUE: { - unsigned SReg = MRI.createVirtualRegister(BoolXExecRC); + Register SReg = MRI.createVirtualRegister(BoolXExecRC); BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32 : AMDGPU::S_CSELECT_B64), SReg) .addImm(-1) @@ -834,7 +834,7 @@ void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB, break; } case SIInstrInfo::SCC_FALSE: { - unsigned SReg = MRI.createVirtualRegister(BoolXExecRC); + Register SReg = MRI.createVirtualRegister(BoolXExecRC); BuildMI(MBB, I, DL, get(ST.isWave32() ? 
AMDGPU::S_CSELECT_B32 : AMDGPU::S_CSELECT_B64), SReg) .addImm(0) @@ -850,7 +850,7 @@ void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB, case SIInstrInfo::VCCNZ: { MachineOperand RegOp = Cond[1]; RegOp.setImplicit(false); - unsigned SReg = MRI.createVirtualRegister(BoolXExecRC); + Register SReg = MRI.createVirtualRegister(BoolXExecRC); BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg) .add(RegOp); BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) @@ -864,7 +864,7 @@ void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB, case SIInstrInfo::VCCZ: { MachineOperand RegOp = Cond[1]; RegOp.setImplicit(false); - unsigned SReg = MRI.createVirtualRegister(BoolXExecRC); + Register SReg = MRI.createVirtualRegister(BoolXExecRC); BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg) .add(RegOp); BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) @@ -876,8 +876,8 @@ void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB, break; } case SIInstrInfo::EXECNZ: { - unsigned SReg = MRI.createVirtualRegister(BoolXExecRC); - unsigned SReg2 = MRI.createVirtualRegister(RI.getBoolRC()); + Register SReg = MRI.createVirtualRegister(BoolXExecRC); + Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC()); BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32 : AMDGPU::S_OR_SAVEEXEC_B64), SReg2) .addImm(0); @@ -894,8 +894,8 @@ void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB, break; } case SIInstrInfo::EXECZ: { - unsigned SReg = MRI.createVirtualRegister(BoolXExecRC); - unsigned SReg2 = MRI.createVirtualRegister(RI.getBoolRC()); + Register SReg = MRI.createVirtualRegister(BoolXExecRC); + Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC()); BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32 : AMDGPU::S_OR_SAVEEXEC_B64), SReg2) .addImm(0); @@ -925,7 +925,7 @@ unsigned SIInstrInfo::insertEQ(MachineBasicBlock *MBB, const DebugLoc &DL, unsigned SrcReg, int Value) const { MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); - unsigned Reg = MRI.createVirtualRegister(RI.getBoolRC()); + Register Reg = MRI.createVirtualRegister(RI.getBoolRC()); BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_EQ_I32_e64), Reg) .addImm(Value) .addReg(SrcReg); @@ -938,7 +938,7 @@ unsigned SIInstrInfo::insertNE(MachineBasicBlock *MBB, const DebugLoc &DL, unsigned SrcReg, int Value) const { MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); - unsigned Reg = MRI.createVirtualRegister(RI.getBoolRC()); + Register Reg = MRI.createVirtualRegister(RI.getBoolRC()); BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_NE_I32_e64), Reg) .addImm(Value) .addReg(SrcReg); @@ -1083,7 +1083,7 @@ void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, auto MIB = BuildMI(MBB, MI, DL, get(Opcode)); if (RI.hasAGPRs(RC)) { MachineRegisterInfo &MRI = MF->getRegInfo(); - unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); + Register Tmp = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); MIB.addReg(Tmp, RegState::Define); } MIB.addReg(SrcReg, getKillRegState(isKill)) // data @@ -1208,7 +1208,7 @@ void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, auto MIB = BuildMI(MBB, MI, DL, get(Opcode), DestReg); if (RI.hasAGPRs(RC)) { MachineRegisterInfo &MRI = MF->getRegInfo(); - unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); + Register Tmp = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); MIB.addReg(Tmp, RegState::Define); } MIB.addFrameIndex(FrameIndex) // vaddr @@ -1242,13 +1242,13 @@ unsigned SIInstrInfo::calculateLDSSpillAddress( if 
(!AMDGPU::isShader(MF->getFunction().getCallingConv()) && WorkGroupSize > WavefrontSize) { - unsigned TIDIGXReg - = MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_X); - unsigned TIDIGYReg - = MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_Y); - unsigned TIDIGZReg - = MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_Z); - unsigned InputPtrReg = + Register TIDIGXReg = + MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_X); + Register TIDIGYReg = + MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_Y); + Register TIDIGZReg = + MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_Z); + Register InputPtrReg = MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR); for (unsigned Reg : {TIDIGXReg, TIDIGYReg, TIDIGZReg}) { if (!Entry.isLiveIn(Reg)) @@ -1416,9 +1416,9 @@ bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { break; case AMDGPU::V_MOV_B64_PSEUDO: { - unsigned Dst = MI.getOperand(0).getReg(); - unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0); - unsigned DstHi = RI.getSubReg(Dst, AMDGPU::sub1); + Register Dst = MI.getOperand(0).getReg(); + Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0); + Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1); const MachineOperand &SrcOp = MI.getOperand(1); // FIXME: Will this work for 64-bit floating point immediates? @@ -1475,7 +1475,7 @@ bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { case AMDGPU::V_MOVRELD_B32_V8: case AMDGPU::V_MOVRELD_B32_V16: { const MCInstrDesc &MovRelDesc = get(AMDGPU::V_MOVRELD_B32_e32); - unsigned VecReg = MI.getOperand(0).getReg(); + Register VecReg = MI.getOperand(0).getReg(); bool IsUndef = MI.getOperand(1).isUndef(); unsigned SubReg = AMDGPU::sub0 + MI.getOperand(3).getImm(); assert(VecReg == MI.getOperand(1).getReg()); @@ -1498,9 +1498,9 @@ bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { } case AMDGPU::SI_PC_ADD_REL_OFFSET: { MachineFunction &MF = *MBB.getParent(); - unsigned Reg = MI.getOperand(0).getReg(); - unsigned RegLo = RI.getSubReg(Reg, AMDGPU::sub0); - unsigned RegHi = RI.getSubReg(Reg, AMDGPU::sub1); + Register Reg = MI.getOperand(0).getReg(); + Register RegLo = RI.getSubReg(Reg, AMDGPU::sub0); + Register RegHi = RI.getSubReg(Reg, AMDGPU::sub1); // Create a bundle so these instructions won't be re-ordered by the // post-RA scheduler. @@ -1580,7 +1580,7 @@ bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI, static MachineInstr *swapRegAndNonRegOperand(MachineInstr &MI, MachineOperand &RegOp, MachineOperand &NonRegOp) { - unsigned Reg = RegOp.getReg(); + Register Reg = RegOp.getReg(); unsigned SubReg = RegOp.getSubReg(); bool IsKill = RegOp.isKill(); bool IsDead = RegOp.isDead(); @@ -1716,7 +1716,7 @@ unsigned SIInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB, // FIXME: Virtual register workaround for RegScavenger not working with empty // blocks. 
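The V_MOV_B64_PSEUDO expansion above is a good illustration of why these locals are registers rather than arbitrary integers: the 64-bit destination is split into its sub0/sub1 halves and two 32-bit moves are emitted. For the immediate case the split of the value itself is plain bit arithmetic; the following is a minimal standalone sketch of that part only (the MIR emission is omitted and the sample constant is made up).

  #include <cstdint>
  #include <cstdio>

  // Splits a 64-bit immediate into the two 32-bit halves that would feed the
  // lo/hi moves when a 64-bit move pseudo is expanded.
  static void splitImm64(uint64_t Imm, uint32_t &Lo, uint32_t &Hi) {
    Lo = static_cast<uint32_t>(Imm);        // bits 31..0  -> sub0 move
    Hi = static_cast<uint32_t>(Imm >> 32);  // bits 63..32 -> sub1 move
  }

  int main() {
    uint32_t Lo, Hi;
    splitImm64(0x123456789abcdef0ull, Lo, Hi);
    std::printf("lo = 0x%08x, hi = 0x%08x\n", Lo, Hi);
    return 0;
  }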
- unsigned PCReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); + Register PCReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); auto I = MBB.end(); @@ -2170,7 +2170,7 @@ void SIInstrInfo::insertSelect(MachineBasicBlock &MBB, SmallVector Regs; for (int Idx = 0; Idx != NElts; ++Idx) { - unsigned DstElt = MRI.createVirtualRegister(EltRC); + Register DstElt = MRI.createVirtualRegister(EltRC); Regs.push_back(DstElt); unsigned SubIdx = SubIndices[Idx]; @@ -2334,7 +2334,7 @@ bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, UseMI.RemoveOperand( AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp)); - unsigned Src1Reg = Src1->getReg(); + Register Src1Reg = Src1->getReg(); unsigned Src1SubReg = Src1->getSubReg(); Src0->setReg(Src1Reg); Src0->setSubReg(Src1SubReg); @@ -3152,7 +3152,7 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI, if (!Op.isReg()) continue; - unsigned Reg = Op.getReg(); + Register Reg = Op.getReg(); if (!Register::isVirtualRegister(Reg) && !RC->contains(Reg)) { ErrInfo = "inlineasm operand has incorrect register class."; return false; @@ -3217,7 +3217,7 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI, continue; if (RegClass != -1) { - unsigned Reg = MI.getOperand(i).getReg(); + Register Reg = MI.getOperand(i).getReg(); if (Reg == AMDGPU::NoRegister || Register::isVirtualRegister(Reg)) continue; @@ -3716,7 +3716,7 @@ const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI, const MCInstrDesc &Desc = get(MI.getOpcode()); if (MI.isVariadic() || OpNo >= Desc.getNumOperands() || Desc.OpInfo[OpNo].RegClass == -1) { - unsigned Reg = MI.getOperand(OpNo).getReg(); + Register Reg = MI.getOperand(OpNo).getReg(); if (Register::isVirtualRegister(Reg)) return MRI.getRegClass(Reg); @@ -3749,7 +3749,7 @@ void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const { else VRC = &AMDGPU::VGPR_32RegClass; - unsigned Reg = MRI.createVirtualRegister(VRC); + Register Reg = MRI.createVirtualRegister(VRC); DebugLoc DL = MBB->findDebugLoc(I); BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).add(MO); MO.ChangeToRegister(Reg, false); @@ -3764,7 +3764,7 @@ unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI, const { MachineBasicBlock *MBB = MI->getParent(); DebugLoc DL = MI->getDebugLoc(); - unsigned SubReg = MRI.createVirtualRegister(SubRC); + Register SubReg = MRI.createVirtualRegister(SubRC); if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) { BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg) @@ -3776,7 +3776,7 @@ unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI, // value so we don't need to worry about merging its subreg index with the // SubIdx passed to this function. The register coalescer should be able to // eliminate this extra copy. - unsigned NewSuperReg = MRI.createVirtualRegister(SuperRC); + Register NewSuperReg = MRI.createVirtualRegister(SuperRC); BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg) .addReg(SuperReg.getReg(), 0, SuperReg.getSubReg()); @@ -3822,7 +3822,7 @@ bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI, if (!MO.isReg()) return false; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); const TargetRegisterClass *RC = Register::isVirtualRegister(Reg) ? 
MRI.getRegClass(Reg) : RI.getPhysRegClass(Reg); @@ -3942,13 +3942,13 @@ void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI, if (Opc == AMDGPU::V_WRITELANE_B32) { const DebugLoc &DL = MI.getDebugLoc(); if (Src0.isReg() && RI.isVGPR(MRI, Src0.getReg())) { - unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); + Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) .add(Src0); Src0.ChangeToRegister(Reg, false); } if (Src1.isReg() && RI.isVGPR(MRI, Src1.getReg())) { - unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); + Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); const DebugLoc &DL = MI.getDebugLoc(); BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) .add(Src1); @@ -3974,7 +3974,7 @@ void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI, // select is uniform. if (Opc == AMDGPU::V_READLANE_B32 && Src1.isReg() && RI.isVGPR(MRI, Src1.getReg())) { - unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); + Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); const DebugLoc &DL = MI.getDebugLoc(); BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) .add(Src1); @@ -4010,7 +4010,7 @@ void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI, MI.setDesc(get(CommutedOpc)); - unsigned Src0Reg = Src0.getReg(); + Register Src0Reg = Src0.getReg(); unsigned Src0SubReg = Src0.getSubReg(); bool Src0Kill = Src0.isKill(); @@ -4046,13 +4046,13 @@ void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI, MachineOperand &Src2 = MI.getOperand(VOP3Idx[2]); const DebugLoc &DL = MI.getDebugLoc(); if (Src1.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src1.getReg()))) { - unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); + Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) .add(Src1); Src1.ChangeToRegister(Reg, false); } if (Src2.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src2.getReg()))) { - unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); + Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) .add(Src2); Src2.ChangeToRegister(Reg, false); @@ -4120,12 +4120,12 @@ unsigned SIInstrInfo::readlaneVGPRToSGPR(unsigned SrcReg, MachineInstr &UseMI, MachineRegisterInfo &MRI) const { const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg); const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC); - unsigned DstReg = MRI.createVirtualRegister(SRC); + Register DstReg = MRI.createVirtualRegister(SRC); unsigned SubRegs = RI.getRegSizeInBits(*VRC) / 32; if (RI.hasAGPRs(VRC)) { VRC = RI.getEquivalentVGPRClass(VRC); - unsigned NewSrcReg = MRI.createVirtualRegister(VRC); + Register NewSrcReg = MRI.createVirtualRegister(VRC); BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), get(TargetOpcode::COPY), NewSrcReg) .addReg(SrcReg); @@ -4141,7 +4141,7 @@ unsigned SIInstrInfo::readlaneVGPRToSGPR(unsigned SrcReg, MachineInstr &UseMI, SmallVector SRegs; for (unsigned i = 0; i < SubRegs; ++i) { - unsigned SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); + Register SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), get(AMDGPU::V_READFIRSTLANE_B32), SGPR) .addReg(SrcReg, 
0, RI.getSubRegFromChannel(i)); @@ -4183,7 +4183,7 @@ void SIInstrInfo::legalizeGenericOperand(MachineBasicBlock &InsertMBB, MachineOperand &Op, MachineRegisterInfo &MRI, const DebugLoc &DL) const { - unsigned OpReg = Op.getReg(); + Register OpReg = Op.getReg(); unsigned OpSubReg = Op.getSubReg(); const TargetRegisterClass *OpRC = RI.getSubClassWithSubReg( @@ -4193,7 +4193,7 @@ void SIInstrInfo::legalizeGenericOperand(MachineBasicBlock &InsertMBB, if (DstRC == OpRC) return; - unsigned DstReg = MRI.createVirtualRegister(DstRC); + Register DstReg = MRI.createVirtualRegister(DstRC); MachineInstr *Copy = BuildMI(InsertMBB, I, DL, get(AMDGPU::COPY), DstReg).add(Op); @@ -4230,18 +4230,18 @@ emitLoadSRsrcFromVGPRLoop(const SIInstrInfo &TII, MachineRegisterInfo &MRI, MachineBasicBlock::iterator I = LoopBB.begin(); - unsigned VRsrc = Rsrc.getReg(); + Register VRsrc = Rsrc.getReg(); unsigned VRsrcUndef = getUndefRegState(Rsrc.isUndef()); - unsigned SaveExec = MRI.createVirtualRegister(BoolXExecRC); - unsigned CondReg0 = MRI.createVirtualRegister(BoolXExecRC); - unsigned CondReg1 = MRI.createVirtualRegister(BoolXExecRC); - unsigned AndCond = MRI.createVirtualRegister(BoolXExecRC); - unsigned SRsrcSub0 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); - unsigned SRsrcSub1 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); - unsigned SRsrcSub2 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); - unsigned SRsrcSub3 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); - unsigned SRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass); + Register SaveExec = MRI.createVirtualRegister(BoolXExecRC); + Register CondReg0 = MRI.createVirtualRegister(BoolXExecRC); + Register CondReg1 = MRI.createVirtualRegister(BoolXExecRC); + Register AndCond = MRI.createVirtualRegister(BoolXExecRC); + Register SRsrcSub0 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); + Register SRsrcSub1 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); + Register SRsrcSub2 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); + Register SRsrcSub3 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); + Register SRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass); // Beginning of the loop, read the next Rsrc variant. BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub0) @@ -4309,7 +4309,7 @@ static void loadSRsrcFromVGPR(const SIInstrInfo &TII, MachineInstr &MI, unsigned MovExecOpc = ST.isWave32() ? 
AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); - unsigned SaveExec = MRI.createVirtualRegister(BoolXExecRC); + Register SaveExec = MRI.createVirtualRegister(BoolXExecRC); // Save the EXEC mask BuildMI(MBB, I, DL, TII.get(MovExecOpc), SaveExec).addReg(Exec); @@ -4377,10 +4377,10 @@ extractRsrcPtr(const SIInstrInfo &TII, MachineInstr &MI, MachineOperand &Rsrc) { AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass); // Create an empty resource descriptor - unsigned Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); - unsigned SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); - unsigned SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); - unsigned NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass); + Register Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); + Register SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); + Register SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); + Register NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass); uint64_t RsrcDataFormat = TII.getDefaultRsrcDataFormat(); // Zero64 = 0 @@ -4509,8 +4509,8 @@ void SIInstrInfo::legalizeOperands(MachineInstr &MI, // Legalize INSERT_SUBREG // src0 must have the same register class as dst if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) { - unsigned Dst = MI.getOperand(0).getReg(); - unsigned Src0 = MI.getOperand(1).getReg(); + Register Dst = MI.getOperand(0).getReg(); + Register Src0 = MI.getOperand(1).getReg(); const TargetRegisterClass *DstRC = MRI.getRegClass(Dst); const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0); if (DstRC != Src0RC) { @@ -4584,13 +4584,13 @@ void SIInstrInfo::legalizeOperands(MachineInstr &MI, if (VAddr && AMDGPU::getIfAddr64Inst(MI.getOpcode()) != -1) { // This is already an ADDR64 instruction so we need to add the pointer // extracted from the resource descriptor to the current value of VAddr. 
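The emitLoadSRsrcFromVGPRLoop and loadSRsrcFromVGPR changes above sit inside a waterfall loop: a possibly divergent VGPR resource descriptor is moved into scalar registers by repeatedly taking the first still-active lane's value (V_READFIRSTLANE), running the operation for every lane that holds the same value, and masking those lanes off until the exec mask is empty. The code below is a standalone simulation of that control flow over an array of per-lane values; the lane data is invented and this models the idea, not the emitted MIR.

  #include <cstdint>
  #include <cstdio>
  #include <vector>

  // Simulates the waterfall loop: handle all lanes that share a value in one
  // iteration, then retire them, until no active lanes remain.
  static void waterfall(const std::vector<uint32_t> &LaneValues) {
    uint64_t Exec = (LaneValues.size() >= 64)
                        ? ~0ull
                        : ((1ull << LaneValues.size()) - 1); // active-lane mask
    while (Exec != 0) {
      // Like V_READFIRSTLANE: take the value of the lowest active lane.
      unsigned FirstLane = static_cast<unsigned>(__builtin_ctzll(Exec));
      uint32_t Uniform = LaneValues[FirstLane];

      // Compare against all active lanes; the matching lanes form the
      // active set for this iteration of the loop body.
      uint64_t Match = 0;
      for (unsigned L = 0; L < LaneValues.size(); ++L)
        if ((Exec & (1ull << L)) && LaneValues[L] == Uniform)
          Match |= 1ull << L;

      std::printf("iteration with uniform value %u, lane mask 0x%llx\n",
                  (unsigned)Uniform, (unsigned long long)Match);

      // Retire the lanes that were just handled.
      Exec &= ~Match;
    }
  }

  int main() {
    waterfall({7, 7, 3, 7, 3, 9}); // three iterations: the 7s, the 3s, then 9
    return 0;
  }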
- unsigned NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); - unsigned NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); - unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); + Register NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); + Register NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); + Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); const auto *BoolXExecRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); - unsigned CondReg0 = MRI.createVirtualRegister(BoolXExecRC); - unsigned CondReg1 = MRI.createVirtualRegister(BoolXExecRC); + Register CondReg0 = MRI.createVirtualRegister(BoolXExecRC); + Register CondReg1 = MRI.createVirtualRegister(BoolXExecRC); unsigned RsrcPtr, NewSRsrc; std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc); @@ -4630,7 +4630,7 @@ void SIInstrInfo::legalizeOperands(MachineInstr &MI, unsigned RsrcPtr, NewSRsrc; std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc); - unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); + Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata); MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset); MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset); @@ -4940,7 +4940,7 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst, bool HasDst = Inst.getOperand(0).isReg() && Inst.getOperand(0).isDef(); unsigned NewDstReg = AMDGPU::NoRegister; if (HasDst) { - unsigned DstReg = Inst.getOperand(0).getReg(); + Register DstReg = Inst.getOperand(0).getReg(); if (Register::isPhysicalRegister(DstReg)) continue; @@ -4995,8 +4995,8 @@ bool SIInstrInfo::moveScalarAddSub(SetVectorType &Worklist, MachineInstr &Inst, MachineBasicBlock &MBB = *Inst.getParent(); MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); - unsigned OldDstReg = Inst.getOperand(0).getReg(); - unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); + Register OldDstReg = Inst.getOperand(0).getReg(); + Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); unsigned Opc = Inst.getOpcode(); assert(Opc == AMDGPU::S_ADD_I32 || Opc == AMDGPU::S_SUB_I32); @@ -5029,8 +5029,8 @@ void SIInstrInfo::lowerScalarAbs(SetVectorType &Worklist, MachineOperand &Dest = Inst.getOperand(0); MachineOperand &Src = Inst.getOperand(1); - unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); - unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); + Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); + Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); unsigned SubOp = ST.hasAddNoCarry() ? 
AMDGPU::V_SUB_U32_e32 : AMDGPU::V_SUB_I32_e32; @@ -5059,7 +5059,7 @@ void SIInstrInfo::lowerScalarXnor(SetVectorType &Worklist, MachineOperand &Src1 = Inst.getOperand(2); if (ST.hasDLInsts()) { - unsigned NewDest = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); + Register NewDest = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src0, MRI, DL); legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src1, MRI, DL); @@ -5079,8 +5079,8 @@ void SIInstrInfo::lowerScalarXnor(SetVectorType &Worklist, bool Src1IsSGPR = Src1.isReg() && RI.isSGPRClass(MRI.getRegClass(Src1.getReg())); MachineInstr *Xor; - unsigned Temp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); - unsigned NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); + Register Temp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); + Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); // Build a pair of scalar instructions and add them to the work list. // The next iteration over the work list will lower these to the vector @@ -5124,8 +5124,8 @@ void SIInstrInfo::splitScalarNotBinop(SetVectorType &Worklist, MachineOperand &Src0 = Inst.getOperand(1); MachineOperand &Src1 = Inst.getOperand(2); - unsigned NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); - unsigned Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); + Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); + Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), Interm) .add(Src0) @@ -5153,8 +5153,8 @@ void SIInstrInfo::splitScalarBinOpN2(SetVectorType& Worklist, MachineOperand &Src0 = Inst.getOperand(1); MachineOperand &Src1 = Inst.getOperand(2); - unsigned NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); - unsigned Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); + Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); + Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Interm) .add(Src1); @@ -5196,16 +5196,16 @@ void SIInstrInfo::splitScalar64BitUnaryOp( const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC); const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0); - unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC); + Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC); MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0).add(SrcReg0Sub0); MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, AMDGPU::sub1, Src0SubRC); - unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC); + Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC); MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1).add(SrcReg0Sub1); - unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC); + Register FullDestReg = MRI.createVirtualRegister(NewDestRC); BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) .addReg(DestSub0) .addImm(AMDGPU::sub0) @@ -5233,12 +5233,12 @@ void SIInstrInfo::splitScalar64BitAddSub(SetVectorType &Worklist, MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); const auto *CarryRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); - unsigned FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); - unsigned DestSub0 = 
MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); - unsigned DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); + Register FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); + Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); + Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); - unsigned CarryReg = MRI.createVirtualRegister(CarryRC); - unsigned DeadCarryReg = MRI.createVirtualRegister(CarryRC); + Register CarryReg = MRI.createVirtualRegister(CarryRC); + Register DeadCarryReg = MRI.createVirtualRegister(CarryRC); MachineOperand &Dest = Inst.getOperand(0); MachineOperand &Src0 = Inst.getOperand(1); @@ -5334,17 +5334,17 @@ void SIInstrInfo::splitScalar64BitBinaryOp(SetVectorType &Worklist, const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC); const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0); - unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC); + Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC); MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0) .add(SrcReg0Sub0) .add(SrcReg1Sub0); - unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC); + Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC); MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1) .add(SrcReg0Sub1) .add(SrcReg1Sub1); - unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC); + Register FullDestReg = MRI.createVirtualRegister(NewDestRC); BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) .addReg(DestSub0) .addImm(AMDGPU::sub0) @@ -5375,7 +5375,7 @@ void SIInstrInfo::splitScalar64BitXnor(SetVectorType &Worklist, const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); - unsigned Interm = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); + Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); MachineOperand* Op0; MachineOperand* Op1; @@ -5391,7 +5391,7 @@ void SIInstrInfo::splitScalar64BitXnor(SetVectorType &Worklist, BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B64), Interm) .add(*Op0); - unsigned NewDest = MRI.createVirtualRegister(DestRC); + Register NewDest = MRI.createVirtualRegister(DestRC); MachineInstr &Xor = *BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B64), NewDest) .addReg(Interm) @@ -5418,8 +5418,8 @@ void SIInstrInfo::splitScalar64BitBCNT( MRI.getRegClass(Src.getReg()) : &AMDGPU::SGPR_32RegClass; - unsigned MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); - unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); + Register MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); + Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0); @@ -5458,9 +5458,9 @@ void SIInstrInfo::splitScalar64BitBFE(SetVectorType &Worklist, Offset == 0 && "Not implemented"); if (BitWidth < 32) { - unsigned MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); - unsigned MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); - unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); + Register MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); + Register MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); + Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32), MidRegLo) .addReg(Inst.getOperand(1).getReg(), 0, AMDGPU::sub0) @@ -5483,8 +5483,8 @@ void 
SIInstrInfo::splitScalar64BitBFE(SetVectorType &Worklist, } MachineOperand &Src = Inst.getOperand(1); - unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); - unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); + Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); + Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg) .addImm(31) @@ -5539,7 +5539,7 @@ void SIInstrInfo::addUsersToMoveToVALUWorklist( void SIInstrInfo::movePackToVALU(SetVectorType &Worklist, MachineRegisterInfo &MRI, MachineInstr &Inst) const { - unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); + Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); MachineBasicBlock *MBB = Inst.getParent(); MachineOperand &Src0 = Inst.getOperand(1); MachineOperand &Src1 = Inst.getOperand(2); @@ -5547,8 +5547,8 @@ void SIInstrInfo::movePackToVALU(SetVectorType &Worklist, switch (Inst.getOpcode()) { case AMDGPU::S_PACK_LL_B32_B16: { - unsigned ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); - unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); + Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); + Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); // FIXME: Can do a lot better if we know the high bits of src0 or src1 are // 0. @@ -5566,7 +5566,7 @@ void SIInstrInfo::movePackToVALU(SetVectorType &Worklist, break; } case AMDGPU::S_PACK_LH_B32_B16: { - unsigned ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); + Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) .addImm(0xffff); BuildMI(*MBB, Inst, DL, get(AMDGPU::V_BFI_B32), ResultReg) @@ -5576,8 +5576,8 @@ void SIInstrInfo::movePackToVALU(SetVectorType &Worklist, break; } case AMDGPU::S_PACK_HH_B32_B16: { - unsigned ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); - unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); + Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); + Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHRREV_B32_e64), TmpReg) .addImm(16) .add(Src0); @@ -5695,7 +5695,7 @@ unsigned SIInstrInfo::findUsedSGPR(const MachineInstr &MI, return MO.getReg(); // If this could be a VGPR or an SGPR, Check the dynamic register class. 
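The movePackToVALU hunks above lower the scalar S_PACK_{LL,LH,HH}_B32_B16 forms into vector ALU sequences built around an 0xffff mask, 16-bit shifts, and V_BFI. What those sequences compute is simple bit packing; here is a standalone scalar model of the three variants, with arbitrary sample inputs, offered as an aid to reading the hunks rather than as part of the patch.

  #include <cstdint>
  #include <cstdio>

  // S_PACK_LL: low half of A in bits 15..0, low half of B in bits 31..16.
  static uint32_t packLL(uint32_t A, uint32_t B) {
    return (A & 0xffffu) | (B << 16);
  }

  // S_PACK_LH: low half of A, high half of B (already in bits 31..16).
  static uint32_t packLH(uint32_t A, uint32_t B) {
    return (A & 0xffffu) | (B & 0xffff0000u);
  }

  // S_PACK_HH: high half of A moved down, high half of B kept up.
  static uint32_t packHH(uint32_t A, uint32_t B) {
    return (A >> 16) | (B & 0xffff0000u);
  }

  int main() {
    uint32_t A = 0x11112222u, B = 0x33334444u;
    std::printf("LL=0x%08x LH=0x%08x HH=0x%08x\n",
                packLL(A, B), packLH(A, B), packHH(A, B));
    return 0;
  }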
- unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); const TargetRegisterClass *RegRC = MRI.getRegClass(Reg); if (RI.isSGPRClass(RegRC)) UsedSGPRs[i] = Reg; @@ -5950,7 +5950,7 @@ void SIInstrInfo::convertNonUniformIfRegion(MachineBasicBlock *IfEntry, MachineRegisterInfo &MRI = IfEntry->getParent()->getRegInfo(); if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) { - unsigned DstReg = MRI.createVirtualRegister(RI.getBoolRC()); + Register DstReg = MRI.createVirtualRegister(RI.getBoolRC()); MachineInstr *SIIF = BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_IF), DstReg) .add(Branch->getOperand(0)) @@ -5977,8 +5977,8 @@ void SIInstrInfo::convertNonUniformLoopRegion( if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) { - unsigned DstReg = MRI.createVirtualRegister(RI.getBoolRC()); - unsigned BackEdgeReg = MRI.createVirtualRegister(RI.getBoolRC()); + Register DstReg = MRI.createVirtualRegister(RI.getBoolRC()); + Register BackEdgeReg = MRI.createVirtualRegister(RI.getBoolRC()); MachineInstrBuilder HeaderPHIBuilder = BuildMI(*(MF), Branch->getDebugLoc(), get(TargetOpcode::PHI), DstReg); for (MachineBasicBlock::pred_iterator PI = LoopEntry->pred_begin(), @@ -5988,7 +5988,7 @@ void SIInstrInfo::convertNonUniformLoopRegion( HeaderPHIBuilder.addReg(BackEdgeReg); } else { MachineBasicBlock *PMBB = *PI; - unsigned ZeroReg = MRI.createVirtualRegister(RI.getBoolRC()); + Register ZeroReg = MRI.createVirtualRegister(RI.getBoolRC()); materializeImmediate(*PMBB, PMBB->getFirstTerminator(), DebugLoc(), ZeroReg, 0); HeaderPHIBuilder.addReg(ZeroReg); @@ -6072,7 +6072,7 @@ SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB, return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e64), DestReg); MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); - unsigned UnusedCarry = MRI.createVirtualRegister(RI.getBoolRC()); + Register UnusedCarry = MRI.createVirtualRegister(RI.getBoolRC()); MRI.setRegAllocationHint(UnusedCarry, 0, RI.getVCC()); return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_I32_e64), DestReg) diff --git a/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp index bded67e37e2..30ee08220d5 100644 --- a/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp +++ b/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp @@ -725,15 +725,15 @@ SILoadStoreOptimizer::mergeRead2Pair(CombineInfo &CI) { const TargetRegisterClass *SuperRC = (CI.EltSize == 4) ? 
&AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass; - unsigned DestReg = MRI->createVirtualRegister(SuperRC); + Register DestReg = MRI->createVirtualRegister(SuperRC); DebugLoc DL = CI.I->getDebugLoc(); - unsigned BaseReg = AddrReg->getReg(); + Register BaseReg = AddrReg->getReg(); unsigned BaseSubReg = AddrReg->getSubReg(); unsigned BaseRegFlags = 0; if (CI.BaseOff) { - unsigned ImmReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass); + Register ImmReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass); BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg) .addImm(CI.BaseOff); @@ -823,11 +823,11 @@ SILoadStoreOptimizer::mergeWrite2Pair(CombineInfo &CI) { const MCInstrDesc &Write2Desc = TII->get(Opc); DebugLoc DL = CI.I->getDebugLoc(); - unsigned BaseReg = AddrReg->getReg(); + Register BaseReg = AddrReg->getReg(); unsigned BaseSubReg = AddrReg->getSubReg(); unsigned BaseRegFlags = 0; if (CI.BaseOff) { - unsigned ImmReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass); + Register ImmReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass); BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg) .addImm(CI.BaseOff); @@ -869,7 +869,7 @@ SILoadStoreOptimizer::mergeSBufferLoadImmPair(CombineInfo &CI) { const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI); - unsigned DestReg = MRI->createVirtualRegister(SuperRC); + Register DestReg = MRI->createVirtualRegister(SuperRC); unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1); // It shouldn't be possible to get this far if the two instructions @@ -921,7 +921,7 @@ SILoadStoreOptimizer::mergeBufferLoadPair(CombineInfo &CI) { const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI); // Copy to the new source register. - unsigned DestReg = MRI->createVirtualRegister(SuperRC); + Register DestReg = MRI->createVirtualRegister(SuperRC); unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1); auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg); @@ -1103,7 +1103,7 @@ SILoadStoreOptimizer::mergeBufferStorePair(CombineInfo &CI) { // Copy to the new source register. 
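The mergeRead2Pair, mergeSBufferLoadImmPair, and mergeBufferLoadPair changes above all share the same bookkeeping: the pair is replaced by one access into a wider register class, placed at the smaller of the two offsets, and each original result becomes a sub-register of the merged destination. The sketch below covers only that offset arithmetic; the field names are invented and the real CombineInfo carries considerably more state.

  #include <algorithm>
  #include <cstdio>

  struct MergePlan {
    unsigned MergedOffset; // where the wide access starts
    unsigned SubIdx0;      // element position of the first original access
    unsigned SubIdx1;      // element position of the second original access
  };

  // Two accesses of EltSize bytes at Offset0/Offset1 become one access of
  // 2*EltSize bytes at the smaller offset; each original result is then read
  // back out of the wide destination at its element position.
  static MergePlan planMerge(unsigned Offset0, unsigned Offset1,
                             unsigned EltSize) {
    MergePlan P;
    P.MergedOffset = std::min(Offset0, Offset1);
    P.SubIdx0 = (Offset0 - P.MergedOffset) / EltSize;
    P.SubIdx1 = (Offset1 - P.MergedOffset) / EltSize;
    return P;
  }

  int main() {
    MergePlan P = planMerge(/*Offset0=*/16, /*Offset1=*/12, /*EltSize=*/4);
    std::printf("merged offset %u, sub-elements %u and %u\n",
                P.MergedOffset, P.SubIdx0, P.SubIdx1);
    return 0;
  }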
const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI); - unsigned SrcReg = MRI->createVirtualRegister(SuperRC); + Register SrcReg = MRI->createVirtualRegister(SuperRC); const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata); const auto *Src1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata); @@ -1154,7 +1154,7 @@ SILoadStoreOptimizer::createRegOrImm(int32_t Val, MachineInstr &MI) { if (TII->isInlineConstant(V)) return MachineOperand::CreateImm(Val); - unsigned Reg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); + Register Reg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); MachineInstr *Mov = BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B32), Reg) @@ -1185,11 +1185,11 @@ unsigned SILoadStoreOptimizer::computeBase(MachineInstr &MI, createRegOrImm(static_cast(Addr.Offset >> 32), MI); const auto *CarryRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); - unsigned CarryReg = MRI->createVirtualRegister(CarryRC); - unsigned DeadCarryReg = MRI->createVirtualRegister(CarryRC); + Register CarryReg = MRI->createVirtualRegister(CarryRC); + Register DeadCarryReg = MRI->createVirtualRegister(CarryRC); - unsigned DestSub0 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); - unsigned DestSub1 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); + Register DestSub0 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); + Register DestSub1 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); MachineInstr *LoHalf = BuildMI(*MBB, MBBI, DL, TII->get(AMDGPU::V_ADD_I32_e64), DestSub0) .addReg(CarryReg, RegState::Define) @@ -1209,7 +1209,7 @@ unsigned SILoadStoreOptimizer::computeBase(MachineInstr &MI, (void)HiHalf; LLVM_DEBUG(dbgs() << " "; HiHalf->dump();); - unsigned FullDestReg = MRI->createVirtualRegister(&AMDGPU::VReg_64RegClass); + Register FullDestReg = MRI->createVirtualRegister(&AMDGPU::VReg_64RegClass); MachineInstr *FullBase = BuildMI(*MBB, MBBI, DL, TII->get(TargetOpcode::REG_SEQUENCE), FullDestReg) .addReg(DestSub0) diff --git a/lib/Target/AMDGPU/SILowerControlFlow.cpp b/lib/Target/AMDGPU/SILowerControlFlow.cpp index 0070b1229c9..add9824a501 100644 --- a/lib/Target/AMDGPU/SILowerControlFlow.cpp +++ b/lib/Target/AMDGPU/SILowerControlFlow.cpp @@ -149,7 +149,7 @@ char &llvm::SILowerControlFlowID = SILowerControlFlow::ID; static bool isSimpleIf(const MachineInstr &MI, const MachineRegisterInfo *MRI, const SIInstrInfo *TII) { - unsigned SaveExecReg = MI.getOperand(0).getReg(); + Register SaveExecReg = MI.getOperand(0).getReg(); auto U = MRI->use_instr_nodbg_begin(SaveExecReg); if (U == MRI->use_instr_nodbg_end() || @@ -209,7 +209,7 @@ void SILowerControlFlow::emitIf(MachineInstr &MI) { .addReg(Exec) .addReg(Exec, RegState::ImplicitDefine); - unsigned Tmp = MRI->createVirtualRegister(BoolRC); + Register Tmp = MRI->createVirtualRegister(BoolRC); MachineInstr *And = BuildMI(MBB, I, DL, TII->get(AndOpc), Tmp) @@ -546,7 +546,7 @@ void SILowerControlFlow::combineMasks(MachineInstr &MI) { else if (Ops[1].isIdenticalTo(Ops[2])) UniqueOpndIdx = 1; else return; - unsigned Reg = MI.getOperand(OpToReplace).getReg(); + Register Reg = MI.getOperand(OpToReplace).getReg(); MI.RemoveOperand(OpToReplace); MI.addOperand(Ops[UniqueOpndIdx]); if (MRI->use_empty(Reg)) diff --git a/lib/Target/AMDGPU/SILowerI1Copies.cpp b/lib/Target/AMDGPU/SILowerI1Copies.cpp index 8b46f3d8044..8bc449a3f02 100644 --- a/lib/Target/AMDGPU/SILowerI1Copies.cpp +++ b/lib/Target/AMDGPU/SILowerI1Copies.cpp @@ -497,8 +497,8 @@ void 
SILowerI1Copies::lowerCopiesFromI1() { if (MI.getOpcode() != AMDGPU::COPY) continue; - unsigned DstReg = MI.getOperand(0).getReg(); - unsigned SrcReg = MI.getOperand(1).getReg(); + Register DstReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); if (!isVreg1(SrcReg)) continue; @@ -544,7 +544,7 @@ void SILowerI1Copies::lowerPhis() { LF.initialize(MBB); for (MachineInstr &MI : MBB.phis()) { - unsigned DstReg = MI.getOperand(0).getReg(); + Register DstReg = MI.getOperand(0).getReg(); if (!isVreg1(DstReg)) continue; @@ -556,7 +556,7 @@ void SILowerI1Copies::lowerPhis() { // Collect incoming values. for (unsigned i = 1; i < MI.getNumOperands(); i += 2) { assert(i + 1 < MI.getNumOperands()); - unsigned IncomingReg = MI.getOperand(i).getReg(); + Register IncomingReg = MI.getOperand(i).getReg(); MachineBasicBlock *IncomingMBB = MI.getOperand(i + 1).getMBB(); MachineInstr *IncomingDef = MRI->getUniqueVRegDef(IncomingReg); @@ -669,7 +669,7 @@ void SILowerI1Copies::lowerCopiesToI1() { MI.getOpcode() != AMDGPU::COPY) continue; - unsigned DstReg = MI.getOperand(0).getReg(); + Register DstReg = MI.getOperand(0).getReg(); if (!isVreg1(DstReg)) continue; @@ -686,7 +686,7 @@ void SILowerI1Copies::lowerCopiesToI1() { continue; DebugLoc DL = MI.getDebugLoc(); - unsigned SrcReg = MI.getOperand(1).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); assert(!MI.getOperand(1).getSubReg()); if (!Register::isVirtualRegister(SrcReg) || diff --git a/lib/Target/AMDGPU/SILowerSGPRSpills.cpp b/lib/Target/AMDGPU/SILowerSGPRSpills.cpp index a8204747337..714d403a3e8 100644 --- a/lib/Target/AMDGPU/SILowerSGPRSpills.cpp +++ b/lib/Target/AMDGPU/SILowerSGPRSpills.cpp @@ -278,8 +278,8 @@ bool SILowerSGPRSpills::runOnMachineFunction(MachineFunction &MF) { unsigned FIOp = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr); int FI = MI.getOperand(FIOp).getIndex(); - unsigned VReg = TII->getNamedOperand(MI, AMDGPU::OpName::vdata) - ->getReg(); + Register VReg = + TII->getNamedOperand(MI, AMDGPU::OpName::vdata)->getReg(); if (FuncInfo->allocateVGPRSpillToAGPR(MF, FI, TRI->isAGPR(MRI, VReg))) { TRI->eliminateFrameIndex(MI, 0, FIOp, nullptr); diff --git a/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp b/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp index 00675bb5798..b8e076f5efd 100644 --- a/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp +++ b/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp @@ -328,7 +328,7 @@ bool SIOptimizeExecMasking::runOnMachineFunction(MachineFunction &MF) { continue; } - unsigned CopyFromExec = CopyFromExecInst->getOperand(0).getReg(); + Register CopyFromExec = CopyFromExecInst->getOperand(0).getReg(); MachineInstr *SaveExecInst = nullptr; SmallVector OtherUseInsts; diff --git a/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp b/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp index b04df380103..0eb850fe176 100644 --- a/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp +++ b/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp @@ -211,7 +211,7 @@ static unsigned optimizeVcndVcmpPair(MachineBasicBlock &MBB, return AMDGPU::NoRegister; MachineOperand *AndCC = &And->getOperand(1); - unsigned CmpReg = AndCC->getReg(); + Register CmpReg = AndCC->getReg(); unsigned CmpSubReg = AndCC->getSubReg(); if (CmpReg == ExecReg) { AndCC = &And->getOperand(2); @@ -234,7 +234,7 @@ static unsigned optimizeVcndVcmpPair(MachineBasicBlock &MBB, if (!Op1->isReg() || !Op2->isImm() || Op2->getImm() != 1) return AMDGPU::NoRegister; - unsigned SelReg = Op1->getReg(); + Register SelReg = Op1->getReg(); auto 
*Sel = TRI->findReachingDef(SelReg, Op1->getSubReg(), *Cmp, MRI, LIS); if (!Sel || Sel->getOpcode() != AMDGPU::V_CNDMASK_B32_e64) return AMDGPU::NoRegister; @@ -253,7 +253,7 @@ static unsigned optimizeVcndVcmpPair(MachineBasicBlock &MBB, LLVM_DEBUG(dbgs() << "Folding sequence:\n\t" << *Sel << '\t' << *Cmp << '\t' << *And); - unsigned CCReg = CC->getReg(); + Register CCReg = CC->getReg(); LIS->RemoveMachineInstrFromMaps(*And); MachineInstr *Andn2 = BuildMI(MBB, *And, And->getDebugLoc(), TII->get(Andn2Opc), And->getOperand(0).getReg()) @@ -412,7 +412,7 @@ bool SIOptimizeExecMaskingPreRA::runOnMachineFunction(MachineFunction &MF) { if (!SaveExec || !SaveExec->isFullCopy()) continue; - unsigned SavedExec = SaveExec->getOperand(0).getReg(); + Register SavedExec = SaveExec->getOperand(0).getReg(); bool SafeToReplace = true; for (auto& U : MRI.use_nodbg_instructions(SavedExec)) { if (U.getParent() != SaveExec->getParent()) { diff --git a/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/lib/Target/AMDGPU/SIPeepholeSDWA.cpp index afb047c24df..9b3b2436475 100644 --- a/lib/Target/AMDGPU/SIPeepholeSDWA.cpp +++ b/lib/Target/AMDGPU/SIPeepholeSDWA.cpp @@ -1189,7 +1189,7 @@ void SIPeepholeSDWA::legalizeScalarOperands(MachineInstr &MI, continue; } - unsigned VGPR = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); + Register VGPR = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); auto Copy = BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(), TII->get(AMDGPU::V_MOV_B32_e32), VGPR); if (Op.isImm()) diff --git a/lib/Target/AMDGPU/SIPreAllocateWWMRegs.cpp b/lib/Target/AMDGPU/SIPreAllocateWWMRegs.cpp index 1cda9932785..6cdd12d0e7b 100644 --- a/lib/Target/AMDGPU/SIPreAllocateWWMRegs.cpp +++ b/lib/Target/AMDGPU/SIPreAllocateWWMRegs.cpp @@ -90,7 +90,7 @@ bool SIPreAllocateWWMRegs::processDef(MachineOperand &MO) { if (!MO.isReg()) return false; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!TRI->isVGPR(*MRI, Reg)) return false; @@ -124,14 +124,14 @@ void SIPreAllocateWWMRegs::rewriteRegs(MachineFunction &MF) { if (!MO.isReg()) continue; - const unsigned VirtReg = MO.getReg(); + const Register VirtReg = MO.getReg(); if (Register::isPhysicalRegister(VirtReg)) continue; if (!VRM->hasPhys(VirtReg)) continue; - unsigned PhysReg = VRM->getPhys(VirtReg); + Register PhysReg = VRM->getPhys(VirtReg); const unsigned SubReg = MO.getSubReg(); if (SubReg != 0) { PhysReg = TRI->getSubReg(PhysReg, SubReg); @@ -149,7 +149,7 @@ void SIPreAllocateWWMRegs::rewriteRegs(MachineFunction &MF) { for (unsigned Reg : RegsToRewrite) { LIS->removeInterval(Reg); - const unsigned PhysReg = VRM->getPhys(Reg); + const Register PhysReg = VRM->getPhys(Reg); assert(PhysReg != 0); MFI->ReserveWWMRegister(PhysReg); } diff --git a/lib/Target/AMDGPU/SIRegisterInfo.cpp b/lib/Target/AMDGPU/SIRegisterInfo.cpp index c16bdad04e3..08d7167c67f 100644 --- a/lib/Target/AMDGPU/SIRegisterInfo.cpp +++ b/lib/Target/AMDGPU/SIRegisterInfo.cpp @@ -390,9 +390,9 @@ void SIRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB, } MachineRegisterInfo &MRI = MF->getRegInfo(); - unsigned OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); + Register OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); - unsigned FIReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); + Register FIReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg) .addImm(Offset); @@ -715,8 +715,9 @@ void 
SIRegisterInfo::buildSpillLoadStore(MachineBasicBlock::iterator MI, } for (unsigned i = 0, e = NumSubRegs; i != e; ++i, Offset += EltSize) { - unsigned SubReg = NumSubRegs == 1 ? - Register(ValueReg) : getSubReg(ValueReg, getSubRegFromChannel(i)); + Register SubReg = NumSubRegs == 1 + ? Register(ValueReg) + : getSubReg(ValueReg, getSubRegFromChannel(i)); unsigned SOffsetRegState = 0; unsigned SrcDstRegState = getDefRegState(!IsStore); @@ -851,8 +852,8 @@ bool SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI, // SubReg carries the "Kill" flag when SubReg == SuperReg. unsigned SubKillState = getKillRegState((NumSubRegs == 1) && IsKill); for (unsigned i = 0, e = NumSubRegs; i < e; ++i) { - unsigned SubReg = NumSubRegs == 1 ? - SuperReg : getSubReg(SuperReg, SplitParts[i]); + Register SubReg = + NumSubRegs == 1 ? SuperReg : getSubReg(SuperReg, SplitParts[i]); if (SpillToSMEM) { int64_t FrOffset = FrameInfo.getObjectOffset(Index); @@ -924,7 +925,7 @@ bool SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI, // Spill SGPR to a frame index. // TODO: Should VI try to spill to VGPR and then spill to SMEM? - unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); + Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); // TODO: Should VI try to spill to VGPR and then spill to SMEM? MachineInstrBuilder Mov @@ -1026,8 +1027,8 @@ bool SIRegisterInfo::restoreSGPR(MachineBasicBlock::iterator MI, int64_t FrOffset = FrameInfo.getObjectOffset(Index); for (unsigned i = 0, e = NumSubRegs; i < e; ++i) { - unsigned SubReg = NumSubRegs == 1 ? - SuperReg : getSubReg(SuperReg, SplitParts[i]); + Register SubReg = + NumSubRegs == 1 ? SuperReg : getSubReg(SuperReg, SplitParts[i]); if (SpillToSMEM) { // FIXME: Size may be > 4 but extra bytes wasted. @@ -1079,7 +1080,7 @@ bool SIRegisterInfo::restoreSGPR(MachineBasicBlock::iterator MI, // Restore SGPR from a stack slot. // FIXME: We should use S_LOAD_DWORD here for VI. - unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); + Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); unsigned Align = FrameInfo.getObjectAlignment(Index); MachinePointerInfo PtrInfo @@ -1263,8 +1264,8 @@ void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI, // In an entry function/kernel the offset is already the absolute // address relative to the frame register. - unsigned DiffReg - = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); + Register DiffReg = + MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); bool IsCopy = MI->getOpcode() == AMDGPU::V_MOV_B32_e32; Register ResultReg = IsCopy ? 
@@ -1282,8 +1283,8 @@ void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI, .addImm(Log2_32(ST.getWavefrontSize())) .addReg(DiffReg); } else { - unsigned ScaledReg - = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); + Register ScaledReg = + MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64), ScaledReg) .addImm(Log2_32(ST.getWavefrontSize())) @@ -1296,8 +1297,8 @@ void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI, .addReg(ScaledReg, RegState::Kill) .addImm(0); // clamp bit } else { - unsigned ConstOffsetReg - = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); + Register ConstOffsetReg = + MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), ConstOffsetReg) .addImm(Offset); @@ -1345,7 +1346,7 @@ void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI, int64_t Offset = FrameInfo.getObjectOffset(Index); FIOp.ChangeToImmediate(Offset); if (!TII->isImmOperandLegal(*MI, FIOperandNum, FIOp)) { - unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); + Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg) .addImm(Offset); FIOp.ChangeToRegister(TmpReg, false, false, true); diff --git a/lib/Target/AMDGPU/SIShrinkInstructions.cpp b/lib/Target/AMDGPU/SIShrinkInstructions.cpp index c208cf1ef1e..8afca2cdc32 100644 --- a/lib/Target/AMDGPU/SIShrinkInstructions.cpp +++ b/lib/Target/AMDGPU/SIShrinkInstructions.cpp @@ -77,7 +77,7 @@ static bool foldImmediates(MachineInstr &MI, const SIInstrInfo *TII, // Try to fold Src0 MachineOperand &Src0 = MI.getOperand(Src0Idx); if (Src0.isReg()) { - unsigned Reg = Src0.getReg(); + Register Reg = Src0.getReg(); if (Register::isVirtualRegister(Reg) && MRI.hasOneUse(Reg)) { MachineInstr *Def = MRI.getUniqueVRegDef(Reg); if (Def && Def->isMoveImmediate()) { @@ -457,13 +457,13 @@ static MachineInstr* matchSwap(MachineInstr &MovT, MachineRegisterInfo &MRI, assert(MovT.getOpcode() == AMDGPU::V_MOV_B32_e32 || MovT.getOpcode() == AMDGPU::COPY); - unsigned T = MovT.getOperand(0).getReg(); + Register T = MovT.getOperand(0).getReg(); unsigned Tsub = MovT.getOperand(0).getSubReg(); MachineOperand &Xop = MovT.getOperand(1); if (!Xop.isReg()) return nullptr; - unsigned X = Xop.getReg(); + Register X = Xop.getReg(); unsigned Xsub = Xop.getSubReg(); unsigned Size = TII->getOpSize(MovT, 0) / 4; @@ -482,7 +482,7 @@ static MachineInstr* matchSwap(MachineInstr &MovT, MachineRegisterInfo &MRI, MovY.getOperand(1).getSubReg() != Tsub) continue; - unsigned Y = MovY.getOperand(0).getReg(); + Register Y = MovY.getOperand(0).getReg(); unsigned Ysub = MovY.getOperand(0).getSubReg(); if (!TRI.isVGPR(MRI, Y) || MovT.getParent() != MovY.getParent()) @@ -717,7 +717,7 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) { int Op32 = AMDGPU::getVOPe32(MI.getOpcode()); if (TII->isVOPC(Op32)) { - unsigned DstReg = MI.getOperand(0).getReg(); + Register DstReg = MI.getOperand(0).getReg(); if (Register::isVirtualRegister(DstReg)) { // VOPC instructions can only write to the VCC register. 
We can't // force them to use VCC here, because this is only one register and @@ -741,7 +741,7 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) { TII->getNamedOperand(MI, AMDGPU::OpName::src2); if (!Src2->isReg()) continue; - unsigned SReg = Src2->getReg(); + Register SReg = Src2->getReg(); if (Register::isVirtualRegister(SReg)) { MRI.setRegAllocationHint(SReg, 0, VCCReg); continue; diff --git a/lib/Target/AMDGPU/SIWholeQuadMode.cpp b/lib/Target/AMDGPU/SIWholeQuadMode.cpp index 7980f15104c..cb4cf68d709 100644 --- a/lib/Target/AMDGPU/SIWholeQuadMode.cpp +++ b/lib/Target/AMDGPU/SIWholeQuadMode.cpp @@ -273,7 +273,7 @@ void SIWholeQuadMode::markInstructionUses(const MachineInstr &MI, char Flag, if (!Use.isReg() || !Use.isUse()) continue; - unsigned Reg = Use.getReg(); + Register Reg = Use.getReg(); // Handle physical registers that we need to track; this is mostly relevant // for VCC, which can appear as the (implicit) input of a uniform branch, @@ -361,7 +361,7 @@ char SIWholeQuadMode::scanInstructions(MachineFunction &MF, if (Inactive.isUndef()) { LowerToCopyInstrs.push_back(&MI); } else { - unsigned Reg = Inactive.getReg(); + Register Reg = Inactive.getReg(); if (Register::isVirtualRegister(Reg)) { for (MachineInstr &DefMI : MRI->def_instructions(Reg)) markInstruction(DefMI, StateWWM, Worklist); @@ -390,7 +390,7 @@ char SIWholeQuadMode::scanInstructions(MachineFunction &MF, if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Register::isVirtualRegister(Reg) && TRI->hasVectorRegisters(TRI->getPhysRegClass(Reg))) { @@ -556,7 +556,7 @@ bool SIWholeQuadMode::requiresCorrectState(const MachineInstr &MI) const { MachineBasicBlock::iterator SIWholeQuadMode::saveSCC(MachineBasicBlock &MBB, MachineBasicBlock::iterator Before) { - unsigned SaveReg = MRI->createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); + Register SaveReg = MRI->createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); MachineInstr *Save = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::COPY), SaveReg) @@ -840,7 +840,7 @@ void SIWholeQuadMode::processBlock(MachineBasicBlock &MBB, unsigned LiveMaskReg, void SIWholeQuadMode::lowerLiveMaskQueries(unsigned LiveMaskReg) { for (MachineInstr *MI : LiveMaskQueries) { const DebugLoc &DL = MI->getDebugLoc(); - unsigned Dest = MI->getOperand(0).getReg(); + Register Dest = MI->getOperand(0).getReg(); MachineInstr *Copy = BuildMI(*MI->getParent(), MI, DL, TII->get(AMDGPU::COPY), Dest) .addReg(LiveMaskReg); @@ -855,7 +855,7 @@ void SIWholeQuadMode::lowerCopyInstrs() { for (unsigned i = MI->getNumExplicitOperands() - 1; i > 1; i--) MI->RemoveOperand(i); - const unsigned Reg = MI->getOperand(0).getReg(); + const Register Reg = MI->getOperand(0).getReg(); if (TRI->isVGPR(*MRI, Reg)) { const TargetRegisterClass *regClass = Register::isVirtualRegister(Reg) diff --git a/lib/Target/ARC/ARCISelLowering.cpp b/lib/Target/ARC/ARCISelLowering.cpp index 847d23f0abd..751fd567bae 100644 --- a/lib/Target/ARC/ARCISelLowering.cpp +++ b/lib/Target/ARC/ARCISelLowering.cpp @@ -716,7 +716,7 @@ SDValue ARCTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { SDLoc dl(Op); assert(cast(Op.getOperand(0))->getZExtValue() == 0 && "Only support lowering frame addr of current frame."); - unsigned FrameReg = ARI.getFrameRegister(MF); + Register FrameReg = ARI.getFrameRegister(MF); return DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); } diff --git a/lib/Target/ARC/ARCOptAddrMode.cpp b/lib/Target/ARC/ARCOptAddrMode.cpp index 
2ef42589214..22a3b9111c8 100644 --- a/lib/Target/ARC/ARCOptAddrMode.cpp +++ b/lib/Target/ARC/ARCOptAddrMode.cpp @@ -180,7 +180,7 @@ static bool isLoadStoreThatCanHandleDisplacement(const TargetInstrInfo *TII, bool ARCOptAddrMode::noUseOfAddBeforeLoadOrStore(const MachineInstr *Add, const MachineInstr *Ldst) { - unsigned R = Add->getOperand(0).getReg(); + Register R = Add->getOperand(0).getReg(); return dominatesAllUsesOf(Ldst, R, MDT, MRI); } @@ -204,7 +204,7 @@ MachineInstr *ARCOptAddrMode::tryToCombine(MachineInstr &Ldst) { return nullptr; } - unsigned B = Base.getReg(); + Register B = Base.getReg(); if (Register::isStackSlot(B) || !Register::isVirtualRegister(B)) { LLVM_DEBUG(dbgs() << "[ABAW] Base is not VReg\n"); return nullptr; @@ -283,7 +283,7 @@ ARCOptAddrMode::canJoinInstructions(MachineInstr *Ldst, MachineInstr *Add, return nullptr; } - unsigned BaseReg = Ldst->getOperand(BasePos).getReg(); + Register BaseReg = Ldst->getOperand(BasePos).getReg(); // prohibit this: // v1 = add v0, c @@ -292,7 +292,7 @@ ARCOptAddrMode::canJoinInstructions(MachineInstr *Ldst, MachineInstr *Add, // st v0, [v0, 0] // v1 = add v0, c if (Ldst->mayStore() && Ldst->getOperand(0).isReg()) { - unsigned StReg = Ldst->getOperand(0).getReg(); + Register StReg = Ldst->getOperand(0).getReg(); if (Add->getOperand(0).getReg() == StReg || BaseReg == StReg) { LLVM_DEBUG(dbgs() << "[canJoinInstructions] Store uses result of Add\n"); return nullptr; @@ -445,7 +445,7 @@ void ARCOptAddrMode::changeToAddrMode(MachineInstr &Ldst, unsigned NewOpcode, MachineOperand Src = MachineOperand::CreateImm(0xDEADBEEF); AII->getBaseAndOffsetPosition(Ldst, BasePos, OffPos); - unsigned BaseReg = Ldst.getOperand(BasePos).getReg(); + Register BaseReg = Ldst.getOperand(BasePos).getReg(); Ldst.RemoveOperand(OffPos); Ldst.RemoveOperand(BasePos); diff --git a/lib/Target/ARC/ARCRegisterInfo.cpp b/lib/Target/ARC/ARCRegisterInfo.cpp index 9c8340ac8f8..a7f89b385ff 100644 --- a/lib/Target/ARC/ARCRegisterInfo.cpp +++ b/lib/Target/ARC/ARCRegisterInfo.cpp @@ -206,7 +206,7 @@ void ARCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, LLVM_DEBUG(dbgs() << "Offset : " << Offset << "\n" << "<--------->\n"); - unsigned Reg = MI.getOperand(0).getReg(); + Register Reg = MI.getOperand(0).getReg(); assert(ARC::GPR32RegClass.contains(Reg) && "Unexpected register operand"); if (!TFI->hasFP(MF)) { diff --git a/lib/Target/ARM/A15SDOptimizer.cpp b/lib/Target/ARM/A15SDOptimizer.cpp index a4aacfb6dc8..30b9c8071ba 100644 --- a/lib/Target/ARM/A15SDOptimizer.cpp +++ b/lib/Target/ARM/A15SDOptimizer.cpp @@ -133,7 +133,7 @@ bool A15SDOptimizer::usesRegClass(MachineOperand &MO, const TargetRegisterClass *TRC) { if (!MO.isReg()) return false; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Register::isVirtualRegister(Reg)) return MRI->getRegClass(Reg)->hasSuperClassEq(TRC); @@ -191,7 +191,7 @@ void A15SDOptimizer::eraseInstrWithNoUses(MachineInstr *MI) { for (MachineOperand &MO : MI->operands()) { if ((!MO.isReg()) || (!MO.isUse())) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Register::isVirtualRegister(Reg)) continue; MachineOperand *Op = MI->findRegisterDefOperand(Reg); @@ -213,7 +213,7 @@ void A15SDOptimizer::eraseInstrWithNoUses(MachineInstr *MI) { for (MachineOperand &MODef : Def->operands()) { if ((!MODef.isReg()) || (!MODef.isDef())) continue; - unsigned DefReg = MODef.getReg(); + Register DefReg = MODef.getReg(); if (!Register::isVirtualRegister(DefReg)) { IsDead = false; break; @@ -245,8 +245,8 @@ 
unsigned A15SDOptimizer::optimizeSDPattern(MachineInstr *MI) { } if (MI->isInsertSubreg()) { - unsigned DPRReg = MI->getOperand(1).getReg(); - unsigned SPRReg = MI->getOperand(2).getReg(); + Register DPRReg = MI->getOperand(1).getReg(); + Register SPRReg = MI->getOperand(2).getReg(); if (Register::isVirtualRegister(DPRReg) && Register::isVirtualRegister(SPRReg)) { MachineInstr *DPRMI = MRI->getVRegDef(MI->getOperand(1).getReg()); @@ -267,7 +267,7 @@ unsigned A15SDOptimizer::optimizeSDPattern(MachineInstr *MI) { // Find the thing we're subreg copying out of - is it of the same // regclass as DPRMI? (i.e. a DPR or QPR). - unsigned FullReg = SPRMI->getOperand(1).getReg(); + Register FullReg = SPRMI->getOperand(1).getReg(); const TargetRegisterClass *TRC = MRI->getRegClass(MI->getOperand(1).getReg()); if (TRC->hasSuperClassEq(MRI->getRegClass(FullReg))) { @@ -296,7 +296,7 @@ unsigned A15SDOptimizer::optimizeSDPattern(MachineInstr *MI) { if (!MI->getOperand(I).isReg()) continue; ++NumTotal; - unsigned OpReg = MI->getOperand(I).getReg(); + Register OpReg = MI->getOperand(I).getReg(); if (!Register::isVirtualRegister(OpReg)) break; @@ -369,7 +369,7 @@ void A15SDOptimizer::elideCopiesAndPHIs(MachineInstr *MI, Reached.insert(MI); if (MI->isPHI()) { for (unsigned I = 1, E = MI->getNumOperands(); I != E; I += 2) { - unsigned Reg = MI->getOperand(I).getReg(); + Register Reg = MI->getOperand(I).getReg(); if (!Register::isVirtualRegister(Reg)) { continue; } @@ -418,8 +418,8 @@ unsigned A15SDOptimizer::createDupLane(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const DebugLoc &DL, unsigned Reg, unsigned Lane, bool QPR) { - unsigned Out = MRI->createVirtualRegister(QPR ? &ARM::QPRRegClass : - &ARM::DPRRegClass); + Register Out = + MRI->createVirtualRegister(QPR ? &ARM::QPRRegClass : &ARM::DPRRegClass); BuildMI(MBB, InsertBefore, DL, TII->get(QPR ? 
ARM::VDUPLN32q : ARM::VDUPLN32d), Out) .addReg(Reg) @@ -434,7 +434,7 @@ unsigned A15SDOptimizer::createExtractSubreg( MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const DebugLoc &DL, unsigned DReg, unsigned Lane, const TargetRegisterClass *TRC) { - unsigned Out = MRI->createVirtualRegister(TRC); + Register Out = MRI->createVirtualRegister(TRC); BuildMI(MBB, InsertBefore, DL, @@ -448,7 +448,7 @@ unsigned A15SDOptimizer::createExtractSubreg( unsigned A15SDOptimizer::createRegSequence( MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const DebugLoc &DL, unsigned Reg1, unsigned Reg2) { - unsigned Out = MRI->createVirtualRegister(&ARM::QPRRegClass); + Register Out = MRI->createVirtualRegister(&ARM::QPRRegClass); BuildMI(MBB, InsertBefore, DL, @@ -466,7 +466,7 @@ unsigned A15SDOptimizer::createVExt(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const DebugLoc &DL, unsigned Ssub0, unsigned Ssub1) { - unsigned Out = MRI->createVirtualRegister(&ARM::DPRRegClass); + Register Out = MRI->createVirtualRegister(&ARM::DPRRegClass); BuildMI(MBB, InsertBefore, DL, TII->get(ARM::VEXTd32), Out) .addReg(Ssub0) .addReg(Ssub1) @@ -478,7 +478,7 @@ unsigned A15SDOptimizer::createVExt(MachineBasicBlock &MBB, unsigned A15SDOptimizer::createInsertSubreg( MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const DebugLoc &DL, unsigned DReg, unsigned Lane, unsigned ToInsert) { - unsigned Out = MRI->createVirtualRegister(&ARM::DPR_VFP2RegClass); + Register Out = MRI->createVirtualRegister(&ARM::DPR_VFP2RegClass); BuildMI(MBB, InsertBefore, DL, @@ -494,7 +494,7 @@ unsigned A15SDOptimizer::createImplicitDef(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const DebugLoc &DL) { - unsigned Out = MRI->createVirtualRegister(&ARM::DPRRegClass); + Register Out = MRI->createVirtualRegister(&ARM::DPRRegClass); BuildMI(MBB, InsertBefore, DL, @@ -622,7 +622,7 @@ bool A15SDOptimizer::runOnInstruction(MachineInstr *MI) { // Collect all the uses of this MI's DPR def for updating later. SmallVector Uses; - unsigned DPRDefReg = MI->getOperand(0).getReg(); + Register DPRDefReg = MI->getOperand(0).getReg(); for (MachineRegisterInfo::use_iterator I = MRI->use_begin(DPRDefReg), E = MRI->use_end(); I != E; ++I) Uses.push_back(&*I); diff --git a/lib/Target/ARM/ARMAsmPrinter.cpp b/lib/Target/ARM/ARMAsmPrinter.cpp index b8d2c7fbbe5..9bd7ae7ca99 100644 --- a/lib/Target/ARM/ARMAsmPrinter.cpp +++ b/lib/Target/ARM/ARMAsmPrinter.cpp @@ -203,7 +203,7 @@ void ARMAsmPrinter::printOperand(const MachineInstr *MI, int OpNum, switch (MO.getType()) { default: llvm_unreachable(""); case MachineOperand::MO_Register: { - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); assert(Register::isPhysicalRegister(Reg)); assert(!MO.getSubReg() && "Subregs should be eliminated!"); if(ARM::GPRPairRegClass.contains(Reg)) { @@ -275,7 +275,7 @@ bool ARMAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum, return false; case 'y': // Print a VFP single precision register as indexed double. if (MI->getOperand(OpNum).isReg()) { - unsigned Reg = MI->getOperand(OpNum).getReg(); + Register Reg = MI->getOperand(OpNum).getReg(); const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo(); // Find the 'd' register that has this 's' register as a sub-register, // and determine the lane number. 
@@ -302,14 +302,14 @@ bool ARMAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum, if (!MI->getOperand(OpNum).isReg()) return true; const MachineOperand &MO = MI->getOperand(OpNum); - unsigned RegBegin = MO.getReg(); + Register RegBegin = MO.getReg(); // This takes advantage of the 2 operand-ness of ldm/stm and that we've // already got the operands in registers that are operands to the // inline asm statement. O << "{"; if (ARM::GPRPairRegClass.contains(RegBegin)) { const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo(); - unsigned Reg0 = TRI->getSubReg(RegBegin, ARM::gsub_0); + Register Reg0 = TRI->getSubReg(RegBegin, ARM::gsub_0); O << ARMInstPrinter::getRegisterName(Reg0) << ", "; RegBegin = TRI->getSubReg(RegBegin, ARM::gsub_1); } @@ -378,8 +378,8 @@ bool ARMAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum, if (!MO.isReg()) return true; const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo(); - unsigned Reg = TRI->getSubReg(MO.getReg(), FirstHalf ? - ARM::gsub_0 : ARM::gsub_1); + Register Reg = + TRI->getSubReg(MO.getReg(), FirstHalf ? ARM::gsub_0 : ARM::gsub_1); O << ARMInstPrinter::getRegisterName(Reg); return false; } @@ -391,7 +391,7 @@ bool ARMAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum, const MachineOperand &MO = MI->getOperand(RegOp); if (!MO.isReg()) return true; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); O << ARMInstPrinter::getRegisterName(Reg); return false; } @@ -400,12 +400,12 @@ bool ARMAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum, case 'f': { // The high doubleword register of a NEON quad register. if (!MI->getOperand(OpNum).isReg()) return true; - unsigned Reg = MI->getOperand(OpNum).getReg(); + Register Reg = MI->getOperand(OpNum).getReg(); if (!ARM::QPRRegClass.contains(Reg)) return true; const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo(); - unsigned SubReg = TRI->getSubReg(Reg, ExtraCode[0] == 'e' ? - ARM::dsub_0 : ARM::dsub_1); + Register SubReg = + TRI->getSubReg(Reg, ExtraCode[0] == 'e' ? ARM::dsub_0 : ARM::dsub_1); O << ARMInstPrinter::getRegisterName(SubReg); return false; } @@ -419,7 +419,7 @@ bool ARMAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum, return true; const MachineFunction &MF = *MI->getParent()->getParent(); const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if(!ARM::GPRPairRegClass.contains(Reg)) return false; Reg = TRI->getSubReg(Reg, ARM::gsub_1); @@ -1072,7 +1072,7 @@ void ARMAsmPrinter::EmitUnwindingInstruction(const MachineInstr *MI) { MF.getSubtarget().getRegisterInfo(); const MachineRegisterInfo &MachineRegInfo = MF.getRegInfo(); - unsigned FramePtr = TargetRegInfo->getFrameRegister(MF); + Register FramePtr = TargetRegInfo->getFrameRegister(MF); unsigned Opc = MI->getOpcode(); unsigned SrcReg, DstReg; @@ -1136,7 +1136,7 @@ void ARMAsmPrinter::EmitUnwindingInstruction(const MachineInstr *MI) { } // Check for registers that are remapped (for a Thumb1 prologue that // saves high registers). - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (unsigned RemappedReg = AFI->EHPrologueRemappedRegs.lookup(Reg)) Reg = RemappedReg; RegList.push_back(Reg); @@ -1326,7 +1326,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) { // So here we generate a bl to a small jump pad that does bx rN. // The jump pads are emitted after the function body. 
- unsigned TReg = MI->getOperand(0).getReg(); + Register TReg = MI->getOperand(0).getReg(); MCSymbol *TRegSym = nullptr; for (std::pair &TIP : ThumbIndirectPads) { if (TIP.first == TReg) { @@ -1663,8 +1663,8 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) { case ARM::tTBH_JT: { bool Is8Bit = MI->getOpcode() == ARM::tTBB_JT; - unsigned Base = MI->getOperand(0).getReg(); - unsigned Idx = MI->getOperand(1).getReg(); + Register Base = MI->getOperand(0).getReg(); + Register Idx = MI->getOperand(1).getReg(); assert(MI->getOperand(1).isKill() && "We need the index register as scratch!"); // Multiply up idx if necessary. @@ -1844,8 +1844,8 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) { // b LSJLJEH // movs r0, #1 // LSJLJEH: - unsigned SrcReg = MI->getOperand(0).getReg(); - unsigned ValReg = MI->getOperand(1).getReg(); + Register SrcReg = MI->getOperand(0).getReg(); + Register ValReg = MI->getOperand(1).getReg(); MCSymbol *Label = OutContext.createTempSymbol("SJLJEH", false, true); OutStreamer->AddComment("eh_setjmp begin"); EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::tMOVr) @@ -1910,8 +1910,8 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) { // mov r0, #0 // add pc, pc, #0 // mov r0, #1 - unsigned SrcReg = MI->getOperand(0).getReg(); - unsigned ValReg = MI->getOperand(1).getReg(); + Register SrcReg = MI->getOperand(0).getReg(); + Register ValReg = MI->getOperand(1).getReg(); OutStreamer->AddComment("eh_setjmp begin"); EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::ADDri) @@ -1967,8 +1967,8 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) { // ldr $scratch, [$src, #4] // ldr r7, [$src] // bx $scratch - unsigned SrcReg = MI->getOperand(0).getReg(); - unsigned ScratchReg = MI->getOperand(1).getReg(); + Register SrcReg = MI->getOperand(0).getReg(); + Register ScratchReg = MI->getOperand(1).getReg(); EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::LDRi12) .addReg(ARM::SP) .addReg(SrcReg) @@ -2027,8 +2027,8 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) { // ldr $scratch, [$src, #4] // ldr r7, [$src] // bx $scratch - unsigned SrcReg = MI->getOperand(0).getReg(); - unsigned ScratchReg = MI->getOperand(1).getReg(); + Register SrcReg = MI->getOperand(0).getReg(); + Register ScratchReg = MI->getOperand(1).getReg(); EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::tLDRi) .addReg(ScratchReg) @@ -2095,7 +2095,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) { // ldr.w sp, [$src, #8] // ldr.w pc, [$src, #4] - unsigned SrcReg = MI->getOperand(0).getReg(); + Register SrcReg = MI->getOperand(0).getReg(); EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::t2LDRi12) .addReg(ARM::R11) diff --git a/lib/Target/ARM/ARMBaseInstrInfo.cpp b/lib/Target/ARM/ARMBaseInstrInfo.cpp index 763722cbf1d..d166a6d7eeb 100644 --- a/lib/Target/ARM/ARMBaseInstrInfo.cpp +++ b/lib/Target/ARM/ARMBaseInstrInfo.cpp @@ -172,9 +172,9 @@ MachineInstr *ARMBaseInstrInfo::convertToThreeAddress( const MachineOperand &WB = isLoad ? 
MI.getOperand(1) : MI.getOperand(0); const MachineOperand &Base = MI.getOperand(2); const MachineOperand &Offset = MI.getOperand(NumOps - 3); - unsigned WBReg = WB.getReg(); - unsigned BaseReg = Base.getReg(); - unsigned OffReg = Offset.getReg(); + Register WBReg = WB.getReg(); + Register BaseReg = Base.getReg(); + Register OffReg = Offset.getReg(); unsigned OffImm = MI.getOperand(NumOps - 2).getImm(); ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI.getOperand(NumOps - 1).getImm(); switch (AddrMode) { @@ -277,7 +277,7 @@ MachineInstr *ARMBaseInstrInfo::convertToThreeAddress( for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { MachineOperand &MO = MI.getOperand(i); if (MO.isReg() && Register::isVirtualRegister(MO.getReg())) { - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); LiveVariables::VarInfo &VI = LV->getVarInfo(Reg); if (MO.isDef()) { @@ -966,8 +966,8 @@ void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB, SmallSet DstRegs; #endif for (unsigned i = 0; i != SubRegs; ++i) { - unsigned Dst = TRI->getSubReg(DestReg, BeginIdx + i * Spacing); - unsigned Src = TRI->getSubReg(SrcReg, BeginIdx + i * Spacing); + Register Dst = TRI->getSubReg(DestReg, BeginIdx + i * Spacing); + Register Src = TRI->getSubReg(SrcReg, BeginIdx + i * Spacing); assert(Dst && Src && "Bad sub-register"); #ifndef NDEBUG assert(!DstRegs.count(Src) && "destructive vector copy"); @@ -1583,8 +1583,8 @@ bool ARMBaseInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { // Look for a copy between even S-registers. That is where we keep floats // when using NEON v2f32 instructions for f32 arithmetic. - unsigned DstRegS = MI.getOperand(0).getReg(); - unsigned SrcRegS = MI.getOperand(1).getReg(); + Register DstRegS = MI.getOperand(0).getReg(); + Register SrcRegS = MI.getOperand(1).getReg(); if (!ARM::SPRRegClass.contains(DstRegS, SrcRegS)) return false; @@ -1794,8 +1794,8 @@ bool ARMBaseInstrInfo::produceSameValue(const MachineInstr &MI0, if (MI0.getNumOperands() != MI1.getNumOperands()) return false; - unsigned Addr0 = MI0.getOperand(1).getReg(); - unsigned Addr1 = MI1.getOperand(1).getReg(); + Register Addr0 = MI0.getOperand(1).getReg(); + Register Addr1 = MI1.getOperand(1).getReg(); if (Addr0 != Addr1) { if (!MRI || !Register::isVirtualRegister(Addr0) || !Register::isVirtualRegister(Addr1)) @@ -2210,7 +2210,7 @@ ARMBaseInstrInfo::optimizeSelect(MachineInstr &MI, // Find new register class to use. MachineOperand FalseReg = MI.getOperand(Invert ? 2 : 1); - unsigned DestReg = MI.getOperand(0).getReg(); + Register DestReg = MI.getOperand(0).getReg(); const TargetRegisterClass *PreviousClass = MRI.getRegClass(FalseReg.getReg()); if (!MRI.constrainRegClass(DestReg, PreviousClass)) return nullptr; @@ -3271,9 +3271,9 @@ bool ARMBaseInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, } unsigned OpIdx = Commute ? 
2 : 1; - unsigned Reg1 = UseMI.getOperand(OpIdx).getReg(); + Register Reg1 = UseMI.getOperand(OpIdx).getReg(); bool isKill = UseMI.getOperand(OpIdx).isKill(); - unsigned NewReg = MRI->createVirtualRegister(MRI->getRegClass(Reg)); + Register NewReg = MRI->createVirtualRegister(MRI->getRegClass(Reg)); BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), get(NewUseOpc), NewReg) .addReg(Reg1, getKillRegState(isKill)) @@ -3335,15 +3335,15 @@ static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData, case ARM::LDRSB_POST: case ARM::LDRSH_POST: { - unsigned Rt = MI.getOperand(0).getReg(); - unsigned Rm = MI.getOperand(3).getReg(); + Register Rt = MI.getOperand(0).getReg(); + Register Rm = MI.getOperand(3).getReg(); return (Rt == Rm) ? 4 : 3; } case ARM::LDR_PRE_REG: case ARM::LDRB_PRE_REG: { - unsigned Rt = MI.getOperand(0).getReg(); - unsigned Rm = MI.getOperand(3).getReg(); + Register Rt = MI.getOperand(0).getReg(); + Register Rm = MI.getOperand(3).getReg(); if (Rt == Rm) return 3; unsigned ShOpVal = MI.getOperand(4).getImm(); @@ -3372,8 +3372,8 @@ static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData, case ARM::LDRH_PRE: case ARM::STRH_PRE: { - unsigned Rt = MI.getOperand(0).getReg(); - unsigned Rm = MI.getOperand(3).getReg(); + Register Rt = MI.getOperand(0).getReg(); + Register Rm = MI.getOperand(3).getReg(); if (!Rm) return 2; if (Rt == Rm) @@ -3384,8 +3384,8 @@ static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData, case ARM::LDR_POST_REG: case ARM::LDRB_POST_REG: case ARM::LDRH_POST: { - unsigned Rt = MI.getOperand(0).getReg(); - unsigned Rm = MI.getOperand(3).getReg(); + Register Rt = MI.getOperand(0).getReg(); + Register Rm = MI.getOperand(3).getReg(); return (Rt == Rm) ? 3 : 2; } @@ -3404,10 +3404,10 @@ static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData, case ARM::LDRSB_PRE: case ARM::LDRSH_PRE: { - unsigned Rm = MI.getOperand(3).getReg(); + Register Rm = MI.getOperand(3).getReg(); if (Rm == 0) return 3; - unsigned Rt = MI.getOperand(0).getReg(); + Register Rt = MI.getOperand(0).getReg(); if (Rt == Rm) return 4; unsigned ShOpVal = MI.getOperand(4).getImm(); @@ -3422,9 +3422,9 @@ static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData, } case ARM::LDRD: { - unsigned Rt = MI.getOperand(0).getReg(); - unsigned Rn = MI.getOperand(2).getReg(); - unsigned Rm = MI.getOperand(3).getReg(); + Register Rt = MI.getOperand(0).getReg(); + Register Rn = MI.getOperand(2).getReg(); + Register Rm = MI.getOperand(3).getReg(); if (Rm) return (ARM_AM::getAM3Op(MI.getOperand(4).getImm()) == ARM_AM::sub) ? 4 : 3; @@ -3432,7 +3432,7 @@ static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData, } case ARM::STRD: { - unsigned Rm = MI.getOperand(3).getReg(); + Register Rm = MI.getOperand(3).getReg(); if (Rm) return (ARM_AM::getAM3Op(MI.getOperand(4).getImm()) == ARM_AM::sub) ? 4 : 3; @@ -3448,9 +3448,9 @@ static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData, return 4; case ARM::LDRD_PRE: { - unsigned Rt = MI.getOperand(0).getReg(); - unsigned Rn = MI.getOperand(3).getReg(); - unsigned Rm = MI.getOperand(4).getReg(); + Register Rt = MI.getOperand(0).getReg(); + Register Rn = MI.getOperand(3).getReg(); + Register Rm = MI.getOperand(4).getReg(); if (Rm) return (ARM_AM::getAM3Op(MI.getOperand(5).getImm()) == ARM_AM::sub) ? 
5 : 4; @@ -3458,13 +3458,13 @@ static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData, } case ARM::t2LDRD_PRE: { - unsigned Rt = MI.getOperand(0).getReg(); - unsigned Rn = MI.getOperand(3).getReg(); + Register Rt = MI.getOperand(0).getReg(); + Register Rn = MI.getOperand(3).getReg(); return (Rt == Rn) ? 4 : 3; } case ARM::STRD_PRE: { - unsigned Rm = MI.getOperand(4).getReg(); + Register Rm = MI.getOperand(4).getReg(); if (Rm) return (ARM_AM::getAM3Op(MI.getOperand(5).getImm()) == ARM_AM::sub) ? 5 : 4; @@ -3495,8 +3495,8 @@ static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData, return 2; case ARM::t2LDRDi8: { - unsigned Rt = MI.getOperand(0).getReg(); - unsigned Rn = MI.getOperand(2).getReg(); + Register Rt = MI.getOperand(0).getReg(); + Register Rn = MI.getOperand(2).getReg(); return (Rt == Rn) ? 3 : 2; } @@ -3745,7 +3745,7 @@ ARMBaseInstrInfo::getVLDMDefCycle(const InstrItineraryData *ItinData, } bool ARMBaseInstrInfo::isLDMBaseRegInList(const MachineInstr &MI) const { - unsigned BaseReg = MI.getOperand(0).getReg(); + Register BaseReg = MI.getOperand(0).getReg(); for (unsigned i = 1, sz = MI.getNumOperands(); i < sz; ++i) { const auto &Op = MI.getOperand(i); if (Op.isReg() && Op.getReg() == BaseReg) @@ -4219,7 +4219,7 @@ int ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, return -1; const MachineOperand &DefMO = DefMI.getOperand(DefIdx); - unsigned Reg = DefMO.getReg(); + Register Reg = DefMO.getReg(); const MachineInstr *ResolvedDefMI = &DefMI; unsigned DefAdj = 0; @@ -4708,7 +4708,7 @@ bool ARMBaseInstrInfo::verifyInstruction(const MachineInstr &MI, if (MI.getOperand(i).isImplicit() || !MI.getOperand(i).isReg()) continue; - unsigned Reg = MI.getOperand(i).getReg(); + Register Reg = MI.getOperand(i).getReg(); if (Reg < ARM::R0 || Reg > ARM::R7) { if (!(MI.getOpcode() == ARM::tPUSH && Reg == ARM::LR) && !(MI.getOpcode() == ARM::tPOP_RET && Reg == ARM::PC)) { @@ -4731,7 +4731,7 @@ void ARMBaseInstrInfo::expandLoadStackGuardBase(MachineBasicBlock::iterator MI, MachineBasicBlock &MBB = *MI->getParent(); DebugLoc DL = MI->getDebugLoc(); - unsigned Reg = MI->getOperand(0).getReg(); + Register Reg = MI->getOperand(0).getReg(); const GlobalValue *GV = cast((*MI->memoperands_begin())->getValue()); MachineInstrBuilder MIB; @@ -5104,7 +5104,7 @@ unsigned ARMBaseInstrInfo::getPartialRegUpdateClearance( const MachineOperand &MO = MI.getOperand(OpNum); if (MO.readsReg()) return 0; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); int UseOp = -1; switch (MI.getOpcode()) { @@ -5159,7 +5159,7 @@ void ARMBaseInstrInfo::breakPartialRegDependency( assert(TRI && "Need TRI instance"); const MachineOperand &MO = MI.getOperand(OpNum); - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); assert(Register::isPhysicalRegister(Reg) && "Can't break virtual register dependencies."); unsigned DReg = Reg; @@ -5337,7 +5337,7 @@ MachineInstr *llvm::findCMPToFoldIntoCBZ(MachineInstr *Br, // is not redefined between the cmp and the br. 
if (CmpMI->getOpcode() != ARM::tCMPi8 && CmpMI->getOpcode() != ARM::t2CMPri) return nullptr; - unsigned Reg = CmpMI->getOperand(0).getReg(); + Register Reg = CmpMI->getOperand(0).getReg(); unsigned PredReg = 0; ARMCC::CondCodes Pred = getInstrPredicate(*CmpMI, PredReg); if (Pred != ARMCC::AL || CmpMI->getOperand(1).getImm() != 0) diff --git a/lib/Target/ARM/ARMCallLowering.cpp b/lib/Target/ARM/ARMCallLowering.cpp index 453a0ef1c76..3f1c7d41342 100644 --- a/lib/Target/ARM/ARMCallLowering.cpp +++ b/lib/Target/ARM/ARMCallLowering.cpp @@ -298,7 +298,7 @@ struct IncomingValueHandler : public CallLowering::ValueHandler { int FI = MFI.CreateFixedObject(Size, Offset, true); MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI); - unsigned AddrReg = + Register AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(MPO.getAddrSpace(), 32)); MIRBuilder.buildFrameIndex(AddrReg, FI); diff --git a/lib/Target/ARM/ARMConstantIslandPass.cpp b/lib/Target/ARM/ARMConstantIslandPass.cpp index c97cb08c6b8..4f66946ed89 100644 --- a/lib/Target/ARM/ARMConstantIslandPass.cpp +++ b/lib/Target/ARM/ARMConstantIslandPass.cpp @@ -1638,7 +1638,7 @@ ARMConstantIslands::fixupConditionalBr(ImmBranch &Br) { // L2: ARMCC::CondCodes CC = (ARMCC::CondCodes)MI->getOperand(1).getImm(); CC = ARMCC::getOppositeCondition(CC); - unsigned CCReg = MI->getOperand(2).getReg(); + Register CCReg = MI->getOperand(2).getReg(); // If the branch is at the end of its MBB and that has a fall-through block, // direct the updated conditional branch to the fall-through block. Otherwise, @@ -1870,7 +1870,7 @@ bool ARMConstantIslands::optimizeThumb2Branches() { if (!CmpMI || CmpMI->getOpcode() != ARM::tCMPi8) continue; - unsigned Reg = CmpMI->getOperand(0).getReg(); + Register Reg = CmpMI->getOperand(0).getReg(); // Check for Kill flags on Reg. If they are present remove them and set kill // on the new CBZ. @@ -1949,8 +1949,8 @@ bool ARMConstantIslands::preserveBaseRegister(MachineInstr *JumpMI, // of BaseReg, but only if the t2ADDrs can be removed. // + Some instruction other than t2ADDrs computing the entry. Not seen in // the wild, but we should be careful. - unsigned EntryReg = JumpMI->getOperand(0).getReg(); - unsigned BaseReg = LEAMI->getOperand(0).getReg(); + Register EntryReg = JumpMI->getOperand(0).getReg(); + Register BaseReg = LEAMI->getOperand(0).getReg(); CanDeleteLEA = true; BaseRegKill = false; @@ -2027,7 +2027,7 @@ static void RemoveDeadAddBetweenLEAAndJT(MachineInstr *LEAMI, // but the JT now uses PC. Finds the last ADD (if any) that def's EntryReg // and is not clobbered / used. MachineInstr *RemovableAdd = nullptr; - unsigned EntryReg = JumpMI->getOperand(0).getReg(); + Register EntryReg = JumpMI->getOperand(0).getReg(); // Find the last ADD to set EntryReg MachineBasicBlock::iterator I(LEAMI); @@ -2124,7 +2124,7 @@ bool ARMConstantIslands::optimizeThumb2JumpTables() { // %idx = tLSLri %idx, 2 // %base = tLEApcrelJT // %t = tLDRr %base, %idx - unsigned BaseReg = User.MI->getOperand(0).getReg(); + Register BaseReg = User.MI->getOperand(0).getReg(); if (User.MI->getIterator() == User.MI->getParent()->begin()) continue; @@ -2134,7 +2134,7 @@ bool ARMConstantIslands::optimizeThumb2JumpTables() { !Shift->getOperand(2).isKill()) continue; IdxReg = Shift->getOperand(2).getReg(); - unsigned ShiftedIdxReg = Shift->getOperand(0).getReg(); + Register ShiftedIdxReg = Shift->getOperand(0).getReg(); // It's important that IdxReg is live until the actual TBB/TBH. 
Most of // the range is checked later, but the LEA might still clobber it and not diff --git a/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/lib/Target/ARM/ARMExpandPseudoInsts.cpp index 2312e6be7ea..bd4ca3828fc 100644 --- a/lib/Target/ARM/ARMExpandPseudoInsts.cpp +++ b/lib/Target/ARM/ARMExpandPseudoInsts.cpp @@ -481,7 +481,7 @@ void ARMExpandPseudo::ExpandVLD(MachineBasicBlock::iterator &MBBI) { unsigned OpIdx = 0; bool DstIsDead = MI.getOperand(OpIdx).isDead(); - unsigned DstReg = MI.getOperand(OpIdx++).getReg(); + Register DstReg = MI.getOperand(OpIdx++).getReg(); if(TableEntry->RealOpc == ARM::VLD2DUPd8x2 || TableEntry->RealOpc == ARM::VLD2DUPd16x2 || TableEntry->RealOpc == ARM::VLD2DUPd32x2) { @@ -492,7 +492,7 @@ void ARMExpandPseudo::ExpandVLD(MachineBasicBlock::iterator &MBBI) { assert(RegSpc == OddDblSpc && "Unexpected spacing!"); SubRegIndex = ARM::dsub_1; } - unsigned SubReg = TRI->getSubReg(DstReg, SubRegIndex); + Register SubReg = TRI->getSubReg(DstReg, SubRegIndex); unsigned DstRegPair = TRI->getMatchingSuperReg(SubReg, ARM::dsub_0, &ARM::DPairSpcRegClass); MIB.addReg(DstRegPair, RegState::Define | getDeadRegState(DstIsDead)); @@ -624,7 +624,7 @@ void ARMExpandPseudo::ExpandVST(MachineBasicBlock::iterator &MBBI) { bool SrcIsKill = MI.getOperand(OpIdx).isKill(); bool SrcIsUndef = MI.getOperand(OpIdx).isUndef(); - unsigned SrcReg = MI.getOperand(OpIdx++).getReg(); + Register SrcReg = MI.getOperand(OpIdx++).getReg(); unsigned D0, D1, D2, D3; GetDSubRegs(SrcReg, RegSpc, TRI, D0, D1, D2, D3); MIB.addReg(D0, getUndefRegState(SrcIsUndef)); @@ -760,7 +760,7 @@ void ARMExpandPseudo::ExpandVTBL(MachineBasicBlock::iterator &MBBI, } bool SrcIsKill = MI.getOperand(OpIdx).isKill(); - unsigned SrcReg = MI.getOperand(OpIdx++).getReg(); + Register SrcReg = MI.getOperand(OpIdx++).getReg(); unsigned D0, D1, D2, D3; GetDSubRegs(SrcReg, SingleSpc, TRI, D0, D1, D2, D3); MIB.addReg(D0); @@ -829,7 +829,7 @@ void ARMExpandPseudo::ExpandMOV32BitImm(MachineBasicBlock &MBB, unsigned Opcode = MI.getOpcode(); unsigned PredReg = 0; ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg); - unsigned DstReg = MI.getOperand(0).getReg(); + Register DstReg = MI.getOperand(0).getReg(); bool DstIsDead = MI.getOperand(0).isDead(); bool isCC = Opcode == ARM::MOVCCi32imm || Opcode == ARM::t2MOVCCi32imm; const MachineOperand &MO = MI.getOperand(isCC ? 2 : 1); @@ -933,13 +933,13 @@ bool ARMExpandPseudo::ExpandCMP_SWAP(MachineBasicBlock &MBB, MachineInstr &MI = *MBBI; DebugLoc DL = MI.getDebugLoc(); const MachineOperand &Dest = MI.getOperand(0); - unsigned TempReg = MI.getOperand(1).getReg(); + Register TempReg = MI.getOperand(1).getReg(); // Duplicating undef operands into 2 instructions does not guarantee the same // value on both; However undef should be replaced by xzr anyway. 
assert(!MI.getOperand(2).isUndef() && "cannot handle undef"); - unsigned AddrReg = MI.getOperand(2).getReg(); - unsigned DesiredReg = MI.getOperand(3).getReg(); - unsigned NewReg = MI.getOperand(4).getReg(); + Register AddrReg = MI.getOperand(2).getReg(); + Register DesiredReg = MI.getOperand(3).getReg(); + Register NewReg = MI.getOperand(4).getReg(); MachineFunction *MF = MBB.getParent(); auto LoadCmpBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock()); @@ -1036,8 +1036,8 @@ static void addExclusiveRegPair(MachineInstrBuilder &MIB, MachineOperand &Reg, unsigned Flags, bool IsThumb, const TargetRegisterInfo *TRI) { if (IsThumb) { - unsigned RegLo = TRI->getSubReg(Reg.getReg(), ARM::gsub_0); - unsigned RegHi = TRI->getSubReg(Reg.getReg(), ARM::gsub_1); + Register RegLo = TRI->getSubReg(Reg.getReg(), ARM::gsub_0); + Register RegHi = TRI->getSubReg(Reg.getReg(), ARM::gsub_1); MIB.addReg(RegLo, Flags); MIB.addReg(RegHi, Flags); } else @@ -1052,19 +1052,19 @@ bool ARMExpandPseudo::ExpandCMP_SWAP_64(MachineBasicBlock &MBB, MachineInstr &MI = *MBBI; DebugLoc DL = MI.getDebugLoc(); MachineOperand &Dest = MI.getOperand(0); - unsigned TempReg = MI.getOperand(1).getReg(); + Register TempReg = MI.getOperand(1).getReg(); // Duplicating undef operands into 2 instructions does not guarantee the same // value on both; However undef should be replaced by xzr anyway. assert(!MI.getOperand(2).isUndef() && "cannot handle undef"); - unsigned AddrReg = MI.getOperand(2).getReg(); - unsigned DesiredReg = MI.getOperand(3).getReg(); + Register AddrReg = MI.getOperand(2).getReg(); + Register DesiredReg = MI.getOperand(3).getReg(); MachineOperand New = MI.getOperand(4); New.setIsKill(false); - unsigned DestLo = TRI->getSubReg(Dest.getReg(), ARM::gsub_0); - unsigned DestHi = TRI->getSubReg(Dest.getReg(), ARM::gsub_1); - unsigned DesiredLo = TRI->getSubReg(DesiredReg, ARM::gsub_0); - unsigned DesiredHi = TRI->getSubReg(DesiredReg, ARM::gsub_1); + Register DestLo = TRI->getSubReg(Dest.getReg(), ARM::gsub_0); + Register DestHi = TRI->getSubReg(Dest.getReg(), ARM::gsub_1); + Register DesiredLo = TRI->getSubReg(DesiredReg, ARM::gsub_0); + Register DesiredHi = TRI->getSubReg(DesiredReg, ARM::gsub_1); MachineFunction *MF = MBB.getParent(); auto LoadCmpBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock()); @@ -1337,7 +1337,7 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB, // for us. Otherwise, expand to nothing. if (RI.hasBasePointer(MF)) { int32_t NumBytes = AFI->getFramePtrSpillOffset(); - unsigned FramePtr = RI.getFrameRegister(MF); + Register FramePtr = RI.getFrameRegister(MF); assert(MF.getSubtarget().getFrameLowering()->hasFP(MF) && "base pointer without frame pointer?"); @@ -1413,7 +1413,7 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB, MachineConstantPoolValue *CPV = ARMConstantPoolSymbol::Create(MF->getFunction().getContext(), "__aeabi_read_tp", PCLabelID, 0); - unsigned Reg = MI.getOperand(0).getReg(); + Register Reg = MI.getOperand(0).getReg(); MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Thumb ? ARM::tLDRpci : ARM::LDRi12), Reg) .addConstantPoolIndex(MCP->getConstantPoolIndex(CPV, 4)); @@ -1443,7 +1443,7 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB, case ARM::t2LDRpci_pic: { unsigned NewLdOpc = (Opcode == ARM::tLDRpci_pic) ? 
ARM::tLDRpci : ARM::t2LDRpci; - unsigned DstReg = MI.getOperand(0).getReg(); + Register DstReg = MI.getOperand(0).getReg(); bool DstIsDead = MI.getOperand(0).isDead(); MachineInstrBuilder MIB1 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewLdOpc), DstReg) @@ -1465,7 +1465,7 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB, case ARM::LDRLIT_ga_pcrel_ldr: case ARM::tLDRLIT_ga_abs: case ARM::tLDRLIT_ga_pcrel: { - unsigned DstReg = MI.getOperand(0).getReg(); + Register DstReg = MI.getOperand(0).getReg(); bool DstIsDead = MI.getOperand(0).isDead(); const MachineOperand &MO1 = MI.getOperand(1); auto Flags = MO1.getTargetFlags(); @@ -1523,7 +1523,7 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB, case ARM::t2MOV_ga_pcrel: { // Expand into movw + movw. Also "add pc" / ldr [pc] in PIC mode. unsigned LabelId = AFI->createPICLabelUId(); - unsigned DstReg = MI.getOperand(0).getReg(); + Register DstReg = MI.getOperand(0).getReg(); bool DstIsDead = MI.getOperand(0).isDead(); const MachineOperand &MO1 = MI.getOperand(1); const GlobalValue *GV = MO1.getGlobal(); @@ -1587,7 +1587,7 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB, // Grab the Q register destination. bool DstIsDead = MI.getOperand(OpIdx).isDead(); - unsigned DstReg = MI.getOperand(OpIdx++).getReg(); + Register DstReg = MI.getOperand(OpIdx++).getReg(); // Copy the source register. MIB.add(MI.getOperand(OpIdx++)); @@ -1597,8 +1597,8 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB, MIB.add(MI.getOperand(OpIdx++)); // Add the destination operands (D subregs). - unsigned D0 = TRI->getSubReg(DstReg, ARM::dsub_0); - unsigned D1 = TRI->getSubReg(DstReg, ARM::dsub_1); + Register D0 = TRI->getSubReg(DstReg, ARM::dsub_0); + Register D1 = TRI->getSubReg(DstReg, ARM::dsub_1); MIB.addReg(D0, RegState::Define | getDeadRegState(DstIsDead)) .addReg(D1, RegState::Define | getDeadRegState(DstIsDead)); @@ -1618,7 +1618,7 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB, // Grab the Q register source. bool SrcIsKill = MI.getOperand(OpIdx).isKill(); - unsigned SrcReg = MI.getOperand(OpIdx++).getReg(); + Register SrcReg = MI.getOperand(OpIdx++).getReg(); // Copy the destination register. MachineOperand Dst(MI.getOperand(OpIdx++)); @@ -1629,8 +1629,8 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB, MIB.add(MI.getOperand(OpIdx++)); // Add the source operands (D subregs). - unsigned D0 = TRI->getSubReg(SrcReg, ARM::dsub_0); - unsigned D1 = TRI->getSubReg(SrcReg, ARM::dsub_1); + Register D0 = TRI->getSubReg(SrcReg, ARM::dsub_0); + Register D1 = TRI->getSubReg(SrcReg, ARM::dsub_1); MIB.addReg(D0, SrcIsKill ? RegState::Kill : 0) .addReg(D1, SrcIsKill ? 
RegState::Kill : 0); diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp index 8c0ceee29ad..2fd11426c5a 100644 --- a/lib/Target/ARM/ARMFastISel.cpp +++ b/lib/Target/ARM/ARMFastISel.cpp @@ -192,7 +192,7 @@ class ARMFastISel final : public FastISel { bool isLoadTypeLegal(Type *Ty, MVT &VT); bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value, bool isZExt, bool isEquality); - bool ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr, + bool ARMEmitLoad(MVT VT, Register &ResultReg, Address &Addr, unsigned Alignment = 0, bool isZExt = true, bool allocReg = true); bool ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr, @@ -913,7 +913,7 @@ void ARMFastISel::AddLoadStoreOperands(MVT VT, Address &Addr, AddOptionalDefs(MIB); } -bool ARMFastISel::ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr, +bool ARMFastISel::ARMEmitLoad(MVT VT, Register &ResultReg, Address &Addr, unsigned Alignment, bool isZExt, bool allocReg) { unsigned Opc; bool useAM3 = false; @@ -1045,7 +1045,7 @@ bool ARMFastISel::SelectLoad(const Instruction *I) { Address Addr; if (!ARMComputeAddress(I->getOperand(0), Addr)) return false; - unsigned ResultReg; + Register ResultReg; if (!ARMEmitLoad(VT, ResultReg, Addr, cast(I)->getAlignment())) return false; updateValueMap(I, ResultReg); @@ -2162,7 +2162,7 @@ bool ARMFastISel::SelectRet(const Instruction *I) { } // Make the copy. - unsigned DstReg = VA.getLocReg(); + Register DstReg = VA.getLocReg(); const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg); // Avoid a cross-class copy. This is very unlikely. if (!SrcRC->contains(DstReg)) @@ -2476,7 +2476,7 @@ bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src, } bool RV; - unsigned ResultReg; + Register ResultReg; RV = ARMEmitLoad(VT, ResultReg, Src); assert(RV && "Should be able to handle this load."); RV = ARMEmitStore(VT, ResultReg, Dest); @@ -2506,7 +2506,7 @@ bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) { const ARMBaseRegisterInfo *RegInfo = static_cast(Subtarget->getRegisterInfo()); - unsigned FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF)); + Register FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF)); unsigned SrcReg = FramePtr; // Recursively load frame address @@ -2947,7 +2947,7 @@ bool ARMFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo, Address Addr; if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false; - unsigned ResultReg = MI->getOperand(0).getReg(); + Register ResultReg = MI->getOperand(0).getReg(); if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlignment(), isZExt, false)) return false; MachineBasicBlock::iterator I(MI); @@ -2974,7 +2974,7 @@ unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV, MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF), MachineMemOperand::MOLoad, 4, 4); - unsigned TempReg = MF->getRegInfo().createVirtualRegister(&ARM::rGPRRegClass); + Register TempReg = MF->getRegInfo().createVirtualRegister(&ARM::rGPRRegClass); unsigned Opc = isThumb2 ? ARM::t2LDRpci : ARM::LDRcp; MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), TempReg) diff --git a/lib/Target/ARM/ARMFrameLowering.cpp b/lib/Target/ARM/ARMFrameLowering.cpp index d2a5111a4de..e11399aa25f 100644 --- a/lib/Target/ARM/ARMFrameLowering.cpp +++ b/lib/Target/ARM/ARMFrameLowering.cpp @@ -376,7 +376,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF, // to determine the end of the prologue. 
DebugLoc dl; - unsigned FramePtr = RegInfo->getFrameRegister(MF); + Register FramePtr = RegInfo->getFrameRegister(MF); // Determine the sizes of each callee-save spill areas and record which frame // belongs to which callee-save spill areas. @@ -780,7 +780,7 @@ void ARMFrameLowering::emitEpilogue(MachineFunction &MF, unsigned ArgRegsSaveSize = AFI->getArgRegsSaveSize(); int NumBytes = (int)MFI.getStackSize(); - unsigned FramePtr = RegInfo->getFrameRegister(MF); + Register FramePtr = RegInfo->getFrameRegister(MF); // All calls are tail calls in GHC calling conv, and functions have no // prologue/epilogue. @@ -1623,7 +1623,7 @@ void ARMFrameLowering::determineCalleeSaves(MachineFunction &MF, MachineRegisterInfo &MRI = MF.getRegInfo(); const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); (void)TRI; // Silence unused warning in non-assert builds. - unsigned FramePtr = RegInfo->getFrameRegister(MF); + Register FramePtr = RegInfo->getFrameRegister(MF); // Spill R4 if Thumb2 function requires stack realignment - it will be used as // scratch register. Also spill R4 if Thumb2 function has varsized objects, diff --git a/lib/Target/ARM/ARMISelDAGToDAG.cpp b/lib/Target/ARM/ARMISelDAGToDAG.cpp index 5ccc08c3b48..a3c7999f20f 100644 --- a/lib/Target/ARM/ARMISelDAGToDAG.cpp +++ b/lib/Target/ARM/ARMISelDAGToDAG.cpp @@ -4471,7 +4471,7 @@ bool ARMDAGToDAGISel::tryInlineAsm(SDNode *N){ // Replace the two GPRs with 1 GPRPair and copy values from GPRPair to // the original GPRs. - unsigned GPVR = MRI.createVirtualRegister(&ARM::GPRPairRegClass); + Register GPVR = MRI.createVirtualRegister(&ARM::GPRPairRegClass); PairedReg = CurDAG->getRegister(GPVR, MVT::Untyped); SDValue Chain = SDValue(N,0); @@ -4507,7 +4507,7 @@ bool ARMDAGToDAGISel::tryInlineAsm(SDNode *N){ // Copy REG_SEQ into a GPRPair-typed VR and replace the original two // i32 VRs of inline asm with it. - unsigned GPVR = MRI.createVirtualRegister(&ARM::GPRPairRegClass); + Register GPVR = MRI.createVirtualRegister(&ARM::GPRPairRegClass); PairedReg = CurDAG->getRegister(GPVR, MVT::Untyped); Chain = CurDAG->getCopyToReg(T1, dl, GPVR, Pair, T1.getValue(1)); diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp index 66435c41963..0cd9a1c6e91 100644 --- a/lib/Target/ARM/ARMISelLowering.cpp +++ b/lib/Target/ARM/ARMISelLowering.cpp @@ -5356,7 +5356,7 @@ SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { EVT VT = Op.getValueType(); SDLoc dl(Op); // FIXME probably not meaningful unsigned Depth = cast(Op.getOperand(0))->getZExtValue(); - unsigned FrameReg = ARI.getFrameRegister(MF); + Register FrameReg = ARI.getFrameRegister(MF); SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); while (Depth--) FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, @@ -9024,19 +9024,19 @@ void ARMTargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI, // orr r5, r5, #1 // add r5, pc // str r5, [$jbuf, #+4] ; &jbuf[1] - unsigned NewVReg1 = MRI->createVirtualRegister(TRC); + Register NewVReg1 = MRI->createVirtualRegister(TRC); BuildMI(*MBB, MI, dl, TII->get(ARM::t2LDRpci), NewVReg1) .addConstantPoolIndex(CPI) .addMemOperand(CPMMO) .add(predOps(ARMCC::AL)); // Set the low bit because of thumb mode. 
- unsigned NewVReg2 = MRI->createVirtualRegister(TRC); + Register NewVReg2 = MRI->createVirtualRegister(TRC); BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2) .addReg(NewVReg1, RegState::Kill) .addImm(0x01) .add(predOps(ARMCC::AL)) .add(condCodeOp()); - unsigned NewVReg3 = MRI->createVirtualRegister(TRC); + Register NewVReg3 = MRI->createVirtualRegister(TRC); BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg3) .addReg(NewVReg2, RegState::Kill) .addImm(PCLabelId); @@ -9054,28 +9054,28 @@ void ARMTargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI, // orrs r1, r2 // add r2, $jbuf, #+4 ; &jbuf[1] // str r1, [r2] - unsigned NewVReg1 = MRI->createVirtualRegister(TRC); + Register NewVReg1 = MRI->createVirtualRegister(TRC); BuildMI(*MBB, MI, dl, TII->get(ARM::tLDRpci), NewVReg1) .addConstantPoolIndex(CPI) .addMemOperand(CPMMO) .add(predOps(ARMCC::AL)); - unsigned NewVReg2 = MRI->createVirtualRegister(TRC); + Register NewVReg2 = MRI->createVirtualRegister(TRC); BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg2) .addReg(NewVReg1, RegState::Kill) .addImm(PCLabelId); // Set the low bit because of thumb mode. - unsigned NewVReg3 = MRI->createVirtualRegister(TRC); + Register NewVReg3 = MRI->createVirtualRegister(TRC); BuildMI(*MBB, MI, dl, TII->get(ARM::tMOVi8), NewVReg3) .addReg(ARM::CPSR, RegState::Define) .addImm(1) .add(predOps(ARMCC::AL)); - unsigned NewVReg4 = MRI->createVirtualRegister(TRC); + Register NewVReg4 = MRI->createVirtualRegister(TRC); BuildMI(*MBB, MI, dl, TII->get(ARM::tORR), NewVReg4) .addReg(ARM::CPSR, RegState::Define) .addReg(NewVReg2, RegState::Kill) .addReg(NewVReg3, RegState::Kill) .add(predOps(ARMCC::AL)); - unsigned NewVReg5 = MRI->createVirtualRegister(TRC); + Register NewVReg5 = MRI->createVirtualRegister(TRC); BuildMI(*MBB, MI, dl, TII->get(ARM::tADDframe), NewVReg5) .addFrameIndex(FI) .addImm(36); // &jbuf[1] :: pc @@ -9090,13 +9090,13 @@ void ARMTargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI, // ldr r1, LCPI1_1 // add r1, pc, r1 // str r1, [$jbuf, #+4] ; &jbuf[1] - unsigned NewVReg1 = MRI->createVirtualRegister(TRC); + Register NewVReg1 = MRI->createVirtualRegister(TRC); BuildMI(*MBB, MI, dl, TII->get(ARM::LDRi12), NewVReg1) .addConstantPoolIndex(CPI) .addImm(0) .addMemOperand(CPMMO) .add(predOps(ARMCC::AL)); - unsigned NewVReg2 = MRI->createVirtualRegister(TRC); + Register NewVReg2 = MRI->createVirtualRegister(TRC); BuildMI(*MBB, MI, dl, TII->get(ARM::PICADD), NewVReg2) .addReg(NewVReg1, RegState::Kill) .addImm(PCLabelId) @@ -9218,7 +9218,7 @@ void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI, bool IsPositionIndependent = isPositionIndependent(); unsigned NumLPads = LPadList.size(); if (Subtarget->isThumb2()) { - unsigned NewVReg1 = MRI->createVirtualRegister(TRC); + Register NewVReg1 = MRI->createVirtualRegister(TRC); BuildMI(DispatchBB, dl, TII->get(ARM::t2LDRi12), NewVReg1) .addFrameIndex(FI) .addImm(4) @@ -9231,7 +9231,7 @@ void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI, .addImm(LPadList.size()) .add(predOps(ARMCC::AL)); } else { - unsigned VReg1 = MRI->createVirtualRegister(TRC); + Register VReg1 = MRI->createVirtualRegister(TRC); BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVi16), VReg1) .addImm(NumLPads & 0xFFFF) .add(predOps(ARMCC::AL)); @@ -9256,12 +9256,12 @@ void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI, .addImm(ARMCC::HI) .addReg(ARM::CPSR); - unsigned NewVReg3 = MRI->createVirtualRegister(TRC); + Register NewVReg3 = MRI->createVirtualRegister(TRC); BuildMI(DispContBB, dl, 
TII->get(ARM::t2LEApcrelJT), NewVReg3) .addJumpTableIndex(MJTI) .add(predOps(ARMCC::AL)); - unsigned NewVReg4 = MRI->createVirtualRegister(TRC); + Register NewVReg4 = MRI->createVirtualRegister(TRC); BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4) .addReg(NewVReg3, RegState::Kill) .addReg(NewVReg1) @@ -9274,7 +9274,7 @@ void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI, .addReg(NewVReg1) .addJumpTableIndex(MJTI); } else if (Subtarget->isThumb()) { - unsigned NewVReg1 = MRI->createVirtualRegister(TRC); + Register NewVReg1 = MRI->createVirtualRegister(TRC); BuildMI(DispatchBB, dl, TII->get(ARM::tLDRspi), NewVReg1) .addFrameIndex(FI) .addImm(1) @@ -9297,7 +9297,7 @@ void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI, Align = MF->getDataLayout().getTypeAllocSize(C->getType()); unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); - unsigned VReg1 = MRI->createVirtualRegister(TRC); + Register VReg1 = MRI->createVirtualRegister(TRC); BuildMI(DispatchBB, dl, TII->get(ARM::tLDRpci)) .addReg(VReg1, RegState::Define) .addConstantPoolIndex(Idx) @@ -9313,19 +9313,19 @@ void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI, .addImm(ARMCC::HI) .addReg(ARM::CPSR); - unsigned NewVReg2 = MRI->createVirtualRegister(TRC); + Register NewVReg2 = MRI->createVirtualRegister(TRC); BuildMI(DispContBB, dl, TII->get(ARM::tLSLri), NewVReg2) .addReg(ARM::CPSR, RegState::Define) .addReg(NewVReg1) .addImm(2) .add(predOps(ARMCC::AL)); - unsigned NewVReg3 = MRI->createVirtualRegister(TRC); + Register NewVReg3 = MRI->createVirtualRegister(TRC); BuildMI(DispContBB, dl, TII->get(ARM::tLEApcrelJT), NewVReg3) .addJumpTableIndex(MJTI) .add(predOps(ARMCC::AL)); - unsigned NewVReg4 = MRI->createVirtualRegister(TRC); + Register NewVReg4 = MRI->createVirtualRegister(TRC); BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg4) .addReg(ARM::CPSR, RegState::Define) .addReg(NewVReg2, RegState::Kill) @@ -9335,7 +9335,7 @@ void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI, MachineMemOperand *JTMMOLd = MF->getMachineMemOperand( MachinePointerInfo::getJumpTable(*MF), MachineMemOperand::MOLoad, 4, 4); - unsigned NewVReg5 = MRI->createVirtualRegister(TRC); + Register NewVReg5 = MRI->createVirtualRegister(TRC); BuildMI(DispContBB, dl, TII->get(ARM::tLDRi), NewVReg5) .addReg(NewVReg4, RegState::Kill) .addImm(0) @@ -9356,7 +9356,7 @@ void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI, .addReg(NewVReg6, RegState::Kill) .addJumpTableIndex(MJTI); } else { - unsigned NewVReg1 = MRI->createVirtualRegister(TRC); + Register NewVReg1 = MRI->createVirtualRegister(TRC); BuildMI(DispatchBB, dl, TII->get(ARM::LDRi12), NewVReg1) .addFrameIndex(FI) .addImm(4) @@ -9369,7 +9369,7 @@ void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI, .addImm(NumLPads) .add(predOps(ARMCC::AL)); } else if (Subtarget->hasV6T2Ops() && isUInt<16>(NumLPads)) { - unsigned VReg1 = MRI->createVirtualRegister(TRC); + Register VReg1 = MRI->createVirtualRegister(TRC); BuildMI(DispatchBB, dl, TII->get(ARM::MOVi16), VReg1) .addImm(NumLPads & 0xFFFF) .add(predOps(ARMCC::AL)); @@ -9398,7 +9398,7 @@ void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI, Align = MF->getDataLayout().getTypeAllocSize(C->getType()); unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); - unsigned VReg1 = MRI->createVirtualRegister(TRC); + Register VReg1 = MRI->createVirtualRegister(TRC); BuildMI(DispatchBB, dl, TII->get(ARM::LDRcp)) .addReg(VReg1, RegState::Define) .addConstantPoolIndex(Idx) @@ -9415,20 
+9415,20 @@ void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI, .addImm(ARMCC::HI) .addReg(ARM::CPSR); - unsigned NewVReg3 = MRI->createVirtualRegister(TRC); + Register NewVReg3 = MRI->createVirtualRegister(TRC); BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg3) .addReg(NewVReg1) .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)) .add(predOps(ARMCC::AL)) .add(condCodeOp()); - unsigned NewVReg4 = MRI->createVirtualRegister(TRC); + Register NewVReg4 = MRI->createVirtualRegister(TRC); BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg4) .addJumpTableIndex(MJTI) .add(predOps(ARMCC::AL)); MachineMemOperand *JTMMOLd = MF->getMachineMemOperand( MachinePointerInfo::getJumpTable(*MF), MachineMemOperand::MOLoad, 4, 4); - unsigned NewVReg5 = MRI->createVirtualRegister(TRC); + Register NewVReg5 = MRI->createVirtualRegister(TRC); BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg5) .addReg(NewVReg3, RegState::Kill) .addReg(NewVReg4) @@ -9663,8 +9663,8 @@ ARMTargetLowering::EmitStructByval(MachineInstr &MI, const BasicBlock *LLVM_BB = BB->getBasicBlock(); MachineFunction::iterator It = ++BB->getIterator(); - unsigned dest = MI.getOperand(0).getReg(); - unsigned src = MI.getOperand(1).getReg(); + Register dest = MI.getOperand(0).getReg(); + Register src = MI.getOperand(1).getReg(); unsigned SizeVal = MI.getOperand(2).getImm(); unsigned Align = MI.getOperand(3).getImm(); DebugLoc dl = MI.getDebugLoc(); @@ -9715,9 +9715,9 @@ ARMTargetLowering::EmitStructByval(MachineInstr &MI, unsigned srcIn = src; unsigned destIn = dest; for (unsigned i = 0; i < LoopSize; i+=UnitSize) { - unsigned srcOut = MRI.createVirtualRegister(TRC); - unsigned destOut = MRI.createVirtualRegister(TRC); - unsigned scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC); + Register srcOut = MRI.createVirtualRegister(TRC); + Register destOut = MRI.createVirtualRegister(TRC); + Register scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC); emitPostLd(BB, MI, TII, dl, UnitSize, scratch, srcIn, srcOut, IsThumb1, IsThumb2); emitPostSt(BB, MI, TII, dl, UnitSize, scratch, destIn, destOut, @@ -9730,9 +9730,9 @@ ARMTargetLowering::EmitStructByval(MachineInstr &MI, // [scratch, srcOut] = LDRB_POST(srcIn, 1) // [destOut] = STRB_POST(scratch, destIn, 1) for (unsigned i = 0; i < BytesLeft; i++) { - unsigned srcOut = MRI.createVirtualRegister(TRC); - unsigned destOut = MRI.createVirtualRegister(TRC); - unsigned scratch = MRI.createVirtualRegister(TRC); + Register srcOut = MRI.createVirtualRegister(TRC); + Register destOut = MRI.createVirtualRegister(TRC); + Register scratch = MRI.createVirtualRegister(TRC); emitPostLd(BB, MI, TII, dl, 1, scratch, srcIn, srcOut, IsThumb1, IsThumb2); emitPostSt(BB, MI, TII, dl, 1, scratch, destIn, destOut, @@ -9775,7 +9775,7 @@ ARMTargetLowering::EmitStructByval(MachineInstr &MI, exitMBB->transferSuccessorsAndUpdatePHIs(BB); // Load an immediate to varEnd. 
- unsigned varEnd = MRI.createVirtualRegister(TRC); + Register varEnd = MRI.createVirtualRegister(TRC); if (Subtarget->useMovt()) { unsigned Vtmp = varEnd; if ((LoopSize & 0xFFFF0000) != 0) @@ -9825,12 +9825,12 @@ ARMTargetLowering::EmitStructByval(MachineInstr &MI, // destPhi = PHI(destLoop, dst) MachineBasicBlock *entryBB = BB; BB = loopMBB; - unsigned varLoop = MRI.createVirtualRegister(TRC); - unsigned varPhi = MRI.createVirtualRegister(TRC); - unsigned srcLoop = MRI.createVirtualRegister(TRC); - unsigned srcPhi = MRI.createVirtualRegister(TRC); - unsigned destLoop = MRI.createVirtualRegister(TRC); - unsigned destPhi = MRI.createVirtualRegister(TRC); + Register varLoop = MRI.createVirtualRegister(TRC); + Register varPhi = MRI.createVirtualRegister(TRC); + Register srcLoop = MRI.createVirtualRegister(TRC); + Register srcPhi = MRI.createVirtualRegister(TRC); + Register destLoop = MRI.createVirtualRegister(TRC); + Register destPhi = MRI.createVirtualRegister(TRC); BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), varPhi) .addReg(varLoop).addMBB(loopMBB) @@ -9844,7 +9844,7 @@ ARMTargetLowering::EmitStructByval(MachineInstr &MI, // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize) // [destLoop] = STR_POST(scratch, destPhi, UnitSiz) - unsigned scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC); + Register scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC); emitPostLd(BB, BB->end(), TII, dl, UnitSize, scratch, srcPhi, srcLoop, IsThumb1, IsThumb2); emitPostSt(BB, BB->end(), TII, dl, UnitSize, scratch, destPhi, destLoop, @@ -9885,9 +9885,9 @@ ARMTargetLowering::EmitStructByval(MachineInstr &MI, unsigned srcIn = srcLoop; unsigned destIn = destLoop; for (unsigned i = 0; i < BytesLeft; i++) { - unsigned srcOut = MRI.createVirtualRegister(TRC); - unsigned destOut = MRI.createVirtualRegister(TRC); - unsigned scratch = MRI.createVirtualRegister(TRC); + Register srcOut = MRI.createVirtualRegister(TRC); + Register destOut = MRI.createVirtualRegister(TRC); + Register scratch = MRI.createVirtualRegister(TRC); emitPostLd(BB, StartOfExit, TII, dl, 1, scratch, srcIn, srcOut, IsThumb1, IsThumb2); emitPostSt(BB, StartOfExit, TII, dl, 1, scratch, destIn, destOut, @@ -9947,7 +9947,7 @@ ARMTargetLowering::EmitLowered__chkstk(MachineInstr &MI, break; case CodeModel::Large: { MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); - unsigned Reg = MRI.createVirtualRegister(&ARM::rGPRRegClass); + Register Reg = MRI.createVirtualRegister(&ARM::rGPRRegClass); BuildMI(*MBB, MI, DL, TII.get(ARM::t2MOVi32imm), Reg) .addExternalSymbol("__chkstk"); @@ -10195,8 +10195,8 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, // equality. bool RHSisZero = MI.getOpcode() == ARM::BCCZi64; - unsigned LHS1 = MI.getOperand(1).getReg(); - unsigned LHS2 = MI.getOperand(2).getReg(); + Register LHS1 = MI.getOperand(1).getReg(); + Register LHS2 = MI.getOperand(2).getReg(); if (RHSisZero) { BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) .addReg(LHS1) @@ -10206,8 +10206,8 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, .addReg(LHS2).addImm(0) .addImm(ARMCC::EQ).addReg(ARM::CPSR); } else { - unsigned RHS1 = MI.getOperand(3).getReg(); - unsigned RHS2 = MI.getOperand(4).getReg(); + Register RHS1 = MI.getOperand(3).getReg(); + Register RHS2 = MI.getOperand(4).getReg(); BuildMI(BB, dl, TII->get(isThumb2 ? 
ARM::t2CMPrr : ARM::CMPrr)) .addReg(LHS1) .addReg(RHS1) @@ -10268,15 +10268,15 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, Fn->insert(BBI, RSBBB); Fn->insert(BBI, SinkBB); - unsigned int ABSSrcReg = MI.getOperand(1).getReg(); - unsigned int ABSDstReg = MI.getOperand(0).getReg(); + Register ABSSrcReg = MI.getOperand(1).getReg(); + Register ABSDstReg = MI.getOperand(0).getReg(); bool ABSSrcKIll = MI.getOperand(1).isKill(); bool isThumb2 = Subtarget->isThumb2(); MachineRegisterInfo &MRI = Fn->getRegInfo(); // In Thumb mode S must not be specified if source register is the SP or // PC and if destination register is the SP, so restrict register class - unsigned NewRsbDstReg = - MRI.createVirtualRegister(isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass); + Register NewRsbDstReg = MRI.createVirtualRegister( + isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass); // Transfer the remainder of BB and its successor edges to sinkMBB. SinkBB->splice(SinkBB->begin(), BB, @@ -10355,7 +10355,7 @@ static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget, // The MEMCPY both defines and kills the scratch registers. for (unsigned I = 0; I != MI.getOperand(4).getImm(); ++I) { - unsigned TmpReg = MRI.createVirtualRegister(isThumb1 ? &ARM::tGPRRegClass + Register TmpReg = MRI.createVirtualRegister(isThumb1 ? &ARM::tGPRRegClass : &ARM::GPRRegClass); MIB.addReg(TmpReg, RegState::Define|RegState::Dead); } @@ -16545,7 +16545,7 @@ void ARMTargetLowering::insertCopiesSplitCSR( else llvm_unreachable("Unexpected register class in CSRsViaCopy!"); - unsigned NewVR = MRI->createVirtualRegister(RC); + Register NewVR = MRI->createVirtualRegister(RC); // Create copy from CSR to a virtual register. // FIXME: this currently does not emit CFI pseudo-instructions, it works // fine for CXX_FAST_TLS since the C++-style TLS access functions should be diff --git a/lib/Target/ARM/ARMInstrInfo.cpp b/lib/Target/ARM/ARMInstrInfo.cpp index 388c889349b..a802d5a06f0 100644 --- a/lib/Target/ARM/ARMInstrInfo.cpp +++ b/lib/Target/ARM/ARMInstrInfo.cpp @@ -117,7 +117,7 @@ void ARMInstrInfo::expandLoadStackGuard(MachineBasicBlock::iterator MI) const { MachineBasicBlock &MBB = *MI->getParent(); DebugLoc DL = MI->getDebugLoc(); - unsigned Reg = MI->getOperand(0).getReg(); + Register Reg = MI->getOperand(0).getReg(); MachineInstrBuilder MIB; MIB = BuildMI(MBB, MI, DL, get(ARM::MOV_ga_pcrel_ldr), Reg) diff --git a/lib/Target/ARM/ARMInstructionSelector.cpp b/lib/Target/ARM/ARMInstructionSelector.cpp index fc33b4e2782..4696404047a 100644 --- a/lib/Target/ARM/ARMInstructionSelector.cpp +++ b/lib/Target/ARM/ARMInstructionSelector.cpp @@ -210,7 +210,7 @@ static const TargetRegisterClass *guessRegClass(unsigned Reg, static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI) { - unsigned DstReg = I.getOperand(0).getReg(); + Register DstReg = I.getOperand(0).getReg(); if (Register::isPhysicalRegister(DstReg)) return true; @@ -236,17 +236,17 @@ static bool selectMergeValues(MachineInstrBuilder &MIB, // We only support G_MERGE_VALUES as a way to stick together two scalar GPRs // into one DPR. 
- unsigned VReg0 = MIB->getOperand(0).getReg(); + Register VReg0 = MIB->getOperand(0).getReg(); (void)VReg0; assert(MRI.getType(VReg0).getSizeInBits() == 64 && RBI.getRegBank(VReg0, MRI, TRI)->getID() == ARM::FPRRegBankID && "Unsupported operand for G_MERGE_VALUES"); - unsigned VReg1 = MIB->getOperand(1).getReg(); + Register VReg1 = MIB->getOperand(1).getReg(); (void)VReg1; assert(MRI.getType(VReg1).getSizeInBits() == 32 && RBI.getRegBank(VReg1, MRI, TRI)->getID() == ARM::GPRRegBankID && "Unsupported operand for G_MERGE_VALUES"); - unsigned VReg2 = MIB->getOperand(2).getReg(); + Register VReg2 = MIB->getOperand(2).getReg(); (void)VReg2; assert(MRI.getType(VReg2).getSizeInBits() == 32 && RBI.getRegBank(VReg2, MRI, TRI)->getID() == ARM::GPRRegBankID && @@ -268,17 +268,17 @@ static bool selectUnmergeValues(MachineInstrBuilder &MIB, // We only support G_UNMERGE_VALUES as a way to break up one DPR into two // GPRs. - unsigned VReg0 = MIB->getOperand(0).getReg(); + Register VReg0 = MIB->getOperand(0).getReg(); (void)VReg0; assert(MRI.getType(VReg0).getSizeInBits() == 32 && RBI.getRegBank(VReg0, MRI, TRI)->getID() == ARM::GPRRegBankID && "Unsupported operand for G_UNMERGE_VALUES"); - unsigned VReg1 = MIB->getOperand(1).getReg(); + Register VReg1 = MIB->getOperand(1).getReg(); (void)VReg1; assert(MRI.getType(VReg1).getSizeInBits() == 32 && RBI.getRegBank(VReg1, MRI, TRI)->getID() == ARM::GPRRegBankID && "Unsupported operand for G_UNMERGE_VALUES"); - unsigned VReg2 = MIB->getOperand(2).getReg(); + Register VReg2 = MIB->getOperand(2).getReg(); (void)VReg2; assert(MRI.getType(VReg2).getSizeInBits() == 64 && RBI.getRegBank(VReg2, MRI, TRI)->getID() == ARM::FPRRegBankID && @@ -873,10 +873,10 @@ bool ARMInstructionSelector::select(MachineInstr &I) { MIB.addImm(1).add(predOps(ARMCC::AL)).add(condCodeOp()); if (isSExt) { - unsigned SExtResult = I.getOperand(0).getReg(); + Register SExtResult = I.getOperand(0).getReg(); // Use a new virtual register for the result of the AND - unsigned AndResult = MRI.createVirtualRegister(&ARM::GPRRegClass); + Register AndResult = MRI.createVirtualRegister(&ARM::GPRRegClass); I.getOperand(0).setReg(AndResult); auto InsertBefore = std::next(I.getIterator()); @@ -927,7 +927,7 @@ bool ARMInstructionSelector::select(MachineInstr &I) { assert(MRI.getType(SrcReg).getSizeInBits() == 64 && "Unsupported size"); assert(MRI.getType(DstReg).getSizeInBits() <= 32 && "Unsupported size"); - unsigned IgnoredBits = MRI.createVirtualRegister(&ARM::GPRRegClass); + Register IgnoredBits = MRI.createVirtualRegister(&ARM::GPRRegClass); auto InsertBefore = std::next(I.getIterator()); auto MovI = BuildMI(MBB, InsertBefore, I.getDebugLoc(), TII.get(ARM::VMOVRRD)) @@ -1038,7 +1038,7 @@ bool ARMInstructionSelector::select(MachineInstr &I) { case G_FCMP: { assert(STI.hasVFP2Base() && "Can't select fcmp without VFP"); - unsigned OpReg = I.getOperand(2).getReg(); + Register OpReg = I.getOperand(2).getReg(); unsigned Size = MRI.getType(OpReg).getSizeInBits(); if (Size == 64 && !STI.hasFP64()) { @@ -1081,7 +1081,7 @@ bool ARMInstructionSelector::select(MachineInstr &I) { return false; } - unsigned Reg = I.getOperand(0).getReg(); + Register Reg = I.getOperand(0).getReg(); unsigned RegBank = RBI.getRegBank(Reg, MRI, TRI)->getID(); LLT ValTy = MRI.getType(Reg); @@ -1096,9 +1096,9 @@ bool ARMInstructionSelector::select(MachineInstr &I) { if (ValSize == 1 && NewOpc == Opcodes.STORE8) { // Before storing a 1-bit value, make sure to clear out any unneeded bits. 
- unsigned OriginalValue = I.getOperand(0).getReg(); + Register OriginalValue = I.getOperand(0).getReg(); - unsigned ValueToStore = MRI.createVirtualRegister(&ARM::GPRRegClass); + Register ValueToStore = MRI.createVirtualRegister(&ARM::GPRRegClass); I.getOperand(0).setReg(ValueToStore); auto InsertBefore = I.getIterator(); @@ -1158,7 +1158,7 @@ bool ARMInstructionSelector::select(MachineInstr &I) { case G_PHI: { I.setDesc(TII.get(PHI)); - unsigned DstReg = I.getOperand(0).getReg(); + Register DstReg = I.getOperand(0).getReg(); const TargetRegisterClass *RC = guessRegClass(DstReg, MRI, TRI, RBI); if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) { break; diff --git a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp index 90a1ce238c3..4a193fed04a 100644 --- a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp +++ b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp @@ -509,7 +509,7 @@ void ARMLoadStoreOpt::UpdateBaseRegUses(MachineBasicBlock &MBB, Offset = MO.getImm() - WordOffset * getImmScale(Opc); // If storing the base register, it needs to be reset first. - unsigned InstrSrcReg = getLoadStoreRegOp(*MBBI).getReg(); + Register InstrSrcReg = getLoadStoreRegOp(*MBBI).getReg(); if (Offset >= 0 && !(IsStore && InstrSrcReg == Base)) MO.setImm(Offset); @@ -859,7 +859,7 @@ MachineInstr *ARMLoadStoreOpt::MergeOpsUpdate(const MergeCandidate &Cand) { // Determine list of registers and list of implicit super-register defs. for (const MachineInstr *MI : Cand.Instrs) { const MachineOperand &MO = getLoadStoreRegOp(*MI); - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); bool IsKill = MO.isKill(); if (IsKill) KilledRegs.insert(Reg); @@ -874,7 +874,7 @@ MachineInstr *ARMLoadStoreOpt::MergeOpsUpdate(const MergeCandidate &Cand) { if (!MO.isReg() || !MO.isDef() || MO.isDead()) continue; assert(MO.isImplicit()); - unsigned DefReg = MO.getReg(); + Register DefReg = MO.getReg(); if (is_contained(ImpDefs, DefReg)) continue; @@ -893,7 +893,7 @@ MachineInstr *ARMLoadStoreOpt::MergeOpsUpdate(const MergeCandidate &Cand) { iterator InsertBefore = std::next(iterator(LatestMI)); MachineBasicBlock &MBB = *LatestMI->getParent(); unsigned Offset = getMemoryOpOffset(*First); - unsigned Base = getLoadStoreBaseOp(*First).getReg(); + Register Base = getLoadStoreBaseOp(*First).getReg(); bool BaseKill = LatestMI->killsRegister(Base); unsigned PredReg = 0; ARMCC::CondCodes Pred = getInstrPredicate(*First, PredReg); @@ -1005,7 +1005,7 @@ void ARMLoadStoreOpt::FormCandidates(const MemOpQueue &MemOps) { const MachineInstr *MI = MemOps[SIndex].MI; int Offset = MemOps[SIndex].Offset; const MachineOperand &PMO = getLoadStoreRegOp(*MI); - unsigned PReg = PMO.getReg(); + Register PReg = PMO.getReg(); unsigned PRegNum = PMO.isUndef() ? 
std::numeric_limits<unsigned>::max() : TRI->getEncodingValue(PReg); unsigned Latest = SIndex; @@ -1052,7 +1052,7 @@ void ARMLoadStoreOpt::FormCandidates(const MemOpQueue &MemOps) { if (NewOffset != Offset + (int)Size) break; const MachineOperand &MO = getLoadStoreRegOp(*MemOps[I].MI); - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Reg == ARM::SP || Reg == ARM::PC) break; if (Count == Limit) @@ -1261,7 +1261,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLSMultiple(MachineInstr *MI) { if (isThumb1) return false; const MachineOperand &BaseOP = MI->getOperand(0); - unsigned Base = BaseOP.getReg(); + Register Base = BaseOP.getReg(); bool BaseKill = BaseOP.isKill(); unsigned PredReg = 0; ARMCC::CondCodes Pred = getInstrPredicate(*MI, PredReg); @@ -1387,7 +1387,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLoadStore(MachineInstr *MI) { // FIXME: Use LDM/STM with single register instead. if (isThumb1) return false; - unsigned Base = getLoadStoreBaseOp(*MI).getReg(); + Register Base = getLoadStoreBaseOp(*MI).getReg(); bool BaseKill = getLoadStoreBaseOp(*MI).isKill(); unsigned Opcode = MI->getOpcode(); DebugLoc DL = MI->getDebugLoc(); @@ -1512,7 +1512,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLSDouble(MachineInstr &MI) const { // Behaviour for writeback is undefined if base register is the same as one // of the others. const MachineOperand &BaseOp = MI.getOperand(2); - unsigned Base = BaseOp.getReg(); + Register Base = BaseOp.getReg(); const MachineOperand &Reg0Op = MI.getOperand(0); const MachineOperand &Reg1Op = MI.getOperand(1); if (Reg0Op.getReg() == Base || Reg1Op.getReg() == Base) @@ -1655,9 +1655,9 @@ bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB, return false; const MachineOperand &BaseOp = MI->getOperand(2); - unsigned BaseReg = BaseOp.getReg(); - unsigned EvenReg = MI->getOperand(0).getReg(); - unsigned OddReg = MI->getOperand(1).getReg(); + Register BaseReg = BaseOp.getReg(); + Register EvenReg = MI->getOperand(0).getReg(); + Register OddReg = MI->getOperand(1).getReg(); unsigned EvenRegNum = TRI->getDwarfRegNum(EvenReg, false); unsigned OddRegNum = TRI->getDwarfRegNum(OddReg, false); @@ -1783,8 +1783,8 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) { if (isMemoryOp(*MBBI)) { unsigned Opcode = MBBI->getOpcode(); const MachineOperand &MO = MBBI->getOperand(0); - unsigned Reg = MO.getReg(); - unsigned Base = getLoadStoreBaseOp(*MBBI).getReg(); + Register Reg = MO.getReg(); + Register Base = getLoadStoreBaseOp(*MBBI).getReg(); unsigned PredReg = 0; ARMCC::CondCodes Pred = getInstrPredicate(*MBBI, PredReg); int Offset = getMemoryOpOffset(*MBBI); @@ -2121,7 +2121,7 @@ static bool IsSafeAndProfitableToMove(bool isLd, unsigned Base, MachineOperand &MO = I->getOperand(j); if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (MO.isDef() && TRI->regsOverlap(Reg, Base)) return false; if (Reg != Base && !MemRegs.count(Reg)) @@ -2415,7 +2415,7 @@ ARMPreAllocLoadStoreOpt::RescheduleLoadStoreInstrs(MachineBasicBlock *MBB) { int Opc = MI.getOpcode(); bool isLd = isLoadSingle(Opc); - unsigned Base = MI.getOperand(1).getReg(); + Register Base = MI.getOperand(1).getReg(); int Offset = getMemoryOpOffset(MI); bool StopHere = false; auto FindBases = [&] (Base2InstMap &Base2Ops, BaseVec &Bases) { diff --git a/lib/Target/ARM/MLxExpansionPass.cpp b/lib/Target/ARM/MLxExpansionPass.cpp index 4256f8dddaa..cc31929899b 100644 --- a/lib/Target/ARM/MLxExpansionPass.cpp +++ b/lib/Target/ARM/MLxExpansionPass.cpp @@ -86,7 +86,7 @@ void
MLxExpansion::pushStack(MachineInstr *MI) { MachineInstr *MLxExpansion::getAccDefMI(MachineInstr *MI) const { // Look past COPY and INSERT_SUBREG instructions to find the // real definition MI. This is important for _sfp instructions. - unsigned Reg = MI->getOperand(1).getReg(); + Register Reg = MI->getOperand(1).getReg(); if (Register::isPhysicalRegister(Reg)) return nullptr; @@ -114,7 +114,7 @@ MachineInstr *MLxExpansion::getAccDefMI(MachineInstr *MI) const { } unsigned MLxExpansion::getDefReg(MachineInstr *MI) const { - unsigned Reg = MI->getOperand(0).getReg(); + Register Reg = MI->getOperand(0).getReg(); if (Register::isPhysicalRegister(Reg) || !MRI->hasOneNonDBGUse(Reg)) return Reg; @@ -138,7 +138,7 @@ unsigned MLxExpansion::getDefReg(MachineInstr *MI) const { /// hasLoopHazard - Check whether an MLx instruction is chained to itself across /// a single-MBB loop. bool MLxExpansion::hasLoopHazard(MachineInstr *MI) const { - unsigned Reg = MI->getOperand(1).getReg(); + Register Reg = MI->getOperand(1).getReg(); if (Register::isPhysicalRegister(Reg)) return false; @@ -152,7 +152,7 @@ outer_continue: if (DefMI->isPHI()) { for (unsigned i = 1, e = DefMI->getNumOperands(); i < e; i += 2) { if (DefMI->getOperand(i + 1).getMBB() == MBB) { - unsigned SrcReg = DefMI->getOperand(i).getReg(); + Register SrcReg = DefMI->getOperand(i).getReg(); if (Register::isVirtualRegister(SrcReg)) { DefMI = MRI->getVRegDef(SrcReg); goto outer_continue; @@ -269,23 +269,23 @@ void MLxExpansion::ExpandFPMLxInstruction(MachineBasicBlock &MBB, MachineInstr *MI, unsigned MulOpc, unsigned AddSubOpc, bool NegAcc, bool HasLane) { - unsigned DstReg = MI->getOperand(0).getReg(); + Register DstReg = MI->getOperand(0).getReg(); bool DstDead = MI->getOperand(0).isDead(); - unsigned AccReg = MI->getOperand(1).getReg(); - unsigned Src1Reg = MI->getOperand(2).getReg(); - unsigned Src2Reg = MI->getOperand(3).getReg(); + Register AccReg = MI->getOperand(1).getReg(); + Register Src1Reg = MI->getOperand(2).getReg(); + Register Src2Reg = MI->getOperand(3).getReg(); bool Src1Kill = MI->getOperand(2).isKill(); bool Src2Kill = MI->getOperand(3).isKill(); unsigned LaneImm = HasLane ? MI->getOperand(4).getImm() : 0; unsigned NextOp = HasLane ? 5 : 4; ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NextOp).getImm(); - unsigned PredReg = MI->getOperand(++NextOp).getReg(); + Register PredReg = MI->getOperand(++NextOp).getReg(); const MCInstrDesc &MCID1 = TII->get(MulOpc); const MCInstrDesc &MCID2 = TII->get(AddSubOpc); const MachineFunction &MF = *MI->getParent()->getParent(); - unsigned TmpReg = MRI->createVirtualRegister( - TII->getRegClass(MCID1, 0, TRI, MF)); + Register TmpReg = + MRI->createVirtualRegister(TII->getRegClass(MCID1, 0, TRI, MF)); MachineInstrBuilder MIB = BuildMI(MBB, MI, MI->getDebugLoc(), MCID1, TmpReg) .addReg(Src1Reg, getKillRegState(Src1Kill)) diff --git a/lib/Target/ARM/Thumb1FrameLowering.cpp b/lib/Target/ARM/Thumb1FrameLowering.cpp index 426e9a0ed9b..956d474f1d7 100644 --- a/lib/Target/ARM/Thumb1FrameLowering.cpp +++ b/lib/Target/ARM/Thumb1FrameLowering.cpp @@ -164,7 +164,7 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF, // to determine the end of the prologue. 
DebugLoc dl; - unsigned FramePtr = RegInfo->getFrameRegister(MF); + Register FramePtr = RegInfo->getFrameRegister(MF); unsigned BasePtr = RegInfo->getBaseRegister(); int CFAOffset = 0; @@ -459,8 +459,8 @@ static bool isCSRestore(MachineInstr &MI, const MCPhysReg *CSRegs) { else if (MI.getOpcode() == ARM::tPOP) { return true; } else if (MI.getOpcode() == ARM::tMOVr) { - unsigned Dst = MI.getOperand(0).getReg(); - unsigned Src = MI.getOperand(1).getReg(); + Register Dst = MI.getOperand(0).getReg(); + Register Src = MI.getOperand(1).getReg(); return ((ARM::tGPRRegClass.contains(Src) || Src == ARM::LR) && ARM::hGPRRegClass.contains(Dst)); } @@ -483,7 +483,7 @@ void Thumb1FrameLowering::emitEpilogue(MachineFunction &MF, assert((unsigned)NumBytes >= ArgRegsSaveSize && "ArgRegsSaveSize is included in NumBytes"); const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs(&MF); - unsigned FramePtr = RegInfo->getFrameRegister(MF); + Register FramePtr = RegInfo->getFrameRegister(MF); if (!AFI->hasStackFrame()) { if (NumBytes - ArgRegsSaveSize != 0) diff --git a/lib/Target/ARM/Thumb2ITBlockPass.cpp b/lib/Target/ARM/Thumb2ITBlockPass.cpp index 2087fd59906..3bf0c527427 100644 --- a/lib/Target/ARM/Thumb2ITBlockPass.cpp +++ b/lib/Target/ARM/Thumb2ITBlockPass.cpp @@ -87,7 +87,7 @@ static void TrackDefUses(MachineInstr *MI, RegisterSet &Defs, RegisterSet &Uses, for (auto &MO : MI->operands()) { if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Reg || Reg == ARM::ITSTATE || Reg == ARM::SP) continue; if (MO.isUse()) @@ -145,8 +145,8 @@ Thumb2ITBlock::MoveCopyOutOfITBlock(MachineInstr *MI, MI->getOperand(1).getSubReg() == 0 && "Sub-register indices still around?"); - unsigned DstReg = MI->getOperand(0).getReg(); - unsigned SrcReg = MI->getOperand(1).getReg(); + Register DstReg = MI->getOperand(0).getReg(); + Register SrcReg = MI->getOperand(1).getReg(); // First check if it's safe to move it. if (Uses.count(DstReg) || Defs.count(SrcReg)) diff --git a/lib/Target/ARM/Thumb2InstrInfo.cpp b/lib/Target/ARM/Thumb2InstrInfo.cpp index fa2be13f753..eae72810897 100644 --- a/lib/Target/ARM/Thumb2InstrInfo.cpp +++ b/lib/Target/ARM/Thumb2InstrInfo.cpp @@ -554,7 +554,7 @@ bool llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx, // register then we change to an immediate version. 
unsigned NewOpc = Opcode; if (AddrMode == ARMII::AddrModeT2_so) { - unsigned OffsetReg = MI.getOperand(FrameRegIdx+1).getReg(); + Register OffsetReg = MI.getOperand(FrameRegIdx + 1).getReg(); if (OffsetReg != 0) { MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false); return Offset == 0; diff --git a/lib/Target/ARM/Thumb2SizeReduction.cpp b/lib/Target/ARM/Thumb2SizeReduction.cpp index 37a85fa3841..c5a62aa3399 100644 --- a/lib/Target/ARM/Thumb2SizeReduction.cpp +++ b/lib/Target/ARM/Thumb2SizeReduction.cpp @@ -300,7 +300,7 @@ Thumb2SizeReduce::canAddPseudoFlagDep(MachineInstr *Use, bool FirstInSelfLoop) { for (const MachineOperand &MO : CPSRDef->operands()) { if (!MO.isReg() || MO.isUndef() || MO.isUse()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Reg == 0 || Reg == ARM::CPSR) continue; Defs.insert(Reg); @@ -309,7 +309,7 @@ Thumb2SizeReduce::canAddPseudoFlagDep(MachineInstr *Use, bool FirstInSelfLoop) { for (const MachineOperand &MO : Use->operands()) { if (!MO.isReg() || MO.isUndef() || MO.isDef()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Defs.count(Reg)) return false; } @@ -380,7 +380,7 @@ static bool VerifyLowRegs(MachineInstr *MI) { const MachineOperand &MO = MI->getOperand(i); if (!MO.isReg() || MO.isImplicit()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Reg == 0 || Reg == ARM::CPSR) continue; if (isPCOk && Reg == ARM::PC) @@ -464,11 +464,11 @@ Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI, // For this reason we can't reuse the logic at the end of this function; we // have to implement the MI building here. bool IsStore = Entry.WideOpc == ARM::t2STR_POST; - unsigned Rt = MI->getOperand(IsStore ? 1 : 0).getReg(); - unsigned Rn = MI->getOperand(IsStore ? 0 : 1).getReg(); + Register Rt = MI->getOperand(IsStore ? 1 : 0).getReg(); + Register Rn = MI->getOperand(IsStore ? 0 : 1).getReg(); unsigned Offset = MI->getOperand(3).getImm(); unsigned PredImm = MI->getOperand(4).getImm(); - unsigned PredReg = MI->getOperand(5).getReg(); + Register PredReg = MI->getOperand(5).getReg(); assert(isARMLowRegister(Rt)); assert(isARMLowRegister(Rn)); @@ -496,7 +496,7 @@ Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI, return true; } case ARM::t2LDMIA: { - unsigned BaseReg = MI->getOperand(0).getReg(); + Register BaseReg = MI->getOperand(0).getReg(); assert(isARMLowRegister(BaseReg)); // For the non-writeback version (this one), the base register must be @@ -524,7 +524,7 @@ Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI, break; case ARM::t2LDMIA_RET: { - unsigned BaseReg = MI->getOperand(1).getReg(); + Register BaseReg = MI->getOperand(1).getReg(); if (BaseReg != ARM::SP) return false; Opc = Entry.NarrowOpc2; // tPOP_RET @@ -537,7 +537,7 @@ Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI, case ARM::t2STMDB_UPD: { OpNum = 0; - unsigned BaseReg = MI->getOperand(1).getReg(); + Register BaseReg = MI->getOperand(1).getReg(); if (BaseReg == ARM::SP && (Entry.WideOpc == ARM::t2LDMIA_UPD || Entry.WideOpc == ARM::t2STMDB_UPD)) { @@ -743,11 +743,11 @@ Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI, // are optimizing for size. return false; - unsigned Reg0 = MI->getOperand(0).getReg(); - unsigned Reg1 = MI->getOperand(1).getReg(); + Register Reg0 = MI->getOperand(0).getReg(); + Register Reg1 = MI->getOperand(1).getReg(); // t2MUL is "special". 
The tied source operand is second, not first. if (MI->getOpcode() == ARM::t2MUL) { - unsigned Reg2 = MI->getOperand(2).getReg(); + Register Reg2 = MI->getOperand(2).getReg(); // Early exit if the regs aren't all low regs. if (!isARMLowRegister(Reg0) || !isARMLowRegister(Reg1) || !isARMLowRegister(Reg2)) @@ -782,7 +782,7 @@ Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI, if (Imm > Limit) return false; } else { - unsigned Reg2 = MI->getOperand(2).getReg(); + Register Reg2 = MI->getOperand(2).getReg(); if (Entry.LowRegs2 && !isARMLowRegister(Reg2)) return false; } @@ -868,7 +868,7 @@ Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI, continue; const MachineOperand &MO = MI->getOperand(i); if (MO.isReg()) { - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Reg || Reg == ARM::CPSR) continue; if (Entry.LowRegs1 && !isARMLowRegister(Reg)) diff --git a/lib/Target/ARM/ThumbRegisterInfo.cpp b/lib/Target/ARM/ThumbRegisterInfo.cpp index 6ddcdefdb14..b0ba58d8dc4 100644 --- a/lib/Target/ARM/ThumbRegisterInfo.cpp +++ b/lib/Target/ARM/ThumbRegisterInfo.cpp @@ -372,7 +372,7 @@ bool ThumbRegisterInfo::rewriteFrameIndex(MachineBasicBlock::iterator II, if (Opcode == ARM::tADDframe) { Offset += MI.getOperand(FrameRegIdx+1).getImm(); - unsigned DestReg = MI.getOperand(0).getReg(); + Register DestReg = MI.getOperand(0).getReg(); emitThumbRegPlusImmediate(MBB, II, dl, DestReg, FrameReg, Offset, TII, *this); @@ -510,7 +510,7 @@ void ThumbRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, if (MI.mayLoad()) { // Use the destination register to materialize sp + offset. - unsigned TmpReg = MI.getOperand(0).getReg(); + Register TmpReg = MI.getOperand(0).getReg(); bool UseRR = false; if (Opcode == ARM::tLDRspi) { if (FrameReg == ARM::SP || STI.genExecuteOnly()) diff --git a/lib/Target/AVR/AVRAsmPrinter.cpp b/lib/Target/AVR/AVRAsmPrinter.cpp index 7586bd7b78f..1db6b2236b4 100644 --- a/lib/Target/AVR/AVRAsmPrinter.cpp +++ b/lib/Target/AVR/AVRAsmPrinter.cpp @@ -97,7 +97,7 @@ bool AVRAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum, assert(RegOp.isReg() && "Operand must be a register when you're" "using 'A'..'Z' operand extracodes."); - unsigned Reg = RegOp.getReg(); + Register Reg = RegOp.getReg(); unsigned ByteNumber = ExtraCode[0] - 'A'; diff --git a/lib/Target/AVR/AVRExpandPseudoInsts.cpp b/lib/Target/AVR/AVRExpandPseudoInsts.cpp index 2776b031480..83d0f684533 100644 --- a/lib/Target/AVR/AVRExpandPseudoInsts.cpp +++ b/lib/Target/AVR/AVRExpandPseudoInsts.cpp @@ -140,8 +140,8 @@ bool AVRExpandPseudo:: expandArith(unsigned OpLo, unsigned OpHi, Block &MBB, BlockIt MBBI) { MachineInstr &MI = *MBBI; unsigned SrcLoReg, SrcHiReg, DstLoReg, DstHiReg; - unsigned DstReg = MI.getOperand(0).getReg(); - unsigned SrcReg = MI.getOperand(2).getReg(); + Register DstReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(2).getReg(); bool DstIsDead = MI.getOperand(0).isDead(); bool DstIsKill = MI.getOperand(1).isKill(); bool SrcIsKill = MI.getOperand(2).isKill(); @@ -173,8 +173,8 @@ bool AVRExpandPseudo:: expandLogic(unsigned Op, Block &MBB, BlockIt MBBI) { MachineInstr &MI = *MBBI; unsigned SrcLoReg, SrcHiReg, DstLoReg, DstHiReg; - unsigned DstReg = MI.getOperand(0).getReg(); - unsigned SrcReg = MI.getOperand(2).getReg(); + Register DstReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(2).getReg(); bool DstIsDead = MI.getOperand(0).isDead(); bool DstIsKill = MI.getOperand(1).isKill(); bool SrcIsKill = 
MI.getOperand(2).isKill(); @@ -220,7 +220,7 @@ bool AVRExpandPseudo:: expandLogicImm(unsigned Op, Block &MBB, BlockIt MBBI) { MachineInstr &MI = *MBBI; unsigned DstLoReg, DstHiReg; - unsigned DstReg = MI.getOperand(0).getReg(); + Register DstReg = MI.getOperand(0).getReg(); bool DstIsDead = MI.getOperand(0).isDead(); bool SrcIsKill = MI.getOperand(1).isKill(); bool ImpIsDead = MI.getOperand(3).isDead(); diff --git a/lib/Target/AVR/AVRFrameLowering.cpp b/lib/Target/AVR/AVRFrameLowering.cpp index 5e91bb8632c..304c8d22e22 100644 --- a/lib/Target/AVR/AVRFrameLowering.cpp +++ b/lib/Target/AVR/AVRFrameLowering.cpp @@ -323,7 +323,7 @@ static void fixStackStores(MachineBasicBlock &MBB, "Invalid register, should be SP!"); if (insertPushes) { // Replace this instruction with a push. - unsigned SrcReg = MI.getOperand(2).getReg(); + Register SrcReg = MI.getOperand(2).getReg(); bool SrcIsKill = MI.getOperand(2).isKill(); // We can't use PUSHWRr here because when expanded the order of the new diff --git a/lib/Target/AVR/AVRISelLowering.cpp b/lib/Target/AVR/AVRISelLowering.cpp index f159beee973..3d5c481b506 100644 --- a/lib/Target/AVR/AVRISelLowering.cpp +++ b/lib/Target/AVR/AVRISelLowering.cpp @@ -1517,11 +1517,11 @@ MachineBasicBlock *AVRTargetLowering::insertShift(MachineInstr &MI, unsigned ShiftAmtReg = RI.createVirtualRegister(&AVR::LD8RegClass); unsigned ShiftAmtReg2 = RI.createVirtualRegister(&AVR::LD8RegClass); - unsigned ShiftReg = RI.createVirtualRegister(RC); - unsigned ShiftReg2 = RI.createVirtualRegister(RC); - unsigned ShiftAmtSrcReg = MI.getOperand(2).getReg(); - unsigned SrcReg = MI.getOperand(1).getReg(); - unsigned DstReg = MI.getOperand(0).getReg(); + Register ShiftReg = RI.createVirtualRegister(RC); + Register ShiftReg2 = RI.createVirtualRegister(RC); + Register ShiftAmtSrcReg = MI.getOperand(2).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); + Register DstReg = MI.getOperand(0).getReg(); // BB: // cpi N, 0 @@ -1568,7 +1568,7 @@ MachineBasicBlock *AVRTargetLowering::insertShift(MachineInstr &MI, static bool isCopyMulResult(MachineBasicBlock::iterator const &I) { if (I->getOpcode() == AVR::COPY) { - unsigned SrcReg = I->getOperand(1).getReg(); + Register SrcReg = I->getOperand(1).getReg(); return (SrcReg == AVR::R0 || SrcReg == AVR::R1); } diff --git a/lib/Target/AVR/AVRRegisterInfo.cpp b/lib/Target/AVR/AVRRegisterInfo.cpp index a6b36f80485..8fce05c933b 100644 --- a/lib/Target/AVR/AVRRegisterInfo.cpp +++ b/lib/Target/AVR/AVRRegisterInfo.cpp @@ -158,7 +158,7 @@ void AVRRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, // We need to materialize the offset via an add instruction. unsigned Opcode; - unsigned DstReg = MI.getOperand(0).getReg(); + Register DstReg = MI.getOperand(0).getReg(); assert(DstReg != AVR::R29R28 && "Dest reg cannot be the frame pointer"); II++; // Skip over the FRMIDX (and now MOVW) instruction. 
diff --git a/lib/Target/BPF/BPFISelDAGToDAG.cpp b/lib/Target/BPF/BPFISelDAGToDAG.cpp index 98c6528664d..85fa1f2a6be 100644 --- a/lib/Target/BPF/BPFISelDAGToDAG.cpp +++ b/lib/Target/BPF/BPFISelDAGToDAG.cpp @@ -591,7 +591,7 @@ void BPFDAGToDAGISel::PreprocessTrunc(SDNode *Node, const MachineOperand &MOP = MI.getOperand(i); if (!MOP.isReg() || !MOP.isDef()) continue; - unsigned Reg = MOP.getReg(); + Register Reg = MOP.getReg(); if (Register::isVirtualRegister(Reg) && Reg == AndOpReg) { MII = &MI; break; diff --git a/lib/Target/BPF/BPFISelLowering.cpp b/lib/Target/BPF/BPFISelLowering.cpp index ff69941d26f..716bef461a9 100644 --- a/lib/Target/BPF/BPFISelLowering.cpp +++ b/lib/Target/BPF/BPFISelLowering.cpp @@ -236,9 +236,8 @@ SDValue BPFTargetLowering::LowerFormalArguments( } case MVT::i32: case MVT::i64: - unsigned VReg = RegInfo.createVirtualRegister(SimpleTy == MVT::i64 ? - &BPF::GPRRegClass : - &BPF::GPR32RegClass); + Register VReg = RegInfo.createVirtualRegister( + SimpleTy == MVT::i64 ? &BPF::GPRRegClass : &BPF::GPR32RegClass); RegInfo.addLiveIn(VA.getLocReg(), VReg); SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, RegVT); @@ -571,9 +570,9 @@ BPFTargetLowering::EmitSubregExt(MachineInstr &MI, MachineBasicBlock *BB, DebugLoc DL = MI.getDebugLoc(); MachineRegisterInfo &RegInfo = F->getRegInfo(); - unsigned PromotedReg0 = RegInfo.createVirtualRegister(RC); - unsigned PromotedReg1 = RegInfo.createVirtualRegister(RC); - unsigned PromotedReg2 = RegInfo.createVirtualRegister(RC); + Register PromotedReg0 = RegInfo.createVirtualRegister(RC); + Register PromotedReg1 = RegInfo.createVirtualRegister(RC); + Register PromotedReg2 = RegInfo.createVirtualRegister(RC); BuildMI(BB, DL, TII.get(BPF::MOV_32_64), PromotedReg0).addReg(Reg); BuildMI(BB, DL, TII.get(BPF::SLL_ri), PromotedReg1) .addReg(PromotedReg0).addImm(32); @@ -699,7 +698,7 @@ BPFTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, report_fatal_error("unimplemented select CondCode " + Twine(CC)); } - unsigned LHS = MI.getOperand(1).getReg(); + Register LHS = MI.getOperand(1).getReg(); bool isSignedCmp = (CC == ISD::SETGT || CC == ISD::SETGE || CC == ISD::SETLT || @@ -716,7 +715,7 @@ BPFTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, LHS = EmitSubregExt(MI, BB, LHS, isSignedCmp); if (isSelectRROp) { - unsigned RHS = MI.getOperand(2).getReg(); + Register RHS = MI.getOperand(2).getReg(); if (is32BitCmp && !HasJmp32) RHS = EmitSubregExt(MI, BB, RHS, isSignedCmp); diff --git a/lib/Target/BPF/BPFInstrInfo.cpp b/lib/Target/BPF/BPFInstrInfo.cpp index 932f718d549..6de3a4084d3 100644 --- a/lib/Target/BPF/BPFInstrInfo.cpp +++ b/lib/Target/BPF/BPFInstrInfo.cpp @@ -43,11 +43,11 @@ void BPFInstrInfo::copyPhysReg(MachineBasicBlock &MBB, } void BPFInstrInfo::expandMEMCPY(MachineBasicBlock::iterator MI) const { - unsigned DstReg = MI->getOperand(0).getReg(); - unsigned SrcReg = MI->getOperand(1).getReg(); + Register DstReg = MI->getOperand(0).getReg(); + Register SrcReg = MI->getOperand(1).getReg(); uint64_t CopyLen = MI->getOperand(2).getImm(); uint64_t Alignment = MI->getOperand(3).getImm(); - unsigned ScratchReg = MI->getOperand(4).getReg(); + Register ScratchReg = MI->getOperand(4).getReg(); MachineBasicBlock *BB = MI->getParent(); DebugLoc dl = MI->getDebugLoc(); unsigned LdOpc, StOpc; diff --git a/lib/Target/BPF/BPFMIPeephole.cpp b/lib/Target/BPF/BPFMIPeephole.cpp index ab25da4aa99..fafd2f703ad 100644 --- a/lib/Target/BPF/BPFMIPeephole.cpp +++ b/lib/Target/BPF/BPFMIPeephole.cpp @@ -104,7 +104,7 @@ bool 
BPFMIPeephole::isMovFrom32Def(MachineInstr *MovMI) if (!opnd.isReg()) return false; - unsigned Reg = opnd.getReg(); + Register Reg = opnd.getReg(); if ((Register::isVirtualRegister(Reg) && MRI->getRegClass(Reg) == &BPF::GPRRegClass)) return false; @@ -134,8 +134,8 @@ bool BPFMIPeephole::eliminateZExtSeq(void) { // SRL_ri rB, rB, 32 if (MI.getOpcode() == BPF::SRL_ri && MI.getOperand(2).getImm() == 32) { - unsigned DstReg = MI.getOperand(0).getReg(); - unsigned ShfReg = MI.getOperand(1).getReg(); + Register DstReg = MI.getOperand(0).getReg(); + Register ShfReg = MI.getOperand(1).getReg(); MachineInstr *SllMI = MRI->getVRegDef(ShfReg); LLVM_DEBUG(dbgs() << "Starting SRL found:"); @@ -159,7 +159,7 @@ bool BPFMIPeephole::eliminateZExtSeq(void) { LLVM_DEBUG(dbgs() << " Type cast Mov found:"); LLVM_DEBUG(MovMI->dump()); - unsigned SubReg = MovMI->getOperand(1).getReg(); + Register SubReg = MovMI->getOperand(1).getReg(); if (!isMovFrom32Def(MovMI)) { LLVM_DEBUG(dbgs() << " One ZExt elim sequence failed qualifying elim.\n"); @@ -254,9 +254,9 @@ bool BPFMIPreEmitPeephole::eliminateRedundantMov(void) { // register class on src (i32) and dst (i64), RA could generate useless // instruction due to this. if (MI.getOpcode() == BPF::MOV_32_64) { - unsigned dst = MI.getOperand(0).getReg(); - unsigned dst_sub = TRI->getSubReg(dst, BPF::sub_32); - unsigned src = MI.getOperand(1).getReg(); + Register dst = MI.getOperand(0).getReg(); + Register dst_sub = TRI->getSubReg(dst, BPF::sub_32); + Register src = MI.getOperand(1).getReg(); if (dst_sub != src) continue; diff --git a/lib/Target/BPF/BPFMISimplifyPatchable.cpp b/lib/Target/BPF/BPFMISimplifyPatchable.cpp index e9114d7187e..b363179e25b 100644 --- a/lib/Target/BPF/BPFMISimplifyPatchable.cpp +++ b/lib/Target/BPF/BPFMISimplifyPatchable.cpp @@ -100,8 +100,8 @@ bool BPFMISimplifyPatchable::removeLD() { if (!MI.getOperand(2).isImm() || MI.getOperand(2).getImm()) continue; - unsigned DstReg = MI.getOperand(0).getReg(); - unsigned SrcReg = MI.getOperand(1).getReg(); + Register DstReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); int64_t ImmVal = MI.getOperand(2).getImm(); MachineInstr *DefInst = MRI->getUniqueVRegDef(SrcReg); diff --git a/lib/Target/BPF/BPFRegisterInfo.cpp b/lib/Target/BPF/BPFRegisterInfo.cpp index 714af06e11d..8de81a469b8 100644 --- a/lib/Target/BPF/BPFRegisterInfo.cpp +++ b/lib/Target/BPF/BPFRegisterInfo.cpp @@ -77,7 +77,7 @@ void BPFRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!"); } - unsigned FrameReg = getFrameRegister(MF); + Register FrameReg = getFrameRegister(MF); int FrameIndex = MI.getOperand(i).getIndex(); const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); @@ -86,7 +86,7 @@ void BPFRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, WarnSize(Offset, MF, DL); MI.getOperand(i).ChangeToRegister(FrameReg, false); - unsigned reg = MI.getOperand(i - 1).getReg(); + Register reg = MI.getOperand(i - 1).getReg(); BuildMI(MBB, ++II, DL, TII.get(BPF::ADD_ri), reg) .addReg(reg) .addImm(Offset); @@ -105,7 +105,7 @@ void BPFRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, // architecture does not really support FI_ri, replace it with // MOV_rr , frame_reg // ADD_ri , imm - unsigned reg = MI.getOperand(i - 1).getReg(); + Register reg = MI.getOperand(i - 1).getReg(); BuildMI(MBB, ++II, DL, TII.get(BPF::MOV_rr), reg) .addReg(FrameReg); diff --git a/lib/Target/Hexagon/HexagonAsmPrinter.cpp 
b/lib/Target/Hexagon/HexagonAsmPrinter.cpp index b07d15609ed..3d771d388e2 100644 --- a/lib/Target/Hexagon/HexagonAsmPrinter.cpp +++ b/lib/Target/Hexagon/HexagonAsmPrinter.cpp @@ -130,7 +130,7 @@ bool HexagonAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); if (!MO.isReg()) return true; - unsigned RegNumber = MO.getReg(); + Register RegNumber = MO.getReg(); // This should be an assert in the frontend. if (Hexagon::DoubleRegsRegClass.contains(RegNumber)) RegNumber = TRI->getSubReg(RegNumber, ExtraCode[0] == 'L' ? diff --git a/lib/Target/Hexagon/HexagonBitSimplify.cpp b/lib/Target/Hexagon/HexagonBitSimplify.cpp index d33d179616a..3068fb6f962 100644 --- a/lib/Target/Hexagon/HexagonBitSimplify.cpp +++ b/lib/Target/Hexagon/HexagonBitSimplify.cpp @@ -290,7 +290,7 @@ void HexagonBitSimplify::getInstrDefs(const MachineInstr &MI, for (auto &Op : MI.operands()) { if (!Op.isReg() || !Op.isDef()) continue; - unsigned R = Op.getReg(); + Register R = Op.getReg(); if (!Register::isVirtualRegister(R)) continue; Defs.insert(R); @@ -302,7 +302,7 @@ void HexagonBitSimplify::getInstrUses(const MachineInstr &MI, for (auto &Op : MI.operands()) { if (!Op.isReg() || !Op.isUse()) continue; - unsigned R = Op.getReg(); + Register R = Op.getReg(); if (!Register::isVirtualRegister(R)) continue; Uses.insert(R); @@ -976,7 +976,7 @@ bool DeadCodeElimination::isDead(unsigned R) const { continue; if (UseI->isPHI()) { assert(!UseI->getOperand(0).getSubReg()); - unsigned DR = UseI->getOperand(0).getReg(); + Register DR = UseI->getOperand(0).getReg(); if (DR == R) continue; } @@ -1015,7 +1015,7 @@ bool DeadCodeElimination::runOnNode(MachineDomTreeNode *N) { for (auto &Op : MI->operands()) { if (!Op.isReg() || !Op.isDef()) continue; - unsigned R = Op.getReg(); + Register R = Op.getReg(); if (!Register::isVirtualRegister(R) || !isDead(R)) { AllDead = false; break; @@ -1217,7 +1217,7 @@ bool RedundantInstrElimination::computeUsedBits(unsigned Reg, BitVector &Bits) { return false; MachineInstr &UseI = *I->getParent(); if (UseI.isPHI() || UseI.isCopy()) { - unsigned DefR = UseI.getOperand(0).getReg(); + Register DefR = UseI.getOperand(0).getReg(); if (!Register::isVirtualRegister(DefR)) return false; Pending.push_back(DefR); @@ -1342,7 +1342,7 @@ bool RedundantInstrElimination::processBlock(MachineBasicBlock &B, // If found, replace the instruction with a COPY. const DebugLoc &DL = MI->getDebugLoc(); const TargetRegisterClass *FRC = HBS::getFinalVRegClass(RD, MRI); - unsigned NewR = MRI.createVirtualRegister(FRC); + Register NewR = MRI.createVirtualRegister(FRC); MachineInstr *CopyI = BuildMI(B, At, DL, HII.get(TargetOpcode::COPY), NewR) .addReg(RS.Reg, 0, RS.Sub); @@ -1409,7 +1409,7 @@ bool ConstGeneration::isTfrConst(const MachineInstr &MI) { // register class and the actual value being transferred. 
unsigned ConstGeneration::genTfrConst(const TargetRegisterClass *RC, int64_t C, MachineBasicBlock &B, MachineBasicBlock::iterator At, DebugLoc &DL) { - unsigned Reg = MRI.createVirtualRegister(RC); + Register Reg = MRI.createVirtualRegister(RC); if (RC == &Hexagon::IntRegsRegClass) { BuildMI(B, At, DL, HII.get(Hexagon::A2_tfrsi), Reg) .addImm(int32_t(C)); @@ -1606,7 +1606,7 @@ bool CopyGeneration::processBlock(MachineBasicBlock &B, auto *FRC = HBS::getFinalVRegClass(R, MRI); if (findMatch(R, MR, AVB)) { - unsigned NewR = MRI.createVirtualRegister(FRC); + Register NewR = MRI.createVirtualRegister(FRC); BuildMI(B, At, DL, HII.get(TargetOpcode::COPY), NewR) .addReg(MR.Reg, 0, MR.Sub); BT.put(BitTracker::RegisterRef(NewR), BT.get(MR)); @@ -1625,7 +1625,7 @@ bool CopyGeneration::processBlock(MachineBasicBlock &B, BitTracker::RegisterRef ML, MH; if (findMatch(TL, ML, AVB) && findMatch(TH, MH, AVB)) { auto *FRC = HBS::getFinalVRegClass(R, MRI); - unsigned NewR = MRI.createVirtualRegister(FRC); + Register NewR = MRI.createVirtualRegister(FRC); BuildMI(B, At, DL, HII.get(TargetOpcode::REG_SEQUENCE), NewR) .addReg(ML.Reg, 0, ML.Sub) .addImm(SubLo) @@ -2022,7 +2022,7 @@ bool BitSimplification::genPackhl(MachineInstr *MI, return false; MachineBasicBlock &B = *MI->getParent(); - unsigned NewR = MRI.createVirtualRegister(&Hexagon::DoubleRegsRegClass); + Register NewR = MRI.createVirtualRegister(&Hexagon::DoubleRegsRegClass); DebugLoc DL = MI->getDebugLoc(); auto At = MI->isPHI() ? B.getFirstNonPHI() : MachineBasicBlock::iterator(MI); @@ -2094,7 +2094,7 @@ bool BitSimplification::genCombineHalf(MachineInstr *MI, MachineBasicBlock &B = *MI->getParent(); DebugLoc DL = MI->getDebugLoc(); - unsigned NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); + Register NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); auto At = MI->isPHI() ? B.getFirstNonPHI() : MachineBasicBlock::iterator(MI); BuildMI(B, At, DL, HII.get(COpc), NewR) @@ -2151,7 +2151,7 @@ bool BitSimplification::genExtractLow(MachineInstr *MI, if (!validateReg(RS, NewOpc, 1)) continue; - unsigned NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); + Register NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); auto At = MI->isPHI() ? B.getFirstNonPHI() : MachineBasicBlock::iterator(MI); auto MIB = BuildMI(B, At, DL, HII.get(NewOpc), NewR) @@ -2365,7 +2365,7 @@ bool BitSimplification::simplifyTstbit(MachineInstr *MI, return true; } } else if (V.is(0) || V.is(1)) { - unsigned NewR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass); + Register NewR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass); unsigned NewOpc = V.is(0) ? Hexagon::PS_false : Hexagon::PS_true; BuildMI(B, At, DL, HII.get(NewOpc), NewR); HBS::replaceReg(RD.Reg, NewR, MRI); @@ -2538,7 +2538,7 @@ bool BitSimplification::simplifyExtractLow(MachineInstr *MI, DebugLoc DL = MI->getDebugLoc(); MachineBasicBlock &B = *MI->getParent(); - unsigned NewR = MRI.createVirtualRegister(FRC); + Register NewR = MRI.createVirtualRegister(FRC); auto At = MI->isPHI() ? 
B.getFirstNonPHI() : MachineBasicBlock::iterator(MI); auto MIB = BuildMI(B, At, DL, HII.get(ExtOpc), NewR) @@ -2609,8 +2609,8 @@ bool BitSimplification::simplifyRCmp0(MachineInstr *MI, KnownNZ = true; } - auto ReplaceWithConst = [&] (int C) { - unsigned NewR = MRI.createVirtualRegister(FRC); + auto ReplaceWithConst = [&](int C) { + Register NewR = MRI.createVirtualRegister(FRC); BuildMI(B, At, DL, HII.get(Hexagon::A2_tfrsi), NewR) .addImm(C); HBS::replaceReg(RD.Reg, NewR, MRI); @@ -2675,7 +2675,7 @@ bool BitSimplification::simplifyRCmp0(MachineInstr *MI, // replace the comparison with a C2_muxii, using the same predicate // register, but with operands substituted with 0/1 accordingly. if ((KnownZ1 || KnownNZ1) && (KnownZ2 || KnownNZ2)) { - unsigned NewR = MRI.createVirtualRegister(FRC); + Register NewR = MRI.createVirtualRegister(FRC); BuildMI(B, At, DL, HII.get(Hexagon::C2_muxii), NewR) .addReg(InpDef->getOperand(1).getReg()) .addImm(KnownZ1 == (Opc == Hexagon::A4_rcmpeqi)) @@ -3068,7 +3068,7 @@ void HexagonLoopRescheduling::moveGroup(InstrGroup &G, MachineBasicBlock &LB, DenseMap<unsigned,unsigned> RegMap; const TargetRegisterClass *PhiRC = MRI->getRegClass(NewPredR); - unsigned PhiR = MRI->createVirtualRegister(PhiRC); + Register PhiR = MRI->createVirtualRegister(PhiRC); BuildMI(LB, At, At->getDebugLoc(), HII->get(TargetOpcode::PHI), PhiR) .addReg(NewPredR) .addMBB(&PB) @@ -3080,7 +3080,7 @@ void HexagonLoopRescheduling::moveGroup(InstrGroup &G, MachineBasicBlock &LB, const MachineInstr *SI = G.Ins[i-1]; unsigned DR = getDefReg(SI); const TargetRegisterClass *RC = MRI->getRegClass(DR); - unsigned NewDR = MRI->createVirtualRegister(RC); + Register NewDR = MRI->createVirtualRegister(RC); DebugLoc DL = SI->getDebugLoc(); auto MIB = BuildMI(LB, At, DL, HII->get(SI->getOpcode()), NewDR); diff --git a/lib/Target/Hexagon/HexagonBitTracker.cpp b/lib/Target/Hexagon/HexagonBitTracker.cpp index 80ef3c1e158..ebd060ce503 100644 --- a/lib/Target/Hexagon/HexagonBitTracker.cpp +++ b/lib/Target/Hexagon/HexagonBitTracker.cpp @@ -1042,7 +1042,7 @@ unsigned HexagonEvaluator::getUniqueDefVReg(const MachineInstr &MI) const { for (const MachineOperand &Op : MI.operands()) { if (!Op.isReg() || !Op.isDef()) continue; - unsigned R = Op.getReg(); + Register R = Op.getReg(); if (!Register::isVirtualRegister(R)) continue; if (DefReg != 0) diff --git a/lib/Target/Hexagon/HexagonConstExtenders.cpp b/lib/Target/Hexagon/HexagonConstExtenders.cpp index c900851d739..ddc9b847ef1 100644 --- a/lib/Target/Hexagon/HexagonConstExtenders.cpp +++ b/lib/Target/Hexagon/HexagonConstExtenders.cpp @@ -1525,7 +1525,7 @@ void HCE::calculatePlacement(const ExtenderInit &ExtI, const IndexList &Refs, } HCE::Register HCE::insertInitializer(Loc DefL, const ExtenderInit &ExtI) { - unsigned DefR = MRI->createVirtualRegister(&Hexagon::IntRegsRegClass); + llvm::Register DefR = MRI->createVirtualRegister(&Hexagon::IntRegsRegClass); MachineBasicBlock &MBB = *DefL.Block; MachineBasicBlock::iterator At = DefL.At; DebugLoc dl = DefL.Block->findDebugLoc(DefL.At); diff --git a/lib/Target/Hexagon/HexagonConstPropagation.cpp b/lib/Target/Hexagon/HexagonConstPropagation.cpp index 6724093bf48..a82501cabb9 100644 --- a/lib/Target/Hexagon/HexagonConstPropagation.cpp +++ b/lib/Target/Hexagon/HexagonConstPropagation.cpp @@ -2813,7 +2813,7 @@ bool HexagonConstEvaluator::rewriteHexConstDefs(MachineInstr &MI, for (const MachineOperand &MO : MI.operands()) { if (!MO.isReg() || !MO.isUse() || MO.isImplicit()) continue; - unsigned R = MO.getReg(); + Register R = MO.getReg(); dbgs()
<< printReg(R, &TRI) << ": " << Inputs.get(R) << "\n"; } } @@ -2831,7 +2831,7 @@ bool HexagonConstEvaluator::rewriteHexConstDefs(MachineInstr &MI, for (const MachineOperand &MO : MI.operands()) { if (!MO.isReg() || !MO.isDef()) continue; - unsigned R = MO.getReg(); + Register R = MO.getReg(); if (!Register::isVirtualRegister(R)) continue; assert(!MO.getSubReg()); @@ -2871,7 +2871,7 @@ bool HexagonConstEvaluator::rewriteHexConstDefs(MachineInstr &MI, const MCInstrDesc *NewD = (Ps & P::Zero) ? &HII.get(Hexagon::PS_false) : &HII.get(Hexagon::PS_true); - unsigned NewR = MRI->createVirtualRegister(PredRC); + Register NewR = MRI->createVirtualRegister(PredRC); const MachineInstrBuilder &MIB = BuildMI(B, At, DL, *NewD, NewR); (void)MIB; #ifndef NDEBUG @@ -2893,7 +2893,7 @@ bool HexagonConstEvaluator::rewriteHexConstDefs(MachineInstr &MI, NewRC = &Hexagon::IntRegsRegClass; else NewRC = &Hexagon::DoubleRegsRegClass; - unsigned NewR = MRI->createVirtualRegister(NewRC); + Register NewR = MRI->createVirtualRegister(NewRC); const MachineInstr *NewMI; if (W == 32) { @@ -3009,7 +3009,7 @@ bool HexagonConstEvaluator::rewriteHexConstUses(MachineInstr &MI, if (V < 0) V = -V; const TargetRegisterClass *RC = MRI->getRegClass(DefR.Reg); - unsigned NewR = MRI->createVirtualRegister(RC); + Register NewR = MRI->createVirtualRegister(RC); const MachineOperand &Src1 = MI.getOperand(1); NewMI = BuildMI(B, At, DL, D, NewR) .addReg(Src1.getReg(), getRegState(Src1), Src1.getSubReg()) diff --git a/lib/Target/Hexagon/HexagonCopyToCombine.cpp b/lib/Target/Hexagon/HexagonCopyToCombine.cpp index db0bd3b40ca..394a329ac44 100644 --- a/lib/Target/Hexagon/HexagonCopyToCombine.cpp +++ b/lib/Target/Hexagon/HexagonCopyToCombine.cpp @@ -133,8 +133,8 @@ static bool isCombinableInstType(MachineInstr &MI, const HexagonInstrInfo *TII, const MachineOperand &Op1 = MI.getOperand(1); assert(Op0.isReg() && Op1.isReg()); - unsigned DestReg = Op0.getReg(); - unsigned SrcReg = Op1.getReg(); + Register DestReg = Op0.getReg(); + Register SrcReg = Op1.getReg(); return Hexagon::IntRegsRegClass.contains(DestReg) && Hexagon::IntRegsRegClass.contains(SrcReg); } @@ -146,7 +146,7 @@ static bool isCombinableInstType(MachineInstr &MI, const HexagonInstrInfo *TII, const MachineOperand &Op1 = MI.getOperand(1); assert(Op0.isReg()); - unsigned DestReg = Op0.getReg(); + Register DestReg = Op0.getReg(); // Ensure that TargetFlags are MO_NO_FLAG for a global. This is a // workaround for an ABI bug that prevents GOT relocations on combine // instructions @@ -265,7 +265,7 @@ bool HexagonCopyToCombine::isSafeToMoveTogether(MachineInstr &I1, unsigned I1DestReg, unsigned I2DestReg, bool &DoInsertAtI1) { - unsigned I2UseReg = UseReg(I2.getOperand(1)); + Register I2UseReg = UseReg(I2.getOperand(1)); // It is not safe to move I1 and I2 into one combine if I2 has a true // dependence on I1. @@ -332,7 +332,7 @@ bool HexagonCopyToCombine::isSafeToMoveTogether(MachineInstr &I1, // At O3 we got better results (dhrystone) by being more conservative here. if (!ShouldCombineAggressively) End = std::next(MachineBasicBlock::iterator(I2)); - unsigned I1UseReg = UseReg(I1.getOperand(1)); + Register I1UseReg = UseReg(I1.getOperand(1)); // Track killed operands. If we move across an instruction that kills our // operand, we need to update the kill information on the moved I1. It kills // the operand now. @@ -410,7 +410,7 @@ HexagonCopyToCombine::findPotentialNewifiableTFRs(MachineBasicBlock &BB) { continue; // Look for the defining instruction. 
- unsigned Reg = Op.getReg(); + Register Reg = Op.getReg(); MachineInstr *DefInst = LastDef[Reg]; if (!DefInst) continue; @@ -442,7 +442,7 @@ HexagonCopyToCombine::findPotentialNewifiableTFRs(MachineBasicBlock &BB) { if (Op.isReg()) { if (!Op.isDef() || !Op.getReg()) continue; - unsigned Reg = Op.getReg(); + Register Reg = Op.getReg(); if (Hexagon::DoubleRegsRegClass.contains(Reg)) { for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) LastDef[*SubRegs] = &MI; @@ -528,7 +528,7 @@ MachineInstr *HexagonCopyToCombine::findPairable(MachineInstr &I1, while (I2 != I1.getParent()->end() && I2->isDebugInstr()) ++I2; - unsigned I1DestReg = I1.getOperand(0).getReg(); + Register I1DestReg = I1.getOperand(0).getReg(); for (MachineBasicBlock::iterator End = I1.getParent()->end(); I2 != End; ++I2) { @@ -544,7 +544,7 @@ MachineInstr *HexagonCopyToCombine::findPairable(MachineInstr &I1, if (ShouldCombineAggressively && PotentiallyNewifiableTFR.count(&*I2)) continue; - unsigned I2DestReg = I2->getOperand(0).getReg(); + Register I2DestReg = I2->getOperand(0).getReg(); // Check that registers are adjacent and that the first destination register // is even. @@ -579,8 +579,8 @@ void HexagonCopyToCombine::combine(MachineInstr &I1, MachineInstr &I2, ++MI; // Figure out whether I1 or I2 goes into the lowreg part. - unsigned I1DestReg = I1.getOperand(0).getReg(); - unsigned I2DestReg = I2.getOperand(0).getReg(); + Register I1DestReg = I1.getOperand(0).getReg(); + Register I2DestReg = I2.getOperand(0).getReg(); bool IsI1Loreg = (I2DestReg - I1DestReg) == 1; unsigned LoRegDef = IsI1Loreg ? I1DestReg : I2DestReg; unsigned SubLo; @@ -758,7 +758,7 @@ void HexagonCopyToCombine::emitCombineIR(MachineBasicBlock::iterator &InsertPt, unsigned DoubleDestReg, MachineOperand &HiOperand, MachineOperand &LoOperand) { - unsigned LoReg = LoOperand.getReg(); + Register LoReg = LoOperand.getReg(); unsigned LoRegKillFlag = getKillRegState(LoOperand.isKill()); DebugLoc DL = InsertPt->getDebugLoc(); @@ -807,7 +807,7 @@ void HexagonCopyToCombine::emitCombineRI(MachineBasicBlock::iterator &InsertPt, MachineOperand &HiOperand, MachineOperand &LoOperand) { unsigned HiRegKillFlag = getKillRegState(HiOperand.isKill()); - unsigned HiReg = HiOperand.getReg(); + Register HiReg = HiOperand.getReg(); DebugLoc DL = InsertPt->getDebugLoc(); MachineBasicBlock *BB = InsertPt->getParent(); @@ -857,8 +857,8 @@ void HexagonCopyToCombine::emitCombineRR(MachineBasicBlock::iterator &InsertPt, MachineOperand &LoOperand) { unsigned LoRegKillFlag = getKillRegState(LoOperand.isKill()); unsigned HiRegKillFlag = getKillRegState(HiOperand.isKill()); - unsigned LoReg = LoOperand.getReg(); - unsigned HiReg = HiOperand.getReg(); + Register LoReg = LoOperand.getReg(); + Register HiReg = HiOperand.getReg(); DebugLoc DL = InsertPt->getDebugLoc(); MachineBasicBlock *BB = InsertPt->getParent(); diff --git a/lib/Target/Hexagon/HexagonEarlyIfConv.cpp b/lib/Target/Hexagon/HexagonEarlyIfConv.cpp index 36a5fadc781..0844fb8a862 100644 --- a/lib/Target/Hexagon/HexagonEarlyIfConv.cpp +++ b/lib/Target/Hexagon/HexagonEarlyIfConv.cpp @@ -250,7 +250,7 @@ bool HexagonEarlyIfConversion::matchFlowPattern(MachineBasicBlock *B, unsigned Opc = T1I->getOpcode(); if (Opc != Hexagon::J2_jumpt && Opc != Hexagon::J2_jumpf) return false; - unsigned PredR = T1I->getOperand(0).getReg(); + Register PredR = T1I->getOperand(0).getReg(); // Get the layout successor, or 0 if B does not have one. 
MachineFunction::iterator NextBI = std::next(MachineFunction::iterator(B)); @@ -384,7 +384,7 @@ bool HexagonEarlyIfConversion::isValidCandidate(const MachineBasicBlock *B) for (const MachineOperand &MO : MI.operands()) { if (!MO.isReg() || !MO.isDef()) continue; - unsigned R = MO.getReg(); + Register R = MO.getReg(); if (!Register::isVirtualRegister(R)) continue; if (!isPredicate(R)) @@ -401,7 +401,7 @@ bool HexagonEarlyIfConversion::usesUndefVReg(const MachineInstr *MI) const { for (const MachineOperand &MO : MI->operands()) { if (!MO.isReg() || !MO.isUse()) continue; - unsigned R = MO.getReg(); + Register R = MO.getReg(); if (!Register::isVirtualRegister(R)) continue; const MachineInstr *DefI = MRI->getVRegDef(R); @@ -437,7 +437,7 @@ bool HexagonEarlyIfConversion::isValid(const FlowPattern &FP) const { break; if (usesUndefVReg(&MI)) return false; - unsigned DefR = MI.getOperand(0).getReg(); + Register DefR = MI.getOperand(0).getReg(); if (isPredicate(DefR)) return false; } @@ -491,7 +491,7 @@ unsigned HexagonEarlyIfConversion::countPredicateDefs( for (const MachineOperand &MO : MI.operands()) { if (!MO.isReg() || !MO.isDef()) continue; - unsigned R = MO.getReg(); + Register R = MO.getReg(); if (!Register::isVirtualRegister(R)) continue; if (isPredicate(R)) @@ -798,7 +798,7 @@ unsigned HexagonEarlyIfConversion::buildMux(MachineBasicBlock *B, const MCInstrDesc &D = HII->get(Opc); DebugLoc DL = B->findBranchDebugLoc(); - unsigned MuxR = MRI->createVirtualRegister(DRC); + Register MuxR = MRI->createVirtualRegister(DRC); BuildMI(*B, At, DL, D, MuxR) .addReg(PredR) .addReg(TR, 0, TSR) @@ -837,7 +837,7 @@ void HexagonEarlyIfConversion::updatePhiNodes(MachineBasicBlock *WhereB, unsigned MuxR = 0, MuxSR = 0; if (TR && FR) { - unsigned DR = PN->getOperand(0).getReg(); + Register DR = PN->getOperand(0).getReg(); const TargetRegisterClass *RC = MRI->getRegClass(DR); MuxR = buildMux(FP.SplitB, FP.SplitB->getFirstTerminator(), RC, FP.PredR, TR, TSR, FR, FSR); @@ -988,8 +988,8 @@ void HexagonEarlyIfConversion::eliminatePhis(MachineBasicBlock *B) { MachineInstr *PN = &*I; assert(PN->getNumOperands() == 3 && "Invalid phi node"); MachineOperand &UO = PN->getOperand(1); - unsigned UseR = UO.getReg(), UseSR = UO.getSubReg(); - unsigned DefR = PN->getOperand(0).getReg(); + Register UseR = UO.getReg(), UseSR = UO.getSubReg(); + Register DefR = PN->getOperand(0).getReg(); unsigned NewR = UseR; if (UseSR) { // MRI.replaceVregUsesWith does not allow to update the subregister, diff --git a/lib/Target/Hexagon/HexagonExpandCondsets.cpp b/lib/Target/Hexagon/HexagonExpandCondsets.cpp index 6175b29135b..8984ee82960 100644 --- a/lib/Target/Hexagon/HexagonExpandCondsets.cpp +++ b/lib/Target/Hexagon/HexagonExpandCondsets.cpp @@ -372,7 +372,7 @@ void HexagonExpandCondsets::updateDeadsInRange(unsigned Reg, LaneBitmask LM, auto IsRegDef = [this,Reg,LM] (MachineOperand &Op) -> std::pair<bool,bool> { if (!Op.isReg() || !Op.isDef()) return { false, false }; - unsigned DR = Op.getReg(), DSR = Op.getSubReg(); + Register DR = Op.getReg(), DSR = Op.getSubReg(); if (!Register::isVirtualRegister(DR) || DR != Reg) return { false, false }; LaneBitmask SLM = getLaneMask(DR, DSR); @@ -589,7 +589,7 @@ unsigned HexagonExpandCondsets::getCondTfrOpcode(const MachineOperand &SO, assert(Register::isPhysicalRegister(RS.Reg)); PhysR = RS.Reg; } - unsigned PhysS = (RS.Sub == 0) ? PhysR : TRI->getSubReg(PhysR, RS.Sub); + Register PhysS = (RS.Sub == 0) ?
PhysR : TRI->getSubReg(PhysR, RS.Sub); const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(PhysS); switch (TRI->getRegSizeInBits(*RC)) { case 32: @@ -671,7 +671,7 @@ bool HexagonExpandCondsets::split(MachineInstr &MI, MachineOperand &MD = MI.getOperand(0); // Definition MachineOperand &MP = MI.getOperand(1); // Predicate register assert(MD.isDef()); - unsigned DR = MD.getReg(), DSR = MD.getSubReg(); + Register DR = MD.getReg(), DSR = MD.getSubReg(); bool ReadUndef = MD.isUndef(); MachineBasicBlock::iterator At = MI; @@ -954,7 +954,7 @@ bool HexagonExpandCondsets::predicate(MachineInstr &TfrI, bool Cond, return false; RegisterRef RT(MS); - unsigned PredR = MP.getReg(); + Register PredR = MP.getReg(); MachineInstr *DefI = getReachingDefForPred(RT, TfrI, PredR, Cond); if (!DefI || !isPredicable(DefI)) return false; diff --git a/lib/Target/Hexagon/HexagonFrameLowering.cpp b/lib/Target/Hexagon/HexagonFrameLowering.cpp index 5d2b5b6e536..0b7f416f3bb 100644 --- a/lib/Target/Hexagon/HexagonFrameLowering.cpp +++ b/lib/Target/Hexagon/HexagonFrameLowering.cpp @@ -303,7 +303,7 @@ static bool needsStackFrame(const MachineBasicBlock &MBB, const BitVector &CSR, if (MO.isFI()) return true; if (MO.isReg()) { - unsigned R = MO.getReg(); + Register R = MO.getReg(); // Virtual registers will need scavenging, which then may require // a stack slot. if (Register::isVirtualRegister(R)) @@ -973,8 +973,8 @@ void HexagonFrameLowering::insertCFIInstructionsAt(MachineBasicBlock &MBB, // understand paired registers for cfi_offset. // Eg .cfi_offset r1:0, -64 - unsigned HiReg = HRI.getSubReg(Reg, Hexagon::isub_hi); - unsigned LoReg = HRI.getSubReg(Reg, Hexagon::isub_lo); + Register HiReg = HRI.getSubReg(Reg, Hexagon::isub_hi); + Register LoReg = HRI.getSubReg(Reg, Hexagon::isub_lo); unsigned HiDwarfReg = HRI.getDwarfRegNum(HiReg, true); unsigned LoDwarfReg = HRI.getDwarfRegNum(LoReg, true); auto OffHi = MCCFIInstruction::createOffset(FrameLabel, HiDwarfReg, @@ -1570,13 +1570,13 @@ bool HexagonFrameLowering::expandCopy(MachineBasicBlock &B, const HexagonInstrInfo &HII, SmallVectorImpl &NewRegs) const { MachineInstr *MI = &*It; DebugLoc DL = MI->getDebugLoc(); - unsigned DstR = MI->getOperand(0).getReg(); - unsigned SrcR = MI->getOperand(1).getReg(); + Register DstR = MI->getOperand(0).getReg(); + Register SrcR = MI->getOperand(1).getReg(); if (!Hexagon::ModRegsRegClass.contains(DstR) || !Hexagon::ModRegsRegClass.contains(SrcR)) return false; - unsigned TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); + Register TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); BuildMI(B, It, DL, HII.get(TargetOpcode::COPY), TmpR).add(MI->getOperand(1)); BuildMI(B, It, DL, HII.get(TargetOpcode::COPY), DstR) .addReg(TmpR, RegState::Kill); @@ -1595,13 +1595,13 @@ bool HexagonFrameLowering::expandStoreInt(MachineBasicBlock &B, DebugLoc DL = MI->getDebugLoc(); unsigned Opc = MI->getOpcode(); - unsigned SrcR = MI->getOperand(2).getReg(); + Register SrcR = MI->getOperand(2).getReg(); bool IsKill = MI->getOperand(2).isKill(); int FI = MI->getOperand(0).getIndex(); // TmpR = C2_tfrpr SrcR if SrcR is a predicate register // TmpR = A2_tfrcrr SrcR if SrcR is a modifier register - unsigned TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); + Register TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); unsigned TfrOpc = (Opc == Hexagon::STriw_pred) ? 
Hexagon::C2_tfrpr : Hexagon::A2_tfrcrr; BuildMI(B, It, DL, HII.get(TfrOpc), TmpR) @@ -1628,11 +1628,11 @@ bool HexagonFrameLowering::expandLoadInt(MachineBasicBlock &B, DebugLoc DL = MI->getDebugLoc(); unsigned Opc = MI->getOpcode(); - unsigned DstR = MI->getOperand(0).getReg(); + Register DstR = MI->getOperand(0).getReg(); int FI = MI->getOperand(1).getIndex(); // TmpR = L2_loadri_io FI, 0 - unsigned TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); + Register TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); BuildMI(B, It, DL, HII.get(Hexagon::L2_loadri_io), TmpR) .addFrameIndex(FI) .addImm(0) @@ -1658,7 +1658,7 @@ bool HexagonFrameLowering::expandStoreVecPred(MachineBasicBlock &B, return false; DebugLoc DL = MI->getDebugLoc(); - unsigned SrcR = MI->getOperand(2).getReg(); + Register SrcR = MI->getOperand(2).getReg(); bool IsKill = MI->getOperand(2).isKill(); int FI = MI->getOperand(0).getIndex(); auto *RC = &Hexagon::HvxVRRegClass; @@ -1667,8 +1667,8 @@ bool HexagonFrameLowering::expandStoreVecPred(MachineBasicBlock &B, // TmpR0 = A2_tfrsi 0x01010101 // TmpR1 = V6_vandqrt Qx, TmpR0 // store FI, 0, TmpR1 - unsigned TmpR0 = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); - unsigned TmpR1 = MRI.createVirtualRegister(RC); + Register TmpR0 = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); + Register TmpR1 = MRI.createVirtualRegister(RC); BuildMI(B, It, DL, HII.get(Hexagon::A2_tfrsi), TmpR0) .addImm(0x01010101); @@ -1695,15 +1695,15 @@ bool HexagonFrameLowering::expandLoadVecPred(MachineBasicBlock &B, return false; DebugLoc DL = MI->getDebugLoc(); - unsigned DstR = MI->getOperand(0).getReg(); + Register DstR = MI->getOperand(0).getReg(); int FI = MI->getOperand(1).getIndex(); auto *RC = &Hexagon::HvxVRRegClass; // TmpR0 = A2_tfrsi 0x01010101 // TmpR1 = load FI, 0 // DstR = V6_vandvrt TmpR1, TmpR0 - unsigned TmpR0 = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); - unsigned TmpR1 = MRI.createVirtualRegister(RC); + Register TmpR0 = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); + Register TmpR1 = MRI.createVirtualRegister(RC); BuildMI(B, It, DL, HII.get(Hexagon::A2_tfrsi), TmpR0) .addImm(0x01010101); @@ -1745,9 +1745,9 @@ bool HexagonFrameLowering::expandStoreVec2(MachineBasicBlock &B, } DebugLoc DL = MI->getDebugLoc(); - unsigned SrcR = MI->getOperand(2).getReg(); - unsigned SrcLo = HRI.getSubReg(SrcR, Hexagon::vsub_lo); - unsigned SrcHi = HRI.getSubReg(SrcR, Hexagon::vsub_hi); + Register SrcR = MI->getOperand(2).getReg(); + Register SrcLo = HRI.getSubReg(SrcR, Hexagon::vsub_lo); + Register SrcHi = HRI.getSubReg(SrcR, Hexagon::vsub_hi); bool IsKill = MI->getOperand(2).isKill(); int FI = MI->getOperand(0).getIndex(); @@ -1793,9 +1793,9 @@ bool HexagonFrameLowering::expandLoadVec2(MachineBasicBlock &B, return false; DebugLoc DL = MI->getDebugLoc(); - unsigned DstR = MI->getOperand(0).getReg(); - unsigned DstHi = HRI.getSubReg(DstR, Hexagon::vsub_hi); - unsigned DstLo = HRI.getSubReg(DstR, Hexagon::vsub_lo); + Register DstR = MI->getOperand(0).getReg(); + Register DstHi = HRI.getSubReg(DstR, Hexagon::vsub_hi); + Register DstLo = HRI.getSubReg(DstR, Hexagon::vsub_lo); int FI = MI->getOperand(1).getIndex(); unsigned Size = HRI.getSpillSize(Hexagon::HvxVRRegClass); @@ -1834,7 +1834,7 @@ bool HexagonFrameLowering::expandStoreVec(MachineBasicBlock &B, auto &HRI = *MF.getSubtarget().getRegisterInfo(); DebugLoc DL = MI->getDebugLoc(); - unsigned SrcR = MI->getOperand(2).getReg(); + Register SrcR = MI->getOperand(2).getReg(); bool IsKill = 
MI->getOperand(2).isKill(); int FI = MI->getOperand(0).getIndex(); @@ -1863,7 +1863,7 @@ bool HexagonFrameLowering::expandLoadVec(MachineBasicBlock &B, auto &HRI = *MF.getSubtarget().getRegisterInfo(); DebugLoc DL = MI->getDebugLoc(); - unsigned DstR = MI->getOperand(0).getReg(); + Register DstR = MI->getOperand(0).getReg(); int FI = MI->getOperand(1).getIndex(); unsigned NeedAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass); @@ -2299,7 +2299,7 @@ void HexagonFrameLowering::optimizeSpillSlots(MachineFunction &MF, int TFI; if (!HII.isLoadFromStackSlot(MI, TFI) || TFI != FI) continue; - unsigned DstR = MI.getOperand(0).getReg(); + Register DstR = MI.getOperand(0).getReg(); assert(MI.getOperand(0).getSubReg() == 0); MachineInstr *CopyOut = nullptr; if (DstR != FoundR) { diff --git a/lib/Target/Hexagon/HexagonGenInsert.cpp b/lib/Target/Hexagon/HexagonGenInsert.cpp index 6c996c0c8a8..48881e02f4d 100644 --- a/lib/Target/Hexagon/HexagonGenInsert.cpp +++ b/lib/Target/Hexagon/HexagonGenInsert.cpp @@ -606,7 +606,7 @@ void HexagonGenInsert::buildOrderingMF(RegisterOrdering &RO) const { for (unsigned i = 0, n = MI->getNumOperands(); i < n; ++i) { const MachineOperand &MO = MI->getOperand(i); if (MO.isReg() && MO.isDef()) { - unsigned R = MO.getReg(); + Register R = MO.getReg(); assert(MO.getSubReg() == 0 && "Unexpected subregister in definition"); if (Register::isVirtualRegister(R)) RO.insert(std::make_pair(R, Index++)); @@ -724,7 +724,7 @@ void HexagonGenInsert::getInstrDefs(const MachineInstr *MI, const MachineOperand &MO = MI->getOperand(i); if (!MO.isReg() || !MO.isDef()) continue; - unsigned R = MO.getReg(); + Register R = MO.getReg(); if (!Register::isVirtualRegister(R)) continue; Defs.insert(R); @@ -737,7 +737,7 @@ void HexagonGenInsert::getInstrUses(const MachineInstr *MI, const MachineOperand &MO = MI->getOperand(i); if (!MO.isReg() || !MO.isUse()) continue; - unsigned R = MO.getReg(); + Register R = MO.getReg(); if (!Register::isVirtualRegister(R)) continue; Uses.insert(R); @@ -1399,7 +1399,7 @@ bool HexagonGenInsert::generateInserts() { for (IFMapType::iterator I = IFMap.begin(), E = IFMap.end(); I != E; ++I) { unsigned VR = I->first; const TargetRegisterClass *RC = MRI->getRegClass(VR); - unsigned NewVR = MRI->createVirtualRegister(RC); + Register NewVR = MRI->createVirtualRegister(RC); RegMap[VR] = NewVR; } @@ -1477,7 +1477,7 @@ bool HexagonGenInsert::removeDeadCode(MachineDomTreeNode *N) { for (const MachineOperand &MO : MI->operands()) { if (!MO.isReg() || !MO.isDef()) continue; - unsigned R = MO.getReg(); + Register R = MO.getReg(); if (!Register::isVirtualRegister(R) || !MRI->use_nodbg_empty(R)) { AllDead = false; break; diff --git a/lib/Target/Hexagon/HexagonGenMux.cpp b/lib/Target/Hexagon/HexagonGenMux.cpp index cdafbc20ab8..b559e7bbbb6 100644 --- a/lib/Target/Hexagon/HexagonGenMux.cpp +++ b/lib/Target/Hexagon/HexagonGenMux.cpp @@ -171,7 +171,7 @@ void HexagonGenMux::getDefsUses(const MachineInstr *MI, BitVector &Defs, for (const MachineOperand &MO : MI->operands()) { if (!MO.isReg() || MO.isImplicit()) continue; - unsigned R = MO.getReg(); + Register R = MO.getReg(); BitVector &Set = MO.isDef() ? 
Defs : Uses; expandReg(R, Set); } @@ -239,14 +239,14 @@ bool HexagonGenMux::genMuxInBlock(MachineBasicBlock &B) { unsigned Opc = MI->getOpcode(); if (!isCondTransfer(Opc)) continue; - unsigned DR = MI->getOperand(0).getReg(); + Register DR = MI->getOperand(0).getReg(); if (isRegPair(DR)) continue; MachineOperand &PredOp = MI->getOperand(1); if (PredOp.isUndef()) continue; - unsigned PR = PredOp.getReg(); + Register PR = PredOp.getReg(); unsigned Idx = I2X.lookup(MI); CondsetMap::iterator F = CM.find(DR); bool IfTrue = HII->isPredicatedTrue(Opc); diff --git a/lib/Target/Hexagon/HexagonGenPredicate.cpp b/lib/Target/Hexagon/HexagonGenPredicate.cpp index cd4b5fd221f..24d33c91a29 100644 --- a/lib/Target/Hexagon/HexagonGenPredicate.cpp +++ b/lib/Target/Hexagon/HexagonGenPredicate.cpp @@ -265,7 +265,7 @@ RegisterSubReg HexagonGenPredicate::getPredRegFor(const RegisterSubReg &Reg) { MachineBasicBlock &B = *DefI->getParent(); DebugLoc DL = DefI->getDebugLoc(); const TargetRegisterClass *PredRC = &Hexagon::PredRegsRegClass; - unsigned NewPR = MRI->createVirtualRegister(PredRC); + Register NewPR = MRI->createVirtualRegister(PredRC); // For convertible instructions, do not modify them, so that they can // be converted later. Generate a copy from Reg to NewPR. @@ -432,7 +432,7 @@ bool HexagonGenPredicate::convertToPredForm(MachineInstr *MI) { // Generate a copy-out: NewGPR = NewPR, and replace all uses of OutR // with NewGPR. const TargetRegisterClass *RC = MRI->getRegClass(OutR.R); - unsigned NewOutR = MRI->createVirtualRegister(RC); + Register NewOutR = MRI->createVirtualRegister(RC); BuildMI(B, MI, DL, TII->get(TargetOpcode::COPY), NewOutR) .addReg(NewPR.R, 0, NewPR.S); MRI->replaceRegWith(OutR.R, NewOutR); diff --git a/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/lib/Target/Hexagon/HexagonHardwareLoops.cpp index 5c68d223200..62291790f0f 100644 --- a/lib/Target/Hexagon/HexagonHardwareLoops.cpp +++ b/lib/Target/Hexagon/HexagonHardwareLoops.cpp @@ -435,17 +435,17 @@ bool HexagonHardwareLoops::findInductionRegister(MachineLoop *L, if (Phi->getOperand(i+1).getMBB() != Latch) continue; - unsigned PhiOpReg = Phi->getOperand(i).getReg(); + Register PhiOpReg = Phi->getOperand(i).getReg(); MachineInstr *DI = MRI->getVRegDef(PhiOpReg); if (DI->getDesc().isAdd()) { // If the register operand to the add is the PHI we're looking at, this // meets the induction pattern. 
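The findInductionRegister hunks around here walk a PHI's alternating (incoming register, predecessor block) operands and chase the definition through MachineRegisterInfo. A compact sketch of that idiom, written against the APIs this patch already uses (getReg, getVRegDef, Register::isVirtualRegister); the helper name and header paths are assumptions, not part of the patch.

#include <cassert>
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Register.h"
using namespace llvm;

// Return the instruction defining the value that flows into Phi from the
// given latch block, or nullptr if it is not a virtual register.
static MachineInstr *getLatchIncomingDef(const MachineInstr &Phi,
                                         const MachineBasicBlock *Latch,
                                         const MachineRegisterInfo &MRI) {
  assert(Phi.isPHI() && "expected a PHI");
  // PHI operands come in (register, predecessor block) pairs after the def.
  for (unsigned i = 1, n = Phi.getNumOperands(); i < n; i += 2) {
    if (Phi.getOperand(i + 1).getMBB() != Latch)
      continue;
    Register R = Phi.getOperand(i).getReg();
    // Only virtual registers have a single defining instruction to chase.
    if (!Register::isVirtualRegister(R))
      return nullptr;
    return MRI.getVRegDef(R);
  }
  return nullptr;
}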
- unsigned IndReg = DI->getOperand(1).getReg(); + Register IndReg = DI->getOperand(1).getReg(); MachineOperand &Opnd2 = DI->getOperand(2); int64_t V; if (MRI->getVRegDef(IndReg) == Phi && checkForImmediate(Opnd2, V)) { - unsigned UpdReg = DI->getOperand(0).getReg(); + Register UpdReg = DI->getOperand(0).getReg(); IndMap.insert(std::make_pair(UpdReg, std::make_pair(IndReg, V))); } } @@ -694,7 +694,7 @@ CountValue *HexagonHardwareLoops::getLoopTripCount(MachineLoop *L, Cmp = Comparison::getSwappedComparison(Cmp); if (InitialValue->isReg()) { - unsigned R = InitialValue->getReg(); + Register R = InitialValue->getReg(); MachineBasicBlock *DefBB = MRI->getVRegDef(R)->getParent(); if (!MDT->properlyDominates(DefBB, Header)) { int64_t V; @@ -704,7 +704,7 @@ CountValue *HexagonHardwareLoops::getLoopTripCount(MachineLoop *L, OldInsts.push_back(MRI->getVRegDef(R)); } if (EndValue->isReg()) { - unsigned R = EndValue->getReg(); + Register R = EndValue->getReg(); MachineBasicBlock *DefBB = MRI->getVRegDef(R)->getParent(); if (!MDT->properlyDominates(DefBB, Header)) { int64_t V; @@ -910,7 +910,7 @@ CountValue *HexagonHardwareLoops::computeCount(MachineLoop *Loop, (RegToImm ? TII->get(Hexagon::A2_subri) : TII->get(Hexagon::A2_addi)); if (RegToReg || RegToImm) { - unsigned SubR = MRI->createVirtualRegister(IntRC); + Register SubR = MRI->createVirtualRegister(IntRC); MachineInstrBuilder SubIB = BuildMI(*PH, InsertPos, DL, SubD, SubR); @@ -931,7 +931,7 @@ CountValue *HexagonHardwareLoops::computeCount(MachineLoop *Loop, EndValInstr->getOperand(2).getImm() == StartV) { DistR = EndValInstr->getOperand(1).getReg(); } else { - unsigned SubR = MRI->createVirtualRegister(IntRC); + Register SubR = MRI->createVirtualRegister(IntRC); MachineInstrBuilder SubIB = BuildMI(*PH, InsertPos, DL, SubD, SubR); SubIB.addReg(End->getReg(), 0, End->getSubReg()) @@ -950,7 +950,7 @@ CountValue *HexagonHardwareLoops::computeCount(MachineLoop *Loop, AdjSR = DistSR; } else { // Generate CountR = ADD DistR, AdjVal - unsigned AddR = MRI->createVirtualRegister(IntRC); + Register AddR = MRI->createVirtualRegister(IntRC); MCInstrDesc const &AddD = TII->get(Hexagon::A2_addi); BuildMI(*PH, InsertPos, DL, AddD, AddR) .addReg(DistR, 0, DistSR) @@ -971,7 +971,7 @@ CountValue *HexagonHardwareLoops::computeCount(MachineLoop *Loop, unsigned Shift = Log2_32(IVBump); // Generate NormR = LSR DistR, Shift. 
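The trip-count computation above repeatedly creates a fresh virtual register and then emits an instruction that defines it. The sketch below shows the bare pattern using the target-independent COPY opcode; the helper is hypothetical and only meant to show how createVirtualRegister pairs with BuildMI.

#include <cassert>
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
using namespace llvm;

// Copy SrcR into a brand-new virtual register of the same class and
// return the new register.
static Register copyIntoNewVReg(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator InsertPt,
                                const DebugLoc &DL, Register SrcR,
                                MachineRegisterInfo &MRI,
                                const TargetInstrInfo &TII) {
  assert(Register::isVirtualRegister(SrcR) &&
         "getRegClass is only valid for virtual registers");
  const TargetRegisterClass *RC = MRI.getRegClass(SrcR);
  Register NewR = MRI.createVirtualRegister(RC);
  BuildMI(MBB, InsertPt, DL, TII.get(TargetOpcode::COPY), NewR)
      .addReg(SrcR);
  return NewR;
}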
- unsigned LsrR = MRI->createVirtualRegister(IntRC); + Register LsrR = MRI->createVirtualRegister(IntRC); const MCInstrDesc &LsrD = TII->get(Hexagon::S2_lsr_i_r); BuildMI(*PH, InsertPos, DL, LsrD, LsrR) .addReg(AdjR, 0, AdjSR) @@ -1038,7 +1038,7 @@ bool HexagonHardwareLoops::isDead(const MachineInstr *MI, if (!MO.isReg() || !MO.isDef()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (MRI->use_nodbg_empty(Reg)) continue; @@ -1058,7 +1058,7 @@ bool HexagonHardwareLoops::isDead(const MachineInstr *MI, if (!OPO.isReg() || !OPO.isDef()) continue; - unsigned OPReg = OPO.getReg(); + Register OPReg = OPO.getReg(); use_nodbg_iterator nextJ; for (use_nodbg_iterator J = MRI->use_nodbg_begin(OPReg); J != End; J = nextJ) { @@ -1092,7 +1092,7 @@ void HexagonHardwareLoops::removeIfDead(MachineInstr *MI) { const MachineOperand &MO = MI->getOperand(i); if (!MO.isReg() || !MO.isDef()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); MachineRegisterInfo::use_iterator nextI; for (MachineRegisterInfo::use_iterator I = MRI->use_begin(Reg), E = MRI->use_end(); I != E; I = nextI) { @@ -1244,7 +1244,7 @@ bool HexagonHardwareLoops::convertToHardwareLoop(MachineLoop *L, if (TripCount->isReg()) { // Create a copy of the loop count register. - unsigned CountReg = MRI->createVirtualRegister(&Hexagon::IntRegsRegClass); + Register CountReg = MRI->createVirtualRegister(&Hexagon::IntRegsRegClass); BuildMI(*Preheader, InsertPos, DL, TII->get(TargetOpcode::COPY), CountReg) .addReg(TripCount->getReg(), 0, TripCount->getSubReg()); // Add the Loop instruction to the beginning of the loop. @@ -1257,7 +1257,7 @@ bool HexagonHardwareLoops::convertToHardwareLoop(MachineLoop *L, // create a new virtual register. int64_t CountImm = TripCount->getImm(); if (!TII->isValidOffset(LOOP_i, CountImm, TRI)) { - unsigned CountReg = MRI->createVirtualRegister(&Hexagon::IntRegsRegClass); + Register CountReg = MRI->createVirtualRegister(&Hexagon::IntRegsRegClass); BuildMI(*Preheader, InsertPos, DL, TII->get(Hexagon::A2_tfrsi), CountReg) .addImm(CountImm); BuildMI(*Preheader, InsertPos, DL, TII->get(LOOP_r)) @@ -1333,7 +1333,7 @@ bool HexagonHardwareLoops::orderBumpCompare(MachineInstr *BumpI, return true; // Out of order. - unsigned PredR = CmpI->getOperand(0).getReg(); + Register PredR = CmpI->getOperand(0).getReg(); bool FoundBump = false; instr_iterator CmpIt = CmpI->getIterator(), NextIt = std::next(CmpIt); for (instr_iterator I = NextIt, E = BB->instr_end(); I != E; ++I) { @@ -1428,7 +1428,7 @@ bool HexagonHardwareLoops::loopCountMayWrapOrUnderFlow( if (checkForImmediate(*InitVal, Imm)) return (EndVal->getImm() == Imm); - unsigned Reg = InitVal->getReg(); + Register Reg = InitVal->getReg(); // We don't know the value of a physical register. if (!Register::isVirtualRegister(Reg)) @@ -1508,7 +1508,7 @@ bool HexagonHardwareLoops::checkForImmediate(const MachineOperand &MO, // processed to handle potential subregisters in MO. 
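The isDead and removeIfDead hunks above decide whether an instruction's results are unused by consulting the use lists while ignoring debug uses. A simplified sketch of that check, under the assumption that dropping the extra copy/use-chain handling of the real code is acceptable for illustration.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Register.h"
using namespace llvm;

// Conservatively decide whether every register MI defines is a virtual
// register with no non-debug uses.
static bool definesOnlyUnusedVRegs(const MachineInstr &MI,
                                   const MachineRegisterInfo &MRI) {
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg() || !MO.isDef())
      continue;
    Register Reg = MO.getReg();
    if (!Register::isVirtualRegister(Reg) || !MRI.use_nodbg_empty(Reg))
      return false;
  }
  return true;
}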
int64_t TV; - unsigned R = MO.getReg(); + Register R = MO.getReg(); if (!Register::isVirtualRegister(R)) return false; MachineInstr *DI = MRI->getVRegDef(R); @@ -1582,11 +1582,11 @@ void HexagonHardwareLoops::setImmediate(MachineOperand &MO, int64_t Val) { } assert(MO.isReg()); - unsigned R = MO.getReg(); + Register R = MO.getReg(); MachineInstr *DI = MRI->getVRegDef(R); const TargetRegisterClass *RC = MRI->getRegClass(R); - unsigned NewR = MRI->createVirtualRegister(RC); + Register NewR = MRI->createVirtualRegister(RC); MachineBasicBlock &B = *DI->getParent(); DebugLoc DL = DI->getDebugLoc(); BuildMI(B, DI, DL, TII->get(DI->getOpcode()), NewR).addImm(Val); @@ -1634,17 +1634,17 @@ bool HexagonHardwareLoops::fixupInductionVariable(MachineLoop *L) { if (Phi->getOperand(i+1).getMBB() != Latch) continue; - unsigned PhiReg = Phi->getOperand(i).getReg(); + Register PhiReg = Phi->getOperand(i).getReg(); MachineInstr *DI = MRI->getVRegDef(PhiReg); if (DI->getDesc().isAdd()) { // If the register operand to the add/sub is the PHI we are looking // at, this meets the induction pattern. - unsigned IndReg = DI->getOperand(1).getReg(); + Register IndReg = DI->getOperand(1).getReg(); MachineOperand &Opnd2 = DI->getOperand(2); int64_t V; if (MRI->getVRegDef(IndReg) == Phi && checkForImmediate(Opnd2, V)) { - unsigned UpdReg = DI->getOperand(0).getReg(); + Register UpdReg = DI->getOperand(0).getReg(); IndRegs.insert(std::make_pair(UpdReg, std::make_pair(IndReg, V))); } } @@ -1702,7 +1702,7 @@ bool HexagonHardwareLoops::fixupInductionVariable(MachineLoop *L) { if (!Cond[CSz-1].isReg()) return false; - unsigned P = Cond[CSz-1].getReg(); + Register P = Cond[CSz - 1].getReg(); MachineInstr *PredDef = MRI->getVRegDef(P); if (!PredDef->isCompare()) @@ -1903,15 +1903,15 @@ MachineBasicBlock *HexagonHardwareLoops::createPreheaderForLoop( MachineInstr *NewPN = MF->CreateMachineInstr(PD, DL); NewPH->insert(NewPH->end(), NewPN); - unsigned PR = PN->getOperand(0).getReg(); + Register PR = PN->getOperand(0).getReg(); const TargetRegisterClass *RC = MRI->getRegClass(PR); - unsigned NewPR = MRI->createVirtualRegister(RC); + Register NewPR = MRI->createVirtualRegister(RC); NewPN->addOperand(MachineOperand::CreateReg(NewPR, true)); // Copy all non-latch operands of a header's PHI node to the newly // created PHI node in the preheader. 
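The loop that follows reads both numbers carried by a register operand: getReg(), now typed as Register, and getSubReg(), a subregister index that stays a plain unsigned in most hunks (a few, such as UseSR in eliminatePhis, give it the new type too, which still compiles because Register accepts any unsigned value). A two-line sketch of the distinction, with the Hexagon index named only as an example:

#include <utility>
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/Register.h"
using namespace llvm;

// A register operand carries two different numbers: the register itself
// and a subregister index (e.g. Hexagon::isub_lo, or 0 for the whole
// register). Only the former is a Register.
static std::pair<Register, unsigned> regAndSubIdx(const MachineOperand &MO) {
  return {MO.getReg(), MO.getSubReg()};
}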
for (unsigned i = 1, n = PN->getNumOperands(); i < n; i += 2) { - unsigned PredR = PN->getOperand(i).getReg(); + Register PredR = PN->getOperand(i).getReg(); unsigned PredRSub = PN->getOperand(i).getSubReg(); MachineBasicBlock *PredB = PN->getOperand(i+1).getMBB(); if (PredB == Latch) diff --git a/lib/Target/Hexagon/HexagonISelLowering.cpp b/lib/Target/Hexagon/HexagonISelLowering.cpp index 9ce45b547d2..61cac288e48 100644 --- a/lib/Target/Hexagon/HexagonISelLowering.cpp +++ b/lib/Target/Hexagon/HexagonISelLowering.cpp @@ -286,7 +286,7 @@ SDValue HexagonTargetLowering::LowerCallResult( SDValue FR0 = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(), MVT::i32, Glue); // FR0 = (Value, Chain, Glue) - unsigned PredR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass); + Register PredR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass); SDValue TPR = DAG.getCopyToReg(FR0.getValue(1), dl, PredR, FR0.getValue(0), FR0.getValue(2)); // TPR = (Chain, Glue) @@ -736,7 +736,7 @@ SDValue HexagonTargetLowering::LowerFormalArguments( RegVT = VA.getValVT(); const TargetRegisterClass *RC = getRegClassFor(RegVT); - unsigned VReg = MRI.createVirtualRegister(RC); + Register VReg = MRI.createVirtualRegister(RC); SDValue Copy = DAG.getCopyFromReg(Chain, dl, VReg, RegVT); // Treat values of type MVT::i1 specially: they are passed in diff --git a/lib/Target/Hexagon/HexagonInstrInfo.cpp b/lib/Target/Hexagon/HexagonInstrInfo.cpp index 135fb8e7a5b..97318a8057b 100644 --- a/lib/Target/Hexagon/HexagonInstrInfo.cpp +++ b/lib/Target/Hexagon/HexagonInstrInfo.cpp @@ -193,7 +193,7 @@ static inline void parseOperands(const MachineInstr &MI, if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Reg) continue; @@ -727,7 +727,7 @@ unsigned HexagonInstrInfo::reduceLoopCount( // The loop trip count is a run-time value. We generate code to subtract // one from the trip count, and update the loop instruction. assert(Loop->getOpcode() == Hexagon::J2_loop0r && "Unexpected instruction"); - unsigned LoopCount = Loop->getOperand(1).getReg(); + Register LoopCount = Loop->getOperand(1).getReg(); // Check if we're done with the loop. unsigned LoopEnd = createVR(MF, MVT::i1); MachineInstr *NewCmp = BuildMI(&MBB, DL, get(Hexagon::C2_cmpgtui), LoopEnd). @@ -839,8 +839,8 @@ void HexagonInstrInfo::copyPhysReg(MachineBasicBlock &MBB, return; } if (Hexagon::HvxWRRegClass.contains(SrcReg, DestReg)) { - unsigned LoSrc = HRI.getSubReg(SrcReg, Hexagon::vsub_lo); - unsigned HiSrc = HRI.getSubReg(SrcReg, Hexagon::vsub_hi); + Register LoSrc = HRI.getSubReg(SrcReg, Hexagon::vsub_lo); + Register HiSrc = HRI.getSubReg(SrcReg, Hexagon::vsub_hi); BuildMI(MBB, I, DL, get(Hexagon::V6_vcombine), DestReg) .addReg(HiSrc, KillFlag) .addReg(LoSrc, KillFlag); @@ -1017,7 +1017,7 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { unsigned Opc = MI.getOpcode(); auto RealCirc = [&](unsigned Opc, bool HasImm, unsigned MxOp) { - unsigned Mx = MI.getOperand(MxOp).getReg(); + Register Mx = MI.getOperand(MxOp).getReg(); unsigned CSx = (Mx == Hexagon::M0 ? Hexagon::CS0 : Hexagon::CS1); BuildMI(MBB, MI, DL, get(Hexagon::A2_tfrrcr), CSx) .add(MI.getOperand((HasImm ? 
5 : 4))); @@ -1049,8 +1049,8 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { MBB.erase(MI); return true; case Hexagon::V6_vassignp: { - unsigned SrcReg = MI.getOperand(1).getReg(); - unsigned DstReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); + Register DstReg = MI.getOperand(0).getReg(); unsigned Kill = getKillRegState(MI.getOperand(1).isKill()); BuildMI(MBB, MI, DL, get(Hexagon::V6_vcombine), DstReg) .addReg(HRI.getSubReg(SrcReg, Hexagon::vsub_hi), Kill) @@ -1059,18 +1059,18 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { return true; } case Hexagon::V6_lo: { - unsigned SrcReg = MI.getOperand(1).getReg(); - unsigned DstReg = MI.getOperand(0).getReg(); - unsigned SrcSubLo = HRI.getSubReg(SrcReg, Hexagon::vsub_lo); + Register SrcReg = MI.getOperand(1).getReg(); + Register DstReg = MI.getOperand(0).getReg(); + Register SrcSubLo = HRI.getSubReg(SrcReg, Hexagon::vsub_lo); copyPhysReg(MBB, MI, DL, DstReg, SrcSubLo, MI.getOperand(1).isKill()); MBB.erase(MI); MRI.clearKillFlags(SrcSubLo); return true; } case Hexagon::V6_hi: { - unsigned SrcReg = MI.getOperand(1).getReg(); - unsigned DstReg = MI.getOperand(0).getReg(); - unsigned SrcSubHi = HRI.getSubReg(SrcReg, Hexagon::vsub_hi); + Register SrcReg = MI.getOperand(1).getReg(); + Register DstReg = MI.getOperand(0).getReg(); + Register SrcSubHi = HRI.getSubReg(SrcReg, Hexagon::vsub_hi); copyPhysReg(MBB, MI, DL, DstReg, SrcSubHi, MI.getOperand(1).isKill()); MBB.erase(MI); MRI.clearKillFlags(SrcSubHi); @@ -1079,9 +1079,9 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { case Hexagon::PS_vstorerw_ai: case Hexagon::PS_vstorerwu_ai: { bool Aligned = Opc == Hexagon::PS_vstorerw_ai; - unsigned SrcReg = MI.getOperand(2).getReg(); - unsigned SrcSubHi = HRI.getSubReg(SrcReg, Hexagon::vsub_hi); - unsigned SrcSubLo = HRI.getSubReg(SrcReg, Hexagon::vsub_lo); + Register SrcReg = MI.getOperand(2).getReg(); + Register SrcSubHi = HRI.getSubReg(SrcReg, Hexagon::vsub_hi); + Register SrcSubLo = HRI.getSubReg(SrcReg, Hexagon::vsub_lo); unsigned NewOpc = Aligned ? Hexagon::V6_vS32b_ai : Hexagon::V6_vS32Ub_ai; unsigned Offset = HRI.getSpillSize(Hexagon::HvxVRRegClass); @@ -1103,7 +1103,7 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { case Hexagon::PS_vloadrw_ai: case Hexagon::PS_vloadrwu_ai: { bool Aligned = Opc == Hexagon::PS_vloadrw_ai; - unsigned DstReg = MI.getOperand(0).getReg(); + Register DstReg = MI.getOperand(0).getReg(); unsigned NewOpc = Aligned ? 
Hexagon::V6_vL32b_ai : Hexagon::V6_vL32Ub_ai; unsigned Offset = HRI.getSpillSize(Hexagon::HvxVRRegClass); @@ -1122,7 +1122,7 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { return true; } case Hexagon::PS_true: { - unsigned Reg = MI.getOperand(0).getReg(); + Register Reg = MI.getOperand(0).getReg(); BuildMI(MBB, MI, DL, get(Hexagon::C2_orn), Reg) .addReg(Reg, RegState::Undef) .addReg(Reg, RegState::Undef); @@ -1130,7 +1130,7 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { return true; } case Hexagon::PS_false: { - unsigned Reg = MI.getOperand(0).getReg(); + Register Reg = MI.getOperand(0).getReg(); BuildMI(MBB, MI, DL, get(Hexagon::C2_andn), Reg) .addReg(Reg, RegState::Undef) .addReg(Reg, RegState::Undef); @@ -1152,7 +1152,7 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { return true; } case Hexagon::PS_vdd0: { - unsigned Vd = MI.getOperand(0).getReg(); + Register Vd = MI.getOperand(0).getReg(); BuildMI(MBB, MI, DL, get(Hexagon::V6_vsubw_dv), Vd) .addReg(Vd, RegState::Undef) .addReg(Vd, RegState::Undef); @@ -1161,13 +1161,13 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { } case Hexagon::PS_vmulw: { // Expand a 64-bit vector multiply into 2 32-bit scalar multiplies. - unsigned DstReg = MI.getOperand(0).getReg(); - unsigned Src1Reg = MI.getOperand(1).getReg(); - unsigned Src2Reg = MI.getOperand(2).getReg(); - unsigned Src1SubHi = HRI.getSubReg(Src1Reg, Hexagon::isub_hi); - unsigned Src1SubLo = HRI.getSubReg(Src1Reg, Hexagon::isub_lo); - unsigned Src2SubHi = HRI.getSubReg(Src2Reg, Hexagon::isub_hi); - unsigned Src2SubLo = HRI.getSubReg(Src2Reg, Hexagon::isub_lo); + Register DstReg = MI.getOperand(0).getReg(); + Register Src1Reg = MI.getOperand(1).getReg(); + Register Src2Reg = MI.getOperand(2).getReg(); + Register Src1SubHi = HRI.getSubReg(Src1Reg, Hexagon::isub_hi); + Register Src1SubLo = HRI.getSubReg(Src1Reg, Hexagon::isub_lo); + Register Src2SubHi = HRI.getSubReg(Src2Reg, Hexagon::isub_hi); + Register Src2SubLo = HRI.getSubReg(Src2Reg, Hexagon::isub_lo); BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_mpyi), HRI.getSubReg(DstReg, Hexagon::isub_hi)) .addReg(Src1SubHi) @@ -1185,16 +1185,16 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { } case Hexagon::PS_vmulw_acc: { // Expand 64-bit vector multiply with addition into 2 scalar multiplies. 
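The PS_vmulw and PS_vmulw_acc expansions resolve a 64-bit register pair into its low and high halves before emitting 32-bit operations. A hedged sketch of that step follows; the helper and its parameters are invented, and it assumes a physical pair register, since TargetRegisterInfo::getSubReg is a physical-register query (virtual registers keep the subregister as an operand index instead).

#include <utility>
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
using namespace llvm;

// Resolve the low/high halves of a physical register pair, as the
// vector-multiply expansions do with Hexagon::isub_lo / isub_hi.
static std::pair<Register, Register>
splitRegPair(Register PairReg, unsigned SubLoIdx, unsigned SubHiIdx,
             const TargetRegisterInfo &TRI) {
  Register Lo = TRI.getSubReg(PairReg, SubLoIdx);
  Register Hi = TRI.getSubReg(PairReg, SubHiIdx);
  return {Lo, Hi};
}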
- unsigned DstReg = MI.getOperand(0).getReg(); - unsigned Src1Reg = MI.getOperand(1).getReg(); - unsigned Src2Reg = MI.getOperand(2).getReg(); - unsigned Src3Reg = MI.getOperand(3).getReg(); - unsigned Src1SubHi = HRI.getSubReg(Src1Reg, Hexagon::isub_hi); - unsigned Src1SubLo = HRI.getSubReg(Src1Reg, Hexagon::isub_lo); - unsigned Src2SubHi = HRI.getSubReg(Src2Reg, Hexagon::isub_hi); - unsigned Src2SubLo = HRI.getSubReg(Src2Reg, Hexagon::isub_lo); - unsigned Src3SubHi = HRI.getSubReg(Src3Reg, Hexagon::isub_hi); - unsigned Src3SubLo = HRI.getSubReg(Src3Reg, Hexagon::isub_lo); + Register DstReg = MI.getOperand(0).getReg(); + Register Src1Reg = MI.getOperand(1).getReg(); + Register Src2Reg = MI.getOperand(2).getReg(); + Register Src3Reg = MI.getOperand(3).getReg(); + Register Src1SubHi = HRI.getSubReg(Src1Reg, Hexagon::isub_hi); + Register Src1SubLo = HRI.getSubReg(Src1Reg, Hexagon::isub_lo); + Register Src2SubHi = HRI.getSubReg(Src2Reg, Hexagon::isub_hi); + Register Src2SubLo = HRI.getSubReg(Src2Reg, Hexagon::isub_lo); + Register Src3SubHi = HRI.getSubReg(Src3Reg, Hexagon::isub_hi); + Register Src3SubLo = HRI.getSubReg(Src3Reg, Hexagon::isub_lo); BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_maci), HRI.getSubReg(DstReg, Hexagon::isub_hi)) .addReg(Src1SubHi) @@ -1219,10 +1219,10 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { const MachineOperand &Op1 = MI.getOperand(1); const MachineOperand &Op2 = MI.getOperand(2); const MachineOperand &Op3 = MI.getOperand(3); - unsigned Rd = Op0.getReg(); - unsigned Pu = Op1.getReg(); - unsigned Rs = Op2.getReg(); - unsigned Rt = Op3.getReg(); + Register Rd = Op0.getReg(); + Register Pu = Op1.getReg(); + Register Rs = Op2.getReg(); + Register Rt = Op3.getReg(); DebugLoc DL = MI.getDebugLoc(); unsigned K1 = getKillRegState(Op1.isKill()); unsigned K2 = getKillRegState(Op2.isKill()); @@ -1246,7 +1246,7 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { LivePhysRegs LiveAtMI(HRI); getLiveRegsAt(LiveAtMI, MI); bool IsDestLive = !LiveAtMI.available(MRI, Op0.getReg()); - unsigned PReg = Op1.getReg(); + Register PReg = Op1.getReg(); assert(Op1.getSubReg() == 0); unsigned PState = getRegState(Op1); @@ -1280,15 +1280,15 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { LivePhysRegs LiveAtMI(HRI); getLiveRegsAt(LiveAtMI, MI); bool IsDestLive = !LiveAtMI.available(MRI, Op0.getReg()); - unsigned PReg = Op1.getReg(); + Register PReg = Op1.getReg(); assert(Op1.getSubReg() == 0); unsigned PState = getRegState(Op1); if (Op0.getReg() != Op2.getReg()) { unsigned S = Op0.getReg() != Op3.getReg() ? 
PState & ~RegState::Kill : PState; - unsigned SrcLo = HRI.getSubReg(Op2.getReg(), Hexagon::vsub_lo); - unsigned SrcHi = HRI.getSubReg(Op2.getReg(), Hexagon::vsub_hi); + Register SrcLo = HRI.getSubReg(Op2.getReg(), Hexagon::vsub_lo); + Register SrcHi = HRI.getSubReg(Op2.getReg(), Hexagon::vsub_hi); auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vccombine)) .add(Op0) .addReg(PReg, S) @@ -1299,8 +1299,8 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { IsDestLive = true; } if (Op0.getReg() != Op3.getReg()) { - unsigned SrcLo = HRI.getSubReg(Op3.getReg(), Hexagon::vsub_lo); - unsigned SrcHi = HRI.getSubReg(Op3.getReg(), Hexagon::vsub_hi); + Register SrcLo = HRI.getSubReg(Op3.getReg(), Hexagon::vsub_lo); + Register SrcHi = HRI.getSubReg(Op3.getReg(), Hexagon::vsub_hi); auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vnccombine)) .add(Op0) .addReg(PReg, PState) @@ -1872,7 +1872,7 @@ bool HexagonInstrInfo::areMemAccessesTriviallyDisjoint( if (!getBaseAndOffsetPosition(MIa, BasePosA, OffsetPosA)) return false; const MachineOperand &BaseA = MIa.getOperand(BasePosA); - unsigned BaseRegA = BaseA.getReg(); + Register BaseRegA = BaseA.getReg(); unsigned BaseSubA = BaseA.getSubReg(); // Get the base register in MIb. @@ -1880,7 +1880,7 @@ bool HexagonInstrInfo::areMemAccessesTriviallyDisjoint( if (!getBaseAndOffsetPosition(MIb, BasePosB, OffsetPosB)) return false; const MachineOperand &BaseB = MIb.getOperand(BasePosB); - unsigned BaseRegB = BaseB.getReg(); + Register BaseRegB = BaseB.getReg(); unsigned BaseSubB = BaseB.getSubReg(); if (BaseRegA != BaseRegB || BaseSubA != BaseSubB) @@ -1984,7 +1984,7 @@ unsigned HexagonInstrInfo::createVR(MachineFunction *MF, MVT VT) const { llvm_unreachable("Cannot handle this register class"); } - unsigned NewReg = MRI.createVirtualRegister(TRC); + Register NewReg = MRI.createVirtualRegister(TRC); return NewReg; } @@ -2605,7 +2605,7 @@ bool HexagonInstrInfo::isToBeScheduledASAP(const MachineInstr &MI1, const MachineInstr &MI2) const { if (mayBeCurLoad(MI1)) { // if (result of SU is used in Next) return true; - unsigned DstReg = MI1.getOperand(0).getReg(); + Register DstReg = MI1.getOperand(0).getReg(); int N = MI2.getNumOperands(); for (int I = 0; I < N; I++) if (MI2.getOperand(I).isReg() && DstReg == MI2.getOperand(I).getReg()) @@ -3374,7 +3374,7 @@ unsigned HexagonInstrInfo::getCompoundOpcode(const MachineInstr &GA, if ((GA.getOpcode() != Hexagon::C2_cmpeqi) || (GB.getOpcode() != Hexagon::J2_jumptnew)) return -1u; - unsigned DestReg = GA.getOperand(0).getReg(); + Register DestReg = GA.getOperand(0).getReg(); if (!GB.readsRegister(DestReg)) return -1u; if (DestReg != Hexagon::P0 && DestReg != Hexagon::P1) diff --git a/lib/Target/Hexagon/HexagonNewValueJump.cpp b/lib/Target/Hexagon/HexagonNewValueJump.cpp index 1c038e31ff1..680d01e12af 100644 --- a/lib/Target/Hexagon/HexagonNewValueJump.cpp +++ b/lib/Target/Hexagon/HexagonNewValueJump.cpp @@ -177,7 +177,7 @@ static bool canBeFeederToNewValueJump(const HexagonInstrInfo *QII, (II->getOperand(i).isUse() || II->getOperand(i).isDef())) { MachineBasicBlock::iterator localII = II; ++localII; - unsigned Reg = II->getOperand(i).getReg(); + Register Reg = II->getOperand(i).getReg(); for (MachineBasicBlock::iterator localBegin = localII; localBegin != end; ++localBegin) { if (localBegin == skip) @@ -603,7 +603,7 @@ bool HexagonNewValueJump::runOnMachineFunction(MachineFunction &MF) { (isSecondOpReg && MI.getOperand(0).getReg() == (unsigned)cmpOp2))) { - unsigned feederReg = MI.getOperand(0).getReg(); + Register 
feederReg = MI.getOperand(0).getReg(); // First try to see if we can get the feeder from the first operand // of the compare. If we can not, and if secondOpReg is true @@ -651,7 +651,7 @@ bool HexagonNewValueJump::runOnMachineFunction(MachineFunction &MF) { for (MachineOperand &MO : MI.operands()) { if (!MO.isReg() || !MO.isUse()) continue; - unsigned UseR = MO.getReg(); + Register UseR = MO.getReg(); for (auto I = std::next(MI.getIterator()); I != jmpPos; ++I) { if (I == cmpPos) continue; diff --git a/lib/Target/Hexagon/HexagonOptAddrMode.cpp b/lib/Target/Hexagon/HexagonOptAddrMode.cpp index 547da9fd598..14f43e14897 100644 --- a/lib/Target/Hexagon/HexagonOptAddrMode.cpp +++ b/lib/Target/Hexagon/HexagonOptAddrMode.cpp @@ -162,7 +162,7 @@ bool HexagonOptAddrMode::canRemoveAddasl(NodeAddr AddAslSN, if (!OffsetOp.isImm() || OffsetOp.getImm() > 3) return false; - unsigned OffsetReg = MI.getOperand(2).getReg(); + Register OffsetReg = MI.getOperand(2).getReg(); RegisterRef OffsetRR; NodeId OffsetRegRD = 0; for (NodeAddr UA : AddAslSN.Addr->members_if(DFG->IsUse, *DFG)) { @@ -348,7 +348,7 @@ bool HexagonOptAddrMode::processAddUses(NodeAddr AddSN, MachineInstr *AddMI, const NodeList &UNodeList) { - unsigned AddDefR = AddMI->getOperand(0).getReg(); + Register AddDefR = AddMI->getOperand(0).getReg(); for (auto I = UNodeList.rbegin(), E = UNodeList.rend(); I != E; ++I) { NodeAddr UN = *I; NodeAddr SN = UN.Addr->getOwner(*DFG); @@ -381,7 +381,7 @@ bool HexagonOptAddrMode::processAddUses(NodeAddr AddSN, // Ex: Rx= add(Rt,#10) // memw(Rx+#0) = Rs // will be replaced with => memw(Rt+#10) = Rs - unsigned BaseReg = AddMI->getOperand(1).getReg(); + Register BaseReg = AddMI->getOperand(1).getReg(); if (!isSafeToExtLR(AddSN, AddMI, BaseReg, UNodeList)) return false; } @@ -411,7 +411,7 @@ bool HexagonOptAddrMode::updateAddUses(MachineInstr *AddMI, MachineInstr *UseMI) { const MachineOperand ImmOp = AddMI->getOperand(2); const MachineOperand AddRegOp = AddMI->getOperand(1); - unsigned newReg = AddRegOp.getReg(); + Register newReg = AddRegOp.getReg(); const MCInstrDesc &MID = UseMI->getDesc(); MachineOperand &BaseOp = MID.mayLoad() ? UseMI->getOperand(1) @@ -724,7 +724,7 @@ bool HexagonOptAddrMode::processBlock(NodeAddr BA) { } short SizeInc = 0; - unsigned DefR = MI->getOperand(0).getReg(); + Register DefR = MI->getOperand(0).getReg(); InstrEvalMap InstrEvalResult; // Analyze all uses and calculate increase in size. Perform the optimization diff --git a/lib/Target/Hexagon/HexagonPeephole.cpp b/lib/Target/Hexagon/HexagonPeephole.cpp index 2748e4500b3..0ccfe64ad1e 100644 --- a/lib/Target/Hexagon/HexagonPeephole.cpp +++ b/lib/Target/Hexagon/HexagonPeephole.cpp @@ -136,8 +136,8 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) { assert(MI.getNumOperands() == 2); MachineOperand &Dst = MI.getOperand(0); MachineOperand &Src = MI.getOperand(1); - unsigned DstReg = Dst.getReg(); - unsigned SrcReg = Src.getReg(); + Register DstReg = Dst.getReg(); + Register SrcReg = Src.getReg(); // Just handle virtual registers. 
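The PeepholeMap and PeepholeDoubleRegsMap updates below keep using containers keyed on raw register numbers; a Register value passes through its unsigned conversion at the map boundary, so no container types need to change. Sketch under that assumption (the DenseMap element type here is a guess for illustration, not taken from the file):

#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/Register.h"
using namespace llvm;

// Record and look up a copy relationship in a map of raw register
// numbers; Register converts to unsigned on the way in and out.
static void recordCopy(DenseMap<unsigned, unsigned> &PeepholeMap,
                       Register DstReg, Register SrcReg) {
  PeepholeMap[DstReg] = SrcReg;
}

static Register lookupCopy(const DenseMap<unsigned, unsigned> &PeepholeMap,
                           Register DstReg) {
  return PeepholeMap.lookup(DstReg); // 0, i.e. "no register", if absent
}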
if (Register::isVirtualRegister(DstReg) && Register::isVirtualRegister(SrcReg)) { @@ -157,8 +157,8 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) { MachineOperand &Src2 = MI.getOperand(2); if (Src1.getImm() != 0) continue; - unsigned DstReg = Dst.getReg(); - unsigned SrcReg = Src2.getReg(); + Register DstReg = Dst.getReg(); + Register SrcReg = Src2.getReg(); PeepholeMap[DstReg] = SrcReg; } @@ -174,8 +174,8 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) { MachineOperand &Src2 = MI.getOperand(2); if (Src2.getImm() != 32) continue; - unsigned DstReg = Dst.getReg(); - unsigned SrcReg = Src1.getReg(); + Register DstReg = Dst.getReg(); + Register SrcReg = Src1.getReg(); PeepholeDoubleRegsMap[DstReg] = std::make_pair(*&SrcReg, Hexagon::isub_hi); } @@ -185,8 +185,8 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) { assert(MI.getNumOperands() == 2); MachineOperand &Dst = MI.getOperand(0); MachineOperand &Src = MI.getOperand(1); - unsigned DstReg = Dst.getReg(); - unsigned SrcReg = Src.getReg(); + Register DstReg = Dst.getReg(); + Register SrcReg = Src.getReg(); // Just handle virtual registers. if (Register::isVirtualRegister(DstReg) && Register::isVirtualRegister(SrcReg)) { @@ -208,8 +208,8 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) { if (Src.getSubReg() != Hexagon::isub_lo) continue; - unsigned DstReg = Dst.getReg(); - unsigned SrcReg = Src.getReg(); + Register DstReg = Dst.getReg(); + Register SrcReg = Src.getReg(); if (Register::isVirtualRegister(DstReg) && Register::isVirtualRegister(SrcReg)) { // Try to find in the map. @@ -237,7 +237,7 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) { bool Done = false; if (QII->isPredicated(MI)) { MachineOperand &Op0 = MI.getOperand(0); - unsigned Reg0 = Op0.getReg(); + Register Reg0 = Op0.getReg(); const TargetRegisterClass *RC0 = MRI->getRegClass(Reg0); if (RC0->getID() == Hexagon::PredRegsRegClassID) { // Handle instructions that have a prediate register in op0 @@ -275,7 +275,7 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) { break; } if (NewOp) { - unsigned PSrc = MI.getOperand(PR).getReg(); + Register PSrc = MI.getOperand(PR).getReg(); if (unsigned POrig = PeepholeMap.lookup(PSrc)) { BuildMI(*MBB, MI.getIterator(), MI.getDebugLoc(), QII->get(NewOp), MI.getOperand(0).getReg()) diff --git a/lib/Target/Hexagon/HexagonRegisterInfo.cpp b/lib/Target/Hexagon/HexagonRegisterInfo.cpp index 4f5f750e584..b7171fb1427 100644 --- a/lib/Target/Hexagon/HexagonRegisterInfo.cpp +++ b/lib/Target/Hexagon/HexagonRegisterInfo.cpp @@ -217,7 +217,7 @@ void HexagonRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, // If the offset is not valid, calculate the address in a temporary // register and use it with offset 0. 
auto &MRI = MF.getRegInfo(); - unsigned TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); + Register TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); const DebugLoc &DL = MI.getDebugLoc(); BuildMI(MB, II, DL, HII.get(Hexagon::A2_addi), TmpR) .addReg(BP) @@ -249,8 +249,8 @@ bool HexagonRegisterInfo::shouldCoalesce(MachineInstr *MI, if (!SmallSrc && !SmallDst) return true; - unsigned DstReg = MI->getOperand(0).getReg(); - unsigned SrcReg = MI->getOperand(1).getReg(); + Register DstReg = MI->getOperand(0).getReg(); + Register SrcReg = MI->getOperand(1).getReg(); const SlotIndexes &Indexes = *LIS.getSlotIndexes(); auto HasCall = [&Indexes] (const LiveInterval::Segment &S) { for (SlotIndex I = S.start.getBaseIndex(), E = S.end.getBaseIndex(); diff --git a/lib/Target/Hexagon/HexagonSplitConst32AndConst64.cpp b/lib/Target/Hexagon/HexagonSplitConst32AndConst64.cpp index bd4254aea27..f9fb14c190f 100644 --- a/lib/Target/Hexagon/HexagonSplitConst32AndConst64.cpp +++ b/lib/Target/Hexagon/HexagonSplitConst32AndConst64.cpp @@ -76,18 +76,18 @@ bool HexagonSplitConst32AndConst64::runOnMachineFunction(MachineFunction &Fn) { unsigned Opc = MI.getOpcode(); if (Opc == Hexagon::CONST32) { - unsigned DestReg = MI.getOperand(0).getReg(); + Register DestReg = MI.getOperand(0).getReg(); uint64_t ImmValue = MI.getOperand(1).getImm(); const DebugLoc &DL = MI.getDebugLoc(); BuildMI(B, MI, DL, TII->get(Hexagon::A2_tfrsi), DestReg) .addImm(ImmValue); B.erase(&MI); } else if (Opc == Hexagon::CONST64) { - unsigned DestReg = MI.getOperand(0).getReg(); + Register DestReg = MI.getOperand(0).getReg(); int64_t ImmValue = MI.getOperand(1).getImm(); const DebugLoc &DL = MI.getDebugLoc(); - unsigned DestLo = TRI->getSubReg(DestReg, Hexagon::isub_lo); - unsigned DestHi = TRI->getSubReg(DestReg, Hexagon::isub_hi); + Register DestLo = TRI->getSubReg(DestReg, Hexagon::isub_lo); + Register DestHi = TRI->getSubReg(DestReg, Hexagon::isub_hi); int32_t LowWord = (ImmValue & 0xFFFFFFFF); int32_t HighWord = (ImmValue >> 32) & 0xFFFFFFFF; diff --git a/lib/Target/Hexagon/HexagonSplitDouble.cpp b/lib/Target/Hexagon/HexagonSplitDouble.cpp index ec9d0b03275..55f31c62885 100644 --- a/lib/Target/Hexagon/HexagonSplitDouble.cpp +++ b/lib/Target/Hexagon/HexagonSplitDouble.cpp @@ -210,7 +210,7 @@ bool HexagonSplitDoubleRegs::isFixedInstr(const MachineInstr *MI) const { for (auto &Op : MI->operands()) { if (!Op.isReg()) continue; - unsigned R = Op.getReg(); + Register R = Op.getReg(); if (!Register::isVirtualRegister(R)) return true; } @@ -258,7 +258,7 @@ void HexagonSplitDoubleRegs::partitionRegisters(UUSetMap &P2Rs) { // Skip non-registers or registers with subregisters. if (&MO == &Op || !MO.isReg() || MO.getSubReg()) continue; - unsigned T = MO.getReg(); + Register T = MO.getReg(); if (!Register::isVirtualRegister(T)) { FixedRegs.set(x); continue; @@ -372,8 +372,8 @@ int32_t HexagonSplitDoubleRegs::profit(const MachineInstr *MI) const { case Hexagon::A2_andp: case Hexagon::A2_orp: case Hexagon::A2_xorp: { - unsigned Rs = MI->getOperand(1).getReg(); - unsigned Rt = MI->getOperand(2).getReg(); + Register Rs = MI->getOperand(1).getReg(); + Register Rt = MI->getOperand(2).getReg(); return profit(Rs) + profit(Rt); } @@ -499,7 +499,7 @@ void HexagonSplitDoubleRegs::collectIndRegsForLoop(const MachineLoop *L, return; assert(Cond[1].isReg() && "Unexpected Cond vector from analyzeBranch"); // Expect a predicate register. 
- unsigned PR = Cond[1].getReg(); + Register PR = Cond[1].getReg(); assert(MRI->getRegClass(PR) == &Hexagon::PredRegsRegClass); // Get the registers on which the loop controlling compare instruction @@ -535,7 +535,7 @@ void HexagonSplitDoubleRegs::collectIndRegsForLoop(const MachineLoop *L, if (!MI.isPHI()) break; const MachineOperand &MD = MI.getOperand(0); - unsigned R = MD.getReg(); + Register R = MD.getReg(); if (MRI->getRegClass(R) == DoubleRC) DP.push_back(R); } @@ -551,7 +551,7 @@ void HexagonSplitDoubleRegs::collectIndRegsForLoop(const MachineLoop *L, // Get the output from the add. If it is one of the inputs to the // loop-controlling compare instruction, then R is likely an induc- // tion register. - unsigned T = UseI->getOperand(0).getReg(); + Register T = UseI->getOperand(0).getReg(); if (T == CmpR1 || T == CmpR2) return false; } @@ -603,7 +603,7 @@ void HexagonSplitDoubleRegs::createHalfInstr(unsigned Opc, MachineInstr *MI, continue; } // For register operands, set the subregister. - unsigned R = Op.getReg(); + Register R = Op.getReg(); unsigned SR = Op.getSubReg(); bool isVirtReg = Register::isVirtualRegister(R); bool isKill = Op.isKill(); @@ -674,7 +674,7 @@ void HexagonSplitDoubleRegs::splitMemRef(MachineInstr *MI, : MI->getOperand(2).getImm(); MachineOperand &UpdOp = Load ? MI->getOperand(1) : MI->getOperand(0); const TargetRegisterClass *RC = MRI->getRegClass(UpdOp.getReg()); - unsigned NewR = MRI->createVirtualRegister(RC); + Register NewR = MRI->createVirtualRegister(RC); assert(!UpdOp.getSubReg() && "Def operand with subreg"); BuildMI(B, MI, DL, TII->get(Hexagon::A2_addi), NewR) .addReg(AdrOp.getReg(), RSA) @@ -789,8 +789,8 @@ void HexagonSplitDoubleRegs::splitShift(MachineInstr *MI, UUPairMap::const_iterator F = PairMap.find(Op0.getReg()); assert(F != PairMap.end()); const UUPair &P = F->second; - unsigned LoR = P.first; - unsigned HiR = P.second; + Register LoR = P.first; + Register HiR = P.second; unsigned Opc = MI->getOpcode(); bool Right = (Opc == S2_lsr_i_p || Opc == S2_asr_i_p); @@ -813,7 +813,7 @@ void HexagonSplitDoubleRegs::splitShift(MachineInstr *MI, .addReg(Op1.getReg(), RS, HiSR); } else if (S < 32) { const TargetRegisterClass *IntRC = &IntRegsRegClass; - unsigned TmpR = MRI->createVirtualRegister(IntRC); + Register TmpR = MRI->createVirtualRegister(IntRC); // Expansion: // Shift left: DR = shl R, #s // LoR = shl R.lo, #s @@ -953,12 +953,12 @@ void HexagonSplitDoubleRegs::splitAslOr(MachineInstr *MI, .addReg(Op1.getReg(), RS1 & ~RegState::Kill, LoSR) .addReg(Op2.getReg(), RS2 & ~RegState::Kill, LoSR) .addImm(S); - unsigned TmpR1 = MRI->createVirtualRegister(IntRC); + Register TmpR1 = MRI->createVirtualRegister(IntRC); BuildMI(B, MI, DL, TII->get(S2_extractu), TmpR1) .addReg(Op2.getReg(), RS2 & ~RegState::Kill, LoSR) .addImm(S) .addImm(32-S); - unsigned TmpR2 = MRI->createVirtualRegister(IntRC); + Register TmpR2 = MRI->createVirtualRegister(IntRC); BuildMI(B, MI, DL, TII->get(A2_or), TmpR2) .addReg(Op1.getReg(), RS1, HiSR) .addReg(TmpR1); @@ -1002,7 +1002,7 @@ bool HexagonSplitDoubleRegs::splitInstr(MachineInstr *MI, switch (Opc) { case TargetOpcode::PHI: case TargetOpcode::COPY: { - unsigned DstR = MI->getOperand(0).getReg(); + Register DstR = MI->getOperand(0).getReg(); if (MRI->getRegClass(DstR) == DoubleRC) { createHalfInstr(Opc, MI, PairMap, isub_lo); createHalfInstr(Opc, MI, PairMap, isub_hi); @@ -1079,7 +1079,7 @@ void HexagonSplitDoubleRegs::replaceSubregUses(MachineInstr *MI, for (auto &Op : MI->operands()) { if (!Op.isReg() || !Op.isUse() || 
!Op.getSubReg()) continue; - unsigned R = Op.getReg(); + Register R = Op.getReg(); UUPairMap::const_iterator F = PairMap.find(R); if (F == PairMap.end()) continue; @@ -1104,7 +1104,7 @@ void HexagonSplitDoubleRegs::collapseRegPairs(MachineInstr *MI, for (auto &Op : MI->operands()) { if (!Op.isReg() || !Op.isUse()) continue; - unsigned R = Op.getReg(); + Register R = Op.getReg(); if (!Register::isVirtualRegister(R)) continue; if (MRI->getRegClass(R) != DoubleRC || Op.getSubReg()) @@ -1113,7 +1113,7 @@ void HexagonSplitDoubleRegs::collapseRegPairs(MachineInstr *MI, if (F == PairMap.end()) continue; const UUPair &Pr = F->second; - unsigned NewDR = MRI->createVirtualRegister(DoubleRC); + Register NewDR = MRI->createVirtualRegister(DoubleRC); BuildMI(B, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), NewDR) .addReg(Pr.first) .addImm(Hexagon::isub_lo) @@ -1145,8 +1145,8 @@ bool HexagonSplitDoubleRegs::splitPartition(const USet &Part) { U != W; ++U) SplitIns.insert(U->getParent()); - unsigned LoR = MRI->createVirtualRegister(IntRC); - unsigned HiR = MRI->createVirtualRegister(IntRC); + Register LoR = MRI->createVirtualRegister(IntRC); + Register HiR = MRI->createVirtualRegister(IntRC); LLVM_DEBUG(dbgs() << "Created mapping: " << printReg(DR, TRI) << " -> " << printReg(HiR, TRI) << ':' << printReg(LoR, TRI) << '\n'); diff --git a/lib/Target/Hexagon/HexagonStoreWidening.cpp b/lib/Target/Hexagon/HexagonStoreWidening.cpp index b8b61517ff9..27fefa5f5e2 100644 --- a/lib/Target/Hexagon/HexagonStoreWidening.cpp +++ b/lib/Target/Hexagon/HexagonStoreWidening.cpp @@ -441,7 +441,7 @@ bool HexagonStoreWidening::createWideStores(InstrGroup &OG, InstrGroup &NG, // Create vreg = A2_tfrsi #Acc; mem[hw] = vreg const MCInstrDesc &TfrD = TII->get(Hexagon::A2_tfrsi); const TargetRegisterClass *RC = TII->getRegClass(TfrD, 0, TRI, *MF); - unsigned VReg = MF->getRegInfo().createVirtualRegister(RC); + Register VReg = MF->getRegInfo().createVirtualRegister(RC); MachineInstr *TfrI = BuildMI(*MF, DL, TfrD, VReg) .addImm(int(Acc)); NG.push_back(TfrI); diff --git a/lib/Target/Hexagon/HexagonSubtarget.cpp b/lib/Target/Hexagon/HexagonSubtarget.cpp index ee943a0d2c5..761d7d7aa0b 100644 --- a/lib/Target/Hexagon/HexagonSubtarget.cpp +++ b/lib/Target/Hexagon/HexagonSubtarget.cpp @@ -344,7 +344,7 @@ void HexagonSubtarget::adjustSchedDependency(SUnit *Src, SUnit *Dst, // If it's a REG_SEQUENCE/COPY, use its destination instruction to determine // the correct latency. 
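adjustSchedDependency then scans the successor instruction's operands to find which one reads the copied register (the UseIdx loop). A stand-alone sketch of that scan; the helper name is invented, and the in-tree MachineInstr::findRegisterUseOperandIdx covers much the same ground.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/Register.h"
using namespace llvm;

// Index of the first operand of MI that reads Reg, or -1 if none does.
static int findFirstUseIdx(const MachineInstr &MI, Register Reg) {
  for (unsigned OpNum = 0, E = MI.getNumOperands(); OpNum != E; ++OpNum) {
    const MachineOperand &MO = MI.getOperand(OpNum);
    if (MO.isReg() && MO.isUse() && MO.getReg() == Reg)
      return static_cast<int>(OpNum);
  }
  return -1;
}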
if ((DstInst->isRegSequence() || DstInst->isCopy()) && Dst->NumSuccs == 1) { - unsigned DReg = DstInst->getOperand(0).getReg(); + Register DReg = DstInst->getOperand(0).getReg(); MachineInstr *DDst = Dst->Succs[0].getSUnit()->getInstr(); unsigned UseIdx = -1; for (unsigned OpNum = 0; OpNum < DDst->getNumOperands(); OpNum++) { diff --git a/lib/Target/Hexagon/HexagonVExtract.cpp b/lib/Target/Hexagon/HexagonVExtract.cpp index a9692f42e46..0c0266a6839 100644 --- a/lib/Target/Hexagon/HexagonVExtract.cpp +++ b/lib/Target/Hexagon/HexagonVExtract.cpp @@ -67,9 +67,9 @@ unsigned HexagonVExtract::genElemLoad(MachineInstr *ExtI, unsigned BaseR, MachineRegisterInfo &MRI) { MachineBasicBlock &ExtB = *ExtI->getParent(); DebugLoc DL = ExtI->getDebugLoc(); - unsigned ElemR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); + Register ElemR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); - unsigned ExtIdxR = ExtI->getOperand(2).getReg(); + Register ExtIdxR = ExtI->getOperand(2).getReg(); unsigned ExtIdxS = ExtI->getOperand(2).getSubReg(); // Simplified check for a compile-time constant value of ExtIdxR. @@ -86,7 +86,7 @@ unsigned HexagonVExtract::genElemLoad(MachineInstr *ExtI, unsigned BaseR, } } - unsigned IdxR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); + Register IdxR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); BuildMI(ExtB, ExtI, DL, HII->get(Hexagon::A2_andir), IdxR) .add(ExtI->getOperand(2)) .addImm(-4); @@ -111,7 +111,7 @@ bool HexagonVExtract::runOnMachineFunction(MachineFunction &MF) { unsigned Opc = MI.getOpcode(); if (Opc != Hexagon::V6_extractw) continue; - unsigned VecR = MI.getOperand(1).getReg(); + Register VecR = MI.getOperand(1).getReg(); VExtractMap[VecR].push_back(&MI); } } @@ -144,13 +144,13 @@ bool HexagonVExtract::runOnMachineFunction(MachineFunction &MF) { MachineBasicBlock &ExtB = *ExtI->getParent(); DebugLoc DL = ExtI->getDebugLoc(); - unsigned BaseR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); + Register BaseR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); BuildMI(ExtB, ExtI, DL, HII->get(Hexagon::PS_fi), BaseR) .addFrameIndex(FI) .addImm(SR == 0 ? 
0 : VecSize/2); unsigned ElemR = genElemLoad(ExtI, BaseR, MRI); - unsigned ExtR = ExtI->getOperand(0).getReg(); + Register ExtR = ExtI->getOperand(0).getReg(); MRI.replaceRegWith(ExtR, ElemR); ExtB.erase(ExtI); Changed = true; diff --git a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp index cc1f714573d..e4cc8295a9f 100644 --- a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp +++ b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp @@ -148,7 +148,7 @@ static bool hasWriteToReadDep(const MachineInstr &FirstI, for (auto &MO : FirstI.operands()) { if (!MO.isReg() || !MO.isDef()) continue; - unsigned R = MO.getReg(); + Register R = MO.getReg(); if (SecondI.readsRegister(R, TRI)) return true; } @@ -422,7 +422,7 @@ bool HexagonPacketizerList::canPromoteToDotCur(const MachineInstr &MI, dbgs() << "Checking CUR against "; MJ.dump(); }); - unsigned DestReg = MI.getOperand(0).getReg(); + Register DestReg = MI.getOperand(0).getReg(); bool FoundMatch = false; for (auto &MO : MJ.operands()) if (MO.isReg() && MO.getReg() == DestReg) @@ -515,7 +515,7 @@ bool HexagonPacketizerList::updateOffset(SUnit *SUI, SUnit *SUJ) { unsigned BPJ, OPJ; if (!HII->getBaseAndOffsetPosition(MJ, BPJ, OPJ)) return false; - unsigned Reg = MI.getOperand(BPI).getReg(); + Register Reg = MI.getOperand(BPI).getReg(); if (Reg != MJ.getOperand(BPJ).getReg()) return false; // Make sure that the dependences do not restrict adding MI to the packet. @@ -788,7 +788,7 @@ bool HexagonPacketizerList::canPromoteToNewValueStore(const MachineInstr &MI, return false; if (!MO.isReg() || !MO.isDef() || !MO.isImplicit()) continue; - unsigned R = MO.getReg(); + Register R = MO.getReg(); if (R == DepReg || HRI->isSuperRegister(DepReg, R)) return false; } @@ -1208,7 +1208,7 @@ bool HexagonPacketizerList::hasDeadDependence(const MachineInstr &I, for (auto &MO : J.operands()) { if (!MO.isReg() || !MO.isDef() || !MO.isDead()) continue; - unsigned R = MO.getReg(); + Register R = MO.getReg(); if (R != Hexagon::USR_OVF && DeadDefs[R]) return true; } @@ -1585,7 +1585,7 @@ bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) { // subset of the volatile register set. for (const MachineOperand &Op : I.operands()) { if (Op.isReg() && Op.isDef()) { - unsigned R = Op.getReg(); + Register R = Op.getReg(); if (!J.readsRegister(R, HRI) && !J.modifiesRegister(R, HRI)) continue; } else if (!Op.isRegMask()) { diff --git a/lib/Target/Hexagon/RDFGraph.cpp b/lib/Target/Hexagon/RDFGraph.cpp index 7b1a4007d12..0cb35dc9881 100644 --- a/lib/Target/Hexagon/RDFGraph.cpp +++ b/lib/Target/Hexagon/RDFGraph.cpp @@ -633,7 +633,7 @@ bool TargetOperandInfo::isFixedReg(const MachineInstr &In, unsigned OpNum) // uses or defs, and those lists do not allow sub-registers. if (Op.getSubReg() != 0) return false; - RegisterId Reg = Op.getReg(); + Register Reg = Op.getReg(); const MCPhysReg *ImpR = Op.isDef() ? 
D.getImplicitDefs() : D.getImplicitUses(); if (!ImpR) @@ -1291,7 +1291,7 @@ void DataFlowGraph::buildStmt(NodeAddr BA, MachineInstr &In) { MachineOperand &Op = In.getOperand(OpN); if (!Op.isReg() || !Op.isDef() || Op.isImplicit()) continue; - unsigned R = Op.getReg(); + Register R = Op.getReg(); if (!R || !Register::isPhysicalRegister(R)) continue; uint16_t Flags = NodeAttrs::None; @@ -1336,7 +1336,7 @@ void DataFlowGraph::buildStmt(NodeAddr BA, MachineInstr &In) { MachineOperand &Op = In.getOperand(OpN); if (!Op.isReg() || !Op.isDef() || !Op.isImplicit()) continue; - unsigned R = Op.getReg(); + Register R = Op.getReg(); if (!R || !Register::isPhysicalRegister(R) || DoneDefs.test(R)) continue; RegisterRef RR = makeRegRef(Op); @@ -1365,7 +1365,7 @@ void DataFlowGraph::buildStmt(NodeAddr BA, MachineInstr &In) { MachineOperand &Op = In.getOperand(OpN); if (!Op.isReg() || !Op.isUse()) continue; - unsigned R = Op.getReg(); + Register R = Op.getReg(); if (!R || !Register::isPhysicalRegister(R)) continue; uint16_t Flags = NodeAttrs::None; diff --git a/lib/Target/Hexagon/RDFLiveness.cpp b/lib/Target/Hexagon/RDFLiveness.cpp index ed8f08f6224..7d7b89462ff 100644 --- a/lib/Target/Hexagon/RDFLiveness.cpp +++ b/lib/Target/Hexagon/RDFLiveness.cpp @@ -889,7 +889,7 @@ void Liveness::resetKills(MachineBasicBlock *B) { // implicit defs. if (!Op.isReg() || !Op.isDef() || Op.isImplicit()) continue; - unsigned R = Op.getReg(); + Register R = Op.getReg(); if (!Register::isPhysicalRegister(R)) continue; for (MCSubRegIterator SR(R, &TRI, true); SR.isValid(); ++SR) @@ -898,7 +898,7 @@ void Liveness::resetKills(MachineBasicBlock *B) { for (auto &Op : MI->operands()) { if (!Op.isReg() || !Op.isUse() || Op.isUndef()) continue; - unsigned R = Op.getReg(); + Register R = Op.getReg(); if (!Register::isPhysicalRegister(R)) continue; bool IsLive = false; diff --git a/lib/Target/Lanai/LanaiAsmPrinter.cpp b/lib/Target/Lanai/LanaiAsmPrinter.cpp index 64d963475e1..12a3202446a 100644 --- a/lib/Target/Lanai/LanaiAsmPrinter.cpp +++ b/lib/Target/Lanai/LanaiAsmPrinter.cpp @@ -133,7 +133,7 @@ bool LanaiAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, const MachineOperand &MO = MI->getOperand(RegOp); if (!MO.isReg()) return true; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); O << LanaiInstPrinter::getRegisterName(Reg); return false; } diff --git a/lib/Target/Lanai/LanaiFrameLowering.cpp b/lib/Target/Lanai/LanaiFrameLowering.cpp index 142c09c504c..eddc2b8e61f 100644 --- a/lib/Target/Lanai/LanaiFrameLowering.cpp +++ b/lib/Target/Lanai/LanaiFrameLowering.cpp @@ -72,8 +72,8 @@ void LanaiFrameLowering::replaceAdjDynAllocPseudo(MachineFunction &MF) const { MachineInstr &MI = *MBBI++; if (MI.getOpcode() == Lanai::ADJDYNALLOC) { DebugLoc DL = MI.getDebugLoc(); - unsigned Dst = MI.getOperand(0).getReg(); - unsigned Src = MI.getOperand(1).getReg(); + Register Dst = MI.getOperand(0).getReg(); + Register Src = MI.getOperand(1).getReg(); BuildMI(*MBB, MI, DL, LII.get(Lanai::ADD_I_LO), Dst) .addReg(Src) diff --git a/lib/Target/Lanai/LanaiISelLowering.cpp b/lib/Target/Lanai/LanaiISelLowering.cpp index 1ed078bb433..86e7769caf7 100644 --- a/lib/Target/Lanai/LanaiISelLowering.cpp +++ b/lib/Target/Lanai/LanaiISelLowering.cpp @@ -459,7 +459,7 @@ SDValue LanaiTargetLowering::LowerCCCArguments( EVT RegVT = VA.getLocVT(); switch (RegVT.getSimpleVT().SimpleTy) { case MVT::i32: { - unsigned VReg = RegInfo.createVirtualRegister(&Lanai::GPRRegClass); + Register VReg = RegInfo.createVirtualRegister(&Lanai::GPRRegClass); 
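The Lanai and MSP430 argument-lowering hunks follow the same three steps: create a virtual register of the right class, record the physical argument register as live-in to it, and read it into the DAG with getCopyFromReg. A consolidated sketch of that idiom; the function and its parameters are illustrative, with RC and PhysArgReg standing in for values the real code gets from calling-convention analysis.

#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

// Lower one incoming register argument into an SDValue.
static SDValue lowerRegArg(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain,
                           MachineRegisterInfo &RegInfo,
                           const TargetRegisterClass *RC,
                           Register PhysArgReg, EVT RegVT) {
  Register VReg = RegInfo.createVirtualRegister(RC);
  RegInfo.addLiveIn(PhysArgReg, VReg);
  return DAG.getCopyFromReg(Chain, DL, VReg, RegVT);
}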
RegInfo.addLiveIn(VA.getLocReg(), VReg); SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, RegVT); diff --git a/lib/Target/Lanai/LanaiInstrInfo.cpp b/lib/Target/Lanai/LanaiInstrInfo.cpp index 2b0e53c8e4e..b490a32c834 100644 --- a/lib/Target/Lanai/LanaiInstrInfo.cpp +++ b/lib/Target/Lanai/LanaiInstrInfo.cpp @@ -505,7 +505,7 @@ LanaiInstrInfo::optimizeSelect(MachineInstr &MI, // Find new register class to use. MachineOperand FalseReg = MI.getOperand(Invert ? 1 : 2); - unsigned DestReg = MI.getOperand(0).getReg(); + Register DestReg = MI.getOperand(0).getReg(); const TargetRegisterClass *PreviousClass = MRI.getRegClass(FalseReg.getReg()); if (!MRI.constrainRegClass(DestReg, PreviousClass)) return nullptr; diff --git a/lib/Target/Lanai/LanaiRegisterInfo.cpp b/lib/Target/Lanai/LanaiRegisterInfo.cpp index d3056a1eba8..7c28debb94d 100644 --- a/lib/Target/Lanai/LanaiRegisterInfo.cpp +++ b/lib/Target/Lanai/LanaiRegisterInfo.cpp @@ -155,7 +155,7 @@ void LanaiRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, if (!HasFP || (needsStackRealignment(MF) && FrameIndex >= 0)) Offset += MF.getFrameInfo().getStackSize(); - unsigned FrameReg = getFrameRegister(MF); + Register FrameReg = getFrameRegister(MF); if (FrameIndex >= 0) { if (hasBasePointer(MF)) FrameReg = getBaseRegister(); diff --git a/lib/Target/MSP430/MSP430ISelLowering.cpp b/lib/Target/MSP430/MSP430ISelLowering.cpp index fedfb857bd0..c400fa5e45f 100644 --- a/lib/Target/MSP430/MSP430ISelLowering.cpp +++ b/lib/Target/MSP430/MSP430ISelLowering.cpp @@ -632,7 +632,7 @@ SDValue MSP430TargetLowering::LowerCCCArguments( llvm_unreachable(nullptr); } case MVT::i16: - unsigned VReg = RegInfo.createVirtualRegister(&MSP430::GR16RegClass); + Register VReg = RegInfo.createVirtualRegister(&MSP430::GR16RegClass); RegInfo.addLiveIn(VA.getLocReg(), VReg); SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, VReg, RegVT); @@ -1446,8 +1446,8 @@ MSP430TargetLowering::EmitShiftInstr(MachineInstr &MI, case MSP430::Rrcl16: { BuildMI(*BB, MI, dl, TII.get(MSP430::BIC16rc), MSP430::SR) .addReg(MSP430::SR).addImm(1); - unsigned SrcReg = MI.getOperand(1).getReg(); - unsigned DstReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); + Register DstReg = MI.getOperand(0).getReg(); unsigned RrcOpc = MI.getOpcode() == MSP430::Rrcl16 ? 
MSP430::RRC16r : MSP430::RRC8r; BuildMI(*BB, MI, dl, TII.get(RrcOpc), DstReg) @@ -1479,13 +1479,13 @@ MSP430TargetLowering::EmitShiftInstr(MachineInstr &MI, LoopBB->addSuccessor(RemBB); LoopBB->addSuccessor(LoopBB); - unsigned ShiftAmtReg = RI.createVirtualRegister(&MSP430::GR8RegClass); - unsigned ShiftAmtReg2 = RI.createVirtualRegister(&MSP430::GR8RegClass); - unsigned ShiftReg = RI.createVirtualRegister(RC); - unsigned ShiftReg2 = RI.createVirtualRegister(RC); - unsigned ShiftAmtSrcReg = MI.getOperand(2).getReg(); - unsigned SrcReg = MI.getOperand(1).getReg(); - unsigned DstReg = MI.getOperand(0).getReg(); + Register ShiftAmtReg = RI.createVirtualRegister(&MSP430::GR8RegClass); + Register ShiftAmtReg2 = RI.createVirtualRegister(&MSP430::GR8RegClass); + Register ShiftReg = RI.createVirtualRegister(RC); + Register ShiftReg2 = RI.createVirtualRegister(RC); + Register ShiftAmtSrcReg = MI.getOperand(2).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); + Register DstReg = MI.getOperand(0).getReg(); // BB: // cmp 0, N diff --git a/lib/Target/MSP430/MSP430RegisterInfo.cpp b/lib/Target/MSP430/MSP430RegisterInfo.cpp index afbb2f213b4..bec357a1548 100644 --- a/lib/Target/MSP430/MSP430RegisterInfo.cpp +++ b/lib/Target/MSP430/MSP430RegisterInfo.cpp @@ -139,7 +139,7 @@ MSP430RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, return; // We need to materialize the offset via add instruction. - unsigned DstReg = MI.getOperand(0).getReg(); + Register DstReg = MI.getOperand(0).getReg(); if (Offset < 0) BuildMI(MBB, std::next(II), dl, TII.get(MSP430::SUB16ri), DstReg) .addReg(DstReg).addImm(-Offset); diff --git a/lib/Target/Mips/MicroMipsSizeReduction.cpp b/lib/Target/Mips/MicroMipsSizeReduction.cpp index 70af95592aa..db93b3d80ed 100644 --- a/lib/Target/Mips/MicroMipsSizeReduction.cpp +++ b/lib/Target/Mips/MicroMipsSizeReduction.cpp @@ -361,7 +361,7 @@ static bool CheckXWPInstr(MachineInstr *MI, bool ReduceToLwp, MI->getOpcode() == Mips::SW16_MM)) return false; - unsigned reg = MI->getOperand(0).getReg(); + Register reg = MI->getOperand(0).getReg(); if (reg == Mips::RA) return false; @@ -403,8 +403,8 @@ static bool ConsecutiveInstr(MachineInstr *MI1, MachineInstr *MI2) { if (!GetImm(MI2, 2, Offset2)) return false; - unsigned Reg1 = MI1->getOperand(0).getReg(); - unsigned Reg2 = MI2->getOperand(0).getReg(); + Register Reg1 = MI1->getOperand(0).getReg(); + Register Reg2 = MI2->getOperand(0).getReg(); return ((Offset1 == (Offset2 - 4)) && (ConsecutiveRegisters(Reg1, Reg2))); } @@ -475,8 +475,8 @@ bool MicroMipsSizeReduce::ReduceXWtoXWP(ReduceEntryFunArgs *Arguments) { if (!CheckXWPInstr(MI2, ReduceToLwp, Entry)) return false; - unsigned Reg1 = MI1->getOperand(1).getReg(); - unsigned Reg2 = MI2->getOperand(1).getReg(); + Register Reg1 = MI1->getOperand(1).getReg(); + Register Reg2 = MI2->getOperand(1).getReg(); if (Reg1 != Reg2) return false; @@ -621,8 +621,8 @@ bool MicroMipsSizeReduce::ReduceMoveToMovep(ReduceEntryFunArgs *Arguments) { MachineInstr *MI1 = Arguments->MI; MachineInstr *MI2 = &*NextMII; - unsigned RegDstMI1 = MI1->getOperand(0).getReg(); - unsigned RegSrcMI1 = MI1->getOperand(1).getReg(); + Register RegDstMI1 = MI1->getOperand(0).getReg(); + Register RegSrcMI1 = MI1->getOperand(1).getReg(); if (!IsMovepSrcRegister(RegSrcMI1)) return false; @@ -633,8 +633,8 @@ bool MicroMipsSizeReduce::ReduceMoveToMovep(ReduceEntryFunArgs *Arguments) { if (MI2->getOpcode() != Entry.WideOpc()) return false; - unsigned RegDstMI2 = MI2->getOperand(0).getReg(); - unsigned RegSrcMI2 = 
MI2->getOperand(1).getReg(); + Register RegDstMI2 = MI2->getOperand(0).getReg(); + Register RegSrcMI2 = MI2->getOperand(1).getReg(); if (!IsMovepSrcRegister(RegSrcMI2)) return false; diff --git a/lib/Target/Mips/Mips16ISelDAGToDAG.cpp b/lib/Target/Mips/Mips16ISelDAGToDAG.cpp index 3ab4f1e064d..768d54fc9c2 100644 --- a/lib/Target/Mips/Mips16ISelDAGToDAG.cpp +++ b/lib/Target/Mips/Mips16ISelDAGToDAG.cpp @@ -72,7 +72,7 @@ void Mips16DAGToDAGISel::initGlobalBaseReg(MachineFunction &MF) { MachineRegisterInfo &RegInfo = MF.getRegInfo(); const TargetInstrInfo &TII = *Subtarget->getInstrInfo(); DebugLoc DL; - unsigned V0, V1, V2, GlobalBaseReg = MipsFI->getGlobalBaseReg(); + Register V0, V1, V2, GlobalBaseReg = MipsFI->getGlobalBaseReg(); const TargetRegisterClass *RC = &Mips::CPU16RegsRegClass; V0 = RegInfo.createVirtualRegister(RC); diff --git a/lib/Target/Mips/Mips16ISelLowering.cpp b/lib/Target/Mips/Mips16ISelLowering.cpp index 6d8e5aef2a3..5a5b78c9d5f 100644 --- a/lib/Target/Mips/Mips16ISelLowering.cpp +++ b/lib/Target/Mips/Mips16ISelLowering.cpp @@ -708,8 +708,8 @@ Mips16TargetLowering::emitFEXT_T8I816_ins(unsigned BtOpc, unsigned CmpOpc, if (DontExpandCondPseudos16) return BB; const TargetInstrInfo *TII = Subtarget.getInstrInfo(); - unsigned regX = MI.getOperand(0).getReg(); - unsigned regY = MI.getOperand(1).getReg(); + Register regX = MI.getOperand(0).getReg(); + Register regY = MI.getOperand(1).getReg(); MachineBasicBlock *target = MI.getOperand(2).getMBB(); BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(CmpOpc)) .addReg(regX) @@ -725,7 +725,7 @@ MachineBasicBlock *Mips16TargetLowering::emitFEXT_T8I8I16_ins( if (DontExpandCondPseudos16) return BB; const TargetInstrInfo *TII = Subtarget.getInstrInfo(); - unsigned regX = MI.getOperand(0).getReg(); + Register regX = MI.getOperand(0).getReg(); int64_t imm = MI.getOperand(1).getImm(); MachineBasicBlock *target = MI.getOperand(2).getMBB(); unsigned CmpOpc; @@ -758,9 +758,9 @@ Mips16TargetLowering::emitFEXT_CCRX16_ins(unsigned SltOpc, MachineInstr &MI, if (DontExpandCondPseudos16) return BB; const TargetInstrInfo *TII = Subtarget.getInstrInfo(); - unsigned CC = MI.getOperand(0).getReg(); - unsigned regX = MI.getOperand(1).getReg(); - unsigned regY = MI.getOperand(2).getReg(); + Register CC = MI.getOperand(0).getReg(); + Register regX = MI.getOperand(1).getReg(); + Register regY = MI.getOperand(2).getReg(); BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(SltOpc)) .addReg(regX) .addReg(regY); @@ -777,8 +777,8 @@ Mips16TargetLowering::emitFEXT_CCRXI16_ins(unsigned SltiOpc, unsigned SltiXOpc, if (DontExpandCondPseudos16) return BB; const TargetInstrInfo *TII = Subtarget.getInstrInfo(); - unsigned CC = MI.getOperand(0).getReg(); - unsigned regX = MI.getOperand(1).getReg(); + Register CC = MI.getOperand(0).getReg(); + Register regX = MI.getOperand(1).getReg(); int64_t Imm = MI.getOperand(2).getImm(); unsigned SltOpc = Mips16WhichOp8uOr16simm(SltiOpc, SltiXOpc, Imm); BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(SltOpc)).addReg(regX).addImm(Imm); diff --git a/lib/Target/Mips/MipsAsmPrinter.cpp b/lib/Target/Mips/MipsAsmPrinter.cpp index db83fe49cec..d9e478574d1 100644 --- a/lib/Target/Mips/MipsAsmPrinter.cpp +++ b/lib/Target/Mips/MipsAsmPrinter.cpp @@ -376,7 +376,7 @@ void MipsAsmPrinter::printSavedRegsBitmask() { void MipsAsmPrinter::emitFrameDirective() { const TargetRegisterInfo &RI = *MF->getSubtarget().getRegisterInfo(); - unsigned stackReg = RI.getFrameRegister(*MF); + Register stackReg = RI.getFrameRegister(*MF); unsigned returnReg = RI.getRARegister(); 
unsigned stackSize = MF->getFrameInfo().getStackSize(); @@ -571,7 +571,7 @@ bool MipsAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum, // for 2 for 32 bit mode and 1 for 64 bit mode. if (NumVals != 2) { if (Subtarget->isGP64bit() && NumVals == 1 && MO.isReg()) { - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); O << '$' << MipsInstPrinter::getRegisterName(Reg); return false; } @@ -597,7 +597,7 @@ bool MipsAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum, const MachineOperand &MO = MI->getOperand(RegOp); if (!MO.isReg()) return true; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); O << '$' << MipsInstPrinter::getRegisterName(Reg); return false; } diff --git a/lib/Target/Mips/MipsExpandPseudo.cpp b/lib/Target/Mips/MipsExpandPseudo.cpp index 65d84a6c44a..00cd284e709 100644 --- a/lib/Target/Mips/MipsExpandPseudo.cpp +++ b/lib/Target/Mips/MipsExpandPseudo.cpp @@ -99,15 +99,15 @@ bool MipsExpandPseudo::expandAtomicCmpSwapSubword( : (ArePtrs64bit ? Mips::SC64 : Mips::SC); } - unsigned Dest = I->getOperand(0).getReg(); - unsigned Ptr = I->getOperand(1).getReg(); - unsigned Mask = I->getOperand(2).getReg(); - unsigned ShiftCmpVal = I->getOperand(3).getReg(); - unsigned Mask2 = I->getOperand(4).getReg(); - unsigned ShiftNewVal = I->getOperand(5).getReg(); - unsigned ShiftAmnt = I->getOperand(6).getReg(); - unsigned Scratch = I->getOperand(7).getReg(); - unsigned Scratch2 = I->getOperand(8).getReg(); + Register Dest = I->getOperand(0).getReg(); + Register Ptr = I->getOperand(1).getReg(); + Register Mask = I->getOperand(2).getReg(); + Register ShiftCmpVal = I->getOperand(3).getReg(); + Register Mask2 = I->getOperand(4).getReg(); + Register ShiftNewVal = I->getOperand(5).getReg(); + Register ShiftAmnt = I->getOperand(6).getReg(); + Register Scratch = I->getOperand(7).getReg(); + Register Scratch2 = I->getOperand(8).getReg(); // insert new blocks after the current block const BasicBlock *LLVM_BB = BB.getBasicBlock(); @@ -240,11 +240,11 @@ bool MipsExpandPseudo::expandAtomicCmpSwap(MachineBasicBlock &BB, MOVE = Mips::OR64; } - unsigned Dest = I->getOperand(0).getReg(); - unsigned Ptr = I->getOperand(1).getReg(); - unsigned OldVal = I->getOperand(2).getReg(); - unsigned NewVal = I->getOperand(3).getReg(); - unsigned Scratch = I->getOperand(4).getReg(); + Register Dest = I->getOperand(0).getReg(); + Register Ptr = I->getOperand(1).getReg(); + Register OldVal = I->getOperand(2).getReg(); + Register NewVal = I->getOperand(3).getReg(); + Register Scratch = I->getOperand(4).getReg(); // insert new blocks after the current block const BasicBlock *LLVM_BB = BB.getBasicBlock(); @@ -374,15 +374,15 @@ bool MipsExpandPseudo::expandAtomicBinOpSubword( llvm_unreachable("Unknown subword atomic pseudo for expansion!"); } - unsigned Dest = I->getOperand(0).getReg(); - unsigned Ptr = I->getOperand(1).getReg(); - unsigned Incr = I->getOperand(2).getReg(); - unsigned Mask = I->getOperand(3).getReg(); - unsigned Mask2 = I->getOperand(4).getReg(); - unsigned ShiftAmnt = I->getOperand(5).getReg(); - unsigned OldVal = I->getOperand(6).getReg(); - unsigned BinOpRes = I->getOperand(7).getReg(); - unsigned StoreVal = I->getOperand(8).getReg(); + Register Dest = I->getOperand(0).getReg(); + Register Ptr = I->getOperand(1).getReg(); + Register Incr = I->getOperand(2).getReg(); + Register Mask = I->getOperand(3).getReg(); + Register Mask2 = I->getOperand(4).getReg(); + Register ShiftAmnt = I->getOperand(5).getReg(); + Register OldVal = I->getOperand(6).getReg(); + Register 
BinOpRes = I->getOperand(7).getReg(); + Register StoreVal = I->getOperand(8).getReg(); const BasicBlock *LLVM_BB = BB.getBasicBlock(); MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); @@ -513,10 +513,10 @@ bool MipsExpandPseudo::expandAtomicBinOp(MachineBasicBlock &BB, BEQ = Mips::BEQ64; } - unsigned OldVal = I->getOperand(0).getReg(); - unsigned Ptr = I->getOperand(1).getReg(); - unsigned Incr = I->getOperand(2).getReg(); - unsigned Scratch = I->getOperand(3).getReg(); + Register OldVal = I->getOperand(0).getReg(); + Register Ptr = I->getOperand(1).getReg(); + Register Incr = I->getOperand(2).getReg(); + Register Scratch = I->getOperand(3).getReg(); unsigned Opcode = 0; unsigned OR = 0; diff --git a/lib/Target/Mips/MipsFastISel.cpp b/lib/Target/Mips/MipsFastISel.cpp index 702c2b01358..80f288ac500 100644 --- a/lib/Target/Mips/MipsFastISel.cpp +++ b/lib/Target/Mips/MipsFastISel.cpp @@ -1728,7 +1728,7 @@ bool MipsFastISel::selectRet(const Instruction *I) { return false; unsigned SrcReg = Reg + VA.getValNo(); - unsigned DestReg = VA.getLocReg(); + Register DestReg = VA.getLocReg(); // Avoid a cross-class copy. This is very unlikely. if (!MRI.getRegClass(SrcReg)->contains(DestReg)) return false; diff --git a/lib/Target/Mips/MipsISelDAGToDAG.cpp b/lib/Target/Mips/MipsISelDAGToDAG.cpp index 9ba54d6bb73..42d158f44f3 100644 --- a/lib/Target/Mips/MipsISelDAGToDAG.cpp +++ b/lib/Target/Mips/MipsISelDAGToDAG.cpp @@ -65,7 +65,7 @@ bool MipsDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) { /// getGlobalBaseReg - Output the instructions required to put the /// GOT address into a register. SDNode *MipsDAGToDAGISel::getGlobalBaseReg() { - unsigned GlobalBaseReg = MF->getInfo<MipsFunctionInfo>()->getGlobalBaseReg(); + Register GlobalBaseReg = MF->getInfo<MipsFunctionInfo>()->getGlobalBaseReg(); return CurDAG->getRegister(GlobalBaseReg, getTargetLowering()->getPointerTy( CurDAG->getDataLayout())) .getNode(); diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp index 0ff09007da4..6e86d942a8d 100644 --- a/lib/Target/Mips/MipsISelLowering.cpp +++ b/lib/Target/Mips/MipsISelLowering.cpp @@ -1257,7 +1257,7 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const static unsigned addLiveIn(MachineFunction &MF, unsigned PReg, const TargetRegisterClass *RC) { - unsigned VReg = MF.getRegInfo().createVirtualRegister(RC); + Register VReg = MF.getRegInfo().createVirtualRegister(RC); MF.getRegInfo().addLiveIn(PReg, VReg); return VReg; } @@ -1477,10 +1477,10 @@ MipsTargetLowering::emitAtomicBinary(MachineInstr &MI, llvm_unreachable("Unknown pseudo atomic for replacement!"); } - unsigned OldVal = MI.getOperand(0).getReg(); - unsigned Ptr = MI.getOperand(1).getReg(); - unsigned Incr = MI.getOperand(2).getReg(); - unsigned Scratch = RegInfo.createVirtualRegister(RegInfo.getRegClass(OldVal)); + Register OldVal = MI.getOperand(0).getReg(); + Register Ptr = MI.getOperand(1).getReg(); + Register Incr = MI.getOperand(2).getReg(); + Register Scratch = RegInfo.createVirtualRegister(RegInfo.getRegClass(OldVal)); MachineBasicBlock::iterator II(MI); @@ -1519,8 +1519,8 @@ MipsTargetLowering::emitAtomicBinary(MachineInstr &MI, // containing the word.
// - unsigned PtrCopy = RegInfo.createVirtualRegister(RegInfo.getRegClass(Ptr)); - unsigned IncrCopy = RegInfo.createVirtualRegister(RegInfo.getRegClass(Incr)); + Register PtrCopy = RegInfo.createVirtualRegister(RegInfo.getRegClass(Ptr)); + Register IncrCopy = RegInfo.createVirtualRegister(RegInfo.getRegClass(Incr)); BuildMI(*BB, II, DL, TII->get(Mips::COPY), IncrCopy).addReg(Incr); BuildMI(*BB, II, DL, TII->get(Mips::COPY), PtrCopy).addReg(Ptr); @@ -1556,7 +1556,7 @@ MachineBasicBlock *MipsTargetLowering::emitSignExtendToI32InReg( MachineFunction *MF = BB->getParent(); MachineRegisterInfo &RegInfo = MF->getRegInfo(); const TargetRegisterClass *RC = getRegClassFor(MVT::i32); - unsigned ScrReg = RegInfo.createVirtualRegister(RC); + Register ScrReg = RegInfo.createVirtualRegister(RC); assert(Size < 32); int64_t ShiftImm = 32 - (Size * 8); @@ -1581,21 +1581,21 @@ MachineBasicBlock *MipsTargetLowering::emitAtomicBinaryPartword( const TargetInstrInfo *TII = Subtarget.getInstrInfo(); DebugLoc DL = MI.getDebugLoc(); - unsigned Dest = MI.getOperand(0).getReg(); - unsigned Ptr = MI.getOperand(1).getReg(); - unsigned Incr = MI.getOperand(2).getReg(); + Register Dest = MI.getOperand(0).getReg(); + Register Ptr = MI.getOperand(1).getReg(); + Register Incr = MI.getOperand(2).getReg(); - unsigned AlignedAddr = RegInfo.createVirtualRegister(RCp); - unsigned ShiftAmt = RegInfo.createVirtualRegister(RC); - unsigned Mask = RegInfo.createVirtualRegister(RC); - unsigned Mask2 = RegInfo.createVirtualRegister(RC); - unsigned Incr2 = RegInfo.createVirtualRegister(RC); - unsigned MaskLSB2 = RegInfo.createVirtualRegister(RCp); - unsigned PtrLSB2 = RegInfo.createVirtualRegister(RC); - unsigned MaskUpper = RegInfo.createVirtualRegister(RC); - unsigned Scratch = RegInfo.createVirtualRegister(RC); - unsigned Scratch2 = RegInfo.createVirtualRegister(RC); - unsigned Scratch3 = RegInfo.createVirtualRegister(RC); + Register AlignedAddr = RegInfo.createVirtualRegister(RCp); + Register ShiftAmt = RegInfo.createVirtualRegister(RC); + Register Mask = RegInfo.createVirtualRegister(RC); + Register Mask2 = RegInfo.createVirtualRegister(RC); + Register Incr2 = RegInfo.createVirtualRegister(RC); + Register MaskLSB2 = RegInfo.createVirtualRegister(RCp); + Register PtrLSB2 = RegInfo.createVirtualRegister(RC); + Register MaskUpper = RegInfo.createVirtualRegister(RC); + Register Scratch = RegInfo.createVirtualRegister(RC); + Register Scratch2 = RegInfo.createVirtualRegister(RC); + Register Scratch3 = RegInfo.createVirtualRegister(RC); unsigned AtomicOp = 0; switch (MI.getOpcode()) { @@ -1678,7 +1678,7 @@ MachineBasicBlock *MipsTargetLowering::emitAtomicBinaryPartword( if (Subtarget.isLittle()) { BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3); } else { - unsigned Off = RegInfo.createVirtualRegister(RC); + Register Off = RegInfo.createVirtualRegister(RC); BuildMI(BB, DL, TII->get(Mips::XORi), Off) .addReg(PtrLSB2).addImm((Size == 1) ? 3 : 2); BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(Off).addImm(3); @@ -1738,12 +1738,12 @@ MipsTargetLowering::emitAtomicCmpSwap(MachineInstr &MI, unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ? 
Mips::ATOMIC_CMP_SWAP_I32_POSTRA : Mips::ATOMIC_CMP_SWAP_I64_POSTRA; - unsigned Dest = MI.getOperand(0).getReg(); - unsigned Ptr = MI.getOperand(1).getReg(); - unsigned OldVal = MI.getOperand(2).getReg(); - unsigned NewVal = MI.getOperand(3).getReg(); + Register Dest = MI.getOperand(0).getReg(); + Register Ptr = MI.getOperand(1).getReg(); + Register OldVal = MI.getOperand(2).getReg(); + Register NewVal = MI.getOperand(3).getReg(); - unsigned Scratch = MRI.createVirtualRegister(RC); + Register Scratch = MRI.createVirtualRegister(RC); MachineBasicBlock::iterator II(MI); // We need to create copies of the various registers and kill them at the @@ -1751,9 +1751,9 @@ MipsTargetLowering::emitAtomicCmpSwap(MachineInstr &MI, // after fast register allocation, the spills will end up outside of the // blocks that their values are defined in, causing livein errors. - unsigned PtrCopy = MRI.createVirtualRegister(MRI.getRegClass(Ptr)); - unsigned OldValCopy = MRI.createVirtualRegister(MRI.getRegClass(OldVal)); - unsigned NewValCopy = MRI.createVirtualRegister(MRI.getRegClass(NewVal)); + Register PtrCopy = MRI.createVirtualRegister(MRI.getRegClass(Ptr)); + Register OldValCopy = MRI.createVirtualRegister(MRI.getRegClass(OldVal)); + Register NewValCopy = MRI.createVirtualRegister(MRI.getRegClass(NewVal)); BuildMI(*BB, II, DL, TII->get(Mips::COPY), PtrCopy).addReg(Ptr); BuildMI(*BB, II, DL, TII->get(Mips::COPY), OldValCopy).addReg(OldVal); @@ -1790,22 +1790,22 @@ MachineBasicBlock *MipsTargetLowering::emitAtomicCmpSwapPartword( const TargetInstrInfo *TII = Subtarget.getInstrInfo(); DebugLoc DL = MI.getDebugLoc(); - unsigned Dest = MI.getOperand(0).getReg(); - unsigned Ptr = MI.getOperand(1).getReg(); - unsigned CmpVal = MI.getOperand(2).getReg(); - unsigned NewVal = MI.getOperand(3).getReg(); + Register Dest = MI.getOperand(0).getReg(); + Register Ptr = MI.getOperand(1).getReg(); + Register CmpVal = MI.getOperand(2).getReg(); + Register NewVal = MI.getOperand(3).getReg(); - unsigned AlignedAddr = RegInfo.createVirtualRegister(RCp); - unsigned ShiftAmt = RegInfo.createVirtualRegister(RC); - unsigned Mask = RegInfo.createVirtualRegister(RC); - unsigned Mask2 = RegInfo.createVirtualRegister(RC); - unsigned ShiftedCmpVal = RegInfo.createVirtualRegister(RC); - unsigned ShiftedNewVal = RegInfo.createVirtualRegister(RC); - unsigned MaskLSB2 = RegInfo.createVirtualRegister(RCp); - unsigned PtrLSB2 = RegInfo.createVirtualRegister(RC); - unsigned MaskUpper = RegInfo.createVirtualRegister(RC); - unsigned MaskedCmpVal = RegInfo.createVirtualRegister(RC); - unsigned MaskedNewVal = RegInfo.createVirtualRegister(RC); + Register AlignedAddr = RegInfo.createVirtualRegister(RCp); + Register ShiftAmt = RegInfo.createVirtualRegister(RC); + Register Mask = RegInfo.createVirtualRegister(RC); + Register Mask2 = RegInfo.createVirtualRegister(RC); + Register ShiftedCmpVal = RegInfo.createVirtualRegister(RC); + Register ShiftedNewVal = RegInfo.createVirtualRegister(RC); + Register MaskLSB2 = RegInfo.createVirtualRegister(RCp); + Register PtrLSB2 = RegInfo.createVirtualRegister(RC); + Register MaskUpper = RegInfo.createVirtualRegister(RC); + Register MaskedCmpVal = RegInfo.createVirtualRegister(RC); + Register MaskedNewVal = RegInfo.createVirtualRegister(RC); unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I8 ? Mips::ATOMIC_CMP_SWAP_I8_POSTRA : Mips::ATOMIC_CMP_SWAP_I16_POSTRA; @@ -1820,8 +1820,8 @@ MachineBasicBlock *MipsTargetLowering::emitAtomicCmpSwapPartword( // value isn't a problem. 
// The Dead flag is needed as the value in scratch isn't used by any other // instruction. Kill isn't used as Dead is more precise. - unsigned Scratch = RegInfo.createVirtualRegister(RC); - unsigned Scratch2 = RegInfo.createVirtualRegister(RC); + Register Scratch = RegInfo.createVirtualRegister(RC); + Register Scratch2 = RegInfo.createVirtualRegister(RC); // insert new blocks after the current block const BasicBlock *LLVM_BB = BB->getBasicBlock(); @@ -1859,7 +1859,7 @@ MachineBasicBlock *MipsTargetLowering::emitAtomicCmpSwapPartword( if (Subtarget.isLittle()) { BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3); } else { - unsigned Off = RegInfo.createVirtualRegister(RC); + Register Off = RegInfo.createVirtualRegister(RC); BuildMI(BB, DL, TII->get(Mips::XORi), Off) .addReg(PtrLSB2).addImm((Size == 1) ? 3 : 2); BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(Off).addImm(3); @@ -3167,7 +3167,7 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, Arg, DAG.getConstant(1, DL, MVT::i32)); if (!Subtarget.isLittle()) std::swap(Lo, Hi); - unsigned LocRegLo = VA.getLocReg(); + Register LocRegLo = VA.getLocReg(); unsigned LocRegHigh = getNextIntArgReg(LocRegLo); RegsToPass.push_back(std::make_pair(LocRegLo, Lo)); RegsToPass.push_back(std::make_pair(LocRegHigh, Hi)); @@ -3523,7 +3523,7 @@ SDValue MipsTargetLowering::LowerFormalArguments( // Arguments stored on registers if (IsRegLoc) { MVT RegVT = VA.getLocVT(); - unsigned ArgReg = VA.getLocReg(); + Register ArgReg = VA.getLocReg(); const TargetRegisterClass *RC = getRegClassFor(RegVT); // Transform the arguments stored on diff --git a/lib/Target/Mips/MipsInstructionSelector.cpp b/lib/Target/Mips/MipsInstructionSelector.cpp index 8426579720c..da319265fba 100644 --- a/lib/Target/Mips/MipsInstructionSelector.cpp +++ b/lib/Target/Mips/MipsInstructionSelector.cpp @@ -503,7 +503,7 @@ bool MipsInstructionSelector::select(MachineInstr &I) { Opcode = Mips::TRUNC_W_S; else Opcode = STI.isFP64bit() ? Mips::TRUNC_W_D64 : Mips::TRUNC_W_D32; - unsigned ResultInFPR = MRI.createVirtualRegister(&Mips::FGR32RegClass); + Register ResultInFPR = MRI.createVirtualRegister(&Mips::FGR32RegClass); MachineInstr *Trunc = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode)) .addDef(ResultInFPR) .addUse(I.getOperand(1).getReg()); @@ -724,7 +724,7 @@ bool MipsInstructionSelector::select(MachineInstr &I) { // MipsFCMPCondCode, result is inverted i.e. MOVT_I is used. unsigned MoveOpcode = isLogicallyNegated ? Mips::MOVT_I : Mips::MOVF_I; - unsigned TrueInReg = MRI.createVirtualRegister(&Mips::GPR32RegClass); + Register TrueInReg = MRI.createVirtualRegister(&Mips::GPR32RegClass); BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu)) .addDef(TrueInReg) .addUse(Mips::ZERO) diff --git a/lib/Target/Mips/MipsOptimizePICCall.cpp b/lib/Target/Mips/MipsOptimizePICCall.cpp index c3853e71e7d..8bd64ff6cb2 100644 --- a/lib/Target/Mips/MipsOptimizePICCall.cpp +++ b/lib/Target/Mips/MipsOptimizePICCall.cpp @@ -151,7 +151,7 @@ static void setCallTargetReg(MachineBasicBlock *MBB, MachineBasicBlock::iterator I) { MachineFunction &MF = *MBB->getParent(); const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); - unsigned SrcReg = I->getOperand(0).getReg(); + Register SrcReg = I->getOperand(0).getReg(); unsigned DstReg = getRegTy(SrcReg, MF) == MVT::i32 ? 
Mips::T9 : Mips::T9_64; BuildMI(*MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), DstReg) .addReg(SrcReg); diff --git a/lib/Target/Mips/MipsSEFrameLowering.cpp b/lib/Target/Mips/MipsSEFrameLowering.cpp index 4c6cc1ef771..55efe2cdc83 100644 --- a/lib/Target/Mips/MipsSEFrameLowering.cpp +++ b/lib/Target/Mips/MipsSEFrameLowering.cpp @@ -171,8 +171,8 @@ void ExpandPseudo::expandLoadCCond(MachineBasicBlock &MBB, Iter I) { assert(I->getOperand(0).isReg() && I->getOperand(1).isFI()); const TargetRegisterClass *RC = RegInfo.intRegClass(4); - unsigned VR = MRI.createVirtualRegister(RC); - unsigned Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex(); + Register VR = MRI.createVirtualRegister(RC); + Register Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex(); TII.loadRegFromStack(MBB, I, VR, FI, RC, &RegInfo, 0); BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), Dst) @@ -186,8 +186,8 @@ void ExpandPseudo::expandStoreCCond(MachineBasicBlock &MBB, Iter I) { assert(I->getOperand(0).isReg() && I->getOperand(1).isFI()); const TargetRegisterClass *RC = RegInfo.intRegClass(4); - unsigned VR = MRI.createVirtualRegister(RC); - unsigned Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex(); + Register VR = MRI.createVirtualRegister(RC); + Register Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex(); BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), VR) .addReg(Src, getKillRegState(I->getOperand(0).isKill())); @@ -204,11 +204,11 @@ void ExpandPseudo::expandLoadACC(MachineBasicBlock &MBB, Iter I, assert(I->getOperand(0).isReg() && I->getOperand(1).isFI()); const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize); - unsigned VR0 = MRI.createVirtualRegister(RC); - unsigned VR1 = MRI.createVirtualRegister(RC); - unsigned Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex(); - unsigned Lo = RegInfo.getSubReg(Dst, Mips::sub_lo); - unsigned Hi = RegInfo.getSubReg(Dst, Mips::sub_hi); + Register VR0 = MRI.createVirtualRegister(RC); + Register VR1 = MRI.createVirtualRegister(RC); + Register Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex(); + Register Lo = RegInfo.getSubReg(Dst, Mips::sub_lo); + Register Hi = RegInfo.getSubReg(Dst, Mips::sub_hi); DebugLoc DL = I->getDebugLoc(); const MCInstrDesc &Desc = TII.get(TargetOpcode::COPY); @@ -229,9 +229,9 @@ void ExpandPseudo::expandStoreACC(MachineBasicBlock &MBB, Iter I, assert(I->getOperand(0).isReg() && I->getOperand(1).isFI()); const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize); - unsigned VR0 = MRI.createVirtualRegister(RC); - unsigned VR1 = MRI.createVirtualRegister(RC); - unsigned Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex(); + Register VR0 = MRI.createVirtualRegister(RC); + Register VR1 = MRI.createVirtualRegister(RC); + Register Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex(); unsigned SrcKill = getKillRegState(I->getOperand(0).isKill()); DebugLoc DL = I->getDebugLoc(); @@ -242,7 +242,7 @@ void ExpandPseudo::expandStoreACC(MachineBasicBlock &MBB, Iter I, } bool ExpandPseudo::expandCopy(MachineBasicBlock &MBB, Iter I) { - unsigned Src = I->getOperand(1).getReg(); + Register Src = I->getOperand(1).getReg(); std::pair<unsigned, unsigned> Opcodes = getMFHiLoOpc(Src); if (!Opcodes.first) @@ -262,11 +262,11 @@ bool ExpandPseudo::expandCopyACC(MachineBasicBlock &MBB, Iter I, const TargetRegisterClass *DstRC = RegInfo.getMinimalPhysRegClass(Dst); unsigned VRegSize = RegInfo.getRegSizeInBits(*DstRC) / 16; const TargetRegisterClass
*RC = RegInfo.intRegClass(VRegSize); - unsigned VR0 = MRI.createVirtualRegister(RC); - unsigned VR1 = MRI.createVirtualRegister(RC); + Register VR0 = MRI.createVirtualRegister(RC); + Register VR1 = MRI.createVirtualRegister(RC); unsigned SrcKill = getKillRegState(I->getOperand(1).isKill()); - unsigned DstLo = RegInfo.getSubReg(Dst, Mips::sub_lo); - unsigned DstHi = RegInfo.getSubReg(Dst, Mips::sub_hi); + Register DstLo = RegInfo.getSubReg(Dst, Mips::sub_lo); + Register DstHi = RegInfo.getSubReg(Dst, Mips::sub_hi); DebugLoc DL = I->getDebugLoc(); BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src); @@ -304,9 +304,9 @@ bool ExpandPseudo::expandBuildPairF64(MachineBasicBlock &MBB, // stack is used. if (I->getNumOperands() == 4 && I->getOperand(3).isReg() && I->getOperand(3).getReg() == Mips::SP) { - unsigned DstReg = I->getOperand(0).getReg(); - unsigned LoReg = I->getOperand(1).getReg(); - unsigned HiReg = I->getOperand(2).getReg(); + Register DstReg = I->getOperand(0).getReg(); + Register LoReg = I->getOperand(1).getReg(); + Register HiReg = I->getOperand(2).getReg(); // It should be impossible to have FGR64 on MIPS-II or MIPS32r1 (which are // the cases where mthc1 is not available). 64-bit architectures and @@ -346,7 +346,7 @@ bool ExpandPseudo::expandExtractElementF64(MachineBasicBlock &MBB, const MachineOperand &Op2 = I->getOperand(2); if ((Op1.isReg() && Op1.isUndef()) || (Op2.isReg() && Op2.isUndef())) { - unsigned DstReg = I->getOperand(0).getReg(); + Register DstReg = I->getOperand(0).getReg(); BuildMI(MBB, I, I->getDebugLoc(), TII.get(Mips::IMPLICIT_DEF), DstReg); return true; } @@ -369,8 +369,8 @@ bool ExpandPseudo::expandExtractElementF64(MachineBasicBlock &MBB, // stack is used. if (I->getNumOperands() == 4 && I->getOperand(3).isReg() && I->getOperand(3).getReg() == Mips::SP) { - unsigned DstReg = I->getOperand(0).getReg(); - unsigned SrcReg = Op1.getReg(); + Register DstReg = I->getOperand(0).getReg(); + Register SrcReg = Op1.getReg(); unsigned N = Op2.getImm(); int64_t Offset = 4 * (Subtarget.isLittle() ? N : (1 - N)); @@ -538,7 +538,7 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF, if (RegInfo.needsStackRealignment(MF)) { // addiu $Reg, $zero, -MaxAlignment // andi $sp, $sp, $Reg - unsigned VR = MF.getRegInfo().createVirtualRegister(RC); + Register VR = MF.getRegInfo().createVirtualRegister(RC); assert(isInt<16>(MFI.getMaxAlignment()) && "Function's alignment size requirement is not supported."); int MaxAlign = -(int)MFI.getMaxAlignment(); diff --git a/lib/Target/Mips/MipsSEISelLowering.cpp b/lib/Target/Mips/MipsSEISelLowering.cpp index edf57a3840d..bf7e7b6c30b 100644 --- a/lib/Target/Mips/MipsSEISelLowering.cpp +++ b/lib/Target/Mips/MipsSEISelLowering.cpp @@ -3062,13 +3062,13 @@ MipsSETargetLowering::emitBPOSGE32(MachineInstr &MI, BuildMI(BB, DL, TII->get(Mips::BPOSGE32C_MMR3)).addMBB(TBB); // Fill $FBB. - unsigned VR2 = RegInfo.createVirtualRegister(RC); + Register VR2 = RegInfo.createVirtualRegister(RC); BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::ADDiu), VR2) .addReg(Mips::ZERO).addImm(0); BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::B)).addMBB(Sink); // Fill $TBB. - unsigned VR1 = RegInfo.createVirtualRegister(RC); + Register VR1 = RegInfo.createVirtualRegister(RC); BuildMI(*TBB, TBB->end(), DL, TII->get(Mips::ADDiu), VR1) .addReg(Mips::ZERO).addImm(1); @@ -3131,13 +3131,13 @@ MachineBasicBlock *MipsSETargetLowering::emitMSACBranchPseudo( .addMBB(TBB); // Fill $FBB. 
- unsigned RD1 = RegInfo.createVirtualRegister(RC); + Register RD1 = RegInfo.createVirtualRegister(RC); BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::ADDiu), RD1) .addReg(Mips::ZERO).addImm(0); BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::B)).addMBB(Sink); // Fill $TBB. - unsigned RD2 = RegInfo.createVirtualRegister(RC); + Register RD2 = RegInfo.createVirtualRegister(RC); BuildMI(*TBB, TBB->end(), DL, TII->get(Mips::ADDiu), RD2) .addReg(Mips::ZERO).addImm(1); @@ -3169,8 +3169,8 @@ MipsSETargetLowering::emitCOPY_FW(MachineInstr &MI, const TargetInstrInfo *TII = Subtarget.getInstrInfo(); MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo(); DebugLoc DL = MI.getDebugLoc(); - unsigned Fd = MI.getOperand(0).getReg(); - unsigned Ws = MI.getOperand(1).getReg(); + Register Fd = MI.getOperand(0).getReg(); + Register Ws = MI.getOperand(1).getReg(); unsigned Lane = MI.getOperand(2).getImm(); if (Lane == 0) { @@ -3185,9 +3185,9 @@ MipsSETargetLowering::emitCOPY_FW(MachineInstr &MI, BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Wt, 0, Mips::sub_lo); } else { - unsigned Wt = RegInfo.createVirtualRegister( - Subtarget.useOddSPReg() ? &Mips::MSA128WRegClass : - &Mips::MSA128WEvensRegClass); + Register Wt = RegInfo.createVirtualRegister( + Subtarget.useOddSPReg() ? &Mips::MSA128WRegClass + : &Mips::MSA128WEvensRegClass); BuildMI(*BB, MI, DL, TII->get(Mips::SPLATI_W), Wt).addReg(Ws).addImm(Lane); BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Wt, 0, Mips::sub_lo); @@ -3214,15 +3214,15 @@ MipsSETargetLowering::emitCOPY_FD(MachineInstr &MI, const TargetInstrInfo *TII = Subtarget.getInstrInfo(); MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo(); - unsigned Fd = MI.getOperand(0).getReg(); - unsigned Ws = MI.getOperand(1).getReg(); + Register Fd = MI.getOperand(0).getReg(); + Register Ws = MI.getOperand(1).getReg(); unsigned Lane = MI.getOperand(2).getImm() * 2; DebugLoc DL = MI.getDebugLoc(); if (Lane == 0) BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Ws, 0, Mips::sub_64); else { - unsigned Wt = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass); + Register Wt = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass); BuildMI(*BB, MI, DL, TII->get(Mips::SPLATI_D), Wt).addReg(Ws).addImm(1); BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Wt, 0, Mips::sub_64); @@ -3244,13 +3244,13 @@ MipsSETargetLowering::emitINSERT_FW(MachineInstr &MI, const TargetInstrInfo *TII = Subtarget.getInstrInfo(); MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo(); DebugLoc DL = MI.getDebugLoc(); - unsigned Wd = MI.getOperand(0).getReg(); - unsigned Wd_in = MI.getOperand(1).getReg(); + Register Wd = MI.getOperand(0).getReg(); + Register Wd_in = MI.getOperand(1).getReg(); unsigned Lane = MI.getOperand(2).getImm(); - unsigned Fs = MI.getOperand(3).getReg(); - unsigned Wt = RegInfo.createVirtualRegister( - Subtarget.useOddSPReg() ? &Mips::MSA128WRegClass : - &Mips::MSA128WEvensRegClass); + Register Fs = MI.getOperand(3).getReg(); + Register Wt = RegInfo.createVirtualRegister( + Subtarget.useOddSPReg() ? 
&Mips::MSA128WRegClass + : &Mips::MSA128WEvensRegClass); BuildMI(*BB, MI, DL, TII->get(Mips::SUBREG_TO_REG), Wt) .addImm(0) @@ -3280,11 +3280,11 @@ MipsSETargetLowering::emitINSERT_FD(MachineInstr &MI, const TargetInstrInfo *TII = Subtarget.getInstrInfo(); MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo(); DebugLoc DL = MI.getDebugLoc(); - unsigned Wd = MI.getOperand(0).getReg(); - unsigned Wd_in = MI.getOperand(1).getReg(); + Register Wd = MI.getOperand(0).getReg(); + Register Wd_in = MI.getOperand(1).getReg(); unsigned Lane = MI.getOperand(2).getImm(); - unsigned Fs = MI.getOperand(3).getReg(); - unsigned Wt = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass); + Register Fs = MI.getOperand(3).getReg(); + Register Wt = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass); BuildMI(*BB, MI, DL, TII->get(Mips::SUBREG_TO_REG), Wt) .addImm(0) @@ -3326,10 +3326,10 @@ MachineBasicBlock *MipsSETargetLowering::emitINSERT_DF_VIDX( const TargetInstrInfo *TII = Subtarget.getInstrInfo(); MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo(); DebugLoc DL = MI.getDebugLoc(); - unsigned Wd = MI.getOperand(0).getReg(); - unsigned SrcVecReg = MI.getOperand(1).getReg(); - unsigned LaneReg = MI.getOperand(2).getReg(); - unsigned SrcValReg = MI.getOperand(3).getReg(); + Register Wd = MI.getOperand(0).getReg(); + Register SrcVecReg = MI.getOperand(1).getReg(); + Register LaneReg = MI.getOperand(2).getReg(); + Register SrcValReg = MI.getOperand(3).getReg(); const TargetRegisterClass *VecRC = nullptr; // FIXME: This should be true for N32 too. @@ -3370,7 +3370,7 @@ MachineBasicBlock *MipsSETargetLowering::emitINSERT_DF_VIDX( } if (IsFP) { - unsigned Wt = RegInfo.createVirtualRegister(VecRC); + Register Wt = RegInfo.createVirtualRegister(VecRC); BuildMI(*BB, MI, DL, TII->get(Mips::SUBREG_TO_REG), Wt) .addImm(0) .addReg(SrcValReg) @@ -3380,7 +3380,7 @@ MachineBasicBlock *MipsSETargetLowering::emitINSERT_DF_VIDX( // Convert the lane index into a byte index if (EltSizeInBytes != 1) { - unsigned LaneTmp1 = RegInfo.createVirtualRegister(GPRRC); + Register LaneTmp1 = RegInfo.createVirtualRegister(GPRRC); BuildMI(*BB, MI, DL, TII->get(ShiftOp), LaneTmp1) .addReg(LaneReg) .addImm(EltLog2Size); @@ -3388,13 +3388,13 @@ MachineBasicBlock *MipsSETargetLowering::emitINSERT_DF_VIDX( } // Rotate bytes around so that the desired lane is element zero - unsigned WdTmp1 = RegInfo.createVirtualRegister(VecRC); + Register WdTmp1 = RegInfo.createVirtualRegister(VecRC); BuildMI(*BB, MI, DL, TII->get(Mips::SLD_B), WdTmp1) .addReg(SrcVecReg) .addReg(SrcVecReg) .addReg(LaneReg, 0, SubRegIdx); - unsigned WdTmp2 = RegInfo.createVirtualRegister(VecRC); + Register WdTmp2 = RegInfo.createVirtualRegister(VecRC); if (IsFP) { // Use insve.df to insert to element zero BuildMI(*BB, MI, DL, TII->get(InsveOp), WdTmp2) @@ -3413,7 +3413,7 @@ MachineBasicBlock *MipsSETargetLowering::emitINSERT_DF_VIDX( // Rotate elements the rest of the way for a full rotation. // sld.df inteprets $rt modulo the number of columns so we only need to negate // the lane index to do this. - unsigned LaneTmp2 = RegInfo.createVirtualRegister(GPRRC); + Register LaneTmp2 = RegInfo.createVirtualRegister(GPRRC); BuildMI(*BB, MI, DL, TII->get(Subtarget.isABI_N64() ? Mips::DSUB : Mips::SUB), LaneTmp2) .addReg(Subtarget.isABI_N64() ? 
Mips::ZERO_64 : Mips::ZERO) @@ -3440,12 +3440,12 @@ MipsSETargetLowering::emitFILL_FW(MachineInstr &MI, const TargetInstrInfo *TII = Subtarget.getInstrInfo(); MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo(); DebugLoc DL = MI.getDebugLoc(); - unsigned Wd = MI.getOperand(0).getReg(); - unsigned Fs = MI.getOperand(1).getReg(); - unsigned Wt1 = RegInfo.createVirtualRegister( + Register Wd = MI.getOperand(0).getReg(); + Register Fs = MI.getOperand(1).getReg(); + Register Wt1 = RegInfo.createVirtualRegister( Subtarget.useOddSPReg() ? &Mips::MSA128WRegClass : &Mips::MSA128WEvensRegClass); - unsigned Wt2 = RegInfo.createVirtualRegister( + Register Wt2 = RegInfo.createVirtualRegister( Subtarget.useOddSPReg() ? &Mips::MSA128WRegClass : &Mips::MSA128WEvensRegClass); @@ -3475,10 +3475,10 @@ MipsSETargetLowering::emitFILL_FD(MachineInstr &MI, const TargetInstrInfo *TII = Subtarget.getInstrInfo(); MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo(); DebugLoc DL = MI.getDebugLoc(); - unsigned Wd = MI.getOperand(0).getReg(); - unsigned Fs = MI.getOperand(1).getReg(); - unsigned Wt1 = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass); - unsigned Wt2 = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass); + Register Wd = MI.getOperand(0).getReg(); + Register Fs = MI.getOperand(1).getReg(); + Register Wt1 = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass); + Register Wt2 = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass); BuildMI(*BB, MI, DL, TII->get(Mips::IMPLICIT_DEF), Wt1); BuildMI(*BB, MI, DL, TII->get(Mips::INSERT_SUBREG), Wt2) @@ -3509,8 +3509,8 @@ MipsSETargetLowering::emitST_F16_PSEUDO(MachineInstr &MI, const TargetInstrInfo *TII = Subtarget.getInstrInfo(); MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo(); DebugLoc DL = MI.getDebugLoc(); - unsigned Ws = MI.getOperand(0).getReg(); - unsigned Rt = MI.getOperand(1).getReg(); + Register Ws = MI.getOperand(0).getReg(); + Register Rt = MI.getOperand(1).getReg(); const MachineMemOperand &MMO = **MI.memoperands_begin(); unsigned Imm = MMO.getOffset(); @@ -3522,11 +3522,11 @@ MipsSETargetLowering::emitST_F16_PSEUDO(MachineInstr &MI, : (Subtarget.isABI_O32() ? &Mips::GPR32RegClass : &Mips::GPR64RegClass); const bool UsingMips32 = RC == &Mips::GPR32RegClass; - unsigned Rs = RegInfo.createVirtualRegister(&Mips::GPR32RegClass); + Register Rs = RegInfo.createVirtualRegister(&Mips::GPR32RegClass); BuildMI(*BB, MI, DL, TII->get(Mips::COPY_U_H), Rs).addReg(Ws).addImm(0); if(!UsingMips32) { - unsigned Tmp = RegInfo.createVirtualRegister(&Mips::GPR64RegClass); + Register Tmp = RegInfo.createVirtualRegister(&Mips::GPR64RegClass); BuildMI(*BB, MI, DL, TII->get(Mips::SUBREG_TO_REG), Tmp) .addImm(0) .addReg(Rs) @@ -3564,7 +3564,7 @@ MipsSETargetLowering::emitLD_F16_PSEUDO(MachineInstr &MI, const TargetInstrInfo *TII = Subtarget.getInstrInfo(); MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo(); DebugLoc DL = MI.getDebugLoc(); - unsigned Wd = MI.getOperand(0).getReg(); + Register Wd = MI.getOperand(0).getReg(); // Caution: A load via the GOT can expand to a GPR32 operand, a load via // spill and reload can expand as a GPR64 operand. Examine the @@ -3575,7 +3575,7 @@ MipsSETargetLowering::emitLD_F16_PSEUDO(MachineInstr &MI, : &Mips::GPR64RegClass); const bool UsingMips32 = RC == &Mips::GPR32RegClass; - unsigned Rt = RegInfo.createVirtualRegister(RC); + Register Rt = RegInfo.createVirtualRegister(RC); MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, TII->get(UsingMips32 ? 
Mips::LH : Mips::LH64), Rt); @@ -3583,7 +3583,7 @@ MipsSETargetLowering::emitLD_F16_PSEUDO(MachineInstr &MI, MIB.add(MI.getOperand(i)); if(!UsingMips32) { - unsigned Tmp = RegInfo.createVirtualRegister(&Mips::GPR32RegClass); + Register Tmp = RegInfo.createVirtualRegister(&Mips::GPR32RegClass); BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Tmp).addReg(Rt, 0, Mips::sub_32); Rt = Tmp; } @@ -3658,11 +3658,11 @@ MipsSETargetLowering::emitFPROUND_PSEUDO(MachineInstr &MI, const TargetInstrInfo *TII = Subtarget.getInstrInfo(); DebugLoc DL = MI.getDebugLoc(); - unsigned Wd = MI.getOperand(0).getReg(); - unsigned Fs = MI.getOperand(1).getReg(); + Register Wd = MI.getOperand(0).getReg(); + Register Fs = MI.getOperand(1).getReg(); MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo(); - unsigned Wtemp = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass); + Register Wtemp = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass); const TargetRegisterClass *GPRRC = IsFGR64onMips64 ? &Mips::GPR64RegClass : &Mips::GPR32RegClass; unsigned MFC1Opc = IsFGR64onMips64 @@ -3671,16 +3671,16 @@ MipsSETargetLowering::emitFPROUND_PSEUDO(MachineInstr &MI, unsigned FILLOpc = IsFGR64onMips64 ? Mips::FILL_D : Mips::FILL_W; // Perform the register class copy as mentioned above. - unsigned Rtemp = RegInfo.createVirtualRegister(GPRRC); + Register Rtemp = RegInfo.createVirtualRegister(GPRRC); BuildMI(*BB, MI, DL, TII->get(MFC1Opc), Rtemp).addReg(Fs); BuildMI(*BB, MI, DL, TII->get(FILLOpc), Wtemp).addReg(Rtemp); unsigned WPHI = Wtemp; if (IsFGR64onMips32) { - unsigned Rtemp2 = RegInfo.createVirtualRegister(GPRRC); + Register Rtemp2 = RegInfo.createVirtualRegister(GPRRC); BuildMI(*BB, MI, DL, TII->get(Mips::MFHC1_D64), Rtemp2).addReg(Fs); - unsigned Wtemp2 = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass); - unsigned Wtemp3 = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass); + Register Wtemp2 = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass); + Register Wtemp3 = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass); BuildMI(*BB, MI, DL, TII->get(Mips::INSERT_W), Wtemp2) .addReg(Wtemp) .addReg(Rtemp2) @@ -3693,7 +3693,7 @@ MipsSETargetLowering::emitFPROUND_PSEUDO(MachineInstr &MI, } if (IsFGR64) { - unsigned Wtemp2 = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass); + Register Wtemp2 = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass); BuildMI(*BB, MI, DL, TII->get(Mips::FEXDO_W), Wtemp2) .addReg(WPHI) .addReg(WPHI); @@ -3817,8 +3817,8 @@ MipsSETargetLowering::emitFEXP2_W_1(MachineInstr &MI, const TargetInstrInfo *TII = Subtarget.getInstrInfo(); MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo(); const TargetRegisterClass *RC = &Mips::MSA128WRegClass; - unsigned Ws1 = RegInfo.createVirtualRegister(RC); - unsigned Ws2 = RegInfo.createVirtualRegister(RC); + Register Ws1 = RegInfo.createVirtualRegister(RC); + Register Ws2 = RegInfo.createVirtualRegister(RC); DebugLoc DL = MI.getDebugLoc(); // Splat 1.0 into a vector @@ -3846,8 +3846,8 @@ MipsSETargetLowering::emitFEXP2_D_1(MachineInstr &MI, const TargetInstrInfo *TII = Subtarget.getInstrInfo(); MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo(); const TargetRegisterClass *RC = &Mips::MSA128DRegClass; - unsigned Ws1 = RegInfo.createVirtualRegister(RC); - unsigned Ws2 = RegInfo.createVirtualRegister(RC); + Register Ws1 = RegInfo.createVirtualRegister(RC); + Register Ws2 = RegInfo.createVirtualRegister(RC); DebugLoc DL = MI.getDebugLoc(); // Splat 1.0 into a vector diff --git a/lib/Target/Mips/MipsSEInstrInfo.cpp 
b/lib/Target/Mips/MipsSEInstrInfo.cpp index 4e49f5e7d9d..2126a1bda49 100644 --- a/lib/Target/Mips/MipsSEInstrInfo.cpp +++ b/lib/Target/Mips/MipsSEInstrInfo.cpp @@ -628,7 +628,7 @@ unsigned MipsSEInstrInfo::loadImmediate(int64_t Imm, MachineBasicBlock &MBB, // The first instruction can be a LUi, which is different from other // instructions (ADDiu, ORI and SLL) in that it does not have a register // operand. - unsigned Reg = RegInfo.createVirtualRegister(RC); + Register Reg = RegInfo.createVirtualRegister(RC); if (Inst->Opc == LUi) BuildMI(MBB, II, DL, get(LUi), Reg).addImm(SignExtend64<16>(Inst->ImmOpnd)); @@ -734,9 +734,9 @@ void MipsSEInstrInfo::expandPseudoMTLoHi(MachineBasicBlock &MBB, // Add lo/hi registers if the mtlo/hi instructions created have explicit // def registers. if (HasExplicitDef) { - unsigned DstReg = I->getOperand(0).getReg(); - unsigned DstLo = getRegisterInfo().getSubReg(DstReg, Mips::sub_lo); - unsigned DstHi = getRegisterInfo().getSubReg(DstReg, Mips::sub_hi); + Register DstReg = I->getOperand(0).getReg(); + Register DstLo = getRegisterInfo().getSubReg(DstReg, Mips::sub_lo); + Register DstHi = getRegisterInfo().getSubReg(DstReg, Mips::sub_hi); LoInst.addReg(DstLo, RegState::Define); HiInst.addReg(DstHi, RegState::Define); } @@ -773,14 +773,14 @@ void MipsSEInstrInfo::expandExtractElementF64(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, bool isMicroMips, bool FP64) const { - unsigned DstReg = I->getOperand(0).getReg(); - unsigned SrcReg = I->getOperand(1).getReg(); + Register DstReg = I->getOperand(0).getReg(); + Register SrcReg = I->getOperand(1).getReg(); unsigned N = I->getOperand(2).getImm(); DebugLoc dl = I->getDebugLoc(); assert(N < 2 && "Invalid immediate"); unsigned SubIdx = N ? Mips::sub_hi : Mips::sub_lo; - unsigned SubReg = getRegisterInfo().getSubReg(SrcReg, SubIdx); + Register SubReg = getRegisterInfo().getSubReg(SrcReg, SubIdx); // FPXX on MIPS-II or MIPS32r1 should have been handled with a spill/reload // in MipsSEFrameLowering.cpp. @@ -815,7 +815,7 @@ void MipsSEInstrInfo::expandExtractElementF64(MachineBasicBlock &MBB, void MipsSEInstrInfo::expandBuildPairF64(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, bool isMicroMips, bool FP64) const { - unsigned DstReg = I->getOperand(0).getReg(); + Register DstReg = I->getOperand(0).getReg(); unsigned LoReg = I->getOperand(1).getReg(), HiReg = I->getOperand(2).getReg(); const MCInstrDesc& Mtc1Tdd = get(Mips::MTC1); DebugLoc dl = I->getDebugLoc(); @@ -883,8 +883,8 @@ void MipsSEInstrInfo::expandEhReturn(MachineBasicBlock &MBB, unsigned RA = Subtarget.isGP64bit() ? Mips::RA_64 : Mips::RA; unsigned T9 = Subtarget.isGP64bit() ? Mips::T9_64 : Mips::T9; unsigned ZERO = Subtarget.isGP64bit() ? Mips::ZERO_64 : Mips::ZERO; - unsigned OffsetReg = I->getOperand(0).getReg(); - unsigned TargetReg = I->getOperand(1).getReg(); + Register OffsetReg = I->getOperand(0).getReg(); + Register TargetReg = I->getOperand(1).getReg(); // addu $ra, $v0, $zero // addu $sp, $sp, $v1 diff --git a/lib/Target/Mips/MipsSERegisterInfo.cpp b/lib/Target/Mips/MipsSERegisterInfo.cpp index f4b164d5c0a..370150bf9f4 100644 --- a/lib/Target/Mips/MipsSERegisterInfo.cpp +++ b/lib/Target/Mips/MipsSERegisterInfo.cpp @@ -224,7 +224,7 @@ void MipsSERegisterInfo::eliminateFI(MachineBasicBlock::iterator II, const TargetRegisterClass *PtrRC = ABI.ArePtrs64bit() ? 
&Mips::GPR64RegClass : &Mips::GPR32RegClass; MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo(); - unsigned Reg = RegInfo.createVirtualRegister(PtrRC); + Register Reg = RegInfo.createVirtualRegister(PtrRC); const MipsSEInstrInfo &TII = *static_cast<const MipsSEInstrInfo *>( MBB.getParent()->getSubtarget().getInstrInfo()); diff --git a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp index 02fc2f8ffcd..b78e9f58f72 100644 --- a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp +++ b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp @@ -507,7 +507,7 @@ const MCSymbol *NVPTXAsmPrinter::getFunctionFrameSymbol() const { } void NVPTXAsmPrinter::emitImplicitDef(const MachineInstr *MI) const { - unsigned RegNo = MI->getOperand(0).getReg(); + Register RegNo = MI->getOperand(0).getReg(); if (Register::isVirtualRegister(RegNo)) { OutStreamer->AddComment(Twine("implicit-def: ") + getVirtualRegisterName(RegNo)); diff --git a/lib/Target/PowerPC/PPCAsmPrinter.cpp b/lib/Target/PowerPC/PPCAsmPrinter.cpp index 8081b4bda2b..71d4b18b025 100644 --- a/lib/Target/PowerPC/PPCAsmPrinter.cpp +++ b/lib/Target/PowerPC/PPCAsmPrinter.cpp @@ -269,7 +269,7 @@ bool PPCAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, return true; // This operand uses VSX numbering. // If the operand is a VMX register, convert it to a VSX register. - unsigned Reg = MI->getOperand(OpNo).getReg(); + Register Reg = MI->getOperand(OpNo).getReg(); if (PPCInstrInfo::isVRRegister(Reg)) Reg = PPC::VSX32 + (Reg - PPC::V0); else if (PPCInstrInfo::isVFRegister(Reg)) @@ -382,7 +382,7 @@ void PPCAsmPrinter::LowerPATCHPOINT(StackMaps &SM, const MachineInstr &MI) { if (CallTarget) { assert((CallTarget & 0xFFFFFFFFFFFF) == CallTarget && "High 16 bits of call target should be zero."); - unsigned ScratchReg = MI.getOperand(Opers.getNextScratchIdx()).getReg(); + Register ScratchReg = MI.getOperand(Opers.getNextScratchIdx()).getReg(); EncodedBytes = 0; // Materialize the jump address: EmitToStreamer(*OutStreamer, MCInstBuilder(PPC::LI8) @@ -521,7 +521,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { if (!MI->isInlineAsm()) { for (const MachineOperand &MO: MI->operands()) { if (MO.isReg()) { - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Subtarget->hasSPE()) { if (PPC::F4RCRegClass.contains(Reg) || PPC::F8RCRegClass.contains(Reg) || diff --git a/lib/Target/PowerPC/PPCBranchSelector.cpp b/lib/Target/PowerPC/PPCBranchSelector.cpp index 793d690baec..6d1f09278b7 100644 --- a/lib/Target/PowerPC/PPCBranchSelector.cpp +++ b/lib/Target/PowerPC/PPCBranchSelector.cpp @@ -339,16 +339,16 @@ bool PPCBSel::runOnMachineFunction(MachineFunction &Fn) { // 1. CR register // 2. Target MBB PPC::Predicate Pred = (PPC::Predicate)I->getOperand(0).getImm(); - unsigned CRReg = I->getOperand(1).getReg(); + Register CRReg = I->getOperand(1).getReg(); // Jump over the uncond branch inst (i.e. $PC+8) on opposite condition.
BuildMI(MBB, I, dl, TII->get(PPC::BCC)) .addImm(PPC::InvertPredicate(Pred)).addReg(CRReg).addImm(2); } else if (I->getOpcode() == PPC::BC) { - unsigned CRBit = I->getOperand(0).getReg(); + Register CRBit = I->getOperand(0).getReg(); BuildMI(MBB, I, dl, TII->get(PPC::BCn)).addReg(CRBit).addImm(2); } else if (I->getOpcode() == PPC::BCn) { - unsigned CRBit = I->getOperand(0).getReg(); + Register CRBit = I->getOperand(0).getReg(); BuildMI(MBB, I, dl, TII->get(PPC::BC)).addReg(CRBit).addImm(2); } else if (I->getOpcode() == PPC::BDNZ) { BuildMI(MBB, I, dl, TII->get(PPC::BDZ)).addImm(2); diff --git a/lib/Target/PowerPC/PPCFastISel.cpp b/lib/Target/PowerPC/PPCFastISel.cpp index 2954b028f63..f524bf525e3 100644 --- a/lib/Target/PowerPC/PPCFastISel.cpp +++ b/lib/Target/PowerPC/PPCFastISel.cpp @@ -162,7 +162,7 @@ class PPCFastISel final : public FastISel { bool PPCEmitCmp(const Value *Src1Value, const Value *Src2Value, bool isZExt, unsigned DestReg, const PPC::Predicate Pred); - bool PPCEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr, + bool PPCEmitLoad(MVT VT, Register &ResultReg, Address &Addr, const TargetRegisterClass *RC, bool IsZExt = true, unsigned FP64LoadOpc = PPC::LFD); bool PPCEmitStore(MVT VT, unsigned SrcReg, Address &Addr); @@ -451,7 +451,7 @@ void PPCFastISel::PPCSimplifyAddress(Address &Addr, bool &UseOffset, // Emit a load instruction if possible, returning true if we succeeded, // otherwise false. See commentary below for how the register class of // the load is determined. -bool PPCFastISel::PPCEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr, +bool PPCFastISel::PPCEmitLoad(MVT VT, Register &ResultReg, Address &Addr, const TargetRegisterClass *RC, bool IsZExt, unsigned FP64LoadOpc) { unsigned Opc; @@ -612,7 +612,7 @@ bool PPCFastISel::SelectLoad(const Instruction *I) { const TargetRegisterClass *RC = AssignedReg ? MRI.getRegClass(AssignedReg) : nullptr; - unsigned ResultReg = 0; + Register ResultReg = 0; if (!PPCEmitLoad(VT, ResultReg, Addr, RC, true, PPCSubTarget->hasSPE() ? PPC::EVLDD : PPC::LFD)) return false; @@ -1051,7 +1051,7 @@ unsigned PPCFastISel::PPCMoveToFPReg(MVT SrcVT, unsigned SrcReg, } const TargetRegisterClass *RC = &PPC::F8RCRegClass; - unsigned ResultReg = 0; + Register ResultReg = 0; if (!PPCEmitLoad(MVT::f64, ResultReg, Addr, RC, !IsSigned, LoadOpc)) return 0; @@ -1176,7 +1176,7 @@ unsigned PPCFastISel::PPCMoveToIntReg(const Instruction *I, MVT VT, const TargetRegisterClass *RC = AssignedReg ? MRI.getRegClass(AssignedReg) : nullptr; - unsigned ResultReg = 0; + Register ResultReg = 0; if (!PPCEmitLoad(VT, ResultReg, Addr, RC, !IsSigned)) return 0; @@ -1717,7 +1717,7 @@ bool PPCFastISel::SelectRet(const Instruction *I) { if (const ConstantInt *CI = dyn_cast<ConstantInt>(RV)) { CCValAssign &VA = ValLocs[0]; - unsigned RetReg = VA.getLocReg(); + Register RetReg = VA.getLocReg(); // We still need to worry about properly extending the sign. For example, // we could have only a single bit or a constant that needs zero // extension rather than sign extension. Make sure we pass the return @@ -2353,7 +2353,7 @@ bool PPCFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo, if (!PPCComputeAddress(LI->getOperand(0), Addr)) return false; - unsigned ResultReg = MI->getOperand(0).getReg(); + Register ResultReg = MI->getOperand(0).getReg(); if (!PPCEmitLoad(VT, ResultReg, Addr, nullptr, IsZExt, PPCSubTarget->hasSPE() ?
PPC::EVLDD : PPC::LFD)) diff --git a/lib/Target/PowerPC/PPCFrameLowering.cpp b/lib/Target/PowerPC/PPCFrameLowering.cpp index e3e295ff599..8d6f534ab51 100644 --- a/lib/Target/PowerPC/PPCFrameLowering.cpp +++ b/lib/Target/PowerPC/PPCFrameLowering.cpp @@ -378,8 +378,8 @@ static void HandleVRSaveUpdate(MachineInstr &MI, const TargetInstrInfo &TII) { return; } - unsigned SrcReg = MI.getOperand(1).getReg(); - unsigned DstReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); + Register DstReg = MI.getOperand(0).getReg(); if ((UsedRegMask & 0xFFFF) == UsedRegMask) { if (DstReg != SrcReg) @@ -830,7 +830,7 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF, bool HasRedZone = isPPC64 || !isSVR4ABI; unsigned SPReg = isPPC64 ? PPC::X1 : PPC::R1; - unsigned BPReg = RegInfo->getBaseRegister(MF); + Register BPReg = RegInfo->getBaseRegister(MF); unsigned FPReg = isPPC64 ? PPC::X31 : PPC::R31; unsigned LRReg = isPPC64 ? PPC::LR8 : PPC::LR; unsigned TOCReg = isPPC64 ? PPC::X2 : PPC::R2; @@ -1401,7 +1401,7 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF, bool HasRedZone = Subtarget.isPPC64() || !Subtarget.isSVR4ABI(); unsigned SPReg = isPPC64 ? PPC::X1 : PPC::R1; - unsigned BPReg = RegInfo->getBaseRegister(MF); + Register BPReg = RegInfo->getBaseRegister(MF); unsigned FPReg = isPPC64 ? PPC::X31 : PPC::R31; unsigned ScratchReg = 0; unsigned TempReg = isPPC64 ? PPC::X12 : PPC::R12; // another scratch reg @@ -1981,7 +1981,7 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF, assert(FI && "No Base Pointer Save Slot!"); MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI)); - unsigned BP = RegInfo->getBaseRegister(MF); + Register BP = RegInfo->getBaseRegister(MF); if (PPC::G8RCRegClass.contains(BP)) { MinG8R = std::min(MinG8R, BP); HasG8SaveArea = true; diff --git a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp index e3795129db8..62d3fe226cc 100644 --- a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp +++ b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp @@ -391,8 +391,8 @@ void PPCDAGToDAGISel::InsertVRSaveCode(MachineFunction &Fn) { // Create two vregs - one to hold the VRSAVE register that is live-in to the // function and one for the value after having bits or'd into it. 
- unsigned InVRSAVE = RegInfo->createVirtualRegister(&PPC::GPRCRegClass); - unsigned UpdatedVRSAVE = RegInfo->createVirtualRegister(&PPC::GPRCRegClass); + Register InVRSAVE = RegInfo->createVirtualRegister(&PPC::GPRCRegClass); + Register UpdatedVRSAVE = RegInfo->createVirtualRegister(&PPC::GPRCRegClass); const TargetInstrInfo &TII = *PPCSubTarget->getInstrInfo(); MachineBasicBlock &EntryBB = *Fn.begin(); @@ -447,7 +447,7 @@ SDNode *PPCDAGToDAGISel::getGlobalBaseReg() { } else { BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MovePCtoLR)); BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MFLR), GlobalBaseReg); - unsigned TempReg = RegInfo->createVirtualRegister(&PPC::GPRCRegClass); + Register TempReg = RegInfo->createVirtualRegister(&PPC::GPRCRegClass); BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::UpdateGBR), GlobalBaseReg) .addReg(TempReg, RegState::Define).addReg(GlobalBaseReg); diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp index 4811929f1c0..7a4cadaaf3a 100644 --- a/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/lib/Target/PowerPC/PPCISelLowering.cpp @@ -10218,7 +10218,7 @@ PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB, if (CmpOpcode) { // Signed comparisons of byte or halfword values must be sign-extended. if (CmpOpcode == PPC::CMPW && AtomicSize < 4) { - unsigned ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); + Register ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH), ExtReg).addReg(dest); BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0) @@ -10269,10 +10269,10 @@ MachineBasicBlock *PPCTargetLowering::EmitPartwordAtomicBinary( MachineFunction *F = BB->getParent(); MachineFunction::iterator It = ++BB->getIterator(); - unsigned dest = MI.getOperand(0).getReg(); - unsigned ptrA = MI.getOperand(1).getReg(); - unsigned ptrB = MI.getOperand(2).getReg(); - unsigned incr = MI.getOperand(3).getReg(); + Register dest = MI.getOperand(0).getReg(); + Register ptrA = MI.getOperand(1).getReg(); + Register ptrB = MI.getOperand(2).getReg(); + Register incr = MI.getOperand(3).getReg(); DebugLoc dl = MI.getDebugLoc(); MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); @@ -10390,7 +10390,7 @@ MachineBasicBlock *PPCTargetLowering::EmitPartwordAtomicBinary( if (CmpOpcode) { // For unsigned comparisons, we can directly compare the shifted values. // For signed comparisons we shift and sign extend. - unsigned SReg = RegInfo.createVirtualRegister(GPRC); + Register SReg = RegInfo.createVirtualRegister(GPRC); BuildMI(BB, dl, TII->get(PPC::AND), SReg) .addReg(TmpDestReg) .addReg(MaskReg); @@ -10401,7 +10401,7 @@ MachineBasicBlock *PPCTargetLowering::EmitPartwordAtomicBinary( BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg) .addReg(SReg) .addReg(ShiftReg); - unsigned ValueSReg = RegInfo.createVirtualRegister(GPRC); + Register ValueSReg = RegInfo.createVirtualRegister(GPRC); BuildMI(BB, dl, TII->get(is8bit ? 
PPC::EXTSB : PPC::EXTSH), ValueSReg) .addReg(ValueReg); ValueReg = ValueSReg; @@ -10452,11 +10452,11 @@ PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI, const BasicBlock *BB = MBB->getBasicBlock(); MachineFunction::iterator I = ++MBB->getIterator(); - unsigned DstReg = MI.getOperand(0).getReg(); + Register DstReg = MI.getOperand(0).getReg(); const TargetRegisterClass *RC = MRI.getRegClass(DstReg); assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!"); - unsigned mainDstReg = MRI.createVirtualRegister(RC); - unsigned restoreDstReg = MRI.createVirtualRegister(RC); + Register mainDstReg = MRI.createVirtualRegister(RC); + Register restoreDstReg = MRI.createVirtualRegister(RC); MVT PVT = getPointerTy(MF->getDataLayout()); assert((PVT == MVT::i64 || PVT == MVT::i32) && @@ -10508,8 +10508,8 @@ PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI, // Prepare IP either in reg. const TargetRegisterClass *PtrRC = getRegClassFor(PVT); - unsigned LabelReg = MRI.createVirtualRegister(PtrRC); - unsigned BufReg = MI.getOperand(1).getReg(); + Register LabelReg = MRI.createVirtualRegister(PtrRC); + Register BufReg = MI.getOperand(1).getReg(); if (Subtarget.isPPC64() && Subtarget.isSVR4ABI()) { setUsesTOCBasePtr(*MBB->getParent()); @@ -10596,7 +10596,7 @@ PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI, const TargetRegisterClass *RC = (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass; - unsigned Tmp = MRI.createVirtualRegister(RC); + Register Tmp = MRI.createVirtualRegister(RC); // Since FP is only updated here but NOT referenced, it's treated as GPR. unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31; unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1; @@ -10613,7 +10613,7 @@ PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI, const int64_t TOCOffset = 3 * PVT.getStoreSize(); const int64_t BPOffset = 4 * PVT.getStoreSize(); - unsigned BufReg = MI.getOperand(0).getReg(); + Register BufReg = MI.getOperand(0).getReg(); // Reload FP (the jumped-to function may not have had a // frame pointer, and if so, then its r31 will be restored @@ -10854,15 +10854,15 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, BB = readMBB; MachineRegisterInfo &RegInfo = F->getRegInfo(); - unsigned ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); - unsigned LoReg = MI.getOperand(0).getReg(); - unsigned HiReg = MI.getOperand(1).getReg(); + Register ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); + Register LoReg = MI.getOperand(0).getReg(); + Register HiReg = MI.getOperand(1).getReg(); BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269); BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268); BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269); - unsigned CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); + Register CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg) .addReg(HiReg) @@ -11004,11 +11004,11 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, StoreMnemonic = PPC::STDCX; break; } - unsigned dest = MI.getOperand(0).getReg(); - unsigned ptrA = MI.getOperand(1).getReg(); - unsigned ptrB = MI.getOperand(2).getReg(); - unsigned oldval = MI.getOperand(3).getReg(); - unsigned newval = MI.getOperand(4).getReg(); + Register dest = MI.getOperand(0).getReg(); + Register ptrA = MI.getOperand(1).getReg(); + Register ptrB = MI.getOperand(2).getReg(); + Register oldval = MI.getOperand(3).getReg(); + Register newval = MI.getOperand(4).getReg(); 
DebugLoc dl = MI.getDebugLoc(); MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); @@ -11083,11 +11083,11 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, bool isLittleEndian = Subtarget.isLittleEndian(); bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8; - unsigned dest = MI.getOperand(0).getReg(); - unsigned ptrA = MI.getOperand(1).getReg(); - unsigned ptrB = MI.getOperand(2).getReg(); - unsigned oldval = MI.getOperand(3).getReg(); - unsigned newval = MI.getOperand(4).getReg(); + Register dest = MI.getOperand(0).getReg(); + Register ptrA = MI.getOperand(1).getReg(); + Register ptrB = MI.getOperand(2).getReg(); + Register oldval = MI.getOperand(3).getReg(); + Register newval = MI.getOperand(4).getReg(); DebugLoc dl = MI.getDebugLoc(); MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); @@ -11264,13 +11264,13 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, // This pseudo performs an FADD with rounding mode temporarily forced // to round-to-zero. We emit this via custom inserter since the FPSCR // is not modeled at the SelectionDAG level. - unsigned Dest = MI.getOperand(0).getReg(); - unsigned Src1 = MI.getOperand(1).getReg(); - unsigned Src2 = MI.getOperand(2).getReg(); + Register Dest = MI.getOperand(0).getReg(); + Register Src1 = MI.getOperand(1).getReg(); + Register Src2 = MI.getOperand(2).getReg(); DebugLoc dl = MI.getDebugLoc(); MachineRegisterInfo &RegInfo = F->getRegInfo(); - unsigned MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass); + Register MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass); // Save FPSCR value. BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg); @@ -11296,7 +11296,7 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8); MachineRegisterInfo &RegInfo = F->getRegInfo(); - unsigned Dest = RegInfo.createVirtualRegister( + Register Dest = RegInfo.createVirtualRegister( Opcode == PPC::ANDIo ? &PPC::GPRCRegClass : &PPC::G8RCRegClass); DebugLoc dl = MI.getDebugLoc(); @@ -11309,7 +11309,7 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, } else if (MI.getOpcode() == PPC::TCHECK_RET) { DebugLoc Dl = MI.getDebugLoc(); MachineRegisterInfo &RegInfo = F->getRegInfo(); - unsigned CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); + Register CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg); BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY), MI.getOperand(0).getReg()) @@ -11323,7 +11323,7 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, .addReg(PPC::CR0EQ); } else if (MI.getOpcode() == PPC::SETRNDi) { DebugLoc dl = MI.getDebugLoc(); - unsigned OldFPSCRReg = MI.getOperand(0).getReg(); + Register OldFPSCRReg = MI.getOperand(0).getReg(); // Save FPSCR value. BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg); @@ -11404,7 +11404,7 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, } }; - unsigned OldFPSCRReg = MI.getOperand(0).getReg(); + Register OldFPSCRReg = MI.getOperand(0).getReg(); // Save FPSCR value. 
BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg); @@ -11419,12 +11419,12 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, // mtfsf 255, NewFPSCRReg MachineOperand SrcOp = MI.getOperand(1); MachineRegisterInfo &RegInfo = F->getRegInfo(); - unsigned OldFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass); + Register OldFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass); copyRegFromG8RCOrF8RC(OldFPSCRTmpReg, OldFPSCRReg); - unsigned ImDefReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass); - unsigned ExtSrcReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass); + Register ImDefReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass); + Register ExtSrcReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass); // The first operand of INSERT_SUBREG should be a register which has // subregisters, we only care about its RegClass, so we should use an @@ -11435,14 +11435,14 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, .add(SrcOp) .addImm(1); - unsigned NewFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass); + Register NewFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass); BuildMI(*BB, MI, dl, TII->get(PPC::RLDIMI), NewFPSCRTmpReg) .addReg(OldFPSCRTmpReg) .addReg(ExtSrcReg) .addImm(0) .addImm(62); - unsigned NewFPSCRReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass); + Register NewFPSCRReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass); copyRegFromG8RCOrF8RC(NewFPSCRReg, NewFPSCRTmpReg); // The mask 255 means that put the 32:63 bits of NewFPSCRReg to the 32:63 @@ -14883,7 +14883,7 @@ void PPCTargetLowering::insertCopiesSplitCSR( else llvm_unreachable("Unexpected register class in CSRsViaCopy!"); - unsigned NewVR = MRI->createVirtualRegister(RC); + Register NewVR = MRI->createVirtualRegister(RC); // Create copy from CSR to a virtual register. // FIXME: this currently does not emit CFI pseudo-instructions, it works // fine for CXX_FAST_TLS since the C++-style TLS access functions should be diff --git a/lib/Target/PowerPC/PPCInstrInfo.cpp b/lib/Target/PowerPC/PPCInstrInfo.cpp index f65eeebc517..c1a5d94d865 100644 --- a/lib/Target/PowerPC/PPCInstrInfo.cpp +++ b/lib/Target/PowerPC/PPCInstrInfo.cpp @@ -184,7 +184,7 @@ int PPCInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, return Latency; const MachineOperand &DefMO = DefMI.getOperand(DefIdx); - unsigned Reg = DefMO.getReg(); + Register Reg = DefMO.getReg(); bool IsRegCR; if (Register::isVirtualRegister(Reg)) { @@ -1649,7 +1649,7 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg, return false; int OpC = CmpInstr.getOpcode(); - unsigned CRReg = CmpInstr.getOperand(0).getReg(); + Register CRReg = CmpInstr.getOperand(0).getReg(); // FP record forms set CR1 based on the exception status bits, not a // comparison with zero. @@ -1938,7 +1938,7 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg, // Rotates are expensive instructions. If we're emitting a record-form // rotate that can just be an andi/andis, we should just emit that. 
if (MIOpC == PPC::RLWINM || MIOpC == PPC::RLWINM8) { - unsigned GPRRes = MI->getOperand(0).getReg(); + Register GPRRes = MI->getOperand(0).getReg(); int64_t SH = MI->getOperand(2).getImm(); int64_t MB = MI->getOperand(3).getImm(); int64_t ME = MI->getOperand(4).getImm(); @@ -2123,7 +2123,7 @@ bool PPCInstrInfo::expandVSXMemPseudo(MachineInstr &MI) const { llvm_unreachable("Unknown Operation!"); } - unsigned TargetReg = MI.getOperand(0).getReg(); + Register TargetReg = MI.getOperand(0).getReg(); unsigned Opcode; if ((TargetReg >= PPC::F0 && TargetReg <= PPC::F31) || (TargetReg >= PPC::VSL0 && TargetReg <= PPC::VSL31)) @@ -2185,7 +2185,7 @@ bool PPCInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { return expandVSXMemPseudo(MI); } case PPC::SPILLTOVSR_LD: { - unsigned TargetReg = MI.getOperand(0).getReg(); + Register TargetReg = MI.getOperand(0).getReg(); if (PPC::VSFRCRegClass.contains(TargetReg)) { MI.setDesc(get(PPC::DFLOADf64)); return expandPostRAPseudo(MI); @@ -2195,7 +2195,7 @@ bool PPCInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { return true; } case PPC::SPILLTOVSR_ST: { - unsigned SrcReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(0).getReg(); if (PPC::VSFRCRegClass.contains(SrcReg)) { NumStoreSPILLVSRRCAsVec++; MI.setDesc(get(PPC::DFSTOREf64)); @@ -2207,7 +2207,7 @@ bool PPCInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { return true; } case PPC::SPILLTOVSR_LDX: { - unsigned TargetReg = MI.getOperand(0).getReg(); + Register TargetReg = MI.getOperand(0).getReg(); if (PPC::VSFRCRegClass.contains(TargetReg)) MI.setDesc(get(PPC::LXSDX)); else @@ -2215,7 +2215,7 @@ bool PPCInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { return true; } case PPC::SPILLTOVSR_STX: { - unsigned SrcReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(0).getReg(); if (PPC::VSFRCRegClass.contains(SrcReg)) { NumStoreSPILLVSRRCAsVec++; MI.setDesc(get(PPC::STXSDX)); @@ -2280,7 +2280,7 @@ void PPCInstrInfo::replaceInstrOperandWithImm(MachineInstr &MI, int64_t Imm) const { assert(MI.getOperand(OpNo).isReg() && "Operand must be a REG"); // Replace the REG with the Immediate. - unsigned InUseReg = MI.getOperand(OpNo).getReg(); + Register InUseReg = MI.getOperand(OpNo).getReg(); MI.getOperand(OpNo).ChangeToImmediate(Imm); if (empty(MI.implicit_operands())) @@ -2360,7 +2360,7 @@ MachineInstr *PPCInstrInfo::getForwardingDefMI( for (int i = 1, e = MI.getNumOperands(); i < e; i++) { if (!MI.getOperand(i).isReg()) continue; - unsigned Reg = MI.getOperand(i).getReg(); + Register Reg = MI.getOperand(i).getReg(); if (!Register::isVirtualRegister(Reg)) continue; unsigned TrueReg = TRI->lookThruCopyLike(Reg, MRI); @@ -2402,7 +2402,7 @@ MachineInstr *PPCInstrInfo::getForwardingDefMI( MachineOperand &MO = MI.getOperand(i); SeenIntermediateUse = false; if (MO.isReg() && MO.isUse() && !MO.isImplicit()) { - unsigned Reg = MI.getOperand(i).getReg(); + Register Reg = MI.getOperand(i).getReg(); // If we see another use of this reg between the def and the MI, // we want to flat it so the def isn't deleted. 
MachineInstr *DefMI = getDefMIPostRA(Reg, MI, SeenIntermediateUse); @@ -2554,7 +2554,7 @@ bool PPCInstrInfo::convertToImmediateForm(MachineInstr &MI, "The forwarding operand needs to be valid at this point"); bool IsForwardingOperandKilled = MI.getOperand(ForwardingOperand).isKill(); bool KillFwdDefMI = !SeenIntermediateUse && IsForwardingOperandKilled; - unsigned ForwardingOperandReg = MI.getOperand(ForwardingOperand).getReg(); + Register ForwardingOperandReg = MI.getOperand(ForwardingOperand).getReg(); if (KilledDef && KillFwdDefMI) *KilledDef = DefMI; @@ -2610,7 +2610,7 @@ bool PPCInstrInfo::convertToImmediateForm(MachineInstr &MI, // If a compare-immediate is fed by an immediate and is itself an input of // an ISEL (the most common case) into a COPY of the correct register. bool Changed = false; - unsigned DefReg = MI.getOperand(0).getReg(); + Register DefReg = MI.getOperand(0).getReg(); int64_t Comparand = MI.getOperand(2).getImm(); int64_t SExtComparand = ((uint64_t)Comparand & ~0x7FFFuLL) != 0 ? (Comparand | 0xFFFFFFFFFFFF0000) : Comparand; @@ -2620,8 +2620,8 @@ bool PPCInstrInfo::convertToImmediateForm(MachineInstr &MI, if (UseOpc != PPC::ISEL && UseOpc != PPC::ISEL8) continue; unsigned CRSubReg = CompareUseMI.getOperand(3).getSubReg(); - unsigned TrueReg = CompareUseMI.getOperand(1).getReg(); - unsigned FalseReg = CompareUseMI.getOperand(2).getReg(); + Register TrueReg = CompareUseMI.getOperand(1).getReg(); + Register FalseReg = CompareUseMI.getOperand(2).getReg(); unsigned RegToCopy = selectReg(SExtImm, SExtComparand, Opc, TrueReg, FalseReg, CRSubReg); if (RegToCopy == PPC::NoRegister) @@ -3305,7 +3305,7 @@ bool PPCInstrInfo::isRegElgibleForForwarding( if (MRI.isSSA()) return false; - unsigned Reg = RegMO.getReg(); + Register Reg = RegMO.getReg(); // Walking the inst in reverse(MI-->DefMI) to get the last DEF of the Reg. MachineBasicBlock::const_reverse_iterator It = MI; @@ -3529,8 +3529,8 @@ bool PPCInstrInfo::transformToImmFormFedByLI(MachineInstr &MI, if (PostRA && III.ZeroIsSpecialOrig != III.ZeroIsSpecialNew) { unsigned PosForOrigZero = III.ZeroIsSpecialOrig ? III.ZeroIsSpecialOrig : III.ZeroIsSpecialNew + 1; - unsigned OrigZeroReg = MI.getOperand(PosForOrigZero).getReg(); - unsigned NewZeroReg = MI.getOperand(III.ZeroIsSpecialNew).getReg(); + Register OrigZeroReg = MI.getOperand(PosForOrigZero).getReg(); + Register NewZeroReg = MI.getOperand(III.ZeroIsSpecialNew).getReg(); // If R0 is in the operand where zero is special for the new instruction, // it is unsafe to transform if the constant operand isn't that operand. if ((NewZeroReg == PPC::R0 || NewZeroReg == PPC::X0) && @@ -3619,7 +3619,7 @@ bool PPCInstrInfo::transformToImmFormFedByLI(MachineInstr &MI, if (III.ZeroIsSpecialNew) { // If operand at III.ZeroIsSpecialNew is physical reg(eg: ZERO/ZERO8), no // need to fix up register class. - unsigned RegToModify = MI.getOperand(III.ZeroIsSpecialNew).getReg(); + Register RegToModify = MI.getOperand(III.ZeroIsSpecialNew).getReg(); if (Register::isVirtualRegister(RegToModify)) { const TargetRegisterClass *NewRC = MRI.getRegClass(RegToModify)->hasSuperClassEq(&PPC::GPRCRegClass) ? 
@@ -3765,7 +3765,7 @@ bool PPCInstrInfo::isTOCSaveMI(const MachineInstr &MI) const { return false; unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset(); unsigned StackOffset = MI.getOperand(1).getImm(); - unsigned StackReg = MI.getOperand(2).getReg(); + Register StackReg = MI.getOperand(2).getReg(); if (StackReg == PPC::X1 && StackOffset == TOCSaveOffset) return true; @@ -3790,7 +3790,7 @@ PPCInstrInfo::isSignOrZeroExtended(const MachineInstr &MI, bool SignExt, switch (MI.getOpcode()) { case PPC::COPY: { - unsigned SrcReg = MI.getOperand(1).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); // In both ELFv1 and v2 ABI, method parameters and the return value // are sign- or zero-extended. @@ -3799,7 +3799,7 @@ PPCInstrInfo::isSignOrZeroExtended(const MachineInstr &MI, bool SignExt, // We check the ZExt/SExt flags for a method parameter. if (MI.getParent()->getBasicBlock() == &MF->getFunction().getEntryBlock()) { - unsigned VReg = MI.getOperand(0).getReg(); + Register VReg = MI.getOperand(0).getReg(); if (MF->getRegInfo().isLiveIn(VReg)) return SignExt ? FuncInfo->isLiveInSExt(VReg) : FuncInfo->isLiveInZExt(VReg); @@ -3859,7 +3859,7 @@ PPCInstrInfo::isSignOrZeroExtended(const MachineInstr &MI, bool SignExt, case PPC::XORIS8: { // logical operation with 16-bit immediate does not change the upper bits. // So, we track the operand register as we do for register copy. - unsigned SrcReg = MI.getOperand(1).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); if (!Register::isVirtualRegister(SrcReg)) return false; const MachineInstr *SrcMI = MRI->getVRegDef(SrcReg); @@ -3888,7 +3888,7 @@ PPCInstrInfo::isSignOrZeroExtended(const MachineInstr &MI, bool SignExt, for (unsigned I = 1; I != E; I += D) { if (MI.getOperand(I).isReg()) { - unsigned SrcReg = MI.getOperand(I).getReg(); + Register SrcReg = MI.getOperand(I).getReg(); if (!Register::isVirtualRegister(SrcReg)) return false; const MachineInstr *SrcMI = MRI->getVRegDef(SrcReg); @@ -3911,8 +3911,8 @@ PPCInstrInfo::isSignOrZeroExtended(const MachineInstr &MI, bool SignExt, assert(MI.getOperand(1).isReg() && MI.getOperand(2).isReg()); - unsigned SrcReg1 = MI.getOperand(1).getReg(); - unsigned SrcReg2 = MI.getOperand(2).getReg(); + Register SrcReg1 = MI.getOperand(1).getReg(); + Register SrcReg2 = MI.getOperand(2).getReg(); if (!Register::isVirtualRegister(SrcReg1) || !Register::isVirtualRegister(SrcReg2)) @@ -3979,7 +3979,7 @@ unsigned PPCInstrInfo::reduceLoopCount( MachineInstr *Loop = findLoopInstr(PreHeader); if (!Loop) return 0; - unsigned LoopCountReg = Loop->getOperand(0).getReg(); + Register LoopCountReg = Loop->getOperand(0).getReg(); MachineRegisterInfo &MRI = MF->getRegInfo(); MachineInstr *LoopCount = MRI.getUniqueVRegDef(LoopCountReg); diff --git a/lib/Target/PowerPC/PPCMIPeephole.cpp b/lib/Target/PowerPC/PPCMIPeephole.cpp index fe4f351e639..ac8ac060f46 100644 --- a/lib/Target/PowerPC/PPCMIPeephole.cpp +++ b/lib/Target/PowerPC/PPCMIPeephole.cpp @@ -148,7 +148,7 @@ static MachineInstr *getVRegDefOrNull(MachineOperand *Op, if (!Op->isReg()) return nullptr; - unsigned Reg = Op->getReg(); + Register Reg = Op->getReg(); if (!Register::isVirtualRegister(Reg)) return nullptr; @@ -452,7 +452,7 @@ bool PPCMIPeephole::simplifyCode(void) { auto isConvertOfSplat = [=]() -> bool { if (DefOpcode != PPC::XVCVSPSXWS && DefOpcode != PPC::XVCVSPUXWS) return false; - unsigned ConvReg = DefMI->getOperand(1).getReg(); + Register ConvReg = DefMI->getOperand(1).getReg(); if (!Register::isVirtualRegister(ConvReg)) return false; 
MachineInstr *Splt = MRI->getVRegDef(ConvReg); @@ -480,9 +480,9 @@ bool PPCMIPeephole::simplifyCode(void) { // Splat fed by a shift. Usually when we align value to splat into // vector element zero. if (DefOpcode == PPC::XXSLDWI) { - unsigned ShiftRes = DefMI->getOperand(0).getReg(); - unsigned ShiftOp1 = DefMI->getOperand(1).getReg(); - unsigned ShiftOp2 = DefMI->getOperand(2).getReg(); + Register ShiftRes = DefMI->getOperand(0).getReg(); + Register ShiftOp1 = DefMI->getOperand(1).getReg(); + Register ShiftOp2 = DefMI->getOperand(2).getReg(); unsigned ShiftImm = DefMI->getOperand(3).getImm(); unsigned SplatImm = MI.getOperand(2).getImm(); if (ShiftOp1 == ShiftOp2) { @@ -532,8 +532,8 @@ bool PPCMIPeephole::simplifyCode(void) { if (RoundInstr->getOpcode() == PPC::FRSP && MRI->hasOneNonDBGUse(RoundInstr->getOperand(0).getReg())) { Simplified = true; - unsigned ConvReg1 = RoundInstr->getOperand(1).getReg(); - unsigned FRSPDefines = RoundInstr->getOperand(0).getReg(); + Register ConvReg1 = RoundInstr->getOperand(1).getReg(); + Register FRSPDefines = RoundInstr->getOperand(0).getReg(); MachineInstr &Use = *(MRI->use_instr_begin(FRSPDefines)); for (int i = 0, e = Use.getNumOperands(); i < e; ++i) if (Use.getOperand(i).isReg() && @@ -565,7 +565,7 @@ bool PPCMIPeephole::simplifyCode(void) { case PPC::EXTSH8: case PPC::EXTSH8_32_64: { if (!EnableSExtElimination) break; - unsigned NarrowReg = MI.getOperand(1).getReg(); + Register NarrowReg = MI.getOperand(1).getReg(); if (!Register::isVirtualRegister(NarrowReg)) break; @@ -609,7 +609,7 @@ bool PPCMIPeephole::simplifyCode(void) { case PPC::EXTSW_32: case PPC::EXTSW_32_64: { if (!EnableSExtElimination) break; - unsigned NarrowReg = MI.getOperand(1).getReg(); + Register NarrowReg = MI.getOperand(1).getReg(); if (!Register::isVirtualRegister(NarrowReg)) break; @@ -651,8 +651,8 @@ bool PPCMIPeephole::simplifyCode(void) { // We can eliminate EXTSW if the input is known to be already // sign-extended. LLVM_DEBUG(dbgs() << "Removing redundant sign-extension\n"); - unsigned TmpReg = - MF->getRegInfo().createVirtualRegister(&PPC::G8RCRegClass); + Register TmpReg = + MF->getRegInfo().createVirtualRegister(&PPC::G8RCRegClass); BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(PPC::IMPLICIT_DEF), TmpReg); BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(PPC::INSERT_SUBREG), @@ -678,7 +678,7 @@ bool PPCMIPeephole::simplifyCode(void) { if (MI.getOperand(2).getImm() != 0) break; - unsigned SrcReg = MI.getOperand(1).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); if (!Register::isVirtualRegister(SrcReg)) break; @@ -694,7 +694,7 @@ bool PPCMIPeephole::simplifyCode(void) { SrcMI = SubRegMI; if (SubRegMI->getOpcode() == PPC::COPY) { - unsigned CopyReg = SubRegMI->getOperand(1).getReg(); + Register CopyReg = SubRegMI->getOperand(1).getReg(); if (Register::isVirtualRegister(CopyReg)) SrcMI = MRI->getVRegDef(CopyReg); } @@ -756,7 +756,7 @@ bool PPCMIPeephole::simplifyCode(void) { break; // We don't have an ADD fed by LI's that can be transformed // Now we know that Op1 is the PHI node and Op2 is the dominator - unsigned DominatorReg = Op2.getReg(); + Register DominatorReg = Op2.getReg(); const TargetRegisterClass *TRC = MI.getOpcode() == PPC::ADD8 ? &PPC::G8RC_and_G8RC_NOX0RegClass @@ -948,7 +948,7 @@ static bool eligibleForCompareElimination(MachineBasicBlock &MBB, (*BII).getOpcode() == PPC::BCC && (*BII).getOperand(1).isReg()) { // We optimize only if the condition code is used only by one BCC. 
- unsigned CndReg = (*BII).getOperand(1).getReg(); + Register CndReg = (*BII).getOperand(1).getReg(); if (!Register::isVirtualRegister(CndReg) || !MRI->hasOneNonDBGUse(CndReg)) return false; @@ -1269,8 +1269,8 @@ bool PPCMIPeephole::eliminateRedundantCompare(void) { // We touch up the compare instruction in MBB2 and move it to // a previous BB to handle partially redundant case. if (SwapOperands) { - unsigned Op1 = CMPI2->getOperand(1).getReg(); - unsigned Op2 = CMPI2->getOperand(2).getReg(); + Register Op1 = CMPI2->getOperand(1).getReg(); + Register Op2 = CMPI2->getOperand(2).getReg(); CMPI2->getOperand(1).setReg(Op2); CMPI2->getOperand(2).setReg(Op1); } @@ -1293,7 +1293,7 @@ bool PPCMIPeephole::eliminateRedundantCompare(void) { MBBtoMoveCmp->splice(I, &MBB2, MachineBasicBlock::iterator(CMPI2)); DebugLoc DL = CMPI2->getDebugLoc(); - unsigned NewVReg = MRI->createVirtualRegister(&PPC::CRRCRegClass); + Register NewVReg = MRI->createVirtualRegister(&PPC::CRRCRegClass); BuildMI(MBB2, MBB2.begin(), DL, TII->get(PPC::PHI), NewVReg) .addReg(BI1->getOperand(1).getReg()).addMBB(MBB1) @@ -1332,7 +1332,7 @@ bool PPCMIPeephole::emitRLDICWhenLoweringJumpTables(MachineInstr &MI) { if (MI.getOpcode() != PPC::RLDICR) return false; - unsigned SrcReg = MI.getOperand(1).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); if (!Register::isVirtualRegister(SrcReg)) return false; @@ -1412,7 +1412,7 @@ bool PPCMIPeephole::combineSEXTAndSHL(MachineInstr &MI, if (SHMI + MEMI != 63) return false; - unsigned SrcReg = MI.getOperand(1).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); if (!Register::isVirtualRegister(SrcReg)) return false; diff --git a/lib/Target/PowerPC/PPCPreEmitPeephole.cpp b/lib/Target/PowerPC/PPCPreEmitPeephole.cpp index e7b6d6f872b..1de01f9bcbc 100644 --- a/lib/Target/PowerPC/PPCPreEmitPeephole.cpp +++ b/lib/Target/PowerPC/PPCPreEmitPeephole.cpp @@ -91,7 +91,7 @@ namespace { LLVM_DEBUG(dbgs() << "Scanning after load immediate: "; BBI->dump();); - unsigned Reg = BBI->getOperand(0).getReg(); + Register Reg = BBI->getOperand(0).getReg(); int64_t Imm = BBI->getOperand(1).getImm(); MachineOperand *DeadOrKillToUnset = nullptr; if (BBI->getOperand(0).isDead()) { @@ -214,7 +214,7 @@ namespace { if (Br->getOpcode() != PPC::BC && Br->getOpcode() != PPC::BCn) continue; MachineInstr *CRSetMI = nullptr; - unsigned CRBit = Br->getOperand(0).getReg(); + Register CRBit = Br->getOperand(0).getReg(); unsigned CRReg = getCRFromCRBit(CRBit); bool SeenUse = false; MachineBasicBlock::reverse_iterator It = Br, Er = MBB.rend(); diff --git a/lib/Target/PowerPC/PPCQPXLoadSplat.cpp b/lib/Target/PowerPC/PPCQPXLoadSplat.cpp index 3a83cc27439..6e904264382 100644 --- a/lib/Target/PowerPC/PPCQPXLoadSplat.cpp +++ b/lib/Target/PowerPC/PPCQPXLoadSplat.cpp @@ -79,8 +79,8 @@ bool PPCQPXLoadSplat::runOnMachineFunction(MachineFunction &MF) { for (auto SI = Splats.begin(); SI != Splats.end();) { MachineInstr *SMI = *SI; - unsigned SplatReg = SMI->getOperand(0).getReg(); - unsigned SrcReg = SMI->getOperand(1).getReg(); + Register SplatReg = SMI->getOperand(0).getReg(); + Register SrcReg = SMI->getOperand(1).getReg(); if (MI->modifiesRegister(SrcReg, TRI)) { switch (MI->getOpcode()) { @@ -102,7 +102,7 @@ bool PPCQPXLoadSplat::runOnMachineFunction(MachineFunction &MF) { // the QPX splat source register. 
unsigned SubRegIndex = TRI->getSubRegIndex(SrcReg, MI->getOperand(0).getReg()); - unsigned SplatSubReg = TRI->getSubReg(SplatReg, SubRegIndex); + Register SplatSubReg = TRI->getSubReg(SplatReg, SubRegIndex); // Substitute both the explicit defined register, and also the // implicit def of the containing QPX register. diff --git a/lib/Target/PowerPC/PPCReduceCRLogicals.cpp b/lib/Target/PowerPC/PPCReduceCRLogicals.cpp index f2c71913a37..ac20fae983c 100644 --- a/lib/Target/PowerPC/PPCReduceCRLogicals.cpp +++ b/lib/Target/PowerPC/PPCReduceCRLogicals.cpp @@ -541,7 +541,7 @@ MachineInstr *PPCReduceCRLogicals::lookThroughCRCopy(unsigned Reg, CpDef = Copy; if (!Copy->isCopy()) return Copy; - unsigned CopySrc = Copy->getOperand(1).getReg(); + Register CopySrc = Copy->getOperand(1).getReg(); Subreg = Copy->getOperand(1).getSubReg(); if (!Register::isVirtualRegister(CopySrc)) { const TargetRegisterInfo *TRI = &TII->getRegisterInfo(); diff --git a/lib/Target/PowerPC/PPCRegisterInfo.cpp b/lib/Target/PowerPC/PPCRegisterInfo.cpp index f01e476d8a7..f9a9ee08aeb 100644 --- a/lib/Target/PowerPC/PPCRegisterInfo.cpp +++ b/lib/Target/PowerPC/PPCRegisterInfo.cpp @@ -527,7 +527,7 @@ void PPCRegisterInfo::lowerDynamicAlloc(MachineBasicBlock::iterator II) const { // Fortunately, a frame greater than 32K is rare. const TargetRegisterClass *G8RC = &PPC::G8RCRegClass; const TargetRegisterClass *GPRC = &PPC::GPRCRegClass; - unsigned Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC); + Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC); if (MaxAlign < TargetAlign && isInt<16>(FrameSize)) { if (LP64) @@ -549,7 +549,7 @@ void PPCRegisterInfo::lowerDynamicAlloc(MachineBasicBlock::iterator II) const { } bool KillNegSizeReg = MI.getOperand(1).isKill(); - unsigned NegSizeReg = MI.getOperand(1).getReg(); + Register NegSizeReg = MI.getOperand(1).getReg(); // Grow the stack and update the stack pointer link, then determine the // address of new allocated space. @@ -655,8 +655,8 @@ void PPCRegisterInfo::lowerCRSpilling(MachineBasicBlock::iterator II, const TargetRegisterClass *G8RC = &PPC::G8RCRegClass; const TargetRegisterClass *GPRC = &PPC::GPRCRegClass; - unsigned Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC); - unsigned SrcReg = MI.getOperand(0).getReg(); + Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC); + Register SrcReg = MI.getOperand(0).getReg(); // We need to store the CR in the low 4-bits of the saved value. First, issue // an MFOCRF to save all of the CRBits and, if needed, kill the SrcReg. @@ -700,8 +700,8 @@ void PPCRegisterInfo::lowerCRRestore(MachineBasicBlock::iterator II, const TargetRegisterClass *G8RC = &PPC::G8RCRegClass; const TargetRegisterClass *GPRC = &PPC::GPRCRegClass; - unsigned Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC); - unsigned DestReg = MI.getOperand(0).getReg(); + Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC); + Register DestReg = MI.getOperand(0).getReg(); assert(MI.definesRegister(DestReg) && "RESTORE_CR does not define its destination"); @@ -744,8 +744,8 @@ void PPCRegisterInfo::lowerCRBitSpilling(MachineBasicBlock::iterator II, const TargetRegisterClass *G8RC = &PPC::G8RCRegClass; const TargetRegisterClass *GPRC = &PPC::GPRCRegClass; - unsigned Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC); - unsigned SrcReg = MI.getOperand(0).getReg(); + Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? 
G8RC : GPRC); + Register SrcReg = MI.getOperand(0).getReg(); // Search up the BB to find the definition of the CR bit. MachineBasicBlock::reverse_iterator Ins; @@ -823,8 +823,8 @@ void PPCRegisterInfo::lowerCRBitRestore(MachineBasicBlock::iterator II, const TargetRegisterClass *G8RC = &PPC::G8RCRegClass; const TargetRegisterClass *GPRC = &PPC::GPRCRegClass; - unsigned Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC); - unsigned DestReg = MI.getOperand(0).getReg(); + Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC); + Register DestReg = MI.getOperand(0).getReg(); assert(MI.definesRegister(DestReg) && "RESTORE_CRBIT does not define its destination"); @@ -833,7 +833,7 @@ void PPCRegisterInfo::lowerCRBitRestore(MachineBasicBlock::iterator II, BuildMI(MBB, II, dl, TII.get(TargetOpcode::IMPLICIT_DEF), DestReg); - unsigned RegO = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC); + Register RegO = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC); BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MFOCRF8 : PPC::MFOCRF), RegO) .addReg(getCRFromCRBit(DestReg)); @@ -870,8 +870,8 @@ void PPCRegisterInfo::lowerVRSAVESpilling(MachineBasicBlock::iterator II, DebugLoc dl = MI.getDebugLoc(); const TargetRegisterClass *GPRC = &PPC::GPRCRegClass; - unsigned Reg = MF.getRegInfo().createVirtualRegister(GPRC); - unsigned SrcReg = MI.getOperand(0).getReg(); + Register Reg = MF.getRegInfo().createVirtualRegister(GPRC); + Register SrcReg = MI.getOperand(0).getReg(); BuildMI(MBB, II, dl, TII.get(PPC::MFVRSAVEv), Reg) .addReg(SrcReg, getKillRegState(MI.getOperand(0).isKill())); @@ -896,8 +896,8 @@ void PPCRegisterInfo::lowerVRSAVERestore(MachineBasicBlock::iterator II, DebugLoc dl = MI.getDebugLoc(); const TargetRegisterClass *GPRC = &PPC::GPRCRegClass; - unsigned Reg = MF.getRegInfo().createVirtualRegister(GPRC); - unsigned DestReg = MI.getOperand(0).getReg(); + Register Reg = MF.getRegInfo().createVirtualRegister(GPRC); + Register DestReg = MI.getOperand(0).getReg(); assert(MI.definesRegister(DestReg) && "RESTORE_VRSAVE does not define its destination"); @@ -1128,7 +1128,7 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, OperandBase = OffsetOperandNo; } - unsigned StackReg = MI.getOperand(FIOperandNum).getReg(); + Register StackReg = MI.getOperand(FIOperandNum).getReg(); MI.getOperand(OperandBase).ChangeToRegister(StackReg, false); MI.getOperand(OperandBase + 1).ChangeToRegister(SReg, false, false, true); } diff --git a/lib/Target/PowerPC/PPCTLSDynamicCall.cpp b/lib/Target/PowerPC/PPCTLSDynamicCall.cpp index fb826c4a32f..8f313d9d01c 100644 --- a/lib/Target/PowerPC/PPCTLSDynamicCall.cpp +++ b/lib/Target/PowerPC/PPCTLSDynamicCall.cpp @@ -74,8 +74,8 @@ protected: LLVM_DEBUG(dbgs() << "TLS Dynamic Call Fixup:\n " << MI); - unsigned OutReg = MI.getOperand(0).getReg(); - unsigned InReg = MI.getOperand(1).getReg(); + Register OutReg = MI.getOperand(0).getReg(); + Register InReg = MI.getOperand(1).getReg(); DebugLoc DL = MI.getDebugLoc(); unsigned GPR3 = Is64Bit ? 
PPC::X3 : PPC::R3; unsigned Opc1, Opc2; diff --git a/lib/Target/PowerPC/PPCVSXCopy.cpp b/lib/Target/PowerPC/PPCVSXCopy.cpp index fb2b26a3bf3..3463bbbdc5f 100644 --- a/lib/Target/PowerPC/PPCVSXCopy.cpp +++ b/lib/Target/PowerPC/PPCVSXCopy.cpp @@ -102,7 +102,7 @@ protected: IsVSFReg(SrcMO.getReg(), MRI)) && "Unknown source for a VSX copy"); - unsigned NewVReg = MRI.createVirtualRegister(SrcRC); + Register NewVReg = MRI.createVirtualRegister(SrcRC); BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(TargetOpcode::SUBREG_TO_REG), NewVReg) .addImm(1) // add 1, not 0, because there is no implicit clearing @@ -124,7 +124,7 @@ protected: "Unknown destination for a VSX copy"); // Copy the VSX value into a new VSX register of the correct subclass. - unsigned NewVReg = MRI.createVirtualRegister(DstRC); + Register NewVReg = MRI.createVirtualRegister(DstRC); BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(TargetOpcode::COPY), NewVReg) .add(SrcMO); diff --git a/lib/Target/PowerPC/PPCVSXFMAMutate.cpp b/lib/Target/PowerPC/PPCVSXFMAMutate.cpp index aedbf5150cd..5e150be544e 100644 --- a/lib/Target/PowerPC/PPCVSXFMAMutate.cpp +++ b/lib/Target/PowerPC/PPCVSXFMAMutate.cpp @@ -126,7 +126,7 @@ protected: if (!AddendMI->isFullCopy()) continue; - unsigned AddendSrcReg = AddendMI->getOperand(1).getReg(); + Register AddendSrcReg = AddendMI->getOperand(1).getReg(); if (Register::isVirtualRegister(AddendSrcReg)) { if (MRI.getRegClass(AddendMI->getOperand(0).getReg()) != MRI.getRegClass(AddendSrcReg)) @@ -182,12 +182,12 @@ protected: // %5 = A-form-op %5, %5, %11; // where %5 and %11 are both kills. This case would be skipped // otherwise. - unsigned OldFMAReg = MI.getOperand(0).getReg(); + Register OldFMAReg = MI.getOperand(0).getReg(); // Find one of the product operands that is killed by this instruction. unsigned KilledProdOp = 0, OtherProdOp = 0; - unsigned Reg2 = MI.getOperand(2).getReg(); - unsigned Reg3 = MI.getOperand(3).getReg(); + Register Reg2 = MI.getOperand(2).getReg(); + Register Reg3 = MI.getOperand(3).getReg(); if (LIS->getInterval(Reg2).Query(FMAIdx).isKill() && Reg2 != OldFMAReg) { KilledProdOp = 2; @@ -214,8 +214,8 @@ protected: // Transform: (O2 * O3) + O1 -> (O2 * O1) + O3. - unsigned KilledProdReg = MI.getOperand(KilledProdOp).getReg(); - unsigned OtherProdReg = MI.getOperand(OtherProdOp).getReg(); + Register KilledProdReg = MI.getOperand(KilledProdOp).getReg(); + Register OtherProdReg = MI.getOperand(OtherProdOp).getReg(); unsigned AddSubReg = AddendMI->getOperand(1).getSubReg(); unsigned KilledProdSubReg = MI.getOperand(KilledProdOp).getSubReg(); diff --git a/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp b/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp index aced629f9b2..c3729da0b07 100644 --- a/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp +++ b/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp @@ -253,7 +253,7 @@ bool PPCVSXSwapRemoval::gatherVectorInstructions() { for (const MachineOperand &MO : MI.operands()) { if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (isAnyVecReg(Reg, Partial)) { RelevantInstr = true; break; @@ -601,7 +601,7 @@ void PPCVSXSwapRemoval::formWebs() { if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!isVecReg(Reg) && !isScalarVecReg(Reg)) continue; @@ -667,7 +667,7 @@ void PPCVSXSwapRemoval::recordUnoptimizableWebs() { // than a swap instruction. 
else if (SwapVector[EntryIdx].IsLoad && SwapVector[EntryIdx].IsSwap) { MachineInstr *MI = SwapVector[EntryIdx].VSEMI; - unsigned DefReg = MI->getOperand(0).getReg(); + Register DefReg = MI->getOperand(0).getReg(); // We skip debug instructions in the analysis. (Note that debug // location information is still maintained by this optimization @@ -695,9 +695,9 @@ void PPCVSXSwapRemoval::recordUnoptimizableWebs() { // other than a swap instruction. } else if (SwapVector[EntryIdx].IsStore && SwapVector[EntryIdx].IsSwap) { MachineInstr *MI = SwapVector[EntryIdx].VSEMI; - unsigned UseReg = MI->getOperand(0).getReg(); + Register UseReg = MI->getOperand(0).getReg(); MachineInstr *DefMI = MRI->getVRegDef(UseReg); - unsigned DefReg = DefMI->getOperand(0).getReg(); + Register DefReg = DefMI->getOperand(0).getReg(); int DefIdx = SwapMap[DefMI]; if (!SwapVector[DefIdx].IsSwap || SwapVector[DefIdx].IsLoad || @@ -756,7 +756,7 @@ void PPCVSXSwapRemoval::markSwapsForRemoval() { if (!SwapVector[Repr].WebRejected) { MachineInstr *MI = SwapVector[EntryIdx].VSEMI; - unsigned DefReg = MI->getOperand(0).getReg(); + Register DefReg = MI->getOperand(0).getReg(); for (MachineInstr &UseMI : MRI->use_nodbg_instructions(DefReg)) { int UseIdx = SwapMap[&UseMI]; @@ -772,7 +772,7 @@ void PPCVSXSwapRemoval::markSwapsForRemoval() { if (!SwapVector[Repr].WebRejected) { MachineInstr *MI = SwapVector[EntryIdx].VSEMI; - unsigned UseReg = MI->getOperand(0).getReg(); + Register UseReg = MI->getOperand(0).getReg(); MachineInstr *DefMI = MRI->getVRegDef(UseReg); int DefIdx = SwapMap[DefMI]; SwapVector[DefIdx].WillRemove = 1; @@ -869,8 +869,8 @@ void PPCVSXSwapRemoval::handleSpecialSwappables(int EntryIdx) { Selector = 3 - Selector; MI->getOperand(3).setImm(Selector); - unsigned Reg1 = MI->getOperand(1).getReg(); - unsigned Reg2 = MI->getOperand(2).getReg(); + Register Reg1 = MI->getOperand(1).getReg(); + Register Reg2 = MI->getOperand(2).getReg(); MI->getOperand(1).setReg(Reg2); MI->getOperand(2).setReg(Reg1); @@ -894,9 +894,9 @@ void PPCVSXSwapRemoval::handleSpecialSwappables(int EntryIdx) { LLVM_DEBUG(dbgs() << "Changing SUBREG_TO_REG: "); LLVM_DEBUG(MI->dump()); - unsigned DstReg = MI->getOperand(0).getReg(); + Register DstReg = MI->getOperand(0).getReg(); const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg); - unsigned NewVReg = MRI->createVirtualRegister(DstRC); + Register NewVReg = MRI->createVirtualRegister(DstRC); MI->getOperand(0).setReg(NewVReg); LLVM_DEBUG(dbgs() << " Into: "); @@ -910,8 +910,8 @@ void PPCVSXSwapRemoval::handleSpecialSwappables(int EntryIdx) { // prior to the swap, and from VSRC to VRRC following the swap. // Coalescing will usually remove all this mess. 
if (DstRC == &PPC::VRRCRegClass) { - unsigned VSRCTmp1 = MRI->createVirtualRegister(&PPC::VSRCRegClass); - unsigned VSRCTmp2 = MRI->createVirtualRegister(&PPC::VSRCRegClass); + Register VSRCTmp1 = MRI->createVirtualRegister(&PPC::VSRCRegClass); + Register VSRCTmp2 = MRI->createVirtualRegister(&PPC::VSRCRegClass); BuildMI(*MI->getParent(), InsertPoint, MI->getDebugLoc(), TII->get(PPC::COPY), VSRCTmp1) diff --git a/lib/Target/Sparc/DelaySlotFiller.cpp b/lib/Target/Sparc/DelaySlotFiller.cpp index f1ca8e18c22..db8e7850300 100644 --- a/lib/Target/Sparc/DelaySlotFiller.cpp +++ b/lib/Target/Sparc/DelaySlotFiller.cpp @@ -253,7 +253,7 @@ bool Filler::delayHasHazard(MachineBasicBlock::iterator candidate, if (!MO.isReg()) continue; // skip - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (MO.isDef()) { // check whether Reg is defined or used before delay slot. @@ -324,7 +324,7 @@ void Filler::insertDefsUses(MachineBasicBlock::iterator MI, if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Reg == 0) continue; if (MO.isDef()) @@ -380,7 +380,7 @@ static bool combineRestoreADD(MachineBasicBlock::iterator RestoreMI, // // After : restore , , %o[0-7] - unsigned reg = AddMI->getOperand(0).getReg(); + Register reg = AddMI->getOperand(0).getReg(); if (reg < SP::I0 || reg > SP::I7) return false; @@ -408,7 +408,7 @@ static bool combineRestoreOR(MachineBasicBlock::iterator RestoreMI, // // After : restore , , %o[0-7] - unsigned reg = OrMI->getOperand(0).getReg(); + Register reg = OrMI->getOperand(0).getReg(); if (reg < SP::I0 || reg > SP::I7) return false; @@ -446,7 +446,7 @@ static bool combineRestoreSETHIi(MachineBasicBlock::iterator RestoreMI, // // After : restore %g0, (imm3<<10), %o[0-7] - unsigned reg = SetHiMI->getOperand(0).getReg(); + Register reg = SetHiMI->getOperand(0).getReg(); if (reg < SP::I0 || reg > SP::I7) return false; diff --git a/lib/Target/Sparc/SparcISelDAGToDAG.cpp b/lib/Target/Sparc/SparcISelDAGToDAG.cpp index 8cff50d19ed..4e61c341b70 100644 --- a/lib/Target/Sparc/SparcISelDAGToDAG.cpp +++ b/lib/Target/Sparc/SparcISelDAGToDAG.cpp @@ -231,7 +231,7 @@ bool SparcDAGToDAGISel::tryInlineAsm(SDNode *N){ // Replace the two GPRs with 1 GPRPair and copy values from GPRPair to // the original GPRs. - unsigned GPVR = MRI.createVirtualRegister(&SP::IntPairRegClass); + Register GPVR = MRI.createVirtualRegister(&SP::IntPairRegClass); PairedReg = CurDAG->getRegister(GPVR, MVT::v2i32); SDValue Chain = SDValue(N,0); @@ -278,7 +278,7 @@ bool SparcDAGToDAGISel::tryInlineAsm(SDNode *N){ // Copy REG_SEQ into a GPRPair-typed VR and replace the original two // i32 VRs of inline asm with it. 
- unsigned GPVR = MRI.createVirtualRegister(&SP::IntPairRegClass); + Register GPVR = MRI.createVirtualRegister(&SP::IntPairRegClass); PairedReg = CurDAG->getRegister(GPVR, MVT::v2i32); Chain = CurDAG->getCopyToReg(T1, dl, GPVR, Pair, T1.getValue(1)); diff --git a/lib/Target/Sparc/SparcISelLowering.cpp b/lib/Target/Sparc/SparcISelLowering.cpp index 804f7ba74ed..4bae160c43e 100644 --- a/lib/Target/Sparc/SparcISelLowering.cpp +++ b/lib/Target/Sparc/SparcISelLowering.cpp @@ -417,7 +417,7 @@ SDValue SparcTargetLowering::LowerFormalArguments_32( if (VA.needsCustom()) { assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32); - unsigned VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass); + Register VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass); MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi); SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32); @@ -445,7 +445,7 @@ SDValue SparcTargetLowering::LowerFormalArguments_32( InVals.push_back(WholeValue); continue; } - unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass); + Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass); MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg); SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); if (VA.getLocVT() == MVT::f32) @@ -552,7 +552,7 @@ SDValue SparcTargetLowering::LowerFormalArguments_32( std::vector OutChains; for (; CurArgReg != ArgRegEnd; ++CurArgReg) { - unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass); + Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass); MF.getRegInfo().addLiveIn(*CurArgReg, VReg); SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32); diff --git a/lib/Target/Sparc/SparcInstrInfo.cpp b/lib/Target/Sparc/SparcInstrInfo.cpp index ad343fe6f80..3d3d314a26b 100644 --- a/lib/Target/Sparc/SparcInstrInfo.cpp +++ b/lib/Target/Sparc/SparcInstrInfo.cpp @@ -375,8 +375,8 @@ void SparcInstrInfo::copyPhysReg(MachineBasicBlock &MBB, MachineInstr *MovMI = nullptr; for (unsigned i = 0; i != numSubRegs; ++i) { - unsigned Dst = TRI->getSubReg(DestReg, subRegIdx[i]); - unsigned Src = TRI->getSubReg(SrcReg, subRegIdx[i]); + Register Dst = TRI->getSubReg(DestReg, subRegIdx[i]); + Register Src = TRI->getSubReg(SrcReg, subRegIdx[i]); assert(Dst && Src && "Bad sub-register"); MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(movOpc), Dst); diff --git a/lib/Target/Sparc/SparcRegisterInfo.cpp b/lib/Target/Sparc/SparcRegisterInfo.cpp index ce11a423d10..19a90e98db7 100644 --- a/lib/Target/Sparc/SparcRegisterInfo.cpp +++ b/lib/Target/Sparc/SparcRegisterInfo.cpp @@ -182,9 +182,9 @@ SparcRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, if (!Subtarget.isV9() || !Subtarget.hasHardQuad()) { if (MI.getOpcode() == SP::STQFri) { const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); - unsigned SrcReg = MI.getOperand(2).getReg(); - unsigned SrcEvenReg = getSubReg(SrcReg, SP::sub_even64); - unsigned SrcOddReg = getSubReg(SrcReg, SP::sub_odd64); + Register SrcReg = MI.getOperand(2).getReg(); + Register SrcEvenReg = getSubReg(SrcReg, SP::sub_even64); + Register SrcOddReg = getSubReg(SrcReg, SP::sub_odd64); MachineInstr *StMI = BuildMI(*MI.getParent(), II, dl, TII.get(SP::STDFri)) .addReg(FrameReg).addImm(0).addReg(SrcEvenReg); @@ -194,9 +194,9 @@ SparcRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, Offset += 8; } else if (MI.getOpcode() == SP::LDQFri) { const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); - unsigned DestReg = MI.getOperand(0).getReg(); - unsigned 
DestEvenReg = getSubReg(DestReg, SP::sub_even64); - unsigned DestOddReg = getSubReg(DestReg, SP::sub_odd64); + Register DestReg = MI.getOperand(0).getReg(); + Register DestEvenReg = getSubReg(DestReg, SP::sub_even64); + Register DestOddReg = getSubReg(DestReg, SP::sub_odd64); MachineInstr *LdMI = BuildMI(*MI.getParent(), II, dl, TII.get(SP::LDDFri), DestEvenReg) .addReg(FrameReg).addImm(0); diff --git a/lib/Target/SystemZ/SystemZElimCompare.cpp b/lib/Target/SystemZ/SystemZElimCompare.cpp index 9cbf6b32050..36babf02992 100644 --- a/lib/Target/SystemZ/SystemZElimCompare.cpp +++ b/lib/Target/SystemZ/SystemZElimCompare.cpp @@ -152,7 +152,7 @@ Reference SystemZElimCompare::getRegReferences(MachineInstr &MI, unsigned Reg) { for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) { const MachineOperand &MO = MI.getOperand(I); if (MO.isReg()) { - if (unsigned MOReg = MO.getReg()) { + if (Register MOReg = MO.getReg()) { if (TRI->regsOverlap(MOReg, Reg)) { if (MO.isUse()) Ref.Use = true; diff --git a/lib/Target/SystemZ/SystemZExpandPseudo.cpp b/lib/Target/SystemZ/SystemZExpandPseudo.cpp index 09708fb4241..45270da2181 100644 --- a/lib/Target/SystemZ/SystemZExpandPseudo.cpp +++ b/lib/Target/SystemZ/SystemZExpandPseudo.cpp @@ -68,8 +68,8 @@ bool SystemZExpandPseudo::expandLOCRMux(MachineBasicBlock &MBB, const BasicBlock *BB = MBB.getBasicBlock(); MachineInstr &MI = *MBBI; DebugLoc DL = MI.getDebugLoc(); - unsigned DestReg = MI.getOperand(0).getReg(); - unsigned SrcReg = MI.getOperand(2).getReg(); + Register DestReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(2).getReg(); unsigned CCValid = MI.getOperand(3).getImm(); unsigned CCMask = MI.getOperand(4).getImm(); diff --git a/lib/Target/SystemZ/SystemZFrameLowering.cpp b/lib/Target/SystemZ/SystemZFrameLowering.cpp index da28faebb32..1893c082c83 100644 --- a/lib/Target/SystemZ/SystemZFrameLowering.cpp +++ b/lib/Target/SystemZ/SystemZFrameLowering.cpp @@ -118,7 +118,7 @@ static void addSavedGPR(MachineBasicBlock &MBB, MachineInstrBuilder &MIB, unsigned GPR64, bool IsImplicit) { const TargetRegisterInfo *RI = MBB.getParent()->getSubtarget().getRegisterInfo(); - unsigned GPR32 = RI->getSubReg(GPR64, SystemZ::subreg_l32); + Register GPR32 = RI->getSubReg(GPR64, SystemZ::subreg_l32); bool IsLive = MBB.isLiveIn(GPR64) || MBB.isLiveIn(GPR32); if (!IsLive || !IsImplicit) { MIB.addReg(GPR64, getImplRegState(IsImplicit) | getKillRegState(!IsLive)); diff --git a/lib/Target/SystemZ/SystemZISelLowering.cpp b/lib/Target/SystemZ/SystemZISelLowering.cpp index e7b7a5b0cd5..50a03d538d9 100644 --- a/lib/Target/SystemZ/SystemZISelLowering.cpp +++ b/lib/Target/SystemZ/SystemZISelLowering.cpp @@ -1335,7 +1335,7 @@ SDValue SystemZTargetLowering::LowerFormalArguments( break; } - unsigned VReg = MRI.createVirtualRegister(RC); + Register VReg = MRI.createVirtualRegister(RC); MRI.addLiveIn(VA.getLocReg(), VReg); ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT); } else { @@ -1430,7 +1430,7 @@ static bool canUseSiblingCall(const CCState &ArgCCInfo, return false; if (!VA.isRegLoc()) return false; - unsigned Reg = VA.getLocReg(); + Register Reg = VA.getLocReg(); if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D) return false; if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftError()) @@ -1674,7 +1674,7 @@ SystemZTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue); // Chain and glue the copies together. 
- unsigned Reg = VA.getLocReg(); + Register Reg = VA.getLocReg(); Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue); Glue = Chain.getValue(1); RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT())); @@ -6574,9 +6574,9 @@ static void createPHIsForSelects(MachineBasicBlock::iterator MIItBegin, for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; MIIt = skipDebugInstructionsForward(++MIIt, MIItEnd)) { - unsigned DestReg = MIIt->getOperand(0).getReg(); - unsigned TrueReg = MIIt->getOperand(1).getReg(); - unsigned FalseReg = MIIt->getOperand(2).getReg(); + Register DestReg = MIIt->getOperand(0).getReg(); + Register TrueReg = MIIt->getOperand(1).getReg(); + Register FalseReg = MIIt->getOperand(2).getReg(); // If this Select we are generating is the opposite condition from // the jump we generated, then we have to swap the operands for the @@ -6678,10 +6678,10 @@ MachineBasicBlock *SystemZTargetLowering::emitCondStore(MachineInstr &MI, const SystemZInstrInfo *TII = static_cast(Subtarget.getInstrInfo()); - unsigned SrcReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(0).getReg(); MachineOperand Base = MI.getOperand(1); int64_t Disp = MI.getOperand(2).getImm(); - unsigned IndexReg = MI.getOperand(3).getReg(); + Register IndexReg = MI.getOperand(3).getReg(); unsigned CCValid = MI.getOperand(4).getImm(); unsigned CCMask = MI.getOperand(5).getImm(); DebugLoc DL = MI.getDebugLoc(); @@ -6773,7 +6773,7 @@ MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadBinary( // Extract the operands. Base can be a register or a frame index. // Src2 can be a register or immediate. - unsigned Dest = MI.getOperand(0).getReg(); + Register Dest = MI.getOperand(0).getReg(); MachineOperand Base = earlyUseOperand(MI.getOperand(1)); int64_t Disp = MI.getOperand(2).getImm(); MachineOperand Src2 = earlyUseOperand(MI.getOperand(3)); @@ -6833,7 +6833,7 @@ MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadBinary( .addReg(OldVal).addReg(BitShift).addImm(0); if (Invert) { // Perform the operation normally and then invert every bit of the field. - unsigned Tmp = MRI.createVirtualRegister(RC); + Register Tmp = MRI.createVirtualRegister(RC); BuildMI(MBB, DL, TII->get(BinOpcode), Tmp).addReg(RotatedOldVal).add(Src2); if (BitSize <= 32) // XILF with the upper BitSize bits set. @@ -6842,7 +6842,7 @@ MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadBinary( else { // Use LCGR and add -1 to the result, which is more compact than // an XILF, XILH pair. - unsigned Tmp2 = MRI.createVirtualRegister(RC); + Register Tmp2 = MRI.createVirtualRegister(RC); BuildMI(MBB, DL, TII->get(SystemZ::LCGR), Tmp2).addReg(Tmp); BuildMI(MBB, DL, TII->get(SystemZ::AGHI), RotatedNewVal) .addReg(Tmp2).addImm(-1); @@ -6891,7 +6891,7 @@ MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadMinMax( bool IsSubWord = (BitSize < 32); // Extract the operands. Base can be a register or a frame index. - unsigned Dest = MI.getOperand(0).getReg(); + Register Dest = MI.getOperand(0).getReg(); MachineOperand Base = earlyUseOperand(MI.getOperand(1)); int64_t Disp = MI.getOperand(2).getImm(); Register Src2 = MI.getOperand(3).getReg(); @@ -7005,13 +7005,13 @@ SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr &MI, MachineRegisterInfo &MRI = MF.getRegInfo(); // Extract the operands. Base can be a register or a frame index. 
- unsigned Dest = MI.getOperand(0).getReg(); + Register Dest = MI.getOperand(0).getReg(); MachineOperand Base = earlyUseOperand(MI.getOperand(1)); int64_t Disp = MI.getOperand(2).getImm(); - unsigned OrigCmpVal = MI.getOperand(3).getReg(); - unsigned OrigSwapVal = MI.getOperand(4).getReg(); - unsigned BitShift = MI.getOperand(5).getReg(); - unsigned NegBitShift = MI.getOperand(6).getReg(); + Register OrigCmpVal = MI.getOperand(3).getReg(); + Register OrigSwapVal = MI.getOperand(4).getReg(); + Register BitShift = MI.getOperand(5).getReg(); + Register NegBitShift = MI.getOperand(6).getReg(); int64_t BitSize = MI.getOperand(7).getImm(); DebugLoc DL = MI.getDebugLoc(); @@ -7023,14 +7023,14 @@ SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr &MI, assert(LOpcode && CSOpcode && "Displacement out of range"); // Create virtual registers for temporary results. - unsigned OrigOldVal = MRI.createVirtualRegister(RC); - unsigned OldVal = MRI.createVirtualRegister(RC); - unsigned CmpVal = MRI.createVirtualRegister(RC); - unsigned SwapVal = MRI.createVirtualRegister(RC); - unsigned StoreVal = MRI.createVirtualRegister(RC); - unsigned RetryOldVal = MRI.createVirtualRegister(RC); - unsigned RetryCmpVal = MRI.createVirtualRegister(RC); - unsigned RetrySwapVal = MRI.createVirtualRegister(RC); + Register OrigOldVal = MRI.createVirtualRegister(RC); + Register OldVal = MRI.createVirtualRegister(RC); + Register CmpVal = MRI.createVirtualRegister(RC); + Register SwapVal = MRI.createVirtualRegister(RC); + Register StoreVal = MRI.createVirtualRegister(RC); + Register RetryOldVal = MRI.createVirtualRegister(RC); + Register RetryCmpVal = MRI.createVirtualRegister(RC); + Register RetrySwapVal = MRI.createVirtualRegister(RC); // Insert 2 basic blocks for the loop. MachineBasicBlock *StartMBB = MBB; @@ -7129,11 +7129,11 @@ SystemZTargetLowering::emitPair128(MachineInstr &MI, MachineRegisterInfo &MRI = MF.getRegInfo(); DebugLoc DL = MI.getDebugLoc(); - unsigned Dest = MI.getOperand(0).getReg(); - unsigned Hi = MI.getOperand(1).getReg(); - unsigned Lo = MI.getOperand(2).getReg(); - unsigned Tmp1 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); - unsigned Tmp2 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); + Register Dest = MI.getOperand(0).getReg(); + Register Hi = MI.getOperand(1).getReg(); + Register Lo = MI.getOperand(2).getReg(); + Register Tmp1 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); + Register Tmp2 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), Tmp1); BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Tmp2) @@ -7157,14 +7157,14 @@ MachineBasicBlock *SystemZTargetLowering::emitExt128(MachineInstr &MI, MachineRegisterInfo &MRI = MF.getRegInfo(); DebugLoc DL = MI.getDebugLoc(); - unsigned Dest = MI.getOperand(0).getReg(); - unsigned Src = MI.getOperand(1).getReg(); - unsigned In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); + Register Dest = MI.getOperand(0).getReg(); + Register Src = MI.getOperand(1).getReg(); + Register In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128); if (ClearEven) { - unsigned NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); - unsigned Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass); + Register NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); + Register Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass); BuildMI(*MBB, MI, DL, 
TII->get(SystemZ::LLILL), Zero64) .addImm(0); @@ -7308,7 +7308,7 @@ MachineBasicBlock *SystemZTargetLowering::emitMemMemWrapper( // The previous iteration might have created out-of-range displacements. // Apply them using LAY if so. if (!isUInt<12>(DestDisp)) { - unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); + Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg) .add(DestBase) .addImm(DestDisp) @@ -7317,7 +7317,7 @@ MachineBasicBlock *SystemZTargetLowering::emitMemMemWrapper( DestDisp = 0; } if (!isUInt<12>(SrcDisp)) { - unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); + Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg) .add(SrcBase) .addImm(SrcDisp) @@ -7474,11 +7474,11 @@ MachineBasicBlock *SystemZTargetLowering::emitLoadAndTestCmp0( static_cast(Subtarget.getInstrInfo()); DebugLoc DL = MI.getDebugLoc(); - unsigned SrcReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(0).getReg(); // Create new virtual register of the same class as source. const TargetRegisterClass *RC = MRI->getRegClass(SrcReg); - unsigned DstReg = MRI->createVirtualRegister(RC); + Register DstReg = MRI->createVirtualRegister(RC); // Replace pseudo with a normal load-and-test that models the def as // well. diff --git a/lib/Target/SystemZ/SystemZInstrInfo.cpp b/lib/Target/SystemZ/SystemZInstrInfo.cpp index 6841ce084cf..da71759cc3f 100644 --- a/lib/Target/SystemZ/SystemZInstrInfo.cpp +++ b/lib/Target/SystemZ/SystemZInstrInfo.cpp @@ -85,7 +85,7 @@ void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI, // Set up the two 64-bit registers and remember super reg and its flags. MachineOperand &HighRegOp = EarlierMI->getOperand(0); MachineOperand &LowRegOp = MI->getOperand(0); - unsigned Reg128 = LowRegOp.getReg(); + Register Reg128 = LowRegOp.getReg(); unsigned Reg128Killed = getKillRegState(LowRegOp.isKill()); unsigned Reg128Undef = getUndefRegState(LowRegOp.isUndef()); HighRegOp.setReg(RI.getSubReg(HighRegOp.getReg(), SystemZ::subreg_h64)); @@ -147,7 +147,7 @@ void SystemZInstrInfo::splitAdjDynAlloc(MachineBasicBlock::iterator MI) const { void SystemZInstrInfo::expandRIPseudo(MachineInstr &MI, unsigned LowOpcode, unsigned HighOpcode, bool ConvertHigh) const { - unsigned Reg = MI.getOperand(0).getReg(); + Register Reg = MI.getOperand(0).getReg(); bool IsHigh = isHighReg(Reg); MI.setDesc(get(IsHigh ? HighOpcode : LowOpcode)); if (IsHigh && ConvertHigh) @@ -161,8 +161,8 @@ void SystemZInstrInfo::expandRIPseudo(MachineInstr &MI, unsigned LowOpcode, void SystemZInstrInfo::expandRIEPseudo(MachineInstr &MI, unsigned LowOpcode, unsigned LowOpcodeK, unsigned HighOpcode) const { - unsigned DestReg = MI.getOperand(0).getReg(); - unsigned SrcReg = MI.getOperand(1).getReg(); + Register DestReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); bool DestIsHigh = isHighReg(DestReg); bool SrcIsHigh = isHighReg(SrcReg); if (!DestIsHigh && !SrcIsHigh) @@ -184,7 +184,7 @@ void SystemZInstrInfo::expandRIEPseudo(MachineInstr &MI, unsigned LowOpcode, // is a high GR32. void SystemZInstrInfo::expandRXYPseudo(MachineInstr &MI, unsigned LowOpcode, unsigned HighOpcode) const { - unsigned Reg = MI.getOperand(0).getReg(); + Register Reg = MI.getOperand(0).getReg(); unsigned Opcode = getOpcodeForOffset(isHighReg(Reg) ? 
HighOpcode : LowOpcode, MI.getOperand(2).getImm()); MI.setDesc(get(Opcode)); @@ -195,7 +195,7 @@ void SystemZInstrInfo::expandRXYPseudo(MachineInstr &MI, unsigned LowOpcode, // register is a low GR32 and HighOpcode if the register is a high GR32. void SystemZInstrInfo::expandLOCPseudo(MachineInstr &MI, unsigned LowOpcode, unsigned HighOpcode) const { - unsigned Reg = MI.getOperand(0).getReg(); + Register Reg = MI.getOperand(0).getReg(); unsigned Opcode = isHighReg(Reg) ? HighOpcode : LowOpcode; MI.setDesc(get(Opcode)); } @@ -205,8 +205,8 @@ void SystemZInstrInfo::expandLOCPseudo(MachineInstr &MI, unsigned LowOpcode, // source and destination are both high GR32s. void SystemZInstrInfo::expandLOCRPseudo(MachineInstr &MI, unsigned LowOpcode, unsigned HighOpcode) const { - unsigned DestReg = MI.getOperand(0).getReg(); - unsigned SrcReg = MI.getOperand(2).getReg(); + Register DestReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(2).getReg(); bool DestIsHigh = isHighReg(DestReg); bool SrcIsHigh = isHighReg(SrcReg); @@ -229,9 +229,9 @@ void SystemZInstrInfo::expandLOCRPseudo(MachineInstr &MI, unsigned LowOpcode, void SystemZInstrInfo::expandSELRPseudo(MachineInstr &MI, unsigned LowOpcode, unsigned HighOpcode, unsigned MixedOpcode) const { - unsigned DestReg = MI.getOperand(0).getReg(); - unsigned Src1Reg = MI.getOperand(1).getReg(); - unsigned Src2Reg = MI.getOperand(2).getReg(); + Register DestReg = MI.getOperand(0).getReg(); + Register Src1Reg = MI.getOperand(1).getReg(); + Register Src2Reg = MI.getOperand(2).getReg(); bool DestIsHigh = isHighReg(DestReg); bool Src1IsHigh = isHighReg(Src1Reg); bool Src2IsHigh = isHighReg(Src2Reg); @@ -302,8 +302,8 @@ void SystemZInstrInfo::expandZExtPseudo(MachineInstr &MI, unsigned LowOpcode, void SystemZInstrInfo::expandLoadStackGuard(MachineInstr *MI) const { MachineBasicBlock *MBB = MI->getParent(); MachineFunction &MF = *MBB->getParent(); - const unsigned Reg64 = MI->getOperand(0).getReg(); - const unsigned Reg32 = RI.getSubReg(Reg64, SystemZ::subreg_l32); + const Register Reg64 = MI->getOperand(0).getReg(); + const Register Reg32 = RI.getSubReg(Reg64, SystemZ::subreg_l32); // EAR can only load the low subregister so us a shift for %a0 to produce // the GR containing %a0 and %a1. 
@@ -676,8 +676,8 @@ void SystemZInstrInfo::insertSelect(MachineBasicBlock &MBB, else { Opc = SystemZ::LOCR; MRI.constrainRegClass(DstReg, &SystemZ::GR32BitRegClass); - unsigned TReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass); - unsigned FReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass); + Register TReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass); + Register FReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass); BuildMI(MBB, I, DL, get(TargetOpcode::COPY), TReg).addReg(TrueReg); BuildMI(MBB, I, DL, get(TargetOpcode::COPY), FReg).addReg(FalseReg); TrueReg = TReg; diff --git a/lib/Target/SystemZ/SystemZPostRewrite.cpp b/lib/Target/SystemZ/SystemZPostRewrite.cpp index 8e4060eac74..6ef005f813a 100644 --- a/lib/Target/SystemZ/SystemZPostRewrite.cpp +++ b/lib/Target/SystemZ/SystemZPostRewrite.cpp @@ -83,7 +83,7 @@ bool SystemZPostRewrite::selectMI(MachineBasicBlock &MBB, if (TargetMemOpcode != -1) { MI.setDesc(TII->get(TargetMemOpcode)); MI.tieOperands(0, 1); - unsigned DstReg = MI.getOperand(0).getReg(); + Register DstReg = MI.getOperand(0).getReg(); MachineOperand &SrcMO = MI.getOperand(1); if (DstReg != SrcMO.getReg()) { BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(SystemZ::COPY), DstReg) diff --git a/lib/Target/SystemZ/SystemZRegisterInfo.cpp b/lib/Target/SystemZ/SystemZRegisterInfo.cpp index 210c0f44c2c..39ace5594b7 100644 --- a/lib/Target/SystemZ/SystemZRegisterInfo.cpp +++ b/lib/Target/SystemZ/SystemZRegisterInfo.cpp @@ -41,7 +41,7 @@ static const TargetRegisterClass *getRC32(MachineOperand &MO, return &SystemZ::GRH32BitRegClass; if (VRM && VRM->hasPhys(MO.getReg())) { - unsigned PhysReg = VRM->getPhys(MO.getReg()); + Register PhysReg = VRM->getPhys(MO.getReg()); if (SystemZ::GR32BitRegClass.contains(PhysReg)) return &SystemZ::GR32BitRegClass; assert (SystemZ::GRH32BitRegClass.contains(PhysReg) && @@ -120,8 +120,8 @@ SystemZRegisterInfo::getRegAllocationHints(unsigned VirtReg, } // Add the other operand of the LOCRMux to the worklist. - unsigned OtherReg = - (TrueMO.getReg() == Reg ? FalseMO.getReg() : TrueMO.getReg()); + Register OtherReg = + (TrueMO.getReg() == Reg ? FalseMO.getReg() : TrueMO.getReg()); if (MRI->getRegClass(OtherReg) == &SystemZ::GRX32BitRegClass) Worklist.push_back(OtherReg); } // end LOCRMux @@ -298,8 +298,8 @@ SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI, assert(Mask && "One offset must be OK"); } while (!OpcodeForOffset); - unsigned ScratchReg = - MF.getRegInfo().createVirtualRegister(&SystemZ::ADDR64BitRegClass); + Register ScratchReg = + MF.getRegInfo().createVirtualRegister(&SystemZ::ADDR64BitRegClass); int64_t HighOffset = OldOffset - Offset; if (MI->getDesc().TSFlags & SystemZII::HasIndex @@ -352,8 +352,8 @@ bool SystemZRegisterInfo::shouldCoalesce(MachineInstr *MI, // regalloc may run out of registers. unsigned WideOpNo = (getRegSizeInBits(*SrcRC) == 128 ? 1 : 0); - unsigned GR128Reg = MI->getOperand(WideOpNo).getReg(); - unsigned GRNarReg = MI->getOperand((WideOpNo == 1) ? 0 : 1).getReg(); + Register GR128Reg = MI->getOperand(WideOpNo).getReg(); + Register GRNarReg = MI->getOperand((WideOpNo == 1) ? 
0 : 1).getReg(); LiveInterval &IntGR128 = LIS.getInterval(GR128Reg); LiveInterval &IntGRNar = LIS.getInterval(GRNarReg); diff --git a/lib/Target/SystemZ/SystemZShortenInst.cpp b/lib/Target/SystemZ/SystemZShortenInst.cpp index e79dfc5b4b9..2aca22c9082 100644 --- a/lib/Target/SystemZ/SystemZShortenInst.cpp +++ b/lib/Target/SystemZ/SystemZShortenInst.cpp @@ -75,7 +75,7 @@ static void tieOpsIfNeeded(MachineInstr &MI) { // instead of IIxF. bool SystemZShortenInst::shortenIIF(MachineInstr &MI, unsigned LLIxL, unsigned LLIxH) { - unsigned Reg = MI.getOperand(0).getReg(); + Register Reg = MI.getOperand(0).getReg(); // The new opcode will clear the other half of the GR64 reg, so // cancel if that is live. unsigned thisSubRegIdx = @@ -86,7 +86,7 @@ bool SystemZShortenInst::shortenIIF(MachineInstr &MI, unsigned LLIxL, : SystemZ::subreg_l32); unsigned GR64BitReg = TRI->getMatchingSuperReg(Reg, thisSubRegIdx, &SystemZ::GR64BitRegClass); - unsigned OtherReg = TRI->getSubReg(GR64BitReg, otherSubRegIdx); + Register OtherReg = TRI->getSubReg(GR64BitReg, otherSubRegIdx); if (LiveRegs.contains(OtherReg)) return false; diff --git a/lib/Target/X86/X86AsmPrinter.cpp b/lib/Target/X86/X86AsmPrinter.cpp index 80120722e0e..7d9c74f8e2f 100644 --- a/lib/Target/X86/X86AsmPrinter.cpp +++ b/lib/Target/X86/X86AsmPrinter.cpp @@ -242,7 +242,7 @@ void X86AsmPrinter::PrintModifiedOperand(const MachineInstr *MI, unsigned OpNo, return PrintOperand(MI, OpNo, O); if (MI->getInlineAsmDialect() == InlineAsm::AD_ATT) O << '%'; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (strncmp(Modifier, "subreg", strlen("subreg")) == 0) { unsigned Size = (strcmp(Modifier+6,"64") == 0) ? 64 : (strcmp(Modifier+6,"32") == 0) ? 32 : @@ -388,7 +388,7 @@ void X86AsmPrinter::PrintIntelMemReference(const MachineInstr *MI, static bool printAsmMRegister(X86AsmPrinter &P, const MachineOperand &MO, char Mode, raw_ostream &O) { - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); bool EmitPercent = true; if (!X86::GR8RegClass.contains(Reg) && diff --git a/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp b/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp index 3dcc1015dc7..7c195e51d16 100644 --- a/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp +++ b/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp @@ -390,7 +390,7 @@ void X86AvoidSFBPass::buildCopy(MachineInstr *LoadInst, unsigned NLoadOpcode, MachineMemOperand *LMMO = *LoadInst->memoperands_begin(); MachineMemOperand *SMMO = *StoreInst->memoperands_begin(); - unsigned Reg1 = MRI->createVirtualRegister( + Register Reg1 = MRI->createVirtualRegister( TII->getRegClass(TII->get(NLoadOpcode), 0, TRI, *(MBB->getParent()))); MachineInstr *NewLoad = BuildMI(*MBB, LoadInst, LoadInst->getDebugLoc(), TII->get(NLoadOpcode), diff --git a/lib/Target/X86/X86CallFrameOptimization.cpp b/lib/Target/X86/X86CallFrameOptimization.cpp index 7796945f1c3..ad7e32b4efc 100644 --- a/lib/Target/X86/X86CallFrameOptimization.cpp +++ b/lib/Target/X86/X86CallFrameOptimization.cpp @@ -335,7 +335,7 @@ X86CallFrameOptimization::classifyInstruction( for (const MachineOperand &MO : MI->operands()) { if (!MO.isReg()) continue; - unsigned int Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Register::isPhysicalRegister(Reg)) continue; if (RegInfo.regsOverlap(Reg, RegInfo.getStackRegister())) @@ -380,7 +380,7 @@ void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF, while (I->getOpcode() == X86::LEA32r || I->isDebugInstr()) ++I; - unsigned StackPtr = RegInfo.getStackRegister(); + Register StackPtr = 
RegInfo.getStackRegister(); auto StackPtrCopyInst = MBB.end(); // SelectionDAG (but not FastISel) inserts a copy of ESP into a virtual // register. If it's there, use that virtual register as stack pointer @@ -453,7 +453,7 @@ void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF, for (const MachineOperand &MO : I->uses()) { if (!MO.isReg()) continue; - unsigned int Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Register::isPhysicalRegister(Reg)) UsedRegs.insert(Reg); } @@ -534,12 +534,12 @@ void X86CallFrameOptimization::adjustCallSequence(MachineFunction &MF, break; case X86::MOV32mr: case X86::MOV64mr: { - unsigned int Reg = PushOp.getReg(); + Register Reg = PushOp.getReg(); // If storing a 32-bit vreg on 64-bit targets, extend to a 64-bit vreg // in preparation for the PUSH64. The upper 32 bits can be undef. if (Is64Bit && Store->getOpcode() == X86::MOV32mr) { - unsigned UndefReg = MRI->createVirtualRegister(&X86::GR64RegClass); + Register UndefReg = MRI->createVirtualRegister(&X86::GR64RegClass); Reg = MRI->createVirtualRegister(&X86::GR64RegClass); BuildMI(MBB, Context.Call, DL, TII->get(X86::IMPLICIT_DEF), UndefReg); BuildMI(MBB, Context.Call, DL, TII->get(X86::INSERT_SUBREG), Reg) diff --git a/lib/Target/X86/X86CallLowering.cpp b/lib/Target/X86/X86CallLowering.cpp index 0761dfe49ed..d559b2e2016 100644 --- a/lib/Target/X86/X86CallLowering.cpp +++ b/lib/Target/X86/X86CallLowering.cpp @@ -237,7 +237,7 @@ struct IncomingValueHandler : public CallLowering::ValueHandler { int FI = MFI.CreateFixedObject(Size, Offset, true); MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI); - unsigned AddrReg = MRI.createGenericVirtualRegister( + Register AddrReg = MRI.createGenericVirtualRegister( LLT::pointer(0, DL.getPointerSizeInBits(0))); MIRBuilder.buildFrameIndex(AddrReg, FI); return AddrReg; diff --git a/lib/Target/X86/X86CmovConversion.cpp b/lib/Target/X86/X86CmovConversion.cpp index 6e7275b7e6b..5123853f545 100644 --- a/lib/Target/X86/X86CmovConversion.cpp +++ b/lib/Target/X86/X86CmovConversion.cpp @@ -436,7 +436,7 @@ bool X86CmovConverterPass::checkForProfitableCmovCandidates( // Checks for "isUse()" as "uses()" returns also implicit definitions. if (!MO.isReg() || !MO.isUse()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); auto &RDM = RegDefMaps[Register::isVirtualRegister(Reg)]; if (MachineInstr *DefMI = RDM.lookup(Reg)) { OperandToDefMap[&MO] = DefMI; @@ -456,7 +456,7 @@ bool X86CmovConverterPass::checkForProfitableCmovCandidates( for (auto &MO : MI.operands()) { if (!MO.isReg() || !MO.isDef()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); RegDefMaps[Register::isVirtualRegister(Reg)][Reg] = &MI; } @@ -710,7 +710,7 @@ void X86CmovConverterPass::convertCmovInstsToBranches( // Skip any CMOVs in this group which don't load from memory. if (!MI.mayLoad()) { // Remember the false-side register input. - unsigned FalseReg = + Register FalseReg = MI.getOperand(X86::getCondFromCMov(MI) == CC ? 1 : 2).getReg(); // Walk back through any intermediate cmovs referenced. while (true) { @@ -753,7 +753,7 @@ void X86CmovConverterPass::convertCmovInstsToBranches( // Get a fresh register to use as the destination of the MOV. 
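Several of the X86 hunks above keep calling the static predicates Register::isVirtualRegister() and Register::isPhysicalRegister() on the retyped values, including the two-element RegDefMaps array indexed by the predicate in the CMOV-conversion code. The sketch below models that pattern standalone; the top-bit encoding of virtual registers mirrors the spirit of LLVM's numbering but is only an assumption of this simplified stand-in.

// Standalone sketch of the static predicates, not the real implementation.
#include <cassert>
#include <map>

struct Register {
  unsigned Reg;
  constexpr Register(unsigned Val = 0) : Reg(Val) {}
  constexpr operator unsigned() const { return Reg; }
  // Model: virtual register numbers carry the top bit.
  static constexpr bool isVirtualRegister(unsigned R) { return (R & 0x80000000u) != 0; }
  static constexpr bool isPhysicalRegister(unsigned R) { return R != 0 && !isVirtualRegister(R); }
};

int main() {
  Register Phys(5), Virt(0x80000000u | 7);
  // The pattern from the CMOV-conversion hunk: one map for physical defs,
  // one for virtual defs, selected by indexing with the bool predicate.
  std::map<unsigned, const char *> RegDefMaps[2];
  RegDefMaps[Register::isVirtualRegister(Phys)][Phys] = "phys def";
  RegDefMaps[Register::isVirtualRegister(Virt)][Virt] = "virt def";
  assert(RegDefMaps[0].count(Phys) && RegDefMaps[1].count(Virt));
  assert(Register::isPhysicalRegister(Phys) && !Register::isPhysicalRegister(Virt));
  return 0;
}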
const TargetRegisterClass *RC = MRI->getRegClass(MI.getOperand(0).getReg()); - unsigned TmpReg = MRI->createVirtualRegister(RC); + Register TmpReg = MRI->createVirtualRegister(RC); SmallVector NewMIs; bool Unfolded = TII->unfoldMemoryOperand(*MBB->getParent(), MI, TmpReg, @@ -810,9 +810,9 @@ void X86CmovConverterPass::convertCmovInstsToBranches( DenseMap> RegRewriteTable; for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) { - unsigned DestReg = MIIt->getOperand(0).getReg(); - unsigned Op1Reg = MIIt->getOperand(1).getReg(); - unsigned Op2Reg = MIIt->getOperand(2).getReg(); + Register DestReg = MIIt->getOperand(0).getReg(); + Register Op1Reg = MIIt->getOperand(1).getReg(); + Register Op2Reg = MIIt->getOperand(2).getReg(); // If this CMOV we are processing is the opposite condition from the jump we // generated, then we have to swap the operands for the PHI that is going to diff --git a/lib/Target/X86/X86DomainReassignment.cpp b/lib/Target/X86/X86DomainReassignment.cpp index dc82987cf60..b4cf5cafbc6 100644 --- a/lib/Target/X86/X86DomainReassignment.cpp +++ b/lib/Target/X86/X86DomainReassignment.cpp @@ -182,7 +182,7 @@ public: MachineBasicBlock *MBB = MI->getParent(); auto &DL = MI->getDebugLoc(); - unsigned Reg = MRI->createVirtualRegister( + Register Reg = MRI->createVirtualRegister( TII->getRegClass(TII->get(DstOpcode), 0, MRI->getTargetRegisterInfo(), *MBB->getParent())); MachineInstrBuilder Bld = BuildMI(*MBB, MI, DL, TII->get(DstOpcode), Reg); @@ -219,12 +219,12 @@ public: // Don't allow copies to/flow GR8/GR16 physical registers. // FIXME: Is there some better way to support this? - unsigned DstReg = MI->getOperand(0).getReg(); + Register DstReg = MI->getOperand(0).getReg(); if (Register::isPhysicalRegister(DstReg) && (X86::GR8RegClass.contains(DstReg) || X86::GR16RegClass.contains(DstReg))) return false; - unsigned SrcReg = MI->getOperand(1).getReg(); + Register SrcReg = MI->getOperand(1).getReg(); if (Register::isPhysicalRegister(SrcReg) && (X86::GR8RegClass.contains(SrcReg) || X86::GR16RegClass.contains(SrcReg))) @@ -593,7 +593,7 @@ void X86DomainReassignment::buildClosure(Closure &C, unsigned Reg) { if (!DefOp.isReg()) continue; - unsigned DefReg = DefOp.getReg(); + Register DefReg = DefOp.getReg(); if (!Register::isVirtualRegister(DefReg)) { C.setAllIllegal(); continue; diff --git a/lib/Target/X86/X86EvexToVex.cpp b/lib/Target/X86/X86EvexToVex.cpp index 58680f1815b..24c8e6d6f6e 100644 --- a/lib/Target/X86/X86EvexToVex.cpp +++ b/lib/Target/X86/X86EvexToVex.cpp @@ -131,7 +131,7 @@ static bool usesExtendedRegister(const MachineInstr &MI) { if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); assert(!(Reg >= X86::ZMM0 && Reg <= X86::ZMM31) && "ZMM instructions should not be in the EVEX->VEX tables"); diff --git a/lib/Target/X86/X86ExpandPseudo.cpp b/lib/Target/X86/X86ExpandPseudo.cpp index b8624b40f2f..f0da7b143ab 100644 --- a/lib/Target/X86/X86ExpandPseudo.cpp +++ b/lib/Target/X86/X86ExpandPseudo.cpp @@ -287,7 +287,7 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB, assert(DestAddr.isReg() && "Offset should be in register!"); const bool Uses64BitFramePtr = STI->isTarget64BitLP64() || STI->isTargetNaCl64(); - unsigned StackPtr = TRI->getStackRegister(); + Register StackPtr = TRI->getStackRegister(); BuildMI(MBB, MBBI, DL, TII->get(Uses64BitFramePtr ? 
X86::MOV64rr : X86::MOV32rr), StackPtr) .addReg(DestAddr.getReg()); @@ -347,7 +347,7 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB, // actualcmpxchg Addr // [E|R]BX = SaveRbx const MachineOperand &InArg = MBBI->getOperand(6); - unsigned SaveRbx = MBBI->getOperand(7).getReg(); + Register SaveRbx = MBBI->getOperand(7).getReg(); unsigned ActualInArg = Opcode == X86::LCMPXCHG8B_SAVE_EBX ? X86::EBX : X86::RBX; diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp index 7b9ce027120..a4b23dc8771 100644 --- a/lib/Target/X86/X86FastISel.cpp +++ b/lib/Target/X86/X86FastISel.cpp @@ -1241,7 +1241,7 @@ bool X86FastISel::X86SelectRet(const Instruction *I) { } // Make the copy. - unsigned DstReg = VA.getLocReg(); + Register DstReg = VA.getLocReg(); const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg); // Avoid a cross-class copy. This is very unlikely. if (!SrcRC->contains(DstReg)) @@ -3547,7 +3547,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) { CCValAssign &VA = RVLocs[i]; EVT CopyVT = VA.getValVT(); unsigned CopyReg = ResultReg + i; - unsigned SrcReg = VA.getLocReg(); + Register SrcReg = VA.getLocReg(); // If this is x86-64, and we disabled SSE, we can't return FP values if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) && diff --git a/lib/Target/X86/X86FixupBWInsts.cpp b/lib/Target/X86/X86FixupBWInsts.cpp index bf541d93379..453e30677a5 100644 --- a/lib/Target/X86/X86FixupBWInsts.cpp +++ b/lib/Target/X86/X86FixupBWInsts.cpp @@ -172,7 +172,7 @@ bool FixupBWInstPass::getSuperRegDestIfDead(MachineInstr *OrigMI, unsigned &SuperDestReg) const { auto *TRI = &TII->getRegisterInfo(); - unsigned OrigDestReg = OrigMI->getOperand(0).getReg(); + Register OrigDestReg = OrigMI->getOperand(0).getReg(); SuperDestReg = getX86SubSuperRegister(OrigDestReg, 32); const auto SubRegIdx = TRI->getSubRegIndex(SuperDestReg, OrigDestReg); diff --git a/lib/Target/X86/X86FixupLEAs.cpp b/lib/Target/X86/X86FixupLEAs.cpp index 041529a0be6..26cc90ced7b 100644 --- a/lib/Target/X86/X86FixupLEAs.cpp +++ b/lib/Target/X86/X86FixupLEAs.cpp @@ -372,9 +372,9 @@ bool FixupLEAPass::optTwoAddrLEA(MachineBasicBlock::iterator &I, !TII->isSafeToClobberEFLAGS(MBB, I)) return false; - unsigned DestReg = MI.getOperand(0).getReg(); - unsigned BaseReg = Base.getReg(); - unsigned IndexReg = Index.getReg(); + Register DestReg = MI.getOperand(0).getReg(); + Register BaseReg = Base.getReg(); + Register IndexReg = Index.getReg(); // Don't change stack adjustment LEAs. if (UseLEAForSP && (DestReg == X86::ESP || DestReg == X86::RSP)) @@ -500,9 +500,9 @@ void FixupLEAPass::processInstructionForSlowLEA(MachineBasicBlock::iterator &I, if (Segment.getReg() != 0 || !Offset.isImm() || !TII->isSafeToClobberEFLAGS(MBB, I)) return; - const unsigned DstR = Dst.getReg(); - const unsigned SrcR1 = Base.getReg(); - const unsigned SrcR2 = Index.getReg(); + const Register DstR = Dst.getReg(); + const Register SrcR1 = Base.getReg(); + const Register SrcR2 = Index.getReg(); if ((SrcR1 == 0 || SrcR1 != DstR) && (SrcR2 == 0 || SrcR2 != DstR)) return; if (Scale.getImm() > 1) @@ -553,8 +553,8 @@ FixupLEAPass::processInstrForSlow3OpLEA(MachineInstr &MI, return nullptr; unsigned DstR = Dst.getReg(); - unsigned BaseR = Base.getReg(); - unsigned IndexR = Index.getReg(); + Register BaseR = Base.getReg(); + Register IndexR = Index.getReg(); unsigned SSDstR = (LEAOpcode == X86::LEA64_32r) ? 
getX86SubSuperRegister(DstR, 64) : DstR; bool IsScale1 = Scale.getImm() == 1; diff --git a/lib/Target/X86/X86FixupSetCC.cpp b/lib/Target/X86/X86FixupSetCC.cpp index e2d4d1ede6f..cbde280aa28 100644 --- a/lib/Target/X86/X86FixupSetCC.cpp +++ b/lib/Target/X86/X86FixupSetCC.cpp @@ -136,8 +136,8 @@ bool X86FixupSetCCPass::runOnMachineFunction(MachineFunction &MF) { const TargetRegisterClass *RC = MF.getSubtarget().is64Bit() ? &X86::GR32RegClass : &X86::GR32_ABCDRegClass; - unsigned ZeroReg = MRI->createVirtualRegister(RC); - unsigned InsertReg = MRI->createVirtualRegister(RC); + Register ZeroReg = MRI->createVirtualRegister(RC); + Register InsertReg = MRI->createVirtualRegister(RC); // Initialize a register with 0. This must go before the eflags def BuildMI(MBB, FlagsDefMI, MI.getDebugLoc(), TII->get(X86::MOV32r0), diff --git a/lib/Target/X86/X86FlagsCopyLowering.cpp b/lib/Target/X86/X86FlagsCopyLowering.cpp index 6523d5049d3..cfba06fb653 100644 --- a/lib/Target/X86/X86FlagsCopyLowering.cpp +++ b/lib/Target/X86/X86FlagsCopyLowering.cpp @@ -740,7 +740,7 @@ CondRegArray X86FlagsCopyLoweringPass::collectCondsInRegs( unsigned X86FlagsCopyLoweringPass::promoteCondToReg( MachineBasicBlock &TestMBB, MachineBasicBlock::iterator TestPos, DebugLoc TestLoc, X86::CondCode Cond) { - unsigned Reg = MRI->createVirtualRegister(PromoteRC); + Register Reg = MRI->createVirtualRegister(PromoteRC); auto SetI = BuildMI(TestMBB, TestPos, TestLoc, TII->get(X86::SETCCr), Reg).addImm(Cond); (void)SetI; @@ -814,7 +814,7 @@ void X86FlagsCopyLoweringPass::rewriteArithmetic( MachineBasicBlock &MBB = *MI.getParent(); // Insert an instruction that will set the flag back to the desired value. - unsigned TmpReg = MRI->createVirtualRegister(PromoteRC); + Register TmpReg = MRI->createVirtualRegister(PromoteRC); auto AddI = BuildMI(MBB, MI.getIterator(), MI.getDebugLoc(), TII->get(X86::ADD8ri)) .addDef(TmpReg, RegState::Dead) @@ -975,7 +975,7 @@ void X86FlagsCopyLoweringPass::rewriteSetCarryExtended( // Now we need to turn this into a bitmask. We do this by subtracting it from // zero. - unsigned ZeroReg = MRI->createVirtualRegister(&X86::GR32RegClass); + Register ZeroReg = MRI->createVirtualRegister(&X86::GR32RegClass); BuildMI(MBB, SetPos, SetLoc, TII->get(X86::MOV32r0), ZeroReg); ZeroReg = AdjustReg(ZeroReg); @@ -1000,7 +1000,7 @@ void X86FlagsCopyLoweringPass::rewriteSetCarryExtended( default: llvm_unreachable("Invalid SETB_C* opcode!"); } - unsigned ResultReg = MRI->createVirtualRegister(&SetBRC); + Register ResultReg = MRI->createVirtualRegister(&SetBRC); BuildMI(MBB, SetPos, SetLoc, TII->get(Sub), ResultReg) .addReg(ZeroReg) .addReg(ExtCondReg); diff --git a/lib/Target/X86/X86FloatingPoint.cpp b/lib/Target/X86/X86FloatingPoint.cpp index 074cf21d03f..fcfb5bc9131 100644 --- a/lib/Target/X86/X86FloatingPoint.cpp +++ b/lib/Target/X86/X86FloatingPoint.cpp @@ -288,8 +288,8 @@ namespace { // Check if a COPY instruction is using FP registers. static bool isFPCopy(MachineInstr &MI) { - unsigned DstReg = MI.getOperand(0).getReg(); - unsigned SrcReg = MI.getOperand(1).getReg(); + Register DstReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); return X86::RFP80RegClass.contains(DstReg) || X86::RFP80RegClass.contains(SrcReg); @@ -313,7 +313,7 @@ FunctionPass *llvm::createX86FloatingPointStackifierPass() { return new FPS(); } /// For example, this returns 3 for X86::FP3. 
static unsigned getFPReg(const MachineOperand &MO) { assert(MO.isReg() && "Expected an FP register!"); - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); assert(Reg >= X86::FP0 && Reg <= X86::FP6 && "Expected FP register!"); return Reg - X86::FP0; } diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp index 9d8cb89dbef..5c0eddc8455 100644 --- a/lib/Target/X86/X86FrameLowering.cpp +++ b/lib/Target/X86/X86FrameLowering.cpp @@ -176,7 +176,7 @@ static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB, MachineOperand &MO = MBBI->getOperand(i); if (!MO.isReg() || MO.isDef()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (!Reg) continue; for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) @@ -216,7 +216,7 @@ flagsNeedToBePreservedBeforeTheTerminators(const MachineBasicBlock &MBB) { for (const MachineOperand &MO : MI.operands()) { if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Reg != X86::EFLAGS) continue; @@ -1005,7 +1005,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF, const unsigned MachineFramePtr = STI.isTarget64BitILP32() ? getX86SubSuperRegister(FramePtr, 64) : FramePtr; - unsigned BasePtr = TRI->getBaseRegister(); + Register BasePtr = TRI->getBaseRegister(); bool HasWinCFI = false; // Debug location must be unknown since the first debug location is used @@ -1196,7 +1196,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF, (MBBI->getOpcode() == X86::PUSH32r || MBBI->getOpcode() == X86::PUSH64r)) { PushedRegs = true; - unsigned Reg = MBBI->getOperand(0).getReg(); + Register Reg = MBBI->getOperand(0).getReg(); ++MBBI; if (!HasFP && NeedsDwarfCFI) { @@ -2007,7 +2007,7 @@ bool X86FrameLowering::assignCalleeSavedSpillSlots( // Since emitPrologue and emitEpilogue will handle spilling and restoring of // the frame register, we can delete it from CSI list and not have to worry // about avoiding it later. - unsigned FPReg = TRI->getFrameRegister(MF); + Register FPReg = TRI->getFrameRegister(MF); for (unsigned i = 0; i < CSI.size(); ++i) { if (TRI->regsOverlap(CSI[i].getReg(),FPReg)) { CSI.erase(CSI.begin() + i); @@ -2275,7 +2275,7 @@ void X86FrameLowering::determineCalleeSaves(MachineFunction &MF, // Spill the BasePtr if it's used. 
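The frame-lowering hunks above also assign into Register from accessors such as getBaseRegister() and getFrameRegister(), which at this point may still return a plain unsigned, and then compare the result against physical-register enumerators like X86::EFLAGS or X86::RBP. A standalone sketch of both directions of that interoperation, with illustrative enumerator values rather than X86's real ones:

// Standalone sketch; the enumerators and getFrameRegister() model are
// inventions of this example, and Register is a simplified stand-in.
#include <cassert>

struct Register {
  unsigned Reg;
  constexpr Register(unsigned Val = 0) : Reg(Val) {} // implicit from unsigned
  constexpr operator unsigned() const { return Reg; }
};

enum PhysReg : unsigned { NoRegister = 0, EBP = 19, RBP = 20, EFLAGS = 25 };

// Stand-in for an accessor that still returns a plain unsigned.
unsigned getFrameRegister(bool Is64Bit) { return Is64Bit ? RBP : EBP; }

int main() {
  Register FrameReg = getFrameRegister(/*Is64Bit=*/true); // unsigned -> Register
  assert(FrameReg == RBP);    // comparison goes through operator unsigned()
  assert(FrameReg != EFLAGS);
  return 0;
}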
if (TRI->hasBasePointer(MF)){ - unsigned BasePtr = TRI->getBaseRegister(); + Register BasePtr = TRI->getBaseRegister(); if (STI.isTarget64BitILP32()) BasePtr = getX86SubSuperRegister(BasePtr, 64); SavedRegs.set(BasePtr); @@ -2987,8 +2987,8 @@ MachineBasicBlock::iterator X86FrameLowering::restoreWin32EHStackPointers( "restoring EBP/ESI on non-32-bit target"); MachineFunction &MF = *MBB.getParent(); - unsigned FramePtr = TRI->getFrameRegister(MF); - unsigned BasePtr = TRI->getBaseRegister(); + Register FramePtr = TRI->getFrameRegister(MF); + Register BasePtr = TRI->getBaseRegister(); WinEHFuncInfo &FuncInfo = *MF.getWinEHFuncInfo(); X86MachineFunctionInfo *X86FI = MF.getInfo(); MachineFrameInfo &MFI = MF.getFrameInfo(); diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index fa6cc53df3d..297fa942c7e 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -4361,7 +4361,7 @@ bool X86TargetLowering::IsEligibleForTailCallOptimization( CCValAssign &VA = ArgLocs[i]; if (!VA.isRegLoc()) continue; - unsigned Reg = VA.getLocReg(); + Register Reg = VA.getLocReg(); switch (Reg) { default: break; case X86::EAX: case X86::EDX: case X86::ECX: @@ -21724,7 +21724,7 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, } const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy); - unsigned Vreg = MRI.createVirtualRegister(AddrRegClass); + Register Vreg = MRI.createVirtualRegister(AddrRegClass); Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size); Result = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain, DAG.getRegister(Vreg, SPTy)); @@ -21734,7 +21734,7 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, MF.getInfo()->setHasWinAlloca(true); const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo(); - unsigned SPReg = RegInfo->getStackRegister(); + Register SPReg = RegInfo->getStackRegister(); SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy); Chain = SP.getValue(1); @@ -23730,7 +23730,7 @@ SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { EVT PtrVT = getPointerTy(DAG.getDataLayout()); const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo(); - unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction()); + Register FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction()); assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) || (FrameReg == X86::EBP && PtrVT == MVT::i32)) && "Invalid Frame Register!"); @@ -27668,7 +27668,7 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N, const X86RegisterInfo *TRI = Subtarget.getRegisterInfo(); SDValue Result; SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); - unsigned BasePtr = TRI->getBaseRegister(); + Register BasePtr = TRI->getBaseRegister(); MachineMemOperand *MMO = cast(N)->getMemOperand(); if (TRI->hasBasePointer(DAG.getMachineFunction()) && (BasePtr == X86::RBX || BasePtr == X86::EBX)) { @@ -28564,10 +28564,10 @@ static MachineBasicBlock *emitXBegin(MachineInstr &MI, MachineBasicBlock *MBB, sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); MachineRegisterInfo &MRI = MF->getRegInfo(); - unsigned DstReg = MI.getOperand(0).getReg(); + Register DstReg = MI.getOperand(0).getReg(); const TargetRegisterClass *RC = MRI.getRegClass(DstReg); - unsigned mainDstReg = MRI.createVirtualRegister(RC); - unsigned fallDstReg = MRI.createVirtualRegister(RC); + Register mainDstReg = MRI.createVirtualRegister(RC); + Register fallDstReg = MRI.createVirtualRegister(RC); // thisMBB: // xbegin fallMBB @@ -28621,7 +28621,7 @@ 
X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI, static_assert(X86::AddrNumOperands == 5, "VAARG_64 assumes 5 address operands"); - unsigned DestReg = MI.getOperand(0).getReg(); + Register DestReg = MI.getOperand(0).getReg(); MachineOperand &Base = MI.getOperand(1); MachineOperand &Scale = MI.getOperand(2); MachineOperand &Index = MI.getOperand(3); @@ -28757,7 +28757,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI, assert(OffsetReg != 0); // Read the reg_save_area address. - unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass); + Register RegSaveReg = MRI.createVirtualRegister(AddrRegClass); BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg) .add(Base) .add(Scale) @@ -28767,8 +28767,8 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI, .setMemRefs(LoadOnlyMMO); // Zero-extend the offset - unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass); - BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64) + Register OffsetReg64 = MRI.createVirtualRegister(AddrRegClass); + BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64) .addImm(0) .addReg(OffsetReg) .addImm(X86::sub_32bit); @@ -28779,7 +28779,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI, .addReg(RegSaveReg); // Compute the offset for the next argument - unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass); + Register NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass); BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg) .addReg(OffsetReg) .addImm(UseFPOffset ? 16 : 8); @@ -28804,7 +28804,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI, // // Load the overflow_area address into a register. - unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass); + Register OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass); BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg) .add(Base) .add(Scale) @@ -28818,7 +28818,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI, if (NeedsAlign) { // Align the overflow address assert(isPowerOf2_32(Align) && "Alignment must be a power of 2"); - unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass); + Register TmpReg = MRI.createVirtualRegister(AddrRegClass); // aligned_addr = (addr + (align-1)) & ~(align-1) BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg) @@ -28835,7 +28835,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI, // Compute the next overflow address after this argument. 
// (the overflow address should be kept 8-byte aligned) - unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass); + Register NextAddrReg = MRI.createVirtualRegister(AddrRegClass); BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg) .addReg(OverflowDestReg) .addImm(ArgSizeA8); @@ -28899,7 +28899,7 @@ MachineBasicBlock *X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter( const TargetInstrInfo *TII = Subtarget.getInstrInfo(); DebugLoc DL = MI.getDebugLoc(); - unsigned CountReg = MI.getOperand(0).getReg(); + Register CountReg = MI.getOperand(0).getReg(); int64_t RegSaveFrameIndex = MI.getOperand(1).getImm(); int64_t VarArgsFPOffset = MI.getOperand(2).getImm(); @@ -29036,9 +29036,9 @@ static MachineInstrBuilder createPHIsForCMOVsInSinkBB( MachineInstrBuilder MIB; for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) { - unsigned DestReg = MIIt->getOperand(0).getReg(); - unsigned Op1Reg = MIIt->getOperand(1).getReg(); - unsigned Op2Reg = MIIt->getOperand(2).getReg(); + Register DestReg = MIIt->getOperand(0).getReg(); + Register Op1Reg = MIIt->getOperand(1).getReg(); + Register Op2Reg = MIIt->getOperand(2).getReg(); // If this CMOV we are generating is the opposite condition from // the jump we generated, then we have to swap the operands for the @@ -29196,9 +29196,9 @@ X86TargetLowering::EmitLoweredCascadedSelect(MachineInstr &FirstCMOV, // SinkMBB: // %Result = phi [ %FalseValue, SecondInsertedMBB ], [ %TrueValue, ThisMBB ] - unsigned DestReg = FirstCMOV.getOperand(0).getReg(); - unsigned Op1Reg = FirstCMOV.getOperand(1).getReg(); - unsigned Op2Reg = FirstCMOV.getOperand(2).getReg(); + Register DestReg = FirstCMOV.getOperand(0).getReg(); + Register Op1Reg = FirstCMOV.getOperand(1).getReg(); + Register Op2Reg = FirstCMOV.getOperand(2).getReg(); MachineInstrBuilder MIB = BuildMI(*SinkMBB, SinkMBB->begin(), DL, TII->get(X86::PHI), DestReg) .addReg(Op1Reg) @@ -29716,7 +29716,7 @@ X86TargetLowering::EmitLoweredRetpoline(MachineInstr &MI, // call the retpoline thunk. DebugLoc DL = MI.getDebugLoc(); const X86InstrInfo *TII = Subtarget.getInstrInfo(); - unsigned CalleeVReg = MI.getOperand(0).getReg(); + Register CalleeVReg = MI.getOperand(0).getReg(); unsigned Opc = getOpcodeForRetpoline(MI.getOpcode()); // Find an available scratch register to hold the callee. On 64-bit, we can @@ -29789,7 +29789,7 @@ void X86TargetLowering::emitSetJmpShadowStackFix(MachineInstr &MI, // Initialize a register with zero. MVT PVT = getPointerTy(MF->getDataLayout()); const TargetRegisterClass *PtrRC = getRegClassFor(PVT); - unsigned ZReg = MRI.createVirtualRegister(PtrRC); + Register ZReg = MRI.createVirtualRegister(PtrRC); unsigned XorRROpc = (PVT == MVT::i64) ? X86::XOR64rr : X86::XOR32rr; BuildMI(*MBB, MI, DL, TII->get(XorRROpc)) .addDef(ZReg) @@ -29797,7 +29797,7 @@ void X86TargetLowering::emitSetJmpShadowStackFix(MachineInstr &MI, .addReg(ZReg, RegState::Undef); // Read the current SSP Register value to the zeroed register. - unsigned SSPCopyReg = MRI.createVirtualRegister(PtrRC); + Register SSPCopyReg = MRI.createVirtualRegister(PtrRC); unsigned RdsspOpc = (PVT == MVT::i64) ? 
X86::RDSSPQ : X86::RDSSPD; BuildMI(*MBB, MI, DL, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg); @@ -29841,8 +29841,8 @@ X86TargetLowering::emitEHSjLjSetJmp(MachineInstr &MI, const TargetRegisterClass *RC = MRI.getRegClass(DstReg); assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!"); (void)TRI; - unsigned mainDstReg = MRI.createVirtualRegister(RC); - unsigned restoreDstReg = MRI.createVirtualRegister(RC); + Register mainDstReg = MRI.createVirtualRegister(RC); + Register restoreDstReg = MRI.createVirtualRegister(RC); MemOpndSlot = CurOp; @@ -29956,8 +29956,8 @@ X86TargetLowering::emitEHSjLjSetJmp(MachineInstr &MI, Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64(); X86MachineFunctionInfo *X86FI = MF->getInfo(); X86FI->setRestoreBasePointer(MF); - unsigned FramePtr = RegInfo->getFrameRegister(*MF); - unsigned BasePtr = RegInfo->getBaseRegister(); + Register FramePtr = RegInfo->getFrameRegister(*MF); + Register BasePtr = RegInfo->getBaseRegister(); unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm; addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr), FramePtr, true, X86FI->getRestoreBasePointerOffset()) @@ -30039,7 +30039,7 @@ X86TargetLowering::emitLongJmpShadowStackFix(MachineInstr &MI, MBB->addSuccessor(checkSspMBB); // Initialize a register with zero. - unsigned ZReg = MRI.createVirtualRegister(PtrRC); + Register ZReg = MRI.createVirtualRegister(PtrRC); unsigned XorRROpc = (PVT == MVT::i64) ? X86::XOR64rr : X86::XOR32rr; BuildMI(checkSspMBB, DL, TII->get(XorRROpc)) .addDef(ZReg) @@ -30047,7 +30047,7 @@ X86TargetLowering::emitLongJmpShadowStackFix(MachineInstr &MI, .addReg(ZReg, RegState::Undef); // Read the current SSP Register value to the zeroed register. - unsigned SSPCopyReg = MRI.createVirtualRegister(PtrRC); + Register SSPCopyReg = MRI.createVirtualRegister(PtrRC); unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD; BuildMI(checkSspMBB, DL, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg); @@ -30062,7 +30062,7 @@ X86TargetLowering::emitLongJmpShadowStackFix(MachineInstr &MI, checkSspMBB->addSuccessor(fallMBB); // Reload the previously saved SSP register value. - unsigned PrevSSPReg = MRI.createVirtualRegister(PtrRC); + Register PrevSSPReg = MRI.createVirtualRegister(PtrRC); unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm; const int64_t SPPOffset = 3 * PVT.getStoreSize(); MachineInstrBuilder MIB = @@ -30080,7 +30080,7 @@ X86TargetLowering::emitLongJmpShadowStackFix(MachineInstr &MI, MIB.setMemRefs(MMOs); // Subtract the current SSP from the previous SSP. - unsigned SspSubReg = MRI.createVirtualRegister(PtrRC); + Register SspSubReg = MRI.createVirtualRegister(PtrRC); unsigned SubRROpc = (PVT == MVT::i64) ? X86::SUB64rr : X86::SUB32rr; BuildMI(fallMBB, DL, TII->get(SubRROpc), SspSubReg) .addReg(PrevSSPReg) @@ -30094,7 +30094,7 @@ X86TargetLowering::emitLongJmpShadowStackFix(MachineInstr &MI, // Shift right by 2/3 for 32/64 because incssp multiplies the argument by 4/8. unsigned ShrRIOpc = (PVT == MVT::i64) ? X86::SHR64ri : X86::SHR32ri; unsigned Offset = (PVT == MVT::i64) ? 3 : 2; - unsigned SspFirstShrReg = MRI.createVirtualRegister(PtrRC); + Register SspFirstShrReg = MRI.createVirtualRegister(PtrRC); BuildMI(fixShadowMBB, DL, TII->get(ShrRIOpc), SspFirstShrReg) .addReg(SspSubReg) .addImm(Offset); @@ -30104,7 +30104,7 @@ X86TargetLowering::emitLongJmpShadowStackFix(MachineInstr &MI, BuildMI(fixShadowMBB, DL, TII->get(IncsspOpc)).addReg(SspFirstShrReg); // Reset the lower 8 bits. 
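The shadow-stack hunks above repeatedly create a fresh virtual register and immediately hand it to BuildMI() as the destination. The sketch below models that round trip with simplified stand-ins for the register-info and instruction-builder APIs (MockRegInfo, MockInstr and buildInstr are inventions of this sketch, not LLVM names):

// Standalone sketch of the createVirtualRegister() pattern, not the LLVM APIs.
#include <cassert>

struct Register {
  unsigned Reg;
  constexpr Register(unsigned Val = 0) : Reg(Val) {}
  constexpr operator unsigned() const { return Reg; }
};

class MockRegInfo {
  unsigned NextVirt = 0x80000000u; // virtual numbers modelled with the top bit set
public:
  Register createVirtualRegister() { return Register(NextVirt++); }
};

// Stand-in for a BuildMI-style helper that still records a plain unsigned.
struct MockInstr { unsigned DefReg; };
MockInstr buildInstr(unsigned DefReg) { return MockInstr{DefReg}; }

int main() {
  MockRegInfo MRI;
  Register Tmp = MRI.createVirtualRegister(); // was: unsigned Tmp = ...
  MockInstr MI = buildInstr(Tmp);             // Register decays to unsigned here
  assert(MI.DefReg == unsigned(Tmp));
  return 0;
}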
- unsigned SspSecondShrReg = MRI.createVirtualRegister(PtrRC); + Register SspSecondShrReg = MRI.createVirtualRegister(PtrRC); BuildMI(fixShadowMBB, DL, TII->get(ShrRIOpc), SspSecondShrReg) .addReg(SspFirstShrReg) .addImm(8); @@ -30116,12 +30116,12 @@ X86TargetLowering::emitLongJmpShadowStackFix(MachineInstr &MI, // Do a single shift left. unsigned ShlR1Opc = (PVT == MVT::i64) ? X86::SHL64r1 : X86::SHL32r1; - unsigned SspAfterShlReg = MRI.createVirtualRegister(PtrRC); + Register SspAfterShlReg = MRI.createVirtualRegister(PtrRC); BuildMI(fixShadowLoopPrepareMBB, DL, TII->get(ShlR1Opc), SspAfterShlReg) .addReg(SspSecondShrReg); // Save the value 128 to a register (will be used next with incssp). - unsigned Value128InReg = MRI.createVirtualRegister(PtrRC); + Register Value128InReg = MRI.createVirtualRegister(PtrRC); unsigned MovRIOpc = (PVT == MVT::i64) ? X86::MOV64ri32 : X86::MOV32ri; BuildMI(fixShadowLoopPrepareMBB, DL, TII->get(MovRIOpc), Value128InReg) .addImm(128); @@ -30129,8 +30129,8 @@ X86TargetLowering::emitLongJmpShadowStackFix(MachineInstr &MI, // Since incssp only looks at the lower 8 bits, we might need to do several // iterations of incssp until we finish fixing the shadow stack. - unsigned DecReg = MRI.createVirtualRegister(PtrRC); - unsigned CounterReg = MRI.createVirtualRegister(PtrRC); + Register DecReg = MRI.createVirtualRegister(PtrRC); + Register CounterReg = MRI.createVirtualRegister(PtrRC); BuildMI(fixShadowLoopMBB, DL, TII->get(X86::PHI), CounterReg) .addReg(SspAfterShlReg) .addMBB(fixShadowLoopPrepareMBB) @@ -30170,11 +30170,11 @@ X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI, const TargetRegisterClass *RC = (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass; - unsigned Tmp = MRI.createVirtualRegister(RC); + Register Tmp = MRI.createVirtualRegister(RC); // Since FP is only updated here but NOT referenced, it's treated as GPR. const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo(); unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP; - unsigned SP = RegInfo->getStackRegister(); + Register SP = RegInfo->getStackRegister(); MachineInstrBuilder MIB; @@ -30372,8 +30372,8 @@ X86TargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI, X86MachineFunctionInfo *MFI = MF->getInfo(); MFI->setRestoreBasePointer(MF); - unsigned FP = RI.getFrameRegister(*MF); - unsigned BP = RI.getBaseRegister(); + Register FP = RI.getFrameRegister(*MF); + Register BP = RI.getBaseRegister(); unsigned Op = FPIs64Bit ? X86::MOV64rm : X86::MOV32rm; addRegOffset(BuildMI(DispatchBB, DL, TII->get(Op), BP), FP, true, MFI->getRestoreBasePointerOffset()) @@ -30384,7 +30384,7 @@ X86TargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI, } // IReg is used as an index in a memory operand and therefore can't be SP - unsigned IReg = MRI->createVirtualRegister(&X86::GR32_NOSPRegClass); + Register IReg = MRI->createVirtualRegister(&X86::GR32_NOSPRegClass); addFrameReference(BuildMI(DispatchBB, DL, TII->get(X86::MOV32rm), IReg), FI, Subtarget.is64Bit() ? 
8 : 4); BuildMI(DispatchBB, DL, TII->get(X86::CMP32ri)) @@ -30393,8 +30393,8 @@ X86TargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI, BuildMI(DispatchBB, DL, TII->get(X86::JCC_1)).addMBB(TrapBB).addImm(X86::COND_AE); if (Subtarget.is64Bit()) { - unsigned BReg = MRI->createVirtualRegister(&X86::GR64RegClass); - unsigned IReg64 = MRI->createVirtualRegister(&X86::GR64_NOSPRegClass); + Register BReg = MRI->createVirtualRegister(&X86::GR64RegClass); + Register IReg64 = MRI->createVirtualRegister(&X86::GR64_NOSPRegClass); // leaq .LJTI0_0(%rip), BReg BuildMI(DispContBB, DL, TII->get(X86::LEA64r), BReg) @@ -30420,9 +30420,9 @@ X86TargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI, .addReg(0); break; case MachineJumpTableInfo::EK_LabelDifference32: { - unsigned OReg = MRI->createVirtualRegister(&X86::GR32RegClass); - unsigned OReg64 = MRI->createVirtualRegister(&X86::GR64RegClass); - unsigned TReg = MRI->createVirtualRegister(&X86::GR64RegClass); + Register OReg = MRI->createVirtualRegister(&X86::GR32RegClass); + Register OReg64 = MRI->createVirtualRegister(&X86::GR64RegClass); + Register TReg = MRI->createVirtualRegister(&X86::GR64RegClass); // movl (BReg,IReg64,4), OReg BuildMI(DispContBB, DL, TII->get(X86::MOV32rm), OReg) @@ -30616,20 +30616,18 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, TII->get(X86::FNSTCW16m)), OrigCWFrameIdx); // Load the old value of the control word... - unsigned OldCW = - MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass); + Register OldCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass); addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOVZX32rm16), OldCW), OrigCWFrameIdx); // OR 0b11 into bit 10 and 11. 0b11 is the encoding for round toward zero. - unsigned NewCW = - MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass); + Register NewCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass); BuildMI(*BB, MI, DL, TII->get(X86::OR32ri), NewCW) .addReg(OldCW, RegState::Kill).addImm(0xC00); // Extract to 16 bits. - unsigned NewCW16 = - MF->getRegInfo().createVirtualRegister(&X86::GR16RegClass); + Register NewCW16 = + MF->getRegInfo().createVirtualRegister(&X86::GR16RegClass); BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), NewCW16) .addReg(NewCW, RegState::Kill, X86::sub_16bit); @@ -30733,7 +30731,7 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, MachineRegisterInfo &MRI = MF->getRegInfo(); MVT SPTy = getPointerTy(MF->getDataLayout()); const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy); - unsigned computedAddrVReg = MRI.createVirtualRegister(AddrRegClass); + Register computedAddrVReg = MRI.createVirtualRegister(AddrRegClass); X86AddressMode AM = getAddressFromInstr(&MI, 0); // Regalloc does not need any help when the memory operand of CMPXCHG8B @@ -44795,7 +44793,7 @@ void X86TargetLowering::insertCopiesSplitCSR( else llvm_unreachable("Unexpected register class in CSRsViaCopy!"); - unsigned NewVR = MRI->createVirtualRegister(RC); + Register NewVR = MRI->createVirtualRegister(RC); // Create copy from CSR to a virtual register. 
// FIXME: this currently does not emit CFI pseudo-instructions, it works // fine for CXX_FAST_TLS since the C++-style TLS access functions should be diff --git a/lib/Target/X86/X86InsertPrefetch.cpp b/lib/Target/X86/X86InsertPrefetch.cpp index 02ae73706a3..70225f0f4b2 100644 --- a/lib/Target/X86/X86InsertPrefetch.cpp +++ b/lib/Target/X86/X86InsertPrefetch.cpp @@ -79,8 +79,8 @@ ErrorOr getPrefetchHints(const FunctionSamples *TopSamples, // The prefetch instruction can't take memory operands involving vector // registers. bool IsMemOpCompatibleWithPrefetch(const MachineInstr &MI, int Op) { - unsigned BaseReg = MI.getOperand(Op + X86::AddrBaseReg).getReg(); - unsigned IndexReg = MI.getOperand(Op + X86::AddrIndexReg).getReg(); + Register BaseReg = MI.getOperand(Op + X86::AddrBaseReg).getReg(); + Register IndexReg = MI.getOperand(Op + X86::AddrIndexReg).getReg(); return (BaseReg == 0 || X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg) || X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg)) && diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp index a5cc2d1d0be..7e525d797a2 100644 --- a/lib/Target/X86/X86InstrInfo.cpp +++ b/lib/Target/X86/X86InstrInfo.cpp @@ -561,7 +561,7 @@ bool X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI, MI.getOperand(1 + X86::AddrIndexReg).isReg() && MI.getOperand(1 + X86::AddrIndexReg).getReg() == 0 && MI.isDereferenceableInvariantLoad(AA)) { - unsigned BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg(); + Register BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg(); if (BaseReg == 0 || BaseReg == X86::RIP) return true; // Allow re-materialization of PIC load. @@ -583,7 +583,7 @@ bool X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI, // lea fi#, lea GV, etc. are all rematerializable. if (!MI.getOperand(1 + X86::AddrBaseReg).isReg()) return true; - unsigned BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg(); + Register BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg(); if (BaseReg == 0) return true; // Allow re-materialization of lea PICBase + x. @@ -675,7 +675,7 @@ bool X86InstrInfo::classifyLEAReg(MachineInstr &MI, const MachineOperand &Src, RC = Opc != X86::LEA32r ? &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass; } - unsigned SrcReg = Src.getReg(); + Register SrcReg = Src.getReg(); // For both LEA64 and LEA32 the register already has essentially the right // type (32-bit or 64-bit) we may just need to forbid SP. @@ -740,8 +740,8 @@ MachineInstr *X86InstrInfo::convertToThreeAddressWithLEA( return nullptr; unsigned Opcode = X86::LEA64_32r; - unsigned InRegLEA = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass); - unsigned OutRegLEA = RegInfo.createVirtualRegister(&X86::GR32RegClass); + Register InRegLEA = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass); + Register OutRegLEA = RegInfo.createVirtualRegister(&X86::GR32RegClass); // Build and insert into an implicit UNDEF value. This is OK because // we will be shifting and then extracting the lower 8/16-bits. @@ -751,8 +751,8 @@ MachineInstr *X86InstrInfo::convertToThreeAddressWithLEA( // But testing has shown this *does* help performance in 64-bit mode (at // least on modern x86 machines). 
MachineBasicBlock::iterator MBBI = MI.getIterator(); - unsigned Dest = MI.getOperand(0).getReg(); - unsigned Src = MI.getOperand(1).getReg(); + Register Dest = MI.getOperand(0).getReg(); + Register Src = MI.getOperand(1).getReg(); bool IsDead = MI.getOperand(0).isDead(); bool IsKill = MI.getOperand(1).isKill(); unsigned SubReg = Is8BitOp ? X86::sub_8bit : X86::sub_16bit; @@ -794,7 +794,7 @@ MachineInstr *X86InstrInfo::convertToThreeAddressWithLEA( case X86::ADD8rr_DB: case X86::ADD16rr: case X86::ADD16rr_DB: { - unsigned Src2 = MI.getOperand(2).getReg(); + Register Src2 = MI.getOperand(2).getReg(); bool IsKill2 = MI.getOperand(2).isKill(); assert(!MI.getOperand(2).isUndef() && "Undef op doesn't need optimization"); unsigned InRegLEA2 = 0; @@ -1859,7 +1859,7 @@ X86InstrInfo::findThreeSrcCommutedOpIndices(const MachineInstr &MI, // CommutableOpIdx2 is well defined now. Let's choose another commutable // operand and assign its index to CommutableOpIdx1. - unsigned Op2Reg = MI.getOperand(CommutableOpIdx2).getReg(); + Register Op2Reg = MI.getOperand(CommutableOpIdx2).getReg(); unsigned CommutableOpIdx1; for (CommutableOpIdx1 = LastCommutableVecOp; @@ -3755,7 +3755,7 @@ MachineInstr *X86InstrInfo::optimizeLoadInstr(MachineInstr &MI, MachineOperand &MO = MI.getOperand(i); if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); + Register Reg = MO.getReg(); if (Reg != FoldAsLoadDefReg) continue; // Do not fold if we have a subreg use or a def. @@ -3785,7 +3785,7 @@ MachineInstr *X86InstrInfo::optimizeLoadInstr(MachineInstr &MI, static bool Expand2AddrUndef(MachineInstrBuilder &MIB, const MCInstrDesc &Desc) { assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction."); - unsigned Reg = MIB->getOperand(0).getReg(); + Register Reg = MIB->getOperand(0).getReg(); MIB->setDesc(Desc); // MachineInstr::addOperand() will insert explicit operands before any @@ -3815,7 +3815,7 @@ static bool expandMOV32r1(MachineInstrBuilder &MIB, const TargetInstrInfo &TII, bool MinusOne) { MachineBasicBlock &MBB = *MIB->getParent(); DebugLoc DL = MIB->getDebugLoc(); - unsigned Reg = MIB->getOperand(0).getReg(); + Register Reg = MIB->getOperand(0).getReg(); // Insert the XOR. BuildMI(MBB, MIB.getInstr(), DL, TII.get(X86::XOR32rr), Reg) @@ -3891,7 +3891,7 @@ static void expandLoadStackGuard(MachineInstrBuilder &MIB, const TargetInstrInfo &TII) { MachineBasicBlock &MBB = *MIB->getParent(); DebugLoc DL = MIB->getDebugLoc(); - unsigned Reg = MIB->getOperand(0).getReg(); + Register Reg = MIB->getOperand(0).getReg(); const GlobalValue *GV = cast((*MIB->memoperands_begin())->getValue()); auto Flags = MachineMemOperand::MOLoad | @@ -3929,7 +3929,7 @@ static bool expandNOVLXLoad(MachineInstrBuilder &MIB, const MCInstrDesc &LoadDesc, const MCInstrDesc &BroadcastDesc, unsigned SubIdx) { - unsigned DestReg = MIB->getOperand(0).getReg(); + Register DestReg = MIB->getOperand(0).getReg(); // Check if DestReg is XMM16-31 or YMM16-31. if (TRI->getEncodingValue(DestReg) < 16) { // We can use a normal VEX encoded load. @@ -3952,7 +3952,7 @@ static bool expandNOVLXStore(MachineInstrBuilder &MIB, const MCInstrDesc &StoreDesc, const MCInstrDesc &ExtractDesc, unsigned SubIdx) { - unsigned SrcReg = MIB->getOperand(X86::AddrNumOperands).getReg(); + Register SrcReg = MIB->getOperand(X86::AddrNumOperands).getReg(); // Check if DestReg is XMM16-31 or YMM16-31. if (TRI->getEncodingValue(SrcReg) < 16) { // We can use a normal VEX encoded store. 
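Note what the hunks above deliberately leave alone: values such as SubReg, SubIdx and the sub_8bit/sub_16bit enumerators are subregister indices, names for a slice of a register rather than registers themselves, so they stay unsigned while Dest and Src become Register. A standalone sketch of that distinction, using an invented getSubReg() model with made-up index values:

// Standalone sketch; the index values and getSubReg() arithmetic are
// illustrative only, and Register is a simplified stand-in.
#include <cassert>

struct Register {
  unsigned Reg;
  constexpr Register(unsigned Val = 0) : Reg(Val) {}
  constexpr operator unsigned() const { return Reg; }
};

enum SubRegIndex : unsigned { sub_8bit = 1, sub_16bit = 2, sub_32bit = 3 };

// Stand-in for a (register, index) -> register mapping: the result is
// naturally a Register, while the index stays a raw unsigned.
Register getSubReg(Register Reg, unsigned Idx) {
  return Register(unsigned(Reg) * 10 + Idx);
}

int main() {
  Register Dest(4);
  unsigned SubIdx = sub_32bit;            // an index, deliberately left unsigned
  Register Low = getSubReg(Dest, SubIdx); // the mapped register is a Register
  assert(unsigned(Low) == 43);
  return 0;
}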
@@ -4012,8 +4012,8 @@ bool X86InstrInfo::expandPostRAPseudo(MachineInstr &MI) const { case X86::AVX_SET0: { assert(HasAVX && "AVX not supported"); const TargetRegisterInfo *TRI = &getRegisterInfo(); - unsigned SrcReg = MIB->getOperand(0).getReg(); - unsigned XReg = TRI->getSubReg(SrcReg, X86::sub_xmm); + Register SrcReg = MIB->getOperand(0).getReg(); + Register XReg = TRI->getSubReg(SrcReg, X86::sub_xmm); MIB->getOperand(0).setReg(XReg); Expand2AddrUndef(MIB, get(X86::VXORPSrr)); MIB.addReg(SrcReg, RegState::ImplicitDefine); @@ -4023,7 +4023,7 @@ bool X86InstrInfo::expandPostRAPseudo(MachineInstr &MI) const { case X86::AVX512_FsFLD0SS: case X86::AVX512_FsFLD0SD: { bool HasVLX = Subtarget.hasVLX(); - unsigned SrcReg = MIB->getOperand(0).getReg(); + Register SrcReg = MIB->getOperand(0).getReg(); const TargetRegisterInfo *TRI = &getRegisterInfo(); if (HasVLX || TRI->getEncodingValue(SrcReg) < 16) return Expand2AddrUndef(MIB, @@ -4037,10 +4037,10 @@ bool X86InstrInfo::expandPostRAPseudo(MachineInstr &MI) const { case X86::AVX512_256_SET0: case X86::AVX512_512_SET0: { bool HasVLX = Subtarget.hasVLX(); - unsigned SrcReg = MIB->getOperand(0).getReg(); + Register SrcReg = MIB->getOperand(0).getReg(); const TargetRegisterInfo *TRI = &getRegisterInfo(); if (HasVLX || TRI->getEncodingValue(SrcReg) < 16) { - unsigned XReg = TRI->getSubReg(SrcReg, X86::sub_xmm); + Register XReg = TRI->getSubReg(SrcReg, X86::sub_xmm); MIB->getOperand(0).setReg(XReg); Expand2AddrUndef(MIB, get(HasVLX ? X86::VPXORDZ128rr : X86::VXORPSrr)); @@ -4060,14 +4060,14 @@ bool X86InstrInfo::expandPostRAPseudo(MachineInstr &MI) const { case X86::AVX2_SETALLONES: return Expand2AddrUndef(MIB, get(X86::VPCMPEQDYrr)); case X86::AVX1_SETALLONES: { - unsigned Reg = MIB->getOperand(0).getReg(); + Register Reg = MIB->getOperand(0).getReg(); // VCMPPSYrri with an immediate 0xf should produce VCMPTRUEPS. MIB->setDesc(get(X86::VCMPPSYrri)); MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef).addImm(0xf); return true; } case X86::AVX512_512_SETALLONES: { - unsigned Reg = MIB->getOperand(0).getReg(); + Register Reg = MIB->getOperand(0).getReg(); MIB->setDesc(get(X86::VPTERNLOGDZrri)); // VPTERNLOGD needs 3 register inputs and an immediate. // 0xff will return 1s for any input. @@ -4077,8 +4077,8 @@ bool X86InstrInfo::expandPostRAPseudo(MachineInstr &MI) const { } case X86::AVX512_512_SEXT_MASK_32: case X86::AVX512_512_SEXT_MASK_64: { - unsigned Reg = MIB->getOperand(0).getReg(); - unsigned MaskReg = MIB->getOperand(1).getReg(); + Register Reg = MIB->getOperand(0).getReg(); + Register MaskReg = MIB->getOperand(1).getReg(); unsigned MaskState = getRegState(MIB->getOperand(1)); unsigned Opc = (MI.getOpcode() == X86::AVX512_512_SEXT_MASK_64) ? X86::VPTERNLOGQZrrikz : X86::VPTERNLOGDZrrikz; @@ -4115,8 +4115,8 @@ bool X86InstrInfo::expandPostRAPseudo(MachineInstr &MI) const { return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSYmr), get(X86::VEXTRACTF64x4Zmr), X86::sub_ymm); case X86::MOV32ri64: { - unsigned Reg = MIB->getOperand(0).getReg(); - unsigned Reg32 = RI.getSubReg(Reg, X86::sub_32bit); + Register Reg = MIB->getOperand(0).getReg(); + Register Reg32 = RI.getSubReg(Reg, X86::sub_32bit); MI.setDesc(get(X86::MOV32ri)); MIB->getOperand(0).setReg(Reg32); MIB.addReg(Reg, RegState::ImplicitDefine); @@ -4251,7 +4251,7 @@ unsigned X86InstrInfo::getPartialRegUpdateClearance( // If MI is marked as reading Reg, the partial register update is wanted. 
   const MachineOperand &MO = MI.getOperand(0);
-  unsigned Reg = MO.getReg();
+  Register Reg = MO.getReg();
   if (Register::isVirtualRegister(Reg)) {
     if (MO.readsReg() || MI.readsVirtualRegister(Reg))
       return 0;
@@ -4464,7 +4464,7 @@ X86InstrInfo::getUndefRegClearance(const MachineInstr &MI, unsigned &OpNum,
 
 void X86InstrInfo::breakPartialRegDependency(
     MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const {
-  unsigned Reg = MI.getOperand(OpNum).getReg();
+  Register Reg = MI.getOperand(OpNum).getReg();
   // If MI kills this register, the false dependence is already broken.
   if (MI.killsRegister(Reg, TRI))
     return;
@@ -4480,7 +4480,7 @@ void X86InstrInfo::breakPartialRegDependency(
   } else if (X86::VR256RegClass.contains(Reg)) {
     // Use vxorps to clear the full ymm register.
     // It wants to read and write the xmm sub-register.
-    unsigned XReg = TRI->getSubReg(Reg, X86::sub_xmm);
+    Register XReg = TRI->getSubReg(Reg, X86::sub_xmm);
     BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::VXORPSrr), XReg)
         .addReg(XReg, RegState::Undef)
         .addReg(XReg, RegState::Undef)
@@ -4489,7 +4489,7 @@ void X86InstrInfo::breakPartialRegDependency(
   } else if (X86::GR64RegClass.contains(Reg)) {
     // Using XOR32rr because it has shorter encoding and zeros up the upper bits
     // as well.
-    unsigned XReg = TRI->getSubReg(Reg, X86::sub_32bit);
+    Register XReg = TRI->getSubReg(Reg, X86::sub_32bit);
     BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::XOR32rr), XReg)
         .addReg(XReg, RegState::Undef)
        .addReg(XReg, RegState::Undef)
@@ -4538,7 +4538,7 @@ static void updateOperandRegConstraints(MachineFunction &MF,
     // We only need to update constraints on virtual register operands.
     if (!MO.isReg())
       continue;
-    unsigned Reg = MO.getReg();
+    Register Reg = MO.getReg();
     if (!Register::isVirtualRegister(Reg))
       continue;
@@ -4821,7 +4821,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
       // If this is the special case where we use a MOV32rm to load a 32-bit
       // value and zero-extend the top bits. Change the destination register
       // to a 32-bit one.
-      unsigned DstReg = NewMI->getOperand(0).getReg();
+      Register DstReg = NewMI->getOperand(0).getReg();
       if (Register::isPhysicalRegister(DstReg))
         NewMI->getOperand(0).setReg(RI.getSubReg(DstReg, X86::sub_32bit));
       else
@@ -7585,9 +7585,8 @@ namespace {
       // movq $_GLOBAL_OFFSET_TABLE_ - .LN$pb, %rcx
       // addq %rcx, %rax
       // RAX now holds address of _GLOBAL_OFFSET_TABLE_.
-      unsigned PBReg = RegInfo.createVirtualRegister(&X86::GR64RegClass);
-      unsigned GOTReg =
-          RegInfo.createVirtualRegister(&X86::GR64RegClass);
+      Register PBReg = RegInfo.createVirtualRegister(&X86::GR64RegClass);
+      Register GOTReg = RegInfo.createVirtualRegister(&X86::GR64RegClass);
       BuildMI(FirstMBB, MBBI, DL, TII->get(X86::LEA64r), PBReg)
           .addReg(X86::RIP)
          .addImm(0)
diff --git a/lib/Target/X86/X86InstructionSelector.cpp b/lib/Target/X86/X86InstructionSelector.cpp
index 19e1d5a97ee..01620b7b64c 100644
--- a/lib/Target/X86/X86InstructionSelector.cpp
+++ b/lib/Target/X86/X86InstructionSelector.cpp
@@ -231,11 +231,11 @@ static const TargetRegisterClass *getRegClassFromGRPhysReg(unsigned Reg) {
 
 // Set X86 Opcode and constrain DestReg.
 bool X86InstructionSelector::selectCopy(MachineInstr &I,
                                         MachineRegisterInfo &MRI) const {
-  unsigned DstReg = I.getOperand(0).getReg();
+  Register DstReg = I.getOperand(0).getReg();
   const unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
   const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);
 
-  unsigned SrcReg = I.getOperand(1).getReg();
+  Register SrcReg = I.getOperand(1).getReg();
   const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
   const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
@@ -251,7 +251,7 @@ bool X86InstructionSelector::selectCopy(MachineInstr &I,
 
     if (SrcRC != DstRC) {
       // This case can be generated by ABI lowering, performe anyext
-      unsigned ExtSrc = MRI.createVirtualRegister(DstRC);
+      Register ExtSrc = MRI.createVirtualRegister(DstRC);
       BuildMI(*I.getParent(), I, I.getDebugLoc(),
               TII.get(TargetOpcode::SUBREG_TO_REG))
           .addDef(ExtSrc)
@@ -509,7 +509,7 @@ bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
   assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&
          "unexpected instruction");
 
-  const unsigned DefReg = I.getOperand(0).getReg();
+  const Register DefReg = I.getOperand(0).getReg();
   LLT Ty = MRI.getType(DefReg);
   const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
@@ -569,7 +569,7 @@ bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
   assert((Opc == TargetOpcode::G_FRAME_INDEX || Opc == TargetOpcode::G_GEP) &&
          "unexpected instruction");
 
-  const unsigned DefReg = I.getOperand(0).getReg();
+  const Register DefReg = I.getOperand(0).getReg();
   LLT Ty = MRI.getType(DefReg);
 
   // Use LEA to calculate frame index and GEP
@@ -622,7 +622,7 @@ bool X86InstructionSelector::selectGlobalValue(MachineInstr &I,
     AM.Base.Reg = X86::RIP;
   }
 
-  const unsigned DefReg = I.getOperand(0).getReg();
+  const Register DefReg = I.getOperand(0).getReg();
   LLT Ty = MRI.getType(DefReg);
   unsigned NewOpc = getLeaOP(Ty, STI);
@@ -641,7 +641,7 @@ bool X86InstructionSelector::selectConstant(MachineInstr &I,
   assert((I.getOpcode() == TargetOpcode::G_CONSTANT) &&
          "unexpected instruction");
 
-  const unsigned DefReg = I.getOperand(0).getReg();
+  const Register DefReg = I.getOperand(0).getReg();
   LLT Ty = MRI.getType(DefReg);
 
   if (RBI.getRegBank(DefReg, MRI, TRI)->getID() != X86::GPRRegBankID)
@@ -714,8 +714,8 @@ bool X86InstructionSelector::selectTruncOrPtrToInt(MachineInstr &I,
           I.getOpcode() == TargetOpcode::G_PTRTOINT) &&
          "unexpected instruction");
 
-  const unsigned DstReg = I.getOperand(0).getReg();
-  const unsigned SrcReg = I.getOperand(1).getReg();
+  const Register DstReg = I.getOperand(0).getReg();
+  const Register SrcReg = I.getOperand(1).getReg();
   const LLT DstTy = MRI.getType(DstReg);
   const LLT SrcTy = MRI.getType(SrcReg);
@@ -778,8 +778,8 @@ bool X86InstructionSelector::selectZext(MachineInstr &I,
                                         MachineFunction &MF) const {
   assert((I.getOpcode() == TargetOpcode::G_ZEXT) && "unexpected instruction");
 
-  const unsigned DstReg = I.getOperand(0).getReg();
-  const unsigned SrcReg = I.getOperand(1).getReg();
+  const Register DstReg = I.getOperand(0).getReg();
+  const Register SrcReg = I.getOperand(1).getReg();
   const LLT DstTy = MRI.getType(DstReg);
   const LLT SrcTy = MRI.getType(SrcReg);
@@ -889,8 +889,8 @@ bool X86InstructionSelector::selectAnyext(MachineInstr &I,
                                           MachineFunction &MF) const {
   assert((I.getOpcode() == TargetOpcode::G_ANYEXT) && "unexpected instruction");
 
-  const unsigned DstReg = I.getOperand(0).getReg();
-  const unsigned SrcReg = I.getOperand(1).getReg();
+  const Register DstReg = I.getOperand(0).getReg();
+  const Register SrcReg = I.getOperand(1).getReg();
   const LLT DstTy = MRI.getType(DstReg);
   const LLT SrcTy = MRI.getType(SrcReg);
@@ -949,8 +949,8 @@ bool X86InstructionSelector::selectCmp(MachineInstr &I,
   std::tie(CC, SwapArgs) = X86::getX86ConditionCode(
       (CmpInst::Predicate)I.getOperand(1).getPredicate());
 
-  unsigned LHS = I.getOperand(2).getReg();
-  unsigned RHS = I.getOperand(3).getReg();
+  Register LHS = I.getOperand(2).getReg();
+  Register RHS = I.getOperand(3).getReg();
   if (SwapArgs)
     std::swap(LHS, RHS);
@@ -995,8 +995,8 @@ bool X86InstructionSelector::selectFCmp(MachineInstr &I,
                                         MachineFunction &MF) const {
   assert((I.getOpcode() == TargetOpcode::G_FCMP) && "unexpected instruction");
 
-  unsigned LhsReg = I.getOperand(2).getReg();
-  unsigned RhsReg = I.getOperand(3).getReg();
+  Register LhsReg = I.getOperand(2).getReg();
+  Register RhsReg = I.getOperand(3).getReg();
   CmpInst::Predicate Predicate =
       (CmpInst::Predicate)I.getOperand(1).getPredicate();
@@ -1030,7 +1030,7 @@ bool X86InstructionSelector::selectFCmp(MachineInstr &I,
     break;
   }
 
-  unsigned ResultReg = I.getOperand(0).getReg();
+  Register ResultReg = I.getOperand(0).getReg();
   RBI.constrainGenericRegister(
       ResultReg,
       *getRegClass(LLT::scalar(8), *RBI.getRegBank(ResultReg, MRI, TRI)), MRI);
@@ -1040,8 +1040,8 @@ bool X86InstructionSelector::selectFCmp(MachineInstr &I,
             .addReg(LhsReg)
             .addReg(RhsReg);
 
-    unsigned FlagReg1 = MRI.createVirtualRegister(&X86::GR8RegClass);
-    unsigned FlagReg2 = MRI.createVirtualRegister(&X86::GR8RegClass);
+    Register FlagReg1 = MRI.createVirtualRegister(&X86::GR8RegClass);
+    Register FlagReg2 = MRI.createVirtualRegister(&X86::GR8RegClass);
     MachineInstr &Set1 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                   TII.get(X86::SETCCr), FlagReg1).addImm(SETFOpc[0]);
     MachineInstr &Set2 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
@@ -1086,11 +1086,11 @@ bool X86InstructionSelector::selectUadde(MachineInstr &I,
                                          MachineFunction &MF) const {
   assert((I.getOpcode() == TargetOpcode::G_UADDE) && "unexpected instruction");
 
-  const unsigned DstReg = I.getOperand(0).getReg();
-  const unsigned CarryOutReg = I.getOperand(1).getReg();
-  const unsigned Op0Reg = I.getOperand(2).getReg();
-  const unsigned Op1Reg = I.getOperand(3).getReg();
-  unsigned CarryInReg = I.getOperand(4).getReg();
+  const Register DstReg = I.getOperand(0).getReg();
+  const Register CarryOutReg = I.getOperand(1).getReg();
+  const Register Op0Reg = I.getOperand(2).getReg();
+  const Register Op1Reg = I.getOperand(3).getReg();
+  Register CarryInReg = I.getOperand(4).getReg();
 
   const LLT DstTy = MRI.getType(DstReg);
@@ -1146,8 +1146,8 @@ bool X86InstructionSelector::selectExtract(MachineInstr &I,
   assert((I.getOpcode() == TargetOpcode::G_EXTRACT) &&
          "unexpected instruction");
 
-  const unsigned DstReg = I.getOperand(0).getReg();
-  const unsigned SrcReg = I.getOperand(1).getReg();
+  const Register DstReg = I.getOperand(0).getReg();
+  const Register SrcReg = I.getOperand(1).getReg();
   int64_t Index = I.getOperand(2).getImm();
 
   const LLT DstTy = MRI.getType(DstReg);
@@ -1278,9 +1278,9 @@ bool X86InstructionSelector::selectInsert(MachineInstr &I,
                                           MachineFunction &MF) const {
   assert((I.getOpcode() == TargetOpcode::G_INSERT) && "unexpected instruction");
 
-  const unsigned DstReg = I.getOperand(0).getReg();
-  const unsigned SrcReg = I.getOperand(1).getReg();
-  const unsigned InsertReg = I.getOperand(2).getReg();
+  const Register DstReg = I.getOperand(0).getReg();
+  const Register SrcReg = I.getOperand(1).getReg();
+  const Register InsertReg = I.getOperand(2).getReg();
   int64_t Index = I.getOperand(3).getImm();
 
   const LLT DstTy = MRI.getType(DstReg);
@@ -1338,7 +1338,7 @@ bool X86InstructionSelector::selectUnmergeValues(
   // Split to extracts.
   unsigned NumDefs = I.getNumOperands() - 1;
-  unsigned SrcReg = I.getOperand(NumDefs).getReg();
+  Register SrcReg = I.getOperand(NumDefs).getReg();
   unsigned DefSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
 
   for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
@@ -1363,8 +1363,8 @@ bool X86InstructionSelector::selectMergeValues(
          "unexpected instruction");
 
   // Split to inserts.
-  unsigned DstReg = I.getOperand(0).getReg();
-  unsigned SrcReg0 = I.getOperand(1).getReg();
+  Register DstReg = I.getOperand(0).getReg();
+  Register SrcReg0 = I.getOperand(1).getReg();
   const LLT DstTy = MRI.getType(DstReg);
   const LLT SrcTy = MRI.getType(SrcReg0);
@@ -1373,13 +1373,13 @@ bool X86InstructionSelector::selectMergeValues(
   const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
 
   // For the first src use insertSubReg.
-  unsigned DefReg = MRI.createGenericVirtualRegister(DstTy);
+  Register DefReg = MRI.createGenericVirtualRegister(DstTy);
   MRI.setRegBank(DefReg, RegBank);
   if (!emitInsertSubreg(DefReg, I.getOperand(1).getReg(), I, MRI, MF))
     return false;
 
   for (unsigned Idx = 2; Idx < I.getNumOperands(); ++Idx) {
-    unsigned Tmp = MRI.createGenericVirtualRegister(DstTy);
+    Register Tmp = MRI.createGenericVirtualRegister(DstTy);
     MRI.setRegBank(Tmp, RegBank);
     MachineInstr &InsertInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
@@ -1410,7 +1410,7 @@ bool X86InstructionSelector::selectCondBranch(MachineInstr &I,
                                               MachineFunction &MF) const {
   assert((I.getOpcode() == TargetOpcode::G_BRCOND) && "unexpected instruction");
 
-  const unsigned CondReg = I.getOperand(0).getReg();
+  const Register CondReg = I.getOperand(0).getReg();
   MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
 
   MachineInstr &TestInst =
@@ -1437,7 +1437,7 @@ bool X86InstructionSelector::materializeFP(MachineInstr &I,
   if (CM != CodeModel::Small && CM != CodeModel::Large)
     return false;
 
-  const unsigned DstReg = I.getOperand(0).getReg();
+  const Register DstReg = I.getOperand(0).getReg();
   const LLT DstTy = MRI.getType(DstReg);
   const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
   unsigned Align = DstTy.getSizeInBits();
@@ -1455,7 +1455,7 @@ bool X86InstructionSelector::materializeFP(MachineInstr &I,
     // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
     // they cannot be folded into immediate fields.
-    unsigned AddrReg = MRI.createVirtualRegister(&X86::GR64RegClass);
+    Register AddrReg = MRI.createVirtualRegister(&X86::GR64RegClass);
     BuildMI(*I.getParent(), I, DbgLoc, TII.get(X86::MOV64ri), AddrReg)
         .addConstantPoolIndex(CPI, 0, OpFlag);
@@ -1498,7 +1498,7 @@ bool X86InstructionSelector::selectImplicitDefOrPHI(
           I.getOpcode() == TargetOpcode::G_PHI) &&
          "unexpected instruction");
 
-  unsigned DstReg = I.getOperand(0).getReg();
+  Register DstReg = I.getOperand(0).getReg();
 
   if (!MRI.getRegClassOrNull(DstReg)) {
     const LLT DstTy = MRI.getType(DstReg);
@@ -1532,7 +1532,7 @@ bool X86InstructionSelector::selectShift(MachineInstr &I,
           I.getOpcode() == TargetOpcode::G_LSHR) &&
          "unexpected instruction");
 
-  unsigned DstReg = I.getOperand(0).getReg();
+  Register DstReg = I.getOperand(0).getReg();
   const LLT DstTy = MRI.getType(DstReg);
   const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
@@ -1573,8 +1573,8 @@ bool X86InstructionSelector::selectShift(MachineInstr &I,
     return false;
   }
 
-  unsigned Op0Reg = I.getOperand(1).getReg();
-  unsigned Op1Reg = I.getOperand(2).getReg();
+  Register Op0Reg = I.getOperand(1).getReg();
+  Register Op1Reg = I.getOperand(2).getReg();
 
   assert(MRI.getType(Op1Reg).getSizeInBits() == 8);
@@ -1601,9 +1601,9 @@ bool X86InstructionSelector::selectDivRem(MachineInstr &I,
           I.getOpcode() == TargetOpcode::G_UREM) &&
          "unexpected instruction");
 
-  const unsigned DstReg = I.getOperand(0).getReg();
-  const unsigned Op1Reg = I.getOperand(1).getReg();
-  const unsigned Op2Reg = I.getOperand(2).getReg();
+  const Register DstReg = I.getOperand(0).getReg();
+  const Register Op1Reg = I.getOperand(1).getReg();
+  const Register Op2Reg = I.getOperand(2).getReg();
 
   const LLT RegTy = MRI.getType(DstReg);
   assert(RegTy == MRI.getType(Op1Reg) && RegTy == MRI.getType(Op2Reg) &&
@@ -1727,7 +1727,7 @@ bool X86InstructionSelector::selectDivRem(MachineInstr &I,
       BuildMI(*I.getParent(), I, I.getDebugLoc(),
               TII.get(OpEntry.OpSignExtend));
     else {
-      unsigned Zero32 = MRI.createVirtualRegister(&X86::GR32RegClass);
+      Register Zero32 = MRI.createVirtualRegister(&X86::GR32RegClass);
       BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::MOV32r0),
               Zero32);
@@ -1765,8 +1765,8 @@ bool X86InstructionSelector::selectDivRem(MachineInstr &I,
     if ((I.getOpcode() == Instruction::SRem ||
          I.getOpcode() == Instruction::URem) &&
         OpEntry.DivRemResultReg == X86::AH && STI.is64Bit()) {
-      unsigned SourceSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
-      unsigned ResultSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
+      Register SourceSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
+      Register ResultSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
       BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy), SourceSuperReg)
           .addReg(X86::AX);
diff --git a/lib/Target/X86/X86MCInstLower.cpp b/lib/Target/X86/X86MCInstLower.cpp
index b1fefaa84be..d28caed5f8e 100644
--- a/lib/Target/X86/X86MCInstLower.cpp
+++ b/lib/Target/X86/X86MCInstLower.cpp
@@ -958,7 +958,7 @@ void X86AsmPrinter::LowerFAULTING_OP(const MachineInstr &FaultingMI,
   // FAULTING_LOAD_OP , , ,
   //                  ,
 
-  unsigned DefRegister = FaultingMI.getOperand(0).getReg();
+  Register DefRegister = FaultingMI.getOperand(0).getReg();
   FaultMaps::FaultKind FK =
       static_cast<FaultMaps::FaultKind>(FaultingMI.getOperand(1).getImm());
   MCSymbol *HandlerLabel = FaultingMI.getOperand(2).getMBB()->getSymbol();
@@ -1079,7 +1079,7 @@ void X86AsmPrinter::LowerPATCHPOINT(const MachineInstr &MI,
     // Emit MOV to materialize the target address and the CALL to target.
     // This is encoded with 12-13 bytes, depending on which register is used.
-    unsigned ScratchReg = MI.getOperand(ScratchIdx).getReg();
+    Register ScratchReg = MI.getOperand(ScratchIdx).getReg();
     if (X86II::isX86_64ExtendedReg(ScratchReg))
       EncodedBytes = 13;
     else
@@ -1650,7 +1650,7 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
   case X86::EH_RETURN:
   case X86::EH_RETURN64: {
     // Lower these as normal, but add some comments.
-    unsigned Reg = MI->getOperand(0).getReg();
+    Register Reg = MI->getOperand(0).getReg();
     OutStreamer->AddComment(StringRef("eh_return, addr: %") +
                             X86ATTInstPrinter::getRegisterName(Reg));
     break;
@@ -1699,9 +1699,9 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
     assert(Disp >= 0 && Disp <= INT32_MAX - 2 && "Unexpected displacement");
     const X86RegisterInfo *RI =
         MF->getSubtarget<X86Subtarget>().getRegisterInfo();
-    unsigned Reg = MI->getOperand(0).getReg();
-    unsigned Reg0 = RI->getSubReg(Reg, X86::sub_mask_0);
-    unsigned Reg1 = RI->getSubReg(Reg, X86::sub_mask_1);
+    Register Reg = MI->getOperand(0).getReg();
+    Register Reg0 = RI->getSubReg(Reg, X86::sub_mask_0);
+    Register Reg1 = RI->getSubReg(Reg, X86::sub_mask_1);
 
     // Load the first mask register
     MCInstBuilder MIB = MCInstBuilder(X86::KMOVWkm);
@@ -1732,9 +1732,9 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
     assert(Disp >= 0 && Disp <= INT32_MAX - 2 && "Unexpected displacement");
     const X86RegisterInfo *RI =
         MF->getSubtarget<X86Subtarget>().getRegisterInfo();
-    unsigned Reg = MI->getOperand(X86::AddrNumOperands).getReg();
-    unsigned Reg0 = RI->getSubReg(Reg, X86::sub_mask_0);
-    unsigned Reg1 = RI->getSubReg(Reg, X86::sub_mask_1);
+    Register Reg = MI->getOperand(X86::AddrNumOperands).getReg();
+    Register Reg0 = RI->getSubReg(Reg, X86::sub_mask_0);
+    Register Reg1 = RI->getSubReg(Reg, X86::sub_mask_1);
 
     // Store the first mask register
     MCInstBuilder MIB = MCInstBuilder(X86::KMOVWmk);
diff --git a/lib/Target/X86/X86OptimizeLEAs.cpp b/lib/Target/X86/X86OptimizeLEAs.cpp
index 92675ab8bcd..125c74eaf64 100644
--- a/lib/Target/X86/X86OptimizeLEAs.cpp
+++ b/lib/Target/X86/X86OptimizeLEAs.cpp
@@ -612,8 +612,8 @@ bool OptimizeLEAPass::removeRedundantLEAs(MemOpMap &LEAs) {
       // Loop over all uses of the Last LEA and update their operands. Note
       // that the correctness of this has already been checked in the
       // isReplaceable function.
-      unsigned FirstVReg = First.getOperand(0).getReg();
-      unsigned LastVReg = Last.getOperand(0).getReg();
+      Register FirstVReg = First.getOperand(0).getReg();
+      Register LastVReg = Last.getOperand(0).getReg();
       for (auto UI = MRI->use_begin(LastVReg), UE = MRI->use_end();
            UI != UE;) {
         MachineOperand &MO = *UI++;
diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp
index 2e2f1f9e438..9d6b27d3829 100644
--- a/lib/Target/X86/X86RegisterInfo.cpp
+++ b/lib/Target/X86/X86RegisterInfo.cpp
@@ -677,13 +677,13 @@ static bool tryOptimizeLEAtoMOV(MachineBasicBlock::iterator II) {
       MI.getOperand(4).getImm() != 0 ||
       MI.getOperand(5).getReg() != X86::NoRegister)
     return false;
-  unsigned BasePtr = MI.getOperand(1).getReg();
+  Register BasePtr = MI.getOperand(1).getReg();
   // In X32 mode, ensure the base-pointer is a 32-bit operand, so the LEA will
   // be replaced with a 32-bit operand MOV which will zero extend the upper
   // 32-bits of the super register.
   if (Opc == X86::LEA64_32r)
     BasePtr = getX86SubSuperRegister(BasePtr, 32);
-  unsigned NewDestReg = MI.getOperand(0).getReg();
+  Register NewDestReg = MI.getOperand(0).getReg();
   const X86InstrInfo *TII =
       MI.getParent()->getParent()->getSubtarget<X86Subtarget>().getInstrInfo();
   TII->copyPhysReg(*MI.getParent(), II, MI.getDebugLoc(), NewDestReg, BasePtr,
@@ -773,7 +773,7 @@ Register X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
 unsigned
 X86RegisterInfo::getPtrSizedFrameRegister(const MachineFunction &MF) const {
   const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
-  unsigned FrameReg = getFrameRegister(MF);
+  Register FrameReg = getFrameRegister(MF);
   if (Subtarget.isTarget64BitILP32())
     FrameReg = getX86SubSuperRegister(FrameReg, 32);
   return FrameReg;
@@ -782,7 +782,7 @@ X86RegisterInfo::getPtrSizedFrameRegister(const MachineFunction &MF) const {
 unsigned
 X86RegisterInfo::getPtrSizedStackRegister(const MachineFunction &MF) const {
   const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
-  unsigned StackReg = getStackRegister();
+  Register StackReg = getStackRegister();
   if (Subtarget.isTarget64BitILP32())
     StackReg = getX86SubSuperRegister(StackReg, 32);
   return StackReg;
diff --git a/lib/Target/X86/X86SelectionDAGInfo.cpp b/lib/Target/X86/X86SelectionDAGInfo.cpp
index 50690953eef..1ae8df977f8 100644
--- a/lib/Target/X86/X86SelectionDAGInfo.cpp
+++ b/lib/Target/X86/X86SelectionDAGInfo.cpp
@@ -36,7 +36,7 @@ bool X86SelectionDAGInfo::isBaseRegConflictPossible(
   const X86RegisterInfo *TRI = static_cast<const X86RegisterInfo *>(
       DAG.getSubtarget().getRegisterInfo());
-  unsigned BaseReg = TRI->getBaseRegister();
+  Register BaseReg = TRI->getBaseRegister();
   for (unsigned R : ClobberSet)
     if (BaseReg == R)
       return true;
diff --git a/lib/Target/X86/X86SpeculativeLoadHardening.cpp b/lib/Target/X86/X86SpeculativeLoadHardening.cpp
index 2741254cbd0..29ab70548ef 100644
--- a/lib/Target/X86/X86SpeculativeLoadHardening.cpp
+++ b/lib/Target/X86/X86SpeculativeLoadHardening.cpp
@@ -477,7 +477,7 @@ bool X86SpeculativeLoadHardeningPass::runOnMachineFunction(
     // Otherwise, just build the predicate state itself by zeroing a register
     // as we don't need any initial state.
     PS->InitialReg = MRI->createVirtualRegister(PS->RC);
-    unsigned PredStateSubReg = MRI->createVirtualRegister(&X86::GR32RegClass);
+    Register PredStateSubReg = MRI->createVirtualRegister(&X86::GR32RegClass);
     auto ZeroI = BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::MOV32r0),
                          PredStateSubReg);
     ++NumInstsInserted;
@@ -750,7 +750,7 @@ X86SpeculativeLoadHardeningPass::tracePredStateThroughCFG(
       int PredStateSizeInBytes = TRI->getRegSizeInBits(*PS->RC) / 8;
       auto CMovOp = X86::getCMovOpcode(PredStateSizeInBytes);
 
-      unsigned UpdatedStateReg = MRI->createVirtualRegister(PS->RC);
+      Register UpdatedStateReg = MRI->createVirtualRegister(PS->RC);
       // Note that we intentionally use an empty debug location so that
       // this picks up the preceding location.
       auto CMovI = BuildMI(CheckingMBB, InsertPt, DebugLoc(),
@@ -907,7 +907,7 @@ void X86SpeculativeLoadHardeningPass::unfoldCallAndJumpLoads(
                    MI.dump(); dbgs() << "\n");
         report_fatal_error("Unable to unfold load!");
       }
-      unsigned Reg = MRI->createVirtualRegister(UnfoldedRC);
+      Register Reg = MRI->createVirtualRegister(UnfoldedRC);
       SmallVector NewMIs;
       // If we were able to compute an unfolded reg class, any failure here
       // is just a programming error so just assert.
@@ -1102,7 +1102,7 @@ X86SpeculativeLoadHardeningPass::tracePredStateThroughIndirectBranches(
       // synthetic target in the predecessor. We do this at the bottom of the
      // predecessor.
       auto InsertPt = Pred->getFirstTerminator();
-      unsigned TargetReg = MRI->createVirtualRegister(&X86::GR64RegClass);
+      Register TargetReg = MRI->createVirtualRegister(&X86::GR64RegClass);
       if (MF.getTarget().getCodeModel() == CodeModel::Small &&
           !Subtarget->isPositionIndependent()) {
         // Directly materialize it into an immediate.
@@ -1153,7 +1153,7 @@ X86SpeculativeLoadHardeningPass::tracePredStateThroughIndirectBranches(
       LLVM_DEBUG(dbgs() << " Inserting cmp: "; CheckI->dump(); dbgs() << "\n");
     } else {
       // Otherwise compute the address into a register first.
-      unsigned AddrReg = MRI->createVirtualRegister(&X86::GR64RegClass);
+      Register AddrReg = MRI->createVirtualRegister(&X86::GR64RegClass);
       auto AddrI =
           BuildMI(MBB, InsertPt, DebugLoc(), TII->get(X86::LEA64r), AddrReg)
              .addReg(/*Base*/ X86::RIP)
              .addImm(/*Scale*/ 1)
@@ -1175,7 +1175,7 @@ X86SpeculativeLoadHardeningPass::tracePredStateThroughIndirectBranches(
     // Now cmov over the predicate if the comparison wasn't equal.
     int PredStateSizeInBytes = TRI->getRegSizeInBits(*PS->RC) / 8;
     auto CMovOp = X86::getCMovOpcode(PredStateSizeInBytes);
-    unsigned UpdatedStateReg = MRI->createVirtualRegister(PS->RC);
+    Register UpdatedStateReg = MRI->createVirtualRegister(PS->RC);
     auto CMovI =
         BuildMI(MBB, InsertPt, DebugLoc(), TII->get(CMovOp), UpdatedStateReg)
             .addReg(PS->InitialReg)
@@ -1878,7 +1878,7 @@ unsigned X86SpeculativeLoadHardeningPass::saveEFLAGS(
     DebugLoc Loc) {
   // FIXME: Hard coding this to a 32-bit register class seems weird, but matches
   // what instruction selection does.
-  unsigned Reg = MRI->createVirtualRegister(&X86::GR32RegClass);
+  Register Reg = MRI->createVirtualRegister(&X86::GR32RegClass);
   // We directly copy the FLAGS register and rely on later lowering to clean
   // this up into the appropriate setCC instructions.
   BuildMI(MBB, InsertPt, Loc, TII->get(X86::COPY), Reg).addReg(X86::EFLAGS);
@@ -1905,7 +1905,7 @@ void X86SpeculativeLoadHardeningPass::restoreEFLAGS(
 void X86SpeculativeLoadHardeningPass::mergePredStateIntoSP(
     MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt, DebugLoc Loc,
     unsigned PredStateReg) {
-  unsigned TmpReg = MRI->createVirtualRegister(PS->RC);
+  Register TmpReg = MRI->createVirtualRegister(PS->RC);
   // FIXME: This hard codes a shift distance based on the number of bits needed
   // to stay canonical on 64-bit. We should compute this somehow and support
   // 32-bit as part of that.
@@ -1925,8 +1925,8 @@ void X86SpeculativeLoadHardeningPass::mergePredStateIntoSP(
 unsigned X86SpeculativeLoadHardeningPass::extractPredStateFromSP(
     MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
     DebugLoc Loc) {
-  unsigned PredStateReg = MRI->createVirtualRegister(PS->RC);
-  unsigned TmpReg = MRI->createVirtualRegister(PS->RC);
+  Register PredStateReg = MRI->createVirtualRegister(PS->RC);
+  Register TmpReg = MRI->createVirtualRegister(PS->RC);
 
   // We know that the stack pointer will have any preserved predicate state in
   // its high bit. We just want to smear this across the other bits. Turns out,
@@ -2031,9 +2031,9 @@ void X86SpeculativeLoadHardeningPass::hardenLoadAddr(
   }
 
   for (MachineOperand *Op : HardenOpRegs) {
-    unsigned OpReg = Op->getReg();
+    Register OpReg = Op->getReg();
     auto *OpRC = MRI->getRegClass(OpReg);
-    unsigned TmpReg = MRI->createVirtualRegister(OpRC);
+    Register TmpReg = MRI->createVirtualRegister(OpRC);
 
     // If this is a vector register, we'll need somewhat custom logic to handle
     // hardening it.
@@ -2045,7 +2045,7 @@ void X86SpeculativeLoadHardeningPass::hardenLoadAddr(
       // Move our state into a vector register.
      // FIXME: We could skip this at the cost of longer encodings with AVX-512
      // but that doesn't seem likely worth it.
-      unsigned VStateReg = MRI->createVirtualRegister(&X86::VR128RegClass);
+      Register VStateReg = MRI->createVirtualRegister(&X86::VR128RegClass);
       auto MovI =
           BuildMI(MBB, InsertPt, Loc, TII->get(X86::VMOV64toPQIrr), VStateReg)
               .addReg(StateReg);
@@ -2054,7 +2054,7 @@ void X86SpeculativeLoadHardeningPass::hardenLoadAddr(
       LLVM_DEBUG(dbgs() << " Inserting mov: "; MovI->dump(); dbgs() << "\n");
 
       // Broadcast it across the vector register.
-      unsigned VBStateReg = MRI->createVirtualRegister(OpRC);
+      Register VBStateReg = MRI->createVirtualRegister(OpRC);
       auto BroadcastI = BuildMI(MBB, InsertPt, Loc,
                                 TII->get(Is128Bit ? X86::VPBROADCASTQrr
                                                   : X86::VPBROADCASTQYrr),
@@ -2084,7 +2084,7 @@ void X86SpeculativeLoadHardeningPass::hardenLoadAddr(
       assert(Subtarget->hasVLX() && "AVX512VL-specific register classes!");
 
       // Broadcast our state into a vector register.
-      unsigned VStateReg = MRI->createVirtualRegister(OpRC);
+      Register VStateReg = MRI->createVirtualRegister(OpRC);
       unsigned BroadcastOp = Is128Bit ? X86::VPBROADCASTQrZ128r
                                       : Is256Bit ? X86::VPBROADCASTQrZ256r
                                                  : X86::VPBROADCASTQrZr;
@@ -2153,7 +2153,7 @@ MachineInstr *X86SpeculativeLoadHardeningPass::sinkPostLoadHardenedInst(
   // See if we can sink hardening the loaded value.
   auto SinkCheckToSingleUse =
      [&](MachineInstr &MI) -> Optional<MachineInstr *> {
-    unsigned DefReg = MI.getOperand(0).getReg();
+    Register DefReg = MI.getOperand(0).getReg();
 
     // We need to find a single use which we can sink the check. We can
     // primarily do this because many uses may already end up checked on their
@@ -2210,7 +2210,7 @@ MachineInstr *X86SpeculativeLoadHardeningPass::sinkPostLoadHardenedInst(
     // If this register isn't a virtual register we can't walk uses of sanely,
     // just bail. Also check that its register class is one of the ones we
     // can harden.
-    unsigned UseDefReg = UseMI.getOperand(0).getReg();
+    Register UseDefReg = UseMI.getOperand(0).getReg();
     if (!Register::isVirtualRegister(UseDefReg) ||
         !canHardenRegister(UseDefReg))
       return {};
@@ -2289,7 +2289,7 @@ unsigned X86SpeculativeLoadHardeningPass::hardenValueInRegister(
   if (Bytes != 8) {
     unsigned SubRegImms[] = {X86::sub_8bit, X86::sub_16bit, X86::sub_32bit};
     unsigned SubRegImm = SubRegImms[Log2_32(Bytes)];
-    unsigned NarrowStateReg = MRI->createVirtualRegister(RC);
+    Register NarrowStateReg = MRI->createVirtualRegister(RC);
     BuildMI(MBB, InsertPt, Loc, TII->get(TargetOpcode::COPY), NarrowStateReg)
         .addReg(StateReg, 0, SubRegImm);
     StateReg = NarrowStateReg;
@@ -2299,7 +2299,7 @@ unsigned X86SpeculativeLoadHardeningPass::hardenValueInRegister(
   if (isEFLAGSLive(MBB, InsertPt, *TRI))
     FlagsReg = saveEFLAGS(MBB, InsertPt, Loc);
 
-  unsigned NewReg = MRI->createVirtualRegister(RC);
+  Register NewReg = MRI->createVirtualRegister(RC);
   unsigned OrOpCodes[] = {X86::OR8rr, X86::OR16rr, X86::OR32rr, X86::OR64rr};
   unsigned OrOpCode = OrOpCodes[Log2_32(Bytes)];
   auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(OrOpCode), NewReg)
@@ -2329,13 +2329,13 @@ unsigned X86SpeculativeLoadHardeningPass::hardenPostLoad(MachineInstr &MI) {
   DebugLoc Loc = MI.getDebugLoc();
 
   auto &DefOp = MI.getOperand(0);
-  unsigned OldDefReg = DefOp.getReg();
+  Register OldDefReg = DefOp.getReg();
   auto *DefRC = MRI->getRegClass(OldDefReg);
 
   // Because we want to completely replace the uses of this def'ed value with
   // the hardened value, create a dedicated new register that will only be used
   // to communicate the unhardened value to the hardening.
-  unsigned UnhardenedReg = MRI->createVirtualRegister(DefRC);
+  Register UnhardenedReg = MRI->createVirtualRegister(DefRC);
   DefOp.setReg(UnhardenedReg);
 
   // Now harden this register's value, getting a hardened reg that is safe to
@@ -2537,7 +2537,7 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughCall(
         .addReg(ExpectedRetAddrReg, RegState::Kill)
         .addSym(RetSymbol);
   } else {
-    unsigned ActualRetAddrReg = MRI->createVirtualRegister(AddrRC);
+    Register ActualRetAddrReg = MRI->createVirtualRegister(AddrRC);
     BuildMI(MBB, InsertPt, Loc, TII->get(X86::LEA64r), ActualRetAddrReg)
         .addReg(/*Base*/ X86::RIP)
         .addImm(/*Scale*/ 1)
@@ -2554,7 +2554,7 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughCall(
   int PredStateSizeInBytes = TRI->getRegSizeInBits(*PS->RC) / 8;
   auto CMovOp = X86::getCMovOpcode(PredStateSizeInBytes);
 
-  unsigned UpdatedStateReg = MRI->createVirtualRegister(PS->RC);
+  Register UpdatedStateReg = MRI->createVirtualRegister(PS->RC);
   auto CMovI = BuildMI(MBB, InsertPt, Loc, TII->get(CMovOp), UpdatedStateReg)
                    .addReg(NewStateReg, RegState::Kill)
                    .addReg(PS->PoisonReg)
@@ -2611,7 +2611,7 @@ void X86SpeculativeLoadHardeningPass::hardenIndirectCallOrJumpInstr(
   // For all of these, the target register is the first operand of the
   // instruction.
   auto &TargetOp = MI.getOperand(0);
-  unsigned OldTargetReg = TargetOp.getReg();
+  Register OldTargetReg = TargetOp.getReg();
 
   // Try to lookup a hardened version of this register. We retain a reference
   // here as we want to update the map to track any newly computed hardened
diff --git a/lib/Target/X86/X86WinAllocaExpander.cpp b/lib/Target/X86/X86WinAllocaExpander.cpp
index 9e499db1d7e..ae72c642758 100644
--- a/lib/Target/X86/X86WinAllocaExpander.cpp
+++ b/lib/Target/X86/X86WinAllocaExpander.cpp
@@ -81,7 +81,7 @@ static int64_t getWinAllocaAmount(MachineInstr *MI, MachineRegisterInfo *MRI) {
          MI->getOpcode() == X86::WIN_ALLOCA_64);
   assert(MI->getOperand(0).isReg());
 
-  unsigned AmountReg = MI->getOperand(0).getReg();
+  Register AmountReg = MI->getOperand(0).getReg();
   MachineInstr *Def = MRI->getUniqueVRegDef(AmountReg);
   if (!Def ||
@@ -261,7 +261,7 @@ void X86WinAllocaExpander::lower(MachineInstr* MI, Lowering L) {
     break;
   }
 
-  unsigned AmountReg = MI->getOperand(0).getReg();
+  Register AmountReg = MI->getOperand(0).getReg();
   MI->eraseFromParent();
 
   // Delete the definition of AmountReg.
diff --git a/lib/Target/XCore/XCoreFrameLowering.cpp b/lib/Target/XCore/XCoreFrameLowering.cpp
index 5066407c74a..adc9c959e0e 100644
--- a/lib/Target/XCore/XCoreFrameLowering.cpp
+++ b/lib/Target/XCore/XCoreFrameLowering.cpp
@@ -367,8 +367,8 @@ void XCoreFrameLowering::emitEpilogue(MachineFunction &MF,
     RestoreSpillList(MBB, MBBI, dl, TII, RemainingAdj, SpillList);
 
     // Return to the landing pad.
-    unsigned EhStackReg = MBBI->getOperand(0).getReg();
-    unsigned EhHandlerReg = MBBI->getOperand(1).getReg();
+    Register EhStackReg = MBBI->getOperand(0).getReg();
+    Register EhHandlerReg = MBBI->getOperand(1).getReg();
     BuildMI(MBB, MBBI, dl, TII.get(XCore::SETSP_1r)).addReg(EhStackReg);
     BuildMI(MBB, MBBI, dl, TII.get(XCore::BAU_1r)).addReg(EhHandlerReg);
     MBB.erase(MBBI);  // Erase the previous return instruction.
diff --git a/lib/Target/XCore/XCoreFrameToArgsOffsetElim.cpp b/lib/Target/XCore/XCoreFrameToArgsOffsetElim.cpp
index e433d21c59b..b5dbdea98ee 100644
--- a/lib/Target/XCore/XCoreFrameToArgsOffsetElim.cpp
+++ b/lib/Target/XCore/XCoreFrameToArgsOffsetElim.cpp
@@ -55,7 +55,7 @@ bool XCoreFTAOElim::runOnMachineFunction(MachineFunction &MF) {
          MBBI != EE; ++MBBI) {
       if (MBBI->getOpcode() == XCore::FRAME_TO_ARGS_OFFSET) {
         MachineInstr &OldInst = *MBBI;
-        unsigned Reg = OldInst.getOperand(0).getReg();
+        Register Reg = OldInst.getOperand(0).getReg();
         MBBI = TII.loadImmediate(MBB, MBBI, Reg, StackSize);
         OldInst.eraseFromParent();
       }
diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp
index 072278d9fc4..34308904dba 100644
--- a/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/lib/Target/XCore/XCoreISelLowering.cpp
@@ -1309,7 +1309,7 @@ SDValue XCoreTargetLowering::LowerCCCArguments(
         llvm_unreachable(nullptr);
       }
       case MVT::i32:
-        unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
+        Register VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
         RegInfo.addLiveIn(VA.getLocReg(), VReg);
         ArgIn = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
         CFRegNode.push_back(ArgIn.getValue(ArgIn->getNumValues() - 1));
@@ -1360,7 +1360,7 @@ SDValue XCoreTargetLowering::LowerCCCArguments(
       offset -= StackSlotSize;
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
       // Move argument from phys reg -> virt reg
-      unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
+      Register VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
       RegInfo.addLiveIn(ArgRegs[i], VReg);
       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
       CFRegNode.push_back(Val.getValue(Val->getNumValues() - 1));
diff --git a/lib/Target/XCore/XCoreRegisterInfo.cpp b/lib/Target/XCore/XCoreRegisterInfo.cpp
index 3752274e2cd..86ec7f82d4d 100644
--- a/lib/Target/XCore/XCoreRegisterInfo.cpp
+++ b/lib/Target/XCore/XCoreRegisterInfo.cpp
@@ -301,7 +301,7 @@ XCoreRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
              << "<--------->\n");
 
   Offset/=4;
 
-  unsigned Reg = MI.getOperand(0).getReg();
+  Register Reg = MI.getOperand(0).getReg();
   assert(XCore::GRRegsRegClass.contains(Reg) && "Unexpected register operand");
 
   if (TFI->hasFP(MF)) {