From 5ec84d66c86b6dbe7457d2c843afff929afebebd Mon Sep 17 00:00:00 2001 From: Guillaume Chatelet Date: Thu, 5 Sep 2019 10:00:22 +0000 Subject: [PATCH] [LLVM][Alignment] Make functions using log of alignment explicit Summary: This patch renames functions that take or return alignment as log2; it will help with the transition to llvm::Align. The renaming makes it explicit that we deal with log2(alignment) instead of a power-of-two alignment. A few renames uncovered dubious assignments: - `MirParser`/`MirPrinter` were expecting powers of two, but `MachineFunction` and `MachineBasicBlock` were dealing with log2(align). This patch fixes it and updates the documentation. - `MachineBlockPlacement` exposes two flags (`align-all-blocks` and `align-all-nofallthru-blocks`) supposedly interpreted as power-of-two alignments; internally these values are interpreted as log2(align). This patch updates the documentation. - `MachineFunction` exposes `align-all-functions`, also interpreted as a power-of-two alignment; internally this value is interpreted as log2(align). This patch updates the documentation. Reviewers: lattner, thegameg, courbet Subscribers: dschuff, arsenm, jyknight, dylanmckay, sdardis, nemanjai, jvesely, nhaehnle, javed.absar, hiraditya, kbarton, fedor.sergeev, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, dexonsmith, PkmX, jocewei, jsji, Jim, s.egerton, llvm-commits, courbet Tags: #llvm Differential Revision: https://reviews.llvm.org/D65945 llvm-svn: 371045 --- docs/MIRLangRef.rst | 12 ++++-- include/llvm/CodeGen/MachineBasicBlock.h | 6 +-- include/llvm/CodeGen/MachineFunction.h | 15 +++---- include/llvm/CodeGen/TargetLowering.h | 32 +++++++-------- lib/CodeGen/AsmPrinter/AsmPrinter.cpp | 6 +-- lib/CodeGen/AsmPrinter/WinException.cpp | 4 +- lib/CodeGen/BranchRelaxation.cpp | 14 +++---- lib/CodeGen/MIRParser/MIParser.cpp | 2 +- lib/CodeGen/MIRParser/MIRParser.cpp | 2 +- lib/CodeGen/MIRPrinter.cpp | 7 ++-- lib/CodeGen/MachineBasicBlock.cpp | 4 +- lib/CodeGen/MachineBlockPlacement.cpp | 27 ++++++------- lib/CodeGen/MachineFunction.cpp | 17 ++++---- lib/CodeGen/PatchableFunction.cpp | 2 +- lib/CodeGen/TargetLoweringBase.cpp | 6 +-- lib/Target/AArch64/AArch64ISelLowering.cpp | 6 +-- lib/Target/AArch64/AArch64Subtarget.cpp | 32 +++++++-------- lib/Target/AArch64/AArch64Subtarget.h | 10 +++-- lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp | 2 +- lib/Target/AMDGPU/R600AsmPrinter.cpp | 2 +- lib/Target/AMDGPU/SIISelLowering.cpp | 24 ++++++------ lib/Target/AMDGPU/SIISelLowering.h | 3 +- lib/Target/ARC/ARCMachineFunctionInfo.h | 2 +- lib/Target/ARM/ARM.td | 2 +- lib/Target/ARM/ARMBasicBlockInfo.cpp | 4 +- lib/Target/ARM/ARMConstantIslandPass.cpp | 39 ++++++++++--------- lib/Target/ARM/ARMISelLowering.cpp | 4 +- lib/Target/ARM/ARMSubtarget.cpp | 2 +- lib/Target/ARM/ARMSubtarget.h | 6 +-- lib/Target/AVR/AVRISelLowering.cpp | 2 +- lib/Target/BPF/BPFISelLowering.cpp | 4 +- .../Hexagon/HexagonBranchRelaxation.cpp | 4 +- lib/Target/Hexagon/HexagonFixupHwLoops.cpp | 4 +- lib/Target/Hexagon/HexagonISelLowering.cpp | 6 +-- lib/Target/Lanai/LanaiISelLowering.cpp | 4 +- lib/Target/MSP430/MSP430ISelLowering.cpp | 4 +- lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h | 2 +- .../Mips/MCTargetDesc/MipsNaClELFStreamer.cpp | 2 +- lib/Target/Mips/MipsAsmPrinter.cpp | 6 +-- lib/Target/Mips/MipsBranchExpansion.cpp | 2 +- lib/Target/Mips/MipsConstantIslandPass.cpp | 29 +++++++------- 
lib/Target/Mips/MipsISelLowering.cpp | 2 +- lib/Target/PowerPC/PPCBranchSelector.cpp | 26 ++++++------- lib/Target/PowerPC/PPCISelLowering.cpp | 12 +++--- lib/Target/PowerPC/PPCISelLowering.h | 2 +- lib/Target/RISCV/RISCVISelLowering.cpp | 4 +- lib/Target/Sparc/SparcISelLowering.cpp | 2 +- lib/Target/SystemZ/SystemZISelLowering.cpp | 4 +- lib/Target/SystemZ/SystemZLongBranch.cpp | 21 +++++----- lib/Target/X86/X86ISelLowering.cpp | 4 +- lib/Target/X86/X86RetpolineThunks.cpp | 2 +- lib/Target/XCore/XCoreISelLowering.cpp | 4 +- test/CodeGen/ARM/constant-island-movwt.mir | 4 +- test/CodeGen/ARM/fp16-litpool-arm.mir | 10 ++--- test/CodeGen/ARM/fp16-litpool-thumb.mir | 6 +-- test/CodeGen/ARM/fp16-litpool2-arm.mir | 4 +- test/CodeGen/ARM/fp16-litpool3-arm.mir | 2 +- .../CodeGen/Mips/unaligned-memops-mapping.mir | 8 ++-- test/CodeGen/PowerPC/block-placement.mir | 2 +- test/CodeGen/X86/tail-merge-after-mbp.mir | 2 +- test/DebugInfo/X86/debug-loc-offset.mir | 6 +-- 61 files changed, 251 insertions(+), 239 deletions(-) diff --git a/docs/MIRLangRef.rst index 407d6bc1c25..c2c14d9db7c 100644 --- a/docs/MIRLangRef.rst +++ b/docs/MIRLangRef.rst @@ -343,6 +343,8 @@ specified in brackets after the block's definition: .. TODO: Describe the way the reference to an unnamed LLVM IR block can be preserved. +``Alignment`` is specified in bytes, and must be a power of two. + Machine Instructions -------------------- @@ -614,9 +616,13 @@ following format is used: alignment: isTargetSpecific: -where ```` is a 32-bit unsigned integer, ```` is a `LLVM IR Constant -`_, alignment is a 32-bit -unsigned integer, and ```` is either true or false. +where: + - ```` is a 32-bit unsigned integer; + - ```` is an `LLVM IR Constant + `_; + - ```` is a 32-bit unsigned integer specified in bytes, and must be a + power of two; + - ```` is either true or false. Example: diff --git a/include/llvm/CodeGen/MachineBasicBlock.h index 5bf78138f5d..e3947f4a8f1 100644 --- a/include/llvm/CodeGen/MachineBasicBlock.h +++ b/include/llvm/CodeGen/MachineBasicBlock.h @@ -105,7 +105,7 @@ private: /// Alignment of the basic block. Zero if the basic block does not need to be /// aligned. The alignment is specified as log2(bytes). - unsigned Alignment = 0; + unsigned LogAlignment = 0; /// Indicate that this basic block is entered via an exception handler. bool IsEHPad = false; @@ -374,11 +374,11 @@ public: /// Return alignment of the basic block. The alignment is specified as /// log2(bytes). - unsigned getAlignment() const { return Alignment; } + unsigned getLogAlignment() const { return LogAlignment; } /// Set alignment of the basic block. The alignment is specified as /// log2(bytes). - void setAlignment(unsigned Align) { Alignment = Align; } + void setLogAlignment(unsigned A) { LogAlignment = A; } /// Returns true if the block is a landing pad. That is this basic block is /// entered via an exception handler. diff --git a/include/llvm/CodeGen/MachineFunction.h index c9325c746ba..b6c08f2bae7 100644 --- a/include/llvm/CodeGen/MachineFunction.h +++ b/include/llvm/CodeGen/MachineFunction.h @@ -277,7 +277,7 @@ class MachineFunction { unsigned FunctionNumber; /// Alignment - The alignment of the function. 
- unsigned Alignment; + unsigned LogAlignment; /// ExposesReturnsTwice - True if the function calls setjmp or related /// functions with attribute "returns twice", but doesn't have @@ -508,15 +508,16 @@ public: const WinEHFuncInfo *getWinEHFuncInfo() const { return WinEHInfo; } WinEHFuncInfo *getWinEHFuncInfo() { return WinEHInfo; } - /// getAlignment - Return the alignment (log2, not bytes) of the function. - unsigned getAlignment() const { return Alignment; } + /// getLogAlignment - Return the alignment of the function. + unsigned getLogAlignment() const { return LogAlignment; } - /// setAlignment - Set the alignment (log2, not bytes) of the function. - void setAlignment(unsigned A) { Alignment = A; } + /// setLogAlignment - Set the alignment of the function. + void setLogAlignment(unsigned A) { LogAlignment = A; } /// ensureAlignment - Make sure the function is at least 1 << A bytes aligned. - void ensureAlignment(unsigned A) { - if (Alignment < A) Alignment = A; + void ensureLogAlignment(unsigned A) { + if (LogAlignment < A) + LogAlignment = A; } /// exposesReturnsTwice - Returns true if the function calls setjmp or diff --git a/include/llvm/CodeGen/TargetLowering.h b/include/llvm/CodeGen/TargetLowering.h index 315f209417c..de82fd71736 100644 --- a/include/llvm/CodeGen/TargetLowering.h +++ b/include/llvm/CodeGen/TargetLowering.h @@ -1582,18 +1582,18 @@ public: } /// Return the minimum function alignment. - unsigned getMinFunctionAlignment() const { - return MinFunctionAlignment; + unsigned getMinFunctionLogAlignment() const { + return MinFunctionLogAlignment; } /// Return the preferred function alignment. - unsigned getPrefFunctionAlignment() const { - return PrefFunctionAlignment; + unsigned getPrefFunctionLogAlignment() const { + return PrefFunctionLogAlignment; } /// Return the preferred loop alignment. - virtual unsigned getPrefLoopAlignment(MachineLoop *ML = nullptr) const { - return PrefLoopAlignment; + virtual unsigned getPrefLoopLogAlignment(MachineLoop *ML = nullptr) const { + return PrefLoopLogAlignment; } /// Should loops be aligned even when the function is marked OptSize (but not @@ -2105,23 +2105,23 @@ protected: } /// Set the target's minimum function alignment (in log2(bytes)) - void setMinFunctionAlignment(unsigned Align) { - MinFunctionAlignment = Align; + void setMinFunctionLogAlignment(unsigned LogAlign) { + MinFunctionLogAlignment = LogAlign; } /// Set the target's preferred function alignment. This should be set if /// there is a performance benefit to higher-than-minimum alignment (in /// log2(bytes)) - void setPrefFunctionAlignment(unsigned Align) { - PrefFunctionAlignment = Align; + void setPrefFunctionLogAlignment(unsigned LogAlign) { + PrefFunctionLogAlignment = LogAlign; } /// Set the target's preferred loop alignment. Default alignment is zero, it /// means the target does not care about loop alignment. The alignment is /// specified in log2(bytes). The target may also override /// getPrefLoopAlignment to provide per-loop values. - void setPrefLoopAlignment(unsigned Align) { - PrefLoopAlignment = Align; + void setPrefLoopLogAlignment(unsigned LogAlign) { + PrefLoopLogAlignment = LogAlign; } /// Set the minimum stack alignment of an argument (in log2(bytes)). @@ -2692,14 +2692,14 @@ private: /// The minimum function alignment (used when optimizing for size, and to /// prevent explicitly provided alignment from leading to incorrect code). 
- unsigned MinFunctionLogAlignment; /// The preferred function alignment (used when alignment unspecified and /// optimizing for speed). - unsigned PrefFunctionAlignment; + unsigned PrefFunctionLogAlignment; - /// The preferred loop alignment. - unsigned PrefLoopAlignment; + /// The preferred loop alignment (in log2, not in bytes). + unsigned PrefLoopLogAlignment; /// Size in bits of the maximum atomics size the backend supports. /// Accesses larger than this will be expanded by AtomicExpandPass. diff --git a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp index 220c4758956..bf0be8ecee9 100644 --- a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp +++ b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp @@ -667,7 +667,7 @@ void AsmPrinter::EmitFunctionHeader() { EmitLinkage(&F, CurrentFnSym); if (MAI->hasFunctionAlignment()) - EmitAlignment(MF->getAlignment(), &F); + EmitAlignment(MF->getLogAlignment(), &F); if (MAI->hasDotTypeDotSizeDirective()) OutStreamer->EmitSymbolAttribute(CurrentFnSym, MCSA_ELF_TypeFunction); @@ -2905,8 +2905,8 @@ void AsmPrinter::EmitBasicBlockStart(const MachineBasicBlock &MBB) { } // Emit an alignment directive for this block, if needed. - if (unsigned Align = MBB.getAlignment()) - EmitAlignment(Align); + if (unsigned LogAlign = MBB.getLogAlignment()) + EmitAlignment(LogAlign); MCCodePaddingContext Context; setupCodePaddingContext(MBB, Context); OutStreamer->EmitCodePaddingBasicBlockStart(Context); diff --git a/lib/CodeGen/AsmPrinter/WinException.cpp b/lib/CodeGen/AsmPrinter/WinException.cpp index 155e91ce61a..ef5aa0499e1 100644 --- a/lib/CodeGen/AsmPrinter/WinException.cpp +++ b/lib/CodeGen/AsmPrinter/WinException.cpp @@ -203,8 +203,8 @@ void WinException::beginFunclet(const MachineBasicBlock &MBB, // We want our funclet's entry point to be aligned such that no nops will be // present after the label. - Asm->EmitAlignment(std::max(Asm->MF->getAlignment(), MBB.getAlignment()), - &F); + Asm->EmitAlignment( + std::max(Asm->MF->getLogAlignment(), MBB.getLogAlignment()), &F); // Now that we've emitted the alignment directive, point at our funclet. Asm->OutStreamer->EmitLabel(Sym); diff --git a/lib/CodeGen/BranchRelaxation.cpp b/lib/CodeGen/BranchRelaxation.cpp index 55fbf0163dd..4ee61cff4b4 100644 --- a/lib/CodeGen/BranchRelaxation.cpp +++ b/lib/CodeGen/BranchRelaxation.cpp @@ -65,13 +65,13 @@ class BranchRelaxation : public MachineFunctionPass { /// block. 
unsigned postOffset(const MachineBasicBlock &MBB) const { unsigned PO = Offset + Size; - unsigned Align = MBB.getAlignment(); - if (Align == 0) + unsigned LogAlign = MBB.getLogAlignment(); + if (LogAlign == 0) return PO; - unsigned AlignAmt = 1 << Align; - unsigned ParentAlign = MBB.getParent()->getAlignment(); - if (Align <= ParentAlign) + unsigned AlignAmt = 1 << LogAlign; + unsigned ParentLogAlign = MBB.getParent()->getLogAlignment(); + if (LogAlign <= ParentLogAlign) return PO + OffsetToAlignment(PO, AlignAmt); // The alignment of this MBB is larger than the function's alignment, so we @@ -128,9 +128,9 @@ void BranchRelaxation::verify() { #ifndef NDEBUG unsigned PrevNum = MF->begin()->getNumber(); for (MachineBasicBlock &MBB : *MF) { - unsigned Align = MBB.getAlignment(); + unsigned LogAlign = MBB.getLogAlignment(); unsigned Num = MBB.getNumber(); - assert(BlockInfo[Num].Offset % (1u << Align) == 0); + assert(BlockInfo[Num].Offset % (1u << LogAlign) == 0); assert(!Num || BlockInfo[PrevNum].postOffset(MBB) <= BlockInfo[Num].Offset); assert(BlockInfo[Num].Size == computeBlockSize(MBB)); PrevNum = Num; diff --git a/lib/CodeGen/MIRParser/MIParser.cpp b/lib/CodeGen/MIRParser/MIParser.cpp index a8fa2f1195d..f8c4dd66559 100644 --- a/lib/CodeGen/MIRParser/MIParser.cpp +++ b/lib/CodeGen/MIRParser/MIParser.cpp @@ -641,7 +641,7 @@ bool MIParser::parseBasicBlockDefinition( return error(Loc, Twine("redefinition of machine basic block with id #") + Twine(ID)); if (Alignment) - MBB->setAlignment(Alignment); + MBB->setLogAlignment(Log2_32(Alignment)); if (HasAddressTaken) MBB->setHasAddressTaken(); MBB->setIsEHPad(IsLandingPad); diff --git a/lib/CodeGen/MIRParser/MIRParser.cpp b/lib/CodeGen/MIRParser/MIRParser.cpp index 48ec0b2d6cf..2dd4fd3b9b7 100644 --- a/lib/CodeGen/MIRParser/MIRParser.cpp +++ b/lib/CodeGen/MIRParser/MIRParser.cpp @@ -393,7 +393,7 @@ MIRParserImpl::initializeMachineFunction(const yaml::MachineFunction &YamlMF, } if (YamlMF.Alignment) - MF.setAlignment(YamlMF.Alignment); + MF.setLogAlignment(Log2_32(YamlMF.Alignment)); MF.setExposesReturnsTwice(YamlMF.ExposesReturnsTwice); MF.setHasWinCFI(YamlMF.HasWinCFI); diff --git a/lib/CodeGen/MIRPrinter.cpp b/lib/CodeGen/MIRPrinter.cpp index 7febb11dcb8..18efe1f80eb 100644 --- a/lib/CodeGen/MIRPrinter.cpp +++ b/lib/CodeGen/MIRPrinter.cpp @@ -197,7 +197,7 @@ void MIRPrinter::print(const MachineFunction &MF) { yaml::MachineFunction YamlMF; YamlMF.Name = MF.getName(); - YamlMF.Alignment = MF.getAlignment(); + YamlMF.Alignment = 1UL << MF.getLogAlignment(); YamlMF.ExposesReturnsTwice = MF.exposesReturnsTwice(); YamlMF.HasWinCFI = MF.hasWinCFI(); @@ -629,9 +629,10 @@ void MIPrinter::print(const MachineBasicBlock &MBB) { OS << "landing-pad"; HasAttributes = true; } - if (MBB.getAlignment()) { + if (MBB.getLogAlignment()) { OS << (HasAttributes ? ", " : " ("); - OS << "align " << MBB.getAlignment(); + OS << "align " + << (1UL << MBB.getLogAlignment()); HasAttributes = true; } if (HasAttributes) diff --git a/lib/CodeGen/MachineBasicBlock.cpp b/lib/CodeGen/MachineBasicBlock.cpp index 050934daa5a..bd2ab45f669 100644 --- a/lib/CodeGen/MachineBasicBlock.cpp +++ b/lib/CodeGen/MachineBasicBlock.cpp @@ -326,9 +326,9 @@ void MachineBasicBlock::print(raw_ostream &OS, ModuleSlotTracker &MST, OS << "landing-pad"; HasAttributes = true; } - if (getAlignment()) { + if (getLogAlignment()) { OS << (HasAttributes ? 
", " : " ("); - OS << "align " << getAlignment(); + OS << "align " << getLogAlignment(); HasAttributes = true; } if (HasAttributes) diff --git a/lib/CodeGen/MachineBlockPlacement.cpp b/lib/CodeGen/MachineBlockPlacement.cpp index 641f14d617c..f2a64faab2e 100644 --- a/lib/CodeGen/MachineBlockPlacement.cpp +++ b/lib/CodeGen/MachineBlockPlacement.cpp @@ -79,16 +79,17 @@ STATISTIC(CondBranchTakenFreq, STATISTIC(UncondBranchTakenFreq, "Potential frequency of taking unconditional branches"); -static cl::opt AlignAllBlock("align-all-blocks", - cl::desc("Force the alignment of all " - "blocks in the function."), - cl::init(0), cl::Hidden); +static cl::opt AlignAllBlock( + "align-all-blocks", + cl::desc("Force the alignment of all blocks in the function in log2 format " + "(e.g 4 means align on 16B boundaries)."), + cl::init(0), cl::Hidden); static cl::opt AlignAllNonFallThruBlocks( "align-all-nofallthru-blocks", - cl::desc("Force the alignment of all " - "blocks that have no fall-through predecessors (i.e. don't add " - "nops that are executed)."), + cl::desc("Force the alignment of all blocks that have no fall-through " + "predecessors (i.e. don't add nops that are executed). In log2 " + "format (e.g 4 means align on 16B boundaries)."), cl::init(0), cl::Hidden); // FIXME: Find a good default for this flag and remove the flag. @@ -2763,8 +2764,8 @@ void MachineBlockPlacement::alignBlocks() { if (!L) continue; - unsigned Align = TLI->getPrefLoopAlignment(L); - if (!Align) + unsigned LogAlign = TLI->getPrefLoopLogAlignment(L); + if (!LogAlign) continue; // Don't care about loop alignment. // If the block is cold relative to the function entry don't waste space @@ -2788,7 +2789,7 @@ void MachineBlockPlacement::alignBlocks() { // Force alignment if all the predecessors are jumps. We already checked // that the block isn't cold above. if (!LayoutPred->isSuccessor(ChainBB)) { - ChainBB->setAlignment(Align); + ChainBB->setLogAlignment(LogAlign); continue; } @@ -2800,7 +2801,7 @@ void MachineBlockPlacement::alignBlocks() { MBPI->getEdgeProbability(LayoutPred, ChainBB); BlockFrequency LayoutEdgeFreq = MBFI->getBlockFreq(LayoutPred) * LayoutProb; if (LayoutEdgeFreq <= (Freq * ColdProb)) - ChainBB->setAlignment(Align); + ChainBB->setLogAlignment(LogAlign); } } @@ -3062,14 +3063,14 @@ bool MachineBlockPlacement::runOnMachineFunction(MachineFunction &MF) { if (AlignAllBlock) // Align all of the blocks in the function to a specific alignment. for (MachineBasicBlock &MBB : MF) - MBB.setAlignment(AlignAllBlock); + MBB.setLogAlignment(AlignAllBlock); else if (AlignAllNonFallThruBlocks) { // Align all of the blocks that have no fall-through predecessors to a // specific alignment. for (auto MBI = std::next(MF.begin()), MBE = MF.end(); MBI != MBE; ++MBI) { auto LayoutPred = std::prev(MBI); if (!LayoutPred->isSuccessor(&*MBI)) - MBI->setAlignment(AlignAllNonFallThruBlocks); + MBI->setLogAlignment(AlignAllNonFallThruBlocks); } } if (ViewBlockLayoutWithBFI != GVDT_None && diff --git a/lib/CodeGen/MachineFunction.cpp b/lib/CodeGen/MachineFunction.cpp index b771dd1a351..d136ebd437f 100644 --- a/lib/CodeGen/MachineFunction.cpp +++ b/lib/CodeGen/MachineFunction.cpp @@ -78,10 +78,11 @@ using namespace llvm; #define DEBUG_TYPE "codegen" -static cl::opt -AlignAllFunctions("align-all-functions", - cl::desc("Force the alignment of all functions."), - cl::init(0), cl::Hidden); +static cl::opt AlignAllFunctions( + "align-all-functions", + cl::desc("Force the alignment of all functions in log2 format (e.g. 
4 " + "means align on 16B boundaries)."), + cl::init(0), cl::Hidden); static const char *getPropertyName(MachineFunctionProperties::Property Prop) { using P = MachineFunctionProperties::Property; @@ -172,16 +173,16 @@ void MachineFunction::init() { FrameInfo->ensureMaxAlignment(F.getFnStackAlignment()); ConstantPool = new (Allocator) MachineConstantPool(getDataLayout()); - Alignment = STI->getTargetLowering()->getMinFunctionAlignment(); + LogAlignment = STI->getTargetLowering()->getMinFunctionLogAlignment(); // FIXME: Shouldn't use pref alignment if explicit alignment is set on F. // FIXME: Use Function::hasOptSize(). if (!F.hasFnAttribute(Attribute::OptimizeForSize)) - Alignment = std::max(Alignment, - STI->getTargetLowering()->getPrefFunctionAlignment()); + LogAlignment = std::max( + LogAlignment, STI->getTargetLowering()->getPrefFunctionLogAlignment()); if (AlignAllFunctions) - Alignment = AlignAllFunctions; + LogAlignment = AlignAllFunctions; JumpTableInfo = nullptr; diff --git a/lib/CodeGen/PatchableFunction.cpp b/lib/CodeGen/PatchableFunction.cpp index a3fa1b0ad8e..07f88597fe6 100644 --- a/lib/CodeGen/PatchableFunction.cpp +++ b/lib/CodeGen/PatchableFunction.cpp @@ -78,7 +78,7 @@ bool PatchableFunction::runOnMachineFunction(MachineFunction &MF) { MIB.add(MO); FirstActualI->eraseFromParent(); - MF.ensureAlignment(4); + MF.ensureLogAlignment(4); return true; } diff --git a/lib/CodeGen/TargetLoweringBase.cpp b/lib/CodeGen/TargetLoweringBase.cpp index e9a7d4b5252..970b2067d42 100644 --- a/lib/CodeGen/TargetLoweringBase.cpp +++ b/lib/CodeGen/TargetLoweringBase.cpp @@ -583,9 +583,9 @@ TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) { BooleanFloatContents = UndefinedBooleanContent; BooleanVectorContents = UndefinedBooleanContent; SchedPreferenceInfo = Sched::ILP; - MinFunctionAlignment = 0; - PrefFunctionAlignment = 0; - PrefLoopAlignment = 0; + MinFunctionLogAlignment = 0; + PrefFunctionLogAlignment = 0; + PrefLoopLogAlignment = 0; GatherAllAliasesMaxDepth = 18; MinStackArgumentAlignment = 1; // TODO: the default will be switched to 0 in the next commit, along diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp index 00b86eea3cf..9eb7047cc6b 100644 --- a/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -640,10 +640,10 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM, EnableExtLdPromotion = true; // Set required alignment. - setMinFunctionAlignment(2); + setMinFunctionLogAlignment(2); // Set preferred alignments. - setPrefFunctionAlignment(STI.getPrefFunctionAlignment()); - setPrefLoopAlignment(STI.getPrefLoopAlignment()); + setPrefFunctionLogAlignment(STI.getPrefFunctionLogAlignment()); + setPrefLoopLogAlignment(STI.getPrefLoopLogAlignment()); // Only change the limit for entries in a jump table if specified by // the sub target, but not at the command line. 
diff --git a/lib/Target/AArch64/AArch64Subtarget.cpp b/lib/Target/AArch64/AArch64Subtarget.cpp index 05112afb417..558bea368ef 100644 --- a/lib/Target/AArch64/AArch64Subtarget.cpp +++ b/lib/Target/AArch64/AArch64Subtarget.cpp @@ -71,22 +71,22 @@ void AArch64Subtarget::initializeProperties() { case CortexA35: break; case CortexA53: - PrefFunctionAlignment = 3; + PrefFunctionLogAlignment = 3; break; case CortexA55: break; case CortexA57: MaxInterleaveFactor = 4; - PrefFunctionAlignment = 4; + PrefFunctionLogAlignment = 4; break; case CortexA65: - PrefFunctionAlignment = 3; + PrefFunctionLogAlignment = 3; break; case CortexA72: case CortexA73: case CortexA75: case CortexA76: - PrefFunctionAlignment = 4; + PrefFunctionLogAlignment = 4; break; case Cyclone: CacheLineSize = 64; @@ -97,14 +97,14 @@ void AArch64Subtarget::initializeProperties() { case ExynosM1: MaxInterleaveFactor = 4; MaxJumpTableSize = 8; - PrefFunctionAlignment = 4; - PrefLoopAlignment = 3; + PrefFunctionLogAlignment = 4; + PrefLoopLogAlignment = 3; break; case ExynosM3: MaxInterleaveFactor = 4; MaxJumpTableSize = 20; - PrefFunctionAlignment = 5; - PrefLoopAlignment = 4; + PrefFunctionLogAlignment = 5; + PrefLoopLogAlignment = 4; break; case Falkor: MaxInterleaveFactor = 4; @@ -126,10 +126,10 @@ void AArch64Subtarget::initializeProperties() { MinVectorRegisterBitWidth = 128; break; case NeoverseE1: - PrefFunctionAlignment = 3; + PrefFunctionLogAlignment = 3; break; case NeoverseN1: - PrefFunctionAlignment = 4; + PrefFunctionLogAlignment = 4; break; case Saphira: MaxInterleaveFactor = 4; @@ -138,8 +138,8 @@ void AArch64Subtarget::initializeProperties() { break; case ThunderX2T99: CacheLineSize = 64; - PrefFunctionAlignment = 3; - PrefLoopAlignment = 2; + PrefFunctionLogAlignment = 3; + PrefLoopLogAlignment = 2; MaxInterleaveFactor = 4; PrefetchDistance = 128; MinPrefetchStride = 1024; @@ -152,15 +152,15 @@ void AArch64Subtarget::initializeProperties() { case ThunderXT81: case ThunderXT83: CacheLineSize = 128; - PrefFunctionAlignment = 3; - PrefLoopAlignment = 2; + PrefFunctionLogAlignment = 3; + PrefLoopLogAlignment = 2; // FIXME: remove this to enable 64-bit SLP if performance looks good. 
MinVectorRegisterBitWidth = 128; break; case TSV110: CacheLineSize = 64; - PrefFunctionAlignment = 4; - PrefLoopAlignment = 2; + PrefFunctionLogAlignment = 4; + PrefLoopLogAlignment = 2; break; } } diff --git a/lib/Target/AArch64/AArch64Subtarget.h b/lib/Target/AArch64/AArch64Subtarget.h index 3f9d9f5a3b2..ef360926aa9 100644 --- a/lib/Target/AArch64/AArch64Subtarget.h +++ b/lib/Target/AArch64/AArch64Subtarget.h @@ -198,8 +198,8 @@ protected: uint16_t PrefetchDistance = 0; uint16_t MinPrefetchStride = 1; unsigned MaxPrefetchIterationsAhead = UINT_MAX; - unsigned PrefFunctionAlignment = 0; - unsigned PrefLoopAlignment = 0; + unsigned PrefFunctionLogAlignment = 0; + unsigned PrefLoopLogAlignment = 0; unsigned MaxJumpTableSize = 0; unsigned WideningBaseCost = 0; @@ -359,8 +359,10 @@ public: unsigned getMaxPrefetchIterationsAhead() const { return MaxPrefetchIterationsAhead; } - unsigned getPrefFunctionAlignment() const { return PrefFunctionAlignment; } - unsigned getPrefLoopAlignment() const { return PrefLoopAlignment; } + unsigned getPrefFunctionLogAlignment() const { + return PrefFunctionLogAlignment; + } + unsigned getPrefLoopLogAlignment() const { return PrefLoopLogAlignment; } unsigned getMaximumJumpTableSize() const { return MaxJumpTableSize; } diff --git a/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp b/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp index 68792a8b55e..12118e6f5b3 100644 --- a/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp +++ b/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp @@ -417,7 +417,7 @@ bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) { // The starting address of all shader programs must be 256 bytes aligned. // Regular functions just need the basic required instruction alignment. - MF.setAlignment(MFI->isEntryFunction() ? 8 : 2); + MF.setLogAlignment(MFI->isEntryFunction() ? 8 : 2); SetupMachineFunction(MF); diff --git a/lib/Target/AMDGPU/R600AsmPrinter.cpp b/lib/Target/AMDGPU/R600AsmPrinter.cpp index 3fb18862fca..7918f6bd57c 100644 --- a/lib/Target/AMDGPU/R600AsmPrinter.cpp +++ b/lib/Target/AMDGPU/R600AsmPrinter.cpp @@ -104,7 +104,7 @@ bool R600AsmPrinter::runOnMachineFunction(MachineFunction &MF) { // Functions needs to be cacheline (256B) aligned. - MF.ensureAlignment(8); + MF.ensureLogAlignment(8); SetupMachineFunction(MF); diff --git a/lib/Target/AMDGPU/SIISelLowering.cpp b/lib/Target/AMDGPU/SIISelLowering.cpp index 5516f3742dd..7430e878a09 100644 --- a/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/lib/Target/AMDGPU/SIISelLowering.cpp @@ -10681,15 +10681,15 @@ void SITargetLowering::computeKnownBitsForFrameIndex(const SDValue Op, Known.Zero.setHighBits(getSubtarget()->getKnownHighZeroBitsForFrameIndex()); } -unsigned SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { - const unsigned PrefAlign = TargetLowering::getPrefLoopAlignment(ML); - const unsigned CacheLineAlign = 6; // log2(64) +unsigned SITargetLowering::getPrefLoopLogAlignment(MachineLoop *ML) const { + const unsigned PrefLogAlign = TargetLowering::getPrefLoopLogAlignment(ML); + const unsigned CacheLineLogAlign = 6; // log2(64) // Pre-GFX10 target did not benefit from loop alignment if (!ML || DisableLoopAlignment || (getSubtarget()->getGeneration() < AMDGPUSubtarget::GFX10) || getSubtarget()->hasInstFwdPrefetchBug()) - return PrefAlign; + return PrefLogAlign; // On GFX10 I$ is 4 x 64 bytes cache lines. // By default prefetcher keeps one cache line behind and reads two ahead. 
@@ -10703,28 +10703,28 @@ unsigned SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); const MachineBasicBlock *Header = ML->getHeader(); - if (Header->getAlignment() != PrefAlign) - return Header->getAlignment(); // Already processed. + if (Header->getLogAlignment() != PrefLogAlign) + return Header->getLogAlignment(); // Already processed. unsigned LoopSize = 0; for (const MachineBasicBlock *MBB : ML->blocks()) { // If inner loop block is aligned assume in average half of the alignment // size to be added as nops. if (MBB != Header) - LoopSize += (1 << MBB->getAlignment()) / 2; + LoopSize += (1 << MBB->getLogAlignment()) / 2; for (const MachineInstr &MI : *MBB) { LoopSize += TII->getInstSizeInBytes(MI); if (LoopSize > 192) - return PrefAlign; + return PrefLogAlign; } } if (LoopSize <= 64) - return PrefAlign; + return PrefLogAlign; if (LoopSize <= 128) - return CacheLineAlign; + return CacheLineLogAlign; // If any of parent loops is surrounded by prefetch instructions do not // insert new for inner loop, which would reset parent's settings. @@ -10732,7 +10732,7 @@ unsigned SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { if (MachineBasicBlock *Exit = P->getExitBlock()) { auto I = Exit->getFirstNonDebugInstr(); if (I != Exit->end() && I->getOpcode() == AMDGPU::S_INST_PREFETCH) - return CacheLineAlign; + return CacheLineLogAlign; } } @@ -10749,7 +10749,7 @@ unsigned SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { .addImm(2); // prefetch 1 line behind PC } - return CacheLineAlign; + return CacheLineLogAlign; } LLVM_ATTRIBUTE_UNUSED diff --git a/lib/Target/AMDGPU/SIISelLowering.h b/lib/Target/AMDGPU/SIISelLowering.h index 4e98b2aab3a..217152f78f2 100644 --- a/lib/Target/AMDGPU/SIISelLowering.h +++ b/lib/Target/AMDGPU/SIISelLowering.h @@ -379,8 +379,7 @@ public: unsigned Depth = 0) const override; AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override; - unsigned getPrefLoopAlignment(MachineLoop *ML) const override; - + unsigned getPrefLoopLogAlignment(MachineLoop *ML) const override; void allocateHSAUserSGPRs(CCState &CCInfo, MachineFunction &MF, diff --git a/lib/Target/ARC/ARCMachineFunctionInfo.h b/lib/Target/ARC/ARCMachineFunctionInfo.h index 31aa5b93246..f59b0ae65db 100644 --- a/lib/Target/ARC/ARCMachineFunctionInfo.h +++ b/lib/Target/ARC/ARCMachineFunctionInfo.h @@ -35,7 +35,7 @@ public: : ReturnStackOffsetSet(false), VarArgsFrameIndex(0), ReturnStackOffset(-1U), MaxCallStackReq(0) { // Functions are 4-byte (2**2) aligned. 
- MF.setAlignment(2); + MF.setLogAlignment(2); } ~ARCFunctionInfo() {} diff --git a/lib/Target/ARM/ARM.td b/lib/Target/ARM/ARM.td index 8dcddd25429..6616ae5d160 100644 --- a/lib/Target/ARM/ARM.td +++ b/lib/Target/ARM/ARM.td @@ -302,7 +302,7 @@ def FeatureVMLxForwarding : SubtargetFeature<"vmlx-forwarding", def FeaturePref32BitThumb : SubtargetFeature<"32bit", "Pref32BitThumb", "true", "Prefer 32-bit Thumb instrs">; -def FeaturePrefLoopAlign32 : SubtargetFeature<"loop-align", "PrefLoopAlignment","2", +def FeaturePrefLoopAlign32 : SubtargetFeature<"loop-align", "PrefLoopLogAlignment","2", "Prefer 32-bit alignment for loops">; def FeatureMVEVectorCostFactor1 : SubtargetFeature<"mve1beat", "MVEVectorCostFactor", "1", diff --git a/lib/Target/ARM/ARMBasicBlockInfo.cpp b/lib/Target/ARM/ARMBasicBlockInfo.cpp index 2de90e816b3..d8ca5fdda80 100644 --- a/lib/Target/ARM/ARMBasicBlockInfo.cpp +++ b/lib/Target/ARM/ARMBasicBlockInfo.cpp @@ -63,7 +63,7 @@ void ARMBasicBlockUtils::computeBlockSize(MachineBasicBlock *MBB) { // tBR_JTr contains a .align 2 directive. if (!MBB->empty() && MBB->back().getOpcode() == ARM::tBR_JTr) { BBI.PostAlign = 2; - MBB->getParent()->ensureAlignment(2); + MBB->getParent()->ensureLogAlignment(2); } } @@ -126,7 +126,7 @@ void ARMBasicBlockUtils::adjustBBOffsetsAfter(MachineBasicBlock *BB) { for(unsigned i = BBNum + 1, e = MF.getNumBlockIDs(); i < e; ++i) { // Get the offset and known bits at the end of the layout predecessor. // Include the alignment of the current block. - unsigned LogAlign = MF.getBlockNumbered(i)->getAlignment(); + unsigned LogAlign = MF.getBlockNumbered(i)->getLogAlignment(); unsigned Offset = BBInfo[i - 1].postOffset(LogAlign); unsigned KnownBits = BBInfo[i - 1].postKnownBits(LogAlign); diff --git a/lib/Target/ARM/ARMConstantIslandPass.cpp b/lib/Target/ARM/ARMConstantIslandPass.cpp index 5283bb52ee4..ae62d9789bb 100644 --- a/lib/Target/ARM/ARMConstantIslandPass.cpp +++ b/lib/Target/ARM/ARMConstantIslandPass.cpp @@ -396,7 +396,7 @@ bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) { // Functions with jump tables need an alignment of 4 because they use the ADR // instruction, which aligns the PC to 4 bytes before adding an offset. if (!T2JumpTables.empty()) - MF->ensureAlignment(2); + MF->ensureLogAlignment(2); /// Remove dead constant pool entries. MadeChange |= removeUnusedCPEntries(); @@ -486,20 +486,21 @@ ARMConstantIslands::doInitialConstPlacement(std::vector &CPEMIs) MF->push_back(BB); // MachineConstantPool measures alignment in bytes. We measure in log2(bytes). - unsigned MaxAlign = Log2_32(MCP->getConstantPoolAlignment()); + unsigned MaxLogAlign = Log2_32(MCP->getConstantPoolAlignment()); // Mark the basic block as required by the const-pool. - BB->setAlignment(MaxAlign); + BB->setLogAlignment(MaxLogAlign); // The function needs to be as aligned as the basic blocks. The linker may // move functions around based on their alignment. - MF->ensureAlignment(BB->getAlignment()); + MF->ensureLogAlignment(BB->getLogAlignment()); // Order the entries in BB by descending alignment. That ensures correct // alignment of all entries as long as BB is sufficiently aligned. Keep // track of the insertion point for each alignment. We are going to bucket // sort the entries as they are created. - SmallVector InsPoint(MaxAlign + 1, BB->end()); + SmallVector InsPoint(MaxLogAlign + 1, + BB->end()); // Add all of the constants from the constant pool to the end block, use an // identity mapping of CPI's to CPE's. 
@@ -524,7 +525,7 @@ ARMConstantIslands::doInitialConstPlacement(std::vector &CPEMIs) // Ensure that future entries with higher alignment get inserted before // CPEMI. This is bucket sort with iterators. - for (unsigned a = LogAlign + 1; a <= MaxAlign; ++a) + for (unsigned a = LogAlign + 1; a <= MaxLogAlign; ++a) if (InsPoint[a] == InsAt) InsPoint[a] = CPEMI; @@ -685,7 +686,7 @@ initializeFunctionInfo(const std::vector &CPEMIs) { BBInfoVector &BBInfo = BBUtils->getBBInfo(); // The known bits of the entry block offset are determined by the function // alignment. - BBInfo.front().KnownBits = MF->getAlignment(); + BBInfo.front().KnownBits = MF->getLogAlignment(); // Compute block offsets and known bits. BBUtils->adjustBBOffsetsAfter(&MF->front()); @@ -1015,14 +1016,14 @@ bool ARMConstantIslands::isWaterInRange(unsigned UserOffset, BBInfoVector &BBInfo = BBUtils->getBBInfo(); unsigned CPELogAlign = getCPELogAlign(U.CPEMI); unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPELogAlign); - unsigned NextBlockOffset, NextBlockAlignment; + unsigned NextBlockOffset, NextBlockLogAlignment; MachineFunction::const_iterator NextBlock = Water->getIterator(); if (++NextBlock == MF->end()) { NextBlockOffset = BBInfo[Water->getNumber()].postOffset(); - NextBlockAlignment = 0; + NextBlockLogAlignment = 0; } else { NextBlockOffset = BBInfo[NextBlock->getNumber()].Offset; - NextBlockAlignment = NextBlock->getAlignment(); + NextBlockLogAlignment = NextBlock->getLogAlignment(); } unsigned Size = U.CPEMI->getOperand(2).getImm(); unsigned CPEEnd = CPEOffset + Size; @@ -1034,13 +1035,13 @@ bool ARMConstantIslands::isWaterInRange(unsigned UserOffset, Growth = CPEEnd - NextBlockOffset; // Compute the padding that would go at the end of the CPE to align the next // block. - Growth += OffsetToAlignment(CPEEnd, 1ULL << NextBlockAlignment); + Growth += OffsetToAlignment(CPEEnd, 1ULL << NextBlockLogAlignment); // If the CPE is to be inserted before the instruction, that will raise // the offset of the instruction. Also account for unknown alignment padding // in blocks between CPE and the user. if (CPEOffset < UserOffset) - UserOffset += Growth + UnknownPadding(MF->getAlignment(), CPELogAlign); + UserOffset += Growth + UnknownPadding(MF->getLogAlignment(), CPELogAlign); } else // CPE fits in existing padding. Growth = 0; @@ -1315,7 +1316,7 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex, // Try to split the block so it's fully aligned. Compute the latest split // point where we can add a 4-byte branch instruction, and then align to // LogAlign which is the largest possible alignment in the function. - unsigned LogAlign = MF->getAlignment(); + unsigned LogAlign = MF->getLogAlignment(); assert(LogAlign >= CPELogAlign && "Over-aligned constant pool entry"); unsigned KnownBits = UserBBI.internalKnownBits(); unsigned UPad = UnknownPadding(LogAlign, KnownBits); @@ -1493,9 +1494,9 @@ bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex, // Always align the new block because CP entries can be smaller than 4 // bytes. Be careful not to decrease the existing alignment, e.g. NewMBB may // be an already aligned constant pool block. - const unsigned Align = isThumb ? 1 : 2; - if (NewMBB->getAlignment() < Align) - NewMBB->setAlignment(Align); + const unsigned LogAlign = isThumb ? 1 : 2; + if (NewMBB->getLogAlignment() < LogAlign) + NewMBB->setLogAlignment(LogAlign); // Remove the original WaterList entry; we want subsequent insertions in // this vicinity to go after the one we're about to insert. 
This @@ -1524,7 +1525,7 @@ bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex, decrementCPEReferenceCount(CPI, CPEMI); // Mark the basic block as aligned as required by the const-pool entry. - NewIsland->setAlignment(getCPELogAlign(U.CPEMI)); + NewIsland->setLogAlignment(getCPELogAlign(U.CPEMI)); // Increase the size of the island block to account for the new entry. BBUtils->adjustBBSize(NewIsland, Size); @@ -1558,10 +1559,10 @@ void ARMConstantIslands::removeDeadCPEMI(MachineInstr *CPEMI) { BBInfo[CPEBB->getNumber()].Size = 0; // This block no longer needs to be aligned. - CPEBB->setAlignment(0); + CPEBB->setLogAlignment(0); } else // Entries are sorted by descending alignment, so realign from the front. - CPEBB->setAlignment(getCPELogAlign(&*CPEBB->begin())); + CPEBB->setLogAlignment(getCPELogAlign(&*CPEBB->begin())); BBUtils->adjustBBOffsetsAfter(CPEBB); // An island has only one predecessor BB and one successor BB. Check if diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp index 907517461e7..bbaa9431dec 100644 --- a/lib/Target/ARM/ARMISelLowering.cpp +++ b/lib/Target/ARM/ARMISelLowering.cpp @@ -1419,9 +1419,9 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM, // Prefer likely predicted branches to selects on out-of-order cores. PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder(); - setPrefLoopAlignment(Subtarget->getPrefLoopAlignment()); + setPrefLoopLogAlignment(Subtarget->getPrefLoopLogAlignment()); - setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2); + setMinFunctionLogAlignment(Subtarget->isThumb() ? 1 : 2); if (Subtarget->isThumb() || Subtarget->isThumb2()) setTargetDAGCombine(ISD::ABS); diff --git a/lib/Target/ARM/ARMSubtarget.cpp b/lib/Target/ARM/ARMSubtarget.cpp index 54443d2126f..155fbce98a8 100644 --- a/lib/Target/ARM/ARMSubtarget.cpp +++ b/lib/Target/ARM/ARMSubtarget.cpp @@ -300,7 +300,7 @@ void ARMSubtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) { LdStMultipleTiming = SingleIssuePlusExtras; MaxInterleaveFactor = 4; if (!isThumb()) - PrefLoopAlignment = 3; + PrefLoopLogAlignment = 3; break; case Kryo: break; diff --git a/lib/Target/ARM/ARMSubtarget.h b/lib/Target/ARM/ARMSubtarget.h index dde9dcbdb1c..0491420c51f 100644 --- a/lib/Target/ARM/ARMSubtarget.h +++ b/lib/Target/ARM/ARMSubtarget.h @@ -470,7 +470,7 @@ protected: int PreISelOperandLatencyAdjustment = 2; /// What alignment is preferred for loop bodies, in log2(bytes). - unsigned PrefLoopAlignment = 0; + unsigned PrefLoopLogAlignment = 0; /// The cost factor for MVE instructions, representing the multiple beats an // instruction can take. 
The default is 2, (set in initSubtargetFeatures so @@ -859,9 +859,7 @@ public: return isROPI() || !isTargetELF(); } - unsigned getPrefLoopAlignment() const { - return PrefLoopAlignment; - } + unsigned getPrefLoopLogAlignment() const { return PrefLoopLogAlignment; } unsigned getMVEVectorCostFactor() const { return MVEVectorCostFactor; } diff --git a/lib/Target/AVR/AVRISelLowering.cpp b/lib/Target/AVR/AVRISelLowering.cpp index 3d5c481b506..12b08e8ece5 100644 --- a/lib/Target/AVR/AVRISelLowering.cpp +++ b/lib/Target/AVR/AVRISelLowering.cpp @@ -236,7 +236,7 @@ AVRTargetLowering::AVRTargetLowering(const AVRTargetMachine &TM, setLibcallName(RTLIB::SIN_F32, "sin"); setLibcallName(RTLIB::COS_F32, "cos"); - setMinFunctionAlignment(1); + setMinFunctionLogAlignment(1); setMinimumJumpTableEntries(UINT_MAX); } diff --git a/lib/Target/BPF/BPFISelLowering.cpp b/lib/Target/BPF/BPFISelLowering.cpp index 716bef461a9..4521c6de4ee 100644 --- a/lib/Target/BPF/BPFISelLowering.cpp +++ b/lib/Target/BPF/BPFISelLowering.cpp @@ -133,8 +133,8 @@ BPFTargetLowering::BPFTargetLowering(const TargetMachine &TM, setBooleanContents(ZeroOrOneBooleanContent); // Function alignments (log2) - setMinFunctionAlignment(3); - setPrefFunctionAlignment(3); + setMinFunctionLogAlignment(3); + setPrefFunctionLogAlignment(3); if (BPFExpandMemcpyInOrder) { // LLVM generic code will try to expand memcpy into load/store pairs at this diff --git a/lib/Target/Hexagon/HexagonBranchRelaxation.cpp b/lib/Target/Hexagon/HexagonBranchRelaxation.cpp index ee93739b2c7..fec081d324e 100644 --- a/lib/Target/Hexagon/HexagonBranchRelaxation.cpp +++ b/lib/Target/Hexagon/HexagonBranchRelaxation.cpp @@ -105,11 +105,11 @@ void HexagonBranchRelaxation::computeOffset(MachineFunction &MF, // offset of the current instruction from the start. unsigned InstOffset = 0; for (auto &B : MF) { - if (B.getAlignment()) { + if (B.getLogAlignment()) { // Although we don't know the exact layout of the final code, we need // to account for alignment padding somehow. This heuristic pads each // aligned basic block according to the alignment value. - int ByteAlign = (1u << B.getAlignment()) - 1; + int ByteAlign = (1u << B.getLogAlignment()) - 1; InstOffset = (InstOffset + ByteAlign) & ~(ByteAlign); } OffsetMap[&B] = InstOffset; diff --git a/lib/Target/Hexagon/HexagonFixupHwLoops.cpp b/lib/Target/Hexagon/HexagonFixupHwLoops.cpp index f7edc168de4..12eda8af363 100644 --- a/lib/Target/Hexagon/HexagonFixupHwLoops.cpp +++ b/lib/Target/Hexagon/HexagonFixupHwLoops.cpp @@ -114,11 +114,11 @@ bool HexagonFixupHwLoops::fixupLoopInstrs(MachineFunction &MF) { // First pass - compute the offset of each basic block. for (const MachineBasicBlock &MBB : MF) { - if (MBB.getAlignment()) { + if (MBB.getLogAlignment()) { // Although we don't know the exact layout of the final code, we need // to account for alignment padding somehow. This heuristic pads each // aligned basic block according to the alignment value. 
- int ByteAlign = (1u << MBB.getAlignment()) - 1; + int ByteAlign = (1u << MBB.getLogAlignment()) - 1; InstOffset = (InstOffset + ByteAlign) & ~(ByteAlign); } diff --git a/lib/Target/Hexagon/HexagonISelLowering.cpp b/lib/Target/Hexagon/HexagonISelLowering.cpp index a443a29e061..8cdf06252ad 100644 --- a/lib/Target/Hexagon/HexagonISelLowering.cpp +++ b/lib/Target/Hexagon/HexagonISelLowering.cpp @@ -1235,9 +1235,9 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM, Subtarget(ST) { auto &HRI = *Subtarget.getRegisterInfo(); - setPrefLoopAlignment(4); - setPrefFunctionAlignment(4); - setMinFunctionAlignment(2); + setPrefLoopLogAlignment(4); + setPrefFunctionLogAlignment(4); + setMinFunctionLogAlignment(2); setStackPointerRegisterToSaveRestore(HRI.getStackRegister()); setBooleanContents(TargetLoweringBase::UndefinedBooleanContent); setBooleanVectorContents(TargetLoweringBase::UndefinedBooleanContent); diff --git a/lib/Target/Lanai/LanaiISelLowering.cpp b/lib/Target/Lanai/LanaiISelLowering.cpp index 86e7769caf7..4a2f6dac6e3 100644 --- a/lib/Target/Lanai/LanaiISelLowering.cpp +++ b/lib/Target/Lanai/LanaiISelLowering.cpp @@ -145,8 +145,8 @@ LanaiTargetLowering::LanaiTargetLowering(const TargetMachine &TM, setTargetDAGCombine(ISD::XOR); // Function alignments (log2) - setMinFunctionAlignment(2); - setPrefFunctionAlignment(2); + setMinFunctionLogAlignment(2); + setPrefFunctionLogAlignment(2); setJumpIsExpensive(true); diff --git a/lib/Target/MSP430/MSP430ISelLowering.cpp b/lib/Target/MSP430/MSP430ISelLowering.cpp index c400fa5e45f..2701ca57cfd 100644 --- a/lib/Target/MSP430/MSP430ISelLowering.cpp +++ b/lib/Target/MSP430/MSP430ISelLowering.cpp @@ -327,8 +327,8 @@ MSP430TargetLowering::MSP430TargetLowering(const TargetMachine &TM, setLibcallCallingConv(RTLIB::OGT_F64, CallingConv::MSP430_BUILTIN); // TODO: __mspabi_srall, __mspabi_srlll, __mspabi_sllll - setMinFunctionAlignment(1); - setPrefFunctionAlignment(1); + setMinFunctionLogAlignment(1); + setPrefFunctionLogAlignment(1); } SDValue MSP430TargetLowering::LowerOperation(SDValue Op, diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h b/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h index ad5aff6552f..94265250e9b 100644 --- a/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h +++ b/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h @@ -14,7 +14,7 @@ namespace llvm { // Log2 of the NaCl MIPS sandbox's instruction bundle size. -static const unsigned MIPS_NACL_BUNDLE_ALIGN = 4u; +static const unsigned MIPS_NACL_BUNDLE_LOG_ALIGN = 4u; bool isBasePlusOffsetMemoryAccess(unsigned Opcode, unsigned *AddrIdx, bool *IsStore = nullptr); diff --git a/lib/Target/Mips/MCTargetDesc/MipsNaClELFStreamer.cpp b/lib/Target/Mips/MCTargetDesc/MipsNaClELFStreamer.cpp index c050db8a17f..191ac823641 100644 --- a/lib/Target/Mips/MCTargetDesc/MipsNaClELFStreamer.cpp +++ b/lib/Target/Mips/MCTargetDesc/MipsNaClELFStreamer.cpp @@ -270,7 +270,7 @@ MCELFStreamer *createMipsNaClELFStreamer(MCContext &Context, S->getAssembler().setRelaxAll(true); // Set bundle-alignment as required by the NaCl ABI for the target. - S->EmitBundleAlignMode(MIPS_NACL_BUNDLE_ALIGN); + S->EmitBundleAlignMode(MIPS_NACL_BUNDLE_LOG_ALIGN); return S; } diff --git a/lib/Target/Mips/MipsAsmPrinter.cpp b/lib/Target/Mips/MipsAsmPrinter.cpp index b9e67bb470b..750d0c5d463 100644 --- a/lib/Target/Mips/MipsAsmPrinter.cpp +++ b/lib/Target/Mips/MipsAsmPrinter.cpp @@ -400,7 +400,7 @@ void MipsAsmPrinter::EmitFunctionEntryLabel() { // NaCl sandboxing requires that indirect call instructions are masked. 
// This means that function entry points should be bundle-aligned. if (Subtarget->isTargetNaCl()) - EmitAlignment(std::max(MF->getAlignment(), MIPS_NACL_BUNDLE_ALIGN)); + EmitAlignment(std::max(MF->getLogAlignment(), MIPS_NACL_BUNDLE_LOG_ALIGN)); if (Subtarget->inMicroMipsMode()) { TS.emitDirectiveSetMicroMips(); @@ -1278,14 +1278,14 @@ void MipsAsmPrinter::NaClAlignIndirectJumpTargets(MachineFunction &MF) { const std::vector &MBBs = JT[I].MBBs; for (unsigned J = 0; J < MBBs.size(); ++J) - MBBs[J]->setAlignment(MIPS_NACL_BUNDLE_ALIGN); + MBBs[J]->setLogAlignment(MIPS_NACL_BUNDLE_LOG_ALIGN); } } // If basic block address is taken, block can be target of indirect branch. for (auto &MBB : MF) { if (MBB.hasAddressTaken()) - MBB.setAlignment(MIPS_NACL_BUNDLE_ALIGN); + MBB.setLogAlignment(MIPS_NACL_BUNDLE_LOG_ALIGN); } } diff --git a/lib/Target/Mips/MipsBranchExpansion.cpp b/lib/Target/Mips/MipsBranchExpansion.cpp index 1523a6c020a..8b558ce4039 100644 --- a/lib/Target/Mips/MipsBranchExpansion.cpp +++ b/lib/Target/Mips/MipsBranchExpansion.cpp @@ -507,7 +507,7 @@ void MipsBranchExpansion::expandToLongBranch(MBBInfo &I) { .addImm(0); if (STI->isTargetNaCl()) // Bundle-align the target of indirect branch JR. - TgtMBB->setAlignment(MIPS_NACL_BUNDLE_ALIGN); + TgtMBB->setLogAlignment(MIPS_NACL_BUNDLE_LOG_ALIGN); // In NaCl, modifying the sp is not allowed in branch delay slot. // For MIPS32R6, we can skip using a delay slot branch. diff --git a/lib/Target/Mips/MipsConstantIslandPass.cpp b/lib/Target/Mips/MipsConstantIslandPass.cpp index eea28df7eda..8907a72ac87 100644 --- a/lib/Target/Mips/MipsConstantIslandPass.cpp +++ b/lib/Target/Mips/MipsConstantIslandPass.cpp @@ -534,21 +534,22 @@ MipsConstantIslands::doInitialPlacement(std::vector &CPEMIs) { MF->push_back(BB); // MachineConstantPool measures alignment in bytes. We measure in log2(bytes). - unsigned MaxAlign = Log2_32(MCP->getConstantPoolAlignment()); + unsigned MaxLogAlign = Log2_32(MCP->getConstantPoolAlignment()); // Mark the basic block as required by the const-pool. // If AlignConstantIslands isn't set, use 4-byte alignment for everything. - BB->setAlignment(AlignConstantIslands ? MaxAlign : 2); + BB->setLogAlignment(AlignConstantIslands ? MaxLogAlign : 2); // The function needs to be as aligned as the basic blocks. The linker may // move functions around based on their alignment. - MF->ensureAlignment(BB->getAlignment()); + MF->ensureLogAlignment(BB->getLogAlignment()); // Order the entries in BB by descending alignment. That ensures correct // alignment of all entries as long as BB is sufficiently aligned. Keep // track of the insertion point for each alignment. We are going to bucket // sort the entries as they are created. - SmallVector InsPoint(MaxAlign + 1, BB->end()); + SmallVector InsPoint(MaxLogAlign + 1, + BB->end()); // Add all of the constants from the constant pool to the end block, use an // identity mapping of CPI's to CPE's. @@ -576,7 +577,7 @@ MipsConstantIslands::doInitialPlacement(std::vector &CPEMIs) { // Ensure that future entries with higher alignment get inserted before // CPEMI. This is bucket sort with iterators. - for (unsigned a = LogAlign + 1; a <= MaxAlign; ++a) + for (unsigned a = LogAlign + 1; a <= MaxLogAlign; ++a) if (InsPoint[a] == InsAt) InsPoint[a] = CPEMI; // Add a new CPEntry, but no corresponding CPUser yet. 
@@ -942,14 +943,14 @@ bool MipsConstantIslands::isWaterInRange(unsigned UserOffset, unsigned &Growth) { unsigned CPELogAlign = getCPELogAlign(*U.CPEMI); unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPELogAlign); - unsigned NextBlockOffset, NextBlockAlignment; + unsigned NextBlockOffset, NextBlockLogAlignment; MachineFunction::const_iterator NextBlock = ++Water->getIterator(); if (NextBlock == MF->end()) { NextBlockOffset = BBInfo[Water->getNumber()].postOffset(); - NextBlockAlignment = 0; + NextBlockLogAlignment = 0; } else { NextBlockOffset = BBInfo[NextBlock->getNumber()].Offset; - NextBlockAlignment = NextBlock->getAlignment(); + NextBlockLogAlignment = NextBlock->getLogAlignment(); } unsigned Size = U.CPEMI->getOperand(2).getImm(); unsigned CPEEnd = CPEOffset + Size; @@ -961,7 +962,7 @@ bool MipsConstantIslands::isWaterInRange(unsigned UserOffset, Growth = CPEEnd - NextBlockOffset; // Compute the padding that would go at the end of the CPE to align the next // block. - Growth += OffsetToAlignment(CPEEnd, 1ULL << NextBlockAlignment); + Growth += OffsetToAlignment(CPEEnd, 1ULL << NextBlockLogAlignment); // If the CPE is to be inserted before the instruction, that will raise // the offset of the instruction. Also account for unknown alignment padding @@ -1258,7 +1259,7 @@ void MipsConstantIslands::createNewWater(unsigned CPUserIndex, // Try to split the block so it's fully aligned. Compute the latest split // point where we can add a 4-byte branch instruction, and then align to // LogAlign which is the largest possible alignment in the function. - unsigned LogAlign = MF->getAlignment(); + unsigned LogAlign = MF->getLogAlignment(); assert(LogAlign >= CPELogAlign && "Over-aligned constant pool entry"); unsigned BaseInsertOffset = UserOffset + U.getMaxDisp(); LLVM_DEBUG(dbgs() << format("Split in middle of big block before %#x", @@ -1399,7 +1400,7 @@ bool MipsConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) { ++NumCPEs; // Mark the basic block as aligned as required by the const-pool entry. - NewIsland->setAlignment(getCPELogAlign(*U.CPEMI)); + NewIsland->setLogAlignment(getCPELogAlign(*U.CPEMI)); // Increase the size of the island block to account for the new entry. BBInfo[NewIsland->getNumber()].Size += Size; @@ -1431,10 +1432,10 @@ void MipsConstantIslands::removeDeadCPEMI(MachineInstr *CPEMI) { BBInfo[CPEBB->getNumber()].Size = 0; // This block no longer needs to be aligned. - CPEBB->setAlignment(0); + CPEBB->setLogAlignment(0); } else // Entries are sorted by descending alignment, so realign from the front. - CPEBB->setAlignment(getCPELogAlign(*CPEBB->begin())); + CPEBB->setLogAlignment(getCPELogAlign(*CPEBB->begin())); adjustBBOffsetsAfter(CPEBB); // An island has only one predecessor BB and one successor BB. Check if @@ -1529,7 +1530,7 @@ MipsConstantIslands::fixupUnconditionalBr(ImmBranch &Br) { // We should have a way to back out this alignment restriction if we "can" later. // but it is not harmful. // - DestBB->setAlignment(2); + DestBB->setLogAlignment(2); Br.MaxDisp = ((1<<24)-1) * 2; MI->setDesc(TII->get(Mips::JalB16)); } diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp index e556aa5fda1..c1df9a63b40 100644 --- a/lib/Target/Mips/MipsISelLowering.cpp +++ b/lib/Target/Mips/MipsISelLowering.cpp @@ -518,7 +518,7 @@ MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM, setLibcallName(RTLIB::SRA_I128, nullptr); } - setMinFunctionAlignment(Subtarget.isGP64bit() ? 
3 : 2); + setMinFunctionLogAlignment(Subtarget.isGP64bit() ? 3 : 2); // The arguments on the stack are defined in terms of 4-byte slots on O32 // and 8-byte slots on N32/N64. diff --git a/lib/Target/PowerPC/PPCBranchSelector.cpp b/lib/Target/PowerPC/PPCBranchSelector.cpp index 6d1f09278b7..353a3481132 100644 --- a/lib/Target/PowerPC/PPCBranchSelector.cpp +++ b/lib/Target/PowerPC/PPCBranchSelector.cpp @@ -81,14 +81,14 @@ FunctionPass *llvm::createPPCBranchSelectionPass() { /// original Offset. unsigned PPCBSel::GetAlignmentAdjustment(MachineBasicBlock &MBB, unsigned Offset) { - unsigned Align = MBB.getAlignment(); - if (!Align) + unsigned LogAlign = MBB.getLogAlignment(); + if (!LogAlign) return 0; - unsigned AlignAmt = 1 << Align; - unsigned ParentAlign = MBB.getParent()->getAlignment(); + unsigned AlignAmt = 1 << LogAlign; + unsigned ParentLogAlign = MBB.getParent()->getLogAlignment(); - if (Align <= ParentAlign) + if (LogAlign <= ParentLogAlign) return OffsetToAlignment(Offset, AlignAmt); // The alignment of this MBB is larger than the function's alignment, so we @@ -179,21 +179,21 @@ int PPCBSel::computeBranchSize(MachineFunction &Fn, const MachineBasicBlock *Dest, unsigned BrOffset) { int BranchSize; - unsigned MaxAlign = 2; + unsigned MaxLogAlign = 2; bool NeedExtraAdjustment = false; if (Dest->getNumber() <= Src->getNumber()) { // If this is a backwards branch, the delta is the offset from the // start of this block to this branch, plus the sizes of all blocks // from this block to the dest. BranchSize = BrOffset; - MaxAlign = std::max(MaxAlign, Src->getAlignment()); + MaxLogAlign = std::max(MaxLogAlign, Src->getLogAlignment()); int DestBlock = Dest->getNumber(); BranchSize += BlockSizes[DestBlock].first; for (unsigned i = DestBlock+1, e = Src->getNumber(); i < e; ++i) { BranchSize += BlockSizes[i].first; - MaxAlign = std::max(MaxAlign, - Fn.getBlockNumbered(i)->getAlignment()); + MaxLogAlign = + std::max(MaxLogAlign, Fn.getBlockNumbered(i)->getLogAlignment()); } NeedExtraAdjustment = (FirstImpreciseBlock >= 0) && @@ -204,11 +204,11 @@ int PPCBSel::computeBranchSize(MachineFunction &Fn, unsigned StartBlock = Src->getNumber(); BranchSize = BlockSizes[StartBlock].first - BrOffset; - MaxAlign = std::max(MaxAlign, Dest->getAlignment()); + MaxLogAlign = std::max(MaxLogAlign, Dest->getLogAlignment()); for (unsigned i = StartBlock+1, e = Dest->getNumber(); i != e; ++i) { BranchSize += BlockSizes[i].first; - MaxAlign = std::max(MaxAlign, - Fn.getBlockNumbered(i)->getAlignment()); + MaxLogAlign = + std::max(MaxLogAlign, Fn.getBlockNumbered(i)->getLogAlignment()); } NeedExtraAdjustment = (FirstImpreciseBlock >= 0) && @@ -258,7 +258,7 @@ int PPCBSel::computeBranchSize(MachineFunction &Fn, // The computed offset is at most ((1 << alignment) - 4) bytes smaller // than actual offset. So we add this number to the offset for safety. 
if (NeedExtraAdjustment) - BranchSize += (1 << MaxAlign) - 4; + BranchSize += (1 << MaxLogAlign) - 4; return BranchSize; } diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp index 06df60c0266..c7fc7d87ad4 100644 --- a/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/lib/Target/PowerPC/PPCISelLowering.cpp @@ -1180,9 +1180,9 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM, setJumpIsExpensive(); } - setMinFunctionAlignment(2); + setMinFunctionLogAlignment(2); if (Subtarget.isDarwin()) - setPrefFunctionAlignment(4); + setPrefFunctionLogAlignment(4); switch (Subtarget.getDarwinDirective()) { default: break; @@ -1199,8 +1199,8 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM, case PPC::DIR_PWR7: case PPC::DIR_PWR8: case PPC::DIR_PWR9: - setPrefFunctionAlignment(4); - setPrefLoopAlignment(4); + setPrefFunctionLogAlignment(4); + setPrefLoopLogAlignment(4); break; } @@ -14007,7 +14007,7 @@ void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, } } -unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { +unsigned PPCTargetLowering::getPrefLoopLogAlignment(MachineLoop *ML) const { switch (Subtarget.getDarwinDirective()) { default: break; case PPC::DIR_970: @@ -14050,7 +14050,7 @@ unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { } } - return TargetLowering::getPrefLoopAlignment(ML); + return TargetLowering::getPrefLoopLogAlignment(ML); } /// getConstraintType - Given a constraint, return the type of diff --git a/lib/Target/PowerPC/PPCISelLowering.h b/lib/Target/PowerPC/PPCISelLowering.h index 757e2d36da8..249d7e48f85 100644 --- a/lib/Target/PowerPC/PPCISelLowering.h +++ b/lib/Target/PowerPC/PPCISelLowering.h @@ -735,7 +735,7 @@ namespace llvm { const SelectionDAG &DAG, unsigned Depth = 0) const override; - unsigned getPrefLoopAlignment(MachineLoop *ML) const override; + unsigned getPrefLoopLogAlignment(MachineLoop *ML) const override; bool shouldInsertFencesForAtomic(const Instruction *I) const override { return true; diff --git a/lib/Target/RISCV/RISCVISelLowering.cpp b/lib/Target/RISCV/RISCVISelLowering.cpp index 86add6ecccf..8bf291f8d86 100644 --- a/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/lib/Target/RISCV/RISCVISelLowering.cpp @@ -199,8 +199,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM, // Function alignments (log2). unsigned FunctionAlignment = Subtarget.hasStdExtC() ? 1 : 2; - setMinFunctionAlignment(FunctionAlignment); - setPrefFunctionAlignment(FunctionAlignment); + setMinFunctionLogAlignment(FunctionAlignment); + setPrefFunctionLogAlignment(FunctionAlignment); // Effectively disable jump table generation. 
   setMinimumJumpTableEntries(INT_MAX);
diff --git a/lib/Target/Sparc/SparcISelLowering.cpp b/lib/Target/Sparc/SparcISelLowering.cpp
index 99e555a0626..cfd6a72d364 100644
--- a/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/lib/Target/Sparc/SparcISelLowering.cpp
@@ -1805,7 +1805,7 @@ SparcTargetLowering::SparcTargetLowering(const TargetMachine &TM,
 
   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
 
-  setMinFunctionAlignment(2);
+  setMinFunctionLogAlignment(2);
 
   computeRegisterProperties(Subtarget->getRegisterInfo());
 }
diff --git a/lib/Target/SystemZ/SystemZISelLowering.cpp b/lib/Target/SystemZ/SystemZISelLowering.cpp
index 7605a7e8034..bc0ebe6f301 100644
--- a/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -120,9 +120,9 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
   setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
 
   // Instructions are strings of 2-byte aligned 2-byte values.
-  setMinFunctionAlignment(2);
+  setMinFunctionLogAlignment(2);
   // For performance reasons we prefer 16-byte alignment.
-  setPrefFunctionAlignment(4);
+  setPrefFunctionLogAlignment(4);
 
   // Handle operations that are handled in a similar way for all types.
   for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
diff --git a/lib/Target/SystemZ/SystemZLongBranch.cpp b/lib/Target/SystemZ/SystemZLongBranch.cpp
index 95d7e22dec3..955a9fd951f 100644
--- a/lib/Target/SystemZ/SystemZLongBranch.cpp
+++ b/lib/Target/SystemZ/SystemZLongBranch.cpp
@@ -87,7 +87,7 @@ struct MBBInfo {
 
   // The minimum alignment of the block, as a log2 value.
   // This value never changes.
-  unsigned Alignment = 0;
+  unsigned LogAlignment = 0;
 
   // The number of terminators in this block. This value never changes.
   unsigned NumTerminators = 0;
@@ -127,7 +127,8 @@ struct BlockPosition {
   // as the runtime address.
   unsigned KnownBits;
 
-  BlockPosition(unsigned InitialAlignment) : KnownBits(InitialAlignment) {}
+  BlockPosition(unsigned InitialLogAlignment)
+      : KnownBits(InitialLogAlignment) {}
 };
 
 class SystemZLongBranch : public MachineFunctionPass {
@@ -178,16 +179,16 @@ const uint64_t MaxForwardRange = 0xfffe;
 // instructions.
 void SystemZLongBranch::skipNonTerminators(BlockPosition &Position,
                                            MBBInfo &Block) {
-  if (Block.Alignment > Position.KnownBits) {
+  if (Block.LogAlignment > Position.KnownBits) {
     // When calculating the address of Block, we need to conservatively
     // assume that Block had the worst possible misalignment.
-    Position.Address += ((uint64_t(1) << Block.Alignment) -
+    Position.Address += ((uint64_t(1) << Block.LogAlignment) -
                          (uint64_t(1) << Position.KnownBits));
-    Position.KnownBits = Block.Alignment;
+    Position.KnownBits = Block.LogAlignment;
   }
 
   // Align the addresses.
-  uint64_t AlignMask = (uint64_t(1) << Block.Alignment) - 1;
+  uint64_t AlignMask = (uint64_t(1) << Block.LogAlignment) - 1;
   Position.Address = (Position.Address + AlignMask) & ~AlignMask;
 
   // Record the block's position.
@@ -275,13 +276,13 @@ uint64_t SystemZLongBranch::initMBBInfo() {
   Terminators.clear();
   Terminators.reserve(NumBlocks);
 
-  BlockPosition Position(MF->getAlignment());
+  BlockPosition Position(MF->getLogAlignment());
   for (unsigned I = 0; I < NumBlocks; ++I) {
     MachineBasicBlock *MBB = MF->getBlockNumbered(I);
     MBBInfo &Block = MBBs[I];
 
     // Record the alignment, for quick access.
-    Block.Alignment = MBB->getAlignment();
+    Block.LogAlignment = MBB->getLogAlignment();
 
     // Calculate the size of the fixed part of the block.
     MachineBasicBlock::iterator MI = MBB->begin();
@@ -339,7 +340,7 @@ bool SystemZLongBranch::mustRelaxABranch() {
 // must be long.
 void SystemZLongBranch::setWorstCaseAddresses() {
   SmallVector::iterator TI = Terminators.begin();
-  BlockPosition Position(MF->getAlignment());
+  BlockPosition Position(MF->getLogAlignment());
   for (auto &Block : MBBs) {
     skipNonTerminators(Position, Block);
     for (unsigned BTI = 0, BTE = Block.NumTerminators; BTI != BTE; ++BTI) {
@@ -440,7 +441,7 @@ void SystemZLongBranch::relaxBranch(TerminatorInfo &Terminator) {
 // Run a shortening pass and relax any branches that need to be relaxed.
 void SystemZLongBranch::relaxBranches() {
   SmallVector::iterator TI = Terminators.begin();
-  BlockPosition Position(MF->getAlignment());
+  BlockPosition Position(MF->getLogAlignment());
   for (auto &Block : MBBs) {
     skipNonTerminators(Position, Block);
     for (unsigned BTI = 0, BTE = Block.NumTerminators; BTI != BTE; ++BTI) {
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 3cc45a7add1..dc031459a27 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -1892,13 +1892,13 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
   MaxLoadsPerMemcmpOptSize = 2;
 
   // Set loop alignment to 2^ExperimentalPrefLoopAlignment bytes (default: 2^4).
-  setPrefLoopAlignment(ExperimentalPrefLoopAlignment);
+  setPrefLoopLogAlignment(ExperimentalPrefLoopAlignment);
 
   // An out-of-order CPU can speculatively execute past a predictable branch,
   // but a conditional move could be stalled by an expensive earlier operation.
   PredictableSelectIsExpensive = Subtarget.getSchedModel().isOutOfOrder();
   EnableExtLdPromotion = true;
-  setPrefFunctionAlignment(4); // 2^4 bytes.
+  setPrefFunctionLogAlignment(4); // 2^4 bytes.
 
   verifyIntrinsicTables();
 }
diff --git a/lib/Target/X86/X86RetpolineThunks.cpp b/lib/Target/X86/X86RetpolineThunks.cpp
index b435b22e8ac..1a0f6ecb01e 100644
--- a/lib/Target/X86/X86RetpolineThunks.cpp
+++ b/lib/Target/X86/X86RetpolineThunks.cpp
@@ -279,7 +279,7 @@ void X86RetpolineThunks::populateThunk(MachineFunction &MF,
 
   CallTarget->addLiveIn(Reg);
   CallTarget->setHasAddressTaken();
-  CallTarget->setAlignment(4);
+  CallTarget->setLogAlignment(4);
   insertRegReturnAddrClobber(*CallTarget, Reg);
   CallTarget->back().setPreInstrSymbol(MF, TargetSym);
   BuildMI(CallTarget, DebugLoc(), TII->get(RetOpc));
diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp
index 34308904dba..924744343eb 100644
--- a/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/lib/Target/XCore/XCoreISelLowering.cpp
@@ -171,8 +171,8 @@ XCoreTargetLowering::XCoreTargetLowering(const TargetMachine &TM,
   setTargetDAGCombine(ISD::INTRINSIC_VOID);
   setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
 
-  setMinFunctionAlignment(1);
-  setPrefFunctionAlignment(2);
+  setMinFunctionLogAlignment(1);
+  setPrefFunctionLogAlignment(2);
 }
 
 bool XCoreTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
diff --git a/test/CodeGen/ARM/constant-island-movwt.mir b/test/CodeGen/ARM/constant-island-movwt.mir
index 12361c97c0d..4aac918ad33 100644
--- a/test/CodeGen/ARM/constant-island-movwt.mir
+++ b/test/CodeGen/ARM/constant-island-movwt.mir
@@ -892,12 +892,12 @@ body: |
 # CHECK-NEXT: renamable $q12 = VDUP32q killed renamable $r5, 14, $noreg
 # CHECK-NEXT: t2B %bb.2, 14, $noreg
 # CHECK-NEXT: {{^ $}}
-# CHECK-NEXT: bb.1 (align 2):
+# CHECK-NEXT: bb.1 (align 4):
 # CHECK-NEXT: successors:{{ }}
 # CHECK-NEXT: {{^ $}}
 # CHECK-NEXT: CONSTPOOL_ENTRY 1, %const.0, 4
 # CHECK-NEXT: {{^ $}}
-# CHECK-NEXT: bb.2.entry (align 1):
+# CHECK-NEXT: bb.2.entry (align 2):
 # CHECK-NEXT: liveins: $d13, $s27, $r10, $r9, $r8, $s26, $d12, $s25, $s24,
 # CHECK-SAME: $d15, $s30, $s31, $d14, $s28, $s29, $lr, $r0, $d21,
 # CHECK-SAME: $r3, $q10, $d20, $d17, $r2, $d25, $q11, $d22, $d23,
diff --git a/test/CodeGen/ARM/fp16-litpool-arm.mir b/test/CodeGen/ARM/fp16-litpool-arm.mir
index 218e22b91ae..c893c4ef931 100644
--- a/test/CodeGen/ARM/fp16-litpool-arm.mir
+++ b/test/CodeGen/ARM/fp16-litpool-arm.mir
@@ -55,16 +55,16 @@ constants:
 alignment: 2
 
 #CHECK: B %[[BB4:bb.[0-9]]]
-#CHECK: bb.{{.}} (align 2):
+#CHECK: bb.{{.}} (align 4):
+#CHECK: successors:
+#CHECK: CONSTPOOL_ENTRY {{.}}, %const.{{.}}, 4
+#CHECK: bb.{{.}} (align 4):
 #CHECK: successors:
 #CHECK: CONSTPOOL_ENTRY {{.}}, %const.{{.}}, 4
 #CHECK: bb.{{.}} (align 2):
 #CHECK: successors:
-#CHECK: CONSTPOOL_ENTRY {{.}}, %const.{{.}}, 4
-#CHECK: bb.{{.}} (align 1):
-#CHECK: successors:
 #CHECK: CONSTPOOL_ENTRY {{.}}, %const.{{.}}, 2
-#CHECK: [[BB4]].entry (align 2):
+#CHECK: [[BB4]].entry (align 4):
 
 body: |
   bb.0.entry:
diff --git a/test/CodeGen/ARM/fp16-litpool-thumb.mir b/test/CodeGen/ARM/fp16-litpool-thumb.mir
index 46e308f2d94..d2bb9bfefd0 100644
--- a/test/CodeGen/ARM/fp16-litpool-thumb.mir
+++ b/test/CodeGen/ARM/fp16-litpool-thumb.mir
@@ -53,13 +53,13 @@ constants:
 alignment: 2
 
 #CHECK: t2B %[[BB3:bb.[0-9]]]
-#CHECK: bb.{{.}} (align 2):
+#CHECK: bb.{{.}} (align 4):
 #CHECK: successors:
 #CHECK: CONSTPOOL_ENTRY 2, %const.{{.}}, 4
-#CHECK: bb.{{.}} (align 1):
+#CHECK: bb.{{.}} (align 2):
 #CHECK: successors:
 #CHECK: CONSTPOOL_ENTRY 3, %const.{{.}}, 2
-#CHECK: [[BB3]].entry (align 1):
+#CHECK: [[BB3]].entry (align 2):
 
 body: |
   bb.0.entry:
diff --git a/test/CodeGen/ARM/fp16-litpool2-arm.mir b/test/CodeGen/ARM/fp16-litpool2-arm.mir
index 65dae64dc0f..6f40c86ce8e 100644
--- a/test/CodeGen/ARM/fp16-litpool2-arm.mir
+++ b/test/CodeGen/ARM/fp16-litpool2-arm.mir
@@ -76,11 +76,11 @@ constants:
 isTargetSpecific: false
 
-#CHECK: bb.{{.*}} (align 1):
+#CHECK: bb.{{.*}} (align 2):
 #CHECK: successors:
 #CHECK: CONSTPOOL_ENTRY 1, %const{{.*}}, 2
 
 # We want this block to be 4 byte aligned:
-#CHECK: bb.{{.*}}.LA (align 2):
+#CHECK: bb.{{.*}}.LA (align 4):
 
 body: |
   bb.0.entry:
diff --git a/test/CodeGen/ARM/fp16-litpool3-arm.mir b/test/CodeGen/ARM/fp16-litpool3-arm.mir
index d5682f5809e..97ddebc426c 100644
--- a/test/CodeGen/ARM/fp16-litpool3-arm.mir
+++ b/test/CodeGen/ARM/fp16-litpool3-arm.mir
@@ -77,7 +77,7 @@ constants:
 isTargetSpecific: false
 
-#CHECK: bb.{{.*}} (align 1):
+#CHECK: bb.{{.*}} (align 2):
 #CHECK: successors:
 #CHECK: CONSTPOOL_ENTRY 1, %const{{.*}}, 2
 #
 
diff --git a/test/CodeGen/Mips/unaligned-memops-mapping.mir b/test/CodeGen/Mips/unaligned-memops-mapping.mir
index f21f148f587..e7a2edbec95 100644
--- a/test/CodeGen/Mips/unaligned-memops-mapping.mir
+++ b/test/CodeGen/Mips/unaligned-memops-mapping.mir
@@ -118,7 +118,7 @@ body: |
 # CHECK: c: 60 25 90 03 swr $1, 3($5)
 
 # CHECK-LABEL: g2:
-# CHECK: 14: 60 24 64 00 lwle $1, 0($4)
-# CHECK: 18: 60 24 66 03 lwre $1, 3($4)
-# CHECK: 1c: 60 25 a0 00 swle $1, 0($5)
-# CHECK: 20: 60 25 a2 03 swre $1, 3($5)
+# CHECK: 12: 60 24 64 00 lwle $1, 0($4)
+# CHECK: 16: 60 24 66 03 lwre $1, 3($4)
+# CHECK: 1a: 60 25 a0 00 swle $1, 0($5)
+# CHECK: 1e: 60 25 a2 03 swre $1, 3($5)
diff --git a/test/CodeGen/PowerPC/block-placement.mir b/test/CodeGen/PowerPC/block-placement.mir
index 9406e13b354..6e6f19c4053 100644
--- a/test/CodeGen/PowerPC/block-placement.mir
+++ b/test/CodeGen/PowerPC/block-placement.mir
@@ -212,7 +212,7 @@ body: |
     ; CHECK: successors: %bb.11(0x80000000)
     ; CHECK: B %bb.11
 
-    ; CHECK: bb.8.while.body.i (align 4):
+    ; CHECK: bb.8.while.body.i (align 16):
     ; CHECK: successors: %bb.11(0x04000000), %bb.9(0x7c000000)
     ; CHECK: BCC 76, killed renamable $cr0, %bb.11
 
diff --git a/test/CodeGen/X86/tail-merge-after-mbp.mir b/test/CodeGen/X86/tail-merge-after-mbp.mir
index 30505526176..bf9b0decc9d 100644
--- a/test/CodeGen/X86/tail-merge-after-mbp.mir
+++ b/test/CodeGen/X86/tail-merge-after-mbp.mir
@@ -26,7 +26,7 @@ body: |
    ; CHECK: $rax = MOV64rm $r14, 1, $noreg, 0, $noreg :: (load 8)
    ; CHECK: TEST64rr $rax, $rax, implicit-def $eflags
    ; CHECK: JCC_1 %bb.1, 4, implicit $eflags
-   ; CHECK: bb.5 (align 4):
+   ; CHECK: bb.5 (align 16):
    ; CHECK: successors: %bb.6(0x71555555), %bb.8(0x0eaaaaab)
    ; CHECK: CMP64mi8 killed $rax, 1, $noreg, 8, $noreg, 0, implicit-def $eflags :: (load 8), (load 8)
    ; CHECK: JCC_1 %bb.8, 5, implicit $eflags
diff --git a/test/DebugInfo/X86/debug-loc-offset.mir b/test/DebugInfo/X86/debug-loc-offset.mir
index aab8f66ebd7..a6c4766830d 100644
--- a/test/DebugInfo/X86/debug-loc-offset.mir
+++ b/test/DebugInfo/X86/debug-loc-offset.mir
@@ -32,7 +32,7 @@
 # Checking that we have two compile units with two sets of high/lo_pc.
 # CHECK: .debug_info contents
 # CHECK: DW_TAG_compile_unit
-# CHECK: DW_AT_low_pc {{.*}} (0x0000000000000020 ".text")
+# CHECK: DW_AT_low_pc {{.*}} (0x0000000000000018 ".text")
 # CHECK: DW_AT_high_pc
 #
 # CHECK: DW_TAG_subprogram
@@ -42,8 +42,8 @@
 # CHECK: DW_TAG_formal_parameter
 # CHECK-NOT: DW_TAG
 # CHECK: DW_AT_location [DW_FORM_sec_offset] ({{.*}}
-# CHECK-NEXT: [0x00000029, 0x00000037): DW_OP_breg0 EAX+0, DW_OP_deref
-# CHECK-NEXT: [0x00000037, 0x00000063): DW_OP_breg5 EBP-8, DW_OP_deref, DW_OP_deref
+# CHECK-NEXT: [0x00000021, 0x0000002f): DW_OP_breg0 EAX+0, DW_OP_deref
+# CHECK-NEXT: [0x0000002f, 0x0000005b): DW_OP_breg5 EBP-8, DW_OP_deref, DW_OP_deref
 # CHECK-NEXT: DW_AT_name [DW_FORM_strp]{{.*}}"a"
 #
 # CHECK: DW_TAG_variable
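
A note on the arithmetic, with a standalone compilable sketch (not part of
the patch; `offsetToAlignment` below is a hypothetical stand-in that only
mirrors what LLVM's `OffsetToAlignment` helper and the
`PPCBranchSelector`/`SystemZLongBranch` code paths above compute). With the
explicit naming, `LogAlign` is log2 of the alignment in bytes, so the byte
alignment itself is `1 << LogAlign`:

  // sketch.cpp -- illustrates the log2(alignment) arithmetic that the
  // renames in this patch make explicit. Helper names are hypothetical.
  #include <cassert>
  #include <cstdint>
  #include <cstdio>

  // Bytes of padding needed to round Offset up to the next boundary of
  // the alignment described by LogAlign -- the quantity that
  // PPCBSel::GetAlignmentAdjustment obtains via
  // OffsetToAlignment(Offset, 1 << LogAlign).
  static uint64_t offsetToAlignment(uint64_t Offset, unsigned LogAlign) {
    uint64_t AlignMask = (uint64_t(1) << LogAlign) - 1;
    return ((Offset + AlignMask) & ~AlignMask) - Offset;
  }

  int main() {
    // LogAlign 4 means 16-byte alignment: offset 9 needs 7 bytes of pad.
    assert(offsetToAlignment(9, 4) == 7);

    // Worst case used by SystemZLongBranch::skipNonTerminators: a block
    // requiring LogAlignment 4 whose address has only KnownBits == 2
    // known-zero low bits may need up to
    // (1 << LogAlignment) - (1 << KnownBits) bytes of padding.
    unsigned LogAlignment = 4, KnownBits = 2;
    uint64_t WorstCasePad =
        (uint64_t(1) << LogAlignment) - (uint64_t(1) << KnownBits);
    printf("worst-case padding: %llu bytes\n",
           (unsigned long long)WorstCasePad); // prints 12
    return 0;
  }

The same bytes-vs-log2 distinction accounts for the MIR test updates above:
a block whose stored log2 value is 2 is now printed as `(align 4)`, and a
log2 value of 4 as `(align 16)`, since MIR serializes alignment in bytes.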