[Alignment][NFC] Remove LogAlignment functions
Summary: This patch is part of a series to introduce an Alignment type. See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html See this patch for the introduction of the type: https://reviews.llvm.org/D64790

Reviewers: courbet

Subscribers: arsenm, sdardis, nemanjai, jvesely, nhaehnle, hiraditya, kbarton, jrtc27, MaskRay, atanasyan, jsji, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D67620

llvm-svn: 372231
This commit is contained in:
parent 4055f4094f
commit 631cc30829
@@ -372,16 +372,10 @@ public:
/// \see getBeginClobberMask()
const uint32_t *getEndClobberMask(const TargetRegisterInfo *TRI) const;

/// Return alignment of the basic block. The alignment is specified as
/// log2(bytes).
/// FIXME: Remove the Log versions once migration to llvm::Align is over.
unsigned getLogAlignment() const { return Log2(Alignment); }
/// Return alignment of the basic block.
llvm::Align getAlignment() const { return Alignment; }

/// Set alignment of the basic block. The alignment is specified as
/// log2(bytes).
/// FIXME: Remove the Log versions once migration to llvm::Align is over.
void setLogAlignment(unsigned A) { Alignment = llvm::Align(1ULL << A); }
/// Set alignment of the basic block.
void setAlignment(llvm::Align A) { Alignment = A; }

/// Returns true if the block is a landing pad. That is this basic block is
@@ -127,9 +127,8 @@ void BranchRelaxation::verify() {
#ifndef NDEBUG
unsigned PrevNum = MF->begin()->getNumber();
for (MachineBasicBlock &MBB : *MF) {
unsigned LogAlign = MBB.getLogAlignment();
unsigned Num = MBB.getNumber();
assert(BlockInfo[Num].Offset % (1u << LogAlign) == 0);
const unsigned Num = MBB.getNumber();
assert(isAligned(MBB.getAlignment(), BlockInfo[Num].Offset));
assert(!Num || BlockInfo[PrevNum].postOffset(MBB) <= BlockInfo[Num].Offset);
assert(BlockInfo[Num].Size == computeBlockSize(MBB));
PrevNum = Num;
@@ -641,7 +641,7 @@ bool MIParser::parseBasicBlockDefinition(
return error(Loc, Twine("redefinition of machine basic block with id #") +
Twine(ID));
if (Alignment)
MBB->setLogAlignment(Log2_32(Alignment));
MBB->setAlignment(llvm::Align(Alignment));
if (HasAddressTaken)
MBB->setHasAddressTaken();
MBB->setIsEHPad(IsLandingPad);
@@ -629,10 +629,9 @@ void MIPrinter::print(const MachineBasicBlock &MBB) {
OS << "landing-pad";
HasAttributes = true;
}
if (MBB.getLogAlignment()) {
if (MBB.getAlignment() > 1) {
OS << (HasAttributes ? ", " : " (");
OS << "align "
<< (1UL << MBB.getLogAlignment());
OS << "align " << MBB.getAlignment().value();
HasAttributes = true;
}
if (HasAttributes)
@@ -326,9 +326,9 @@ void MachineBasicBlock::print(raw_ostream &OS, ModuleSlotTracker &MST,
OS << "landing-pad";
HasAttributes = true;
}
if (getLogAlignment()) {
if (getAlignment() > 1) {
OS << (HasAttributes ? ", " : " (");
OS << "align " << getLogAlignment();
OS << "align " << Log2(getAlignment());
HasAttributes = true;
}
if (HasAttributes)
@@ -2832,7 +2832,7 @@ void MachineBlockPlacement::alignBlocks() {
// Force alignment if all the predecessors are jumps. We already checked
// that the block isn't cold above.
if (!LayoutPred->isSuccessor(ChainBB)) {
ChainBB->setLogAlignment(Log2(Align));
ChainBB->setAlignment(Align);
continue;
}
@@ -2844,7 +2844,7 @@ void MachineBlockPlacement::alignBlocks() {
MBPI->getEdgeProbability(LayoutPred, ChainBB);
BlockFrequency LayoutEdgeFreq = MBFI->getBlockFreq(LayoutPred) * LayoutProb;
if (LayoutEdgeFreq <= (Freq * ColdProb))
ChainBB->setLogAlignment(Log2(Align));
ChainBB->setAlignment(Align);
}
}
@@ -3109,14 +3109,14 @@ bool MachineBlockPlacement::runOnMachineFunction(MachineFunction &MF) {
if (AlignAllBlock)
// Align all of the blocks in the function to a specific alignment.
for (MachineBasicBlock &MBB : MF)
MBB.setLogAlignment(AlignAllBlock);
MBB.setAlignment(llvm::Align(1ULL << AlignAllBlock));
else if (AlignAllNonFallThruBlocks) {
// Align all of the blocks that have no fall-through predecessors to a
// specific alignment.
for (auto MBI = std::next(MF.begin()), MBE = MF.end(); MBI != MBE; ++MBI) {
auto LayoutPred = std::prev(MBI);
if (!LayoutPred->isSuccessor(&*MBI))
MBI->setLogAlignment(AlignAllNonFallThruBlocks);
MBI->setAlignment(llvm::Align(1ULL << AlignAllNonFallThruBlocks));
}
}
if (ViewBlockLayoutWithBFI != GVDT_None &&
@@ -10714,7 +10714,7 @@ llvm::Align SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
// If inner loop block is aligned assume in average half of the alignment
// size to be added as nops.
if (MBB != Header)
LoopSize += (1 << MBB->getLogAlignment()) / 2;
LoopSize += MBB->getAlignment().value() / 2;

for (const MachineInstr &MI : *MBB) {
LoopSize += TII->getInstSizeInBytes(MI);
@@ -47,7 +47,7 @@ void ARMBasicBlockUtils::computeBlockSize(MachineBasicBlock *MBB) {
BasicBlockInfo &BBI = BBInfo[MBB->getNumber()];
BBI.Size = 0;
BBI.Unalign = 0;
BBI.PostAlign = 0;
BBI.PostAlign = llvm::Align::None();

for (MachineInstr &I : *MBB) {
BBI.Size += TII->getInstSizeInBytes(I);
@@ -62,7 +62,7 @@ void ARMBasicBlockUtils::computeBlockSize(MachineBasicBlock *MBB) {

// tBR_JTr contains a .align 2 directive.
if (!MBB->empty() && MBB->back().getOpcode() == ARM::tBR_JTr) {
BBI.PostAlign = 2;
BBI.PostAlign = llvm::Align(4);
MBB->getParent()->ensureAlignment(llvm::Align(4));
}
}
@@ -126,9 +126,9 @@ void ARMBasicBlockUtils::adjustBBOffsetsAfter(MachineBasicBlock *BB) {
for(unsigned i = BBNum + 1, e = MF.getNumBlockIDs(); i < e; ++i) {
// Get the offset and known bits at the end of the layout predecessor.
// Include the alignment of the current block.
unsigned LogAlign = MF.getBlockNumbered(i)->getLogAlignment();
unsigned Offset = BBInfo[i - 1].postOffset(LogAlign);
unsigned KnownBits = BBInfo[i - 1].postKnownBits(LogAlign);
const llvm::Align Align = MF.getBlockNumbered(i)->getAlignment();
const unsigned Offset = BBInfo[i - 1].postOffset(Align);
const unsigned KnownBits = BBInfo[i - 1].postKnownBits(Align);

// This is where block i begins. Stop if the offset is already correct,
// and we have updated 2 blocks. This is the maximum number of blocks
@@ -27,11 +27,11 @@ using BBInfoVector = SmallVectorImpl<BasicBlockInfo>;
/// unknown offset bits. This does not include alignment padding caused by
/// known offset bits.
///
/// @param LogAlign log2(alignment)
/// @param Align alignment
/// @param KnownBits Number of known low offset bits.
inline unsigned UnknownPadding(unsigned LogAlign, unsigned KnownBits) {
if (KnownBits < LogAlign)
return (1u << LogAlign) - (1u << KnownBits);
inline unsigned UnknownPadding(llvm::Align Align, unsigned KnownBits) {
if (KnownBits < Log2(Align))
return Align.value() - (1u << KnownBits);
return 0;
}
@@ -65,10 +65,9 @@ struct BasicBlockInfo {
/// multiple of 1 << Unalign.
uint8_t Unalign = 0;

/// PostAlign - When non-zero, the block terminator contains a .align
/// directive, so the end of the block is aligned to 1 << PostAlign
/// bytes.
uint8_t PostAlign = 0;
/// PostAlign - When > 1, the block terminator contains a .align
/// directive, so the end of the block is aligned to PostAlign bytes.
llvm::Align PostAlign;

BasicBlockInfo() = default;
@@ -84,16 +83,16 @@ struct BasicBlockInfo {
return Bits;
}

/// Compute the offset immediately following this block. If LogAlign is
/// Compute the offset immediately following this block. If Align is
/// specified, return the offset the successor block will get if it has
/// this alignment.
unsigned postOffset(unsigned LogAlign = 0) const {
unsigned postOffset(llvm::Align Align = llvm::Align::None()) const {
unsigned PO = Offset + Size;
unsigned LA = std::max(unsigned(PostAlign), LogAlign);
if (!LA)
const llvm::Align PA = std::max(PostAlign, Align);
if (PA == llvm::Align::None())
return PO;
// Add alignment padding from the terminator.
return PO + UnknownPadding(LA, internalKnownBits());
return PO + UnknownPadding(PA, internalKnownBits());
}

/// Compute the number of known low bits of postOffset. If this block
@@ -101,9 +100,8 @@ struct BasicBlockInfo {
/// instruction alignment. An aligned terminator may increase the number
/// of know bits.
/// If LogAlign is given, also consider the alignment of the next block.
unsigned postKnownBits(unsigned LogAlign = 0) const {
return std::max(std::max(unsigned(PostAlign), LogAlign),
internalKnownBits());
unsigned postKnownBits(llvm::Align Align = llvm::Align::None()) const {
return std::max(Log2(std::max(PostAlign, Align)), internalKnownBits());
}
};
@@ -247,7 +247,7 @@ namespace {
void doInitialJumpTablePlacement(std::vector<MachineInstr *> &CPEMIs);
bool BBHasFallthrough(MachineBasicBlock *MBB);
CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI);
unsigned getCPELogAlign(const MachineInstr *CPEMI);
llvm::Align getCPEAlign(const MachineInstr *CPEMI);
void scanFunctionJumpTables();
void initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs);
MachineBasicBlock *splitBlockBeforeInstr(MachineInstr *MI);
@@ -336,8 +336,7 @@ LLVM_DUMP_METHOD void ARMConstantIslands::dumpBBs() {
const BasicBlockInfo &BBI = BBInfo[J];
dbgs() << format("%08x %bb.%u\t", BBI.Offset, J)
<< " kb=" << unsigned(BBI.KnownBits)
<< " ua=" << unsigned(BBI.Unalign)
<< " pa=" << unsigned(BBI.PostAlign)
<< " ua=" << unsigned(BBI.Unalign) << " pa=" << Log2(BBI.PostAlign)
<< format(" size=%#x\n", BBInfo[J].Size);
}
});
@@ -494,11 +493,12 @@ ARMConstantIslands::doInitialConstPlacement(std::vector<MachineInstr*> &CPEMIs)
MachineBasicBlock *BB = MF->CreateMachineBasicBlock();
MF->push_back(BB);

// MachineConstantPool measures alignment in bytes. We measure in log2(bytes).
unsigned MaxLogAlign = Log2_32(MCP->getConstantPoolAlignment());
// MachineConstantPool measures alignment in bytes.
const llvm::Align MaxAlign(MCP->getConstantPoolAlignment());
const unsigned MaxLogAlign = Log2(MaxAlign);

// Mark the basic block as required by the const-pool.
BB->setLogAlignment(MaxLogAlign);
BB->setAlignment(MaxAlign);

// The function needs to be as aligned as the basic blocks. The linker may
// move functions around based on their alignment.
@@ -648,29 +648,27 @@ ARMConstantIslands::findConstPoolEntry(unsigned CPI,
return nullptr;
}

/// getCPELogAlign - Returns the required alignment of the constant pool entry
/// represented by CPEMI. Alignment is measured in log2(bytes) units.
unsigned ARMConstantIslands::getCPELogAlign(const MachineInstr *CPEMI) {
/// getCPEAlign - Returns the required alignment of the constant pool entry
/// represented by CPEMI.
llvm::Align ARMConstantIslands::getCPEAlign(const MachineInstr *CPEMI) {
switch (CPEMI->getOpcode()) {
case ARM::CONSTPOOL_ENTRY:
break;
case ARM::JUMPTABLE_TBB:
return isThumb1 ? 2 : 0;
return isThumb1 ? llvm::Align(4) : llvm::Align(1);
case ARM::JUMPTABLE_TBH:
return isThumb1 ? 2 : 1;
return isThumb1 ? llvm::Align(4) : llvm::Align(2);
case ARM::JUMPTABLE_INSTS:
return 1;
return llvm::Align(2);
case ARM::JUMPTABLE_ADDRS:
return 2;
return llvm::Align(4);
default:
llvm_unreachable("unknown constpool entry kind");
}

unsigned CPI = getCombinedIndex(CPEMI);
assert(CPI < MCP->getConstants().size() && "Invalid constant pool index.");
unsigned Align = MCP->getConstants()[CPI].getAlignment();
assert(isPowerOf2_32(Align) && "Invalid CPE alignment");
return Log2_32(Align);
return llvm::Align(MCP->getConstants()[CPI].getAlignment());
}

/// scanFunctionJumpTables - Do a scan of the function, building up
@@ -1023,8 +1021,8 @@ bool ARMConstantIslands::isWaterInRange(unsigned UserOffset,
MachineBasicBlock* Water, CPUser &U,
unsigned &Growth) {
BBInfoVector &BBInfo = BBUtils->getBBInfo();
unsigned CPELogAlign = getCPELogAlign(U.CPEMI);
unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPELogAlign);
const llvm::Align CPEAlign = getCPEAlign(U.CPEMI);
const unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPEAlign);
unsigned NextBlockOffset;
llvm::Align NextBlockAlignment;
MachineFunction::const_iterator NextBlock = Water->getIterator();
@@ -1050,8 +1048,7 @@ bool ARMConstantIslands::isWaterInRange(unsigned UserOffset,
// the offset of the instruction. Also account for unknown alignment padding
// in blocks between CPE and the user.
if (CPEOffset < UserOffset)
UserOffset +=
Growth + UnknownPadding(Log2(MF->getAlignment()), CPELogAlign);
UserOffset += Growth + UnknownPadding(MF->getAlignment(), Log2(CPEAlign));
} else
// CPE fits in existing padding.
Growth = 0;
@@ -1217,8 +1214,8 @@ bool ARMConstantIslands::findAvailableWater(CPUser &U, unsigned UserOffset,
// inserting islands between BB0 and BB1 makes other accesses out of range.
MachineBasicBlock *UserBB = U.MI->getParent();
BBInfoVector &BBInfo = BBUtils->getBBInfo();
unsigned MinNoSplitDisp =
BBInfo[UserBB->getNumber()].postOffset(getCPELogAlign(U.CPEMI));
const llvm::Align CPEAlign = getCPEAlign(U.CPEMI);
unsigned MinNoSplitDisp = BBInfo[UserBB->getNumber()].postOffset(CPEAlign);
if (CloserWater && MinNoSplitDisp > U.getMaxDisp() / 2)
return false;
for (water_iterator IP = std::prev(WaterList.end()), B = WaterList.begin();;
@@ -1271,7 +1268,7 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
CPUser &U = CPUsers[CPUserIndex];
MachineInstr *UserMI = U.MI;
MachineInstr *CPEMI = U.CPEMI;
unsigned CPELogAlign = getCPELogAlign(CPEMI);
const llvm::Align CPEAlign = getCPEAlign(CPEMI);
MachineBasicBlock *UserMBB = UserMI->getParent();
BBInfoVector &BBInfo = BBUtils->getBBInfo();
const BasicBlockInfo &UserBBI = BBInfo[UserMBB->getNumber()];
@@ -1284,7 +1281,7 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
// Size of branch to insert.
unsigned Delta = isThumb1 ? 2 : 4;
// Compute the offset where the CPE will begin.
unsigned CPEOffset = UserBBI.postOffset(CPELogAlign) + Delta;
unsigned CPEOffset = UserBBI.postOffset(CPEAlign) + Delta;

if (isOffsetInRange(UserOffset, CPEOffset, U)) {
LLVM_DEBUG(dbgs() << "Split at end of " << printMBBReference(*UserMBB)
@@ -1325,11 +1322,11 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,

// Try to split the block so it's fully aligned. Compute the latest split
// point where we can add a 4-byte branch instruction, and then align to
// LogAlign which is the largest possible alignment in the function.
unsigned LogAlign = Log2(MF->getAlignment());
assert(LogAlign >= CPELogAlign && "Over-aligned constant pool entry");
// Align which is the largest possible alignment in the function.
const llvm::Align Align = MF->getAlignment();
assert(Align >= CPEAlign && "Over-aligned constant pool entry");
unsigned KnownBits = UserBBI.internalKnownBits();
unsigned UPad = UnknownPadding(LogAlign, KnownBits);
unsigned UPad = UnknownPadding(Align, KnownBits);
unsigned BaseInsertOffset = UserOffset + U.getMaxDisp() - UPad;
LLVM_DEBUG(dbgs() << format("Split in middle of big block before %#x",
BaseInsertOffset));
@@ -1340,7 +1337,7 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
BaseInsertOffset -= 4;

LLVM_DEBUG(dbgs() << format(", adjusted to %#x", BaseInsertOffset)
<< " la=" << LogAlign << " kb=" << KnownBits
<< " la=" << Log2(Align) << " kb=" << KnownBits
<< " up=" << UPad << '\n');

// This could point off the end of the block if we've already got constant
@@ -1393,8 +1390,8 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
CPUser &U = CPUsers[CPUIndex];
if (!isOffsetInRange(Offset, EndInsertOffset, U)) {
// Shift intertion point by one unit of alignment so it is within reach.
BaseInsertOffset -= 1u << LogAlign;
EndInsertOffset -= 1u << LogAlign;
BaseInsertOffset -= Align.value();
EndInsertOffset -= Align.value();
}
// This is overly conservative, as we don't account for CPEMIs being
// reused within the block, but it doesn't matter much. Also assume CPEs
@@ -1504,9 +1501,9 @@ bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex,
// Always align the new block because CP entries can be smaller than 4
// bytes. Be careful not to decrease the existing alignment, e.g. NewMBB may
// be an already aligned constant pool block.
const unsigned LogAlign = isThumb ? 1 : 2;
if (NewMBB->getLogAlignment() < LogAlign)
NewMBB->setLogAlignment(LogAlign);
const llvm::Align Align = isThumb ? llvm::Align(2) : llvm::Align(4);
if (NewMBB->getAlignment() < Align)
NewMBB->setAlignment(Align);

// Remove the original WaterList entry; we want subsequent insertions in
// this vicinity to go after the one we're about to insert. This
@@ -1535,7 +1532,7 @@ bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex,
decrementCPEReferenceCount(CPI, CPEMI);

// Mark the basic block as aligned as required by the const-pool entry.
NewIsland->setLogAlignment(getCPELogAlign(U.CPEMI));
NewIsland->setAlignment(getCPEAlign(U.CPEMI));

// Increase the size of the island block to account for the new entry.
BBUtils->adjustBBSize(NewIsland, Size);
@@ -1569,10 +1566,11 @@ void ARMConstantIslands::removeDeadCPEMI(MachineInstr *CPEMI) {
BBInfo[CPEBB->getNumber()].Size = 0;

// This block no longer needs to be aligned.
CPEBB->setLogAlignment(0);
} else
CPEBB->setAlignment(llvm::Align::None());
} else {
// Entries are sorted by descending alignment, so realign from the front.
CPEBB->setLogAlignment(getCPELogAlign(&*CPEBB->begin()));
CPEBB->setAlignment(getCPEAlign(&*CPEBB->begin()));
}

BBUtils->adjustBBOffsetsAfter(CPEBB);
// An island has only one predecessor BB and one successor BB. Check if
@@ -105,12 +105,11 @@ void HexagonBranchRelaxation::computeOffset(MachineFunction &MF,
// offset of the current instruction from the start.
unsigned InstOffset = 0;
for (auto &B : MF) {
if (B.getLogAlignment()) {
if (B.getAlignment() != llvm::Align::None()) {
// Although we don't know the exact layout of the final code, we need
// to account for alignment padding somehow. This heuristic pads each
// aligned basic block according to the alignment value.
int ByteAlign = (1u << B.getLogAlignment()) - 1;
InstOffset = (InstOffset + ByteAlign) & ~(ByteAlign);
InstOffset = alignTo(InstOffset, B.getAlignment());
}
OffsetMap[&B] = InstOffset;
for (auto &MI : B.instrs()) {
@@ -114,12 +114,11 @@ bool HexagonFixupHwLoops::fixupLoopInstrs(MachineFunction &MF) {

// First pass - compute the offset of each basic block.
for (const MachineBasicBlock &MBB : MF) {
if (MBB.getLogAlignment()) {
if (MBB.getAlignment() != llvm::Align::None()) {
// Although we don't know the exact layout of the final code, we need
// to account for alignment padding somehow. This heuristic pads each
// aligned basic block according to the alignment value.
int ByteAlign = (1u << MBB.getLogAlignment()) - 1;
InstOffset = (InstOffset + ByteAlign) & ~(ByteAlign);
InstOffset = alignTo(InstOffset, MBB.getAlignment());
}

BlockToInstOffset[&MBB] = InstOffset;
@@ -222,12 +222,7 @@ namespace {

BasicBlockInfo() = default;

// FIXME: ignore LogAlign for this patch
//
unsigned postOffset(unsigned LogAlign = 0) const {
unsigned PO = Offset + Size;
return PO;
}
unsigned postOffset() const { return Offset + Size; }
};

std::vector<BasicBlockInfo> BBInfo;
@@ -376,7 +371,7 @@ namespace {

void doInitialPlacement(std::vector<MachineInstr*> &CPEMIs);
CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI);
unsigned getCPELogAlign(const MachineInstr &CPEMI);
llvm::Align getCPEAlign(const MachineInstr &CPEMI);
void initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs);
unsigned getOffsetOf(MachineInstr *MI) const;
unsigned getUserOffset(CPUser&) const;
@@ -534,11 +529,11 @@ MipsConstantIslands::doInitialPlacement(std::vector<MachineInstr*> &CPEMIs) {
MF->push_back(BB);

// MachineConstantPool measures alignment in bytes. We measure in log2(bytes).
unsigned MaxLogAlign = Log2_32(MCP->getConstantPoolAlignment());
const llvm::Align MaxAlign(MCP->getConstantPoolAlignment());

// Mark the basic block as required by the const-pool.
// If AlignConstantIslands isn't set, use 4-byte alignment for everything.
BB->setLogAlignment(AlignConstantIslands ? MaxLogAlign : 2);
BB->setAlignment(AlignConstantIslands ? MaxAlign : llvm::Align(4));

// The function needs to be as aligned as the basic blocks. The linker may
// move functions around based on their alignment.
@@ -548,7 +543,7 @@ MipsConstantIslands::doInitialPlacement(std::vector<MachineInstr*> &CPEMIs) {
// alignment of all entries as long as BB is sufficiently aligned. Keep
// track of the insertion point for each alignment. We are going to bucket
// sort the entries as they are created.
SmallVector<MachineBasicBlock::iterator, 8> InsPoint(MaxLogAlign + 1,
SmallVector<MachineBasicBlock::iterator, 8> InsPoint(Log2(MaxAlign) + 1,
BB->end());

// Add all of the constants from the constant pool to the end block, use an
@@ -577,7 +572,7 @@ MipsConstantIslands::doInitialPlacement(std::vector<MachineInstr*> &CPEMIs) {

// Ensure that future entries with higher alignment get inserted before
// CPEMI. This is bucket sort with iterators.
for (unsigned a = LogAlign + 1; a <= MaxLogAlign; ++a)
for (unsigned a = LogAlign + 1; a <= Log2(MaxAlign); ++a)
if (InsPoint[a] == InsAt)
InsPoint[a] = CPEMI;
// Add a new CPEntry, but no corresponding CPUser yet.
@@ -622,20 +617,18 @@ MipsConstantIslands::CPEntry
return nullptr;
}

/// getCPELogAlign - Returns the required alignment of the constant pool entry
/// getCPEAlign - Returns the required alignment of the constant pool entry
/// represented by CPEMI. Alignment is measured in log2(bytes) units.
unsigned MipsConstantIslands::getCPELogAlign(const MachineInstr &CPEMI) {
llvm::Align MipsConstantIslands::getCPEAlign(const MachineInstr &CPEMI) {
assert(CPEMI.getOpcode() == Mips::CONSTPOOL_ENTRY);

// Everything is 4-byte aligned unless AlignConstantIslands is set.
if (!AlignConstantIslands)
return 2;
return llvm::Align(4);

unsigned CPI = CPEMI.getOperand(1).getIndex();
assert(CPI < MCP->getConstants().size() && "Invalid constant pool index.");
unsigned Align = MCP->getConstants()[CPI].getAlignment();
assert(isPowerOf2_32(Align) && "Invalid CPE alignment");
return Log2_32(Align);
return llvm::Align(MCP->getConstants()[CPI].getAlignment());
}

/// initializeFunctionInfo - Do the initial scan of the function, building up
@@ -941,8 +934,7 @@ bool MipsConstantIslands::isOffsetInRange(unsigned UserOffset,
bool MipsConstantIslands::isWaterInRange(unsigned UserOffset,
MachineBasicBlock* Water, CPUser &U,
unsigned &Growth) {
unsigned CPELogAlign = getCPELogAlign(*U.CPEMI);
unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPELogAlign);
unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset();
unsigned NextBlockOffset;
llvm::Align NextBlockAlignment;
MachineFunction::const_iterator NextBlock = ++Water->getIterator();
@@ -1223,7 +1215,6 @@ void MipsConstantIslands::createNewWater(unsigned CPUserIndex,
CPUser &U = CPUsers[CPUserIndex];
MachineInstr *UserMI = U.MI;
MachineInstr *CPEMI = U.CPEMI;
unsigned CPELogAlign = getCPELogAlign(*CPEMI);
MachineBasicBlock *UserMBB = UserMI->getParent();
const BasicBlockInfo &UserBBI = BBInfo[UserMBB->getNumber()];
@@ -1233,7 +1224,7 @@ void MipsConstantIslands::createNewWater(unsigned CPUserIndex,
// Size of branch to insert.
unsigned Delta = 2;
// Compute the offset where the CPE will begin.
unsigned CPEOffset = UserBBI.postOffset(CPELogAlign) + Delta;
unsigned CPEOffset = UserBBI.postOffset() + Delta;

if (isOffsetInRange(UserOffset, CPEOffset, U)) {
LLVM_DEBUG(dbgs() << "Split at end of " << printMBBReference(*UserMBB)
@@ -1259,9 +1250,8 @@ void MipsConstantIslands::createNewWater(unsigned CPUserIndex,

// Try to split the block so it's fully aligned. Compute the latest split
// point where we can add a 4-byte branch instruction, and then align to
// LogAlign which is the largest possible alignment in the function.
unsigned LogAlign = Log2(MF->getAlignment());
assert(LogAlign >= CPELogAlign && "Over-aligned constant pool entry");
// Align which is the largest possible alignment in the function.
const llvm::Align Align = MF->getAlignment();
unsigned BaseInsertOffset = UserOffset + U.getMaxDisp();
LLVM_DEBUG(dbgs() << format("Split in middle of big block before %#x",
BaseInsertOffset));
@@ -1272,7 +1262,7 @@ void MipsConstantIslands::createNewWater(unsigned CPUserIndex,
BaseInsertOffset -= 4;

LLVM_DEBUG(dbgs() << format(", adjusted to %#x", BaseInsertOffset)
<< " la=" << LogAlign << '\n');
<< " la=" << Log2(Align) << '\n');

// This could point off the end of the block if we've already got constant
// pool entries following this block; only the last one is in the water list.
@@ -1297,8 +1287,8 @@ void MipsConstantIslands::createNewWater(unsigned CPUserIndex,
CPUser &U = CPUsers[CPUIndex];
if (!isOffsetInRange(Offset, EndInsertOffset, U)) {
// Shift intertion point by one unit of alignment so it is within reach.
BaseInsertOffset -= 1u << LogAlign;
EndInsertOffset -= 1u << LogAlign;
BaseInsertOffset -= Align.value();
EndInsertOffset -= Align.value();
}
// This is overly conservative, as we don't account for CPEMIs being
// reused within the block, but it doesn't matter much. Also assume CPEs
@@ -1401,7 +1391,7 @@ bool MipsConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) {
++NumCPEs;

// Mark the basic block as aligned as required by the const-pool entry.
NewIsland->setLogAlignment(getCPELogAlign(*U.CPEMI));
NewIsland->setAlignment(getCPEAlign(*U.CPEMI));

// Increase the size of the island block to account for the new entry.
BBInfo[NewIsland->getNumber()].Size += Size;
@@ -1433,10 +1423,11 @@ void MipsConstantIslands::removeDeadCPEMI(MachineInstr *CPEMI) {
BBInfo[CPEBB->getNumber()].Size = 0;

// This block no longer needs to be aligned.
CPEBB->setLogAlignment(0);
} else
CPEBB->setAlignment(llvm::Align(1));
} else {
// Entries are sorted by descending alignment, so realign from the front.
CPEBB->setLogAlignment(getCPELogAlign(*CPEBB->begin()));
CPEBB->setAlignment(getCPEAlign(*CPEBB->begin()));
}

adjustBBOffsetsAfter(CPEBB);
// An island has only one predecessor BB and one successor BB. Check if
@@ -1531,7 +1522,7 @@ MipsConstantIslands::fixupUnconditionalBr(ImmBranch &Br) {
// We should have a way to back out this alignment restriction if we "can" later.
// but it is not harmful.
//
DestBB->setLogAlignment(2);
DestBB->setAlignment(llvm::Align(4));
Br.MaxDisp = ((1<<24)-1) * 2;
MI->setDesc(TII->get(Mips::JalB16));
}
@@ -178,21 +178,20 @@ int PPCBSel::computeBranchSize(MachineFunction &Fn,
const MachineBasicBlock *Dest,
unsigned BrOffset) {
int BranchSize;
unsigned MaxLogAlign = 2;
llvm::Align MaxAlign = llvm::Align(4);
bool NeedExtraAdjustment = false;
if (Dest->getNumber() <= Src->getNumber()) {
// If this is a backwards branch, the delta is the offset from the
// start of this block to this branch, plus the sizes of all blocks
// from this block to the dest.
BranchSize = BrOffset;
MaxLogAlign = std::max(MaxLogAlign, Src->getLogAlignment());
MaxAlign = std::max(MaxAlign, Src->getAlignment());

int DestBlock = Dest->getNumber();
BranchSize += BlockSizes[DestBlock].first;
for (unsigned i = DestBlock+1, e = Src->getNumber(); i < e; ++i) {
BranchSize += BlockSizes[i].first;
MaxLogAlign =
std::max(MaxLogAlign, Fn.getBlockNumbered(i)->getLogAlignment());
MaxAlign = std::max(MaxAlign, Fn.getBlockNumbered(i)->getAlignment());
}

NeedExtraAdjustment = (FirstImpreciseBlock >= 0) &&
@@ -203,11 +202,10 @@ int PPCBSel::computeBranchSize(MachineFunction &Fn,
unsigned StartBlock = Src->getNumber();
BranchSize = BlockSizes[StartBlock].first - BrOffset;

MaxLogAlign = std::max(MaxLogAlign, Dest->getLogAlignment());
MaxAlign = std::max(MaxAlign, Dest->getAlignment());
for (unsigned i = StartBlock+1, e = Dest->getNumber(); i != e; ++i) {
BranchSize += BlockSizes[i].first;
MaxLogAlign =
std::max(MaxLogAlign, Fn.getBlockNumbered(i)->getLogAlignment());
MaxAlign = std::max(MaxAlign, Fn.getBlockNumbered(i)->getAlignment());
}

NeedExtraAdjustment = (FirstImpreciseBlock >= 0) &&
@@ -257,7 +255,7 @@ int PPCBSel::computeBranchSize(MachineFunction &Fn,
// The computed offset is at most ((1 << alignment) - 4) bytes smaller
// than actual offset. So we add this number to the offset for safety.
if (NeedExtraAdjustment)
BranchSize += (1 << MaxLogAlign) - 4;
BranchSize += MaxAlign.value() - 4;

return BranchSize;
}
@@ -85,9 +85,9 @@ struct MBBInfo {
// This value never changes.
uint64_t Size = 0;

// The minimum alignment of the block, as a log2 value.
// The minimum alignment of the block.
// This value never changes.
unsigned LogAlignment = 0;
llvm::Align Alignment;

// The number of terminators in this block. This value never changes.
unsigned NumTerminators = 0;
@@ -179,17 +179,16 @@ const uint64_t MaxForwardRange = 0xfffe;
// instructions.
void SystemZLongBranch::skipNonTerminators(BlockPosition &Position,
MBBInfo &Block) {
if (Block.LogAlignment > Position.KnownBits) {
if (Log2(Block.Alignment) > Position.KnownBits) {
// When calculating the address of Block, we need to conservatively
// assume that Block had the worst possible misalignment.
Position.Address += ((uint64_t(1) << Block.LogAlignment) -
(uint64_t(1) << Position.KnownBits));
Position.KnownBits = Block.LogAlignment;
Position.Address +=
(Block.Alignment.value() - (uint64_t(1) << Position.KnownBits));
Position.KnownBits = Log2(Block.Alignment);
}

// Align the addresses.
uint64_t AlignMask = (uint64_t(1) << Block.LogAlignment) - 1;
Position.Address = (Position.Address + AlignMask) & ~AlignMask;
Position.Address = alignTo(Position.Address, Block.Alignment);

// Record the block's position.
Block.Address = Position.Address;
@@ -282,7 +281,7 @@ uint64_t SystemZLongBranch::initMBBInfo() {
MBBInfo &Block = MBBs[I];

// Record the alignment, for quick access.
Block.LogAlignment = MBB->getLogAlignment();
Block.Alignment = MBB->getAlignment();

// Calculate the size of the fixed part of the block.
MachineBasicBlock::iterator MI = MBB->begin();
@@ -279,7 +279,7 @@ void X86RetpolineThunks::populateThunk(MachineFunction &MF,

CallTarget->addLiveIn(Reg);
CallTarget->setHasAddressTaken();
CallTarget->setLogAlignment(4);
CallTarget->setAlignment(llvm::Align(16));
insertRegReturnAddrClobber(*CallTarget, Reg);
CallTarget->back().setPreInstrSymbol(MF, TargetSym);
BuildMI(CallTarget, DebugLoc(), TII->get(RetOpc));
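As a sanity check on the NFC claim, the trickiest rewrites above are in ARMBasicBlockInfo.h, where UnknownPadding and BasicBlockInfo::postOffset switch from a log2(bytes) parameter to an alignment in bytes. The standalone C++ sketch below restates both formulations of UnknownPadding with plain integers (a simplified assumption for comparison only, not the llvm::Align-based code in the diff) and checks that they agree for every power-of-two alignment.

#include <cassert>
#include <cstdint>

// Old interface: alignment passed as log2(bytes).
unsigned UnknownPaddingLog(unsigned LogAlign, unsigned KnownBits) {
  if (KnownBits < LogAlign)
    return (1u << LogAlign) - (1u << KnownBits);
  return 0;
}

// New interface: alignment passed in bytes (a power of two), as in the patch.
unsigned UnknownPaddingBytes(uint64_t AlignBytes, unsigned KnownBits) {
  // Log2 of a power of two.
  unsigned LogAlign = 0;
  for (uint64_t V = AlignBytes; V > 1; V >>= 1)
    ++LogAlign;
  if (KnownBits < LogAlign)
    return unsigned(AlignBytes) - (1u << KnownBits);
  return 0;
}

int main() {
  // The two formulations agree for every power-of-two alignment,
  // which is what makes the migration a pure refactoring.
  for (unsigned Log = 0; Log < 8; ++Log)
    for (unsigned KB = 0; KB < 8; ++KB)
      assert(UnknownPaddingLog(Log, KB) == UnknownPaddingBytes(1u << Log, KB));
  return 0;
}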