
[Alignment][NFC] Use llvm::Align for TargetLowering::getPrefLoopAlignment

Summary:
This patch is part of a series to introduce an Alignment type.
See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html
See this patch for the introduction of the type: https://reviews.llvm.org/D64790

Reviewers: courbet

Reviewed By: courbet

Subscribers: wuzish, arsenm, nemanjai, jvesely, nhaehnle, hiraditya, kbarton, MaskRay, jsji, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D67386

llvm-svn: 371511
Author: Guillaume Chatelet
Date:   2019-09-10 12:00:43 +00:00
Commit: 85e945f7fe (parent f1f74d729d)
7 changed files with 32 additions and 28 deletions
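For context, a minimal sketch of the llvm::Align semantics this series migrates to (based on the type introduced in D64790); it is illustrative only and not part of the patch:

#include "llvm/Support/Alignment.h"
#include <cassert>

int main() {
  llvm::Align A;                            // default-constructed: one-byte alignment,
  assert(A == 1);                           // the new way to say "no extra alignment"
  assert(llvm::Align(64).value() == 64);    // Align stores the byte amount itself...
  assert(llvm::Log2(llvm::Align(64)) == 6); // ...while Log2() recovers the old log2(bytes) encoding
  return 0;
}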

include/llvm/CodeGen/MachineBasicBlock.h

@@ -103,9 +103,9 @@ private:
   using LiveInVector = std::vector<RegisterMaskPair>;
   LiveInVector LiveIns;

-  /// Alignment of the basic block. Zero if the basic block does not need to be
-  /// aligned. The alignment is specified as log2(bytes).
-  unsigned LogAlignment = 0;
+  /// Alignment of the basic block. One if the basic block does not need to be
+  /// aligned.
+  llvm::Align Alignment;

   /// Indicate that this basic block is entered via an exception handler.
   bool IsEHPad = false;
@@ -374,11 +374,15 @@ public:
   /// Return alignment of the basic block. The alignment is specified as
   /// log2(bytes).
-  unsigned getLogAlignment() const { return LogAlignment; }
+  /// FIXME: Remove the Log versions once migration to llvm::Align is over.
+  unsigned getLogAlignment() const { return Log2(Alignment); }
+  llvm::Align getAlignment() const { return Alignment; }

   /// Set alignment of the basic block. The alignment is specified as
   /// log2(bytes).
-  void setLogAlignment(unsigned A) { LogAlignment = A; }
+  /// FIXME: Remove the Log versions once migration to llvm::Align is over.
+  void setLogAlignment(unsigned A) { Alignment = llvm::Align(1ULL << A); }
+  void setAlignment(llvm::Align A) { Alignment = A; }

   /// Returns true if the block is a landing pad. That is this basic block is
   /// entered via an exception handler.
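As a sanity check on the transitional accessors above, a small self-contained sketch (the struct below is a stand-in with the same shim logic, not the real MachineBasicBlock) showing that the Log shims and the new Align accessors round-trip to the same values:

#include "llvm/Support/Alignment.h"
#include <cassert>

// Stand-in for illustration only; mirrors the shim logic in the patch.
struct BlockAlignShim {
  llvm::Align Alignment; // defaults to Align(1): "does not need to be aligned"

  unsigned getLogAlignment() const { return llvm::Log2(Alignment); }
  llvm::Align getAlignment() const { return Alignment; }
  void setLogAlignment(unsigned A) { Alignment = llvm::Align(1ULL << A); }
  void setAlignment(llvm::Align A) { Alignment = A; }
};

int main() {
  BlockAlignShim B;
  assert(B.getLogAlignment() == 0);   // Align(1) maps back to the old "0" sentinel
  B.setLogAlignment(4);               // legacy callers still pass log2(bytes)
  assert(B.getAlignment() == 16);     // new callers read the byte amount directly
  B.setAlignment(llvm::Align(64));
  assert(B.getLogAlignment() == 6);
  return 0;
}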

include/llvm/CodeGen/TargetLowering.h

@@ -1593,8 +1593,8 @@ public:
   }

   /// Return the preferred loop alignment.
-  virtual unsigned getPrefLoopLogAlignment(MachineLoop *ML = nullptr) const {
-    return Log2(PrefLoopAlignment);
+  virtual llvm::Align getPrefLoopAlignment(MachineLoop *ML = nullptr) const {
+    return PrefLoopAlignment;
   }

   /// Should loops be aligned even when the function is marked OptSize (but not

lib/CodeGen/MachineBlockPlacement.cpp

@@ -2807,8 +2807,8 @@ void MachineBlockPlacement::alignBlocks() {
     if (!L)
      continue;

-    unsigned LogAlign = TLI->getPrefLoopLogAlignment(L);
-    if (!LogAlign)
+    const llvm::Align Align = TLI->getPrefLoopAlignment(L);
+    if (Align == 1)
      continue; // Don't care about loop alignment.

     // If the block is cold relative to the function entry don't waste space
@@ -2832,7 +2832,7 @@ void MachineBlockPlacement::alignBlocks() {
     // Force alignment if all the predecessors are jumps. We already checked
     // that the block isn't cold above.
     if (!LayoutPred->isSuccessor(ChainBB)) {
-      ChainBB->setLogAlignment(LogAlign);
+      ChainBB->setLogAlignment(Log2(Align));
       continue;
     }
@@ -2844,7 +2844,7 @@ void MachineBlockPlacement::alignBlocks() {
         MBPI->getEdgeProbability(LayoutPred, ChainBB);
     BlockFrequency LayoutEdgeFreq = MBFI->getBlockFreq(LayoutPred) * LayoutProb;
     if (LayoutEdgeFreq <= (Freq * ColdProb))
-      ChainBB->setLogAlignment(LogAlign);
+      ChainBB->setLogAlignment(Log2(Align));
   }
 }
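Note the changed guard: the old code treated a log2 value of 0 as "no preferred alignment", while the new code compares against one-byte alignment. A short equivalence sketch, with hypothetical helper names that are not part of the patch:

#include "llvm/Support/Alignment.h"
#include <cassert>

// Both guards skip the same blocks, since log2(bytes) == 0 exactly when the
// alignment is one byte.
static bool wantsAlignmentOld(unsigned LogAlign) { return LogAlign != 0; }
static bool wantsAlignmentNew(llvm::Align A) { return A != 1; }

int main() {
  for (unsigned L = 0; L < 8; ++L)
    assert(wantsAlignmentOld(L) == wantsAlignmentNew(llvm::Align(1ULL << L)));
  return 0;
}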

lib/Target/AMDGPU/SIISelLowering.cpp

@@ -10673,15 +10673,15 @@ void SITargetLowering::computeKnownBitsForFrameIndex(const SDValue Op,
   Known.Zero.setHighBits(getSubtarget()->getKnownHighZeroBitsForFrameIndex());
 }

-unsigned SITargetLowering::getPrefLoopLogAlignment(MachineLoop *ML) const {
-  const unsigned PrefLogAlign = TargetLowering::getPrefLoopLogAlignment(ML);
-  const unsigned CacheLineLogAlign = 6; // log2(64)
+llvm::Align SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
+  const llvm::Align PrefAlign = TargetLowering::getPrefLoopAlignment(ML);
+  const llvm::Align CacheLineAlign = llvm::Align(64);

   // Pre-GFX10 target did not benefit from loop alignment
   if (!ML || DisableLoopAlignment ||
       (getSubtarget()->getGeneration() < AMDGPUSubtarget::GFX10) ||
       getSubtarget()->hasInstFwdPrefetchBug())
-    return PrefLogAlign;
+    return PrefAlign;

   // On GFX10 I$ is 4 x 64 bytes cache lines.
   // By default prefetcher keeps one cache line behind and reads two ahead.
@@ -10695,8 +10695,8 @@ unsigned SITargetLowering::getPrefLoopLogAlignment(MachineLoop *ML) const {
   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
   const MachineBasicBlock *Header = ML->getHeader();

-  if (Header->getLogAlignment() != PrefLogAlign)
-    return Header->getLogAlignment(); // Already processed.
+  if (Header->getAlignment() != PrefAlign)
+    return Header->getAlignment(); // Already processed.

   unsigned LoopSize = 0;
   for (const MachineBasicBlock *MBB : ML->blocks()) {
@@ -10708,15 +10708,15 @@ unsigned SITargetLowering::getPrefLoopLogAlignment(MachineLoop *ML) const {
     for (const MachineInstr &MI : *MBB) {
       LoopSize += TII->getInstSizeInBytes(MI);
       if (LoopSize > 192)
-        return PrefLogAlign;
+        return PrefAlign;
     }
   }

   if (LoopSize <= 64)
-    return PrefLogAlign;
+    return PrefAlign;
   if (LoopSize <= 128)
-    return CacheLineLogAlign;
+    return CacheLineAlign;

   // If any of parent loops is surrounded by prefetch instructions do not
   // insert new for inner loop, which would reset parent's settings.
@@ -10724,7 +10724,7 @@ unsigned SITargetLowering::getPrefLoopLogAlignment(MachineLoop *ML) const {
     if (MachineBasicBlock *Exit = P->getExitBlock()) {
       auto I = Exit->getFirstNonDebugInstr();
       if (I != Exit->end() && I->getOpcode() == AMDGPU::S_INST_PREFETCH)
-        return CacheLineLogAlign;
+        return CacheLineAlign;
     }
   }
@@ -10741,7 +10741,7 @@ unsigned SITargetLowering::getPrefLoopLogAlignment(MachineLoop *ML) const {
         .addImm(2); // prefetch 1 line behind PC
   }

-  return CacheLineLogAlign;
+  return CacheLineAlign;
 }

 LLVM_ATTRIBUTE_UNUSED
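A condensed view of the GFX10 heuristic above: loops of at most 64 bytes keep the target default, loops up to 192 bytes get 64-byte (cache-line) alignment, and anything larger falls back to the default; the real code additionally emits S_INST_PREFETCH around loops in the 128-192 byte range. The function below is an illustrative summary under those assumptions, not the actual SITargetLowering member:

#include "llvm/Support/Alignment.h"

// Illustrative summary of the size thresholds used above (GFX10 only).
static llvm::Align pickGfx10LoopAlign(unsigned LoopSizeInBytes,
                                      llvm::Align TargetDefault) {
  const llvm::Align CacheLine(64);  // GFX10 instruction cache line
  if (LoopSizeInBytes <= 64)        // already fits in one line
    return TargetDefault;
  if (LoopSizeInBytes <= 128)       // fits in two lines once aligned
    return CacheLine;
  if (LoopSizeInBytes <= 192)       // three lines; the real code also inserts
    return CacheLine;               // S_INST_PREFETCH at the loop boundaries
  return TargetDefault;             // larger loops: leave the default
}

int main() {
  // A 100-byte loop gets cache-line alignment.
  return pickGfx10LoopAlign(100, llvm::Align(4)) == 64 ? 0 : 1;
}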

lib/Target/AMDGPU/SIISelLowering.h

@@ -379,7 +379,7 @@ public:
                                          unsigned Depth = 0) const override;

   AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override;
-  unsigned getPrefLoopLogAlignment(MachineLoop *ML) const override;
+  llvm::Align getPrefLoopAlignment(MachineLoop *ML) const override;

   void allocateHSAUserSGPRs(CCState &CCInfo,
                             MachineFunction &MF,

lib/Target/PowerPC/PPCISelLowering.cpp

@@ -14006,7 +14006,7 @@ void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
   }
 }

-unsigned PPCTargetLowering::getPrefLoopLogAlignment(MachineLoop *ML) const {
+llvm::Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
   switch (Subtarget.getDarwinDirective()) {
   default: break;
   case PPC::DIR_970:
@@ -14027,7 +14027,7 @@ unsigned PPCTargetLowering::getPrefLoopLogAlignment(MachineLoop *ML) const {
       // Actual alignment of the loop will depend on the hotness check and other
       // logic in alignBlocks.
       if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())
-        return 5;
+        return llvm::Align(32);
     }

     const PPCInstrInfo *TII = Subtarget.getInstrInfo();
@@ -14043,13 +14043,13 @@ unsigned PPCTargetLowering::getPrefLoopLogAlignment(MachineLoop *ML) const {
     }

     if (LoopSize > 16 && LoopSize <= 32)
-      return 5;
+      return llvm::Align(32);

     break;
   }
   }

-  return TargetLowering::getPrefLoopLogAlignment(ML);
+  return TargetLowering::getPrefLoopAlignment(ML);
 }

 /// getConstraintType - Given a constraint, return the type of
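The PPC constants above are a direct re-encoding: the old interface returned log2(bytes), so "return 5" and "return llvm::Align(32)" both request 32-byte alignment for small innermost loops on the listed CPU directives. A one-line check of that correspondence:

#include "llvm/Support/Alignment.h"
#include <cassert>

int main() {
  // Old "return 5" (log2 of bytes) and new "return llvm::Align(32)" agree.
  assert(llvm::Align(1ULL << 5) == llvm::Align(32));
  assert(llvm::Log2(llvm::Align(32)) == 5);
  return 0;
}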

lib/Target/PowerPC/PPCISelLowering.h

@@ -735,7 +735,7 @@ namespace llvm {
                                        const SelectionDAG &DAG,
                                        unsigned Depth = 0) const override;

-    unsigned getPrefLoopLogAlignment(MachineLoop *ML) const override;
+    llvm::Align getPrefLoopAlignment(MachineLoop *ML) const override;

     bool shouldInsertFencesForAtomic(const Instruction *I) const override {
       return true;