[Alignment][NFC] Use llvm::Align for TargetLowering::getPrefLoopAlignment
Summary:
This patch is part of a series to introduce an Alignment type.
See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html
See this patch for the introduction of the type: https://reviews.llvm.org/D64790

Reviewers: courbet

Reviewed By: courbet

Subscribers: wuzish, arsenm, nemanjai, jvesely, nhaehnle, hiraditya, kbarton, MaskRay, jsji, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D67386

llvm-svn: 371511
commit 85e945f7fe
parent f1f74d729d
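
For context, this series replaces the old unsigned log2(bytes) convention with the llvm::Align type introduced in D64790. The sketch below is not part of the patch; it only illustrates the round-trip between the two encodings, assuming llvm/Support/Alignment.h from that review (the concrete values are made up):

    #include "llvm/Support/Alignment.h"
    #include <cassert>

    int main() {
      // Old convention: an unsigned holding log2(bytes); 0 meant "not aligned".
      unsigned LogAlign = 6;
      // New convention: llvm::Align stores the byte alignment itself;
      // a default-constructed llvm::Align is 1, i.e. "not aligned".
      llvm::Align A(1ULL << LogAlign);
      assert(A.value() == 64);           // 2^6 bytes
      assert(llvm::Log2(A) == LogAlign); // Log2() recovers the old encoding
      return 0;
    }
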
@@ -103,9 +103,9 @@ private:
   using LiveInVector = std::vector<RegisterMaskPair>;
   LiveInVector LiveIns;

-  /// Alignment of the basic block. Zero if the basic block does not need to be
-  /// aligned. The alignment is specified as log2(bytes).
-  unsigned LogAlignment = 0;
+  /// Alignment of the basic block. One if the basic block does not need to be
+  /// aligned.
+  llvm::Align Alignment;

   /// Indicate that this basic block is entered via an exception handler.
   bool IsEHPad = false;

@@ -374,11 +374,15 @@ public:

   /// Return alignment of the basic block. The alignment is specified as
   /// log2(bytes).
-  unsigned getLogAlignment() const { return LogAlignment; }
+  /// FIXME: Remove the Log versions once migration to llvm::Align is over.
+  unsigned getLogAlignment() const { return Log2(Alignment); }
+  llvm::Align getAlignment() const { return Alignment; }

   /// Set alignment of the basic block. The alignment is specified as
   /// log2(bytes).
-  void setLogAlignment(unsigned A) { LogAlignment = A; }
+  /// FIXME: Remove the Log versions once migration to llvm::Align is over.
+  void setLogAlignment(unsigned A) { Alignment = llvm::Align(1ULL << A); }
+  void setAlignment(llvm::Align A) { Alignment = A; }

   /// Returns true if the block is a landing pad. That is this basic block is
   /// entered via an exception handler.

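To make the shim behaviour above concrete, here is a small self-contained mock, not the real MachineBasicBlock, that mirrors the four accessors from this hunk; only the accessor bodies are taken from the patch, everything else is illustrative:

    #include "llvm/Support/Alignment.h"
    #include <cassert>

    // Stand-in for MachineBasicBlock's alignment accessors during the migration.
    struct MockBlock {
      llvm::Align Alignment; // defaults to llvm::Align(1), i.e. "not aligned"

      unsigned getLogAlignment() const { return llvm::Log2(Alignment); }
      llvm::Align getAlignment() const { return Alignment; }
      void setLogAlignment(unsigned A) { Alignment = llvm::Align(1ULL << A); }
      void setAlignment(llvm::Align A) { Alignment = A; }
    };

    int main() {
      MockBlock MBB;
      MBB.setLogAlignment(4);                   // legacy spelling: log2(bytes)
      assert(MBB.getAlignment().value() == 16);
      MBB.setAlignment(llvm::Align(16));        // new spelling: llvm::Align
      assert(MBB.getLogAlignment() == 4);
      return 0;
    }
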
@@ -1593,8 +1593,8 @@ public:
   }

   /// Return the preferred loop alignment.
-  virtual unsigned getPrefLoopLogAlignment(MachineLoop *ML = nullptr) const {
-    return Log2(PrefLoopAlignment);
+  virtual llvm::Align getPrefLoopAlignment(MachineLoop *ML = nullptr) const {
+    return PrefLoopAlignment;
   }

   /// Should loops be aligned even when the function is marked OptSize (but not

@@ -2807,8 +2807,8 @@ void MachineBlockPlacement::alignBlocks() {
     if (!L)
       continue;

-    unsigned LogAlign = TLI->getPrefLoopLogAlignment(L);
-    if (!LogAlign)
+    const llvm::Align Align = TLI->getPrefLoopAlignment(L);
+    if (Align == 1)
       continue; // Don't care about loop alignment.

     // If the block is cold relative to the function entry don't waste space

@@ -2832,7 +2832,7 @@ void MachineBlockPlacement::alignBlocks() {
     // Force alignment if all the predecessors are jumps. We already checked
     // that the block isn't cold above.
     if (!LayoutPred->isSuccessor(ChainBB)) {
-      ChainBB->setLogAlignment(LogAlign);
+      ChainBB->setLogAlignment(Log2(Align));
       continue;
     }

@@ -2844,7 +2844,7 @@ void MachineBlockPlacement::alignBlocks() {
         MBPI->getEdgeProbability(LayoutPred, ChainBB);
     BlockFrequency LayoutEdgeFreq = MBFI->getBlockFreq(LayoutPred) * LayoutProb;
     if (LayoutEdgeFreq <= (Freq * ColdProb))
-      ChainBB->setLogAlignment(LogAlign);
+      ChainBB->setLogAlignment(Log2(Align));
   }
 }

@@ -10673,15 +10673,15 @@ void SITargetLowering::computeKnownBitsForFrameIndex(const SDValue Op,
   Known.Zero.setHighBits(getSubtarget()->getKnownHighZeroBitsForFrameIndex());
 }

-unsigned SITargetLowering::getPrefLoopLogAlignment(MachineLoop *ML) const {
-  const unsigned PrefLogAlign = TargetLowering::getPrefLoopLogAlignment(ML);
-  const unsigned CacheLineLogAlign = 6; // log2(64)
+llvm::Align SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
+  const llvm::Align PrefAlign = TargetLowering::getPrefLoopAlignment(ML);
+  const llvm::Align CacheLineAlign = llvm::Align(64);

   // Pre-GFX10 target did not benefit from loop alignment
   if (!ML || DisableLoopAlignment ||
       (getSubtarget()->getGeneration() < AMDGPUSubtarget::GFX10) ||
       getSubtarget()->hasInstFwdPrefetchBug())
-    return PrefLogAlign;
+    return PrefAlign;

   // On GFX10 I$ is 4 x 64 bytes cache lines.
   // By default prefetcher keeps one cache line behind and reads two ahead.

@@ -10695,8 +10695,8 @@ unsigned SITargetLowering::getPrefLoopLogAlignment(MachineLoop *ML) const {

   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
   const MachineBasicBlock *Header = ML->getHeader();
-  if (Header->getLogAlignment() != PrefLogAlign)
-    return Header->getLogAlignment(); // Already processed.
+  if (Header->getAlignment() != PrefAlign)
+    return Header->getAlignment(); // Already processed.

   unsigned LoopSize = 0;
   for (const MachineBasicBlock *MBB : ML->blocks()) {

@@ -10708,15 +10708,15 @@ unsigned SITargetLowering::getPrefLoopLogAlignment(MachineLoop *ML) const {
     for (const MachineInstr &MI : *MBB) {
       LoopSize += TII->getInstSizeInBytes(MI);
       if (LoopSize > 192)
-        return PrefLogAlign;
+        return PrefAlign;
     }
   }

   if (LoopSize <= 64)
-    return PrefLogAlign;
+    return PrefAlign;

   if (LoopSize <= 128)
-    return CacheLineLogAlign;
+    return CacheLineAlign;

   // If any of parent loops is surrounded by prefetch instructions do not
   // insert new for inner loop, which would reset parent's settings.

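Read together, the hunks above encode a small size ladder for GFX10: loops of at most 64 bytes keep the default preference, loops up to 128 bytes get 64-byte (cache-line) alignment, and anything over 192 bytes just keeps the default preference; the 128 to 192 byte range additionally involves the S_INST_PREFETCH handling shown in the next hunks. The sketch below restates that ladder outside SITargetLowering purely for illustration; the thresholds come from the diff, the function and its callers are made up:

    #include "llvm/Support/Alignment.h"
    #include <cassert>

    // Illustrative restatement of the GFX10 loop-alignment size ladder; the real
    // decision lives in SITargetLowering::getPrefLoopAlignment and also inserts
    // S_INST_PREFETCH instructions for the 128..192 byte case.
    static llvm::Align chooseGFX10LoopAlign(unsigned LoopSizeInBytes,
                                            llvm::Align PrefAlign) {
      const llvm::Align CacheLineAlign(64); // GFX10 I$ line size
      if (LoopSizeInBytes > 192)
        return PrefAlign;      // too large, alignment would not help
      if (LoopSizeInBytes <= 64)
        return PrefAlign;      // already fits in one cache line
      return CacheLineAlign;   // 64 < size <= 192: align to a cache line
    }

    int main() {
      const llvm::Align Pref(4); // assumed default preference, purely illustrative
      assert(chooseGFX10LoopAlign(48, Pref).value() == 4);
      assert(chooseGFX10LoopAlign(100, Pref).value() == 64);
      assert(chooseGFX10LoopAlign(160, Pref).value() == 64);
      assert(chooseGFX10LoopAlign(256, Pref).value() == 4);
      return 0;
    }
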
@@ -10724,7 +10724,7 @@ unsigned SITargetLowering::getPrefLoopLogAlignment(MachineLoop *ML) const {
     if (MachineBasicBlock *Exit = P->getExitBlock()) {
       auto I = Exit->getFirstNonDebugInstr();
       if (I != Exit->end() && I->getOpcode() == AMDGPU::S_INST_PREFETCH)
-        return CacheLineLogAlign;
+        return CacheLineAlign;
     }
   }

@@ -10741,7 +10741,7 @@ unsigned SITargetLowering::getPrefLoopLogAlignment(MachineLoop *ML) const {
         .addImm(2); // prefetch 1 line behind PC
   }

-  return CacheLineLogAlign;
+  return CacheLineAlign;
 }

 LLVM_ATTRIBUTE_UNUSED

@@ -379,7 +379,7 @@ public:
                                          unsigned Depth = 0) const override;
   AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override;

-  unsigned getPrefLoopLogAlignment(MachineLoop *ML) const override;
+  llvm::Align getPrefLoopAlignment(MachineLoop *ML) const override;

   void allocateHSAUserSGPRs(CCState &CCInfo,
                             MachineFunction &MF,

@@ -14006,7 +14006,7 @@ void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
   }
 }

-unsigned PPCTargetLowering::getPrefLoopLogAlignment(MachineLoop *ML) const {
+llvm::Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
   switch (Subtarget.getDarwinDirective()) {
   default: break;
   case PPC::DIR_970:

@@ -14027,7 +14027,7 @@ unsigned PPCTargetLowering::getPrefLoopLogAlignment(MachineLoop *ML) const {
       // Actual alignment of the loop will depend on the hotness check and other
       // logic in alignBlocks.
       if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())
-        return 5;
+        return llvm::Align(32);
     }

     const PPCInstrInfo *TII = Subtarget.getInstrInfo();

@@ -14043,13 +14043,13 @@ unsigned PPCTargetLowering::getPrefLoopLogAlignment(MachineLoop *ML) const {
       }

     if (LoopSize > 16 && LoopSize <= 32)
-      return 5;
+      return llvm::Align(32);

     break;
   }
   }

-  return TargetLowering::getPrefLoopLogAlignment(ML);
+  return TargetLowering::getPrefLoopAlignment(ML);
 }

 /// getConstraintType - Given a constraint, return the type of

@@ -735,7 +735,7 @@ namespace llvm {
                                           const SelectionDAG &DAG,
                                           unsigned Depth = 0) const override;

-    unsigned getPrefLoopLogAlignment(MachineLoop *ML) const override;
+    llvm::Align getPrefLoopAlignment(MachineLoop *ML) const override;

     bool shouldInsertFencesForAtomic(const Instruction *I) const override {
       return true;