Andrea Di Biagio 13160fb6a6 [MCA][LSUnit] Track loads and stores until retirement.
Before this patch, loads and stores were only tracked by their corresponding
queues in the LSUnit from dispatch until the execute stage. In practice we
should be more conservative and assume that memory opcodes leave their queues
at the retirement stage.

Basically, loads should leave the load queue only when they have completed and
delivered their data. We conservatively assume that a load completes when it is
retired. Stores should be tracked by the store queue from dispatch until
retirement; in practice, a store can only leave the store queue once its data
has been written to the data cache.
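
To make the intended lifecycle concrete, here is a minimal sketch of how a
driver could exercise the unit under the new model. It is illustrative only:
the scheduling model SM and the instruction reference IR are assumed context,
and the real mca pipeline wires these calls up through the scheduler and the
retire stage.

  // Illustrative sketch; SM and IR are assumed to exist in the caller.
  mca::LSUnit LSU(SM, /*LQ=*/32, /*SQ=*/20, /*AssumeNoAlias=*/false);

  if (LSU.isAvailable(IR) == mca::LSUnit::LSU_AVAILABLE) {
    // Dispatch acquires an LQ and/or SQ entry and returns the memory group
    // identifier, which the pipeline records as the instruction's LSU token.
    unsigned GroupID = LSU.dispatch(IR);
    (void)GroupID;
  }

  // ... the instruction issues and executes ...
  LSU.onInstructionExecuted(IR); // group bookkeeping; the queue entry survives

  // ... the instruction retires ...
  LSU.onInstructionRetired(IR);  // the LDQ/STQ entry is released only here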

This is mostly a mechanical change. With this patch, the retire stage notifies
the LSUnit when a memory instruction is retired; that notification triggers the
release of LDQ/STQ entries. The only visible change is in the memory tests for
the bdver2 model, because bdver2 is the only model that defines the load/store
queue sizes.
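
The retire-side hook itself is small. A sketch of what the notifying code might
look like, assuming the retire stage holds a reference LSU to the unit and a
RetiredInstructions list (the actual RetireStage implementation lives elsewhere
in llvm-mca):

  // Sketch of the retirement notification; RetiredInstructions is assumed.
  for (const mca::InstRef &IR : RetiredInstructions) {
    const mca::Instruction &IS = *IR.getInstruction();
    if (IS.isMemOp())               // only memory opcodes hold LDQ/STQ entries
      LSU.onInstructionRetired(IR); // frees the entry at retirement
  }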

This patch partially addresses PR39830.

Differential Revision: https://reviews.llvm.org/D68266

llvm-svn: 374034
2019-10-08 10:46:01 +00:00


//===----------------------- LSUnit.cpp --------------------------*- C++-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// A Load-Store Unit for the llvm-mca tool.
///
//===----------------------------------------------------------------------===//
#include "llvm/MCA/HardwareUnits/LSUnit.h"
#include "llvm/MCA/Instruction.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#define DEBUG_TYPE "llvm-mca"
namespace llvm {
namespace mca {
LSUnitBase::LSUnitBase(const MCSchedModel &SM, unsigned LQ, unsigned SQ,
                       bool AssumeNoAlias)
    : LQSize(LQ), SQSize(SQ), UsedLQEntries(0), UsedSQEntries(0),
      NoAlias(AssumeNoAlias), NextGroupID(1) {
  if (SM.hasExtraProcessorInfo()) {
    const MCExtraProcessorInfo &EPI = SM.getExtraProcessorInfo();
    if (!LQSize && EPI.LoadQueueID) {
      const MCProcResourceDesc &LdQDesc = *SM.getProcResource(EPI.LoadQueueID);
      LQSize = std::max(0, LdQDesc.BufferSize);
    }

    if (!SQSize && EPI.StoreQueueID) {
      const MCProcResourceDesc &StQDesc = *SM.getProcResource(EPI.StoreQueueID);
      SQSize = std::max(0, StQDesc.BufferSize);
    }
  }
}

LSUnitBase::~LSUnitBase() {}

void LSUnitBase::cycleEvent() {
  for (const std::pair<unsigned, std::unique_ptr<MemoryGroup>> &G : Groups)
    G.second->cycleEvent();
}

#ifndef NDEBUG
void LSUnitBase::dump() const {
  dbgs() << "[LSUnit] LQ_Size = " << getLoadQueueSize() << '\n';
  dbgs() << "[LSUnit] SQ_Size = " << getStoreQueueSize() << '\n';
  dbgs() << "[LSUnit] NextLQSlotIdx = " << getUsedLQEntries() << '\n';
  dbgs() << "[LSUnit] NextSQSlotIdx = " << getUsedSQEntries() << '\n';
  dbgs() << "\n";

  for (const auto &GroupIt : Groups) {
    const MemoryGroup &Group = *GroupIt.second;
    dbgs() << "[LSUnit] Group (" << GroupIt.first << "): "
           << "[ #Preds = " << Group.getNumPredecessors()
           << ", #GIssued = " << Group.getNumExecutingPredecessors()
           << ", #GExecuted = " << Group.getNumExecutedPredecessors()
           << ", #Inst = " << Group.getNumInstructions()
           << ", #IIssued = " << Group.getNumExecuting()
           << ", #IExecuted = " << Group.getNumExecuted() << '\n';
  }
}
#endif

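// Note: dispatch() assigns the memory operation to a memory group and returns
// the group identifier, which callers record as the instruction's LSU token.
// Stores always start a new group; loads join the current load group unless an
// ordering constraint (a barrier, or an older store) forces a new one.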
unsigned LSUnit::dispatch(const InstRef &IR) {
  const InstrDesc &Desc = IR.getInstruction()->getDesc();
  unsigned IsMemBarrier = Desc.HasSideEffects;
  assert((Desc.MayLoad || Desc.MayStore) && "Not a memory operation!");

  if (Desc.MayLoad)
    acquireLQSlot();
  if (Desc.MayStore)
    acquireSQSlot();

  if (Desc.MayStore) {
    // Always create a new group for store operations.
    // A store may not pass a previous store or store barrier.
    unsigned NewGID = createMemoryGroup();
    MemoryGroup &NewGroup = getGroup(NewGID);
    NewGroup.addInstruction();

    // A store may not pass a previous load or load barrier.
    unsigned ImmediateLoadDominator =
        std::max(CurrentLoadGroupID, CurrentLoadBarrierGroupID);
    if (ImmediateLoadDominator) {
      MemoryGroup &IDom = getGroup(ImmediateLoadDominator);
      LLVM_DEBUG(dbgs() << "[LSUnit]: GROUP DEP: (" << ImmediateLoadDominator
                        << ") --> (" << NewGID << ")\n");
      IDom.addSuccessor(&NewGroup);
    }
    if (CurrentStoreGroupID) {
      MemoryGroup &StoreGroup = getGroup(CurrentStoreGroupID);
      LLVM_DEBUG(dbgs() << "[LSUnit]: GROUP DEP: (" << CurrentStoreGroupID
                        << ") --> (" << NewGID << ")\n");
      StoreGroup.addSuccessor(&NewGroup);
    }

    CurrentStoreGroupID = NewGID;
    if (Desc.MayLoad) {
      CurrentLoadGroupID = NewGID;
      if (IsMemBarrier)
        CurrentLoadBarrierGroupID = NewGID;
    }

    return NewGID;
  }

  assert(Desc.MayLoad && "Expected a load!");

  // Always create a new memory group if this is the first load of the
  // sequence.
  // A load may not pass a previous store unless flag 'NoAlias' is set.
  // A load may pass a previous load.
  // A younger load cannot pass an older load barrier.
  // A load barrier cannot pass an older load.
  bool ShouldCreateANewGroup = !CurrentLoadGroupID || IsMemBarrier ||
                               CurrentLoadGroupID <= CurrentStoreGroupID ||
                               CurrentLoadGroupID <= CurrentLoadBarrierGroupID;
  if (ShouldCreateANewGroup) {
    unsigned NewGID = createMemoryGroup();
    MemoryGroup &NewGroup = getGroup(NewGID);
    NewGroup.addInstruction();

    if (!assumeNoAlias() && CurrentStoreGroupID) {
      MemoryGroup &StGroup = getGroup(CurrentStoreGroupID);
      LLVM_DEBUG(dbgs() << "[LSUnit]: GROUP DEP: (" << CurrentStoreGroupID
                        << ") --> (" << NewGID << ")\n");
      StGroup.addSuccessor(&NewGroup);
    }
    if (CurrentLoadBarrierGroupID) {
      MemoryGroup &LdGroup = getGroup(CurrentLoadBarrierGroupID);
      LLVM_DEBUG(dbgs() << "[LSUnit]: GROUP DEP: (" << CurrentLoadBarrierGroupID
                        << ") --> (" << NewGID << ")\n");
      LdGroup.addSuccessor(&NewGroup);
    }

    CurrentLoadGroupID = NewGID;
    if (IsMemBarrier)
      CurrentLoadBarrierGroupID = NewGID;
    return NewGID;
  }

  MemoryGroup &Group = getGroup(CurrentLoadGroupID);
  Group.addInstruction();
  return CurrentLoadGroupID;
}

LSUnit::Status LSUnit::isAvailable(const InstRef &IR) const {
  const InstrDesc &Desc = IR.getInstruction()->getDesc();
  if (Desc.MayLoad && isLQFull())
    return LSUnit::LSU_LQUEUE_FULL;
  if (Desc.MayStore && isSQFull())
    return LSUnit::LSU_SQUEUE_FULL;
  return LSUnit::LSU_AVAILABLE;
}

void LSUnitBase::onInstructionExecuted(const InstRef &IR) {
  unsigned GroupID = IR.getInstruction()->getLSUTokenID();
  auto It = Groups.find(GroupID);
  assert(It != Groups.end() && "Instruction not dispatched to the LS unit");
  It->second->onInstructionExecuted();
  if (It->second->isExecuted())
    Groups.erase(It);
}

void LSUnitBase::onInstructionRetired(const InstRef &IR) {
  const InstrDesc &Desc = IR.getInstruction()->getDesc();
  bool IsALoad = Desc.MayLoad;
  bool IsAStore = Desc.MayStore;
  assert((IsALoad || IsAStore) && "Expected a memory operation!");

  if (IsALoad) {
    releaseLQSlot();
    LLVM_DEBUG(dbgs() << "[LSUnit]: Instruction idx=" << IR.getSourceIndex()
                      << " has been removed from the load queue.\n");
  }

  if (IsAStore) {
    releaseSQSlot();
    LLVM_DEBUG(dbgs() << "[LSUnit]: Instruction idx=" << IR.getSourceIndex()
                      << " has been removed from the store queue.\n");
  }
}

void LSUnit::onInstructionExecuted(const InstRef &IR) {
  const Instruction &IS = *IR.getInstruction();
  if (!IS.isMemOp())
    return;

  LSUnitBase::onInstructionExecuted(IR);
  unsigned GroupID = IS.getLSUTokenID();
  if (!isValidGroupID(GroupID)) {
    if (GroupID == CurrentLoadGroupID)
      CurrentLoadGroupID = 0;
    if (GroupID == CurrentStoreGroupID)
      CurrentStoreGroupID = 0;
    if (GroupID == CurrentLoadBarrierGroupID)
      CurrentLoadBarrierGroupID = 0;
  }
}

} // namespace mca
} // namespace llvm