
[llvm-mca] Refactor how execution is orchestrated by the Pipeline.

This patch changes how instruction execution is orchestrated by the Pipeline.
In particular, this patch makes it more explicit how instructions transition
through the various pipeline stages during execution.

The main goal is to simplify both the Stage API and the Pipeline execution logic.  At
the same time, this patch fixes some design issues which are currently latent,
but which are likely to cause problems in the future if people start defining
custom pipelines.

The new design assumes that each pipeline stage knows the "next-in-sequence".
The Stage API has gained three new methods:
 - isAvailable(IR)
 - checkNextStage(IR)
 - moveToTheNextStage(IR)

An instruction IR can be executed by a Stage if method `Stage::isAvailable(IR)`
returns true.
Instructions move to the next stage in sequence via method moveToTheNextStage(IR).
An instruction cannot be moved to the next stage if method checkNextStage(IR)
(called on the current stage) returns false.
Stages are now responsible for moving instructions to their next stage in
sequence when necessary.

Instructions are allowed to transition through multiple stages during a single
cycle (as long as stages are available, and as long as all the calls to
`checkNextStage(IR)` return true).
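
To make the new contract concrete, below is a minimal sketch of a hypothetical
custom stage written against the new API. The class name and its (empty)
processing step are invented for illustration; isAvailable(), hasWorkToComplete(),
execute(), checkNextStage() and moveToTheNextStage() are the Stage methods
described above.

  // Hypothetical example only: a simple pass-through stage.
  class ForwardingStage final : public Stage {
  public:
    // Accept an instruction only if the next stage in sequence can also
    // accept it during this same cycle.
    bool isAvailable(const InstRef &IR) const override {
      return checkNextStage(IR);
    }

    // This stage never buffers instructions across cycles.
    bool hasWorkToComplete() const override { return false; }

    llvm::Error execute(InstRef &IR) override {
      // ... stage-specific processing of IR would go here ...
      // Hand IR over to the next stage in sequence.
      return moveToTheNextStage(IR);
    }
  };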

Methods `Stage::preExecute()` and `Stage::postExecute()` have become redundant,
and this patch removes them.

Method Pipeline::runCycle() is now simpler, and it correctly visits every stage
at the beginning and at the end of each cycle.
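
In outline (reconstructed from the Pipeline::runCycle() hunk in the diff below),
the loop now only drives the first stage directly; all other transitions happen
through moveToTheNextStage():

  llvm::Error Pipeline::runCycle() {
    llvm::Error Err = llvm::ErrorSuccess();
    // Update stages before we start processing new instructions.
    for (auto I = Stages.rbegin(), E = Stages.rend(); I != E && !Err; ++I)
      Err = (*I)->cycleStart();
    if (Err)
      return Err;

    // Only the first stage is driven directly; it pushes instructions to its
    // successors via moveToTheNextStage().
    InstRef IR;
    Stage &FirstStage = *Stages[0];
    while (!Err && FirstStage.isAvailable(IR))
      Err = FirstStage.execute(IR);

    // Update stages in preparation for a new cycle.
    for (auto I = Stages.rbegin(), E = Stages.rend(); I != E && !Err; ++I)
      Err = (*I)->cycleEnd();
    return Err;
  }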

Other changes:
 - DispatchStage no longer requires a reference to the Scheduler.
 - ExecuteStage no longer needs to directly interact with the
   RetireControlUnit. Instead, executed instructions are now moved directly to
   the next stage (i.e., the retire stage); see the sketch after this list.
 - RetireStage gained an execute method. This allowed us to remove the
   dependency on the RCU from ExecuteStage.
 - FetchStage now updates the "program counter" during cycleStart() (i.e.,
   before we start executing new instructions).
 - We no longer need Stage::Status to be returned by method execute(). It has
   been dropped in favor of a more lightweight llvm::Error.
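
As an example of the new hand-off between execution and retirement, the
relevant snippets (reconstructed from the ExecuteStage and RetireStage hunks
in the diff below) look roughly like this:

  // In ExecuteStage: once IR has fully executed, notify listeners and hand it
  // to the next stage in sequence instead of updating the RCU directly.
  if (IR.getInstruction()->isExecuted()) {
    notifyInstructionExecuted(IR);
    // FIXME: add a buffer of executed instructions.
    if (llvm::Error S = moveToTheNextStage(IR))
      return S;
  }

  // In RetireStage: the new execute() method now owns the RCU update.
  llvm::Error RetireStage::execute(InstRef &IR) {
    RCU.onInstructionExecuted(IR.getInstruction()->getRCUTokenID());
    return llvm::ErrorSuccess();
  }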

Overall, I measured a ~11% performance gain w.r.t. the previous design.  I also
think that the Stage interface is probably easier to read now.  That being said,
code comments still need to be improved, and I plan to do that in a follow-up patch.

Differential revision: https://reviews.llvm.org/D50849

llvm-svn: 339923
Andrea Di Biagio 2018-08-16 19:00:48 +00:00
parent ca89c6b055
commit 081e1ff89b
14 changed files with 171 additions and 149 deletions


@ -40,12 +40,12 @@ Context::createDefaultPipeline(const PipelineOptions &Opts, InstrBuilder &IB,
SM, Opts.LoadQueueSize, Opts.StoreQueueSize, Opts.AssumeNoAlias); SM, Opts.LoadQueueSize, Opts.StoreQueueSize, Opts.AssumeNoAlias);
// Create the pipeline and its stages. // Create the pipeline and its stages.
auto P = llvm::make_unique<Pipeline>(); auto StagePipeline = llvm::make_unique<Pipeline>();
auto F = llvm::make_unique<FetchStage>(IB, SrcMgr); auto Fetch = llvm::make_unique<FetchStage>(IB, SrcMgr);
auto D = llvm::make_unique<DispatchStage>( auto Dispatch = llvm::make_unique<DispatchStage>(
STI, MRI, Opts.RegisterFileSize, Opts.DispatchWidth, *RCU, *PRF, *HWS); STI, MRI, Opts.RegisterFileSize, Opts.DispatchWidth, *RCU, *PRF);
auto R = llvm::make_unique<RetireStage>(*RCU, *PRF); auto Execute = llvm::make_unique<ExecuteStage>(*HWS);
auto E = llvm::make_unique<ExecuteStage>(*RCU, *HWS); auto Retire = llvm::make_unique<RetireStage>(*RCU, *PRF);
// Add the hardware to the context. // Add the hardware to the context.
addHardwareUnit(std::move(RCU)); addHardwareUnit(std::move(RCU));
@ -53,11 +53,11 @@ Context::createDefaultPipeline(const PipelineOptions &Opts, InstrBuilder &IB,
addHardwareUnit(std::move(HWS)); addHardwareUnit(std::move(HWS));
// Build the pipeline. // Build the pipeline.
P->appendStage(std::move(F)); StagePipeline->appendStage(std::move(Fetch));
P->appendStage(std::move(D)); StagePipeline->appendStage(std::move(Dispatch));
P->appendStage(std::move(R)); StagePipeline->appendStage(std::move(Execute));
P->appendStage(std::move(E)); StagePipeline->appendStage(std::move(Retire));
return P; return StagePipeline;
} }
} // namespace mca } // namespace mca


@ -59,12 +59,8 @@ bool DispatchStage::checkRCU(const InstRef &IR) const {
return false; return false;
} }
bool DispatchStage::checkScheduler(const InstRef &IR) const { bool DispatchStage::canDispatch(const InstRef &IR) const {
HWStallEvent::GenericEventType Event; return checkRCU(IR) && checkPRF(IR) && checkNextStage(IR);
const bool Ready = SC.canBeDispatched(IR, Event);
if (!Ready)
notifyEvent<HWStallEvent>(HWStallEvent(Event, IR));
return Ready;
} }
void DispatchStage::updateRAWDependencies(ReadState &RS, void DispatchStage::updateRAWDependencies(ReadState &RS,
@ -87,7 +83,7 @@ void DispatchStage::updateRAWDependencies(ReadState &RS,
} }
} }
void DispatchStage::dispatch(InstRef IR) { llvm::Error DispatchStage::dispatch(InstRef IR) {
assert(!CarryOver && "Cannot dispatch another instruction!"); assert(!CarryOver && "Cannot dispatch another instruction!");
Instruction &IS = *IR.getInstruction(); Instruction &IS = *IR.getInstruction();
const InstrDesc &Desc = IS.getDesc(); const InstrDesc &Desc = IS.getDesc();
@ -127,8 +123,10 @@ void DispatchStage::dispatch(InstRef IR) {
// dispatched to the schedulers for execution. // dispatched to the schedulers for execution.
IS.dispatch(RCU.reserveSlot(IR, NumMicroOps)); IS.dispatch(RCU.reserveSlot(IR, NumMicroOps));
// Notify listeners of the "instruction dispatched" event. // Notify listeners of the "instruction dispatched" event,
// and move IR to the next stage.
notifyInstructionDispatched(IR, RegisterFiles); notifyInstructionDispatched(IR, RegisterFiles);
return std::move(moveToTheNextStage(IR));
} }
llvm::Error DispatchStage::cycleStart() { llvm::Error DispatchStage::cycleStart() {
@ -137,12 +135,20 @@ llvm::Error DispatchStage::cycleStart() {
return llvm::ErrorSuccess(); return llvm::ErrorSuccess();
} }
Stage::Status DispatchStage::execute(InstRef &IR) { bool DispatchStage::isAvailable(const InstRef &IR) const {
const InstrDesc &Desc = IR.getInstruction()->getDesc(); const InstrDesc &Desc = IR.getInstruction()->getDesc();
if (!isAvailable(Desc.NumMicroOps) || !canDispatch(IR)) unsigned Required = std::min(Desc.NumMicroOps, DispatchWidth);
return Stage::Stop; if (Required > AvailableEntries)
dispatch(IR); return false;
return Stage::Continue; // The dispatch logic doesn't internally buffer instructions. It only accepts
// instructions that can be successfully moved to the next stage during this
// same cycle.
return canDispatch(IR);
}
llvm::Error DispatchStage::execute(InstRef &IR) {
assert(canDispatch(IR) && "Cannot dispatch another instruction!");
return std::move(dispatch(IR));
} }
#ifndef NDEBUG #ifndef NDEBUG


@ -29,8 +29,6 @@
namespace mca { namespace mca {
class Scheduler;
// Implements the hardware dispatch logic. // Implements the hardware dispatch logic.
// //
// This class is responsible for the dispatch stage, in which instructions are // This class is responsible for the dispatch stage, in which instructions are
@ -56,26 +54,17 @@ class DispatchStage final : public Stage {
const llvm::MCSubtargetInfo &STI; const llvm::MCSubtargetInfo &STI;
RetireControlUnit &RCU; RetireControlUnit &RCU;
RegisterFile &PRF; RegisterFile &PRF;
Scheduler &SC;
bool checkRCU(const InstRef &IR) const; bool checkRCU(const InstRef &IR) const;
bool checkPRF(const InstRef &IR) const; bool checkPRF(const InstRef &IR) const;
bool checkScheduler(const InstRef &IR) const; bool canDispatch(const InstRef &IR) const;
void dispatch(InstRef IR); llvm::Error dispatch(InstRef IR);
void updateRAWDependencies(ReadState &RS, const llvm::MCSubtargetInfo &STI); void updateRAWDependencies(ReadState &RS, const llvm::MCSubtargetInfo &STI);
void notifyInstructionDispatched(const InstRef &IR, void notifyInstructionDispatched(const InstRef &IR,
llvm::ArrayRef<unsigned> UsedPhysRegs); llvm::ArrayRef<unsigned> UsedPhysRegs);
bool isAvailable(unsigned NumEntries) const {
return NumEntries <= AvailableEntries || AvailableEntries == DispatchWidth;
}
bool canDispatch(const InstRef &IR) {
assert(isAvailable(IR.getInstruction()->getDesc().NumMicroOps));
return checkRCU(IR) && checkPRF(IR) && checkScheduler(IR);
}
void collectWrites(llvm::SmallVectorImpl<WriteRef> &Vec, void collectWrites(llvm::SmallVectorImpl<WriteRef> &Vec,
unsigned RegID) const { unsigned RegID) const {
return PRF.collectWrites(Vec, RegID); return PRF.collectWrites(Vec, RegID);
@ -85,16 +74,17 @@ public:
DispatchStage(const llvm::MCSubtargetInfo &Subtarget, DispatchStage(const llvm::MCSubtargetInfo &Subtarget,
const llvm::MCRegisterInfo &MRI, unsigned RegisterFileSize, const llvm::MCRegisterInfo &MRI, unsigned RegisterFileSize,
unsigned MaxDispatchWidth, RetireControlUnit &R, unsigned MaxDispatchWidth, RetireControlUnit &R,
RegisterFile &F, Scheduler &Sched) RegisterFile &F)
: DispatchWidth(MaxDispatchWidth), AvailableEntries(MaxDispatchWidth), : DispatchWidth(MaxDispatchWidth), AvailableEntries(MaxDispatchWidth),
CarryOver(0U), STI(Subtarget), RCU(R), PRF(F), SC(Sched) {} CarryOver(0U), STI(Subtarget), RCU(R), PRF(F) {}
// We can always try to dispatch, so returning false is okay in this case. bool isAvailable(const InstRef &IR) const override;
// The retire stage, which controls the RCU, might have items to complete but
// RetireStage::hasWorkToComplete will check for that case. // The dispatch logic internally doesn't buffer instructions. So there is
// never work to do at the beginning of every cycle.
bool hasWorkToComplete() const override { return false; } bool hasWorkToComplete() const override { return false; }
llvm::Error cycleStart() override; llvm::Error cycleStart() override;
Status execute(InstRef &IR) override; llvm::Error execute(InstRef &IR) override;
void notifyDispatchStall(const InstRef &IR, unsigned EventType); void notifyDispatchStall(const InstRef &IR, unsigned EventType);
#ifndef NDEBUG #ifndef NDEBUG


@ -26,6 +26,14 @@ namespace mca {
using namespace llvm; using namespace llvm;
bool ExecuteStage::isAvailable(const InstRef &IR) const {
HWStallEvent::GenericEventType Event;
if (HWS.canBeDispatched(IR, Event))
return true;
notifyEvent<HWStallEvent>(HWStallEvent(Event, IR));
return false;
}
// Reclaim the simulated resources used by the scheduler. // Reclaim the simulated resources used by the scheduler.
void ExecuteStage::reclaimSchedulerResources() { void ExecuteStage::reclaimSchedulerResources() {
SmallVector<ResourceRef, 8> ResourcesFreed; SmallVector<ResourceRef, 8> ResourcesFreed;
@ -38,8 +46,12 @@ void ExecuteStage::reclaimSchedulerResources() {
Error ExecuteStage::updateSchedulerQueues() { Error ExecuteStage::updateSchedulerQueues() {
SmallVector<InstRef, 4> InstructionIDs; SmallVector<InstRef, 4> InstructionIDs;
HWS.updateIssuedSet(InstructionIDs); HWS.updateIssuedSet(InstructionIDs);
for (const InstRef &IR : InstructionIDs) for (InstRef &IR : InstructionIDs) {
notifyInstructionExecuted(IR); notifyInstructionExecuted(IR);
//FIXME: add a buffer of executed instructions.
if (Error S = moveToTheNextStage(IR))
return S;
}
InstructionIDs.clear(); InstructionIDs.clear();
HWS.updatePendingQueue(InstructionIDs); HWS.updatePendingQueue(InstructionIDs);
@ -60,8 +72,12 @@ Error ExecuteStage::issueReadyInstructions() {
const InstrDesc &Desc = IR.getInstruction()->getDesc(); const InstrDesc &Desc = IR.getInstruction()->getDesc();
notifyReleasedBuffers(Desc.Buffers); notifyReleasedBuffers(Desc.Buffers);
notifyInstructionIssued(IR, Used); notifyInstructionIssued(IR, Used);
if (IR.getInstruction()->isExecuted()) if (IR.getInstruction()->isExecuted()) {
notifyInstructionExecuted(IR); notifyInstructionExecuted(IR);
//FIXME: add a buffer of executed instructions.
if (Error S = moveToTheNextStage(IR))
return S;
}
// Instructions that have been issued during this cycle might have unblocked // Instructions that have been issued during this cycle might have unblocked
// other dependent instructions. Dependent instructions may be issued during // other dependent instructions. Dependent instructions may be issued during
@ -100,7 +116,9 @@ Error ExecuteStage::cycleStart() {
} }
// Schedule the instruction for execution on the hardware. // Schedule the instruction for execution on the hardware.
Stage::Status ExecuteStage::execute(InstRef &IR) { Error ExecuteStage::execute(InstRef &IR) {
assert(isAvailable(IR) && "Scheduler is not available!");
#ifndef NDEBUG #ifndef NDEBUG
// Ensure that the HWS has not stored this instruction in its queues. // Ensure that the HWS has not stored this instruction in its queues.
HWS.sanityCheck(IR); HWS.sanityCheck(IR);
@ -116,7 +134,7 @@ Stage::Status ExecuteStage::execute(InstRef &IR) {
// Obtain a slot in the LSU. If we cannot reserve resources, return true, so // Obtain a slot in the LSU. If we cannot reserve resources, return true, so
// that succeeding stages can make progress. // that succeeding stages can make progress.
if (!HWS.reserveResources(IR)) if (!HWS.reserveResources(IR))
return Stage::Continue; return ErrorSuccess();
// If we did not return early, then the scheduler is ready for execution. // If we did not return early, then the scheduler is ready for execution.
notifyInstructionReady(IR); notifyInstructionReady(IR);
@ -137,7 +155,7 @@ Stage::Status ExecuteStage::execute(InstRef &IR) {
// If we cannot issue immediately, the HWS will add IR to its ready queue for // If we cannot issue immediately, the HWS will add IR to its ready queue for
// execution later, so we must return early here. // execution later, so we must return early here.
if (!HWS.issueImmediately(IR)) if (!HWS.issueImmediately(IR))
return Stage::Continue; return ErrorSuccess();
LLVM_DEBUG(dbgs() << "[SCHEDULER] Instruction #" << IR LLVM_DEBUG(dbgs() << "[SCHEDULER] Instruction #" << IR
<< " issued immediately\n"); << " issued immediately\n");
@ -149,10 +167,12 @@ Stage::Status ExecuteStage::execute(InstRef &IR) {
// Perform notifications. // Perform notifications.
notifyReleasedBuffers(Desc.Buffers); notifyReleasedBuffers(Desc.Buffers);
notifyInstructionIssued(IR, Used); notifyInstructionIssued(IR, Used);
if (IR.getInstruction()->isExecuted()) if (IR.getInstruction()->isExecuted()) {
notifyInstructionExecuted(IR); notifyInstructionExecuted(IR);
//FIXME: add a buffer of executed instructions.
return Stage::Continue; return std::move(moveToTheNextStage(IR));
}
return ErrorSuccess();
} }
void ExecuteStage::notifyInstructionExecuted(const InstRef &IR) { void ExecuteStage::notifyInstructionExecuted(const InstRef &IR) {
@ -160,7 +180,6 @@ void ExecuteStage::notifyInstructionExecuted(const InstRef &IR) {
LLVM_DEBUG(dbgs() << "[E] Instruction Executed: #" << IR << '\n'); LLVM_DEBUG(dbgs() << "[E] Instruction Executed: #" << IR << '\n');
notifyEvent<HWInstructionEvent>( notifyEvent<HWInstructionEvent>(
HWInstructionEvent(HWInstructionEvent::Executed, IR)); HWInstructionEvent(HWInstructionEvent::Executed, IR));
RCU.onInstructionExecuted(IR.getInstruction()->getRCUTokenID());
} }
void ExecuteStage::notifyInstructionReady(const InstRef &IR) { void ExecuteStage::notifyInstructionReady(const InstRef &IR) {


@ -19,7 +19,6 @@
#define LLVM_TOOLS_LLVM_MCA_EXECUTE_STAGE_H #define LLVM_TOOLS_LLVM_MCA_EXECUTE_STAGE_H
#include "Instruction.h" #include "Instruction.h"
#include "RetireControlUnit.h"
#include "Scheduler.h" #include "Scheduler.h"
#include "Stage.h" #include "Stage.h"
#include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/ArrayRef.h"
@ -27,8 +26,6 @@
namespace mca { namespace mca {
class ExecuteStage final : public Stage { class ExecuteStage final : public Stage {
// Owner will go away when we move listeners/eventing to the stages.
RetireControlUnit &RCU;
Scheduler &HWS; Scheduler &HWS;
// The following routines are used to maintain the HWS. // The following routines are used to maintain the HWS.
@ -40,7 +37,7 @@ class ExecuteStage final : public Stage {
ExecuteStage &operator=(const ExecuteStage &Other) = delete; ExecuteStage &operator=(const ExecuteStage &Other) = delete;
public: public:
ExecuteStage(RetireControlUnit &R, Scheduler &S) : Stage(), RCU(R), HWS(S) {} ExecuteStage(Scheduler &S) : Stage(), HWS(S) {}
// This stage works under the assumption that the Pipeline will eventually // This stage works under the assumption that the Pipeline will eventually
// execute a retire stage. We don't need to check if pipelines and/or // execute a retire stage. We don't need to check if pipelines and/or
@ -49,9 +46,9 @@ public:
// RetireControlUnit::hasWorkToComplete() is responsible for checking if there // RetireControlUnit::hasWorkToComplete() is responsible for checking if there
// are still instructions in-flight in the out-of-order backend. // are still instructions in-flight in the out-of-order backend.
bool hasWorkToComplete() const override { return false; } bool hasWorkToComplete() const override { return false; }
bool isAvailable(const InstRef &IR) const override;
llvm::Error cycleStart() override; llvm::Error cycleStart() override;
Status execute(InstRef &IR) override; llvm::Error execute(InstRef &IR) override;
void void
notifyInstructionIssued(const InstRef &IR, notifyInstructionIssued(const InstRef &IR,


@ -17,23 +17,53 @@
namespace mca { namespace mca {
bool FetchStage::hasWorkToComplete() const { return SM.hasNext(); } bool FetchStage::hasWorkToComplete() const {
return CurrentInstruction.get() || SM.hasNext();
}
Stage::Status FetchStage::execute(InstRef &IR) { bool FetchStage::isAvailable(const InstRef & /* unused */) const {
if (!CurrentInstruction)
return false;
assert(SM.hasNext() && "Unexpected internal state!");
const SourceRef SR = SM.peekNext();
InstRef IR(SR.first, CurrentInstruction.get());
return checkNextStage(IR);
}
llvm::Error FetchStage::getNextInstruction() {
assert(!CurrentInstruction && "There is already an instruction to process!");
if (!SM.hasNext()) if (!SM.hasNext())
return Stage::Stop; return llvm::ErrorSuccess();
const SourceRef SR = SM.peekNext(); const SourceRef SR = SM.peekNext();
llvm::Expected<std::unique_ptr<Instruction>> InstOrErr = llvm::Expected<std::unique_ptr<Instruction>> InstOrErr =
IB.createInstruction(*SR.second); IB.createInstruction(*SR.second);
if (!InstOrErr) if (!InstOrErr)
return InstOrErr.takeError(); return InstOrErr.takeError();
std::unique_ptr<Instruction> I = std::move(*InstOrErr); CurrentInstruction = std::move(InstOrErr.get());
IR = InstRef(SR.first, I.get()); return llvm::ErrorSuccess();
Instructions[IR.getSourceIndex()] = std::move(I);
return Stage::Continue;
} }
void FetchStage::postExecute() { SM.updateNext(); } llvm::Error FetchStage::execute(InstRef & /*unused */) {
assert(CurrentInstruction && "There is no instruction to process!");
const SourceRef SR = SM.peekNext();
InstRef IR(SR.first, CurrentInstruction.get());
assert(checkNextStage(IR) && "Invalid fetch!");
Instructions[IR.getSourceIndex()] = std::move(CurrentInstruction);
if (llvm::Error Val = moveToTheNextStage(IR))
return Val;
SM.updateNext();
// Move the program counter.
return getNextInstruction();
}
llvm::Error FetchStage::cycleStart() {
if (!CurrentInstruction && SM.hasNext())
return getNextInstruction();
return llvm::ErrorSuccess();
}
llvm::Error FetchStage::cycleEnd() { llvm::Error FetchStage::cycleEnd() {
// Find the first instruction which hasn't been retired. // Find the first instruction which hasn't been retired.


@ -24,20 +24,26 @@
namespace mca { namespace mca {
class FetchStage final : public Stage { class FetchStage final : public Stage {
std::unique_ptr<Instruction> CurrentInstruction;
using InstMap = std::map<unsigned, std::unique_ptr<Instruction>>; using InstMap = std::map<unsigned, std::unique_ptr<Instruction>>;
InstMap Instructions; InstMap Instructions;
InstrBuilder &IB; InstrBuilder &IB;
SourceMgr &SM; SourceMgr &SM;
// Updates the program counter, and sets 'CurrentInstruction'.
llvm::Error getNextInstruction();
FetchStage(const FetchStage &Other) = delete; FetchStage(const FetchStage &Other) = delete;
FetchStage &operator=(const FetchStage &Other) = delete; FetchStage &operator=(const FetchStage &Other) = delete;
public: public:
FetchStage(InstrBuilder &IB, SourceMgr &SM) : IB(IB), SM(SM) {} FetchStage(InstrBuilder &IB, SourceMgr &SM)
: CurrentInstruction(), IB(IB), SM(SM) {}
bool isAvailable(const InstRef &IR) const override;
bool hasWorkToComplete() const override; bool hasWorkToComplete() const override;
Status execute(InstRef &IR) override; llvm::Error execute(InstRef &IR) override;
void postExecute() override; llvm::Error cycleStart() override;
llvm::Error cycleEnd() override; llvm::Error cycleEnd() override;
}; };


@ -21,7 +21,7 @@ namespace mca {
using namespace llvm; using namespace llvm;
Stage::Status InstructionTables::execute(InstRef &IR) { Error InstructionTables::execute(InstRef &IR) {
ArrayRef<uint64_t> Masks = IB.getProcResourceMasks(); ArrayRef<uint64_t> Masks = IB.getProcResourceMasks();
const InstrDesc &Desc = IR.getInstruction()->getDesc(); const InstrDesc &Desc = IR.getInstruction()->getDesc();
UsedResources.clear(); UsedResources.clear();
@ -64,7 +64,7 @@ Stage::Status InstructionTables::execute(InstRef &IR) {
// Send a fake instruction issued event to all the views. // Send a fake instruction issued event to all the views.
HWInstructionIssuedEvent Event(IR, UsedResources); HWInstructionIssuedEvent Event(IR, UsedResources);
notifyEvent<HWInstructionIssuedEvent>(Event); notifyEvent<HWInstructionIssuedEvent>(Event);
return Stage::Continue; return ErrorSuccess();
} }
} // namespace mca } // namespace mca


@ -26,7 +26,7 @@
namespace mca { namespace mca {
class InstructionTables : public Stage { class InstructionTables final : public Stage {
const llvm::MCSchedModel &SM; const llvm::MCSchedModel &SM;
InstrBuilder &IB; InstrBuilder &IB;
llvm::SmallVector<std::pair<ResourceRef, double>, 4> UsedResources; llvm::SmallVector<std::pair<ResourceRef, double>, 4> UsedResources;
@ -35,8 +35,8 @@ public:
InstructionTables(const llvm::MCSchedModel &Model, InstrBuilder &Builder) InstructionTables(const llvm::MCSchedModel &Model, InstrBuilder &Builder)
: Stage(), SM(Model), IB(Builder) {} : Stage(), SM(Model), IB(Builder) {}
bool hasWorkToComplete() const override final { return false; } bool hasWorkToComplete() const override { return false; }
Status execute(InstRef &IR) override final; llvm::Error execute(InstRef &IR) override;
}; };
} // namespace mca } // namespace mca


@ -37,29 +37,6 @@ bool Pipeline::hasWorkToProcess() {
}); });
} }
// This routine returns early if any stage returns 'false' after execute() is
// called on it.
Stage::Status Pipeline::executeStages(InstRef &IR) {
for (const std::unique_ptr<Stage> &S : Stages) {
Stage::Status StatusOrErr = S->execute(IR);
if (!StatusOrErr)
return StatusOrErr.takeError();
else if (StatusOrErr.get() == Stage::Stop)
return Stage::Stop;
}
return Stage::Continue;
}
void Pipeline::preExecuteStages() {
for (const std::unique_ptr<Stage> &S : Stages)
S->preExecute();
}
void Pipeline::postExecuteStages() {
for (const std::unique_ptr<Stage> &S : Stages)
S->postExecute();
}
llvm::Error Pipeline::run() { llvm::Error Pipeline::run() {
assert(!Stages.empty() && "Unexpected empty pipeline found!"); assert(!Stages.empty() && "Unexpected empty pipeline found!");
@ -74,36 +51,38 @@ llvm::Error Pipeline::run() {
} }
llvm::Error Pipeline::runCycle() { llvm::Error Pipeline::runCycle() {
// Update stages before we start processing new instructions.
llvm::Error Err = llvm::ErrorSuccess(); llvm::Error Err = llvm::ErrorSuccess();
for (auto I = Stages.begin(), E = Stages.end(); I != E && !Err; ++I) { // Update stages before we start processing new instructions.
for (auto I = Stages.rbegin(), E = Stages.rend(); I != E && !Err; ++I) {
const std::unique_ptr<Stage> &S = *I; const std::unique_ptr<Stage> &S = *I;
Err = S->cycleStart(); Err = S->cycleStart();
} }
if (Err)
return Err;
// Now fetch and execute new instructions. // Now fetch and execute new instructions.
InstRef IR; InstRef IR;
while (true) { Stage &FirstStage = *Stages[0];
preExecuteStages(); while (!Err && FirstStage.isAvailable(IR))
Stage::Status Val = executeStages(IR); Err = FirstStage.execute(IR);
if (!Val)
return Val.takeError();
if (Val.get() == Stage::Stop)
break;
postExecuteStages();
}
// Update stages in preparation for a new cycle. // Update stages in preparation for a new cycle.
for (auto I = Stages.begin(), E = Stages.end(); I != E && !Err; ++I) { for (auto I = Stages.rbegin(), E = Stages.rend(); I != E && !Err; ++I) {
const std::unique_ptr<Stage> &S = *I; const std::unique_ptr<Stage> &S = *I;
Err = S->cycleEnd(); Err = S->cycleEnd();
} }
return Err; return Err;
} }
void Pipeline::appendStage(std::unique_ptr<Stage> S) {
assert(S && "Invalid null stage in input!");
if (!Stages.empty()) {
Stage *Last = Stages.back().get();
Last->setNextInSequence(S.get());
}
Stages.push_back(std::move(S));
}
void Pipeline::notifyCycleBegin() { void Pipeline::notifyCycleBegin() {
LLVM_DEBUG(dbgs() << "[E] Cycle begin: " << Cycles << '\n'); LLVM_DEBUG(dbgs() << "[E] Cycle begin: " << Cycles << '\n');
for (HWEventListener *Listener : Listeners) for (HWEventListener *Listener : Listeners)


@ -60,18 +60,14 @@ class Pipeline {
std::set<HWEventListener *> Listeners; std::set<HWEventListener *> Listeners;
unsigned Cycles; unsigned Cycles;
void preExecuteStages();
Stage::Status executeStages(InstRef &IR);
void postExecuteStages();
llvm::Error runCycle(); llvm::Error runCycle();
bool hasWorkToProcess(); bool hasWorkToProcess();
void notifyCycleBegin(); void notifyCycleBegin();
void notifyCycleEnd(); void notifyCycleEnd();
public: public:
Pipeline() : Cycles(0) {} Pipeline() : Cycles(0) {}
void appendStage(std::unique_ptr<Stage> S) { Stages.push_back(std::move(S)); } void appendStage(std::unique_ptr<Stage> S);
llvm::Error run(); llvm::Error run();
void addEventListener(HWEventListener *Listener); void addEventListener(HWEventListener *Listener);
}; };


@ -42,6 +42,11 @@ llvm::Error RetireStage::cycleStart() {
return llvm::ErrorSuccess(); return llvm::ErrorSuccess();
} }
llvm::Error RetireStage::execute(InstRef &IR) {
RCU.onInstructionExecuted(IR.getInstruction()->getRCUTokenID());
return llvm::ErrorSuccess();
}
void RetireStage::notifyInstructionRetired(const InstRef &IR) { void RetireStage::notifyInstructionRetired(const InstRef &IR) {
LLVM_DEBUG(llvm::dbgs() << "[E] Instruction Retired: #" << IR << '\n'); LLVM_DEBUG(llvm::dbgs() << "[E] Instruction Retired: #" << IR << '\n');
llvm::SmallVector<unsigned, 4> FreedRegs(PRF.getNumRegisterFiles()); llvm::SmallVector<unsigned, 4> FreedRegs(PRF.getNumRegisterFiles());


@ -37,9 +37,8 @@ public:
bool hasWorkToComplete() const override { return !RCU.isEmpty(); } bool hasWorkToComplete() const override { return !RCU.isEmpty(); }
llvm::Error cycleStart() override; llvm::Error cycleStart() override;
Status execute(InstRef &IR) override { return Stage::Continue; } llvm::Error execute(InstRef &IR) override;
void notifyInstructionRetired(const InstRef &IR); void notifyInstructionRetired(const InstRef &IR);
void onInstructionExecuted(unsigned TokenID);
}; };
} // namespace mca } // namespace mca


@ -25,35 +25,23 @@ namespace mca {
class InstRef; class InstRef;
class Stage { class Stage {
Stage *NextInSequence;
std::set<HWEventListener *> Listeners; std::set<HWEventListener *> Listeners;
Stage(const Stage &Other) = delete; Stage(const Stage &Other) = delete;
Stage &operator=(const Stage &Other) = delete; Stage &operator=(const Stage &Other) = delete;
public:
/// A Stage's execute() returns Continue, Stop, or an error. Returning
/// Continue means that the stage successfully completed its 'execute'
/// action, and that the instruction being processed can be moved to the next
/// pipeline stage during this cycle. Continue allows the pipeline to
/// continue calling 'execute' on subsequent stages. Returning Stop
/// signifies that the stage ran into an error, and tells the pipeline to stop
/// passing the instruction to subsequent stages during this cycle. Any
/// failures that occur during 'execute' are represented by the error variant
/// that is provided by the Expected template.
enum State { Stop, Continue };
using Status = llvm::Expected<State>;
protected: protected:
const std::set<HWEventListener *> &getListeners() const { return Listeners; } const std::set<HWEventListener *> &getListeners() const { return Listeners; }
public: public:
Stage() {} Stage() : NextInSequence(nullptr) {}
virtual ~Stage(); virtual ~Stage();
/// Called prior to preExecute to ensure that the stage has items that it /// Returns true if it can execute IR during this cycle.
/// is to process. For example, a FetchStage might have more instructions virtual bool isAvailable(const InstRef &IR) const { return true; }
/// that need to be processed, or a RCU might have items that have yet to
/// retire. /// Returns true if some instructions are still executing this stage.
virtual bool hasWorkToComplete() const = 0; virtual bool hasWorkToComplete() const = 0;
/// Called once at the start of each cycle. This can be used as a setup /// Called once at the start of each cycle. This can be used as a setup
@ -63,19 +51,26 @@ public:
/// Called once at the end of each cycle. /// Called once at the end of each cycle.
virtual llvm::Error cycleEnd() { return llvm::ErrorSuccess(); } virtual llvm::Error cycleEnd() { return llvm::ErrorSuccess(); }
/// Called prior to executing the list of stages. /// The primary action that this stage performs on instruction IR.
/// This can be called multiple times per cycle. virtual llvm::Error execute(InstRef &IR) = 0;
virtual void preExecute() {}
/// Called as a cleanup and finalization phase after each execution. void setNextInSequence(Stage *NextStage) {
/// This will only be called if all stages return a success from their assert(!NextInSequence && "This stage already has a NextInSequence!");
/// execute callback. This can be called multiple times per cycle. NextInSequence = NextStage;
virtual void postExecute() {} }
/// The primary action that this stage performs. bool checkNextStage(const InstRef &IR) const {
/// Returning false prevents successor stages from having their 'execute' return NextInSequence && NextInSequence->isAvailable(IR);
/// routine called. This can be called multiple times during a single cycle. }
virtual Status execute(InstRef &IR) = 0;
/// Called when an instruction is ready to move the next pipeline stage.
///
/// Stages are responsible for moving instructions to their immediate
/// successor stages.
llvm::Error moveToTheNextStage(InstRef &IR) {
assert(checkNextStage(IR) && "Next stage is not ready!");
return std::move(NextInSequence->execute(IR));
}
/// Add a listener to receive callbacks during the execution of this stage. /// Add a listener to receive callbacks during the execution of this stage.
void addListener(HWEventListener *Listener); void addListener(HWEventListener *Listener);