
[llvm-mca] Ensure that instructions with a schedule read-advance are always issued in the right order.

Normally, the Scheduler prioritizes older instructions over younger instructions
during the instruction issue stage. In one particular case, where a dependent
instruction had a schedule read-advance associated with one of its input
operands, this rule was not applied correctly.

This patch fixes the issue and adds a test to verify that we don't regress that
particular case.

llvm-svn: 330032
Andrea Di Biagio 2018-04-13 15:19:07 +00:00
parent f63761c20a
commit 8de05aaa85
3 changed files with 90 additions and 56 deletions
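
For context, the ordering rule this patch restores can be summarized with a small
standalone sketch. This is illustrative only, not the llvm-mca code: FakeInstr,
CanBeIssued and issueOldestReady are made-up stand-ins for Instruction,
Resources->canBeIssued(Desc) and Scheduler::issue(). Because the ready queue is an
ordered map keyed by instruction index, scanning it from the beginning and issuing
the first issuable entry always favors the oldest instruction.

#include <cstdio>
#include <map>

struct FakeInstr {
  bool CanBeIssued; // stand-in for "all required resources are available"
};

// Issue at most one instruction per call. std::map iterates keys (instruction
// indices) in ascending order, so the first issuable entry is the oldest one.
// Returns the issued index, or -1 if nothing could be issued.
static int issueOldestReady(std::map<unsigned, FakeInstr> &ReadyQueue) {
  for (const auto &Entry : ReadyQueue) {
    if (!Entry.second.CanBeIssued)
      continue;
    unsigned Index = Entry.first;
    ReadyQueue.erase(Index); // safe: we return immediately after erasing
    return static_cast<int>(Index);
  }
  return -1;
}

int main() {
  // Instruction 0 is older than instruction 2; even if both become ready in
  // the same cycle, instruction 0 must be picked first.
  std::map<unsigned, FakeInstr> ReadyQueue = {{2, {true}}, {0, {true}}};
  std::printf("issued: %d\n", issueOldestReady(ReadyQueue)); // issued: 0
  std::printf("issued: %d\n", issueOldestReady(ReadyQueue)); // issued: 2
  return 0;
}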


@@ -0,0 +1,44 @@
# RUN: llvm-mca -mtriple=x86_64-unknown-unknown -mcpu=btver2 -iterations=1 -resource-pressure=0 -timeline -dispatch=3 < %s | FileCheck %s
add %rdi, %rsi
add (%rsp), %rsi
add %rdx, %r8
# CHECK: Iterations: 1
# CHECK-NEXT: Instructions: 3
# CHECK-NEXT: Total Cycles: 7
# CHECK-NEXT: Dispatch Width: 3
# CHECK-NEXT: IPC: 0.43
# CHECK: Instruction Info:
# CHECK-NEXT: [1]: #uOps
# CHECK-NEXT: [2]: Latency
# CHECK-NEXT: [3]: RThroughput
# CHECK-NEXT: [4]: MayLoad
# CHECK-NEXT: [5]: MayStore
# CHECK-NEXT: [6]: HasSideEffects
# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
# CHECK-NEXT: 1 1 0.50 addq %rdi, %rsi
# CHECK-NEXT: 1 4 1.00 * addq (%rsp), %rsi
# CHECK-NEXT: 1 1 0.50 addq %rdx, %r8
# CHECK: Timeline view:
# CHECK: Index 0123456
# CHECK: [0,0] DeER .. addq %rdi, %rsi
# CHECK-NEXT: [0,1] DeeeeER addq (%rsp), %rsi
# CHECK-NEXT: [0,2] D=eE--R addq %rdx, %r8
# CHECK: Average Wait times (based on the timeline view):
# CHECK-NEXT: [0]: Executions
# CHECK-NEXT: [1]: Average time spent waiting in a scheduler's queue
# CHECK-NEXT: [2]: Average time spent waiting in a scheduler's queue while ready
# CHECK-NEXT: [3]: Average time elapsed from WB until retire stage
# CHECK: [0] [1] [2] [3]
# CHECK-NEXT: 0. 1 1.0 1.0 0.0 addq %rdi, %rsi
# CHECK-NEXT: 1. 1 1.0 0.0 0.0 addq (%rsp), %rsi
# CHECK-NEXT: 2. 1 2.0 2.0 2.0 addq %rdx, %r8
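# Timeline notation: 'D' = dispatched, '=' = dispatched and waiting to execute,
# 'e' = executing, 'E' = executed (write-back), '-' = executed and waiting to
# retire, 'R' = retired.
# The row to watch is [0,2]: per the commit message, the older addq (%rsp), %rsi
# can be issued in the same cycle as the addq it depends on because of the
# ReadAdvance on its register operand, so the younger, independent
# addq %rdx, %r8 must wait one extra cycle rather than being issued first.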


@@ -277,7 +277,7 @@ void Scheduler::scheduleInstruction(unsigned Idx, Instruction &MCIS) {
   if (Resources->mustIssueImmediately(Desc)) {
     DEBUG(dbgs() << "[SCHEDULER] Instruction " << Idx
                  << " issued immediately\n");
-    return issueInstruction(MCIS, Idx);
+    return issueInstruction(Idx, MCIS);
   }
 
   DEBUG(dbgs() << "[SCHEDULER] Adding " << Idx << " to the Ready Queue\n");
@@ -293,10 +293,15 @@ void Scheduler::cycleEvent() {
   updateIssuedQueue();
   updatePendingQueue();
-  bool InstructionsWerePromoted = false;
-  do {
-    InstructionsWerePromoted = issue();
-  } while(InstructionsWerePromoted);
+  while (issue()) {
+    // Instructions that have been issued during this cycle might have unblocked
+    // other dependent instructions. Dependent instructions may be issued during
+    // this same cycle if operands have ReadAdvance entries. Promote those
+    // instructions to the ReadyQueue and tell to the caller that we need
+    // another round of 'issue()'.
+    promoteToReadyQueue();
+  }
 }
 
 #ifndef NDEBUG
@@ -317,7 +322,8 @@ bool Scheduler::canBeDispatched(unsigned Index, const InstrDesc &Desc) const {
     Type = HWStallEvent::StoreQueueFull;
   else {
     switch (Resources->canBeDispatched(Desc.Buffers)) {
-    default: return true;
+    default:
+      return true;
     case ResourceStateEvent::RS_BUFFER_UNAVAILABLE:
       Type = HWStallEvent::SchedulerQueueFull;
      break;
@@ -330,7 +336,7 @@ bool Scheduler::canBeDispatched(unsigned Index, const InstrDesc &Desc) const {
   return false;
 }
 
-void Scheduler::issueInstruction(Instruction &IS, unsigned InstrIndex) {
+void Scheduler::issueInstruction(unsigned InstrIndex, Instruction &IS) {
   const InstrDesc &D = IS.getDesc();
   if (!D.Buffers.empty()) {
@@ -362,65 +368,51 @@ void Scheduler::issueInstruction(Instruction &IS, unsigned InstrIndex) {
   notifyInstructionExecuted(InstrIndex);
 }
 
-bool Scheduler::promoteToReadyQueue() {
+void Scheduler::promoteToReadyQueue() {
   // Scan the set of waiting instructions and promote them to the
   // ready queue if operands are all ready.
-  bool InstructionsWerePromoted = false;
   for (auto I = WaitQueue.begin(), E = WaitQueue.end(); I != E;) {
     const QueueEntryTy &Entry = *I;
+    unsigned IID = Entry.first;
+    Instruction &Inst = *Entry.second;
 
     // Check if this instruction is now ready. In case, force
     // a transition in state using method 'update()'.
-    Entry.second->update();
-    bool IsReady = Entry.second->isReady();
+    Inst.update();
 
-    const InstrDesc &Desc = Entry.second->getDesc();
+    const InstrDesc &Desc = Inst.getDesc();
     bool IsMemOp = Desc.MayLoad || Desc.MayStore;
-    if (IsReady && IsMemOp)
-      IsReady &= LSU->isReady(Entry.first);
-    if (IsReady) {
-      notifyInstructionReady(Entry.first);
-      ReadyQueue[Entry.first] = Entry.second;
-      auto ToRemove = I;
-      ++I;
-      WaitQueue.erase(ToRemove);
-      InstructionsWerePromoted = true;
-    } else {
+    if (!Inst.isReady() || (IsMemOp && !LSU->isReady(IID))) {
       ++I;
+      continue;
     }
+
+    notifyInstructionReady(IID);
+    ReadyQueue[IID] = &Inst;
+    auto ToRemove = I;
+    ++I;
+    WaitQueue.erase(ToRemove);
   }
-  return InstructionsWerePromoted;
 }
 
 bool Scheduler::issue() {
-  std::vector<unsigned> ToRemove;
-  for (const QueueEntryTy QueueEntry : ReadyQueue) {
-    // Give priority to older instructions in ReadyQueue. The ready queue is
-    // ordered by key, and therefore older instructions are visited first.
-    Instruction &IS = *QueueEntry.second;
-    const InstrDesc &D = IS.getDesc();
-    if (!Resources->canBeIssued(D))
-      continue;
-    unsigned InstrIndex = QueueEntry.first;
-    issueInstruction(IS, InstrIndex);
-    ToRemove.emplace_back(InstrIndex);
-  }
+  // Give priority to older instructions in the ReadyQueue. Since the ready
+  // queue is ordered by key, this will always prioritize older instructions.
+  const auto It = std::find_if(ReadyQueue.begin(), ReadyQueue.end(),
+                               [&](const QueueEntryTy &Entry) {
+                                 const Instruction &IS = *Entry.second;
+                                 const InstrDesc &D = IS.getDesc();
+                                 return Resources->canBeIssued(D);
+                               });
 
-  if (ToRemove.empty())
+  if (It == ReadyQueue.end())
     return false;
 
-  for (const unsigned InstrIndex : ToRemove)
-    ReadyQueue.erase(InstrIndex);
-
-  // Instructions that have been issued during this cycle might have unblocked
-  // other dependent instructions. Dependent instructions
-  // may be issued during this same cycle if operands have ReadAdvance entries.
-  // Promote those instructions to the ReadyQueue and tell to the caller that
-  // we need another round of 'issue()'.
-  return promoteToReadyQueue();
+  // We found an instruction. Issue it, and update the ready queue.
+  const QueueEntryTy &Entry = *It;
+  issueInstruction(Entry.first, *Entry.second);
+  ReadyQueue.erase(Entry.first);
+  return true;
 }
void Scheduler::updatePendingQueue() {
@@ -428,7 +420,6 @@ void Scheduler::updatePendingQueue() {
   // started.
   for (QueueEntryTy Entry : WaitQueue)
     Entry.second->cycleEvent();
-  promoteToReadyQueue();
 }


@@ -430,17 +430,16 @@ class Scheduler {
   // Notify the Backend that buffered resources were freed.
   void notifyReleasedBuffers(llvm::ArrayRef<uint64_t> Buffers);
 
-  /// Issue instructions from the ReadyQueue by giving priority to older
-  /// instructions. This method returns true if at least one instruction has
-  /// been promoted in the process from the WaitQueue to the ReadyQueue.
+  /// Issue the next instruction from the ReadyQueue. This method gives priority
+  /// to older instructions.
   bool issue();
 
-  /// Scans the WaitQueue in search of instructions that can be moved to
-  /// the ReadyQueue.
-  bool promoteToReadyQueue();
+  /// Move instructions from the WaitQueue to the ReadyQueue if input operands
+  /// are all available.
+  void promoteToReadyQueue();
 
   /// Issue an instruction without updating the ready queue.
-  void issueInstruction(Instruction &IS, unsigned InstrIndex);
+  void issueInstruction(unsigned Index, Instruction &IS);
 
   void updatePendingQueue();
   void updateIssuedQueue();