
SPU: Implement interrupt handling for remaining events

Eladash, 2021-09-23 18:17:16 +03:00 (committed by Ivan)
parent 4ed92f4155
commit c0c52c33b9
3 changed files with 87 additions and 17 deletions

rpcs3/Emu/Cell/SPUInterpreter.cpp

@@ -125,7 +125,13 @@ void spu_interpreter::set_interrupt_status(spu_thread& spu, spu_opcode_t op)
 bool spu_interpreter::STOP(spu_thread& spu, spu_opcode_t op)
 {
-	if (!spu.stop_and_signal(op.opcode & 0x3fff))
+	const bool allow = std::exchange(spu.allow_interrupts_in_cpu_work, false);
+	const bool advance_pc = spu.stop_and_signal(op.opcode & 0x3fff);
+	spu.allow_interrupts_in_cpu_work = allow;
+
+	if (!advance_pc)
 	{
 		return false;
 	}
@@ -166,8 +172,12 @@ bool spu_interpreter::MFSPR(spu_thread& spu, spu_opcode_t op)
 bool spu_interpreter::RDCH(spu_thread& spu, spu_opcode_t op)
 {
+	const bool allow = std::exchange(spu.allow_interrupts_in_cpu_work, false);
 	const s64 result = spu.get_ch_value(op.ra);
+	spu.allow_interrupts_in_cpu_work = allow;
+
 	if (result < 0)
 	{
 		return false;
@@ -428,7 +438,13 @@ bool spu_interpreter::MTSPR(spu_thread&, spu_opcode_t)
 bool spu_interpreter::WRCH(spu_thread& spu, spu_opcode_t op)
 {
-	if (!spu.set_ch_value(op.ra, spu.gpr[op.rt]._u32[3]))
+	const bool allow = std::exchange(spu.allow_interrupts_in_cpu_work, false);
+	const bool advance_pc = spu.set_ch_value(op.ra, spu.gpr[op.rt]._u32[3]);
+	spu.allow_interrupts_in_cpu_work = allow;
+
+	if (!advance_pc)
 	{
 		return false;
 	}
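A note on the idiom shared by the three hunks above: std::exchange clears allow_interrupts_in_cpu_work for the duration of the channel operation and restores the previous value afterwards, so any cpu_work() that runs from inside stop_and_signal(), get_ch_value() or set_ch_value() cannot raise an interrupt in the middle of a half-executed instruction. A minimal stand-alone sketch of the pattern (toy types, not rpcs3 code):

	#include <utility>

	struct toy_spu
	{
		bool allow_interrupts_in_cpu_work = false;

		// Stand-in for a channel access that may run nested "CPU work" internally
		bool channel_op() { return true; }

		bool execute_channel_instruction()
		{
			// Save the current setting and forbid interrupts for the duration
			const bool allow = std::exchange(allow_interrupts_in_cpu_work, false);
			const bool advance_pc = channel_op();
			allow_interrupts_in_cpu_work = allow; // restore the caller's setting

			return advance_pc;
		}
	};

	int main()
	{
		toy_spu spu;
		spu.allow_interrupts_in_cpu_work = true;
		return spu.execute_channel_instruction() ? 0 : 1;
	}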

rpcs3/Emu/Cell/SPUThread.cpp

@@ -1643,6 +1643,8 @@ void spu_thread::cpu_task()
 	{
 		ensure(spu_runtime::g_interpreter);
 
+		allow_interrupts_in_cpu_work = true;
+
 		while (true)
 		{
 			if (state) [[unlikely]]
@@ -1653,6 +1655,8 @@ void spu_thread::cpu_task()
 			spu_runtime::g_interpreter(*this, _ptr<u8>(0), nullptr);
 		}
+
+		allow_interrupts_in_cpu_work = false;
 	}
 }
@@ -1663,15 +1667,57 @@ void spu_thread::cpu_work()
 		return;
 	}
 
+	const u32 old_iter_count = cpu_work_iteration_count++;
+
 	const auto timeout = +g_cfg.core.mfc_transfers_timeout;
 
-	// If either MFC size exceeds limit or timeout has been reached execute pending MFC commands
-	if (mfc_size > g_cfg.core.mfc_transfers_shuffling || (timeout && get_system_time() - mfc_last_timestamp >= timeout))
+	bool work_left = false;
+
+	if (u32 shuffle_count = g_cfg.core.mfc_transfers_shuffling)
 	{
-		do_mfc(false, false);
+		// If either the MFC queue size exceeds the limit or the timeout has been reached, execute pending MFC commands
+		if (mfc_size > shuffle_count || (timeout && get_system_time() - mfc_last_timestamp >= timeout))
+		{
+			work_left = do_mfc(false, false);
+		}
+		else
+		{
+			work_left = mfc_size != 0; // TODO: Optimize
+		}
+	}
+
+	bool gen_interrupt = false;
+
+	// Check interrupts every 16 iterations
+	if (!(old_iter_count % 16) && allow_interrupts_in_cpu_work)
+	{
+		if (u32 mask = ch_events.load().mask & SPU_EVENT_INTR_BUSY_CHECK)
+		{
+			// The LR check is expensive, do it only once in a while
+			if (old_iter_count /*% 256*/)
+			{
+				mask &= ~SPU_EVENT_LR;
+			}
+
+			get_events(mask);
+		}
+
+		gen_interrupt = check_mfc_interrupts(pc);
+		work_left |= interrupts_enabled;
 	}
 
 	in_cpu_work = false;
+
+	if (!work_left)
+	{
+		state -= cpu_flag::pending;
+	}
+
+	if (gen_interrupt)
+	{
+		// Interrupt! Escape everything and restart execution
+		spu_runtime::g_escape(this);
+	}
 }
 
 struct raw_spu_cleanup
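The polling cadence above deserves a closer look: cpu_work_iteration_count is a u8 (see the header diff further down), so old_iter_count wraps to zero every 256 increments. Events are therefore polled on every 16th call, and the expensive SPU_EVENT_LR check survives the mask strip only when the counter is exactly zero, i.e. roughly once per 256 calls, which appears to be what the commented-out '% 256' alludes to. A stand-alone sketch of that schedule (illustrative only):

	#include <cstdint>
	#include <cstdio>

	int main()
	{
		std::uint8_t cpu_work_iteration_count = 0;

		for (int call = 0; call < 512; call++)
		{
			const std::uint8_t old_iter_count = cpu_work_iteration_count++;

			if (old_iter_count % 16)
			{
				continue; // events are only polled on every 16th call
			}

			// A non-zero counter strips the expensive LR check from the mask,
			// so LR is effectively polled once per 256 calls (u8 wrap-around)
			const bool check_lr = old_iter_count == 0;
			std::printf("call %3d: poll events%s\n", call, check_lr ? " + LR" : "");
		}
	}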
@@ -2967,7 +3013,7 @@ void spu_thread::do_putlluc(const spu_mfc_cmd& args)
 	vm::reservation_notifier(addr).notify_all(-128);
 }
 
-void spu_thread::do_mfc(bool can_escape, bool must_finish)
+bool spu_thread::do_mfc(bool can_escape, bool must_finish)
 {
 	u32 removed = 0;
 	u32 barrier = 0;
@@ -3119,15 +3165,11 @@ void spu_thread::do_mfc(bool can_escape, bool must_finish)
 			// Exit early; not all pending commands have to be executed in a single iteration
 			// Update the last timestamp so the next MFC timeout check will use the current time
 			mfc_last_timestamp = get_system_time();
-			return;
+			return true;
 		}
 	}
 
-	if (state & cpu_flag::pending)
-	{
-		// No more pending work
-		state -= cpu_flag::pending;
-	}
+	return false;
 }
 
 bool spu_thread::check_mfc_interrupts(u32 next_pc)
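The signature change above inverts the responsibility for cpu_flag::pending: do_mfc() no longer clears the flag itself but reports whether commands are still queued, and cpu_work() combines that with the interrupt-polling state before deciding (see the cpu_work() hunk earlier). A simplified sketch of the new contract (toy types, not the real MFC queue):

	#include <vector>

	struct toy_thread
	{
		std::vector<int> mfc_queue{1, 2, 3};
		bool interrupts_enabled = false;
		bool pending = true;

		// Executes what it can; returns true if commands are still queued
		bool do_mfc()
		{
			if (!mfc_queue.empty())
			{
				mfc_queue.pop_back(); // execute one command per pass
			}

			return !mfc_queue.empty();
		}

		void cpu_work()
		{
			bool work_left = do_mfc();
			work_left |= interrupts_enabled; // other sources of pending work

			if (!work_left)
			{
				pending = false; // only the caller clears the flag now
			}
		}
	};

	int main()
	{
		toy_thread t;

		while (t.pending)
		{
			t.cpu_work();
		}
	}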
@@ -3752,9 +3794,20 @@ void spu_thread::set_interrupt_status(bool enable)
 	if (enable)
 	{
 		// Detect enabling interrupts with events masked
-		if (auto mask = ch_events.load().mask; mask & ~SPU_EVENT_INTR_IMPLEMENTED)
+		if (auto mask = ch_events.load().mask; mask & SPU_EVENT_INTR_BUSY_CHECK)
 		{
-			fmt::throw_exception("SPU Interrupts not implemented (mask=0x%x)", mask);
+			if (g_cfg.core.spu_decoder != spu_decoder_type::precise && g_cfg.core.spu_decoder != spu_decoder_type::fast)
+			{
+				fmt::throw_exception("SPU Interrupts not implemented (mask=0x%x): Use interpreters", mask);
+			}
+
+			spu_log.trace("SPU Interrupts (mask=0x%x) are using CPU busy-checking mode", mask);
+
+			// Process interrupts in cpu_work()
+			if (state.none_of(cpu_flag::pending))
+			{
+				state += cpu_flag::pending;
+			}
 		}
 	}
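In short, when interrupts are enabled with only busy-check events masked, the code now degrades gracefully on the interpreter decoders instead of always throwing: it raises cpu_flag::pending so that cpu_work() starts polling. A hedged sketch of that enable path (toy stand-ins; rpcs3 actually uses an atomic bitset of cpu_flag values and a global config object):

	#include <stdexcept>

	enum class decoder { precise, fast, llvm }; // hypothetical stand-in for spu_decoder_type

	struct toy_spu
	{
		decoder spu_decoder = decoder::fast;
		bool pending = false;

		void set_interrupt_status(bool enable, unsigned busy_check_mask)
		{
			if (enable && busy_check_mask)
			{
				// Busy checking needs the per-instruction cpu_work() hook,
				// which only the interpreter decoders provide
				if (spu_decoder != decoder::precise && spu_decoder != decoder::fast)
				{
					throw std::runtime_error("SPU interrupts not implemented: use an interpreter");
				}

				pending = true; // cpu_work() will now poll events until this is cleared
			}
		}
	};

	int main()
	{
		toy_spu spu;
		spu.set_interrupt_status(true, 0x1); // mask value is arbitrary here
		return spu.pending ? 0 : 1;
	}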

rpcs3/Emu/Cell/SPUThread.h

@@ -82,8 +82,7 @@ enum : u32
 	SPU_EVENT_IMPLEMENTED = SPU_EVENT_LR | SPU_EVENT_TM | SPU_EVENT_SN | SPU_EVENT_S1 | SPU_EVENT_S2, // Mask of implemented events
 
 	SPU_EVENT_INTR_IMPLEMENTED = SPU_EVENT_SN,
-
-	SPU_EVENT_INTR_TEST = SPU_EVENT_INTR_IMPLEMENTED,
+	SPU_EVENT_INTR_BUSY_CHECK = SPU_EVENT_IMPLEMENTED & ~SPU_EVENT_INTR_IMPLEMENTED,
 };
 
 // SPU Class 0 Interrupts
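The new constant partitions the implemented events: SPU_EVENT_INTR_BUSY_CHECK is exactly the set that is emulated but has no true interrupt delivery, so these events must be polled from cpu_work(). A compile-time sketch of that algebra (the bit values below are made-up placeholders, not the real channel bits):

	#include <cstdint>

	constexpr std::uint32_t EV_LR = 1u << 0; // placeholder bit positions,
	constexpr std::uint32_t EV_TM = 1u << 1; // not the real channel bits
	constexpr std::uint32_t EV_SN = 1u << 2;
	constexpr std::uint32_t EV_S1 = 1u << 3;
	constexpr std::uint32_t EV_S2 = 1u << 4;

	constexpr std::uint32_t IMPLEMENTED      = EV_LR | EV_TM | EV_SN | EV_S1 | EV_S2;
	constexpr std::uint32_t INTR_IMPLEMENTED = EV_SN; // has real interrupt delivery
	constexpr std::uint32_t INTR_BUSY_CHECK  = IMPLEMENTED & ~INTR_IMPLEMENTED;

	// The two sets are disjoint and together cover every implemented event
	static_assert((INTR_BUSY_CHECK & INTR_IMPLEMENTED) == 0);
	static_assert((INTR_BUSY_CHECK | INTR_IMPLEMENTED) == IMPLEMENTED);
	static_assert(INTR_BUSY_CHECK == (EV_LR | EV_TM | EV_S1 | EV_S2));

	int main() {}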
@@ -779,6 +778,8 @@ public:
 	static constexpr u32 max_mfc_dump_idx = 2048;
 
 	bool in_cpu_work = false;
+	bool allow_interrupts_in_cpu_work = false;
+	u8 cpu_work_iteration_count = 0;
 
 	std::array<v128, 0x4000> stack_mirror; // Return address information
@@ -793,7 +794,7 @@ public:
 	bool do_list_transfer(spu_mfc_cmd& args);
 	void do_putlluc(const spu_mfc_cmd& args);
 	bool do_putllc(const spu_mfc_cmd& args);
-	void do_mfc(bool can_escape = true, bool must_finish = true);
+	bool do_mfc(bool can_escape = true, bool must_finish = true);
 	u32 get_mfc_completed() const;
 
 	bool process_mfc_cmd();