Mirror of https://github.com/RPCS3/rpcs3.git
Fix GETLLAR spin detection

commit ee87fdc869 (parent bf63a18c5f)
@@ -3348,6 +3348,8 @@ u32 spu_thread::get_mfc_completed() const
 
 bool spu_thread::process_mfc_cmd()
 {
+	mfc_cmd_id++;
+
 	// Stall infinitely if MFC queue is full
 	while (mfc_size >= 16) [[unlikely]]
 	{
@@ -3440,20 +3442,22 @@ bool spu_thread::process_mfc_cmd()
 		if ([&]() -> bool
 		{
 			// Validation that it is indeed GETLLAR spinning (large time window is intentional)
-			if (last_getllar != pc || perf0.get() - last_gtsc >= 50'000)
+			if (last_getllar != pc || mfc_cmd_id - 1 != last_getllar_id || perf0.get() - last_gtsc >= 10'000)
 			{
 				// Seemingly not
 				getllar_busy_waiting_switch = umax;
 				return true;
 			}
 
+			getllar_spin_count++;
+
 			if (getllar_busy_waiting_switch == umax)
 			{
 				// Evalute its value (shift-right to ensure its randomness with different CPUs)
-				getllar_busy_waiting_switch = !g_use_rtm && ((perf0.get() >> 8) % 100 < g_cfg.core.spu_reservation_busy_waiting_percentage);
+				getllar_busy_waiting_switch = ((perf0.get() >> 8) % 100 < g_cfg.core.spu_getllar_busy_waiting_percentage);
 			}
 
-			return !!getllar_busy_waiting_switch;
+			return !!getllar_busy_waiting_switch || getllar_spin_count < 3;
 		}())
 		{
 			if (g_cfg.core.mfc_debug)
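The lambda above gates busy-waiting on a pseudo-random percentage check seeded by the timestamp counter, now driven by the new GETLLAR-specific setting. A minimal standalone sketch of that gate, assuming illustrative names (should_busy_wait, busy_wait_percentage) and with std::chrono standing in for perf0.get():

    #include <chrono>
    #include <cstdint>

    // Decide once per spin episode whether to busy-wait or fall back to a
    // sleeping wait. Shifting the tick value right by 8 drops the low bits
    // before reducing modulo 100, so threads deciding at nearly the same
    // moment do not all land on the same outcome.
    inline bool should_busy_wait(unsigned busy_wait_percentage)
    {
        const auto ticks = static_cast<std::uint64_t>(
            std::chrono::steady_clock::now().time_since_epoch().count());
        return (ticks >> 8) % 100 < busy_wait_percentage;
    }

Per the config hunk at the bottom of this diff, the GETLLAR-specific percentage defaults to 100, so detected spin loops busy-wait unless the user dials the value down.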
@@ -3465,11 +3469,12 @@ bool spu_thread::process_mfc_cmd()
 			}
 
 			last_getllar = pc;
+			last_getllar_id = mfc_cmd_id;
 			last_gtsc = perf0.get();
 
 			if (getllar_busy_waiting_switch == true)
 			{
-				busy_wait();
+				busy_wait(300);
 			}
 
 			return true;
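Hedged sketch of the kind of short, pause-based spin a call like busy_wait(300) implies; spin_pause is an invented stand-in, and RPCS3's busy_wait() may scale or time its loop differently:

    #include <cstdint>
    #if defined(_MSC_VER)
    #include <intrin.h>
    #else
    #include <immintrin.h>
    #endif

    // Spin for roughly `iterations` pause hints before re-checking the
    // reservation. _mm_pause keeps the core from hammering the cache line
    // and is friendlier to the sibling hyper-thread than an empty loop.
    inline void spin_pause(std::uint32_t iterations)
    {
        for (std::uint32_t i = 0; i < iterations; i++)
        {
            _mm_pause();
        }
    }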
@@ -3477,16 +3482,14 @@ bool spu_thread::process_mfc_cmd()
 
 		// Spinning, might as well yield cpu resources
 		state += cpu_flag::wait;
-		vm::reservation_notifier(addr).wait(rtime, -128, atomic_wait_timeout{50'000});
+		vm::reservation_notifier(addr).wait(rtime, atomic_wait_timeout{50'000});
 
 		// Reset perf
 		perf0.restart();
 
 		// Quick check if there were reservation changes
-		if (rtime == vm::reservation_acquire(addr))
+		if (rtime == vm::reservation_acquire(addr) && cmp_rdata(rdata, data))
 		{
-			// None at least in rtime
-
 			if (g_cfg.core.mfc_debug)
 			{
 				auto& dump = mfc_history[mfc_dump_idx++ % spu_thread::max_mfc_dump_idx];
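When the spin is instead put to sleep, the thread waits on the reservation notifier with a 50 us timeout and, once it wakes, now re-checks the reservation data as well as its version before claiming nothing changed. A self-contained sketch of that revalidation step, with a plain timed sleep standing in for the timed notifier wait and invented names (version, line, snapshot) in place of vm::reservation_acquire, guest memory and rdata:

    #include <atomic>
    #include <chrono>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <thread>

    // Returns true when, after yielding for a while, the reservation still
    // looks untouched: the version counter is the one we saw at GETLLAR time
    // and the 128-byte line is byte-identical to our snapshot.
    bool reservation_unchanged_after_wait(const std::atomic<std::uint64_t>& version,
                                          std::uint64_t seen_version,
                                          const std::byte* line,      // live 128-byte reservation line
                                          const std::byte* snapshot)  // copy taken at GETLLAR time
    {
        // Stand-in for the timed wait on the reservation notifier.
        std::this_thread::sleep_for(std::chrono::microseconds(50));

        // Check the cheap version counter first, then the payload itself;
        // both must match before the spinning SPU program is told that
        // nothing has changed.
        return version.load(std::memory_order_acquire) == seen_version
            && std::memcmp(snapshot, line, 128) == 0;
    }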
@@ -3495,6 +3498,9 @@ bool spu_thread::process_mfc_cmd()
 				std::memcpy(dump.data, rdata, 128);
 			}
 
+			// Let the game recheck its state, maybe after a long period of time something else changed which satisfies its waiting condition
+			getllar_spin_count = 1;
+			last_getllar_id = mfc_cmd_id;
 			last_gtsc = perf0.get();
 			return true;
 		}
@@ -3507,6 +3513,12 @@ bool spu_thread::process_mfc_cmd()
 		}
 	}
 
+	last_getllar_id = mfc_cmd_id;
+	last_getllar = pc;
+	last_gtsc = perf0.get();
+	getllar_spin_count = 0;
+	getllar_busy_waiting_switch = umax;
+
 	u64 ntime;
 	rsx::reservation_lock rsx_lock(addr, 128);
 
@@ -3592,9 +3604,6 @@ bool spu_thread::process_mfc_cmd()
 	raddr = addr;
 	rtime = ntime;
 	mov_rdata(_ref<spu_rdata_t>(ch_mfc_cmd.lsa & 0x3ff80), rdata);
-	last_getllar = pc;
-	last_gtsc = perf0.get();
-	getllar_busy_waiting_switch = umax;
 
 	ch_atomic_stat.set_value(MFC_GETLLAR_SUCCESS);
 
@@ -4217,6 +4226,7 @@ s64 spu_thread::get_ch_value(u32 ch)
 	case SPU_RdEventStat:
 	{
 		const u32 mask1 = ch_events.load().mask;
+
 		auto events = get_events(mask1, false, true);
 
 		if (events.count)
@@ -4267,7 +4277,7 @@ s64 spu_thread::get_ch_value(u32 ch)
 			}
 		}
 
-		const bool reservation_busy_waiting = !g_use_rtm && ((utils::get_tsc() >> 8) % 100) < g_cfg.core.spu_reservation_busy_waiting_percentage;
+		const bool reservation_busy_waiting = ((utils::get_tsc() >> 8) % 100 + ((raddr == spurs_addr) ? 50 : 0)) < g_cfg.core.spu_reservation_busy_waiting_percentage;
 
 		for (; !events.count; events = get_events(mask1 & ~SPU_EVENT_LR, true, true))
 		{
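The reworked reservation-wait gate adds a flat +50 to the random draw when the reservation sits on the SPURS lock area, which in effect subtracts 50 percentage points from that address's busy-wait chance. A tiny worked example of the arithmetic (busy_wait_chance is illustrative, not an RPCS3 function):

    // Busy-wait probability, in percent, implied by the expression
    // ((tsc >> 8) % 100 + (is_spurs_addr ? 50 : 0)) < configured_percentage.
    constexpr unsigned busy_wait_chance(unsigned configured_percentage, bool is_spurs_addr)
    {
        const unsigned bias = is_spurs_addr ? 50 : 0;
        return configured_percentage > bias ? configured_percentage - bias : 0;
    }

    static_assert(busy_wait_chance(60, false) == 60); // ordinary reservation
    static_assert(busy_wait_chance(60, true) == 10);  // SPURS reservation busy-waits far less often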
@@ -721,6 +721,7 @@ public:
 
 	// MFC command data
 	spu_mfc_cmd ch_mfc_cmd;
+	u32 mfc_cmd_id = 0;
 
 	// MFC command queue
 	spu_mfc_cmd mfc_queue[16]{};
@@ -840,6 +841,8 @@ public:
 	u64 last_succ = 0;
 	u64 last_gtsc = 0;
 	u32 last_getllar = umax; // LS address of last GETLLAR (if matches current GETLLAR we can let the thread rest)
+	u32 last_getllar_id = umax;
+	u32 getllar_spin_count = 0;
 	u32 getllar_busy_waiting_switch = umax; // umax means the test needs evaluation, otherwise it's a boolean
 
 	std::vector<mfc_cmd_dump> mfc_history;
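These new members give the spin detector a second signal besides the PC and the TSC delta: a running MFC command id, so a GETLLAR only counts as spinning when it is the very next MFC command after the previous GETLLAR at the same PC. A compact sketch of that bookkeeping under hypothetical names (getllar_tracker is not an RPCS3 type):

    #include <cstdint>

    // Per-thread bookkeeping mirroring the new spu_thread members.
    struct getllar_tracker
    {
        std::uint32_t mfc_cmd_id = 0;
        std::uint32_t last_getllar_id = UINT32_MAX;
        std::uint32_t last_getllar_pc = UINT32_MAX;
        std::uint64_t last_tsc = 0;

        // Called for every MFC command, mirroring mfc_cmd_id++ at the top of
        // process_mfc_cmd().
        void on_mfc_command() { mfc_cmd_id++; }

        // Called for GETLLAR only: true when this looks like a tight polling loop.
        bool looks_like_spin(std::uint32_t pc, std::uint64_t tsc, std::uint64_t window = 10'000)
        {
            const bool spin = pc == last_getllar_pc
                           && mfc_cmd_id - 1 == last_getllar_id
                           && tsc - last_tsc < window;

            last_getllar_pc = pc;
            last_getllar_id = mfc_cmd_id;
            last_tsc = tsc;
            return spin;
        }
    };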
@@ -32,6 +32,7 @@ struct cfg_root : cfg::node
 	cfg::_bool set_daz_and_ftz{ this, "Set DAZ and FTZ", false };
 	cfg::_enum<spu_decoder_type> spu_decoder{ this, "SPU Decoder", spu_decoder_type::llvm };
 	cfg::uint<0, 100> spu_reservation_busy_waiting_percentage{ this, "SPU Reservation Busy Waiting Percentage", 0, true };
+	cfg::uint<0, 100> spu_getllar_busy_waiting_percentage{ this, "SPU GETLLAR Busy Waiting Percentage", 100, true };
 	cfg::_bool spu_debug{ this, "SPU Debug" };
 	cfg::_bool mfc_debug{ this, "MFC Debug" };
 	cfg::_int<0, 6> preferred_spu_threads{ this, "Preferred SPU Threads", 0, true }; // Number of hardware threads dedicated to heavy simultaneous spu tasks