1
0
mirror of https://github.com/RPCS3/rpcs3.git synced 2024-11-25 04:02:42 +01:00

SPU: Implement custom reservation condition in atomic wait

This commit is contained in:
Eladash 2022-08-18 22:56:44 +03:00 committed by Ivan
parent 86702186f2
commit 28bec8e1bf
4 changed files with 56 additions and 8 deletions

View File

@ -3456,7 +3456,7 @@ bool spu_thread::process_mfc_cmd()
// Spinning, might as well yield cpu resources
state += cpu_flag::wait;
vm::reservation_notifier(addr).wait(rtime, -128, atomic_wait_timeout{100'000});
vm::reservation_notifier(addr).wait(rtime, -128, atomic_wait_timeout{50'000});
// Reset perf
perf0.restart();
@ -4208,7 +4208,7 @@ s64 spu_thread::get_ch_value(u32 ch)
using resrv_ptr = std::add_pointer_t<const decltype(rdata)>;
resrv_ptr resrv_mem = vm::get_super_ptr<decltype(rdata)>(raddr);
resrv_mem = vm::get_super_ptr<decltype(rdata)>(raddr);
std::shared_ptr<utils::shm> rdata_shm;
// Does not need safe access to the reservation if LR is the only event masked
@ -4275,7 +4275,34 @@ s64 spu_thread::get_ch_value(u32 ch)
// Don't busy-wait with TSX - memory is sensitive
if (g_use_rtm || !g_cfg.core.spu_reservation_busy_waiting)
{
vm::reservation_notifier(raddr).wait(rtime, -128, atomic_wait_timeout{100'000});
atomic_wait_engine::set_one_time_use_wait_callback(mask1 != SPU_EVENT_LR ? nullptr : +[](u64) -> bool
{
const auto _this = static_cast<spu_thread*>(cpu_thread::get_current());
AUDIT(_this->id_type() == 1);
const auto old = +_this->state;
if (is_stopped(old))
{
return false;
}
if (is_paused(old))
{
return true;
}
if (!vm::check_addr(_this->raddr) || !cmp_rdata(_this->rdata, *_this->resrv_mem))
{
_this->set_events(SPU_EVENT_LR);
_this->raddr = 0;
return false;
}
return true;
});
vm::reservation_notifier(raddr).wait(rtime, -128, atomic_wait_timeout{80'000});
}
else
{

View File

@ -751,6 +751,7 @@ public:
u64 rtime = 0;
alignas(64) std::byte rdata[128]{};
u32 raddr = 0;
const decltype(rdata)* resrv_mem{};
// Range Lock pointer
atomic_t<u64, 64>* range_lock{};

View File

@ -49,6 +49,9 @@ static bool s_null_wait_cb(const void*, u64, u64){ return true; };
// Callback for wait() function, returns false if wait should return
static thread_local bool(*s_tls_wait_cb)(const void* data, u64 attempts, u64 stamp0) = s_null_wait_cb;
// Callback for wait() function for a second custom condition, commonly passed with timeout
static thread_local bool(*s_tls_one_time_wait_cb)(u64 attempts) = nullptr;
// Callback for notification functions for optimizations
static thread_local void(*s_tls_notify_cb)(const void* data, u64 progress) = nullptr;
@ -1183,17 +1186,27 @@ SAFE_BUFFERS(void) atomic_wait_engine::wait(const void* data, u32 size, u128 old
}
}
#endif
if (!s_tls_wait_cb(data, ++attempts, stamp0))
{
break;
}
if (s_tls_one_time_wait_cb)
{
if (!s_tls_one_time_wait_cb(attempts))
{
break;
}
// The callback's condition overrides the timeout escape, because timing out makes little sense when a custom condition is supplied
continue;
}
if (timeout + 1)
{
// TODO: reduce timeout instead
break;
}
if (!s_tls_wait_cb(data, ++attempts, stamp0))
{
break;
}
}
while (!fallback)
@ -1243,6 +1256,7 @@ SAFE_BUFFERS(void) atomic_wait_engine::wait(const void* data, u32 size, u128 old
root_info::slot_free(iptr, slot, 0);
s_tls_wait_cb(data, -1, stamp0);
s_tls_one_time_wait_cb = nullptr;
}
template <bool NoAlert = false>
@ -1309,6 +1323,11 @@ void atomic_wait_engine::set_wait_callback(bool(*cb)(const void*, u64, u64))
}
}
void atomic_wait_engine::set_one_time_use_wait_callback(bool(*cb)(u64 progress))
{
s_tls_one_time_wait_cb = cb;
}
void atomic_wait_engine::set_notify_callback(void(*cb)(const void*, u64))
{
s_tls_notify_cb = cb;

View File

@ -322,6 +322,7 @@ public:
static void set_wait_callback(bool(*cb)(const void* data, u64 attempts, u64 stamp0));
static void set_notify_callback(void(*cb)(const void* data, u64 progress));
static void set_one_time_use_wait_callback(bool(*cb)(u64 progress));
static void notify_all(const void* data)
{