Mirror of https://github.com/RPCS3/rpcs3.git
CELL: Postponed address notifications
commit dddd12f66b
parent 384c807d6a
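In short, a successful reservation store no longer calls vm::reservation_notifier(addr).notify_all() immediately: the storing PPU thread records the address in the new ppu_thread::res_notify field and delivers the wake-up later, when it goes to sleep, wakes another thread, or stores to a different 128-byte reservation line. Below is a minimal standalone sketch of that defer/flush idea; the helper names (ppu_like, postpone, flush, notify_waiters) are illustrative only, not RPCS3 API apart from res_notify itself, and the flush points shown simplify what the diff below actually does.

#include <cstdint>

// Illustrative stub: in RPCS3 this role is played by vm::reservation_notifier(addr).notify_all().
void notify_waiters(std::uint32_t /*addr*/) {}

struct ppu_like
{
    std::uint32_t res_notify = 0; // postponed reservation address, 0 = nothing pending
};

// On a successful reservation store: remember the address instead of waking waiters right away.
void postpone(ppu_like& ppu, std::uint32_t addr)
{
    if (ppu.res_notify && ((ppu.res_notify ^ addr) & -128))
    {
        // A different 128-byte line is already pending: deliver that wake-up before replacing it.
        notify_waiters(ppu.res_notify);
    }

    ppu.res_notify = addr;
}

// At a flush point (the thread goes to sleep, wakes another thread, or must not delay further):
void flush(ppu_like& ppu)
{
    if (const std::uint32_t addr = ppu.res_notify)
    {
        ppu.res_notify = 0;
        notify_waiters(addr);
    }
}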
@@ -3049,12 +3049,29 @@ static bool ppu_store_reservation(ppu_thread& ppu, u32 addr, u64 reg_value)
         return false;
     }())
     {
         // Test a common pattern in lwmutex
         extern atomic_t<u32> liblv2_begin, liblv2_end;

+        const u32 notify = ppu.res_notify;
+
+        if (notify)
+        {
+            vm::reservation_notifier(notify).notify_all();
+            ppu.res_notify = 0;
+        }
+
         // Avoid notifications from lwmutex or sys_spinlock
         if (ppu.cia < liblv2_begin || ppu.cia >= liblv2_end)
         {
-            res.notify_all();
+            if (!notify)
+            {
+                // Try to postpone notification to when PPU is asleep or join notifications on the same address
+                // This also optimizes a mutex - won't notify after lock is acquired (prolonging the critical section duration), only notifies on unlock
+                ppu.res_notify = addr;
+            }
+            else if ((addr ^ notify) & -128)
+            {
+                res.notify_all();
+            }
         }

         if (addr == ppu.last_faddr)
@@ -3066,6 +3083,16 @@ static bool ppu_store_reservation(ppu_thread& ppu, u32 addr, u64 reg_value)
         return true;
     }

+    const u32 notify = ppu.res_notify;
+
+    // Do not risk postponing too much (because this is probably an indefinite loop)
+    // And on failure it has some time to do something else
+    if (notify && ((addr ^ notify) & -128))
+    {
+        vm::reservation_notifier(notify).notify_all();
+        ppu.res_notify = 0;
+    }
+
     return false;
 }
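Editor's note on the mask used above: Cell reservations are tracked per 128-byte line, so (addr ^ notify) & -128 clears the low 7 bits of the XOR and is zero exactly when both addresses fall on the same line; only then can the postponed wake-up be coalesced with the new one, otherwise the pending address must be flushed. A small compile-time illustration, not part of the commit:

#include <cstdint>

// True when both addresses lie in the same 128-byte reservation line,
// i.e. they differ only in the low 7 bits.
constexpr bool same_line(std::uint32_t a, std::uint32_t b)
{
    return ((a ^ b) & static_cast<std::uint32_t>(-128)) == 0;
}

static_assert(same_line(0x10080, 0x100FF));  // same line: wake-up can be coalesced
static_assert(!same_line(0x10080, 0x10100)); // next line: pending address must be delivered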
@@ -262,6 +262,7 @@ public:
     u64 rtime{0};
     alignas(64) std::byte rdata[128]{}; // Reservation data
     bool use_full_rdata{};
+    u32 res_notify{};

     union ppu_prio_t
     {
@@ -2,6 +2,7 @@
 #include "Emu/System.h"
 #include "Emu/system_config.h"
 #include "Emu/Memory/vm_ptr.h"
+#include "Emu/Memory/vm_reservation.h"
 #include "Emu/Memory/vm_locking.h"

 #include "Emu/Cell/PPUFunction.h"
@@ -1268,6 +1269,31 @@ bool lv2_obj::sleep(cpu_thread& cpu, const u64 timeout)
         prepare_for_sleep(cpu);
     }

+    if (cpu.id_type() == 1)
+    {
+        if (u32 addr = static_cast<ppu_thread&>(cpu).res_notify)
+        {
+            static_cast<ppu_thread&>(cpu).res_notify = 0;
+
+            const usz notify_later_idx = std::basic_string_view<const void*>{g_to_notify, std::size(g_to_notify)}.find_first_of(std::add_pointer_t<const void>{});
+
+            if (notify_later_idx != umax)
+            {
+                g_to_notify[notify_later_idx] = &vm::reservation_notifier(addr);
+
+                if (notify_later_idx < std::size(g_to_notify) - 1)
+                {
+                    // Null-terminate the list if it ends before last slot
+                    g_to_notify[notify_later_idx + 1] = nullptr;
+                }
+            }
+            else
+            {
+                vm::reservation_notifier(addr).notify_all();
+            }
+        }
+    }
+
     bool result = false;
     const u64 current_time = get_guest_system_time();
     {
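The block added to lv2_obj::sleep() (and mirrored in lv2_obj::awake() in the next hunk) hands the deferred address over to g_to_notify, a fixed-size, null-terminated array of pointers that is drained later (outside this diff); the basic_string_view::find_first_of call simply locates the first empty slot, and when none is free the notifier is signalled immediately instead. A rough plain-STL equivalent of that slot handling, with an assumed capacity since the real size of g_to_notify is not visible in this diff:

#include <algorithm>
#include <cstddef>
#include <iterator>

// Assumed size for illustration only.
const void* to_notify[4]{};

// Index of the first empty (null) slot, or the array size when the list is full.
// This mirrors the find_first_of call over g_to_notify used in the diff.
std::size_t first_free_slot()
{
    const auto it = std::find(std::begin(to_notify), std::end(to_notify), nullptr);
    return static_cast<std::size_t>(std::distance(std::begin(to_notify), it));
}

// Append a waiter and keep the list null-terminated; returns false when full
// so the caller can fall back to notifying directly.
bool push_notify(const void* waiter)
{
    const std::size_t idx = first_free_slot();

    if (idx >= std::size(to_notify))
        return false;

    to_notify[idx] = waiter;

    if (idx + 1 < std::size(to_notify))
        to_notify[idx + 1] = nullptr;

    return true;
}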
@@ -1294,6 +1320,31 @@ bool lv2_obj::sleep(cpu_thread& cpu, const u64 timeout)

 bool lv2_obj::awake(cpu_thread* thread, s32 prio)
 {
+    if (ppu_thread* ppu = cpu_thread::get_current<ppu_thread>())
+    {
+        if (u32 addr = ppu->res_notify)
+        {
+            ppu->res_notify = 0;
+
+            const usz notify_later_idx = std::basic_string_view<const void*>{g_to_notify, std::size(g_to_notify)}.find_first_of(std::add_pointer_t<const void>{});
+
+            if (notify_later_idx != umax)
+            {
+                g_to_notify[notify_later_idx] = &vm::reservation_notifier(addr);
+
+                if (notify_later_idx < std::size(g_to_notify) - 1)
+                {
+                    // Null-terminate the list if it ends before last slot
+                    g_to_notify[notify_later_idx + 1] = nullptr;
+                }
+            }
+            else
+            {
+                vm::reservation_notifier(addr).notify_all();
+            }
+        }
+    }
+
     bool result = false;
     {
         std::lock_guard lock(g_mutex);
@@ -1673,7 +1724,7 @@ void lv2_obj::cleanup()

 void lv2_obj::schedule_all(u64 current_time)
 {
-    usz notify_later_idx = 0;
+    usz notify_later_idx = std::basic_string_view<const void*>{g_to_notify, std::size(g_to_notify)}.find_first_of(std::add_pointer_t<const void>{});

     if (!g_pending && g_scheduler_ready)
     {
@@ -1692,7 +1743,7 @@ void lv2_obj::schedule_all(u64 current_time)
             continue;
         }

-        if (notify_later_idx == std::size(g_to_notify))
+        if (notify_later_idx >= std::size(g_to_notify))
         {
             // Out of notification slots, notify locally (resizable container is not worth it)
             target->state.notify_one();
@@ -1726,7 +1777,7 @@ void lv2_obj::schedule_all(u64 current_time)
         ensure(!target->state.test_and_set(cpu_flag::notify));

         // Otherwise notify it to wake itself
-        if (notify_later_idx == std::size(g_to_notify))
+        if (notify_later_idx >= std::size(g_to_notify))
         {
             // Out of notification slots, notify locally (resizable container is not worth it)
             target->state.notify_one();
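Two related adjustments in schedule_all(): notify_later_idx now starts at the first free slot rather than 0, since sleep() or awake() may already have queued reservation notifiers into g_to_notify, and the full-buffer checks become >= because find_first_of returns npos (the all-ones value RPCS3 calls umax) when no slot is empty, which is far larger than any slot count. For reference only:

#include <cstddef>
#include <string_view>

// npos is size_type(-1), so an index obtained from find_first_of can exceed any
// real slot count and must be range-checked with >= rather than ==.
static_assert(std::string_view::npos == static_cast<std::size_t>(-1));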
@@ -465,7 +465,8 @@ public:

         // While IDM mutex is still locked (this function assumes so) check if the notification is still needed
         // Pending flag is meant for forced notification (if the CPU really has pending work it can restore the flag in theory)
-        if (cpu != &g_to_notify && static_cast<const decltype(cpu_thread::state)*>(cpu)->none_of(cpu_flag::signal + cpu_flag::pending))
+        // Disabled to allow reservation notifications from here
+        if (false && cpu != &g_to_notify && static_cast<const decltype(cpu_thread::state)*>(cpu)->none_of(cpu_flag::signal + cpu_flag::pending))
         {
             // Omit it (this is a void pointer, it can hold anything)
             cpu = &g_to_notify;