diff --git a/Utilities/bit_set.h b/Utilities/bit_set.h index 9f21fbecd7..6bcbc768c0 100644 --- a/Utilities/bit_set.h +++ b/Utilities/bit_set.h @@ -371,12 +371,12 @@ public: bool bit_test_reset(uint bit) = delete; bool bit_test_invert(uint bit) = delete; - bool all_of(bs_t<T> arg) + bool all_of(bs_t<T> arg) const { return base::load().all_of(arg); } - bool none_of(bs_t<T> arg) + bool none_of(bs_t<T> arg) const { return base::load().none_of(arg); } diff --git a/rpcs3/Emu/Cell/lv2/sys_lwmutex.cpp b/rpcs3/Emu/Cell/lv2/sys_lwmutex.cpp index f2c292858b..ec15ac3414 100644 --- a/rpcs3/Emu/Cell/lv2/sys_lwmutex.cpp +++ b/rpcs3/Emu/Cell/lv2/sys_lwmutex.cpp @@ -173,6 +173,7 @@ error_code _sys_lwmutex_lock(ppu_thread& ppu, u32 lwmutex_id, u64 timeout) } mutex.sleep(ppu, timeout); + notify.cleanup(); return false; }); @@ -308,7 +309,7 @@ error_code _sys_lwmutex_unlock(ppu_thread& ppu, u32 lwmutex_id) } mutex.awake(cpu); - return; + notify.cleanup(); // lv2_lwmutex::mutex is not really active 99% of the time, can be ignored } }); @@ -345,7 +346,7 @@ error_code _sys_lwmutex_unlock2(ppu_thread& ppu, u32 lwmutex_id) static_cast<ppu_thread*>(cpu)->gpr[3] = CELL_EBUSY; mutex.awake(cpu); - return; + notify.cleanup(); // lv2_lwmutex::mutex is not really active 99% of the time, can be ignored } }); diff --git a/rpcs3/Emu/Cell/lv2/sys_mutex.cpp b/rpcs3/Emu/Cell/lv2/sys_mutex.cpp index 8c64a7e3d0..013167130d 100644 --- a/rpcs3/Emu/Cell/lv2/sys_mutex.cpp +++ b/rpcs3/Emu/Cell/lv2/sys_mutex.cpp @@ -169,6 +169,7 @@ error_code sys_mutex_lock(ppu_thread& ppu, u32 mutex_id, u64 timeout) else { mutex.sleep(ppu, timeout); + notify.cleanup(); } } @@ -314,6 +315,7 @@ error_code sys_mutex_unlock(ppu_thread& ppu, u32 mutex_id) result = {}; } + notify.cleanup(); return result; }); diff --git a/rpcs3/Emu/Cell/lv2/sys_sync.h b/rpcs3/Emu/Cell/lv2/sys_sync.h index 7a84d3eba5..4f73afeab8 100644 --- a/rpcs3/Emu/Cell/lv2/sys_sync.h +++ b/rpcs3/Emu/Cell/lv2/sys_sync.h @@ -507,9 +507,12 @@ public: return; } - // Note: by the time of
notification the thread could have been deallocated which is why the direct function is used - // TODO: Pass a narrower mask - atomic_wait_engine::notify_one(cpu, 4, atomic_wait::default_mask<atomic_bs_t<cpu_flag>>); + if (cpu != &g_to_notify) + { + // Note: by the time of notification the thread could have been deallocated which is why the direct function is used + // TODO: Pass a narrower mask + atomic_wait_engine::notify_one(cpu, 4, atomic_wait::default_mask<atomic_bs_t<cpu_flag>>); + } } } @@ -527,6 +530,26 @@ public: g_postpone_notify_barrier = true; } + notify_all_t(const notify_all_t&) = delete; + + static void cleanup() + { + for (auto& cpu : g_to_notify) + { + if (!cpu) + { + return; + } + + // While IDM mutex is still locked (this function assumes so) check if the notification is still needed + if (cpu != &g_to_notify && !static_cast<const atomic_bs_t<cpu_flag>*>(cpu)->all_of(cpu_flag::signal + cpu_flag::wait)) + { + // Omit it (this is a void pointer, it can hold anything) + cpu = &g_to_notify; + } + } + } + ~notify_all_t() noexcept { lv2_obj::notify_all();