Mirror of https://github.com/RPCS3/rpcs3.git, synced 2024-11-22 02:32:36 +01:00

LV2: Re-add optimization dropped by previous commit

Currently only for lock-free syscalls.
Eladash 2022-08-04 15:29:24 +03:00 committed by Ivan
parent 34bae90820
commit 26e731b487
4 changed files with 33 additions and 7 deletions
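How the re-added optimization works, as the hunks below show: syscall bodies queue pending thread wakeups into the g_to_notify array while the IDM mutex is held, and a notify_all_t RAII guard delivers them from its destructor, after the lock is released. The new cleanup() pass runs while the lock is still held and omits queued wakeups whose target thread no longer has cpu_flag::signal and cpu_flag::wait set, i.e. no longer needs waking. A minimal standalone sketch of that pattern follows; notify_guard, fake_thread and wants_wakeup are illustrative stand-ins, not rpcs3 names.

#include <atomic>
#include <cstdio>

// Illustrative stand-in for a sleeping thread (not an rpcs3 type)
struct fake_thread
{
	std::atomic<bool> wants_wakeup{true};
};

// RAII guard modeled loosely on lv2_obj::notify_all_t
struct notify_guard
{
	static constexpr int max_pending = 4;
	fake_thread* pending[max_pending]{};

	// Called under the lock: remember a thread to wake later
	void queue(fake_thread* t)
	{
		for (auto& slot : pending)
		{
			if (!slot)
			{
				slot = t;
				return;
			}
		}
	}

	// Called while the lock is still held: drop wakeups that became unnecessary
	void cleanup()
	{
		for (auto& slot : pending)
		{
			if (slot && !slot->wants_wakeup)
			{
				slot = nullptr; // the real code stores a sentinel instead, see the last hunk
			}
		}
	}

	// Runs after the lock is released: deliver whatever survived cleanup()
	~notify_guard()
	{
		for (auto* slot : pending)
		{
			if (slot)
			{
				std::printf("waking thread %p\n", static_cast<void*>(slot));
			}
		}
	}
};

int main()
{
	fake_thread a, b;
	{
		notify_guard notify;
		notify.queue(&a);
		notify.queue(&b);
		b.wants_wakeup = false; // b was satisfied by other means in the meantime
		notify.cleanup();       // prunes b while "the lock" is conceptually held
	}                           // destructor wakes only a
}

The split matters because the potentially expensive wake (a futex-style notification in the real engine) happens outside the lock, while the cheap "is it still needed?" check happens inside it, where the target's state cannot change underneath.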

View File

@@ -371,12 +371,12 @@ public:
 	bool bit_test_reset(uint bit) = delete;
 	bool bit_test_invert(uint bit) = delete;
 
-	bool all_of(bs_t arg)
+	bool all_of(bs_t arg) const
 	{
 		return base::load().all_of(arg);
 	}
 
-	bool none_of(bs_t arg)
+	bool none_of(bs_t arg) const
 	{
 		return base::load().none_of(arg);
 	}
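The const qualifiers added here are what the new cleanup() function (last hunk below) relies on: it inspects cpu_thread::state through a pointer it casts to const, so all_of() must be callable on a const object. A self-contained illustration of the constraint, with flag_set as a simplified stand-in for atomic_bs_t:

#include <cstdio>

// Simplified stand-in for atomic_bs_t<cpu_flag> (illustrative only)
struct flag_set
{
	unsigned bits = 0;

	// const-qualified, as of this commit
	bool all_of(unsigned mask) const { return (bits & mask) == mask; }
};

int main()
{
	flag_set state{0b11};

	// cleanup() views the state through a pointer-to-const;
	// without the const qualifier the call below would not compile
	const flag_set* p = &state;
	std::printf("all_of: %d\n", p->all_of(0b11));
}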

View File

@@ -173,6 +173,7 @@ error_code _sys_lwmutex_lock(ppu_thread& ppu, u32 lwmutex_id, u64 timeout)
 			}
 
 			mutex.sleep(ppu, timeout);
+			notify.cleanup();
 			return false;
 		});
 
@@ -308,7 +309,7 @@ error_code _sys_lwmutex_unlock(ppu_thread& ppu, u32 lwmutex_id)
 			}
 
 			mutex.awake(cpu);
-			return;
+			notify.cleanup(); // lv2_lwmutex::mutex is not really active 99% of the time, can be ignored
 		}
 	});
 
@@ -345,7 +346,7 @@ error_code _sys_lwmutex_unlock2(ppu_thread& ppu, u32 lwmutex_id)
 			}
 
 			static_cast<ppu_thread*>(cpu)->gpr[3] = CELL_EBUSY;
 			mutex.awake(cpu);
-			return;
+			notify.cleanup(); // lv2_lwmutex::mutex is not really active 99% of the time, can be ignored
 		}
 	});

View File

@@ -169,6 +169,7 @@ error_code sys_mutex_lock(ppu_thread& ppu, u32 mutex_id, u64 timeout)
 			else
 			{
 				mutex.sleep(ppu, timeout);
+				notify.cleanup();
 			}
 		}
 
@@ -314,6 +315,7 @@ error_code sys_mutex_unlock(ppu_thread& ppu, u32 mutex_id)
 			result = {};
 		}
 
+		notify.cleanup();
 		return result;
 	});
 
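Placement of the new calls is the same in all four syscalls above: cleanup() assumes the IDM mutex is still locked (its own comment below says so), so it is invoked inside the locked region right after sleep()/awake() queue the wakeup, never after the lock is dropped. A condensed sketch of that ordering, with a plain std::mutex standing in for the IDM mutex and guard/syscall_shape as made-up names:

#include <cstdio>
#include <mutex>

std::mutex idm_mutex; // stand-in for the IDM mutex

// Cut-down guard; 'pending' models a single queued wakeup
struct guard
{
	bool pending = false;

	// Like the real cleanup(), assumes idm_mutex is held by the caller
	void cleanup()
	{
		pending = false; // the wakeup turned out to be unnecessary, drop it
	}

	~guard()
	{
		if (pending)
		{
			std::puts("delivering wakeup outside the lock");
		}
	}
};

// The shape shared by the call sites in this commit
void syscall_shape()
{
	guard notify; // outlives the locked scope
	{
		std::lock_guard lock(idm_mutex);
		notify.pending = true; // sleep()/awake() queued a wakeup
		notify.cleanup();      // still under the lock: thread state is stable here
	}
	// calling cleanup() down here instead would race with the woken thread's state
}

int main()
{
	syscall_shape();
}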

View File

@@ -507,11 +507,14 @@ public:
 				return;
 			}
 
-			// Note: by the time of notification the thread could have been deallocated which is why the direct function is used
-			// TODO: Pass a narrower mask
-			atomic_wait_engine::notify_one(cpu, 4, atomic_wait::default_mask<atomic_bs_t<cpu_flag>>);
+			if (cpu != &g_to_notify)
+			{
+				// Note: by the time of notification the thread could have been deallocated which is why the direct function is used
+				// TODO: Pass a narrower mask
+				atomic_wait_engine::notify_one(cpu, 4, atomic_wait::default_mask<atomic_bs_t<cpu_flag>>);
+			}
 		}
 	}
 
 	// Can be called before the actual sleep call in order to move it out of mutex scope
 	static inline void prepare_for_sleep(cpu_thread& cpu)
@@ -527,6 +530,26 @@ public:
 			g_postpone_notify_barrier = true;
 		}
 
+		notify_all_t(const notify_all_t&) = delete;
+
+		static void cleanup()
+		{
+			for (auto& cpu : g_to_notify)
+			{
+				if (!cpu)
+				{
+					return;
+				}
+
+				// While IDM mutex is still locked (this function assumes so) check if the notification is still needed
+				if (cpu != &g_to_notify && !static_cast<const decltype(cpu_thread::state)*>(cpu)->all_of(cpu_flag::signal + cpu_flag::wait))
+				{
+					// Omit it (this is a void pointer, it can hold anything)
+					cpu = &g_to_notify;
+				}
+			}
+		}
+
 		~notify_all_t() noexcept
 		{
 			lv2_obj::notify_all();
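A detail worth noting in cleanup(): entries of g_to_notify are opaque void pointers and a null entry terminates the sequence, so a stale wakeup cannot simply be nulled out. Instead it is overwritten with &g_to_notify itself ("this is a void pointer, it can hold anything"), a sentinel that the notify_all() hunk above now tests for with if (cpu != &g_to_notify) before issuing the wakeup.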