mirror of https://github.com/RPCS3/rpcs3.git synced 2024-11-25 12:12:50 +01:00

Atomic waiting refactoring (#9208)

* Use atomic waitables instead of global thread wait as often as possible.
* Add ::is_stopped() and ::is_paused(), which can be used in atomic loops and with atomic waits. (constexpr cpu_flag test functions)
* Fix notification bug in sys_spu_thread_group_exit/terminate. (an old bug, exacerbated by #9117)
* Restore function time statistics at Emu.Stop() (instead of the current "X syscall failed with 0x00000000 : 0").
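
Below is a minimal standalone sketch of the new waiting idiom, for illustration only: it uses C++20 std::atomic wait/notify in place of RPCS3's bs_t<cpu_flag>, thread_ctrl::wait_on() and atomic notify_one(cpu_flag), and the flag values are invented. The point of the pattern is that the waiter consumes cpu_flag::signal and takes a snapshot in a single atomic RMW, tests the stop/exit bits on that same snapshot via the new constexpr ::is_stopped(), and only then blocks on the atomic with the observed value, so a notification arriving between the check and the wait cannot be lost.

// Sketch only: standard C++20 atomics stand in for RPCS3's bs_t<cpu_flag>,
// thread_ctrl::wait_on() and atomic notify_one(); flag values are made up.
#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>

constexpr unsigned flag_signal = 1u << 0;
constexpr unsigned flag_stop   = 1u << 1;
constexpr unsigned flag_exit   = 1u << 2;

// Counterpart of the new constexpr ::is_stopped(): tests a snapshot, not the live atomic
constexpr bool is_stopped(unsigned snapshot)
{
    return (snapshot & (flag_stop | flag_exit)) != 0;
}

int main()
{
    std::atomic<unsigned> state{0};

    std::thread waiter([&]
    {
        while (true)
        {
            // Consume the signal bit and take a snapshot in one atomic RMW
            // (the role of state.fetch_sub(cpu_flag::signal) in this commit)
            const unsigned old = state.fetch_and(~flag_signal);

            if (is_stopped(old) || (old & flag_signal))
            {
                break; // stopped, or a signal was already pending
            }

            // Block on the atomic itself with the observed value, so a notify
            // issued between the snapshot and this call is never lost
            state.wait(old); // stands in for thread_ctrl::wait_on(state, old)
        }

        std::puts("waiter: signalled or stopped");
    });

    std::this_thread::sleep_for(std::chrono::milliseconds(10));
    state.fetch_or(flag_signal);
    state.notify_one(); // stands in for state.notify_one(cpu_flag::signal)
    waiter.join();
}

The same shape recurs throughout the diff below: fetch_sub(cpu_flag::signal), an is_stopped(old) / old & cpu_flag::signal check on the snapshot, then thread_ctrl::wait_on(state, old), while wakers call state.notify_one(flag) after mutating the flag.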
Eladash 2021-02-13 16:50:07 +02:00 committed by GitHub
parent cf384795d2
commit f43260bd58
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
40 changed files with 375 additions and 234 deletions

View File

@ -1529,14 +1529,14 @@ bool handle_access_violation(u32 addr, bool is_writing, x64_context* context) no
else
{
// Wait until the thread is recovered
while (!cpu->state.test_and_reset(cpu_flag::signal))
while (auto state = cpu->state.fetch_sub(cpu_flag::signal))
{
if (cpu->is_stopped())
if (is_stopped(state) || state & cpu_flag::signal)
{
break;
}
thread_ctrl::wait();
thread_ctrl::wait_on(cpu->state, state);
}
}

View File

@ -185,7 +185,7 @@ struct cpu_prof
if (threads.empty())
{
// Wait for messages if no work (don't waste CPU)
atomic_wait::list(registered).wait();
thread_ctrl::wait_on(registered, nullptr);
continue;
}
@ -557,7 +557,14 @@ void cpu_thread::operator()()
while (!(state & cpu_flag::exit) && thread_ctrl::state() != thread_state::aborting)
{
// Check stop status
if (!(state & cpu_flag::stop))
const auto state0 = +state;
if (is_stopped(state0 - cpu_flag::stop))
{
break;
}
if (!(state0 & cpu_flag::stop))
{
cpu_task();
@ -569,7 +576,7 @@ void cpu_thread::operator()()
continue;
}
thread_ctrl::wait();
thread_ctrl::wait_on(state, state0);
if (state & cpu_flag::ret && state.test_and_reset(cpu_flag::ret))
{
@ -593,9 +600,9 @@ cpu_thread::cpu_thread(u32 id)
g_threads_created++;
}
void cpu_thread::cpu_wait()
void cpu_thread::cpu_wait(bs_t<cpu_flag> old)
{
thread_ctrl::wait();
thread_ctrl::wait_on(state, old);
}
bool cpu_thread::check_state() noexcept
@ -607,6 +614,7 @@ bool cpu_thread::check_state() noexcept
while (true)
{
// Process all flags in a single atomic op
bs_t<cpu_flag> state1;
const auto state0 = state.fetch_op([&](bs_t<cpu_flag>& flags)
{
bool store = false;
@ -660,7 +668,7 @@ bool cpu_thread::check_state() noexcept
}
// Atomically clean wait flag and escape
if (!(flags & (cpu_flag::exit + cpu_flag::ret + cpu_flag::stop)))
{
// Check pause flags which hold thread inside check_state (ignore suspend on cpu_flag::temp)
if (flags & (cpu_flag::pause + cpu_flag::dbg_global_pause + cpu_flag::dbg_pause + cpu_flag::memory + (cpu_can_stop ? cpu_flag::suspend : cpu_flag::pause)))
@ -672,6 +680,7 @@ bool cpu_thread::check_state() noexcept
}
escape = false;
state1 = flags;
return store;
}
@ -703,6 +712,7 @@ bool cpu_thread::check_state() noexcept
}
escape = true;
state1 = flags;
return store;
}).first;
@ -714,6 +724,11 @@ bool cpu_thread::check_state() noexcept
cpu_counter::add(this);
}
if (retval)
{
cpu_on_stop();
}
ensure(cpu_can_stop || !retval);
return retval;
}
@ -739,7 +754,7 @@ bool cpu_thread::check_state() noexcept
g_fxo->get<gdb_server>()->pause_from(this);
}
cpu_wait();
cpu_wait(state1);
}
else
{
@ -799,6 +814,9 @@ void cpu_thread::notify()
void cpu_thread::abort()
{
state += cpu_flag::exit;
state.notify_one(cpu_flag::exit);
// Downcast to correct type
if (id_type() == 1)
{
@ -1076,7 +1094,6 @@ void cpu_thread::stop_all() noexcept
{
auto on_stop = [](u32, cpu_thread& cpu)
{
cpu.state += cpu_flag::exit;
cpu.abort();
};

View File

@ -25,6 +25,18 @@ enum class cpu_flag : u32
__bitset_enum_max
};
// Test stopped state
constexpr bool is_stopped(bs_t<cpu_flag> state)
{
return !!(state & (cpu_flag::stop + cpu_flag::exit));
}
// Test paused state
constexpr bool is_paused(bs_t<cpu_flag> state)
{
return !!(state & (cpu_flag::suspend + cpu_flag::dbg_global_pause + cpu_flag::dbg_pause));
}
class cpu_thread
{
public:
@ -60,16 +72,25 @@ public:
return false;
}
// Test stopped state
bool is_stopped() const
// Wrappers
static constexpr bool is_stopped(bs_t<cpu_flag> s)
{
return !!(state & (cpu_flag::stop + cpu_flag::exit));
return ::is_stopped(s);
}
static constexpr bool is_paused(bs_t<cpu_flag> s)
{
return ::is_paused(s);
}
bool is_stopped() const
{
return ::is_stopped(state);
}
// Test paused state
bool is_paused() const
{
return !!(state & (cpu_flag::suspend + cpu_flag::dbg_global_pause + cpu_flag::dbg_pause));
return ::is_paused(state);
}
bool has_pause_flag() const
@ -122,7 +143,10 @@ public:
virtual void cpu_return() {}
// Callback for thread_ctrl::wait or RSX wait
virtual void cpu_wait();
virtual void cpu_wait(bs_t<cpu_flag> flags);
// Callback for function abortion stats on Emu.Stop()
virtual void cpu_on_stop() {}
// For internal use
struct suspend_work

View File

@ -95,7 +95,7 @@ struct msg_dlg_thread_info
if (new_value == 0)
{
wait_until.wait(0);
thread_ctrl::wait_on(wait_until, 0);
continue;
}
@ -217,14 +217,19 @@ error_code open_msg_dialog(bool is_blocking, u32 type, vm::cptr<char> msgString,
lv2_obj::awake(&ppu);
});
while (!ppu.state.test_and_reset(cpu_flag::signal))
while (auto state = ppu.state.fetch_sub(cpu_flag::signal))
{
if (ppu.is_stopped())
if (is_stopped(state))
{
return 0;
return {};
}
thread_ctrl::wait();
if (state & cpu_flag::signal)
{
break;
}
thread_ctrl::wait_on(ppu.state, state);
}
if (is_blocking)

View File

@ -1162,7 +1162,7 @@ static NEVER_INLINE error_code savedata_op(ppu_thread& ppu, u32 operation, u32 v
// Reschedule after a blocking dialog returns
if (ppu.check_state())
{
return 0;
return {};
}
if (res != CELL_OK)

View File

@ -413,7 +413,7 @@ error_code cellSysutilCheckCallback(ppu_thread& ppu)
if (ppu.is_stopped())
{
return 0;
return {};
}
}

View File

@ -673,7 +673,7 @@ static error_code vdecOpen(ppu_thread& ppu, T type, U res, vm::cptr<CellVdecCb>
});
thrd->state -= cpu_flag::stop;
thread_ctrl::notify(*thrd);
thrd->state.notify_one(cpu_flag::stop);
return CELL_OK;
}

View File

@ -628,11 +628,11 @@ error_code sceNpTrophyRegisterContext(ppu_thread& ppu, u32 context, u32 handle,
for (u32 old_value; current < until && (old_value = *queued);
current = get_system_time())
{
queued->wait(old_value, atomic_wait_timeout{(until - current) * 1000});
thread_ctrl::wait_on(*queued, old_value, until - current);
if (ppu.is_stopped())
{
return 0;
return {};
}
}
}

View File

@ -13,6 +13,7 @@ using ppu_function_t = bool(*)(ppu_thread&);
ppu.current_function = #func;\
std::memcpy(ppu.syscall_args, ppu.gpr + 3, sizeof(ppu.syscall_args)); \
ppu_func_detail::do_call(ppu, func);\
static_cast<void>(ppu.test_stopped());\
ppu.current_function = old_f;\
ppu.cia += 4;\
__VA_ARGS__;\

View File

@ -971,6 +971,23 @@ void ppu_thread::cpu_sleep()
lv2_obj::awake(this);
}
void ppu_thread::cpu_on_stop()
{
if (current_function)
{
if (start_time)
{
ppu_log.warning("'%s' aborted (%fs)", current_function, (get_guest_system_time() - start_time) / 1000000.);
}
else
{
ppu_log.warning("'%s' aborted", current_function);
}
current_function = {};
}
}
void ppu_thread::exec_task()
{
if (g_cfg.core.ppu_decoder == ppu_decoder_type::llvm)
@ -1138,20 +1155,18 @@ cmd64 ppu_thread::cmd_wait()
{
while (true)
{
if (state) [[unlikely]]
{
if (is_stopped())
{
return cmd64{};
}
}
if (cmd64 result = cmd_queue[cmd_queue.peek()].exchange(cmd64{}))
{
return result;
}
thread_ctrl::wait();
if (is_stopped())
{
return {};
}
thread_ctrl::wait_on(cmd_notify, 0);
cmd_notify = 0;
}
}
@ -1205,18 +1220,7 @@ void ppu_thread::fast_call(u32 addr, u32 rtoc)
{
if (std::uncaught_exceptions())
{
if (current_function)
{
if (start_time)
{
ppu_log.warning("'%s' aborted (%fs)", current_function, (get_guest_system_time() - start_time) / 1000000.);
}
else
{
ppu_log.warning("'%s' aborted", current_function);
}
}
cpu_on_stop();
current_function = old_func;
}
else

View File

@ -124,6 +124,7 @@ public:
virtual std::string dump_misc() const override;
virtual void cpu_task() override final;
virtual void cpu_sleep() override;
virtual void cpu_on_stop() override;
virtual ~ppu_thread() override;
ppu_thread(const ppu_thread_params&, std::string_view name, u32 prio, int detached = 0);
@ -257,6 +258,7 @@ public:
void cmd_pop(u32 = 0);
cmd64 cmd_wait(); // Empty command means caller must return, like true from cpu_thread::check_state().
cmd64 cmd_get(u32 index) { return cmd_queue[cmd_queue.peek() + index].load(); }
atomic_t<u32> cmd_notify = 0;
const ppu_func_opd_t entry_func;
u64 start_time{0}; // Sleep start timepoint

View File

@ -20,7 +20,7 @@ inline void try_start(spu_thread& spu)
}).second)
{
spu.state -= cpu_flag::stop;
thread_ctrl::notify(static_cast<named_thread<spu_thread>&>(spu));
spu.state.notify_one(cpu_flag::stop);
}
};

View File

@ -9076,7 +9076,7 @@ struct spu_llvm
{
// Interrupt profiler thread and put it to sleep
static_cast<void>(prof_mutex.reset());
atomic_wait::list(registered).wait(); // TODO
thread_ctrl::wait_on(registered, nullptr);
continue;
}

View File

@ -3070,14 +3070,14 @@ bool spu_thread::process_mfc_cmd()
// Stall infinitely if MFC queue is full
while (mfc_size >= 16) [[unlikely]]
{
state += cpu_flag::wait;
auto old = state.add_fetch(cpu_flag::wait);
if (is_stopped())
if (is_stopped(old))
{
return false;
}
thread_ctrl::wait();
thread_ctrl::wait_on(state, old);
}
spu::scheduler::concurrent_execution_watchdog watchdog(*this);
@ -3672,12 +3672,14 @@ s64 spu_thread::get_ch_value(u32 ch)
return out;
}
if (is_stopped())
auto old = +state;
if (is_stopped(old))
{
return -1;
}
thread_ctrl::wait();
thread_ctrl::wait_on(state, old);
}
}
@ -3773,17 +3775,18 @@ s64 spu_thread::get_ch_value(u32 ch)
for (; !events.count; events = get_events(mask1, false, true))
{
if (is_paused())
const auto old = state.add_fetch(cpu_flag::wait);
if (is_stopped(old))
{
return -1;
}
if (is_paused(old))
{
// Ensure reservation data won't change while paused for debugging purposes
check_state();
}
state += cpu_flag::wait;
if (is_stopped())
{
return -1;
continue;
}
vm::reservation_notifier(raddr, 128).wait(rtime, -128, atomic_wait_timeout{100'000});
@ -3795,19 +3798,20 @@ s64 spu_thread::get_ch_value(u32 ch)
for (; !events.count; events = get_events(mask1, true, true))
{
if (is_paused())
{
check_state();
}
const auto old = state.add_fetch(cpu_flag::wait);
state += cpu_flag::wait;
if (is_stopped())
if (is_stopped(old))
{
return -1;
}
thread_ctrl::wait_for(100);
if (is_paused(old))
{
check_state();
continue;
}
thread_ctrl::wait_on(state, old, 100);
}
check_state();
@ -4209,7 +4213,7 @@ bool spu_thread::stop_and_signal(u32 code)
case 0x001:
{
state += cpu_flag::wait;
thread_ctrl::wait_for(1000); // hack
std::this_thread::sleep_for(1ms); // hack
check_state();
return true;
}
@ -4260,12 +4264,14 @@ bool spu_thread::stop_and_signal(u32 code)
_state >= SPU_THREAD_GROUP_STATUS_WAITING && _state <= SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED;
_state = group->run_state)
{
if (is_stopped())
const auto old = state.load();
if (is_stopped(old))
{
return false;
}
thread_ctrl::wait();
thread_ctrl::wait_on(state, old);
}
reader_lock rlock(id_manager::g_mutex);
@ -4337,23 +4343,21 @@ bool spu_thread::stop_and_signal(u32 code)
}
}
while (true)
while (auto old = state.fetch_sub(cpu_flag::signal))
{
if (is_stopped())
if (is_stopped(old))
{
// The thread group cannot be stopped while waiting for an event
ensure(!(state & cpu_flag::stop));
ensure(!(old & cpu_flag::stop));
return false;
}
if (!state.test_and_reset(cpu_flag::signal))
{
thread_ctrl::wait();
}
else
if (old & cpu_flag::signal)
{
break;
}
thread_ctrl::wait_on(state, old);
}
std::lock_guard lock(group->mutex);
@ -4375,7 +4379,7 @@ bool spu_thread::stop_and_signal(u32 code)
if (thread.get() != this)
{
thread_ctrl::notify(*thread);
thread->state.notify_one(cpu_flag::suspend);
}
}
}
@ -4476,12 +4480,14 @@ bool spu_thread::stop_and_signal(u32 code)
_state >= SPU_THREAD_GROUP_STATUS_WAITING && _state <= SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED;
_state = group->run_state)
{
if (is_stopped())
const auto old = +state;
if (is_stopped(old))
{
return false;
}
thread_ctrl::wait();
thread_ctrl::wait_on(state, old);
}
std::lock_guard lock(group->mutex);
@ -4515,12 +4521,8 @@ bool spu_thread::stop_and_signal(u32 code)
return true;
});
while (thread.get() != this && thread->state & cpu_flag::wait)
{
// TODO: replace with proper solution
if (atomic_wait_engine::raw_notify(nullptr, thread_ctrl::get_native_id(*thread)))
break;
}
if (thread.get() != this)
thread_ctrl::notify(*thread);
}
}

View File

@ -305,7 +305,7 @@ public:
return -1;
}
data.wait(bit_wait);
thread_ctrl::wait_on(data, bit_wait);
}
}
@ -345,7 +345,7 @@ public:
return false;
}
data.wait(state);
thread_ctrl::wait_on(data, state);
}
}

View File

@ -1361,11 +1361,7 @@ void lv2_obj::schedule_all()
ppu_log.trace("schedule(): %s", target->id);
target->state ^= (cpu_flag::signal + cpu_flag::suspend);
target->start_time = 0;
if (target != get_current_cpu_thread())
{
target->notify();
}
target->state.notify_one(cpu_flag::signal + cpu_flag::suspend);
}
}
}

View File

@ -246,11 +246,16 @@ error_code sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout)
return CELL_EPERM;
}
while (!ppu.state.test_and_reset(cpu_flag::signal))
while (auto state = ppu.state.fetch_sub(cpu_flag::signal))
{
if (ppu.is_stopped())
if (is_stopped(state))
{
return 0;
return {};
}
if (state & cpu_flag::signal)
{
break;
}
if (timeout)
@ -260,7 +265,7 @@ error_code sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout)
// Wait for rescheduling
if (ppu.check_state())
{
continue;
return {};
}
std::lock_guard lock(cond->mutex->mutex);
@ -291,7 +296,7 @@ error_code sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout)
}
else
{
thread_ctrl::wait();
thread_ctrl::wait_on(ppu.state, state);
}
}

View File

@ -87,7 +87,7 @@ CellError lv2_event_queue::send(lv2_event event)
spu.ch_in_mbox.set_values(4, CELL_OK, data1, data2, data3);
spu.state += cpu_flag::signal;
spu.notify();
spu.state.notify_one(cpu_flag::signal);
}
return {};
@ -216,7 +216,7 @@ error_code sys_event_queue_destroy(ppu_thread& ppu, u32 equeue_id, s32 mode)
{
static_cast<spu_thread&>(*cpu).ch_in_mbox.set_values(1, CELL_ECANCELED);
cpu->state += cpu_flag::signal;
cpu->notify();
cpu->state.notify_one(cpu_flag::signal);
}
}
}
@ -307,11 +307,11 @@ error_code sys_event_queue_receive(ppu_thread& ppu, u32 equeue_id, vm::ptr<sys_e
}
// If cancelled, gpr[3] will be non-zero. Other registers must contain event data.
while (!ppu.state.test_and_reset(cpu_flag::signal))
while (auto state = ppu.state.fetch_sub(cpu_flag::signal))
{
if (ppu.is_stopped())
if (is_stopped(state) || state & cpu_flag::signal)
{
return 0;
break;
}
if (timeout)
@ -321,7 +321,7 @@ error_code sys_event_queue_receive(ppu_thread& ppu, u32 equeue_id, vm::ptr<sys_e
// Wait for rescheduling
if (ppu.check_state())
{
return 0;
return {};
}
std::lock_guard lock(queue->mutex);
@ -337,7 +337,7 @@ error_code sys_event_queue_receive(ppu_thread& ppu, u32 equeue_id, vm::ptr<sys_e
}
else
{
thread_ctrl::wait();
thread_ctrl::wait_on(ppu.state, state);
}
}

View File

@ -161,11 +161,16 @@ error_code sys_event_flag_wait(ppu_thread& ppu, u32 id, u64 bitptn, u32 mode, vm
return CELL_OK;
}
while (!ppu.state.test_and_reset(cpu_flag::signal))
while (auto state = ppu.state.fetch_sub(cpu_flag::signal))
{
if (ppu.is_stopped())
if (is_stopped(state))
{
return 0;
return {};
}
if (state & cpu_flag::signal)
{
break;
}
if (timeout)
@ -175,7 +180,7 @@ error_code sys_event_flag_wait(ppu_thread& ppu, u32 id, u64 bitptn, u32 mode, vm
// Wait for rescheduling
if (ppu.check_state())
{
return 0;
return {};
}
std::lock_guard lock(flag->mutex);
@ -193,7 +198,7 @@ error_code sys_event_flag_wait(ppu_thread& ppu, u32 id, u64 bitptn, u32 mode, vm
}
else
{
thread_ctrl::wait();
thread_ctrl::wait_on(ppu.state, state);
}
}

View File

@ -19,7 +19,8 @@ void lv2_int_serv::exec()
{ ppu_cmd::sleep, 0 }
});
thread_ctrl::notify(*thread);
thread->cmd_notify++;
thread->cmd_notify.notify_one();
}
bool ppu_thread_exit(ppu_thread& ppu);
@ -32,7 +33,8 @@ void lv2_int_serv::join()
std::bit_cast<u64>(&ppu_thread_exit)
});
thread_ctrl::notify(*thread);
thread->cmd_notify++;
thread->cmd_notify.notify_one();
(*thread)();
idm::remove_verify<named_thread<ppu_thread>>(thread->id, static_cast<std::weak_ptr<named_thread<ppu_thread>>>(thread));
@ -114,7 +116,7 @@ error_code _sys_interrupt_thread_establish(ppu_thread& ppu, vm::ptr<u32> ih, u32
result = std::make_shared<lv2_int_serv>(it, arg1, arg2);
tag->handler = result;
it->state -= cpu_flag::stop;
thread_ctrl::notify(*it);
it->state.notify_one(cpu_flag::stop);
return result;
});

View File

@ -332,11 +332,16 @@ error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id
return CELL_ESRCH;
}
while (!ppu.state.test_and_reset(cpu_flag::signal))
while (auto state = ppu.state.fetch_sub(cpu_flag::signal))
{
if (ppu.is_stopped())
if (is_stopped(state))
{
return 0;
return {};
}
if (state & cpu_flag::signal)
{
break;
}
if (timeout)
@ -346,7 +351,7 @@ error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id
// Wait for rescheduling
if (ppu.check_state())
{
return 0;
return {};
}
std::lock_guard lock(cond->mutex);
@ -372,7 +377,7 @@ error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id
}
else
{
thread_ctrl::wait();
thread_ctrl::wait_on(ppu.state, state);
}
}

View File

@ -73,11 +73,11 @@ error_code _sys_lwmutex_destroy(ppu_thread& ppu, u32 lwmutex_id)
continue;
}
mutex->lwcond_waiters.wait(old);
thread_ctrl::wait_on(mutex->lwcond_waiters, old);
if (ppu.is_stopped())
{
return 0;
return {};
}
}
else
@ -153,11 +153,16 @@ error_code _sys_lwmutex_lock(ppu_thread& ppu, u32 lwmutex_id, u64 timeout)
return not_an_error(ppu.gpr[3]);
}
while (!ppu.state.test_and_reset(cpu_flag::signal))
while (auto state = ppu.state.fetch_sub(cpu_flag::signal))
{
if (ppu.is_stopped())
if (is_stopped(state))
{
return 0;
return {};
}
if (state & cpu_flag::signal)
{
break;
}
if (timeout)
@ -167,7 +172,7 @@ error_code _sys_lwmutex_lock(ppu_thread& ppu, u32 lwmutex_id, u64 timeout)
// Wait for rescheduling
if (ppu.check_state())
{
return 0;
return {};
}
std::lock_guard lock(mutex->mutex);
@ -183,7 +188,7 @@ error_code _sys_lwmutex_lock(ppu_thread& ppu, u32 lwmutex_id, u64 timeout)
}
else
{
thread_ctrl::wait();
thread_ctrl::wait_on(ppu.state, state);
}
}

View File

@ -770,7 +770,7 @@ error_code mmapper_thread_recover_page_fault(cpu_thread* cpu)
else
{
cpu->state += cpu_flag::signal;
cpu->notify();
cpu->state.notify_one(cpu_flag::signal);
}
return CELL_OK;

View File

@ -165,11 +165,11 @@ error_code sys_mutex_lock(ppu_thread& ppu, u32 mutex_id, u64 timeout)
ppu.gpr[3] = CELL_OK;
while (!ppu.state.test_and_reset(cpu_flag::signal))
while (auto state = ppu.state.fetch_sub(cpu_flag::signal))
{
if (ppu.is_stopped())
if (is_stopped(state) || state & cpu_flag::signal)
{
return 0;
break;
}
if (timeout)
@ -179,7 +179,7 @@ error_code sys_mutex_lock(ppu_thread& ppu, u32 mutex_id, u64 timeout)
// Wait for rescheduling
if (ppu.check_state())
{
return 0;
return {};
}
std::lock_guard lock(mutex->mutex);
@ -195,7 +195,7 @@ error_code sys_mutex_lock(ppu_thread& ppu, u32 mutex_id, u64 timeout)
}
else
{
thread_ctrl::wait();
thread_ctrl::wait_on(ppu.state, state);
}
}

View File

@ -1349,14 +1349,19 @@ error_code sys_net_bnet_accept(ppu_thread& ppu, s32 s, vm::ptr<sys_net_sockaddr>
if (!sock.ret)
{
while (!ppu.state.test_and_reset(cpu_flag::signal))
while (auto state = ppu.state.fetch_sub(cpu_flag::signal))
{
if (ppu.is_stopped())
if (is_stopped(state))
{
return 0;
return {};
}
thread_ctrl::wait();
if (state & cpu_flag::signal)
{
break;
}
thread_ctrl::wait_on(ppu.state, state);
}
if (result)
@ -1377,7 +1382,7 @@ error_code sys_net_bnet_accept(ppu_thread& ppu, s32 s, vm::ptr<sys_net_sockaddr>
if (ppu.is_stopped())
{
return 0;
return {};
}
auto newsock = std::make_shared<lv2_socket>(native_socket, 0, 0);
@ -1777,14 +1782,19 @@ error_code sys_net_bnet_connect(ppu_thread& ppu, s32 s, vm::ptr<sys_net_sockaddr
if (!sock.ret)
{
while (!ppu.state.test_and_reset(cpu_flag::signal))
while (auto state = ppu.state.fetch_sub(cpu_flag::signal))
{
if (ppu.is_stopped())
if (is_stopped(state))
{
return 0;
return {};
}
thread_ctrl::wait();
if (state & cpu_flag::signal)
{
break;
}
thread_ctrl::wait_on(ppu.state, state);
}
if (result)
@ -2458,14 +2468,19 @@ error_code sys_net_bnet_recvfrom(ppu_thread& ppu, s32 s, vm::ptr<void> buf, u32
if (!sock.ret)
{
while (!ppu.state.test_and_reset(cpu_flag::signal))
while (auto state = ppu.state.fetch_sub(cpu_flag::signal))
{
if (ppu.is_stopped())
if (is_stopped(state))
{
return 0;
return {};
}
thread_ctrl::wait();
if (state & cpu_flag::signal)
{
break;
}
thread_ctrl::wait_on(ppu.state, state);
}
if (result)
@ -2487,7 +2502,7 @@ error_code sys_net_bnet_recvfrom(ppu_thread& ppu, s32 s, vm::ptr<void> buf, u32
if (ppu.is_stopped())
{
return 0;
return {};
}
// addr is set earlier for P2P socket
@ -2734,14 +2749,16 @@ error_code sys_net_bnet_sendto(ppu_thread& ppu, s32 s, vm::cptr<void> buf, u32 l
if (!sock.ret)
{
while (!ppu.state.test_and_reset(cpu_flag::signal))
while (true)
{
if (ppu.is_stopped())
const auto state = ppu.state.fetch_sub(cpu_flag::signal);
if (is_stopped(state) || state & cpu_flag::signal)
{
return 0;
break;
}
thread_ctrl::wait();
thread_ctrl::wait_on(ppu.state, state);
}
if (result)
@ -3349,11 +3366,16 @@ error_code sys_net_bnet_poll(ppu_thread& ppu, vm::ptr<sys_net_pollfd> fds, s32 n
lv2_obj::sleep(ppu, timeout);
}
while (!ppu.state.test_and_reset(cpu_flag::signal))
while (auto state = ppu.state.fetch_sub(cpu_flag::signal))
{
if (ppu.is_stopped())
if (is_stopped(state))
{
return 0;
return {};
}
if (state & cpu_flag::signal)
{
break;
}
if (timeout)
@ -3363,7 +3385,7 @@ error_code sys_net_bnet_poll(ppu_thread& ppu, vm::ptr<sys_net_pollfd> fds, s32 n
// Wait for rescheduling
if (ppu.check_state())
{
return 0;
return {};
}
std::lock_guard nw_lock(g_fxo->get<network_context>()->s_nw_mutex);
@ -3379,7 +3401,7 @@ error_code sys_net_bnet_poll(ppu_thread& ppu, vm::ptr<sys_net_pollfd> fds, s32 n
}
else
{
thread_ctrl::wait();
thread_ctrl::wait_on(ppu.state, state);
}
}
@ -3568,11 +3590,16 @@ error_code sys_net_bnet_select(ppu_thread& ppu, s32 nfds, vm::ptr<sys_net_fd_set
return -SYS_NET_EINVAL;
}
while (!ppu.state.test_and_reset(cpu_flag::signal))
while (auto state = ppu.state.fetch_sub(cpu_flag::signal))
{
if (ppu.is_stopped())
if (is_stopped(state))
{
return 0;
return {};
}
if (state & cpu_flag::signal)
{
break;
}
if (timeout)
@ -3582,7 +3609,7 @@ error_code sys_net_bnet_select(ppu_thread& ppu, s32 nfds, vm::ptr<sys_net_fd_set
// Wait for rescheduling
if (ppu.check_state())
{
return 0;
return {};
}
std::lock_guard nw_lock(g_fxo->get<network_context>()->s_nw_mutex);
@ -3598,15 +3625,10 @@ error_code sys_net_bnet_select(ppu_thread& ppu, s32 nfds, vm::ptr<sys_net_fd_set
}
else
{
thread_ctrl::wait();
thread_ctrl::wait_on(ppu.state, state);
}
}
if (ppu.is_stopped())
{
return 0;
}
if (readfds)
*readfds = rread;
if (writefds)

View File

@ -185,7 +185,7 @@ error_code sys_ppu_thread_join(ppu_thread& ppu, u32 thread_id, vm::ptr<u64> vptr
if (ppu.test_stopped())
{
return 0;
return {};
}
// Get the exit status from the register
@ -533,7 +533,8 @@ error_code sys_ppu_thread_start(ppu_thread& ppu, u32 thread_id)
}
else
{
thread_ctrl::notify(*thread);
thread->cmd_notify++;
thread->cmd_notify.notify_one();
// Dirty hack for sound: confirm the creation of _mxr000 event queue
if (*thread->ppu_tname.load() == "_cellsurMixerMain"sv)
@ -548,7 +549,7 @@ error_code sys_ppu_thread_start(ppu_thread& ppu, u32 thread_id)
{
if (ppu.is_stopped())
{
return 0;
return {};
}
thread_ctrl::wait_for(50000);

View File

@ -350,9 +350,14 @@ void _sys_process_exit(ppu_thread& ppu, s32 status, u32 arg2, u32 arg3)
});
// Wait for GUI thread
while (!ppu.is_stopped())
while (auto state = +ppu.state)
{
thread_ctrl::wait();
if (is_stopped(state))
{
break;
}
thread_ctrl::wait_on(ppu.state, state);
}
}
@ -437,9 +442,14 @@ void _sys_process_exit2(ppu_thread& ppu, s32 status, vm::ptr<sys_exit2_param> ar
});
// Wait for GUI thread
while (!ppu.is_stopped())
while (auto state = +ppu.state)
{
thread_ctrl::wait();
if (is_stopped(state))
{
break;
}
thread_ctrl::wait_on(ppu.state, state);
}
}

View File

@ -65,7 +65,7 @@ void lv2_rsx_config::send_event(u64 data1, u64 event_flags, u64 data3) const
thread_ctrl::wait_for(100);
if (cpu && cpu->id_type() == 0x55)
cpu->cpu_wait();
cpu->cpu_wait({});
if (Emu.IsStopped() || (cpu && cpu->check_state()))
{

View File

@ -127,11 +127,16 @@ error_code sys_rwlock_rlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout)
ppu.gpr[3] = CELL_OK;
while (!ppu.state.test_and_reset(cpu_flag::signal))
while (auto state = ppu.state.fetch_sub(cpu_flag::signal))
{
if (ppu.is_stopped())
if (is_stopped(state))
{
return 0;
return {};
}
if (state & cpu_flag::signal)
{
break;
}
if (timeout)
@ -141,7 +146,7 @@ error_code sys_rwlock_rlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout)
// Wait for rescheduling
if (ppu.check_state())
{
return 0;
return {};
}
std::lock_guard lock(rwlock->mutex);
@ -157,7 +162,7 @@ error_code sys_rwlock_rlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout)
}
else
{
thread_ctrl::wait();
thread_ctrl::wait_on(ppu.state, state);
}
}
@ -329,11 +334,16 @@ error_code sys_rwlock_wlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout)
ppu.gpr[3] = CELL_OK;
while (!ppu.state.test_and_reset(cpu_flag::signal))
while (auto state = ppu.state.fetch_sub(cpu_flag::signal))
{
if (ppu.is_stopped())
if (is_stopped(state))
{
return 0;
return {};
}
if (state & cpu_flag::signal)
{
break;
}
if (timeout)
@ -343,7 +353,7 @@ error_code sys_rwlock_wlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout)
// Wait for rescheduling
if (ppu.check_state())
{
return 0;
return {};
}
std::lock_guard lock(rwlock->mutex);
@ -381,7 +391,7 @@ error_code sys_rwlock_wlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout)
}
else
{
thread_ctrl::wait();
thread_ctrl::wait_on(ppu.state, state);
}
}

View File

@ -126,11 +126,16 @@ error_code sys_semaphore_wait(ppu_thread& ppu, u32 sem_id, u64 timeout)
ppu.gpr[3] = CELL_OK;
while (!ppu.state.test_and_reset(cpu_flag::signal))
while (auto state = ppu.state.fetch_sub(cpu_flag::signal))
{
if (ppu.is_stopped())
if (is_stopped(state))
{
return 0;
return {};
}
if (state & cpu_flag::signal)
{
break;
}
if (timeout)
@ -140,7 +145,7 @@ error_code sys_semaphore_wait(ppu_thread& ppu, u32 sem_id, u64 timeout)
// Wait for rescheduling
if (ppu.check_state())
{
return 0;
return {};
}
std::lock_guard lock(sem->mutex);
@ -164,7 +169,7 @@ error_code sys_semaphore_wait(ppu_thread& ppu, u32 sem_id, u64 timeout)
}
else
{
thread_ctrl::wait();
thread_ctrl::wait_on(ppu.state, state);
}
}

View File

@ -762,7 +762,7 @@ error_code sys_spu_thread_group_start(ppu_thread& ppu, u32 id)
if (thread && ran_threads--)
{
thread->state -= cpu_flag::stop;
thread_ctrl::notify(*thread);
thread->state.notify_one(cpu_flag::stop);
}
}
@ -912,7 +912,7 @@ error_code sys_spu_thread_group_resume(ppu_thread& ppu, u32 id)
if (thread)
{
thread->state -= cpu_flag::suspend;
thread_ctrl::notify(*thread);
thread->state.notify_one(cpu_flag::suspend);
}
}
@ -1029,9 +1029,7 @@ error_code sys_spu_thread_group_terminate(ppu_thread& ppu, u32 id, s32 value)
{
while (thread && group->running && thread->state & cpu_flag::wait)
{
// TODO: replace with proper solution
if (atomic_wait_engine::raw_notify(nullptr, thread_ctrl::get_native_id(*thread)))
break;
thread_ctrl::notify(*thread);
}
}
@ -1103,14 +1101,16 @@ error_code sys_spu_thread_group_join(ppu_thread& ppu, u32 id, vm::ptr<u32> cause
lv2_obj::sleep(ppu);
lock.unlock();
while (!ppu.state.test_and_reset(cpu_flag::signal))
while (true)
{
if (ppu.is_stopped())
const auto state = ppu.state.fetch_sub(cpu_flag::signal);
if (is_stopped(state) || state & cpu_flag::signal)
{
return 0;
break;
}
thread_ctrl::wait();
thread_ctrl::wait_on(ppu.state, state);
}
}
while (0);

View File

@ -310,6 +310,22 @@ public:
u64 remaining;
const u64 start_time = get_system_time();
auto wait_for = [cpu](u64 timeout)
{
atomic_bs_t<cpu_flag> dummy{};
auto& state = cpu ? cpu->state : dummy;
const auto old = +state;
if (old & cpu_flag::signal)
{
return true;
}
thread_ctrl::wait_on(state, old, timeout);
return false;
};
while (usec >= passed)
{
remaining = usec - passed;
@ -322,10 +338,10 @@ public:
constexpr u64 host_min_quantum = 500;
#endif
// TODO: Tune for other non-Windows operating systems
bool escape = false;
if (g_cfg.core.sleep_timers_accuracy < (IsUsleep ? sleep_timers_accuracy_level::_usleep : sleep_timers_accuracy_level::_all_timers))
{
thread_ctrl::wait_for(remaining, !IsUsleep);
escape = wait_for(remaining);
}
else
{
@ -333,10 +349,10 @@ public:
{
#ifdef __linux__
// Do not wait for the last quantum to avoid loss of accuracy
thread_ctrl::wait_for(remaining - ((remaining % host_min_quantum) + host_min_quantum), !IsUsleep);
escape = wait_for(remaining - ((remaining % host_min_quantum) + host_min_quantum));
#else
// Wait in multiples of the min quantum for large durations to avoid overloading low-thread CPUs
thread_ctrl::wait_for(remaining - (remaining % host_min_quantum), !IsUsleep);
escape = wait_for(remaining - (remaining % host_min_quantum));
#endif
}
else
@ -351,7 +367,7 @@ public:
return false;
}
if (cpu && cpu->state & cpu_flag::signal)
if (escape)
{
return false;
}

View File

@ -18,7 +18,9 @@ void lv2_timer_context::operator()()
{
while (thread_ctrl::state() != thread_state::aborting)
{
if (state == SYS_TIMER_STATE_RUN)
const u32 _state = +state;
if (_state == SYS_TIMER_STATE_RUN)
{
const u64 _now = get_guest_system_time();
u64 next = expire;
@ -55,7 +57,7 @@ void lv2_timer_context::operator()()
continue;
}
thread_ctrl::wait();
thread_ctrl::wait_on(state, _state);
}
}
@ -166,7 +168,7 @@ error_code _sys_timer_start(ppu_thread& ppu, u32 timer_id, u64 base_time, u64 pe
timer.state = SYS_TIMER_STATE_RUN;
lock.unlock();
thread_ctrl::notify(timer);
timer.state.notify_one();
return {};
});
@ -301,11 +303,6 @@ error_code sys_timer_usleep(ppu_thread& ppu, u64 sleep_time)
lv2_obj::sleep(ppu, sleep_time);
lv2_obj::wait_timeout<true>(sleep_time);
if (ppu.is_stopped())
{
return 0;
}
}
else
{

View File

@ -722,14 +722,19 @@ error_code sys_usbd_receive_event(ppu_thread& ppu, u32 handle, vm::ptr<u64> arg1
usbh->sq.emplace_back(&ppu);
}
while (!ppu.state.test_and_reset(cpu_flag::signal))
while (auto state = ppu.state.fetch_sub(cpu_flag::signal))
{
if (ppu.is_stopped())
if (is_stopped(state))
{
return 0;
return {};
}
thread_ctrl::wait();
if (state & cpu_flag::signal)
{
break;
}
thread_ctrl::wait_on(ppu.state, state);
}
*arg1 = ppu.gpr[4];

View File

@ -747,7 +747,7 @@ bool gdb_thread::cmd_vcont(gdb_cmd& cmd)
if (Emu.IsPaused()) {
Emu.Resume();
} else {
thread_ctrl::notify(*ppu);
ppu->state.notify_one();
}
wait_with_interrupts();
//we are in all-stop mode

View File

@ -530,7 +530,7 @@ namespace rsx
on_exit();
}
void thread::cpu_wait()
void thread::cpu_wait(bs_t<cpu_flag>)
{
if (external_interrupt_lock)
{
@ -604,7 +604,7 @@ namespace rsx
{ ppu_cmd::sleep, 0 }
});
thread_ctrl::notify(*intr_thread);
intr_thread->cmd_notify.notify_one();
}
}
else
@ -3079,7 +3079,8 @@ namespace rsx
{ ppu_cmd::sleep, 0 }
});
thread_ctrl::notify(*intr_thread);
intr_thread->cmd_notify++;
intr_thread->cmd_notify.notify_one();
}
}

View File

@ -657,7 +657,7 @@ namespace rsx
u32 get_fifo_cmd() const;
std::string dump_regs() const override;
void cpu_wait() override;
void cpu_wait(bs_t<cpu_flag> old) override;
// Performance approximation counters
struct

View File

@ -90,7 +90,7 @@ namespace rsx
while (rsx->is_paused())
{
rsx->cpu_wait();
rsx->cpu_wait({});
}
// Reset
@ -107,7 +107,7 @@ namespace rsx
}
}
rsx->cpu_wait();
rsx->cpu_wait({});
}
rsx->fifo_wake_delay();
@ -1608,7 +1608,8 @@ namespace rsx
{ ppu_cmd::sleep, 0 }
});
thread_ctrl::notify(*rsx->intr_thread);
rsx->intr_thread->cmd_notify++;
rsx->intr_thread->cmd_notify.notify_one();
}
}

View File

@ -582,7 +582,7 @@ bool Emulator::BootRsxCapture(const std::string& path)
auto replay_thr = g_fxo->init<named_thread<rsx::rsx_replay_thread>>("RSX Replay"sv, std::move(frame));
replay_thr->state -= cpu_flag::stop;
thread_ctrl::notify(*replay_thr);
replay_thr->state.notify_one(cpu_flag::stop);
return true;
}
@ -1697,16 +1697,16 @@ void Emulator::Run(bool start_playtime)
ConfigureLogs();
// Run main thread
idm::check<named_thread<ppu_thread>>(ppu_thread::id_base, [](cpu_thread& cpu)
idm::check<named_thread<ppu_thread>>(ppu_thread::id_base, [](named_thread<ppu_thread>& cpu)
{
cpu.state -= cpu_flag::stop;
cpu.notify();
ensure(cpu.state.test_and_reset(cpu_flag::stop));
cpu.state.notify_one(cpu_flag::stop);
});
if (auto thr = g_fxo->get<named_thread<rsx::rsx_replay_thread>>())
{
thr->state -= cpu_flag::stop;
thread_ctrl::notify(*thr);
thr->state.notify_one(cpu_flag::stop);
}
if (g_cfg.misc.prevent_display_sleep)
@ -1811,7 +1811,7 @@ void Emulator::Resume()
auto on_select = [](u32, cpu_thread& cpu)
{
cpu.state -= cpu_flag::dbg_global_pause;
cpu.notify();
cpu.state.notify_one(cpu_flag::dbg_global_pause);
};
idm::select<named_thread<ppu_thread>>(on_select);

View File

@ -161,7 +161,7 @@ debugger_frame::debugger_frame(std::shared_ptr<gui_settings> settings, QWidget *
// Notify only if no pause flags are set after this change
if (!(old & s_pause_flags))
{
cpu->notify();
cpu->state.notify_one(s_pause_flags);
}
}
UpdateUI();
@ -852,7 +852,7 @@ void debugger_frame::DoStep(bool stepOver)
if (!should_step_over) state += cpu_flag::dbg_step;
});
cpu->notify();
cpu->state.notify_one(s_pause_flags);
}
}