
sys_cond, sys_mutex

Author: Nekotekina
Date:   2017-02-02 20:47:25 +03:00
Parent: c4cc3ad81e
Commit: 16944b0c4c
8 changed files with 415 additions and 271 deletions

View File

@@ -11,6 +11,18 @@
 #include <errno.h>
 #endif
 
+template <>
+void fmt_class_string<std::pair<const fmt_type_info*, u64>>::format(std::string& out, u64 arg)
+{
+    // Dynamic format arg
+    const auto& pair = get_object(arg);
+
+    if (pair.first)
+    {
+        pair.first->fmt_string(out, pair.second);
+    }
+}
+
 void fmt_class_string<const void*>::format(std::string& out, u64 arg)
 {
     if (arg)

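The specialization added here gives the formatter a "dynamic" argument: the value travels as a (fmt_type_info*, u64) pair and is only rendered if a formatter is attached, which is what error_report() in the System.cpp hunk below relies on when it passes std::make_pair(sup, arg) for the trailing %s. A minimal standalone sketch of the same idea, using invented names rather than RPCS3's fmt internals:

    #include <cstdint>
    #include <string>
    #include <utility>

    // Hypothetical stand-in for fmt_type_info: it only knows how to print one erased value.
    struct type_info_stub
    {
        void (*fmt_string)(std::string& out, std::uint64_t arg);
    };

    // Same shape as the new specialization: append something only if a formatter is present.
    void format_dynamic(std::string& out, const std::pair<const type_info_stub*, std::uint64_t>& pair)
    {
        if (pair.first)
        {
            pair.first->fmt_string(out, pair.second);
        }
    }
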
View File

@@ -8,241 +8,277 @@
 #include "sys_mutex.h"
 #include "sys_cond.h"
 
-#include <algorithm>
-
 namespace vm { using namespace ps3; }
 
 logs::channel sys_cond("sys_cond", logs::level::notice);
 
 extern u64 get_system_time();
 
-void lv2_cond::notify(lv2_lock_t, cpu_thread* thread)
-{
-    if (mutex->owner)
-    {
-        // add thread to the mutex sleep queue if cannot lock immediately
-        mutex->sq.emplace_back(thread);
-    }
-    else
-    {
-        mutex->owner = idm::get<ppu_thread>(thread->id);
-        thread->set_signal();
-    }
-}
-
-s32 sys_cond_create(vm::ptr<u32> cond_id, u32 mutex_id, vm::ptr<sys_cond_attribute_t> attr)
+error_code sys_cond_create(vm::ptr<u32> cond_id, u32 mutex_id, vm::ptr<sys_cond_attribute_t> attr)
 {
     sys_cond.warning("sys_cond_create(cond_id=*0x%x, mutex_id=0x%x, attr=*0x%x)", cond_id, mutex_id, attr);
 
-    LV2_LOCK;
-
-    const auto mutex = idm::get<lv2_obj, lv2_mutex>(mutex_id);
-
-    if (!mutex)
-    {
-        return CELL_ESRCH;
-    }
-
     if (attr->pshared != SYS_SYNC_NOT_PROCESS_SHARED || attr->ipc_key || attr->flags)
     {
         sys_cond.error("sys_cond_create(): unknown attributes (pshared=0x%x, ipc_key=0x%llx, flags=0x%x)", attr->pshared, attr->ipc_key, attr->flags);
         return CELL_EINVAL;
     }
 
-    if (!++mutex->cond_count)
+    auto mutex = idm::get<lv2_obj, lv2_mutex>(mutex_id);
+
+    if (!mutex)
     {
-        fmt::throw_exception("Unexpected cond_count" HERE);
+        return CELL_ESRCH;
     }
 
-    *cond_id = idm::make<lv2_obj, lv2_cond>(mutex, attr->name_u64);
+    if (const u32 id = idm::make<lv2_obj, lv2_cond>(attr->name_u64, std::move(mutex)))
+    {
+        *cond_id = id;
+        return CELL_OK;
+    }
 
-    return CELL_OK;
+    return CELL_EAGAIN;
 }
 
-s32 sys_cond_destroy(u32 cond_id)
+error_code sys_cond_destroy(u32 cond_id)
 {
     sys_cond.warning("sys_cond_destroy(cond_id=0x%x)", cond_id);
 
-    LV2_LOCK;
-
-    const auto cond = idm::get<lv2_obj, lv2_cond>(cond_id);
+    const auto cond = idm::withdraw<lv2_obj, lv2_cond>(cond_id, [&](lv2_cond& cond) -> CellError
+    {
+        if (cond.waiters)
+        {
+            return CELL_EBUSY;
+        }
+
+        cond.mutex->cond_count--;
+        return {};
+    });
 
     if (!cond)
     {
         return CELL_ESRCH;
     }
 
-    if (!cond->sq.empty())
+    if (cond.ret)
     {
-        return CELL_EBUSY;
+        return cond.ret;
     }
 
-    if (!cond->mutex->cond_count--)
-    {
-        fmt::throw_exception("Unexpected cond_count" HERE);
-    }
-
-    idm::remove<lv2_obj, lv2_cond>(cond_id);
-
     return CELL_OK;
 }
 
-s32 sys_cond_signal(u32 cond_id)
+error_code sys_cond_signal(u32 cond_id)
 {
     sys_cond.trace("sys_cond_signal(cond_id=0x%x)", cond_id);
 
-    LV2_LOCK;
-
-    const auto cond = idm::get<lv2_obj, lv2_cond>(cond_id);
+    const auto cond = idm::check<lv2_obj, lv2_cond>(cond_id, [](lv2_cond& cond) -> cpu_thread*
+    {
+        if (cond.waiters)
+        {
+            semaphore_lock lock(cond.mutex->mutex);
+
+            if (const auto cpu = cond.schedule<ppu_thread>(cond.sq, cond.mutex->protocol))
+            {
+                cond.waiters--;
+
+                if (cond.mutex->try_own(*cpu, cpu->id))
+                {
+                    return cpu;
+                }
+            }
+        }
+
+        return nullptr;
+    });
 
     if (!cond)
     {
         return CELL_ESRCH;
     }
 
-    // signal one waiting thread; protocol is ignored in current implementation
-    if (!cond->sq.empty())
+    if (cond.ret)
     {
-        cond->notify(lv2_lock, cond->sq.front());
-        cond->sq.pop_front();
+        cond.ret->set_signal();
     }
 
     return CELL_OK;
 }
 
-s32 sys_cond_signal_all(u32 cond_id)
+error_code sys_cond_signal_all(u32 cond_id)
 {
     sys_cond.trace("sys_cond_signal_all(cond_id=0x%x)", cond_id);
 
-    LV2_LOCK;
-
-    const auto cond = idm::get<lv2_obj, lv2_cond>(cond_id);
+    const auto cond = idm::check<lv2_obj, lv2_cond>(cond_id, [](lv2_cond& cond)
+    {
+        cpu_thread* result = nullptr;
+
+        if (cond.waiters)
+        {
+            semaphore_lock lock(cond.mutex->mutex);
+
+            while (const auto cpu = cond.schedule<ppu_thread>(cond.sq, cond.mutex->protocol))
+            {
+                cond.waiters--;
+
+                if (cond.mutex->try_own(*cpu, cpu->id))
+                {
+                    result = cpu;
+                }
+            }
+        }
+
+        return result;
+    });
 
     if (!cond)
     {
         return CELL_ESRCH;
     }
 
-    // signal all waiting threads; protocol is ignored in current implementation
-    for (auto& thread : cond->sq)
+    if (cond.ret)
     {
-        cond->notify(lv2_lock, thread);
+        cond.ret->set_signal();
     }
 
-    cond->sq.clear();
-
     return CELL_OK;
 }
 
-s32 sys_cond_signal_to(u32 cond_id, u32 thread_id)
+error_code sys_cond_signal_to(u32 cond_id, u32 thread_id)
 {
     sys_cond.trace("sys_cond_signal_to(cond_id=0x%x, thread_id=0x%x)", cond_id, thread_id);
 
-    LV2_LOCK;
-
-    const auto cond = idm::get<lv2_obj, lv2_cond>(cond_id);
+    const auto cond = idm::check<lv2_obj, lv2_cond>(cond_id, [&](lv2_cond& cond) -> cpu_thread*
+    {
+        if (cond.waiters)
+        {
+            semaphore_lock lock(cond.mutex->mutex);
+
+            for (auto cpu : cond.sq)
+            {
+                if (cpu->id == thread_id)
+                {
+                    verify(HERE), cond.unqueue(cond.sq, cpu), cond.waiters--;
+
+                    if (cond.mutex->try_own(*cpu, cpu->id))
+                    {
+                        return cpu;
+                    }
+
+                    return (cpu_thread*)(1);
+                }
+            }
+        }
+
+        return nullptr;
+    });
 
     if (!cond)
     {
         return CELL_ESRCH;
     }
 
-    const auto found = std::find_if(cond->sq.begin(), cond->sq.end(), [=](cpu_thread* thread)
+    if (cond.ret && cond.ret != (cpu_thread*)(1))
     {
-        return thread->id == thread_id;
-    });
-
-    // TODO: check if CELL_ESRCH is returned if thread_id is invalid
-    if (found == cond->sq.end())
+        cond.ret->set_signal();
+    }
+    else if (!cond.ret)
     {
-        return CELL_EPERM;
+        return not_an_error(CELL_EPERM);
     }
 
-    // signal specified thread
-    cond->notify(lv2_lock, *found);
-    cond->sq.erase(found);
-
     return CELL_OK;
 }
 
-s32 sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout)
+error_code sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout)
 {
     sys_cond.trace("sys_cond_wait(cond_id=0x%x, timeout=%lld)", cond_id, timeout);
 
     const u64 start_time = get_system_time();
 
-    LV2_LOCK;
-
-    const auto cond = idm::get<lv2_obj, lv2_cond>(cond_id);
+    const auto cond = idm::get<lv2_obj, lv2_cond>(cond_id, [&](lv2_cond& cond)
+    {
+        // Add a "promise" to add a waiter
+        cond.waiters++;
+
+        // Save the recursive value
+        return cond.mutex->lock_count.load();
+    });
 
     if (!cond)
     {
         return CELL_ESRCH;
     }
 
-    // check current ownership
-    if (cond->mutex->owner.get() != &ppu)
+    // Verify ownership
+    if (cond->mutex->owner >> 1 != ppu.id)
     {
+        // Awww
+        cond->waiters--;
+
         return CELL_EPERM;
     }
+    else
+    {
+        semaphore_lock lock(cond->mutex->mutex);
 
-    // save the recursive value
-    const u32 recursive_value = cond->mutex->recursive_count.exchange(0);
+        // Register waiter
+        cond->sq.emplace_back(&ppu);
 
-    // unlock the mutex
-    cond->mutex->unlock(lv2_lock);
+        // Unlock the mutex
+        cond->mutex->lock_count = 0;
+        cond->mutex->reown<ppu_thread>();
 
-    // add waiter; protocol is ignored in current implementation
-    sleep_entry<cpu_thread> waiter(cond->sq, ppu);
+        // Further function result
+        ppu.gpr[3] = CELL_OK;
+    }
 
-    // potential mutex waiter (not added immediately)
-    sleep_entry<cpu_thread> mutex_waiter(cond->mutex->sq, ppu, defer_sleep);
-
+    // SLEEP
     while (!ppu.state.test_and_reset(cpu_flag::signal))
     {
-        CHECK_EMU_STATUS;
-
-        // timeout is ignored if waiting on the cond var is already dropped
-        if (timeout && waiter)
+        if (timeout)
         {
             const u64 passed = get_system_time() - start_time;
 
             if (passed >= timeout)
             {
-                // try to reown mutex and exit if timed out
-                if (!cond->mutex->owner)
+                semaphore_lock lock(cond->mutex->mutex);
+
+                // Try to cancel the waiting
+                if (cond->unqueue(cond->sq, &ppu))
                 {
-                    cond->mutex->owner = idm::get<ppu_thread>(ppu.id);
-                    break;
+                    cond->waiters--;
+
+                    ppu.gpr[3] = CELL_ETIMEDOUT;
+
+                    // Own or requeue
+                    if (cond->mutex->try_own(ppu, ppu.id))
+                    {
+                        break;
+                    }
                 }
 
-                // drop condition variable and start waiting on the mutex queue
-                mutex_waiter.enter();
-                waiter.leave();
+                timeout = 0;
                 continue;
             }
 
-            LV2_UNLOCK, thread_ctrl::wait_for(timeout - passed);
+            thread_ctrl::wait_for(timeout - passed);
         }
         else
         {
-            LV2_UNLOCK, thread_ctrl::wait();
+            thread_ctrl::wait();
         }
     }
 
-    // mutex owner is restored after notification or unlocking
-    if (cond->mutex->owner.get() != &ppu)
-    {
-        fmt::throw_exception("Unexpected mutex owner" HERE);
-    }
+    // Verify ownership
+    verify(HERE), cond->mutex->owner >> 1 == ppu.id;
 
-    // restore the recursive value
-    cond->mutex->recursive_count = recursive_value;
+    // Restore the recursive value
+    cond->mutex->lock_count = cond.ret;
 
-    // check timeout (unclear)
-    if (timeout && get_system_time() - start_time > timeout)
+    if (ppu.gpr[3] == CELL_ETIMEDOUT)
     {
-        return CELL_ETIMEDOUT;
+        return not_an_error(CELL_ETIMEDOUT);
     }
 
     return CELL_OK;

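The main behavioural change above is how signalling works: a signalled waiter is no longer woken directly. The signaller removes it from the condition variable's queue and hands it to the associated mutex via try_own(); set_signal() is only called when the mutex happened to be free, otherwise the thread keeps sleeping on the mutex queue until reown() picks it. A simplified, self-contained model of that hand-off (illustration only; the toy_* names are invented and this is not RPCS3 code):

    #include <cstdint>
    #include <deque>

    struct toy_thread
    {
        std::uint32_t id;
        bool signalled = false;
    };

    struct toy_mutex
    {
        std::uint32_t owner = 0;        // owner id << 1, low bit = contention
        std::deque<toy_thread*> sq;     // threads sleeping on the mutex

        // Rough counterpart of lv2_mutex::try_own(): take the mutex or join its queue.
        bool try_own(toy_thread& t)
        {
            if (owner == 0)
            {
                owner = t.id << 1;
                return true;            // t owns the mutex and may run
            }

            owner |= 1;                 // remember that someone is queued
            sq.push_back(&t);
            return false;               // t stays asleep until the owner unlocks
        }
    };

    struct toy_cond
    {
        toy_mutex* mutex;
        std::deque<toy_thread*> sq;     // threads blocked in "cond_wait"

        // Rough counterpart of sys_cond_signal(): pop one waiter and requeue it on the mutex.
        void signal()
        {
            if (sq.empty())
            {
                return;
            }

            toy_thread* t = sq.front();
            sq.pop_front();

            if (mutex->try_own(*t))
            {
                t->signalled = true;    // equivalent of cpu->set_signal()
            }
        }
    };
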
View File

@@ -21,26 +21,33 @@ struct lv2_cond final : lv2_obj
 {
     static const u32 id_base = 0x86000000;
 
+    const u32 shared;
+    const s32 flags;
+    const u64 key;
     const u64 name;
-    const std::shared_ptr<lv2_mutex> mutex; // associated mutex
 
-    sleep_queue<cpu_thread> sq;
+    std::shared_ptr<lv2_mutex> mutex; // Associated Mutex
 
-    lv2_cond(const std::shared_ptr<lv2_mutex>& mutex, u64 name)
-        : mutex(mutex)
+    atomic_t<u32> waiters{0};
+
+    std::deque<cpu_thread*> sq;
+
+    lv2_cond(u64 name, std::shared_ptr<lv2_mutex> mutex)
+        : shared(0)
+        , key(0)
+        , flags(0)
         , name(name)
+        , mutex(std::move(mutex))
     {
+        this->mutex->cond_count++;
     }
-
-    void notify(lv2_lock_t, cpu_thread* thread);
 };
 
 class ppu_thread;
 
-// SysCalls
+// Syscalls
 
-s32 sys_cond_create(vm::ps3::ptr<u32> cond_id, u32 mutex_id, vm::ps3::ptr<sys_cond_attribute_t> attr);
-s32 sys_cond_destroy(u32 cond_id);
-s32 sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout);
-s32 sys_cond_signal(u32 cond_id);
-s32 sys_cond_signal_all(u32 cond_id);
-s32 sys_cond_signal_to(u32 cond_id, u32 thread_id);
+error_code sys_cond_create(vm::ps3::ptr<u32> cond_id, u32 mutex_id, vm::ps3::ptr<sys_cond_attribute_t> attr);
+error_code sys_cond_destroy(u32 cond_id);
+error_code sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout);
+error_code sys_cond_signal(u32 cond_id);
+error_code sys_cond_signal_all(u32 cond_id);
+error_code sys_cond_signal_to(u32 cond_id, u32 thread_id);

View File

@@ -13,19 +13,7 @@ logs::channel sys_mutex("sys_mutex", logs::level::notice);
 
 extern u64 get_system_time();
 
-void lv2_mutex::unlock(lv2_lock_t)
-{
-    owner.reset();
-
-    if (sq.size())
-    {
-        // pick new owner; protocol is ignored in current implementation
-        owner = idm::get<ppu_thread>(sq.front()->id);
-
-        owner->set_signal();
-    }
-}
-
-s32 sys_mutex_create(vm::ptr<u32> mutex_id, vm::ptr<sys_mutex_attribute_t> attr)
+error_code sys_mutex_create(vm::ptr<u32> mutex_id, vm::ptr<sys_mutex_attribute_t> attr)
 {
     sys_mutex.warning("sys_mutex_create(mutex_id=*0x%x, attr=*0x%x)", mutex_id, attr);
 
@@ -49,195 +37,176 @@ s32 sys_mutex_create(vm::ptr<u32> mutex_id, vm::ptr<sys_mutex_attribute_t> attr)
         }
     }
 
-    const bool recursive = attr->recursive == SYS_SYNC_RECURSIVE;
+    const u32 recursive = attr->recursive;
 
-    if ((!recursive && attr->recursive != SYS_SYNC_NOT_RECURSIVE) || attr->pshared != SYS_SYNC_NOT_PROCESS_SHARED || attr->adaptive != SYS_SYNC_NOT_ADAPTIVE || attr->ipc_key || attr->flags)
+    switch (recursive)
     {
-        sys_mutex.error("sys_mutex_create(): unknown attributes (recursive=0x%x, pshared=0x%x, adaptive=0x%x, ipc_key=0x%llx, flags=0x%x)", attr->recursive, attr->pshared, attr->adaptive, attr->ipc_key, attr->flags);
+    case SYS_SYNC_RECURSIVE: break;
+    case SYS_SYNC_NOT_RECURSIVE: break;
+    default:
+    {
+        sys_mutex.error("sys_mutex_create(): unknown recursive (0x%x)", recursive);
+        return CELL_EINVAL;
+    }
+    }
+
+    if (attr->pshared != SYS_SYNC_NOT_PROCESS_SHARED || attr->adaptive != SYS_SYNC_NOT_ADAPTIVE || attr->ipc_key || attr->flags)
+    {
+        sys_mutex.error("sys_mutex_create(): unknown attributes (pshared=0x%x, adaptive=0x%x, ipc_key=0x%llx, flags=0x%x)", attr->pshared, attr->adaptive, attr->ipc_key, attr->flags);
         return CELL_EINVAL;
     }
 
-    *mutex_id = idm::make<lv2_obj, lv2_mutex>(recursive, protocol, attr->name_u64);
+    if (const u32 id = idm::make<lv2_obj, lv2_mutex>(protocol, recursive, attr->name_u64))
+    {
+        *mutex_id = id;
+        return CELL_OK;
+    }
 
-    return CELL_OK;
+    return CELL_EAGAIN;
 }
 
-s32 sys_mutex_destroy(u32 mutex_id)
+error_code sys_mutex_destroy(u32 mutex_id)
 {
     sys_mutex.warning("sys_mutex_destroy(mutex_id=0x%x)", mutex_id);
 
-    LV2_LOCK;
-
-    const auto mutex = idm::get<lv2_obj, lv2_mutex>(mutex_id);
+    const auto mutex = idm::withdraw<lv2_obj, lv2_mutex>(mutex_id, [](lv2_mutex& mutex) -> CellError
+    {
+        if (mutex.owner || mutex.lock_count)
+        {
+            return CELL_EBUSY;
+        }
+
+        if (mutex.cond_count)
+        {
+            return CELL_EPERM;
+        }
+
+        return {};
+    });
 
     if (!mutex)
     {
         return CELL_ESRCH;
     }
 
-    if (mutex->owner || mutex->sq.size())
+    if (mutex.ret)
    {
-        return CELL_EBUSY;
+        return mutex.ret;
     }
 
-    if (mutex->cond_count)
-    {
-        return CELL_EPERM;
-    }
-
-    idm::remove<lv2_obj, lv2_mutex>(mutex_id);
-
     return CELL_OK;
 }
 
-s32 sys_mutex_lock(ppu_thread& ppu, u32 mutex_id, u64 timeout)
+error_code sys_mutex_lock(ppu_thread& ppu, u32 mutex_id, u64 timeout)
 {
     sys_mutex.trace("sys_mutex_lock(mutex_id=0x%x, timeout=0x%llx)", mutex_id, timeout);
 
     const u64 start_time = get_system_time();
 
-    LV2_LOCK;
-
-    const auto mutex = idm::get<lv2_obj, lv2_mutex>(mutex_id);
+    const auto mutex = idm::get<lv2_obj, lv2_mutex>(mutex_id, [&](lv2_mutex& mutex)
+    {
+        return mutex.lock(ppu, ppu.id);
+    });
 
     if (!mutex)
     {
         return CELL_ESRCH;
     }
 
-    // check current ownership
-    if (mutex->owner.get() == &ppu)
+    if (mutex.ret)
     {
-        if (mutex->recursive)
+        if (mutex.ret != CELL_EBUSY)
         {
-            if (mutex->recursive_count == 0xffffffffu)
-            {
-                return CELL_EKRESOURCE;
-            }
-
-            mutex->recursive_count++;
-
-            return CELL_OK;
+            return mutex.ret;
         }
-
-        return CELL_EDEADLK;
     }
-
-    // lock immediately if not locked
-    if (!mutex->owner)
+    else
     {
-        mutex->owner = idm::get<ppu_thread>(ppu.id);
-
         return CELL_OK;
     }
 
-    // add waiter; protocol is ignored in current implementation
-    sleep_entry<cpu_thread> waiter(mutex->sq, ppu);
-
+    // SLEEP
     while (!ppu.state.test_and_reset(cpu_flag::signal))
     {
-        CHECK_EMU_STATUS;
-
         if (timeout)
         {
             const u64 passed = get_system_time() - start_time;
 
             if (passed >= timeout)
             {
-                return CELL_ETIMEDOUT;
+                semaphore_lock lock(mutex->mutex);
+
+                if (!mutex->unqueue(mutex->sq, &ppu))
+                {
+                    timeout = 0;
+                    continue;
+                }
+
+                return not_an_error(CELL_ETIMEDOUT);
             }
 
-            LV2_UNLOCK, thread_ctrl::wait_for(timeout - passed);
+            thread_ctrl::wait_for(timeout - passed);
         }
         else
         {
-            LV2_UNLOCK, thread_ctrl::wait();
+            thread_ctrl::wait();
         }
     }
 
-    // new owner must be set when unlocked
-    if (mutex->owner.get() != &ppu)
-    {
-        fmt::throw_exception("Unexpected mutex owner" HERE);
-    }
-
     return CELL_OK;
 }
 
-s32 sys_mutex_trylock(ppu_thread& ppu, u32 mutex_id)
+error_code sys_mutex_trylock(ppu_thread& ppu, u32 mutex_id)
 {
     sys_mutex.trace("sys_mutex_trylock(mutex_id=0x%x)", mutex_id);
 
-    LV2_LOCK;
-
-    const auto mutex = idm::get<lv2_obj, lv2_mutex>(mutex_id);
+    const auto mutex = idm::check<lv2_obj, lv2_mutex>(mutex_id, [&](lv2_mutex& mutex)
+    {
+        return mutex.try_lock(ppu.id);
+    });
 
     if (!mutex)
     {
         return CELL_ESRCH;
     }
 
-    // check current ownership
-    if (mutex->owner.get() == &ppu)
+    if (mutex.ret)
     {
-        if (mutex->recursive)
+        if (mutex.ret == CELL_EBUSY)
         {
-            if (mutex->recursive_count == 0xffffffffu)
-            {
-                return CELL_EKRESOURCE;
-            }
-
-            mutex->recursive_count++;
-
-            return CELL_OK;
+            return not_an_error(CELL_EBUSY);
         }
 
-        return CELL_EDEADLK;
+        return mutex.ret;
     }
 
-    if (mutex->owner)
-    {
-        return CELL_EBUSY;
-    }
-
-    // own the mutex if free
-    mutex->owner = idm::get<ppu_thread>(ppu.id);
-
     return CELL_OK;
 }
 
-s32 sys_mutex_unlock(ppu_thread& ppu, u32 mutex_id)
+error_code sys_mutex_unlock(ppu_thread& ppu, u32 mutex_id)
 {
     sys_mutex.trace("sys_mutex_unlock(mutex_id=0x%x)", mutex_id);
 
-    LV2_LOCK;
-
-    const auto mutex = idm::get<lv2_obj, lv2_mutex>(mutex_id);
+    const auto mutex = idm::check<lv2_obj, lv2_mutex>(mutex_id, [&](lv2_mutex& mutex)
+    {
+        return mutex.try_unlock(ppu.id);
+    });
 
     if (!mutex)
     {
         return CELL_ESRCH;
     }
 
-    // check current ownership
-    if (mutex->owner.get() != &ppu)
+    if (mutex.ret == CELL_EBUSY)
     {
-        return CELL_EPERM;
-    }
+        semaphore_lock lock(mutex->mutex);
 
-    if (mutex->recursive_count)
-    {
-        if (!mutex->recursive)
-        {
-            fmt::throw_exception("Unexpected recursive_count" HERE);
-        }
-
-        mutex->recursive_count--;
+        mutex->reown<ppu_thread>();
     }
-    else
+    else if (mutex.ret)
     {
-        mutex->unlock(lv2_lock);
+        return mutex.ret;
     }
 
     return CELL_OK;

View File

@@ -23,31 +23,149 @@ struct lv2_mutex final : lv2_obj
 {
     static const u32 id_base = 0x85000000;
 
-    const bool recursive;
     const u32 protocol;
+    const u32 recursive;
+    const u32 shared;
+    const u32 adaptive;
+    const u64 key;
     const u64 name;
+    const s32 flags;
 
-    atomic_t<u32> cond_count{ 0 }; // count of condition variables associated
-    atomic_t<u32> recursive_count{ 0 }; // count of recursive locks
-    std::shared_ptr<cpu_thread> owner; // current mutex owner
+    semaphore<> mutex;
+    atomic_t<u32> owner{0}; // Owner Thread ID
+    atomic_t<u32> lock_count{0}; // Recursive Locks
+    atomic_t<u32> cond_count{0}; // Condition Variables
+    std::deque<cpu_thread*> sq;
 
-    sleep_queue<cpu_thread> sq;
-
-    lv2_mutex(bool recursive, u32 protocol, u64 name)
-        : recursive(recursive)
-        , protocol(protocol)
+    lv2_mutex(u32 protocol, u32 recursive, u64 name)
+        : protocol(protocol)
+        , recursive(recursive)
+        , shared(0)
+        , adaptive(0)
+        , key(0)
+        , flags(0)
         , name(name)
     {
     }
 
-    void unlock(lv2_lock_t);
+    CellError try_lock(u32 id)
+    {
+        const u32 value = owner;
+
+        if (value >> 1 == id)
+        {
+            // Recursive locking
+            if (recursive == SYS_SYNC_RECURSIVE)
+            {
+                if (lock_count == 0xffffffffu)
+                {
+                    return CELL_EKRESOURCE;
+                }
+
+                lock_count++;
+                return {};
+            }
+
+            return CELL_EDEADLK;
+        }
+
+        if (value == 0)
+        {
+            if (owner.compare_and_swap_test(0, id << 1))
+            {
+                return {};
+            }
+        }
+
+        return CELL_EBUSY;
+    }
+
+    bool try_own(cpu_thread& cpu, u32 id)
+    {
+        if (owner.fetch_op([&](u32& val)
+        {
+            if (val == 0)
+            {
+                val = id << 1;
+            }
+            else
+            {
+                val |= 1;
+            }
+        }))
+        {
+            sq.emplace_back(&cpu);
+            return false;
+        }
+
+        return true;
+    }
+
+    CellError lock(cpu_thread& cpu, u32 id)
+    {
+        CellError result = try_lock(id);
+
+        if (result == CELL_EBUSY)
+        {
+            semaphore_lock lock(mutex);
+
+            if (try_own(cpu, id))
+            {
+                return {};
+            }
+        }
+
+        return result;
+    }
+
+    CellError try_unlock(u32 id)
+    {
+        const u32 value = owner;
+
+        if (value >> 1 != id)
+        {
+            return CELL_EPERM;
+        }
+
+        if (lock_count)
+        {
+            lock_count--;
+            return {};
+        }
+
+        if (value == id << 1)
+        {
+            if (owner.compare_and_swap_test(value, 0))
+            {
+                return {};
+            }
+        }
+
+        return CELL_EBUSY;
+    }
+
+    template <typename T>
+    void reown()
+    {
+        if (auto cpu = schedule<T>(sq, protocol))
+        {
+            owner = cpu->id << 1 | !sq.empty();
+            cpu->set_signal();
+        }
+        else
+        {
+            owner = 0;
+        }
+    }
 };
 
 class ppu_thread;
 
-// SysCalls
+// Syscalls
 
-s32 sys_mutex_create(vm::ps3::ptr<u32> mutex_id, vm::ps3::ptr<sys_mutex_attribute_t> attr);
-s32 sys_mutex_destroy(u32 mutex_id);
-s32 sys_mutex_lock(ppu_thread& ppu, u32 mutex_id, u64 timeout);
-s32 sys_mutex_trylock(ppu_thread& ppu, u32 mutex_id);
-s32 sys_mutex_unlock(ppu_thread& ppu, u32 mutex_id);
+error_code sys_mutex_create(vm::ps3::ptr<u32> mutex_id, vm::ps3::ptr<sys_mutex_attribute_t> attr);
+error_code sys_mutex_destroy(u32 mutex_id);
+error_code sys_mutex_lock(ppu_thread& ppu, u32 mutex_id, u64 timeout);
+error_code sys_mutex_trylock(ppu_thread& ppu, u32 mutex_id);
+error_code sys_mutex_unlock(ppu_thread& ppu, u32 mutex_id);

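The mutex state above is packed into a single 32-bit word: bits 31..1 hold the owner's thread id (hence the id << 1 and owner >> 1 arithmetic throughout the commit) and bit 0 records that at least one thread is queued, so try_unlock() can refuse the fast path and force reown() to run under the queue semaphore. A small sketch of that encoding with std::atomic (illustrative only, not the emulator's atomic_t API):

    #include <atomic>
    #include <cstdint>

    // Bits 31..1: owning thread id; bit 0: waiters are queued (contention).
    struct lock_word
    {
        std::atomic<std::uint32_t> owner{0};

        // Fast path comparable to lv2_mutex::try_lock(): CAS 0 -> id << 1.
        bool try_lock(std::uint32_t id)
        {
            std::uint32_t expected = 0;
            return owner.compare_exchange_strong(expected, id << 1);
        }

        // Fast path comparable to lv2_mutex::try_unlock(): only succeeds while the
        // contention bit is clear; otherwise an heir must be chosen under the queue lock.
        bool try_unlock(std::uint32_t id)
        {
            std::uint32_t expected = id << 1;
            return owner.compare_exchange_strong(expected, 0);
        }

        std::uint32_t owner_id() const { return owner.load() >> 1; }
        bool contended() const { return (owner.load() & 1) != 0; }
    };
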
View File

@@ -6,6 +6,8 @@
 #include "Utilities/sema.h"
 #include "Utilities/cond.h"
 
+#include "Emu/Cell/ErrorCodes.h"
+
 // attr_protocol (waiting scheduling policy)
 enum
 {

View File

@@ -498,47 +498,46 @@ void Emulator::Stop()
 
 s32 error_code::error_report(const fmt_type_info* sup, u64 arg)
 {
-    std::string out;
+    logs::channel* channel = &logs::GENERAL;
+    logs::level level = logs::level::error;
+    const char* func = "Unknown function";
 
     if (auto thread = get_current_cpu_thread())
     {
         if (g_system == system_type::ps3 && thread->id_type() == 1)
         {
-            if (auto func = static_cast<ppu_thread*>(thread)->last_function)
+            auto& ppu = static_cast<ppu_thread&>(*thread);
+
+            // Filter some annoying reports
+            switch (arg)
             {
-                out += "'";
-                out += func;
-                out += "'";
+            case CELL_EDEADLK:
+            {
+                if (ppu.m_name == "_cellsurMixerMain" && std::memcmp(ppu.last_function, "sys_mutex_lock", 15) == 0)
+                {
+                    level = logs::level::trace;
+                }
+
+                break;
+            }
+            }
+
+            if (ppu.last_function)
+            {
+                func = ppu.last_function;
             }
         }
 
         if (g_system == system_type::psv)
         {
-            if (auto func = static_cast<ARMv7Thread*>(thread)->last_function)
+            if (auto _func = static_cast<ARMv7Thread*>(thread)->last_function)
             {
-                out += "'";
-                out += func;
-                out += "'";
+                func = _func;
             }
         }
     }
 
-    if (out.empty())
-    {
-        fmt::append(out, "Unknown function failed with 0x%08x", arg);
-    }
-    else
-    {
-        fmt::append(out, " failed with 0x%08x", arg);
-    }
-
-    if (sup)
-    {
-        fmt::raw_append(out, " : %s", sup, fmt_args_t<void>{arg});
-    }
-
-    LOG_ERROR(GENERAL, "%s", out);
+    channel->format(level, "'%s' failed with 0x%08x%s%s", func, arg, sup ? " : " : "", std::make_pair(sup, arg));
 
     return static_cast<s32>(arg);
 }

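error_report() now builds the whole message in one format call: the first %s is the last recorded syscall name, and the trailing %s%s pair appends " : " plus the decoded error only when a formatter (sup) is supplied, using the dynamic argument added in StrFmt.cpp above. Syscalls that want to hand back an expected status wrap it in not_an_error(), as sys_mutex_trylock and the timeout paths do, which judging by its use here keeps such codes out of this report. A plain printf sketch of the resulting message shape (sample names and hex values are made up):

    #include <cstdio>

    // Reproduces only the message layout, not the logging machinery.
    void report(const char* func, unsigned arg, const char* decoded /* may be null */)
    {
        std::printf("'%s' failed with 0x%08x%s%s\n",
            func, arg, decoded ? " : " : "", decoded ? decoded : "");
    }

    // report("sys_mutex_lock", 0x80010008, "CELL_EDEADLK");  -> 'sys_mutex_lock' failed with 0x80010008 : CELL_EDEADLK
    // report("sys_cond_wait", 0x80010005, nullptr);          -> 'sys_cond_wait' failed with 0x80010005
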
View File

@@ -137,13 +137,14 @@ void KernelExplorer::Update()
     case SYS_MUTEX_OBJECT:
     {
         auto& mutex = static_cast<lv2_mutex&>(obj);
-        m_tree->AppendItem(node, fmt::format("Mutex: ID = 0x%08x \"%s\"", id, +name64(mutex.name)));
+        m_tree->AppendItem(node, fmt::format("Mutex: ID = 0x%08x \"%s\",%s Owner = 0x%x, Locks = %u, Conds = %u, Wq = %zu", id, +name64(mutex.name),
+            mutex.recursive == SYS_SYNC_RECURSIVE ? " Recursive," : "", mutex.owner >> 1, +mutex.lock_count, +mutex.cond_count, mutex.sq.size()));
         break;
     }
     case SYS_COND_OBJECT:
     {
         auto& cond = static_cast<lv2_cond&>(obj);
-        m_tree->AppendItem(node, fmt::format("Cond: ID = 0x%08x \"%s\"", id, +name64(cond.name)));
+        m_tree->AppendItem(node, fmt::format("Cond: ID = 0x%08x \"%s\", Waiters = %u", id, +name64(cond.name), +cond.waiters));
         break;
     }
     case SYS_RWLOCK_OBJECT: