mirror of https://github.com/RPCS3/rpcs3.git synced 2024-11-22 02:32:36 +01:00

Sync primitives reworked

(rwlock rewritten)
Nekotekina 2015-01-02 02:41:29 +03:00
parent ac75b62f4d
commit f3cd908d5c
22 changed files with 641 additions and 503 deletions

View File

@ -314,32 +314,6 @@ public:
return pop(data, &no_wait);
}
void clear()
{
while (m_sync.atomic_op_sync(SQSVR_OK, [](squeue_sync_var_t& sync) -> u32
{
assert(sync.count <= sq_size);
assert(sync.position < sq_size);
if (sync.pop_lock || sync.push_lock)
{
return SQSVR_LOCKED;
}
sync.pop_lock = 1;
sync.push_lock = 1;
return SQSVR_OK;
}))
{
std::unique_lock<std::mutex> rcv_lock(m_rcv_mutex);
m_rcv.wait_for(rcv_lock, std::chrono::milliseconds(1));
}
m_sync.exchange({});
m_wcv.notify_one();
m_rcv.notify_one();
}
bool peek(T& data, u32 start_pos = 0, const volatile bool* do_exit = nullptr)
{
assert(start_pos < sq_size);
@ -393,4 +367,93 @@ public:
return peek(data, start_pos, &no_wait);
}
class squeue_data_t
{
T* const m_data;
const u32 m_pos;
const u32 m_count;
squeue_data_t(T* data, u32 pos, u32 count)
: m_data(data)
, m_pos(pos)
, m_count(count)
{
}
public:
T& operator [] (u32 index)
{
assert(index < m_count);
index += m_pos;
index = index < sq_size ? index : index - sq_size;
return m_data[index];
}
};
void process(void(*proc)(squeue_data_t data))
{
u32 pos, count;
while (m_sync.atomic_op_sync(SQSVR_OK, [&pos, &count](squeue_sync_var_t& sync) -> u32
{
assert(sync.count <= sq_size);
assert(sync.position < sq_size);
if (sync.pop_lock || sync.push_lock)
{
return SQSVR_LOCKED;
}
pos = sync.position;
count = sync.count;
sync.pop_lock = 1;
sync.push_lock = 1;
return SQSVR_OK;
}))
{
std::unique_lock<std::mutex> rcv_lock(m_rcv_mutex);
m_rcv.wait_for(rcv_lock, std::chrono::milliseconds(1));
}
proc(squeue_data_t(m_data, pos, count));
m_sync.atomic_op([](squeue_sync_var_t& sync)
{
assert(sync.count <= sq_size);
assert(sync.position < sq_size);
assert(sync.pop_lock && sync.push_lock);
sync.pop_lock = 0;
sync.push_lock = 0;
});
m_wcv.notify_one();
m_rcv.notify_one();
}
void clear()
{
while (m_sync.atomic_op_sync(SQSVR_OK, [](squeue_sync_var_t& sync) -> u32
{
assert(sync.count <= sq_size);
assert(sync.position < sq_size);
if (sync.pop_lock || sync.push_lock)
{
return SQSVR_LOCKED;
}
sync.pop_lock = 1;
sync.push_lock = 1;
return SQSVR_OK;
}))
{
std::unique_lock<std::mutex> rcv_lock(m_rcv_mutex);
m_rcv.wait_for(rcv_lock, std::chrono::milliseconds(1));
}
m_sync.exchange({});
m_wcv.notify_one();
m_rcv.notify_one();
}
};
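
The new process() method above locks both the push and pop sides, hands a callback a bounded window over the ring buffer, and then releases both locks. A minimal standalone sketch of that idea (ring_t and window_t are invented names for illustration, with std::mutex standing in for the atomic lock flags; this is not the RPCS3 squeue_t interface):

#include <cassert>
#include <functional>
#include <mutex>

template<typename T, unsigned N>
struct ring_t
{
    std::mutex m;                 // stands in for the pop_lock/push_lock pair
    T data[N];
    unsigned pos = 0, count = 0;  // ring buffer state

    struct window_t
    {
        T* data; unsigned pos, count;
        // index with wraparound, bounded by the element count captured at lock time
        T& operator [](unsigned i) { assert(i < count); i += pos; return data[i < N ? i : i - N]; }
    };

    void process(const std::function<void(window_t)>& proc)
    {
        std::lock_guard<std::mutex> lock(m);   // both ends blocked while the callback runs
        proc(window_t{ data, pos, count });
    }
};

int main()
{
    ring_t<int, 8> q;
    q.count = 3;
    q.process([](ring_t<int, 8>::window_t w) { for (unsigned i = 0; i < w.count; i++) w[i] = 0; });
}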

View File

@ -3664,7 +3664,14 @@ private:
void DCBZ(u32 ra, u32 rb)
{
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
auto const cache_line = vm::get_ptr<u8>(addr & ~127);
if ((u32)addr != addr)
{
LOG_ERROR(PPU, "%s(): invalid address (0x%llx)", __FUNCTION__, addr);
Emu.Pause();
return;
}
auto const cache_line = vm::get_ptr<u8>((u32)addr & ~127);
if (cache_line)
memset(cache_line, 0, 128);
}

View File

@ -1095,7 +1095,7 @@ void SPUThread::StopAndSignal(u32 code)
{
case 0:
{
const u32 next = eq->events.count() ? eq->sq.pop(eq->protocol) : 0;
const u32 next = eq->events.count() ? eq->sq.signal(eq->protocol) : 0;
if (next != tid)
{
if (!eq->owner.compare_and_swap_test(tid, next))
@ -1118,13 +1118,20 @@ void SPUThread::StopAndSignal(u32 code)
SPU.In_MBox.PushUncond((u32)event.data1);
SPU.In_MBox.PushUncond((u32)event.data2);
SPU.In_MBox.PushUncond((u32)event.data3);
if (!eq->sq.invalidate(tid, eq->protocol))
{
assert(!"sys_spu_thread_receive_event() failed (receiving)");
}
return;
}
}
if (!~old_owner)
{
eq->sq.invalidate(tid);
if (!eq->sq.invalidate(tid, eq->protocol))
{
assert(!"sys_spu_thread_receive_event() failed (cancelling)");
}
SPU.In_MBox.PushUncond(CELL_ECANCELED);
return;
}
@ -1133,7 +1140,6 @@ void SPUThread::StopAndSignal(u32 code)
if (Emu.IsStopped())
{
LOG_WARNING(Log::SPU, "sys_spu_thread_receive_event(spuq=0x%x) aborted", spuq);
eq->sq.invalidate(tid);
return;
}
}

View File

@ -170,36 +170,28 @@ void MemoryBase::Close()
MemoryBlocks.clear();
}
void MemoryBase::WriteMMIO32(u32 addr, const u32 data)
bool MemoryBase::WriteMMIO32(u32 addr, const u32 data)
{
{
LV2_LOCK(0);
LV2_LOCK(0);
if (RawSPUMem[(addr - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET] &&
((RawSPUThread*)RawSPUMem[(addr - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET])->Write32(addr, data))
{
return;
}
if (RawSPUMem[(addr - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET] && ((RawSPUThread*)RawSPUMem[(addr - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET])->Write32(addr, data))
{
return true;
}
*(u32*)((u8*)GetBaseAddr() + addr) = re32(data); // provoke error
return false;
}
u32 MemoryBase::ReadMMIO32(u32 addr)
bool MemoryBase::ReadMMIO32(u32 addr, u32& res)
{
u32 res;
{
LV2_LOCK(0);
LV2_LOCK(0);
if (RawSPUMem[(addr - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET] &&
((RawSPUThread*)RawSPUMem[(addr - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET])->Read32(addr, &res))
{
return res;
}
if (RawSPUMem[(addr - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET] && ((RawSPUThread*)RawSPUMem[(addr - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET])->Read32(addr, &res))
{
return true;
}
res = re32(*(u32*)((u8*)GetBaseAddr() + addr)); // provoke error
return res;
return false;
}
bool MemoryBase::Map(const u64 addr, const u32 size)

View File

@ -127,9 +127,9 @@ public:
void Close();
__noinline void WriteMMIO32(u32 addr, const u32 data);
__noinline bool WriteMMIO32(u32 addr, const u32 data);
__noinline u32 ReadMMIO32(u32 addr);
__noinline bool ReadMMIO32(u32 addr, u32& res);
u32 GetUserMemTotalSize()
{

View File

@ -64,26 +64,20 @@ namespace vm
static u32 read32(u32 addr)
{
if (addr < RAW_SPU_BASE_ADDR || (addr % RAW_SPU_OFFSET) < RAW_SPU_PROB_OFFSET)
u32 res;
if (addr < RAW_SPU_BASE_ADDR || (addr % RAW_SPU_OFFSET) < RAW_SPU_PROB_OFFSET || !Memory.ReadMMIO32((u32)addr, res))
{
return re32(*(u32*)((u8*)g_base_addr + addr));
}
else
{
return Memory.ReadMMIO32((u32)addr);
res = re32(*(u32*)((u8*)g_base_addr + addr));
}
return res;
}
static void write32(u32 addr, be_t<u32> value)
{
if (addr < RAW_SPU_BASE_ADDR || (addr % RAW_SPU_OFFSET) < RAW_SPU_PROB_OFFSET)
if (addr < RAW_SPU_BASE_ADDR || (addr % RAW_SPU_OFFSET) < RAW_SPU_PROB_OFFSET || !Memory.WriteMMIO32((u32)addr, value))
{
*(be_t<u32>*)((u8*)g_base_addr + addr) = value;
}
else
{
Memory.WriteMMIO32((u32)addr, value);
}
}
static u64 read64(u32 addr)

View File

@ -67,12 +67,12 @@ namespace vm
__forceinline _ptr_base<T, lvl - 1, std::conditional<is_be_t<T>::value, typename to_be_t<AT>::type, AT>>& operator *() const
{
return vm::get_ref<_ptr_base<T, lvl - 1, std::conditional<is_be_t<T>::value, typename to_be_t<AT>::type, AT>>>(m_addr);
return vm::get_ref<_ptr_base<T, lvl - 1, std::conditional<is_be_t<T>::value, typename to_be_t<AT>::type, AT>>>((u32)m_addr);
}
__forceinline _ptr_base<T, lvl - 1, std::conditional<is_be_t<T>::value, typename to_be_t<AT>::type, AT>>& operator [](AT index) const
{
return vm::get_ref<_ptr_base<T, lvl - 1, std::conditional<is_be_t<T>::value, typename to_be_t<AT>::type, AT>>>(m_addr + sizeof(AT)* index);
return vm::get_ref<_ptr_base<T, lvl - 1, std::conditional<is_be_t<T>::value, typename to_be_t<AT>::type, AT>>>((u32)(m_addr + sizeof(AT)* index));
}
//typedef typename invert_be_t<AT>::type AT2;
@ -112,7 +112,7 @@ namespace vm
__forceinline T* const operator -> () const
{
return vm::get_ptr<T>(m_addr);
return vm::get_ptr<T>((u32)m_addr);
}
_ptr_base operator++ (int)
@ -160,17 +160,17 @@ namespace vm
__forceinline T& operator *() const
{
return vm::get_ref<T>(m_addr);
return vm::get_ref<T>((u32)m_addr);
}
__forceinline T& operator [](typename remove_be_t<AT>::type index) const
{
return vm::get_ref<T>(m_addr + sizeof(T)* index);
return vm::get_ref<T>((u32)(m_addr + sizeof(T) * index));
}
__forceinline T& operator [](typename to_be_t<AT>::forced_type index) const
{
return vm::get_ref<T>(m_addr + sizeof(T)* index);
return vm::get_ref<T>((u32)(m_addr + sizeof(T)* index));
}
__forceinline bool operator <(const _ptr_base& right) const { return m_addr < right.m_addr; }
@ -224,7 +224,7 @@ namespace vm
T* get_ptr() const
{
return vm::get_ptr<T>(m_addr);
return vm::get_ptr<T>((u32)m_addr);
}
static _ptr_base make(AT addr)
@ -253,7 +253,7 @@ namespace vm
void* get_ptr() const
{
return vm::get_ptr<void>(m_addr);
return vm::get_ptr<void>((u32)m_addr);
}
explicit operator void*() const
@ -313,7 +313,7 @@ namespace vm
const void* get_ptr() const
{
return vm::get_ptr<const void>(m_addr);
return vm::get_ptr<const void>((u32)m_addr);
}
explicit operator const void*() const
@ -356,9 +356,9 @@ namespace vm
public:
typedef RT(*type)(T...);
RT call(CPUThread& CPU, T... args) const; // call using specified CPU thread context, defined in CB_FUNC.h
RT call(CPUThread& CPU, T... args) const; // defined in CB_FUNC.h, call using specified CPU thread context
RT operator()(T... args) const; // call using current CPU thread context, defined in CB_FUNC.h
RT operator()(T... args) const; // defined in CB_FUNC.h, call using current CPU thread context
AT addr() const
{

View File

@ -20,7 +20,7 @@ SemaphoreAttributes SyncPrimManager::GetSemaphoreData(u32 id)
return{};
}
return{ std::string((const char*)&sem->name, 8), sem->value, sem->max };
return{ std::string((const char*)&sem->name, 8), sem->value.read_sync(), sem->max };
}
LwMutexAttributes SyncPrimManager::GetLwMutexData(u32 id)

View File

@ -11,9 +11,13 @@
sleep_queue_t::~sleep_queue_t()
{
for (auto& tid : m_list)
for (auto& tid : m_waiting)
{
LOG_NOTICE(HLE, "~sleep_queue_t('%s'): m_list[%lld]=%d", m_name.c_str(), &tid - m_list.data(), tid);
LOG_NOTICE(HLE, "~sleep_queue_t['%s']: m_waiting[%lld]=%d", m_name.c_str(), &tid - m_waiting.data(), tid);
}
for (auto& tid : m_signaled)
{
LOG_NOTICE(HLE, "~sleep_queue_t['%s']: m_signaled[%lld]=%d", m_name.c_str(), &tid - m_signaled.data(), tid);
}
}
@ -28,15 +32,27 @@ void sleep_queue_t::push(u32 tid, u32 protocol)
{
std::lock_guard<std::mutex> lock(m_mutex);
for (auto& v : m_list)
for (auto& v : m_waiting)
{
if (v == tid)
{
assert(!"sleep_queue_t::push() failed (duplication)");
LOG_ERROR(HLE, "sleep_queue_t['%s']::push() failed: thread already waiting (%d)", m_name.c_str(), tid);
Emu.Pause();
return;
}
}
m_list.push_back(tid);
for (auto& v : m_signaled)
{
if (v == tid)
{
LOG_ERROR(HLE, "sleep_queue_t['%s']::push() failed: thread already signaled (%d)", m_name.c_str(), tid);
Emu.Pause();
return;
}
}
m_waiting.push_back(tid);
return;
}
case SYS_SYNC_RETRY:
@ -45,34 +61,86 @@ void sleep_queue_t::push(u32 tid, u32 protocol)
}
}
LOG_ERROR(HLE, "sleep_queue_t::push(): unsupported protocol (0x%x)", protocol);
LOG_ERROR(HLE, "sleep_queue_t['%s']::push() failed: unsupported protocol (0x%x)", m_name.c_str(), protocol);
Emu.Pause();
}
u32 sleep_queue_t::pop(u32 protocol)
bool sleep_queue_t::pop(u32 tid, u32 protocol)
{
assert(tid);
switch (protocol & SYS_SYNC_ATTR_PROTOCOL_MASK)
{
case SYS_SYNC_FIFO:
case SYS_SYNC_PRIORITY:
{
std::lock_guard<std::mutex> lock(m_mutex);
if (m_signaled.size() && m_signaled[0] == tid)
{
m_signaled.erase(m_signaled.begin());
return true;
}
for (auto& v : m_signaled)
{
if (v == tid)
{
return false;
}
}
for (auto& v : m_waiting)
{
if (v == tid)
{
return false;
}
}
LOG_ERROR(HLE, "sleep_queue_t['%s']::pop() failed: thread not found (%d)", m_name.c_str(), tid);
Emu.Pause();
return true; // ???
}
//case SYS_SYNC_RETRY: // ???
//{
// return true; // ???
//}
}
LOG_ERROR(HLE, "sleep_queue_t['%s']::pop() failed: unsupported protocol (0x%x)", m_name.c_str(), protocol);
Emu.Pause();
return false; // ???
}
u32 sleep_queue_t::signal(u32 protocol)
{
u32 res = ~0;
switch (protocol & SYS_SYNC_ATTR_PROTOCOL_MASK)
{
case SYS_SYNC_FIFO:
{
std::lock_guard<std::mutex> lock(m_mutex);
if (m_list.size())
if (m_waiting.size())
{
const u32 res = m_list[0];
res = m_waiting[0];
if (!Emu.GetIdManager().CheckID(res))
{
LOG_ERROR(HLE, "sleep_queue_t::pop(SYS_SYNC_FIFO): invalid thread (%d)", res);
LOG_ERROR(HLE, "sleep_queue_t['%s']::signal(SYS_SYNC_FIFO) failed: invalid thread (%d)", m_name.c_str(), res);
Emu.Pause();
}
m_list.erase(m_list.begin());
return res;
m_waiting.erase(m_waiting.begin());
m_signaled.push_back(res);
}
else
{
return 0;
res = 0;
}
return res;
}
case SYS_SYNC_PRIORITY:
{
@ -80,7 +148,7 @@ u32 sleep_queue_t::pop(u32 protocol)
u64 highest_prio = ~0ull;
u64 sel = ~0ull;
for (auto& v : m_list)
for (auto& v : m_waiting)
{
if (std::shared_ptr<CPUThread> t = Emu.GetCPU().GetThread(v))
{
@ -88,20 +156,21 @@ u32 sleep_queue_t::pop(u32 protocol)
if (prio < highest_prio)
{
highest_prio = prio;
sel = &v - m_list.data();
sel = &v - m_waiting.data();
}
}
else
{
LOG_ERROR(HLE, "sleep_queue_t::pop(SYS_SYNC_PRIORITY): invalid thread (%d)", v);
LOG_ERROR(HLE, "sleep_queue_t['%s']::signal(SYS_SYNC_PRIORITY) failed: invalid thread (%d)", m_name.c_str(), v);
Emu.Pause();
}
}
if (~sel)
{
const u32 res = m_list[sel];
m_list.erase(m_list.begin() + sel);
res = m_waiting[sel];
m_waiting.erase(m_waiting.begin() + sel);
m_signaled.push_back(res);
return res;
}
// fallthrough
@ -112,22 +181,65 @@ u32 sleep_queue_t::pop(u32 protocol)
}
}
LOG_ERROR(HLE, "sleep_queue_t::pop(): unsupported protocol (0x%x)", protocol);
LOG_ERROR(HLE, "sleep_queue_t['%s']::signal(): unsupported protocol (0x%x)", m_name.c_str(), protocol);
Emu.Pause();
return 0;
}
bool sleep_queue_t::invalidate(u32 tid)
bool sleep_queue_t::invalidate(u32 tid, u32 protocol)
{
assert(tid);
switch (protocol & SYS_SYNC_ATTR_PROTOCOL_MASK)
{
case SYS_SYNC_FIFO:
case SYS_SYNC_PRIORITY:
{
std::lock_guard<std::mutex> lock(m_mutex);
for (auto& v : m_waiting)
{
if (v == tid)
{
m_waiting.erase(m_waiting.begin() + (&v - m_waiting.data()));
return true;
}
}
for (auto& v : m_signaled)
{
if (v == tid)
{
m_signaled.erase(m_signaled.begin() + (&v - m_signaled.data()));
return true;
}
}
return false;
}
case SYS_SYNC_RETRY:
{
return true;
}
}
LOG_ERROR(HLE, "sleep_queue_t['%s']::invalidate(): unsupported protocol (0x%x)", m_name.c_str(), protocol);
Emu.Pause();
return 0;
}
bool sleep_queue_t::signal_selected(u32 tid)
{
assert(tid);
std::lock_guard<std::mutex> lock(m_mutex);
for (auto& v : m_list)
for (auto& v : m_waiting)
{
if (v == tid)
{
m_list.erase(m_list.begin() + (&v - m_list.data()));
m_waiting.erase(m_waiting.begin() + (&v - m_waiting.data()));
m_signaled.push_back(tid);
return true;
}
}
@ -139,5 +251,5 @@ u32 sleep_queue_t::count()
{
std::lock_guard<std::mutex> lock(m_mutex);
return (u32)m_list.size();
return (u32)m_waiting.size() + (u32)m_signaled.size();
}

View File

@ -28,7 +28,8 @@ enum
class sleep_queue_t
{
std::vector<u32> m_list;
std::vector<u32> m_waiting;
std::vector<u32> m_signaled;
std::mutex m_mutex;
std::string m_name;
@ -46,7 +47,9 @@ public:
const std::string& get_full_name() { return m_name; }
void push(u32 tid, u32 protocol);
u32 pop(u32 protocol);
bool invalidate(u32 tid);
bool pop(u32 tid, u32 protocol);
u32 signal(u32 protocol);
bool signal_selected(u32 tid);
bool invalidate(u32 tid, u32 protocol);
u32 count();
};
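
The reworked sleep_queue_t above keeps two lists: m_waiting for threads that queued themselves and m_signaled for threads already chosen by signal() or signal_selected(). A waiter pushes its id, polls pop() until it finds itself at the front of the signaled list, and calls invalidate() if it gives up (timeout or abort). A standalone sketch of that handshake under a plain std::mutex (invented names, FIFO only; not the actual RPCS3 code):

#include <algorithm>
#include <cstdint>
#include <mutex>
#include <vector>

struct sleep_queue_sketch
{
    std::mutex m;
    std::vector<uint32_t> waiting, signaled;

    void push(uint32_t tid) { std::lock_guard<std::mutex> l(m); waiting.push_back(tid); }

    // FIFO signal: move the first waiter to the signaled list, return its id (0 if none)
    uint32_t signal()
    {
        std::lock_guard<std::mutex> l(m);
        if (waiting.empty()) return 0;
        uint32_t tid = waiting.front();
        waiting.erase(waiting.begin());
        signaled.push_back(tid);
        return tid;
    }

    // true only when tid is the first signaled thread; it may then proceed
    bool pop(uint32_t tid)
    {
        std::lock_guard<std::mutex> l(m);
        if (!signaled.empty() && signaled.front() == tid) { signaled.erase(signaled.begin()); return true; }
        return false;
    }

    // drop tid from either list, e.g. on timeout or emulator stop
    bool invalidate(uint32_t tid)
    {
        std::lock_guard<std::mutex> l(m);
        for (auto* v : { &waiting, &signaled })
        {
            auto it = std::find(v->begin(), v->end(), tid);
            if (it != v->end()) { v->erase(it); return true; }
        }
        return false;
    }
};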

View File

@ -36,7 +36,7 @@ s32 sys_cond_create(vm::ptr<u32> cond_id, u32 mutex_id, vm::ptr<sys_cond_attribu
*cond_id = id;
mutex->cond_count++; // TODO: check safety
sys_cond.Warning("*** condition created [%s] (mutex_id=%d): id = %d", std::string(attr->name, 8).c_str(), mutex_id, id);
sys_cond.Warning("*** cond created [%s] (mutex_id=%d): id = %d", std::string(attr->name, 8).c_str(), mutex_id, id);
return CELL_OK;
}
@ -70,16 +70,7 @@ s32 sys_cond_signal(u32 cond_id)
return CELL_ESRCH;
}
if (u32 target = cond->queue.pop(cond->mutex->protocol))
{
cond->signal.push(target);
if (Emu.IsStopped())
{
sys_cond.Warning("sys_cond_signal(id=%d) aborted", cond_id);
}
}
u32 target = cond->queue.signal(cond->mutex->protocol);
return CELL_OK;
}
@ -95,10 +86,8 @@ s32 sys_cond_signal_all(u32 cond_id)
Mutex* mutex = cond->mutex.get();
while (u32 target = cond->queue.pop(mutex->protocol))
while (u32 target = cond->queue.signal(mutex->protocol))
{
cond->signal.push(target);
if (Emu.IsStopped())
{
sys_cond.Warning("sys_cond_signal_all(id=%d) aborted", cond_id);
@ -124,21 +113,10 @@ s32 sys_cond_signal_to(u32 cond_id, u32 thread_id)
return CELL_ESRCH;
}
if (!cond->queue.invalidate(thread_id))
if (!cond->queue.signal_selected(thread_id))
{
return CELL_EPERM;
}
u32 target = thread_id;
{
cond->signal.push(target);
}
if (Emu.IsStopped())
{
sys_cond.Warning("sys_cond_signal_to(id=%d, to=%d) aborted", cond_id, thread_id);
}
return CELL_OK;
}
@ -146,6 +124,8 @@ s32 sys_cond_wait(PPUThread& CPU, u32 cond_id, u64 timeout)
{
sys_cond.Log("sys_cond_wait(cond_id=%d, timeout=%lld)", cond_id, timeout);
const u64 start_time = get_system_time();
std::shared_ptr<Cond> cond;
if (!Emu.GetIdManager().GetIDData(cond_id, cond))
{
@ -164,17 +144,15 @@ s32 sys_cond_wait(PPUThread& CPU, u32 cond_id, u64 timeout)
auto old_recursive = mutex->recursive_count.load();
mutex->recursive_count = 0;
if (!mutex->owner.compare_and_swap_test(tid, mutex->queue.pop(mutex->protocol)))
if (!mutex->owner.compare_and_swap_test(tid, mutex->queue.signal(mutex->protocol)))
{
assert(!"sys_cond_wait() failed");
}
bool pushed_in_sleep_queue = false;
const u64 time_start = get_system_time();
bool pushed_in_sleep_queue = false, signaled = false;
while (true)
{
u32 signaled;
if (cond->signal.try_peek(signaled) && signaled == tid) // check signaled threads
if (signaled = signaled || cond->queue.pop(tid, mutex->protocol)) // check if signaled
{
if (mutex->owner.compare_and_swap_test(0, tid)) // try to lock
{
@ -188,12 +166,7 @@ s32 sys_cond_wait(PPUThread& CPU, u32 cond_id, u64 timeout)
}
auto old_owner = mutex->owner.compare_and_swap(0, tid);
if (!old_owner)
{
mutex->queue.invalidate(tid);
break;
}
if (old_owner == tid)
if (!old_owner || old_owner == tid)
{
break;
}
@ -201,9 +174,12 @@ s32 sys_cond_wait(PPUThread& CPU, u32 cond_id, u64 timeout)
std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack
if (timeout && get_system_time() - time_start > timeout)
if (timeout && get_system_time() - start_time > timeout)
{
cond->queue.invalidate(tid);
if (!cond->queue.invalidate(tid, mutex->protocol))
{
assert(!"sys_cond_wait() failed (timeout)");
}
CPU.owned_mutexes--; // ???
return CELL_ETIMEDOUT; // mutex not locked
}
@ -215,7 +191,10 @@ s32 sys_cond_wait(PPUThread& CPU, u32 cond_id, u64 timeout)
}
}
if (pushed_in_sleep_queue && !mutex->queue.invalidate(tid, mutex->protocol))
{
assert(!"sys_cond_wait() failed (locking)");
}
mutex->recursive_count = old_recursive;
cond->signal.pop(cond_id /* unused result */);
return CELL_OK;
}

View File

@ -4,7 +4,7 @@ struct sys_cond_attribute
{
be_t<u32> pshared;
be_t<u64> ipc_key;
be_t<int> flags;
be_t<s32> flags;
union
{
char name[8];
@ -15,7 +15,6 @@ struct sys_cond_attribute
struct Cond
{
std::shared_ptr<Mutex> mutex; // associated with mutex
squeue_t<u32, 32> signal;
sleep_queue_t queue;
Cond(std::shared_ptr<Mutex>& mutex, u64 name)

View File

@ -7,6 +7,7 @@
#include "Emu/Cell/PPUThread.h"
#include "Emu/Event.h"
#include "sleep_queue_type.h"
#include "sys_time.h"
#include "sys_process.h"
#include "sys_event.h"
@ -156,6 +157,8 @@ s32 sys_event_queue_receive(u32 equeue_id, vm::ptr<sys_event_data> dummy_event,
sys_event.Log("sys_event_queue_receive(equeue_id=%d, dummy_event_addr=0x%x, timeout=%lld)",
equeue_id, dummy_event.addr(), timeout);
const u64 start_time = get_system_time();
std::shared_ptr<EventQueue> eq;
if (!Emu.GetIdManager().GetIDData(equeue_id, eq))
{
@ -167,22 +170,20 @@ s32 sys_event_queue_receive(u32 equeue_id, vm::ptr<sys_event_data> dummy_event,
return CELL_EINVAL;
}
u32 tid = GetCurrentPPUThread().GetId();
const u32 tid = GetCurrentPPUThread().GetId();
eq->sq.push(tid, eq->protocol); // add thread to sleep queue
timeout = timeout ? (timeout / 1000) : ~0;
u64 counter = 0;
while (true)
{
u32 old_owner = eq->owner.compare_and_swap(0, tid);
const u32 old_owner = eq->owner.compare_and_swap(0, tid);
const s32 res = old_owner ? (old_owner == tid ? 1 : 2) : 0;
switch (res)
{
case 0:
{
const u32 next = eq->events.count() ? eq->sq.pop(eq->protocol) : 0;
const u32 next = eq->events.count() ? eq->sq.signal(eq->protocol) : 0;
if (next != tid)
{
if (!eq->owner.compare_and_swap_test(tid, next))
@ -209,23 +210,39 @@ s32 sys_event_queue_receive(u32 equeue_id, vm::ptr<sys_event_data> dummy_event,
t.GPR[5] = event.data1;
t.GPR[6] = event.data2;
t.GPR[7] = event.data3;
if (!eq->sq.invalidate(tid, eq->protocol))
{
assert(!"sys_event_queue_receive() failed (receiving)");
}
return CELL_OK;
}
}
if (!~old_owner)
{
eq->sq.invalidate(tid);
if (!eq->sq.invalidate(tid, eq->protocol))
{
assert(!"sys_event_queue_receive() failed (cancelling)");
}
return CELL_ECANCELED;
}
std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack
if (counter++ > timeout || Emu.IsStopped())
if (timeout && get_system_time() - start_time > timeout)
{
if (Emu.IsStopped()) sys_event.Warning("sys_event_queue_receive(equeue=%d) aborted", equeue_id);
eq->sq.invalidate(tid);
if (!eq->sq.invalidate(tid, eq->protocol))
{
assert(!"sys_event_queue_receive() failed (timeout)");
}
return CELL_ETIMEDOUT;
}
if (Emu.IsStopped())
{
sys_event.Warning("sys_event_queue_receive(equeue=%d) aborted", equeue_id);
return CELL_OK;
}
}
}

View File

@ -43,6 +43,10 @@ u32 EventFlag::check()
assert(!"EventFlag::check(): waiter not found");
}
}
else
{
assert(!"EventFlag::check(): unknown protocol");
}
}
}
@ -51,8 +55,7 @@ u32 EventFlag::check()
s32 sys_event_flag_create(vm::ptr<u32> eflag_id, vm::ptr<sys_event_flag_attr> attr, u64 init)
{
sys_event_flag.Warning("sys_event_flag_create(eflag_id_addr=0x%x, attr_addr=0x%x, init=0x%llx)",
eflag_id.addr(), attr.addr(), init);
sys_event_flag.Warning("sys_event_flag_create(eflag_id_addr=0x%x, attr_addr=0x%x, init=0x%llx)", eflag_id.addr(), attr.addr(), init);
if (!eflag_id)
{
@ -76,7 +79,9 @@ s32 sys_event_flag_create(vm::ptr<u32> eflag_id, vm::ptr<sys_event_flag_attr> at
}
if (attr->pshared.ToBE() != se32(0x200))
{
return CELL_EINVAL;
}
switch (attr->type.ToBE())
{

View File

@ -67,10 +67,8 @@ s32 sys_lwcond_signal(vm::ptr<sys_lwcond_t> lwcond)
auto mutex = vm::ptr<sys_lwmutex_t>::make(lwcond->lwmutex.addr());
if (u32 target = lw->queue.pop(mutex->attribute))
if (u32 target = lw->queue.signal(mutex->attribute))
{
lw->signal.push(target);
if (Emu.IsStopped())
{
sys_lwcond.Warning("sys_lwcond_signal(id=%d) aborted", (u32)lwcond->lwcond_queue);
@ -93,10 +91,8 @@ s32 sys_lwcond_signal_all(vm::ptr<sys_lwcond_t> lwcond)
auto mutex = vm::ptr<sys_lwmutex_t>::make(lwcond->lwmutex.addr());
while (u32 target = lw->queue.pop(mutex->attribute))
while (u32 target = lw->queue.signal(mutex->attribute))
{
lw->signal.push(target);
if (Emu.IsStopped())
{
sys_lwcond.Warning("sys_lwcond_signal_all(id=%d) aborted", (u32)lwcond->lwcond_queue);
@ -122,22 +118,11 @@ s32 sys_lwcond_signal_to(vm::ptr<sys_lwcond_t> lwcond, u32 ppu_thread_id)
return CELL_ESRCH;
}
if (!lw->queue.invalidate(ppu_thread_id))
if (!lw->queue.signal_selected(ppu_thread_id))
{
return CELL_EPERM;
}
u32 target = ppu_thread_id;
{
lw->signal.push(target);
if (Emu.IsStopped())
{
sys_lwcond.Warning("sys_lwcond_signal_to(id=%d, to=%d) aborted", (u32)lwcond->lwcond_queue, ppu_thread_id);
return CELL_OK;
}
}
return CELL_OK;
}
@ -145,6 +130,8 @@ s32 sys_lwcond_wait(PPUThread& CPU, vm::ptr<sys_lwcond_t> lwcond, u64 timeout)
{
sys_lwcond.Log("sys_lwcond_wait(lwcond_addr=0x%x, timeout=%lld)", lwcond.addr(), timeout);
const u64 start_time = get_system_time();
std::shared_ptr<Lwcond> lw;
if (!Emu.GetIdManager().GetIDData((u32)lwcond->lwcond_queue, lw))
{
@ -173,19 +160,18 @@ s32 sys_lwcond_wait(PPUThread& CPU, vm::ptr<sys_lwcond_t> lwcond, u64 timeout)
auto old_recursive = mutex->recursive_count.read_relaxed();
mutex->recursive_count.exchange(be_t<u32>::make(0));
be_t<u32> target = be_t<u32>::make(sq->pop(mutex->attribute));
be_t<u32> target = be_t<u32>::make(sq->signal(mutex->attribute));
if (!mutex->owner.compare_and_swap_test(tid, target))
{
assert(!"sys_lwcond_wait(): mutex unlocking failed");
}
const u64 time_start = get_system_time();
bool signaled = false;
while (true)
{
u32 signaled;
if (lw->signal.try_peek(signaled) && signaled == tid_le) // check signaled threads
if (signaled = signaled || lw->queue.pop(tid, mutex->attribute)) // check signaled threads
{
s32 res = mutex->lock(tid, timeout ? get_system_time() - time_start : 0); // this is bad
s32 res = mutex->lock(tid, timeout ? get_system_time() - start_time : 0); // this is bad
if (res == CELL_OK)
{
break;
@ -196,35 +182,25 @@ s32 sys_lwcond_wait(PPUThread& CPU, vm::ptr<sys_lwcond_t> lwcond, u64 timeout)
case static_cast<int>(CELL_EDEADLK):
{
sys_lwcond.Error("sys_lwcond_wait(id=%d): associated mutex was locked", (u32)lwcond->lwcond_queue);
lw->queue.invalidate(tid_le);
lw->signal.pop(tid_le /* unused result */);
return CELL_OK; // mutex not locked (but already locked in the incorrect way)
}
case static_cast<int>(CELL_ESRCH):
{
sys_lwcond.Error("sys_lwcond_wait(id=%d): associated mutex not found (%d)", (u32)lwcond->lwcond_queue, (u32)mutex->sleep_queue);
lw->queue.invalidate(tid_le);
lw->signal.pop(tid_le /* unused result */);
return CELL_ESRCH; // mutex not locked
}
case static_cast<int>(CELL_ETIMEDOUT):
{
lw->queue.invalidate(tid_le);
lw->signal.pop(tid_le /* unused result */);
return CELL_ETIMEDOUT; // mutex not locked
}
case static_cast<int>(CELL_EINVAL):
{
sys_lwcond.Error("sys_lwcond_wait(id=%d): invalid associated mutex (%d)", (u32)lwcond->lwcond_queue, (u32)mutex->sleep_queue);
lw->queue.invalidate(tid_le);
lw->signal.pop(tid_le /* unused result */);
return CELL_EINVAL; // mutex not locked
}
default:
{
sys_lwcond.Error("sys_lwcond_wait(id=%d): mutex->lock() returned 0x%x", (u32)lwcond->lwcond_queue, res);
lw->queue.invalidate(tid_le);
lw->signal.pop(tid_le /* unused result */);
return CELL_EINVAL; // mutex not locked
}
}
@ -232,9 +208,12 @@ s32 sys_lwcond_wait(PPUThread& CPU, vm::ptr<sys_lwcond_t> lwcond, u64 timeout)
std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack
if (timeout && get_system_time() - time_start > timeout)
if (timeout && get_system_time() - start_time > timeout)
{
lw->queue.invalidate(tid_le);
if (!lw->queue.invalidate(tid_le, mutex->attribute))
{
assert(!"sys_lwcond_wait() failed (timeout)");
}
return CELL_ETIMEDOUT; // mutex not locked
}
@ -246,6 +225,5 @@ s32 sys_lwcond_wait(PPUThread& CPU, vm::ptr<sys_lwcond_t> lwcond, u64 timeout)
}
mutex->recursive_count.exchange(old_recursive);
lw->signal.pop(tid_le /* unused result */);
return CELL_OK;
}

View File

@ -19,7 +19,6 @@ struct sys_lwcond_t
struct Lwcond
{
squeue_t<u32, 32> signal;
sleep_queue_t queue;
const u32 addr;

View File

@ -157,7 +157,7 @@ s32 sys_lwmutex_t::unlock(be_t<u32> tid)
return CELL_ESRCH;
}
if (!owner.compare_and_swap_test(tid, be_t<u32>::make(sq->pop(attribute))))
if (!owner.compare_and_swap_test(tid, be_t<u32>::make(sq->signal(attribute))))
{
assert(!"sys_lwmutex_t::unlock() failed");
}
@ -168,6 +168,8 @@ s32 sys_lwmutex_t::unlock(be_t<u32> tid)
s32 sys_lwmutex_t::lock(be_t<u32> tid, u64 timeout)
{
const u64 start_time = get_system_time();
switch (s32 res = trylock(tid))
{
case static_cast<s32>(CELL_EBUSY): break;
@ -182,25 +184,22 @@ s32 sys_lwmutex_t::lock(be_t<u32> tid, u64 timeout)
sq->push(tid, attribute);
const u64 time_start = get_system_time();
while (true)
{
auto old_owner = owner.compare_and_swap(be_t<u32>::make(0), tid);
if (!old_owner.ToBE())
{
sq->invalidate(tid);
break;
}
if (old_owner == tid)
if (!old_owner.ToBE() || old_owner == tid)
{
break;
}
std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack
if (timeout && get_system_time() - time_start > timeout)
if (timeout && get_system_time() - start_time > timeout)
{
sq->invalidate(tid);
if (!sq->invalidate(tid, attribute))
{
assert(!"sys_lwmutex_t::lock() failed (timeout)");
}
return CELL_ETIMEDOUT;
}
@ -211,6 +210,10 @@ s32 sys_lwmutex_t::lock(be_t<u32> tid, u64 timeout)
}
}
if (!sq->invalidate(tid, attribute))
{
assert(!"sys_lwmutex_t::lock() failed (locking)");
}
recursive_count.exchange(be_t<u32>::make(1));
return CELL_OK;
}

View File

@ -81,26 +81,11 @@ s32 sys_mutex_destroy(PPUThread& CPU, u32 mutex_id)
return CELL_EPERM;
}
const u32 tid = CPU.GetId();
if (!mutex->owner.compare_and_swap_test(0, tid)) // check if locked
if (!mutex->owner.compare_and_swap_test(0, ~0)) // check if locked and make unusable
{
return CELL_EBUSY;
}
if (mutex->queue.count()) // TODO: safely make object unusable
{
if (!mutex->owner.compare_and_swap_test(tid, 0))
{
assert(!"sys_mutex_destroy() failed (busy)");
}
return CELL_EBUSY;
}
if (!mutex->owner.compare_and_swap_test(tid, ~0))
{
assert(!"sys_mutex_destroy() failed");
}
Emu.GetIdManager().RemoveID(mutex_id);
return CELL_OK;
}
@ -109,6 +94,8 @@ s32 sys_mutex_lock(PPUThread& CPU, u32 mutex_id, u64 timeout)
{
sys_mutex.Log("sys_mutex_lock(mutex_id=%d, timeout=%lld)", mutex_id, timeout);
const u64 start_time = get_system_time();
std::shared_ptr<Mutex> mutex;
if (!Emu.GetIdManager().GetIDData(mutex_id, mutex))
{
@ -117,7 +104,12 @@ s32 sys_mutex_lock(PPUThread& CPU, u32 mutex_id, u64 timeout)
const u32 tid = CPU.GetId();
if (mutex->owner.read_sync() == tid)
const u32 old_owner = mutex->owner.compare_and_swap(0, tid);
if (!~old_owner)
{
return CELL_ESRCH; // mutex is going to be destroyed
}
if (old_owner == tid)
{
if (mutex->is_recursive)
{
@ -133,8 +125,7 @@ s32 sys_mutex_lock(PPUThread& CPU, u32 mutex_id, u64 timeout)
return CELL_EDEADLK;
}
}
if (mutex->owner.compare_and_swap_test(0, tid))
else if (!old_owner)
{
mutex->recursive_count = 1;
CPU.owned_mutexes++;
@ -143,25 +134,22 @@ s32 sys_mutex_lock(PPUThread& CPU, u32 mutex_id, u64 timeout)
mutex->queue.push(tid, mutex->protocol);
const u64 time_start = get_system_time();
while (true)
{
auto old_owner = mutex->owner.compare_and_swap(0, tid);
if (!old_owner)
{
mutex->queue.invalidate(tid);
break;
}
if (old_owner == tid)
if (!old_owner || old_owner == tid)
{
break;
}
std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack
if (timeout && get_system_time() - time_start > timeout)
if (timeout && get_system_time() - start_time > timeout)
{
mutex->queue.invalidate(tid);
if (!mutex->queue.invalidate(tid, mutex->protocol))
{
assert(!"sys_mutex_lock() failed (timeout)");
}
return CELL_ETIMEDOUT;
}
@ -172,6 +160,10 @@ s32 sys_mutex_lock(PPUThread& CPU, u32 mutex_id, u64 timeout)
}
}
if (!mutex->queue.invalidate(tid, mutex->protocol))
{
assert(!"sys_mutex_lock() failed (locking)");
}
mutex->recursive_count = 1;
CPU.owned_mutexes++;
return CELL_OK;
@ -189,7 +181,12 @@ s32 sys_mutex_trylock(PPUThread& CPU, u32 mutex_id)
const u32 tid = CPU.GetId();
if (mutex->owner.read_sync() == tid)
const u32 old_owner = mutex->owner.compare_and_swap(0, tid);
if (!~old_owner)
{
return CELL_ESRCH; // mutex is going to be destroyed
}
if (old_owner == tid)
{
if (mutex->is_recursive)
{
@ -205,15 +202,14 @@ s32 sys_mutex_trylock(PPUThread& CPU, u32 mutex_id)
return CELL_EDEADLK;
}
}
if (!mutex->owner.compare_and_swap_test(0, tid))
else if (!old_owner)
{
return CELL_EBUSY;
mutex->recursive_count = 1;
CPU.owned_mutexes++;
return CELL_OK;
}
mutex->recursive_count = 1;
CPU.owned_mutexes++;
return CELL_OK;
return CELL_EBUSY;
}
s32 sys_mutex_unlock(PPUThread& CPU, u32 mutex_id)
@ -228,7 +224,12 @@ s32 sys_mutex_unlock(PPUThread& CPU, u32 mutex_id)
const u32 tid = CPU.GetId();
if (mutex->owner.read_sync() != tid)
const u32 owner = mutex->owner.read_sync();
if (!~owner)
{
return CELL_ESRCH; // mutex is going to be destroyed
}
if (owner != tid)
{
return CELL_EPERM;
}
@ -241,7 +242,7 @@ s32 sys_mutex_unlock(PPUThread& CPU, u32 mutex_id)
if (!--mutex->recursive_count)
{
if (!mutex->owner.compare_and_swap_test(tid, mutex->queue.pop(mutex->protocol)))
if (!mutex->owner.compare_and_swap_test(tid, mutex->queue.signal(mutex->protocol)))
{
assert(!"sys_mutex_unlock() failed");
}
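
sys_mutex_destroy() above now marks a free mutex as unusable by swapping owner 0 for ~0, and sys_mutex_lock/trylock/unlock return CELL_ESRCH when they observe that sentinel. A standalone sketch of the same poisoned-owner idea with std::atomic (illustrative only; the return codes are invented):

#include <atomic>
#include <cstdint>

std::atomic<uint32_t> owner{ 0 };       // 0 = free, ~0 = destroyed, otherwise a thread id

bool destroy()                          // succeeds only if currently unlocked
{
    uint32_t expected = 0;
    return owner.compare_exchange_strong(expected, ~0u);
}

int try_lock(uint32_t tid)              // -1 = destroyed, 0 = locked, 1 = busy
{
    uint32_t expected = 0;
    if (owner.compare_exchange_strong(expected, tid)) return 0;
    if (expected == ~0u) return -1;     // object is being torn down
    return 1;
}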

View File

@ -6,6 +6,7 @@
#include "Emu/Cell/PPUThread.h"
#include "sleep_queue_type.h"
#include "sys_time.h"
#include "sys_rwlock.h"
SysCallBase sys_rwlock("sys_rwlock");
@ -14,31 +15,27 @@ s32 sys_rwlock_create(vm::ptr<u32> rw_lock_id, vm::ptr<sys_rwlock_attribute_t> a
{
sys_rwlock.Warning("sys_rwlock_create(rw_lock_id_addr=0x%x, attr_addr=0x%x)", rw_lock_id.addr(), attr.addr());
if (!attr)
return CELL_EFAULT;
switch (attr->attr_protocol.ToBE())
switch (attr->protocol.ToBE())
{
case se32(SYS_SYNC_PRIORITY): sys_rwlock.Todo("SYS_SYNC_PRIORITY"); break;
case se32(SYS_SYNC_PRIORITY): break;
case se32(SYS_SYNC_RETRY): sys_rwlock.Error("SYS_SYNC_RETRY"); return CELL_EINVAL;
case se32(SYS_SYNC_PRIORITY_INHERIT): sys_rwlock.Todo("SYS_SYNC_PRIORITY_INHERIT"); break;
case se32(SYS_SYNC_FIFO): break;
default: return CELL_EINVAL;
}
if (attr->attr_pshared.ToBE() != se32(0x200))
if (attr->pshared.ToBE() != se32(0x200))
{
sys_rwlock.Error("Invalid attr_pshared(0x%x)", (u32)attr->attr_pshared);
sys_rwlock.Error("Invalid pshared value (0x%x)", (u32)attr->pshared);
return CELL_EINVAL;
}
std::shared_ptr<RWLock> rw(new RWLock((u32)attr->attr_protocol, attr->name_u64));
u32 id = sys_rwlock.GetNewId(rw, TYPE_RWLOCK);
std::shared_ptr<RWLock> rw(new RWLock((u32)attr->protocol, attr->name_u64));
const u32 id = sys_rwlock.GetNewId(rw, TYPE_RWLOCK);
*rw_lock_id = id;
rw->wqueue.set_full_name(fmt::Format("Rwlock(%d)", id));
sys_rwlock.Warning("*** rwlock created [%s] (protocol=0x%x): id = %d",
std::string(attr->name, 8).c_str(), (u32)attr->attr_protocol, id);
sys_rwlock.Warning("*** rwlock created [%s] (protocol=0x%x): id = %d", std::string(attr->name, 8).c_str(), rw->protocol, id);
return CELL_OK;
}
@ -47,14 +44,17 @@ s32 sys_rwlock_destroy(u32 rw_lock_id)
sys_rwlock.Warning("sys_rwlock_destroy(rw_lock_id=%d)", rw_lock_id);
std::shared_ptr<RWLock> rw;
if (!sys_rwlock.CheckId(rw_lock_id, rw)) return CELL_ESRCH;
if (!sys_rwlock.CheckId(rw_lock_id, rw))
{
return CELL_ESRCH;
}
std::lock_guard<std::mutex> lock(rw->m_lock);
if (rw->wlock_queue.size() || rw->rlock_list.size() || rw->wlock_thread) return CELL_EBUSY;
if (!rw->sync.compare_and_swap_test({ 0, 0 }, { -1, -1 })) // check if locked and make unusable
{
return CELL_EBUSY;
}
Emu.GetIdManager().RemoveID(rw_lock_id);
return CELL_OK;
}
@ -62,37 +62,46 @@ s32 sys_rwlock_rlock(u32 rw_lock_id, u64 timeout)
{
sys_rwlock.Log("sys_rwlock_rlock(rw_lock_id=%d, timeout=%lld)", rw_lock_id, timeout);
const u64 start_time = get_system_time();
std::shared_ptr<RWLock> rw;
if (!sys_rwlock.CheckId(rw_lock_id, rw)) return CELL_ESRCH;
const u32 tid = GetCurrentPPUThread().GetId();
if (rw->rlock_trylock(tid)) return CELL_OK;
u64 counter = 0;
const u64 max_counter = timeout ? (timeout / 1000) : 20000;
do
if (!sys_rwlock.CheckId(rw_lock_id, rw))
{
if (Emu.IsStopped())
return CELL_ESRCH;
}
while (true)
{
bool succeeded;
rw->sync.atomic_op_sync([&succeeded](RWLock::sync_var_t& sync)
{
sys_rwlock.Warning("sys_rwlock_rlock(rw_lock_id=%d, ...) aborted", rw_lock_id);
return CELL_ETIMEDOUT;
assert(~sync.readers);
if (succeeded = !sync.writer)
{
sync.readers++;
}
});
if (succeeded)
{
break;
}
std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack
if (rw->rlock_trylock(tid)) return CELL_OK;
if (counter++ > max_counter)
if (timeout && get_system_time() - start_time > timeout)
{
if (!timeout)
{
counter = 0;
}
else
{
return CELL_ETIMEDOUT;
}
}
} while (true);
return CELL_ETIMEDOUT;
}
if (Emu.IsStopped())
{
sys_rwlock.Warning("sys_rwlock_rlock(id=%d) aborted", rw_lock_id);
return CELL_OK;
}
}
return CELL_OK;
}
s32 sys_rwlock_tryrlock(u32 rw_lock_id)
@ -100,11 +109,27 @@ s32 sys_rwlock_tryrlock(u32 rw_lock_id)
sys_rwlock.Log("sys_rwlock_tryrlock(rw_lock_id=%d)", rw_lock_id);
std::shared_ptr<RWLock> rw;
if (!sys_rwlock.CheckId(rw_lock_id, rw)) return CELL_ESRCH;
if (!sys_rwlock.CheckId(rw_lock_id, rw))
{
return CELL_ESRCH;
}
if (!rw->rlock_trylock(GetCurrentPPUThread().GetId())) return CELL_EBUSY;
bool succeeded;
rw->sync.atomic_op_sync([&succeeded](RWLock::sync_var_t& sync)
{
assert(~sync.readers);
if (succeeded = !sync.writer)
{
sync.readers++;
}
});
return CELL_OK;
if (succeeded)
{
return CELL_OK;
}
return CELL_EBUSY;
}
s32 sys_rwlock_runlock(u32 rw_lock_id)
@ -112,75 +137,134 @@ s32 sys_rwlock_runlock(u32 rw_lock_id)
sys_rwlock.Log("sys_rwlock_runlock(rw_lock_id=%d)", rw_lock_id);
std::shared_ptr<RWLock> rw;
if (!sys_rwlock.CheckId(rw_lock_id, rw)) return CELL_ESRCH;
if (!sys_rwlock.CheckId(rw_lock_id, rw))
{
return CELL_ESRCH;
}
if (!rw->rlock_unlock(GetCurrentPPUThread().GetId())) return CELL_EPERM;
bool succeeded;
rw->sync.atomic_op_sync([&succeeded](RWLock::sync_var_t& sync)
{
if (succeeded = sync.readers != 0)
{
assert(!sync.writer);
sync.readers--;
}
});
return CELL_OK;
if (succeeded)
{
return CELL_OK;
}
return CELL_EPERM;
}
s32 sys_rwlock_wlock(u32 rw_lock_id, u64 timeout)
s32 sys_rwlock_wlock(PPUThread& CPU, u32 rw_lock_id, u64 timeout)
{
sys_rwlock.Log("sys_rwlock_wlock(rw_lock_id=%d, timeout=%lld)", rw_lock_id, timeout);
const u64 start_time = get_system_time();
std::shared_ptr<RWLock> rw;
if (!sys_rwlock.CheckId(rw_lock_id, rw)) return CELL_ESRCH;
const u32 tid = GetCurrentPPUThread().GetId();
if (!rw->wlock_check(tid)) return CELL_EDEADLK;
if (rw->wlock_trylock(tid, true)) return CELL_OK;
u64 counter = 0;
const u64 max_counter = timeout ? (timeout / 1000) : 20000;
do
if (!sys_rwlock.CheckId(rw_lock_id, rw))
{
if (Emu.IsStopped())
return CELL_ESRCH;
}
const u32 tid = CPU.GetId();
if (rw->sync.compare_and_swap_test({ 0, 0 }, { 0, tid }))
{
return CELL_OK;
}
if (rw->sync.read_relaxed().writer == tid)
{
return CELL_EDEADLK;
}
rw->wqueue.push(tid, rw->protocol);
while (true)
{
auto old_sync = rw->sync.compare_and_swap({ 0, 0 }, { 0, tid });
if (!old_sync.readers && (!old_sync.writer || old_sync.writer == tid))
{
sys_rwlock.Warning("sys_rwlock_wlock(rw_lock_id=%d, ...) aborted", rw_lock_id);
return CELL_ETIMEDOUT;
break;
}
std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack
if (rw->wlock_trylock(tid, true)) return CELL_OK;
if (counter++ > max_counter)
if (timeout && get_system_time() - start_time > timeout)
{
if (!timeout)
if (!rw->wqueue.invalidate(tid, rw->protocol))
{
counter = 0;
assert(!"sys_rwlock_wlock() failed (timeout)");
}
else
{
return CELL_ETIMEDOUT;
}
}
} while (true);
return CELL_ETIMEDOUT;
}
if (Emu.IsStopped())
{
sys_rwlock.Warning("sys_rwlock_wlock(id=%d) aborted", rw_lock_id);
return CELL_OK;
}
}
if (!rw->wqueue.invalidate(tid, rw->protocol))
{
assert(!"sys_rwlock_wlock() failed (locking)");
}
return CELL_OK;
}
s32 sys_rwlock_trywlock(u32 rw_lock_id)
s32 sys_rwlock_trywlock(PPUThread& CPU, u32 rw_lock_id)
{
sys_rwlock.Log("sys_rwlock_trywlock(rw_lock_id=%d)", rw_lock_id);
std::shared_ptr<RWLock> rw;
if (!sys_rwlock.CheckId(rw_lock_id, rw)) return CELL_ESRCH;
const u32 tid = GetCurrentPPUThread().GetId();
if (!sys_rwlock.CheckId(rw_lock_id, rw))
{
return CELL_ESRCH;
}
if (!rw->wlock_check(tid)) return CELL_EDEADLK;
const u32 tid = CPU.GetId();
if (!rw->wlock_trylock(tid, false)) return CELL_EBUSY;
if (rw->sync.compare_and_swap_test({ 0, 0 }, { 0, tid }))
{
return CELL_OK;
}
return CELL_OK;
if (rw->sync.read_relaxed().writer == tid)
{
return CELL_EDEADLK;
}
return CELL_EBUSY;
}
s32 sys_rwlock_wunlock(u32 rw_lock_id)
s32 sys_rwlock_wunlock(PPUThread& CPU, u32 rw_lock_id)
{
sys_rwlock.Log("sys_rwlock_wunlock(rw_lock_id=%d)", rw_lock_id);
std::shared_ptr<RWLock> rw;
if (!sys_rwlock.CheckId(rw_lock_id, rw)) return CELL_ESRCH;
if (!sys_rwlock.CheckId(rw_lock_id, rw))
{
return CELL_ESRCH;
}
if (!rw->wlock_unlock(GetCurrentPPUThread().GetId())) return CELL_EPERM;
const u32 tid = CPU.GetId();
const u32 target = rw->wqueue.signal(rw->protocol);
return CELL_OK;
if (rw->sync.compare_and_swap_test({ 0, tid }, { 0, target }))
{
if (!target)
{
// TODO: signal readers
}
return CELL_OK;
}
return CELL_EPERM;
}

View File

@ -2,11 +2,11 @@
struct sys_rwlock_attribute_t
{
be_t<u32> attr_protocol;
be_t<u32> attr_pshared; // == 0x200 (NOT SHARED)
be_t<u64> key; // process-shared key (not used)
be_t<s32> flags; // process-shared flags (not used)
be_t<u32> pad; // not used
be_t<u32> protocol;
be_t<u32> pshared;
be_t<u64> ipc_key;
be_t<s32> flags;
be_t<u32> pad;
union
{
char name[8];
@ -16,136 +16,22 @@ struct sys_rwlock_attribute_t
struct RWLock
{
std::mutex m_lock; // internal lock
u32 wlock_thread; // write lock owner
std::vector<u32> wlock_queue; // write lock queue
std::vector<u32> rlock_list; // read lock list
u32 m_protocol; // TODO
union
struct sync_var_t
{
u64 m_name_u64;
char m_name[8];
u32 readers; // reader count
u32 writer; // writer thread id
};
sleep_queue_t wqueue;
atomic_le_t<sync_var_t> sync;
const u32 protocol;
RWLock(u32 protocol, u64 name)
: m_protocol(protocol)
, m_name_u64(name)
, wlock_thread(0)
: protocol(protocol)
, wqueue(name)
{
}
bool rlock_trylock(u32 tid)
{
std::lock_guard<std::mutex> lock(m_lock);
if (!wlock_thread && !wlock_queue.size())
{
rlock_list.push_back(tid);
return true;
}
return false;
}
bool rlock_unlock(u32 tid)
{
std::lock_guard<std::mutex> lock(m_lock);
for (u32 i = (u32)rlock_list.size() - 1; ~i; i--)
{
if (rlock_list[i] == tid)
{
rlock_list.erase(rlock_list.begin() + i);
return true;
}
}
return false;
}
bool wlock_check(u32 tid)
{
std::lock_guard<std::mutex> lock(m_lock);
if (wlock_thread == tid)
{
return false; // deadlock
}
for (u32 i = (u32)rlock_list.size() - 1; ~i; i--)
{
if (rlock_list[i] == tid)
{
return false; // deadlock
}
}
return true;
}
bool wlock_trylock(u32 tid, bool enqueue)
{
std::lock_guard<std::mutex> lock(m_lock);
if (wlock_thread || rlock_list.size()) // already locked
{
if (!enqueue)
{
return false; // do not enqueue
}
for (u32 i = (u32)wlock_queue.size() - 1; ~i; i--)
{
if (wlock_queue[i] == tid)
{
return false; // already enqueued
}
}
wlock_queue.push_back(tid); // enqueue new thread
return false;
}
else
{
if (wlock_queue.size())
{
// SYNC_FIFO only yet
if (wlock_queue[0] == tid)
{
wlock_thread = tid;
wlock_queue.erase(wlock_queue.begin());
return true;
}
else
{
if (!enqueue)
{
return false; // do not enqueue
}
for (u32 i = (u32)wlock_queue.size() - 1; ~i; i--)
{
if (wlock_queue[i] == tid)
{
return false; // already enqueued
}
}
wlock_queue.push_back(tid); // enqueue new thread
return false;
}
}
else
{
wlock_thread = tid; // easy way
return true;
}
}
}
bool wlock_unlock(u32 tid)
{
std::lock_guard<std::mutex> lock(m_lock);
if (wlock_thread == tid)
{
wlock_thread = 0;
return true;
}
return false;
sync.write_relaxed({ 0, 0 });
}
};
@ -155,6 +41,6 @@ s32 sys_rwlock_destroy(u32 rw_lock_id);
s32 sys_rwlock_rlock(u32 rw_lock_id, u64 timeout);
s32 sys_rwlock_tryrlock(u32 rw_lock_id);
s32 sys_rwlock_runlock(u32 rw_lock_id);
s32 sys_rwlock_wlock(u32 rw_lock_id, u64 timeout);
s32 sys_rwlock_trywlock(u32 rw_lock_id);
s32 sys_rwlock_wunlock(u32 rw_lock_id);
s32 sys_rwlock_wlock(PPUThread& CPU, u32 rw_lock_id, u64 timeout);
s32 sys_rwlock_trywlock(PPUThread& CPU, u32 rw_lock_id);
s32 sys_rwlock_wunlock(PPUThread& CPU, u32 rw_lock_id);
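
The rewritten RWLock above replaces the mutex-protected reader list and writer queue with a single atomic sync_var_t holding the reader count and the writer's thread id, so the fast paths become plain compare-and-swap operations. A standalone sketch of that state machine with std::atomic (simplified; no sleep queue or protocol handling, not the atomic_le_t API):

#include <atomic>
#include <cstdint>

struct sync_var_t { uint32_t readers; uint32_t writer; };

std::atomic<sync_var_t> sync{ sync_var_t{ 0, 0 } };

bool try_wlock(uint32_t tid)
{
    sync_var_t expected{ 0, 0 };                      // no readers, no writer
    return sync.compare_exchange_strong(expected, sync_var_t{ 0, tid });
}

bool try_rlock()
{
    sync_var_t old = sync.load();
    do
    {
        if (old.writer) return false;                 // a writer holds the lock
    }
    while (!sync.compare_exchange_weak(old, sync_var_t{ old.readers + 1, old.writer }));
    return true;
}

void runlock()
{
    sync_var_t old = sync.load();
    while (!sync.compare_exchange_weak(old, sync_var_t{ old.readers - 1, old.writer })) {}
}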

View File

@ -88,6 +88,8 @@ s32 sys_semaphore_wait(u32 sem_id, u64 timeout)
{
sys_semaphore.Log("sys_semaphore_wait(sem_id=%d, timeout=%lld)", sem_id, timeout);
const u64 start_time = get_system_time();
std::shared_ptr<Semaphore> sem;
if (!Emu.GetIdManager().GetIDData(sem_id, sem))
{
@ -95,46 +97,56 @@ s32 sys_semaphore_wait(u32 sem_id, u64 timeout)
}
const u32 tid = GetCurrentPPUThread().GetId();
const u64 start_time = get_system_time();
s32 old_value;
{
std::lock_guard<std::mutex> lock(sem->mutex);
if (sem->value > 0)
LV2_LOCK(0);
sem->value.atomic_op_sync([&old_value](s32& value)
{
old_value = value;
if (value > 0)
{
value--;
}
});
if (old_value > 0)
{
sem->value--;
return CELL_OK;
}
sem->queue.push(tid, sem->protocol);
}
while (true)
{
if (sem->queue.pop(tid, sem->protocol))
{
break;
}
assert(!sem->value.read_sync());
std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack
if (timeout && get_system_time() - start_time > timeout)
{
if (!sem->queue.invalidate(tid, sem->protocol))
{
assert(!"sys_semaphore_wait() failed (timeout)");
}
return CELL_ETIMEDOUT;
}
if (Emu.IsStopped())
{
sys_semaphore.Warning("sys_semaphore_wait(%d) aborted", sem_id);
return CELL_OK;
}
if (timeout && get_system_time() - start_time > timeout)
{
sem->queue.invalidate(tid);
return CELL_ETIMEDOUT;
}
if (tid == sem->signal)
{
std::lock_guard<std::mutex> lock(sem->mutex);
if (tid != sem->signal)
{
continue;
}
sem->signal = 0;
return CELL_OK;
}
std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack
}
return CELL_OK;
}
s32 sys_semaphore_trywait(u32 sem_id)
@ -147,11 +159,19 @@ s32 sys_semaphore_trywait(u32 sem_id)
return CELL_ESRCH;
}
std::lock_guard<std::mutex> lock(sem->mutex);
s32 old_value;
if (sem->value > 0)
sem->value.atomic_op_sync([&old_value](s32& value)
{
old_value = value;
if (value > 0)
{
value--;
}
});
if (old_value > 0)
{
sem->value--;
return CELL_OK;
}
else
@ -175,7 +195,9 @@ s32 sys_semaphore_post(u32 sem_id, s32 count)
return CELL_EINVAL;
}
if (count + sem->value - (s32)sem->queue.count() > sem->max)
LV2_LOCK(0);
if (count + sem->value.read_sync() - (s32)sem->queue.count() > sem->max)
{
return CELL_EBUSY;
}
@ -188,22 +210,16 @@ s32 sys_semaphore_post(u32 sem_id, s32 count)
return CELL_OK;
}
std::lock_guard<std::mutex> lock(sem->mutex);
if (sem->signal && sem->queue.count())
{
std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack
continue;
}
if (u32 target = sem->queue.pop(sem->protocol))
if (u32 target = sem->queue.signal(sem->protocol))
{
count--;
sem->signal = target;
}
else
{
sem->value += count;
sem->value.atomic_op([count](s32& value)
{
value += count;
});
count = 0;
}
}
@ -217,7 +233,7 @@ s32 sys_semaphore_get_value(u32 sem_id, vm::ptr<s32> count)
if (!count)
{
sys_semaphore.Error("sys_semaphore_get_value(): invalid memory access (count=0x%x)", count.addr());
sys_semaphore.Error("sys_semaphore_get_value(): invalid memory access (addr=0x%x)", count.addr());
return CELL_EFAULT;
}
@ -227,9 +243,6 @@ s32 sys_semaphore_get_value(u32 sem_id, vm::ptr<s32> count)
return CELL_ESRCH;
}
std::lock_guard<std::mutex> lock(sem->mutex);
*count = sem->value;
*count = sem->value.read_sync();
return CELL_OK;
}

View File

@ -16,22 +16,19 @@ struct sys_semaphore_attribute
struct Semaphore
{
std::mutex mutex;
sleep_queue_t queue;
s32 value;
u32 signal;
atomic_le_t<s32> value;
const s32 max;
const u32 protocol;
const u64 name;
Semaphore(s32 initial_count, s32 max_count, u32 protocol, u64 name)
: value(initial_count)
, signal(0)
, max(max_count)
: max(max_count)
, protocol(protocol)
, name(name)
{
value.write_relaxed(initial_count);
}
};
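
Semaphore::value above is now an atomic counter, and sys_semaphore_wait/trywait take a count with a single decrement-if-positive instead of holding a mutex. A standalone sketch of that operation with std::atomic (illustrative, not the atomic_le_t interface):

#include <atomic>
#include <cstdint>

std::atomic<int32_t> value{ 1 };

// Returns true if a count was taken; otherwise the caller must sleep on the queue.
bool try_take()
{
    int32_t old = value.load();
    do
    {
        if (old <= 0) return false;    // nothing to take, go wait
    }
    while (!value.compare_exchange_weak(old, old - 1));
    return true;
}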