From 1e021cdbbaf9438271f77b9caa3bf2ee1167eccf Mon Sep 17 00:00:00 2001
From: Nekotekina
Date: Wed, 15 Jul 2015 14:58:13 +0300
Subject: [PATCH] waiter_map reintroduced, combined with vm

---
 Utilities/Thread.cpp                         |  14 --
 Utilities/Thread.h                           |  55 -----
 rpcs3/Emu/Memory/vm.cpp                      | 241 ++++++++++++++++++-
 rpcs3/Emu/Memory/vm.h                        |  72 ++++++
 rpcs3/Emu/SysCalls/Modules/cellSync.cpp      | 127 +++++-----
 rpcs3/Emu/SysCalls/Modules/sysPrxForUser.cpp | 102 ++++----
 rpcs3/stdafx.h                               |   2 +
 7 files changed, 414 insertions(+), 199 deletions(-)

diff --git a/Utilities/Thread.cpp b/Utilities/Thread.cpp
index b344bac68b..21004e51f6 100644
--- a/Utilities/Thread.cpp
+++ b/Utilities/Thread.cpp
@@ -1377,20 +1377,6 @@ bool thread_t::is_current() const
 	return g_tls_this_thread == m_thread.get();
 }
 
-void waiter_map_t::check_emu_status(u32 addr)
-{
-	if (Emu.IsStopped())
-	{
-		throw EXCEPTION("Aborted (emulation stopped) (%s, addr=0x%x)", name, addr);
-	}
-}
-
-void waiter_map_t::notify(u32 addr)
-{
-	// signal an appropriate condition variable
-	cvs[get_hash(addr)].notify_all();
-}
-
 const std::function<bool()> SQUEUE_ALWAYS_EXIT = [](){ return true; };
 const std::function<bool()> SQUEUE_NEVER_EXIT = [](){ return false; };
diff --git a/Utilities/Thread.h b/Utilities/Thread.h
index 23676853c9..4a85fdd9ca 100644
--- a/Utilities/Thread.h
+++ b/Utilities/Thread.h
@@ -97,61 +97,6 @@ public:
 	using thread_t::is_current;
 };
 
-struct waiter_map_t
-{
-	static const size_t size = 16;
-
-	std::array<std::mutex, size> mutexes;
-	std::array<std::condition_variable, size> cvs;
-
-	const std::string name;
-
-	waiter_map_t(const char* name)
-		: name(name)
-	{
-	}
-
-	// generate simple "hash" for mutex/cv distribution
-	u32 get_hash(u32 addr)
-	{
-		addr ^= addr >> 16;
-		addr ^= addr >> 24;
-		addr ^= addr >> 28;
-		return addr % size;
-	}
-
-	void check_emu_status(u32 addr);
-
-	// wait until pred() returns true, `addr` is an arbitrary number
-	template<typename F, typename... Args> safe_buffers auto wait_op(u32 addr, F pred, Args&&... args) -> decltype(static_cast<void>(pred(args...)))
-	{
-		const u32 hash = get_hash(addr);
-
-		// set mutex locker
-		std::unique_lock<std::mutex> lock(mutexes[hash], std::defer_lock);
-
-		while (true)
-		{
-			// check the condition
-			if (pred(args...)) return;
-
-			check_emu_status(addr);
-
-			if (!lock)
-			{
-				lock.lock();
-				continue;
-			}
-
-			// wait on an appropriate cond var for 1 ms or until a signal arrived
-			cvs[hash].wait_for(lock, std::chrono::milliseconds(1));
-		}
-	}
-
-	// signal all threads waiting on wait_op() with the same `addr` (signaling only hints those threads that corresponding conditions are *probably* met)
-	void notify(u32 addr);
-};
-
 extern const std::function<bool()> SQUEUE_ALWAYS_EXIT;
 extern const std::function<bool()> SQUEUE_NEVER_EXIT;
diff --git a/rpcs3/Emu/Memory/vm.cpp b/rpcs3/Emu/Memory/vm.cpp
index 3e2dc873f8..000c984ded 100644
--- a/rpcs3/Emu/Memory/vm.cpp
+++ b/rpcs3/Emu/Memory/vm.cpp
@@ -2,6 +2,7 @@
 #include "Utilities/Log.h"
 #include "Memory.h"
 #include "Emu/System.h"
+#include "Utilities/Thread.h"
 #include "Emu/CPU/CPUThread.h"
 #include "Emu/Cell/PPUThread.h"
 #include "Emu/Cell/SPUThread.h"
@@ -144,6 +145,200 @@ namespace vm
 
 	reservation_mutex_t g_reservation_mutex;
 
+	waiter_list_t g_waiter_list;
+
+	std::size_t g_waiter_max = 0; // min unused position
+	std::size_t g_waiter_nil = 0; // min search position
+
+	std::mutex g_waiter_list_mutex;
+
+	waiter_t* _add_waiter(CPUThread& thread, u32 addr, u32 size)
+	{
+		std::lock_guard<std::mutex> lock(g_waiter_list_mutex);
+
+		const u64 align = 0x80000000ull >> cntlz32(size);
+
+		if (!size || !addr || size > 4096 || size != align || addr & (align - 1))
+		{
+			throw EXCEPTION("Invalid arguments (addr=0x%x, size=0x%x)", addr, size);
+		}
+
+		thread.mutex.lock();
+
+		// look for empty position
+		for (; g_waiter_nil < g_waiter_max; g_waiter_nil++)
+		{
+			waiter_t& waiter = g_waiter_list[g_waiter_nil];
+
+			if (!waiter.thread)
+			{
+				// store next position for further addition
+				g_waiter_nil++;
+
+				return waiter.reset(addr, size, thread);
+			}
+		}
+
+		if (g_waiter_max >= g_waiter_list.size())
+		{
+			throw EXCEPTION("Waiter list limit broken (%lld)", g_waiter_max);
+		}
+
+		waiter_t& waiter = g_waiter_list[g_waiter_max++];
+
+		g_waiter_nil = g_waiter_max;
+
+		return waiter.reset(addr, size, thread);
+	}
+
+	void _remove_waiter(waiter_t* waiter)
+	{
+		std::lock_guard<std::mutex> lock(g_waiter_list_mutex);
+
+		// mark as deleted
+		waiter->thread = nullptr;
+
+		// amortize adding new element
+		g_waiter_nil = std::min<std::size_t>(g_waiter_nil, waiter - g_waiter_list.data());
+
+		// amortize polling
+		while (g_waiter_max && !g_waiter_list[g_waiter_max - 1].thread)
+		{
+			g_waiter_max--;
+		}
+	}
+
+	bool waiter_t::try_notify()
+	{
+		std::lock_guard<std::mutex> lock(thread->mutex);
+
+		// check predicate
+		if (pred && pred())
+		{
+			// clear predicate and signal if succeeded
+			pred = nullptr;
+
+			if (thread->Signal())
+			{
+				return true;
+			}
+			else
+			{
+				throw EXCEPTION("Thread already signaled");
+			}
+		}
+
+		return false;
+	}
+
+	waiter_lock_t::waiter_lock_t(CPUThread& thread, u32 addr, u32 size)
+		: m_waiter(_add_waiter(thread, addr, size))
+		, m_lock(thread.mutex, std::adopt_lock) // must be locked in _add_waiter
+	{
+	}
+
+	void waiter_lock_t::wait()
+	{
+		while (!m_waiter->thread->Signaled())
+		{
+			if (m_waiter->pred())
+			{
+				return;
+			}
+
+			CHECK_EMU_STATUS;
+
+			m_waiter->thread->cv.wait(m_lock);
+		}
+
+		// if another thread called pred(), it must be removed
+		if (m_waiter->pred)
+		{
+			throw EXCEPTION("Unexpected");
+		}
+	}
+
+	waiter_lock_t::~waiter_lock_t()
+	{
+		// remove predicate to avoid excessive signaling
+		m_waiter->pred = nullptr;
+
+		// unlock thread's mutex to avoid deadlock with g_waiter_list_mutex
+		m_lock.unlock();
+
+		_remove_waiter(m_waiter);
+	}
+
+	void _notify_at(u32 addr, u32 size)
+	{
+		std::lock_guard<std::mutex> lock(g_waiter_list_mutex);
+
+		const u32 mask = ~(size - 1);
+
+		for (std::size_t i = 0; i < g_waiter_max; i++)
+		{
+			waiter_t& waiter = g_waiter_list[i];
+
+			if (((waiter.addr ^ addr) & (mask & waiter.mask)) == 0 && waiter.thread)
+			{
+				waiter.try_notify();
+			}
+		}
+	}
+
+	void notify_at(u32 addr, u32 size)
+	{
+		const u64 align = 0x80000000ull >> cntlz32(size);
+
+		if (!size || !addr || size > 4096 || size != align || addr & (align - 1))
+		{
+			throw EXCEPTION("Invalid arguments (addr=0x%x, size=0x%x)", addr, size);
+		}
+
+		_notify_at(addr, size);
+	}
+
+	bool notify_all()
+	{
+		std::unique_lock<std::mutex> lock(g_waiter_list_mutex, std::try_to_lock);
+
+		if (lock)
+		{
+			for (std::size_t i = 0; i < g_waiter_max; i++)
+			{
+				waiter_t& waiter = g_waiter_list[i];
+
+				if (waiter.thread && waiter.pred)
+				{
+					waiter.try_notify();
+				}
+			}
+
+			return true;
+		}
+
+		return false;
+	}
+
+	void start()
+	{
+		// start notification thread
+		thread_t(COPY_EXPR("vm::start thread"), []()
+		{
+			while (!Emu.IsStopped())
+			{
+				// poll waiters periodically (TODO)
+				while (!notify_all() && !Emu.IsPaused())
+				{
+					std::this_thread::yield();
+				}
+
+				std::this_thread::sleep_for(std::chrono::milliseconds(1));
+			}
+
+		}).detach();
+	}
+
 	void _reservation_set(u32 addr, bool no_access = false)
 	{
 #ifdef _WIN32
@@ -183,9 +378,15 @@ namespace vm
 
 	void reservation_break(u32 addr)
 	{
-		std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
+		std::unique_lock<reservation_mutex_t> lock(g_reservation_mutex);
 
-		g_tls_did_break_reservation = _reservation_break(addr);
+		const u32 raddr = g_reservation_addr;
+		const u32 rsize = g_reservation_size;
+
+		if ((g_tls_did_break_reservation = _reservation_break(addr)))
+		{
+			lock.unlock(), _notify_at(raddr, rsize);
+		}
 	}
 
 	void reservation_acquire(void* data, u32 addr, u32 size)
@@ -229,7 +430,7 @@ namespace vm
 
 	bool reservation_update(u32 addr, const void* data, u32 size)
 	{
-		std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
+		std::unique_lock<reservation_mutex_t> lock(g_reservation_mutex);
 
 		const u64 align = 0x80000000ull >> cntlz32(size);
 
@@ -253,13 +454,16 @@ namespace vm
 		// free the reservation and restore memory protection
 		_reservation_break(addr);
 
+		// notify waiter
+		lock.unlock(), _notify_at(addr, size);
+
 		// atomic update succeeded
 		return true;
 	}
 
 	bool reservation_query(u32 addr, u32 size, bool is_writing, std::function<bool()> callback)
 	{
-		std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
+		std::unique_lock<reservation_mutex_t> lock(g_reservation_mutex);
 
 		if (!check_addr(addr))
 		{
@@ -269,15 +473,21 @@ namespace vm
 		// check if current reservation and address may overlap
 		if (g_reservation_addr >> 12 == addr >> 12 && is_writing)
 		{
-			if (size && addr + size - 1 >= g_reservation_addr && g_reservation_addr + g_reservation_size - 1 >= addr)
+			const bool result = callback();
+
+			if (result && size && addr + size - 1 >= g_reservation_addr && g_reservation_addr + g_reservation_size - 1 >= addr)
 			{
+				const u32 raddr = g_reservation_addr;
+				const u32 rsize = g_reservation_size;
+
 				// break the reservation if overlap
-				g_tls_did_break_reservation = _reservation_break(addr);
-			}
-			else
-			{
-				return callback(); //? true : _reservation_break(addr), true;
+				if ((g_tls_did_break_reservation = _reservation_break(addr)))
+				{
+					lock.unlock(), _notify_at(raddr, rsize);
+				}
 			}
+
+			return result;
 		}
 
 		return true;
@@ -302,7 +512,7 @@ namespace vm
 
 	void reservation_op(u32 addr, u32 size, std::function<void()> proc)
 	{
-		std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
+		std::unique_lock<reservation_mutex_t> lock(g_reservation_mutex);
 
 		const u64 align = 0x80000000ull >> cntlz32(size);
 
@@ -337,6 +547,9 @@ namespace vm
 
 		// remove the reservation
 		_reservation_break(addr);
+
+		// notify waiter
+		lock.unlock(), _notify_at(addr, size);
 	}
 
 	void _page_map(u32 addr, u32 size, u8 flags)
@@ -752,6 +965,8 @@ namespace vm
 			std::make_shared<block_t>(0xE0000000, 0x20000000), // SPU
 		};
+
+		vm::start();
 	}
 }
@@ -766,6 +981,8 @@ namespace vm
 			nullptr, // video
 			nullptr, // stack
 		};
+
+		vm::start();
 	}
 }
@@ -783,6 +1000,8 @@ namespace vm
 			std::make_shared<block_t>(0x00010000, 0x00004000), // scratchpad
 			std::make_shared<block_t>(0x88000000, 0x00800000), // kernel
 		};
+
+		vm::start();
 	}
 }
diff --git a/rpcs3/Emu/Memory/vm.h b/rpcs3/Emu/Memory/vm.h
index e5b6825a4d..4386d08a06 100644
--- a/rpcs3/Emu/Memory/vm.h
+++ b/rpcs3/Emu/Memory/vm.h
@@ -32,6 +32,78 @@ namespace vm
 		page_allocated = (1 << 7),
 	};
 
+	struct waiter_t
+	{
+		u32 addr = 0;
+		u32 mask = 0;
+		CPUThread* thread = nullptr;
+
+		std::function<bool()> pred;
+
+		waiter_t() = default;
+
+		waiter_t* reset(u32 addr, u32 size, CPUThread& thread)
+		{
+			this->addr = addr;
+			this->mask = ~(size - 1);
+			this->thread = &thread;
+
+			// must be null at this point
+			if (pred)
+			{
+				throw EXCEPTION("Unexpected");
+			}
+
+			return this;
+		}
+
+		bool try_notify();
+	};
+
+	using waiter_list_t = std::array<waiter_t, 1024>;
+
+	class waiter_lock_t
+	{
+		waiter_t* m_waiter;
+		std::unique_lock<std::mutex> m_lock;
+
+	public:
+		waiter_lock_t() = delete;
+
+		waiter_lock_t(CPUThread& thread, u32 addr, u32 size);
+
+		waiter_t* operator ->() const
+		{
+			return m_waiter;
+		}
+
+		void wait();
+
+		~waiter_lock_t();
+	};
+
+	// wait until pred() returns true, addr must be aligned to size which must be a power of 2, pred() may be called by any thread
+	template<typename F, typename... Args> auto wait_op(CPUThread& thread, u32 addr, u32 size, F pred, Args&&... args) -> decltype(static_cast<void>(pred(args...)))
+	{
+		// return immediately if condition passed (optimistic case)
+		if (pred(args...)) return;
+
+		// initialize waiter and locker
+		waiter_lock_t lock(thread, addr, size);
+
+		// initialize predicate
+		lock->pred = WRAP_EXPR(pred(args...));
+
+		// start waiting
+		lock.wait();
+	}
+
+	// notify waiters on specific addr, addr must be aligned to size which must be a power of 2
+	void notify_at(u32 addr, u32 size);
+
+	// try to poll each waiter's condition (false if try_lock failed)
+	bool notify_all();
+
 	// This flag is changed by various reservation functions and may have different meaning.
 	// reservation_break() - true if the reservation was successfully broken.
 	// reservation_acquire() - true if another existing reservation was broken.
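[Usage note, illustrative and not part of the patch: the sketch below shows how the new vm::wait_op()/vm::notify_at() pair is meant to be used together. The PPU thread reference `ppu` and the naturally aligned 32-bit location `addr` are hypothetical; vm::read32/vm::write32 and WRAP_EXPR are the accessors the patch's own call sites rely on.

	// waiter: sleep until the 32-bit value at addr becomes non-zero;
	// the predicate may also be evaluated by the vm notification thread
	vm::wait_op(ppu, addr, 4, WRAP_EXPR(vm::read32(addr) != 0));

	// notifier: publish the new value first, then wake matching waiters
	vm::write32(addr, 1);
	vm::notify_at(addr, 4);

Unlike the removed waiter_map_t, which hashed an arbitrary key onto 16 mutex/condvar buckets and woke every thread in a bucket, waiters here are matched by address and size mask, so unrelated locations no longer share wake-ups.]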
diff --git a/rpcs3/Emu/SysCalls/Modules/cellSync.cpp b/rpcs3/Emu/SysCalls/Modules/cellSync.cpp
index 0e04566a8d..7981eff11c 100644
--- a/rpcs3/Emu/SysCalls/Modules/cellSync.cpp
+++ b/rpcs3/Emu/SysCalls/Modules/cellSync.cpp
@@ -11,13 +11,6 @@
 
 extern Module cellSync;
 
-waiter_map_t g_sync_mutex_wm("sync_mutex_wm");
-waiter_map_t g_sync_barrier_wait_wm("sync_barrier_wait_wm");
-waiter_map_t g_sync_barrier_notify_wm("sync_barrier_notify_wm");
-waiter_map_t g_sync_rwm_read_wm("sync_rwm_read_wm");
-waiter_map_t g_sync_rwm_write_wm("sync_rwm_write_wm");
-waiter_map_t g_sync_queue_wm("sync_queue_wm");
-
 s32 cellSyncMutexInitialize(vm::ptr<CellSyncMutex> mutex)
 {
 	cellSync.Log("cellSyncMutexInitialize(mutex=*0x%x)", mutex);
@@ -37,7 +30,7 @@
 	return CELL_OK;
 }
 
-s32 cellSyncMutexLock(vm::ptr<CellSyncMutex> mutex)
+s32 cellSyncMutexLock(PPUThread& ppu, vm::ptr<CellSyncMutex> mutex)
 {
 	cellSync.Log("cellSyncMutexLock(mutex=*0x%x)", mutex);
 
@@ -55,7 +48,7 @@
 	const auto order = mutex->atomic_op(&sync_mutex_t::acquire);
 
 	// wait until rel value is equal to old acq value
-	g_sync_mutex_wm.wait_op(mutex.addr(), WRAP_EXPR(mutex->load().rel == order));
+	vm::wait_op(ppu, mutex.addr(), 4, WRAP_EXPR(mutex->load().rel == order));
 
 	_mm_mfence();
 
@@ -100,7 +93,7 @@
 
 	mutex->atomic_op(&sync_mutex_t::unlock);
 
-	g_sync_mutex_wm.notify(mutex.addr());
+	vm::notify_at(mutex.addr(), 4);
 
 	return CELL_OK;
 }
@@ -130,7 +123,7 @@
 	return CELL_OK;
 }
 
-s32 cellSyncBarrierNotify(vm::ptr<CellSyncBarrier> barrier)
+s32 cellSyncBarrierNotify(PPUThread& ppu, vm::ptr<CellSyncBarrier> barrier)
 {
 	cellSync.Log("cellSyncBarrierNotify(barrier=*0x%x)", barrier);
 
@@ -144,9 +137,9 @@
 		return CELL_SYNC_ERROR_ALIGN;
 	}
 
-	g_sync_barrier_notify_wm.wait_op(barrier.addr(), WRAP_EXPR(barrier->atomic_op(&sync_barrier_t::try_notify)));
+	vm::wait_op(ppu, barrier.addr(), 4, WRAP_EXPR(barrier->atomic_op(&sync_barrier_t::try_notify)));
 
-	g_sync_barrier_wait_wm.notify(barrier.addr());
+	vm::notify_at(barrier.addr(), 4);
 
 	return CELL_OK;
 }
@@ -172,12 +165,12 @@
 		return CELL_SYNC_ERROR_BUSY;
 	}
 
-	g_sync_barrier_wait_wm.notify(barrier.addr());
+	vm::notify_at(barrier.addr(), 4);
 
 	return CELL_OK;
 }
 
-s32 cellSyncBarrierWait(vm::ptr<CellSyncBarrier> barrier)
+s32 cellSyncBarrierWait(PPUThread& ppu, vm::ptr<CellSyncBarrier> barrier)
 {
 	cellSync.Log("cellSyncBarrierWait(barrier=*0x%x)", barrier);
 
@@ -193,9 +186,9 @@
 
 	_mm_mfence();
 
-	g_sync_barrier_wait_wm.wait_op(barrier.addr(), WRAP_EXPR(barrier->atomic_op(&sync_barrier_t::try_wait)));
+	vm::wait_op(ppu, barrier.addr(), 4, WRAP_EXPR(barrier->atomic_op(&sync_barrier_t::try_wait)));
 
-	g_sync_barrier_notify_wm.notify(barrier.addr());
+	vm::notify_at(barrier.addr(), 4);
 
 	return CELL_OK;
 }
@@ -221,7 +214,7 @@
 		return CELL_SYNC_ERROR_BUSY;
 	}
 
-	g_sync_barrier_notify_wm.notify(barrier.addr());
+	vm::notify_at(barrier.addr(), 4);
 
 	return CELL_OK;
 }
@@ -255,7 +248,7 @@
 	return CELL_OK;
 }
 
-s32 cellSyncRwmRead(vm::ptr<CellSyncRwm> rwm, vm::ptr<void> buffer)
+s32 cellSyncRwmRead(PPUThread& ppu, vm::ptr<CellSyncRwm> rwm, vm::ptr<void> buffer)
 {
 	cellSync.Log("cellSyncRwmRead(rwm=*0x%x, buffer=*0x%x)", rwm, buffer);
 
@@ -270,7 +263,7 @@
 	}
 
 	// wait until `writers` is zero, increase `readers`
-	g_sync_rwm_read_wm.wait_op(rwm.addr(), WRAP_EXPR(rwm->ctrl.atomic_op(&sync_rwm_t::try_read_begin)));
+	vm::wait_op(ppu, rwm.addr(), 4, WRAP_EXPR(rwm->ctrl.atomic_op(&sync_rwm_t::try_read_begin)));
 
 	// copy data to buffer
 	std::memcpy(buffer.get_ptr(), rwm->buffer.get_ptr(), rwm->size);
 
@@ -281,7 +274,7 @@
 		return CELL_SYNC_ERROR_ABORT;
 	}
 
-	g_sync_rwm_write_wm.notify(rwm.addr());
+	vm::notify_at(rwm.addr(), 4);
 
 	return CELL_OK;
 }
@@ -315,12 +308,12 @@
 		return CELL_SYNC_ERROR_ABORT;
 	}
 
-	g_sync_rwm_write_wm.notify(rwm.addr());
+	vm::notify_at(rwm.addr(), 4);
 
 	return CELL_OK;
 }
 
-s32 cellSyncRwmWrite(vm::ptr<CellSyncRwm> rwm, vm::cptr<void> buffer)
+s32 cellSyncRwmWrite(PPUThread& ppu, vm::ptr<CellSyncRwm> rwm, vm::cptr<void> buffer)
 {
 	cellSync.Log("cellSyncRwmWrite(rwm=*0x%x, buffer=*0x%x)", rwm, buffer);
 
@@ -335,10 +328,10 @@
 	}
 
 	// wait until `writers` is zero, set to 1
-	g_sync_rwm_read_wm.wait_op(rwm.addr(), WRAP_EXPR(rwm->ctrl.atomic_op(&sync_rwm_t::try_write_begin)));
+	vm::wait_op(ppu, rwm.addr(), 4, WRAP_EXPR(rwm->ctrl.atomic_op(&sync_rwm_t::try_write_begin)));
 
 	// wait until `readers` is zero
-	g_sync_rwm_write_wm.wait_op(rwm.addr(), WRAP_EXPR(!rwm->ctrl.load().readers.data()));
+	vm::wait_op(ppu, rwm.addr(), 4, WRAP_EXPR(!rwm->ctrl.load().readers.data()));
 
 	// copy data from buffer
 	std::memcpy(rwm->buffer.get_ptr(), buffer.get_ptr(), rwm->size);
 
@@ -346,7 +339,7 @@
 	// sync and clear `readers` and `writers`
 	rwm->ctrl.exchange({});
 
-	g_sync_rwm_read_wm.notify(rwm.addr());
+	vm::notify_at(rwm.addr(), 4);
 
 	return CELL_OK;
 }
@@ -377,7 +370,7 @@
 	// sync and clear `readers` and `writers`
 	rwm->ctrl.exchange({});
 
-	g_sync_rwm_read_wm.notify(rwm.addr());
+	vm::notify_at(rwm.addr(), 4);
 
 	return CELL_OK;
 }
@@ -417,7 +410,7 @@
 	return CELL_OK;
 }
 
-s32 cellSyncQueuePush(vm::ptr<CellSyncQueue> queue, vm::cptr<void> buffer)
+s32 cellSyncQueuePush(PPUThread& ppu, vm::ptr<CellSyncQueue> queue, vm::cptr<void> buffer)
 {
 	cellSync.Log("cellSyncQueuePush(queue=*0x%x, buffer=*0x%x)", queue, buffer);
 
@@ -435,7 +428,7 @@
 
 	u32 position;
 
-	g_sync_queue_wm.wait_op(queue.addr(), WRAP_EXPR(queue->ctrl.atomic_op(&sync_queue_t::try_push_begin, depth, position)));
+	vm::wait_op(ppu, queue.addr(), 8, WRAP_EXPR(queue->ctrl.atomic_op(&sync_queue_t::try_push_begin, depth, position)));
 
 	// copy data from the buffer at the position
 	std::memcpy(&queue->buffer[position * queue->size], buffer.get_ptr(), queue->size);
 
@@ -443,7 +436,7 @@
 	// clear 5th byte
 	queue->ctrl &= { 0xffffffff, 0x00ffffff };
 
-	g_sync_queue_wm.notify(queue.addr());
+	vm::notify_at(queue.addr(), 8);
 
 	return CELL_OK;
 }
@@ -477,12 +470,12 @@
 	// clear 5th byte
 	queue->ctrl &= { 0xffffffff, 0x00ffffff };
 
-	g_sync_queue_wm.notify(queue.addr());
+	vm::notify_at(queue.addr(), 8);
 
 	return CELL_OK;
 }
 
-s32 cellSyncQueuePop(vm::ptr<CellSyncQueue> queue, vm::ptr<void> buffer)
+s32 cellSyncQueuePop(PPUThread& ppu, vm::ptr<CellSyncQueue> queue, vm::ptr<void> buffer)
 {
 	cellSync.Log("cellSyncQueuePop(queue=*0x%x, buffer=*0x%x)", queue, buffer);
 
@@ -500,7 +493,7 @@
 
 	u32 position;
 
-	g_sync_queue_wm.wait_op(queue.addr(), WRAP_EXPR(queue->ctrl.atomic_op(&sync_queue_t::try_pop_begin, depth, position)));
+	vm::wait_op(ppu, queue.addr(), 8, WRAP_EXPR(queue->ctrl.atomic_op(&sync_queue_t::try_pop_begin, depth, position)));
 
 	// copy data at the position to the buffer
 	std::memcpy(buffer.get_ptr(), &queue->buffer[position * queue->size], queue->size);
 
@@ -508,7 +501,7 @@
 	// clear first byte
 	queue->ctrl &= { 0x00ffffff, 0xffffffffu };
 
-	g_sync_queue_wm.notify(queue.addr());
+	vm::notify_at(queue.addr(), 8);
 
 	return CELL_OK;
 }
@@ -542,12 +535,12 @@
 	// clear first byte
 	queue->ctrl &= { 0x00ffffff, 0xffffffffu };
 
-	g_sync_queue_wm.notify(queue.addr());
+	vm::notify_at(queue.addr(), 8);
 
 	return CELL_OK;
 }
 
-s32 cellSyncQueuePeek(vm::ptr<CellSyncQueue> queue, vm::ptr<void> buffer)
+s32 cellSyncQueuePeek(PPUThread& ppu, vm::ptr<CellSyncQueue> queue, vm::ptr<void> buffer)
 {
 	cellSync.Log("cellSyncQueuePeek(queue=*0x%x, buffer=*0x%x)", queue, buffer);
 
@@ -565,7 +558,7 @@
 
 	u32 position;
 
-	g_sync_queue_wm.wait_op(queue.addr(), WRAP_EXPR(queue->ctrl.atomic_op(&sync_queue_t::try_peek_begin, depth, position)));
+	vm::wait_op(ppu, queue.addr(), 8, WRAP_EXPR(queue->ctrl.atomic_op(&sync_queue_t::try_peek_begin, depth, position)));
 
 	// copy data at the position to the buffer
 	std::memcpy(buffer.get_ptr(), &queue->buffer[position * queue->size], queue->size);
 
@@ -573,7 +566,7 @@
 	// clear first byte
 	queue->ctrl &= { 0x00ffffff, 0xffffffffu };
 
-	g_sync_queue_wm.notify(queue.addr());
+	vm::notify_at(queue.addr(), 8);
 
 	return CELL_OK;
 }
@@ -607,7 +600,7 @@
 	// clear first byte
 	queue->ctrl &= { 0x00ffffff, 0xffffffffu };
 
-	g_sync_queue_wm.notify(queue.addr());
+	vm::notify_at(queue.addr(), 8);
 
 	return CELL_OK;
 }
@@ -631,7 +624,7 @@
 	return queue->ctrl.load().m_v2 & 0xffffff;
 }
 
-s32 cellSyncQueueClear(vm::ptr<CellSyncQueue> queue)
+s32 cellSyncQueueClear(PPUThread& ppu, vm::ptr<CellSyncQueue> queue)
 {
 	cellSync.Log("cellSyncQueueClear(queue=*0x%x)", queue);
 
@@ -647,13 +640,13 @@
 
 	queue->check_depth();
 
-	g_sync_queue_wm.wait_op(queue.addr(), WRAP_EXPR(queue->ctrl.atomic_op(&sync_queue_t::try_clear_begin_1)));
+	vm::wait_op(ppu, queue.addr(), 8, WRAP_EXPR(queue->ctrl.atomic_op(&sync_queue_t::try_clear_begin_1)));
 
-	g_sync_queue_wm.wait_op(queue.addr(), WRAP_EXPR(queue->ctrl.atomic_op(&sync_queue_t::try_clear_begin_2)));
+	vm::wait_op(ppu, queue.addr(), 8, WRAP_EXPR(queue->ctrl.atomic_op(&sync_queue_t::try_clear_begin_2)));
 
 	queue->ctrl.exchange({});
 
-	g_sync_queue_wm.notify(queue.addr());
+	vm::notify_at(queue.addr(), 8);
 
 	return CELL_OK;
 }
@@ -811,7 +804,7 @@
 	return CELL_OK;
 }
 
-s32 _cellSyncLFQueueGetPushPointer(PPUThread& CPU, vm::ptr<CellSyncLFQueue> queue, vm::ptr<s32> pointer, u32 isBlocking, u32 useEventQueue)
+s32 _cellSyncLFQueueGetPushPointer(PPUThread& ppu, vm::ptr<CellSyncLFQueue> queue, vm::ptr<s32> pointer, u32 isBlocking, u32 useEventQueue)
 {
 	cellSync.Warning("_cellSyncLFQueueGetPushPointer(queue=*0x%x, pointer=*0x%x, isBlocking=%d, useEventQueue=%d)", queue, pointer, isBlocking, useEventQueue);
 
@@ -899,7 +892,7 @@
 		}
 	}
 
-		if (s32 res = sys_event_queue_receive(CPU, queue->m_eq_id, vm::null, 0))
+		if (s32 res = sys_event_queue_receive(ppu, queue->m_eq_id, vm::null, 0))
 		{
 			throw EXCEPTION("");
 		}
 	}
 }
 
-s32 _cellSyncLFQueueGetPushPointer2(PPUThread& CPU, vm::ptr<CellSyncLFQueue> queue, vm::ptr<s32> pointer, u32 isBlocking, u32 useEventQueue)
+s32 _cellSyncLFQueueGetPushPointer2(PPUThread& ppu, vm::ptr<CellSyncLFQueue> queue, vm::ptr<s32> pointer, u32 isBlocking, u32 useEventQueue)
 {
 	// arguments copied from _cellSyncLFQueueGetPushPointer
 	cellSync.Todo("_cellSyncLFQueueGetPushPointer2(queue=*0x%x, pointer=*0x%x, isBlocking=%d, useEventQueue=%d)", queue, pointer, isBlocking, useEventQueue);
 
@@ -915,7 +908,7 @@
 	throw EXCEPTION("");
 }
 
-s32 _cellSyncLFQueueCompletePushPointer(PPUThread& CPU, vm::ptr<CellSyncLFQueue> queue, s32 pointer, vm::ptr<s32(u32 addr, u32 arg)> fpSendSignal)
+s32 _cellSyncLFQueueCompletePushPointer(PPUThread& ppu, vm::ptr<CellSyncLFQueue> queue, s32 pointer, vm::ptr<s32(u32 addr, u32 arg)> fpSendSignal)
 {
 	cellSync.Warning("_cellSyncLFQueueCompletePushPointer(queue=*0x%x, pointer=%d, fpSendSignal=*0x%x)", queue, pointer, fpSendSignal);
 
@@ -1033,7 +1026,7 @@
 			if (exch)
 			{
 				assert(fpSendSignal);
-				return fpSendSignal(CPU, (u32)queue->m_eaSignal.addr(), var6);
+				return fpSendSignal(ppu, (u32)queue->m_eaSignal.addr(), var6);
 			}
 		}
 		else
@@ -1051,7 +1044,7 @@
 	}
 }
 
-s32 _cellSyncLFQueueCompletePushPointer2(PPUThread& CPU, vm::ptr<CellSyncLFQueue> queue, s32 pointer, vm::ptr<s32(u32 addr, u32 arg)> fpSendSignal)
+s32 _cellSyncLFQueueCompletePushPointer2(PPUThread& ppu, vm::ptr<CellSyncLFQueue> queue, s32 pointer, vm::ptr<s32(u32 addr, u32 arg)> fpSendSignal)
 {
 	// arguments copied from _cellSyncLFQueueCompletePushPointer
 	cellSync.Todo("_cellSyncLFQueueCompletePushPointer2(queue=*0x%x, pointer=%d, fpSendSignal=*0x%x)", queue, pointer, fpSendSignal);
 
@@ -1059,7 +1052,7 @@
 	throw EXCEPTION("");
 }
 
-s32 _cellSyncLFQueuePushBody(PPUThread& CPU, vm::ptr<CellSyncLFQueue> queue, vm::cptr<void> buffer, u32 isBlocking)
+s32 _cellSyncLFQueuePushBody(PPUThread& ppu, vm::ptr<CellSyncLFQueue> queue, vm::cptr<void> buffer, u32 isBlocking)
 {
 	// cellSyncLFQueuePush has 1 in isBlocking param, cellSyncLFQueueTryPush has 0
 	cellSync.Warning("_cellSyncLFQueuePushBody(queue=*0x%x, buffer=*0x%x, isBlocking=%d)", queue, buffer, isBlocking);
 
@@ -1074,7 +1067,7 @@
 		return CELL_SYNC_ERROR_ALIGN;
 	}
 
-	vm::stackvar<be_t<u32>> position(CPU);
+	vm::stackvar<be_t<u32>> position(ppu);
 
 	while (true)
 	{
@@ -1084,11 +1077,11 @@
 
 		if (queue->m_direction != CELL_SYNC_QUEUE_ANY2ANY)
 		{
-			res = _cellSyncLFQueueGetPushPointer(CPU, queue, position, isBlocking, 0);
+			res = _cellSyncLFQueueGetPushPointer(ppu, queue, position, isBlocking, 0);
 		}
 		else
 		{
-			res = _cellSyncLFQueueGetPushPointer2(CPU, queue, position, isBlocking, 0);
+			res = _cellSyncLFQueueGetPushPointer2(ppu, queue, position, isBlocking, 0);
 		}
 
 		if (!isBlocking || res != CELL_SYNC_ERROR_AGAIN)
@@ -1109,15 +1102,15 @@
 
 	if (queue->m_direction != CELL_SYNC_QUEUE_ANY2ANY)
 	{
-		return _cellSyncLFQueueCompletePushPointer(CPU, queue, pos, vm::null);
+		return _cellSyncLFQueueCompletePushPointer(ppu, queue, pos, vm::null);
 	}
 	else
 	{
-		return _cellSyncLFQueueCompletePushPointer2(CPU, queue, pos, vm::null);
+		return _cellSyncLFQueueCompletePushPointer2(ppu, queue, pos, vm::null);
 	}
 }
 
-s32 _cellSyncLFQueueGetPopPointer(PPUThread& CPU, vm::ptr<CellSyncLFQueue> queue, vm::ptr<s32> pointer, u32 isBlocking, u32 arg4, u32 useEventQueue)
+s32 _cellSyncLFQueueGetPopPointer(PPUThread& ppu, vm::ptr<CellSyncLFQueue> queue, vm::ptr<s32> pointer, u32 isBlocking, u32 arg4, u32 useEventQueue)
 {
 	cellSync.Warning("_cellSyncLFQueueGetPopPointer(queue=*0x%x, pointer=*0x%x, isBlocking=%d, arg4=%d, useEventQueue=%d)", queue, pointer, isBlocking, arg4, useEventQueue);
 
@@ -1205,7 +1198,7 @@
 		}
 	}
 
-		if (s32 res = sys_event_queue_receive(CPU, queue->m_eq_id, vm::null, 0))
+		if (s32 res = sys_event_queue_receive(ppu, queue->m_eq_id, vm::null, 0))
 		{
 			throw EXCEPTION("");
 		}
@@ -1213,7 +1206,7 @@
 	}
 }
 
-s32 _cellSyncLFQueueGetPopPointer2(PPUThread& CPU, vm::ptr<CellSyncLFQueue> queue, vm::ptr<s32> pointer, u32 isBlocking, u32 useEventQueue)
+s32 _cellSyncLFQueueGetPopPointer2(PPUThread& ppu, vm::ptr<CellSyncLFQueue> queue, vm::ptr<s32> pointer, u32 isBlocking, u32 useEventQueue)
 {
 	// arguments copied from _cellSyncLFQueueGetPopPointer
 	cellSync.Todo("_cellSyncLFQueueGetPopPointer2(queue=*0x%x, pointer=*0x%x, isBlocking=%d, useEventQueue=%d)", queue, pointer, isBlocking, useEventQueue);
 
@@ -1221,7 +1214,7 @@
 	throw EXCEPTION("");
 }
 
-s32 _cellSyncLFQueueCompletePopPointer(PPUThread& CPU, vm::ptr<CellSyncLFQueue> queue, s32 pointer, vm::ptr<s32(u32 addr, u32 arg)> fpSendSignal, u32 noQueueFull)
+s32 _cellSyncLFQueueCompletePopPointer(PPUThread& ppu, vm::ptr<CellSyncLFQueue> queue, s32 pointer, vm::ptr<s32(u32 addr, u32 arg)> fpSendSignal, u32 noQueueFull)
 {
 	// arguments copied from _cellSyncLFQueueCompletePushPointer + unknown argument (noQueueFull taken from LFQueue2CompletePopPointer)
 	cellSync.Warning("_cellSyncLFQueueCompletePopPointer(queue=*0x%x, pointer=%d, fpSendSignal=*0x%x, noQueueFull=%d)", queue, pointer, fpSendSignal, noQueueFull);
 
@@ -1339,7 +1332,7 @@
 			if (exch)
 			{
 				assert(fpSendSignal);
-				return fpSendSignal(CPU, (u32)queue->m_eaSignal.addr(), var6);
+				return fpSendSignal(ppu, (u32)queue->m_eaSignal.addr(), var6);
 			}
 		}
 		else
@@ -1357,7 +1350,7 @@
 	}
 }
 
-s32 _cellSyncLFQueueCompletePopPointer2(PPUThread& CPU, vm::ptr<CellSyncLFQueue> queue, s32 pointer, vm::ptr<s32(u32 addr, u32 arg)> fpSendSignal, u32 noQueueFull)
+s32 _cellSyncLFQueueCompletePopPointer2(PPUThread& ppu, vm::ptr<CellSyncLFQueue> queue, s32 pointer, vm::ptr<s32(u32 addr, u32 arg)> fpSendSignal, u32 noQueueFull)
 {
 	// arguments copied from _cellSyncLFQueueCompletePopPointer
 	cellSync.Todo("_cellSyncLFQueueCompletePopPointer2(queue=*0x%x, pointer=%d, fpSendSignal=*0x%x, noQueueFull=%d)", queue, pointer, fpSendSignal, noQueueFull);
 
@@ -1365,7 +1358,7 @@
 	throw EXCEPTION("");
 }
 
-s32 _cellSyncLFQueuePopBody(PPUThread& CPU, vm::ptr<CellSyncLFQueue> queue, vm::ptr<void> buffer, u32 isBlocking)
+s32 _cellSyncLFQueuePopBody(PPUThread& ppu, vm::ptr<CellSyncLFQueue> queue, vm::ptr<void> buffer, u32 isBlocking)
 {
 	// cellSyncLFQueuePop has 1 in isBlocking param, cellSyncLFQueueTryPop has 0
 	cellSync.Warning("_cellSyncLFQueuePopBody(queue=*0x%x, buffer=*0x%x, isBlocking=%d)", queue, buffer, isBlocking);
 
@@ -1380,7 +1373,7 @@
 		return CELL_SYNC_ERROR_ALIGN;
 	}
 
-	vm::stackvar<be_t<u32>> position(CPU);
+	vm::stackvar<be_t<u32>> position(ppu);
 
 	while (true)
 	{
@@ -1390,11 +1383,11 @@
 
 		if (queue->m_direction != CELL_SYNC_QUEUE_ANY2ANY)
 		{
-			res = _cellSyncLFQueueGetPopPointer(CPU, queue, position, isBlocking, 0, 0);
+			res = _cellSyncLFQueueGetPopPointer(ppu, queue, position, isBlocking, 0, 0);
 		}
 		else
 		{
-			res = _cellSyncLFQueueGetPopPointer2(CPU, queue, position, isBlocking, 0);
+			res = _cellSyncLFQueueGetPopPointer2(ppu, queue, position, isBlocking, 0);
 		}
 
 		if (!isBlocking || res != CELL_SYNC_ERROR_AGAIN)
@@ -1415,11 +1408,11 @@
 
 	if (queue->m_direction != CELL_SYNC_QUEUE_ANY2ANY)
 	{
-		return _cellSyncLFQueueCompletePopPointer(CPU, queue, pos, vm::null, 0);
+		return _cellSyncLFQueueCompletePopPointer(ppu, queue, pos, vm::null, 0);
 	}
 	else
 	{
-		return _cellSyncLFQueueCompletePopPointer2(CPU, queue, pos, vm::null, 0);
+		return _cellSyncLFQueueCompletePopPointer2(ppu, queue, pos, vm::null, 0);
 	}
 }
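[Aside, illustrative and not part of the patch: every cellSync wait above follows the same shape — an atomic try-operation serves as the predicate, and the size argument (4 for the mutex/barrier/rwm control words, 8 for the queue control word) selects the aligned block a notifier must hit. A minimal standalone analogue of this predicate-registering wait, with invented names (simple_waiter), assuming only the C++ standard library:

	#include <condition_variable>
	#include <functional>
	#include <mutex>

	struct simple_waiter
	{
		std::mutex mutex;
		std::condition_variable cv;
		std::function<bool()> pred;

		// block until pred_func() holds; the predicate stays registered
		// so a notifier can evaluate it on the waiter's behalf
		template<typename F> void wait_op(F pred_func)
		{
			std::unique_lock<std::mutex> lock(mutex);
			pred = pred_func;
			cv.wait(lock, [&] { return pred(); });
			pred = nullptr; // clear to avoid further evaluation
		}

		// wake the waiter only if its registered condition now holds
		// (this mirrors what waiter_t::try_notify does in vm.cpp)
		void notify()
		{
			std::lock_guard<std::mutex> lock(mutex);
			if (pred && pred()) cv.notify_all();
		}
	};

Evaluating the predicate on the notifier's side is what lets vm::notify_at() skip threads whose conditions are not yet met, instead of waking a whole hash bucket as waiter_map_t did.]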
diff --git a/rpcs3/Emu/SysCalls/Modules/sysPrxForUser.cpp b/rpcs3/Emu/SysCalls/Modules/sysPrxForUser.cpp
index d4896ae5a5..235a862d51 100644
--- a/rpcs3/Emu/SysCalls/Modules/sysPrxForUser.cpp
+++ b/rpcs3/Emu/SysCalls/Modules/sysPrxForUser.cpp
@@ -32,8 +32,6 @@ u32 g_tls_size;
 
 std::array<std::atomic<u32>, TLS_MAX> g_tls_owners;
 
-waiter_map_t g_sys_spinlock_wm("sys_spinlock_wm");
-
 void sys_initialize_tls()
 {
 	sysPrxForUser.Log("sys_initialize_tls()");
@@ -120,18 +118,18 @@
 
-s32 sys_lwmutex_destroy(PPUThread& CPU, vm::ptr<sys_lwmutex_t> lwmutex)
+s32 sys_lwmutex_destroy(PPUThread& ppu, vm::ptr<sys_lwmutex_t> lwmutex)
 {
 	sysPrxForUser.Log("sys_lwmutex_destroy(lwmutex=*0x%x)", lwmutex);
 
 	// check to prevent recursive locking in the next call
-	if (lwmutex->vars.owner.load() == CPU.GetId())
+	if (lwmutex->vars.owner.load() == ppu.GetId())
 	{
 		return CELL_EBUSY;
 	}
 
 	// attempt to lock the mutex
-	if (s32 res = sys_lwmutex_trylock(CPU, lwmutex))
+	if (s32 res = sys_lwmutex_trylock(ppu, lwmutex))
 	{
 		return res;
 	}
@@ -140,7 +138,7 @@
 	if (s32 res = _sys_lwmutex_destroy(lwmutex->sleep_queue))
 	{
 		// unlock the mutex if failed
-		sys_lwmutex_unlock(CPU, lwmutex);
+		sys_lwmutex_unlock(ppu, lwmutex);
 
 		return res;
 	}
@@ -151,11 +149,11 @@
 	return CELL_OK;
 }
 
-s32 sys_lwmutex_lock(PPUThread& CPU, vm::ptr<sys_lwmutex_t> lwmutex, u64 timeout)
+s32 sys_lwmutex_lock(PPUThread& ppu, vm::ptr<sys_lwmutex_t> lwmutex, u64 timeout)
 {
 	sysPrxForUser.Log("sys_lwmutex_lock(lwmutex=*0x%x, timeout=0x%llx)", lwmutex, timeout);
 
-	const be_t<u32> tid = CPU.GetId();
+	const be_t<u32> tid = ppu.GetId();
 
 	// try to lock lightweight mutex
 	const be_t<u32> old_owner = lwmutex->vars.owner.compare_and_swap(lwmutex_free, tid);
@@ -245,11 +243,11 @@
 	return res;
 }
 
-s32 sys_lwmutex_trylock(PPUThread& CPU, vm::ptr<sys_lwmutex_t> lwmutex)
+s32 sys_lwmutex_trylock(PPUThread& ppu, vm::ptr<sys_lwmutex_t> lwmutex)
 {
 	sysPrxForUser.Log("sys_lwmutex_trylock(lwmutex=*0x%x)", lwmutex);
 
-	const be_t<u32> tid = CPU.GetId();
+	const be_t<u32> tid = ppu.GetId();
 
 	// try to lock lightweight mutex
 	const be_t<u32> old_owner = lwmutex->vars.owner.compare_and_swap(lwmutex_free, tid);
@@ -312,11 +310,11 @@
 	return CELL_EBUSY;
 }
 
-s32 sys_lwmutex_unlock(PPUThread& CPU, vm::ptr<sys_lwmutex_t> lwmutex)
+s32 sys_lwmutex_unlock(PPUThread& ppu, vm::ptr<sys_lwmutex_t> lwmutex)
 {
 	sysPrxForUser.Log("sys_lwmutex_unlock(lwmutex=*0x%x)", lwmutex);
 
-	const be_t<u32> tid = CPU.GetId();
+	const be_t<u32> tid = ppu.GetId();
 
 	// check owner
 	if (lwmutex->vars.owner.load() != tid)
@@ -380,7 +378,7 @@
 	return res;
 }
 
-s32 sys_lwcond_signal(PPUThread& CPU, vm::ptr<sys_lwcond_t> lwcond)
+s32 sys_lwcond_signal(PPUThread& ppu, vm::ptr<sys_lwcond_t> lwcond)
 {
 	sysPrxForUser.Log("sys_lwcond_signal(lwcond=*0x%x)", lwcond);
 
@@ -392,7 +390,7 @@
 		//return _sys_lwcond_signal(lwcond->lwcond_queue, 0, -1, 2);
 	}
 
-	if (lwmutex->vars.owner.load() == CPU.GetId())
+	if (lwmutex->vars.owner.load() == ppu.GetId())
 	{
 		// if owns the mutex
 		lwmutex->all_info++;
@@ -408,7 +406,7 @@
 		return CELL_OK;
 	}
 
-	if (s32 res = sys_lwmutex_trylock(CPU, lwmutex))
+	if (s32 res = sys_lwmutex_trylock(ppu, lwmutex))
 	{
 		// if locking failed
 
@@ -430,7 +428,7 @@
 		lwmutex->all_info--;
 
 		// unlock the lightweight mutex
-		sys_lwmutex_unlock(CPU, lwmutex);
+		sys_lwmutex_unlock(ppu, lwmutex);
 
 		return res == CELL_ENOENT ? CELL_OK : res;
 	}
@@ -438,7 +436,7 @@
 	return CELL_OK;
 }
 
-s32 sys_lwcond_signal_all(PPUThread& CPU, vm::ptr<sys_lwcond_t> lwcond)
+s32 sys_lwcond_signal_all(PPUThread& ppu, vm::ptr<sys_lwcond_t> lwcond)
 {
 	sysPrxForUser.Log("sys_lwcond_signal_all(lwcond=*0x%x)", lwcond);
 
@@ -450,7 +448,7 @@
 		//return _sys_lwcond_signal_all(lwcond->lwcond_queue, lwmutex->sleep_queue, 2);
 	}
 
-	if (lwmutex->vars.owner.load() == CPU.GetId())
+	if (lwmutex->vars.owner.load() == ppu.GetId())
 	{
 		// if owns the mutex, call the syscall
 		const s32 res = _sys_lwcond_signal_all(lwcond->lwcond_queue, lwmutex->sleep_queue, 1);
@@ -466,7 +464,7 @@
 		return CELL_OK;
 	}
 
-	if (s32 res = sys_lwmutex_trylock(CPU, lwmutex))
+	if (s32 res = sys_lwmutex_trylock(ppu, lwmutex))
 	{
 		// if locking failed
 
@@ -490,12 +488,12 @@
 	}
 
 	// unlock mutex
-	sys_lwmutex_unlock(CPU, lwmutex);
+	sys_lwmutex_unlock(ppu, lwmutex);
 
 	return res;
 }
 
-s32 sys_lwcond_signal_to(PPUThread& CPU, vm::ptr<sys_lwcond_t> lwcond, u32 ppu_thread_id)
+s32 sys_lwcond_signal_to(PPUThread& ppu, vm::ptr<sys_lwcond_t> lwcond, u32 ppu_thread_id)
 {
 	sysPrxForUser.Log("sys_lwcond_signal_to(lwcond=*0x%x, ppu_thread_id=0x%x)", lwcond, ppu_thread_id);
 
@@ -507,7 +505,7 @@
 		//return _sys_lwcond_signal(lwcond->lwcond_queue, 0, ppu_thread_id, 2);
 	}
 
-	if (lwmutex->vars.owner.load() == CPU.GetId())
+	if (lwmutex->vars.owner.load() == ppu.GetId())
 	{
 		// if owns the mutex
 		lwmutex->all_info++;
@@ -523,7 +521,7 @@
 		return CELL_OK;
 	}
 
-	if (s32 res = sys_lwmutex_trylock(CPU, lwmutex))
+	if (s32 res = sys_lwmutex_trylock(ppu, lwmutex))
 	{
 		// if locking failed
 
@@ -545,7 +543,7 @@
 		lwmutex->all_info--;
 
 		// unlock the lightweight mutex
-		sys_lwmutex_unlock(CPU, lwmutex);
+		sys_lwmutex_unlock(ppu, lwmutex);
 
 		return res;
 	}
@@ -553,11 +551,11 @@
 	return CELL_OK;
 }
 
-s32 sys_lwcond_wait(PPUThread& CPU, vm::ptr<sys_lwcond_t> lwcond, u64 timeout)
+s32 sys_lwcond_wait(PPUThread& ppu, vm::ptr<sys_lwcond_t> lwcond, u64 timeout)
 {
 	sysPrxForUser.Log("sys_lwcond_wait(lwcond=*0x%x, timeout=0x%llx)", lwcond, timeout);
 
-	const be_t<u32> tid = CPU.GetId();
+	const be_t<u32> tid = ppu.GetId();
 
 	const vm::ptr<sys_lwmutex_t> lwmutex = lwcond->lwmutex;
 
@@ -575,7 +573,7 @@
 	lwmutex->recursive_count = 0;
 
 	// call the syscall
-	s32 res = _sys_lwcond_queue_wait(CPU, lwcond->lwcond_queue, lwmutex->sleep_queue, timeout);
+	s32 res = _sys_lwcond_queue_wait(ppu, lwcond->lwcond_queue, lwmutex->sleep_queue, timeout);
 
 	if (res == CELL_OK || res == CELL_ESRCH)
 	{
@@ -598,7 +596,7 @@
 
 	if (res == CELL_EBUSY || res == CELL_ETIMEDOUT)
 	{
-		const s32 res2 = sys_lwmutex_lock(CPU, lwmutex, 0);
+		const s32 res2 = sys_lwmutex_lock(ppu, lwmutex, 0);
 
 		if (res2 == CELL_OK)
 		{
@@ -819,11 +817,11 @@ s64 _sys_process_at_Exitspawn()
 	return CELL_OK;
 }
 
-s32 sys_interrupt_thread_disestablish(PPUThread& CPU, u32 ih)
+s32 sys_interrupt_thread_disestablish(PPUThread& ppu, u32 ih)
 {
 	sysPrxForUser.Todo("sys_interrupt_thread_disestablish(ih=0x%x)", ih);
 
-	return _sys_interrupt_thread_disestablish(CPU, ih, vm::stackvar<be_t<u64>>(CPU));
+	return _sys_interrupt_thread_disestablish(ppu, ih, vm::stackvar<be_t<u64>>(ppu));
 }
 
 s32 sys_process_is_stack(u32 p)
@@ -898,7 +896,7 @@
 	return CELL_OK;
 }
 
-s32 sys_raw_spu_image_load(PPUThread& CPU, s32 id, vm::ptr<sys_spu_image> img)
+s32 sys_raw_spu_image_load(PPUThread& ppu, s32 id, vm::ptr<sys_spu_image> img)
 {
 	sysPrxForUser.Warning("sys_raw_spu_image_load(id=%d, img=*0x%x)", id, img);
 
@@ -1069,7 +1067,7 @@
 	return CELL_OK;
 }
 
-s32 _sys_spu_printf_attach_group(PPUThread& CPU, u32 group)
+s32 _sys_spu_printf_attach_group(PPUThread& ppu, u32 group)
 {
 	sysPrxForUser.Warning("_sys_spu_printf_attach_group(group=0x%x)", group);
 
@@ -1078,10 +1076,10 @@
 		return CELL_ESTAT;
 	}
 
-	return spu_printf_agcb(CPU, group);
+	return spu_printf_agcb(ppu, group);
 }
 
-s32 _sys_spu_printf_detach_group(PPUThread& CPU, u32 group)
+s32 _sys_spu_printf_detach_group(PPUThread& ppu, u32 group)
 {
 	sysPrxForUser.Warning("_sys_spu_printf_detach_group(group=0x%x)", group);
 
@@ -1090,10 +1088,10 @@
 		return CELL_ESTAT;
 	}
 
-	return spu_printf_dgcb(CPU, group);
+	return spu_printf_dgcb(ppu, group);
 }
 
-s32 _sys_spu_printf_attach_thread(PPUThread& CPU, u32 thread)
+s32 _sys_spu_printf_attach_thread(PPUThread& ppu, u32 thread)
 {
 	sysPrxForUser.Warning("_sys_spu_printf_attach_thread(thread=0x%x)", thread);
 
@@ -1102,10 +1100,10 @@
 		return CELL_ESTAT;
 	}
 
-	return spu_printf_atcb(CPU, thread);
+	return spu_printf_atcb(ppu, thread);
 }
 
-s32 _sys_spu_printf_detach_thread(PPUThread& CPU, u32 thread)
+s32 _sys_spu_printf_detach_thread(PPUThread& ppu, u32 thread)
 {
 	sysPrxForUser.Warning("_sys_spu_printf_detach_thread(thread=0x%x)", thread);
 
@@ -1114,7 +1112,7 @@
 		return CELL_ESTAT;
	}
 
-	return spu_printf_dtcb(CPU, thread);
+	return spu_printf_dtcb(ppu, thread);
 }
 
 u32 _sys_malloc(u32 size)
@@ -1140,11 +1138,11 @@
 	return CELL_OK;
 }
 
-s32 _sys_snprintf(PPUThread& CPU, vm::ptr<char> dst, u32 count, vm::cptr<char> fmt, ppu_va_args_t va_args)
+s32 _sys_snprintf(PPUThread& ppu, vm::ptr<char> dst, u32 count, vm::cptr<char> fmt, ppu_va_args_t va_args)
 {
 	sysPrxForUser.Warning("_sys_snprintf(dst=*0x%x, count=%d, fmt=*0x%x, ...)", dst, count, fmt);
 
-	std::string result = ps3_fmt(CPU, fmt, va_args.g_count, va_args.f_count, va_args.v_count);
+	std::string result = ps3_fmt(ppu, fmt, va_args.g_count, va_args.f_count, va_args.v_count);
 
 	sysPrxForUser.Warning("*** '%s' -> '%s'", fmt.get_ptr(), result);
 
@@ -1186,14 +1184,14 @@ void sys_spinlock_initialize(vm::ptr<atomic_be_t<u32>> lock)
 	lock->exchange(0);
 }
 
-void sys_spinlock_lock(vm::ptr<atomic_be_t<u32>> lock)
+void sys_spinlock_lock(PPUThread& ppu, vm::ptr<atomic_be_t<u32>> lock)
 {
 	sysPrxForUser.Log("sys_spinlock_lock(lock=*0x%x)", lock);
 
 	// prx: exchange with 0xabadcafe, repeat until exchanged with 0
 	while (lock->exchange(0xabadcafe).data())
 	{
-		g_sys_spinlock_wm.wait_op(lock.addr(), WRAP_EXPR(!lock->load().data()));
+		vm::wait_op(ppu, lock.addr(), 4, WRAP_EXPR(!lock->load().data()));
 
 		CHECK_EMU_STATUS;
 	}
@@ -1219,10 +1217,10 @@
 	// prx: sync and set 0
 	lock->exchange(0);
 
-	g_sys_spinlock_wm.notify(lock.addr());
+	vm::notify_at(lock.addr(), 4);
 }
 
-s32 sys_ppu_thread_create(PPUThread& CPU, vm::ptr<u64> thread_id, u32 entry, u64 arg, s32 prio, u32 stacksize, u64 flags, vm::cptr<char> threadname)
+s32 sys_ppu_thread_create(PPUThread& ppu, vm::ptr<u64> thread_id, u32 entry, u64 arg, s32 prio, u32 stacksize, u64 flags, vm::cptr<char> threadname)
 {
 	sysPrxForUser.Warning("sys_ppu_thread_create(thread_id=*0x%x, entry=0x%x, arg=0x%llx, prio=%d, stacksize=0x%x, flags=0x%llx, threadname=*0x%x)", thread_id, entry, arg, prio, stacksize, flags, threadname);
 
@@ -1230,7 +1228,7 @@
 	// (return CELL_ENOMEM if failed)
 	// ...
 
-	vm::stackvar<ppu_thread_param_t> attr(CPU);
+	vm::stackvar<ppu_thread_param_t> attr(ppu);
 
 	attr->entry = entry;
 	attr->tls = 0;
@@ -1245,16 +1243,16 @@
 	return flags & SYS_PPU_THREAD_CREATE_INTERRUPT ? CELL_OK : sys_ppu_thread_start(static_cast<u32>(*thread_id));
 }
 
-s32 sys_ppu_thread_get_id(PPUThread& CPU, vm::ptr<u64> thread_id)
+s32 sys_ppu_thread_get_id(PPUThread& ppu, vm::ptr<u64> thread_id)
 {
 	sysPrxForUser.Log("sys_ppu_thread_get_id(thread_id=*0x%x)", thread_id);
 
-	*thread_id = CPU.GetId();
+	*thread_id = ppu.GetId();
 
 	return CELL_OK;
 }
 
-void sys_ppu_thread_exit(PPUThread& CPU, u64 val)
+void sys_ppu_thread_exit(PPUThread& ppu, u64 val)
 {
 	sysPrxForUser.Log("sys_ppu_thread_exit(val=0x%llx)", val);
 
@@ -1263,12 +1261,12 @@
 	// ...
 
 	// call the syscall
-	_sys_ppu_thread_exit(CPU, val);
+	_sys_ppu_thread_exit(ppu, val);
 }
 
 std::mutex g_once_mutex;
 
-void sys_ppu_thread_once(PPUThread& CPU, vm::ptr<atomic_be_t<u32>> once_ctrl, vm::ptr<void()> init)
+void sys_ppu_thread_once(PPUThread& ppu, vm::ptr<atomic_be_t<u32>> once_ctrl, vm::ptr<void()> init)
 {
 	sysPrxForUser.Warning("sys_ppu_thread_once(once_ctrl=*0x%x, init=*0x%x)", once_ctrl, init);
 
@@ -1277,7 +1275,7 @@
 	if (once_ctrl->compare_and_swap_test(SYS_PPU_THREAD_ONCE_INIT, SYS_PPU_THREAD_DONE_INIT))
 	{
 		// call init function using current thread context
-		init(CPU);
+		init(ppu);
 	}
 }
diff --git a/rpcs3/stdafx.h b/rpcs3/stdafx.h
index c1bd53db80..401e14293c 100644
--- a/rpcs3/stdafx.h
+++ b/rpcs3/stdafx.h
@@ -37,6 +37,8 @@
 #include
 #include
 #include
+#include
+#include
 
 #include "Utilities/GNU.h"
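[Closing note, illustrative and not part of the patch: every notification site in vm.cpp uses the comma expression `lock.unlock(), _notify_at(...)` so that g_reservation_mutex is released before g_waiter_list_mutex and the per-thread mutexes are taken inside _notify_at()/try_notify(); the two lock families are therefore never held at once. A minimal sketch of that release-then-notify ordering, with invented names standing in for the real globals:

	#include <mutex>

	std::mutex g_state_mutex;   // stands in for g_reservation_mutex
	std::mutex g_waiters_mutex; // stands in for g_waiter_list_mutex

	void notify_waiters()
	{
		std::lock_guard<std::mutex> lock(g_waiters_mutex);
		// evaluate registered predicates and wake matching threads...
	}

	void update_and_notify()
	{
		std::unique_lock<std::mutex> lock(g_state_mutex);

		// ...mutate the state guarded by g_state_mutex...

		// drop the state lock before taking the waiter lock, so no
		// thread ever holds both and no lock-order cycle can form
		lock.unlock();
		notify_waiters();
	}

The cost of this ordering is a benign spurious-wakeup window between unlock and notify, which the predicate-based wait absorbs by rechecking the condition.]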