diff --git a/rpcs3/Emu/Memory/vm.cpp b/rpcs3/Emu/Memory/vm.cpp
index 43ce3adb3f..61e619c5a5 100644
--- a/rpcs3/Emu/Memory/vm.cpp
+++ b/rpcs3/Emu/Memory/vm.cpp
@@ -69,6 +69,16 @@ namespace vm
 	std::array<atomic_t<cpu_thread*>, g_cfg.core.ppu_threads.max> g_locks{};
 	std::array<atomic_t<u64>, 6> g_range_locks{};
 
+	// Page information
+	struct memory_page
+	{
+		// Memory flags
+		atomic_t<u8> flags;
+	};
+
+	// Memory pages
+	std::array<memory_page, 0x100000000 / 4096> g_pages{};
+
 	static void _register_lock(cpu_thread* _cpu)
 	{
 		for (u32 i = 0, max = g_cfg.core.ppu_threads;;)
@@ -215,14 +225,12 @@
 			return 0;
 		};
 
-		atomic_t<u64>* _ret;
-
 		if (u64 _a1 = test_addr(g_addr_lock.load(), addr, end)) [[likely]]
 		{
 			// Optimistic path (hope that address range is not locked)
-			_ret = _register_range_lock(_a1);
+			const auto _ret = _register_range_lock(_a1);
 
-			if (_a1 == test_addr(g_addr_lock.load(), addr, end)) [[likely]]
+			if (_a1 == test_addr(g_addr_lock.load(), addr, end) && !!(g_pages[addr / 4096].flags & page_readable)) [[likely]]
 			{
 				return _ret;
 			}
@@ -230,12 +238,22 @@
 			*_ret = 0;
 		}
 
+		while (true)
 		{
-			::reader_lock lock(g_mutex);
-			_ret = _register_range_lock(test_addr(UINT32_MAX, addr, end));
-		}
+			std::shared_lock lock(g_mutex);
 
-		return _ret;
+			if (!(g_pages[addr / 4096].flags & page_readable))
+			{
+				lock.unlock();
+
+				// Try triggering a page fault (write)
+				// TODO: Read memory if needed
+				vm::_ref<atomic_t<u8>>(addr) += 0;
+				continue;
+			}
+
+			return _register_range_lock(test_addr(UINT32_MAX, addr, end));
+		}
 	}
 
 	void passive_unlock(cpu_thread& cpu)
@@ -397,7 +415,7 @@
 		g_mutex.unlock();
 	}
 
-	void reservation_lock_internal(atomic_t<u64>& res)
+	bool reservation_lock_internal(u32 addr, atomic_t<u64>& res)
 	{
 		for (u64 i = 0;; i++)
 		{
@@ -412,21 +430,19 @@
 			}
 			else
 			{
+				// TODO: Accurate locking in this case
+				if (!(g_pages[addr / 4096].flags & page_writable))
+				{
+					return false;
+				}
+
 				std::this_thread::yield();
 			}
 		}
+
+		return true;
 	}
 
-	// Page information
-	struct memory_page
-	{
-		// Memory flags
-		atomic_t<u8> flags;
-	};
-
-	// Memory pages
-	std::array<memory_page, 0x100000000 / 4096> g_pages{};
-
 	static void _page_map(u32 addr, u8 flags, u32 size, utils::shm* shm)
 	{
 		if (!size || (size | addr) % 4096 || flags & page_allocated)
diff --git a/rpcs3/Emu/Memory/vm_reservation.h b/rpcs3/Emu/Memory/vm_reservation.h
index bcb0b54dc8..c6573a4084 100644
--- a/rpcs3/Emu/Memory/vm_reservation.h
+++ b/rpcs3/Emu/Memory/vm_reservation.h
@@ -26,18 +26,23 @@ namespace vm
 		return *reinterpret_cast<atomic_t<u64>*>(g_reservations + (addr & 0xff80) / 2);
 	}
 
-	void reservation_lock_internal(atomic_t<u64>&);
+	bool reservation_lock_internal(u32, atomic_t<u64>&);
 
 	inline atomic_t<u64>& reservation_lock(u32 addr, u32 size)
 	{
-		auto& res = vm::reservation_acquire(addr, size);
+		auto res = &vm::reservation_acquire(addr, size);
 
-		if (res.bts(0)) [[unlikely]]
+		if (res->bts(0)) [[unlikely]]
 		{
-			reservation_lock_internal(res);
+			static atomic_t<u64> no_lock{};
+
+			if (!reservation_lock_internal(addr, *res))
+			{
+				res = &no_lock;
+			}
 		}
 
-		return res;
+		return *res;
 	}
 
 	inline bool reservation_trylock(atomic_t<u64>& res, u64 rtime)
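
For reference, below is a minimal, self-contained sketch of the fallback pattern this patch introduces in `reservation_lock()`: spin on bit 0 of the reservation word, bail out once the target page turns out not to be writable, and hand such callers a harmless dummy word instead of spinning forever. This is an illustration only, not the rpcs3 implementation: `std::atomic` stands in for rpcs3's `atomic_t`, and `page_is_writable()` is a hypothetical placeholder for the `g_pages[addr / 4096].flags & page_writable` check.

```cpp
#include <atomic>
#include <cstdint>
#include <thread>

// Hypothetical stand-in for the g_pages[addr / 4096].flags & page_writable test.
static bool page_is_writable(std::uint32_t /*addr*/)
{
	return true; // always writable in this toy example
}

// Spin on bit 0 of the reservation word, mirroring reservation_lock_internal().
// Returns false instead of spinning forever once the page is found to be
// unwritable, which is the early-out this patch adds.
static bool lock_internal(std::uint32_t addr, std::atomic<std::uint64_t>& res)
{
	for (;;)
	{
		std::uint64_t old = res.load();

		// Bit 0 clear: try to claim the lock.
		if (!(old & 1) && res.compare_exchange_weak(old, old | 1))
		{
			return true;
		}

		if (!page_is_writable(addr))
		{
			return false; // caller falls back to the dummy word
		}

		std::this_thread::yield();
	}
}

// Shape of the patched reservation_lock(): fast path via test-and-set of
// bit 0, slow path that may redirect the caller to a static dummy word.
static std::atomic<std::uint64_t>& lock(std::uint32_t addr, std::atomic<std::uint64_t>& res)
{
	if (res.fetch_or(1) & 1) // bit 0 already set: contended (bts in rpcs3)
	{
		static std::atomic<std::uint64_t> no_lock{};

		if (!lock_internal(addr, res))
		{
			return no_lock; // writes to the dummy touch no real reservation
		}
	}

	return res;
}

int main()
{
	std::atomic<std::uint64_t> res{0};

	auto& r = lock(0x1000, res);
	// ... guarded access would happen here ...
	r.fetch_and(~std::uint64_t{1}); // release by clearing bit 0
}
```

The `static atomic_t<u64> no_lock{}` redirection works because callers only ever write to the returned reference to release the lock; pointing them at a shared dummy word keeps the unlock path branch-free, presumably at the cost of less accurate locking in the unwritable-page case (which the patch itself flags with a TODO).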