Mirror of https://github.com/RPCS3/rpcs3.git
SPU: Fix page fault notifications (non-TSX)
commit a199c71a55
parent 3d20ce70f5
@@ -69,6 +69,16 @@ namespace vm
 	std::array<atomic_t<cpu_thread*>, g_cfg.core.ppu_threads.max> g_locks{};
 	std::array<atomic_t<u64>, 6> g_range_locks{};
 
+	// Page information
+	struct memory_page
+	{
+		// Memory flags
+		atomic_t<u8> flags;
+	};
+
+	// Memory pages
+	std::array<memory_page, 0x100000000 / 4096> g_pages{};
+
 	static void _register_lock(cpu_thread* _cpu)
 	{
 		for (u32 i = 0, max = g_cfg.core.ppu_threads;;)
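For context, this hunk moves the page table to the top of the file: one atomic flags byte per 4 KiB page, covering the full 4 GiB guest address space, so the locking code below can consult page flags directly. A minimal standalone sketch of the same layout, using std::atomic in place of rpcs3's atomic_t and hypothetical flag values:

	#include <array>
	#include <atomic>
	#include <cstdint>

	// Hypothetical flag bits; rpcs3 defines its own page_readable/page_writable.
	constexpr std::uint8_t page_readable = 1 << 0;
	constexpr std::uint8_t page_writable = 1 << 1;

	struct memory_page
	{
		// Memory flags
		std::atomic<std::uint8_t> flags{0};
	};

	// One entry per 4 KiB page of the 4 GiB guest address space (1M entries).
	std::array<memory_page, 0x100000000ull / 4096> g_pages{};

	// Any guest address finds its page entry by dividing by the page size.
	bool is_readable(std::uint32_t addr)
	{
		return (g_pages[addr / 4096].flags.load() & page_readable) != 0;
	}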
@@ -215,14 +225,12 @@ namespace vm
 			return 0;
 		};
 
-		atomic_t<u64>* _ret;
-
 		if (u64 _a1 = test_addr(g_addr_lock.load(), addr, end)) [[likely]]
 		{
 			// Optimistic path (hope that address range is not locked)
-			_ret = _register_range_lock(_a1);
+			const auto _ret = _register_range_lock(_a1);
 
-			if (_a1 == test_addr(g_addr_lock.load(), addr, end)) [[likely]]
+			if (_a1 == test_addr(g_addr_lock.load(), addr, end) && !!(g_pages[addr / 4096].flags & page_readable)) [[likely]]
			{
 				return _ret;
 			}
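This hunk is the fast-path half of the fix: the range lock is registered first and only then re-validated, and the re-validation now additionally requires the page to be readable. A standalone sketch of that publish-then-recheck pattern (hypothetical names throughout, std::atomic standing in for atomic_t):

	#include <atomic>
	#include <cstdint>

	std::atomic<std::uint64_t> g_addr_lock{};   // currently locked range, 0 if none
	std::atomic<std::uint64_t> g_range_lock{};  // this thread's published range lock
	std::atomic<std::uint8_t>  g_page_flags{};  // flags of the page containing addr
	constexpr std::uint8_t page_readable = 1 << 0;

	// Toy version of test_addr: encode [addr, end] if it does not collide
	// with the currently locked range, else return 0.
	std::uint64_t test_addr(std::uint64_t locked, std::uint32_t addr, std::uint32_t end)
	{
		return locked ? 0 : (std::uint64_t{addr} << 32) | end;
	}

	std::atomic<std::uint64_t>* try_optimistic(std::uint32_t addr, std::uint32_t end)
	{
		if (std::uint64_t a1 = test_addr(g_addr_lock.load(), addr, end))
		{
			// Publish the range lock first...
			g_range_lock.store(a1);

			// ...then re-validate. The commit adds the second condition:
			// the page must also be readable, or the lock is rolled back.
			if (a1 == test_addr(g_addr_lock.load(), addr, end) &&
				(g_page_flags.load() & page_readable))
			{
				return &g_range_lock;
			}

			g_range_lock.store(0); // roll back; caller falls to the slow path
		}

		return nullptr;
	}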
@@ -230,12 +238,22 @@ namespace vm
 			*_ret = 0;
 		}
 
 		while (true)
 		{
-			::reader_lock lock(g_mutex);
-			_ret = _register_range_lock(test_addr(UINT32_MAX, addr, end));
+			std::shared_lock lock(g_mutex);
+
+			if (!(g_pages[addr / 4096].flags & page_readable))
+			{
+				lock.unlock();
+
+				// Try triggering a page fault (write)
+				// TODO: Read memory if needed
+				vm::_ref<atomic_t<u8>>(addr) += 0;
+				continue;
+			}
 
-			return _ret;
+			return _register_range_lock(test_addr(UINT32_MAX, addr, end));
 		}
 	}
 
 	void passive_unlock(cpu_thread& cpu)
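On the slow path, the commit replaces the bare reader_lock with a retry loop: if the target page is not readable, the lock is dropped and a dummy write is issued so the normal page-fault machinery (and its notification) runs, after which the loop retries. A compilable miniature of that loop, with hypothetical stand-ins for the vm internals:

	#include <array>
	#include <atomic>
	#include <cstdint>
	#include <shared_mutex>

	// Tiny stand-in model (hypothetical): 16 pages of flags and backing bytes.
	constexpr std::uint8_t  page_readable = 1 << 0;
	constexpr std::uint32_t page_size = 4096;

	std::array<std::atomic<std::uint8_t>, 16> g_page_flags{};
	std::array<std::atomic<std::uint8_t>, 16 * page_size> g_mem{};
	std::atomic<std::uint64_t> g_range_lock{};
	std::shared_mutex g_mutex;

	// Models vm::_ref<atomic_t<u8>>(addr) += 0: a dummy read-modify-write that
	// would fault on an unmapped page in the emulator, running the page-fault
	// handler (and its notification). Here the "handler" just maps the page.
	void touch(std::uint32_t addr)
	{
		g_mem[addr].fetch_add(0);
		g_page_flags[addr / page_size] |= page_readable;
	}

	std::atomic<std::uint64_t>* lock_range(std::uint32_t addr)
	{
		while (true)
		{
			std::shared_lock lock(g_mutex);

			if (!(g_page_flags[addr / page_size] & page_readable))
			{
				// Unlock first, then provoke the fault, then retry the loop.
				lock.unlock();
				touch(addr);
				continue;
			}

			// Page is readable: safe to hand out the range lock.
			return &g_range_lock;
		}
	}

	int main()
	{
		lock_range(3 * page_size); // page 3 starts unmapped; the loop maps it, then locks
	}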
@@ -397,7 +415,7 @@ namespace vm
 		g_mutex.unlock();
 	}
 
-	void reservation_lock_internal(atomic_t<u64>& res)
+	bool reservation_lock_internal(u32 addr, atomic_t<u64>& res)
 	{
 		for (u64 i = 0;; i++)
 		{
@@ -412,21 +430,19 @@ namespace vm
 			}
 			else
 			{
+				// TODO: Accurate locking in this case
+				if (!(g_pages[addr / 4096].flags & page_writable))
+				{
+					return false;
+				}
+
 				std::this_thread::yield();
 			}
 		}
+
+		return true;
 	}
 
-	// Page information
-	struct memory_page
-	{
-		// Memory flags
-		atomic_t<u8> flags;
-	};
-
-	// Memory pages
-	std::array<memory_page, 0x100000000 / 4096> g_pages{};
-
 	static void _page_map(u32 addr, u8 flags, u32 size, utils::shm* shm)
 	{
 		if (!size || (size | addr) % 4096 || flags & page_allocated)
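With this hunk, reservation_lock_internal takes the address and reports failure instead of spinning forever when the underlying page is not writable. A simplified, compilable sketch of that contract (the busy-wait pacing of the real function is omitted; names other than reservation_lock_internal are hypothetical):

	#include <atomic>
	#include <cstdint>
	#include <thread>

	// Hypothetical stand-in for g_pages[addr / 4096].flags & page_writable.
	constexpr std::uint8_t page_writable = 1 << 1;
	std::atomic<std::uint8_t> g_page_flags{};

	bool page_is_writable(std::uint32_t /*addr*/)
	{
		return (g_page_flags.load() & page_writable) != 0;
	}

	// Spin until bit 0 of the reservation word is acquired. Give up (return
	// false) once the page turns out not to be writable, so the caller can
	// fall back instead of spinning forever on memory that cannot be locked.
	bool reservation_lock_internal(std::uint32_t addr, std::atomic<std::uint64_t>& res)
	{
		while (true)
		{
			if (!(res.fetch_or(1) & 1))
			{
				return true; // bit 0 acquired
			}

			if (!page_is_writable(addr))
			{
				return false; // unwritable page: bail out of the spin
			}

			std::this_thread::yield();
		}
	}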
@@ -26,18 +26,23 @@ namespace vm
 		return *reinterpret_cast<atomic_t<u64>*>(g_reservations + (addr & 0xff80) / 2);
 	}
 
-	void reservation_lock_internal(atomic_t<u64>&);
+	bool reservation_lock_internal(u32, atomic_t<u64>&);
 
 	inline atomic_t<u64>& reservation_lock(u32 addr, u32 size)
 	{
-		auto& res = vm::reservation_acquire(addr, size);
+		auto res = &vm::reservation_acquire(addr, size);
 
-		if (res.bts(0)) [[unlikely]]
+		if (res->bts(0)) [[unlikely]]
 		{
-			reservation_lock_internal(res);
+			static atomic_t<u64> no_lock{};
+
+			if (!reservation_lock_internal(addr, *res))
+			{
+				res = &no_lock;
+			}
 		}
 
-		return res;
+		return *res;
 	}
 
 	inline bool reservation_trylock(atomic_t<u64>& res, u64 rtime)
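On the header side, reservation_lock absorbs the possible failure: when the slow path cannot take the lock, the caller is redirected to a static dummy word, so its eventual unlock store cannot corrupt a reservation it never owned. A self-contained sketch of that fallback (single reservation word and stub names are hypothetical):

	#include <atomic>
	#include <cassert>
	#include <cstdint>

	// Single reservation word standing in for vm::reservation_acquire.
	std::atomic<std::uint64_t> g_res{};

	std::atomic<std::uint64_t>& reservation_acquire_stub(std::uint32_t)
	{
		return g_res;
	}

	// Trivial slow path for this demo: always fail, as if the page were unwritable.
	bool reservation_lock_internal(std::uint32_t, std::atomic<std::uint64_t>&)
	{
		return false;
	}

	std::atomic<std::uint64_t>& reservation_lock(std::uint32_t addr)
	{
		auto* res = &reservation_acquire_stub(addr);

		// bts(0) in rpcs3: atomically set bit 0 and test its previous value;
		// fetch_or is the plain std::atomic equivalent.
		if (res->fetch_or(1) & 1)
		{
			// Lock bit was already set: slow path. On failure, hand back a
			// dummy word so the caller's eventual unlock store cannot touch
			// a reservation it never owned.
			static std::atomic<std::uint64_t> no_lock{};

			if (!reservation_lock_internal(addr, *res))
			{
				res = &no_lock;
			}
		}

		return *res;
	}

	int main()
	{
		auto& a = reservation_lock(0x1000); // uncontended: takes the real word
		auto& b = reservation_lock(0x1000); // contended and failed: dummy word
		assert(&a == &g_res && &b != &g_res);
	}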