
vm: workaround bigger native page size for 4k pages

Always allocate full memory block if native page size > 4k
Author: Nekotekina, 2022-05-31 21:03:36 +03:00 (committed by Ivan)
parent b0ccd2a8a0
commit 422246825b
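
For context, a minimal standalone sketch (not part of this commit) of the host-page-size check that motivates it. Protection syscalls such as mprotect() operate on whole native pages, so on a host with 16 KiB pages a single 4 KiB guest page can never be mapped or protected on its own; rpcs3 caches the host value as utils::c_page_size, which the hunks below compare against 4096.

#include <unistd.h>
#include <cstdio>

int main()
{
	// Host's native page size: 4096 on most x86-64 systems,
	// 16384 on e.g. Apple Silicon macOS.
	const long native = sysconf(_SC_PAGESIZE);

	// Mirrors the "utils::c_page_size > 4096" checks in the diff below:
	// with larger native pages, per-4k map/unmap must become a no-op.
	std::printf("native page size: %ld -> 4k granularity %s\n",
		native, native > 4096 ? "unavailable" : "available");
}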


@@ -609,7 +609,7 @@ namespace vm
 		thread_ctrl::emergency_exit("vm::reservation_escape");
 	}
 
-	static void _page_map(u32 addr, u8 flags, u32 size, utils::shm* shm, std::pair<const u32, std::pair<u32, std::shared_ptr<utils::shm>>>* (*search_shm)(vm::block_t* block, utils::shm* shm))
+	static void _page_map(u32 addr, u8 flags, u32 size, utils::shm* shm, u64 bflags, std::pair<const u32, std::pair<u32, std::shared_ptr<utils::shm>>>* (*search_shm)(vm::block_t* block, utils::shm* shm))
 	{
 		perf_meter<"PAGE_MAP"_u64> perf0;
@@ -626,6 +626,9 @@ namespace vm
 			}
 		}
 
+		// If native page size exceeds 4096, don't map native pages (expected to be always mapped in this case)
+		const bool is_noop = bflags & page_size_4k && utils::c_page_size > 4096;
+
 		// Lock range being mapped
 		_lock_main_range_lock(range_allocation, addr, size);
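
For readability, the check added here (and mirrored in _page_unmap below) is equivalent to the following hypothetical helper, which is not in the actual source; since '&' binds tighter than '&&', the unparenthesised original parses the same way:

#include <cstddef>
#include <cstdint>

// True when the owning block requested 4 KiB granularity (page_size_4k
// block flag) but the host's native pages are larger: the whole block is
// then kept mapped, and per-page native map/unmap is skipped.
static bool is_native_map_noop(std::uint64_t bflags, std::uint64_t page_size_4k, std::size_t native_page_size)
{
	return (bflags & page_size_4k) && native_page_size > 4096;
}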
@@ -694,7 +697,10 @@ namespace vm
 		if (~flags & page_readable)
 			prot = utils::protection::no;
 
-		if (!shm)
+		if (is_noop)
+		{
+		}
+		else if (!shm)
 		{
 			utils::memory_protect(g_base_addr + addr, size, prot);
 		}
@@ -703,7 +709,7 @@ namespace vm
 			fmt::throw_exception("Memory mapping failed - blame Windows (addr=0x%x, size=0x%x, flags=0x%x)", addr, size, flags);
 		}
 
-		if (flags & page_executable)
+		if (flags & page_executable && !is_noop)
 		{
 			// TODO (dead code)
 			utils::memory_commit(g_exec_addr + addr * 2, size * 2);
@@ -800,7 +806,7 @@ namespace vm
 		return true;
 	}
 
-	static u32 _page_unmap(u32 addr, u32 max_size, utils::shm* shm)
+	static u32 _page_unmap(u32 addr, u32 max_size, u64 bflags, utils::shm* shm)
 	{
 		perf_meter<"PAGE_UNm"_u64> perf0;
@@ -809,6 +815,9 @@ namespace vm
 			fmt::throw_exception("Invalid arguments (addr=0x%x, max_size=0x%x)", addr, max_size);
 		}
 
+		// If native page size exceeds 4096, don't unmap native pages (always mapped)
+		const bool is_noop = bflags & page_size_4k && utils::c_page_size > 4096;
+
 		// Determine deallocation size
 		u32 size = 0;
 		bool is_exec = false;
@@ -868,7 +877,11 @@ namespace vm
 		ppu_remove_hle_instructions(addr, size);
 
 		// Actually unmap memory
-		if (!shm)
+		if (is_noop)
+		{
+			std::memset(g_sudo_addr + addr, 0, size);
+		}
+		else if (!shm)
 		{
 			utils::memory_protect(g_base_addr + addr, size, utils::protection::no);
 			std::memset(g_sudo_addr + addr, 0, size);
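
A note on the design choice in this hunk (names taken from the surrounding code): on the no-op path the native mapping must stay intact, because revoking protection on a 4 KiB range would also hit the neighbouring guest pages that share the same native page. The freed range is therefore only scrubbed, through the always-writable sudo mirror:

// Annotated restatement of the branch above, for illustration only.
if (is_noop)
{
	// Leave the native mapping alone; just zero the freed range via
	// g_sudo_addr, the mirror that is always mapped read-write.
	std::memset(g_sudo_addr + addr, 0, size);
}
else if (!shm)
{
	// Normal path: revoke access on the primary mirror, then zero.
	utils::memory_protect(g_base_addr + addr, size, utils::protection::no);
	std::memset(g_sudo_addr + addr, 0, size);
}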
@@ -881,7 +894,7 @@ namespace vm
 #endif
 		}
 
-		if (is_exec)
+		if (is_exec && !is_noop)
 		{
 			utils::memory_decommit(g_exec_addr + addr * 2, size * 2);
@@ -1045,7 +1058,7 @@ namespace vm
 		}
 
 		// Map "real" memory pages; provide a function to search for mirrors with private member access
-		_page_map(page_addr, flags, page_size, shm.get(), [](vm::block_t* _this, utils::shm* shm)
+		_page_map(page_addr, flags, page_size, shm.get(), this->flags, [](vm::block_t* _this, utils::shm* shm)
 		{
 			auto& map = (_this->m.*block_map)();
@@ -1127,7 +1140,7 @@ namespace vm
 		{
 			// Special path for whole-allocated areas allowing 4k granularity
 			m_common = std::make_shared<utils::shm>(size);
-			m_common->map_critical(vm::base(addr), utils::protection::no);
+			m_common->map_critical(vm::base(addr), this->flags & page_size_4k && utils::c_page_size > 4096 ? utils::protection::rw : utils::protection::no);
 			m_common->map_critical(vm::get_super_ptr(addr));
 		}
 	}
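
This one-liner is the heart of the workaround: when the block allows 4 KiB granularity on a host with larger native pages, the whole area is committed read-write once at construction, and the per-page map/unmap calls above degenerate into the no-ops guarded by is_noop. Unfolded for illustration (a sketch, not the committed form):

// Sketch: pick the initial protection for a whole-allocated area.
const bool whole_rw = this->flags & page_size_4k && utils::c_page_size > 4096;

// With >4 KiB native pages the area must be usable immediately, since
// later protection changes at 4 KiB granularity are impossible.
m_common->map_critical(vm::base(addr), whole_rw ? utils::protection::rw : utils::protection::no);
m_common->map_critical(vm::get_super_ptr(addr)); // sudo mirror, always RW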
@@ -1143,7 +1156,7 @@ namespace vm
 		{
 			const auto next = std::next(it);
 			const auto size = it->second.first;
-			_page_unmap(it->first, size, it->second.second.get());
+			_page_unmap(it->first, size, this->flags, it->second.second.get());
 			it = next;
 		}
@@ -1328,7 +1341,7 @@ namespace vm
 		}
 
 		// Unmap "real" memory pages
-		ensure(size == _page_unmap(addr, size, found->second.second.get()));
+		ensure(size == _page_unmap(addr, size, this->flags, found->second.second.get()));
 
 		// Clear stack guards
 		if (flags & stack_guarded)