vm_native.cpp: Use Windows 10 memory mapping API (the correct API)
Commit 1738b38536 (parent 7235647e67)
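The core of this commit is switching the main guest-memory reservation to Windows 10 placeholder mappings: instead of releasing a reservation and re-mapping into the hole (the non-atomic dance the shm::map_critical comment below calls unsafe under Win32), VirtualAlloc2/MapViewOfFile3/UnmapViewOfFile2 keep the address range reserved as a placeholder that views are swapped into and out of atomically. A minimal standalone sketch of that round trip (illustrative only, not RPCS3 code; assumes a Windows 10 1803+ SDK and linking against onecore.lib, whereas the commit resolves these exports from KernelBase.dll at runtime):

#include <Windows.h>
#include <cstdio>

int main()
{
    const SIZE_T size = 0x10000; // 64 KiB, the Win32 allocation granularity

    // 1) Reserve a placeholder instead of a plain reservation.
    void* placeholder = VirtualAlloc2(nullptr, nullptr, size,
        MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, PAGE_NOACCESS, nullptr, 0);
    if (!placeholder)
        return 1;

    // 2) Create a pagefile-backed section to map over it.
    HANDLE section = CreateFileMappingW(INVALID_HANDLE_VALUE, nullptr,
        PAGE_READWRITE, 0, static_cast<DWORD>(size), nullptr);
    if (!section)
        return 1;

    // 3) Atomically replace the placeholder with a view of the section.
    //    There is no window where another thread could allocate into the range.
    void* view = MapViewOfFile3(section, GetCurrentProcess(), placeholder, 0, size,
        MEM_REPLACE_PLACEHOLDER, PAGE_READWRITE, nullptr, 0);
    if (!view)
        return 1;

    static_cast<unsigned char*>(view)[0] = 0x42;

    // 4) Unmap but keep the placeholder, so the address range stays reserved.
    UnmapViewOfFile2(GetCurrentProcess(), view, MEM_PRESERVE_PLACEHOLDER);

    CloseHandle(section);
    VirtualFree(placeholder, 0, MEM_RELEASE); // finally release the placeholder
    std::puts("placeholder round-trip done");
    return 0;
}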
@@ -3,7 +3,7 @@
 #include "util/types.hpp"
 #include "util/atomic.hpp"
 
-//! Simple sizeless array base for concurrent access. Cannot shrink, only growths automatically.
+//! Simple unshrinkable array base for concurrent access. Only growths automatically.
 //! There is no way to know the current size. The smaller index is, the faster it's accessed.
 //!
 //! T is the type of elements. Currently, default constructor of T shall be constexpr.
@@ -47,6 +47,18 @@ public:
         // Access recursively
         return (*m_next)[index - N];
     }
+
+    u64 size() const
+    {
+        u64 size_n = 0;
+
+        for (auto ptr = this; ptr; ptr = ptr->m_next)
+        {
+            size_n += N;
+        }
+
+        return size_n;
+    }
 };
 
 //! Simple lock-free FIFO queue base. Based on lf_array<T, N> itself. Currently uses 32-bit counters.
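For context, the size() added above just walks the chain of fixed-size blocks, so it reports allocated capacity in multiples of N rather than the number of elements in use. A toy single-threaded analogue of that block-chain pattern (not the actual lf_array, which grows lock-free under concurrent operator[] access):

#include <cstdio>
#include <memory>

// Toy growable block chain mirroring the size() logic added above:
// capacity is counted per block, so the result is always a multiple of N.
template <typename T, std::size_t N>
struct block_chain
{
    T data[N]{};
    std::unique_ptr<block_chain> next;

    T& operator[](std::size_t index)
    {
        if (index < N)
            return data[index];
        if (!next)
            next = std::make_unique<block_chain>(); // grow on demand (not thread-safe here)
        return (*next)[index - N];
    }

    std::size_t size() const
    {
        std::size_t total = 0;
        for (auto ptr = this; ptr; ptr = ptr->next.get())
            total += N;
        return total;
    }
};

int main()
{
    block_chain<int, 4> arr;
    arr[9] = 42;                      // forces allocation of three blocks (indices 0..11)
    std::printf("%zu\n", arr.size()); // prints 12: allocated capacity, not element count
    return 0;
}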
@@ -23,11 +23,11 @@ void ppu_remove_hle_instructions(u32 addr, u32 size);
 
 namespace vm
 {
-    static u8* memory_reserve_4GiB(void* _addr, u64 size = 0x100000000)
+    static u8* memory_reserve_4GiB(void* _addr, u64 size = 0x100000000, bool is_memory_mapping = false)
     {
         for (u64 addr = reinterpret_cast<u64>(_addr) + 0x100000000; addr < 0x8000'0000'0000; addr += 0x100000000)
         {
-            if (auto ptr = utils::memory_reserve(size, reinterpret_cast<void*>(addr)))
+            if (auto ptr = utils::memory_reserve(size, reinterpret_cast<void*>(addr), is_memory_mapping))
             {
                 return static_cast<u8*>(ptr);
             }
@@ -37,7 +37,7 @@ namespace vm
     }
 
     // Emulated virtual memory
-    u8* const g_base_addr = memory_reserve_4GiB(reinterpret_cast<void*>(0x2'0000'0000), 0x2'0000'0000);
+    u8* const g_base_addr = memory_reserve_4GiB(reinterpret_cast<void*>(0x2'0000'0000), 0x2'0000'0000, true);
 
     // Unprotected virtual memory mirror
     u8* const g_sudo_addr = g_base_addr + 0x1'0000'0000;
@@ -697,6 +697,21 @@ namespace vm
         if (~flags & page_readable)
             prot = utils::protection::no;
 
+        std::string map_error;
+
+        auto map_critical = [&](u8* ptr, utils::protection prot)
+        {
+            auto [res, error] = shm->map_critical(ptr, prot);
+
+            if (res != ptr)
+            {
+                map_error = std::move(error);
+                return false;
+            }
+
+            return true;
+        };
+
         if (is_noop)
         {
         }
@@ -704,9 +719,9 @@ namespace vm
         {
             utils::memory_protect(g_base_addr + addr, size, prot);
         }
-        else if (shm->map_critical(g_base_addr + addr, prot) != g_base_addr + addr || shm->map_critical(g_sudo_addr + addr) != g_sudo_addr + addr || !shm->map_self())
+        else if (!map_critical(g_base_addr + addr, prot) || !map_critical(g_sudo_addr + addr, utils::protection::rw) || (map_error = "map_self()", !shm->map_self()))
         {
-            fmt::throw_exception("Memory mapping failed - blame Windows (addr=0x%x, size=0x%x, flags=0x%x)", addr, size, flags);
+            fmt::throw_exception("Memory mapping failed (addr=0x%x, size=0x%x, flags=0x%x): %s", addr, size, flags, map_error);
         }
 
         if (flags & page_executable && !is_noop)
@@ -1138,10 +1153,28 @@ namespace vm
     {
         if (this->flags & preallocated)
         {
+            std::string map_error;
+
+            auto map_critical = [&](u8* ptr, utils::protection prot)
+            {
+                auto [res, error] = m_common->map_critical(ptr, prot);
+
+                if (res != ptr)
+                {
+                    map_error = std::move(error);
+                    return false;
+                }
+
+                return true;
+            };
+
             // Special path for whole-allocated areas allowing 4k granularity
             m_common = std::make_shared<utils::shm>(size);
-            m_common->map_critical(vm::base(addr), this->flags & page_size_4k && utils::c_page_size > 4096 ? utils::protection::rw : utils::protection::no);
-            m_common->map_critical(vm::get_super_ptr(addr));
+
+            if (!map_critical(vm::_ptr<u8>(addr), this->flags & page_size_4k && utils::c_page_size > 4096 ? utils::protection::rw : utils::protection::no) || !map_critical(vm::get_super_ptr(addr), utils::protection::rw))
+            {
+                fmt::throw_exception("Memory mapping failed (addr=0x%x, size=0x%x, flags=0x%x): %s", addr, size, flags, map_error);
+            }
         }
     }
 
@@ -1737,7 +1770,6 @@ namespace vm
         g_locations.clear();
     }
 
-    utils::memory_decommit(g_base_addr, 0x200000000);
     utils::memory_decommit(g_exec_addr, 0x200000000);
     utils::memory_decommit(g_stat_addr, 0x100000000);
 
@@ -109,4 +109,4 @@ namespace utils
     };
 }
 
-#define DYNAMIC_IMPORT(lib, name, ...) inline utils::dynamic_import<__VA_ARGS__> name(lib, #name);
+#define DYNAMIC_IMPORT(lib, name, ...) inline constinit utils::dynamic_import<__VA_ARGS__> name(lib, #name);
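The macro change above only adds constinit, which forces these globals to be constant-initialized and so removes any static-initialization-order concern when the Win10 API probes below read them. A rough sketch of the GetProcAddress-based optional-import pattern such a wrapper builds on (assumed shape with lazy, non-thread-safe resolution; the real utils::dynamic_import may differ in detail):

#include <Windows.h>
#include <cstdio>

// Hypothetical minimal dynamic-import wrapper: resolves a function from an
// already-loaded DLL on first use, then behaves like a nullable function pointer.
template <typename F>
class dyn_import;

template <typename R, typename... Args>
class dyn_import<R(Args...)>
{
    const char* m_lib;
    const char* m_name;
    mutable R (WINAPI* m_fn)(Args...) = nullptr;
    mutable bool m_resolved = false; // illustration only: not thread-safe

    void resolve() const
    {
        if (!m_resolved)
        {
            if (HMODULE mod = ::GetModuleHandleA(m_lib))
                m_fn = reinterpret_cast<R (WINAPI*)(Args...)>(::GetProcAddress(mod, m_name));
            m_resolved = true;
        }
    }

public:
    constexpr dyn_import(const char* lib, const char* name) : m_lib(lib), m_name(name) {}

    explicit operator bool() const { resolve(); return m_fn != nullptr; } // "is the API available?"

    R operator()(Args... args) const { resolve(); return m_fn(args...); }
};

// Usage mirroring the commit: probe an optional Windows 10 export.
inline constinit dyn_import<BOOL(HANDLE, PVOID, ULONG)> g_UnmapViewOfFile2("KernelBase.dll", "UnmapViewOfFile2");

int main()
{
    std::printf("UnmapViewOfFile2 available: %d\n", g_UnmapViewOfFile2 ? 1 : 0);
    return 0;
}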
@@ -3,6 +3,8 @@
 #include "util/types.hpp"
 #include "util/atomic.hpp"
 
+#include <string>
+
 namespace utils
 {
 #ifdef _WIN32
@@ -31,7 +33,7 @@ namespace utils
     * Reserve `size` bytes of virtual memory and returns it.
     * The memory should be committed before usage.
     */
-    void* memory_reserve(usz size, void* use_addr = nullptr);
+    void* memory_reserve(usz size, void* use_addr = nullptr, bool is_memory_mapping = false);
 
     /**
     * Commit `size` bytes of virtual memory starting at pointer.
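The declaration change above only threads the new is_memory_mapping flag through; the reserve-then-commit usage described by these doc comments stays the same. A hedged usage sketch (the commit helper's name and signature are assumed from the comment following the declaration; the header path is also assumed):

// Sketch of the reserve/commit split (names per the header shown above; the
// commit helper's exact signature is an assumption, not confirmed by this diff).
#include "util/vm.hpp"

void example()
{
    // Reserve 64 KiB of address space; pass true only for regions that will later
    // have shm views mapped over them (the new is_memory_mapping flag).
    u8* base = static_cast<u8*>(utils::memory_reserve(0x10000, nullptr, false));

    // Back the first 4 KiB with actual pages before touching them.
    utils::memory_commit(base, 0x1000);
    base[0] = 1;
}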
@@ -89,7 +91,7 @@ namespace utils
         u8* try_map(void* ptr, protection prot = protection::rw, bool cow = false) const;
 
         // Map shared memory over reserved memory region, which is unsafe (non-atomic) under Win32
-        u8* map_critical(void* ptr, protection prot = protection::rw, bool cow = false);
+        std::pair<u8*, std::string> map_critical(void* ptr, protection prot = protection::rw, bool cow = false);
 
         // Map shared memory into its own storage (not mapped by default)
         u8* map_self(protection prot = protection::rw);
@@ -5,7 +5,9 @@
 #ifdef _WIN32
 #include "Utilities/File.h"
 #include "util/dyn_lib.hpp"
+#include "Utilities/lockless.h"
 #include <Windows.h>
+#include <span>
 #else
 #include <sys/mman.h>
 #include <sys/stat.h>
@@ -87,6 +89,100 @@ namespace utils
 #ifdef _WIN32
+    DYNAMIC_IMPORT("KernelBase.dll", VirtualAlloc2, PVOID(HANDLE Process, PVOID Base, SIZE_T Size, ULONG AllocType, ULONG Prot, MEM_EXTENDED_PARAMETER*, ULONG));
+    DYNAMIC_IMPORT("KernelBase.dll", MapViewOfFile3, PVOID(HANDLE Handle, HANDLE Process, PVOID Base, ULONG64 Off, SIZE_T ViewSize, ULONG AllocType, ULONG Prot, MEM_EXTENDED_PARAMETER*, ULONG));
+    DYNAMIC_IMPORT("KernelBase.dll", UnmapViewOfFile2, BOOL(HANDLE Process, PVOID BaseAddress, ULONG UnmapFlags));
+
+    const bool has_win10_memory_mapping_api()
+    {
+        return VirtualAlloc2 && MapViewOfFile3 && UnmapViewOfFile2;
+    }
+
+    struct map_info_t
+    {
+        u64 addr = 0;
+        u64 size = 0;
+        atomic_t<u8> state{};
+    };
+
+    lf_array<map_info_t, 32> s_is_mapping{};
+
+    bool is_memory_mappping_memory(u64 addr)
+    {
+        if (!addr)
+        {
+            return false;
+        }
+
+        const u64 map_size = s_is_mapping.size();
+
+        for (u64 i = map_size - 1; i != umax; i--)
+        {
+            const auto& info = s_is_mapping[i];
+
+            if (info.state == 1)
+            {
+                if (addr >= info.addr && addr < info.addr + info.size)
+                {
+                    return true;
+                }
+            }
+        }
+
+        return false;
+    }
+
+    u64 unmap_mappping_memory(u64 addr, u64 size)
+    {
+        if (!addr || !size)
+        {
+            return false;
+        }
+
+        const u64 map_size = s_is_mapping.size();
+
+        for (u64 i = map_size; i != umax; i--)
+        {
+            auto& info = s_is_mapping[i];
+
+            if (info.state == 1)
+            {
+                if (addr == info.addr && size == info.size)
+                {
+                    if (info.state.compare_and_swap_test(1, 0))
+                    {
+                        return info.size;
+                    }
+                }
+            }
+        }
+
+        return false;
+    }
+
+    bool map_mappping_memory(u64 addr, u64 size)
+    {
+        if (!addr || !size)
+        {
+            return false;
+        }
+
+        for (u64 i = 0;; i++)
+        {
+            auto& info = s_is_mapping[i];
+
+            if (!info.addr && info.state.compare_and_swap_test(0, 2))
+            {
+                info.addr = addr;
+                info.size = size;
+                info.state = 1;
+                return true;
+            }
+        }
+    }
+
+    bool is_memory_mappping_memory(const void* addr)
+    {
+        return is_memory_mappping_memory(reinterpret_cast<u64>(addr));
+    }
 #endif
 
     long get_page_size()
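The helpers added above record which reservations came from the placeholder path so map_critical/unmap_critical can choose the matching strategy. The registry is a small lock-free slot protocol: state 0 = free, 2 = claimed while its fields are being written, 1 = published. A standalone sketch of the same protocol using std::atomic (illustrative; the real code uses lf_array and atomic_t, and grows instead of failing when full):

#include <atomic>
#include <cstdint>
#include <cstdio>

// Fixed-size stand-in for the lf_array-backed registry: each slot moves through
// 0 (free) -> 2 (claimed, fields being written) -> 1 (published).
struct slot
{
    std::uint64_t addr = 0;
    std::uint64_t size = 0;
    std::atomic<std::uint8_t> state{0};
};

static slot s_slots[32];

bool register_region(std::uint64_t addr, std::uint64_t size)
{
    for (auto& s : s_slots)
    {
        std::uint8_t expected = 0;
        if (!s.addr && s.state.compare_exchange_strong(expected, 2))
        {
            s.addr = addr;
            s.size = size;
            s.state.store(1, std::memory_order_release); // publish
            return true;
        }
    }
    return false; // registry full (the real lf_array grows instead)
}

bool contains(std::uint64_t addr)
{
    for (const auto& s : s_slots)
    {
        if (s.state.load(std::memory_order_acquire) == 1 && addr >= s.addr && addr < s.addr + s.size)
            return true;
    }
    return false;
}

bool unregister_region(std::uint64_t addr, std::uint64_t size)
{
    for (auto& s : s_slots)
    {
        std::uint8_t expected = 1;
        if (s.addr == addr && s.size == size && s.state.compare_exchange_strong(expected, 0))
            return true;
    }
    return false;
}

int main()
{
    register_region(0x10000, 0x20000);
    std::printf("%d %d\n", contains(0x18000), contains(0x40000)); // prints "1 0"
    unregister_region(0x10000, 0x20000);
    return 0;
}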
@@ -135,9 +231,20 @@ namespace utils
         return _prot;
     }
 
-    void* memory_reserve(usz size, void* use_addr)
+    void* memory_reserve(usz size, void* use_addr, bool is_memory_mapping)
     {
 #ifdef _WIN32
+        if (is_memory_mapping && has_win10_memory_mapping_api())
+        {
+            if (auto ptr = VirtualAlloc2(nullptr, use_addr, size, MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, PAGE_NOACCESS, nullptr, 0))
+            {
+                map_mappping_memory(reinterpret_cast<u64>(ptr), size);
+                return ptr;
+            }
+
+            return nullptr;
+        }
+
         return ::VirtualAlloc(use_addr, size, MEM_RESERVE, PAGE_NOACCESS);
 #else
         if (use_addr && reinterpret_cast<uptr>(use_addr) % 0x10000)
@@ -271,6 +378,7 @@ namespace utils
     {
 #ifdef _WIN32
         ensure(::VirtualFree(pointer, 0, MEM_RELEASE));
+        unmap_mappping_memory(reinterpret_cast<u64>(pointer), size);
 #else
         ensure(::munmap(pointer, size) != -1);
 #endif
@@ -671,32 +779,72 @@ namespace utils
 #endif
     }
 
-    u8* shm::map_critical(void* ptr, protection prot, bool cow)
+    std::pair<u8*, std::string> shm::map_critical(void* ptr, protection prot, bool cow)
     {
         const auto target = reinterpret_cast<u8*>(reinterpret_cast<u64>(ptr) & -0x10000);
 
 #ifdef _WIN32
-        ::MEMORY_BASIC_INFORMATION mem;
-        if (!::VirtualQuery(target, &mem, sizeof(mem)) || mem.State != MEM_RESERVE || !::VirtualFree(mem.AllocationBase, 0, MEM_RELEASE))
+        ::MEMORY_BASIC_INFORMATION mem{};
+        if (!::VirtualQuery(target, &mem, sizeof(mem)) || mem.State != MEM_RESERVE)
         {
-            return nullptr;
+            return {nullptr, fmt::format("VirtualQuery() Unexpceted memory info: state=0x%x, %s", mem.State, std::as_bytes(std::span(&mem, 1)))};
         }
 
         const auto base = (u8*)mem.AllocationBase;
         const auto size = mem.RegionSize + (target - base);
 
+        if (is_memory_mappping_memory(ptr))
+        {
+            if (base < target && !::VirtualFree(base, target - base, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER))
+            {
+                return {nullptr, "Failed to split allocation base"};
+            }
+
+            if (target + m_size < base + size && !::VirtualFree(target, m_size, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER))
+            {
+                return {nullptr, "Failed to split allocation end"};
+            }
+
+            if (cow)
+            {
+                // TODO: Implement it
+            }
+
+            if (MapViewOfFile3(m_handle, GetCurrentProcess(), target, 0, m_size, MEM_REPLACE_PLACEHOLDER, PAGE_EXECUTE_READWRITE, nullptr, 0))
+            {
+                if (prot != protection::rw && prot != protection::wx)
+                {
+                    DWORD old;
+                    if (!::VirtualProtect(target, m_size, +prot, &old))
+                    {
+                        UnmapViewOfFile2(nullptr, target, MEM_PRESERVE_PLACEHOLDER);
+                        return {nullptr, "Failed to protect"};
+                    }
+                }
+
+                return {target, {}};
+            }
+
+            return {nullptr, "Failed to map3"};
+        }
+
+        if (!::VirtualFree(mem.AllocationBase, 0, MEM_RELEASE))
+        {
+            return {nullptr, "VirtualFree() failed on allocation base"};
+        }
+
         if (base < target && !::VirtualAlloc(base, target - base, MEM_RESERVE, PAGE_NOACCESS))
         {
-            return nullptr;
+            return {nullptr, "VirtualAlloc() failed to reserve allocation base"};
         }
 
         if (target + m_size < base + size && !::VirtualAlloc(target + m_size, base + size - target - m_size, MEM_RESERVE, PAGE_NOACCESS))
         {
-            return nullptr;
+            return {nullptr, "VirtualAlloc() failed to reserve allocation end"};
         }
 #endif
 
-        return this->map(target, prot, cow);
+        return {this->map(target, prot, cow), "Failed to map"};
     }
 
     u8* shm::map_self(protection prot)
@@ -736,6 +884,25 @@ namespace utils
         const auto target = reinterpret_cast<u8*>(reinterpret_cast<u64>(ptr) & -0x10000);
 
 #ifdef _WIN32
+        if (is_memory_mappping_memory(ptr))
+        {
+            ensure(UnmapViewOfFile2(GetCurrentProcess(), target, MEM_PRESERVE_PLACEHOLDER));
+
+            ::MEMORY_BASIC_INFORMATION mem{}, mem2{};
+            ensure(::VirtualQuery(target - 1, &mem, sizeof(mem)) && ::VirtualQuery(target + m_size, &mem2, sizeof(mem2)));
+
+            const auto size1 = mem.State == MEM_RESERVE ? target - (u8*)mem.AllocationBase : 0;
+            const auto size2 = mem2.State == MEM_RESERVE ? mem2.RegionSize : 0;
+
+            if (!size1 && !size2)
+            {
+                return;
+            }
+
+            ensure(::VirtualFree(size1 ? mem.AllocationBase : target, m_size + size1 + size2, MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS));
+            return;
+        }
+
         this->unmap(target);
 
         ::MEMORY_BASIC_INFORMATION mem, mem2;
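The unmap path above leans on two more placeholder operations: VirtualFree with MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER splits a placeholder (as map_critical does before mapping), and MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS merges adjacent placeholders back into one region. A standalone split/coalesce sketch (illustrative only, same SDK assumptions as the earlier sketch):

#include <Windows.h>
#include <cstdio>

int main()
{
    const SIZE_T gran = 0x10000; // 64 KiB allocation granularity

    // One 128 KiB placeholder reservation.
    char* base = static_cast<char*>(VirtualAlloc2(nullptr, nullptr, gran * 2,
        MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, PAGE_NOACCESS, nullptr, 0));
    if (!base)
        return 1;

    // Split off the second 64 KiB: the region becomes two adjacent placeholders.
    if (!VirtualFree(base + gran, gran, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER))
        return 1;

    // ... a view could now be mapped over [base + gran, base + 2*gran) with
    // MapViewOfFile3(MEM_REPLACE_PLACEHOLDER) and later removed again with
    // UnmapViewOfFile2(MEM_PRESERVE_PLACEHOLDER) ...

    // Merge the two adjacent placeholders back into a single one.
    if (!VirtualFree(base, gran * 2, MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS))
        return 1;

    VirtualFree(base, 0, MEM_RELEASE); // release the coalesced placeholder
    std::puts("split/coalesce done");
    return 0;
}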