1
0
mirror of https://github.com/RPCS3/rpcs3.git synced 2024-11-22 18:53:28 +01:00

Memory mirror support

Implemented utils::memory_release (not used)
Implemented utils::shm class (handler for shared memory)
Improved sys_mmapper syscalls
Rewritten ppu_patch function
Implemented vm::get_super_ptr (ignores memory protection)
Minimal allocation alignment increased to 0x10000
This commit is contained in:
Nekotekina 2018-05-07 21:57:06 +03:00
parent fe4c3c4d84
commit 5d15d64ec8
14 changed files with 541 additions and 232 deletions

View File

@ -76,13 +76,158 @@ namespace utils
#endif
}
// Free a region previously obtained from memory_reserve; `size` must be the original size (ignored on Win32)
void memory_release(void* pointer, std::size_t size)
{
#ifdef _WIN32
	// Releasing a whole reservation requires dwSize == 0 with MEM_RELEASE
	const BOOL released = ::VirtualFree(pointer, 0, MEM_RELEASE);
	verify(HERE), released;
#else
	// POSIX: the length must match the original mapping length
	const int result = ::munmap(pointer, size);
	verify(HERE), result != -1;
#endif
}
// Change memory protection of the given range.
// On Win32, VirtualProtect cannot span multiple allocation regions, so each
// region returned by VirtualQuery is protected individually.
// Fix: removed the stale single-shot VirtualProtect call (and its duplicate
// `DWORD old` declaration) that preceded the region loop — it redundantly
// protected the whole range once before the loop did the same work correctly.
void memory_protect(void* pointer, std::size_t size, protection prot)
{
#ifdef _WIN32
	for (u64 addr = (u64)pointer, end = addr + size; addr < end;)
	{
		// Query current region
		::MEMORY_BASIC_INFORMATION mem;
		verify(HERE), ::VirtualQuery((void*)addr, &mem, sizeof(mem));

		DWORD old;
		if (!::VirtualProtect(mem.BaseAddress, std::min<u64>(end - (u64)mem.BaseAddress, mem.RegionSize), +prot, &old))
		{
			fmt::throw_exception("VirtualProtect failed (%p, 0x%x, addr=0x%x, error=%#x)", pointer, size, addr, GetLastError());
		}

		// Next region
		addr = (u64)mem.BaseAddress + mem.RegionSize;
	}
#else
	// POSIX: align the start down to a page boundary and round the size up
	verify(HERE), ::mprotect((void*)((u64)pointer & -4096), ::align(size, 4096), +prot) != -1;
#endif
}
// Create an anonymous shared memory object of at least `size` bytes,
// rounded up to the minimal allocation granularity (0x10000).
shm::shm(u32 size)
: m_size(::align(size, 0x10000))
, m_ptr(nullptr)
{
#ifdef _WIN32
// Pagefile-backed section; PAGE_EXECUTE_READWRITE allows later RWX/RX views
m_handle = ::CreateFileMappingW(INVALID_HANDLE_VALUE, NULL, PAGE_EXECUTE_READWRITE, 0, m_size, NULL);
//#elif __linux__
// m_file = ::memfd_create("mem1", 0);
// ::ftruncate(m_file, m_size);
#else
// POSIX fallback: retry while the fixed name already exists (e.g. another
// instance is between shm_open and shm_unlink); any other error aborts.
// NOTE(review): spins indefinitely on persistent EEXIST, and the ftruncate
// result is unchecked — confirm these are acceptable upstream.
while ((m_file = ::shm_open("/rpcs3-mem1", O_RDWR | O_CREAT | O_EXCL, S_IWUSR | S_IRUSR)) == -1)
{
if (errno != EEXIST)
return;
}
// Unlink the name immediately; the descriptor keeps the object alive
::shm_unlink("/rpcs3-mem1");
::ftruncate(m_file, m_size);
#endif
// Keep a default read/write mapping for the lifetime of this object
m_ptr = this->map(nullptr);
}
// Release the persistent mapping created in the constructor,
// then close the underlying OS object.
shm::~shm()
{
#ifdef _WIN32
::UnmapViewOfFile(m_ptr);
::CloseHandle(m_handle);
#else
::munmap(m_ptr, m_size);
::close(m_file);
#endif
}
// Map the shared memory object at `ptr` (aligned down to 0x10000), or at an
// OS-chosen address when `ptr` is null. Returns the mapping address,
// or a failure value from the OS call (nullptr on Win32, MAP_FAILED on POSIX).
u8* shm::map(void* ptr, protection prot) const
{
#ifdef _WIN32
// Translate protection to the closest FILE_MAP_* access mask
DWORD access = 0;
switch (prot)
{
case protection::rw: access = FILE_MAP_WRITE; break;
case protection::ro: access = FILE_MAP_READ; break;
case protection::no: break;
case protection::wx: access = FILE_MAP_WRITE | FILE_MAP_EXECUTE; break;
case protection::rx: access = FILE_MAP_READ | FILE_MAP_EXECUTE; break;
}
return static_cast<u8*>(::MapViewOfFileEx(m_handle, access, 0, 0, m_size, ptr));
#else
// MAP_FIXED only when a target address was requested
return static_cast<u8*>(::mmap((void*)((u64)ptr & -0x10000), m_size, +prot, MAP_SHARED | (ptr ? MAP_FIXED : 0), m_file, 0));
#endif
}
// Map the shared memory over a previously reserved region.
// On Win32 this is non-atomic: the reservation containing `target` must be
// released first, and the surrounding pieces re-reserved, before mapping.
u8* shm::map_critical(void* ptr, protection prot)
{
const auto target = (u8*)((u64)ptr & -0x10000);
#ifdef _WIN32
// Locate the reservation and release it entirely (only MEM_RESERVE state is valid here)
::MEMORY_BASIC_INFORMATION mem;
if (!::VirtualQuery(target, &mem, sizeof(mem)) || mem.State != MEM_RESERVE || !::VirtualFree(mem.AllocationBase, 0, MEM_RELEASE))
{
return nullptr;
}
const auto base = (u8*)mem.AllocationBase;
const auto size = mem.RegionSize + (target - base);
// Re-reserve the gap before the target, if any
if (base < target && !::VirtualAlloc(base, target - base, MEM_RESERVE, PAGE_NOACCESS))
{
return nullptr;
}
// Re-reserve the gap after the mapped range, if any
if (target + m_size < base + size && !::VirtualAlloc(target + m_size, base + size - target - m_size, MEM_RESERVE, PAGE_NOACCESS))
{
return nullptr;
}
#endif
return this->map(target, prot);
}
// Unmap a view previously created by map()
void shm::unmap(void* ptr) const
{
#ifdef _WIN32
::UnmapViewOfFile(ptr);
#else
::munmap(ptr, m_size);
#endif
}
// Undo map_critical: unmap the view and, on Win32, restore a single
// contiguous reservation by merging with adjacent reserved regions.
void shm::unmap_critical(void* ptr)
{
const auto target = (u8*)((u64)ptr & -0x10000);
this->unmap(target);
#ifdef _WIN32
// Inspect the regions immediately before and after the unmapped range
::MEMORY_BASIC_INFORMATION mem, mem2;
if (!::VirtualQuery(target - 1, &mem, sizeof(mem)) || !::VirtualQuery(target + m_size, &mem2, sizeof(mem2)))
{
return;
}
// Release adjacent reservations so they can be merged into one
if (mem.State == MEM_RESERVE && !::VirtualFree(mem.AllocationBase, 0, MEM_RELEASE))
{
return;
}
if (mem2.State == MEM_RESERVE && !::VirtualFree(mem2.AllocationBase, 0, MEM_RELEASE))
{
return;
}
// Compute the extents of the merged reservation
const auto size1 = mem.State == MEM_RESERVE ? target - (u8*)mem.AllocationBase : 0;
const auto size2 = mem2.State == MEM_RESERVE ? mem2.RegionSize : 0;
if (!::VirtualAlloc(mem.State == MEM_RESERVE ? mem.AllocationBase : target, m_size + size1 + size2, MEM_RESERVE, PAGE_NOACCESS))
{
return;
}
#endif
}
}

View File

@ -30,6 +30,56 @@ namespace utils
*/
void memory_decommit(void* pointer, std::size_t size);
// Free memory after reserved by memory_reserve, should specify original size
void memory_release(void* pointer, std::size_t size);
// Set memory protection
void memory_protect(void* pointer, std::size_t size, protection prot);
// Shared memory handle
class shm
{
#ifdef _WIN32
// Section handle (CreateFileMapping)
void* m_handle;
#else
// Shared memory file descriptor
int m_file;
#endif
// Total size, aligned up to 0x10000
u32 m_size;
// Persistent read/write mapping created at construction
u8* m_ptr;
public:
explicit shm(u32 size);
shm(const shm&) = delete;
~shm();
// Map shared memory
u8* map(void* ptr, protection prot = protection::rw) const;
// Map shared memory over reserved memory region, which is unsafe (non-atomic) under Win32
u8* map_critical(void* ptr, protection prot = protection::rw);
// Unmap shared memory
void unmap(void* ptr) const;
// Unmap shared memory, undoing map_critical
void unmap_critical(void* ptr);
// Access memory with simple range check
u8* get(u32 offset, u32 size) const
{
// Overflow-safe: rejects offset beyond the object and ranges crossing the end
if (offset >= m_size || m_size - offset < size)
{
return nullptr;
}
return m_ptr + offset;
}
u32 size() const
{
return m_size;
}
};
}

View File

@ -48,7 +48,7 @@ u32 _sys_heap_memalign(u32 heap, u32 align, u32 size)
{
	sysPrxForUser.warning("_sys_heap_memalign(heap=0x%x, align=0x%x, size=0x%x)", heap, align, size);

	// Fix: removed the stale duplicate return statement using the old 4096
	// minimum alignment, which made the intended 0x10000 line unreachable.
	// Minimal allocation alignment is 0x10000.
	return vm::alloc(size, vm::main, std::max<u32>(align, 0x10000));
}
s32 _sys_heap_free(u32 heap, u32 addr)

View File

@ -381,7 +381,7 @@ u32 _sys_memalign(u32 align, u32 size)
{
	sysPrxForUser.warning("_sys_memalign(align=0x%x, size=0x%x)", align, size);

	// Fix: removed the stale duplicate return statement using the old 4096
	// minimum alignment, which made the intended 0x10000 line unreachable.
	// Minimal allocation alignment is 0x10000.
	return vm::alloc(size, vm::main, std::max<u32>(align, 0x10000));
}
s32 _sys_free(u32 addr)

View File

@ -330,7 +330,7 @@ static void ppu_initialize_modules(const std::shared_ptr<ppu_linkage_info>& link
// Allocate HLE variable
if (variable.second.size >= 4096 || variable.second.align >= 4096)
{
variable.second.addr = vm::alloc(variable.second.size, vm::main, std::max<u32>(variable.second.align, 4096));
variable.second.addr = vm::alloc(variable.second.size, vm::main, std::max<u32>(variable.second.align, 0x10000));
}
else
{

View File

@ -400,39 +400,32 @@ extern void ppu_remove_breakpoint(u32 addr)
// Patch a PPU instruction word at the given guest address.
// Fix: the block contained both the pre- and post-rewrite implementations
// interleaved (duplicate checks, unreachable `return false`, mismatched
// braces); reconstructed the coherent version that writes through a super
// pointer (bypassing page protection) instead of toggling page protection.
extern bool ppu_patch(u32 addr, u32 value)
{
	// TODO: check executable flag
	if (g_cfg.core.ppu_decoder == ppu_decoder_type::llvm && Emu.GetStatus() != system_state::ready)
	{
		// TODO: support recompilers
		LOG_FATAL(GENERAL, "Patch failed at 0x%x: LLVM recompiler is used.", addr);
		return false;
	}

	// Obtain a pointer that ignores memory protection
	const auto ptr = vm::get_super_ptr<u32>(addr);

	if (!ptr)
	{
		LOG_FATAL(GENERAL, "Patch failed at 0x%x: invalid memory address.", addr);
		return false;
	}

	*ptr = value;

	const u32 _break = ::narrow<u32>(reinterpret_cast<std::uintptr_t>(&ppu_break));
	const u32 fallback = ::narrow<u32>(reinterpret_cast<std::uintptr_t>(&ppu_fallback));

	// Refresh the cached dispatch target unless a breakpoint/fallback stub is installed
	if (ppu_ref(addr) != _break && ppu_ref(addr) != fallback)
	{
		ppu_ref(addr) = ppu_cache(addr);
	}

	return true;
}
std::string ppu_thread::get_name() const

View File

@ -1,42 +1,37 @@
#include "stdafx.h"
#include "Utilities/VirtualMemory.h"
#include "Emu/IdManager.h"
#include "sys_memory.h"
logs::channel sys_memory("sys_memory");
// Create a phantom memory allocation record, backed by a freshly
// created shared memory object of the requested size.
lv2_memory_alloca::lv2_memory_alloca(u32 size, u32 align, u64 flags, const std::shared_ptr<lv2_memory_container>& ct)
: size(size)
, align(align)
, flags(flags)
, ct(ct)
, shm(std::make_shared<utils::shm>(size))
{
}
error_code sys_memory_allocate(u32 size, u64 flags, vm::ptr<u32> alloc_addr)
{
sys_memory.warning("sys_memory_allocate(size=0x%x, flags=0x%llx, alloc_addr=*0x%x)", size, flags, alloc_addr);
// Check allocation size
switch (flags)
{
case 0: //handle "default" value, issue 2510
case SYS_MEMORY_PAGE_SIZE_1M:
{
if (size % 0x100000)
{
return CELL_EALIGN;
}
const u32 align =
flags == SYS_MEMORY_PAGE_SIZE_1M ? 0x100000 :
flags == SYS_MEMORY_PAGE_SIZE_64K ? 0x10000 :
flags == 0 ? 0x10000 : 0;
break;
if (!align)
{
return {CELL_EINVAL, flags};
}
case SYS_MEMORY_PAGE_SIZE_64K:
if (size % align)
{
if (size % 0x10000)
{
return CELL_EALIGN;
}
break;
}
default:
{
return CELL_EINVAL;
}
return {CELL_EALIGN, size};
}
// Get "default" memory container
@ -49,7 +44,7 @@ error_code sys_memory_allocate(u32 size, u64 flags, vm::ptr<u32> alloc_addr)
}
// Allocate memory, write back the start address of the allocated area
*alloc_addr = verify(HERE, vm::alloc(size, vm::user_space, flags == SYS_MEMORY_PAGE_SIZE_1M ? 0x100000 : 0x10000));
*alloc_addr = verify(HERE, vm::alloc(size, vm::user_space, align));
return CELL_OK;
}
@ -59,32 +54,19 @@ error_code sys_memory_allocate_from_container(u32 size, u32 cid, u64 flags, vm::
sys_memory.warning("sys_memory_allocate_from_container(size=0x%x, cid=0x%x, flags=0x%llx, alloc_addr=*0x%x)", size, cid, flags, alloc_addr);
// Check allocation size
switch (flags)
{
case SYS_MEMORY_PAGE_SIZE_1M:
{
if (size % 0x100000)
{
return CELL_EALIGN;
}
const u32 align =
flags == SYS_MEMORY_PAGE_SIZE_1M ? 0x100000 :
flags == SYS_MEMORY_PAGE_SIZE_64K ? 0x10000 :
flags == 0 ? 0x10000 : 0;
break;
if (!align)
{
return {CELL_EINVAL, flags};
}
case SYS_MEMORY_PAGE_SIZE_64K:
if (size % align)
{
if (size % 0x10000)
{
return CELL_EALIGN;
}
break;
}
default:
{
return CELL_EINVAL;
}
return {CELL_EALIGN, size};
}
const auto ct = idm::get<lv2_memory_container>(cid, [&](lv2_memory_container& ct) -> CellError
@ -108,8 +90,11 @@ error_code sys_memory_allocate_from_container(u32 size, u32 cid, u64 flags, vm::
return ct.ret;
}
// Allocate memory, write back the start address of the allocated area, use cid as the supplementary info
*alloc_addr = verify(HERE, vm::alloc(size, vm::user_space, flags == SYS_MEMORY_PAGE_SIZE_1M ? 0x100000 : 0x10000, cid));
// Create phantom memory object
const auto mem = idm::make_ptr<lv2_memory_alloca>(size, align, flags, ct.ptr);
// Allocate memory
*alloc_addr = verify(HERE, vm::get(vm::user_space)->alloc(size, mem->align, &mem->shm));
return CELL_OK;
}
@ -120,25 +105,49 @@ error_code sys_memory_free(u32 addr)
const auto area = vm::get(vm::user_space);
verify(HERE), area;
const auto shm = area->get(addr);
if (!shm.second)
{
return {CELL_EINVAL, addr};
}
// Retrieve phantom memory object
const auto mem = idm::select<lv2_memory_alloca>([&](u32 id, lv2_memory_alloca& mem) -> u32
{
if (mem.shm.get() == shm.second.get())
{
return id;
}
return 0;
});
if (!mem)
{
// Deallocate memory (simple)
if (!area->dealloc(addr))
{
return {CELL_EINVAL, addr};
}
// Return "physical memory" to the default container
fxm::get_always<lv2_memory_container>()->used -= shm.second->size();
return CELL_OK;
}
// Deallocate memory
u32 cid, size = area->dealloc(addr, nullptr, &cid);
if (!size)
if (!area->dealloc(addr, &shm.second))
{
return CELL_EINVAL;
return {CELL_EINVAL, addr};
}
// Return "physical memory"
if (cid == 0)
{
fxm::get<lv2_memory_container>()->used -= size;
}
else if (const auto ct = idm::get<lv2_memory_container>(cid))
{
ct->used -= size;
}
mem->ct->used -= mem->size;
// Remove phantom memory object
verify(HERE), idm::remove<lv2_memory_alloca>(mem.ret);
return CELL_OK;
}

View File

@ -83,6 +83,21 @@ struct lv2_memory_container
}
};
// Phantom object tracking a sys_memory_allocate* allocation (idm-managed)
struct lv2_memory_alloca
{
// idm ID generation parameters
static const u32 id_base = 0x1;
static const u32 id_step = 0x1;
static const u32 id_count = 0x1000;
const u32 size; // Memory size
const u32 align; // Alignment required
const u64 flags;
// Memory container the allocation was charged to
const std::shared_ptr<lv2_memory_container> ct;
// Backing shared memory object
const std::shared_ptr<utils::shm> shm;
lv2_memory_alloca(u32 size, u32 align, u64 flags, const std::shared_ptr<lv2_memory_container>& ct);
};
// SysCalls
error_code sys_memory_allocate(u32 size, u64 flags, vm::ptr<u32> alloc_addr);
error_code sys_memory_allocate_from_container(u32 size, u32 cid, u64 flags, vm::ptr<u32> alloc_addr);

View File

@ -1,13 +1,22 @@
#include "stdafx.h"
#include "sys_mmapper.h"
#include "Emu/Cell/PPUThread.h"
#include "sys_ppu_thread.h"
#include "Emu/Cell/lv2/sys_event.h"
#include "Utilities/VirtualMemory.h"
#include "sys_memory.h"
#include "sys_mmapper.h"
logs::channel sys_mmapper("sys_mmapper");
// Create a shared memory object record, backed by a freshly
// created utils::shm of the requested size.
lv2_memory::lv2_memory(u32 size, u32 align, u64 flags, const std::shared_ptr<lv2_memory_container>& ct)
: size(size)
, align(align)
, flags(flags)
, ct(ct)
, shm(std::make_shared<utils::shm>(size))
{
}
error_code sys_mmapper_allocate_address(u64 size, u64 flags, u64 alignment, vm::ptr<u32> alloc_addr)
{
sys_mmapper.error("sys_mmapper_allocate_address(size=0x%llx, flags=0x%llx, alignment=0x%llx, alloc_addr=*0x%x)", size, flags, alignment, alloc_addr);
@ -233,7 +242,7 @@ error_code sys_mmapper_free_shared_memory(u32 mem_id)
// Conditionally remove memory ID
const auto mem = idm::withdraw<lv2_obj, lv2_memory>(mem_id, [&](lv2_memory& mem) -> CellError
{
if (!mem.addr.compare_and_swap_test(0, -1))
if (mem.counter)
{
return CELL_EBUSY;
}
@ -268,31 +277,33 @@ error_code sys_mmapper_map_shared_memory(u32 addr, u32 mem_id, u64 flags)
return CELL_EINVAL;
}
const auto mem = idm::get<lv2_obj, lv2_memory>(mem_id);
const auto mem = idm::get<lv2_obj, lv2_memory>(mem_id, [&](lv2_memory& mem) -> CellError
{
if (addr % mem.align)
{
return CELL_EALIGN;
}
mem.counter++;
return {};
});
if (!mem)
{
return CELL_ESRCH;
}
if (addr % mem->align)
if (mem.ret)
{
return CELL_EALIGN;
return mem.ret;
}
if (const u32 old_addr = mem->addr.compare_and_swap(0, -1))
if (!area->falloc(addr, mem->size, &mem->shm))
{
sys_mmapper.warning("sys_mmapper_map_shared_memory(): Already mapped (mem_id=0x%x, addr=0x%x)", mem_id, old_addr);
return CELL_OK;
}
if (!area->falloc(addr, mem->size, mem->data.data()))
{
mem->addr = 0;
mem->counter--;
return CELL_EBUSY;
}
mem->addr = addr;
return CELL_OK;
}
@ -304,31 +315,28 @@ error_code sys_mmapper_search_and_map(u32 start_addr, u32 mem_id, u64 flags, vm:
if (!area || start_addr < 0x50000000 || start_addr >= 0xC0000000)
{
return CELL_EINVAL;
return {CELL_EINVAL, start_addr};
}
const auto mem = idm::get<lv2_obj, lv2_memory>(mem_id);
const auto mem = idm::get<lv2_obj, lv2_memory>(mem_id, [&](lv2_memory& mem)
{
mem.counter++;
});
if (!mem)
{
return CELL_ESRCH;
}
if (const u32 old_addr = mem->addr.compare_and_swap(0, -1))
{
sys_mmapper.warning("sys_mmapper_search_and_map(): Already mapped (mem_id=0x%x, addr=0x%x)", mem_id, old_addr);
return CELL_OK;
}
const u32 addr = area->alloc(mem->size, mem->align, mem->data.data());
const u32 addr = area->alloc(mem->size, mem->align, &mem->shm);
if (!addr)
{
mem->addr = 0;
mem->counter--;
return CELL_ENOMEM;
}
*alloc_addr = mem->addr = addr;
*alloc_addr = addr;
return CELL_OK;
}
@ -340,26 +348,42 @@ error_code sys_mmapper_unmap_shared_memory(u32 addr, vm::ptr<u32> mem_id)
if (!area || addr < 0x50000000 || addr >= 0xC0000000)
{
return CELL_EINVAL;
return {CELL_EINVAL, addr};
}
const auto mem = idm::select<lv2_obj, lv2_memory>([&](u32 id, lv2_memory& mem)
const auto shm = area->get(addr);
if (!shm.second)
{
if (mem.addr == addr)
return {CELL_EINVAL, addr};
}
const auto mem = idm::select<lv2_obj, lv2_memory>([&](u32 id, lv2_memory& mem) -> u32
{
if (mem.shm.get() == shm.second.get())
{
*mem_id = id;
return true;
return id;
}
return false;
return 0;
});
if (!mem)
{
return CELL_EINVAL;
return {CELL_EINVAL, addr};
}
verify(HERE), area->dealloc(addr, mem->data.data()), mem->addr.exchange(0) == addr;
if (!area->dealloc(addr, &shm.second))
{
return {CELL_EINVAL, addr};
}
// Write out the ID
*mem_id = mem.ret;
// Acknowledge
mem->counter--;
return CELL_OK;
}

View File

@ -1,9 +1,9 @@
#pragma once
#include "sys_sync.h"
#include "sys_memory.h"
#include <vector>
#include <list>
struct lv2_memory_container;
struct lv2_memory : lv2_obj
{
@ -13,19 +13,11 @@ struct lv2_memory : lv2_obj
const u32 align; // Alignment required
const u64 flags;
const std::shared_ptr<lv2_memory_container> ct; // Associated memory container
const std::shared_ptr<utils::shm> shm;
atomic_t<u32> addr{}; // Actual mapping address
atomic_t<u32> counter{0};
std::vector<uchar> data;
lv2_memory(u32 size, u32 align, u64 flags, const std::shared_ptr<lv2_memory_container>& ct)
: size(size)
, align(align)
, flags(flags)
, ct(ct)
{
data.resize(size);
}
lv2_memory(u32 size, u32 align, u64 flags, const std::shared_ptr<lv2_memory_container>& ct);
};
enum : u64
@ -52,7 +44,7 @@ struct page_fault_notification_entry
// Used to hold list of queues to be notified on page fault event.
// Fix: the member was declared twice (leftover std::list declaration next to
// the std::vector one — a redefinition error); kept the vector version.
struct page_fault_notification_entries
{
	// Queues to be notified on page fault events
	std::vector<page_fault_notification_entry> entries;
};
struct page_fault_event
@ -63,7 +55,7 @@ struct page_fault_event
// Fix: the events member was declared twice (leftover std::list declaration
// next to the std::vector one — a redefinition error); kept the vector version.
struct page_fault_event_entries
{
	// Pending page fault events
	std::vector<page_fault_event> events;

	// Protects the event list
	semaphore<> pf_mutex;
};

View File

@ -7,7 +7,6 @@
#include "Emu/CPU/CPUThread.h"
#include "Emu/Cell/lv2/sys_memory.h"
#include "Emu/RSX/GSRender.h"
#include <atomic>
#include <deque>
@ -299,8 +298,10 @@ namespace vm
}
}
void _page_map(u32 addr, u32 size, u8 flags)
static void _page_map(u32 addr, u8 flags, utils::shm& shm)
{
const u32 size = shm.size();
if (!size || (size | addr) % 4096 || flags & page_allocated)
{
fmt::throw_exception("Invalid arguments (addr=0x%x, size=0x%x)" HERE, addr, size);
@ -314,7 +315,10 @@ namespace vm
}
}
utils::memory_commit(g_base_addr + addr, size);
if (shm.map_critical(g_base_addr + addr) != g_base_addr + addr)
{
fmt::throw_exception("Memory mapping failed - blame Windows (addr=0x%x, size=0x%x, flags=0x%x)", addr, size, flags);
}
if (flags & page_executable)
{
@ -393,19 +397,28 @@ namespace vm
return true;
}
void _page_unmap(u32 addr, u32 size)
static void _page_unmap(u32 addr, utils::shm& shm)
{
const u32 size = shm.size();
if (!size || (size | addr) % 4096)
{
fmt::throw_exception("Invalid arguments (addr=0x%x, size=0x%x)" HERE, addr, size);
}
bool is_exec = false;
for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
{
if ((g_pages[i].flags & page_allocated) == 0)
{
fmt::throw_exception("Memory not mapped (addr=0x%x, size=0x%x, current_addr=0x%x)" HERE, addr, size, i * 4096);
}
if (g_pages[i].flags & page_executable)
{
is_exec = true;
}
}
for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
@ -416,8 +429,12 @@ namespace vm
}
}
utils::memory_decommit(g_base_addr + addr, size);
utils::memory_decommit(g_exec_addr + addr, size);
shm.unmap_critical(g_base_addr + addr);
if (is_exec)
{
utils::memory_decommit(g_exec_addr + addr, size);
}
if (g_cfg.core.ppu_debug)
{
@ -438,7 +455,7 @@ namespace vm
return true;
}
u32 alloc(u32 size, memory_location_t location, u32 align, u32 sup)
u32 alloc(u32 size, memory_location_t location, u32 align)
{
const auto block = get(location);
@ -447,10 +464,10 @@ namespace vm
fmt::throw_exception("Invalid memory location (%u)" HERE, (uint)location);
}
return block->alloc(size, align, nullptr, sup);
return block->alloc(size, align);
}
u32 falloc(u32 addr, u32 size, memory_location_t location, u32 sup)
u32 falloc(u32 addr, u32 size, memory_location_t location)
{
const auto block = get(location, addr);
@ -459,10 +476,10 @@ namespace vm
fmt::throw_exception("Invalid memory location (%u, addr=0x%x)" HERE, (uint)location, addr);
}
return block->falloc(addr, size, nullptr, sup);
return block->falloc(addr, size);
}
u32 dealloc(u32 addr, memory_location_t location, u32* sup_out)
u32 dealloc(u32 addr, memory_location_t location)
{
const auto block = get(location, addr);
@ -471,7 +488,7 @@ namespace vm
fmt::throw_exception("Invalid memory location (%u, addr=0x%x)" HERE, (uint)location, addr);
}
return block->dealloc(addr, nullptr, sup_out);
return block->dealloc(addr);
}
void dealloc_verbose_nothrow(u32 addr, memory_location_t location) noexcept
@ -491,8 +508,10 @@ namespace vm
}
}
bool block_t::try_alloc(u32 addr, u32 size, u8 flags, u32 sup)
bool block_t::try_alloc(u32 addr, u8 flags, std::shared_ptr<utils::shm>&& shm)
{
const u32 size = shm->size();
// Check if memory area is already mapped
for (u32 i = addr / 4096; i <= (addr + size - 1) / 4096; i++)
{
@ -503,13 +522,10 @@ namespace vm
}
// Map "real" memory pages
_page_map(addr, size, flags);
_page_map(addr, flags, *shm);
// Add entry
m_map[addr] = size;
// Add supplementary info if necessary
if (sup) m_sup[addr] = sup;
m_map[addr] = std::move(shm);
return true;
}
@ -528,24 +544,32 @@ namespace vm
// Destroy the memory block: unmap every region under the writer lock,
// then notify rsx outside the lock.
// Fix: the block contained duplicated residue lines (two writer locks, two
// deallocation loops, mismatched braces); reconstructed the coherent version
// with the lock confined to an inner scope.
block_t::~block_t()
{
	{
		vm::writer_lock lock(0);

		// Deallocate all memory
		for (auto& entry : m_map)
		{
			_page_unmap(entry.first, *entry.second);
		}
	}

	// Notify rsx to invalidate range (TODO)
	if (const auto rsxthr = fxm::check_unlocked<GSRender>())
	{
		rsxthr->on_notify_memory_unmapped(addr, size);
	}
}
u32 block_t::alloc(const u32 orig_size, u32 align, const uchar* data, u32 sup)
u32 block_t::alloc(const u32 orig_size, u32 align, const std::shared_ptr<utils::shm>* src)
{
vm::writer_lock lock(0);
// Align to minimal page size
const u32 size = ::align(orig_size, 4096);
const u32 size = ::align(orig_size, 0x10000);
// Check alignment (it's page allocation, so passing small values there is just silly)
if (align < 4096 || align != (0x80000000u >> cntlz32(align, true)))
if (align < 0x10000 || align != (0x80000000u >> cntlz32(align, true)))
{
fmt::throw_exception("Invalid alignment (size=0x%x, align=0x%x)" HERE, size, align);
}
@ -567,16 +591,14 @@ namespace vm
pflags |= page_64k_size;
}
// Create or import shared memory object
std::shared_ptr<utils::shm> shm = src ? std::shared_ptr<utils::shm>(*src) : std::make_shared<utils::shm>(size);
// Search for an appropriate place (unoptimized)
for (u32 addr = ::align(this->addr, align); addr < this->addr + this->size - 1; addr += align)
{
if (try_alloc(addr, size, pflags, sup))
if (try_alloc(addr, pflags, std::move(shm)))
{
if (data)
{
std::memcpy(vm::base(addr), data, orig_size);
}
return addr;
}
}
@ -584,12 +606,12 @@ namespace vm
return 0;
}
u32 block_t::falloc(u32 addr, const u32 orig_size, const uchar* data, u32 sup)
u32 block_t::falloc(u32 addr, const u32 orig_size, const std::shared_ptr<utils::shm>* src)
{
vm::writer_lock lock(0);
// align to minimal page size
const u32 size = ::align(orig_size, 4096);
const u32 size = ::align(orig_size, 0x10000);
// return if addr or size is invalid
if (!size || size > this->size || addr < this->addr || addr + size - 1 > this->addr + this->size - 1)
@ -608,54 +630,81 @@ namespace vm
pflags |= page_64k_size;
}
if (!try_alloc(addr, size, pflags, sup))
if (!try_alloc(addr, pflags, src ? std::shared_ptr<utils::shm>(*src) : std::make_shared<utils::shm>(size)))
{
return 0;
}
if (data)
{
std::memcpy(vm::base(addr), data, orig_size);
}
return addr;
}
// Unmap memory at the specified address; if `src` is given, the mapped shm
// object must match it. Returns the unmapped size, or 0 on failure.
// Fix: the block interleaved the old implementation (stale uchar*/sup_out
// signature, duplicate lookup, memcpy to data_out, m_sup bookkeeping) with
// the new one; reconstructed the coherent shm-based version.
u32 block_t::dealloc(u32 addr, const std::shared_ptr<utils::shm>* src)
{
	u32 result = 0;
	{
		vm::writer_lock lock(0);

		const auto found = m_map.find(addr);

		if (found == m_map.end())
		{
			return 0;
		}

		// Reject mismatched shared memory object if one was specified
		if (src && found->second.get() != src->get())
		{
			return 0;
		}

		result = found->second->size();

		// Unmap "real" memory pages
		_page_unmap(addr, *found->second);

		// Remove entry
		m_map.erase(found);
	}

	// Notify rsx to invalidate range (TODO)
	if (const auto rsxthr = fxm::check_unlocked<GSRender>())
	{
		rsxthr->on_notify_memory_unmapped(addr, result);
	}

	return result;
}
// Look up the mapped region containing [addr, addr+size).
// Returns {region start, shm} on success, {addr, nullptr} on failure.
// If size == 0, addr must be exactly a region start address.
std::pair<const u32, std::shared_ptr<utils::shm>> block_t::get(u32 addr, u32 size)
{
// Reject addresses outside this block (max<u32> guards against u32 overflow)
if (addr < this->addr || std::max<u32>(size, addr - this->addr + size) >= this->size)
{
return {addr, nullptr};
}
vm::reader_lock lock;
// Find the last mapped region starting at or before addr
const auto upper = m_map.upper_bound(addr);
if (upper == m_map.begin())
{
return {addr, nullptr};
}
const auto found = std::prev(upper);
// Exact address condition (size == 0)
if (size == 0 && found->first != addr)
{
return {addr, nullptr};
}
// Range check
if (std::max<u32>(size, addr - found->first + size) > found->second->size())
{
return {addr, nullptr};
}
return *found;
}
u32 block_t::imp_used(const vm::writer_lock&)
@ -664,7 +713,7 @@ namespace vm
for (auto& entry : m_map)
{
result += entry.second;
result += entry.second->size();
}
return result;

View File

@ -3,6 +3,7 @@
#include <map>
#include <functional>
#include <memory>
#include "Utilities/VirtualMemory.h"
class shared_mutex;
class named_thread;
@ -126,14 +127,14 @@ namespace vm
// Check flags for specified memory range (unsafe)
bool check_addr(u32 addr, u32 size = 1, u8 flags = page_allocated);
// Search and map memory in specified memory location (don't pass alignment smaller than 4096)
u32 alloc(u32 size, memory_location_t location, u32 align = 4096, u32 sup = 0);
// Search and map memory in specified memory location (min alignment is 0x10000)
u32 alloc(u32 size, memory_location_t location, u32 align = 0x10000);
// Map memory at specified address (in optionally specified memory location)
u32 falloc(u32 addr, u32 size, memory_location_t location = any, u32 sup = 0);
u32 falloc(u32 addr, u32 size, memory_location_t location = any);
// Unmap memory at specified address (in optionally specified memory location), return size
u32 dealloc(u32 addr, memory_location_t location = any, u32* sup_out = nullptr);
u32 dealloc(u32 addr, memory_location_t location = any);
// dealloc() with no return value and no exceptions
void dealloc_verbose_nothrow(u32 addr, memory_location_t location = any) noexcept;
@ -141,10 +142,10 @@ namespace vm
// Object that handles memory allocations inside specific constant bounds ("location")
class block_t final
{
std::map<u32, u32> m_map; // Mapped memory: addr -> size
std::unordered_map<u32, u32> m_sup; // Supplementary info for allocations
// Mapped regions: addr -> shm handle
std::map<u32, std::shared_ptr<utils::shm>> m_map;
bool try_alloc(u32 addr, u32 size, u8 flags, u32 sup);
bool try_alloc(u32 addr, u8 flags, std::shared_ptr<utils::shm>&&);
public:
block_t(u32 addr, u32 size, u64 flags = 0);
@ -156,14 +157,17 @@ namespace vm
const u32 size; // Total size
const u64 flags; // Currently unused
// Search and map memory (don't pass alignment smaller than 4096)
u32 alloc(u32 size, u32 align = 4096, const uchar* data = nullptr, u32 sup = 0);
// Search and map memory (min alignment is 0x10000)
u32 alloc(u32 size, u32 align = 0x10000, const std::shared_ptr<utils::shm>* = nullptr);
// Try to map memory at fixed location
u32 falloc(u32 addr, u32 size, const uchar* data = nullptr, u32 sup = 0);
u32 falloc(u32 addr, u32 size, const std::shared_ptr<utils::shm>* = nullptr);
// Unmap memory at specified location previously returned by alloc(), return size
u32 dealloc(u32 addr, uchar* data_out = nullptr, u32* sup_out = nullptr);
u32 dealloc(u32 addr, const std::shared_ptr<utils::shm>* = nullptr);
// Get memory at specified address (if size = 0, addr assumed exact)
std::pair<const u32, std::shared_ptr<utils::shm>> get(u32 addr, u32 size = 0);
// Internal
u32 imp_used(const vm::writer_lock&);
@ -290,6 +294,35 @@ namespace vm
return *_ptr<T>(addr);
}
// Access memory bypassing memory protection: returns a host pointer into the
// backing shared memory object for `count` elements of T at guest address
// `addr`, or nullptr if the range is unmapped or out of bounds.
template <typename T>
inline std::shared_ptr<to_be_t<T>> get_super_ptr(u32 addr, u32 count = 1)
{
const auto area = vm::get(vm::any, addr);
// Reject unmapped locations and ranges exceeding the 32-bit address space
// (u64 arithmetic avoids overflow in the bounds check)
if (!area || addr + u64{count} * sizeof(T) > UINT32_MAX)
{
return nullptr;
}
const auto shm = area->get(addr, sizeof(T) * count);
if (!shm.second || shm.first > addr)
{
return nullptr;
}
const auto ptr = reinterpret_cast<to_be_t<T>*>(shm.second->get(addr - shm.first, sizeof(T) * count));
if (!ptr)
{
return nullptr;
}
// Create a shared pointer using the aliasing constructor:
// it shares ownership with the shm object, keeping the mapping alive
return {shm.second, ptr};
}
inline const be_t<u16>& read16(u32 addr)
{
return _ref<u16>(addr);

View File

@ -9,7 +9,7 @@ namespace vm
{
// Allocate guest memory in the template-specified Location.
// Fix: removed the stale duplicate return statement using the old 4096
// minimum alignment, which made the intended 0x10000 line dead code.
static inline vm::addr_t alloc(u32 size, u32 align)
{
	// Minimal allocation alignment is 0x10000
	return vm::cast(vm::alloc(size, Location, std::max<u32>(align, 0x10000)));
}
static inline void dealloc(u32 addr, u32 size = 0) noexcept

View File

@ -42,7 +42,7 @@ namespace rsx
buffered_section() {}
~buffered_section() {}
void reset(u32 base, u32 length, protection_policy protect_policy= protect_policy_full_range)
void reset(u32 base, u32 length, protection_policy protect_policy = protect_policy_full_range)
{
verify(HERE), locked == false;
@ -83,7 +83,6 @@ namespace rsx
void protect(utils::protection prot)
{
if (prot == protection) return;
verify(HERE), locked_address_range > 0;
utils::memory_protect(vm::base(locked_address_base), locked_address_range, prot);
protection = prot;
@ -131,7 +130,7 @@ namespace rsx
/**
* Check if the page containing the address tramples this section. Also compares a former trampled page range to compare
* If true, returns the range <min, max> with updated invalid range
*/
std::tuple<bool, std::pair<u32, u32>> overlaps_page(std::pair<u32, u32> old_range, u32 address, bool full_range_check) const
{