
vm: Flag names (refactoring)

Authored by Nick Renieris on 2021-04-06 10:44:50 +03:00; committed by Ivan
parent f64a7bb820
commit 396c129d41
3 changed files with 70 additions and 37 deletions

Emu/Cell/lv2/sys_memory.h

@@ -26,8 +26,9 @@ enum : u64
enum : u64
{
-SYS_MEMORY_PAGE_SIZE_1M = 0x400ull,
+SYS_MEMORY_PAGE_SIZE_4K = 0x100ull,
SYS_MEMORY_PAGE_SIZE_64K = 0x200ull,
+SYS_MEMORY_PAGE_SIZE_1M = 0x400ull,
SYS_MEMORY_PAGE_SIZE_MASK = 0xf00ull,
};
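
Note: the three page-size selectors above are single bits that all fall under SYS_MEMORY_PAGE_SIZE_MASK, which reserves the whole 0xf00 digit (0x800 is left spare). A compile-time sanity check of that relationship (illustrative sketch, not part of the diff):

// Sketch only: the selectors occupy 0x700 of the 0xf00 mask.
static_assert((SYS_MEMORY_PAGE_SIZE_4K | SYS_MEMORY_PAGE_SIZE_64K | SYS_MEMORY_PAGE_SIZE_1M) == 0x700ull);
static_assert((0x700ull & SYS_MEMORY_PAGE_SIZE_MASK) == 0x700ull);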

Emu/Memory/vm.cpp

@@ -9,7 +9,6 @@
#include "Utilities/Thread.h"
#include "Utilities/address_range.h"
#include "Emu/CPU/CPUThread.h"
#include "Emu/Cell/lv2/sys_memory.h"
#include "Emu/RSX/RSXThread.h"
#include "Emu/Cell/SPURecompiler.h"
#include "Emu/perf_meter.hpp"
@@ -1056,10 +1055,10 @@ namespace vm
}
}
-const u32 page_addr = addr + (this->flags & 0x10 ? 0x1000 : 0);
-const u32 page_size = size - (this->flags & 0x10 ? 0x2000 : 0);
+const u32 page_addr = addr + (this->flags & stack_guarded ? 0x1000 : 0);
+const u32 page_size = size - (this->flags & stack_guarded ? 0x2000 : 0);
-if (this->flags & 0x10)
+if (this->flags & stack_guarded)
{
// Mark overflow/underflow guard pages as allocated
ensure(!g_pages[addr / 4096].exchange(page_allocated));
@@ -1074,7 +1073,7 @@ namespace vm
std::remove_reference_t<decltype(map)>::value_type* result = nullptr;
// Check eligibility
-if (!_this || !(SYS_MEMORY_PAGE_SIZE_MASK & _this->flags) || _this->addr < 0x20000000 || _this->addr >= 0xC0000000)
+if (!_this || !(page_size_mask & _this->flags) || _this->addr < 0x20000000 || _this->addr >= 0xC0000000)
{
return result;
}
@@ -1092,7 +1091,7 @@ namespace vm
});
// Fill stack guards with STACKGRD
-if (this->flags & 0x10)
+if (this->flags & stack_guarded)
{
auto fill64 = [](u8* ptr, u64 data, usz count)
{
@@ -1122,7 +1121,7 @@ namespace vm
, size(size)
, flags(flags)
{
-if (flags & 0x100 || flags & 0x20)
+if (flags & page_size_4k || flags & preallocated)
{
// Special path for whole-allocated areas allowing 4k granularity
m_common = std::make_shared<utils::shm>(size);
@@ -1164,10 +1163,10 @@ namespace vm
}
// Determine minimal alignment
-const u32 min_page_size = flags & 0x100 ? 0x1000 : 0x10000;
+const u32 min_page_size = flags & page_size_4k ? 0x1000 : 0x10000;
// Align to minimal page size
-const u32 size = utils::align(orig_size, min_page_size) + (flags & 0x10 ? 0x2000 : 0);
+const u32 size = utils::align(orig_size, min_page_size) + (flags & stack_guarded ? 0x2000 : 0);
// Check alignment (it's page allocation, so passing small values there is just silly)
if (align < min_page_size || align != (0x80000000u >> std::countl_zero(align)))
@@ -1181,13 +1180,13 @@ namespace vm
return 0;
}
-u8 pflags = flags & 0x1000 ? 0 : page_readable | page_writable;
+u8 pflags = flags & page_hidden ? 0 : page_readable | page_writable;
-if ((flags & SYS_MEMORY_PAGE_SIZE_64K) == SYS_MEMORY_PAGE_SIZE_64K)
+if ((flags & page_size_64k) == page_size_64k)
{
pflags |= page_64k_size;
}
-else if (!(flags & (SYS_MEMORY_PAGE_SIZE_MASK & ~SYS_MEMORY_PAGE_SIZE_1M)))
+else if (!(flags & (page_size_mask & ~page_size_1m)))
{
pflags |= page_1m_size;
}
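
Note: with the named flags, the page-size selection above reads directly: an explicit page_size_64k request yields 64k pages, a flags value with no 4k/64k bit falls through to 1M pages, and page_size_4k sets neither bit (plain 4k granularity). A standalone sketch of just that size-bit part, with a hypothetical helper name (illustrative, not part of the diff):

// Sketch only: models the size-bit selection above, not the full pflags computation.
constexpr u8 page_size_pflags(u64 flags)
{
	if ((flags & page_size_64k) == page_size_64k)
		return page_64k_size;                        // 64k pages requested
	if (!(flags & (page_size_mask & ~page_size_1m)))
		return page_1m_size;                         // no 4k/64k bit -> 1M default
	return 0;                                        // page_size_4k -> 4k granularity
}
static_assert(page_size_pflags(page_size_64k) == page_64k_size);
static_assert(page_size_pflags(0) == page_1m_size);
static_assert(page_size_pflags(page_size_4k) == 0);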
@@ -1211,7 +1210,7 @@ namespace vm
{
if (try_alloc(addr, pflags, size, std::move(shm)))
{
-return addr + (flags & 0x10 ? 0x1000 : 0);
+return addr + (flags & stack_guarded ? 0x1000 : 0);
}
}
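
Note: the stack_guarded branches above all encode the same layout: one 4 KiB guard page below and one above the usable range, so the mapping grows by 0x2000 and the address returned to the caller skips the leading guard. A rough sketch of that arithmetic with hypothetical helper names (illustrative, not part of the diff):

// Sketch only: relates the 0x1000/0x2000 adjustments used for stack_guarded blocks.
constexpr u32 guard_page = 0x1000;                                           // one page on each side
constexpr u32 guarded_total(u32 usable) { return usable + 2 * guard_page; }  // the "+ 0x2000"
constexpr u32 user_address(u32 base)    { return base + guard_page; }        // the "+ 0x1000"
static_assert(guarded_total(0x10000) == 0x12000);
static_assert(user_address(0xD0000000) == 0xD0001000);                       // e.g. the stack area base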
@@ -1227,7 +1226,7 @@ namespace vm
}
// Determine minimal alignment
-const u32 min_page_size = flags & 0x100 ? 0x1000 : 0x10000;
+const u32 min_page_size = flags & page_size_4k ? 0x1000 : 0x10000;
// Take address misalignment into account
const u32 size0 = orig_size + addr % min_page_size;
@@ -1235,10 +1234,15 @@ namespace vm
// Align to minimal page size
const u32 size = utils::align(size0, min_page_size);
-// return if addr or size is invalid
+// Return if addr or size is invalid
// If shared memory is provided, addr/size must be aligned
-if (!size || addr < this->addr || orig_size > size0 || orig_size > size ||
-(addr - addr % min_page_size) + u64{size} > this->addr + u64{this->size} || (src && (orig_size | addr) % min_page_size) || flags & 0x10)
+if (!size ||
+addr < this->addr ||
+orig_size > size0 ||
+orig_size > size ||
+(addr - addr % min_page_size) + u64{size} > this->addr + u64{this->size} ||
+(src && (orig_size | addr) % min_page_size) ||
+flags & stack_guarded)
{
return 0;
}
@@ -1246,13 +1250,13 @@ namespace vm
// Force aligned address
addr -= addr % min_page_size;
-u8 pflags = flags & 0x1000 ? 0 : page_readable | page_writable;
+u8 pflags = flags & page_hidden ? 0 : page_readable | page_writable;
-if ((flags & SYS_MEMORY_PAGE_SIZE_64K) == SYS_MEMORY_PAGE_SIZE_64K)
+if ((flags & page_size_64k) == page_size_64k)
{
pflags |= page_64k_size;
}
-else if (!(flags & (SYS_MEMORY_PAGE_SIZE_MASK & ~SYS_MEMORY_PAGE_SIZE_1M)))
+else if (!(flags & (page_size_mask & ~page_size_1m)))
{
pflags |= page_1m_size;
}
@@ -1285,7 +1289,7 @@ namespace vm
{
vm::writer_lock lock(0);
-const auto found = m_map.find(addr - (flags & 0x10 ? 0x1000 : 0));
+const auto found = m_map.find(addr - (flags & stack_guarded ? 0x1000 : 0));
if (found == m_map.end())
{
@@ -1298,9 +1302,9 @@ namespace vm
}
// Get allocation size
-const auto size = found->second.first - (flags & 0x10 ? 0x2000 : 0);
+const auto size = found->second.first - (flags & stack_guarded ? 0x2000 : 0);
-if (flags & 0x10)
+if (flags & stack_guarded)
{
// Clear guard pages
ensure(g_pages[addr / 4096 - 1].exchange(0) == page_allocated);
@@ -1311,7 +1315,7 @@ namespace vm
ensure(size == _page_unmap(addr, size, found->second.second.get()));
// Clear stack guards
-if (flags & 0x10)
+if (flags & stack_guarded)
{
std::memset(g_sudo_addr + addr - 4096, 0, 4096);
std::memset(g_sudo_addr + addr + size, 0, 4096);
@@ -1371,7 +1375,7 @@ namespace vm
for (auto& entry : (m.*block_map)())
{
-result += entry.second.first - (flags & 0x10 ? 0x2000 : 0);
+result += entry.second.first - (flags & stack_guarded ? 0x2000 : 0);
}
return result;
@@ -1515,12 +1519,12 @@ namespace vm
{
if (*it && (*it)->addr == addr)
{
-if (must_be_empty && (*it)->flags & 0x3)
+if (must_be_empty && (*it)->flags & bf0_mask)
{
continue;
}
-if (!must_be_empty && ((*it)->flags & 0x3) != 2)
+if (!must_be_empty && ((*it)->flags & bf0_mask) != bf0_0x2)
{
continue;
}
@@ -1641,13 +1645,13 @@ namespace vm
g_locations =
{
-std::make_shared<block_t>(0x00010000, 0x1FFF0000, 0x220), // main
-std::make_shared<block_t>(0x20000000, 0x10000000, 0x201), // user 64k pages
-nullptr, // user 1m pages
-nullptr, // rsx context
-std::make_shared<block_t>(0xC0000000, 0x10000000, 0x220), // video
-std::make_shared<block_t>(0xD0000000, 0x10000000, 0x131), // stack
-std::make_shared<block_t>(0xE0000000, 0x20000000, 0x200), // SPU reserved
+std::make_shared<block_t>(0x00010000, 0x1FFF0000, page_size_64k | preallocated), // main
+std::make_shared<block_t>(0x20000000, 0x10000000, page_size_64k | bf0_0x1), // user 64k pages
+nullptr, // user 1m pages
+nullptr, // rsx context
+std::make_shared<block_t>(0xC0000000, 0x10000000, page_size_64k | preallocated), // video
+std::make_shared<block_t>(0xD0000000, 0x10000000, page_size_4k | preallocated | stack_guarded | bf0_0x1), // stack
+std::make_shared<block_t>(0xE0000000, 0x20000000, page_size_64k), // SPU reserved
};
std::memset(g_reservations, 0, sizeof(g_reservations));
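
Note: the g_locations change above is a pure renaming; each old magic constant decomposes exactly into the new named flags introduced in the header change below. A compile-time cross-check (illustrative sketch, not part of the diff):

// Old literal == new named combination
static_assert(0x220 == (page_size_64k | preallocated));                          // main, video
static_assert(0x201 == (page_size_64k | bf0_0x1));                               // user 64k pages
static_assert(0x131 == (page_size_4k | preallocated | stack_guarded | bf0_0x1)); // stack
static_assert(0x200 == page_size_64k);                                           // SPU reserved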

Emu/Memory/vm.h

@@ -90,6 +90,31 @@ namespace vm
// utils::memory_lock wrapper for locking sudo memory
void lock_sudo(u32 addr, u32 size);
+enum block_flags_3
+{
+page_hidden = 0x1000,
+};
+
+enum block_flags_2_page_size
+{
+page_size_4k = 0x100, // SYS_MEMORY_PAGE_SIZE_4K
+page_size_64k = 0x200, // SYS_MEMORY_PAGE_SIZE_64K
+page_size_1m = 0x400, // SYS_MEMORY_PAGE_SIZE_1M
+page_size_mask = 0xF00, // SYS_MEMORY_PAGE_SIZE_MASK
+};
+
+enum block_flags_1
+{
+stack_guarded = 0x10,
+preallocated = 0x20, // nonshareable
+};
+
+enum block_flags_0
+{
+bf0_0x1 = 0x1, // TODO: document
+bf0_0x2 = 0x2, // TODO: document
+bf0_mask = bf0_0x1 | bf0_0x2,
+};
// Object that handles memory allocations inside specific constant bounds ("location")
class block_t final
{
@@ -108,7 +133,10 @@ namespace vm
public:
const u32 addr; // Start address
const u32 size; // Total size
-const u64 flags; // Currently unused
+const u64 flags; // Byte 0xF000: block_flags_3
+// Byte 0x0F00: block_flags_2_page_size (SYS_MEMORY_PAGE_SIZE_*)
+// Byte 0x00F0: block_flags_1
+// Byte 0x000F: block_flags_0
// Search and map memory (min alignment is 0x10000)
u32 alloc(u32 size, const std::shared_ptr<utils::shm>* = nullptr, u32 align = 0x10000, u64 flags = 0);
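
Note: the comment above assigns each flag group its own hex digit of block_t::flags, and the enum values added earlier in this file already respect that split. A compile-time sanity check (illustrative sketch, not part of the diff):

// Each group stays inside its documented digit of block_t::flags.
static_assert((page_hidden & ~u64{0xF000}) == 0);                    // block_flags_3
static_assert((page_size_mask & ~u64{0x0F00}) == 0);                 // block_flags_2_page_size
static_assert(((stack_guarded | preallocated) & ~u64{0x00F0}) == 0); // block_flags_1
static_assert((bf0_mask & ~u64{0x000F}) == 0);                       // block_flags_0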
@@ -142,7 +170,7 @@ namespace vm
std::shared_ptr<block_t> get(memory_location_t location, u32 addr = 0);
// Allocate segment at specified location, does nothing if exists already
-std::shared_ptr<block_t> reserve_map(memory_location_t location, u32 addr, u32 area_size, u64 flags = 0x200);
+std::shared_ptr<block_t> reserve_map(memory_location_t location, u32 addr, u32 area_size, u64 flags = page_size_64k);
// Get PS3 virtual memory address from the provided pointer (nullptr or pointer from outside is always converted to 0)
// Super memory is allowed as well
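
Note: the reserve_map default above keeps the same value as before (0x200, now spelled page_size_64k), so callers that rely on the default argument are unaffected. A hedged usage sketch; the location value vm::user1m, the address and the size are placeholders chosen for illustration:

// Hypothetical call site, not from the diff: reserve a 64k-paged area using the default flags.
const auto area = vm::reserve_map(vm::user1m, 0x30000000, 0x10000000);
// Equivalent to the pre-change spelling: vm::reserve_map(vm::user1m, 0x30000000, 0x10000000, 0x200);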