Mirror of https://github.com/RPCS3/rpcs3.git (synced 2024-11-22 02:32:36 +01:00)
Fixup No.3 after #10779

commit fafefb2cf5
parent 1cbcf7e1ad
@@ -551,7 +551,6 @@ void patch_engine::append_title_patches(const std::string& title_id)
 }
 
 void ppu_register_range(u32 addr, u32 size);
-void ppu_register_function_at(u32 addr, u32 size, u64 ptr);
 bool ppu_form_branch_to_code(u32 entry, u32 target);
 
 void unmap_vm_area(std::shared_ptr<vm::block_t>& ptr)
@@ -581,24 +580,22 @@ static usz apply_modification(std::basic_string<u32>& applied, const patch_engin
 			// Allocate map if needed, if allocated flags will indicate that bit 62 is set (unique identifier)
 			auto alloc_map = vm::reserve_map(vm::any, alloc_at & -0x10000, utils::align(alloc_size, 0x10000), vm::page_size_64k | vm::preallocated | (1ull << 62));
 
-			u64 flags = vm::page_readable;
+			u64 flags = vm::alloc_unwritable;
 
 			switch (p.offset % patch_engine::mem_protection::mask)
 			{
-			case patch_engine::mem_protection::wx: flags |= vm::page_writable + vm::page_executable; break;
+			case patch_engine::mem_protection::wx: flags = vm::alloc_executable; break;
 			case patch_engine::mem_protection::ro: break;
-			case patch_engine::mem_protection::rx: flags |= vm::page_executable; break;
-			case patch_engine::mem_protection::rw: flags |= vm::page_writable; break;
+			case patch_engine::mem_protection::rx: flags |= vm::alloc_executable; break;
+			case patch_engine::mem_protection::rw: flags &= ~vm::alloc_unwritable; break;
			default: ensure(false);
 			}
 
 			if (alloc_map)
 			{
-				if ((p.alloc_addr = alloc_map->falloc(alloc_at, alloc_size)))
+				if ((p.alloc_addr = alloc_map->falloc(alloc_at, alloc_size, nullptr, flags)))
 				{
-					vm::page_protect(alloc_at, alloc_size, 0, flags, flags ^ (vm::page_writable + vm::page_readable + vm::page_executable));
-
-					if (flags & vm::page_executable)
+					if (flags & vm::alloc_executable)
 					{
 						ppu_register_range(alloc_at, alloc_size);
 					}
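The hunk above drops the old allocate-then-reprotect sequence (falloc with default rights followed by vm::page_protect) and instead encodes the requested protection directly in the new vm::alloc_* flags passed to falloc. Below is a minimal compile-time sketch of that mapping; patch_alloc_flags and the local mem_protection enum are stand-ins for illustration (the real values of mem_protection are not shown in this diff), while the alloc_* constants match the vm.h hunk at the end of this commit.

```cpp
// Illustrative sketch only; not part of the commit.
#include <cstdint>

// Stand-in values mirroring the vm::alloc_flags enum added in this commit's vm.h hunk.
enum : std::uint64_t
{
	alloc_hidden     = 0x1000,
	alloc_unwritable = 0x2000,
	alloc_executable = 0x4000,
};

// Stand-in for patch_engine::mem_protection (names taken from the switch above, values hypothetical).
enum class mem_protection { wx, ro, rx, rw };

// Mirrors the new switch: start read-only (alloc_unwritable) and relax or extend per protection.
constexpr std::uint64_t patch_alloc_flags(mem_protection prot)
{
	std::uint64_t flags = alloc_unwritable;

	switch (prot)
	{
	case mem_protection::wx: flags = alloc_executable; break;   // rwx: writable and executable
	case mem_protection::ro: break;                             // r--: keep unwritable
	case mem_protection::rx: flags |= alloc_executable; break;  // r-x
	case mem_protection::rw: flags &= ~alloc_unwritable; break; // rw-
	}

	return flags;
}

static_assert(patch_alloc_flags(mem_protection::rw) == 0);
static_assert(patch_alloc_flags(mem_protection::rx) == (alloc_unwritable | alloc_executable));
```

The resulting flags are handed straight to alloc_map->falloc(alloc_at, alloc_size, nullptr, flags); block_t::try_alloc (see the vm.cpp hunks below) converts them back into page protection bits when the pages are actually mapped.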
@@ -681,7 +678,7 @@ static usz apply_modification(std::basic_string<u32>& applied, const patch_engin
 			const u32 out_branch = vm::try_get_addr(dst + (offset & -4)).first;
 
 			// Allow only if points to a PPU executable instruction
-			if (out_branch < 0x10000 || out_branch >= 0x4000'0000 || !vm::check_addr<4>(out_branch, vm::page_executable))
+			if (out_branch < 0x10000 || out_branch >= 0x4000'0000 || !vm::check_addr<4>(out_branch, vm::alloc_executable))
 			{
 				continue;
 			}
@@ -689,14 +686,14 @@ static usz apply_modification(std::basic_string<u32>& applied, const patch_engin
 			const u32 alloc_size = utils::align(static_cast<u32>(p.value.long_value + 1) * 4, 0x10000);
 
 			// Always executable
-			u64 flags = vm::page_executable | vm::page_readable;
+			u64 flags = vm::alloc_executable | vm::alloc_unwritable;
 
 			switch (p.offset % patch_engine::mem_protection::mask)
 			{
 			case patch_engine::mem_protection::rw:
 			case patch_engine::mem_protection::wx:
 			{
-				flags |= vm::page_writable;
+				flags &= ~vm::alloc_unwritable;
 				break;
 			}
 			case patch_engine::mem_protection::ro:
@@ -726,7 +723,6 @@ static usz apply_modification(std::basic_string<u32>& applied, const patch_engin
 
 			// Register code
 			ppu_register_range(addr, alloc_size);
-			ppu_register_function_at(addr, static_cast<u32>(p.value.long_value), 0);
 
 			// Write branch to code
 			ppu_form_branch_to_code(out_branch, addr);
@@ -1203,7 +1203,7 @@ void ppu_module::analyse(u32 lib_toc, u32 entry, const u32 sec_end, const std::b
 
 		if (ppu_get_far_jump(iaddr))
 		{
-			block.second = _ptr.addr() - block.first;
+			block.second = _ptr.addr() - block.first - 4;
 			break;
 		}
 
@@ -521,7 +521,7 @@ bool ppu_form_branch_to_code(u32 entry, u32 target)
 	entry &= -4;
 	target &= -4;
 
-	if (entry == target || vm::check_addr(entry, vm::page_executable) || !vm::check_addr(target, vm::page_executable))
+	if (entry == target || !vm::check_addr(entry, vm::page_executable) || !vm::check_addr(target, vm::page_executable))
 	{
 		return false;
 	}
@@ -533,6 +533,7 @@ bool ppu_form_branch_to_code(u32 entry, u32 target)
 
 	std::lock_guard lock(jumps.mutex);
 	jumps.vals.insert_or_assign(entry, target);
+	ppu_register_function_at(entry, 4, &ppu_far_jump);
 
 	return true;
 }
@@ -1710,12 +1710,12 @@ spu_thread::spu_thread(lv2_spu_group* group, u32 index, std::string_view name, u
 	{
 		if (!group)
 		{
-			ensure(vm::get(vm::spu)->falloc(vm_offset(), SPU_LS_SIZE, &shm, 0x200));
+			ensure(vm::get(vm::spu)->falloc(vm_offset(), SPU_LS_SIZE, &shm, vm::page_size_64k));
 		}
 		else
 		{
-			// 0x1000 indicates falloc to allocate page with no access rights in base memory
-			ensure(vm::get(vm::spu)->falloc(vm_offset(), SPU_LS_SIZE, &shm, 0x1200));
+			// alloc_hidden indicates falloc to allocate page with no access rights in base memory
+			ensure(vm::get(vm::spu)->falloc(vm_offset(), SPU_LS_SIZE, &shm, vm::page_size_64k | vm::alloc_hidden));
 		}
 
 		// Try to guess free area
@@ -1050,7 +1050,7 @@ namespace vm
 	// Mapped regions: addr -> shm handle
 	constexpr auto block_map = &auto_typemap<block_t>::get<std::map<u32, std::pair<u32, std::shared_ptr<utils::shm>>>>;
 
-	bool block_t::try_alloc(u32 addr, u8 flags, u32 size, std::shared_ptr<utils::shm>&& shm) const
+	bool block_t::try_alloc(u32 addr, u64 bflags, u32 size, std::shared_ptr<utils::shm>&& shm) const
 	{
 		// Check if memory area is already mapped
 		for (u32 i = addr / 4096; i <= (addr + size - 1) / 4096; i++)
@@ -1064,6 +1064,34 @@ namespace vm
 		const u32 page_addr = addr + (this->flags & stack_guarded ? 0x1000 : 0);
 		const u32 page_size = size - (this->flags & stack_guarded ? 0x2000 : 0);
 
+		// No flags are default to readable/writable
+		// Explicit (un...) flags are used to protect from such access
+		u8 flags = 0;
+
+		if (~bflags & alloc_hidden)
+		{
+			flags |= page_readable;
+
+			if (~bflags & alloc_unwritable)
+			{
+				flags |= page_writable;
+			}
+		}
+
+		if (bflags & alloc_executable)
+		{
+			flags |= page_executable;
+		}
+
+		if ((flags & page_size_mask) == page_size_64k)
+		{
+			flags |= page_64k_size;
+		}
+		else if (!(flags & (page_size_mask & ~page_size_1m)))
+		{
+			flags |= page_1m_size;
+		}
+
 		if (this->flags & stack_guarded)
 		{
 			// Mark overflow/underflow guard pages as allocated
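With try_alloc now taking the full 64-bit flags, the page protection is derived inside the block: pages default to readable and writable, alloc_hidden strips all access, alloc_unwritable strips write access, and alloc_executable adds execute. Below is a self-contained sketch of just that translation (the page-size selection that follows it in the hunk is omitted); the constants mirror vm.h but are re-declared locally, as an assumption, so the snippet compiles on its own.

```cpp
// Illustrative sketch; not the actual vm:: implementation.
#include <cstdint>

namespace sketch
{
	// Stand-in constants mirroring vm.h in this commit.
	constexpr std::uint64_t page_readable    = 1 << 0;
	constexpr std::uint64_t page_writable    = 1 << 1;
	constexpr std::uint64_t page_executable  = 1 << 2;
	constexpr std::uint64_t alloc_hidden     = 0x1000;
	constexpr std::uint64_t alloc_unwritable = 0x2000;
	constexpr std::uint64_t alloc_executable = 0x4000;

	// Derive page protection bits from the 64-bit alloc flags, as try_alloc does above.
	constexpr std::uint8_t page_flags_from_alloc(std::uint64_t bflags)
	{
		std::uint8_t flags = 0;

		if (~bflags & alloc_hidden)
		{
			// Not hidden: readable by default, writable unless explicitly unwritable
			flags |= page_readable;

			if (~bflags & alloc_unwritable)
			{
				flags |= page_writable;
			}
		}

		if (bflags & alloc_executable)
		{
			flags |= page_executable;
		}

		return flags;
	}

	static_assert(page_flags_from_alloc(0) == (page_readable | page_writable));
	static_assert(page_flags_from_alloc(alloc_hidden) == 0);
	static_assert(page_flags_from_alloc(alloc_unwritable | alloc_executable) == (page_readable | page_executable));
}
```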
@@ -1122,12 +1150,31 @@ namespace vm
 		return true;
 	}
 
+	static constexpr u64 process_block_flags(u64 flags)
+	{
+		if ((flags & page_size_mask) == 0)
+		{
+			flags |= page_size_1m;
+		}
+
+		if (flags & page_size_4k)
+		{
+			flags |= preallocated;
+		}
+		else
+		{
+			flags &= ~stack_guarded;
+		}
+
+		return flags;
+	}
+
 	block_t::block_t(u32 addr, u32 size, u64 flags)
 		: addr(addr)
 		, size(size)
-		, flags(flags)
+		, flags(process_block_flags(flags))
 	{
-		if (flags & page_size_4k || flags & preallocated)
+		if (this->flags & preallocated)
 		{
 			// Special path for whole-allocated areas allowing 4k granularity
 			m_common = std::make_shared<utils::shm>(size);
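block_t now normalizes its flags once, in the new process_block_flags() helper, instead of re-deriving defaults at each use: a missing page-size bit defaults to 1 MiB pages, 4 KiB granularity implies a fully preallocated block, and any other granularity drops the stack-guard bit. A compile-time sketch of that rule follows; the preallocated and stack_guarded values below are stand-ins chosen only so the asserts compile (the real ones are defined in vm.h).

```cpp
// Sketch only; mirrors process_block_flags() above with stand-in constants.
#include <cstdint>
using u64 = std::uint64_t;

constexpr u64 page_size_4k   = 0x100;
constexpr u64 page_size_64k  = 0x200;
constexpr u64 page_size_1m   = 0x400;
constexpr u64 page_size_mask = page_size_4k | page_size_64k | page_size_1m;
constexpr u64 preallocated   = 0x10000; // stand-in value
constexpr u64 stack_guarded  = 0x20000; // stand-in value

constexpr u64 process_block_flags(u64 flags)
{
	if ((flags & page_size_mask) == 0)
	{
		flags |= page_size_1m; // default granularity
	}

	if (flags & page_size_4k)
	{
		flags |= preallocated; // 4k blocks are whole-allocated
	}
	else
	{
		flags &= ~stack_guarded; // guard pages only apply with 4k granularity
	}

	return flags;
}

// No page-size flag: defaults to 1 MiB pages, guard bit dropped.
static_assert(process_block_flags(stack_guarded) == page_size_1m);
// 4 KiB blocks become preallocated.
static_assert(process_block_flags(page_size_4k) == (page_size_4k | preallocated));
```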
@@ -1166,7 +1213,7 @@ namespace vm
 		if (!src)
 		{
 			// Use the block's flags (excpet for protection)
-			flags = (this->flags & ~page_prot_mask) | (flags & page_prot_mask);
+			flags = (this->flags & ~alloc_prot_mask) | (flags & alloc_prot_mask);
 		}
 
 		// Determine minimal alignment
@@ -1187,17 +1234,6 @@ namespace vm
 			return 0;
 		}
 
-		u8 pflags = flags & page_hidden ? 0 : (~flags & (page_readable | page_writable));
-
-		if ((flags & page_size_64k) == page_size_64k)
-		{
-			pflags |= page_64k_size;
-		}
-		else if (!(flags & (page_size_mask & ~page_size_1m)))
-		{
-			pflags |= page_1m_size;
-		}
-
 		// Create or import shared memory object
 		std::shared_ptr<utils::shm> shm;
 
@@ -1224,7 +1260,7 @@ namespace vm
 		// Search for an appropriate place (unoptimized)
 		for (;; addr += align)
 		{
-			if (try_alloc(addr, pflags, size, std::move(shm)))
+			if (try_alloc(addr, flags, size, std::move(shm)))
 			{
 				return addr + (flags & stack_guarded ? 0x1000 : 0);
 			}
@@ -1243,7 +1279,7 @@ namespace vm
 		if (!src)
 		{
 			// Use the block's flags (excpet for protection)
-			flags = (this->flags & ~page_prot_mask) | (flags & page_prot_mask);
+			flags = (this->flags & ~alloc_prot_mask) | (flags & alloc_prot_mask);
 		}
 
 		// Determine minimal alignment
@@ -1271,17 +1307,6 @@ namespace vm
 		// Force aligned address
 		addr -= addr % min_page_size;
 
-		u8 pflags = flags & page_hidden ? 0 : (~flags & (page_readable | page_writable));
-
-		if ((flags & page_size_64k) == page_size_64k)
-		{
-			pflags |= page_64k_size;
-		}
-		else if (!(flags & (page_size_mask & ~page_size_1m)))
-		{
-			pflags |= page_1m_size;
-		}
-
 		// Create or import shared memory object
 		std::shared_ptr<utils::shm> shm;
 
@@ -1296,7 +1321,7 @@ namespace vm
 
 		vm::writer_lock lock(0);
 
-		if (!try_alloc(addr, pflags, size, std::move(shm)))
+		if (!try_alloc(addr, flags, size, std::move(shm)))
 		{
 			return 0;
 		}
@@ -43,8 +43,7 @@ namespace vm
 		page_readable = (1 << 0),
 		page_writable = (1 << 1),
 		page_executable = (1 << 2),
-		page_prot_mask = page_readable | page_writable | page_executable,
 
 		page_fault_notification = (1 << 3),
 		page_no_reservations = (1 << 4),
 		page_64k_size = (1 << 5),
@@ -93,8 +92,6 @@ namespace vm
 
 	enum block_flags_3
 	{
-		page_hidden = 0x1000,
-
 		page_size_4k = 0x100, // SYS_MEMORY_PAGE_SIZE_4K
 		page_size_64k = 0x200, // SYS_MEMORY_PAGE_SIZE_64K
 		page_size_1m = 0x400, // SYS_MEMORY_PAGE_SIZE_1M
@@ -109,6 +106,15 @@ namespace vm
 		bf0_mask = bf0_0x1 | bf0_0x2,
 	};
 
+	enum alloc_flags
+	{
+		alloc_hidden = 0x1000,
+		alloc_unwritable = 0x2000,
+		alloc_executable = 0x4000,
+
+		alloc_prot_mask = alloc_hidden | alloc_unwritable | alloc_executable,
+	};
+
 	// Object that handles memory allocations inside specific constant bounds ("location")
 	class block_t final
 	{
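The new alloc_flags enum is the caller-facing side of this commit: allocation requests state their intent (hidden, unwritable, executable) in the flags argument of block_t::alloc()/falloc(), and alloc_prot_mask lets a block merge a request with its own defaults, as the (this->flags & ~alloc_prot_mask) | (flags & alloc_prot_mask) lines in the vm.cpp hunks do when no shared-memory source is supplied. A small sketch of that merge follows, using the enum values from the hunk above and a hypothetical set of block flags.

```cpp
// Illustrative sketch only; reuses the enum values introduced above.
#include <cstdint>
using u64 = std::uint64_t;

constexpr u64 alloc_hidden     = 0x1000;
constexpr u64 alloc_unwritable = 0x2000;
constexpr u64 alloc_executable = 0x4000;
constexpr u64 alloc_prot_mask  = alloc_hidden | alloc_unwritable | alloc_executable;

// Mirrors the "if (!src)" path in block_t::alloc()/falloc(): keep the block's own flags,
// but take the protection-related bits from the caller's request.
constexpr u64 merge_block_flags(u64 block_flags, u64 requested)
{
	return (block_flags & ~alloc_prot_mask) | (requested & alloc_prot_mask);
}

// Hypothetical block flags for the example: 64k pages (0x200) plus executable by default.
constexpr u64 example_block_flags = 0x200 | alloc_executable;

// A caller asking for a hidden (no-access) mapping overrides only the protection bits.
static_assert(merge_block_flags(example_block_flags, alloc_hidden) == (0x200 | alloc_hidden));
```

The result of the example, 0x1200, is exactly the magic constant the spu_thread hunk above replaces with vm::page_size_64k | vm::alloc_hidden.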
@@ -117,7 +123,7 @@ namespace vm
 		// Common mapped region for special cases
 		std::shared_ptr<utils::shm> m_common;
 
-		bool try_alloc(u32 addr, u8 flags, u32 size, std::shared_ptr<utils::shm>&&) const;
+		bool try_alloc(u32 addr, u64 bflags, u32 size, std::shared_ptr<utils::shm>&&) const;
 
 	public:
 		block_t(u32 addr, u32 size, u64 flags);