1
0
mirror of https://github.com/RPCS3/rpcs3.git synced 2024-11-22 02:32:36 +01:00

Patches/LLVM: Implement Complex Patches Support

This commit is contained in:
Eladash 2021-08-23 16:21:49 +03:00 committed by kd-11
parent 2d9929059f
commit ddb042148d
3 changed files with 108 additions and 8 deletions

View File

@@ -2,10 +2,12 @@
#include "File.h" #include "File.h"
#include "Config.h" #include "Config.h"
#include "version.h" #include "version.h"
#include "Emu/Memory/vm.h"
#include "Emu/System.h" #include "Emu/System.h"
#include "util/types.hpp" #include "util/types.hpp"
#include "util/endian.hpp" #include "util/endian.hpp"
#include "util/asm.hpp"
LOG_CHANNEL(patch_log, "PAT"); LOG_CHANNEL(patch_log, "PAT");
@@ -35,6 +37,7 @@ void fmt_class_string<patch_type>::format(std::string& out, u64 arg)
switch (value) switch (value)
{ {
case patch_type::invalid: return "invalid"; case patch_type::invalid: return "invalid";
case patch_type::alloc: return "alloc";
case patch_type::load: return "load"; case patch_type::load: return "load";
case patch_type::byte: return "byte"; case patch_type::byte: return "byte";
case patch_type::le16: return "le16"; case patch_type::le16: return "le16";
@@ -512,10 +515,85 @@ void patch_engine::append_title_patches(const std::string& title_id)
load(m_map, get_patches_path() + title_id + "_patch.yml"); load(m_map, get_patches_path() + title_id + "_patch.yml");
} }
void ppu_register_range(u32 addr, u32 size);
static std::basic_string<u32> apply_modification(const patch_engine::patch_info& patch, u8* dst, u32 filesz, u32 min_addr) static std::basic_string<u32> apply_modification(const patch_engine::patch_info& patch, u8* dst, u32 filesz, u32 min_addr)
{ {
std::basic_string<u32> applied; std::basic_string<u32> applied;
for (const auto& p : patch.data_list)
{
if (p.type != patch_type::alloc) continue;
// Do not allow null address or if dst is not a VM ptr
if (const u32 alloc_at = vm::try_get_addr(dst + (p.offset & -4096)).first; alloc_at >> 16)
{
const u32 alloc_size = utils::align(static_cast<u32>(p.value.long_value) + alloc_at % 4096, 4096);
// Allocate map if needed, if allocated flags will indicate that bit 62 is set (unique identifier)
auto alloc_map = vm::reserve_map(vm::any, alloc_at & -0x10000, utils::align(alloc_size, 0x10000), vm::page_size_64k | vm::preallocated | vm::bf0_0x2 | (1ull << 62));
u64 flags = 0;
switch (p.offset % patch_engine::mem_protection::mask)
{
case patch_engine::mem_protection::wx: flags |= vm::page_writable + vm::page_readable + vm::page_executable; break;
case patch_engine::mem_protection::ro: flags |= vm::page_readable; break;
case patch_engine::mem_protection::rx: flags |= vm::page_writable + vm::page_executable; break;
case patch_engine::mem_protection::rw: flags |= vm::page_writable + vm::page_readable; break;
default: ensure(false);
}
if (alloc_map)
{
if (alloc_map->falloc(alloc_at, alloc_size))
{
vm::page_protect(alloc_at, alloc_size, 0, flags, flags ^ (vm::page_writable + vm::page_readable + vm::page_executable));
if (flags & vm::page_executable)
{
ppu_register_range(alloc_at, alloc_size);
}
applied.push_back(::narrow<u32>(&p - patch.data_list.data())); // Remember index in case of failure to allocate any memory
continue;
}
// Revert if allocated map before failure
if (alloc_map->flags & (1ull << 62))
{
vm::unmap(vm::any, alloc_map->addr);
}
}
}
// Revert in case of failure
for (u32 index : applied)
{
const u32 addr = patch.data_list[index].offset & -4096;
// Try different alignments until works
if (!vm::dealloc(addr))
{
if (!vm::dealloc(addr & -0x10000))
{
vm::dealloc(addr & -0x100000);
}
}
if (auto alloc_map = vm::get(vm::any, addr); alloc_map->flags & (1ull << 62))
{
vm::unmap(vm::any, alloc_map->addr);
}
}
applied.clear();
return applied;
}
// Fixup values from before
std::fill(applied.begin(), applied.end(), u32{umax});
for (const auto& p : patch.data_list) for (const auto& p : patch.data_list)
{ {
u32 offset = p.offset; u32 offset = p.offset;
@@ -540,6 +618,11 @@ static std::basic_string<u32> apply_modification(const patch_engine::patch_info&
// Invalid in this context // Invalid in this context
continue; continue;
} }
case patch_type::alloc:
{
// Applied before
continue;
}
case patch_type::byte: case patch_type::byte:
{ {
*ptr = static_cast<u8>(p.value.long_value); *ptr = static_cast<u8>(p.value.long_value);

View File

@@ -26,6 +26,7 @@ enum class patch_type
{ {
invalid, invalid,
load, load,
alloc, // Allocate memory at address (zeroized executable memory)
byte, byte,
le16, le16,
le32, le32,
@@ -85,6 +86,15 @@ public:
std::string version{}; std::string version{};
}; };
// Page protection mode for alloc-type patches, carried in the low bits
// of the patch offset value.
enum mem_protection : u8
{
wx = 0, // Read + Write + Execute (default)
ro = 1, // Read
rx = 2, // Read + Execute
rw = 3, // Read + Write
mask = 3, // Selector covering all modes above. NOTE(review): the visible call site extracts the mode with 'offset % mask' (modulo 3 yields only 0..2), so rw appears unreachable there — verify whether '&' was intended.
};
using patch_map = std::unordered_map<std::string /*hash*/, patch_container>; using patch_map = std::unordered_map<std::string /*hash*/, patch_container>;
patch_engine(); patch_engine();

View File

@@ -345,16 +345,23 @@ void PPUTranslator::CallFunction(u64 target, Value* indirect)
if (!indirect) if (!indirect)
{ {
if ((!m_reloc && target < 0x10000) || target >= 0x100000000u - 0x10000) const u64 base = m_reloc ? m_reloc->addr : 0;
{ const u32 caddr = m_info.segs[0].addr;
Trap(); const u32 cend = caddr + m_info.segs[0].size - 1;
return; const u64 _target = target + base;
}
callee = m_module->getOrInsertFunction(fmt::format("__0x%x", target), type); if (_target >= caddr && _target <= cend)
cast<Function>(callee.getCallee())->setCallingConv(CallingConv::GHC); {
callee = m_module->getOrInsertFunction(fmt::format("__0x%x", target), type);
cast<Function>(callee.getCallee())->setCallingConv(CallingConv::GHC);
}
else
{
indirect = m_reloc ? m_ir->CreateAdd(m_ir->getInt64(target), seg0) : m_ir->getInt64(target);
}
} }
else
if (indirect)
{ {
m_ir->CreateStore(Trunc(indirect, GetType<u32>()), m_ir->CreateStructGEP(nullptr, m_thread, static_cast<uint>(&m_cia - m_locals)), true); m_ir->CreateStore(Trunc(indirect, GetType<u32>()), m_ir->CreateStructGEP(nullptr, m_thread, static_cast<uint>(&m_cia - m_locals)), true);