Mirror of https://github.com/RPCS3/rpcs3.git (synced 2024-11-25 04:02:42 +01:00)

Loader fixed

commit 1f2eafc4f2 (parent 3f3873ceb5)
@@ -680,16 +680,42 @@ bool set_x64_cmp_flags(x64_context* context, size_t d_size, u64 x, u64 y)

size_t get_x64_access_size(x64_context* context, x64_op_t op, x64_reg_t reg, size_t d_size, size_t i_size)
{
	if ((op == X64OP_MOVS || op == X64OP_STOS) && reg != X64_NOT_SET) // get "full" access size from RCX register
	if (op == X64OP_MOVS || op == X64OP_STOS)
	{
		if (EFLAGS(context) & 0x400 /* direction flag */)
		{
			// skip reservation bound check (TODO)
			return 0;
		}

		if (reg != X64_NOT_SET) // get "full" access size from RCX register
		{
			u64 counter;
			if (!get_x64_reg_value(context, reg, 8, i_size, counter))
			{
				return ~0ull;
				return -1;
			}

			return d_size * counter;
		}
	}

	if (op == X64OP_CMPXCHG)
	{
		// detect whether this instruction can't actually modify memory to avoid breaking reservation;
		// this may theoretically cause endless loop, but it shouldn't be a problem if only read_sync() generates such instruction
		u64 cmp, exch;
		if (!get_x64_reg_value(context, reg, d_size, i_size, cmp) || !get_x64_reg_value(context, X64R_RAX, d_size, i_size, exch))
		{
			return -1;
		}

		if (cmp == exch)
		{
			// skip reservation bound check
			return 0;
		}
	}

	return d_size;
}
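The new branch above derives the "full" access size of REP MOVS/STOS from the RCX counter and gives up (size 0) when the direction flag is set, while the added CMPXCHG branch reports a zero-sized access when the instruction cannot change memory. A minimal standalone sketch of those two rules, using plain integers in place of the x64_context register/flag accessors (the helper names and inputs here are illustrative, not RPCS3's API):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Returns 0 to mean "skip the reservation bound check" (illustrative helper,
// not part of RPCS3; the arguments stand in for RCX and EFLAGS.DF).
static std::size_t rep_access_size(std::size_t d_size, std::uint64_t rcx, bool direction_flag)
{
	if (direction_flag)
	{
		// Reversed copies are not bounded here, matching the TODO in the diff.
		return 0;
	}

	// REP MOVS/STOS touches d_size bytes per iteration, RCX iterations in total.
	return d_size * rcx;
}

// CMPXCHG writes its source operand only when memory equals RAX; if the source
// equals RAX too, the write cannot change memory, so the access counts as zero-sized.
static std::size_t cmpxchg_access_size(std::size_t d_size, std::uint64_t src, std::uint64_t rax)
{
	return src == rax ? 0 : d_size;
}

int main()
{
	std::printf("%zu\n", rep_access_size(4, 16, false)); // 64 bytes
	std::printf("%zu\n", cmpxchg_access_size(8, 7, 7));  // 0: cannot modify memory
}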
@@ -766,23 +792,6 @@ bool handle_access_violation(u32 addr, bool is_writing, x64_context* context)
		return true;
	}

	if (op == X64OP_CMPXCHG)
	{
		// detect whether this instruction can't actually modify memory to avoid breaking reservation;
		// this may theoretically cause endless loop, but it shouldn't be a problem if only read_sync() generates such instruction
		u64 cmp, exch;
		if (!get_x64_reg_value(context, reg, d_size, i_size, cmp) || !get_x64_reg_value(context, X64R_RAX, d_size, i_size, exch))
		{
			return false;
		}

		if (cmp == exch)
		{
			// this will skip reservation bound check
			a_size = 0;
		}
	}

	// check if fault is caused by the reservation
	return vm::reservation_query(addr, (u32)a_size, is_writing, [&]() -> bool
	{
@@ -809,12 +818,6 @@ bool handle_access_violation(u32 addr, bool is_writing, x64_context* context)
			break;
		}

		if (d_size > 8)
		{
			LOG_ERROR(MEMORY, "X64OP_STORE: d_size=%lld", d_size);
			return false;
		}

		u64 reg_value;
		if (!get_x64_reg_value(context, reg, d_size, i_size, reg_value))
		{
@@ -834,7 +837,7 @@ bool handle_access_violation(u32 addr, bool is_writing, x64_context* context)

		if (vm::get_ptr(addr) != (void*)RDI(context))
		{
			LOG_ERROR(MEMORY, "X64OP_MOVS error: rdi=0x%llx, addr=0x%x", (u64)RDI(context), addr);
			LOG_ERROR(MEMORY, "X64OP_MOVS: rdi=0x%llx, rsi=0x%llx, addr=0x%x", (u64)RDI(context), (u64)RSI(context), addr);
			return false;
		}

@@ -851,7 +854,6 @@ bool handle_access_violation(u32 addr, bool is_writing, x64_context* context)
			// shift pointers
			if (EFLAGS(context) & 0x400 /* direction flag */)
			{
				// for reversed direction, addr argument should be calculated in different way
				LOG_ERROR(MEMORY, "X64OP_MOVS TODO: reversed direction");
				return false;
				//RSI(context) -= d_size;
@@ -890,7 +892,7 @@ bool handle_access_violation(u32 addr, bool is_writing, x64_context* context)

		if (vm::get_ptr(addr) != (void*)RDI(context))
		{
			LOG_ERROR(MEMORY, "X64OP_STOS error: rdi=0x%llx, addr=0x%x", (u64)RDI(context), addr);
			LOG_ERROR(MEMORY, "X64OP_STOS: rdi=0x%llx, addr=0x%x", (u64)RDI(context), addr);
			return false;
		}

@@ -910,7 +912,6 @@ bool handle_access_violation(u32 addr, bool is_writing, x64_context* context)
			// shift pointers
			if (EFLAGS(context) & 0x400 /* direction flag */)
			{
				// for reversed direction, addr argument should be calculated in different way
				LOG_ERROR(MEMORY, "X64OP_STOS TODO: reversed direction");
				return false;
				//RDI(context) -= d_size;
@@ -939,12 +940,6 @@ bool handle_access_violation(u32 addr, bool is_writing, x64_context* context)
		}
		case X64OP_XCHG:
		{
			if (d_size != 1 && d_size != 2 && d_size != 4 && d_size != 8)
			{
				LOG_ERROR(MEMORY, "X64OP_XCHG: d_size=%lld", d_size);
				return false;
			}

			u64 reg_value;
			if (!get_x64_reg_value(context, reg, d_size, i_size, reg_value))
			{
@@ -957,6 +952,7 @@ bool handle_access_violation(u32 addr, bool is_writing, x64_context* context)
			case 2: reg_value = vm::get_priv_ref<atomic_le_t<u16>>(addr).exchange((u16)reg_value); break;
			case 4: reg_value = vm::get_priv_ref<atomic_le_t<u32>>(addr).exchange((u32)reg_value); break;
			case 8: reg_value = vm::get_priv_ref<atomic_le_t<u64>>(addr).exchange((u64)reg_value); break;
			default: return false;
			}

			if (!put_x64_reg_value(context, reg, d_size, reg_value))
@@ -967,12 +963,6 @@ bool handle_access_violation(u32 addr, bool is_writing, x64_context* context)
		}
		case X64OP_CMPXCHG:
		{
			if (d_size != 1 && d_size != 2 && d_size != 4 && d_size != 8)
			{
				LOG_ERROR(MEMORY, "X64OP_CMPXCHG: d_size=%lld", d_size);
				return false;
			}

			u64 reg_value, old_value, cmp_value;
			if (!get_x64_reg_value(context, reg, d_size, i_size, reg_value) || !get_x64_reg_value(context, X64R_RAX, d_size, i_size, cmp_value))
			{
@@ -985,6 +975,7 @@ bool handle_access_violation(u32 addr, bool is_writing, x64_context* context)
			case 2: old_value = vm::get_priv_ref<atomic_le_t<u16>>(addr).compare_and_swap((u16)cmp_value, (u16)reg_value); break;
			case 4: old_value = vm::get_priv_ref<atomic_le_t<u32>>(addr).compare_and_swap((u32)cmp_value, (u32)reg_value); break;
			case 8: old_value = vm::get_priv_ref<atomic_le_t<u64>>(addr).compare_and_swap((u64)cmp_value, (u64)reg_value); break;
			default: return false;
			}

			if (!put_x64_reg_value(context, X64R_RAX, d_size, old_value) || !set_x64_cmp_flags(context, d_size, cmp_value, old_value))
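For the XCHG/CMPXCHG cases above, the handler performs the memory side of the faulting instruction through the guest-memory atomics and then writes the result back into the register context. A small stand-alone sketch of the same two operations, using std::atomic in place of vm::get_priv_ref<atomic_le_t<T>> (names and values here are illustrative only):

#include <atomic>
#include <cstdint>
#include <cstdio>

// Sketch of the access-violation fallback for XCHG/CMPXCHG, with a local
// std::atomic standing in for the emulator's guest-memory word.
int main()
{
	std::atomic<std::uint32_t> guest_word{0x1234};

	// X64OP_XCHG: swap the register value with memory; the old memory value
	// would then go back into the register (put_x64_reg_value in the diff).
	std::uint32_t reg_value = 0xABCD;
	reg_value = guest_word.exchange(reg_value);
	std::printf("xchg: reg=0x%x mem=0x%x\n", reg_value, guest_word.load());

	// X64OP_CMPXCHG: store reg_value only if memory equals cmp_value; RAX and
	// the flags would then be updated from old_value (set_x64_cmp_flags in the diff).
	std::uint32_t cmp_value = 0xABCD;
	std::uint32_t old_value = cmp_value;
	bool exchanged = guest_word.compare_exchange_strong(old_value, 0x5678);
	std::printf("cmpxchg: exchanged=%d old=0x%x mem=0x%x\n", exchanged, old_value, guest_word.load());
}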
@@ -73,7 +73,7 @@ void PPUThread::InitRegs()
	//GPR[12] = Emu.GetMallocPageSize();
	GPR[13] = ppu_get_tls(GetId()) + 0x7000; // 0x7000 is usually subtracted from r13 to access first TLS element (details are not clear)

	LR = Emu.GetCPUThreadExit();
	LR = 0;
	CTR = PC;
	CR.CR = 0x22000082;
	VSCR.NJ = 1;
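The comment on GPR[13] above describes the usual PPU TLS convention: r13 points 0x7000 past the start of the TLS block, so code reaches the first element by subtracting 0x7000. A tiny sketch of that arithmetic with a made-up base address (anything beyond what the comment states is an assumption):

#include <cstdint>
#include <cstdio>

// Sketch of the r13 convention mentioned above: r13 holds the TLS base plus
// 0x7000, so the first TLS element sits at r13 - 0x7000. tls_base is a
// hypothetical value, not what ppu_get_tls() actually returns.
int main()
{
	const std::uint64_t tls_base = 0x30000000;   // hypothetical allocation
	const std::uint64_t r13 = tls_base + 0x7000; // what InitRegs() stores in GPR[13]

	const std::uint64_t first_tls_element = r13 - 0x7000;
	std::printf("first TLS element at 0x%llx\n", (unsigned long long)first_tls_element);
}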
@@ -83,7 +83,6 @@ class Emulator
	uint m_mode;

	u32 m_rsx_callback;
	u32 m_cpu_thr_exit;
	u32 m_cpu_thr_stop;
	std::vector<std::unique_ptr<ModuleInitializer>> m_modules_init;

@@ -183,11 +182,6 @@ public:
		m_rsx_callback = addr;
	}

	void SetCPUThreadExit(u32 addr)
	{
		m_cpu_thr_exit = addr;
	}

	void SetCPUThreadStop(u32 addr)
	{
		m_cpu_thr_stop = addr;
@@ -202,7 +196,6 @@ public:
	u32 GetMallocPageSize() { return m_info.GetProcParam().malloc_pagesize; }

	u32 GetRSXCallback() const { return m_rsx_callback; }
	u32 GetCPUThreadExit() const { return m_cpu_thr_exit; }
	u32 GetCPUThreadStop() const { return m_cpu_thr_stop; }

	void CheckStatus();
@@ -19,10 +19,16 @@ namespace loader
{
	handler::error_code elf32::init(vfsStream& stream)
	{
		m_ehdr = {};
		m_phdrs.clear();
		m_shdrs.clear();

		error_code res = handler::init(stream);

		if (res != ok)
		{
			return res;
		}

		m_stream->Read(&m_ehdr, sizeof(ehdr));

@@ -52,8 +58,6 @@ namespace loader
			if (m_stream->Read(m_phdrs.data(), size) != size)
				return broken_file;
		}
		else
			m_phdrs.clear();

		if (m_ehdr.data_le.e_shnum)
		{
@@ -64,8 +68,6 @@ namespace loader
			if (m_stream->Read(m_shdrs.data(), size) != size)
				return broken_file;
		}
		else
			m_shdrs.clear();

		return ok;
	}
@@ -133,7 +135,7 @@ namespace loader
		auto armv7_thr_stop_data = vm::psv::ptr<u32>::make(Memory.PSV.RAM.AllocAlign(3 * 4));
		armv7_thr_stop_data[0] = 0xf870; // HACK instruction (Thumb)
		armv7_thr_stop_data[1] = 0x0001; // index 1
		Emu.SetCPUThreadExit(armv7_thr_stop_data.addr());
		Emu.SetCPUThreadStop(armv7_thr_stop_data.addr());

		u32 entry = 0; // actual entry point (ELFs entry point is ignored)
		u32 fnid_addr = 0;
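The lines added to elf32::init above reset every field filled by a previous call (ELF header, program and section headers) before handler::init runs, so one handler object can be pointed at a new stream without stale state leaking through. A generic sketch of that reset-before-parse pattern with stand-in types (not the loader's real classes):

#include <cstdint>
#include <vector>

// Stand-in for an ELF32-style handler: clear every field filled by a previous
// init() so the object can be reused on another stream, then parse.
struct elf32_like_handler
{
	struct ehdr { std::uint16_t e_phnum = 0, e_shnum = 0; };

	ehdr m_ehdr{};
	std::vector<std::uint8_t> m_phdrs;
	std::vector<std::uint8_t> m_shdrs;

	bool init(const std::vector<std::uint8_t>& stream)
	{
		// Reset state from any previous call before reading the new file.
		m_ehdr = {};
		m_phdrs.clear();
		m_shdrs.clear();

		if (stream.size() < sizeof(ehdr))
		{
			return false; // broken_file in the real loader
		}

		// ... read m_ehdr, then program/section headers, as in the diff above ...
		return true;
	}
};

int main()
{
	elf32_like_handler h;
	std::vector<std::uint8_t> buf(sizeof(elf32_like_handler::ehdr));
	return h.init(buf) ? 0 : 1;
}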
@@ -23,10 +23,23 @@ namespace loader
{
	handler::error_code elf64::init(vfsStream& stream)
	{
		m_ehdr = {};
		m_sprx_module_info = {};
		m_sprx_function_info = {};

		m_phdrs.clear();
		m_shdrs.clear();

		m_sprx_segments_info.clear();
		m_sprx_import_info.clear();
		m_sprx_export_info.clear();

		error_code res = handler::init(stream);

		if (res != ok)
		{
			return res;
		}

		m_stream->Read(&m_ehdr, sizeof(ehdr));

@@ -58,8 +71,6 @@ namespace loader
			if (m_stream->Read(m_phdrs.data(), m_ehdr.e_phnum * sizeof(phdr)) != m_ehdr.e_phnum * sizeof(phdr))
				return broken_file;
		}
		else
			m_phdrs.clear();

		if (m_ehdr.e_shnum)
		{
@@ -68,8 +79,6 @@ namespace loader
			if (m_stream->Read(m_shdrs.data(), m_ehdr.e_shnum * sizeof(shdr)) != m_ehdr.e_shnum * sizeof(shdr))
				return broken_file;
		}
		else
			m_shdrs.clear();

		if (is_sprx())
		{
@@ -79,11 +88,6 @@ namespace loader
			//m_stream->Seek(handler::get_stream_offset() + m_phdrs[1].p_vaddr.addr());
			//m_stream->Read(&m_sprx_function_info, sizeof(sprx_function_info));
		}
		else
		{
			m_sprx_import_info.clear();
			m_sprx_export_info.clear();
		}

		return ok;
	}
@@ -95,6 +99,7 @@ namespace loader
			switch ((u32)phdr.p_type)
			{
			case 0x1: //load
			{
				if (phdr.p_memsz)
				{
					sprx_segment_info segment;
@@ -186,8 +191,10 @@ namespace loader
				}

				break;
			}

			case 0x700000a4: //relocation
			{
				m_stream->Seek(handler::get_stream_offset() + phdr.p_offset);

				for (uint i = 0; i < phdr.p_filesz; i += sizeof(sys_prx_relocation_info_t))
@@ -228,6 +235,7 @@ namespace loader
				break;
			}
			}
		}

		for (auto &m : info.modules)
		{
@@ -264,6 +272,12 @@ namespace loader
		//store elf to memory
		vm::ps3::init();

		error_code res = alloc_memory(0);
		if (res != ok)
		{
			return res;
		}

		std::vector<u32> start_funcs;
		std::vector<u32> stop_funcs;

@@ -273,6 +287,7 @@ namespace loader
		for (const auto module : lle_dir)
		{
			elf64 sprx_handler;

			vfsFile fsprx(lle_dir.GetPath() + "/" + module->name);

			if (fsprx.IsOpened())
@@ -286,12 +301,12 @@ namespace loader

				if (!load_lib.LoadValue(false))
				{
					LOG_ERROR(LOADER, "skipped lle library '%s'", sprx_handler.sprx_get_module_name().c_str());
					LOG_WARNING(LOADER, "Skipped LLE library '%s'", sprx_handler.sprx_get_module_name().c_str());
					continue;
				}
				else
				{
					LOG_WARNING(LOADER, "loading lle library '%s'", sprx_handler.sprx_get_module_name().c_str());
					LOG_WARNING(LOADER, "Loading LLE library '%s'", sprx_handler.sprx_get_module_name().c_str());
				}

				sprx_info info;
@@ -332,7 +347,7 @@ namespace loader
			}
		}

		error_code res = load_data(0);
		res = load_data(0);
		if (res != ok)
			return res;

@@ -345,18 +360,11 @@ namespace loader
		rsx_callback_data[1] = SC(0);
		rsx_callback_data[2] = BLR();

		auto ppu_thr_exit_data = vm::ptr<u32>::make(Memory.MainMem.AllocAlign(3 * 4));
		ppu_thr_exit_data[0] = ADDI(r11, 0, 41);
		ppu_thr_exit_data[1] = SC(0);
		ppu_thr_exit_data[2] = BLR();
		Emu.SetCPUThreadExit(ppu_thr_exit_data.addr());

		auto ppu_thr_stop_data = vm::ptr<u32>::make(Memory.MainMem.AllocAlign(2 * 4));
		ppu_thr_stop_data[0] = SC(3);
		ppu_thr_stop_data[1] = BLR();
		Emu.SetCPUThreadStop(ppu_thr_stop_data.addr());

		//vm::write64(Memory.PRXMem.AllocAlign(0x10000), 0xDEADBEEFABADCAFE);
		/*
		//TODO
		static const int branch_size = 6 * 4;
@@ -395,6 +403,31 @@ namespace loader
		return ok;
	}

	handler::error_code elf64::alloc_memory(u64 offset)
	{
		for (auto &phdr : m_phdrs)
		{
			switch (phdr.p_type.value())
			{
			case 0x00000001: //LOAD
			{
				if (phdr.p_memsz)
				{
					if (!vm::alloc(vm::cast(phdr.p_vaddr.addr()), vm::cast(phdr.p_memsz, "phdr.p_memsz"), vm::main))
					{
						LOG_ERROR(LOADER, "%s(): AllocFixed(0x%llx, 0x%llx) failed", __FUNCTION__, phdr.p_vaddr.addr(), phdr.p_memsz);

						return loading_error;
					}
				}
				break;
			}
			}
		}

		return ok;
	}

	handler::error_code elf64::load_data(u64 offset)
	{
		for (auto &phdr : m_phdrs)
@@ -402,16 +435,9 @@ namespace loader
			switch (phdr.p_type.value())
			{
			case 0x00000001: //LOAD
			{
				if (phdr.p_memsz)
				{
					if (!vm::alloc(phdr.p_vaddr.addr(), (u32)phdr.p_memsz, vm::main))
					{
						// addr() has be_t<> type (test)
						LOG_ERROR(LOADER, "%s(): AllocFixed(0x%llx, 0x%x) failed", __FUNCTION__, phdr.p_vaddr.addr(), (u32)phdr.p_memsz);

						return loading_error;
					}

					if (phdr.p_filesz)
					{
						m_stream->Seek(handler::get_stream_offset() + phdr.p_offset);
@@ -420,15 +446,19 @@ namespace loader
					}
				}
				break;
			}

			case 0x00000007: //TLS
			{
				Emu.SetTLSData(
					vm::cast(phdr.p_vaddr.addr(), "TLS: phdr.p_vaddr"),
					vm::cast(phdr.p_filesz.value(), "TLS: phdr.p_filesz"),
					vm::cast(phdr.p_memsz.value(), "TLS: phdr.p_memsz"));
				break;
			}

			case 0x60000001: //LOOS+1
			{
				if (phdr.p_filesz)
				{
					const sys_process_param& proc_param = *(sys_process_param*)phdr.p_vaddr.get_ptr();
@@ -458,8 +488,10 @@ namespace loader
					}
				}
				break;
			}

			case 0x60000002: //LOOS+2
			{
				if (phdr.p_filesz)
				{
					const sys_proc_prx_param& proc_prx_param = *(sys_proc_prx_param*)phdr.p_vaddr.get_ptr();
@@ -538,6 +570,7 @@ namespace loader
				break;
			}
			}
		}

		return ok;
	}
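The new elf64::alloc_memory/load_data pair above splits loading into two passes: alloc_memory reserves every PT_LOAD segment up front (it is called before the LLE module loop), and only afterwards does load_data seek into the file and fill the segments. A compact sketch of that two-pass structure, with a std::map standing in for vm::alloc and simplified program headers (all names here are illustrative, not the loader's own types):

#include <cstdint>
#include <cstring>
#include <map>
#include <vector>

// Simplified program header; the real loader uses be_t<>-based fields.
struct phdr { std::uint32_t p_type, p_vaddr, p_filesz, p_memsz, p_offset; };

// Stand-in for the emulated address space (vm::alloc + direct writes).
static std::map<std::uint32_t, std::vector<std::uint8_t>> memory;

// Pass 1: reserve every PT_LOAD segment before any data is copied.
static bool alloc_memory(const std::vector<phdr>& phdrs)
{
	for (const auto& p : phdrs)
	{
		if (p.p_type == 1 /*PT_LOAD*/ && p.p_memsz)
		{
			memory[p.p_vaddr].assign(p.p_memsz, 0); // vm::alloc(...) in the diff
		}
	}
	return true;
}

// Pass 2: copy file contents into the already-reserved segments.
static bool load_data(const std::vector<phdr>& phdrs, const std::vector<std::uint8_t>& file)
{
	for (const auto& p : phdrs)
	{
		if (p.p_type == 1 /*PT_LOAD*/ && p.p_filesz)
		{
			std::memcpy(memory[p.p_vaddr].data(), file.data() + p.p_offset, p.p_filesz);
		}
	}
	return true;
}

int main()
{
	std::vector<std::uint8_t> file(0x100, 0xAA);
	std::vector<phdr> phdrs = { {1, 0x10000, 0x80, 0x100, 0} };

	// Allocate every segment before any data is copied (the order the fix enforces).
	return alloc_memory(phdrs) && load_data(phdrs, file) ? 0 : 1;
}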
@@ -156,6 +156,7 @@ namespace loader

		error_code init(vfsStream& stream) override;
		error_code load() override;
		error_code alloc_memory(u64 offset);
		error_code load_data(u64 offset);
		error_code load_sprx(sprx_info& info);
		bool is_sprx() const { return m_ehdr.e_type == 0xffa4; }
@@ -19,11 +19,11 @@ namespace loader
					return true;
				}

				LOG_ERROR(LOADER, "loader::load() failed: %s", i->get_error_code().c_str());
				LOG_NOTICE(LOADER, "loader::load() failed: %s", i->get_error_code().c_str());
			}
			else
			{
				LOG_ERROR(LOADER, "loader::init() failed: %s", i->get_error_code().c_str());
				LOG_NOTICE(LOADER, "loader::init() failed: %s", i->get_error_code().c_str());
				stream.Seek(i->get_stream_offset());
			}
		}
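The logging change above belongs to loader::load()'s probing loop: each registered handler gets a chance at the stream, and a failed init()/load() is now only a notice because a later handler may still accept the file (the stream is rewound via stream.Seek before the next attempt). A hedged sketch of that try-each-handler pattern with illustrative interfaces, not the loader's real ones:

#include <cstdio>
#include <memory>
#include <vector>

// Minimal handler interface for the sketch.
struct handler
{
	virtual ~handler() = default;
	virtual bool init() = 0;
	virtual bool load() = 0;
	virtual const char* name() const = 0;
};

struct elf_like : handler
{
	bool ok_init, ok_load;
	elf_like(bool i, bool l) : ok_init(i), ok_load(l) {}
	bool init() override { return ok_init; }
	bool load() override { return ok_load; }
	const char* name() const override { return "elf_like"; }
};

// Try each handler in turn; a failure is only a notice because another
// handler may still accept the file.
static bool load_any(const std::vector<std::unique_ptr<handler>>& handlers)
{
	for (const auto& h : handlers)
	{
		if (!h->init())
		{
			std::printf("notice: %s init() failed, trying next handler\n", h->name());
			continue; // the real loader also rewinds the stream here
		}
		if (!h->load())
		{
			std::printf("notice: %s load() failed\n", h->name());
			continue;
		}
		return true;
	}
	return false;
}

int main()
{
	std::vector<std::unique_ptr<handler>> handlers;
	handlers.emplace_back(std::make_unique<elf_like>(false, false));
	handlers.emplace_back(std::make_unique<elf_like>(true, true));
	return load_any(handlers) ? 0 : 1;
}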