Mirror of https://github.com/RPCS3/rpcs3.git, synced 2024-11-22 02:32:36 +01:00

Memory (unfinished)

Author: Nekotekina, 2015-02-16 04:53:53 +03:00
Commit: e6c628caba (parent: 1189503b4d)
5 changed files with 71 additions and 41 deletions

View File

@@ -530,10 +530,12 @@ bool get_x64_reg_value(x64_context* context, x64_reg_t reg, size_t d_size, size_
     else if (reg - X64R_AL < 4 && d_size == 1)
     {
         out_value = (u8)(*X64REG(context, reg - X64R_AL));
+        return true;
     }
     else if (reg - X64R_AH < 4 && d_size == 1)
     {
         out_value = (u8)(*X64REG(context, reg - X64R_AH) >> 8);
+        return true;
     }
     else if (reg == X64_IMM32)
     {
@@ -549,6 +551,7 @@ bool get_x64_reg_value(x64_context* context, x64_reg_t reg, size_t d_size, size_
     else if (reg == X64R_ECX)
     {
         out_value = (u32)RCX(context);
+        return true;
     }
     LOG_ERROR(GENERAL, "get_x64_reg_value(): invalid arguments (reg=%d, d_size=%lld, i_size=%lld)", reg, d_size, i_size);
@@ -571,7 +574,7 @@ bool put_x64_reg_value(x64_context* context, x64_reg_t reg, size_t d_size, u64 v
 void fix_x64_reg_op(x64_context* context, x64_op_t& op, x64_reg_t& reg, size_t& d_size, size_t& i_size)
 {
-    if (op == X64OP_MOVS && reg != X64_NOT_SET)
+    if (op == X64OP_MOVS && reg != X64_NOT_SET) // get "full" access size from RCX register
     {
         u64 counter;
         if (!get_x64_reg_value(context, reg, 8, i_size, counter))
@@ -584,6 +587,8 @@ void fix_x64_reg_op(x64_context* context, x64_op_t& op, x64_reg_t& reg, size_t&
         }
         d_size *= counter;
+        reg = X64_NOT_SET;
+        return;
     }
 }
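
The fix_x64_reg_op() hunk above derives the "full" access size of a repeated MOVS by multiplying the element size by the counter read from RCX, and then clears reg, presumably so later stages do not read RCX again. A minimal sketch of that size computation, using hypothetical values rather than anything taken from the emulator:

// sketch: effective access size of "rep movsd" with a hypothetical RCX value
#include <cstddef>
#include <cstdint>
#include <cstdio>

int main()
{
    std::size_t d_size = 4;             // element size of a movsd access
    const std::uint64_t counter = 0x20; // value that would be read from RCX
    d_size *= counter;                  // the "full" access size the handler works with
    std::printf("full access size = 0x%zx bytes\n", d_size);
    return 0;
}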
@@ -600,6 +605,29 @@ bool handle_access_violation(const u32 addr, bool is_writing, x64_context* conte
     decode_x64_reg_op(code, op, reg, d_size, i_size);
     fix_x64_reg_op(context, op, reg, d_size, i_size);
+    if (d_size + addr >= 0x100000000ull)
+    {
+        LOG_ERROR(GENERAL, "Invalid d_size (0x%llx)", d_size);
+        return false;
+    }
+    if (op == X64OP_CMPXCHG)
+    {
+        // detect whether this instruction can't actually modify memory to avoid breaking reservation;
+        // this may theoretically cause endless loop, but it shouldn't be a problem if only read_sync() generates such instruction
+        u64 cmp, exch;
+        if (!get_x64_reg_value(context, reg, d_size, i_size, cmp) || !get_x64_reg_value(context, X64R_RAX, d_size, i_size, exch))
+        {
+            return false;
+        }
+        if (cmp == exch)
+        {
+            // could also be emulated without attempt to write memory
+            is_writing = false;
+        }
+    }
     // check if address is RawSPU MMIO register
     if (addr - RAW_SPU_BASE_ADDR < (6 * RAW_SPU_OFFSET) && (addr % RAW_SPU_OFFSET) >= RAW_SPU_PROB_OFFSET)
     {
@@ -645,7 +673,7 @@ bool handle_access_violation(const u32 addr, bool is_writing, x64_context* conte
     }
     // check if fault is caused by reservation
-    if (vm::reservation_query(addr, is_writing))
+    if (vm::reservation_query(addr, (u32)d_size, is_writing))
     {
         return true;
     }
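
The X64OP_CMPXCHG handling added above rests on a property of the instruction itself: cmpxchg compares the accumulator with the memory operand, stores the source register only on a match, and otherwise just reloads the accumulator. If the source register and the accumulator already hold the same value, memory can only ever be overwritten with its current contents, so the fault can safely be treated as a read without breaking the reservation. A minimal model of that argument (a sketch, not the emulator's decoder):

// simplified model of "cmpxchg [mem], new_value" with rax as the comparand
#include <cstdint>
#include <cassert>

static void cmpxchg_model(std::uint64_t& mem, std::uint64_t& rax, std::uint64_t new_value)
{
    if (mem == rax)
    {
        mem = new_value; // success: memory receives new_value
    }
    else
    {
        rax = mem;       // failure: comparand reloaded, memory untouched
    }
}

int main()
{
    std::uint64_t mem = 42, rax = 42;
    const std::uint64_t before = mem;
    cmpxchg_model(mem, rax, /*new_value=*/rax); // new value equals the comparand
    assert(mem == before);                      // memory cannot change in this case
    return 0;
}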

View File

@@ -203,7 +203,10 @@ void SPUThread::WriteSNR(bool number, u32 value)
 void SPUThread::ProcessCmd(u32 cmd, u32 tag, u32 lsa, u64 ea, u32 size)
 {
-    if (cmd & (MFC_BARRIER_MASK | MFC_FENCE_MASK)) _mm_mfence();
+    if (cmd & (MFC_BARRIER_MASK | MFC_FENCE_MASK))
+    {
+        _mm_mfence();
+    }
     u32 eal = vm::cast(ea, "ea");
@@ -298,7 +301,7 @@ void SPUThread::ListCmd(u32 lsa, u64 ea, u16 tag, u16 size, u32 cmd, MFCReg& MFC
         if (Ini.HLELogging.GetValue() || rec->s.data())
         {
-            LOG_NOTICE(Log::SPU, "*** list element(%d/%d): s = 0x%x, ts = 0x%x, low ea = 0x%x (lsa = 0x%x)", i, list_size, rec->s, rec->ts, rec->ea, lsa | (addr & 0xf));
+            LOG_NOTICE(Log::SPU, "*** list element(%d/%d): s=0x%x, ts=0x%x, eal=0x%x (lsa=0x%x)", i, list_size, rec->s, rec->ts, rec->ea, lsa | (addr & 0xf));
         }
         if (size)
@@ -346,7 +349,7 @@ void SPUThread::EnqMfcCmd(MFCReg& MFCArgs)
     case MFC_PUTR_CMD: // ???
     case MFC_GET_CMD:
     {
-        if (Ini.HLELogging.GetValue()) LOG_NOTICE(Log::SPU, "DMA %s%s%s%s: lsa = 0x%x, ea = 0x%llx, tag = 0x%x, size = 0x%x, cmd = 0x%x",
+        if (Ini.HLELogging.GetValue()) LOG_NOTICE(Log::SPU, "DMA %s%s%s%s: lsa=0x%x, ea=0x%llx, tag=0x%x, size=0x%x, cmd=0x%x",
             (op & MFC_PUT_CMD ? "PUT" : "GET"),
             (op & MFC_RESULT_MASK ? "R" : ""),
             (op & MFC_BARRIER_MASK ? "B" : ""),
@@ -362,7 +365,7 @@ void SPUThread::EnqMfcCmd(MFCReg& MFCArgs)
     case MFC_PUTRL_CMD: // ???
     case MFC_GETL_CMD:
     {
-        if (Ini.HLELogging.GetValue()) LOG_NOTICE(Log::SPU, "DMA %s%s%s%s: lsa = 0x%x, list = 0x%llx, tag = 0x%x, size = 0x%x, cmd = 0x%x",
+        if (Ini.HLELogging.GetValue()) LOG_NOTICE(Log::SPU, "DMA %s%s%s%s: lsa=0x%x, list=0x%llx, tag=0x%x, size=0x%x, cmd=0x%x",
             (op & MFC_PUT_CMD ? "PUT" : "GET"),
             (op & MFC_RESULT_MASK ? "RL" : "L"),
             (op & MFC_BARRIER_MASK ? "B" : ""),
@@ -378,39 +381,16 @@ void SPUThread::EnqMfcCmd(MFCReg& MFCArgs)
     case MFC_PUTLLUC_CMD:
     case MFC_PUTQLLUC_CMD:
     {
-        if (Ini.HLELogging.GetValue() || size != 128) LOG_NOTICE(Log::SPU, "DMA %s: lsa=0x%x, ea = 0x%llx, (tag) = 0x%x, (size) = 0x%x, cmd = 0x%x",
+        if (Ini.HLELogging.GetValue() || size != 128) LOG_NOTICE(Log::SPU, "DMA %s: lsa=0x%x, ea=0x%llx, tag=0x%x, size=0x%x, cmd=0x%x",
             (op == MFC_GETLLAR_CMD ? "GETLLAR" :
             op == MFC_PUTLLC_CMD ? "PUTLLC" :
             op == MFC_PUTLLUC_CMD ? "PUTLLUC" : "PUTQLLUC"),
             lsa, ea, tag, size, cmd);
-        if ((u32)ea != ea)
-        {
-            LOG_ERROR(Log::SPU, "DMA %s: Invalid external address (0x%llx)",
-                (op == MFC_GETLLAR_CMD ? "GETLLAR" :
-                op == MFC_PUTLLC_CMD ? "PUTLLC" :
-                op == MFC_PUTLLUC_CMD ? "PUTLLUC" : "PUTQLLUC"),
-                ea);
-            Emu.Pause();
-            return;
-        }
         if (op == MFC_GETLLAR_CMD) // get reservation
         {
-            //std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack
             vm::reservation_acquire(vm::get_ptr(ls_offset + lsa), vm::cast(ea), 128, [this]()
             {
-                //std::shared_ptr<CPUThread> t = Emu.GetCPU().GetThread(tid);
-                //if (t && (t->GetType() == CPU_THREAD_SPU || t->GetType() == CPU_THREAD_RAW_SPU))
-                //{
-                //    SPUThread& spu = static_cast<SPUThread&>(*t);
-                //    spu.m_events |= SPU_EVENT_LR; // TODO: atomic op
-                //    spu.Notify();
-                //}
                 m_events |= SPU_EVENT_LR; // TODO: atomic op
                 Notify();
             });
@@ -448,16 +428,16 @@ void SPUThread::EnqMfcCmd(MFCReg& MFCArgs)
     }
     default:
-        LOG_ERROR(Log::SPU, "Unknown MFC cmd. (opcode=0x%x, cmd=0x%x, lsa = 0x%x, ea = 0x%llx, tag = 0x%x, size = 0x%x)",
-            op, cmd, lsa, ea, tag, size);
+    {
+        LOG_ERROR(Log::SPU, "Unknown MFC cmd (opcode=0x%x, cmd=0x%x, lsa=0x%x, ea=0x%llx, tag=0x%x, size=0x%x)", op, cmd, lsa, ea, tag, size);
+        Emu.Pause();
         break;
     }
+    }
 }
 bool SPUThread::CheckEvents()
 {
-    // checks events:
     return (m_events & m_event_mask) != 0;
 }
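
The removed "(u32)ea != ea" block above is superseded by the vm::cast(ea) call that remains in the retained code (ProcessCmd already uses "u32 eal = vm::cast(ea, \"ea\");"). As an assumption about its behaviour rather than the actual RPCS3 helper, vm::cast presumably performs a checked narrowing along these lines:

// assumed shape of a checked u64 -> u32 address narrowing (hypothetical, for illustration)
#include <cstdint>
#include <cstdio>
#include <stdexcept>
#include <string>

static std::uint32_t cast_to_u32(std::uint64_t addr, const char* name = "addr")
{
    if (addr > 0xffffffffull)
    {
        // the real helper reports the offending value; here we simply throw
        throw std::range_error(std::string(name) + ": value does not fit in 32 bits");
    }
    return static_cast<std::uint32_t>(addr);
}

int main()
{
    const std::uint64_t ea = 0xd0001000ull;           // hypothetical effective address
    std::printf("eal = 0x%x\n", cast_to_u32(ea, "ea")); // fits, so no error is raised
    return 0;
}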

View File

@@ -297,7 +297,7 @@ bool DynamicMemoryBlockBase::AllocFixed(u32 addr, u32 size)
     for (u32 i = 0; i<m_allocated.size(); ++i)
     {
-        if (addr >= m_allocated[i].addr && addr < m_allocated[i].addr + m_allocated[i].size) return false;
+        if (addr >= m_allocated[i].addr && addr <= m_allocated[i].addr + m_allocated[i].size - 1) return false;
     }
     AppendMem(addr, size);
@@ -342,8 +342,8 @@ u32 DynamicMemoryBlockBase::AllocAlign(u32 size, u32 align)
         for (u32 i = 0; i<m_allocated.size(); ++i)
         {
-            if ((addr >= m_allocated[i].addr && addr < m_allocated[i].addr + m_allocated[i].size) ||
-                (m_allocated[i].addr >= addr && m_allocated[i].addr < addr + exsize))
+            if ((addr >= m_allocated[i].addr && addr <= m_allocated[i].addr + m_allocated[i].size - 1) ||
+                (m_allocated[i].addr >= addr && m_allocated[i].addr <= addr + exsize - 1))
             {
                 is_good_addr = false;
                 addr = m_allocated[i].addr + m_allocated[i].size;
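
The comparisons above now test against the last byte of each block ("addr + size - 1") instead of the one-past-the-end address. With 32-bit addresses, "base + size" wraps to 0 for a block that ends exactly at the 4 GiB boundary, so the old "addr < base + size" test could never be true for such a block; comparing against the inclusive end avoids the wrap. A small worked example with hypothetical values:

// hypothetical block covering the top 256 MiB of the 32-bit address space
#include <cstdint>
#include <cstdio>

int main()
{
    const std::uint32_t base = 0xf0000000, size = 0x10000000;
    const std::uint32_t addr = 0xf8000000; // clearly inside the block

    const bool old_check = addr >= base && addr < base + size;      // base + size wraps to 0 -> false
    const bool new_check = addr >= base && addr <= base + size - 1; // inclusive end 0xffffffff -> true

    std::printf("old: %d, new: %d\n", (int)old_check, (int)new_check); // prints "old: 0, new: 1"
    return 0;
}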

View File

@@ -271,7 +271,7 @@ namespace vm
         return true;
     }
-    bool reservation_query(u32 addr, bool is_writing)
+    bool reservation_query(u32 addr, u32 size, bool is_writing)
     {
         std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
@@ -282,8 +282,30 @@ namespace vm
         if (is_writing)
         {
-            // break the reservation
-            _reservation_break(addr);
+            assert(size);
+            if (addr + size - 1 >= g_reservation_addr && g_reservation_addr + g_reservation_size - 1 >= addr)
+            {
+                // break the reservation if writing access and reservation overlap
+                _reservation_break(addr);
+            }
+            else
+            {
+                // full-size check (isn't accurate enough)
+                if (!check_addr(addr, size))
+                {
+                    return false;
+                }
+                // assume that the same memory page is accessed (isn't accurate enough)
+                if (g_reservation_addr >> 12 != addr >> 12)
+                {
+                    return false;
+                }
+                // write memory using "privileged" access to avoid breaking reservation
+                return false;
+            }
         }
         return true;
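
The writing path of reservation_query() now distinguishes two cases: if the faulting range overlaps the reserved range, the reservation is broken as before; otherwise the function currently returns false after two coarse validity checks, matching the "unfinished" state of the commit. The overlap condition is the usual closed-interval test written with inclusive end points, so a u32 range ending at 0xffffffff cannot wrap. A standalone sketch of that test:

// closed-interval overlap test, mirroring the condition used above
#include <cstdint>
#include <cassert>

static bool ranges_overlap(std::uint32_t a_addr, std::uint32_t a_size,
                           std::uint32_t b_addr, std::uint32_t b_size)
{
    // a_end >= b_addr && b_end >= a_addr, with end = addr + size - 1 (inclusive)
    return a_addr + a_size - 1 >= b_addr && b_addr + b_size - 1 >= a_addr;
}

int main()
{
    assert(ranges_overlap(0x1000, 0x100, 0x10f0, 0x10));  // shares the last 16 bytes
    assert(!ranges_overlap(0x1000, 0x100, 0x1100, 0x10)); // adjacent ranges do not overlap
    return 0;
}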

View File

@@ -39,7 +39,7 @@ namespace vm
     // attempt to atomically update reserved memory
     bool reservation_update(u32 addr, const void* data, u32 size);
     // for internal use
-    bool reservation_query(u32 addr, bool is_writing);
+    bool reservation_query(u32 addr, u32 size, bool is_writing);
     // for internal use
     void reservation_free();
     // perform complete operation