1
0
mirror of https://github.com/RPCS3/rpcs3.git synced 2024-11-25 12:12:50 +01:00

Minor bugfix, cleanup

This commit is contained in:
Nekotekina 2015-07-21 23:14:04 +03:00
parent 3bc6c53eb3
commit a8fcf71f9c
9 changed files with 205 additions and 184 deletions

View File

@ -431,8 +431,7 @@ void SPUThread::process_mfc_cmd(u32 cmd)
case MFC_GETB_CMD:
case MFC_GETF_CMD:
{
do_dma_transfer(cmd, ch_mfc_args);
return;
return do_dma_transfer(cmd, ch_mfc_args);
}
case MFC_PUTL_CMD:
@ -445,8 +444,7 @@ void SPUThread::process_mfc_cmd(u32 cmd)
case MFC_GETLB_CMD:
case MFC_GETLF_CMD:
{
do_dma_list_cmd(cmd, ch_mfc_args);
return;
return do_dma_list_cmd(cmd, ch_mfc_args);
}
case MFC_GETLLAR_CMD: // acquire reservation
@ -458,8 +456,16 @@ void SPUThread::process_mfc_cmd(u32 cmd)
vm::reservation_acquire(vm::get_ptr(offset + ch_mfc_args.lsa), VM_CAST(ch_mfc_args.ea), 128);
ch_atomic_stat.push_uncond(MFC_GETLLAR_SUCCESS);
return;
if (ch_event_stat.load() & SPU_EVENT_AR)
{
ch_event_stat |= SPU_EVENT_LR;
}
else
{
ch_event_stat |= SPU_EVENT_AR;
}
return ch_atomic_stat.push_uncond(MFC_GETLLAR_SUCCESS);
}
case MFC_PUTLLC_CMD: // store conditionally
@ -469,16 +475,26 @@ void SPUThread::process_mfc_cmd(u32 cmd)
break;
}
const bool was_acquired = (ch_event_stat._and_not(SPU_EVENT_AR) & SPU_EVENT_AR) != 0;
if (vm::reservation_update(VM_CAST(ch_mfc_args.ea), vm::get_ptr(offset + ch_mfc_args.lsa), 128))
{
ch_atomic_stat.push_uncond(MFC_PUTLLC_SUCCESS);
if (!was_acquired)
{
throw EXCEPTION("Unexpected: PUTLLC command succeeded, but GETLLAR command not detected");
}
return ch_atomic_stat.push_uncond(MFC_PUTLLC_SUCCESS);
}
else
{
ch_atomic_stat.push_uncond(MFC_PUTLLC_FAILURE);
}
if (was_acquired)
{
ch_event_stat |= SPU_EVENT_LR;
}
return;
return ch_atomic_stat.push_uncond(MFC_PUTLLC_FAILURE);
}
}
case MFC_PUTLLUC_CMD: // store unconditionally
@ -491,17 +507,18 @@ void SPUThread::process_mfc_cmd(u32 cmd)
vm::reservation_op(VM_CAST(ch_mfc_args.ea), 128, [this]()
{
memcpy(vm::priv_ptr(VM_CAST(ch_mfc_args.ea)), vm::get_ptr(offset + ch_mfc_args.lsa), 128);
std::memcpy(vm::priv_ptr(VM_CAST(ch_mfc_args.ea)), vm::get_ptr(offset + ch_mfc_args.lsa), 128);
});
if (ch_event_stat._and_not(SPU_EVENT_AR) & SPU_EVENT_AR && vm::g_tls_did_break_reservation)
{
ch_event_stat |= SPU_EVENT_LR;
}
if (cmd == MFC_PUTLLUC_CMD)
{
ch_atomic_stat.push_uncond(MFC_PUTLLUC_SUCCESS);
}
else
{
// tag may be used here
}
return;
}
@ -510,6 +527,18 @@ void SPUThread::process_mfc_cmd(u32 cmd)
throw EXCEPTION("Unknown command %s (cmd=0x%x, lsa=0x%x, ea=0x%llx, tag=0x%x, size=0x%x)", get_mfc_cmd_name(cmd), cmd, ch_mfc_args.lsa, ch_mfc_args.ea, ch_mfc_args.tag, ch_mfc_args.size);
}
// Compute the SPU event word visible to the program: pending events in
// ch_event_stat filtered through ch_event_mask.
// Side effect: lazily converts a lost lock-line reservation into SPU_EVENT_LR.
// SPU_EVENT_AR is an emulator-internal flag (marked "hack" in the header) set
// while a GETLLAR reservation is held; if it is set but vm::reservation_test()
// reports the reservation gone, the Lock-line Reservation Lost event is raised
// and the internal flag is cleared.
// NOTE(review): the load / |= / &= sequence is not a single atomic update —
// assumes no concurrent writer can interleave here; confirm threading model.
u32 SPUThread::get_events()
{
// check reservation status and set SPU_EVENT_LR if lost
if (ch_event_stat.load() & SPU_EVENT_AR && !vm::reservation_test())
{
ch_event_stat |= SPU_EVENT_LR; // Lock Line Reservation Lost event
ch_event_stat &= ~SPU_EVENT_AR; // reservation no longer held
}
// Re-load after the possible update so a freshly raised LR event is reported
return ch_event_stat.load() & ch_event_mask;
}
u32 SPUThread::get_ch_count(u32 ch)
{
if (Ini.HLELogging.GetValue())
@ -531,7 +560,7 @@ u32 SPUThread::get_ch_count(u32 ch)
case SPU_RdSigNotify1: return ch_snr1.get_count(); break;
case SPU_RdSigNotify2: return ch_snr2.get_count(); break;
case MFC_RdAtomicStat: return ch_atomic_stat.get_count(); break;
case SPU_RdEventStat: return ch_event_stat.load() & ch_event_mask ? 1 : 0; break;
case SPU_RdEventStat: return get_events() ? 1 : 0; break;
}
throw EXCEPTION("Unknown/illegal channel (ch=%d [%s])", ch, ch < 128 ? spu_ch_name[ch] : "???");
@ -648,7 +677,7 @@ u32 SPUThread::get_ch_value(u32 ch)
u32 result;
while ((result = ch_event_stat.load() & ch_event_mask) == 0)
while ((result = get_events()) == 0)
{
CHECK_EMU_STATUS;
@ -1030,6 +1059,11 @@ void SPUThread::set_ch_value(u32 ch, u32 value)
case SPU_WrEventAck:
{
if (value & ~(SPU_EVENT_IMPLEMENTED))
{
break;
}
ch_event_stat &= ~value;
return;
}

View File

@ -1,4 +1,5 @@
#pragma once
#include "Emu/Cell/Common.h"
#include "Emu/CPU/CPUThread.h"
#include "Emu/Cell/SPUContext.h"
@ -11,58 +12,60 @@ struct lv2_int_tag_t;
// SPU Channels
enum : u32
{
SPU_RdEventStat = 0, //Read event status with mask applied
SPU_WrEventMask = 1, //Write event mask
SPU_WrEventAck = 2, //Write end of event processing
SPU_RdSigNotify1 = 3, //Signal notification 1
SPU_RdSigNotify2 = 4, //Signal notification 2
SPU_WrDec = 7, //Write decrementer count
SPU_RdDec = 8, //Read decrementer count
SPU_RdEventMask = 11, //Read event mask
SPU_RdMachStat = 13, //Read SPU run status
SPU_WrSRR0 = 14, //Write SPU machine state save/restore register 0 (SRR0)
SPU_RdSRR0 = 15, //Read SPU machine state save/restore register 0 (SRR0)
SPU_WrOutMbox = 28, //Write outbound mailbox contents
SPU_RdInMbox = 29, //Read inbound mailbox contents
SPU_WrOutIntrMbox = 30, //Write outbound interrupt mailbox contents (interrupting PPU)
SPU_RdEventStat = 0, // Read event status with mask applied
SPU_WrEventMask = 1, // Write event mask
SPU_WrEventAck = 2, // Write end of event processing
SPU_RdSigNotify1 = 3, // Signal notification 1
SPU_RdSigNotify2 = 4, // Signal notification 2
SPU_WrDec = 7, // Write decrementer count
SPU_RdDec = 8, // Read decrementer count
SPU_RdEventMask = 11, // Read event mask
SPU_RdMachStat = 13, // Read SPU run status
SPU_WrSRR0 = 14, // Write SPU machine state save/restore register 0 (SRR0)
SPU_RdSRR0 = 15, // Read SPU machine state save/restore register 0 (SRR0)
SPU_WrOutMbox = 28, // Write outbound mailbox contents
SPU_RdInMbox = 29, // Read inbound mailbox contents
SPU_WrOutIntrMbox = 30, // Write outbound interrupt mailbox contents (interrupting PPU)
};
// MFC Channels
enum : u32
{
MFC_WrMSSyncReq = 9, //Write multisource synchronization request
MFC_RdTagMask = 12, //Read tag mask
MFC_LSA = 16, //Write local memory address command parameter
MFC_EAH = 17, //Write high order DMA effective address command parameter
MFC_EAL = 18, //Write low order DMA effective address command parameter
MFC_Size = 19, //Write DMA transfer size command parameter
MFC_TagID = 20, //Write tag identifier command parameter
MFC_Cmd = 21, //Write and enqueue DMA command with associated class ID
MFC_WrTagMask = 22, //Write tag mask
MFC_WrTagUpdate = 23, //Write request for conditional or unconditional tag status update
MFC_RdTagStat = 24, //Read tag status with mask applied
MFC_RdListStallStat = 25, //Read DMA list stall-and-notify status
MFC_WrListStallAck = 26, //Write DMA list stall-and-notify acknowledge
MFC_RdAtomicStat = 27, //Read completion status of last completed immediate MFC atomic update command
MFC_WrMSSyncReq = 9, // Write multisource synchronization request
MFC_RdTagMask = 12, // Read tag mask
MFC_LSA = 16, // Write local memory address command parameter
MFC_EAH = 17, // Write high order DMA effective address command parameter
MFC_EAL = 18, // Write low order DMA effective address command parameter
MFC_Size = 19, // Write DMA transfer size command parameter
MFC_TagID = 20, // Write tag identifier command parameter
MFC_Cmd = 21, // Write and enqueue DMA command with associated class ID
MFC_WrTagMask = 22, // Write tag mask
MFC_WrTagUpdate = 23, // Write request for conditional or unconditional tag status update
MFC_RdTagStat = 24, // Read tag status with mask applied
MFC_RdListStallStat = 25, // Read DMA list stall-and-notify status
MFC_WrListStallAck = 26, // Write DMA list stall-and-notify acknowledge
MFC_RdAtomicStat = 27, // Read completion status of last completed immediate MFC atomic update command
};
// SPU Events
enum : u32
{
SPU_EVENT_MS = 0x1000, // multisource synchronization event
SPU_EVENT_A = 0x800, // privileged attention event
SPU_EVENT_LR = 0x400, // lock line reservation lost event
SPU_EVENT_S1 = 0x200, // signal notification register 1 available
SPU_EVENT_S2 = 0x100, // signal notification register 2 available
SPU_EVENT_LE = 0x80, // SPU outbound mailbox available
SPU_EVENT_ME = 0x40, // SPU outbound interrupt mailbox available
SPU_EVENT_TM = 0x20, // SPU decrementer became negative (?)
SPU_EVENT_MB = 0x10, // SPU inbound mailbox available
SPU_EVENT_QV = 0x4, // MFC SPU command queue available
SPU_EVENT_SN = 0x2, // MFC list command stall-and-notify event
SPU_EVENT_TG = 0x1, // MFC tag-group status update event
SPU_EVENT_MS = 0x1000, // Multisource Synchronization event
SPU_EVENT_A = 0x800, // Privileged Attention event
SPU_EVENT_LR = 0x400, // Lock Line Reservation Lost event
SPU_EVENT_S1 = 0x200, // Signal Notification Register 1 available
SPU_EVENT_S2 = 0x100, // Signal Notification Register 2 available
SPU_EVENT_LE = 0x80, // SPU Outbound Mailbox available
SPU_EVENT_ME = 0x40, // SPU Outbound Interrupt Mailbox available
SPU_EVENT_TM = 0x20, // SPU Decrementer became negative (?)
SPU_EVENT_MB = 0x10, // SPU Inbound mailbox available
SPU_EVENT_QV = 0x4, // MFC SPU Command Queue available
SPU_EVENT_SN = 0x2, // MFC List Command stall-and-notify event
SPU_EVENT_TG = 0x1, // MFC Tag Group status update event
SPU_EVENT_IMPLEMENTED = SPU_EVENT_LR,
SPU_EVENT_IMPLEMENTED = SPU_EVENT_LR, // Mask of implemented events
SPU_EVENT_AR = 0x80000000, // Set after acquiring the reservation (hack)
};
// SPU Class 0 Interrupts
@ -552,6 +555,7 @@ public:
void do_dma_list_cmd(u32 cmd, spu_mfc_arg_t args);
void process_mfc_cmd(u32 cmd);
u32 get_events();
u32 get_ch_count(u32 ch);
u32 get_ch_value(u32 ch);
void set_ch_value(u32 ch, u32 value);

View File

@ -78,16 +78,21 @@ namespace vm
std::array<atomic_t<u8>, 0x100000000ull / 4096> g_pages = {}; // information about every page
const thread_ctrl_t* const INVALID_THREAD = reinterpret_cast<const thread_ctrl_t*>(~0ull);
class reservation_mutex_t
{
atomic_t<const thread_ctrl_t*> m_owner{};
atomic_t<const thread_ctrl_t*> m_owner;
std::condition_variable m_cv;
std::mutex m_mutex;
public:
reservation_mutex_t() = default;
reservation_mutex_t()
{
m_owner.store(INVALID_THREAD);
}
bool do_notify;
bool do_notify = false;
never_inline void lock()
{
@ -95,9 +100,9 @@ namespace vm
std::unique_lock<std::mutex> lock(m_mutex, std::defer_lock);
while (auto old = m_owner.compare_and_swap(nullptr, owner))
while (!m_owner.compare_and_swap_test(INVALID_THREAD, owner))
{
if (old == owner)
if (m_owner.load() == owner)
{
throw EXCEPTION("Deadlock");
}
@ -118,7 +123,7 @@ namespace vm
{
auto owner = get_current_thread_ctrl();
if (!m_owner.compare_and_swap_test(owner, nullptr))
if (!m_owner.compare_and_swap_test(owner, INVALID_THREAD))
{
throw EXCEPTION("Lost lock");
}
@ -135,30 +140,32 @@ namespace vm
u32 g_reservation_addr = 0;
u32 g_reservation_size = 0;
thread_local bool g_tls_did_break_reservation = false;
reservation_mutex_t g_reservation_mutex;
void _reservation_set(u32 addr, bool no_access = false)
{
#ifdef _WIN32
DWORD old;
if (!VirtualProtect(vm::get_ptr(addr & ~0xfff), 4096, no_access ? PAGE_NOACCESS : PAGE_READONLY, &old))
if (!VirtualProtect(get_ptr(addr & ~0xfff), 4096, no_access ? PAGE_NOACCESS : PAGE_READONLY, &old))
#else
if (mprotect(vm::get_ptr(addr & ~0xfff), 4096, no_access ? PROT_NONE : PROT_READ))
if (mprotect(get_ptr(addr & ~0xfff), 4096, no_access ? PROT_NONE : PROT_READ))
#endif
{
throw EXCEPTION("System failure (addr=0x%x)", addr);
}
}
void _reservation_break(u32 addr)
bool _reservation_break(u32 addr)
{
if (g_reservation_addr >> 12 == addr >> 12)
{
#ifdef _WIN32
DWORD old;
if (!VirtualProtect(vm::get_ptr(addr & ~0xfff), 4096, PAGE_READWRITE, &old))
if (!VirtualProtect(get_ptr(addr & ~0xfff), 4096, PAGE_READWRITE, &old))
#else
if (mprotect(vm::get_ptr(addr & ~0xfff), 4096, PROT_READ | PROT_WRITE))
if (mprotect(get_ptr(addr & ~0xfff), 4096, PROT_READ | PROT_WRITE))
#endif
{
throw EXCEPTION("System failure (addr=0x%x)", addr);
@ -167,22 +174,30 @@ namespace vm
g_reservation_addr = 0;
g_reservation_size = 0;
g_reservation_owner = nullptr;
return true;
}
return false;
}
void reservation_break(u32 addr)
{
std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
_reservation_break(addr);
g_tls_did_break_reservation = _reservation_break(addr);
}
void reservation_acquire(void* data, u32 addr, u32 size)
{
std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
assert(size == 1 || size == 2 || size == 4 || size == 8 || size == 128);
assert((addr + size - 1 & ~0xfff) == (addr & ~0xfff));
const u64 align = 0x80000000ull >> cntlz32(size);
if (!size || !addr || size > 4096 || size != align || addr & (align - 1))
{
throw EXCEPTION("Invalid arguments (addr=0x%x, size=0x%x)", addr, size);
}
const u8 flags = g_pages[addr >> 12].load();
@ -194,11 +209,8 @@ namespace vm
// silent unlocking to prevent priority boost for threads going to break reservation
//g_reservation_mutex.do_notify = false;
// break previous reservation
if (g_reservation_owner)
{
_reservation_break(g_reservation_addr);
}
// break the reservation
g_tls_did_break_reservation = g_reservation_owner && _reservation_break(g_reservation_addr);
// change memory protection to read-only
_reservation_set(addr);
@ -212,15 +224,19 @@ namespace vm
g_reservation_owner = get_current_thread_ctrl();
// copy data
memcpy(data, vm::get_ptr(addr), size);
std::memcpy(data, get_ptr(addr), size);
}
bool reservation_update(u32 addr, const void* data, u32 size)
{
std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
assert(size == 1 || size == 2 || size == 4 || size == 8 || size == 128);
assert((addr + size - 1 & ~0xfff) == (addr & ~0xfff));
const u64 align = 0x80000000ull >> cntlz32(size);
if (!size || !addr || size > 4096 || size != align || addr & (align - 1))
{
throw EXCEPTION("Invalid arguments (addr=0x%x, size=0x%x)", addr, size);
}
if (g_reservation_owner != get_current_thread_ctrl() || g_reservation_addr != addr || g_reservation_size != size)
{
@ -232,7 +248,7 @@ namespace vm
_reservation_set(addr, true);
// update memory using privileged access
memcpy(vm::priv_ptr(addr), data, size);
std::memcpy(priv_ptr(addr), data, size);
// free the reservation and restore memory protection
_reservation_break(addr);
@ -256,7 +272,7 @@ namespace vm
if (size && addr + size - 1 >= g_reservation_addr && g_reservation_addr + g_reservation_size - 1 >= addr)
{
// break the reservation if overlap
_reservation_break(addr);
g_tls_did_break_reservation = _reservation_break(addr);
}
else
{
@ -280,7 +296,7 @@ namespace vm
{
std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
_reservation_break(g_reservation_addr);
g_tls_did_break_reservation = _reservation_break(g_reservation_addr);
}
}
@ -288,16 +304,21 @@ namespace vm
{
std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
assert(size == 1 || size == 2 || size == 4 || size == 8 || size == 128);
assert((addr + size - 1 & ~0xfff) == (addr & ~0xfff));
const u64 align = 0x80000000ull >> cntlz32(size);
// break previous reservation
if (!size || !addr || size > 4096 || size != align || addr & (align - 1))
{
throw EXCEPTION("Invalid arguments (addr=0x%x, size=0x%x)", addr, size);
}
g_tls_did_break_reservation = false;
// check and possibly break previous reservation
if (g_reservation_owner != get_current_thread_ctrl() || g_reservation_addr != addr || g_reservation_size != size)
{
if (g_reservation_owner)
{
_reservation_break(g_reservation_addr);
}
_reservation_break(g_reservation_addr);
g_tls_did_break_reservation = true;
}
// change memory protection to no access
@ -330,8 +351,8 @@ namespace vm
}
}
void* real_addr = vm::get_ptr(addr);
void* priv_addr = vm::priv_ptr(addr);
void* real_addr = get_ptr(addr);
void* priv_addr = priv_ptr(addr);
#ifdef _WIN32
auto protection = flags & page_writable ? PAGE_READWRITE : (flags & page_readable ? PAGE_READONLY : PAGE_NOACCESS);
@ -388,7 +409,7 @@ namespace vm
if (f1 != f2)
{
void* real_addr = vm::get_ptr(i * 4096);
void* real_addr = get_ptr(i * 4096);
#ifdef _WIN32
DWORD old;
@ -430,8 +451,8 @@ namespace vm
}
}
void* real_addr = vm::get_ptr(addr);
void* priv_addr = vm::priv_ptr(addr);
void* real_addr = get_ptr(addr);
void* priv_addr = priv_ptr(addr);
#ifdef _WIN32
DWORD old;
@ -551,8 +572,7 @@ namespace vm
// deallocate all memory
for (auto& entry : m_map)
{
// unmap memory pages
vm::_page_unmap(entry.first, entry.second);
_page_unmap(entry.first, entry.second);
}
}
@ -630,7 +650,7 @@ namespace vm
used -= size;
// unmap memory pages
std::lock_guard<reservation_mutex_t>{ g_reservation_mutex }, vm::_page_unmap(addr, size);
std::lock_guard<reservation_mutex_t>{ g_reservation_mutex }, _page_unmap(addr, size);
return true;
}
@ -664,7 +684,7 @@ namespace vm
{
if (g_pages[i].load())
{
throw EXCEPTION("Unexpected memory usage");
throw EXCEPTION("Unexpected pages allocated (current_addr=0x%x)", i * 4096);
}
}
@ -730,7 +750,7 @@ namespace vm
std::make_shared<block_t>(0xC0000000, 0x10000000), // video
std::make_shared<block_t>(0xD0000000, 0x10000000), // stack
std::make_shared<block_t>(0xE0000000, 0x20000000), // RawSPU
std::make_shared<block_t>(0xE0000000, 0x20000000), // SPU
};
}
}
@ -771,13 +791,13 @@ namespace vm
g_locations.clear();
}
u32 stack_push(CPUThread& CPU, u32 size, u32 align_v, u32& old_pos)
u32 stack_push(CPUThread& cpu, u32 size, u32 align_v, u32& old_pos)
{
switch (CPU.GetType())
switch (cpu.GetType())
{
case CPU_THREAD_PPU:
{
PPUThread& context = static_cast<PPUThread&>(CPU);
PPUThread& context = static_cast<PPUThread&>(cpu);
old_pos = VM_CAST(context.GPR[1]);
context.GPR[1] -= align(size, 8); // room minimal possible size
@ -796,7 +816,7 @@ namespace vm
case CPU_THREAD_SPU:
case CPU_THREAD_RAW_SPU:
{
SPUThread& context = static_cast<SPUThread&>(CPU);
SPUThread& context = static_cast<SPUThread&>(cpu);
old_pos = context.GPR[1]._u32[3];
context.GPR[1]._u32[3] -= align(size, 16);
@ -814,7 +834,7 @@ namespace vm
case CPU_THREAD_ARMv7:
{
ARMv7Context& context = static_cast<ARMv7Thread&>(CPU);
ARMv7Context& context = static_cast<ARMv7Thread&>(cpu);
old_pos = context.SP;
context.SP -= align(size, 4); // room minimal possible size
@ -832,18 +852,18 @@ namespace vm
default:
{
throw EXCEPTION("Invalid thread type (%d)", CPU.GetId());
throw EXCEPTION("Invalid thread type (%d)", cpu.GetId());
}
}
}
void stack_pop(CPUThread& CPU, u32 addr, u32 old_pos)
void stack_pop(CPUThread& cpu, u32 addr, u32 old_pos)
{
switch (CPU.GetType())
switch (cpu.GetType())
{
case CPU_THREAD_PPU:
{
PPUThread& context = static_cast<PPUThread&>(CPU);
PPUThread& context = static_cast<PPUThread&>(cpu);
if (context.GPR[1] != addr)
{
@ -857,7 +877,7 @@ namespace vm
case CPU_THREAD_SPU:
case CPU_THREAD_RAW_SPU:
{
SPUThread& context = static_cast<SPUThread&>(CPU);
SPUThread& context = static_cast<SPUThread&>(cpu);
if (context.GPR[1]._u32[3] + context.offset != addr)
{
@ -870,7 +890,7 @@ namespace vm
case CPU_THREAD_ARMv7:
{
ARMv7Context& context = static_cast<ARMv7Thread&>(CPU);
ARMv7Context& context = static_cast<ARMv7Thread&>(cpu);
if (context.SP != addr)
{
@ -883,7 +903,7 @@ namespace vm
default:
{
throw EXCEPTION("Invalid thread type (%d)", CPU.GetType());
throw EXCEPTION("Invalid thread type (%d)", cpu.GetType());
}
}
}

View File

@ -32,6 +32,14 @@ namespace vm
page_allocated = (1 << 7),
};
// This flag is changed by various reservation functions and may have different meaning.
// reservation_break() - true if the reservation was successfully broken.
// reservation_acquire() - true if another existing reservation was broken.
// reservation_free() - true if this thread's reservation was successfully removed.
// reservation_op() - false if reservation_update() would succeed if called instead.
// Write access to reserved memory - only set to true if the reservation was broken.
extern thread_local bool g_tls_did_break_reservation;
// Unconditionally break the reservation at specified address
void reservation_break(u32 addr);
@ -208,9 +216,9 @@ namespace vm
return cast_ptr<T>::cast(addr, file, line, func);
}
static u8 read8(u32 addr)
static const u8& read8(u32 addr)
{
return get_ref<u8>(addr);
return get_ref<const u8>(addr);
}
static void write8(u32 addr, u8 value)
@ -232,11 +240,6 @@ namespace vm
get_ref<be_t<u16>>(addr) = value;
}
inline void write16(u32 addr, u16 value)
{
get_ref<be_t<u16>>(addr) = value;
}
inline const be_t<u32>& read32(u32 addr)
{
return get_ref<const be_t<u32>>(addr);
@ -247,11 +250,6 @@ namespace vm
get_ref<be_t<u32>>(addr) = value;
}
inline void write32(u32 addr, u32 value)
{
get_ref<be_t<u32>>(addr) = value;
}
inline const be_t<u64>& read64(u32 addr)
{
return get_ref<const be_t<u64>>(addr);
@ -262,11 +260,6 @@ namespace vm
get_ref<be_t<u64>>(addr) = value;
}
inline void write64(u32 addr, u64 value)
{
get_ref<be_t<u64>>(addr) = value;
}
inline const be_t<u128>& read128(u32 addr)
{
return get_ref<const be_t<u128>>(addr);
@ -276,11 +269,6 @@ namespace vm
{
get_ref<be_t<u128>>(addr) = value;
}
inline void write128(u32 addr, u128 value)
{
get_ref<be_t<u128>>(addr) = value;
}
}
namespace psv
@ -297,11 +285,6 @@ namespace vm
get_ref<le_t<u16>>(addr) = value;
}
inline void write16(u32 addr, u16 value)
{
get_ref<le_t<u16>>(addr) = value;
}
inline const le_t<u32>& read32(u32 addr)
{
return get_ref<const le_t<u32>>(addr);
@ -312,11 +295,6 @@ namespace vm
get_ref<le_t<u32>>(addr) = value;
}
inline void write32(u32 addr, u32 value)
{
get_ref<le_t<u32>>(addr) = value;
}
inline const le_t<u64>& read64(u32 addr)
{
return get_ref<const le_t<u64>>(addr);
@ -327,11 +305,6 @@ namespace vm
get_ref<le_t<u64>>(addr) = value;
}
inline void write64(u32 addr, u64 value)
{
get_ref<le_t<u64>>(addr) = value;
}
inline const le_t<u128>& read128(u32 addr)
{
return get_ref<const le_t<u128>>(addr);
@ -341,11 +314,6 @@ namespace vm
{
get_ref<le_t<u128>>(addr) = value;
}
inline void write128(u32 addr, u128 value)
{
get_ref<le_t<u128>>(addr) = value;
}
}
namespace psp

View File

@ -13,7 +13,7 @@ namespace vm
T* m_ptr;
public:
var(u32 size = sizeof(T), u32 align = sizeof(T))
var(u32 size = sizeof32(T), u32 align = alignof32(T))
: m_size(size)
, m_align(align)
{
@ -35,7 +35,7 @@ namespace vm
void alloc()
{
m_addr = Memory.Alloc(size(), m_align);
m_addr = vm::alloc(size(), vm::main, std::max<u32>(m_align, 4096));
m_ptr = vm::get_ptr<T>(m_addr);
}
@ -43,13 +43,13 @@ namespace vm
{
if (m_addr)
{
Memory.Free(m_addr);
vm::dealloc(m_addr);
m_addr = 0;
m_ptr = vm::get_ptr<T>(0u);
}
}
static var make(u32 addr, u32 size = sizeof(T), u32 align = sizeof(T))
static var make(u32 addr, u32 size = sizeof32(T), u32 align = alignof32(T))
{
var res;
@ -149,7 +149,7 @@ namespace vm
T* m_ptr;
public:
var(u32 count, u32 size = sizeof(T), u32 align = sizeof(T))
var(u32 count, u32 size = sizeof32(T), u32 align = alignof32(T))
: m_count(count)
, m_size(size)
, m_align(align)
@ -172,7 +172,7 @@ namespace vm
void alloc()
{
m_addr = Memory.Alloc(size(), m_align);
m_addr = vm::alloc(size(), vm::main, std::max<u32>(m_align, 4096));
m_ptr = vm::get_ptr<T>(m_addr);
}
@ -180,13 +180,13 @@ namespace vm
{
if (m_addr)
{
Memory.Free(m_addr);
vm::dealloc(m_addr);
m_addr = 0;
m_ptr = nullptr;
}
}
static var make(u32 addr, u32 count, u32 size = sizeof(T), u32 align = sizeof(T))
static var make(u32 addr, u32 count, u32 size = sizeof32(T), u32 align = alignof32(T))
{
var res;
@ -344,7 +344,7 @@ namespace vm
T* m_ptr;
public:
var(u32 size = sizeof(T), u32 align = sizeof(T))
var(u32 size = sizeof32(T), u32 align = alignof32(T))
: m_size(size)
, m_align(align)
{
@ -366,7 +366,7 @@ namespace vm
void alloc()
{
m_addr = (u32)Memory.Alloc(size(), m_align);
m_addr = vm::alloc(size(), vm::main, std::max<u32>(m_align, 4096));
m_ptr = vm::get_ptr<T>(m_addr);
}
@ -374,7 +374,7 @@ namespace vm
{
if (m_addr)
{
Memory.Free(m_addr);
vm::dealloc(m_addr);
m_addr = 0;
m_ptr = vm::get_ptr<T>(0u);
}

View File

@ -3,7 +3,8 @@
#include "Emu/SysCalls/Modules.h"
#ifdef _WIN32
#include <winsock.h>
#include <winsock2.h>
#include <WS2tcpip.h>
#else
extern "C"
{
@ -104,7 +105,7 @@ namespace sys_net_func
sockaddr _addr;
memcpy(&_addr, addr.get_ptr(), sizeof(sockaddr));
_addr.sa_family = addr->sa_family;
int _paddrlen;
::socklen_t _paddrlen;
s32 ret = ::accept(s, &_addr, &_paddrlen);
*paddrlen = _paddrlen;
*g_lastError = getLastError();
@ -253,7 +254,7 @@ namespace sys_net_func
sockaddr _addr;
memcpy(&_addr, addr.get_ptr(), sizeof(sockaddr));
_addr.sa_family = addr->sa_family;
int _paddrlen;
::socklen_t _paddrlen;
s32 ret = ::recvfrom(s, buf.get_ptr(), len, flags, &_addr, &_paddrlen);
*paddrlen = _paddrlen;
*g_lastError = getLastError();
@ -416,7 +417,7 @@ s32 sys_net_initialize_network_ex(vm::ptr<sys_net_initialize_parameter> param)
g_lastError = vm::ptr<s32>::make(vm::alloc(4, vm::main));
#ifdef _WIN32
WSADATA wsaData;
WORD wVersionRequested = MAKEWORD(1, 1);
WORD wVersionRequested = MAKEWORD(2, 2);
WSAStartup(wVersionRequested, &wsaData);
#endif
return CELL_OK;

View File

@ -68,14 +68,14 @@ struct sys_event_t
be_t<u64> data3;
};
struct event_t
struct lv2_event_t
{
u64 source;
u64 data1;
u64 data2;
u64 data3;
const u64 source;
const u64 data1;
const u64 data2;
const u64 data3;
event_t(u64 source, u64 data1, u64 data2, u64 data3)
lv2_event_t(u64 source, u64 data1, u64 data2, u64 data3)
: source(source)
, data1(data1)
, data2(data2)
@ -93,7 +93,7 @@ struct lv2_event_queue_t
const u64 key;
const s32 size;
std::deque<event_t> events;
std::deque<lv2_event_t> events;
std::atomic<bool> cancelled;
// TODO: use sleep queue, possibly remove condition variable

View File

@ -887,14 +887,6 @@ s32 sys_spu_thread_group_disconnect_event(u32 id, u32 et)
return CELL_OK;
}
/*
SPU-Side functions:
s32 sys_spu_thread_receive_event(u32 spuq_num, mem32_t d1, mem32_t d2, mem32_t d3);
s32 sys_spu_thread_send_event(u8 spup, u24 data0, u32 data1);
s32 sys_spu_thread_throw_event(u8 spup, u24 data0, u32 data1);
s32 sys_spu_thread_tryreceive_event(u32 spuq_num, mem32_t d1, mem32_t d2, mem32_t d3);
*/
s32 sys_spu_thread_connect_event(u32 id, u32 eq, u32 et, u8 spup)
{
sys_spu.Warning("sys_spu_thread_connect_event(id=0x%x, eq=0x%x, et=%d, spup=%d)", id, eq, et, spup);

View File

@ -365,6 +365,8 @@ void Emulator::Stop()
}
}
LOG_NOTICE(GENERAL, "All threads signaled...");
while (g_thread_count)
{
std::this_thread::sleep_for(std::chrono::milliseconds(1));