Mirror of https://github.com/RPCS3/rpcs3.git (synced 2024-11-22 02:32:36 +01:00)
Commit bf1e29e227
@@ -263,6 +263,7 @@ namespace fmt
}
};

#ifdef __APPLE__
template<>
struct get_fmt<unsigned long>
{
@@ -286,6 +287,7 @@ namespace fmt
}
}
};
#endif

template<>
struct get_fmt<u64>
@@ -383,6 +385,7 @@ namespace fmt
}
};

#ifdef __APPLE__
template<>
struct get_fmt<long>
{
@@ -406,6 +409,7 @@ namespace fmt
}
}
};
#endif

template<>
struct get_fmt<s64>

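Note on the guards above: on macOS the project's u64 is typically unsigned long long, so unsigned long is a distinct 64-bit type that needs its own get_fmt specialization, while on common LP64 Linux builds u64 is unsigned long and an extra specialization would collide with the existing one. A minimal standalone sketch of that situation (the type aliases here are assumptions for illustration, not taken from this commit):

#include <cstdint>

#ifdef __APPLE__
using u64 = unsigned long long;   // macOS convention: unsigned long is a *different* 64-bit type
#else
using u64 = unsigned long;        // common LP64 Linux convention (assumption)
#endif

template<typename T> struct get_fmt;          // primary template, specialized per type
template<> struct get_fmt<u64> { /* ... */ }; // always present

#ifdef __APPLE__
// Only needed where unsigned long is not already covered by get_fmt<u64>;
// without the guard this would be a duplicate specialization on Linux.
template<> struct get_fmt<unsigned long> { /* ... */ };
#endif
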
@@ -9,8 +9,10 @@
#ifdef _WIN32
#include <windows.h>
#else
#ifdef __APPLE__
#define _XOPEN_SOURCE
#define __USE_GNU
#endif
#include <signal.h>
#include <ucontext.h>
#endif
@@ -210,49 +212,50 @@ typedef ucontext_t x64_context;

uint64_t* darwin_x64reg(x64_context *context, int reg)
{
auto *state = &context->uc_mcontext->__ss;
switch(reg)
{
case 0: // RAX
return &state->__rax;
case 1: // RCX
return &state->__rcx;
case 2: // RDX
return &state->__rdx;
case 3: // RBX
return &state->__rbx;
case 4: // RSP
return &state->__rsp;
case 5: // RBP
return &state->__rbp;
case 6: // RSI
return &state->__rsi;
case 7: // RDI
return &state->__rdi;
case 8: // R8
return &state->__r8;
case 9: // R9
return &state->__r9;
case 10: // R10
return &state->__r10;
case 11: // R11
return &state->__r11;
case 12: // R12
return &state->__r12;
case 13: // R13
return &state->__r13;
case 14: // R14
return &state->__r14;
case 15: // R15
return &state->__r15;
case 16: // RIP
return &state->__rip;
default: // FAIL
assert(0);
}
}

#else

typedef decltype(REG_RIP) reg_table_t;

static const reg_table_t reg_table[17] =
@@ -262,6 +265,7 @@ static const reg_table_t reg_table[17] =
};

#define X64REG(context, reg) (&context->uc_mcontext.gregs[reg_table[reg]])

#endif // __APPLE__

#endif
@@ -379,7 +383,7 @@ void signal_handler(int sig, siginfo_t* info, void* uct)
const u64 addr64 = (u64)info->si_addr - (u64)vm::g_base_addr;

#ifdef __APPLE__
const bool is_writing = ((ucontext_t*)uct)->uc_mcontext->__es.__err & 0x2;
#else
const bool is_writing = ((ucontext_t*)uct)->uc_mcontext.gregs[REG_ERR] & 0x2;
#endif

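For orientation, the three-argument handler signature used above (int, siginfo_t*, void*) is the SA_SIGINFO form; a minimal, self-contained sketch of how such a handler is installed on the POSIX side (a generic example, not code from this commit):

#include <csignal>
#include <cstdio>
#include <cstdlib>

static void fault_handler(int sig, siginfo_t* info, void* uct)
{
    // info->si_addr holds the faulting address; uct points at a ucontext_t with the register state
    std::printf("signal %d at %p\n", sig, info->si_addr);
    std::_Exit(1); // async-signal-safe exit
}

int main()
{
    struct sigaction sa = {};
    sa.sa_flags = SA_SIGINFO;         // request the (int, siginfo_t*, void*) handler form
    sa.sa_sigaction = fault_handler;
    sigemptyset(&sa.sa_mask);
    sigaction(SIGSEGV, &sa, nullptr); // faulting accesses are now routed to fault_handler
}
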
@@ -1281,7 +1281,7 @@ void armv7_decoder_initialize(u32 addr, u32 end_addr, bool dump)
const u32 i2 = (code.data >> 11) & 0x1 ^ s ^ 1;
const u32 target = (addr + 4 & ~3) + sign<25, u32>(s << 24 | i2 << 23 | i1 << 22 | (code.data & 0x3ff0000) >> 4 | (code.data & 0x7ff) << 1);

const u32 instr = Memory.IsGoodAddr(target, 4) ? vm::psv::read32(target) : 0;
const u32 instr = vm::check_addr(target, 4) ? vm::psv::read32(target) : 0;

// possibly a call to imported function:
if (target >= end_addr && ((target - end_addr) % 16) == 0 && (instr & 0xfff000f0) == 0xe0700090)

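The sign<25, u32>(...) call above sign-extends a 25-bit branch immediate to 32 bits; one plausible standalone definition of such a helper (an assumption for illustration — the project's actual template may differ):

#include <cassert>
#include <cstdint>

using u32 = std::uint32_t;

// Sign-extend the low `bits` bits of `value` to the full width of T.
template<u32 bits, typename T>
T sign(T value)
{
    static_assert(bits > 0 && bits <= sizeof(T) * 8, "bit count out of range");
    const T mask = T(1) << (bits - 1);  // sign bit of the narrow field
    value &= (mask << 1) - 1;           // keep only the low `bits` bits
    return (value ^ mask) - mask;       // two's-complement sign extension (wraps correctly for unsigned T)
}

int main()
{
    assert(sign<25, u32>(0x1FFFFFE) == u32(-2)); // 25-bit -2 extends to 32-bit -2
    assert(sign<25, u32>(0x0000010) == 0x10);    // positive values pass through
}
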
@@ -39,7 +39,7 @@ std::array<std::atomic<u32>, TLS_MAX> g_armv7_tls_owners;

void armv7_init_tls()
{
g_armv7_tls_start = Emu.GetTLSMemsz() ? vm::cast(Memory.PSV.RAM.AllocAlign(Emu.GetTLSMemsz() * TLS_MAX, 4096)) : 0;
g_armv7_tls_start = Emu.GetTLSMemsz() ? Memory.PSV.RAM.AllocAlign(Emu.GetTLSMemsz() * TLS_MAX, 4096) : 0;

for (auto& v : g_armv7_tls_owners)
{
@@ -126,7 +126,7 @@ void ARMv7Thread::InitStack()
if (!m_stack_addr)
{
assert(m_stack_size);
m_stack_addr = vm::cast(Memory.Alloc(m_stack_size, 4096));
m_stack_addr = Memory.Alloc(m_stack_size, 4096);
}
}

@@ -269,7 +269,7 @@ cpu_thread& armv7_thread::args(std::initializer_list<std::string> values)
argc++;
}

argv = vm::cast(Memory.PSV.RAM.AllocAlign(argv_size, 4096)); // allocate arg list
argv = Memory.PSV.RAM.AllocAlign(argv_size, 4096); // allocate arg list
memcpy(vm::get_ptr(argv), argv_data.data(), argv_size); // copy arg list

return *this;

@@ -2504,7 +2504,7 @@ private:
}
void LWARX(u32 rd, u32 ra, u32 rb)
{
const u32 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];

be_t<u32> value;
vm::reservation_acquire(&value, vm::cast(addr), sizeof(value));

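For context, LWARX pairs with STWCX. as a load-reserved / store-conditional sequence, which the emulator models with vm::reservation_acquire / vm::reservation_update (declared later in this diff). A deliberately simplified, single-threaded toy model of that pairing — not the emulator's implementation:

#include <atomic>
#include <cassert>
#include <cstdint>

// Toy model of a single-address reservation.
static std::atomic<std::uint64_t> g_version{0};
static std::uint32_t g_memory = 0;                  // one emulated word
static thread_local std::uint64_t t_reserved = 0;   // version observed at "lwarx" time

std::uint32_t lwarx()                                // load and reserve
{
    t_reserved = g_version.load();
    return g_memory;
}

bool stwcx(std::uint32_t value)                      // store only if the reservation still holds
{
    if (g_version.load() != t_reserved) return false; // reservation was broken
    g_memory = value;
    g_version.fetch_add(1);                          // a successful store breaks other reservations
    return true;
}

int main()
{
    std::uint32_t v = lwarx();
    assert(stwcx(v + 1));                            // uncontended: store-conditional succeeds
    assert(g_memory == 1);
}
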
@@ -85,7 +85,7 @@ void PPUThread::InitStack()
if (!m_stack_addr)
{
assert(m_stack_size);
m_stack_addr = vm::cast(Memory.StackMem.AllocAlign(m_stack_size, 4096));
m_stack_addr = Memory.StackMem.AllocAlign(m_stack_size, 4096);
}
}

@@ -887,6 +887,7 @@ struct cast_ppu_gpr<u32, false>
}
};

#ifdef __APPLE__
template<>
struct cast_ppu_gpr<unsigned long, false>
{
@@ -900,6 +901,7 @@ struct cast_ppu_gpr<unsigned long, false>
return static_cast<unsigned long>(reg);
}
};
#endif

template<>
struct cast_ppu_gpr<u64, false>

@@ -19,9 +19,9 @@ RawSPUThread::~RawSPUThread()
Memory.CloseRawSPU(this, m_index);
}

bool RawSPUThread::Read32(const u64 addr, u32* value)
bool RawSPUThread::Read32(const u32 addr, u32* value)
{
const u64 offset = addr - GetStartAddr() - RAW_SPU_PROB_OFFSET;
const u32 offset = addr - GetStartAddr() - RAW_SPU_PROB_OFFSET;

switch (offset)
{
@@ -68,9 +68,9 @@ bool RawSPUThread::Read32(const u64 addr, u32* value)
return true;
}

bool RawSPUThread::Write32(const u64 addr, const u32 value)
bool RawSPUThread::Write32(const u32 addr, const u32 value)
{
const u64 offset = addr - GetStartAddr() - RAW_SPU_PROB_OFFSET;
const u32 offset = addr - GetStartAddr() - RAW_SPU_PROB_OFFSET;

switch (offset)
{
@@ -198,7 +198,7 @@ bool RawSPUThread::Write32(const u64 addr, const u32 value)

void RawSPUThread::InitRegs()
{
ls_offset = m_offset = (u32)GetStartAddr() + RAW_SPU_LS_OFFSET;
ls_offset = m_offset = GetStartAddr() + RAW_SPU_LS_OFFSET;
SPUThread::InitRegs();
}

@@ -213,5 +213,5 @@ void RawSPUThread::Task()

SPUThread::Task();

SPU.NPC.SetValue((u32)PC);
SPU.NPC.SetValue(PC);
}

@@ -16,9 +16,8 @@ public:
RawSPUThread(CPUThreadType type = CPU_THREAD_RAW_SPU);
virtual ~RawSPUThread();

bool Read32(const u64 addr, u32* value);

bool Write32(const u64 addr, const u32 value);
bool Read32(const u32 addr, u32* value);
bool Write32(const u32 addr, const u32 value);

public:
virtual void InitRegs();

@@ -437,7 +437,7 @@ void SPUThread::EnqMfcCmd(MFCReg& MFCArgs)
{
//std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack

vm::reservation_acquire(vm::get_ptr(ls_offset + lsa), ea, 128, [this]()
vm::reservation_acquire(vm::get_ptr(ls_offset + lsa), vm::cast(ea), 128, [this]()
{
//std::shared_ptr<CPUThread> t = Emu.GetCPU().GetThread(tid);

@@ -457,7 +457,7 @@ void SPUThread::EnqMfcCmd(MFCReg& MFCArgs)
}
else if (op == MFC_PUTLLC_CMD) // store conditional
{
if (vm::reservation_update(ea, vm::get_ptr(ls_offset + lsa), 128))
if (vm::reservation_update(vm::cast(ea), vm::get_ptr(ls_offset + lsa), 128))
{
MFCArgs.AtomicStat.PushUncond(MFC_PUTLLC_SUCCESS);
}
@@ -468,7 +468,7 @@ void SPUThread::EnqMfcCmd(MFCReg& MFCArgs)
}
else // store unconditional (may be wrong)
{
vm::reservation_op(ea, 128, [this, tag, lsa, ea]()
vm::reservation_op(vm::cast(ea), 128, [this, tag, lsa, ea]()
{
memcpy(vm::get_priv_ptr(vm::cast(ea)), vm::get_ptr(ls_offset + lsa), 128);
});

@@ -4,61 +4,8 @@
#include "Memory.h"
#include "Emu/Cell/RawSPUThread.h"

#ifndef _WIN32
#include <sys/mman.h>

/* OS X uses MAP_ANON instead of MAP_ANONYMOUS */
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif
#else
#include <Windows.h>
#endif

MemoryBase Memory;

void MemoryBase::RegisterPages(u64 addr, u32 size)
{
LV2_LOCK(0);

//LOG_NOTICE(MEMORY, "RegisterPages(addr=0x%llx, size=0x%x)", addr, size);
for (u64 i = addr / 4096; i < (addr + size) / 4096; i++)
{
if (i >= sizeof(m_pages) / sizeof(m_pages[0]))
{
LOG_ERROR(MEMORY, "%s(): invalid address 0x%llx", __FUNCTION__, i * 4096);
break;
}
if (m_pages[i])
{
LOG_ERROR(MEMORY, "Page already registered (addr=0x%llx)", i * 4096);
Emu.Pause();
}
m_pages[i] = 1; // TODO: define page parameters
}
}

void MemoryBase::UnregisterPages(u64 addr, u32 size)
{
LV2_LOCK(0);

//LOG_NOTICE(MEMORY, "UnregisterPages(addr=0x%llx, size=0x%x)", addr, size);
for (u64 i = addr / 4096; i < (addr + size) / 4096; i++)
{
if (i >= sizeof(m_pages) / sizeof(m_pages[0]))
{
LOG_ERROR(MEMORY, "%s(): invalid address 0x%llx", __FUNCTION__, i * 4096);
break;
}
if (!m_pages[i])
{
LOG_ERROR(MEMORY, "Page not registered (addr=0x%llx)", i * 4096);
Emu.Pause();
}
m_pages[i] = 0; // TODO: define page parameters
}
}

u32 MemoryBase::InitRawSPU(MemoryBlock* raw_spu)
{
LV2_LOCK(0);
@@ -99,10 +46,9 @@ void MemoryBase::Init(MemoryType type)
if (m_inited) return;
m_inited = true;

memset(m_pages, 0, sizeof(m_pages));
memset(RawSPUMem, 0, sizeof(RawSPUMem));

LOG_NOTICE(MEMORY, "Initializing memory: base_addr = 0x%llx, priv_addr = 0x%llx", (u64)vm::g_base_addr, (u64)vm::g_priv_addr);
LOG_NOTICE(MEMORY, "Initializing memory: g_base_addr = 0x%llx, g_priv_addr = 0x%llx", (u64)vm::g_base_addr, (u64)vm::g_priv_addr);

#ifdef _WIN32
if (!vm::g_base_addr || !vm::g_priv_addr)
@@ -117,11 +63,8 @@ void MemoryBase::Init(MemoryType type)
switch (type)
{
case Memory_PS3:
MemoryBlocks.push_back(MainMem.SetRange(0x00010000, 0x2FFF0000));
MemoryBlocks.push_back(UserMemory = PRXMem.SetRange(0x30000000, 0x10000000));
MemoryBlocks.push_back(RSXCMDMem.SetRange(0x40000000, 0x10000000));
MemoryBlocks.push_back(SPRXMem.SetRange(0x50000000, 0x10000000));
MemoryBlocks.push_back(MmaperMem.SetRange(0xB0000000, 0x10000000));
MemoryBlocks.push_back(MainMem.SetRange(0x00010000, 0x1FFF0000));
MemoryBlocks.push_back(UserMemory = Userspace.SetRange(0x20000000, 0x10000000));
MemoryBlocks.push_back(RSXFBMem.SetRange(0xC0000000, 0x10000000));
MemoryBlocks.push_back(StackMem.SetRange(0xD0000000, 0x10000000));
break;
@@ -187,29 +130,27 @@ u32 MemoryBase::ReadMMIO32(u32 addr)
throw fmt::Format("%s(addr=0x%x) failed", __FUNCTION__, addr);
}

bool MemoryBase::Map(const u64 addr, const u32 size)
bool MemoryBase::Map(const u32 addr, const u32 size)
{
assert(size && (size | addr) % 4096 == 0);

LV2_LOCK(0);

if ((addr | (addr + size)) & ~0xFFFFFFFFull)
for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
{
return false;
}
else
{
for (u32 i = (u32)addr / 4096; i <= ((u32)addr + size - 1) / 4096; i++)
if (vm::check_addr(i * 4096, 4096))
{
if (m_pages[i]) return false;
return false;
}
}

MemoryBlocks.push_back((new MemoryBlock())->SetRange(addr, size));

LOG_WARNING(MEMORY, "Memory mapped at 0x%llx: size=0x%x", addr, size);
LOG_WARNING(MEMORY, "Memory mapped at 0x%x: size=0x%x", addr, size);
return true;
}

bool MemoryBase::Unmap(const u64 addr)
bool MemoryBase::Unmap(const u32 addr)
{
LV2_LOCK(0);

@@ -225,46 +166,18 @@ bool MemoryBase::Unmap(const u64 addr)
return false;
}

MemBlockInfo::MemBlockInfo(u64 _addr, u32 _size)
: MemInfo(_addr, PAGE_4K(_size))
MemBlockInfo::MemBlockInfo(u32 addr, u32 size)
: MemInfo(addr, size)
{
void* real_addr = vm::get_ptr(vm::cast(_addr));
void* priv_addr = vm::get_priv_ptr(vm::cast(_addr));

#ifdef _WIN32
if (!VirtualAlloc(priv_addr, size, MEM_COMMIT, PAGE_READWRITE) || !VirtualAlloc(real_addr, size, MEM_COMMIT, PAGE_READWRITE))
#else
if (mprotect(real_addr, size, PROT_READ | PROT_WRITE) || mprotect(priv_addr, size, PROT_READ | PROT_WRITE))
#endif
{
LOG_ERROR(MEMORY, "Memory allocation failed (addr=0x%llx, size=0x%x)", addr, size);
Emu.Pause();
}
else
{
Memory.RegisterPages(_addr, PAGE_4K(_size));

mem = real_addr;
memset(mem, 0, size); // ???
}
vm::page_map(addr, size, vm::page_readable | vm::page_writable | vm::page_executable);
}

void MemBlockInfo::Free()
{
if (mem)
if (addr && size)
{
Memory.UnregisterPages(addr, size);
#ifdef _WIN32
DWORD old;

if (!VirtualProtect(mem, size, PAGE_NOACCESS, &old) || !VirtualProtect(vm::get_priv_ptr(vm::cast(addr)), size, PAGE_NOACCESS, &old))
#else
if (mprotect(mem, size, PROT_NONE) || mprotect(vm::get_priv_ptr(vm::cast(addr)), size, PROT_NONE))
#endif
{
LOG_ERROR(MEMORY, "Memory deallocation failed (addr=0x%llx, size=0x%x)", addr, size);
Emu.Pause();
}
vm::page_unmap(addr, size);
addr = size = 0;
}
}

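The constructor/Free pair above now delegates to vm::page_map / vm::page_unmap (defined further down in this diff). A hedged sketch of the same pairing wrapped in an RAII helper — it assumes those two functions, the vm::page_* flag constants, and the project's u32 exactly as declared in this commit, and is not part of the commit itself:

// Sketch only: ties a mapped guest page range to an object's lifetime.
struct mapped_range
{
    u32 addr = 0;
    u32 size = 0;

    mapped_range(u32 addr_, u32 size_) : addr(addr_), size(size_)
    {
        // both arguments must be 4096-aligned, as page_map asserts
        vm::page_map(addr, size, vm::page_readable | vm::page_writable);
    }

    ~mapped_range()
    {
        if (addr && size) vm::page_unmap(addr, size); // mirrors MemBlockInfo::Free()
    }

    mapped_range(const mapped_range&) = delete;            // non-copyable: owns the mapping
    mapped_range& operator=(const mapped_range&) = delete;
};
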
@@ -283,21 +196,14 @@ void MemoryBlock::Init()
{
range_start = 0;
range_size = 0;

mem = vm::get_ptr<u8>(0u);
}

void MemoryBlock::InitMemory()
{
if (!range_size)
{
mem = vm::get_ptr<u8>(range_start);
}
else
if (range_size)
{
Free();
mem_inf = new MemBlockInfo(range_start, range_size);
mem = (u8*)mem_inf->mem;
}
}

@@ -316,15 +222,8 @@ void MemoryBlock::Delete()
Init();
}

u64 MemoryBlock::FixAddr(const u64 addr) const
MemoryBlock* MemoryBlock::SetRange(const u32 start, const u32 size)
{
return addr - GetStartAddr();
}

MemoryBlock* MemoryBlock::SetRange(const u64 start, const u32 size)
{
if (start + size > 0x100000000) return nullptr;

range_start = start;
range_size = size;

@@ -332,11 +231,6 @@ MemoryBlock* MemoryBlock::SetRange(const u64 start, const u32 size)
return this;
}

bool MemoryBlock::IsMyAddress(const u64 addr)
{
return mem && addr >= GetStartAddr() && addr < GetEndAddr();
}

DynamicMemoryBlockBase::DynamicMemoryBlockBase()
: MemoryBlock()
, m_max_size(0)
@@ -357,22 +251,12 @@ const u32 DynamicMemoryBlockBase::GetUsedSize() const
return size;
}

bool DynamicMemoryBlockBase::IsInMyRange(const u64 addr)
bool DynamicMemoryBlockBase::IsInMyRange(const u32 addr, const u32 size)
{
return addr >= MemoryBlock::GetStartAddr() && addr < MemoryBlock::GetStartAddr() + GetSize();
return addr >= MemoryBlock::GetStartAddr() && addr + size - 1 <= MemoryBlock::GetEndAddr();
}

bool DynamicMemoryBlockBase::IsInMyRange(const u64 addr, const u32 size)
{
return IsInMyRange(addr) && IsInMyRange(addr + size - 1);
}

bool DynamicMemoryBlockBase::IsMyAddress(const u64 addr)
{
return IsInMyRange(addr);
}

MemoryBlock* DynamicMemoryBlockBase::SetRange(const u64 start, const u32 size)
MemoryBlock* DynamicMemoryBlockBase::SetRange(const u32 start, const u32 size)
{
LV2_LOCK(0);

@@ -396,8 +280,10 @@ void DynamicMemoryBlockBase::Delete()
MemoryBlock::Delete();
}

bool DynamicMemoryBlockBase::AllocFixed(u64 addr, u32 size)
bool DynamicMemoryBlockBase::AllocFixed(u32 addr, u32 size)
{
assert(size);

size = PAGE_4K(size + (addr & 4095)); // align size

addr &= ~4095; // align start address
@@ -420,13 +306,15 @@ bool DynamicMemoryBlockBase::AllocFixed(u64 addr, u32 size)
return true;
}

void DynamicMemoryBlockBase::AppendMem(u64 addr, u32 size) /* private */
void DynamicMemoryBlockBase::AppendMem(u32 addr, u32 size) /* private */
{
m_allocated.emplace_back(addr, size);
}

u64 DynamicMemoryBlockBase::AllocAlign(u32 size, u32 align)
u32 DynamicMemoryBlockBase::AllocAlign(u32 size, u32 align)
{
assert(size && align);

if (!MemoryBlock::GetStartAddr())
{
LOG_ERROR(MEMORY, "DynamicMemoryBlockBase::AllocAlign(size=0x%x, align=0x%x): memory block not initialized", size, align);
@@ -449,7 +337,7 @@ u64 DynamicMemoryBlockBase::AllocAlign(u32 size, u32 align)

LV2_LOCK(0);

for (u64 addr = MemoryBlock::GetStartAddr(); addr <= MemoryBlock::GetEndAddr() - exsize;)
for (u32 addr = MemoryBlock::GetStartAddr(); addr <= MemoryBlock::GetEndAddr() - exsize;)
{
bool is_good_addr = true;

@@ -471,7 +359,7 @@ u64 DynamicMemoryBlockBase::AllocAlign(u32 size, u32 align)
addr = (addr + (align - 1)) & ~(align - 1);
}

//LOG_NOTICE(MEMORY, "AllocAlign(size=0x%x) -> 0x%llx", size, addr);
//LOG_NOTICE(MEMORY, "AllocAlign(size=0x%x) -> 0x%x", size, addr);

AppendMem(addr, size);

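The search loop above rounds candidate addresses up with (addr + (align - 1)) & ~(align - 1), the usual power-of-two align-up idiom; a tiny self-contained check of that formula:

#include <cassert>
#include <cstdint>

// Round `addr` up to the next multiple of `align`; valid only for power-of-two `align`.
static std::uint32_t align_up(std::uint32_t addr, std::uint32_t align)
{
    return (addr + (align - 1)) & ~(align - 1);
}

int main()
{
    assert(align_up(0x12345, 0x1000) == 0x13000); // rounds up to the next 4 KiB page
    assert(align_up(0x13000, 0x1000) == 0x13000); // already aligned values are unchanged
    assert(align_up(1, 16) == 16);
}
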
@@ -486,7 +374,7 @@ bool DynamicMemoryBlockBase::Alloc()
return AllocAlign(GetSize() - GetUsedSize()) != 0;
}

bool DynamicMemoryBlockBase::Free(u64 addr)
bool DynamicMemoryBlockBase::Free(u32 addr)
{
LV2_LOCK(0);

@@ -494,52 +382,26 @@ bool DynamicMemoryBlockBase::Free(u64 addr)
{
if (addr == m_allocated[num].addr)
{
//LOG_NOTICE(MEMORY, "Free(0x%llx)", addr);
//LOG_NOTICE(MEMORY, "Free(0x%x)", addr);

m_allocated.erase(m_allocated.begin() + num);
return true;
}
}

LOG_ERROR(MEMORY, "DynamicMemoryBlock::Free(addr=0x%llx): failed", addr);
LOG_ERROR(MEMORY, "DynamicMemoryBlock::Free(addr=0x%x): failed", addr);
for (u32 i = 0; i < m_allocated.size(); i++)
{
LOG_NOTICE(MEMORY, "*** Memory Block: addr = 0x%llx, size = 0x%x", m_allocated[i].addr, m_allocated[i].size);
LOG_NOTICE(MEMORY, "*** Memory Block: addr = 0x%x, size = 0x%x", m_allocated[i].addr, m_allocated[i].size);
}
return false;
}

u8* DynamicMemoryBlockBase::GetMem(u64 addr) const
{
return MemoryBlock::GetMem(addr);
}

bool DynamicMemoryBlockBase::IsLocked(u64 addr)
{
LOG_ERROR(MEMORY, "DynamicMemoryBlockBase::IsLocked() not implemented");
Emu.Pause();
return false;
}

bool DynamicMemoryBlockBase::Lock(u64 addr, u32 size)
{
LOG_ERROR(MEMORY, "DynamicMemoryBlockBase::Lock() not implemented");
Emu.Pause();
return false;
}

bool DynamicMemoryBlockBase::Unlock(u64 addr, u32 size)
{
LOG_ERROR(MEMORY, "DynamicMemoryBlockBase::Unlock() not implemented");
Emu.Pause();
return false;
}

VirtualMemoryBlock::VirtualMemoryBlock() : MemoryBlock(), m_reserve_size(0)
{
}

MemoryBlock* VirtualMemoryBlock::SetRange(const u64 start, const u32 size)
MemoryBlock* VirtualMemoryBlock::SetRange(const u32 start, const u32 size)
{
range_start = start;
range_size = size;
@@ -547,32 +409,16 @@ MemoryBlock* VirtualMemoryBlock::SetRange(const u64 start, const u32 size)
return this;
}

bool VirtualMemoryBlock::IsInMyRange(const u64 addr)
bool VirtualMemoryBlock::IsInMyRange(const u32 addr, const u32 size)
{
return addr >= GetStartAddr() && addr < GetStartAddr() + GetSize() - GetReservedAmount();
return addr >= GetStartAddr() && addr + size - 1 <= GetEndAddr() - GetReservedAmount();
}

bool VirtualMemoryBlock::IsInMyRange(const u64 addr, const u32 size)
u32 VirtualMemoryBlock::Map(u32 realaddr, u32 size)
{
return IsInMyRange(addr) && IsInMyRange(addr + size - 1);
}
assert(size);

bool VirtualMemoryBlock::IsMyAddress(const u64 addr)
{
for (u32 i = 0; i<m_mapped_memory.size(); ++i)
{
if (addr >= m_mapped_memory[i].addr && addr < m_mapped_memory[i].addr + m_mapped_memory[i].size)
{
return true;
}
}

return false;
}

u64 VirtualMemoryBlock::Map(u64 realaddr, u32 size)
{
for (u64 addr = GetStartAddr(); addr <= GetEndAddr() - GetReservedAmount() - size;)
for (u32 addr = GetStartAddr(); addr <= GetEndAddr() - GetReservedAmount() - size;)
{
bool is_good_addr = true;

@@ -598,16 +444,28 @@ u64 VirtualMemoryBlock::Map(u64 realaddr, u32 size)
return 0;
}

bool VirtualMemoryBlock::Map(u64 realaddr, u32 size, u64 addr)
bool VirtualMemoryBlock::Map(u32 realaddr, u32 size, u32 addr)
{
if (!IsInMyRange(addr, size) && (IsMyAddress(addr) || IsMyAddress(addr + size - 1)))
assert(size);

if (!IsInMyRange(addr, size))
{
return false;
}

for (u32 i = 0; i<m_mapped_memory.size(); ++i)
{
if (addr >= m_mapped_memory[i].addr && addr + size - 1 <= m_mapped_memory[i].addr + m_mapped_memory[i].size - 1)
{
return false;
}
}

m_mapped_memory.emplace_back(addr, realaddr, size);
return true;
}

bool VirtualMemoryBlock::UnmapRealAddress(u64 realaddr, u32& size)
bool VirtualMemoryBlock::UnmapRealAddress(u32 realaddr, u32& size)
{
for (u32 i = 0; i<m_mapped_memory.size(); ++i)
{
@@ -622,7 +480,7 @@ bool VirtualMemoryBlock::UnmapRealAddress(u64 realaddr, u32& size)
return false;
}

bool VirtualMemoryBlock::UnmapAddress(u64 addr, u32& size)
bool VirtualMemoryBlock::UnmapAddress(u32 addr, u32& size)
{
for (u32 i = 0; i<m_mapped_memory.size(); ++i)
{
@@ -637,25 +495,25 @@ bool VirtualMemoryBlock::UnmapAddress(u64 addr, u32& size)
return false;
}

bool VirtualMemoryBlock::Read32(const u64 addr, u32* value)
bool VirtualMemoryBlock::Read32(const u32 addr, u32* value)
{
u64 realAddr;
u32 realAddr;
if (!getRealAddr(addr, realAddr))
return false;
*value = vm::read32(realAddr);
return true;
}

bool VirtualMemoryBlock::Write32(const u64 addr, const u32 value)
bool VirtualMemoryBlock::Write32(const u32 addr, const u32 value)
{
u64 realAddr;
u32 realAddr;
if (!getRealAddr(addr, realAddr))
return false;
vm::write32(realAddr, value);
return true;
}

bool VirtualMemoryBlock::getRealAddr(u64 addr, u64& result)
bool VirtualMemoryBlock::getRealAddr(u32 addr, u32& result)
{
for (u32 i = 0; i<m_mapped_memory.size(); ++i)
{
@@ -669,7 +527,7 @@ bool VirtualMemoryBlock::getRealAddr(u64 addr, u64& result)
return false;
}

u64 VirtualMemoryBlock::getMappedAddress(u64 realAddress)
u32 VirtualMemoryBlock::getMappedAddress(u32 realAddress)
{
for (u32 i = 0; i<m_mapped_memory.size(); ++i)
{

@@ -22,24 +22,15 @@ enum : u32
RAW_SPU_PROB_OFFSET = 0x00040000,
};

namespace vm
{
extern void* const g_base_addr;
}

class MemoryBase
{
std::vector<MemoryBlock*> MemoryBlocks;
u32 m_pages[0x100000000 / 4096]; // information about every page

public:
MemoryBlock* UserMemory;

DynamicMemoryBlock MainMem;
DynamicMemoryBlock SPRXMem;
DynamicMemoryBlock PRXMem;
DynamicMemoryBlock RSXCMDMem;
DynamicMemoryBlock MmaperMem;
DynamicMemoryBlock Userspace;
DynamicMemoryBlock RSXFBMem;
DynamicMemoryBlock StackMem;
MemoryBlock* RawSPUMem[(0x100000000 - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET];
@@ -72,9 +63,9 @@ public:
Close();
}

void RegisterPages(u64 addr, u32 size);
void RegisterPages(u32 addr, u32 size);

void UnregisterPages(u64 addr, u32 size);
void UnregisterPages(u32 addr, u32 size);

u32 InitRawSPU(MemoryBlock* raw_spu);

@@ -82,27 +73,6 @@ public:

void Init(MemoryType type);

bool IsGoodAddr(const u32 addr)
{
return m_pages[addr / 4096] != 0; // TODO: define page parameters
}

bool IsGoodAddr(const u32 addr, const u32 size)
{
if (!size || addr + size - 1 < addr)
{
return false;
}
else
{
for (u32 i = addr / 4096; i <= (addr + size - 1) / 4096; i++)
{
if (!m_pages[i]) return false; // TODO: define page parameters
}
return true;
}
}

void Close();

__noinline void WriteMMIO32(u32 addr, const u32 data);
@@ -119,19 +89,19 @@ public:
return UserMemory->GetSize() - UserMemory->GetUsedSize();
}

u64 Alloc(const u32 size, const u32 align)
u32 Alloc(const u32 size, const u32 align)
{
return UserMemory->AllocAlign(size, align);
}

bool Free(const u64 addr)
bool Free(const u32 addr)
{
return UserMemory->Free(addr);
}

bool Map(const u64 addr, const u32 size);
bool Map(const u32 addr, const u32 size);

bool Unmap(const u64 addr);
bool Unmap(const u32 addr);
};

extern MemoryBase Memory;

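The IsGoodAddr(addr, size) overload removed above (superseded by vm::check_addr in this commit) walks every 4 KiB page touched by the range: the first page index is addr / 4096, the last is (addr + size - 1) / 4096, and the addr + size - 1 < addr test guards against 32-bit wrap-around. A tiny self-contained illustration of that range walk:

#include <cassert>
#include <cstdint>

// Returns true if every 4 KiB page in [addr, addr + size) satisfies `page_ok`.
template<typename F>
bool range_ok(std::uint32_t addr, std::uint32_t size, F page_ok)
{
    if (!size || addr + size - 1 < addr) return false;      // empty or wrapping range
    for (std::uint32_t i = addr / 4096; i <= (addr + size - 1) / 4096; i++)
    {
        if (!page_ok(i)) return false;
    }
    return true;
}

int main()
{
    auto first_16_pages = [](std::uint32_t page) { return page < 16; };
    assert(range_ok(0x0000, 0x10000, first_16_pages));       // pages 0..15
    assert(!range_ok(0xF000, 0x2000, first_16_pages));        // touches page 16
    assert(!range_ok(0xFFFFF000u, 0x2000, first_16_pages));   // wraps around 2^32
}
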
@@ -2,29 +2,27 @@

#define PAGE_4K(x) (x + 4095) & ~(4095)

//#include <emmintrin.h>

struct MemInfo
{
u64 addr;
u32 addr;
u32 size;

MemInfo(u64 _addr, u32 _size)
: addr(_addr)
, size(_size)
MemInfo(u32 addr, u32 size)
: addr(addr)
, size(size)
{
}

MemInfo() : addr(0), size(0)
MemInfo()
: addr(0)
, size(0)
{
}
};

struct MemBlockInfo : public MemInfo
{
void *mem;

MemBlockInfo(u64 _addr, u32 _size);
MemBlockInfo(u32 addr, u32 size);

void Free();

@@ -32,37 +30,36 @@ struct MemBlockInfo : public MemInfo

MemBlockInfo(MemBlockInfo &&other)
: MemInfo(other.addr,other.size)
, mem(other.mem)
{
other.mem = nullptr;
other.addr = 0;
other.size = 0;
}

MemBlockInfo& operator =(MemBlockInfo &other) = delete;

MemBlockInfo& operator =(MemBlockInfo &&other)
{
this->Free();
Free();
this->addr = other.addr;
this->size = other.size;
this->mem = other.mem;
other.mem = nullptr;
other.addr = 0;
other.size = 0;
return *this;
}

~MemBlockInfo()
{
Free();
mem = nullptr;
}
};

struct VirtualMemInfo : public MemInfo
{
u64 realAddress;
u32 realAddress;

VirtualMemInfo(u64 _addr, u64 _realaddr, u32 _size)
: MemInfo(_addr, _size)
, realAddress(_realaddr)
VirtualMemInfo(u32 addr, u32 realaddr, u32 size)
: MemInfo(addr, size)
, realAddress(realaddr)
{
}
@@ -76,8 +73,7 @@ struct VirtualMemInfo : public MemInfo
class MemoryBlock
{
protected:
u8* mem;
u64 range_start;
u32 range_start;
u32 range_size;

public:
@@ -93,25 +89,17 @@ private:
public:
virtual void Delete();

u64 FixAddr(const u64 addr) const;
virtual MemoryBlock* SetRange(const u32 start, const u32 size);

virtual MemoryBlock* SetRange(const u64 start, const u32 size);
virtual bool IsMyAddress(const u64 addr);
virtual bool IsLocked(const u64 addr) { return false; }

const u64 GetStartAddr() const { return range_start; }
const u64 GetEndAddr() const { return GetStartAddr() + GetSize() - 1; }
const u32 GetStartAddr() const { return range_start; }
const u32 GetEndAddr() const { return GetStartAddr() + GetSize() - 1; }
virtual const u32 GetSize() const { return range_size; }
virtual const u32 GetUsedSize() const { return GetSize(); }
u8* GetMem() const { return mem; }
virtual u8* GetMem(u64 addr) const { return mem + addr; }

virtual bool AllocFixed(u64 addr, u32 size) { return false; }
virtual u64 AllocAlign(u32 size, u32 align = 1) { return 0; }
virtual bool AllocFixed(u32 addr, u32 size) { return false; }
virtual u32 AllocAlign(u32 size, u32 align = 1) { return 0; }
virtual bool Alloc() { return false; }
virtual bool Free(u64 addr) { return false; }
virtual bool Lock(u64 addr, u32 size) { return false; }
virtual bool Unlock(u64 addr, u32 size) { return false; }
virtual bool Free(u32 addr) { return false; }
};

class DynamicMemoryBlockBase : public MemoryBlock
@@ -125,26 +113,19 @@ public:
const u32 GetSize() const { return m_max_size; }
const u32 GetUsedSize() const;

virtual bool IsInMyRange(const u64 addr);
virtual bool IsInMyRange(const u64 addr, const u32 size);
virtual bool IsMyAddress(const u64 addr);
virtual bool IsLocked(const u64 addr);
virtual bool IsInMyRange(const u32 addr, const u32 size = 1);

virtual MemoryBlock* SetRange(const u64 start, const u32 size);
virtual MemoryBlock* SetRange(const u32 start, const u32 size);

virtual void Delete();

virtual bool AllocFixed(u64 addr, u32 size);
virtual u64 AllocAlign(u32 size, u32 align = 1);
virtual bool AllocFixed(u32 addr, u32 size);
virtual u32 AllocAlign(u32 size, u32 align = 1);
virtual bool Alloc();
virtual bool Free(u64 addr);
virtual bool Lock(u64 addr, u32 size);
virtual bool Unlock(u64 addr, u32 size);

virtual u8* GetMem(u64 addr) const;
virtual bool Free(u32 addr);

private:
void AppendMem(u64 addr, u32 size);
void AppendMem(u32 addr, u32 size);
};

class VirtualMemoryBlock : public MemoryBlock
@@ -155,22 +136,20 @@ class VirtualMemoryBlock : public MemoryBlock
public:
VirtualMemoryBlock();

virtual MemoryBlock* SetRange(const u64 start, const u32 size);
virtual bool IsInMyRange(const u64 addr);
virtual bool IsInMyRange(const u64 addr, const u32 size);
virtual bool IsMyAddress(const u64 addr);
virtual MemoryBlock* SetRange(const u32 start, const u32 size);
virtual bool IsInMyRange(const u32 addr, const u32 size = 1);
virtual void Delete();

// maps real address to virtual address space, returns the mapped address or 0 on failure (if no address is specified the
// first mappable space is used)
virtual bool Map(u64 realaddr, u32 size, u64 addr);
virtual u64 Map(u64 realaddr, u32 size);
virtual bool Map(u32 realaddr, u32 size, u32 addr);
virtual u32 Map(u32 realaddr, u32 size);

// Unmap real address (please specify only starting point, no midway memory will be unmapped), returns the size of the unmapped area
virtual bool UnmapRealAddress(u64 realaddr, u32& size);
virtual bool UnmapRealAddress(u32 realaddr, u32& size);

// Unmap address (please specify only starting point, no midway memory will be unmapped), returns the size of the unmapped area
virtual bool UnmapAddress(u64 addr, u32& size);
virtual bool UnmapAddress(u32 addr, u32& size);

// Reserve a certain amount so no one can use it, returns true on succces, false on failure
virtual bool Reserve(u32 size);
@@ -181,24 +160,23 @@ public:
// Return the total amount of reserved memory
virtual u32 GetReservedAmount();

bool Read32(const u64 addr, u32* value);
bool Read32(const u32 addr, u32* value);

bool Write32(const u64 addr, const u32 value);
bool Write32(const u32 addr, const u32 value);

// try to get the real address given a mapped address
// return true for success
bool getRealAddr(u64 addr, u64& result);
bool getRealAddr(u32 addr, u32& result);

u64 RealAddr(u64 addr)
u32 RealAddr(u32 addr)
{
u64 realAddr = 0;
u32 realAddr = 0;
getRealAddr(addr, realAddr);
return realAddr;
}

// return the mapped address given a real address, if not mapped return 0
u64 getMappedAddress(u64 realAddress);
u32 getMappedAddress(u32 realAddress);
};

typedef DynamicMemoryBlockBase DynamicMemoryBlock;

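One observation on the header above: the PAGE_4K(x) macro expands without enclosing parentheses, so an expression such as PAGE_4K(size) * 2 or PAGE_4K(a | b) can group in surprising ways; the commit itself only uses it in simple contexts. A safer equivalent, shown purely as an illustration and not part of the commit:

#include <cassert>
#include <cstdint>

// Same rounding as PAGE_4K, but as a function so the argument and result
// are grouped correctly in any surrounding expression.
constexpr std::uint32_t page_4k(std::uint32_t x)
{
    return (x + 4095) & ~std::uint32_t(4095);
}

int main()
{
    assert(page_4k(1) == 4096);
    assert(page_4k(4096) == 4096);
    assert(page_4k(4097) == 8192);
}
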
@@ -25,35 +25,32 @@

namespace vm
{
#ifdef _WIN32
HANDLE g_memory_handle;
#endif

void* g_priv_addr;

void* initialize()
{
#ifdef _WIN32
g_memory_handle = CreateFileMapping(INVALID_HANDLE_VALUE, NULL, PAGE_READWRITE | SEC_RESERVE, 0x1, 0x0, NULL);
HANDLE memory_handle = CreateFileMapping(INVALID_HANDLE_VALUE, NULL, PAGE_READWRITE | SEC_RESERVE, 0x1, 0x0, NULL);

void* base_addr = MapViewOfFile(g_memory_handle, FILE_MAP_WRITE, 0, 0, 0x100000000); // main memory
g_priv_addr = MapViewOfFile(g_memory_handle, FILE_MAP_WRITE, 0, 0, 0x100000000); // memory mirror for privileged access
void* base_addr = MapViewOfFile(memory_handle, FILE_MAP_WRITE, 0, 0, 0x100000000);
g_priv_addr = MapViewOfFile(memory_handle, FILE_MAP_WRITE, 0, 0, 0x100000000);

CloseHandle(memory_handle);

return base_addr;

//return VirtualAlloc(nullptr, 0x100000000, MEM_RESERVE, PAGE_NOACCESS);
#else
//shm_unlink("/rpcs3_vm");

int memory_handle = shm_open("/rpcs3_vm", O_RDWR | O_CREAT | O_EXCL, 0);

if (memory_handle == -1)
{
printf("shm_open() failed\n");
printf("shm_open('/rpcs3_vm') failed\n");
return (void*)-1;
}

ftruncate(memory_handle, 0x100000000);
if (ftruncate(memory_handle, 0x100000000) == -1)
{
printf("ftruncate(memory_handle) failed\n");
shm_unlink("/rpcs3_vm");
return (void*)-1;
}

void* base_addr = mmap(nullptr, 0x100000000, PROT_NONE, MAP_SHARED, memory_handle, 0);
g_priv_addr = mmap(nullptr, 0x100000000, PROT_NONE, MAP_SHARED, memory_handle, 0);
@@ -61,8 +58,6 @@ namespace vm
shm_unlink("/rpcs3_vm");

return base_addr;

//return mmap(nullptr, 0x100000000, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
#endif
}

@@ -71,14 +66,16 @@ namespace vm
#ifdef _WIN32
UnmapViewOfFile(g_base_addr);
UnmapViewOfFile(g_priv_addr);
CloseHandle(g_memory_handle);
#else
munmap(g_base_addr, 0x100000000);
munmap(g_priv_addr, 0x100000000);
#endif
}

void* const g_base_addr = (atexit(finalize), initialize());
void* g_base_addr = (atexit(finalize), initialize());
void* g_priv_addr;

std::array<atomic_le_t<u8>, 0x100000000ull / 4096> g_page_info = {}; // information about every page

class reservation_mutex_t
{
@@ -211,6 +208,12 @@ namespace vm
{
std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);

u8 flags = g_page_info[addr >> 12].read_relaxed();
if (!(flags & page_writable) || !(flags & page_allocated) || (flags & page_no_reservations))
{
throw fmt::format("vm::reservation_acquire(addr=0x%x, size=0x%x) failed (invalid page flags: 0x%x)", addr, size, flags);
}

// silent unlocking to prevent priority boost for threads going to break reservation
//g_reservation_mutex.do_notify = false;

@@ -272,13 +275,9 @@ namespace vm
{
std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);

if (!check_addr(addr))
{
LV2_LOCK(0);

if (!Memory.IsGoodAddr(addr))
{
return false;
}
return false;
}

if (is_writing)
@@ -335,11 +334,157 @@ namespace vm
_reservation_break(addr);
}

bool check_addr(u32 addr)
void page_map(u32 addr, u32 size, u8 flags)
{
// Checking address before using it is unsafe.
// The only safe way to check it is to protect both actions (checking and using) with mutex that is used for mapping/allocation.
return false;
assert(size && (size | addr) % 4096 == 0 && flags < page_allocated);

std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);

for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
{
if (g_page_info[i].read_relaxed())
{
throw fmt::format("vm::page_map(addr=0x%x, size=0x%x, flags=0x%x) failed (already mapped at 0x%x)", addr, size, flags, i * 4096);
}
}

void* real_addr = vm::get_ptr(addr);
void* priv_addr = vm::get_priv_ptr(addr);

#ifdef _WIN32
auto protection = flags & page_writable ? PAGE_READWRITE : (flags & page_readable ? PAGE_READONLY : PAGE_NOACCESS);
if (!VirtualAlloc(priv_addr, size, MEM_COMMIT, PAGE_READWRITE) || !VirtualAlloc(real_addr, size, MEM_COMMIT, protection))
#else
auto protection = flags & page_writable ? PROT_WRITE | PROT_READ : (flags & page_readable ? PROT_READ : PROT_NONE);
if (mprotect(priv_addr, size, PROT_READ | PROT_WRITE) || mprotect(real_addr, size, protection))
#endif
{
throw fmt::format("vm::page_map(addr=0x%x, size=0x%x, flags=0x%x) failed (API)", addr, size, flags);
}

for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
{
if (g_page_info[i].exchange(flags | page_allocated))
{
throw fmt::format("vm::page_map(addr=0x%x, size=0x%x, flags=0x%x) failed (concurrent access at 0x%x)", addr, size, flags, i * 4096);
}
}

memset(priv_addr, 0, size); // ???
}

bool page_protect(u32 addr, u32 size, u8 flags_test, u8 flags_set, u8 flags_clear)
{
u8 flags_inv = flags_set & flags_clear;

assert(size && (size | addr) % 4096 == 0);

flags_test |= page_allocated;

std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);

for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
{
if ((g_page_info[i].read_relaxed() & flags_test) != (flags_test | page_allocated))
{
return false;
}
}

if (!flags_inv && !flags_set && !flags_clear)
{
return true;
}

for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
{
_reservation_break(i * 4096);

const u8 f1 = g_page_info[i]._or(flags_set & ~flags_inv) & (page_writable | page_readable);
g_page_info[i]._and_not(flags_clear & ~flags_inv);
const u8 f2 = (g_page_info[i] ^= flags_inv) & (page_writable | page_readable);

if (f1 != f2)
{
void* real_addr = vm::get_ptr(i * 4096);

#ifdef _WIN32
DWORD old;

auto protection = f2 & page_writable ? PAGE_READWRITE : (f2 & page_readable ? PAGE_READONLY : PAGE_NOACCESS);
if (!VirtualProtect(real_addr, size, protection, &old))
#else
auto protection = f2 & page_writable ? PROT_WRITE | PROT_READ : (f2 & page_readable ? PROT_READ : PROT_NONE);
if (mprotect(real_addr, size, protection))
#endif
{
throw fmt::format("vm::page_protect(addr=0x%x, size=0x%x, flags_test=0x%x, flags_set=0x%x, flags_clear=0x%x) failed (API)", addr, size, flags_test, flags_set, flags_clear);
}
}
}

return true;
}

void page_unmap(u32 addr, u32 size)
{
assert(size && (size | addr) % 4096 == 0);

std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);

for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
{
if (!(g_page_info[i].read_relaxed() & page_allocated))
{
throw fmt::format("vm::page_unmap(addr=0x%x, size=0x%x) failed (not mapped at 0x%x)", addr, size, i * 4096);
}
}

for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
{
_reservation_break(i * 4096);

if (!(g_page_info[i].exchange(0) & page_allocated))
{
throw fmt::format("vm::page_unmap(addr=0x%x, size=0x%x) failed (concurrent access at 0x%x)", addr, size, i * 4096);
}
}

void* real_addr = vm::get_ptr(addr);
void* priv_addr = vm::get_priv_ptr(addr);

#ifdef _WIN32
DWORD old;

if (!VirtualProtect(real_addr, size, PAGE_NOACCESS, &old) || !VirtualProtect(priv_addr, size, PAGE_NOACCESS, &old))
#else
if (mprotect(real_addr, size, PROT_NONE) || mprotect(priv_addr, size, PROT_NONE))
#endif
{
throw fmt::format("vm::page_unmap(addr=0x%x, size=0x%x) failed (API)", addr, size);
}
}

// Not checked if address is writable/readable. Checking address before using it is unsafe.
// The only safe way to check it is to protect both actions (checking and using) with mutex that is used for mapping/allocation.
bool check_addr(u32 addr, u32 size)
{
assert(size);

if (addr + (size - 1) < addr)
{
return false;
}

for (u32 i = addr / 4096; i <= (addr + size - 1) / 4096; i++)
{
if ((g_page_info[i].read_sync() & page_allocated) != page_allocated)
{
return false;
}
}

return true;
}

//TODO
@@ -406,6 +551,19 @@ namespace vm
Memory.MainMem.Free(addr);
}

u32 user_space_alloc(u32 size)
{
return Memory.Userspace.AllocAlign(size, 1);
}
u32 user_space_fixed_alloc(u32 addr, u32 size)
{
return Memory.Userspace.AllocFixed(addr, size) ? addr : 0;
}
void user_space_dealloc(u32 addr)
{
Memory.Userspace.Free(addr);
}

u32 g_stack_offset = 0;

u32 stack_alloc(u32 size)
@@ -421,32 +579,6 @@ namespace vm
Memory.StackMem.Free(addr);
}

u32 sprx_alloc(u32 size)
{
return Memory.SPRXMem.AllocAlign(size, 1);
}
u32 sprx_fixed_alloc(u32 addr, u32 size)
{
return Memory.SPRXMem.AllocFixed(Memory.SPRXMem.GetStartAddr() + addr, size) ? Memory.SPRXMem.GetStartAddr() + addr : 0;
}
void sprx_dealloc(u32 addr)
{
Memory.SPRXMem.Free(addr);
}

u32 user_space_alloc(u32 size)
{
return Memory.PRXMem.AllocAlign(size, 1);
}
u32 user_space_fixed_alloc(u32 addr, u32 size)
{
return Memory.PRXMem.AllocFixed(addr, size) ? addr : 0;
}
void user_space_dealloc(u32 addr)
{
Memory.PRXMem.Free(addr);
}

void init()
{
Memory.Init(Memory_PS3);
@@ -471,13 +603,9 @@ namespace vm

location_info g_locations[memory_location_count] =
{
{ 0x00010000, 0x2FFF0000, ps3::main_alloc, ps3::main_fixed_alloc, ps3::main_dealloc },
{ 0x00010000, 0x1FFF0000, ps3::main_alloc, ps3::main_fixed_alloc, ps3::main_dealloc },
{ 0x20000000, 0x10000000, ps3::user_space_alloc, ps3::user_space_fixed_alloc, ps3::user_space_dealloc },
{ 0xD0000000, 0x10000000, ps3::stack_alloc, ps3::stack_fixed_alloc, ps3::stack_dealloc },

//remove me
{ 0x00010000, 0x2FFF0000, ps3::sprx_alloc, ps3::sprx_fixed_alloc, ps3::sprx_dealloc },

{ 0x30000000, 0x10000000, ps3::user_space_alloc, ps3::user_space_fixed_alloc, ps3::user_space_dealloc },
};

void close()
@@ -495,7 +623,7 @@ namespace vm
{
PPUThread& PPU = static_cast<PPUThread&>(CPU);

old_pos = (u32)PPU.GPR[1];
old_pos = vm::cast(PPU.GPR[1], "SP");
PPU.GPR[1] -= align(size, 8); // room minimal possible size
PPU.GPR[1] &= ~(align_v - 1); // fix stack alignment

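For context on vm::initialize() above: the emulator maps the same shared-memory object twice, so the guest address space and a privileged mirror are backed by the same pages. A condensed, self-contained POSIX sketch of that double-mapping technique (smaller size, different object name, and read/write protection instead of PROT_NONE — not the project's code):

#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>
#include <cstdio>
#include <cstring>

int main()
{
    const size_t size = 1 << 20;                    // 1 MiB instead of 4 GiB for the example
    int fd = shm_open("/mirror_demo", O_RDWR | O_CREAT | O_EXCL, 0600);
    if (fd == -1) { perror("shm_open"); return 1; }
    shm_unlink("/mirror_demo");                     // keep the object alive only through fd
    if (ftruncate(fd, size) == -1) { perror("ftruncate"); return 1; }

    // Two views of the same pages: a "guest" view and a privileged mirror.
    char* base = (char*)mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    char* priv = (char*)mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    close(fd);

    std::strcpy(priv, "written through the mirror");
    std::printf("%s\n", base);                      // the same bytes are visible in both views

    munmap(base, size);
    munmap(priv, size);
}
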
@@ -5,40 +5,56 @@ class CPUThread;

namespace vm
{
extern void* g_base_addr; // base address of ps3/psv virtual memory for common access
extern void* g_priv_addr; // base address of ps3/psv virtual memory for privileged access

enum memory_location : uint
{
main,
stack,

//remove me
sprx,

user_space,
stack,

memory_location_count
};

enum page_info_t : u8
{
page_readable = (1 << 0),
page_writable = (1 << 1),
page_executable = (1 << 2),

page_fault_notification = (1 << 3),
page_no_reservations = (1 << 4),

page_allocated = (1 << 7),
};

static void set_stack_size(u32 size) {}
static void initialize_stack() {}

#ifdef _WIN32
extern HANDLE g_memory_handle;
#endif

extern void* g_priv_addr;
extern void* const g_base_addr;

// break the reservation, return true if it was successfully broken
bool reservation_break(u32 addr);
// read memory and reserve it for further atomic update, return true if the previous reservation was broken
bool reservation_acquire(void* data, u32 addr, u32 size, const std::function<void()>& callback = nullptr);
// attempt to atomically update reserved memory
bool reservation_update(u32 addr, const void* data, u32 size);
// for internal use
bool reservation_query(u32 addr, bool is_writing);
// for internal use
void reservation_free();
// perform complete operation
void reservation_op(u32 addr, u32 size, std::function<void()> proc);

// for internal use
void page_map(u32 addr, u32 size, u8 flags);
// for internal use
bool page_protect(u32 addr, u32 size, u8 flags_test = 0, u8 flags_set = 0, u8 flags_clear = 0);
// for internal use
void page_unmap(u32 addr, u32 size);

// unsafe address check
bool check_addr(u32 addr, u32 size = 1);

bool map(u32 addr, u32 size, u32 flags);
bool unmap(u32 addr, u32 size = 0, u32 flags = 0);
u32 alloc(u32 size, memory_location location = user_space);
@@ -84,20 +100,22 @@ namespace vm
}
};

template<>
struct cast_ptr<unsigned long>
{
__forceinline static u32 cast(const unsigned long addr, const char* func)
{
const u32 res = static_cast<u32>(addr);
if (res != addr)
{
vm::error(addr, func);
}
#ifdef __APPLE__
template<>
struct cast_ptr<unsigned long>
{
__forceinline static u32 cast(const unsigned long addr, const char* func)
{
const u32 res = static_cast<u32>(addr);
if (res != addr)
{
vm::error(addr, func);
}

return res;
}
};
return res;
}
};
#endif

template<>
struct cast_ptr<u32>

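Several call sites in this commit add or drop vm::cast(...), which (per the cast_ptr specializations above) narrows a wider integer to a u32 guest address and reports an error when the value does not fit. A small illustration of that checked-narrowing idea, written against a standalone stand-in rather than the project's vm::cast:

#include <cassert>
#include <cstdint>
#include <stdexcept>

// Stand-in for the project's vm::cast: narrow to u32, fail loudly on truncation.
static std::uint32_t checked_addr_cast(std::uint64_t addr)
{
    const std::uint32_t res = static_cast<std::uint32_t>(addr);
    if (res != addr) throw std::range_error("address does not fit in 32 bits");
    return res;
}

int main()
{
    assert(checked_addr_cast(0xD0000000ull) == 0xD0000000u); // a typical guest address
    try
    {
        checked_addr_cast(0x100000000ull);                    // one past 4 GiB: must be rejected
        assert(false);
    }
    catch (const std::range_error&) {}
}
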
@@ -35,7 +35,7 @@ namespace vm

void alloc()
{
m_addr = vm::cast(Memory.Alloc(size(), m_align));
m_addr = Memory.Alloc(size(), m_align);
m_ptr = vm::get_ptr<T>(m_addr);
}

@@ -162,7 +162,7 @@ namespace vm

void alloc()
{
m_addr = vm::cast(Memory.Alloc(size(), m_align));
m_addr = Memory.Alloc(size(), m_align);
m_ptr = vm::get_ptr<T>(m_addr);
}

@@ -235,6 +235,7 @@ struct gcmInfo
u32 config_addr;
u32 context_addr;
u32 control_addr;
u32 label_addr;
};

struct CellGcmSurface

@@ -104,12 +104,7 @@ void GLTexture::Init(RSXTexture& tex)

Bind();

const u64 texaddr = GetAddress(tex.GetOffset(), tex.GetLocation());
if (!Memory.IsGoodAddr(texaddr))
{
LOG_ERROR(RSX, "Bad texture address=0x%x", texaddr);
return;
}
const u32 texaddr = GetAddress(tex.GetOffset(), tex.GetLocation());
//LOG_WARNING(RSX, "texture addr = 0x%x, width = %d, height = %d, max_aniso=%d, mipmap=%d, remap=0x%x, zfunc=0x%x, wraps=0x%x, wrapt=0x%x, wrapr=0x%x, minlod=0x%x, maxlod=0x%x",
// m_offset, m_width, m_height, m_maxaniso, m_mipmap, m_remap, m_zfunc, m_wraps, m_wrapt, m_wrapr, m_minlod, m_maxlod);

@@ -1242,11 +1237,6 @@ void GLGSRender::WriteDepthBuffer()
}

u32 address = GetAddress(m_surface_offset_z, m_context_dma_z - 0xfeed0000);
if (!Memory.IsGoodAddr(address))
{
LOG_WARNING(RSX, "Bad depth buffer address: address=0x%x, offset=0x%x, dma=0x%x", address, m_surface_offset_z, m_context_dma_z);
return;
}

auto ptr = vm::get_ptr<void>(address);
glBindBuffer(GL_PIXEL_PACK_BUFFER, g_pbo[4]);
@@ -1279,11 +1269,6 @@ void GLGSRender::WriteColorBufferA()
}

u32 address = GetAddress(m_surface_offset_a, m_context_dma_color_a - 0xfeed0000);
if (!Memory.IsGoodAddr(address))
{
LOG_ERROR(RSX, "Bad color buffer A address: address=0x%x, offset=0x%x, dma=0x%x", address, m_surface_offset_a, m_context_dma_color_a);
return;
}

glReadBuffer(GL_COLOR_ATTACHMENT0);
checkForGlError("WriteColorBufferA(): glReadBuffer");
@@ -1310,11 +1295,6 @@ void GLGSRender::WriteColorBufferB()
}

u32 address = GetAddress(m_surface_offset_b, m_context_dma_color_b - 0xfeed0000);
if (!Memory.IsGoodAddr(address))
{
LOG_ERROR(RSX, "Bad color buffer B address: address=0x%x, offset=0x%x, dma=0x%x", address, m_surface_offset_b, m_context_dma_color_b);
return;
}

glReadBuffer(GL_COLOR_ATTACHMENT1);
checkForGlError("WriteColorBufferB(): glReadBuffer");
@@ -1341,11 +1321,6 @@ void GLGSRender::WriteColorBufferC()
}

u32 address = GetAddress(m_surface_offset_c, m_context_dma_color_c - 0xfeed0000);
if (!Memory.IsGoodAddr(address))
{
LOG_ERROR(RSX, "Bad color buffer C address: address=0x%x, offset=0x%x, dma=0x%x", address, m_surface_offset_c, m_context_dma_color_c);
return;
}

glReadBuffer(GL_COLOR_ATTACHMENT2);
checkForGlError("WriteColorBufferC(): glReadBuffer");
@@ -1372,11 +1347,6 @@ void GLGSRender::WriteColorBufferD()
}

u32 address = GetAddress(m_surface_offset_d, m_context_dma_color_d - 0xfeed0000);
if (!Memory.IsGoodAddr(address))
{
LOG_ERROR(RSX, "Bad color buffer D address: address=0x%x, offset=0x%x, dma=0x%x", address, m_surface_offset_d, m_context_dma_color_d);
return;
}

glReadBuffer(GL_COLOR_ATTACHMENT3);
checkForGlError("WriteColorBufferD(): glReadBuffer");
@@ -1682,14 +1652,9 @@ void GLGSRender::InitDrawBuffers()
u32 format = GL_BGRA;
CellGcmDisplayInfo* buffers = vm::get_ptr<CellGcmDisplayInfo>(m_gcm_buffers_addr);
u32 addr = GetAddress(buffers[m_gcm_current_buffer].offset, CELL_GCM_LOCATION_LOCAL);

if (Memory.IsGoodAddr(addr))
{
u32 width = buffers[m_gcm_current_buffer].width;
u32 height = buffers[m_gcm_current_buffer].height;

glDrawPixels(width, height, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8, vm::get_ptr(addr));
}
u32 width = buffers[m_gcm_current_buffer].width;
u32 height = buffers[m_gcm_current_buffer].height;
glDrawPixels(width, height, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8, vm::get_ptr(addr));
}
}

@@ -2139,17 +2104,9 @@ void GLGSRender::Flip()
format = GL_BGRA;
CellGcmDisplayInfo* buffers = vm::get_ptr<CellGcmDisplayInfo>(m_gcm_buffers_addr);
u32 addr = GetAddress(buffers[m_gcm_current_buffer].offset, CELL_GCM_LOCATION_LOCAL);

if (Memory.IsGoodAddr(addr))
{
width = buffers[m_gcm_current_buffer].width;
height = buffers[m_gcm_current_buffer].height;
src_buffer = vm::get_ptr<u8>(addr);
}
else
{
src_buffer = nullptr;
}
width = buffers[m_gcm_current_buffer].width;
height = buffers[m_gcm_current_buffer].height;
src_buffer = vm::get_ptr<u8>(addr);
}
else if (m_fbo.IsCreated())
{

@@ -261,7 +261,7 @@ void RSXThread::DoCmd(const u32 fcmd, const u32 cmd, const u32 args_addr, const
if (m_set_semaphore_offset)
{
m_set_semaphore_offset = false;
vm::write32(Memory.RSXCMDMem.GetStartAddr() + m_semaphore_offset, ARGS(0));
vm::write32(m_label_addr + m_semaphore_offset, ARGS(0));
}
break;
}
@@ -274,7 +274,7 @@ void RSXThread::DoCmd(const u32 fcmd, const u32 cmd, const u32 args_addr, const
u32 value = ARGS(0);
value = (value & 0xff00ff00) | ((value & 0xff) << 16) | ((value >> 16) & 0xff);

vm::write32(Memory.RSXCMDMem.GetStartAddr() + m_semaphore_offset, value);
vm::write32(m_label_addr + m_semaphore_offset, value);
}
break;
}

@@ -134,6 +134,7 @@ public:
u32 m_gcm_current_buffer;
u32 m_ctxt_addr;
u32 m_report_main_addr;
u32 m_label_addr;

// DMA
u32 dma_report;

@ -41,8 +41,8 @@ s32 cellAudioInit()
g_audio.start_time = get_system_time();

// alloc memory (only once until the emulator is stopped)
g_audio.buffer = g_audio.buffer ? g_audio.buffer : vm::cast(Memory.MainMem.AllocAlign(AUDIO_PORT_OFFSET * AUDIO_PORT_COUNT, 4096));
g_audio.indexes = g_audio.indexes ? g_audio.indexes : vm::cast(Memory.MainMem.AllocAlign(sizeof(u64) * AUDIO_PORT_COUNT, __alignof(u64)));
g_audio.buffer = g_audio.buffer ? g_audio.buffer : Memory.MainMem.AllocAlign(AUDIO_PORT_OFFSET * AUDIO_PORT_COUNT, 4096);
g_audio.indexes = g_audio.indexes ? g_audio.indexes : Memory.MainMem.AllocAlign(sizeof(u64) * AUDIO_PORT_COUNT, __alignof(u64));

// clear memory
memset(vm::get_ptr<void>(g_audio.buffer), 0, AUDIO_PORT_OFFSET * AUDIO_PORT_COUNT);
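
The cellAudioInit hunk only removes the vm::cast wrappers (AllocAlign now yields the guest address directly) while keeping the allocate-once pattern. A tiny sketch of that pattern with a hypothetical bump allocator:

    #include <cstdint>
    #include <cstdio>

    static uint32_t alloc_align(uint32_t size, uint32_t align)   // stand-in for MainMem.AllocAlign
    {
        static uint32_t next = 0x10000;
        next = (next + align - 1) & ~(align - 1);
        const uint32_t addr = next;
        next += size;
        return addr;
    }

    static uint32_t g_buffer = 0;                                // 0 = not yet allocated

    static void audio_init()
    {
        // keep the old buffer across re-initialisations; allocate only the first time
        g_buffer = g_buffer ? g_buffer : alloc_align(0x10000, 4096);
    }

    int main()
    {
        audio_init();
        const uint32_t first = g_buffer;
        audio_init();
        std::printf("same buffer: %d\n", first == g_buffer);     // prints 1
    }
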
@ -14,6 +14,7 @@ Module *cellGame = nullptr;

std::string contentInfo = "";
std::string usrdir = "";
bool path_set = false;

int cellGameBootCheck(vm::ptr<u32> type, vm::ptr<u32> attributes, vm::ptr<CellGameContentSize> size, vm::ptr<char[CELL_GAME_DIRNAME_SIZE]> dirName)
{
@ -52,6 +53,7 @@ int cellGameBootCheck(vm::ptr<u32> type, vm::ptr<u32> attributes, vm::ptr<CellGa
if (dirName) strcpy_trunc(*dirName, ""); // ???
contentInfo = "/dev_bdvd/PS3_GAME";
usrdir = "/dev_bdvd/PS3_GAME/USRDIR";
path_set = true;
}
else if (category.substr(0, 2) == "HG")
{
@ -61,6 +63,7 @@ int cellGameBootCheck(vm::ptr<u32> type, vm::ptr<u32> attributes, vm::ptr<CellGa
if (dirName) strcpy_trunc(*dirName, titleId);
contentInfo = "/dev_hdd0/game/" + titleId;
usrdir = "/dev_hdd0/game/" + titleId + "/USRDIR";
path_set = true;
}
else if (category.substr(0, 2) == "GD")
{
@ -70,6 +73,7 @@ int cellGameBootCheck(vm::ptr<u32> type, vm::ptr<u32> attributes, vm::ptr<CellGa
if (dirName) strcpy_trunc(*dirName, titleId); // ???
contentInfo = "/dev_bdvd/PS3_GAME";
usrdir = "/dev_bdvd/PS3_GAME/USRDIR";
path_set = true;
}
else
{
@ -124,6 +128,7 @@ int cellGamePatchCheck(vm::ptr<CellGameContentSize> size, u32 reserved_addr)
std::string titleId = psf.GetString("TITLE_ID");
contentInfo = "/dev_hdd0/game/" + titleId;
usrdir = "/dev_hdd0/game/" + titleId + "/USRDIR";
path_set = true;

return CELL_GAME_RET_OK;
}
@ -155,10 +160,15 @@ int cellGameDataCheck(u32 type, vm::ptr<const char> dirName, vm::ptr<CellGameCon
if (!Emu.GetVFS().ExistsDir("/dev_bdvd/PS3_GAME"))
{
cellGame->Warning("cellGameDataCheck(): /dev_bdvd/PS3_GAME not found");
contentInfo = "";
usrdir = "";
path_set = true;
return CELL_GAME_RET_NONE;
}

contentInfo = "/dev_bdvd/PS3_GAME";
usrdir = "/dev_bdvd/PS3_GAME/USRDIR";
path_set = true;
}
else
{
@ -167,10 +177,15 @@ int cellGameDataCheck(u32 type, vm::ptr<const char> dirName, vm::ptr<CellGameCon
if (!Emu.GetVFS().ExistsDir(dir))
{
cellGame->Warning("cellGameDataCheck(): '%s' directory not found", dir.c_str());
contentInfo = "";
usrdir = "";
path_set = true;
return CELL_GAME_RET_NONE;
}

contentInfo = dir;
usrdir = dir + "/USRDIR";
path_set = true;
}

return CELL_GAME_RET_OK;
@ -186,9 +201,8 @@ int cellGameContentPermit(vm::ptr<char[CELL_GAME_PATH_MAX]> contentInfoPath, vm:
return CELL_GAME_ERROR_PARAM;
}

if (contentInfo == "" && usrdir == "")
if (!path_set)
{
cellGame->Warning("cellGameContentPermit(): CELL_GAME_ERROR_FAILURE (no permission given)");
return CELL_GAME_ERROR_FAILURE;
}

@ -197,6 +211,7 @@ int cellGameContentPermit(vm::ptr<char[CELL_GAME_PATH_MAX]> contentInfoPath, vm:

contentInfo = "";
usrdir = "";
path_set = false;

return CELL_GAME_RET_OK;
}
@ -476,6 +491,10 @@ void cellGame_init(Module *pxThis)
{
cellGame = pxThis;

contentInfo = "";
usrdir = "";
path_set = false;

// (TODO: Disc Exchange functions missing)

cellGame->AddFunc(0xf52639ea, cellGameBootCheck);
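
The new path_set flag makes "a path was selected" an explicit state instead of inferring it from empty strings; cellGameDataCheck can legitimately record empty paths and still count as having run. A minimal sketch of that distinction (illustrative only, not the module's real control flow):

    #include <cstdio>
    #include <string>

    static std::string contentInfo, usrdir;
    static bool path_set = false;

    static void data_check_missing_dir()
    {
        contentInfo.clear();   // nothing was found, but the check did run
        usrdir.clear();
        path_set = true;
    }

    static bool content_permit()
    {
        if (!path_set) return false;   // the old empty-string test would also reject the case above
        path_set = false;              // consume the permission
        return true;
    }

    int main()
    {
        data_check_missing_dir();
        std::printf("permit: %d\n", content_permit());   // prints 1
    }
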
@ -76,7 +76,7 @@ void InitOffsetTable()
u32 cellGcmGetLabelAddress(u8 index)
{
cellGcmSys->Log("cellGcmGetLabelAddress(index=%d)", index);
return (u32)Memory.RSXCMDMem.GetStartAddr() + 0x10 * index;
return gcm_info.label_addr + 0x10 * index;
}

vm::ptr<CellGcmReportData> cellGcmGetReportDataAddressLocation(u32 index, u32 location)
@ -115,7 +115,7 @@ u64 cellGcmGetTimeStamp(u32 index)
return vm::read64(Memory.RSXFBMem.GetStartAddr() + index * 0x10);
}

int cellGcmGetCurrentField()
s32 cellGcmGetCurrentField()
{
UNIMPLEMENTED_FUNC(cellGcmSys);
return CELL_OK;
@ -226,13 +226,13 @@ u32 cellGcmGetDefaultSegmentWordSize()
return 0x100;
}

int cellGcmInitDefaultFifoMode(s32 mode)
s32 cellGcmInitDefaultFifoMode(s32 mode)
{
cellGcmSys->Warning("cellGcmInitDefaultFifoMode(mode=%d)", mode);
return CELL_OK;
}

int cellGcmSetDefaultFifoSize(u32 bufferSize, u32 segmentSize)
s32 cellGcmSetDefaultFifoSize(u32 bufferSize, u32 segmentSize)
{
cellGcmSys->Warning("cellGcmSetDefaultFifoSize(bufferSize=0x%x, segmentSize=0x%x)", bufferSize, segmentSize);
return CELL_OK;
@ -242,7 +242,7 @@ int cellGcmSetDefaultFifoSize(u32 bufferSize, u32 segmentSize)
// Hardware Resource Management
//----------------------------------------------------------------------------

int cellGcmBindTile(u8 index)
s32 cellGcmBindTile(u8 index)
{
cellGcmSys->Warning("cellGcmBindTile(index=%d)", index);

@ -258,7 +258,7 @@ int cellGcmBindTile(u8 index)
return CELL_OK;
}

int cellGcmBindZcull(u8 index)
s32 cellGcmBindZcull(u8 index)
{
cellGcmSys->Warning("cellGcmBindZcull(index=%d)", index);

@ -274,7 +274,7 @@ int cellGcmBindZcull(u8 index)
return CELL_OK;
}

int cellGcmGetConfiguration(vm::ptr<CellGcmConfig> config)
s32 cellGcmGetConfiguration(vm::ptr<CellGcmConfig> config)
{
cellGcmSys->Log("cellGcmGetConfiguration(config_addr=0x%x)", config.addr());

@ -283,9 +283,9 @@ int cellGcmGetConfiguration(vm::ptr<CellGcmConfig> config)
return CELL_OK;
}

int cellGcmGetFlipStatus()
s32 cellGcmGetFlipStatus()
{
int status = Emu.GetGSManager().GetRender().m_flip_status;
s32 status = Emu.GetGSManager().GetRender().m_flip_status;

cellGcmSys->Log("cellGcmGetFlipStatus() -> %d", status);
@ -345,7 +345,7 @@ s32 _cellGcmInitBody(vm::ptr<CellGcmContextData> context, u32 cmdSize, u32 ioSiz
Memory.RSXIOMem.SetRange(0, 0x10000000 /*256MB*/);
}

if(cellGcmMapEaIoAddress(ioAddress, 0, ioSize) != CELL_OK)
if (gcmMapEaIoAddress(ioAddress, 0, ioSize, false) != CELL_OK)
{
cellGcmSys->Error("cellGcmInit : CELL_GCM_ERROR_FAILURE");
return CELL_GCM_ERROR_FAILURE;
@ -360,8 +360,6 @@ s32 _cellGcmInitBody(vm::ptr<CellGcmContextData> context, u32 cmdSize, u32 ioSiz
current_config.memoryFrequency = 650000000;
current_config.coreFrequency = 500000000;

Memory.RSXCMDMem.AllocAlign(cmdSize);

u32 ctx_begin = ioAddress/* + 0x1000*/;
u32 ctx_size = 0x6ffc;
current_context.begin = ctx_begin;
@ -369,9 +367,11 @@ s32 _cellGcmInitBody(vm::ptr<CellGcmContextData> context, u32 cmdSize, u32 ioSiz
current_context.current = current_context.begin;
current_context.callback.set(be_t<u32>::make(Emu.GetRSXCallback() - 4));

gcm_info.context_addr = (u32)Memory.MainMem.AllocAlign(0x1000);
gcm_info.context_addr = Memory.MainMem.AllocAlign(0x1000);
gcm_info.control_addr = gcm_info.context_addr + 0x40;

gcm_info.label_addr = Memory.MainMem.AllocAlign(0x1000); // ???

vm::get_ref<CellGcmContextData>(gcm_info.context_addr) = current_context;
vm::write32(context.addr(), gcm_info.context_addr);

@ -388,12 +388,13 @@ s32 _cellGcmInitBody(vm::ptr<CellGcmContextData> context, u32 cmdSize, u32 ioSiz
render.m_gcm_buffers_count = 0;
render.m_gcm_current_buffer = 0;
render.m_main_mem_addr = 0;
render.m_label_addr = gcm_info.label_addr;
render.Init(ctx_begin, ctx_size, gcm_info.control_addr, local_addr);

return CELL_OK;
}

int cellGcmResetFlipStatus()
s32 cellGcmResetFlipStatus()
{
cellGcmSys->Log("cellGcmResetFlipStatus()");

@ -402,7 +403,7 @@ int cellGcmResetFlipStatus()
return CELL_OK;
}

int cellGcmSetDebugOutputLevel(int level)
s32 cellGcmSetDebugOutputLevel(s32 level)
{
cellGcmSys->Warning("cellGcmSetDebugOutputLevel(level=%d)", level);

@ -420,7 +421,7 @@ int cellGcmSetDebugOutputLevel(int level)
return CELL_OK;
}

int cellGcmSetDisplayBuffer(u32 id, u32 offset, u32 pitch, u32 width, u32 height)
s32 cellGcmSetDisplayBuffer(u32 id, u32 offset, u32 pitch, u32 width, u32 height)
{
cellGcmSys->Log("cellGcmSetDisplayBuffer(id=0x%x,offset=0x%x,pitch=%d,width=%d,height=%d)", id, offset, width ? pitch / width : pitch, width, height);

@ -443,14 +444,6 @@ int cellGcmSetDisplayBuffer(u32 id, u32 offset, u32 pitch, u32 width, u32 height
return CELL_OK;
}

int cellGcmSetFlip(vm::ptr<CellGcmContextData> ctxt, u32 id)
{
cellGcmSys->Log("cellGcmSetFlip(ctx=0x%x, id=0x%x)", ctxt.addr(), id);

int res = cellGcmSetPrepareFlip(ctxt, id);
return res < 0 ? CELL_GCM_ERROR_FAILURE : CELL_OK;
}

void cellGcmSetFlipHandler(vm::ptr<void(u32)> handler)
{
cellGcmSys->Warning("cellGcmSetFlipHandler(handler_addr=%d)", handler.addr());
@ -458,7 +451,7 @@ void cellGcmSetFlipHandler(vm::ptr<void(u32)> handler)
Emu.GetGSManager().GetRender().m_flip_handler = handler;
}

int cellGcmSetFlipMode(u32 mode)
s32 cellGcmSetFlipMode(u32 mode)
{
cellGcmSys->Warning("cellGcmSetFlipMode(mode=%d)", mode);
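
After _cellGcmInitBody the label area gets its own allocation (gcm_info.label_addr), and both cellGcmGetLabelAddress and the renderer derive label slots from that single base. A simplified standalone sketch; alloc_align is a hypothetical bump allocator and the sizes follow the hunk:

    #include <cstdint>
    #include <cstdio>

    struct GcmInfo { uint32_t context_addr, control_addr, label_addr; };

    static uint32_t alloc_align(uint32_t size, uint32_t align)    // hypothetical allocator
    {
        static uint32_t next = 0x00010000;
        next = (next + align - 1) & ~(align - 1);
        const uint32_t addr = next;
        next += size;
        return addr;
    }

    static GcmInfo gcm_info;

    static uint32_t get_label_address(uint8_t index)              // mirrors cellGcmGetLabelAddress
    {
        return gcm_info.label_addr + 0x10 * index;                // 16 bytes per label slot
    }

    int main()
    {
        gcm_info.context_addr = alloc_align(0x1000, 0x1000);
        gcm_info.control_addr = gcm_info.context_addr + 0x40;
        gcm_info.label_addr   = alloc_align(0x1000, 0x1000);      // separate label area
        std::printf("label[4] at 0x%x\n", get_label_address(4));
    }
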
@ -531,7 +524,15 @@ s32 cellGcmSetPrepareFlip(vm::ptr<CellGcmContextData> ctxt, u32 id)
return id;
}

int cellGcmSetSecondVFrequency(u32 freq)
s32 cellGcmSetFlip(vm::ptr<CellGcmContextData> ctxt, u32 id)
{
cellGcmSys->Log("cellGcmSetFlip(ctx=0x%x, id=0x%x)", ctxt.addr(), id);

s32 res = cellGcmSetPrepareFlip(ctxt, id);
return res < 0 ? CELL_GCM_ERROR_FAILURE : CELL_OK;
}

s32 cellGcmSetSecondVFrequency(u32 freq)
{
cellGcmSys->Warning("cellGcmSetSecondVFrequency(level=%d)", freq);

@ -549,7 +550,7 @@ int cellGcmSetSecondVFrequency(u32 freq)
return CELL_OK;
}

int cellGcmSetTileInfo(u8 index, u8 location, u32 offset, u32 size, u32 pitch, u8 comp, u16 base, u8 bank)
s32 cellGcmSetTileInfo(u8 index, u8 location, u32 offset, u32 size, u32 pitch, u8 comp, u16 base, u8 bank)
{
cellGcmSys->Warning("cellGcmSetTileInfo(index=%d, location=%d, offset=%d, size=%d, pitch=%d, comp=%d, base=%d, bank=%d)",
index, location, offset, size, pitch, comp, base, bank);
@ -604,7 +605,7 @@ void cellGcmSetVBlankHandler(vm::ptr<void(u32)> handler)
Emu.GetGSManager().GetRender().m_vblank_handler = handler;
}

int cellGcmSetWaitFlip(vm::ptr<CellGcmContextData> ctxt)
s32 cellGcmSetWaitFlip(vm::ptr<CellGcmContextData> ctxt)
{
cellGcmSys->Log("cellGcmSetWaitFlip(ctx=0x%x)", ctxt.addr());

@ -612,7 +613,7 @@ int cellGcmSetWaitFlip(vm::ptr<CellGcmContextData> ctxt)
return CELL_OK;
}

int cellGcmSetZcull(u8 index, u32 offset, u32 width, u32 height, u32 cullStart, u32 zFormat, u32 aaFormat, u32 zCullDir, u32 zCullFormat, u32 sFunc, u32 sRef, u32 sMask)
s32 cellGcmSetZcull(u8 index, u32 offset, u32 width, u32 height, u32 cullStart, u32 zFormat, u32 aaFormat, u32 zCullDir, u32 zCullFormat, u32 sFunc, u32 sRef, u32 sMask)
{
cellGcmSys->Todo("cellGcmSetZcull(index=%d, offset=0x%x, width=%d, height=%d, cullStart=0x%x, zFormat=0x%x, aaFormat=0x%x, zCullDir=0x%x, zCullFormat=0x%x, sFunc=0x%x, sRef=0x%x, sMask=0x%x)",
index, offset, width, height, cullStart, zFormat, aaFormat, zCullDir, zCullFormat, sFunc, sRef, sMask);
@ -640,7 +641,7 @@ int cellGcmSetZcull(u8 index, u32 offset, u32 width, u32 height, u32 cullStart,
return CELL_OK;
}

int cellGcmUnbindTile(u8 index)
s32 cellGcmUnbindTile(u8 index)
{
cellGcmSys->Warning("cellGcmUnbindTile(index=%d)", index);

@ -656,7 +657,7 @@ int cellGcmUnbindTile(u8 index)
return CELL_OK;
}

int cellGcmUnbindZcull(u8 index)
s32 cellGcmUnbindZcull(u8 index)
{
cellGcmSys->Warning("cellGcmUnbindZcull(index=%d)", index);

@ -690,7 +691,7 @@ u32 cellGcmGetDisplayInfo()
return Emu.GetGSManager().GetRender().m_gcm_buffers_addr;
}

int cellGcmGetCurrentDisplayBufferId(u32 id_addr)
s32 cellGcmGetCurrentDisplayBufferId(u32 id_addr)
{
cellGcmSys->Warning("cellGcmGetCurrentDisplayBufferId(id_addr=0x%x)", id_addr);

@ -699,19 +700,19 @@ int cellGcmGetCurrentDisplayBufferId(u32 id_addr)
return CELL_OK;
}

int cellGcmSetInvalidateTile()
s32 cellGcmSetInvalidateTile()
{
UNIMPLEMENTED_FUNC(cellGcmSys);
return CELL_OK;
}

int cellGcmDumpGraphicsError()
s32 cellGcmDumpGraphicsError()
{
UNIMPLEMENTED_FUNC(cellGcmSys);
return CELL_OK;
}

int cellGcmGetDisplayBufferByFlipIndex()
s32 cellGcmGetDisplayBufferByFlipIndex()
{
UNIMPLEMENTED_FUNC(cellGcmSys);
return CELL_OK;
@ -724,7 +725,7 @@ u64 cellGcmGetLastFlipTime()
return Emu.GetGSManager().GetRender().m_last_flip_time;
}

int cellGcmGetLastSecondVTime()
s32 cellGcmGetLastSecondVTime()
{
UNIMPLEMENTED_FUNC(cellGcmSys);
return CELL_OK;
@ -737,7 +738,7 @@ u64 cellGcmGetVBlankCount()
return Emu.GetGSManager().GetRender().m_vblank_count;
}

int cellGcmInitSystemMode(u64 mode)
s32 cellGcmInitSystemMode(u64 mode)
{
cellGcmSys->Log("cellGcmInitSystemMode(mode=0x%x)", mode);

@ -746,7 +747,7 @@ int cellGcmInitSystemMode(u64 mode)
return CELL_OK;
}

int cellGcmSetFlipImmediate(u8 id)
s32 cellGcmSetFlipImmediate(u8 id)
{
cellGcmSys->Todo("cellGcmSetFlipImmediate(fid=0x%x)", id);

@ -761,31 +762,31 @@ int cellGcmSetFlipImmediate(u8 id)
return CELL_OK;
}

int cellGcmSetGraphicsHandler()
s32 cellGcmSetGraphicsHandler()
{
UNIMPLEMENTED_FUNC(cellGcmSys);
return CELL_OK;
}

int cellGcmSetQueueHandler()
s32 cellGcmSetQueueHandler()
{
UNIMPLEMENTED_FUNC(cellGcmSys);
return CELL_OK;
}

int cellGcmSetSecondVHandler()
s32 cellGcmSetSecondVHandler()
{
UNIMPLEMENTED_FUNC(cellGcmSys);
return CELL_OK;
}

int cellGcmSetVBlankFrequency()
s32 cellGcmSetVBlankFrequency()
{
UNIMPLEMENTED_FUNC(cellGcmSys);
return CELL_OK;
}

int cellGcmSortRemapEaIoAddress()
s32 cellGcmSortRemapEaIoAddress()
{
UNIMPLEMENTED_FUNC(cellGcmSys);
return CELL_OK;
@ -794,31 +795,35 @@ int cellGcmSortRemapEaIoAddress()
//----------------------------------------------------------------------------
// Memory Mapping
//----------------------------------------------------------------------------
s32 cellGcmAddressToOffset(u64 address, vm::ptr<be_t<u32>> offset)
s32 cellGcmAddressToOffset(u32 address, vm::ptr<be_t<u32>> offset)
{
cellGcmSys->Log("cellGcmAddressToOffset(address=0x%x,offset_addr=0x%x)", address, offset.addr());

// Address not on main memory or local memory
if (address >= 0xD0000000) {
if (address >= 0xD0000000)
{
return CELL_GCM_ERROR_FAILURE;
}

u32 result;

// Address in local memory
if (Memory.RSXFBMem.IsInMyRange(address)) {
result = (u32)(address - Memory.RSXFBMem.GetStartAddr());
if (Memory.RSXFBMem.IsInMyRange(address))
{
result = address - Memory.RSXFBMem.GetStartAddr();
}
// Address in main memory else check
else
{
u16 upper12Bits = offsetTable.ioAddress[address >> 20];
const u32 upper12Bits = offsetTable.ioAddress[address >> 20];

// If the address is mapped in IO
if (upper12Bits != 0xFFFF) {
result = ((u64)upper12Bits << 20) | (address & 0xFFFFF);
if (upper12Bits != 0xFFFF)
{
result = (upper12Bits << 20) | (address & 0xFFFFF);
}
else {
else
{
return CELL_GCM_ERROR_FAILURE;
}
}
@ -842,16 +847,16 @@ void cellGcmGetOffsetTable(vm::ptr<CellGcmOffsetTable> table)
table->eaAddress = offsetTable.eaAddress;
}

s32 cellGcmIoOffsetToAddress(u32 ioOffset, u64 address)
s32 cellGcmIoOffsetToAddress(u32 ioOffset, vm::ptr<u32> address)
{
cellGcmSys->Log("cellGcmIoOffsetToAddress(ioOffset=0x%x, address=0x%llx)", ioOffset, address);
cellGcmSys->Log("cellGcmIoOffsetToAddress(ioOffset=0x%x, address=0x%x)", ioOffset, address);

u64 realAddr;
u32 realAddr;

if (!Memory.RSXIOMem.getRealAddr(ioOffset, realAddr))
return CELL_GCM_ERROR_FAILURE;

vm::write64(address, realAddr);
*address = realAddr;

return CELL_OK;
}
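
The rewritten cellGcmAddressToOffset stays entirely in 32 bits: local-memory addresses become offsets from the start of RSX local memory, and main-memory addresses are translated through the per-megabyte io table (upper 12 bits looked up, lower 20 bits kept). A compact sketch of that translation with made-up base addresses:

    #include <cstdint>
    #include <cstdio>

    static const uint32_t LOCAL_BASE = 0xC0000000;   // pretend start of RSX local memory
    static const uint32_t LOCAL_SIZE = 0x10000000;
    static uint16_t io_table[4096];                  // one entry per 1 MB page, 0xFFFF = unmapped

    static bool address_to_offset(uint32_t address, uint32_t& offset)
    {
        if (address >= LOCAL_BASE && address < LOCAL_BASE + LOCAL_SIZE)
        {
            offset = address - LOCAL_BASE;           // local memory: plain subtraction
            return true;
        }
        const uint32_t upper12 = io_table[address >> 20];
        if (upper12 == 0xFFFF) return false;         // page is not IO-mapped
        offset = (upper12 << 20) | (address & 0xFFFFF);
        return true;
    }

    int main()
    {
        for (auto& e : io_table) e = 0xFFFF;         // start fully unmapped
        io_table[0x30012345u >> 20] = 0x002;         // map that 1 MB page to io page 2
        uint32_t off;
        if (address_to_offset(0x30012345, off)) std::printf("offset = 0x%x\n", off);   // 0x212345
    }
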
@ -896,17 +901,14 @@ s32 cellGcmMapEaIoAddressWithFlags(u32 ea, u32 io, u32 size, u32 flags)
return gcmMapEaIoAddress(ea, io, size, true);
}

s32 cellGcmMapLocalMemory(u64 address, u64 size)
s32 cellGcmMapLocalMemory(vm::ptr<u32> address, vm::ptr<u32> size)
{
cellGcmSys->Warning("cellGcmMapLocalMemory(address=0x%llx, size=0x%llx)", address, size);
cellGcmSys->Warning("cellGcmMapLocalMemory(address=*0x%x, size=*0x%x)", address, size);

if (!local_size && !local_addr)
if (!local_addr && !local_size && Memory.RSXFBMem.AllocFixed(local_addr = Memory.RSXFBMem.GetStartAddr(), local_size = 0xf900000 /* TODO */))
{
local_size = 0xf900000; //TODO
local_addr = (u32)Memory.RSXFBMem.GetStartAddr();
Memory.RSXFBMem.AllocAlign(local_size);
vm::write32(address, local_addr);
vm::write32(size, local_size);
*address = local_addr;
*size = local_size;
}
else
{
@ -969,18 +971,16 @@ s32 cellGcmReserveIoMapSize(u32 size)
return CELL_OK;
}

s32 cellGcmUnmapEaIoAddress(u64 ea)
s32 cellGcmUnmapEaIoAddress(u32 ea)
{
cellGcmSys->Log("cellGcmUnmapEaIoAddress(ea=0x%llx)", ea);
cellGcmSys->Log("cellGcmUnmapEaIoAddress(ea=0x%x)", ea);

u32 size;
if (Memory.RSXIOMem.UnmapRealAddress(ea, size))
{
u64 io;
ea = ea >> 20;
io = offsetTable.ioAddress[ea];
const u32 io = offsetTable.ioAddress[ea >>= 20];

for (u32 i = 0; i<size; i++)
for (u32 i = 0; i < size >> 20; i++)
{
offsetTable.ioAddress[ea + i] = 0xFFFF;
offsetTable.eaAddress[io + i] = 0xFFFF;
@ -988,26 +988,23 @@ s32 cellGcmUnmapEaIoAddress(u64 ea)
}
else
{
cellGcmSys->Error("cellGcmUnmapEaIoAddress : CELL_GCM_ERROR_FAILURE");
cellGcmSys->Error("cellGcmUnmapEaIoAddress(ea=0x%x): UnmapRealAddress() failed");
return CELL_GCM_ERROR_FAILURE;
}

return CELL_OK;
}

s32 cellGcmUnmapIoAddress(u64 io)
s32 cellGcmUnmapIoAddress(u32 io)
{
cellGcmSys->Log("cellGcmUnmapIoAddress(io=0x%llx)", io);
cellGcmSys->Log("cellGcmUnmapIoAddress(io=0x%x)", io);

u32 size;
if (Memory.RSXIOMem.UnmapAddress(io, size))
{
u64 ea;
io = io >> 20;
size = size >> 20;
ea = offsetTable.eaAddress[io];
const u32 ea = offsetTable.eaAddress[io >>= 20];

for (u32 i = 0; i<size; i++)
for (u32 i = 0; i < size >> 20; i++)
{
offsetTable.ioAddress[ea + i] = 0xFFFF;
offsetTable.eaAddress[io + i] = 0xFFFF;
@ -1015,7 +1012,7 @@ s32 cellGcmUnmapIoAddress(u64 io)
}
else
{
cellGcmSys->Error("cellGcmUnmapIoAddress : CELL_GCM_ERROR_FAILURE");
cellGcmSys->Error("cellGcmUnmapIoAddress(io=0x%x): UnmapAddress() failed");
return CELL_GCM_ERROR_FAILURE;
}
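
Both unmap paths used to iterate size times even though the offset tables hold one entry per 1 MB page; the corrected loops shift the byte size right by 20 so only the affected pages are reset. A small sketch of the fixed loop (table sizes and the mapping set up in main are illustrative):

    #include <cstdint>
    #include <cstdio>

    static uint16_t io_table[4096];   // ea page -> io page
    static uint16_t ea_table[4096];   // io page -> ea page

    static void unmap(uint32_t ea, uint32_t size_bytes)
    {
        const uint32_t ea_page = ea >> 20;
        const uint32_t io_page = io_table[ea_page];
        for (uint32_t i = 0; i < size_bytes >> 20; i++)   // iterate pages, not bytes
        {
            io_table[ea_page + i] = 0xFFFF;
            ea_table[io_page + i] = 0xFFFF;
        }
    }

    int main()
    {
        for (int i = 0; i < 4096; i++) { io_table[i] = 0xFFFF; ea_table[i] = 0xFFFF; }
        io_table[0x300] = 0; io_table[0x301] = 1;         // pretend 2 MB mapped at ea 0x30000000
        ea_table[0] = 0x300; ea_table[1] = 0x301;
        unmap(0x30000000, 0x200000);
        std::printf("io_table[0x300] = 0x%x\n", io_table[0x300]);   // 0xffff again
    }
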
@ -1046,37 +1043,37 @@ s32 cellGcmUnreserveIoMapSize(u32 size)
// Cursor
//----------------------------------------------------------------------------

int cellGcmInitCursor()
s32 cellGcmInitCursor()
{
UNIMPLEMENTED_FUNC(cellGcmSys);
return CELL_OK;
}

int cellGcmSetCursorPosition(s32 x, s32 y)
s32 cellGcmSetCursorPosition(s32 x, s32 y)
{
UNIMPLEMENTED_FUNC(cellGcmSys);
return CELL_OK;
}

int cellGcmSetCursorDisable()
s32 cellGcmSetCursorDisable()
{
UNIMPLEMENTED_FUNC(cellGcmSys);
return CELL_OK;
}

int cellGcmUpdateCursor()
s32 cellGcmUpdateCursor()
{
UNIMPLEMENTED_FUNC(cellGcmSys);
return CELL_OK;
}

int cellGcmSetCursorEnable()
s32 cellGcmSetCursorEnable()
{
UNIMPLEMENTED_FUNC(cellGcmSys);
return CELL_OK;
}

int cellGcmSetCursorImageOffset(u32 offset)
s32 cellGcmSetCursorImageOffset(u32 offset)
{
UNIMPLEMENTED_FUNC(cellGcmSys);
return CELL_OK;
@ -1096,24 +1093,24 @@ void cellGcmSetDefaultCommandBuffer()
// Other
//------------------------------------------------------------------------

int cellGcmSetFlipCommand(vm::ptr<CellGcmContextData> ctx, u32 id)
s32 cellGcmSetFlipCommand(vm::ptr<CellGcmContextData> ctx, u32 id)
{
cellGcmSys->Log("cellGcmSetFlipCommand(ctx_addr=0x%x, id=0x%x)", ctx.addr(), id);

return cellGcmSetPrepareFlip(ctx, id);
}

int cellGcmSetFlipCommandWithWaitLabel(vm::ptr<CellGcmContextData> ctx, u32 id, u32 label_index, u32 label_value)
s32 cellGcmSetFlipCommandWithWaitLabel(vm::ptr<CellGcmContextData> ctx, u32 id, u32 label_index, u32 label_value)
{
cellGcmSys->Log("cellGcmSetFlipCommandWithWaitLabel(ctx_addr=0x%x, id=0x%x, label_index=0x%x, label_value=0x%x)",
ctx.addr(), id, label_index, label_value);

int res = cellGcmSetPrepareFlip(ctx, id);
vm::write32(Memory.RSXCMDMem.GetStartAddr() + 0x10 * label_index, label_value);
s32 res = cellGcmSetPrepareFlip(ctx, id);
vm::write32(gcm_info.label_addr + 0x10 * label_index, label_value);
return res < 0 ? CELL_GCM_ERROR_FAILURE : CELL_OK;
}

int cellGcmSetTile(u8 index, u8 location, u32 offset, u32 size, u32 pitch, u8 comp, u16 base, u8 bank)
s32 cellGcmSetTile(u8 index, u8 location, u32 offset, u32 size, u32 pitch, u8 comp, u16 base, u8 bank)
{
cellGcmSys->Warning("cellGcmSetTile(index=%d, location=%d, offset=%d, size=%d, pitch=%d, comp=%d, base=%d, bank=%d)",
index, location, offset, size, pitch, comp, base, bank);
@ -18,23 +18,7 @@ struct CellGcmOffsetTable
};

// Auxiliary functions
void InitOffsetTable();
u32 gcmGetLocalMemorySize();

// libgcm functions
s32 cellGcmSetPrepareFlip(vm::ptr<CellGcmContextData> ctxt, u32 id);

s32 cellGcmAddressToOffset(u64 address, vm::ptr<be_t<u32>> offset);
u32 cellGcmGetMaxIoMapSize();
void cellGcmGetOffsetTable(vm::ptr<CellGcmOffsetTable> table);
s32 cellGcmIoOffsetToAddress(u32 ioOffset, u64 address);
s32 cellGcmMapEaIoAddress(u32 ea, u32 io, u32 size);
s32 cellGcmMapEaIoAddressWithFlags(u32 ea, u32 io, u32 size, u32 flags);
s32 cellGcmMapMainMemory(u32 ea, u32 size, vm::ptr<u32> offset);
s32 cellGcmReserveIoMapSize(u32 size);
s32 cellGcmUnmapEaIoAddress(u64 ea);
s32 cellGcmUnmapIoAddress(u64 io);
s32 cellGcmUnreserveIoMapSize(u32 size);
s32 gcmMapEaIoAddress(u32 ea, u32 io, u32 size, bool is_strict);

// Syscall
s32 cellGcmCallback(vm::ptr<CellGcmContextData> context, u32 count);
@ -376,7 +376,7 @@ s64 cellPngDecCreate(vm::ptr<u32> mainHandle, vm::ptr<const CellPngDecThreadInPa
mainHandle.addr(), threadInParam.addr(), threadOutParam.addr());

// create decoder
if (s32 res = pngDecCreate(mainHandle, threadInParam)) return res;
if (auto res = pngDecCreate(mainHandle, threadInParam)) return res;

// set codec version
threadOutParam->pngCodecVersion = PNGDEC_CODEC_VERSION;
@ -400,7 +400,7 @@ s64 cellPngDecExtCreate(
mainHandle.addr(), threadInParam.addr(), threadOutParam.addr(), extThreadInParam.addr(), extThreadOutParam.addr());

// create decoder
if (s32 res = pngDecCreate(mainHandle, threadInParam, extThreadInParam)) return res;
if (auto res = pngDecCreate(mainHandle, threadInParam, extThreadInParam)) return res;

// set codec version
threadOutParam->pngCodecVersion = PNGDEC_CODEC_VERSION;
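
The cellPngDec hunks only change the declared type of the temporary error code from s32 to auto inside the if-initializer; behaviour is unchanged. The pattern in isolation, with a hypothetical helper standing in for pngDecCreate:

    #include <cstdio>

    static int png_dec_create_stub(bool fail) { return fail ? -1 : 0; }   // hypothetical helper

    static int create(bool fail)
    {
        if (auto res = png_dec_create_stub(fail)) return res;   // non-zero result propagates as the error
        return 0;                                               // success path continues
    }

    int main() { std::printf("%d %d\n", create(false), create(true)); }
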
@ -11,13 +11,13 @@
Module *cellResc = nullptr;

extern s32 cellVideoOutConfigure(u32 videoOut, vm::ptr<CellVideoOutConfiguration> config, vm::ptr<CellVideoOutOption> option, u32 waitForEvent);
extern int cellGcmSetFlipMode(u32 mode);
extern s32 cellGcmSetFlipMode(u32 mode);
extern void cellGcmSetFlipHandler(vm::ptr<void(u32)> handler);
extern void cellGcmSetVBlankHandler(vm::ptr<void(u32)> handler);
extern int cellGcmAddressToOffset(u64 address, vm::ptr<be_t<u32>> offset);
extern int cellGcmSetDisplayBuffer(u32 id, u32 offset, u32 pitch, u32 width, u32 height);
extern int cellGcmSetPrepareFlip(vm::ptr<CellGcmContextData> ctx, u32 id);
extern int cellGcmSetSecondVFrequency(u32 freq);
extern s32 cellGcmAddressToOffset(u32 address, vm::ptr<be_t<u32>> offset);
extern s32 cellGcmSetDisplayBuffer(u32 id, u32 offset, u32 pitch, u32 width, u32 height);
extern s32 cellGcmSetPrepareFlip(vm::ptr<CellGcmContextData> ctx, u32 id);
extern s32 cellGcmSetSecondVFrequency(u32 freq);
extern u32 cellGcmGetLabelAddress(u8 index);
extern u32 cellGcmGetTiledPitchSize(u32 size);
@ -24,8 +24,10 @@
Module *sysPrxForUser = nullptr;

#define TLS_MAX 128
#define TLS_SYS 0x30

u32 g_tls_start; // start of TLS memory area
u32 g_tls_size;

std::array<std::atomic<u32>, TLS_MAX> g_tls_owners;

@ -38,8 +40,9 @@ u32 ppu_get_tls(u32 thread)
{
if (!g_tls_start)
{
g_tls_start = vm::cast(Memory.MainMem.AllocAlign(Emu.GetTLSMemsz() * TLS_MAX, 4096)); // memory for up to TLS_MAX threads
sysPrxForUser->Notice("Thread Local Storage initialized (g_tls_start=0x%x, size = 0x%x)\n*** TLS segment addr: 0x%08x\n*** TLS segment size: 0x%08x",
g_tls_size = Emu.GetTLSMemsz() + TLS_SYS;
g_tls_start = Memory.MainMem.AllocAlign(g_tls_size * TLS_MAX, 4096); // memory for up to TLS_MAX threads
sysPrxForUser->Notice("Thread Local Storage initialized (g_tls_start=0x%x, user_size=0x%x)\n*** TLS segment addr: 0x%08x\n*** TLS segment size: 0x%08x",
g_tls_start, Emu.GetTLSMemsz(), Emu.GetTLSAddr(), Emu.GetTLSFilesz());
}

@ -52,7 +55,7 @@ u32 ppu_get_tls(u32 thread)
{
if (g_tls_owners[i] == thread)
{
return g_tls_start + i * Emu.GetTLSMemsz(); // if already initialized, return TLS address
return g_tls_start + i * g_tls_size + TLS_SYS; // if already initialized, return TLS address
}
}

@ -61,7 +64,8 @@ u32 ppu_get_tls(u32 thread)
u32 old = 0;
if (g_tls_owners[i].compare_exchange_strong(old, thread))
{
const u32 addr = g_tls_start + i * Emu.GetTLSMemsz(); // get TLS address
const u32 addr = g_tls_start + i * g_tls_size + TLS_SYS; // get TLS address
memset(vm::get_ptr(addr - TLS_SYS), 0, TLS_SYS); // initialize system area with zeros
memcpy(vm::get_ptr(addr), vm::get_ptr(Emu.GetTLSAddr()), Emu.GetTLSFilesz()); // initialize from TLS image
memset(vm::get_ptr(addr + Emu.GetTLSFilesz()), 0, Emu.GetTLSMemsz() - Emu.GetTLSFilesz()); // fill the rest with zeros
return addr;
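
The TLS hunks change the per-thread slot layout: each slot is now g_tls_size = user TLS size + TLS_SYS bytes, the 0x30-byte system area sits in front of the user image, and the returned address points just past it. A simplified layout sketch; the sizes are placeholders, since the real ones come from the loaded ELF:

    #include <cstdint>
    #include <cstdio>

    static const uint32_t TLS_SYS = 0x30;

    int main()
    {
        const uint32_t tls_memsz   = 0x200;                 // placeholder for Emu.GetTLSMemsz()
        const uint32_t g_tls_size  = tls_memsz + TLS_SYS;   // per-thread slot, system area included
        const uint32_t g_tls_start = 0x40000;               // placeholder base allocation

        const uint32_t slot = 3;                            // third thread's slot
        const uint32_t addr = g_tls_start + slot * g_tls_size + TLS_SYS;

        std::printf("system area: 0x%x..0x%x, user TLS: 0x%x..0x%x\n",
            addr - TLS_SYS, addr, addr, addr + tls_memsz);
    }
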
@ -88,7 +88,7 @@ SPUThread* spu_thread_initialize(std::shared_ptr<SpuGroupInfo>& group, u32 spu_n
const u32 spu_ep = img.entry_point;
// Copy SPU image:
// TODO: use segment info
const u32 spu_offset = vm::cast(Memory.MainMem.AllocAlign(256 * 1024, 4096));
const u32 spu_offset = Memory.MainMem.AllocAlign(256 * 1024, 4096);
memcpy(vm::get_ptr<void>(spu_offset), vm::get_ptr<void>(img.addr), 256 * 1024);

SPUThread& new_thread = static_cast<SPUThread&>(Emu.GetCPU().AddThread(CPU_THREAD_SPU));
@ -27,7 +27,7 @@ s32 sys_vm_memory_map(u32 vsize, u32 psize, u32 cid, u64 flag, u64 policy, u32 a
}

// Use fixed address (TODO: search and use some free address instead)
u32 new_addr = Memory.IsGoodAddr(0x60000000) ? 0x70000000 : 0x60000000;
u32 new_addr = vm::check_addr(0x60000000) ? 0x70000000 : 0x60000000;

// If container ID is SYS_MEMORY_CONTAINER_ID_INVALID, allocate directly.
if(cid == SYS_MEMORY_CONTAINER_ID_INVALID)
@ -45,7 +45,10 @@ s32 sys_vm_memory_map(u32 vsize, u32 psize, u32 cid, u64 flag, u64 policy, u32 a
}

// Allocate actual memory using virtual size (physical size is ignored)
assert(Memory.Map(new_addr, vsize));
if (!Memory.Map(new_addr, vsize))
{
return CELL_ENOMEM;
}

// Write a pointer for the allocated memory.
vm::write32(addr, new_addr);
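
sys_vm_memory_map now reports a failed mapping to the guest as CELL_ENOMEM instead of asserting, and the fixed-address probe goes through vm::check_addr. A sketch of the failure path; map_stub, the fixed address and the error value are stand-ins for illustration:

    #include <cstdint>
    #include <cstdio>

    static const int32_t CELL_OK = 0;
    static const int32_t CELL_ENOMEM = static_cast<int32_t>(0x80010004);   // illustrative error value

    static bool map_stub(uint32_t /*addr*/, uint32_t vsize) { return vsize <= 0x10000000; }

    static int32_t vm_memory_map(uint32_t vsize, uint32_t& out_addr)
    {
        const uint32_t new_addr = 0x60000000;               // fixed address, as in the hunk
        if (!map_stub(new_addr, vsize))
        {
            return CELL_ENOMEM;                             // report the failure instead of assert()
        }
        out_addr = new_addr;
        return CELL_OK;
    }

    int main()
    {
        uint32_t addr = 0;
        std::printf("%x\n", (unsigned)vm_memory_map(0x20000000, addr));          // prints 80010004
        std::printf("%x at %x\n", (unsigned)vm_memory_map(0x100000, addr), addr);
    }
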
@ -251,7 +251,7 @@ void InterpreterDisAsmFrame::ShowAddr(const u64 addr)
disasm->offset = vm::get_ptr<u8>(CPU->GetOffset());
for(uint i=0, count = 4; i<m_item_count; ++i, PC += count)
{
if(!Memory.IsGoodAddr(CPU->GetOffset() + PC, 4))
if(!vm::check_addr(CPU->GetOffset() + PC, 4))
{
m_list->SetItem(i, 0, wxString(IsBreakPoint(PC) ? ">>> " : " ") + wxString::Format("[%08llx] illegal address", PC));
count = 4;
@ -45,7 +45,7 @@ void MemoryStringSearcher::Search(wxCommandEvent& event)
u32 strIndex = 0;
u32 numFound = 0;
for (u32 addr = Memory.MainMem.GetStartAddr(); addr < Memory.MainMem.GetEndAddr(); addr++) {
if (!Memory.IsGoodAddr(addr)) {
if (!vm::check_addr(addr)) {
strIndex = 0;
continue;
}
@ -200,7 +200,7 @@ void MemoryViewerPanel::ShowMemory()
{
u32 addr = m_addr + row * m_colcount + col;

if (Memory.IsGoodAddr(addr))
if (vm::check_addr(addr))
{
const u8 rmem = vm::read8(addr);
t_mem_hex_str += wxString::Format("%02x ", rmem);
@ -259,14 +259,14 @@ void RSXDebugger::OnChangeToolsAddr(wxCommandEvent& event)

void RSXDebugger::OnScrollMemory(wxMouseEvent& event)
{
if(Memory.IsGoodAddr(m_addr))
if(vm::check_addr(m_addr))
{
int items = event.ControlDown() ? m_item_count : 1;

for(int i=0; i<items; ++i)
{
u32 offset;
if(Memory.IsGoodAddr(m_addr))
if(vm::check_addr(m_addr))
{
u32 cmd = vm::read32(m_addr);
u32 count = (cmd & (CELL_GCM_METHOD_FLAG_JUMP | CELL_GCM_METHOD_FLAG_CALL))
@ -304,7 +304,7 @@ void RSXDebugger::OnClickBuffer(wxMouseEvent& event)
#define SHOW_BUFFER(id) \
{ \
u32 addr = render.m_local_mem_addr + buffers[id].offset; \
if (Memory.IsGoodAddr(addr) && buffers[id].width && buffers[id].height) \
if (vm::check_addr(addr) && buffers[id].width && buffers[id].height) \
MemoryViewerPanel::ShowImage(this, addr, 3, buffers[id].width, buffers[id].height, true); \
return; \
} \
@ -316,7 +316,7 @@ void RSXDebugger::OnClickBuffer(wxMouseEvent& event)
if (event.GetId() == p_buffer_tex->GetId())
{
u8 location = render.m_textures[m_cur_texture].GetLocation();
if(location <= 1 && Memory.IsGoodAddr(GetAddress(render.m_textures[m_cur_texture].GetOffset(), location))
if(location <= 1 && vm::check_addr(GetAddress(render.m_textures[m_cur_texture].GetOffset(), location))
&& render.m_textures[m_cur_texture].GetWidth() && render.m_textures[m_cur_texture].GetHeight())
MemoryViewerPanel::ShowImage(this,
GetAddress(render.m_textures[m_cur_texture].GetOffset(), location), 1,
@ -331,9 +331,9 @@ void RSXDebugger::GoToGet(wxCommandEvent& event)
{
if (!RSXReady()) return;
auto ctrl = vm::get_ptr<CellGcmControl>(Emu.GetGSManager().GetRender().m_ctrlAddress);
u64 realAddr;
u32 realAddr;
if (Memory.RSXIOMem.getRealAddr(ctrl->get.read_relaxed(), realAddr)) {
m_addr = realAddr; // WARNING: Potential Truncation? Cast from u64 to u32
m_addr = realAddr;
t_addr->SetValue(wxString::Format("%08x", m_addr));
UpdateInformation();
event.Skip();
@ -345,9 +345,9 @@ void RSXDebugger::GoToPut(wxCommandEvent& event)
{
if (!RSXReady()) return;
auto ctrl = vm::get_ptr<CellGcmControl>(Emu.GetGSManager().GetRender().m_ctrlAddress);
u64 realAddr;
u32 realAddr;
if (Memory.RSXIOMem.getRealAddr(ctrl->put.read_relaxed(), realAddr)) {
m_addr = realAddr; // WARNING: Potential Truncation? Cast from u64 to u32
m_addr = realAddr;
t_addr->SetValue(wxString::Format("%08x", m_addr));
UpdateInformation();
event.Skip();
@ -380,7 +380,7 @@ void RSXDebugger::GetMemory()
{
m_list_commands->SetItem(i, 0, wxString::Format("%08x", addr));

if (isReady && Memory.IsGoodAddr(addr))
if (isReady && vm::check_addr(addr))
{
u32 cmd = vm::read32(addr);
u32 count = (cmd >> 18) & 0x7ff;
@ -409,13 +409,13 @@ void RSXDebugger::GetBuffers()
// TODO: Currently it only supports color buffers
for (u32 bufferId=0; bufferId < render.m_gcm_buffers_count; bufferId++)
{
if(!Memory.IsGoodAddr(render.m_gcm_buffers_addr))
if(!vm::check_addr(render.m_gcm_buffers_addr))
continue;

auto buffers = vm::get_ptr<CellGcmDisplayInfo>(render.m_gcm_buffers_addr);
u32 RSXbuffer_addr = render.m_local_mem_addr + buffers[bufferId].offset;

if(!Memory.IsGoodAddr(RSXbuffer_addr))
if(!vm::check_addr(RSXbuffer_addr))
continue;

auto RSXbuffer = vm::get_ptr<unsigned char>(RSXbuffer_addr);
@ -467,7 +467,7 @@ void RSXDebugger::GetBuffers()

u32 TexBuffer_addr = GetAddress(offset, location);

if(!Memory.IsGoodAddr(TexBuffer_addr))
if(!vm::check_addr(TexBuffer_addr))
return;

unsigned char* TexBuffer = vm::get_ptr<unsigned char>(TexBuffer_addr);
@ -101,7 +101,7 @@ namespace loader
segment.size = phdr.p_memsz;
segment.size_file = phdr.p_filesz;

segment.begin.set(vm::alloc(segment.size, vm::sprx));
segment.begin.set(vm::alloc(segment.size, vm::main));

if (!segment.begin)
{