VM_CAST macro

Repository: https://github.com/RPCS3/rpcs3.git (RPCS3 mirror)
Commit: 721ad404d2
Parent: 8c00dcd02d
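The diff below mechanically replaces every vm::cast(addr) call in the interpreters with a VM_CAST(addr) macro. The snippet that follows is only a sketch of what such a macro plausibly expands to: a checked narrowing of the 64-bit effective address to a 32-bit guest address that also captures the call site for error reporting. The helper name impl_cast, its signature, and the exception type are assumptions for illustration, not the actual RPCS3 declarations.

#include <cstdint>
#include <stdexcept>
#include <string>

namespace vm
{
    // Hypothetical checked narrowing of a 64-bit effective address to a
    // 32-bit guest address; the real helper in RPCS3 may differ.
    inline std::uint32_t impl_cast(std::uint64_t addr, const char* file, int line, const char* func)
    {
        if (addr >> 32)
        {
            throw std::runtime_error(std::string(func) + " (" + std::string(file) + ":" +
                std::to_string(line) + "): invalid vm address " + std::to_string(addr));
        }
        return static_cast<std::uint32_t>(addr);
    }
}

// The macro forwards the call site automatically, so the many interpreter
// handlers below do not have to pass it by hand.
#define VM_CAST(addr) vm::impl_cast((addr), __FILE__, __LINE__, __func__)

Under that assumption, a handler can write vm::read32(VM_CAST(addr)) and still get a precise error location if addr does not fit in the 32-bit guest address space.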
@@ -7,7 +7,7 @@ namespace vm
     template<typename AT, typename RT, typename... T>
     force_inline RT _ptr_base<RT(T...), AT>::operator()(ARMv7Context& context, T... args) const
     {
-        return psv_func_detail::func_caller<RT, T...>::call(context, vm::cast(this->addr()), args...);
+        return psv_func_detail::func_caller<RT, T...>::call(context, VM_CAST(this->addr()), args...);
     }
 }
 
@@ -1733,7 +1733,7 @@ void ppu_interpreter::LVSL(PPUThread& CPU, ppu_opcode_t op)
 void ppu_interpreter::LVEBX(PPUThread& CPU, ppu_opcode_t op)
 {
     const u64 addr = op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb];
-    CPU.VPR[op.vd]._u8[15 - (addr & 0xf)] = vm::read8(vm::cast(addr));
+    CPU.VPR[op.vd]._u8[15 - (addr & 0xf)] = vm::read8(VM_CAST(addr));
 }
 
 void ppu_interpreter::SUBFC(PPUThread& CPU, ppu_opcode_t op)
@@ -1780,7 +1780,7 @@ void ppu_interpreter::LWARX(PPUThread& CPU, ppu_opcode_t op)
     const u64 addr = op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb];
 
     be_t<u32> value;
-    vm::reservation_acquire(&value, vm::cast(addr), sizeof(value));
+    vm::reservation_acquire(&value, VM_CAST(addr), sizeof32(value));
 
     CPU.GPR[op.rd] = value;
 }
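The reservation hunks also swap sizeof(value) for sizeof32(value). A minimal sketch of what such a utility could be, assuming it is a compile-time-checked sizeof that yields a u32 suitable for a u32 length parameter; the actual RPCS3 definition may differ:

#include <cstdint>
#include <cstddef>

// Hypothetical sketch only: same value as sizeof(x), statically checked to
// fit in 32 bits and produced as a 32-bit unsigned integer.
template<std::size_t Size>
struct sizeof32_checker
{
    static_assert(Size <= UINT32_MAX, "sizeof32: size does not fit in 32 bits");
    static const std::uint32_t value = static_cast<std::uint32_t>(Size);
};

#define sizeof32(x) (sizeof32_checker<sizeof(x)>::value)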
@@ -1788,13 +1788,13 @@ void ppu_interpreter::LWARX(PPUThread& CPU, ppu_opcode_t op)
 void ppu_interpreter::LDX(PPUThread& CPU, ppu_opcode_t op)
 {
     const u64 addr = op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb];
-    CPU.GPR[op.rd] = vm::read64(vm::cast(addr));
+    CPU.GPR[op.rd] = vm::read64(VM_CAST(addr));
 }
 
 void ppu_interpreter::LWZX(PPUThread& CPU, ppu_opcode_t op)
 {
     const u64 addr = op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb];
-    CPU.GPR[op.rd] = vm::read32(vm::cast(addr));
+    CPU.GPR[op.rd] = vm::read32(VM_CAST(addr));
 }
 
 void ppu_interpreter::SLW(PPUThread& CPU, ppu_opcode_t op)
@@ -1873,7 +1873,7 @@ void ppu_interpreter::LVSR(PPUThread& CPU, ppu_opcode_t op)
 void ppu_interpreter::LVEHX(PPUThread& CPU, ppu_opcode_t op)
 {
     const u64 addr = (op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb]) & ~1ULL;
-    CPU.VPR[op.vd]._u16[7 - ((addr >> 1) & 0x7)] = vm::read16(vm::cast(addr));
+    CPU.VPR[op.vd]._u16[7 - ((addr >> 1) & 0x7)] = vm::read16(VM_CAST(addr));
 }
 
 void ppu_interpreter::SUBF(PPUThread& CPU, ppu_opcode_t op)
@@ -1888,7 +1888,7 @@ void ppu_interpreter::SUBF(PPUThread& CPU, ppu_opcode_t op)
 void ppu_interpreter::LDUX(PPUThread& CPU, ppu_opcode_t op)
 {
     const u64 addr = CPU.GPR[op.ra] + CPU.GPR[op.rb];
-    CPU.GPR[op.rd] = vm::read64(vm::cast(addr));
+    CPU.GPR[op.rd] = vm::read64(VM_CAST(addr));
     CPU.GPR[op.ra] = addr;
 }
 
@@ -1899,7 +1899,7 @@ void ppu_interpreter::DCBST(PPUThread& CPU, ppu_opcode_t op)
 void ppu_interpreter::LWZUX(PPUThread& CPU, ppu_opcode_t op)
 {
     const u64 addr = CPU.GPR[op.ra] + CPU.GPR[op.rb];
-    CPU.GPR[op.rd] = vm::read32(vm::cast(addr));
+    CPU.GPR[op.rd] = vm::read32(VM_CAST(addr));
     CPU.GPR[op.ra] = addr;
 }
 
@@ -1929,7 +1929,7 @@ void ppu_interpreter::TD(PPUThread& CPU, ppu_opcode_t op)
 void ppu_interpreter::LVEWX(PPUThread& CPU, ppu_opcode_t op)
 {
     const u64 addr = (op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb]) & ~3ULL;
-    CPU.VPR[op.vd]._u32[3 - ((addr >> 2) & 0x3)] = vm::read32(vm::cast(addr));
+    CPU.VPR[op.vd]._u32[3 - ((addr >> 2) & 0x3)] = vm::read32(VM_CAST(addr));
 }
 
 void ppu_interpreter::MULHD(PPUThread& CPU, ppu_opcode_t op)
@@ -1951,7 +1951,7 @@ void ppu_interpreter::LDARX(PPUThread& CPU, ppu_opcode_t op)
     const u64 addr = op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb];
 
     be_t<u64> value;
-    vm::reservation_acquire(&value, vm::cast(addr), sizeof(value));
+    vm::reservation_acquire(&value, VM_CAST(addr), sizeof32(value));
 
     CPU.GPR[op.rd] = value;
 }
@@ -1963,13 +1963,13 @@ void ppu_interpreter::DCBF(PPUThread& CPU, ppu_opcode_t op)
 void ppu_interpreter::LBZX(PPUThread& CPU, ppu_opcode_t op)
 {
     const u64 addr = op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb];
-    CPU.GPR[op.rd] = vm::read8(vm::cast(addr));
+    CPU.GPR[op.rd] = vm::read8(VM_CAST(addr));
 }
 
 void ppu_interpreter::LVX(PPUThread& CPU, ppu_opcode_t op)
 {
     const u64 addr = (op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb]) & ~0xfull;
-    CPU.VPR[op.vd] = vm::read128(vm::cast(addr));
+    CPU.VPR[op.vd] = vm::read128(VM_CAST(addr));
 }
 
 void ppu_interpreter::NEG(PPUThread& CPU, ppu_opcode_t op)
@@ -1983,7 +1983,7 @@ void ppu_interpreter::NEG(PPUThread& CPU, ppu_opcode_t op)
 void ppu_interpreter::LBZUX(PPUThread& CPU, ppu_opcode_t op)
 {
     const u64 addr = CPU.GPR[op.ra] + CPU.GPR[op.rb];
-    CPU.GPR[op.rd] = vm::read8(vm::cast(addr));
+    CPU.GPR[op.rd] = vm::read8(VM_CAST(addr));
     CPU.GPR[op.ra] = addr;
 }
 
@@ -1997,7 +1997,7 @@ void ppu_interpreter::STVEBX(PPUThread& CPU, ppu_opcode_t op)
 {
     const u64 addr = op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb];
     const u8 eb = addr & 0xf;
-    vm::write8(vm::cast(addr), CPU.VPR[op.vs]._u8[15 - eb]);
+    vm::write8(VM_CAST(addr), CPU.VPR[op.vs]._u8[15 - eb]);
 }
 
 void ppu_interpreter::SUBFE(PPUThread& CPU, ppu_opcode_t op)
@@ -2073,7 +2073,7 @@ void ppu_interpreter::MTOCRF(PPUThread& CPU, ppu_opcode_t op)
 void ppu_interpreter::STDX(PPUThread& CPU, ppu_opcode_t op)
 {
     const u64 addr = op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb];
-    vm::write64(vm::cast(addr), CPU.GPR[op.rs]);
+    vm::write64(VM_CAST(addr), CPU.GPR[op.rs]);
 }
 
 void ppu_interpreter::STWCX_(PPUThread& CPU, ppu_opcode_t op)
@@ -2081,33 +2081,33 @@ void ppu_interpreter::STWCX_(PPUThread& CPU, ppu_opcode_t op)
     const u64 addr = op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb];
 
     const be_t<u32> value = (u32)CPU.GPR[op.rs];
-    CPU.SetCR_EQ(0, vm::reservation_update(vm::cast(addr), &value, sizeof(value)));
+    CPU.SetCR_EQ(0, vm::reservation_update(VM_CAST(addr), &value, sizeof32(value)));
 }
 
 void ppu_interpreter::STWX(PPUThread& CPU, ppu_opcode_t op)
 {
     const u64 addr = op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb];
-    vm::write32(vm::cast(addr), (u32)CPU.GPR[op.rs]);
+    vm::write32(VM_CAST(addr), (u32)CPU.GPR[op.rs]);
 }
 
 void ppu_interpreter::STVEHX(PPUThread& CPU, ppu_opcode_t op)
 {
     const u64 addr = (op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb]) & ~1ULL;
     const u8 eb = (addr & 0xf) >> 1;
-    vm::write16(vm::cast(addr), CPU.VPR[op.vs]._u16[7 - eb]);
+    vm::write16(VM_CAST(addr), CPU.VPR[op.vs]._u16[7 - eb]);
 }
 
 void ppu_interpreter::STDUX(PPUThread& CPU, ppu_opcode_t op)
 {
     const u64 addr = CPU.GPR[op.ra] + CPU.GPR[op.rb];
-    vm::write64(vm::cast(addr), CPU.GPR[op.rs]);
+    vm::write64(VM_CAST(addr), CPU.GPR[op.rs]);
     CPU.GPR[op.ra] = addr;
 }
 
 void ppu_interpreter::STWUX(PPUThread& CPU, ppu_opcode_t op)
 {
     const u64 addr = CPU.GPR[op.ra] + CPU.GPR[op.rb];
-    vm::write32(vm::cast(addr), (u32)CPU.GPR[op.rs]);
+    vm::write32(VM_CAST(addr), (u32)CPU.GPR[op.rs]);
     CPU.GPR[op.ra] = addr;
 }
 
@@ -2115,7 +2115,7 @@ void ppu_interpreter::STVEWX(PPUThread& CPU, ppu_opcode_t op)
 {
     const u64 addr = (op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb]) & ~3ULL;
     const u8 eb = (addr & 0xf) >> 2;
-    vm::write32(vm::cast(addr), CPU.VPR[op.vs]._u32[3 - eb]);
+    vm::write32(VM_CAST(addr), CPU.VPR[op.vs]._u32[3 - eb]);
 }
 
 void ppu_interpreter::SUBFZE(PPUThread& CPU, ppu_opcode_t op)
@ -2141,19 +2141,19 @@ void ppu_interpreter::STDCX_(PPUThread& CPU, ppu_opcode_t op)
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb];
|
||||
|
||||
const be_t<u64> value = CPU.GPR[op.rs];
|
||||
CPU.SetCR_EQ(0, vm::reservation_update(vm::cast(addr), &value, sizeof(value)));
|
||||
CPU.SetCR_EQ(0, vm::reservation_update(VM_CAST(addr), &value, sizeof32(value)));
|
||||
}
|
||||
|
||||
void ppu_interpreter::STBX(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb];
|
||||
vm::write8(vm::cast(addr), (u8)CPU.GPR[op.rs]);
|
||||
vm::write8(VM_CAST(addr), (u8)CPU.GPR[op.rs]);
|
||||
}
|
||||
|
||||
void ppu_interpreter::STVX(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = (op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb]) & ~0xfull;
|
||||
vm::write128(vm::cast(addr), CPU.VPR[op.vs]);
|
||||
vm::write128(VM_CAST(addr), CPU.VPR[op.vs]);
|
||||
}
|
||||
|
||||
void ppu_interpreter::MULLD(PPUThread& CPU, ppu_opcode_t op)
|
||||
@ -2202,7 +2202,7 @@ void ppu_interpreter::DCBTST(PPUThread& CPU, ppu_opcode_t op)
|
||||
void ppu_interpreter::STBUX(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = CPU.GPR[op.ra] + CPU.GPR[op.rb];
|
||||
vm::write8(vm::cast(addr), (u8)CPU.GPR[op.rs]);
|
||||
vm::write8(VM_CAST(addr), (u8)CPU.GPR[op.rs]);
|
||||
CPU.GPR[op.ra] = addr;
|
||||
}
|
||||
|
||||
@ -2222,7 +2222,7 @@ void ppu_interpreter::DCBT(PPUThread& CPU, ppu_opcode_t op)
|
||||
void ppu_interpreter::LHZX(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb];
|
||||
CPU.GPR[op.rd] = vm::read16(vm::cast(addr));
|
||||
CPU.GPR[op.rd] = vm::read16(VM_CAST(addr));
|
||||
}
|
||||
|
||||
void ppu_interpreter::EQV(PPUThread& CPU, ppu_opcode_t op)
|
||||
@ -2239,7 +2239,7 @@ void ppu_interpreter::ECIWX(PPUThread& CPU, ppu_opcode_t op)
|
||||
void ppu_interpreter::LHZUX(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb];
|
||||
CPU.GPR[op.rd] = vm::read16(vm::cast(addr));
|
||||
CPU.GPR[op.rd] = vm::read16(VM_CAST(addr));
|
||||
CPU.GPR[op.ra] = addr;
|
||||
}
|
||||
|
||||
@ -2280,7 +2280,7 @@ void ppu_interpreter::MFSPR(PPUThread& CPU, ppu_opcode_t op)
|
||||
void ppu_interpreter::LWAX(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb];
|
||||
CPU.GPR[op.rd] = (s64)(s32)vm::read32(vm::cast(addr));
|
||||
CPU.GPR[op.rd] = (s64)(s32)vm::read32(VM_CAST(addr));
|
||||
}
|
||||
|
||||
void ppu_interpreter::DST(PPUThread& CPU, ppu_opcode_t op)
|
||||
@ -2290,13 +2290,13 @@ void ppu_interpreter::DST(PPUThread& CPU, ppu_opcode_t op)
|
||||
void ppu_interpreter::LHAX(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb];
|
||||
CPU.GPR[op.rd] = (s64)(s16)vm::read16(vm::cast(addr));
|
||||
CPU.GPR[op.rd] = (s64)(s16)vm::read16(VM_CAST(addr));
|
||||
}
|
||||
|
||||
void ppu_interpreter::LVXL(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = (op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb]) & ~0xfull;
|
||||
CPU.VPR[op.vd] = vm::read128(vm::cast(addr));
|
||||
CPU.VPR[op.vd] = vm::read128(VM_CAST(addr));
|
||||
}
|
||||
|
||||
void ppu_interpreter::MFTB(PPUThread& CPU, ppu_opcode_t op)
|
||||
@ -2315,7 +2315,7 @@ void ppu_interpreter::MFTB(PPUThread& CPU, ppu_opcode_t op)
|
||||
void ppu_interpreter::LWAUX(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb];
|
||||
CPU.GPR[op.rd] = (s64)(s32)vm::read32(vm::cast(addr));
|
||||
CPU.GPR[op.rd] = (s64)(s32)vm::read32(VM_CAST(addr));
|
||||
CPU.GPR[op.ra] = addr;
|
||||
}
|
||||
|
||||
@ -2326,14 +2326,14 @@ void ppu_interpreter::DSTST(PPUThread& CPU, ppu_opcode_t op)
|
||||
void ppu_interpreter::LHAUX(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb];
|
||||
CPU.GPR[op.rd] = (s64)(s16)vm::read16(vm::cast(addr));
|
||||
CPU.GPR[op.rd] = (s64)(s16)vm::read16(VM_CAST(addr));
|
||||
CPU.GPR[op.ra] = addr;
|
||||
}
|
||||
|
||||
void ppu_interpreter::STHX(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb];
|
||||
vm::write16(vm::cast(addr), (u16)CPU.GPR[op.rs]);
|
||||
vm::write16(VM_CAST(addr), (u16)CPU.GPR[op.rs]);
|
||||
}
|
||||
|
||||
void ppu_interpreter::ORC(PPUThread& CPU, ppu_opcode_t op)
|
||||
@ -2350,7 +2350,7 @@ void ppu_interpreter::ECOWX(PPUThread& CPU, ppu_opcode_t op)
|
||||
void ppu_interpreter::STHUX(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = CPU.GPR[op.ra] + CPU.GPR[op.rb];
|
||||
vm::write16(vm::cast(addr), (u16)CPU.GPR[op.rs]);
|
||||
vm::write16(VM_CAST(addr), (u16)CPU.GPR[op.rs]);
|
||||
CPU.GPR[op.ra] = addr;
|
||||
}
|
||||
|
||||
@ -2436,7 +2436,7 @@ void ppu_interpreter::NAND(PPUThread& CPU, ppu_opcode_t op)
|
||||
void ppu_interpreter::STVXL(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = (op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb]) & ~0xfull;
|
||||
vm::write128(vm::cast(addr), CPU.VPR[op.vs]);
|
||||
vm::write128(VM_CAST(addr), CPU.VPR[op.vs]);
|
||||
}
|
||||
|
||||
void ppu_interpreter::DIVD(PPUThread& CPU, ppu_opcode_t op)
|
||||
@ -2483,13 +2483,13 @@ void ppu_interpreter::LVLX(PPUThread& CPU, ppu_opcode_t op)
|
||||
const u32 eb = addr & 0xf;
|
||||
|
||||
CPU.VPR[op.vd].clear();
|
||||
for (u32 i = 0; i < 16u - eb; ++i) CPU.VPR[op.vd]._u8[15 - i] = vm::read8(vm::cast(addr + i));
|
||||
for (u32 i = 0; i < 16u - eb; ++i) CPU.VPR[op.vd]._u8[15 - i] = vm::read8(VM_CAST(addr + i));
|
||||
}
|
||||
|
||||
void ppu_interpreter::LDBRX(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb];
|
||||
CPU.GPR[op.rd] = vm::get_ref<u64>(vm::cast(addr));
|
||||
CPU.GPR[op.rd] = vm::get_ref<u64>(VM_CAST(addr));
|
||||
}
|
||||
|
||||
void ppu_interpreter::LSWX(PPUThread& CPU, ppu_opcode_t op)
|
||||
@ -2498,14 +2498,14 @@ void ppu_interpreter::LSWX(PPUThread& CPU, ppu_opcode_t op)
|
||||
u32 count = CPU.XER.XER & 0x7F;
|
||||
for (; count >= 4; count -= 4, addr += 4, op.rd = (op.rd + 1) & 31)
|
||||
{
|
||||
CPU.GPR[op.rd] = vm::get_ref<be_t<u32>>(vm::cast(addr));
|
||||
CPU.GPR[op.rd] = vm::get_ref<be_t<u32>>(VM_CAST(addr));
|
||||
}
|
||||
if (count)
|
||||
{
|
||||
u32 value = 0;
|
||||
for (u32 byte = 0; byte < count; byte++)
|
||||
{
|
||||
u32 byte_value = vm::get_ref<u8>(vm::cast(addr + byte));
|
||||
u32 byte_value = vm::get_ref<u8>(VM_CAST(addr + byte));
|
||||
value |= byte_value << ((3 ^ byte) * 8);
|
||||
}
|
||||
CPU.GPR[op.rd] = value;
|
||||
@ -2515,13 +2515,13 @@ void ppu_interpreter::LSWX(PPUThread& CPU, ppu_opcode_t op)
|
||||
void ppu_interpreter::LWBRX(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb];
|
||||
CPU.GPR[op.rd] = vm::get_ref<u32>(vm::cast(addr));
|
||||
CPU.GPR[op.rd] = vm::get_ref<u32>(VM_CAST(addr));
|
||||
}
|
||||
|
||||
void ppu_interpreter::LFSX(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb];
|
||||
CPU.FPR[op.frd]._double = vm::get_ref<be_t<float>>(vm::cast(addr)).value();
|
||||
CPU.FPR[op.frd]._double = vm::get_ref<be_t<float>>(VM_CAST(addr)).value();
|
||||
}
|
||||
|
||||
void ppu_interpreter::SRW(PPUThread& CPU, ppu_opcode_t op)
|
||||
@ -2550,7 +2550,7 @@ void ppu_interpreter::LVRX(PPUThread& CPU, ppu_opcode_t op)
|
||||
const u8 eb = addr & 0xf;
|
||||
|
||||
CPU.VPR[op.vd].clear();
|
||||
for (u32 i = 16 - eb; i < 16; ++i) CPU.VPR[op.vd]._u8[15 - i] = vm::read8(vm::cast(addr + i - 16));
|
||||
for (u32 i = 16 - eb; i < 16; ++i) CPU.VPR[op.vd]._u8[15 - i] = vm::read8(VM_CAST(addr + i - 16));
|
||||
}
|
||||
|
||||
void ppu_interpreter::LSWI(PPUThread& CPU, ppu_opcode_t op)
|
||||
@ -2563,7 +2563,7 @@ void ppu_interpreter::LSWI(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
if (N > 3)
|
||||
{
|
||||
CPU.GPR[reg] = vm::read32(vm::cast(addr));
|
||||
CPU.GPR[reg] = vm::read32(VM_CAST(addr));
|
||||
addr += 4;
|
||||
N -= 4;
|
||||
}
|
||||
@ -2574,7 +2574,7 @@ void ppu_interpreter::LSWI(PPUThread& CPU, ppu_opcode_t op)
|
||||
while (N > 0)
|
||||
{
|
||||
N = N - 1;
|
||||
buf |= vm::read8(vm::cast(addr)) << (i * 8);
|
||||
buf |= vm::read8(VM_CAST(addr)) << (i * 8);
|
||||
addr++;
|
||||
i--;
|
||||
}
|
||||
@ -2587,7 +2587,7 @@ void ppu_interpreter::LSWI(PPUThread& CPU, ppu_opcode_t op)
|
||||
void ppu_interpreter::LFSUX(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = CPU.GPR[op.ra] + CPU.GPR[op.rb];
|
||||
CPU.FPR[op.frd]._double = vm::get_ref<be_t<float>>(vm::cast(addr)).value();
|
||||
CPU.FPR[op.frd]._double = vm::get_ref<be_t<float>>(VM_CAST(addr)).value();
|
||||
CPU.GPR[op.ra] = addr;
|
||||
}
|
||||
|
||||
@ -2599,13 +2599,13 @@ void ppu_interpreter::SYNC(PPUThread& CPU, ppu_opcode_t op)
|
||||
void ppu_interpreter::LFDX(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb];
|
||||
CPU.FPR[op.frd]._double = vm::get_ref<be_t<double>>(vm::cast(addr)).value();
|
||||
CPU.FPR[op.frd]._double = vm::get_ref<be_t<double>>(VM_CAST(addr)).value();
|
||||
}
|
||||
|
||||
void ppu_interpreter::LFDUX(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = CPU.GPR[op.ra] + CPU.GPR[op.rb];
|
||||
CPU.FPR[op.frd]._double = vm::get_ref<be_t<double>>(vm::cast(addr)).value();
|
||||
CPU.FPR[op.frd]._double = vm::get_ref<be_t<double>>(VM_CAST(addr)).value();
|
||||
CPU.GPR[op.ra] = addr;
|
||||
}
|
||||
|
||||
@ -2614,13 +2614,13 @@ void ppu_interpreter::STVLX(PPUThread& CPU, ppu_opcode_t op)
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb];
|
||||
const u32 eb = addr & 0xf;
|
||||
|
||||
for (u32 i = 0; i < 16u - eb; ++i) vm::write8(vm::cast(addr + i), CPU.VPR[op.vs]._u8[15 - i]);
|
||||
for (u32 i = 0; i < 16u - eb; ++i) vm::write8(VM_CAST(addr + i), CPU.VPR[op.vs]._u8[15 - i]);
|
||||
}
|
||||
|
||||
void ppu_interpreter::STDBRX(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb];
|
||||
vm::get_ref<u64>(vm::cast(addr)) = CPU.GPR[op.rs];
|
||||
vm::get_ref<u64>(VM_CAST(addr)) = CPU.GPR[op.rs];
|
||||
}
|
||||
|
||||
void ppu_interpreter::STSWX(PPUThread& CPU, ppu_opcode_t op)
|
||||
@ -2629,7 +2629,7 @@ void ppu_interpreter::STSWX(PPUThread& CPU, ppu_opcode_t op)
|
||||
u32 count = CPU.XER.XER & 0x7F;
|
||||
for (; count >= 4; count -= 4, addr += 4, op.rs = (op.rs + 1) & 31)
|
||||
{
|
||||
vm::write32(vm::cast(addr), (u32)CPU.GPR[op.rs]);
|
||||
vm::write32(VM_CAST(addr), (u32)CPU.GPR[op.rs]);
|
||||
}
|
||||
if (count)
|
||||
{
|
||||
@ -2637,7 +2637,7 @@ void ppu_interpreter::STSWX(PPUThread& CPU, ppu_opcode_t op)
|
||||
for (u32 byte = 0; byte < count; byte++)
|
||||
{
|
||||
u32 byte_value = (u8)(value >> ((3 ^ byte) * 8));
|
||||
vm::write8(vm::cast(addr + byte), byte_value);
|
||||
vm::write8(VM_CAST(addr + byte), byte_value);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -2645,13 +2645,13 @@ void ppu_interpreter::STSWX(PPUThread& CPU, ppu_opcode_t op)
|
||||
void ppu_interpreter::STWBRX(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb];
|
||||
vm::get_ref<u32>(vm::cast(addr)) = (u32)CPU.GPR[op.rs];
|
||||
vm::get_ref<u32>(VM_CAST(addr)) = (u32)CPU.GPR[op.rs];
|
||||
}
|
||||
|
||||
void ppu_interpreter::STFSX(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb];
|
||||
vm::get_ref<be_t<float>>(vm::cast(addr)) = static_cast<float>(CPU.FPR[op.frs]);
|
||||
vm::get_ref<be_t<float>>(VM_CAST(addr)) = static_cast<float>(CPU.FPR[op.frs]);
|
||||
}
|
||||
|
||||
void ppu_interpreter::STVRX(PPUThread& CPU, ppu_opcode_t op)
|
||||
@ -2659,13 +2659,13 @@ void ppu_interpreter::STVRX(PPUThread& CPU, ppu_opcode_t op)
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb];
|
||||
const u8 eb = addr & 0xf;
|
||||
|
||||
for (u32 i = 16 - eb; i < 16; ++i) vm::write8(vm::cast(addr + i - 16), CPU.VPR[op.vs]._u8[15 - i]);
|
||||
for (u32 i = 16 - eb; i < 16; ++i) vm::write8(VM_CAST(addr + i - 16), CPU.VPR[op.vs]._u8[15 - i]);
|
||||
}
|
||||
|
||||
void ppu_interpreter::STFSUX(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = CPU.GPR[op.ra] + CPU.GPR[op.rb];
|
||||
vm::get_ref<be_t<float>>(vm::cast(addr)) = static_cast<float>(CPU.FPR[op.frs]);
|
||||
vm::get_ref<be_t<float>>(VM_CAST(addr)) = static_cast<float>(CPU.FPR[op.frs]);
|
||||
CPU.GPR[op.ra] = addr;
|
||||
}
|
||||
|
||||
@ -2679,7 +2679,7 @@ void ppu_interpreter::STSWI(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
if (N > 3)
|
||||
{
|
||||
vm::write32(vm::cast(addr), (u32)CPU.GPR[reg]);
|
||||
vm::write32(VM_CAST(addr), (u32)CPU.GPR[reg]);
|
||||
addr += 4;
|
||||
N -= 4;
|
||||
}
|
||||
@ -2689,7 +2689,7 @@ void ppu_interpreter::STSWI(PPUThread& CPU, ppu_opcode_t op)
|
||||
while (N > 0)
|
||||
{
|
||||
N = N - 1;
|
||||
vm::write8(vm::cast(addr), (0xFF000000 & buf) >> 24);
|
||||
vm::write8(VM_CAST(addr), (0xFF000000 & buf) >> 24);
|
||||
buf <<= 8;
|
||||
addr++;
|
||||
}
|
||||
@ -2701,13 +2701,13 @@ void ppu_interpreter::STSWI(PPUThread& CPU, ppu_opcode_t op)
|
||||
void ppu_interpreter::STFDX(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb];
|
||||
vm::get_ref<be_t<double>>(vm::cast(addr)) = CPU.FPR[op.frs];
|
||||
vm::get_ref<be_t<double>>(VM_CAST(addr)) = CPU.FPR[op.frs];
|
||||
}
|
||||
|
||||
void ppu_interpreter::STFDUX(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = CPU.GPR[op.ra] + CPU.GPR[op.rb];
|
||||
vm::get_ref<be_t<double>>(vm::cast(addr)) = CPU.FPR[op.frs];
|
||||
vm::get_ref<be_t<double>>(VM_CAST(addr)) = CPU.FPR[op.frs];
|
||||
CPU.GPR[op.ra] = addr;
|
||||
}
|
||||
|
||||
@ -2717,13 +2717,13 @@ void ppu_interpreter::LVLXL(PPUThread& CPU, ppu_opcode_t op)
|
||||
const u32 eb = addr & 0xf;
|
||||
|
||||
CPU.VPR[op.vd].clear();
|
||||
for (u32 i = 0; i < 16u - eb; ++i) CPU.VPR[op.vd]._u8[15 - i] = vm::read8(vm::cast(addr + i));
|
||||
for (u32 i = 0; i < 16u - eb; ++i) CPU.VPR[op.vd]._u8[15 - i] = vm::read8(VM_CAST(addr + i));
|
||||
}
|
||||
|
||||
void ppu_interpreter::LHBRX(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb];
|
||||
CPU.GPR[op.rd] = vm::get_ref<u16>(vm::cast(addr));
|
||||
CPU.GPR[op.rd] = vm::get_ref<u16>(VM_CAST(addr));
|
||||
}
|
||||
|
||||
void ppu_interpreter::SRAW(PPUThread& CPU, ppu_opcode_t op)
|
||||
@ -2768,7 +2768,7 @@ void ppu_interpreter::LVRXL(PPUThread& CPU, ppu_opcode_t op)
|
||||
const u8 eb = addr & 0xf;
|
||||
|
||||
CPU.VPR[op.vd].clear();
|
||||
for (u32 i = 16 - eb; i < 16; ++i) CPU.VPR[op.vd]._u8[15 - i] = vm::read8(vm::cast(addr + i - 16));
|
||||
for (u32 i = 16 - eb; i < 16; ++i) CPU.VPR[op.vd]._u8[15 - i] = vm::read8(VM_CAST(addr + i - 16));
|
||||
}
|
||||
|
||||
void ppu_interpreter::DSS(PPUThread& CPU, ppu_opcode_t op)
|
||||
@ -2804,13 +2804,13 @@ void ppu_interpreter::STVLXL(PPUThread& CPU, ppu_opcode_t op)
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb];
|
||||
const u32 eb = addr & 0xf;
|
||||
|
||||
for (u32 i = 0; i < 16u - eb; ++i) vm::write8(vm::cast(addr + i), CPU.VPR[op.vs]._u8[15 - i]);
|
||||
for (u32 i = 0; i < 16u - eb; ++i) vm::write8(VM_CAST(addr + i), CPU.VPR[op.vs]._u8[15 - i]);
|
||||
}
|
||||
|
||||
void ppu_interpreter::STHBRX(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb];
|
||||
vm::get_ref<u16>(vm::cast(addr)) = (u16)CPU.GPR[op.rs];
|
||||
vm::get_ref<u16>(VM_CAST(addr)) = (u16)CPU.GPR[op.rs];
|
||||
}
|
||||
|
||||
void ppu_interpreter::EXTSH(PPUThread& CPU, ppu_opcode_t op)
|
||||
@ -2824,7 +2824,7 @@ void ppu_interpreter::STVRXL(PPUThread& CPU, ppu_opcode_t op)
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb];
|
||||
const u8 eb = addr & 0xf;
|
||||
|
||||
for (u32 i = 16 - eb; i < 16; ++i) vm::write8(vm::cast(addr + i - 16), CPU.VPR[op.vs]._u8[15 - i]);
|
||||
for (u32 i = 16 - eb; i < 16; ++i) vm::write8(VM_CAST(addr + i - 16), CPU.VPR[op.vs]._u8[15 - i]);
|
||||
}
|
||||
|
||||
void ppu_interpreter::EXTSB(PPUThread& CPU, ppu_opcode_t op)
|
||||
@ -2836,7 +2836,7 @@ void ppu_interpreter::EXTSB(PPUThread& CPU, ppu_opcode_t op)
|
||||
void ppu_interpreter::STFIWX(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb];
|
||||
vm::write32(vm::cast(addr), (u32&)CPU.FPR[op.frs]);
|
||||
vm::write32(VM_CAST(addr), (u32&)CPU.FPR[op.frs]);
|
||||
}
|
||||
|
||||
void ppu_interpreter::EXTSW(PPUThread& CPU, ppu_opcode_t op)
|
||||
@ -2853,97 +2853,97 @@ void ppu_interpreter::DCBZ(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + CPU.GPR[op.rb] : CPU.GPR[op.rb];
|
||||
|
||||
memset(vm::get_ptr<u8>(vm::cast(addr) & ~127), 0, 128);
|
||||
memset(vm::get_ptr<u8>(VM_CAST(addr) & ~127), 0, 128);
|
||||
}
|
||||
|
||||
void ppu_interpreter::LWZ(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + op.simm16 : op.simm16;
|
||||
CPU.GPR[op.rd] = vm::read32(vm::cast(addr));
|
||||
CPU.GPR[op.rd] = vm::read32(VM_CAST(addr));
|
||||
}
|
||||
|
||||
void ppu_interpreter::LWZU(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = CPU.GPR[op.ra] + op.simm16;
|
||||
CPU.GPR[op.rd] = vm::read32(vm::cast(addr));
|
||||
CPU.GPR[op.rd] = vm::read32(VM_CAST(addr));
|
||||
CPU.GPR[op.ra] = addr;
|
||||
}
|
||||
|
||||
void ppu_interpreter::LBZ(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + op.simm16 : op.simm16;
|
||||
CPU.GPR[op.rd] = vm::read8(vm::cast(addr));
|
||||
CPU.GPR[op.rd] = vm::read8(VM_CAST(addr));
|
||||
}
|
||||
|
||||
void ppu_interpreter::LBZU(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = CPU.GPR[op.ra] + op.simm16;
|
||||
CPU.GPR[op.rd] = vm::read8(vm::cast(addr));
|
||||
CPU.GPR[op.rd] = vm::read8(VM_CAST(addr));
|
||||
CPU.GPR[op.ra] = addr;
|
||||
}
|
||||
|
||||
void ppu_interpreter::STW(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + op.simm16 : op.simm16;
|
||||
vm::write32(vm::cast(addr), (u32)CPU.GPR[op.rs]);
|
||||
vm::write32(VM_CAST(addr), (u32)CPU.GPR[op.rs]);
|
||||
}
|
||||
|
||||
void ppu_interpreter::STWU(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = CPU.GPR[op.ra] + op.simm16;
|
||||
vm::write32(vm::cast(addr), (u32)CPU.GPR[op.rs]);
|
||||
vm::write32(VM_CAST(addr), (u32)CPU.GPR[op.rs]);
|
||||
CPU.GPR[op.ra] = addr;
|
||||
}
|
||||
|
||||
void ppu_interpreter::STB(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + op.simm16 : op.simm16;
|
||||
vm::write8(vm::cast(addr), (u8)CPU.GPR[op.rs]);
|
||||
vm::write8(VM_CAST(addr), (u8)CPU.GPR[op.rs]);
|
||||
}
|
||||
|
||||
void ppu_interpreter::STBU(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = CPU.GPR[op.ra] + op.simm16;
|
||||
vm::write8(vm::cast(addr), (u8)CPU.GPR[op.rs]);
|
||||
vm::write8(VM_CAST(addr), (u8)CPU.GPR[op.rs]);
|
||||
CPU.GPR[op.ra] = addr;
|
||||
}
|
||||
|
||||
void ppu_interpreter::LHZ(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + op.simm16 : op.simm16;
|
||||
CPU.GPR[op.rd] = vm::read16(vm::cast(addr));
|
||||
CPU.GPR[op.rd] = vm::read16(VM_CAST(addr));
|
||||
}
|
||||
|
||||
void ppu_interpreter::LHZU(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = CPU.GPR[op.ra] + op.simm16;
|
||||
CPU.GPR[op.rd] = vm::read16(vm::cast(addr));
|
||||
CPU.GPR[op.rd] = vm::read16(VM_CAST(addr));
|
||||
CPU.GPR[op.ra] = addr;
|
||||
}
|
||||
|
||||
void ppu_interpreter::LHA(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + op.simm16 : op.simm16;
|
||||
CPU.GPR[op.rd] = (s64)(s16)vm::read16(vm::cast(addr));
|
||||
CPU.GPR[op.rd] = (s64)(s16)vm::read16(VM_CAST(addr));
|
||||
}
|
||||
|
||||
void ppu_interpreter::LHAU(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = CPU.GPR[op.ra] + op.simm16;
|
||||
CPU.GPR[op.rd] = (s64)(s16)vm::read16(vm::cast(addr));
|
||||
CPU.GPR[op.rd] = (s64)(s16)vm::read16(VM_CAST(addr));
|
||||
CPU.GPR[op.ra] = addr;
|
||||
}
|
||||
|
||||
void ppu_interpreter::STH(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + op.simm16 : op.simm16;
|
||||
vm::write16(vm::cast(addr), (u16)CPU.GPR[op.rs]);
|
||||
vm::write16(VM_CAST(addr), (u16)CPU.GPR[op.rs]);
|
||||
}
|
||||
|
||||
void ppu_interpreter::STHU(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = CPU.GPR[op.ra] + op.simm16;
|
||||
vm::write16(vm::cast(addr), (u16)CPU.GPR[op.rs]);
|
||||
vm::write16(VM_CAST(addr), (u16)CPU.GPR[op.rs]);
|
||||
CPU.GPR[op.ra] = addr;
|
||||
}
|
||||
|
||||
@ -2952,7 +2952,7 @@ void ppu_interpreter::LMW(PPUThread& CPU, ppu_opcode_t op)
|
||||
u64 addr = op.ra ? CPU.GPR[op.ra] + op.simm16 : op.simm16;
|
||||
for (u32 i = op.rd; i<32; ++i, addr += 4)
|
||||
{
|
||||
CPU.GPR[i] = vm::read32(vm::cast(addr));
|
||||
CPU.GPR[i] = vm::read32(VM_CAST(addr));
|
||||
}
|
||||
}
|
||||
|
||||
@ -2961,79 +2961,79 @@ void ppu_interpreter::STMW(PPUThread& CPU, ppu_opcode_t op)
|
||||
u64 addr = op.ra ? CPU.GPR[op.ra] + op.simm16 : op.simm16;
|
||||
for (u32 i = op.rs; i<32; ++i, addr += 4)
|
||||
{
|
||||
vm::write32(vm::cast(addr), (u32)CPU.GPR[i]);
|
||||
vm::write32(VM_CAST(addr), (u32)CPU.GPR[i]);
|
||||
}
|
||||
}
|
||||
|
||||
void ppu_interpreter::LFS(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + op.simm16 : op.simm16;
|
||||
CPU.FPR[op.frd]._double = vm::get_ref<be_t<float>>(vm::cast(addr)).value();
|
||||
CPU.FPR[op.frd]._double = vm::get_ref<be_t<float>>(VM_CAST(addr)).value();
|
||||
}
|
||||
|
||||
void ppu_interpreter::LFSU(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = CPU.GPR[op.ra] + op.simm16;
|
||||
CPU.FPR[op.frd]._double = vm::get_ref<be_t<float>>(vm::cast(addr)).value();
|
||||
CPU.FPR[op.frd]._double = vm::get_ref<be_t<float>>(VM_CAST(addr)).value();
|
||||
CPU.GPR[op.ra] = addr;
|
||||
}
|
||||
|
||||
void ppu_interpreter::LFD(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + op.simm16 : op.simm16;
|
||||
CPU.FPR[op.frd]._double = vm::get_ref<be_t<double>>(vm::cast(addr)).value();
|
||||
CPU.FPR[op.frd]._double = vm::get_ref<be_t<double>>(VM_CAST(addr)).value();
|
||||
}
|
||||
|
||||
void ppu_interpreter::LFDU(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = CPU.GPR[op.ra] + op.simm16;
|
||||
CPU.FPR[op.frd]._double = vm::get_ref<be_t<double>>(vm::cast(addr)).value();
|
||||
CPU.FPR[op.frd]._double = vm::get_ref<be_t<double>>(VM_CAST(addr)).value();
|
||||
CPU.GPR[op.ra] = addr;
|
||||
}
|
||||
|
||||
void ppu_interpreter::STFS(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + op.simm16 : op.simm16;
|
||||
vm::get_ref<be_t<float>>(vm::cast(addr)) = static_cast<float>(CPU.FPR[op.frs]);
|
||||
vm::get_ref<be_t<float>>(VM_CAST(addr)) = static_cast<float>(CPU.FPR[op.frs]);
|
||||
}
|
||||
|
||||
void ppu_interpreter::STFSU(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = CPU.GPR[op.ra] + op.simm16;
|
||||
vm::get_ref<be_t<float>>(vm::cast(addr)) = static_cast<float>(CPU.FPR[op.frs]);
|
||||
vm::get_ref<be_t<float>>(VM_CAST(addr)) = static_cast<float>(CPU.FPR[op.frs]);
|
||||
CPU.GPR[op.ra] = addr;
|
||||
}
|
||||
|
||||
void ppu_interpreter::STFD(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = op.ra ? CPU.GPR[op.ra] + op.simm16 : op.simm16;
|
||||
vm::get_ref<be_t<double>>(vm::cast(addr)) = CPU.FPR[op.frs];
|
||||
vm::get_ref<be_t<double>>(VM_CAST(addr)) = CPU.FPR[op.frs];
|
||||
}
|
||||
|
||||
void ppu_interpreter::STFDU(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = CPU.GPR[op.ra] + op.simm16;
|
||||
vm::get_ref<be_t<double>>(vm::cast(addr)) = CPU.FPR[op.frs];
|
||||
vm::get_ref<be_t<double>>(VM_CAST(addr)) = CPU.FPR[op.frs];
|
||||
CPU.GPR[op.ra] = addr;
|
||||
}
|
||||
|
||||
void ppu_interpreter::LD(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = (op.simm16 & ~3) + (op.ra ? CPU.GPR[op.ra] : 0);
|
||||
CPU.GPR[op.rd] = vm::read64(vm::cast(addr));
|
||||
CPU.GPR[op.rd] = vm::read64(VM_CAST(addr));
|
||||
}
|
||||
|
||||
void ppu_interpreter::LDU(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = CPU.GPR[op.ra] + (op.simm16 & ~3);
|
||||
CPU.GPR[op.rd] = vm::read64(vm::cast(addr));
|
||||
CPU.GPR[op.rd] = vm::read64(VM_CAST(addr));
|
||||
CPU.GPR[op.ra] = addr;
|
||||
}
|
||||
|
||||
void ppu_interpreter::LWA(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = (op.simm16 & ~3) + (op.ra ? CPU.GPR[op.ra] : 0);
|
||||
CPU.GPR[op.rd] = (s64)(s32)vm::read32(vm::cast(addr));
|
||||
CPU.GPR[op.rd] = (s64)(s32)vm::read32(VM_CAST(addr));
|
||||
}
|
||||
|
||||
void ppu_interpreter::FDIVS(PPUThread& CPU, ppu_opcode_t op)
|
||||
@ -3099,13 +3099,13 @@ void ppu_interpreter::FNMADDS(PPUThread& CPU, ppu_opcode_t op)
|
||||
void ppu_interpreter::STD(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = (op.simm16 & ~3) + (op.ra ? CPU.GPR[op.ra] : 0);
|
||||
vm::write64(vm::cast(addr), CPU.GPR[op.rs]);
|
||||
vm::write64(VM_CAST(addr), CPU.GPR[op.rs]);
|
||||
}
|
||||
|
||||
void ppu_interpreter::STDU(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u64 addr = CPU.GPR[op.ra] + (op.simm16 & ~3);
|
||||
vm::write64(vm::cast(addr), CPU.GPR[op.rs]);
|
||||
vm::write64(VM_CAST(addr), CPU.GPR[op.rs]);
|
||||
CPU.GPR[op.ra] = addr;
|
||||
}
|
||||
|
||||
|
@ -2438,7 +2438,7 @@ private:
|
||||
void LVEBX(u32 vd, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
|
||||
CPU.VPR[vd]._u8[15 - (addr & 0xf)] = vm::read8(vm::cast(addr));
|
||||
CPU.VPR[vd]._u8[15 - (addr & 0xf)] = vm::read8(VM_CAST(addr));
|
||||
// check LVEWX comments
|
||||
}
|
||||
void SUBFC(u32 rd, u32 ra, u32 rb, u32 oe, bool rc)
|
||||
@ -2480,19 +2480,19 @@ private:
|
||||
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
|
||||
|
||||
be_t<u32> value;
|
||||
vm::reservation_acquire(&value, vm::cast(addr), sizeof(value));
|
||||
vm::reservation_acquire(&value, VM_CAST(addr), sizeof(value));
|
||||
|
||||
CPU.GPR[rd] = value;
|
||||
}
|
||||
void LDX(u32 rd, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
|
||||
CPU.GPR[rd] = vm::read64(vm::cast(addr));
|
||||
CPU.GPR[rd] = vm::read64(VM_CAST(addr));
|
||||
}
|
||||
void LWZX(u32 rd, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
|
||||
CPU.GPR[rd] = vm::read32(vm::cast(addr));
|
||||
CPU.GPR[rd] = vm::read32(VM_CAST(addr));
|
||||
}
|
||||
void SLW(u32 ra, u32 rs, u32 rb, bool rc)
|
||||
{
|
||||
@ -2564,7 +2564,7 @@ private:
|
||||
void LVEHX(u32 vd, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = (ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb]) & ~1ULL;
|
||||
CPU.VPR[vd]._u16[7 - ((addr >> 1) & 0x7)] = vm::read16(vm::cast(addr));
|
||||
CPU.VPR[vd]._u16[7 - ((addr >> 1) & 0x7)] = vm::read16(VM_CAST(addr));
|
||||
// check LVEWX comments
|
||||
}
|
||||
void SUBF(u32 rd, u32 ra, u32 rb, u32 oe, bool rc)
|
||||
@ -2578,7 +2578,7 @@ private:
|
||||
void LDUX(u32 rd, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = CPU.GPR[ra] + CPU.GPR[rb];
|
||||
CPU.GPR[rd] = vm::read64(vm::cast(addr));
|
||||
CPU.GPR[rd] = vm::read64(VM_CAST(addr));
|
||||
CPU.GPR[ra] = addr;
|
||||
}
|
||||
void DCBST(u32 ra, u32 rb)
|
||||
@ -2587,7 +2587,7 @@ private:
|
||||
void LWZUX(u32 rd, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = CPU.GPR[ra] + CPU.GPR[rb];
|
||||
CPU.GPR[rd] = vm::read32(vm::cast(addr));
|
||||
CPU.GPR[rd] = vm::read32(VM_CAST(addr));
|
||||
CPU.GPR[ra] = addr;
|
||||
}
|
||||
void CNTLZD(u32 ra, u32 rs, bool rc)
|
||||
@@ -2613,7 +2613,7 @@ private:
     void LVEWX(u32 vd, u32 ra, u32 rb)
     {
         const u64 addr = (ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb]) & ~3ULL;
-        CPU.VPR[vd]._u32[3 - ((addr >> 2) & 0x3)] = vm::read32(vm::cast(addr));
+        CPU.VPR[vd]._u32[3 - ((addr >> 2) & 0x3)] = vm::read32(VM_CAST(addr));
         // It's not very good idea to implement it using read128(),
         // because it can theoretically read RawSPU 32-bit MMIO register (read128() will fail)
         //CPU.VPR[vd] = vm::read128((ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb]) & ~0xfULL);
@ -2635,7 +2635,7 @@ private:
|
||||
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
|
||||
|
||||
be_t<u64> value;
|
||||
vm::reservation_acquire(&value, vm::cast(addr), sizeof(value));
|
||||
vm::reservation_acquire(&value, VM_CAST(addr), sizeof(value));
|
||||
|
||||
CPU.GPR[rd] = value;
|
||||
}
|
||||
@ -2645,12 +2645,12 @@ private:
|
||||
void LBZX(u32 rd, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
|
||||
CPU.GPR[rd] = vm::read8(vm::cast(addr));
|
||||
CPU.GPR[rd] = vm::read8(VM_CAST(addr));
|
||||
}
|
||||
void LVX(u32 vd, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = (ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb]) & ~0xfull;
|
||||
CPU.VPR[vd] = vm::read128(vm::cast(addr));
|
||||
CPU.VPR[vd] = vm::read128(VM_CAST(addr));
|
||||
}
|
||||
void NEG(u32 rd, u32 ra, u32 oe, bool rc)
|
||||
{
|
||||
@ -2662,7 +2662,7 @@ private:
|
||||
void LBZUX(u32 rd, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = CPU.GPR[ra] + CPU.GPR[rb];
|
||||
CPU.GPR[rd] = vm::read8(vm::cast(addr));
|
||||
CPU.GPR[rd] = vm::read8(VM_CAST(addr));
|
||||
CPU.GPR[ra] = addr;
|
||||
}
|
||||
void NOR(u32 ra, u32 rs, u32 rb, bool rc)
|
||||
@ -2674,7 +2674,7 @@ private:
|
||||
{
|
||||
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
|
||||
const u8 eb = addr & 0xf;
|
||||
vm::write8(vm::cast(addr), CPU.VPR[vs]._u8[15 - eb]);
|
||||
vm::write8(VM_CAST(addr), CPU.VPR[vs]._u8[15 - eb]);
|
||||
}
|
||||
void SUBFE(u32 rd, u32 ra, u32 rb, u32 oe, bool rc)
|
||||
{
|
||||
@ -2746,43 +2746,43 @@ private:
|
||||
void STDX(u32 rs, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
|
||||
vm::write64(vm::cast(addr), CPU.GPR[rs]);
|
||||
vm::write64(VM_CAST(addr), CPU.GPR[rs]);
|
||||
}
|
||||
void STWCX_(u32 rs, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
|
||||
|
||||
const be_t<u32> value = (u32)CPU.GPR[rs];
|
||||
CPU.SetCR_EQ(0, vm::reservation_update(vm::cast(addr), &value, sizeof(value)));
|
||||
CPU.SetCR_EQ(0, vm::reservation_update(VM_CAST(addr), &value, sizeof(value)));
|
||||
}
|
||||
void STWX(u32 rs, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
|
||||
vm::write32(vm::cast(addr), (u32)CPU.GPR[rs]);
|
||||
vm::write32(VM_CAST(addr), (u32)CPU.GPR[rs]);
|
||||
}
|
||||
void STVEHX(u32 vs, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = (ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb]) & ~1ULL;
|
||||
const u8 eb = (addr & 0xf) >> 1;
|
||||
vm::write16(vm::cast(addr), CPU.VPR[vs]._u16[7 - eb]);
|
||||
vm::write16(VM_CAST(addr), CPU.VPR[vs]._u16[7 - eb]);
|
||||
}
|
||||
void STDUX(u32 rs, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = CPU.GPR[ra] + CPU.GPR[rb];
|
||||
vm::write64(vm::cast(addr), CPU.GPR[rs]);
|
||||
vm::write64(VM_CAST(addr), CPU.GPR[rs]);
|
||||
CPU.GPR[ra] = addr;
|
||||
}
|
||||
void STWUX(u32 rs, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = CPU.GPR[ra] + CPU.GPR[rb];
|
||||
vm::write32(vm::cast(addr), (u32)CPU.GPR[rs]);
|
||||
vm::write32(VM_CAST(addr), (u32)CPU.GPR[rs]);
|
||||
CPU.GPR[ra] = addr;
|
||||
}
|
||||
void STVEWX(u32 vs, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = (ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb]) & ~3ULL;
|
||||
const u8 eb = (addr & 0xf) >> 2;
|
||||
vm::write32(vm::cast(addr), CPU.VPR[vs]._u32[3 - eb]);
|
||||
vm::write32(VM_CAST(addr), CPU.VPR[vs]._u32[3 - eb]);
|
||||
}
|
||||
void SUBFZE(u32 rd, u32 ra, u32 oe, bool rc)
|
||||
{
|
||||
@ -2805,17 +2805,17 @@ private:
|
||||
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
|
||||
|
||||
const be_t<u64> value = CPU.GPR[rs];
|
||||
CPU.SetCR_EQ(0, vm::reservation_update(vm::cast(addr), &value, sizeof(value)));
|
||||
CPU.SetCR_EQ(0, vm::reservation_update(VM_CAST(addr), &value, sizeof(value)));
|
||||
}
|
||||
void STBX(u32 rs, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
|
||||
vm::write8(vm::cast(addr), (u8)CPU.GPR[rs]);
|
||||
vm::write8(VM_CAST(addr), (u8)CPU.GPR[rs]);
|
||||
}
|
||||
void STVX(u32 vs, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = (ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb]) & ~0xfull;
|
||||
vm::write128(vm::cast(addr), CPU.VPR[vs]);
|
||||
vm::write128(VM_CAST(addr), CPU.VPR[vs]);
|
||||
}
|
||||
void MULLD(u32 rd, u32 ra, u32 rb, u32 oe, bool rc)
|
||||
{
|
||||
@ -2858,7 +2858,7 @@ private:
|
||||
void STBUX(u32 rs, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = CPU.GPR[ra] + CPU.GPR[rb];
|
||||
vm::write8(vm::cast(addr), (u8)CPU.GPR[rs]);
|
||||
vm::write8(VM_CAST(addr), (u8)CPU.GPR[rs]);
|
||||
CPU.GPR[ra] = addr;
|
||||
}
|
||||
void ADD(u32 rd, u32 ra, u32 rb, u32 oe, bool rc)
|
||||
@ -2875,7 +2875,7 @@ private:
|
||||
void LHZX(u32 rd, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
|
||||
CPU.GPR[rd] = vm::read16(vm::cast(addr));
|
||||
CPU.GPR[rd] = vm::read16(VM_CAST(addr));
|
||||
}
|
||||
void EQV(u32 ra, u32 rs, u32 rb, bool rc)
|
||||
{
|
||||
@ -2889,7 +2889,7 @@ private:
|
||||
void LHZUX(u32 rd, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
|
||||
CPU.GPR[rd] = vm::read16(vm::cast(addr));
|
||||
CPU.GPR[rd] = vm::read16(VM_CAST(addr));
|
||||
CPU.GPR[ra] = addr;
|
||||
}
|
||||
void XOR(u32 ra, u32 rs, u32 rb, bool rc)
|
||||
@ -2904,7 +2904,7 @@ private:
|
||||
void LWAX(u32 rd, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
|
||||
CPU.GPR[rd] = (s64)(s32)vm::read32(vm::cast(addr));
|
||||
CPU.GPR[rd] = (s64)(s32)vm::read32(VM_CAST(addr));
|
||||
}
|
||||
void DST(u32 ra, u32 rb, u32 strm, u32 t)
|
||||
{
|
||||
@ -2912,12 +2912,12 @@ private:
|
||||
void LHAX(u32 rd, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
|
||||
CPU.GPR[rd] = (s64)(s16)vm::read16(vm::cast(addr));
|
||||
CPU.GPR[rd] = (s64)(s16)vm::read16(VM_CAST(addr));
|
||||
}
|
||||
void LVXL(u32 vd, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = (ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb]) & ~0xfull;
|
||||
CPU.VPR[vd] = vm::read128(vm::cast(addr));
|
||||
CPU.VPR[vd] = vm::read128(VM_CAST(addr));
|
||||
}
|
||||
void MFTB(u32 rd, u32 spr)
|
||||
{
|
||||
@ -2934,7 +2934,7 @@ private:
|
||||
void LWAUX(u32 rd, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
|
||||
CPU.GPR[rd] = (s64)(s32)vm::read32(vm::cast(addr));
|
||||
CPU.GPR[rd] = (s64)(s32)vm::read32(VM_CAST(addr));
|
||||
CPU.GPR[ra] = addr;
|
||||
}
|
||||
void DSTST(u32 ra, u32 rb, u32 strm, u32 t)
|
||||
@ -2943,13 +2943,13 @@ private:
|
||||
void LHAUX(u32 rd, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
|
||||
CPU.GPR[rd] = (s64)(s16)vm::read16(vm::cast(addr));
|
||||
CPU.GPR[rd] = (s64)(s16)vm::read16(VM_CAST(addr));
|
||||
CPU.GPR[ra] = addr;
|
||||
}
|
||||
void STHX(u32 rs, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
|
||||
vm::write16(vm::cast(addr), (u16)CPU.GPR[rs]);
|
||||
vm::write16(VM_CAST(addr), (u16)CPU.GPR[rs]);
|
||||
}
|
||||
void ORC(u32 ra, u32 rs, u32 rb, bool rc)
|
||||
{
|
||||
@ -2963,7 +2963,7 @@ private:
|
||||
void STHUX(u32 rs, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = CPU.GPR[ra] + CPU.GPR[rb];
|
||||
vm::write16(vm::cast(addr), (u16)CPU.GPR[rs]);
|
||||
vm::write16(VM_CAST(addr), (u16)CPU.GPR[rs]);
|
||||
CPU.GPR[ra] = addr;
|
||||
}
|
||||
void OR(u32 ra, u32 rs, u32 rb, bool rc)
|
||||
@ -3023,7 +3023,7 @@ private:
|
||||
void STVXL(u32 vs, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = (ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb]) & ~0xfull;
|
||||
vm::write128(vm::cast(addr), CPU.VPR[vs]);
|
||||
vm::write128(VM_CAST(addr), CPU.VPR[vs]);
|
||||
}
|
||||
void DIVD(u32 rd, u32 ra, u32 rb, u32 oe, bool rc)
|
||||
{
|
||||
@ -3067,12 +3067,12 @@ private:
|
||||
const u32 eb = addr & 0xf;
|
||||
|
||||
CPU.VPR[vd].clear();
|
||||
for (u32 i = 0; i < 16u - eb; ++i) CPU.VPR[vd]._u8[15 - i] = vm::read8(vm::cast(addr + i));
|
||||
for (u32 i = 0; i < 16u - eb; ++i) CPU.VPR[vd]._u8[15 - i] = vm::read8(VM_CAST(addr + i));
|
||||
}
|
||||
void LDBRX(u32 rd, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
|
||||
CPU.GPR[rd] = vm::get_ref<u64>(vm::cast(addr));
|
||||
CPU.GPR[rd] = vm::get_ref<u64>(VM_CAST(addr));
|
||||
}
|
||||
void LSWX(u32 rd, u32 ra, u32 rb)
|
||||
{
|
||||
@ -3080,14 +3080,14 @@ private:
|
||||
u32 count = CPU.XER.XER & 0x7F;
|
||||
for (; count >= 4; count -= 4, addr += 4, rd = (rd+1) & 31)
|
||||
{
|
||||
CPU.GPR[rd] = vm::get_ref<be_t<u32>>(vm::cast(addr));
|
||||
CPU.GPR[rd] = vm::get_ref<be_t<u32>>(VM_CAST(addr));
|
||||
}
|
||||
if (count)
|
||||
{
|
||||
u32 value = 0;
|
||||
for (u32 byte = 0; byte < count; byte++)
|
||||
{
|
||||
u32 byte_value = vm::get_ref<u8>(vm::cast(addr+byte));
|
||||
u32 byte_value = vm::get_ref<u8>(VM_CAST(addr+byte));
|
||||
value |= byte_value << ((3^byte)*8);
|
||||
}
|
||||
CPU.GPR[rd] = value;
|
||||
@ -3096,12 +3096,12 @@ private:
|
||||
void LWBRX(u32 rd, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
|
||||
CPU.GPR[rd] = vm::get_ref<u32>(vm::cast(addr));
|
||||
CPU.GPR[rd] = vm::get_ref<u32>(VM_CAST(addr));
|
||||
}
|
||||
void LFSX(u32 frd, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
|
||||
float val = vm::get_ref<be_t<float>>(vm::cast(addr)).value();
|
||||
float val = vm::get_ref<be_t<float>>(VM_CAST(addr)).value();
|
||||
if (!FPRdouble::IsNaN(val))
|
||||
{
|
||||
CPU.FPR[frd] = val;
|
||||
@ -3136,7 +3136,7 @@ private:
|
||||
const u8 eb = addr & 0xf;
|
||||
|
||||
CPU.VPR[vd].clear();
|
||||
for (u32 i = 16 - eb; i < 16; ++i) CPU.VPR[vd]._u8[15 - i] = vm::read8(vm::cast(addr + i - 16));
|
||||
for (u32 i = 16 - eb; i < 16; ++i) CPU.VPR[vd]._u8[15 - i] = vm::read8(VM_CAST(addr + i - 16));
|
||||
}
|
||||
void LSWI(u32 rd, u32 ra, u32 nb)
|
||||
{
|
||||
@ -3148,7 +3148,7 @@ private:
|
||||
{
|
||||
if (N > 3)
|
||||
{
|
||||
CPU.GPR[reg] = vm::read32(vm::cast(addr));
|
||||
CPU.GPR[reg] = vm::read32(VM_CAST(addr));
|
||||
addr += 4;
|
||||
N -= 4;
|
||||
}
|
||||
@ -3159,7 +3159,7 @@ private:
|
||||
while (N > 0)
|
||||
{
|
||||
N = N - 1;
|
||||
buf |= vm::read8(vm::cast(addr)) << (i * 8);
|
||||
buf |= vm::read8(VM_CAST(addr)) << (i * 8);
|
||||
addr++;
|
||||
i--;
|
||||
}
|
||||
@ -3171,7 +3171,7 @@ private:
|
||||
void LFSUX(u32 frd, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = CPU.GPR[ra] + CPU.GPR[rb];
|
||||
float val = vm::get_ref<be_t<float>>(vm::cast(addr)).value();
|
||||
float val = vm::get_ref<be_t<float>>(VM_CAST(addr)).value();
|
||||
if (!FPRdouble::IsNaN(val))
|
||||
{
|
||||
CPU.FPR[frd] = val;
|
||||
@ -3190,12 +3190,12 @@ private:
|
||||
void LFDX(u32 frd, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
|
||||
CPU.FPR[frd] = vm::get_ref<be_t<double>>(vm::cast(addr)).value();
|
||||
CPU.FPR[frd] = vm::get_ref<be_t<double>>(VM_CAST(addr)).value();
|
||||
}
|
||||
void LFDUX(u32 frd, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = CPU.GPR[ra] + CPU.GPR[rb];
|
||||
CPU.FPR[frd] = vm::get_ref<be_t<double>>(vm::cast(addr)).value();
|
||||
CPU.FPR[frd] = vm::get_ref<be_t<double>>(VM_CAST(addr)).value();
|
||||
CPU.GPR[ra] = addr;
|
||||
}
|
||||
void STVLX(u32 vs, u32 ra, u32 rb)
|
||||
@ -3203,12 +3203,12 @@ private:
|
||||
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
|
||||
const u32 eb = addr & 0xf;
|
||||
|
||||
for (u32 i = 0; i < 16u - eb; ++i) vm::write8(vm::cast(addr + i), CPU.VPR[vs]._u8[15 - i]);
|
||||
for (u32 i = 0; i < 16u - eb; ++i) vm::write8(VM_CAST(addr + i), CPU.VPR[vs]._u8[15 - i]);
|
||||
}
|
||||
void STDBRX(u32 rs, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
|
||||
vm::get_ref<u64>(vm::cast(addr)) = CPU.GPR[rs];
|
||||
vm::get_ref<u64>(VM_CAST(addr)) = CPU.GPR[rs];
|
||||
}
|
||||
void STSWX(u32 rs, u32 ra, u32 rb)
|
||||
{
|
||||
@ -3216,7 +3216,7 @@ private:
|
||||
u32 count = CPU.XER.XER & 0x7F;
|
||||
for (; count >= 4; count -= 4, addr += 4, rs = (rs+1) & 31)
|
||||
{
|
||||
vm::write32(vm::cast(addr), (u32)CPU.GPR[rs]);
|
||||
vm::write32(VM_CAST(addr), (u32)CPU.GPR[rs]);
|
||||
}
|
||||
if (count)
|
||||
{
|
||||
@ -3224,14 +3224,14 @@ private:
|
||||
for (u32 byte = 0; byte < count; byte++)
|
||||
{
|
||||
u32 byte_value = (u8)(value >> ((3^byte)*8));
|
||||
vm::write8(vm::cast(addr+byte), byte_value);
|
||||
vm::write8(VM_CAST(addr+byte), byte_value);
|
||||
}
|
||||
}
|
||||
}
|
||||
void STWBRX(u32 rs, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
|
||||
vm::get_ref<u32>(vm::cast(addr)) = (u32)CPU.GPR[rs];
|
||||
vm::get_ref<u32>(VM_CAST(addr)) = (u32)CPU.GPR[rs];
|
||||
}
|
||||
void STFSX(u32 frs, u32 ra, u32 rb)
|
||||
{
|
||||
@ -3239,13 +3239,13 @@ private:
|
||||
double val = CPU.FPR[frs];
|
||||
if (!FPRdouble::IsNaN(val))
|
||||
{
|
||||
vm::get_ref<be_t<float>>(vm::cast(addr)) = (float)val;
|
||||
vm::get_ref<be_t<float>>(VM_CAST(addr)) = (float)val;
|
||||
}
|
||||
else
|
||||
{
|
||||
u64 bits = (u64&)val;
|
||||
u32 bits32 = (bits>>32 & 0x80000000) | (bits>>29 & 0x7fffffff);
|
||||
vm::get_ref<be_t<u32>>(vm::cast(addr)) = bits32;
|
||||
vm::get_ref<be_t<u32>>(VM_CAST(addr)) = bits32;
|
||||
}
|
||||
}
|
||||
void STVRX(u32 vs, u32 ra, u32 rb)
|
||||
@ -3253,7 +3253,7 @@ private:
|
||||
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
|
||||
const u8 eb = addr & 0xf;
|
||||
|
||||
for (u32 i = 16 - eb; i < 16; ++i) vm::write8(vm::cast(addr + i - 16), CPU.VPR[vs]._u8[15 - i]);
|
||||
for (u32 i = 16 - eb; i < 16; ++i) vm::write8(VM_CAST(addr + i - 16), CPU.VPR[vs]._u8[15 - i]);
|
||||
}
|
||||
void STFSUX(u32 frs, u32 ra, u32 rb)
|
||||
{
|
||||
@ -3261,13 +3261,13 @@ private:
|
||||
double val = CPU.FPR[frs];
|
||||
if (!FPRdouble::IsNaN(val))
|
||||
{
|
||||
vm::get_ref<be_t<float>>(vm::cast(addr)) = (float)val;
|
||||
vm::get_ref<be_t<float>>(VM_CAST(addr)) = (float)val;
|
||||
}
|
||||
else
|
||||
{
|
||||
u64 bits = (u64&)val;
|
||||
u32 bits32 = (bits>>32 & 0x80000000) | (bits>>29 & 0x7fffffff);
|
||||
vm::get_ref<be_t<u32>>(vm::cast(addr)) = bits32;
|
||||
vm::get_ref<be_t<u32>>(VM_CAST(addr)) = bits32;
|
||||
}
|
||||
CPU.GPR[ra] = addr;
|
||||
}
|
||||
@ -3281,7 +3281,7 @@ private:
|
||||
{
|
||||
if (N > 3)
|
||||
{
|
||||
vm::write32(vm::cast(addr), (u32)CPU.GPR[reg]);
|
||||
vm::write32(VM_CAST(addr), (u32)CPU.GPR[reg]);
|
||||
addr += 4;
|
||||
N -= 4;
|
||||
}
|
||||
@ -3291,7 +3291,7 @@ private:
|
||||
while (N > 0)
|
||||
{
|
||||
N = N - 1;
|
||||
vm::write8(vm::cast(addr), (0xFF000000 & buf) >> 24);
|
||||
vm::write8(VM_CAST(addr), (0xFF000000 & buf) >> 24);
|
||||
buf <<= 8;
|
||||
addr++;
|
||||
}
|
||||
@ -3302,12 +3302,12 @@ private:
|
||||
void STFDX(u32 frs, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
|
||||
vm::get_ref<be_t<double>>(vm::cast(addr)) = CPU.FPR[frs];
|
||||
vm::get_ref<be_t<double>>(VM_CAST(addr)) = CPU.FPR[frs];
|
||||
}
|
||||
void STFDUX(u32 frs, u32 ra, u32 rb)
|
||||
{
|
||||
const u64 addr = CPU.GPR[ra] + CPU.GPR[rb];
|
||||
vm::get_ref<be_t<double>>(vm::cast(addr)) = CPU.FPR[frs];
|
||||
vm::get_ref<be_t<double>>(VM_CAST(addr)) = CPU.FPR[frs];
|
||||
CPU.GPR[ra] = addr;
|
||||
}
|
||||
void LVLXL(u32 vd, u32 ra, u32 rb)
|
||||
@ -3316,12 +3316,12 @@ private:
|
||||
const u32 eb = addr & 0xf;
|
||||
|
||||
CPU.VPR[vd].clear();
|
||||
for (u32 i = 0; i < 16u - eb; ++i) CPU.VPR[vd]._u8[15 - i] = vm::read8(vm::cast(addr + i));
for (u32 i = 0; i < 16u - eb; ++i) CPU.VPR[vd]._u8[15 - i] = vm::read8(VM_CAST(addr + i));
}
void LHBRX(u32 rd, u32 ra, u32 rb)
{
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
CPU.GPR[rd] = vm::get_ref<u16>(vm::cast(addr));
CPU.GPR[rd] = vm::get_ref<u16>(VM_CAST(addr));
}
void SRAW(u32 ra, u32 rs, u32 rb, bool rc)
{
@ -3363,7 +3363,7 @@ private:
const u8 eb = addr & 0xf;

CPU.VPR[vd].clear();
for (u32 i = 16 - eb; i < 16; ++i) CPU.VPR[vd]._u8[15 - i] = vm::read8(vm::cast(addr + i - 16));
for (u32 i = 16 - eb; i < 16; ++i) CPU.VPR[vd]._u8[15 - i] = vm::read8(VM_CAST(addr + i - 16));
}
void DSS(u32 strm, u32 a)
{
@ -3397,12 +3397,12 @@ private:
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
const u32 eb = addr & 0xf;

for (u32 i = 0; i < 16u - eb; ++i) vm::write8(vm::cast(addr + i), CPU.VPR[vs]._u8[15 - i]);
for (u32 i = 0; i < 16u - eb; ++i) vm::write8(VM_CAST(addr + i), CPU.VPR[vs]._u8[15 - i]);
}
void STHBRX(u32 rs, u32 ra, u32 rb)
{
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
vm::get_ref<u16>(vm::cast(addr)) = (u16)CPU.GPR[rs];
vm::get_ref<u16>(VM_CAST(addr)) = (u16)CPU.GPR[rs];
}
void EXTSH(u32 ra, u32 rs, bool rc)
{
@ -3414,7 +3414,7 @@ private:
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
const u8 eb = addr & 0xf;

for (u32 i = 16 - eb; i < 16; ++i) vm::write8(vm::cast(addr + i - 16), CPU.VPR[vs]._u8[15 - i]);
for (u32 i = 16 - eb; i < 16; ++i) vm::write8(VM_CAST(addr + i - 16), CPU.VPR[vs]._u8[15 - i]);
}
void EXTSB(u32 ra, u32 rs, bool rc)
{
@ -3424,7 +3424,7 @@ private:
void STFIWX(u32 frs, u32 ra, u32 rb)
{
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
vm::write32(vm::cast(addr), (u32&)CPU.FPR[frs]);
vm::write32(VM_CAST(addr), (u32&)CPU.FPR[frs]);
}
void EXTSW(u32 ra, u32 rs, bool rc)
{
@ -3439,83 +3439,83 @@ private:
{
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];

memset(vm::get_ptr<u8>(vm::cast(addr) & ~127), 0, 128);
memset(vm::get_ptr<u8>(VM_CAST(addr) & ~127), 0, 128);
}
void LWZ(u32 rd, u32 ra, s32 d)
{
const u64 addr = ra ? CPU.GPR[ra] + d : d;
CPU.GPR[rd] = vm::read32(vm::cast(addr));
CPU.GPR[rd] = vm::read32(VM_CAST(addr));
}
void LWZU(u32 rd, u32 ra, s32 d)
{
const u64 addr = CPU.GPR[ra] + d;
CPU.GPR[rd] = vm::read32(vm::cast(addr));
CPU.GPR[rd] = vm::read32(VM_CAST(addr));
CPU.GPR[ra] = addr;
}
void LBZ(u32 rd, u32 ra, s32 d)
{
const u64 addr = ra ? CPU.GPR[ra] + d : d;
CPU.GPR[rd] = vm::read8(vm::cast(addr));
CPU.GPR[rd] = vm::read8(VM_CAST(addr));
}
void LBZU(u32 rd, u32 ra, s32 d)
{
const u64 addr = CPU.GPR[ra] + d;
CPU.GPR[rd] = vm::read8(vm::cast(addr));
CPU.GPR[rd] = vm::read8(VM_CAST(addr));
CPU.GPR[ra] = addr;
}
void STW(u32 rs, u32 ra, s32 d)
{
const u64 addr = ra ? CPU.GPR[ra] + d : d;
vm::write32(vm::cast(addr), (u32)CPU.GPR[rs]);
vm::write32(VM_CAST(addr), (u32)CPU.GPR[rs]);
}
void STWU(u32 rs, u32 ra, s32 d)
{
const u64 addr = CPU.GPR[ra] + d;
vm::write32(vm::cast(addr), (u32)CPU.GPR[rs]);
vm::write32(VM_CAST(addr), (u32)CPU.GPR[rs]);
CPU.GPR[ra] = addr;
}
void STB(u32 rs, u32 ra, s32 d)
{
const u64 addr = ra ? CPU.GPR[ra] + d : d;
vm::write8(vm::cast(addr), (u8)CPU.GPR[rs]);
vm::write8(VM_CAST(addr), (u8)CPU.GPR[rs]);
}
void STBU(u32 rs, u32 ra, s32 d)
{
const u64 addr = CPU.GPR[ra] + d;
vm::write8(vm::cast(addr), (u8)CPU.GPR[rs]);
vm::write8(VM_CAST(addr), (u8)CPU.GPR[rs]);
CPU.GPR[ra] = addr;
}
void LHZ(u32 rd, u32 ra, s32 d)
{
const u64 addr = ra ? CPU.GPR[ra] + d : d;
CPU.GPR[rd] = vm::read16(vm::cast(addr));
CPU.GPR[rd] = vm::read16(VM_CAST(addr));
}
void LHZU(u32 rd, u32 ra, s32 d)
{
const u64 addr = CPU.GPR[ra] + d;
CPU.GPR[rd] = vm::read16(vm::cast(addr));
CPU.GPR[rd] = vm::read16(VM_CAST(addr));
CPU.GPR[ra] = addr;
}
void LHA(u32 rd, u32 ra, s32 d)
{
const u64 addr = ra ? CPU.GPR[ra] + d : d;
CPU.GPR[rd] = (s64)(s16)vm::read16(vm::cast(addr));
CPU.GPR[rd] = (s64)(s16)vm::read16(VM_CAST(addr));
}
void LHAU(u32 rd, u32 ra, s32 d)
{
const u64 addr = CPU.GPR[ra] + d;
CPU.GPR[rd] = (s64)(s16)vm::read16(vm::cast(addr));
CPU.GPR[rd] = (s64)(s16)vm::read16(VM_CAST(addr));
CPU.GPR[ra] = addr;
}
void STH(u32 rs, u32 ra, s32 d)
{
const u64 addr = ra ? CPU.GPR[ra] + d : d;
vm::write16(vm::cast(addr), (u16)CPU.GPR[rs]);
vm::write16(VM_CAST(addr), (u16)CPU.GPR[rs]);
}
void STHU(u32 rs, u32 ra, s32 d)
{
const u64 addr = CPU.GPR[ra] + d;
vm::write16(vm::cast(addr), (u16)CPU.GPR[rs]);
vm::write16(VM_CAST(addr), (u16)CPU.GPR[rs]);
CPU.GPR[ra] = addr;
}
void LMW(u32 rd, u32 ra, s32 d)
@ -3523,7 +3523,7 @@ private:
u64 addr = ra ? CPU.GPR[ra] + d : d;
for(u32 i=rd; i<32; ++i, addr += 4)
{
CPU.GPR[i] = vm::read32(vm::cast(addr));
CPU.GPR[i] = vm::read32(VM_CAST(addr));
}
}
void STMW(u32 rs, u32 ra, s32 d)
@ -3531,13 +3531,13 @@ private:
u64 addr = ra ? CPU.GPR[ra] + d : d;
for(u32 i=rs; i<32; ++i, addr += 4)
{
vm::write32(vm::cast(addr), (u32)CPU.GPR[i]);
vm::write32(VM_CAST(addr), (u32)CPU.GPR[i]);
}
}
void LFS(u32 frd, u32 ra, s32 d)
{
const u64 addr = ra ? CPU.GPR[ra] + d : d;
float val = vm::get_ref<be_t<float>>(vm::cast(addr)).value();
float val = vm::get_ref<be_t<float>>(VM_CAST(addr)).value();
if (!FPRdouble::IsNaN(val))
{
CPU.FPR[frd] = val;
@ -3551,7 +3551,7 @@ private:
void LFSU(u32 frd, u32 ra, s32 ds)
{
const u64 addr = CPU.GPR[ra] + ds;
float val = vm::get_ref<be_t<float>>(vm::cast(addr)).value();
float val = vm::get_ref<be_t<float>>(VM_CAST(addr)).value();
if (!FPRdouble::IsNaN(val))
{
CPU.FPR[frd] = val;
@ -3566,12 +3566,12 @@ private:
void LFD(u32 frd, u32 ra, s32 d)
{
const u64 addr = ra ? CPU.GPR[ra] + d : d;
CPU.FPR[frd] = vm::get_ref<be_t<double>>(vm::cast(addr)).value();
CPU.FPR[frd] = vm::get_ref<be_t<double>>(VM_CAST(addr)).value();
}
void LFDU(u32 frd, u32 ra, s32 ds)
{
const u64 addr = CPU.GPR[ra] + ds;
CPU.FPR[frd] = vm::get_ref<be_t<double>>(vm::cast(addr)).value();
CPU.FPR[frd] = vm::get_ref<be_t<double>>(VM_CAST(addr)).value();
CPU.GPR[ra] = addr;
}
void STFS(u32 frs, u32 ra, s32 d)
@ -3580,13 +3580,13 @@ private:
double val = CPU.FPR[frs];
if (!FPRdouble::IsNaN(val))
{
vm::get_ref<be_t<float>>(vm::cast(addr)) = (float)val;
vm::get_ref<be_t<float>>(VM_CAST(addr)) = (float)val;
}
else
{
u64 bits = (u64&)val;
u32 bits32 = (bits>>32 & 0x80000000) | (bits>>29 & 0x7fffffff);
vm::get_ref<be_t<u32>>(vm::cast(addr)) = bits32;
vm::get_ref<be_t<u32>>(VM_CAST(addr)) = bits32;
}
}
void STFSU(u32 frs, u32 ra, s32 d)
@ -3595,42 +3595,42 @@ private:
double val = CPU.FPR[frs];
if (!FPRdouble::IsNaN(val))
{
vm::get_ref<be_t<float>>(vm::cast(addr)) = (float)val;
vm::get_ref<be_t<float>>(VM_CAST(addr)) = (float)val;
}
else
{
u64 bits = (u64&)val;
u32 bits32 = (bits>>32 & 0x80000000) | (bits>>29 & 0x7fffffff);
vm::get_ref<be_t<u32>>(vm::cast(addr)) = bits32;
vm::get_ref<be_t<u32>>(VM_CAST(addr)) = bits32;
}
CPU.GPR[ra] = addr;
}
void STFD(u32 frs, u32 ra, s32 d)
{
const u64 addr = ra ? CPU.GPR[ra] + d : d;
vm::get_ref<be_t<double>>(vm::cast(addr)) = CPU.FPR[frs];
vm::get_ref<be_t<double>>(VM_CAST(addr)) = CPU.FPR[frs];
}
void STFDU(u32 frs, u32 ra, s32 d)
{
const u64 addr = CPU.GPR[ra] + d;
vm::get_ref<be_t<double>>(vm::cast(addr)) = CPU.FPR[frs];
vm::get_ref<be_t<double>>(VM_CAST(addr)) = CPU.FPR[frs];
CPU.GPR[ra] = addr;
}
void LD(u32 rd, u32 ra, s32 ds)
{
const u64 addr = ra ? CPU.GPR[ra] + ds : ds;
CPU.GPR[rd] = vm::read64(vm::cast(addr));
CPU.GPR[rd] = vm::read64(VM_CAST(addr));
}
void LDU(u32 rd, u32 ra, s32 ds)
{
const u64 addr = CPU.GPR[ra] + ds;
CPU.GPR[rd] = vm::read64(vm::cast(addr));
CPU.GPR[rd] = vm::read64(VM_CAST(addr));
CPU.GPR[ra] = addr;
}
void LWA(u32 rd, u32 ra, s32 ds)
{
const u64 addr = ra ? CPU.GPR[ra] + ds : ds;
CPU.GPR[rd] = (s64)(s32)vm::read32(vm::cast(addr));
CPU.GPR[rd] = (s64)(s32)vm::read32(VM_CAST(addr));
}
void FDIVS(u32 frd, u32 fra, u32 frb, bool rc) {FDIV(frd, fra, frb, rc, true);}
void FSUBS(u32 frd, u32 fra, u32 frb, bool rc) {FSUB(frd, fra, frb, rc, true);}
@ -3684,12 +3684,12 @@ private:
void STD(u32 rs, u32 ra, s32 d)
{
const u64 addr = ra ? CPU.GPR[ra] + d : d;
vm::write64(vm::cast(addr), CPU.GPR[rs]);
vm::write64(VM_CAST(addr), CPU.GPR[rs]);
}
void STDU(u32 rs, u32 ra, s32 ds)
{
const u64 addr = CPU.GPR[ra] + ds;
vm::write64(vm::cast(addr), CPU.GPR[rs]);
vm::write64(VM_CAST(addr), CPU.GPR[rs]);
CPU.GPR[ra] = addr;
}
void MTFSB1(u32 crbd, bool rc)

@ -634,7 +634,7 @@ int FPRdouble::Cmp(PPCdouble a, PPCdouble b)

u64 PPUThread::GetStackArg(s32 i)
{
return vm::read64(vm::cast(GPR[1] + 0x70 + 0x8 * (i - 9)));
return vm::read64(VM_CAST(GPR[1] + 0x70 + 0x8 * (i - 9)));
}

void PPUThread::FastCall2(u32 addr, u32 rtoc)

@ -269,7 +269,7 @@ void SPUThread::do_dma_transfer(u32 cmd, spu_mfc_arg_t args)
_mm_mfence();
}

u32 eal = vm::cast(args.ea, "ea");
u32 eal = VM_CAST(args.ea);

if (eal >= SYS_SPU_THREAD_BASE_LOW && m_type == CPU_THREAD_SPU) // SPU Thread Group MMIO (LS and SNR)
{
@ -419,7 +419,7 @@ void SPUThread::process_mfc_cmd(u32 cmd)
break;
}

vm::reservation_acquire(vm::get_ptr(offset + ch_mfc_args.lsa), vm::cast(ch_mfc_args.ea), 128, [this]()
vm::reservation_acquire(vm::get_ptr(offset + ch_mfc_args.lsa), VM_CAST(ch_mfc_args.ea), 128, [this]()
{
ch_event_stat |= SPU_EVENT_LR;
cv.notify_one();
@ -436,7 +436,7 @@ void SPUThread::process_mfc_cmd(u32 cmd)
break;
}

if (vm::reservation_update(vm::cast(ch_mfc_args.ea), vm::get_ptr(offset + ch_mfc_args.lsa), 128))
if (vm::reservation_update(VM_CAST(ch_mfc_args.ea), vm::get_ptr(offset + ch_mfc_args.lsa), 128))
{
ch_atomic_stat.push_uncond(MFC_PUTLLC_SUCCESS);
}
@ -456,9 +456,9 @@ void SPUThread::process_mfc_cmd(u32 cmd)
break;
}

vm::reservation_op(vm::cast(ch_mfc_args.ea), 128, [this]()
vm::reservation_op(VM_CAST(ch_mfc_args.ea), 128, [this]()
{
memcpy(vm::priv_ptr(vm::cast(ch_mfc_args.ea)), vm::get_ptr(offset + ch_mfc_args.lsa), 128);
memcpy(vm::priv_ptr(VM_CAST(ch_mfc_args.ea)), vm::get_ptr(offset + ch_mfc_args.lsa), 128);
});

if (cmd == MFC_PUTLLUC_CMD)

@ -10,7 +10,7 @@ u64 vfsStreamMemory::Write(const void* src, u64 count)
count = m_size - m_pos;
}

memcpy(vm::get_ptr<void>(vm::cast(m_addr + m_pos)), src, count);
memcpy(vm::get_ptr<void>(VM_CAST(m_addr + m_pos)), src, count);
m_pos += count;
return count;
}
@ -23,7 +23,7 @@ u64 vfsStreamMemory::Read(void* dst, u64 count)
count = m_size - m_pos;
}

memcpy(dst, vm::get_ptr<void>(vm::cast(m_addr + m_pos)), count);
memcpy(dst, vm::get_ptr<void>(VM_CAST(m_addr + m_pos)), count);
m_pos += count;
return count;
}

@ -609,7 +609,7 @@ namespace vm
{
PPUThread& context = static_cast<PPUThread&>(CPU);

old_pos = vm::cast(context.GPR[1], "SP");
old_pos = VM_CAST(context.GPR[1]);
context.GPR[1] -= align(size, 8); // room minimal possible size
context.GPR[1] &= ~(align_v - 1); // fix stack alignment

@ -86,7 +86,7 @@ namespace vm

inline u32 get_addr(const void* real_pointer)
{
const uintptr_t diff = reinterpret_cast<uintptr_t>(real_pointer) - reinterpret_cast<uintptr_t>(g_base_addr);
const std::uintptr_t diff = reinterpret_cast<std::uintptr_t>(real_pointer) - reinterpret_cast<std::uintptr_t>(g_base_addr);
const u32 res = static_cast<u32>(diff);

if (res == diff)
@ -104,9 +104,9 @@ namespace vm

template<typename T> struct cast_ptr
{
static_assert(std::is_same<T, u32>::value, "Unsupported vm::cast() type");
static_assert(std::is_same<T, u32>::value, "Unsupported VM_CAST() type");

force_inline static u32 cast(const T& addr, const char* func)
force_inline static u32 cast(const T& addr, const char* file, int line, const char* func)
{
return 0;
}
@ -114,7 +114,7 @@ namespace vm

template<> struct cast_ptr<u32>
{
force_inline static u32 cast(const u32 addr, const char* func)
force_inline static u32 cast(const u32 addr, const char* file, int line, const char* func)
{
return addr;
}
@ -122,13 +122,13 @@ namespace vm

template<> struct cast_ptr<u64>
{
force_inline static u32 cast(const u64 addr, const char* func)
force_inline static u32 cast(const u64 addr, const char* file, int line, const char* func)
{
const u32 res = static_cast<u32>(addr);

if (res != addr)
{
throw EXCEPTION("%s(): failed to cast 0x%llx (too big value)", func, addr);
throw fmt::exception(file, line, func, "VM_CAST failed (addr=0x%llx)", addr);
}

return res;
@ -137,23 +137,24 @@ namespace vm

template<typename T> struct cast_ptr<be_t<T>>
{
force_inline static u32 cast(const be_t<T>& addr, const char* func)
force_inline static u32 cast(const be_t<T>& addr, const char* file, int line, const char* func)
{
return cast_ptr<T>::cast(addr.value(), func);
return cast_ptr<T>::cast(addr.value(), file, line, func);
}
};

template<typename T> struct cast_ptr<le_t<T>>
{
force_inline static u32 cast(const le_t<T>& addr, const char* func)
force_inline static u32 cast(const le_t<T>& addr, const char* file, int line, const char* func)
{
return cast_ptr<T>::cast(addr.value(), func);
return cast_ptr<T>::cast(addr.value(), file, line, func);
}
};

template<typename T> force_inline static u32 cast(const T& addr, const char* func = "vm::cast")
// function for VM_CAST
template<typename T> force_inline static u32 impl_cast(const T& addr, const char* file, int line, const char* func)
{
return cast_ptr<T>::cast(addr, func);
return cast_ptr<T>::cast(addr, file, line, func);
}

static u8 read8(u32 addr)

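[Editor's note] The vm.h hunk above is the heart of this commit: the old vm::cast() helper, which only received the calling function's name, becomes vm::impl_cast(), which also receives the file and line of the call site; the new VM_CAST macro (defined at the end of this diff) supplies those via __FILE__/__LINE__/__FUNCTION__. A minimal standalone sketch of the same pattern follows; the names impl_cast_sketch and VM_CAST_SKETCH are hypothetical, and std::runtime_error and __func__ stand in for the project's fmt::exception and __FUNCTION__ purely so the snippet compiles on its own:

#include <cstdint>
#include <sstream>
#include <stdexcept>

// Checked narrowing of a 64-bit guest address to u32, reporting the call site on failure.
inline std::uint32_t impl_cast_sketch(std::uint64_t addr, const char* file, int line, const char* func)
{
    const std::uint32_t res = static_cast<std::uint32_t>(addr);
    if (res != addr)
    {
        std::ostringstream msg;
        msg << file << ":" << line << " (" << func << "): VM_CAST failed (addr=0x" << std::hex << addr << ")";
        throw std::runtime_error(msg.str());
    }
    return res;
}

// Stand-in for the real macro: the preprocessor captures the call site for the diagnostic.
#define VM_CAST_SKETCH(value) impl_cast_sketch((value), __FILE__, __LINE__, __func__)

With this in place, a call such as vm::write32(VM_CAST_SKETCH(addr), value) fails with a message naming the exact file, line and function whenever addr does not fit into the 32-bit guest address space.
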
@ -37,14 +37,14 @@ namespace vm
template<typename MT, typename T2, typename = if_comparable_t<T, T2>> _ptr_base<MT> of(MT T2::*const member) const
{
const u32 offset = static_cast<u32>(reinterpret_cast<std::ptrdiff_t>(&(reinterpret_cast<T*>(0ull)->*member)));
return{ vm::cast(m_addr + offset) };
return{ VM_CAST(m_addr + offset) };
}

// get vm pointer to array member with array subscribtion
template<typename MT, typename T2, typename = if_comparable_t<T, T2>> _ptr_base<std::remove_extent_t<MT>> of(MT T2::*const member, u32 index) const
{
const u32 offset = static_cast<u32>(reinterpret_cast<std::ptrdiff_t>(&(reinterpret_cast<T*>(0ull)->*member)));
return{ vm::cast(m_addr + offset + sizeof32(T) * index) };
return{ VM_CAST(m_addr + offset + sizeof32(T) * index) };
}

template<typename CT> std::enable_if_t<std::is_assignable<AT&, CT>::value> set(const CT& value)
@ -59,12 +59,12 @@ namespace vm

T* get_ptr() const
{
return vm::get_ptr<T>(vm::cast(m_addr));
return vm::get_ptr<T>(VM_CAST(m_addr));
}

T* priv_ptr() const
{
return vm::priv_ptr<T>(vm::cast(m_addr));
return vm::priv_ptr<T>(VM_CAST(m_addr));
}

T* operator ->() const
@ -76,13 +76,13 @@ namespace vm
{
static_assert(!std::is_void<T>::value, "vm::_ptr_base<> error: operator[] is not available for void pointers");

return vm::get_ref<T>(vm::cast(m_addr + sizeof32(T) * index));
return vm::get_ref<T>(VM_CAST(m_addr + sizeof32(T) * index));
}

// enable only the conversions which are originally possible between pointer types
template<typename T2, typename AT2, typename = std::enable_if_t<std::is_convertible<T*, T2*>::value>> operator _ptr_base<T2, AT2>() const
{
return{ vm::cast(m_addr) };
return{ VM_CAST(m_addr) };
}

template<typename T2, typename = std::enable_if_t<std::is_convertible<T*, T2*>::value>> explicit operator T2*() const
@ -141,7 +141,7 @@ namespace vm
// conversion to another function pointer
template<typename AT2> operator _ptr_base<type, AT2>() const
{
return{ vm::cast(m_addr) };
return{ VM_CAST(m_addr) };
}

explicit operator bool() const
@ -265,7 +265,7 @@ template<typename T, typename AT> inline vm::_ptr_base<T, AT> operator +(const v
// indirection operator for vm::_ptr_base
template<typename T, typename AT> inline vm::if_arithmetical_ptr_t<T, T&> operator *(const vm::_ptr_base<T, AT>& ptr)
{
return vm::get_ref<T>(vm::cast(ptr.m_addr));
return vm::get_ref<T>(VM_CAST(ptr.m_addr));
}

// postfix increment operator for vm::_ptr_base

@ -24,12 +24,12 @@ namespace vm

T& get_ref() const
{
return vm::get_ref<T>(vm::cast(m_addr));
return vm::get_ref<T>(VM_CAST(m_addr));
}

T& priv_ref() const
{
return vm::priv_ref<T>(vm::cast(m_addr));
return vm::priv_ref<T>(VM_CAST(m_addr));
}

// TODO: conversion operator (seems hard to define it correctly)

@ -164,7 +164,7 @@ namespace vm
template<typename AT, typename RT, typename... T>
force_inline RT _ptr_base<RT(T...), AT>::operator()(PPUThread& CPU, T... args) const
{
const auto data = vm::get_ptr<be_t<u32>>(vm::cast(m_addr));
const auto data = vm::get_ptr<be_t<u32>>(VM_CAST(m_addr));
const u32 pc = data[0];
const u32 rtoc = data[1];

@ -106,7 +106,7 @@ void execute_ppu_func_by_index(PPUThread& CPU, u32 index)
// save RTOC if necessary
if (index & EIF_SAVE_RTOC)
{
vm::write64(vm::cast(CPU.GPR[1] + 0x28), CPU.GPR[2]);
vm::write64(VM_CAST(CPU.GPR[1] + 0x28), CPU.GPR[2]);
}

// save old syscall/NID value
@ -196,7 +196,7 @@ void execute_ppu_func_by_index(PPUThread& CPU, u32 index)
if (index & EIF_PERFORM_BLR)
{
// return if necessary
CPU.PC = vm::cast(CPU.LR & ~3) - 4;
CPU.PC = VM_CAST(CPU.LR & ~3) - 4;
}

CPU.hle_code = last_code;

@ -505,7 +505,7 @@ s32 cellFsStReadStart(u32 fd, u64 offset, u64 size)
if (file->st_total_read - file->st_copied <= file->st_ringbuf_size - file->st_block_size && file->st_total_read < file->st_read_size)
{
// get buffer position
const u32 position = vm::cast(file->st_buffer + file->st_total_read % file->st_ringbuf_size);
const u32 position = VM_CAST(file->st_buffer + file->st_total_read % file->st_ringbuf_size);

// read data
auto old = file->file->Tell();
@ -595,7 +595,7 @@ s32 cellFsStRead(u32 fd, vm::ptr<u8> buf, u64 size, vm::ptr<u64> rsize)
}

const u64 copied = file->st_copied.load();
const u32 position = vm::cast(file->st_buffer + copied % file->st_ringbuf_size);
const u32 position = VM_CAST(file->st_buffer + copied % file->st_ringbuf_size);
const u64 total_read = file->st_total_read.load();
const u64 copy_size = (*rsize = std::min<u64>(size, total_read - copied)); // write rsize

@ -629,7 +629,7 @@ s32 cellFsStReadGetCurrentAddr(u32 fd, vm::ptr<u32> addr, vm::ptr<u64> size)
}

const u64 copied = file->st_copied.load();
const u32 position = vm::cast(file->st_buffer + copied % file->st_ringbuf_size);
const u32 position = VM_CAST(file->st_buffer + copied % file->st_ringbuf_size);
const u64 total_read = file->st_total_read.load();

if ((*size = std::min<u64>(file->st_ringbuf_size - (position - file->st_buffer), total_read - copied)).data())

@ -509,7 +509,7 @@ void spursHandlerWaitReady(PPUThread& CPU, vm::ptr<CellSpurs> spurs)
/// Entry point of the SPURS handler thread. This thread is responsible for starting the SPURS SPU thread group.
void spursHandlerEntry(PPUThread& CPU)
{
auto spurs = vm::ptr<CellSpurs>::make(vm::cast(CPU.GPR[3]));
auto spurs = vm::ptr<CellSpurs>::make(VM_CAST(CPU.GPR[3]));

if (spurs->flags & SAF_UNKNOWN_FLAG_30)
{
@ -631,7 +631,7 @@ s32 spursWakeUpShutdownCompletionWaiter(PPUThread& CPU, vm::ptr<CellSpurs> spurs
/// Entry point of the SPURS event helper thread
void spursEventHelperEntry(PPUThread& CPU)
{
const auto spurs = vm::ptr<CellSpurs>::make(vm::cast(CPU.GPR[3]));
const auto spurs = vm::ptr<CellSpurs>::make(VM_CAST(CPU.GPR[3]));

bool terminate = false;

@ -3270,7 +3270,7 @@ s32 cellSpursEventFlagGetTasksetAddress(vm::ptr<CellSpursEventFlag> eventFlag, v
return CELL_SPURS_TASK_ERROR_ALIGN;
}

taskset->set(eventFlag->isIwl ? 0u : vm::cast(eventFlag->addr));
taskset->set(eventFlag->isIwl ? 0u : VM_CAST(eventFlag->addr));
return CELL_OK;
}

@ -169,7 +169,7 @@ bool spursKernel1SelectWorkload(SPUThread & spu) {
u32 wklSelectedId;
u32 pollStatus;

vm::reservation_op(vm::cast(ctxt->spurs.addr()), 128, [&]() {
vm::reservation_op(VM_CAST(ctxt->spurs.addr()), 128, [&]() {
// lock the first 0x80 bytes of spurs
auto spurs = ctxt->spurs.priv_ptr();

@ -326,7 +326,7 @@ bool spursKernel2SelectWorkload(SPUThread & spu) {
u32 wklSelectedId;
u32 pollStatus;

vm::reservation_op(vm::cast(ctxt->spurs.addr()), 128, [&]() {
vm::reservation_op(VM_CAST(ctxt->spurs.addr()), 128, [&]() {
// lock the first 0x80 bytes of spurs
auto spurs = ctxt->spurs.priv_ptr();

@ -609,7 +609,7 @@ void spursSysServiceIdleHandler(SPUThread & spu, SpursKernelContext * ctxt) {
std::unique_lock<std::mutex> lock(spu.mutex, std::defer_lock);

while (true) {
vm::reservation_acquire(vm::get_ptr(spu.offset + 0x100), vm::cast(ctxt->spurs.addr()), 128, [&spu](){ spu.cv.notify_one(); });
vm::reservation_acquire(vm::get_ptr(spu.offset + 0x100), VM_CAST(ctxt->spurs.addr()), 128, [&spu](){ spu.cv.notify_one(); });
auto spurs = vm::get_ptr<CellSpurs>(spu.offset + 0x100);

// Find the number of SPUs that are idling in this SPURS instance
@ -682,7 +682,7 @@ void spursSysServiceIdleHandler(SPUThread & spu, SpursKernelContext * ctxt) {
continue;
}

if (vm::reservation_update(vm::cast(ctxt->spurs.addr()), vm::get_ptr(spu.offset + 0x100), 128) && (shouldExit || foundReadyWorkload)) {
if (vm::reservation_update(VM_CAST(ctxt->spurs.addr()), vm::get_ptr(spu.offset + 0x100), 128) && (shouldExit || foundReadyWorkload)) {
break;
}
}
@ -705,9 +705,9 @@ void spursSysServiceMain(SPUThread & spu, u32 pollStatus) {
if (ctxt->sysSrvInitialised == 0) {
ctxt->sysSrvInitialised = 1;

vm::reservation_acquire(vm::get_ptr(spu.offset + 0x100), vm::cast(ctxt->spurs.addr()), 128);
vm::reservation_acquire(vm::get_ptr(spu.offset + 0x100), VM_CAST(ctxt->spurs.addr()), 128);

vm::reservation_op(vm::cast(ctxt->spurs.addr() + offsetof(CellSpurs, wklState1)), 128, [&]() {
vm::reservation_op(VM_CAST(ctxt->spurs.addr() + offsetof(CellSpurs, wklState1)), 128, [&]() {
auto spurs = ctxt->spurs.priv_ptr();

// Halt if already initialised
@ -796,7 +796,7 @@ void spursSysServiceProcessRequests(SPUThread & spu, SpursKernelContext * ctxt)
bool updateWorkload = false;
bool terminate = false;

vm::reservation_op(vm::cast(ctxt->spurs.addr() + offsetof(CellSpurs, wklState1)), 128, [&]() {
vm::reservation_op(VM_CAST(ctxt->spurs.addr() + offsetof(CellSpurs, wklState1)), 128, [&]() {
auto spurs = ctxt->spurs.priv_ptr();

// Terminate request
@ -838,9 +838,9 @@ void spursSysServiceProcessRequests(SPUThread & spu, SpursKernelContext * ctxt)
/// Activate a workload
void spursSysServiceActivateWorkload(SPUThread & spu, SpursKernelContext * ctxt) {
auto spurs = vm::get_ptr<CellSpurs>(spu.offset + 0x100);
memcpy(vm::get_ptr(spu.offset + 0x30000), vm::get_ptr(vm::cast(ctxt->spurs.addr() + offsetof(CellSpurs, wklInfo1))), 0x200);
memcpy(vm::get_ptr(spu.offset + 0x30000), vm::get_ptr(VM_CAST(ctxt->spurs.addr() + offsetof(CellSpurs, wklInfo1))), 0x200);
if (spurs->flags1 & SF1_32_WORKLOADS) {
memcpy(vm::get_ptr(spu.offset + 0x30200), vm::get_ptr(vm::cast(ctxt->spurs.addr() + offsetof(CellSpurs, wklInfo2))), 0x200);
memcpy(vm::get_ptr(spu.offset + 0x30200), vm::get_ptr(VM_CAST(ctxt->spurs.addr() + offsetof(CellSpurs, wklInfo2))), 0x200);
}

u32 wklShutdownBitSet = 0;
@ -863,7 +863,7 @@ void spursSysServiceActivateWorkload(SPUThread & spu, SpursKernelContext * ctxt)
}
}

vm::reservation_op(vm::cast(ctxt->spurs.addr() + offsetof(CellSpurs, wklState1)), 128, [&]() {
vm::reservation_op(VM_CAST(ctxt->spurs.addr() + offsetof(CellSpurs, wklState1)), 128, [&]() {
auto spurs = ctxt->spurs.priv_ptr();

for (u32 i = 0; i < CELL_SPURS_MAX_WORKLOAD; i++) {
@ -920,7 +920,7 @@ void spursSysServiceUpdateShutdownCompletionEvents(SPUThread & spu, SpursKernelC
// workloads that have a shutdown completion hook registered
u32 wklNotifyBitSet;
u8 spuPort;
vm::reservation_op(vm::cast(ctxt->spurs.addr() + offsetof(CellSpurs, wklState1)), 128, [&]() {
vm::reservation_op(VM_CAST(ctxt->spurs.addr() + offsetof(CellSpurs, wklState1)), 128, [&]() {
auto spurs = ctxt->spurs.priv_ptr();

wklNotifyBitSet = 0;
@ -962,7 +962,7 @@ void spursSysServiceTraceUpdate(SPUThread & spu, SpursKernelContext * ctxt, u32
bool notify;

u8 sysSrvMsgUpdateTrace;
vm::reservation_op(vm::cast(ctxt->spurs.addr() + offsetof(CellSpurs, wklState1)), 128, [&]() {
vm::reservation_op(VM_CAST(ctxt->spurs.addr() + offsetof(CellSpurs, wklState1)), 128, [&]() {
auto spurs = ctxt->spurs.priv_ptr();

sysSrvMsgUpdateTrace = spurs->sysSrvTrace.data.sysSrvMsgUpdateTrace;
@ -986,7 +986,7 @@ void spursSysServiceTraceUpdate(SPUThread & spu, SpursKernelContext * ctxt, u32

// Get trace parameters from CellSpurs and store them in the LS
if (((sysSrvMsgUpdateTrace & (1 << ctxt->spuNum)) != 0) || (arg3 != 0)) {
vm::reservation_acquire(vm::get_ptr(spu.offset + 0x80), vm::cast(ctxt->spurs.addr() + offsetof(CellSpurs, traceBuffer)), 128);
vm::reservation_acquire(vm::get_ptr(spu.offset + 0x80), VM_CAST(ctxt->spurs.addr() + offsetof(CellSpurs, traceBuffer)), 128);
auto spurs = vm::get_ptr<CellSpurs>(spu.offset + 0x80 - offsetof(CellSpurs, traceBuffer));

if (ctxt->traceMsgCount != 0xFF || spurs->traceBuffer.addr() == 0) {
@ -1016,7 +1016,7 @@ void spursSysServiceCleanupAfterSystemWorkload(SPUThread & spu, SpursKernelConte

bool do_return = false;

vm::reservation_op(vm::cast(ctxt->spurs.addr() + offsetof(CellSpurs, wklState1)), 128, [&]() {
vm::reservation_op(VM_CAST(ctxt->spurs.addr() + offsetof(CellSpurs, wklState1)), 128, [&]() {
auto spurs = ctxt->spurs.priv_ptr();

if (spurs->sysSrvPreemptWklId[ctxt->spuNum] == 0xFF) {
@ -1034,7 +1034,7 @@ void spursSysServiceCleanupAfterSystemWorkload(SPUThread & spu, SpursKernelConte

spursSysServiceActivateWorkload(spu, ctxt);

vm::reservation_op(vm::cast(ctxt->spurs.addr()), 128, [&]() {
vm::reservation_op(VM_CAST(ctxt->spurs.addr()), 128, [&]() {
auto spurs = ctxt->spurs.priv_ptr();

if (wklId >= CELL_SPURS_MAX_WORKLOAD) {
@ -1180,7 +1180,7 @@ s32 spursTasksetProcessRequest(SPUThread & spu, s32 request, u32 * taskId, u32 *

s32 rc = CELL_OK;
s32 numNewlyReadyTasks;
vm::reservation_op(vm::cast(ctxt->taskset.addr()), 128, [&]() {
vm::reservation_op(VM_CAST(ctxt->taskset.addr()), 128, [&]() {
auto taskset = ctxt->taskset.priv_ptr();

// Verify taskset state is valid
@ -1319,7 +1319,7 @@ s32 spursTasksetProcessRequest(SPUThread & spu, s32 request, u32 * taskId, u32 *
});

// Increment the ready count of the workload by the number of tasks that have become ready
vm::reservation_op(vm::cast(kernelCtxt->spurs.addr()), 128, [&]() {
vm::reservation_op(VM_CAST(kernelCtxt->spurs.addr()), 128, [&]() {
auto spurs = kernelCtxt->spurs.priv_ptr();

s32 readyCount = kernelCtxt->wklCurrentId < CELL_SPURS_MAX_WORKLOAD ? spurs->wklReadyCount1[kernelCtxt->wklCurrentId].load() : spurs->wklIdleSpuCountOrReadyCount2[kernelCtxt->wklCurrentId & 0x0F].load();
@ -1429,7 +1429,7 @@ s32 spursTasketSaveTaskContext(SPUThread & spu) {
ctxt->savedWriteTagGroupQueryMask = spu.get_ch_value(MFC_RdTagMask);

// Store the processor context
const u32 contextSaveStorage = vm::cast(taskInfo->context_save_storage_and_alloc_ls_blocks & -0x80);
const u32 contextSaveStorage = VM_CAST(taskInfo->context_save_storage_and_alloc_ls_blocks & -0x80);
memcpy(vm::get_ptr(contextSaveStorage), vm::get_ptr(spu.offset + 0x2C80), 0x380);

// Save LS context
@ -1528,7 +1528,7 @@ void spursTasksetDispatch(SPUThread & spu) {
}

// Load saved context from main memory to LS
const u32 contextSaveStorage = vm::cast(taskInfo->context_save_storage_and_alloc_ls_blocks & -0x80);
const u32 contextSaveStorage = VM_CAST(taskInfo->context_save_storage_and_alloc_ls_blocks & -0x80);
memcpy(vm::get_ptr(spu.offset + 0x2C80), vm::get_ptr(contextSaveStorage), 0x380);
for (auto i = 6; i < 128; i++) {
if (ls_pattern._bit[i]) {
@ -1680,7 +1680,7 @@ s32 spursTasksetLoadElf(SPUThread & spu, u32 * entryPoint, u32 * lowestLoadAddr,
return CELL_SPURS_TASK_ERROR_INVAL;
}

vfsStreamMemory stream(vm::cast(elfAddr));
vfsStreamMemory stream(VM_CAST(elfAddr));
loader::handlers::elf32 loader;
auto rc = loader.init(stream);
if (rc != loader::handler::ok) {

@ -1112,7 +1112,7 @@ s32 _cellSyncLFQueuePushBody(PPUThread& CPU, vm::ptr<CellSyncLFQueue> queue, vm:
const s32 depth = queue->m_depth;
const s32 size = queue->m_size;
const s32 pos = position.value();
const u32 addr = vm::cast<u64>((queue->m_buffer.addr() & ~1ull) + size * (pos >= depth ? pos - depth : pos));
const u32 addr = VM_CAST((u64)((queue->m_buffer.addr() & ~1ull) + size * (pos >= depth ? pos - depth : pos)));
std::memcpy(vm::get_ptr<void>(addr), buffer.get_ptr(), size);

if (queue->m_direction != CELL_SYNC_QUEUE_ANY2ANY)
@ -1425,7 +1425,7 @@ s32 _cellSyncLFQueuePopBody(PPUThread& CPU, vm::ptr<CellSyncLFQueue> queue, vm::
const s32 depth = queue->m_depth;
const s32 size = queue->m_size;
const s32 pos = position.value();
const u32 addr = vm::cast<u64>((queue->m_buffer.addr() & ~1) + size * (pos >= depth ? pos - depth : pos));
const u32 addr = VM_CAST((u64)((queue->m_buffer.addr() & ~1) + size * (pos >= depth ? pos - depth : pos)));
std::memcpy(buffer.get_ptr(), vm::get_ptr<void>(addr), size);

if (queue->m_direction != CELL_SYNC_QUEUE_ANY2ANY)

@ -111,6 +111,7 @@ template<typename T> struct ID_type;

#define WRAP_EXPR(expr) [&]{ return (expr); }
#define EXCEPTION(text, ...) fmt::exception(__FILE__, __LINE__, __FUNCTION__, text, ##__VA_ARGS__)
#define VM_CAST(value) vm::impl_cast(value, __FILE__, __LINE__, __FUNCTION__)

#define _PRGNAME_ "RPCS3"
#define _PRGVER_ "0.0.0.5"

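[Editor's note] With the definition above, every former vm::cast(addr) call site in this diff now expands through the macro, so an out-of-range 64-bit address is reported with the exact call site instead of the generic "vm::cast" default that the removed func parameter used. Call sites that passed extra context by hand (the "ea" and "SP" label strings, and the explicit vm::cast<u64> uses in cellSync, which become a plain (u64) cast of the argument) are adjusted manually, since the macro now supplies that context automatically. A hypothetical expansion, for illustration only, using a call site taken from the diff:

vm::write64(VM_CAST(CPU.GPR[1] + 0x28), CPU.GPR[2]);
// expands to:
vm::write64(vm::impl_cast(CPU.GPR[1] + 0x28, __FILE__, __LINE__, __FUNCTION__), CPU.GPR[2]);
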