
Use vm::passive_lock for SPU threads

Nekotekina 2018-04-03 17:19:07 +03:00
parent e88508b679
commit d392379c7a
8 changed files with 76 additions and 51 deletions
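
In outline: check_state() already re-took vm::passive_lock (roughly, a per-thread shared lock on guest memory) for PPU threads, and the first hunk below replaces that direct call with a virtual hook. This commit extends the same discipline to SPU threads: an SPU holds the passive lock while it performs MFC transfers, drops it with vm::temporary_unlock before any potentially long wait, and is notified on resume through the new cpu_mem() callback. Below is a minimal, self-contained sketch of that lifecycle, simplified to an eager (PPU-style) relock; every name except passive_lock/temporary_unlock/cpu_mem/cpu_unmem is a stand-in invented for illustration.

#include <atomic>
#include <shared_mutex>

// Stand-in for the emulator's reader/writer lock over guest memory (illustration only).
static std::shared_mutex g_vm_mutex;

struct toy_cpu_thread
{
    std::atomic<bool> relock_needed{false};

    // Analogue of vm::passive_lock(*this): hold guest memory in shared mode.
    void cpu_mem() { g_vm_mutex.lock_shared(); relock_needed = false; }

    // Analogue of vm::temporary_unlock(*this) + cpu_unmem(): drop the lock
    // before a long wait and remember that it must be re-taken.
    void unmem() { g_vm_mutex.unlock_shared(); relock_needed = true; }

    // Analogue of cpu_thread::check_state(): restore the lock when resuming.
    void check_state() { if (relock_needed) cpu_mem(); }
};

int main()
{
    toy_cpu_thread spu;
    spu.cpu_mem();       // lock held while "transferring" guest memory
    spu.unmem();         // released before a blocking wait
    spu.check_state();   // re-taken on the way back into guest code
    g_vm_mutex.unlock_shared();
}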

View File

@ -133,7 +133,11 @@ bool cpu_thread::check_state()
if (!test(state, cpu_state_pause))
{
if (cpu_flag_memory) vm::passive_lock(*this);
if (cpu_flag_memory)
{
cpu_mem();
}
break;
}
else if (!cpu_sleep_called && test(state, cpu_flag::suspend))

View File

@ -65,6 +65,12 @@ public:
// Callback for cpu_flag::suspend
virtual void cpu_sleep() {}
// Callback for cpu_flag::memory
virtual void cpu_mem() {}
// Callback for vm::temporary_unlock
virtual void cpu_unmem() {}
};
inline cpu_thread* get_current_cpu_thread() noexcept
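
Both new callbacks default to no-ops, so existing cpu_thread users are unaffected and only thread types that track the passive lock override them. A small sketch of the contract (hypothetical classes; only the two virtual names mirror the header above): per this commit, the PPU overrides relock eagerly, while the SPU overrides merely invalidate cached MFC state and relock lazily.

// Hypothetical classes; only the two virtuals mirror the header above.
struct base_thread
{
    virtual ~base_thread() = default;
    virtual void cpu_mem() {}     // called from cpu_thread::check_state() on resume
    virtual void cpu_unmem() {}   // called from vm::temporary_unlock()
};

struct ppu_style : base_thread
{
    void cpu_mem() override   { /* vm::passive_lock(*this): re-take the lock now */ }
    void cpu_unmem() override { /* set cpu_flag::memory so check_state() relocks */ }
};

struct spu_style : base_thread
{
    void cpu_mem() override   { /* invalidate mfc_barrier/mfc_fence; relock happens in do_mfc() */ }
    void cpu_unmem() override { /* same invalidation; see the SPU hunks below */ }
};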

View File

@ -588,6 +588,22 @@ void ppu_thread::cpu_task()
}
}
void ppu_thread::cpu_sleep()
{
vm::temporary_unlock(*this);
lv2_obj::awake(*this);
}
void ppu_thread::cpu_mem()
{
vm::passive_lock(*this);
}
void ppu_thread::cpu_unmem()
{
state.test_and_set(cpu_flag::memory);
}
void ppu_thread::exec_task()
{
if (g_cfg.core.ppu_decoder == ppu_decoder_type::llvm)
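
On the PPU the pair is straightforward: cpu_unmem() only sets cpu_flag::memory (it is called from vm::temporary_unlock, which has already released the lock), and the flag makes the next check_state() call cpu_mem(), which re-takes vm::passive_lock. A hypothetical helper tracing that round trip, using only names that appear in this diff:

// Hypothetical helper; vm::temporary_unlock, vm::passive_lock, cpu_flag::memory
// and check_state() are real names from this diff, the wrapper itself is not.
void ppu_relock_round_trip(ppu_thread& ppu)
{
    vm::temporary_unlock(ppu);  // releases the passive lock and calls ppu.cpu_unmem(),
                                // which just sets cpu_flag::memory in ppu.state

    // ... the thread sleeps in an LV2 syscall, waits on a sync object, etc. ...

    ppu.check_state();          // sees cpu_flag::memory and calls ppu.cpu_mem(),
                                // i.e. vm::passive_lock(ppu) re-takes the shared lock
}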

View File

@ -37,6 +37,8 @@ public:
virtual std::string dump() const override;
virtual void cpu_task() override;
virtual void cpu_sleep() override;
virtual void cpu_mem() override;
virtual void cpu_unmem() override;
virtual ~ppu_thread() override;
ppu_thread(const std::string& name, u32 prio = 0, u32 stack = 0x10000);
@ -96,7 +98,7 @@ public:
u8 cnt{}; // 0..6
}
xer;
/*
Saturation. A sticky status bit indicating that some field in a saturating instruction saturated since the last
time SAT was cleared. In other words when SAT = '1' it remains set to '1' until it is cleared to '0' by an
@ -133,11 +135,11 @@ public:
u32 raddr{0}; // Reservation addr
u64 rtime{0};
u64 rdata{0}; // Reservation data
atomic_t<u32> prio{0}; // Thread priority (0..3071)
const u32 stack_size; // Stack size
const u32 stack_addr; // Stack address
atomic_t<u32> joiner{~0u}; // Joining thread (-1 if detached)
lf_fifo<atomic_t<cmd64>, 127> cmd_queue; // Command queue for asynchronous operations.

View File

@ -485,6 +485,18 @@ void SPUThread::cpu_task()
}
}
void SPUThread::cpu_mem()
{
mfc_barrier = -1;
mfc_fence = -1;
}
void SPUThread::cpu_unmem()
{
mfc_barrier = -1;
mfc_fence = -1;
}
SPUThread::~SPUThread()
{
// Deallocate Local Storage
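
The SPU overrides never touch the lock here. Instead they poison the cached MFC barrier/fence masks with the all-ones "special value" (-1/-1), which ordinary DMA bookkeeping is not expected to produce; the next MFC operation notices it, recomputes the masks and re-takes the passive lock (see the do_dma_check/do_mfc hunks below). A tiny sketch of the sentinel convention (member types are assumptions, not the real SPUThread fields):

#include <cstdint>

// Sketch of the sentinel convention; the diff writes the value as -1.
struct mfc_cache_sketch
{
    std::uint32_t mfc_barrier = UINT32_MAX;   // bit mask of tag groups blocked by a barrier
    std::uint32_t mfc_fence   = UINT32_MAX;   // bit mask of tag groups blocked by a fence

    void invalidate()          { mfc_barrier = UINT32_MAX; mfc_fence = UINT32_MAX; } // cpu_mem()/cpu_unmem()
    bool needs_refresh() const { return mfc_barrier == UINT32_MAX && mfc_fence == UINT32_MAX; }
};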
@ -760,9 +772,12 @@ bool SPUThread::do_dma_check(const spu_mfc_cmd& args)
if (mfc_barrier & mask || (args.cmd & MFC_FENCE_MASK && mfc_fence & mask))
{
mfc_barrier = -1;
mfc_fence = -1;
return false;
}
vm::passive_lock(*this);
return true;
}
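
do_dma_check() decides whether a command's tag group is currently blocked; on the path where the transfer may proceed, the hunk above adds vm::passive_lock(*this) right before return true, so the transfer itself runs under the shared VM lock. A condensed sketch of the tag check (parameter names invented; the boolean stands in for the args.cmd & MFC_FENCE_MASK test above):

#include <cstdint>

// Condensed sketch, not the real member function.
bool dma_check_sketch(std::uint32_t tag, bool cmd_has_fence,
                      std::uint32_t barrier, std::uint32_t fence)
{
    const std::uint32_t mask = 1u << tag;      // one bit per MFC tag group

    if ((barrier & mask) || (cmd_has_fence && (fence & mask)))
    {
        return false;                          // blocked: the command stays queued
    }

    // here the real code calls vm::passive_lock(*this)
    return true;                               // free to transfer under the passive lock
}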
@ -774,8 +789,6 @@ bool SPUThread::do_dma_check(const spu_mfc_cmd& args)
bool SPUThread::do_list_transfer(spu_mfc_cmd& args)
{
vm::reader_lock lock;
struct list_element
{
be_t<u16> sb; // Stall-and-Notify bit (0x8000)
@ -808,16 +821,6 @@ bool SPUThread::do_list_transfer(spu_mfc_cmd& args)
if (size)
{
if (!vm::check_addr(addr, size, vm::page_allocated | vm::page_readable | (args.cmd & MFC_PUT_CMD ? vm::page_writable : 0)) && args.eal < RAW_SPU_BASE_ADDR)
{
Emu.Pause();
state += cpu_flag::stop;
LOG_FATAL(SPU, "Access violation %s location 0x%x (%s, size=0x%x)",
args.cmd & MFC_PUT_CMD ? "writing" : "reading", addr, args.cmd, size);
return false;
}
spu_mfc_cmd transfer;
transfer.eal = addr;
transfer.eah = 0;
@ -879,6 +882,7 @@ void SPUThread::do_mfc()
// Check special value
if (UNLIKELY(mfc_barrier == -1 && mfc_fence == -1))
{
vm::passive_lock(*this);
}
// Process enqueued commands
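
do_mfc() is where the sentinel from cpu_mem()/cpu_unmem() pays off: on entry it checks for the -1/-1 combination and, if present, re-takes the passive lock before processing the queued commands. A sketch reusing mfc_cache_sketch from the earlier example; the final mask recomputation is an assumption based on the surrounding code, not something shown in this hunk:

// Sketch only; reuses mfc_cache_sketch from the earlier example.
void do_mfc_sketch(mfc_cache_sketch& mfc)
{
    if (mfc.needs_refresh())
    {
        // vm::passive_lock(*this);            // the line this hunk adds
    }

    std::uint32_t barrier = 0, fence = 0;

    // ... walk the enqueued commands, accumulating barrier/fence bits for
    //     commands that cannot execute yet (elided) ...

    mfc.mfc_barrier = barrier;                 // assumed: fresh masks replace the sentinel
    mfc.mfc_fence   = fence;
}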
@ -949,20 +953,6 @@ void SPUThread::do_mfc()
// Also ignore MFC_SYNC_CMD
if (args.size)
{
vm::reader_lock lock;
if (!vm::check_addr(args.eal, args.size, vm::page_allocated | vm::page_readable | (args.cmd & MFC_PUT_CMD ? vm::page_writable : 0)) && args.eal < RAW_SPU_BASE_ADDR)
{
Emu.Pause();
state += cpu_flag::stop;
LOG_FATAL(SPU, "Access violation %s location 0x%x (%s, size=0x%x)",
args.cmd & MFC_PUT_CMD ? "writing" : "reading",
args.eal, args.cmd, args.size);
barrier |= -1;
return false;
}
do_dma_transfer(args);
}
@ -1011,6 +1001,8 @@ bool SPUThread::process_mfc_cmd(spu_mfc_cmd args)
// Stall infinitely if MFC queue is full
while (mfc_size >= 16)
{
vm::temporary_unlock(*this);
if (test(state, cpu_flag::stop))
{
return false;
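
The rest of the SPU changes follow one pattern: any loop that may block for a while (a full MFC queue here, channel reads, reservation waits and the stop-and-signal handlers below) now calls vm::temporary_unlock(*this) first, so a stalled SPU does not keep holding the shared VM lock. A hypothetical wrapper showing the shape of this first stall loop; mfc_size, the 16-entry limit, cpu_flag::stop and the vm call are taken from the diff, the wait itself is elided:

// Hypothetical wrapper around the stall loop above.
bool wait_for_mfc_slot_sketch(SPUThread& spu)
{
    while (spu.mfc_size >= 16)              // the SPU's MFC command queue is full
    {
        vm::temporary_unlock(spu);          // never hold the shared VM lock while stalled

        if (test(spu.state, cpu_flag::stop))
        {
            return false;                   // give up if the thread is being stopped
        }

        // ... wait/yield until a queued command completes (elided) ...
    }

    return true;
}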
@ -1054,6 +1046,8 @@ bool SPUThread::process_mfc_cmd(spu_mfc_cmd args)
while (vm::reservation_acquire(raddr, 128) == waiter.stamp && rdata == data)
{
vm::temporary_unlock(*this);
if (test(state, cpu_flag::stop))
{
break;
@ -1236,20 +1230,6 @@ bool SPUThread::process_mfc_cmd(spu_mfc_cmd args)
if (LIKELY(args.size))
{
vm::reader_lock lock;
if (!vm::check_addr(args.eal, args.size, vm::page_allocated | vm::page_readable | (args.cmd & MFC_PUT_CMD ? vm::page_writable : 0)) && args.eal < RAW_SPU_BASE_ADDR)
{
Emu.Pause();
state += cpu_flag::stop;
LOG_FATAL(SPU, "Access violation %s location 0x%x (%s, size=0x%x)",
args.cmd & MFC_PUT_CMD ? "writing" : "reading",
args.eal, args.cmd, args.size);
mfc_queue[mfc_size++] = args;
return true;
}
do_dma_transfer(args);
}
@ -1414,11 +1394,14 @@ bool SPUThread::get_ch_value(u32 ch, u32& out)
{
for (int i = 0; i < 10 && channel.get_count() == 0; i++)
{
vm::temporary_unlock(*this);
busy_wait();
}
while (!channel.try_pop(out))
{
vm::temporary_unlock(*this);
if (test(state, cpu_flag::stop))
{
return false;
@ -1443,6 +1426,7 @@ bool SPUThread::get_ch_value(u32 ch, u32& out)
{
for (int i = 0; i < 10 && ch_in_mbox.get_count() == 0; i++)
{
vm::temporary_unlock(*this);
busy_wait();
}
@ -1456,6 +1440,8 @@ bool SPUThread::get_ch_value(u32 ch, u32& out)
return true;
}
vm::temporary_unlock(*this);
if (test(state & cpu_flag::stop))
{
return false;
@ -1561,6 +1547,8 @@ bool SPUThread::get_ch_value(u32 ch, u32& out)
while (!(res = get_events(true)))
{
vm::temporary_unlock(*this);
if (test(state & cpu_flag::stop))
{
return false;
@ -1603,6 +1591,8 @@ bool SPUThread::set_ch_value(u32 ch, u32 value)
{
while (!ch_out_intr_mbox.try_push(value))
{
vm::temporary_unlock(*this);
if (test(state & cpu_flag::stop))
{
return false;
@ -1749,6 +1739,8 @@ bool SPUThread::set_ch_value(u32 ch, u32 value)
{
while (!ch_out_mbox.try_push(value))
{
vm::temporary_unlock(*this);
if (test(state & cpu_flag::stop))
{
return false;
@ -1949,6 +1941,8 @@ bool SPUThread::stop_and_signal(u32 code)
// HACK: wait for executable code
while (!_ref<u32>(pc))
{
vm::temporary_unlock(*this);
if (test(state & cpu_flag::stop))
{
return false;
@ -1962,6 +1956,7 @@ bool SPUThread::stop_and_signal(u32 code)
case 0x001:
{
vm::temporary_unlock(*this);
thread_ctrl::wait_for(1000); // hack
return true;
}
@ -1976,6 +1971,8 @@ bool SPUThread::stop_and_signal(u32 code)
{
/* ===== sys_spu_thread_receive_event ===== */
vm::temporary_unlock(*this);
u32 spuq;
if (!ch_out_mbox.try_pop(spuq))
@ -2131,6 +2128,8 @@ bool SPUThread::stop_and_signal(u32 code)
{
/* ===== sys_spu_thread_group_exit ===== */
vm::temporary_unlock(*this);
u32 value;
if (!ch_out_mbox.try_pop(value))
@ -2164,6 +2163,8 @@ bool SPUThread::stop_and_signal(u32 code)
{
/* ===== sys_spu_thread_exit ===== */
vm::temporary_unlock(*this);
if (!ch_out_mbox.get_count())
{
fmt::throw_exception("sys_spu_thread_exit(): Out_MBox is empty" HERE);

View File

@ -509,6 +509,8 @@ public:
virtual std::string get_name() const override;
virtual std::string dump() const override;
virtual void cpu_task() override;
virtual void cpu_mem() override;
virtual void cpu_unmem() override;
virtual ~SPUThread() override;
void cpu_init();

View File

@ -1179,9 +1179,3 @@ void lv2_obj::schedule_all()
}
}
}
void ppu_thread::cpu_sleep()
{
vm::temporary_unlock(*this);
lv2_obj::awake(*this);
}

View File

@ -109,7 +109,7 @@ namespace vm
{
if (g_tls_locked && g_tls_locked->compare_and_swap_test(&cpu, nullptr))
{
cpu.state.test_and_set(cpu_flag::memory);
cpu.cpu_unmem();
}
}
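
Finally, vm::temporary_unlock() no longer writes cpu_flag::memory itself; it calls the virtual cpu_unmem(), so PPU threads keep the old flag-setting behaviour while SPU threads invalidate their cached MFC masks instead. A stand-alone illustration of why the virtual hook is needed (types are stand-ins, not the real classes):

struct cpu_like            { virtual ~cpu_like() = default; virtual void cpu_unmem() = 0; };
struct ppu_like : cpu_like { void cpu_unmem() override { /* set cpu_flag::memory, as before */ } };
struct spu_like : cpu_like { void cpu_unmem() override { /* reset mfc_barrier/mfc_fence to -1; relock happens lazily in do_mfc() */ } };

void temporary_unlock_sketch(cpu_like& cpu)
{
    // ... release this thread's passive-lock slot (elided; see the hunk above) ...
    cpu.cpu_unmem();   // per-thread-type cleanup replaces the old direct flag write
}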