
SPU_EVENT_LR improved

Nekotekina committed 2015-07-15 21:11:32 +03:00
parent 1e021cdbba
commit a7668ff57f
6 changed files with 109 additions and 42 deletions

View File

@@ -73,6 +73,9 @@ public:
// check whether it is the current running thread
bool is_current() const;
// get internal thread pointer
const thread_ctrl_t* get_ctrl() const { return m_thread.get(); }
};
class autojoin_thread_t final : private thread_t

View File

@@ -19,9 +19,10 @@ enum : u64
CPU_STATE_STEP = (1ull << 3), // forces the thread to pause after executing just one instruction or something appropriate, set by the debugger
CPU_STATE_DEAD = (1ull << 4), // indicates irreversible exit of the thread
CPU_STATE_RETURN = (1ull << 5), // used for callback return
CPU_STATE_SIGNAL = (1ull << 6),
CPU_STATE_SIGNAL = (1ull << 6), // used for HLE signaling
CPU_STATE_INTR = (1ull << 7), // thread interrupted
CPU_STATE_MAX = (1ull << 7), // added to (subtracted from) m_state by Sleep()/Awake() calls to trigger status check
CPU_STATE_MAX = (1ull << 8), // added to (subtracted from) m_state by Sleep()/Awake() calls to trigger status check
};
// "HLE return" exception event
@@ -49,6 +50,8 @@ protected:
public:
using thread_t::mutex;
using thread_t::cv;
using thread_t::is_current;
using thread_t::get_ctrl;
protected:
CPUThread(CPUThreadType type, const std::string& name, std::function<std::string()> thread_name);

View File

@@ -210,6 +210,7 @@ void SPUThread::InitRegs()
ch_event_mask = 0;
ch_event_stat = {};
last_raddr = 0;
ch_dec_start_timestamp = get_timebased_time(); // ???
ch_dec_value = 0;
@@ -454,16 +455,16 @@ void SPUThread::process_mfc_cmd(u32 cmd)
break;
}
vm::reservation_acquire(vm::get_ptr(offset + ch_mfc_args.lsa), VM_CAST(ch_mfc_args.ea), 128);
const u32 raddr = VM_CAST(ch_mfc_args.ea);
if (ch_event_stat.load() & SPU_EVENT_AR)
vm::reservation_acquire(vm::get_ptr(offset + ch_mfc_args.lsa), raddr, 128);
if (last_raddr)
{
ch_event_stat |= SPU_EVENT_LR;
}
else
{
ch_event_stat |= SPU_EVENT_AR;
}
last_raddr = raddr;
return ch_atomic_stat.push_uncond(MFC_GETLLAR_SUCCESS);
}
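In the GETLLAR hunk the SPU_EVENT_AR hack is dropped: instead of flagging "reservation acquired" in the event word, the thread records the reserved address in last_raddr, and acquiring a new line while an older reservation is still on record raises SPU_EVENT_LR (lock-line reservation lost). A small self-contained sketch of that bookkeeping, using invented names (spu_state, EVENT_LR, acquire_reservation) rather than the real SPUThread interface:

#include <cstdint>
#include <cstdio>

enum : std::uint32_t { EVENT_LR = 1u << 10 }; // "lock line reservation lost"

struct spu_state
{
    std::uint32_t event_stat = 0;
    std::uint32_t last_raddr = 0; // 0 means "no reservation held"

    // GETLLAR-like step: take a reservation on the 128-byte line at addr
    void acquire_reservation(std::uint32_t addr)
    {
        if (last_raddr)             // replacing an existing reservation
        {
            event_stat |= EVENT_LR; // the old one is implicitly lost
        }
        last_raddr = addr;
    }
};

int main()
{
    spu_state spu;
    spu.acquire_reservation(0x10000);
    spu.acquire_reservation(0x20000); // second acquire loses the first
    std::printf("LR set: %d\n", (spu.event_stat & EVENT_LR) != 0);
}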
@@ -475,22 +476,24 @@ void SPUThread::process_mfc_cmd(u32 cmd)
break;
}
const bool was_acquired = (ch_event_stat._and_not(SPU_EVENT_AR) & SPU_EVENT_AR) != 0;
if (vm::reservation_update(VM_CAST(ch_mfc_args.ea), vm::get_ptr(offset + ch_mfc_args.lsa), 128))
{
if (!was_acquired)
if (last_raddr == 0)
{
throw EXCEPTION("Unexpected: PUTLLC command succeeded, but GETLLAR command not detected");
}
last_raddr = 0;
return ch_atomic_stat.push_uncond(MFC_PUTLLC_SUCCESS);
}
else
{
if (was_acquired)
if (last_raddr != 0)
{
ch_event_stat |= SPU_EVENT_LR;
last_raddr = 0;
}
return ch_atomic_stat.push_uncond(MFC_PUTLLC_FAILURE);
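The PUTLLC hunk follows the same idea for the conditional store: success is only legal if a GETLLAR reservation is on record (hence the exception when last_raddr == 0), both outcomes clear the local record, and a failed store while a reservation was held additionally raises SPU_EVENT_LR. A hedged sketch of that decision, where reservation_update is a toy stand-in for vm::reservation_update:

#include <cstdint>
#include <cstdio>

enum : std::uint32_t { EVENT_LR = 1u << 10 };

// Toy stand-in for vm::reservation_update: pretend every conditional store
// fails, as if another thread had already written to the reserved line.
static bool reservation_update(std::uint32_t /*addr*/) { return false; }

// PUTLLC-like step: conditional store; clears the local reservation record
// and raises EVENT_LR when the store fails while a reservation was held.
static bool do_putllc(std::uint32_t addr, std::uint32_t& last_raddr, std::uint32_t& event_stat)
{
    if (reservation_update(addr))
    {
        last_raddr = 0;          // success: reservation consumed
        return true;
    }
    if (last_raddr != 0)
    {
        event_stat |= EVENT_LR;  // we held a reservation and it was lost
        last_raddr = 0;
    }
    return false;                // failure
}

int main()
{
    std::uint32_t last_raddr = 0x10000; // a reservation is on record
    std::uint32_t event_stat = 0;
    const bool ok = do_putllc(0x10000, last_raddr, event_stat);
    std::printf("success=%d LR=%d\n", ok, (event_stat & EVENT_LR) != 0);
}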
@@ -510,9 +513,11 @@ void SPUThread::process_mfc_cmd(u32 cmd)
std::memcpy(vm::priv_ptr(VM_CAST(ch_mfc_args.ea)), vm::get_ptr(offset + ch_mfc_args.lsa), 128);
});
if (ch_event_stat._and_not(SPU_EVENT_AR) & SPU_EVENT_AR && vm::g_tls_did_break_reservation)
if (last_raddr != 0 && vm::g_tls_did_break_reservation)
{
ch_event_stat |= SPU_EVENT_LR;
last_raddr = 0;
}
if (cmd == MFC_PUTLLUC_CMD)
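For the unconditional PUTLLUC store the diff keys the event off vm::g_tls_did_break_reservation, a thread-local flag the memory layer sets when the write had to tear down an existing reservation; if this SPU still had one recorded, it reports the loss to itself as well. A minimal sketch of that thread-local feedback pattern, with store_unconditional and g_did_break_reservation invented for the example:

#include <cstdint>
#include <cstdio>

// Invented thread-local flag: the (toy) store routine sets it when the write
// had to break an existing reservation on the target line.
thread_local bool g_did_break_reservation = false;

enum : std::uint32_t { EVENT_LR = 1u << 10 };

static void store_unconditional(std::uint32_t /*addr*/)
{
    // ... the real code copies 128 bytes under the reservation lock ...
    g_did_break_reservation = true; // toy: pretend a reservation was broken
}

// PUTLLUC-like step: store unconditionally, then report the loss to ourselves
// if our own recorded reservation was among the casualties.
static void do_putlluc(std::uint32_t addr, std::uint32_t& last_raddr, std::uint32_t& event_stat)
{
    store_unconditional(addr);

    if (last_raddr != 0 && g_did_break_reservation)
    {
        event_stat |= EVENT_LR;
        last_raddr = 0;
    }
}

int main()
{
    std::uint32_t last_raddr = 0x10000;
    std::uint32_t event_stat = 0;
    do_putlluc(0x10000, last_raddr, event_stat);
    std::printf("LR=%d\n", (event_stat & EVENT_LR) != 0);
}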
@@ -527,18 +532,61 @@ void SPUThread::process_mfc_cmd(u32 cmd)
throw EXCEPTION("Unknown command %s (cmd=0x%x, lsa=0x%x, ea=0x%llx, tag=0x%x, size=0x%x)", get_mfc_cmd_name(cmd), cmd, ch_mfc_args.lsa, ch_mfc_args.ea, ch_mfc_args.tag, ch_mfc_args.size);
}
u32 SPUThread::get_events()
u32 SPUThread::get_events(bool waiting)
{
// check reservation status and set SPU_EVENT_LR if lost
if (ch_event_stat.load() & SPU_EVENT_AR && !vm::reservation_test())
if (last_raddr != 0 && !vm::reservation_test(get_ctrl()))
{
ch_event_stat |= SPU_EVENT_LR;
ch_event_stat &= ~SPU_EVENT_AR;
last_raddr = 0;
}
// initialize waiting
if (waiting)
{
// polling with atomically set/removed SPU_EVENT_WAITING flag
return ch_event_stat.atomic_op([this](u32& stat) -> u32
{
if (u32 res = stat & ch_event_mask)
{
stat &= ~SPU_EVENT_WAITING;
return res;
}
else
{
stat |= SPU_EVENT_WAITING;
return 0;
}
});
}
// simple polling
return ch_event_stat.load() & ch_event_mask;
}
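The new waiting path of get_events tests the enabled events and sets or clears SPU_EVENT_WAITING in one atomic operation, so a concurrent set_events can always see whether a reader is about to block. The diff uses atomic_t::atomic_op for this; a rough equivalent built on a plain std::atomic compare-exchange loop (the mask value and names are assumptions) could look like:

#include <atomic>
#include <cstdint>

enum : std::uint32_t { EVENT_WAITING = 1u << 31 };

std::atomic<std::uint32_t> event_stat{0};
std::uint32_t event_mask = 0x7; // assumed set of enabled events

// Return the pending enabled events; if there are none, set EVENT_WAITING in
// the same atomic step so a setter racing with us sees we are about to sleep.
std::uint32_t poll_and_mark_waiting()
{
    std::uint32_t old = event_stat.load();
    for (;;)
    {
        const std::uint32_t res = old & event_mask;
        const std::uint32_t desired = res ? (old & ~EVENT_WAITING) : (old | EVENT_WAITING);
        if (event_stat.compare_exchange_weak(old, desired))
        {
            return res;
        }
        // on failure, 'old' holds the reloaded value and the loop retries
    }
}

int main()
{
    event_stat = 0x1; // one event already pending
    return poll_and_mark_waiting() == 0x1 ? 0 : 1;
}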
void SPUThread::set_events(u32 mask)
{
if (u32 unimpl = mask & ~SPU_EVENT_IMPLEMENTED)
{
throw EXCEPTION("Unimplemented events (0x%x)", unimpl);
}
// set new events, get old event mask
const u32 old_stat = ch_event_stat._or(mask);
// notify if some events were set
if (~old_stat & mask && old_stat & SPU_EVENT_WAITING)
{
std::lock_guard<std::mutex> lock(mutex);
if (ch_event_stat.load() & SPU_EVENT_WAITING)
{
cv.notify_one();
}
}
}
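set_events is the producer side of the same handshake: it only takes the mutex and calls notify_one when it both raised a bit that was previously clear and observed SPU_EVENT_WAITING. A minimal sketch matching the waiter sketch above (std::atomic and invented globals stand in for the emulator's atomic_t members):

#include <atomic>
#include <condition_variable>
#include <cstdint>
#include <mutex>

enum : std::uint32_t { EVENT_WAITING = 1u << 31 };

std::atomic<std::uint32_t> event_stat{EVENT_WAITING}; // a waiter has announced itself
std::mutex mtx;
std::condition_variable cv;

void set_events(std::uint32_t mask)
{
    // fetch_or returns the previous value, like atomic_t<>::_or in the diff
    const std::uint32_t old = event_stat.fetch_or(mask);

    // Wake someone only if a previously clear bit was raised and a waiter
    // had announced itself through EVENT_WAITING.
    if ((~old & mask) != 0 && (old & EVENT_WAITING) != 0)
    {
        std::lock_guard<std::mutex> lock(mtx); // pairs with the waiter's cv.wait(lock)
        if (event_stat.load() & EVENT_WAITING)
        {
            cv.notify_one();
        }
    }
}

int main()
{
    set_events(0x1);
}

Because the waiter in the diff re-checks the event status while holding the same mutex before calling cv.wait, a notify issued under that lock cannot fall into the gap between the waiter's check and its sleep, which is what prevents a lost wakeup on this lock-free fast path.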
u32 SPUThread::get_ch_count(u32 ch)
{
if (Ini.HLELogging.GetValue())
@@ -675,29 +723,40 @@ u32 SPUThread::get_ch_value(u32 ch)
{
std::unique_lock<std::mutex> lock(mutex, std::defer_lock);
u32 result;
while ((result = get_events()) == 0)
// start waiting or return immediately
if (u32 res = get_events(true))
{
CHECK_EMU_STATUS;
if (IsStopped()) throw CPUThreadStop{};
if (!lock)
{
lock.lock();
continue;
}
cv.wait_for(lock, std::chrono::milliseconds(1));
return res;
}
if (ch_event_mask & SPU_EVENT_LR)
{
// register waiter if polling reservation status is required
vm::wait_op(*this, last_raddr, 128, WRAP_EXPR(get_events(true) || IsStopped()));
}
else
{
lock.lock();
// simple waiting loop otherwise
while (!get_events(true) && !IsStopped())
{
CHECK_EMU_STATUS;
cv.wait(lock);
}
}
ch_event_stat &= ~SPU_EVENT_WAITING;
if (IsStopped()) throw CPUThreadStop{};
return result;
return get_events();
}
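SPU_RdEventStat no longer spins on a 1 ms cv.wait_for poll. It returns immediately if events are already pending; otherwise it picks one of two waiting strategies: when SPU_EVENT_LR is enabled it registers as a waiter on the reserved 128-byte line via vm::wait_op, so a write to that line wakes it directly, and otherwise it blocks on the condition variable until set_events notifies it. A rough, self-contained sketch of that dispatch, where wait_on_range, poll and stopped are toy stand-ins for vm::wait_op, get_events(true) and IsStopped:

#include <condition_variable>
#include <cstdint>
#include <functional>
#include <mutex>

enum : std::uint32_t { EVENT_LR = 1u << 10 };

struct spu_ctx
{
    std::uint32_t event_mask = 0; // EVENT_LR not enabled in this toy run
    std::uint32_t last_raddr = 0;
    std::mutex mtx;
    std::condition_variable cv;

    std::uint32_t poll(bool /*waiting*/) { return 0; } // toy: no events pending
    bool stopped() const { return true; }              // toy: stop immediately
};

// Invented stand-in for vm::wait_op: here it just spins on the predicate.
static void wait_on_range(std::uint32_t /*addr*/, std::uint32_t /*size*/,
                          const std::function<bool()>& pred)
{
    while (!pred()) { /* the real thing blocks on a per-address waiter list */ }
}

static std::uint32_t read_event_stat(spu_ctx& spu)
{
    // start waiting or return immediately
    if (std::uint32_t res = spu.poll(true))
    {
        return res;
    }

    if (spu.event_mask & EVENT_LR)
    {
        // reservation-lost enabled: wait on the reserved 128-byte line itself
        wait_on_range(spu.last_raddr, 128, [&] { return spu.poll(true) != 0 || spu.stopped(); });
    }
    else
    {
        // otherwise a plain condition-variable loop, woken by set_events
        std::unique_lock<std::mutex> lock(spu.mtx);
        while (spu.poll(true) == 0 && !spu.stopped())
        {
            spu.cv.wait(lock);
        }
    }

    return spu.poll(false);
}

int main()
{
    spu_ctx spu;
    return static_cast<int>(read_event_stat(spu));
}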
case SPU_RdMachStat:
{
return 1; // hack (not isolated, interrupts enabled)
return 0; // hack (not isolated, interrupts disabled)
}
}
@@ -1048,7 +1107,7 @@ void SPUThread::set_ch_value(u32 ch, u32 value)
case SPU_WrEventMask:
{
if (value & ~(SPU_EVENT_IMPLEMENTED))
if (value & ~SPU_EVENT_IMPLEMENTED)
{
break;
}
@@ -1059,7 +1118,7 @@ void SPUThread::set_ch_value(u32 ch, u32 value)
case SPU_WrEventAck:
{
if (value & ~(SPU_EVENT_IMPLEMENTED))
if (value & ~SPU_EVENT_IMPLEMENTED)
{
break;
}

View File

@@ -65,7 +65,7 @@ enum : u32
SPU_EVENT_IMPLEMENTED = SPU_EVENT_LR, // Mask of implemented events
SPU_EVENT_AR = 0x80000000, // Set after acquiring the reservation (hack)
SPU_EVENT_WAITING = 0x80000000, // This bit is originally unused, set when SPU thread starts waiting on ch_event_stat
};
// SPU Class 0 Interrupts
@@ -504,6 +504,7 @@ public:
u32 ch_event_mask;
atomic_t<u32> ch_event_stat;
u32 last_raddr; // Last Reservation Address (0 if not set)
u64 ch_dec_start_timestamp; // timestamp of writing decrementer value
u32 ch_dec_value; // written decrementer value
@@ -555,7 +556,8 @@ public:
void do_dma_list_cmd(u32 cmd, spu_mfc_arg_t args);
void process_mfc_cmd(u32 cmd);
u32 get_events();
u32 get_events(bool waiting = false);
void set_events(u32 mask);
u32 get_ch_count(u32 ch);
u32 get_ch_value(u32 ch);
void set_ch_value(u32 ch, u32 value);

View File

@@ -145,7 +145,7 @@ namespace vm
reservation_mutex_t g_reservation_mutex;
waiter_list_t g_waiter_list;
std::array<waiter_t, 1024> g_waiter_list;
std::size_t g_waiter_max = 0; // min unused position
std::size_t g_waiter_nil = 0; // min search position
@@ -493,11 +493,11 @@ namespace vm
return true;
}
bool reservation_test()
bool reservation_test(const thread_ctrl_t* current)
{
const auto owner = g_reservation_owner;
return owner && owner == get_current_thread_ctrl();
return owner && owner == current;
}
void reservation_free()

View File

@@ -2,6 +2,8 @@
#include "stdafx.h"
const class thread_ctrl_t* get_current_thread_ctrl();
class CPUThread;
namespace vm
@@ -60,8 +62,6 @@ namespace vm
bool try_notify();
};
using waiter_list_t = std::array<waiter_t, 1024>;
class waiter_lock_t
{
waiter_t* m_waiter;
@@ -125,7 +125,7 @@ namespace vm
bool reservation_query(u32 addr, u32 size, bool is_writing, std::function<bool()> callback);
// Returns true if the current thread owns reservation
bool reservation_test();
bool reservation_test(const thread_ctrl_t* current = get_current_thread_ctrl());
// Break all reservations created by the current thread
void reservation_free();
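Finally, reservation_test used to compare the reservation owner against get_current_thread_ctrl() internally; it now takes the thread pointer as a parameter, with the old behaviour preserved by the default argument in vm.h, so SPUThread::get_events can pass get_ctrl() and the check stays meaningful even when the SPU's events are inspected from another thread. A small sketch of that ownership comparison (current_thread_ctrl and the globals are invented for the example):

#include <cstdio>

struct thread_ctrl_t {}; // opaque per-thread identity

static const thread_ctrl_t* g_reservation_owner = nullptr;

// Invented stand-in for get_current_thread_ctrl(): one identity object per thread.
static const thread_ctrl_t* current_thread_ctrl()
{
    static thread_local thread_ctrl_t ctrl;
    return &ctrl;
}

// Does the reservation belong to the given thread? The default argument keeps
// existing call sites unchanged while allowing "check on behalf of" queries.
static bool reservation_test(const thread_ctrl_t* current = current_thread_ctrl())
{
    const auto owner = g_reservation_owner;
    return owner && owner == current;
}

int main()
{
    g_reservation_owner = current_thread_ctrl();
    std::printf("self=%d other=%d\n", reservation_test(), reservation_test(nullptr));
}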