1
0
mirror of https://github.com/RPCS3/rpcs3.git synced 2024-11-22 18:53:28 +01:00

rsx: Fix emu stopping crash when using multi-threaded rsx

FXO signaled abort before the DMA offload thread completed its work, leading to an unsignalled vk::fence and a deadlock. Fix this by deregistering the DMA thread from FXO and managing its lifetime directly.
This commit is contained in:
Eladash 2022-07-09 16:00:19 +03:00 committed by Ivan
parent 87cd65ff03
commit 24fddf1ded
2 changed files with 25 additions and 25 deletions

View File

@@ -84,14 +84,10 @@ namespace rsx
static constexpr auto thread_name = "RSX Offloader"sv;
};
using dma_thread = named_thread<dma_manager::offload_thread>;
static_assert(std::is_default_constructible_v<dma_thread>);
// initialization
void dma_manager::init()
{
m_thread = std::make_shared<named_thread<offload_thread>>();
}
// General transport
@@ -103,8 +99,8 @@ namespace rsx
}
else
{
g_fxo->get<dma_thread>().m_enqueued_count++;
g_fxo->get<dma_thread>().m_work_queue.push(dst, src, length);
m_thread->m_enqueued_count++;
m_thread->m_work_queue.push(dst, src, length);
}
}
@@ -116,8 +112,8 @@ namespace rsx
}
else
{
g_fxo->get<dma_thread>().m_enqueued_count++;
g_fxo->get<dma_thread>().m_work_queue.push(dst, src, length);
m_thread->m_enqueued_count++;
m_thread->m_work_queue.push(dst, src, length);
}
}
@@ -131,8 +127,8 @@ namespace rsx
}
else
{
g_fxo->get<dma_thread>().m_enqueued_count++;
g_fxo->get<dma_thread>().m_work_queue.push(dst, primitive, count);
m_thread->m_enqueued_count++;
m_thread->m_work_queue.push(dst, primitive, count);
}
}
@@ -141,16 +137,16 @@ namespace rsx
{
ensure(g_cfg.video.multithreaded_rsx);
g_fxo->get<dma_thread>().m_enqueued_count++;
g_fxo->get<dma_thread>().m_work_queue.push(request_code, args);
m_thread->m_enqueued_count++;
m_thread->m_work_queue.push(request_code, args);
}
// Synchronization
bool dma_manager::is_current_thread()
bool dma_manager::is_current_thread() const
{
if (auto cpu = thread_ctrl::get_current())
{
return g_fxo->get<dma_thread>().current_thread_ == cpu;
return m_thread->current_thread_ == cpu;
}
return false;
@@ -158,7 +154,7 @@ namespace rsx
bool dma_manager::sync() const
{
auto& _thr = g_fxo->get<dma_thread>();
auto& _thr = *m_thread;
if (_thr.m_enqueued_count.load() <= _thr.m_processed_count.load()) [[likely]]
{
@@ -191,8 +187,8 @@ namespace rsx
void dma_manager::join()
{
g_fxo->get<dma_thread>() = thread_state::aborting;
sync();
*m_thread = thread_state::aborting;
}
void dma_manager::set_mem_fault_flag()
@@ -208,9 +204,9 @@ namespace rsx
}
// Fault recovery
utils::address_range dma_manager::get_fault_range(bool writing)
utils::address_range dma_manager::get_fault_range(bool writing) const
{
const auto m_current_job = ensure(g_fxo->get<dma_thread>().m_current_job);
const auto m_current_job = ensure(m_thread->m_current_job);
void *address = nullptr;
u32 range = m_current_job->length;

View File

@@ -6,6 +6,9 @@
#include <vector>
template <typename T>
class named_thread;
namespace rsx
{
class dma_manager
@@ -50,6 +53,9 @@ namespace rsx
atomic_t<bool> m_mem_fault_flag = false;
struct offload_thread;
std::shared_ptr<named_thread<offload_thread>> m_thread;
// TODO: Improved benchmarks here; value determined by profiling on a Ryzen CPU, rounded to the nearest 512 bytes
const u32 max_immediate_transfer_size = 3584;
@@ -64,21 +70,19 @@ namespace rsx
void copy(void *dst, void *src, u32 length) const;
// Vertex utilities
static void emulate_as_indexed(void *dst, rsx::primitive_type primitive, u32 count);
void emulate_as_indexed(void *dst, rsx::primitive_type primitive, u32 count);
// Renderer callback
static void backend_ctrl(u32 request_code, void* args);
void backend_ctrl(u32 request_code, void* args);
// Synchronization
static bool is_current_thread();
bool is_current_thread() const;
bool sync() const;
void join();
void set_mem_fault_flag();
void clear_mem_fault_flag();
// Fault recovery
static utils::address_range get_fault_range(bool writing);
struct offload_thread;
utils::address_range get_fault_range(bool writing) const;
};
}