mirror of https://github.com/RPCS3/rpcs3.git synced 2024-11-22 18:53:28 +01:00

SPU: Optimize SPU ports/queues

Eladash 2021-05-14 14:21:10 +03:00 committed by Ivan
parent cacb852a1e
commit 56471f4ad4
3 changed files with 21 additions and 28 deletions
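
Note: the core of this change is the storage type for SPU event-queue bindings. Previously the spup ports and spuq entries held std::weak_ptr<lv2_event_queue>, so every event send had to promote the pointer with lock(), paying for atomic reference-count traffic on a hot path; the commit stores std::shared_ptr instead and reads it through get() or a copy while group->mutex guarantees the binding cannot change underneath. A minimal sketch of the before/after access pattern, using simplified stand-in types rather than the real RPCS3 classes:

#include <array>
#include <cstdint>
#include <memory>
#include <mutex>

struct event_queue { bool valid = true; };      // stand-in for lv2_event_queue

struct ports_before
{
    // Old layout: the port only observes the queue, so the hot path must
    // call weak_ptr::lock() and touch the atomic reference count every time.
    std::array<std::weak_ptr<event_queue>, 64> spup;

    bool send(std::uint32_t port)
    {
        const auto queue = spup[port].lock();   // atomic inc + dec per call
        return queue && queue->valid;
    }
};

struct ports_after
{
    // New layout: the port owns the queue; under the group mutex the object
    // cannot disappear, so a plain pointer read is enough.
    std::mutex group_mutex;
    std::array<std::shared_ptr<event_queue>, 64> spup;

    bool send(std::uint32_t port)
    {
        std::lock_guard lock(group_mutex);
        const auto* queue = spup[port].get();   // no reference-count traffic
        return queue && queue->valid;
    }
};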

View File

@@ -3969,7 +3969,7 @@ bool spu_thread::set_ch_value(u32 ch, u32 value)
std::lock_guard lock(group->mutex);
const auto queue = this->spup[spup].lock();
const auto queue = this->spup[spup].get();
const auto res = ch_in_mbox.get_count() ? CELL_EBUSY :
!queue ? CELL_ENOTCONN :
@@ -4001,7 +4001,7 @@ bool spu_thread::set_ch_value(u32 ch, u32 value)
spu_log.trace("sys_spu_thread_throw_event(spup=%d, data0=0x%x, data1=0x%x)", spup, value & 0x00ffffff, data);
const auto queue = (std::lock_guard{group->mutex}, this->spup[spup].lock());
const auto queue = (std::lock_guard{group->mutex}, this->spup[spup]);
// TODO: check passing spup value
if (auto res = queue ? queue->send(SYS_SPU_THREAD_EVENT_USER_KEY, lv2_id, (u64{spup} << 32) | (value & 0x00ffffff), data) : CELL_ENOTCONN)
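
Note: in the sys_spu_thread_throw_event path above, the weak_ptr used to be promoted under group->mutex; the new code instead copies the shared_ptr under the same short-lived lock and calls send() after the lock is gone. A hedged sketch of that copy-under-lock pattern, with illustrative names standing in for the real types:

#include <memory>
#include <mutex>

struct event_queue
{
    int send(int value) { return value; }       // stand-in for lv2_event_queue::send
};

struct group_t { std::mutex mutex; };

int throw_event(group_t& group, std::shared_ptr<event_queue>& port, int value)
{
    // The lock_guard temporary lives until the end of the full expression,
    // so the copy of the port happens while the mutex is held; send() then
    // runs outside the critical section on a private owning copy.
    const auto queue = (std::lock_guard{group.mutex}, port);
    return queue ? queue->send(value) : -1;     // -1 stands in for CELL_ENOTCONN
}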
@@ -4339,16 +4339,12 @@ bool spu_thread::stop_and_signal(u32 code)
return ch_in_mbox.set_values(1, CELL_EINVAL), true;
}
std::shared_ptr<lv2_event_queue> queue;
state += cpu_flag::wait;
spu_function_logger logger(*this, "sys_spu_thread_receive_event");
while (true)
{
queue.reset();
// Check group status, wait if necessary
for (auto _state = +group->run_state;
_state >= SPU_THREAD_GROUP_STATUS_WAITING && _state <= SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED;
@@ -4364,8 +4360,6 @@ bool spu_thread::stop_and_signal(u32 code)
thread_ctrl::wait_on(state, old);;
}
reader_lock rlock(id_manager::g_mutex);
std::lock_guard lock(group->mutex);
if (is_stopped())
@@ -4379,20 +4373,21 @@ bool spu_thread::stop_and_signal(u32 code)
continue;
}
lv2_event_queue* queue = nullptr;
for (auto& v : this->spuq)
{
if (spuq == v.first)
{
queue = v.second.lock();
if (lv2_event_queue::check(queue))
if (lv2_event_queue::check(v.second))
{
queue = v.second.get();
break;
}
}
}
if (!lv2_event_queue::check(queue))
if (!queue)
{
return ch_in_mbox.set_values(1, CELL_EINVAL), true;
}
@@ -4447,7 +4442,7 @@ bool spu_thread::stop_and_signal(u32 code)
break;
}
thread_ctrl::wait_on(state, old);;
thread_ctrl::wait_on(state, old);
}
std::lock_guard lock(group->mutex);
@@ -4498,20 +4493,21 @@ bool spu_thread::stop_and_signal(u32 code)
std::lock_guard lock(group->mutex);
std::shared_ptr<lv2_event_queue> queue;
lv2_event_queue* queue = nullptr;
for (auto& v : this->spuq)
{
if (spuq == v.first)
{
if (queue = v.second.lock(); lv2_event_queue::check(queue))
if (lv2_event_queue::check(v.second))
{
queue = v.second.get();
break;
}
}
}
if (!lv2_event_queue::check(queue))
if (!queue)
{
return ch_in_mbox.set_values(1, CELL_EINVAL), true;
}
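
Note: both receive-event lookups above now produce a raw lv2_event_queue* instead of promoting a weak_ptr inside the loop. This is safe because the loop runs with group->mutex held and the spuq entries themselves own the queue through shared_ptr, so the object cannot be destroyed while the borrowed pointer is in use. A minimal sketch of the lookup shape, assuming a simplified check() in place of lv2_event_queue::check():

#include <array>
#include <cstdint>
#include <memory>
#include <mutex>
#include <utility>

struct event_queue { bool valid = true; };

// Illustrative stand-in for lv2_event_queue::check().
static bool check(const event_queue* q) { return q && q->valid; }

struct spu_like
{
    std::mutex group_mutex;
    std::array<std::pair<std::uint32_t, std::shared_ptr<event_queue>>, 32> spuq;

    bool receive(std::uint32_t key)
    {
        std::lock_guard lock(group_mutex);      // bindings cannot change while held

        const event_queue* queue = nullptr;

        for (auto& v : spuq)
        {
            if (v.first == key && check(v.second.get()))
            {
                queue = v.second.get();         // borrowed pointer, no lock() call
                break;
            }
        }

        if (!queue)
        {
            return false;                       // maps to the CELL_EINVAL reply
        }

        // ... use the queue while the lock is still held ...
        return true;
    }
};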

View File

@@ -736,8 +736,8 @@ public:
atomic_t<status_npc_sync_var> status_npc;
std::array<spu_int_ctrl_t, 3> int_ctrl; // SPU Class 0, 1, 2 Interrupt Management
std::array<std::pair<u32, std::weak_ptr<lv2_event_queue>>, 32> spuq; // Event Queue Keys for SPU Thread
std::weak_ptr<lv2_event_queue> spup[64]; // SPU Ports
std::array<std::pair<u32, std::shared_ptr<lv2_event_queue>>, 32> spuq; // Event Queue Keys for SPU Thread
std::shared_ptr<lv2_event_queue> spup[64]; // SPU Ports
spu_channel exit_status{}; // Threaded SPU exit status (not a channel, but the interface fits)
atomic_t<u32> last_exit_status; // Value to be written in exit_status after checking group termination

View File

@@ -1490,7 +1490,7 @@ error_code sys_spu_thread_connect_event(ppu_thread& ppu, u32 id, u32 eq, u32 et,
sys_spu.warning("sys_spu_thread_connect_event(id=0x%x, eq=0x%x, et=%d, spup=%d)", id, eq, et, spup);
const auto [thread, group] = lv2_spu_group::get_thread(id);
const auto queue = idm::get<lv2_obj, lv2_event_queue>(eq);
auto queue = idm::get<lv2_obj, lv2_event_queue>(eq);
if (!queue || !thread) [[unlikely]]
{
@@ -1512,7 +1512,7 @@ error_code sys_spu_thread_connect_event(ppu_thread& ppu, u32 id, u32 eq, u32 et,
return CELL_EISCONN;
}
port = queue;
port = std::move(queue);
return CELL_OK;
}
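
Note: sys_spu_thread_connect_event now takes the idm::get() result into a mutable local and moves it into the port slot, so storing the binding transfers ownership instead of adding and later dropping one more reference. A tiny sketch, assuming a simplified port slot in place of the real thread member:

#include <memory>
#include <utility>

struct event_queue {};

void connect(std::shared_ptr<event_queue>& port)
{
    auto queue = std::make_shared<event_queue>(); // stand-in for idm::get<lv2_obj, lv2_event_queue>(eq)
    port = std::move(queue);                      // transfer ownership, no extra ref-count bump
}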
@@ -1557,7 +1557,7 @@ error_code sys_spu_thread_bind_queue(ppu_thread& ppu, u32 id, u32 spuq, u32 spuq
sys_spu.warning("sys_spu_thread_bind_queue(id=0x%x, spuq=0x%x, spuq_num=0x%x)", id, spuq, spuq_num);
const auto [thread, group] = lv2_spu_group::get_thread(id);
const auto queue = idm::get<lv2_obj, lv2_event_queue>(spuq);
auto queue = idm::get<lv2_obj, lv2_event_queue>(spuq);
if (!queue || !thread) [[unlikely]]
{
@@ -1576,8 +1576,7 @@ error_code sys_spu_thread_bind_queue(ppu_thread& ppu, u32 id, u32 spuq, u32 spuq
for (auto& v : thread->spuq)
{
// Check if the entry is assigned at all
if (const decltype(v.second) test{};
!v.second.owner_before(test) && !test.owner_before(v.second))
if (!v.second)
{
if (!q)
{
@@ -1587,8 +1586,7 @@ error_code sys_spu_thread_bind_queue(ppu_thread& ppu, u32 id, u32 spuq, u32 spuq
continue;
}
if (v.first == spuq_num ||
(!v.second.owner_before(queue) && !queue.owner_before(v.second)))
if (v.first == spuq_num || v.second == queue)
{
return CELL_EBUSY;
}
@@ -1600,7 +1598,7 @@ error_code sys_spu_thread_bind_queue(ppu_thread& ppu, u32 id, u32 spuq, u32 spuq
}
q->first = spuq_num;
q->second = queue;
q->second = std::move(queue);
return CELL_OK;
}
@@ -1626,8 +1624,7 @@ error_code sys_spu_thread_unbind_queue(ppu_thread& ppu, u32 id, u32 spuq_num)
continue;
}
if (const decltype(v.second) test{};
!v.second.owner_before(test) && !test.owner_before(v.second))
if (!v.second)
{
continue;
}
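
Note: weak_ptr has no operator bool or operator==, so the old sys_spu bindings tested an empty slot with a symmetric owner_before() comparison against a default-constructed weak_ptr; with shared_ptr the same checks collapse to !v.second and v.second == queue. A short sketch of the two spellings, using a stand-in type:

#include <memory>

struct event_queue {};

// Old spelling: an unassigned weak_ptr shares ownership with nothing, so it
// compares equivalent to a default-constructed one under owner_before().
bool slot_empty_old(const std::weak_ptr<event_queue>& slot)
{
    const std::weak_ptr<event_queue> test{};
    return !slot.owner_before(test) && !test.owner_before(slot);
}

// New spelling: shared_ptr supports direct emptiness and equality checks.
bool slot_empty_new(const std::shared_ptr<event_queue>& slot)
{
    return !slot;
}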