Mirror of https://github.com/RPCS3/rpcs3.git (synced 2024-11-22 18:53:28 +01:00)

Remove timeout support from lf_queue::wait

Add notify method and use atomic wait
This commit is contained in:
Nekotekina 2019-09-09 00:55:48 +03:00
parent faba366f89
commit b91661ae71
3 changed files with 21 additions and 46 deletions

View File

@ -443,35 +443,3 @@ bool shared_cond::notify_all(shared_cond::shared_lock& lock) noexcept
balanced_awaken<true>(m_cvx32, utils::popcnt32(wait_mask)); balanced_awaken<true>(m_cvx32, utils::popcnt32(wait_mask));
return true; return true;
} }
// Wait until the queue becomes non-empty, or until _timeout expires.
// Returns true immediately if data is already queued (no blocking occurred);
// otherwise returns the result of balanced_wait_until (presumably true on
// wake-with-data, false on timeout — TODO confirm against its definition).
// Only one thread may wait at a time: a sentinel value of 1 in m_head marks
// an active waiter, and finding that sentinel here is flagged as misuse.
bool lf_queue_base::wait(u64 _timeout)
{
	// Try to install the "waiter present" sentinel (1) into an empty head (0).
	auto _old = m_head.compare_and_swap(0, 1);

	if (_old)
	{
		// Head was non-zero: either a real item pointer, or 1 (another waiter).
		// verify's comma-operator form checks _old != 1 — a concurrent wait()
		// call is a contract violation (presumably fatal; see verify/HERE).
		verify("lf_queue concurrent wait" HERE), _old != 1;

		// Data was already available; no need to block.
		return true;
	}

	// Head was empty and now holds the sentinel; block until it changes.
	// The predicate is invoked with the current head value; the `ret...` pack
	// appears non-empty only on the timeout invocation — TODO confirm against
	// balanced_wait_until's contract.
	return balanced_wait_until(m_head, _timeout, [](std::uintptr_t& head, auto... ret) -> int
	{
		if (head != 1)
		{
			// Head no longer holds the sentinel: an item was pushed, stop waiting.
			return +1;
		}

		if constexpr (sizeof...(ret))
		{
			// Timed out: clear the waiter sentinel back to "empty" and give up.
			head = 0;
			return -1;
		}

		// Still only the sentinel: keep waiting.
		return 0;
	});
}
// Wake a thread blocked in lf_queue_base::wait().
// Called by push() after replacing the waiter sentinel with a real item.
void lf_queue_base::imp_notify()
{
	// balanced_awaken(m_head, 1): presumably wakes at most one waiter on
	// m_head — confirm against balanced_awaken's definition.
	balanced_awaken(m_head, 1);
}

View File

@ -315,12 +315,6 @@ class lf_queue_base
{ {
protected: protected:
atomic_t<std::uintptr_t> m_head = 0; atomic_t<std::uintptr_t> m_head = 0;
void imp_notify();
public:
// Wait for new elements pushed, no other thread shall call wait() or pop_all() simultaneously
bool wait(u64 usec_timeout = -1);
}; };
// Linked list-based multi-producer queue (the consumer drains the whole queue at once) // Linked list-based multi-producer queue (the consumer drains the whole queue at once)
@ -361,20 +355,28 @@ public:
delete reinterpret_cast<lf_queue_item<T>*>(m_head.load()); delete reinterpret_cast<lf_queue_item<T>*>(m_head.load());
} }
// Block the calling thread until the queue holds at least one element.
// Re-checks after every wake-up, so spurious wakes are handled.
void wait() noexcept
{
	for (;;)
	{
		if (m_head != 0)
		{
			// Non-empty: an item pointer has been published into m_head.
			return;
		}

		// Sleep on the atomic until its value changes away from 0.
		m_head.wait(0);
	}
}
// Wake one thread blocked in wait(); call after push() to publish new work.
void notify() noexcept
{
	m_head.notify_one();
}
template <typename... Args> template <typename... Args>
void push(Args&&... args) void push(Args&&... args)
{ {
auto _old = m_head.load(); auto _old = m_head.load();
auto* item = new lf_queue_item<T>(_old & 1 ? nullptr : reinterpret_cast<lf_queue_item<T>*>(_old), std::forward<Args>(args)...); auto* item = new lf_queue_item<T>(reinterpret_cast<lf_queue_item<T>*>(_old), std::forward<Args>(args)...);
while (!m_head.compare_exchange(_old, reinterpret_cast<std::uint64_t>(item))) while (!m_head.compare_exchange(_old, reinterpret_cast<std::uint64_t>(item)))
{ {
item->m_link = _old & 1 ? nullptr : reinterpret_cast<lf_queue_item<T>*>(_old); item->m_link = reinterpret_cast<lf_queue_item<T>*>(_old);
}
if (_old & 1)
{
lf_queue_base::imp_notify();
} }
} }

View File

@ -184,7 +184,7 @@ struct vdec_context final
{ {
if (!cmds) if (!cmds)
{ {
in_cmd.wait(1000); in_cmd.wait();
continue; continue;
} }
@ -515,6 +515,7 @@ error_code cellVdecClose(ppu_thread& ppu, u32 handle)
lv2_obj::sleep(ppu); lv2_obj::sleep(ppu);
vdec->out_max = 0; vdec->out_max = 0;
vdec->in_cmd.push(vdec_close); vdec->in_cmd.push(vdec_close);
vdec->in_cmd.notify();
while (!atomic_storage<u64>::load(vdec->ppu_tid)) while (!atomic_storage<u64>::load(vdec->ppu_tid))
{ {
@ -538,6 +539,7 @@ error_code cellVdecStartSeq(u32 handle)
} }
vdec->in_cmd.push(vdec_start_seq); vdec->in_cmd.push(vdec_start_seq);
vdec->in_cmd.notify();
return CELL_OK; return CELL_OK;
} }
@ -553,6 +555,7 @@ error_code cellVdecEndSeq(u32 handle)
} }
vdec->in_cmd.push(vdec_cmd{-1}); vdec->in_cmd.push(vdec_cmd{-1});
vdec->in_cmd.notify();
return CELL_OK; return CELL_OK;
} }
@ -574,6 +577,7 @@ error_code cellVdecDecodeAu(u32 handle, CellVdecDecodeMode mode, vm::cptr<CellVd
// TODO: check info // TODO: check info
vdec->in_cmd.push(vdec_cmd{mode, *auInfo}); vdec->in_cmd.push(vdec_cmd{mode, *auInfo});
vdec->in_cmd.notify();
return CELL_OK; return CELL_OK;
} }
@ -924,6 +928,7 @@ error_code cellVdecSetFrameRate(u32 handle, CellVdecFrameRate frc)
// TODO: check frc value // TODO: check frc value
vdec->in_cmd.push(frc); vdec->in_cmd.push(frc);
vdec->in_cmd.notify();
return CELL_OK; return CELL_OK;
} }