
WRAP_EXPR macro removed

Nekotekina 2016-08-15 17:11:45 +03:00
parent dafb6b5c92
commit dbcb5df172
9 changed files with 30 additions and 30 deletions
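
For context, the removed macro only wrapped an expression into a by-reference lambda that returns it (its definition is removed in the hunk below), so every call site now spells that lambda out explicitly. A minimal sketch of the transformation, where wait_until is a hypothetical predicate-taking helper standing in for vm::wait_op / thread_ctrl::wait, not the emulator's real API:

#include <atomic>
#include <cstdint>

// The macro removed by this commit: wrap an expression into a lambda returning it.
#define WRAP_EXPR(...) [&] { return __VA_ARGS__; }

// Hypothetical helper, used only for illustration; it stands in for predicate-taking
// waiters such as vm::wait_op and thread_ctrl::wait.
template <typename Pred>
void wait_until(Pred&& pred)
{
    while (!pred()) { /* spin or block until the predicate holds */ }
}

std::atomic<std::uint32_t> value{0};

void before() { wait_until(WRAP_EXPR(value.load() >= 0x80000000)); }      // macro form
void after()  { wait_until([&] { return value.load() >= 0x80000000; }); } // identical expansion, spelled out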


@@ -2072,7 +2072,7 @@ void thread_ctrl::join()
{
// Hard way
std::unique_lock<std::mutex> lock(m_data->mutex);
- m_data->jcv.wait(lock, WRAP_EXPR(m_joining >= 0x80000000));
+ m_data->jcv.wait(lock, [&] { return m_joining >= 0x80000000; });
}
if (UNLIKELY(m_data && m_data->exception && !std::uncaught_exception()))


@@ -58,9 +58,6 @@
#define HERE "\n(in file " __FILE__ ":" STRINGIZE(__LINE__) ")"
- // Wrap an expression into lambda. Obsolete.
- #define WRAP_EXPR(...) [&] { return __VA_ARGS__; }
// Ensure that the expression evaluates to true. Obsolete.
//#define EXPECTS(...) do { if (!(__VA_ARGS__)) fmt::raw_error("Precondition failed: " #__VA_ARGS__ HERE); } while (0)
//#define ENSURES(...) do { if (!(__VA_ARGS__)) fmt::raw_error("Postcondition failed: " #__VA_ARGS__ HERE); } while (0)


@@ -67,7 +67,7 @@ ppu_error_code cellSyncMutexLock(vm::ptr<CellSyncMutex> mutex)
const auto order = mutex->ctrl.atomic_op(_sync::mutex_acquire);
// wait until rel value is equal to old acq value
- vm::wait_op(mutex.addr(), 4, WRAP_EXPR(mutex->ctrl.load().rel == order));
+ vm::wait_op(mutex.addr(), 4, [&] { return mutex->ctrl.load().rel == order; });
_mm_mfence();
@@ -156,7 +156,7 @@ ppu_error_code cellSyncBarrierNotify(vm::ptr<CellSyncBarrier> barrier)
return CELL_SYNC_ERROR_ALIGN;
}
- vm::wait_op(barrier.addr(), 4, WRAP_EXPR(barrier->ctrl.atomic_op(_sync::barrier::try_notify)));
+ vm::wait_op(barrier.addr(), 4, [&] { return barrier->ctrl.atomic_op(_sync::barrier::try_notify); });
vm::notify_at(barrier.addr(), 4);
@@ -205,7 +205,7 @@ ppu_error_code cellSyncBarrierWait(vm::ptr<CellSyncBarrier> barrier)
_mm_mfence();
- vm::wait_op(barrier.addr(), 4, WRAP_EXPR(barrier->ctrl.atomic_op(_sync::barrier::try_wait)));
+ vm::wait_op(barrier.addr(), 4, [&] { return barrier->ctrl.atomic_op(_sync::barrier::try_wait); });
vm::notify_at(barrier.addr(), 4);
@@ -282,7 +282,7 @@ ppu_error_code cellSyncRwmRead(vm::ptr<CellSyncRwm> rwm, vm::ptr<void> buffer)
}
// wait until `writers` is zero, increase `readers`
- vm::wait_op(rwm.addr(), 8, WRAP_EXPR(rwm->ctrl.atomic_op(_sync::rwlock::try_read_begin)));
+ vm::wait_op(rwm.addr(), 8, [&] { return rwm->ctrl.atomic_op(_sync::rwlock::try_read_begin); });
// copy data to buffer
std::memcpy(buffer.get_ptr(), rwm->buffer.get_ptr(), rwm->size);
@@ -347,10 +347,10 @@ ppu_error_code cellSyncRwmWrite(vm::ptr<CellSyncRwm> rwm, vm::cptr<void> buffer)
}
// wait until `writers` is zero, set to 1
- vm::wait_op(rwm.addr(), 8, WRAP_EXPR(rwm->ctrl.atomic_op(_sync::rwlock::try_write_begin)));
+ vm::wait_op(rwm.addr(), 8, [&] { return rwm->ctrl.atomic_op(_sync::rwlock::try_write_begin); });
// wait until `readers` is zero
- vm::wait_op(rwm.addr(), 8, WRAP_EXPR(!rwm->ctrl.load().readers));
+ vm::wait_op(rwm.addr(), 8, [&] { return rwm->ctrl.load().readers == 0; });
// copy data from buffer
std::memcpy(rwm->buffer.get_ptr(), buffer.get_ptr(), rwm->size);
@@ -447,7 +447,7 @@ ppu_error_code cellSyncQueuePush(vm::ptr<CellSyncQueue> queue, vm::cptr<void> bu
u32 position;
- vm::wait_op(queue.addr(), 8, WRAP_EXPR(queue->ctrl.atomic_op(_sync::queue::try_push_begin, depth, &position)));
+ vm::wait_op(queue.addr(), 8, [&] { return queue->ctrl.atomic_op(_sync::queue::try_push_begin, depth, &position); });
// copy data from the buffer at the position
std::memcpy(&queue->buffer[position * queue->size], buffer.get_ptr(), queue->size);
@@ -512,7 +512,7 @@ ppu_error_code cellSyncQueuePop(vm::ptr<CellSyncQueue> queue, vm::ptr<void> buff
u32 position;
- vm::wait_op(queue.addr(), 8, WRAP_EXPR(queue->ctrl.atomic_op(_sync::queue::try_pop_begin, depth, &position)));
+ vm::wait_op(queue.addr(), 8, [&] { return queue->ctrl.atomic_op(_sync::queue::try_pop_begin, depth, &position); });
// copy data at the position to the buffer
std::memcpy(buffer.get_ptr(), &queue->buffer[position % depth * queue->size], queue->size);
@@ -577,7 +577,7 @@ ppu_error_code cellSyncQueuePeek(vm::ptr<CellSyncQueue> queue, vm::ptr<void> buf
u32 position;
- vm::wait_op(queue.addr(), 8, WRAP_EXPR(queue->ctrl.atomic_op(_sync::queue::try_peek_begin, depth, &position)));
+ vm::wait_op(queue.addr(), 8, [&] { return queue->ctrl.atomic_op(_sync::queue::try_peek_begin, depth, &position); });
// copy data at the position to the buffer
std::memcpy(buffer.get_ptr(), &queue->buffer[position % depth * queue->size], queue->size);
@@ -659,8 +659,8 @@ ppu_error_code cellSyncQueueClear(vm::ptr<CellSyncQueue> queue)
const u32 depth = queue->check_depth();
- vm::wait_op(queue.addr(), 8, WRAP_EXPR(queue->ctrl.atomic_op(_sync::queue::try_clear_begin_1)));
- vm::wait_op(queue.addr(), 8, WRAP_EXPR(queue->ctrl.atomic_op(_sync::queue::try_clear_begin_2)));
+ vm::wait_op(queue.addr(), 8, [&] { return queue->ctrl.atomic_op(_sync::queue::try_clear_begin_1); });
+ vm::wait_op(queue.addr(), 8, [&] { return queue->ctrl.atomic_op(_sync::queue::try_clear_begin_2); });
queue->ctrl.exchange({ 0, 0 });


@@ -19,8 +19,8 @@ void sys_spinlock_lock(vm::ptr<atomic_be_t<u32>> lock)
{
sysPrxForUser.trace("sys_spinlock_lock(lock=*0x%x)", lock);
- // prx: exchange with 0xabadcafe, repeat until exchanged with 0
- vm::wait_op(lock.addr(), 4, WRAP_EXPR(!lock->exchange(0xabadcafe)));
+ // Try exchange with 0xabadcafe, repeat until exchanged with 0
+ vm::wait_op(lock.addr(), 4, [&] { return lock->exchange(0xabadcafe) == 0; });
}
s32 sys_spinlock_trylock(vm::ptr<atomic_be_t<u32>> lock)


@@ -599,7 +599,7 @@ bool SPUThread::get_ch_value(u32 ch, u32& out)
{
if (!channel.try_pop(out))
{
- thread_lock{*this}, thread_ctrl::wait(WRAP_EXPR(test(state & cpu_flag::stop) || channel.try_pop(out)));
+ thread_lock{*this}, thread_ctrl::wait([&] { return test(state & cpu_flag::stop) || channel.try_pop(out); });
return !test(state & cpu_flag::stop);
}
@@ -702,7 +702,7 @@ bool SPUThread::get_ch_value(u32 ch, u32& out)
if (ch_event_mask & SPU_EVENT_LR)
{
// register waiter if polling reservation status is required
- vm::wait_op(last_raddr, 128, WRAP_EXPR(get_events(true) || test(state & cpu_flag::stop)));
+ vm::wait_op(last_raddr, 128, [&] { return get_events(true) || test(state & cpu_flag::stop); });
}
else
{


@@ -20,7 +20,7 @@ std::shared_ptr<lv2_event_queue_t> lv2_event_queue_t::make(u32 protocol, s32 typ
{
auto queue = std::make_shared<lv2_event_queue_t>(protocol, type, name, ipc_key, size);
- auto make_expr = WRAP_EXPR(idm::import<lv2_event_queue_t>(WRAP_EXPR(queue)));
+ auto make_expr = [&] { return idm::import<lv2_event_queue_t>([&] { return queue; }); };
if (ipc_key == SYS_EVENT_QUEUE_LOCAL)
{


@@ -279,7 +279,7 @@ public:
template<typename T, typename Make = T, typename... Args>
static inline std::enable_if_t<std::is_constructible<Make, Args...>::value, std::shared_ptr<Make>> make_ptr(Args&&... args)
{
- if (auto pair = create_id<T, Make>(WRAP_EXPR(std::make_shared<Make>(std::forward<Args>(args)...))))
+ if (auto pair = create_id<T, Make>([&] { return std::make_shared<Make>(std::forward<Args>(args)...); }))
{
id_manager::on_init<T>::func(static_cast<T*>(pair->second.get()), pair->second);
id_manager::on_stop<T>::func(nullptr);
@@ -293,7 +293,7 @@ public:
template<typename T, typename Make = T, typename... Args>
static inline std::enable_if_t<std::is_constructible<Make, Args...>::value, u32> make(Args&&... args)
{
- if (auto pair = create_id<T, Make>(WRAP_EXPR(std::make_shared<Make>(std::forward<Args>(args)...))))
+ if (auto pair = create_id<T, Make>([&] { return std::make_shared<Make>(std::forward<Args>(args)...); }))
{
id_manager::on_init<T>::func(static_cast<T*>(pair->second.get()), pair->second);
id_manager::on_stop<T>::func(nullptr);
@@ -307,7 +307,7 @@ public:
template<typename T, typename Made = T>
static inline u32 import_existing(const std::shared_ptr<T>& ptr)
{
- if (auto pair = create_id<T, Made>(WRAP_EXPR(ptr)))
+ if (auto pair = create_id<T, Made>([&] { return ptr; }))
{
id_manager::on_init<T>::func(static_cast<T*>(pair->second.get()), pair->second);
id_manager::on_stop<T>::func(nullptr);


@@ -49,7 +49,7 @@ namespace vm
};
// Wait until thread == nullptr
- waiter{this}, thread_ctrl::wait(WRAP_EXPR(!thread || test()));
+ waiter{this}, thread_ctrl::wait([&] { return !thread || test(); });
}
bool waiter_base::try_notify()


@@ -542,7 +542,7 @@ struct psp2_event_flag final
{
if (!exec(task::signal, cpu.id))
{
- thread_lock{cpu}, thread_ctrl::wait(WRAP_EXPR(cpu.state.test_and_reset(cpu_flag::signal)));
+ thread_lock{cpu}, thread_ctrl::wait([&] { return cpu.state.test_and_reset(cpu_flag::signal); });
}
else
{
@@ -625,7 +625,7 @@ private:
idm::select<ARMv7Thread>([&](u32 id, ARMv7Thread& cpu)
{
- if (cpu->lock_if(WRAP_EXPR(cpu.owner == this && pat_test(new_state.pattern, cpu.GPR[1], cpu.GPR[0]))))
+ if (cpu->lock_if([&] { return cpu.owner == this && pat_test(new_state.pattern, cpu.GPR[1], cpu.GPR[0]); }))
{
threads.emplace_back(cpu);
}
@@ -648,7 +648,7 @@ private:
{
idm::get<ARMv7Thread>(new_state.waiters, [&](u32 id, ARMv7Thread& cpu)
{
- if (cpu->lock_if(WRAP_EXPR(cpu.owner == this && pat_test(new_state.pattern, cpu.GPR[1], cpu.GPR[0]))))
+ if (cpu->lock_if([&] { return cpu.owner == this && pat_test(new_state.pattern, cpu.GPR[1], cpu.GPR[0]); }))
{
threads.emplace_back(cpu);
}
@@ -706,7 +706,7 @@ private:
idm::select<ARMv7Thread>([&](u32, ARMv7Thread& cpu)
{
- if (cpu->lock_if(WRAP_EXPR(cpu.owner == this)))
+ if (cpu->lock_if([&] { return cpu.owner == this; }))
{
cpu.GPR[0] = error;
cpu.GPR[1] = pattern;
@@ -741,7 +741,10 @@ arm_error_code sceKernelCreateEventFlag(vm::cptr<char> pName, u32 attr, u32 init
auto evf = std::make_shared<psp2_event_flag>(pName.get_ptr(), attr, initPattern);
// Try to register IPC name, only if not empty string (TODO)
- if (evf->name.empty() || !psp2_event_flag::ipc::add(evf->name, WRAP_EXPR(evf))) evf->ipc_ref = 0;
+ if (evf->name.empty() || !psp2_event_flag::ipc::add(evf->name, [&] { return evf; }))
+ {
+ evf->ipc_ref = 0;
+ }
// Register ID
return NOT_AN_ERROR(idm::import_existing(evf));
@@ -863,7 +866,7 @@ arm_error_code sceKernelWaitEventFlag(ARMv7Thread& cpu, s32 evfId, u32 bitPatter
thread_lock entry(cpu);
- if (!thread_ctrl::wait_for(timeout, WRAP_EXPR(cpu.state.test_and_reset(cpu_flag::signal))))
+ if (!thread_ctrl::wait_for(timeout, [&] { return cpu.state.test_and_reset(cpu_flag::signal); }))
{
// Timeout cleanup
cpu.owner = nullptr;