From 4f22559ff0255982265eb0589f6ab88965810cfe Mon Sep 17 00:00:00 2001
From: Nekotekina
Date: Wed, 7 Aug 2019 03:56:47 +0300
Subject: [PATCH] Remove dumb overloads of atomic_op, fetch_op

Also remove the equivalent overload of lf_queue::apply. Fix an inlining
problem by passing F by value instead of F&&.
---
 Utilities/lockless.h                | 18 +------
 rpcs3/Emu/Cell/Modules/cellSync.cpp | 40 ++++++++--------
 rpcs3/util/atomic.hpp               | 74 +++--------------------
 3 files changed, 28 insertions(+), 104 deletions(-)

diff --git a/Utilities/lockless.h b/Utilities/lockless.h
index b3c5ab1c90..28831f8e9d 100644
--- a/Utilities/lockless.h
+++ b/Utilities/lockless.h
@@ -388,27 +388,13 @@ public:
 
 	// Apply func(data) to each element, return the total length
 	template <typename F>
-	std::size_t apply(F&& func)
+	std::size_t apply(F func)
 	{
 		std::size_t count = 0;
 
 		for (auto slice = pop_all(); slice; slice.pop_front())
 		{
-			std::invoke(std::forward<F>(func), *slice);
-		}
-
-		return count;
-	}
-
-	// apply() overload for callable template argument
-	template <auto F>
-	std::size_t apply()
-	{
-		std::size_t count = 0;
-
-		for (auto slice = pop_all(); slice; slice.pop_front())
-		{
-			std::invoke(F, *slice);
+			std::invoke(func, *slice);
 		}
 
 		return count;
diff --git a/rpcs3/Emu/Cell/Modules/cellSync.cpp b/rpcs3/Emu/Cell/Modules/cellSync.cpp
index 0babe4424d..df3c040642 100644
--- a/rpcs3/Emu/Cell/Modules/cellSync.cpp
+++ b/rpcs3/Emu/Cell/Modules/cellSync.cpp
@@ -76,7 +76,7 @@ error_code cellSyncMutexLock(ppu_thread& ppu, vm::ptr<CellSyncMutex> mutex)
 	}
 
 	// Increase acq value and remember its old value
-	const auto order = mutex->ctrl.atomic_op<&CellSyncMutex::Counter::lock_begin>();
+	const auto order = mutex->ctrl.atomic_op(&CellSyncMutex::Counter::lock_begin);
 
 	// Wait until rel value is equal to old acq value
 	while (mutex->ctrl.load().rel != order)
@@ -105,7 +105,7 @@ error_code cellSyncMutexTryLock(vm::ptr<CellSyncMutex> mutex)
 		return CELL_SYNC_ERROR_ALIGN;
 	}
 
-	if (!mutex->ctrl.atomic_op<&CellSyncMutex::Counter::try_lock>())
+	if (!mutex->ctrl.atomic_op(&CellSyncMutex::Counter::try_lock))
 	{
 		return not_an_error(CELL_SYNC_ERROR_BUSY);
 	}
@@ -127,7 +127,7 @@ error_code cellSyncMutexUnlock(vm::ptr<CellSyncMutex> mutex)
 		return CELL_SYNC_ERROR_ALIGN;
 	}
 
-	mutex->ctrl.atomic_op<&CellSyncMutex::Counter::unlock>();
+	mutex->ctrl.atomic_op(&CellSyncMutex::Counter::unlock);
 
 	return CELL_OK;
 }
@@ -171,7 +171,7 @@ error_code cellSyncBarrierNotify(ppu_thread& ppu, vm::ptr<CellSyncBarrier> barrier)
 		return CELL_SYNC_ERROR_ALIGN;
 	}
 
-	while (!barrier->ctrl.atomic_op<&CellSyncBarrier::try_notify>())
+	while (!barrier->ctrl.atomic_op(&CellSyncBarrier::try_notify))
 	{
 		if (ppu.test_stopped())
 		{
@@ -198,7 +198,7 @@ error_code cellSyncBarrierTryNotify(vm::ptr<CellSyncBarrier> barrier)
 
 	std::atomic_thread_fence(std::memory_order_acq_rel);
 
-	if (!barrier->ctrl.atomic_op<&CellSyncBarrier::try_notify>())
+	if (!barrier->ctrl.atomic_op(&CellSyncBarrier::try_notify))
 	{
 		return not_an_error(CELL_SYNC_ERROR_BUSY);
 	}
@@ -222,7 +222,7 @@ error_code cellSyncBarrierWait(ppu_thread& ppu, vm::ptr<CellSyncBarrier> barrier)
 
 	std::atomic_thread_fence(std::memory_order_acq_rel);
 
-	while (!barrier->ctrl.atomic_op<&CellSyncBarrier::try_wait>())
+	while (!barrier->ctrl.atomic_op(&CellSyncBarrier::try_wait))
 	{
 		if (ppu.test_stopped())
 		{
@@ -249,7 +249,7 @@ error_code cellSyncBarrierTryWait(vm::ptr<CellSyncBarrier> barrier)
 
 	std::atomic_thread_fence(std::memory_order_acq_rel);
 
-	if (!barrier->ctrl.atomic_op<&CellSyncBarrier::try_wait>())
+	if (!barrier->ctrl.atomic_op(&CellSyncBarrier::try_wait))
 	{
 		return not_an_error(CELL_SYNC_ERROR_BUSY);
 	}
@@ -301,7 +301,7 @@ error_code cellSyncRwmRead(ppu_thread& ppu, vm::ptr<CellSyncRwm> rwm, vm::ptr<void> buffer)
 	}
 
 	// wait until `writers` is zero, increase `readers`
-	while (!rwm->ctrl.atomic_op<&CellSyncRwm::try_read_begin>())
+	while (!rwm->ctrl.atomic_op(&CellSyncRwm::try_read_begin))
 	{
 		if (ppu.test_stopped())
 		{
@@ -313,7 +313,7 @@ error_code cellSyncRwmRead(ppu_thread& ppu, vm::ptr<CellSyncRwm> rwm, vm::ptr<void> buffer)
 	std::memcpy(buffer.get_ptr(), rwm->buffer.get_ptr(), rwm->size);
 
 	// decrease `readers`, return error if already zero
-	if (!rwm->ctrl.atomic_op<&CellSyncRwm::try_read_end>())
+	if (!rwm->ctrl.atomic_op(&CellSyncRwm::try_read_end))
 	{
 		return CELL_SYNC_ERROR_ABORT;
 	}
@@ -336,7 +336,7 @@ error_code cellSyncRwmTryRead(vm::ptr<CellSyncRwm> rwm, vm::ptr<void> buffer)
 	}
 
 	// increase `readers` if `writers` is zero
-	if (!rwm->ctrl.atomic_op<&CellSyncRwm::try_read_begin>())
+	if (!rwm->ctrl.atomic_op(&CellSyncRwm::try_read_begin))
 	{
 		return not_an_error(CELL_SYNC_ERROR_BUSY);
 	}
@@ -345,7 +345,7 @@ error_code cellSyncRwmTryRead(vm::ptr<CellSyncRwm> rwm, vm::ptr<void> buffer)
 	std::memcpy(buffer.get_ptr(), rwm->buffer.get_ptr(), rwm->size);
 
 	// decrease `readers`, return error if already zero
-	if (!rwm->ctrl.atomic_op<&CellSyncRwm::try_read_end>())
+	if (!rwm->ctrl.atomic_op(&CellSyncRwm::try_read_end))
 	{
 		return CELL_SYNC_ERROR_ABORT;
 	}
@@ -368,7 +368,7 @@ error_code cellSyncRwmWrite(ppu_thread& ppu, vm::ptr<CellSyncRwm> rwm, vm::cptr<void> buffer)
 	}
 
 	// wait until `writers` is zero, set to 1
-	while (!rwm->ctrl.atomic_op<&CellSyncRwm::try_write_begin>())
+	while (!rwm->ctrl.atomic_op(&CellSyncRwm::try_write_begin))
 	{
 		if (ppu.test_stopped())
 		{
@@ -490,7 +490,7 @@ error_code cellSyncQueuePush(ppu_thread& ppu, vm::ptr<CellSyncQueue> queue, vm::cptr<void> buffer)
 	// copy data from the buffer at the position
 	std::memcpy(&queue->buffer[position * queue->size], buffer.get_ptr(), queue->size);
 
-	queue->ctrl.atomic_op<&CellSyncQueue::push_end>();
+	queue->ctrl.atomic_op(&CellSyncQueue::push_end);
 
 	return CELL_OK;
 }
@@ -524,7 +524,7 @@ error_code cellSyncQueueTryPush(vm::ptr<CellSyncQueue> queue, vm::cptr<void> buffer)
 	// copy data from the buffer at the position
 	std::memcpy(&queue->buffer[position * queue->size], buffer.get_ptr(), queue->size);
 
-	queue->ctrl.atomic_op<&CellSyncQueue::push_end>();
+	queue->ctrl.atomic_op(&CellSyncQueue::push_end);
 
 	return CELL_OK;
 }
@@ -561,7 +561,7 @@ error_code cellSyncQueuePop(ppu_thread& ppu, vm::ptr<CellSyncQueue> queue, vm::ptr<void> buffer)
 	// copy data at the position to the buffer
 	std::memcpy(buffer.get_ptr(), &queue->buffer[position % depth * queue->size], queue->size);
 
-	queue->ctrl.atomic_op<&CellSyncQueue::pop_end>();
+	queue->ctrl.atomic_op(&CellSyncQueue::pop_end);
 
 	return CELL_OK;
 }
@@ -595,7 +595,7 @@ error_code cellSyncQueueTryPop(vm::ptr<CellSyncQueue> queue, vm::ptr<void> buffer)
 	// copy data at the position to the buffer
 	std::memcpy(buffer.get_ptr(), &queue->buffer[position % depth * queue->size], queue->size);
 
-	queue->ctrl.atomic_op<&CellSyncQueue::pop_end>();
+	queue->ctrl.atomic_op(&CellSyncQueue::pop_end);
 
 	return CELL_OK;
 }
@@ -632,7 +632,7 @@ error_code cellSyncQueuePeek(ppu_thread& ppu, vm::ptr<CellSyncQueue> queue, vm::ptr<void> buffer)
 	// copy data at the position to the buffer
 	std::memcpy(buffer.get_ptr(), &queue->buffer[position % depth * queue->size], queue->size);
 
-	queue->ctrl.atomic_op<&CellSyncQueue::pop_end>();
+	queue->ctrl.atomic_op(&CellSyncQueue::pop_end);
 
 	return CELL_OK;
 }
@@ -666,7 +666,7 @@ error_code cellSyncQueueTryPeek(vm::ptr<CellSyncQueue> queue, vm::ptr<void> buffer)
 	// copy data at the position to the buffer
 	std::memcpy(buffer.get_ptr(), &queue->buffer[position % depth * queue->size], queue->size);
 
-	queue->ctrl.atomic_op<&CellSyncQueue::pop_end>();
+	queue->ctrl.atomic_op(&CellSyncQueue::pop_end);
 
 	return CELL_OK;
 }
@@ -706,7 +706,7 @@ error_code cellSyncQueueClear(ppu_thread& ppu, vm::ptr<CellSyncQueue> queue)
 
 	const u32 depth = queue->check_depth();
 
-	while (!queue->ctrl.atomic_op<&CellSyncQueue::try_clear_begin_1>())
+	while (!queue->ctrl.atomic_op(&CellSyncQueue::try_clear_begin_1))
 	{
 		if (ppu.test_stopped())
 		{
@@ -714,7 +714,7 @@ error_code cellSyncQueueClear(ppu_thread& ppu, vm::ptr<CellSyncQueue> queue)
 		}
 	}
 
-	while (!queue->ctrl.atomic_op<&CellSyncQueue::try_clear_begin_2>())
+	while (!queue->ctrl.atomic_op(&CellSyncQueue::try_clear_begin_2))
 	{
 		if (ppu.test_stopped())
 		{
diff --git a/rpcs3/util/atomic.hpp b/rpcs3/util/atomic.hpp
index 13a89074de..ea20eb4237 100644
--- a/rpcs3/util/atomic.hpp
+++ b/rpcs3/util/atomic.hpp
@@ -720,7 +720,7 @@ public:
 
 	// Atomic operation; returns old value, or pair of old value and return value (cancel op if evaluates to false)
 	template <typename F, typename RT = std::invoke_result_t<F, T&>>
-	std::conditional_t<std::is_void_v<RT>, type, std::pair<type, RT>> fetch_op(F&& func)
+	std::conditional_t<std::is_void_v<RT>, type, std::pair<type, RT>> fetch_op(F func)
 	{
 		type _new, old = atomic_storage<type>::load(m_data);
 
@@ -730,7 +730,7 @@ public:
 
 			if constexpr (std::is_void_v<RT>)
 			{
-				std::invoke(std::forward<F>(func), _new);
+				std::invoke(func, _new);
 
 				if (LIKELY(atomic_storage<type>::compare_exchange(m_data, old, _new)))
 				{
@@ -739,38 +739,7 @@ public:
 			}
 			else
 			{
-				RT ret = std::invoke(std::forward<F>(func), _new);
-
-				if (LIKELY(!ret || atomic_storage<type>::compare_exchange(m_data, old, _new)))
-				{
-					return {old, std::move(ret)};
-				}
-			}
-		}
-	}
-
-	// fetch_op overload with function (invokable) provided as a template parameter
-	template <auto F, typename RT = std::invoke_result_t<decltype(F), T&>>
-	std::conditional_t<std::is_void_v<RT>, type, std::pair<type, RT>> fetch_op()
-	{
-		type _new, old = atomic_storage<type>::load(m_data);
-
-		while (true)
-		{
-			_new = old;
-
-			if constexpr (std::is_void_v<RT>)
-			{
-				std::invoke(F, _new);
-
-				if (LIKELY(atomic_storage<type>::compare_exchange(m_data, old, _new)))
-				{
-					return old;
-				}
-			}
-			else
-			{
-				RT ret = std::invoke(F, _new);
+				RT ret = std::invoke(func, _new);
 
 				if (LIKELY(!ret || atomic_storage<type>::compare_exchange(m_data, old, _new)))
 				{
@@ -782,7 +751,7 @@ public:
 
 	// Atomic operation; returns function result value, function is the lambda
 	template <typename F, typename RT = std::invoke_result_t<F, T&>>
-	RT atomic_op(F&& func)
+	RT atomic_op(F func)
 	{
 		type _new, old = atomic_storage<type>::load(m_data);
 
@@ -792,7 +761,7 @@ public:
 
 			if constexpr (std::is_void_v<RT>)
 			{
-				std::invoke(std::forward<F>(func), _new);
+				std::invoke(func, _new);
 
 				if (LIKELY(atomic_storage<type>::compare_exchange(m_data, old, _new)))
 				{
@@ -801,38 +770,7 @@ public:
 			}
 			else
 			{
-				RT result = std::invoke(std::forward<F>(func), _new);
-
-				if (LIKELY(atomic_storage<type>::compare_exchange(m_data, old, _new)))
-				{
-					return result;
-				}
-			}
-		}
-	}
-
-	// atomic_op overload with function (invokable) provided as a template parameter
-	template <auto F, typename RT = std::invoke_result_t<decltype(F), T&>>
-	RT atomic_op()
-	{
-		type _new, old = atomic_storage<type>::load(m_data);
-
-		while (true)
-		{
-			_new = old;
-
-			if constexpr (std::is_void_v<RT>)
-			{
-				std::invoke(F, _new);
-
-				if (LIKELY(atomic_storage<type>::compare_exchange(m_data, old, _new)))
-				{
-					return;
-				}
-			}
-			else
-			{
-				RT result = std::invoke(F, _new);
+				RT result = std::invoke(func, _new);
 
 				if (LIKELY(atomic_storage<type>::compare_exchange(m_data, old, _new)))
 				{
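
Illustrative sketch, not part of the patch above: a minimal example of the call-site change, assuming a simplified stand-in class. The names toy_atomic, counter_t and bump are invented for illustration only; the real class is atomic_t in rpcs3/util/atomic.hpp, and the real call sites pass members such as &CellSyncMutex::Counter::lock_begin.

// toy_atomic is a hypothetical, simplified stand-in for atomic_t<T>.
#include <functional>
#include <type_traits>

template <typename T>
struct toy_atomic
{
	T m_data{};

	// After this patch the callable is a by-value function parameter.
	// Taking F by value (instead of a forwarding reference F&&) avoids the
	// reference indirection that, per the commit message, blocked inlining.
	template <typename F, typename RT = std::invoke_result_t<F, T&>>
	RT atomic_op(F func)
	{
		// The real implementation retries in a compare-exchange loop; omitted here.
		return std::invoke(func, m_data);
	}
};

struct counter_t // hypothetical payload, mirrors e.g. CellSyncMutex::Counter
{
	int value = 0;
	void bump() { ++value; }
};

int main()
{
	toy_atomic<counter_t> ctrl;

	// Old style removed by this patch: ctrl.atomic_op<&counter_t::bump>();
	// New style: pass a member-function pointer (or a lambda) as an argument.
	ctrl.atomic_op(&counter_t::bump);
	ctrl.atomic_op([](counter_t& c) { c.bump(); });
}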