Mirror of https://github.com/RPCS3/rpcs3.git

Minor fixes

cellSyncMutexTryLock fixed
Nekotekina committed 2016-08-17 19:50:20 +03:00
parent ad174eb12c
commit bbb0effe23
4 changed files with 216 additions and 218 deletions
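
The headline fix is cellSyncMutexTryLock: the old _sync::mutex_try_lock helper post-incremented the acquire counter even when the mutex was busy, handing out a ticket that no caller would ever release, so a single failed try-lock could wedge every later locker. The replacement, CellSyncMutex::try_lock, only takes a ticket when rel equals acq. A minimal host-side sketch of the two behaviours, using a plain struct with uint16_t fields in place of rpcs3's atomic_t<ctrl_t> of be_t values (names here are illustrative only):

#include <cstdint>

struct ctrl_model { std::uint16_t rel = 0, acq = 0; }; // simplified stand-in for CellSyncMutex::ctrl_t

// Old behaviour (removed below): the ticket counter is bumped even on failure.
bool try_lock_old(ctrl_model& c)
{
	return c.acq++ == c.rel;
}

// New behaviour (added below): a ticket is taken only when the mutex is free.
bool try_lock_new(ctrl_model& c)
{
	if (c.rel != c.acq)
	{
		return false;
	}

	c.acq++;
	return true;
}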

View File

@ -18,8 +18,8 @@
#ifdef _MSC_VER
#define ASSUME(cond) __assume(cond)
#define LIKELY(cond) (cond)
#define UNLIKELY(cond) (cond)
#define LIKELY
#define UNLIKELY
#define SAFE_BUFFERS __declspec(safebuffers)
#define NEVER_INLINE __declspec(noinline)
#define FORCE_INLINE __forceinline
@ -871,19 +871,19 @@ public:
}
};
// Error code type (return type), implements error reporting
// Error code type (return type), implements error reporting. Could be a template.
struct error_code
{
// Use fixed s32 type for now (could be template argument)
// Use fixed s32 type for now
s32 value;
error_code() = default;
// Must be implemented
// Implementation must be provided specially
static s32 error_report(const fmt_type_info* sup, u64 arg);
// Helper type (TODO: use scoped enum when error code is widely used)
enum not_an_error : s32
// Helper type
enum class not_an_error : s32
{
__not_an_error // SFINAE marker
};
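
One note on the not_an_error change: switching it to a scoped enum does not break the call sites in cellSync.cpp below (return not_an_error(CELL_SYNC_ERROR_BUSY);), because a functional-style cast from an integral or enumeration value to a scoped enum is still an explicit conversion and remains legal; only the implicit conversion back to s32 goes away. A tiny standalone illustration, independent of the real error_code machinery:

enum class not_an_error_demo : int
{
	marker // mirrors __not_an_error in the real type
};

int main()
{
	auto e = not_an_error_demo(42); // explicit (functional-style) cast from the underlying type: compiles
	// int v = e;                   // would not compile: scoped enums do not convert implicitly
	static_cast<void>(e);
}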

View File

@ -44,24 +44,6 @@ void fmt_class_string<CellSyncError>::format(std::string& out, u64 arg)
});
}
namespace _sync
{
static inline be_t<u16> mutex_acquire(mutex& ctrl)
{
return ctrl.acq++;
}
static inline bool mutex_try_lock(mutex& ctrl)
{
return ctrl.acq++ == ctrl.rel;
}
static inline void mutex_unlock(mutex& ctrl)
{
ctrl.rel++;
}
}
error_code cellSyncMutexInitialize(vm::ptr<CellSyncMutex> mutex)
{
cellSync.trace("cellSyncMutexInitialize(mutex=*0x%x)", mutex);
@ -76,7 +58,7 @@ error_code cellSyncMutexInitialize(vm::ptr<CellSyncMutex> mutex)
return CELL_SYNC_ERROR_ALIGN;
}
mutex->ctrl.exchange({ 0, 0 });
mutex->ctrl.exchange({0, 0});
return CELL_OK;
}
@ -95,11 +77,14 @@ error_code cellSyncMutexLock(vm::ptr<CellSyncMutex> mutex)
return CELL_SYNC_ERROR_ALIGN;
}
// increase acq value and remember its old value
const auto order = mutex->ctrl.atomic_op(_sync::mutex_acquire);
// Increase acq value and remember its old value
const auto order = mutex->ctrl.atomic_op(&CellSyncMutex::lock_begin);
// wait until rel value is equal to old acq value
vm::wait_op(mutex.addr(), 4, [&] { return mutex->ctrl.load().rel == order; });
// Wait until rel value is equal to old acq value
vm::wait_op(mutex.addr(), 4, [&]
{
return mutex->ctrl.load().rel == order;
});
_mm_mfence();
@ -120,7 +105,7 @@ error_code cellSyncMutexTryLock(vm::ptr<CellSyncMutex> mutex)
return CELL_SYNC_ERROR_ALIGN;
}
if (!mutex->ctrl.atomic_op(_sync::mutex_try_lock))
if (!mutex->ctrl.atomic_op(&CellSyncMutex::try_lock))
{
return not_an_error(CELL_SYNC_ERROR_BUSY);
}
@ -142,7 +127,7 @@ error_code cellSyncMutexUnlock(vm::ptr<CellSyncMutex> mutex)
return CELL_SYNC_ERROR_ALIGN;
}
mutex->ctrl.atomic_op(_sync::mutex_unlock);
mutex->ctrl.atomic_op(&CellSyncMutex::unlock);
vm::notify_at(mutex.addr(), 4);
@ -169,7 +154,7 @@ error_code cellSyncBarrierInitialize(vm::ptr<CellSyncBarrier> barrier, u16 total
}
// clear current value, write total_count and sync
barrier->ctrl.exchange({ 0, total_count });
barrier->ctrl.exchange({0, total_count});
return CELL_OK;
}
@ -188,7 +173,7 @@ error_code cellSyncBarrierNotify(vm::ptr<CellSyncBarrier> barrier)
return CELL_SYNC_ERROR_ALIGN;
}
vm::wait_op(barrier.addr(), 4, [&] { return barrier->ctrl.atomic_op(_sync::barrier::try_notify); });
vm::wait_op(barrier.addr(), 4, [&] { return barrier->ctrl.atomic_op(&CellSyncBarrier::try_notify); });
vm::notify_at(barrier.addr(), 4);
@ -211,7 +196,7 @@ error_code cellSyncBarrierTryNotify(vm::ptr<CellSyncBarrier> barrier)
_mm_mfence();
if (!barrier->ctrl.atomic_op(_sync::barrier::try_notify))
if (!barrier->ctrl.atomic_op(&CellSyncBarrier::try_notify))
{
return not_an_error(CELL_SYNC_ERROR_BUSY);
}
@ -237,7 +222,7 @@ error_code cellSyncBarrierWait(vm::ptr<CellSyncBarrier> barrier)
_mm_mfence();
vm::wait_op(barrier.addr(), 4, [&] { return barrier->ctrl.atomic_op(_sync::barrier::try_wait); });
vm::wait_op(barrier.addr(), 4, [&] { return barrier->ctrl.atomic_op(&CellSyncBarrier::try_wait); });
vm::notify_at(barrier.addr(), 4);
@ -260,7 +245,7 @@ error_code cellSyncBarrierTryWait(vm::ptr<CellSyncBarrier> barrier)
_mm_mfence();
if (!barrier->ctrl.atomic_op(_sync::barrier::try_wait))
if (!barrier->ctrl.atomic_op(&CellSyncBarrier::try_wait))
{
return not_an_error(CELL_SYNC_ERROR_BUSY);
}
@ -314,13 +299,13 @@ error_code cellSyncRwmRead(vm::ptr<CellSyncRwm> rwm, vm::ptr<void> buffer)
}
// wait until `writers` is zero, increase `readers`
vm::wait_op(rwm.addr(), 8, [&] { return rwm->ctrl.atomic_op(_sync::rwlock::try_read_begin); });
vm::wait_op(rwm.addr(), 8, [&] { return rwm->ctrl.atomic_op(&CellSyncRwm::try_read_begin); });
// copy data to buffer
std::memcpy(buffer.get_ptr(), rwm->buffer.get_ptr(), rwm->size);
// decrease `readers`, return error if already zero
if (!rwm->ctrl.atomic_op(_sync::rwlock::try_read_end))
if (!rwm->ctrl.atomic_op(&CellSyncRwm::try_read_end))
{
return CELL_SYNC_ERROR_ABORT;
}
@ -345,7 +330,7 @@ error_code cellSyncRwmTryRead(vm::ptr<CellSyncRwm> rwm, vm::ptr<void> buffer)
}
// increase `readers` if `writers` is zero
if (!rwm->ctrl.atomic_op(_sync::rwlock::try_read_begin))
if (!rwm->ctrl.atomic_op(&CellSyncRwm::try_read_begin))
{
return not_an_error(CELL_SYNC_ERROR_BUSY);
}
@ -354,7 +339,7 @@ error_code cellSyncRwmTryRead(vm::ptr<CellSyncRwm> rwm, vm::ptr<void> buffer)
std::memcpy(buffer.get_ptr(), rwm->buffer.get_ptr(), rwm->size);
// decrease `readers`, return error if already zero
if (!rwm->ctrl.atomic_op(_sync::rwlock::try_read_end))
if (!rwm->ctrl.atomic_op(&CellSyncRwm::try_read_end))
{
return CELL_SYNC_ERROR_ABORT;
}
@ -379,7 +364,7 @@ error_code cellSyncRwmWrite(vm::ptr<CellSyncRwm> rwm, vm::cptr<void> buffer)
}
// wait until `writers` is zero, set to 1
vm::wait_op(rwm.addr(), 8, [&] { return rwm->ctrl.atomic_op(_sync::rwlock::try_write_begin); });
vm::wait_op(rwm.addr(), 8, [&] { return rwm->ctrl.atomic_op(&CellSyncRwm::try_write_begin); });
// wait until `readers` is zero
vm::wait_op(rwm.addr(), 8, [&] { return rwm->ctrl.load().readers == 0; });
@ -479,13 +464,12 @@ error_code cellSyncQueuePush(vm::ptr<CellSyncQueue> queue, vm::cptr<void> buffer
u32 position;
vm::wait_op(queue.addr(), 8, [&] { return queue->ctrl.atomic_op(_sync::queue::try_push_begin, depth, &position); });
vm::wait_op(queue.addr(), 8, [&] { return queue->ctrl.atomic_op(&CellSyncQueue::try_push_begin, depth, &position); });
// copy data from the buffer at the position
std::memcpy(&queue->buffer[position * queue->size], buffer.get_ptr(), queue->size);
// ...push_end
queue->ctrl.atomic_op([](_sync::queue& ctrl) { ctrl._push = 0; });
queue->ctrl.atomic_op(&CellSyncQueue::push_end);
vm::notify_at(queue.addr(), 8);
@ -510,7 +494,7 @@ error_code cellSyncQueueTryPush(vm::ptr<CellSyncQueue> queue, vm::cptr<void> buf
u32 position;
if (!queue->ctrl.atomic_op(_sync::queue::try_push_begin, depth, &position))
if (!queue->ctrl.atomic_op(&CellSyncQueue::try_push_begin, depth, &position))
{
return not_an_error(CELL_SYNC_ERROR_BUSY);
}
@ -518,8 +502,7 @@ error_code cellSyncQueueTryPush(vm::ptr<CellSyncQueue> queue, vm::cptr<void> buf
// copy data from the buffer at the position
std::memcpy(&queue->buffer[position * queue->size], buffer.get_ptr(), queue->size);
// ...push_end
queue->ctrl.atomic_op([](_sync::queue& ctrl) { ctrl._push = 0; });
queue->ctrl.atomic_op(&CellSyncQueue::push_end);
vm::notify_at(queue.addr(), 8);
@ -544,13 +527,12 @@ error_code cellSyncQueuePop(vm::ptr<CellSyncQueue> queue, vm::ptr<void> buffer)
u32 position;
vm::wait_op(queue.addr(), 8, [&] { return queue->ctrl.atomic_op(_sync::queue::try_pop_begin, depth, &position); });
vm::wait_op(queue.addr(), 8, [&] { return queue->ctrl.atomic_op(&CellSyncQueue::try_pop_begin, depth, &position); });
// copy data at the position to the buffer
std::memcpy(buffer.get_ptr(), &queue->buffer[position % depth * queue->size], queue->size);
// ...pop_end
queue->ctrl.atomic_op([](_sync::queue& ctrl) { ctrl._pop = 0; });
queue->ctrl.atomic_op(&CellSyncQueue::pop_end);
vm::notify_at(queue.addr(), 8);
@ -575,7 +557,7 @@ error_code cellSyncQueueTryPop(vm::ptr<CellSyncQueue> queue, vm::ptr<void> buffe
u32 position;
if (!queue->ctrl.atomic_op(_sync::queue::try_pop_begin, depth, &position))
if (!queue->ctrl.atomic_op(&CellSyncQueue::try_pop_begin, depth, &position))
{
return not_an_error(CELL_SYNC_ERROR_BUSY);
}
@ -583,8 +565,7 @@ error_code cellSyncQueueTryPop(vm::ptr<CellSyncQueue> queue, vm::ptr<void> buffe
// copy data at the position to the buffer
std::memcpy(buffer.get_ptr(), &queue->buffer[position % depth * queue->size], queue->size);
// ...pop_end
queue->ctrl.atomic_op([](_sync::queue& ctrl) { ctrl._pop = 0; });
queue->ctrl.atomic_op(&CellSyncQueue::pop_end);
vm::notify_at(queue.addr(), 8);
@ -609,13 +590,12 @@ error_code cellSyncQueuePeek(vm::ptr<CellSyncQueue> queue, vm::ptr<void> buffer)
u32 position;
vm::wait_op(queue.addr(), 8, [&] { return queue->ctrl.atomic_op(_sync::queue::try_peek_begin, depth, &position); });
vm::wait_op(queue.addr(), 8, [&] { return queue->ctrl.atomic_op(&CellSyncQueue::try_peek_begin, depth, &position); });
// copy data at the position to the buffer
std::memcpy(buffer.get_ptr(), &queue->buffer[position % depth * queue->size], queue->size);
// ...peek_end
queue->ctrl.atomic_op([](_sync::queue& ctrl) { ctrl._pop = 0; });
queue->ctrl.atomic_op(&CellSyncQueue::pop_end);
vm::notify_at(queue.addr(), 8);
@ -640,7 +620,7 @@ error_code cellSyncQueueTryPeek(vm::ptr<CellSyncQueue> queue, vm::ptr<void> buff
u32 position;
if (!queue->ctrl.atomic_op(_sync::queue::try_peek_begin, depth, &position))
if (!queue->ctrl.atomic_op(&CellSyncQueue::try_peek_begin, depth, &position))
{
return not_an_error(CELL_SYNC_ERROR_BUSY);
}
@ -648,8 +628,7 @@ error_code cellSyncQueueTryPeek(vm::ptr<CellSyncQueue> queue, vm::ptr<void> buff
// copy data at the position to the buffer
std::memcpy(buffer.get_ptr(), &queue->buffer[position % depth * queue->size], queue->size);
// ...peek_end
queue->ctrl.atomic_op([](_sync::queue& ctrl) { ctrl._pop = 0; });
queue->ctrl.atomic_op(&CellSyncQueue::pop_end);
vm::notify_at(queue.addr(), 8);
@ -691,8 +670,8 @@ error_code cellSyncQueueClear(vm::ptr<CellSyncQueue> queue)
const u32 depth = queue->check_depth();
vm::wait_op(queue.addr(), 8, [&] { return queue->ctrl.atomic_op(_sync::queue::try_clear_begin_1); });
vm::wait_op(queue.addr(), 8, [&] { return queue->ctrl.atomic_op(_sync::queue::try_clear_begin_2); });
vm::wait_op(queue.addr(), 8, [&] { return queue->ctrl.atomic_op(&CellSyncQueue::try_clear_begin_1); });
vm::wait_op(queue.addr(), 8, [&] { return queue->ctrl.atomic_op(&CellSyncQueue::try_clear_begin_2); });
queue->ctrl.exchange({ 0, 0 });
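
Taken together, the mutex path in this file is a plain ticket lock: cellSyncMutexLock grabs a ticket via lock_begin and waits (through vm::wait_op) until rel catches up with that ticket, and cellSyncMutexUnlock advances rel and notifies waiters. A rough standalone model of the same protocol, substituting std::atomic and a busy-wait for rpcs3's atomic_t, vm::wait_op and vm::notify_at:

#include <atomic>
#include <cstdint>

struct ticket_mutex_model
{
	std::atomic<std::uint16_t> rel{0}; // ticket currently being served
	std::atomic<std::uint16_t> acq{0}; // next ticket to hand out

	void lock()
	{
		// "increase acq value and remember its old value"
		const std::uint16_t order = acq.fetch_add(1);

		// "wait until rel value is equal to old acq value"
		while (rel.load() != order)
		{
		}
	}

	void unlock()
	{
		rel.fetch_add(1); // admit the next ticket holder
	}
};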

View File

@ -34,123 +34,135 @@ enum CellSyncError1 : u32
CELL_SYNC_ERROR_UNKNOWNKEY = 0x80410113,
};
namespace _sync
struct CellSyncMutex
{
struct alignas(4) mutex // CellSyncMutex control variable
struct alignas(4) ctrl_t
{
be_t<u16> rel;
be_t<u16> acq;
};
}
struct CellSyncMutex
{
atomic_t<_sync::mutex> ctrl;
atomic_t<ctrl_t> ctrl;
static inline auto lock_begin(ctrl_t& ctrl)
{
return ctrl.acq++;
}
static inline bool try_lock(ctrl_t& ctrl)
{
if (UNLIKELY(ctrl.rel != ctrl.acq))
{
return false;
}
ctrl.acq++;
return true;
}
static inline void unlock(ctrl_t& ctrl)
{
ctrl.rel++;
}
};
CHECK_SIZE_ALIGN(CellSyncMutex, 4, 4);
namespace _sync
struct CellSyncBarrier
{
struct alignas(4) barrier // CellSyncBarrier control variable
struct alignas(4) ctrl_t
{
be_t<s16> value;
be_t<u16> count;
static inline bool try_notify(barrier& ctrl)
{
if (ctrl.value & 0x8000)
{
return false;
}
if (++ctrl.value == ctrl.count)
{
ctrl.value |= 0x8000;
}
return true;
};
static inline bool try_wait(barrier& ctrl)
{
if ((ctrl.value & 0x8000) == 0)
{
return false;
}
if (--ctrl.value == -0x8000)
{
ctrl.value = 0;
}
return true;
}
};
}
struct CellSyncBarrier
{
atomic_t<_sync::barrier> ctrl;
atomic_t<ctrl_t> ctrl;
static inline bool try_notify(ctrl_t& ctrl)
{
if (ctrl.value & 0x8000)
{
return false;
}
if (++ctrl.value == ctrl.count)
{
ctrl.value |= 0x8000;
}
return true;
};
static inline bool try_wait(ctrl_t& ctrl)
{
if ((ctrl.value & 0x8000) == 0)
{
return false;
}
if (--ctrl.value == -0x8000)
{
ctrl.value = 0;
}
return true;
}
};
CHECK_SIZE_ALIGN(CellSyncBarrier, 4, 4);
namespace _sync
struct alignas(16) CellSyncRwm
{
struct alignas(4) rwlock // CellSyncRwm control variable
struct alignas(4) ctrl_t
{
be_t<u16> readers;
be_t<u16> writers;
static inline bool try_read_begin(rwlock& ctrl)
{
if (ctrl.writers)
{
return false;
}
ctrl.readers++;
return true;
}
static inline bool try_read_end(rwlock& ctrl)
{
if (ctrl.readers == 0)
{
return false;
}
ctrl.readers--;
return true;
}
static inline bool try_write_begin(rwlock& ctrl)
{
if (ctrl.writers)
{
return false;
}
ctrl.writers = 1;
return true;
}
};
}
struct alignas(16) CellSyncRwm
{
atomic_t<_sync::rwlock> ctrl;
atomic_t<ctrl_t> ctrl;
be_t<u32> size;
vm::bptr<void, u64> buffer;
static inline bool try_read_begin(ctrl_t& ctrl)
{
if (ctrl.writers)
{
return false;
}
ctrl.readers++;
return true;
}
static inline bool try_read_end(ctrl_t& ctrl)
{
if (ctrl.readers == 0)
{
return false;
}
ctrl.readers--;
return true;
}
static inline bool try_write_begin(ctrl_t& ctrl)
{
if (ctrl.writers)
{
return false;
}
ctrl.writers = 1;
return true;
}
};
CHECK_SIZE_ALIGN(CellSyncRwm, 16, 16);
namespace _sync
struct alignas(32) CellSyncQueue
{
struct alignas(8) queue // CellSyncQueue control variable
struct alignas(8) ctrl_t
{
union
{
@ -167,79 +179,9 @@ namespace _sync
bf_t<be_t<u32>, 0, 24> count;
bf_t<be_t<u32>, 24, 8> _push;
};
static inline bool try_push_begin(queue& ctrl, u32 depth, u32* position)
{
const u32 count = ctrl.count;
if (ctrl._push || count + ctrl._pop >= depth)
{
return false;
}
*position = ctrl.next;
ctrl.next = *position + 1 != depth ? *position + 1 : 0;
ctrl.count = count + 1;
ctrl._push = 1;
return true;
}
static inline bool try_pop_begin(queue& ctrl, u32 depth, u32* position)
{
const u32 count = ctrl.count;
if (ctrl._pop || count <= ctrl._push)
{
return false;
}
ctrl._pop = 1;
*position = ctrl.next + depth - count;
ctrl.count = count - 1;
return true;
}
static inline bool try_peek_begin(queue& ctrl, u32 depth, u32* position)
{
const u32 count = ctrl.count;
if (ctrl._pop || count <= ctrl._push)
{
return false;
}
ctrl._pop = 1;
*position = ctrl.next + depth - count;
return true;
}
static inline bool try_clear_begin_1(queue& ctrl)
{
if (ctrl._pop)
{
return false;
}
ctrl._pop = 1;
return true;
}
static inline bool try_clear_begin_2(queue& ctrl)
{
if (ctrl._push)
{
return false;
}
ctrl._push = 1;
return true;
}
};
}
struct alignas(32) CellSyncQueue
{
atomic_t<_sync::queue> ctrl;
atomic_t<ctrl_t> ctrl;
be_t<u32> size;
be_t<u32> depth;
@ -257,6 +199,83 @@ struct alignas(32) CellSyncQueue
return depth;
}
static inline bool try_push_begin(ctrl_t& ctrl, u32 depth, u32* position)
{
const u32 count = ctrl.count;
if (ctrl._push || count + ctrl._pop >= depth)
{
return false;
}
*position = ctrl.next;
ctrl.next = *position + 1 != depth ? *position + 1 : 0;
ctrl.count = count + 1;
ctrl._push = 1;
return true;
}
static inline void push_end(ctrl_t& ctrl)
{
ctrl._push = 0;
}
static inline bool try_pop_begin(ctrl_t& ctrl, u32 depth, u32* position)
{
const u32 count = ctrl.count;
if (ctrl._pop || count <= ctrl._push)
{
return false;
}
ctrl._pop = 1;
*position = ctrl.next + depth - count;
ctrl.count = count - 1;
return true;
}
static inline bool try_peek_begin(ctrl_t& ctrl, u32 depth, u32* position)
{
const u32 count = ctrl.count;
if (ctrl._pop || count <= ctrl._push)
{
return false;
}
ctrl._pop = 1;
*position = ctrl.next + depth - count;
return true;
}
static inline void pop_end(ctrl_t& ctrl)
{
ctrl._pop = 0;
}
static inline bool try_clear_begin_1(ctrl_t& ctrl)
{
if (ctrl._pop)
{
return false;
}
ctrl._pop = 1;
return true;
}
static inline bool try_clear_begin_2(ctrl_t& ctrl)
{
if (ctrl._push)
{
return false;
}
ctrl._push = 1;
return true;
}
};
CHECK_SIZE_ALIGN(CellSyncQueue, 32, 32);
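
The queue helpers moved onto CellSyncQueue keep the same ring-buffer arithmetic as before (with push_end/pop_end now named helpers instead of inline lambdas). As a worked example of that arithmetic, here is a hypothetical, non-atomic walkthrough of one push followed by one pop with depth = 3, using plain u32 fields instead of the bf_t bitfields above (the % depth reduction is applied directly, matching how cellSyncQueuePop indexes the buffer):

#include <cassert>
#include <cstdint>

struct queue_ctrl_model
{
	std::uint32_t next = 0;  // slot the next push will write
	std::uint32_t count = 0; // number of queued elements
	std::uint32_t _push = 0; // push in progress
	std::uint32_t _pop = 0;  // pop in progress
};

int main()
{
	const std::uint32_t depth = 3;
	queue_ctrl_model ctrl;

	// try_push_begin: reserve slot 0, advance next with wrap-around, bump count
	const std::uint32_t pos = ctrl.next;
	ctrl.next = pos + 1 != depth ? pos + 1 : 0;
	ctrl.count = ctrl.count + 1;
	ctrl._push = 1;
	assert(pos == 0 && ctrl.next == 1 && ctrl.count == 1);
	ctrl._push = 0; // push_end

	// try_pop_begin: the oldest element sits at (next + depth - count) % depth
	ctrl._pop = 1;
	const std::uint32_t read = (ctrl.next + depth - ctrl.count) % depth;
	ctrl.count = ctrl.count - 1;
	assert(read == 0); // pops the slot that was just pushed
	ctrl._pop = 0; // pop_end
}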

View File

@ -154,8 +154,8 @@ enum class elf_error
};
// ELF object with specified parameters.
// en_t: endianness (specify le_t or be_t)
// sz_t: size (specify u32 for ELF32, u64 for ELF64)
// en_t: endianness (elf_le or elf_be)
// sz_t: size (u32 for ELF32, u64 for ELF64)
template<template<typename T> class en_t, typename sz_t, elf_machine Machine, elf_os OS, elf_type Type>
class elf_object
{