
SharedMutex improved

Nekotekina 2015-11-30 18:10:17 +03:00
parent b18e337781
commit 82cb8fe5bd
6 changed files with 206 additions and 164 deletions

View File

@@ -1,152 +1,103 @@
#include "stdafx.h"
#include "SharedMutex.h"
static const u32 MAX_READERS = 0x7fffffff; // 2^31-1
inline bool shared_mutex_t::try_lock_shared()
void shared_mutex::impl_lock_shared(u32 old_value)
{
return m_info.atomic_op([](ownership_info_t& info) -> bool
// Throw if reader count breaks the "second" limit (it should be impossible)
CHECK_ASSERTION((old_value & SM_READER_COUNT) != SM_READER_COUNT);
std::unique_lock<std::mutex> lock(m_mutex);
// Notify non-zero reader queue size
m_ctrl |= SM_READER_QUEUE;
// Compensate incorrectly increased reader count
if ((--m_ctrl & SM_READER_COUNT) == 0 && m_wq_size)
{
if (info.readers < MAX_READERS && !info.writers && !info.waiting_readers && !info.waiting_writers)
{
info.readers++;
return true;
}
// Notify current exclusive owner (condition passed)
m_ocv.notify_one();
}
return false;
});
}
CHECK_ASSERTION(++m_rq_size);
void shared_mutex_t::lock_shared()
{
if (!try_lock_shared())
// Obtain the reader lock
while (!atomic_op(m_ctrl, op_lock_shared))
{
std::unique_lock<std::mutex> lock(m_mutex);
m_rcv.wait(lock);
}
m_wrcv.wait(lock, WRAP_EXPR(m_info.atomic_op([](ownership_info_t& info) -> bool
{
if (info.waiting_readers < UINT16_MAX)
{
info.waiting_readers++;
return true;
}
CHECK_ASSERTION(m_rq_size--);
return false;
})));
m_rcv.wait(lock, WRAP_EXPR(m_info.atomic_op([](ownership_info_t& info) -> bool
{
if (!info.writers && !info.waiting_writers && info.readers < MAX_READERS)
{
info.readers++;
return true;
}
return false;
})));
const auto info = m_info.atomic_op([](ownership_info_t& info)
{
if (!info.waiting_readers--)
{
throw EXCEPTION("Invalid value");
}
});
if (info.waiting_readers == UINT16_MAX)
{
m_wrcv.notify_one();
}
if (m_rq_size == 0)
{
m_ctrl &= ~SM_READER_QUEUE;
}
}
void shared_mutex_t::unlock_shared()
void shared_mutex::impl_unlock_shared(u32 new_value)
{
const auto info = m_info.atomic_op([](ownership_info_t& info)
// Throw if reader count was zero
CHECK_ASSERTION((new_value & SM_READER_COUNT) != SM_READER_COUNT);
// Mutex cannot be unlocked before notification because m_ctrl has been changed outside
std::lock_guard<std::mutex> lock(m_mutex);
if (m_wq_size && (new_value & SM_READER_COUNT) == 0)
{
if (!info.readers--)
{
throw EXCEPTION("Not locked");
}
});
const bool notify_writers = info.readers == 1 && info.writers;
const bool notify_readers = info.readers == UINT32_MAX && info.waiting_readers;
if (notify_writers || notify_readers)
// Notify current exclusive owner that the latest reader is gone
m_ocv.notify_one();
}
else if (m_rq_size)
{
std::lock_guard<std::mutex> lock(m_mutex);
if (notify_writers) m_wcv.notify_one();
if (notify_readers) m_rcv.notify_one();
m_rcv.notify_one();
}
}
inline bool shared_mutex_t::try_lock()
void shared_mutex::impl_lock_excl(u32 value)
{
return m_info.compare_and_swap_test({ 0, 0, 0, 0 }, { 0, 1, 0, 0 });
}
std::unique_lock<std::mutex> lock(m_mutex);
void shared_mutex_t::lock()
{
if (!try_lock())
// Notify non-zero writer queue size
m_ctrl |= SM_WRITER_QUEUE;
CHECK_ASSERTION(++m_wq_size);
// Obtain the writer lock
while (!atomic_op(m_ctrl, op_lock_excl))
{
std::unique_lock<std::mutex> lock(m_mutex);
m_wcv.wait(lock);
}
m_wwcv.wait(lock, WRAP_EXPR(m_info.atomic_op([](ownership_info_t& info) -> bool
{
if (info.waiting_writers < UINT16_MAX)
{
info.waiting_writers++;
return true;
}
// Wait for remaining readers
while ((m_ctrl & SM_READER_COUNT) != 0)
{
m_ocv.wait(lock);
}
return false;
})));
CHECK_ASSERTION(m_wq_size--);
m_wcv.wait(lock, WRAP_EXPR(m_info.atomic_op([](ownership_info_t& info) -> bool
{
if (!info.writers)
{
info.writers++;
return true;
}
return false;
})));
m_wcv.wait(lock, WRAP_EXPR(m_info.load().readers == 0));
const auto info = m_info.atomic_op([](ownership_info_t& info)
{
if (!info.waiting_writers--)
{
throw EXCEPTION("Invalid value");
}
});
if (info.waiting_writers == UINT16_MAX)
{
m_wwcv.notify_one();
}
if (m_wq_size == 0)
{
m_ctrl &= ~SM_WRITER_QUEUE;
}
}
void shared_mutex_t::unlock()
void shared_mutex::impl_unlock_excl(u32 value)
{
const auto info = m_info.atomic_op([](ownership_info_t& info)
{
if (!info.writers--)
{
throw EXCEPTION("Not locked");
}
});
// Throw if was not locked exclusively
CHECK_ASSERTION(value & SM_WRITER_LOCK);
if (info.waiting_writers || info.waiting_readers)
// Mutex cannot be unlocked before notification because m_ctrl has been changed outside
std::lock_guard<std::mutex> lock(m_mutex);
if (m_wq_size)
{
std::lock_guard<std::mutex> lock(m_mutex);
if (info.waiting_writers) m_wcv.notify_one();
else if (info.waiting_readers) m_rcv.notify_all();
// Notify next exclusive owner
m_wcv.notify_one();
}
else if (m_rq_size)
{
// Notify all readers
m_rcv.notify_all();
}
}
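
Note: the slow paths above rely on an atomic_op() helper applied to m_ctrl (called with op_lock_shared / op_lock_excl); its definition is not part of this diff and presumably lives in the project's atomic utilities. As a rough sketch of the assumed interface — a compare-exchange loop over a std::atomic<u32>, hypothetical code rather than the project's actual implementation:

#include <atomic>
#include <cstdint>

using u32 = std::uint32_t; // stand-in for the project's u32 typedef

// Hypothetical helper (assumption, not from the commit): apply 'op' to a local
// copy of the atomic value and try to publish the result with a CAS; the bool
// returned by 'op' decides whether there is anything to publish at all.
template<typename F>
bool atomic_op(std::atomic<u32>& var, F op)
{
	u32 old_value = var.load();

	while (true)
	{
		u32 new_value = old_value;

		// The predicate may modify new_value (e.g. increment the reader count
		// or set the writer bit) and reports whether locking is possible.
		if (!op(new_value))
		{
			return false;
		}

		// Publish; on failure old_value is refreshed and the loop retries.
		if (var.compare_exchange_weak(old_value, new_value))
		{
			return true;
		}
	}
}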

View File

@@ -1,46 +1,136 @@
#pragma once
#include <shared_mutex>
// An attempt to create lock-free (in optimistic case) implementation similar to std::shared_mutex;
// MSVC implementation of std::shared_timed_mutex is not lock-free and thus may be slow, and std::shared_mutex is not available.
class shared_mutex_t
//! An attempt at an efficient implementation of a "shared mutex", lock-free in the optimistic case.
//! All locking and unlocking can be done by a single LOCK XADD or LOCK CMPXCHG instruction.
//! The MSVC implementation of std::shared_timed_mutex seems suboptimal.
//! std::shared_mutex is not available until C++17.
class shared_mutex final
{
struct ownership_info_t
enum : u32
{
u32 readers : 31;
u32 writers : 1;
u16 waiting_readers;
u16 waiting_writers;
SM_WRITER_LOCK = 1u << 31, // Exclusive lock flag, must be MSB
SM_WRITER_QUEUE = 1u << 30, // Flag set if m_wq_size != 0
SM_READER_QUEUE = 1u << 29, // Flag set if m_rq_size != 0
SM_READER_COUNT = SM_READER_QUEUE - 1, // Valid reader count bit mask
SM_READER_MAX = 1u << 24, // Max reader count
};
atomic_t<ownership_info_t> m_info{};
std::atomic<u32> m_ctrl{}; // Control atomic variable: reader count | SM_* flags
std::thread::id m_owner{}; // Current exclusive owner (TODO: implement only for debug mode?)
std::mutex m_mutex;
u32 m_rq_size{}; // Reader queue size (threads waiting on m_rcv)
u32 m_wq_size{}; // Writer queue size (threads waiting on m_wcv+m_ocv)
std::condition_variable m_rcv;
std::condition_variable m_wcv;
std::condition_variable m_wrcv;
std::condition_variable m_wwcv;
std::condition_variable m_rcv; // Reader queue
std::condition_variable m_wcv; // Writer queue
std::condition_variable m_ocv; // For current exclusive owner
static bool op_lock_shared(u32& ctrl)
{
// Check writer flags and reader limit
return (ctrl & ~SM_READER_QUEUE) < SM_READER_MAX ? ctrl++, true : false;
}
static bool op_lock_excl(u32& ctrl)
{
// Test and set writer lock
return (ctrl & SM_WRITER_LOCK) == 0 ? ctrl |= SM_WRITER_LOCK, true : false;
}
void impl_lock_shared(u32 old_ctrl);
void impl_unlock_shared(u32 new_ctrl);
void impl_lock_excl(u32 ctrl);
void impl_unlock_excl(u32 ctrl);
public:
shared_mutex_t() = default;
shared_mutex() = default;
// Lock in shared mode
void lock_shared();
void lock_shared()
{
const u32 old_ctrl = m_ctrl++;
// Check flags and reader limit
if (old_ctrl >= SM_READER_MAX)
{
impl_lock_shared(old_ctrl);
}
}
// Try to lock in shared mode
bool try_lock_shared();
bool try_lock_shared()
{
return atomic_op(m_ctrl, [](u32& ctrl)
{
// Check flags and reader limit
return ctrl < SM_READER_MAX ? ctrl++, true : false;
});
}
// Unlock in shared mode
void unlock_shared();
void unlock_shared()
{
const u32 new_ctrl = --m_ctrl;
// Check if notification required
if (new_ctrl >= SM_READER_MAX)
{
impl_unlock_shared(new_ctrl);
}
}
// Lock exclusively
void lock();
void lock()
{
u32 value = 0;
if (!m_ctrl.compare_exchange_strong(value, SM_WRITER_LOCK))
{
impl_lock_excl(value);
}
}
// Try to lock exclusively
bool try_lock();
bool try_lock()
{
u32 value = 0;
return m_ctrl.compare_exchange_strong(value, SM_WRITER_LOCK);
}
// Unlock exclusively
void unlock();
void unlock()
{
const u32 value = m_ctrl.fetch_add(SM_WRITER_LOCK);
// Check if notification required
if (value != SM_WRITER_LOCK)
{
impl_unlock_excl(value);
}
}
};
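
Two arithmetic properties of this control word are worth spelling out. First, lock_shared() increments m_ctrl before checking anything, and the reader limit (SM_READER_MAX = 2^24) sits well below the first flag bit, which leaves headroom for those speculative increments. Second, unlock() clears the writer bit with fetch_add(SM_WRITER_LOCK) rather than a masking store: since the flag is the most significant bit, the addition wraps it away while preserving the queue flags and returning the previous value in a single instruction. A small illustration with local stand-in constants (hypothetical, not the project's definitions):

#include <cstdint>

// Stand-ins mirroring the enum above (for illustration only)
constexpr std::uint32_t WRITER_LOCK  = 1u << 31;
constexpr std::uint32_t WRITER_QUEUE = 1u << 30;
constexpr std::uint32_t READER_QUEUE = 1u << 29;
constexpr std::uint32_t READER_MAX   = 1u << 24;

// Speculative reader increments cannot spill into the flag bits
static_assert(READER_MAX < READER_QUEUE, "reader count has headroom below the flags");

// Adding the MSB to a value that already has it set wraps the bit away...
static_assert(WRITER_LOCK + WRITER_LOCK == 0, "writer bit cleared by the wrap");

// ...while lower flag bits survive, so unlock() can tell that waiters exist
static_assert((WRITER_LOCK | WRITER_QUEUE) + WRITER_LOCK == WRITER_QUEUE, "queue flag preserved");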
//! Simplified shared (reader) lock implementation, similar to std::lock_guard.
//! std::shared_lock may be used instead if necessary.
class reader_lock final
{
shared_mutex& m_mutex;
public:
reader_lock(const reader_lock&) = delete;
reader_lock(shared_mutex& mutex)
: m_mutex(mutex)
{
m_mutex.lock_shared();
}
~reader_lock()
{
m_mutex.unlock_shared();
}
};
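
For context, a minimal usage sketch of the new types (hypothetical example, not part of the diff): read-only paths take the lock through reader_lock, while writers use std::lock_guard, which maps onto lock()/unlock(). This mirrors how SPUDatabase and the ID manager are converted in the files below.

#include <cstdint>
#include <mutex>
#include <string>
#include <unordered_map>
#include "SharedMutex.h" // the header shown above

// Hypothetical cache protected by the new shared_mutex
class name_cache
{
	shared_mutex m_mutex;

	std::unordered_map<std::uint32_t, std::string> m_names;

public:
	// Read-only access: any number of threads may hold the shared lock at once
	std::string find(std::uint32_t id)
	{
		reader_lock lock(m_mutex);

		const auto found = m_names.find(id);
		return found != m_names.end() ? found->second : std::string{};
	}

	// Mutation: exclusive lock via the standard guard
	void insert(std::uint32_t id, std::string name)
	{
		std::lock_guard<shared_mutex> lock(m_mutex);

		m_names.emplace(id, std::move(name));
	}
};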

View File

@@ -50,7 +50,7 @@ std::shared_ptr<spu_function_t> SPUDatabase::analyse(const be_t<u32>* ls, u32 en
const u64 key = entry | u64{ ls[entry / 4] } << 32;
{
std::shared_lock<shared_mutex_t> lock(m_mutex);
reader_lock lock(m_mutex);
// Try to find existing function in the database
if (auto func = find(ls + entry / 4, key, max_limit - entry))
@@ -59,7 +59,7 @@ std::shared_ptr<spu_function_t> SPUDatabase::analyse(const be_t<u32>* ls, u32 en
}
}
std::lock_guard<shared_mutex_t> lock(m_mutex);
std::lock_guard<shared_mutex> lock(m_mutex);
// Double-check
if (auto func = find(ls + entry / 4, key, max_limit - entry))

View File

@@ -259,7 +259,7 @@ struct spu_function_t
// SPU Function Database (must be global or PS3 process-local)
class SPUDatabase final
{
shared_mutex_t m_mutex;
shared_mutex m_mutex;
// All registered functions (uses addr and first instruction as a key)
std::unordered_multimap<u64, std::shared_ptr<spu_function_t>> m_db;

View File

@@ -3,7 +3,7 @@
namespace idm
{
std::mutex g_mutex;
shared_mutex g_mutex;
idm::map_t g_map;
@@ -14,16 +14,16 @@ namespace idm
namespace fxm
{
std::mutex g_mutex;
shared_mutex g_mutex;
fxm::map_t g_map;
}
void idm::clear()
{
std::lock_guard<std::mutex> lock{g_mutex};
std::lock_guard<shared_mutex> lock(g_mutex);
// Call recorded id_finalize functions for all IDs
// Call recorded finalization functions for all IDs
for (auto& id : idm::map_t(std::move(g_map)))
{
(*id.second.type_index)(id.second.data.get());
@@ -34,17 +34,16 @@ void idm::clear()
bool idm::check(u32 in_id, id_type_index_t type)
{
std::lock_guard<std::mutex> lock(g_mutex);
reader_lock lock(g_mutex);
const auto found = g_map.find(in_id);
return found != g_map.end() && found->second.type_index == type;
}
// check if ID exists and return its type or nullptr
const std::type_info* idm::get_type(u32 raw_id)
{
std::lock_guard<std::mutex> lock(g_mutex);
reader_lock lock(g_mutex);
const auto found = g_map.find(raw_id);
@@ -53,7 +52,7 @@ const std::type_info* idm::get_type(u32 raw_id)
std::shared_ptr<void> idm::get(u32 in_id, id_type_index_t type)
{
std::lock_guard<std::mutex> lock(g_mutex);
reader_lock lock(g_mutex);
const auto found = g_map.find(in_id);
@@ -67,7 +66,7 @@ std::shared_ptr<void> idm::get(u32 in_id, id_type_index_t type)
idm::map_t idm::get_all(id_type_index_t type)
{
std::lock_guard<std::mutex> lock(g_mutex);
reader_lock lock(g_mutex);
idm::map_t result;
@@ -84,7 +83,7 @@ idm::map_t idm::get_all(id_type_index_t type)
std::shared_ptr<void> idm::withdraw(u32 in_id, id_type_index_t type)
{
std::lock_guard<std::mutex> lock(g_mutex);
std::lock_guard<shared_mutex> lock(g_mutex);
const auto found = g_map.find(in_id);
@@ -102,7 +101,7 @@ std::shared_ptr<void> idm::withdraw(u32 in_id, id_type_index_t type)
u32 idm::get_count(id_type_index_t type)
{
std::lock_guard<std::mutex> lock(g_mutex);
reader_lock lock(g_mutex);
u32 result = 0;
@@ -120,9 +119,9 @@ u32 idm::get_count(id_type_index_t type)
void fxm::clear()
{
std::lock_guard<std::mutex> lock{g_mutex};
std::lock_guard<shared_mutex> lock(g_mutex);
// Call recorded id_finalize functions for all IDs
// Call recorded finalization functions for all IDs
for (auto& id : fxm::map_t(std::move(g_map)))
{
if (id.second) (*id.first)(id.second.get());
@@ -131,7 +130,7 @@ void fxm::clear()
bool fxm::check(id_type_index_t type)
{
std::lock_guard<std::mutex> lock(g_mutex);
reader_lock lock(g_mutex);
const auto found = g_map.find(type);
@@ -140,7 +139,7 @@ bool fxm::check(id_type_index_t type)
std::shared_ptr<void> fxm::get(id_type_index_t type)
{
std::lock_guard<std::mutex> lock(g_mutex);
reader_lock lock(g_mutex);
const auto found = g_map.find(type);
@@ -149,7 +148,7 @@ std::shared_ptr<void> fxm::get(id_type_index_t type)
std::shared_ptr<void> fxm::withdraw(id_type_index_t type)
{
std::unique_lock<std::mutex> lock(g_mutex);
std::unique_lock<shared_mutex> lock(g_mutex);
const auto found = g_map.find(type);

View File

@@ -1,5 +1,7 @@
#pragma once
#include "Utilities/SharedMutex.h"
#define ID_MANAGER_INCLUDED
// TODO: make id_aux_initialize and id_aux_finalize safer against a possible ODR violation
@@ -109,12 +111,12 @@ namespace idm
template<typename T, typename Ptr>
std::shared_ptr<T> add(Ptr&& get_ptr)
{
extern std::mutex g_mutex;
extern shared_mutex g_mutex;
extern idm::map_t g_map;
extern u32 g_last_raw_id;
extern thread_local u32 g_tls_last_id;
std::lock_guard<std::mutex> lock(g_mutex);
std::lock_guard<shared_mutex> lock(g_mutex);
for (u32 raw_id = g_last_raw_id; (raw_id = id_traits<T>::next_id(raw_id)); /**/)
{
@@ -283,10 +285,10 @@ namespace fxm
template<typename T, bool Always, typename Ptr>
std::pair<std::shared_ptr<T>, std::shared_ptr<T>> add(Ptr&& get_ptr)
{
extern std::mutex g_mutex;
extern shared_mutex g_mutex;
extern fxm::map_t g_map;
std::lock_guard<std::mutex> lock(g_mutex);
std::lock_guard<shared_mutex> lock(g_mutex);
auto& item = g_map[get_id_type_index<T>()];