#pragma once
// Get the thread control object assigned (via TLS) to the current thread; may be null
const class thread_ctrl_t* get_current_thread_ctrl();
// named thread control class
class thread_ctrl_t final
{
	friend class thread_t;

	// thread handler
	std::thread m_thread;

	// name getter
	const std::function<std::string()> name;

	// condition variable, notified before thread exit
	std::condition_variable join_cv;

	// thread status (set to false after execution)
	std::atomic<bool> joinable{ true };

	// true if TLS of some thread points to owner
	std::atomic<bool> assigned{ false };

	// assign TLS (defined out-of-line)
	void set_current();

public:
	// construct with a name getter; the thread itself is started by thread_t
	thread_ctrl_t(std::function<std::string()> name)
		: name(std::move(name))
	{
	}

	// get thread name
	std::string get_name() const;
};
2015-01-16 15:36:53 +01:00
|
|
|
class thread_t
|
2014-01-31 19:40:18 +01:00
|
|
|
{
|
2015-07-01 00:25:52 +02:00
|
|
|
// pointer to managed resource (shared with actual thread)
|
|
|
|
std::shared_ptr<thread_ctrl_t> m_thread;
|
2015-01-16 15:36:53 +01:00
|
|
|
|
2015-07-01 00:25:52 +02:00
|
|
|
public:
|
|
|
|
// thread mutex for external use
|
|
|
|
std::mutex mutex;
|
|
|
|
|
|
|
|
// thread condition variable for external use
|
|
|
|
std::condition_variable cv;
|
2014-01-31 19:40:18 +01:00
|
|
|
|
|
|
|
public:
|
2015-07-01 00:25:52 +02:00
|
|
|
// initialize in empty state
|
|
|
|
thread_t() = default;
|
|
|
|
|
|
|
|
// create named thread
|
|
|
|
thread_t(std::function<std::string()> name, std::function<void()> func);
|
|
|
|
|
2015-07-01 19:09:26 +02:00
|
|
|
// destructor, joins automatically (questionable, don't rely on this functionality in derived destructors)
|
|
|
|
virtual ~thread_t() noexcept(false);
|
2015-01-16 15:36:53 +01:00
|
|
|
|
2015-07-01 00:25:52 +02:00
|
|
|
thread_t(const thread_t&) = delete;
|
2014-01-31 19:40:18 +01:00
|
|
|
|
2015-07-01 00:25:52 +02:00
|
|
|
thread_t& operator =(const thread_t&) = delete;
|
2014-02-19 18:27:52 +01:00
|
|
|
|
|
|
|
public:
|
2015-07-01 00:25:52 +02:00
|
|
|
// get thread name
|
|
|
|
std::string get_name() const;
|
2014-10-10 23:33:57 +02:00
|
|
|
|
2015-07-01 00:25:52 +02:00
|
|
|
// create named thread (current state must be empty)
|
|
|
|
void start(std::function<std::string()> name, std::function<void()> func);
|
2014-10-17 22:13:25 +02:00
|
|
|
|
2015-07-01 00:25:52 +02:00
|
|
|
// detach thread -> empty state
|
|
|
|
void detach();
|
2014-10-17 22:13:25 +02:00
|
|
|
|
2015-07-01 00:25:52 +02:00
|
|
|
// join thread (provide locked unique_lock, for example, lv2_lock, for interruptibility) -> empty state
|
|
|
|
void join(std::unique_lock<std::mutex>& lock);
|
2014-10-17 22:13:25 +02:00
|
|
|
|
2015-07-01 00:25:52 +02:00
|
|
|
// join thread -> empty state
|
|
|
|
void join();
|
2014-10-17 22:13:25 +02:00
|
|
|
|
2015-07-01 00:25:52 +02:00
|
|
|
// check if not empty
|
|
|
|
bool joinable() const { return m_thread.operator bool(); }
|
2014-10-17 22:13:25 +02:00
|
|
|
|
2015-07-01 00:25:52 +02:00
|
|
|
// check whether it is the current running thread
|
|
|
|
bool is_current() const;
|
2014-10-17 22:13:25 +02:00
|
|
|
};
|
|
|
|
|
2015-07-04 01:22:24 +02:00
|
|
|
class autojoin_thread_t final : private thread_t
|
|
|
|
{
|
|
|
|
public:
|
|
|
|
using thread_t::mutex;
|
|
|
|
using thread_t::cv;
|
|
|
|
|
|
|
|
public:
|
|
|
|
autojoin_thread_t() = delete;
|
|
|
|
|
|
|
|
autojoin_thread_t(std::function<std::string()> name, std::function<void()> func)
|
|
|
|
{
|
|
|
|
start(std::move(name), std::move(func));
|
|
|
|
}
|
|
|
|
|
|
|
|
virtual ~autojoin_thread_t() override
|
|
|
|
{
|
|
|
|
join();
|
|
|
|
}
|
|
|
|
|
|
|
|
using thread_t::is_current;
|
|
|
|
};
|
|
|
|
|
2015-03-07 17:03:42 +01:00
|
|
|
struct waiter_map_t
|
2014-10-11 00:37:20 +02:00
|
|
|
{
|
2015-06-26 01:26:23 +02:00
|
|
|
static const size_t size = 16;
|
2014-10-11 00:37:20 +02:00
|
|
|
|
2015-07-01 00:25:52 +02:00
|
|
|
std::array<std::mutex, size> mutexes;
|
|
|
|
std::array<std::condition_variable, size> cvs;
|
2014-10-17 22:13:25 +02:00
|
|
|
|
2015-03-07 17:03:42 +01:00
|
|
|
const std::string name;
|
2014-10-10 23:33:57 +02:00
|
|
|
|
2014-10-17 22:13:25 +02:00
|
|
|
waiter_map_t(const char* name)
|
2015-03-07 17:03:42 +01:00
|
|
|
: name(name)
|
2014-10-17 22:13:25 +02:00
|
|
|
{
|
|
|
|
}
|
2014-10-16 18:29:41 +02:00
|
|
|
|
2015-06-26 01:26:23 +02:00
|
|
|
// generate simple "hash" for mutex/cv distribution
|
|
|
|
u32 get_hash(u32 addr)
|
|
|
|
{
|
|
|
|
addr ^= addr >> 16;
|
|
|
|
addr ^= addr >> 24;
|
|
|
|
addr ^= addr >> 28;
|
|
|
|
return addr % size;
|
|
|
|
}
|
|
|
|
|
2015-07-01 00:25:52 +02:00
|
|
|
void check_emu_status(u32 addr);
|
2015-03-07 17:03:42 +01:00
|
|
|
|
2015-07-01 00:25:52 +02:00
|
|
|
// wait until pred() returns true, `addr` is an arbitrary number
|
2015-06-26 01:26:23 +02:00
|
|
|
template<typename F, typename... Args> safe_buffers auto wait_op(u32 addr, F pred, Args&&... args) -> decltype(static_cast<void>(pred(args...)))
|
2014-10-10 23:33:57 +02:00
|
|
|
{
|
2015-06-26 01:26:23 +02:00
|
|
|
const u32 hash = get_hash(addr);
|
2015-03-07 17:03:42 +01:00
|
|
|
|
|
|
|
// set mutex locker
|
2015-07-01 00:25:52 +02:00
|
|
|
std::unique_lock<std::mutex> lock(mutexes[hash], std::defer_lock);
|
2014-10-16 18:29:41 +02:00
|
|
|
|
2015-06-26 01:26:23 +02:00
|
|
|
while (true)
|
2014-10-16 18:29:41 +02:00
|
|
|
{
|
2015-06-26 01:26:23 +02:00
|
|
|
// check the condition
|
|
|
|
if (pred(args...)) return;
|
|
|
|
|
2015-07-01 00:25:52 +02:00
|
|
|
check_emu_status(addr);
|
|
|
|
|
2015-03-07 17:03:42 +01:00
|
|
|
// lock the mutex and initialize waiter (only once)
|
2015-06-26 01:26:23 +02:00
|
|
|
if (!lock) lock.lock();
|
2015-03-07 17:03:42 +01:00
|
|
|
|
2015-07-01 00:25:52 +02:00
|
|
|
// wait on an appropriate cond var for 1 ms or until a signal arrived
|
|
|
|
cvs[hash].wait_for(lock, std::chrono::milliseconds(1));
|
2014-10-16 18:29:41 +02:00
|
|
|
}
|
2014-10-10 23:33:57 +02:00
|
|
|
}
|
|
|
|
|
2015-07-01 00:25:52 +02:00
|
|
|
// signal all threads waiting on wait_op() with the same `addr` (signaling only hints those threads that corresponding conditions are *probably* met)
|
2015-06-26 01:26:23 +02:00
|
|
|
void notify(u32 addr);
|
2014-10-16 18:29:41 +02:00
|
|
|
};
|
// exit predicate that always requests exit (used by the try_* squeue operations)
extern const std::function<bool()> SQUEUE_ALWAYS_EXIT;

// exit predicate that never requests exit (used by the blocking squeue operations)
extern const std::function<bool()> SQUEUE_NEVER_EXIT;

// global exit condition checked by all squeue_t operations (defined out-of-line)
bool squeue_test_exit();
2014-12-25 21:30:34 +01:00
|
|
|
|
2014-12-24 23:24:17 +01:00
|
|
|
template<typename T, u32 sq_size = 256>
|
|
|
|
class squeue_t
|
|
|
|
{
|
|
|
|
struct squeue_sync_var_t
|
|
|
|
{
|
|
|
|
struct
|
|
|
|
{
|
|
|
|
u32 position : 31;
|
2014-12-25 23:58:43 +01:00
|
|
|
u32 pop_lock : 1;
|
2014-12-24 23:24:17 +01:00
|
|
|
};
|
|
|
|
struct
|
|
|
|
{
|
|
|
|
u32 count : 31;
|
2014-12-25 23:58:43 +01:00
|
|
|
u32 push_lock : 1;
|
2014-12-24 23:24:17 +01:00
|
|
|
};
|
|
|
|
};
|
|
|
|
|
2015-05-27 05:11:59 +02:00
|
|
|
atomic<squeue_sync_var_t> m_sync;
|
2014-12-24 23:24:17 +01:00
|
|
|
|
2014-12-28 14:15:22 +01:00
|
|
|
mutable std::mutex m_rcv_mutex;
|
|
|
|
mutable std::mutex m_wcv_mutex;
|
|
|
|
mutable std::condition_variable m_rcv;
|
|
|
|
mutable std::condition_variable m_wcv;
|
2014-12-24 23:24:17 +01:00
|
|
|
|
|
|
|
T m_data[sq_size];
|
|
|
|
|
2014-12-25 23:49:55 +01:00
|
|
|
enum squeue_sync_var_result : u32
|
|
|
|
{
|
|
|
|
SQSVR_OK = 0,
|
|
|
|
SQSVR_LOCKED = 1,
|
|
|
|
SQSVR_FAILED = 2,
|
|
|
|
};
|
|
|
|
|
2014-12-24 23:24:17 +01:00
|
|
|
public:
|
|
|
|
squeue_t()
|
2015-03-13 02:09:53 +01:00
|
|
|
: m_sync({})
|
2014-12-24 23:24:17 +01:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
u32 get_max_size() const
|
|
|
|
{
|
|
|
|
return sq_size;
|
|
|
|
}
|
|
|
|
|
2015-03-13 02:09:53 +01:00
|
|
|
bool is_full() const volatile
|
2014-12-24 23:24:17 +01:00
|
|
|
{
|
2015-03-13 02:09:53 +01:00
|
|
|
return m_sync.data.count == sq_size;
|
2014-12-24 23:24:17 +01:00
|
|
|
}
|
|
|
|
|
2015-01-16 18:09:53 +01:00
|
|
|
bool push(const T& data, const std::function<bool()>& test_exit)
|
2014-12-24 23:24:17 +01:00
|
|
|
{
|
|
|
|
u32 pos = 0;
|
|
|
|
|
2015-06-26 01:26:23 +02:00
|
|
|
while (u32 res = m_sync.atomic_op([&pos](squeue_sync_var_t& sync) -> u32
|
2014-12-24 23:24:17 +01:00
|
|
|
{
|
|
|
|
assert(sync.count <= sq_size);
|
|
|
|
assert(sync.position < sq_size);
|
|
|
|
|
2014-12-25 23:58:43 +01:00
|
|
|
if (sync.push_lock)
|
2014-12-24 23:24:17 +01:00
|
|
|
{
|
2014-12-25 23:49:55 +01:00
|
|
|
return SQSVR_LOCKED;
|
|
|
|
}
|
|
|
|
if (sync.count == sq_size)
|
|
|
|
{
|
|
|
|
return SQSVR_FAILED;
|
2014-12-24 23:24:17 +01:00
|
|
|
}
|
|
|
|
|
2014-12-25 23:58:43 +01:00
|
|
|
sync.push_lock = 1;
|
2014-12-24 23:24:17 +01:00
|
|
|
pos = sync.position + sync.count;
|
2014-12-25 23:49:55 +01:00
|
|
|
return SQSVR_OK;
|
2014-12-24 23:24:17 +01:00
|
|
|
}))
|
|
|
|
{
|
2015-01-16 18:09:53 +01:00
|
|
|
if (res == SQSVR_FAILED && (test_exit() || squeue_test_exit()))
|
2014-12-24 23:24:17 +01:00
|
|
|
{
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
std::unique_lock<std::mutex> wcv_lock(m_wcv_mutex);
|
|
|
|
m_wcv.wait_for(wcv_lock, std::chrono::milliseconds(1));
|
|
|
|
}
|
|
|
|
|
|
|
|
m_data[pos >= sq_size ? pos - sq_size : pos] = data;
|
|
|
|
|
|
|
|
m_sync.atomic_op([](squeue_sync_var_t& sync)
|
|
|
|
{
|
|
|
|
assert(sync.count <= sq_size);
|
|
|
|
assert(sync.position < sq_size);
|
2014-12-25 23:58:43 +01:00
|
|
|
assert(sync.push_lock);
|
|
|
|
sync.push_lock = 0;
|
2014-12-24 23:24:17 +01:00
|
|
|
sync.count++;
|
|
|
|
});
|
|
|
|
|
|
|
|
m_rcv.notify_one();
|
|
|
|
m_wcv.notify_one();
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2015-01-16 18:09:53 +01:00
|
|
|
bool push(const T& data, const volatile bool* do_exit)
|
2014-12-24 23:24:17 +01:00
|
|
|
{
|
2015-01-16 18:09:53 +01:00
|
|
|
return push(data, [do_exit](){ return do_exit && *do_exit; });
|
|
|
|
}
|
2014-12-24 23:24:17 +01:00
|
|
|
|
2015-05-28 17:14:22 +02:00
|
|
|
force_inline bool push(const T& data)
|
2015-01-16 18:09:53 +01:00
|
|
|
{
|
2015-01-17 17:14:58 +01:00
|
|
|
return push(data, SQUEUE_NEVER_EXIT);
|
2014-12-24 23:24:17 +01:00
|
|
|
}
|
|
|
|
|
2015-05-28 17:14:22 +02:00
|
|
|
force_inline bool try_push(const T& data)
|
2015-01-16 18:09:53 +01:00
|
|
|
{
|
2015-01-17 17:14:58 +01:00
|
|
|
return push(data, SQUEUE_ALWAYS_EXIT);
|
2015-01-16 18:09:53 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
bool pop(T& data, const std::function<bool()>& test_exit)
|
2014-12-24 23:24:17 +01:00
|
|
|
{
|
|
|
|
u32 pos = 0;
|
|
|
|
|
2015-06-26 01:26:23 +02:00
|
|
|
while (u32 res = m_sync.atomic_op([&pos](squeue_sync_var_t& sync) -> u32
|
2014-12-24 23:24:17 +01:00
|
|
|
{
|
|
|
|
assert(sync.count <= sq_size);
|
|
|
|
assert(sync.position < sq_size);
|
|
|
|
|
2014-12-25 23:49:55 +01:00
|
|
|
if (!sync.count)
|
|
|
|
{
|
|
|
|
return SQSVR_FAILED;
|
2014-12-24 23:24:17 +01:00
|
|
|
}
|
2014-12-25 23:58:43 +01:00
|
|
|
if (sync.pop_lock)
|
|
|
|
{
|
|
|
|
return SQSVR_LOCKED;
|
|
|
|
}
|
2014-12-24 23:24:17 +01:00
|
|
|
|
2014-12-25 23:58:43 +01:00
|
|
|
sync.pop_lock = 1;
|
2014-12-24 23:24:17 +01:00
|
|
|
pos = sync.position;
|
2014-12-25 23:49:55 +01:00
|
|
|
return SQSVR_OK;
|
2014-12-24 23:24:17 +01:00
|
|
|
}))
|
|
|
|
{
|
2015-01-16 18:09:53 +01:00
|
|
|
if (res == SQSVR_FAILED && (test_exit() || squeue_test_exit()))
|
2014-12-24 23:24:17 +01:00
|
|
|
{
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
std::unique_lock<std::mutex> rcv_lock(m_rcv_mutex);
|
|
|
|
m_rcv.wait_for(rcv_lock, std::chrono::milliseconds(1));
|
|
|
|
}
|
|
|
|
|
|
|
|
data = m_data[pos];
|
|
|
|
|
|
|
|
m_sync.atomic_op([](squeue_sync_var_t& sync)
|
|
|
|
{
|
|
|
|
assert(sync.count <= sq_size);
|
|
|
|
assert(sync.position < sq_size);
|
2014-12-25 23:58:43 +01:00
|
|
|
assert(sync.pop_lock);
|
|
|
|
sync.pop_lock = 0;
|
2014-12-24 23:24:17 +01:00
|
|
|
sync.position++;
|
|
|
|
sync.count--;
|
|
|
|
if (sync.position == sq_size)
|
|
|
|
{
|
|
|
|
sync.position = 0;
|
|
|
|
}
|
|
|
|
});
|
|
|
|
|
|
|
|
m_rcv.notify_one();
|
|
|
|
m_wcv.notify_one();
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2015-01-16 18:09:53 +01:00
|
|
|
bool pop(T& data, const volatile bool* do_exit)
|
2014-12-24 23:24:17 +01:00
|
|
|
{
|
2015-01-16 18:09:53 +01:00
|
|
|
return pop(data, [do_exit](){ return do_exit && *do_exit; });
|
|
|
|
}
|
2014-12-24 23:24:17 +01:00
|
|
|
|
2015-05-28 17:14:22 +02:00
|
|
|
force_inline bool pop(T& data)
|
2015-01-16 18:09:53 +01:00
|
|
|
{
|
2015-01-17 17:14:58 +01:00
|
|
|
return pop(data, SQUEUE_NEVER_EXIT);
|
2014-12-24 23:24:17 +01:00
|
|
|
}
|
|
|
|
|
2015-05-28 17:14:22 +02:00
|
|
|
force_inline bool try_pop(T& data)
|
2015-01-16 18:09:53 +01:00
|
|
|
{
|
2015-01-17 17:14:58 +01:00
|
|
|
return pop(data, SQUEUE_ALWAYS_EXIT);
|
2015-01-16 18:09:53 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
bool peek(T& data, u32 start_pos, const std::function<bool()>& test_exit)
|
2014-12-24 23:24:17 +01:00
|
|
|
{
|
|
|
|
assert(start_pos < sq_size);
|
|
|
|
u32 pos = 0;
|
|
|
|
|
2015-06-26 01:26:23 +02:00
|
|
|
while (u32 res = m_sync.atomic_op([&pos, start_pos](squeue_sync_var_t& sync) -> u32
|
2014-12-24 23:24:17 +01:00
|
|
|
{
|
|
|
|
assert(sync.count <= sq_size);
|
|
|
|
assert(sync.position < sq_size);
|
|
|
|
|
2014-12-25 23:49:55 +01:00
|
|
|
if (sync.count <= start_pos)
|
|
|
|
{
|
|
|
|
return SQSVR_FAILED;
|
2014-12-24 23:24:17 +01:00
|
|
|
}
|
2014-12-25 23:58:43 +01:00
|
|
|
if (sync.pop_lock)
|
|
|
|
{
|
|
|
|
return SQSVR_LOCKED;
|
|
|
|
}
|
2015-01-17 17:14:58 +01:00
|
|
|
|
2014-12-25 23:58:43 +01:00
|
|
|
sync.pop_lock = 1;
|
2014-12-24 23:24:17 +01:00
|
|
|
pos = sync.position + start_pos;
|
2014-12-25 23:49:55 +01:00
|
|
|
return SQSVR_OK;
|
2014-12-24 23:24:17 +01:00
|
|
|
}))
|
|
|
|
{
|
2015-01-16 18:09:53 +01:00
|
|
|
if (res == SQSVR_FAILED && (test_exit() || squeue_test_exit()))
|
2014-12-24 23:24:17 +01:00
|
|
|
{
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
std::unique_lock<std::mutex> rcv_lock(m_rcv_mutex);
|
|
|
|
m_rcv.wait_for(rcv_lock, std::chrono::milliseconds(1));
|
|
|
|
}
|
|
|
|
|
|
|
|
data = m_data[pos >= sq_size ? pos - sq_size : pos];
|
|
|
|
|
|
|
|
m_sync.atomic_op([](squeue_sync_var_t& sync)
|
|
|
|
{
|
|
|
|
assert(sync.count <= sq_size);
|
|
|
|
assert(sync.position < sq_size);
|
2014-12-25 23:58:43 +01:00
|
|
|
assert(sync.pop_lock);
|
|
|
|
sync.pop_lock = 0;
|
2014-12-24 23:24:17 +01:00
|
|
|
});
|
|
|
|
|
|
|
|
m_rcv.notify_one();
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2015-01-16 18:09:53 +01:00
|
|
|
bool peek(T& data, u32 start_pos, const volatile bool* do_exit)
|
|
|
|
{
|
|
|
|
return peek(data, start_pos, [do_exit](){ return do_exit && *do_exit; });
|
|
|
|
}
|
|
|
|
|
2015-05-28 17:14:22 +02:00
|
|
|
force_inline bool peek(T& data, u32 start_pos = 0)
|
2014-12-24 23:24:17 +01:00
|
|
|
{
|
2015-01-17 17:14:58 +01:00
|
|
|
return peek(data, start_pos, SQUEUE_NEVER_EXIT);
|
2015-01-16 18:09:53 +01:00
|
|
|
}
|
2014-12-24 23:24:17 +01:00
|
|
|
|
2015-05-28 17:14:22 +02:00
|
|
|
force_inline bool try_peek(T& data, u32 start_pos = 0)
|
2015-01-16 18:09:53 +01:00
|
|
|
{
|
2015-01-17 17:14:58 +01:00
|
|
|
return peek(data, start_pos, SQUEUE_ALWAYS_EXIT);
|
2014-12-24 23:24:17 +01:00
|
|
|
}
|
2015-01-02 00:41:29 +01:00
|
|
|
|
|
|
|
class squeue_data_t
|
|
|
|
{
|
|
|
|
T* const m_data;
|
|
|
|
const u32 m_pos;
|
|
|
|
const u32 m_count;
|
|
|
|
|
|
|
|
squeue_data_t(T* data, u32 pos, u32 count)
|
|
|
|
: m_data(data)
|
|
|
|
, m_pos(pos)
|
|
|
|
, m_count(count)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
public:
|
|
|
|
T& operator [] (u32 index)
|
|
|
|
{
|
|
|
|
assert(index < m_count);
|
|
|
|
index += m_pos;
|
|
|
|
index = index < sq_size ? index : index - sq_size;
|
|
|
|
return m_data[index];
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
void process(void(*proc)(squeue_data_t data))
|
|
|
|
{
|
|
|
|
u32 pos, count;
|
|
|
|
|
2015-06-26 01:26:23 +02:00
|
|
|
while (m_sync.atomic_op([&pos, &count](squeue_sync_var_t& sync) -> u32
|
2015-01-02 00:41:29 +01:00
|
|
|
{
|
|
|
|
assert(sync.count <= sq_size);
|
|
|
|
assert(sync.position < sq_size);
|
|
|
|
|
|
|
|
if (sync.pop_lock || sync.push_lock)
|
|
|
|
{
|
|
|
|
return SQSVR_LOCKED;
|
|
|
|
}
|
|
|
|
|
|
|
|
pos = sync.position;
|
|
|
|
count = sync.count;
|
|
|
|
sync.pop_lock = 1;
|
|
|
|
sync.push_lock = 1;
|
|
|
|
return SQSVR_OK;
|
|
|
|
}))
|
|
|
|
{
|
|
|
|
std::unique_lock<std::mutex> rcv_lock(m_rcv_mutex);
|
|
|
|
m_rcv.wait_for(rcv_lock, std::chrono::milliseconds(1));
|
|
|
|
}
|
|
|
|
|
|
|
|
proc(squeue_data_t(m_data, pos, count));
|
|
|
|
|
|
|
|
m_sync.atomic_op([](squeue_sync_var_t& sync)
|
|
|
|
{
|
|
|
|
assert(sync.count <= sq_size);
|
|
|
|
assert(sync.position < sq_size);
|
|
|
|
assert(sync.pop_lock && sync.push_lock);
|
|
|
|
sync.pop_lock = 0;
|
|
|
|
sync.push_lock = 0;
|
|
|
|
});
|
|
|
|
|
|
|
|
m_wcv.notify_one();
|
|
|
|
m_rcv.notify_one();
|
|
|
|
}
|
|
|
|
|
|
|
|
void clear()
|
|
|
|
{
|
2015-06-26 01:26:23 +02:00
|
|
|
while (m_sync.atomic_op([](squeue_sync_var_t& sync) -> u32
|
2015-01-02 00:41:29 +01:00
|
|
|
{
|
|
|
|
assert(sync.count <= sq_size);
|
|
|
|
assert(sync.position < sq_size);
|
|
|
|
|
|
|
|
if (sync.pop_lock || sync.push_lock)
|
|
|
|
{
|
|
|
|
return SQSVR_LOCKED;
|
|
|
|
}
|
|
|
|
|
|
|
|
sync.pop_lock = 1;
|
|
|
|
sync.push_lock = 1;
|
|
|
|
return SQSVR_OK;
|
|
|
|
}))
|
|
|
|
{
|
|
|
|
std::unique_lock<std::mutex> rcv_lock(m_rcv_mutex);
|
|
|
|
m_rcv.wait_for(rcv_lock, std::chrono::milliseconds(1));
|
|
|
|
}
|
|
|
|
|
|
|
|
m_sync.exchange({});
|
|
|
|
m_wcv.notify_one();
|
|
|
|
m_rcv.notify_one();
|
|
|
|
}
|
2014-12-24 23:24:17 +01:00
|
|
|
};
|