// rpcs3/Utilities/Thread.h

#pragma once

#include <atomic>
#include <cassert>
#include <chrono>
#include <condition_variable>
#include <functional>
#include <mutex>
#include <string>
#include <thread>
#include <vector>

// atomic_le_t is provided by the project header below; the u64 type alias,
// __forceinline and the global Emu object used further down are expected to
// come from other project headers (e.g. the precompiled header).
#include "Emu/Memory/atomic_type.h"

// Thread id of the main thread. Note: as a namespace-scope static in a header,
// each translation unit that includes this file gets its own copy.
static std::thread::id main_thread;

class NamedThreadBase
{
	std::string m_name;
	std::condition_variable m_signal_cv;
	std::mutex m_signal_mtx;

public:
	std::atomic<bool> m_tls_assigned;

	NamedThreadBase(const std::string& name) : m_name(name), m_tls_assigned(false)
	{
	}

	NamedThreadBase() : m_tls_assigned(false)
	{
	}

	virtual std::string GetThreadName() const;
	virtual void SetThreadName(const std::string& name);

	// Wait (up to `time` ms) until another thread calls Notify() on this thread.
	void WaitForAnySignal(u64 time = 1);
	void Notify();
};

NamedThreadBase* GetCurrentNamedThread();
void SetCurrentNamedThread(NamedThreadBase* value);
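
// The pair above maintains a per-thread "current thread" pointer (a TLS-style
// registry, judging by m_tls_assigned). Sketch (illustrative only): code
// running inside a named thread can query its own descriptor.
//
//   if (NamedThreadBase* self = GetCurrentNamedThread())
//   {
//       self->SetThreadName("Example Thread");
//   }
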
class ThreadBase : public NamedThreadBase
{
protected:
	std::atomic<bool> m_destroy;
	std::atomic<bool> m_alive;
	std::thread* m_executor;

	mutable std::mutex m_main_mutex;

	ThreadBase(const std::string& name);
	~ThreadBase();

public:
	void Start();
	void Stop(bool wait = true, bool send_destroy = true);

	bool Join() const;
	bool IsAlive() const;

	// Returns true once Stop() has requested destruction; Task() should poll this.
	bool TestDestroy() const;

	virtual void Task() = 0;
};
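
// A minimal subclass sketch (illustrative only, not part of this header):
//
//   struct MyWorker : ThreadBase
//   {
//       MyWorker() : ThreadBase("MyWorker") {}
//
//       void Task() override
//       {
//           while (!TestDestroy())
//           {
//               // do a unit of work, then yield/sleep as appropriate
//           }
//       }
//   };
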
class thread
{
	std::string m_name;
	std::thread m_thr;

public:
	thread(const std::string& name, std::function<void()> func);
	thread(const std::string& name);
	thread();

public:
	void start(std::function<void()> func);
	void detach();
	void join();

	bool joinable() const;
};
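
// Usage sketch (illustrative): construct with a name and a functor, or start
// later via start(). Whether the (name, func) constructor launches the thread
// immediately is an assumption here; see the definitions in Thread.cpp.
//
//   thread t("ExampleThread", []{ /* work */ });
//   if (t.joinable()) t.join();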

// Empty placeholder synchronization primitives, not yet implemented.
class slw_mutex_t
{
};

class slw_recursive_mutex_t
{
};

class slw_shared_mutex_t
{
};

class waiter_map_t
{
	// TODO: optimize (use custom lightweight readers-writer lock)
	std::mutex m_mutex;

	struct waiter_t
	{
		u64 signal_id;
		NamedThreadBase* thread;
	};

	std::vector<waiter_t> m_waiters;
	std::string m_name;

	// RAII-style registration helper: init() registers the current thread in
	// the map (defined out of line, together with the destructor).
	struct waiter_reg_t
	{
		NamedThreadBase* thread;
		const u64 signal_id;
		waiter_map_t& map;

		waiter_reg_t(waiter_map_t& map, u64 signal_id)
			: thread(nullptr)
			, signal_id(signal_id)
			, map(map)
		{
		}

		~waiter_reg_t();

		void init();
	};

	bool is_stopped(u64 signal_id);

public:
	waiter_map_t(const char* name)
		: m_name(name)
	{
	}

	// Wait until waiter_func() returns true; signal_id is an arbitrary number.
	template<typename WT> __forceinline void wait_op(u64 signal_id, const WT waiter_func)
	{
		// register waiter
		waiter_reg_t waiter(*this, signal_id);

		// check the condition, or bail out if the emulator is stopped
		while (!waiter_func() && !is_stopped(signal_id))
		{
			// initialize waiter (only once)
			waiter.init();

			// wait for 1 ms or until a signal arrives
			waiter.thread->WaitForAnySignal(1);
		}
	}

	// Signal all threads waiting in wait_op() with the same signal_id
	// (signaling only hints to those threads that the corresponding conditions are *probably* met).
	void notify(u64 signal_id);
};
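
// Usage sketch (illustrative; g_example_waiters, key, value_ready and
// publish_value are hypothetical names, not part of this header):
//
//   waiter_map_t g_example_waiters("example");
//
//   // waiting side: block until the predicate holds for this key
//   g_example_waiters.wait_op(key, [&]{ return value_ready(key); });
//
//   // producing side: change the state, then wake matching waiters
//   publish_value(key);
//   g_example_waiters.notify(key);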

// Fixed-capacity, thread-safe circular queue. All cursor/lock state is packed
// into one 64-bit word and updated with atomic operations; the condition
// variables are only used for 1 ms sleeps while the queue is busy/full/empty.
template<typename T, u32 sq_size = 256>
class squeue_t
{
	struct squeue_sync_var_t
	{
		// first 32 bits: read cursor + read-lock bit
		struct
		{
			u32 position : 31;
			u32 read_lock : 1;
		};
		// second 32 bits: element count + write-lock bit
		// (anonymous structs are a widely supported compiler extension)
		struct
		{
			u32 count : 31;
			u32 write_lock : 1;
		};
	};

	atomic_le_t<squeue_sync_var_t> m_sync;

	mutable std::mutex m_rcv_mutex, m_wcv_mutex;
	mutable std::condition_variable m_rcv, m_wcv;

	T m_data[sq_size];

public:
	squeue_t()
	{
		m_sync.write_relaxed({});
	}

	u32 get_max_size() const
	{
		return sq_size;
	}

	bool is_full() const volatile
	{
		return m_sync.read_relaxed().count == sq_size;
	}

	bool push(const T& data, const volatile bool* do_exit = nullptr)
	{
		u32 pos = 0;

		// acquire the write lock and reserve a slot, retrying while the queue is busy or full
		while (!m_sync.atomic_op_sync(true, [&pos](squeue_sync_var_t& sync) -> bool
		{
			assert(sync.count <= sq_size);
			assert(sync.position < sq_size);

			if (sync.write_lock || sync.count == sq_size)
			{
				return false;
			}

			sync.write_lock = 1;
			pos = sync.position + sync.count;
			return true;
		}))
		{
			if (Emu.IsStopped() || (do_exit && *do_exit))
			{
				return false;
			}

			std::unique_lock<std::mutex> wcv_lock(m_wcv_mutex);
			m_wcv.wait_for(wcv_lock, std::chrono::milliseconds(1));
		}

		// write the element (pos may wrap around the end of the ring)
		m_data[pos >= sq_size ? pos - sq_size : pos] = data;

		// publish: release the write lock and make the element visible
		m_sync.atomic_op([](squeue_sync_var_t& sync)
		{
			assert(sync.count <= sq_size);
			assert(sync.position < sq_size);
			assert(sync.write_lock);

			sync.write_lock = 0;
			sync.count++;
		});

		m_rcv.notify_one();
		m_wcv.notify_one();
		return true;
	}

	bool try_push(const T& data)
	{
		// do_exit always true: fail immediately instead of waiting
		static const volatile bool no_wait = true;

		return push(data, &no_wait);
	}

	bool pop(T& data, const volatile bool* do_exit = nullptr)
	{
		u32 pos = 0;

		// acquire the read lock and find the front element, retrying while the queue is busy or empty
		while (!m_sync.atomic_op_sync(true, [&pos](squeue_sync_var_t& sync) -> bool
		{
			assert(sync.count <= sq_size);
			assert(sync.position < sq_size);

			if (sync.read_lock || !sync.count)
			{
				return false;
			}

			sync.read_lock = 1;
			pos = sync.position;
			return true;
		}))
		{
			if (Emu.IsStopped() || (do_exit && *do_exit))
			{
				return false;
			}

			std::unique_lock<std::mutex> rcv_lock(m_rcv_mutex);
			m_rcv.wait_for(rcv_lock, std::chrono::milliseconds(1));
		}

		data = m_data[pos];

		// release the read lock and advance the ring cursor
		m_sync.atomic_op([](squeue_sync_var_t& sync)
		{
			assert(sync.count <= sq_size);
			assert(sync.position < sq_size);
			assert(sync.read_lock);

			sync.read_lock = 0;
			sync.position++;
			sync.count--;

			if (sync.position == sq_size)
			{
				sync.position = 0;
			}
		});

		m_rcv.notify_one();
		m_wcv.notify_one();
		return true;
	}

	bool try_pop(T& data)
	{
		// do_exit always true: fail immediately instead of waiting
		static const volatile bool no_wait = true;

		return pop(data, &no_wait);
	}

	void clear()
	{
		// take both locks to exclude concurrent readers and writers
		while (!m_sync.atomic_op_sync(true, [](squeue_sync_var_t& sync) -> bool
		{
			assert(sync.count <= sq_size);
			assert(sync.position < sq_size);

			if (sync.read_lock || sync.write_lock)
			{
				return false;
			}

			sync.read_lock = 1;
			sync.write_lock = 1;
			return true;
		}))
		{
			std::unique_lock<std::mutex> rcv_lock(m_rcv_mutex);
			m_rcv.wait_for(rcv_lock, std::chrono::milliseconds(1));
		}

		// reset position, count and both locks in one atomic store
		m_sync.exchange({});

		m_wcv.notify_one();
		m_rcv.notify_one();
	}

	bool peek(T& data, u32 start_pos = 0, const volatile bool* do_exit = nullptr)
	{
		assert(start_pos < sq_size);

		u32 pos = 0;

		// acquire the read lock once at least start_pos + 1 elements are available
		while (!m_sync.atomic_op_sync(true, [&pos, start_pos](squeue_sync_var_t& sync) -> bool
		{
			assert(sync.count <= sq_size);
			assert(sync.position < sq_size);

			if (sync.read_lock || sync.count <= start_pos)
			{
				return false;
			}

			sync.read_lock = 1;
			pos = sync.position + start_pos;
			return true;
		}))
		{
			if (Emu.IsStopped() || (do_exit && *do_exit))
			{
				return false;
			}

			std::unique_lock<std::mutex> rcv_lock(m_rcv_mutex);
			m_rcv.wait_for(rcv_lock, std::chrono::milliseconds(1));
		}

		// copy without consuming (pos may wrap around the end of the ring)
		data = m_data[pos >= sq_size ? pos - sq_size : pos];

		// release the read lock; position and count are unchanged
		m_sync.atomic_op([](squeue_sync_var_t& sync)
		{
			assert(sync.count <= sq_size);
			assert(sync.position < sq_size);
			assert(sync.read_lock);

			sync.read_lock = 0;
		});

		m_rcv.notify_one();
		return true;
	}

	bool try_peek(T& data, u32 start_pos = 0)
	{
		// do_exit always true: fail immediately instead of waiting
		static const volatile bool no_wait = true;

		return peek(data, start_pos, &no_wait);
	}
};
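
// Usage sketch (illustrative only): a bounded producer/consumer exchange.
// push()/pop() block in 1 ms waits while the queue is full/empty and return
// false if the emulator is stopped or *do_exit becomes true.
//
//   squeue_t<u32, 16> q;            // fixed capacity of 16 elements
//
//   q.push(42);                     // producer side
//
//   u32 v;
//   if (q.pop(v)) { /* v == 42 */ } // consumer side
//
//   u32 head;
//   if (q.try_peek(head)) { /* inspect without consuming */ }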