Mirror of https://github.com/RPCS3/rpcs3.git (synced 2024-11-25 12:12:50 +01:00)

Cleanup semaphore<> (sema.h) and mutex.h (shared_mutex)

Remove the semaphore_lock and writer_lock classes and replace them with std::lock_guard.
Change the semaphore<> interface to Lockable (plus an exotic try_unlock method).

This commit is contained in:
parent 5e556a87ff
commit ca5158a03e
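The idea behind "change the interface to Lockable" is that a type only needs lock(), unlock() and try_lock() members for std::lock_guard and std::unique_lock to manage it, so the hand-written RAII wrappers become redundant. A minimal sketch of such a semaphore follows; this is an illustration only (it uses std::mutex/std::condition_variable internally and made-up names), not the actual RPCS3 implementation, which is atomic/futex based.

#include <condition_variable>
#include <mutex>

class lockable_semaphore
{
	std::mutex m_mtx;
	std::condition_variable m_cv;
	int m_count = 1; // binary semaphore by default

public:
	void lock() // formerly wait(): acquire one unit
	{
		std::unique_lock lk(m_mtx);
		m_cv.wait(lk, [&] { return m_count > 0; });
		--m_count;
	}

	bool try_lock() // formerly try_wait()
	{
		std::lock_guard lk(m_mtx);
		if (m_count == 0) return false;
		--m_count;
		return true;
	}

	void unlock() // formerly post(): release one unit
	{
		{
			std::lock_guard lk(m_mtx);
			++m_count;
		}
		m_cv.notify_one();
	}
};

void critical_section(lockable_semaphore& sema)
{
	std::lock_guard lock(sema); // CTAD deduces std::lock_guard<lockable_semaphore>
	// ... work while the semaphore is held ...
}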
@@ -217,7 +217,7 @@ std::shared_ptr<fs::device_base> fs::device_manager::get_device(const std::strin
 std::shared_ptr<fs::device_base> fs::device_manager::set_device(const std::string& name, const std::shared_ptr<device_base>& device)
 {
-	writer_lock lock(m_mutex);
+	std::lock_guard lock(m_mutex);
 
 	return m_map[name] = device;
 }
@@ -177,7 +177,7 @@ struct MemoryManager : llvm::RTDyldMemoryManager
 	// Lock memory manager
-	writer_lock lock(s_mutex);
+	std::lock_guard lock(s_mutex);
@@ -213,7 +213,7 @@ struct MemoryManager : llvm::RTDyldMemoryManager
 	u8* allocateCodeSection(std::uintptr_t size, uint align, uint sec_id, llvm::StringRef sec_name) override
-	writer_lock lock(s_mutex);
+	std::lock_guard lock(s_mutex);
@@ -234,7 +234,7 @@ struct MemoryManager : llvm::RTDyldMemoryManager
 	u8* allocateDataSection(std::uintptr_t size, uint align, uint sec_id, llvm::StringRef sec_name, bool is_ro) override
-	writer_lock lock(s_mutex);
+	std::lock_guard lock(s_mutex);
@@ -259,7 +259,7 @@ struct MemoryManager : llvm::RTDyldMemoryManager
 	bool finalizeMemory(std::string* = nullptr) override
-	writer_lock lock(s_mutex);
+	std::lock_guard lock(s_mutex);
@@ -277,7 +277,7 @@ struct MemoryManager : llvm::RTDyldMemoryManager
-	writer_lock lock(s_mutex);
+	std::lock_guard lock(s_mutex);
@@ -427,7 +427,7 @@ struct EventListener : llvm::JITEventListener
-	writer_lock lock(s_mutex);
+	std::lock_guard lock(s_mutex);
@@ -649,7 +649,7 @@ u64 jit_compiler::get(const std::string& name)
 std::unordered_map<std::string, u64> jit_compiler::add(std::unordered_map<std::string, std::string> data)
-	writer_lock lock(s_mutex);
+	std::lock_guard lock(s_mutex);
@@ -188,7 +188,7 @@ namespace logs
 	void reset()
-	semaphore_lock lock(g_mutex);
+	std::lock_guard lock(g_mutex);
@@ -198,7 +198,7 @@ namespace logs
 	void set_level(const std::string& ch_name, level value)
-	semaphore_lock lock(g_mutex);
+	std::lock_guard lock(g_mutex);
@@ -208,7 +208,7 @@ namespace logs
 	if (!g_init)
-	semaphore_lock lock(g_mutex);
+	std::lock_guard lock(g_mutex);
@@ -224,7 +224,7 @@ void logs::listener::add(logs::listener* _new)
-	semaphore_lock lock(g_mutex);
+	std::lock_guard lock(g_mutex);
@@ -247,7 +247,7 @@ void logs::message::broadcast(const char* fmt, const fmt_type_info* sup, const u
-	semaphore_lock lock(g_mutex);
+	std::lock_guard lock(g_mutex);
@@ -278,7 +278,7 @@ void logs::message::broadcast(const char* fmt, const fmt_type_info* sup, const u
-	semaphore_lock lock(g_mutex);
+	std::lock_guard lock(g_mutex);
@@ -468,7 +468,7 @@ logs::file_writer::~file_writer()
 bool logs::file_writer::flush(u64 bufv)
-	semaphore_lock lock(m_m);
+	std::lock_guard lock(m_m);
@@ -1260,7 +1260,7 @@ bool handle_access_violation(u32 addr, bool is_writing, x64_context* context)
-	semaphore_lock pf_lock(pf_entries->pf_mutex);
+	std::lock_guard pf_lock(pf_entries->pf_mutex);
@@ -1689,7 +1689,7 @@ void thread_ctrl::finalize(std::exception_ptr eptr) noexcept
 	// Untangle circular reference, set exception
-	semaphore_lock{m_mutex}, m_self.reset(), m_exception = eptr;
+	std::lock_guard{m_mutex}, m_self.reset(), m_exception = eptr;
@@ -1715,7 +1715,7 @@ bool thread_ctrl::_wait_for(u64 usec)
 	void unlock()
-	ref.post();
+	ref.unlock();
@@ -1741,7 +1741,7 @@ bool thread_ctrl::_wait_for(u64 usec)
 	// Lock (semaphore)
-	_this->m_mutex.wait();
+	_this->m_mutex.lock();
@@ -1754,7 +1754,7 @@ bool thread_ctrl::_wait_for(u64 usec)
-	_this->m_mutex.post();
+	_this->m_mutex.unlock();
@@ -1769,7 +1769,7 @@ bool thread_ctrl::_wait_for(u64 usec)
-	m_mutex.post();
+	m_mutex.unlock();
@@ -1778,8 +1778,8 @@ void thread_ctrl::_notify(cond_variable thread_ctrl::* ptr)
-	m_mutex.wait();
+	m_mutex.lock();
-	m_mutex.post();
+	m_mutex.unlock();
@@ -1804,13 +1804,13 @@ thread_ctrl::~thread_ctrl()
 std::exception_ptr thread_ctrl::get_exception() const
-	semaphore_lock lock(m_mutex);
+	std::lock_guard lock(m_mutex);
 void thread_ctrl::set_exception(std::exception_ptr ptr)
-	semaphore_lock lock(m_mutex);
+	std::lock_guard lock(m_mutex);
@@ -1830,7 +1830,7 @@ void thread_ctrl::join()
-	semaphore_lock lock(m_mutex);
+	std::unique_lock lock(m_mutex);
@@ -1888,14 +1888,14 @@ void thread_ctrl::test()
-	_this->m_mutex.wait();
+	_this->m_mutex.lock();
-	_this->m_mutex.post();
+	_this->m_mutex.unlock();
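The finalize() hunk above uses a one-statement locking idiom: a temporary std::lock_guard built with braces lives until the end of the full expression, so the writes chained after the commas all happen while the mutex is held. A simplified illustration (variable names are made up, not the thread_ctrl members):

#include <mutex>

std::mutex g_mtx;
int g_value = 0;
bool g_flag = false;

void update()
{
	// The temporary guard is destroyed (and the mutex released) only after the
	// whole comma expression has been evaluated, so both writes are protected.
	std::lock_guard{g_mtx}, g_value = 42, g_flag = true;
}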
@@ -1,5 +1,6 @@
 #pragma once
 
+#include <mutex>
 #include "types.h"
 #include "Atomic.h"
 
@@ -113,25 +114,13 @@ class reader_lock final
 	shared_mutex& m_mutex;
 	bool m_upgraded = false;
 
-	void lock()
-	{
-		m_upgraded ? m_mutex.lock() : m_mutex.lock_shared();
-	}
-
-	void unlock()
-	{
-		m_upgraded ? m_mutex.unlock() : m_mutex.unlock_shared();
-	}
-
-	friend class cond_variable;
-
 public:
 	reader_lock(const reader_lock&) = delete;
 
 	explicit reader_lock(shared_mutex& mutex)
 		: m_mutex(mutex)
 	{
-		lock();
+		m_mutex.lock_shared();
 	}
 
 	// One-way lock upgrade
@@ -146,39 +135,7 @@ public:
 	~reader_lock()
 	{
-		unlock();
-	}
-};
-
-// Simplified exclusive (writer) lock implementation.
-class writer_lock final
-{
-	shared_mutex& m_mutex;
-
-	void lock()
-	{
-		m_mutex.lock();
-	}
-
-	void unlock()
-	{
-		m_mutex.unlock();
-	}
-
-	friend class cond_variable;
-
-public:
-	writer_lock(const writer_lock&) = delete;
-
-	explicit writer_lock(shared_mutex& mutex)
-		: m_mutex(mutex)
-	{
-		lock();
-	}
-
-	~writer_lock()
-	{
+		m_upgraded ? m_mutex.unlock() : m_mutex.unlock_shared();
 	}
 };
@@ -188,18 +145,6 @@ class safe_reader_lock final
 	shared_mutex& m_mutex;
 	bool m_is_owned;
 
-	void lock()
-	{
-		m_mutex.lock_shared();
-	}
-
-	void unlock()
-	{
-		m_mutex.unlock_shared();
-	}
-
-	friend class cond_variable;
-
 public:
 	safe_reader_lock(const safe_reader_lock&) = delete;
 
@@ -215,18 +160,6 @@ class safe_writer_lock final
 	bool m_is_owned;
 	bool m_is_upgraded;
 
-	void lock()
-	{
-		m_mutex.lock();
-	}
-
-	void unlock()
-	{
-		m_mutex.unlock();
-	}
-
-	friend class cond_variable;
-
 public:
 	safe_writer_lock(const safe_writer_lock&) = delete;
 
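writer_lock can be deleted because shared_mutex already exposes lock()/unlock() for exclusive ownership, which is all std::lock_guard needs; reader_lock stays only for the shared/upgradable path. A sketch of the resulting usage, with std::shared_mutex standing in for the project's shared_mutex (assumption: the custom class provides the same lock/unlock/lock_shared/unlock_shared members):

#include <shared_mutex>

std::shared_mutex s_mutex;
int s_data = 0;

void writer() // exclusive access: std::lock_guard replaces writer_lock
{
	std::lock_guard lock(s_mutex);
	++s_data;
}

int reader() // shared access: reader_lock (or std::shared_lock) still applies
{
	std::shared_lock lock(s_mutex);
	return s_data;
}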
@@ -1,5 +1,6 @@
 #pragma once
 
+#include <mutex>
 #include "types.h"
 #include "Atomic.h"
 
@@ -13,8 +14,6 @@ class semaphore_base
 	void imp_post(s32 _old);
 
-	friend class semaphore_lock;
-
 protected:
 	explicit constexpr semaphore_base(s32 value)
 		: m_value{value}
@@ -73,36 +72,36 @@ class semaphore final : public semaphore_base
 public:
 	// Default constructor (recommended)
 	constexpr semaphore()
-		: base{Def}
+		: base(Def)
 	{
 	}
 
 	// Explicit value constructor (not recommended)
 	explicit constexpr semaphore(s32 value)
-		: base{value}
+		: base(value)
 	{
 	}
 
 	// Obtain a semaphore
-	void wait()
+	void lock()
 	{
 		return base::wait();
 	}
 
 	// Try to obtain a semaphore
-	explicit_bool_t try_wait()
+	bool try_lock()
 	{
 		return base::try_wait();
 	}
 
 	// Return a semaphore
-	void post()
+	void unlock()
 	{
 		return base::post(Max);
 	}
 
 	// Try to return a semaphore
-	explicit_bool_t try_post()
+	bool try_unlock()
 	{
 		return base::try_post(Max);
 	}
@@ -113,34 +112,3 @@ public:
 		return Max;
 	}
 };
-
-class semaphore_lock
-{
-	semaphore_base& m_base;
-
-	void lock()
-	{
-		m_base.wait();
-	}
-
-	void unlock()
-	{
-		m_base.post(INT32_MAX);
-	}
-
-	friend class cond_variable;
-
-public:
-	explicit semaphore_lock(const semaphore_lock&) = delete;
-
-	semaphore_lock(semaphore_base& sema)
-		: m_base(sema)
-	{
-		lock();
-	}
-
-	~semaphore_lock()
-	{
-		unlock();
-	}
-};
@@ -65,7 +65,7 @@ inline int futex(int* uaddr, int futex_op, int val, const timespec* timeout, int
 	int operator()(int* uaddr, int futex_op, int val, const timespec* timeout, int*, uint val3)
 	{
-		std::unique_lock<std::mutex> lock(mutex);
+		std::unique_lock lock(mutex);
 
 		switch (futex_op)
 		{
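From here on, most hunks also drop explicit template arguments such as <std::mutex> from the standard lock types. That relies on C++17 class template argument deduction, so both spellings name the same type; a small illustration (the mutex and names are made up):

#include <mutex>

std::mutex m;

void f()
{
	std::unique_lock<std::mutex> a(m); // pre-C++17 spelling
	a.unlock();

	std::unique_lock b(m); // C++17 CTAD deduces std::unique_lock<std::mutex>
}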
@@ -280,7 +280,7 @@ public:
 	opts = nullptr;
 	av_dict_set(&opts, "refcounted_frames", "1", 0);
 	{
-		std::lock_guard<std::mutex> lock(g_mutex_avcodec_open2);
+		std::lock_guard lock(g_mutex_avcodec_open2);
 		// not multithread-safe (???)
 		err = avcodec_open2(ctx, codec, &opts);
 	}
@@ -320,7 +320,7 @@ void audio_config::on_task()
 	// send aftermix event (normal audio event)
-	semaphore_lock lock(mutex);
+	std::lock_guard lock(mutex);
@@ -748,7 +748,7 @@ error_code cellAudioSetNotifyEventQueue(u64 key)
-	semaphore_lock lock(g_audio->mutex);
+	std::lock_guard lock(g_audio->mutex);
@@ -783,7 +783,7 @@ error_code cellAudioRemoveNotifyEventQueue(u64 key)
-	semaphore_lock lock(g_audio->mutex);
+	std::lock_guard lock(g_audio->mutex);
@@ -291,7 +291,7 @@ s32 cellCameraInit()
-	semaphore_lock lock(g_camera->mutex);
+	std::lock_guard lock(g_camera->mutex);
@@ -429,7 +429,7 @@ s32 cellCameraOpenEx(s32 dev_num, vm::ptr<CellCameraInfoEx> info)
-	semaphore_lock lock(g_camera->mutex);
+	std::lock_guard lock(g_camera->mutex);
@@ -466,7 +466,7 @@ s32 cellCameraClose(s32 dev_num)
-	semaphore_lock lock(g_camera->mutex);
+	std::lock_guard lock(g_camera->mutex);
@@ -569,7 +569,7 @@ s32 cellCameraIsAttached(s32 dev_num)
-	semaphore_lock lock(g_camera->mutex);
+	std::lock_guard lock(g_camera->mutex);
@@ -603,7 +603,7 @@ s32 cellCameraIsOpen(s32 dev_num)
-	semaphore_lock lock(g_camera->mutex);
+	std::lock_guard lock(g_camera->mutex);
@@ -624,7 +624,7 @@ s32 cellCameraIsStarted(s32 dev_num)
-	semaphore_lock lock(g_camera->mutex);
+	std::lock_guard lock(g_camera->mutex);
@@ -657,7 +657,7 @@ s32 cellCameraGetAttribute(s32 dev_num, s32 attrib, vm::ptr<u32> arg1, vm::ptr<u
-	semaphore_lock lock(g_camera->mutex);
+	std::lock_guard lock(g_camera->mutex);
@@ -759,7 +759,7 @@ s32 cellCameraGetBufferSize(s32 dev_num, vm::ptr<CellCameraInfoEx> info)
-	semaphore_lock lock(g_camera->mutex);
+	std::lock_guard lock(g_camera->mutex);
@@ -809,7 +809,7 @@ s32 cellCameraGetBufferInfoEx(s32 dev_num, vm::ptr<CellCameraInfoEx> info)
-	semaphore_lock lock(g_camera->mutex);
+	std::lock_guard lock(g_camera->mutex);
@@ -898,7 +898,7 @@ s32 cellCameraStart(s32 dev_num)
-	semaphore_lock lock(g_camera->mutex);
+	std::lock_guard lock(g_camera->mutex);
@@ -963,7 +963,7 @@ s32 cellCameraReadEx(s32 dev_num, vm::ptr<CellCameraReadEx> read)
-	semaphore_lock lock(g_camera->mutex);
+	std::lock_guard lock(g_camera->mutex);
@@ -1041,7 +1041,7 @@ s32 cellCameraStop(s32 dev_num)
-	semaphore_lock lock(g_camera->mutex);
+	std::lock_guard lock(g_camera->mutex);
@@ -1166,7 +1166,7 @@ void camera_thread::on_task()
-	semaphore_lock lock(mutex_notify_data_map);
+	std::lock_guard lock(mutex_notify_data_map);
@@ -1229,7 +1229,7 @@ void camera_thread::on_init(const std::shared_ptr<void>& _this)
 void camera_thread::send_attach_state(bool attached)
-	semaphore_lock lock(mutex_notify_data_map);
+	std::lock_guard lock(mutex_notify_data_map);
@@ -1267,15 +1267,15 @@ void camera_thread::set_attr(s32 attrib, u32 arg1, u32 arg2)
-	semaphore_lock lock(mutex);
+	std::lock_guard lock(mutex);
 void camera_thread::add_queue(u64 key, u64 source, u64 flag)
-	semaphore_lock lock(mutex);
+	std::lock_guard lock(mutex);
-	semaphore_lock lock_data_map(mutex_notify_data_map);
+	std::lock_guard lock_data_map(mutex_notify_data_map);
@@ -1286,9 +1286,9 @@ void camera_thread::add_queue(u64 key, u64 source, u64 flag)
 void camera_thread::remove_queue(u64 key)
-	semaphore_lock lock(mutex);
+	std::lock_guard lock(mutex);
-	semaphore_lock lock_data_map(mutex_notify_data_map);
+	std::lock_guard lock_data_map(mutex_notify_data_map);
@@ -794,7 +794,7 @@ bool ElementaryStream::is_full(u32 space)
 bool ElementaryStream::isfull(u32 space)
-	std::lock_guard<std::mutex> lock(m_mutex);
+	std::lock_guard lock(m_mutex);
@@ -802,7 +802,7 @@ void ElementaryStream::push_au(u32 size, u64 dts, u64 pts, u64 userdata, bool ra
-	std::lock_guard<std::mutex> lock(m_mutex);
+	std::lock_guard lock(m_mutex);
@@ -860,7 +860,7 @@ void ElementaryStream::push(DemuxerStream& stream, u32 size)
 bool ElementaryStream::release()
-	std::lock_guard<std::mutex> lock(m_mutex);
+	std::lock_guard lock(m_mutex);
@@ -888,7 +888,7 @@ bool ElementaryStream::release()
 bool ElementaryStream::peek(u32& out_data, bool no_ex, u32& out_spec, bool update_index)
-	std::lock_guard<std::mutex> lock(m_mutex);
+	std::lock_guard lock(m_mutex);
@@ -920,7 +920,7 @@ bool ElementaryStream::peek(u32& out_data, bool no_ex, u32& out_spec, bool updat
 void ElementaryStream::reset()
-	std::lock_guard<std::mutex> lock(m_mutex);
+	std::lock_guard lock(m_mutex);
@@ -891,7 +891,7 @@ struct fs_aio_thread : ppu_thread
 	else
 	{
-		std::lock_guard<std::mutex> lock(file->mp->mutex);
+		std::lock_guard lock(file->mp->mutex);
 
 		const auto old_pos = file->file.pos(); file->file.seek(aio->offset);
@@ -162,7 +162,7 @@ s32 cellMouseGetData(u32 port_no, vm::ptr<CellMouseData> data)
-	semaphore_lock lock(handler->mutex);
+	std::lock_guard lock(handler->mutex);
@@ -210,7 +210,7 @@ s32 cellMouseGetDataList(u32 port_no, vm::ptr<CellMouseDataList> data)
-	semaphore_lock lock(handler->mutex);
+	std::lock_guard lock(handler->mutex);
@@ -66,7 +66,7 @@ static NEVER_INLINE s32 savedata_op(ppu_thread& ppu, u32 operation, u32 version,
 	PFuncFile funcFile, u32 container, u32 unknown, vm::ptr<void> userdata, u32 userId, PFuncDone funcDone)
 {
 	// TODO: check arguments
-	std::unique_lock<std::mutex> lock(g_savedata_mutex, std::try_to_lock);
+	std::unique_lock lock(g_savedata_mutex, std::try_to_lock);
 
 	if (!lock)
 	{
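The savedata path above takes the lock non-blockingly with std::try_to_lock and bails out when another operation is already running. A generic sketch of that pattern (the mutex and function names below are illustrative stand-ins, not the RPCS3 symbols):

#include <mutex>

std::mutex g_op_mutex; // stand-in for g_savedata_mutex

bool run_exclusive_op()
{
	std::unique_lock lock(g_op_mutex, std::try_to_lock); // try_lock(), no waiting

	if (!lock) // another operation holds the mutex
	{
		return false; // mapped to a "busy" error code in the real function
	}

	// ... perform the operation; the mutex is released when `lock` is destroyed ...
	return true;
}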
@@ -22,7 +22,7 @@ struct sysutil_cb_manager
 	std::function<s32(ppu_thread&)> get_cb()
-	std::lock_guard<std::mutex> lock(mutex);
+	std::lock_guard lock(mutex);
@@ -41,7 +41,7 @@ extern void sysutil_register_cb(std::function<s32(ppu_thread&)>&& cb)
-	std::lock_guard<std::mutex> lock(cbm->mutex);
+	std::lock_guard lock(cbm->mutex);
@@ -54,7 +54,7 @@ extern void sysutil_send_system_cmd(u64 status, u64 param)
-	std::lock_guard<std::mutex> lock(cbm->mutex);
+	std::lock_guard lock(cbm->mutex);
@@ -134,7 +134,7 @@ struct vdec_thread : ppu_thread
-	std::lock_guard<std::mutex> lock(g_mutex_avcodec_open2);
+	std::lock_guard lock(g_mutex_avcodec_open2);
@@ -354,7 +354,7 @@ struct vdec_thread : ppu_thread
-	std::lock_guard<std::mutex>{mutex}, out.push(std::move(frame));
+	std::lock_guard{mutex}, out.push(std::move(frame));
@@ -377,7 +377,7 @@ struct vdec_thread : ppu_thread
-	while (std::lock_guard<std::mutex>{mutex}, max_frames && out.size() > max_frames)
+	while (std::lock_guard{mutex}, max_frames && out.size() > max_frames)
@@ -494,7 +494,7 @@ s32 cellVdecClose(ppu_thread& ppu, u32 handle)
-	std::lock_guard<std::mutex> lock(vdec->mutex);
+	std::lock_guard lock(vdec->mutex);
@@ -583,7 +583,7 @@ s32 cellVdecGetPicture(u32 handle, vm::cptr<CellVdecPicFormat> format, vm::ptr<u
-	std::lock_guard<std::mutex> lock(vdec->mutex);
+	std::lock_guard lock(vdec->mutex);
@@ -713,7 +713,7 @@ s32 cellVdecGetPicItem(u32 handle, vm::pptr<CellVdecPicItem> picItem)
-	std::lock_guard<std::mutex> lock(vdec->mutex);
+	std::lock_guard lock(vdec->mutex);
@@ -81,7 +81,7 @@ s32 cellAANAddData(u32 aan_handle, u32 aan_port, u32 offset, vm::ptr<float> addr
-	std::lock_guard<std::mutex> lock(g_surmx.mutex);
+	std::lock_guard lock(g_surmx.mutex);
@@ -140,7 +140,7 @@ s32 cellAANConnect(u32 receive, u32 receivePortNo, u32 source, u32 sourcePortNo)
-	std::lock_guard<std::mutex> lock(g_surmx.mutex);
+	std::lock_guard lock(g_surmx.mutex);
@@ -158,7 +158,7 @@ s32 cellAANDisconnect(u32 receive, u32 receivePortNo, u32 source, u32 sourcePort
-	std::lock_guard<std::mutex> lock(g_surmx.mutex);
+	std::lock_guard lock(g_surmx.mutex);
@@ -181,7 +181,7 @@ s32 cellSSPlayerCreate(vm::ptr<u32> handle, vm::ptr<CellSSPlayerConfig> config)
-	std::lock_guard<std::mutex> lock(g_surmx.mutex);
+	std::lock_guard lock(g_surmx.mutex);
@@ -198,7 +198,7 @@ s32 cellSSPlayerRemove(u32 handle)
-	std::lock_guard<std::mutex> lock(g_surmx.mutex);
+	std::lock_guard lock(g_surmx.mutex);
@@ -217,7 +217,7 @@ s32 cellSSPlayerSetWave(u32 handle, vm::ptr<CellSSPlayerWaveParam> waveInfo, vm:
-	std::lock_guard<std::mutex> lock(g_surmx.mutex);
+	std::lock_guard lock(g_surmx.mutex);
@@ -240,7 +240,7 @@ s32 cellSSPlayerPlay(u32 handle, vm::ptr<CellSSPlayerRuntimeInfo> info)
-	std::lock_guard<std::mutex> lock(g_surmx.mutex);
+	std::lock_guard lock(g_surmx.mutex);
@@ -264,7 +264,7 @@ s32 cellSSPlayerStop(u32 handle, u32 mode)
-	std::lock_guard<std::mutex> lock(g_surmx.mutex);
+	std::lock_guard lock(g_surmx.mutex);
@@ -283,7 +283,7 @@ s32 cellSSPlayerSetParam(u32 handle, vm::ptr<CellSSPlayerRuntimeInfo> info)
-	std::lock_guard<std::mutex> lock(g_surmx.mutex);
+	std::lock_guard lock(g_surmx.mutex);
@@ -306,7 +306,7 @@ s32 cellSSPlayerGetState(u32 handle)
-	std::lock_guard<std::mutex> lock(g_surmx.mutex);
+	std::lock_guard lock(g_surmx.mutex);
@@ -354,7 +354,7 @@ struct surmixer_thread : ppu_thread
-	std::lock_guard<std::mutex> lock(g_surmx.mutex);
+	std::lock_guard lock(g_surmx.mutex);
@@ -589,7 +589,7 @@ s32 cellSurMixerSurBusAddData(u32 busNo, u32 offset, vm::ptr<float> addr, u32 sa
-	std::lock_guard<std::mutex> lock(g_surmx.mutex);
+	std::lock_guard lock(g_surmx.mutex);
@@ -1550,7 +1550,7 @@ extern void ppu_initialize(const ppu_module& info)
-	semaphore_lock lock(jmutex);
+	std::lock_guard lock(jmutex);
@@ -1568,7 +1568,7 @@ extern void ppu_initialize(const ppu_module& info)
 	// Allocate "core"
-	semaphore_lock jlock(jcores->sem);
+	std::lock_guard jlock(jcores->sem);
@@ -1586,7 +1586,7 @@ extern void ppu_initialize(const ppu_module& info)
 	// Proceed with original JIT instance
-	semaphore_lock lock(jmutex);
+	std::lock_guard lock(jmutex);
@@ -1605,7 +1605,7 @@ extern void ppu_initialize(const ppu_module& info)
-	semaphore_lock lock(jmutex);
+	std::lock_guard lock(jmutex);
@@ -83,7 +83,7 @@ spu_function_t spu_recompiler::compile(std::vector<u32>&& func_rv)
 	// Don't lock without shared runtime
-	std::unique_lock<shared_mutex> lock(m_spurt->m_mutex, std::defer_lock);
+	std::unique_lock lock(m_spurt->m_mutex, std::defer_lock);
@@ -2272,7 +2272,7 @@ public:
 	// Don't lock without shared runtime
-	std::unique_lock<shared_mutex> lock(m_spurt->m_mutex, std::defer_lock);
+	std::unique_lock lock(m_spurt->m_mutex, std::defer_lock);
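The SPU recompiler hunks above construct the lock with std::defer_lock so nothing is acquired up front; the mutex is only locked later when a shared runtime is actually in use. A generic sketch of that deferred-locking pattern (the mutex, flag, and function names are illustrative, not the RPCS3 ones):

#include <mutex>
#include <shared_mutex>

std::shared_mutex runtime_mutex; // stand-in for m_spurt->m_mutex

void compile_example(bool shared_runtime)
{
	std::unique_lock lock(runtime_mutex, std::defer_lock); // not locked yet

	if (shared_runtime)
	{
		lock.lock(); // only take the lock when the runtime is shared
	}

	// ... compile; if the lock was taken, it is released when `lock` is destroyed ...
}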
@@ -1694,7 +1694,7 @@ s64 SPUThread::get_ch_value(u32 ch)
-	std::shared_lock<notifier> pseudo_lock(vm::reservation_notifier(raddr, 128), std::try_to_lock);
+	std::shared_lock pseudo_lock(vm::reservation_notifier(raddr, 128), std::try_to_lock);
@@ -1786,7 +1786,7 @@ bool SPUThread::set_ch_value(u32 ch, u32 value)
-	const auto queue = (semaphore_lock{group->mutex}, this->spup[spup].lock());
+	const auto queue = (std::lock_guard{group->mutex}, this->spup[spup].lock());
@@ -1818,7 +1818,7 @@ bool SPUThread::set_ch_value(u32 ch, u32 value)
-	const auto queue = (semaphore_lock{group->mutex}, this->spup[spup].lock());
+	const auto queue = (std::lock_guard{group->mutex}, this->spup[spup].lock());
@@ -2154,7 +2154,7 @@ bool SPUThread::stop_and_signal(u32 code)
 	reader_lock rlock(id_manager::g_mutex);
-	semaphore_lock lock(group->mutex);
+	std::lock_guard lock(group->mutex);
@@ -2180,7 +2180,7 @@ bool SPUThread::stop_and_signal(u32 code)
-	semaphore_lock qlock(queue->mutex);
+	std::lock_guard qlock(queue->mutex);
@@ -2228,7 +2228,7 @@ bool SPUThread::stop_and_signal(u32 code)
-	semaphore_lock lock(group->mutex);
+	std::lock_guard lock(group->mutex);
@@ -2279,7 +2279,7 @@ bool SPUThread::stop_and_signal(u32 code)
-	semaphore_lock lock(group->mutex);
+	std::lock_guard lock(group->mutex);
@@ -2310,7 +2310,7 @@ bool SPUThread::stop_and_signal(u32 code)
-	semaphore_lock lock(group->mutex);
+	std::lock_guard lock(group->mutex);
@@ -1004,7 +1004,7 @@ DECLARE(lv2_obj::g_waiting);
 void lv2_obj::sleep_timeout(named_thread& thread, u64 timeout)
-	semaphore_lock lock(g_mutex);
+	std::lock_guard lock(g_mutex);
@@ -1058,7 +1058,7 @@ void lv2_obj::awake(cpu_thread& cpu, u32 prio)
-	semaphore_lock lock(g_mutex);
+	std::lock_guard lock(g_mutex);
@@ -80,7 +80,7 @@ error_code sys_cond_signal(ppu_thread& ppu, u32 cond_id)
-	semaphore_lock lock(cond.mutex->mutex);
+	std::lock_guard lock(cond.mutex->mutex);
@@ -119,7 +119,7 @@ error_code sys_cond_signal_all(ppu_thread& ppu, u32 cond_id)
-	semaphore_lock lock(cond.mutex->mutex);
+	std::lock_guard lock(cond.mutex->mutex);
@@ -156,7 +156,7 @@ error_code sys_cond_signal_to(ppu_thread& ppu, u32 cond_id, u32 thread_id)
-	semaphore_lock lock(cond.mutex->mutex);
+	std::lock_guard lock(cond.mutex->mutex);
@@ -221,7 +221,7 @@ error_code sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout)
-	semaphore_lock lock(cond->mutex->mutex);
+	std::lock_guard lock(cond->mutex->mutex);
@@ -247,7 +247,7 @@ error_code sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout)
-	semaphore_lock lock(cond->mutex->mutex);
+	std::lock_guard lock(cond->mutex->mutex);
@@ -31,7 +31,7 @@ std::shared_ptr<lv2_event_queue> lv2_event_queue::find(u64 ipc_key)
 bool lv2_event_queue::send(lv2_event event)
-	semaphore_lock lock(mutex);
+	std::lock_guard lock(mutex);
@@ -147,7 +147,7 @@ error_code sys_event_queue_destroy(ppu_thread& ppu, u32 equeue_id, s32 mode)
-	semaphore_lock lock(queue.mutex);
+	std::lock_guard lock(queue.mutex);
@@ -169,7 +169,7 @@ error_code sys_event_queue_destroy(ppu_thread& ppu, u32 equeue_id, s32 mode)
-	semaphore_lock lock(queue->mutex);
+	std::lock_guard lock(queue->mutex);
@@ -206,7 +206,7 @@ error_code sys_event_queue_tryreceive(u32 equeue_id, vm::ptr<sys_event_t> event_
-	semaphore_lock lock(queue->mutex);
+	std::lock_guard lock(queue->mutex);
@@ -237,7 +237,7 @@ error_code sys_event_queue_receive(ppu_thread& ppu, u32 equeue_id, vm::ptr<sys_e
-	semaphore_lock lock(queue.mutex);
+	std::lock_guard lock(queue.mutex);
@@ -277,7 +277,7 @@ error_code sys_event_queue_receive(ppu_thread& ppu, u32 equeue_id, vm::ptr<sys_e
-	semaphore_lock lock(queue->mutex);
+	std::lock_guard lock(queue->mutex);
@@ -306,7 +306,7 @@ error_code sys_event_queue_drain(u32 equeue_id)
-	semaphore_lock lock(queue.mutex);
+	std::lock_guard lock(queue.mutex);
@@ -369,7 +369,7 @@ error_code sys_event_port_connect_local(u32 eport_id, u32 equeue_id)
-	writer_lock lock(id_manager::g_mutex);
+	std::lock_guard lock(id_manager::g_mutex);
@@ -399,7 +399,7 @@ error_code sys_event_port_connect_ipc(u32 eport_id, u64 ipc_key)
-	writer_lock lock(id_manager::g_mutex);
+	std::lock_guard lock(id_manager::g_mutex);
@@ -427,7 +427,7 @@ error_code sys_event_port_disconnect(u32 eport_id)
-	writer_lock lock(id_manager::g_mutex);
+	std::lock_guard lock(id_manager::g_mutex);
@@ -121,7 +121,7 @@ error_code sys_event_flag_wait(ppu_thread& ppu, u32 id, u64 bitptn, u32 mode, vm
-	semaphore_lock lock(flag.mutex);
+	std::lock_guard lock(flag.mutex);
@@ -165,7 +165,7 @@ error_code sys_event_flag_wait(ppu_thread& ppu, u32 id, u64 bitptn, u32 mode, vm
-	semaphore_lock lock(flag->mutex);
+	std::lock_guard lock(flag->mutex);
@@ -244,7 +244,7 @@ error_code sys_event_flag_set(u32 id, u64 bitptn)
-	semaphore_lock lock(flag->mutex);
+	std::lock_guard lock(flag->mutex);
@@ -336,7 +336,7 @@ error_code sys_event_flag_cancel(ppu_thread& ppu, u32 id, vm::ptr<u32> num)
-	semaphore_lock lock(flag->mutex);
+	std::lock_guard lock(flag->mutex);
|
@@ -395,7 +395,7 @@ error_code sys_fs_read(u32 fd, vm::ptr<void> buf, u64 nbytes, vm::ptr<u64> nread
 return CELL_EBADF;
 }

-std::lock_guard<std::mutex> lock(file->mp->mutex);
+std::lock_guard lock(file->mp->mutex);

 *nread = file->op_read(buf, nbytes);

@@ -413,7 +413,7 @@ error_code sys_fs_write(u32 fd, vm::cptr<void> buf, u64 nbytes, vm::ptr<u64> nwr
 return CELL_EBADF;
 }

-std::lock_guard<std::mutex> lock(file->mp->mutex);
+std::lock_guard lock(file->mp->mutex);

 if (file->lock)
 {
@@ -638,7 +638,7 @@ error_code sys_fs_fstat(u32 fd, vm::ptr<CellFsStat> sb)
 return CELL_EBADF;
 }

-std::lock_guard<std::mutex> lock(file->mp->mutex);
+std::lock_guard lock(file->mp->mutex);

 const fs::stat_t& info = file->file.stat();

@@ -862,7 +862,7 @@ error_code sys_fs_fcntl(u32 fd, u32 op, vm::ptr<void> _arg, u32 _size)
 return CELL_EBADF;
 }

-std::lock_guard<std::mutex> lock(file->mp->mutex);
+std::lock_guard lock(file->mp->mutex);

 if (op == 0x8000000b && file->lock)
 {
@@ -1207,7 +1207,7 @@ error_code sys_fs_lseek(u32 fd, s64 offset, s32 whence, vm::ptr<u64> pos)
 return CELL_EBADF;
 }

-std::lock_guard<std::mutex> lock(file->mp->mutex);
+std::lock_guard lock(file->mp->mutex);

 const u64 result = file->file.seek(offset, static_cast<fs::seek_mode>(whence));

@@ -1337,7 +1337,7 @@ error_code sys_fs_ftruncate(u32 fd, u64 size)
 return CELL_EBADF;
 }

-std::lock_guard<std::mutex> lock(file->mp->mutex);
+std::lock_guard lock(file->mp->mutex);

 if (file->lock)
 {
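The sys_fs hunks above only drop the explicit template argument: since C++17, class template argument deduction lets std::lock_guard infer the mutex type from its constructor argument, so both spellings below are equivalent (g_example_mutex is an illustrative name):

#include <mutex>

std::mutex g_example_mutex; // illustrative name

void locked_explicitly()
{
	std::lock_guard<std::mutex> lock(g_example_mutex); // pre-C++17 spelling
}

void locked_with_deduction()
{
	std::lock_guard lock(g_example_mutex); // C++17 CTAD, identical behaviour
}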
@@ -81,7 +81,7 @@ error_code _sys_lwcond_signal(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u3

 if (cond.waiters)
 {
-semaphore_lock lock(cond.mutex);
+std::lock_guard lock(cond.mutex);

 cpu_thread* result = nullptr;

@@ -114,7 +114,7 @@ error_code _sys_lwcond_signal(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u3
 if (mode == 1)
 {
 verify(HERE), !mutex->signaled;
-semaphore_lock lock(mutex->mutex);
+std::lock_guard lock(mutex->mutex);
 mutex->sq.emplace_back(result);
 result = nullptr;
 mode = 2; // Enforce CELL_OK
@@ -174,7 +174,7 @@ error_code _sys_lwcond_signal_all(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id

 if (cond.waiters)
 {
-semaphore_lock lock(cond.mutex);
+std::lock_guard lock(cond.mutex);

 u32 result = 0;

@@ -190,7 +190,7 @@ error_code _sys_lwcond_signal_all(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id
 if (mode == 1)
 {
 verify(HERE), !mutex->signaled;
-semaphore_lock lock(mutex->mutex);
+std::lock_guard lock(mutex->mutex);
 mutex->sq.emplace_back(cpu);
 }
 else
@@ -241,14 +241,14 @@ error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id
 return nullptr;
 }

-semaphore_lock lock(cond.mutex);
+std::lock_guard lock(cond.mutex);

 // Add a waiter
 cond.waiters++;
 cond.sq.emplace_back(&ppu);
 cond.sleep(ppu, timeout);

-semaphore_lock lock2(mutex->mutex);
+std::lock_guard lock2(mutex->mutex);

 // Process lwmutex sleep queue
 if (const auto cpu = mutex->schedule<ppu_thread>(mutex->sq, mutex->protocol))
@@ -280,7 +280,7 @@ error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id

 if (passed >= timeout)
 {
-semaphore_lock lock(cond->mutex);
+std::lock_guard lock(cond->mutex);

 if (!cond->unqueue(cond->sq, &ppu))
 {
@@ -46,7 +46,7 @@ error_code _sys_lwmutex_destroy(u32 lwmutex_id)

 const auto mutex = idm::withdraw<lv2_obj, lv2_lwmutex>(lwmutex_id, [&](lv2_lwmutex& mutex) -> CellError
 {
-semaphore_lock lock(mutex.mutex);
+std::lock_guard lock(mutex.mutex);

 if (!mutex.sq.empty())
 {
@@ -83,7 +83,7 @@ error_code _sys_lwmutex_lock(ppu_thread& ppu, u32 lwmutex_id, u64 timeout)
 }
 }

-semaphore_lock lock(mutex.mutex);
+std::lock_guard lock(mutex.mutex);

 if (u32 value = mutex.signaled)
 {
@@ -118,7 +118,7 @@ error_code _sys_lwmutex_lock(ppu_thread& ppu, u32 lwmutex_id, u64 timeout)

 if (passed >= timeout)
 {
-semaphore_lock lock(mutex->mutex);
+std::lock_guard lock(mutex->mutex);

 if (!mutex->unqueue(mutex->sq, &ppu))
 {
@@ -177,7 +177,7 @@ error_code _sys_lwmutex_unlock(ppu_thread& ppu, u32 lwmutex_id)

 const auto mutex = idm::check<lv2_obj, lv2_lwmutex>(lwmutex_id, [&](lv2_lwmutex& mutex) -> cpu_thread*
 {
-semaphore_lock lock(mutex.mutex);
+std::lock_guard lock(mutex.mutex);

 if (const auto cpu = mutex.schedule<ppu_thread>(mutex.sq, mutex.protocol))
 {
@@ -195,7 +195,7 @@ error_code sys_mmapper_free_address(u32 addr)

 // If page fault notify exists and an address in this area is faulted, we can't free the memory.
 auto pf_events = fxm::get_always<page_fault_event_entries>();
-semaphore_lock pf_lock(pf_events->pf_mutex);
+std::lock_guard pf_lock(pf_events->pf_mutex);

 for (const auto& ev : pf_events->events)
 {
@@ -116,7 +116,7 @@ error_code sys_mutex_lock(ppu_thread& ppu, u32 mutex_id, u64 timeout)

 if (result == CELL_EBUSY)
 {
-semaphore_lock lock(mutex.mutex);
+std::lock_guard lock(mutex.mutex);

 if (mutex.try_own(ppu, ppu.id))
 {
@@ -158,7 +158,7 @@ error_code sys_mutex_lock(ppu_thread& ppu, u32 mutex_id, u64 timeout)

 if (passed >= timeout)
 {
-semaphore_lock lock(mutex->mutex);
+std::lock_guard lock(mutex->mutex);

 if (!mutex->unqueue(mutex->sq, &ppu))
 {
@@ -224,7 +224,7 @@ error_code sys_mutex_unlock(ppu_thread& ppu, u32 mutex_id)

 if (mutex.ret == CELL_EBUSY)
 {
-semaphore_lock lock(mutex->mutex);
+std::lock_guard lock(mutex->mutex);

 if (auto cpu = mutex->reown<ppu_thread>())
 {
@@ -90,7 +90,7 @@ static void network_clear_queue(ppu_thread& ppu)
 {
 idm::select<lv2_socket>([&](u32, lv2_socket& sock)
 {
-semaphore_lock lock(sock.mutex);
+std::lock_guard lock(sock.mutex);

 for (auto it = sock.queue.begin(); it != sock.queue.end();)
 {
@@ -137,7 +137,7 @@ extern void network_thread_init()
 ::poll(fds, socklist.size(), 1);
 #endif

-semaphore_lock lock(s_nw_mutex);
+std::lock_guard lock(s_nw_mutex);

 for (std::size_t i = 0; i < socklist.size(); i++)
 {
@@ -182,7 +182,7 @@ extern void network_thread_init()

 if (events)
 {
-semaphore_lock lock(socklist[i]->mutex);
+std::lock_guard lock(socklist[i]->mutex);

 for (auto it = socklist[i]->queue.begin(); events && it != socklist[i]->queue.end();)
 {
@@ -276,7 +276,7 @@ s32 sys_net_bnet_accept(ppu_thread& ppu, s32 s, vm::ptr<sys_net_sockaddr> addr,

 const auto sock = idm::check<lv2_socket>(s, [&](lv2_socket& sock)
 {
-semaphore_lock lock(sock.mutex);
+std::lock_guard lock(sock.mutex);

 //if (!(sock.events & lv2_socket::poll::read))
 {
@@ -401,7 +401,7 @@ s32 sys_net_bnet_bind(ppu_thread& ppu, s32 s, vm::cptr<sys_net_sockaddr> addr, u

 const auto sock = idm::check<lv2_socket>(s, [&](lv2_socket& sock) -> s32
 {
-semaphore_lock lock(sock.mutex);
+std::lock_guard lock(sock.mutex);

 if (::bind(sock.socket, (::sockaddr*)&name, namelen) == 0)
 {
@@ -432,7 +432,7 @@ s32 sys_net_bnet_connect(ppu_thread& ppu, s32 s, vm::ptr<sys_net_sockaddr> addr,

 const auto sock = idm::check<lv2_socket>(s, [&](lv2_socket& sock)
 {
-semaphore_lock lock(sock.mutex);
+std::lock_guard lock(sock.mutex);

 if (addr->sa_family == 0 && !((sys_net_sockaddr_in*)addr.get_ptr())->sin_port && !((sys_net_sockaddr_in*)addr.get_ptr())->sin_addr)
 {
@@ -569,7 +569,7 @@ s32 sys_net_bnet_getpeername(ppu_thread& ppu, s32 s, vm::ptr<sys_net_sockaddr> a

 const auto sock = idm::check<lv2_socket>(s, [&](lv2_socket& sock) -> s32
 {
-semaphore_lock lock(sock.mutex);
+std::lock_guard lock(sock.mutex);

 ::sockaddr_storage native_addr;
 ::socklen_t native_addrlen = sizeof(native_addr);
@@ -610,7 +610,7 @@ s32 sys_net_bnet_getsockname(ppu_thread& ppu, s32 s, vm::ptr<sys_net_sockaddr> a

 const auto sock = idm::check<lv2_socket>(s, [&](lv2_socket& sock) -> s32
 {
-semaphore_lock lock(sock.mutex);
+std::lock_guard lock(sock.mutex);

 ::sockaddr_storage native_addr;
 ::socklen_t native_addrlen = sizeof(native_addr);
@@ -663,7 +663,7 @@ s32 sys_net_bnet_getsockopt(ppu_thread& ppu, s32 s, s32 level, s32 optname, vm::

 const auto sock = idm::check<lv2_socket>(s, [&](lv2_socket& sock) -> s32
 {
-semaphore_lock lock(sock.mutex);
+std::lock_guard lock(sock.mutex);

 if (*optlen < sizeof(int))
 {
@@ -839,7 +839,7 @@ s32 sys_net_bnet_listen(ppu_thread& ppu, s32 s, s32 backlog)

 const auto sock = idm::check<lv2_socket>(s, [&](lv2_socket& sock) -> s32
 {
-semaphore_lock lock(sock.mutex);
+std::lock_guard lock(sock.mutex);

 if (::listen(sock.socket, backlog) == 0)
 {
@@ -884,7 +884,7 @@ s32 sys_net_bnet_recvfrom(ppu_thread& ppu, s32 s, vm::ptr<void> buf, u32 len, s3

 const auto sock = idm::check<lv2_socket>(s, [&](lv2_socket& sock)
 {
-semaphore_lock lock(sock.mutex);
+std::lock_guard lock(sock.mutex);

 //if (!(sock.events & lv2_socket::poll::read))
 {
@@ -1037,7 +1037,7 @@ s32 sys_net_bnet_sendto(ppu_thread& ppu, s32 s, vm::cptr<void> buf, u32 len, s32

 const auto sock = idm::check<lv2_socket>(s, [&](lv2_socket& sock)
 {
-semaphore_lock lock(sock.mutex);
+std::lock_guard lock(sock.mutex);

 //if (!(sock.events & lv2_socket::poll::write))
 {
@@ -1131,7 +1131,7 @@ s32 sys_net_bnet_setsockopt(ppu_thread& ppu, s32 s, s32 level, s32 optname, vm::

 const auto sock = idm::check<lv2_socket>(s, [&](lv2_socket& sock) -> s32
 {
-semaphore_lock lock(sock.mutex);
+std::lock_guard lock(sock.mutex);

 if (optlen >= sizeof(int))
 {
@@ -1313,7 +1313,7 @@ s32 sys_net_bnet_shutdown(ppu_thread& ppu, s32 s, s32 how)

 const auto sock = idm::check<lv2_socket>(s, [&](lv2_socket& sock) -> s32
 {
-semaphore_lock lock(sock.mutex);
+std::lock_guard lock(sock.mutex);

 #ifdef _WIN32
 const int native_how =
@@ -1421,7 +1421,7 @@ s32 sys_net_bnet_poll(ppu_thread& ppu, vm::ptr<sys_net_pollfd> fds, s32 nfds, s3

 if (nfds)
 {
-semaphore_lock nw_lock(s_nw_mutex);
+std::lock_guard nw_lock(s_nw_mutex);

 reader_lock lock(id_manager::g_mutex);

@@ -1503,7 +1503,7 @@ s32 sys_net_bnet_poll(ppu_thread& ppu, vm::ptr<sys_net_pollfd> fds, s32 nfds, s3

 if (auto sock = idm::check_unlocked<lv2_socket>(fds[i].fd))
 {
-semaphore_lock lock(sock->mutex);
+std::lock_guard lock(sock->mutex);

 bs_t<lv2_socket::poll> selected = +lv2_socket::poll::error;

@@ -1552,7 +1552,7 @@ s32 sys_net_bnet_poll(ppu_thread& ppu, vm::ptr<sys_net_pollfd> fds, s32 nfds, s3

 if (passed >= timeout)
 {
-semaphore_lock nw_lock(s_nw_mutex);
+std::lock_guard nw_lock(s_nw_mutex);

 if (signaled)
 {
@@ -1593,7 +1593,7 @@ s32 sys_net_bnet_select(ppu_thread& ppu, s32 nfds, vm::ptr<sys_net_fd_set> readf

 if (nfds >= 0)
 {
-semaphore_lock nw_lock(s_nw_mutex);
+std::lock_guard nw_lock(s_nw_mutex);

 reader_lock lock(id_manager::g_mutex);

@@ -1702,7 +1702,7 @@ s32 sys_net_bnet_select(ppu_thread& ppu, s32 nfds, vm::ptr<sys_net_fd_set> readf

 if (auto sock = idm::check_unlocked<lv2_socket>((lv2_socket::id_base & -1024) + i))
 {
-semaphore_lock lock(sock->mutex);
+std::lock_guard lock(sock->mutex);

 sock->events += selected;
 sock->queue.emplace_back(ppu.id, [sock, selected, i, &rread, &rwrite, &rexcept, &signaled, &ppu](bs_t<lv2_socket::poll> events)
@@ -1746,7 +1746,7 @@ s32 sys_net_bnet_select(ppu_thread& ppu, s32 nfds, vm::ptr<sys_net_fd_set> readf

 if (passed >= timeout)
 {
-semaphore_lock nw_lock(s_nw_mutex);
+std::lock_guard nw_lock(s_nw_mutex);

 if (signaled)
 {
@@ -45,7 +45,7 @@ void _sys_ppu_thread_exit(ppu_thread& ppu, u64 errorcode)
 }
 else if (jid != 0)
 {
-writer_lock lock(id_manager::g_mutex);
+std::lock_guard lock(id_manager::g_mutex);

 // Schedule joiner and unqueue
 lv2_obj::awake(*idm::check_unlocked<ppu_thread>(jid), -2);
@@ -91,7 +91,7 @@ error_code sys_rwlock_rlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout)
 }
 }

-semaphore_lock lock(rwlock.mutex);
+std::lock_guard lock(rwlock.mutex);

 const s64 _old = rwlock.owner.fetch_op([&](s64& val)
 {
@@ -135,7 +135,7 @@ error_code sys_rwlock_rlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout)

 if (passed >= timeout)
 {
-semaphore_lock lock(rwlock->mutex);
+std::lock_guard lock(rwlock->mutex);

 if (!rwlock->unqueue(rwlock->rq, &ppu))
 {
@@ -220,7 +220,7 @@ error_code sys_rwlock_runlock(ppu_thread& ppu, u32 rw_lock_id)
 }
 else
 {
-semaphore_lock lock(rwlock->mutex);
+std::lock_guard lock(rwlock->mutex);

 // Remove one reader
 const s64 _old = rwlock->owner.fetch_op([](s64& val)
@@ -276,7 +276,7 @@ error_code sys_rwlock_wlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout)
 return val;
 }

-semaphore_lock lock(rwlock.mutex);
+std::lock_guard lock(rwlock.mutex);

 const s64 _old = rwlock.owner.fetch_op([&](s64& val)
 {
@@ -324,7 +324,7 @@ error_code sys_rwlock_wlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout)

 if (passed >= timeout)
 {
-semaphore_lock lock(rwlock->mutex);
+std::lock_guard lock(rwlock->mutex);

 if (!rwlock->unqueue(rwlock->wq, &ppu))
 {
@@ -414,7 +414,7 @@ error_code sys_rwlock_wunlock(ppu_thread& ppu, u32 rw_lock_id)

 if (rwlock.ret & 1)
 {
-semaphore_lock lock(rwlock->mutex);
+std::lock_guard lock(rwlock->mutex);

 if (auto cpu = rwlock->schedule<ppu_thread>(rwlock->wq, rwlock->protocol))
 {
@@ -97,7 +97,7 @@ error_code sys_semaphore_wait(ppu_thread& ppu, u32 sem_id, u64 timeout)
 }
 }

-semaphore_lock lock(sema.mutex);
+std::lock_guard lock(sema.mutex);

 if (sema.val-- <= 0)
 {
@@ -129,7 +129,7 @@ error_code sys_semaphore_wait(ppu_thread& ppu, u32 sem_id, u64 timeout)

 if (passed >= timeout)
 {
-semaphore_lock lock(sem->mutex);
+std::lock_guard lock(sem->mutex);

 if (!sem->unqueue(sem->sq, &ppu))
 {
@@ -227,7 +227,7 @@ error_code sys_semaphore_post(ppu_thread& ppu, u32 sem_id, s32 count)
 }
 else
 {
-semaphore_lock lock(sem->mutex);
+std::lock_guard lock(sem->mutex);

 const s32 val = sem->val.fetch_op([=](s32& val)
 {
@@ -216,7 +216,7 @@ error_code sys_spu_thread_initialize(vm::ptr<u32> thread, u32 group_id, u32 spu_
 return CELL_ESRCH;
 }

-semaphore_lock lock(group->mutex);
+std::lock_guard lock(group->mutex);

 if (spu_num >= group->threads.size())
 {
@@ -263,7 +263,7 @@ error_code sys_spu_thread_set_argument(u32 id, vm::ptr<sys_spu_thread_argument>

 const auto group = thread->group;

-semaphore_lock lock(group->mutex);
+std::lock_guard lock(group->mutex);

 group->args[thread->index] = {arg->arg1, arg->arg2, arg->arg3, arg->arg4};

@@ -371,7 +371,7 @@ error_code sys_spu_thread_group_start(ppu_thread& ppu, u32 id)
 return CELL_ESTAT;
 }

-semaphore_lock lock(group->mutex);
+std::lock_guard lock(group->mutex);

 group->join_state = 0;

@@ -426,7 +426,7 @@ error_code sys_spu_thread_group_suspend(u32 id)
 return CELL_EINVAL;
 }

-semaphore_lock lock(group->mutex);
+std::lock_guard lock(group->mutex);

 if (group->run_state <= SPU_THREAD_GROUP_STATUS_INITIALIZED || group->run_state == SPU_THREAD_GROUP_STATUS_STOPPED)
 {
@@ -479,7 +479,7 @@ error_code sys_spu_thread_group_resume(u32 id)
 return CELL_EINVAL;
 }

-semaphore_lock lock(group->mutex);
+std::lock_guard lock(group->mutex);

 // SPU_THREAD_GROUP_STATUS_READY state is not used

@@ -567,7 +567,7 @@ error_code sys_spu_thread_group_terminate(u32 id, s32 value)
 }
 }

-semaphore_lock lock(group->mutex);
+std::lock_guard lock(group->mutex);

 if (group->run_state <= SPU_THREAD_GROUP_STATUS_INITIALIZED ||
 group->run_state == SPU_THREAD_GROUP_STATUS_WAITING ||
@@ -610,7 +610,7 @@ error_code sys_spu_thread_group_join(ppu_thread& ppu, u32 id, vm::ptr<u32> cause
 s32 exit_value = 0;

 {
-semaphore_lock lock(group->mutex);
+std::lock_guard lock(group->mutex);

 if (group->run_state < SPU_THREAD_GROUP_STATUS_INITIALIZED)
 {
@@ -647,7 +647,7 @@ error_code sys_spu_thread_group_join(ppu_thread& ppu, u32 id, vm::ptr<u32> cause
 }

 // TODO
-group->cv.wait(lock, 1000);
+group->cv.wait(group->mutex, 1000);
 thread_ctrl::test();
 }

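In the sys_spu_thread_group_join hunk above, the condition variable is now handed the mutex itself (group->cv.wait(group->mutex, 1000)) rather than the guard object. A hedged sketch of that calling convention; cond_variable here is a stand-in type and the microsecond timeout unit is assumed, not taken from the project:

#include <chrono>
#include <condition_variable>
#include <mutex>

// Stand-in sketch: wait() takes any BasicLockable (the mutex itself) plus a
// timeout, releasing the mutex while sleeping and reacquiring it on return.
struct cond_variable
{
	std::condition_variable_any m_cv;

	void wait(std::mutex& mutex, unsigned long long usec)
	{
		// Caller is expected to hold 'mutex' already (e.g. via std::lock_guard).
		m_cv.wait_for(mutex, std::chrono::microseconds(usec));
	}

	void notify_all()
	{
		m_cv.notify_all();
	}
};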
@@ -757,7 +757,7 @@ error_code sys_spu_thread_write_ls(u32 id, u32 lsa, u64 value, u32 type)

 const auto group = thread->group;

-semaphore_lock lock(group->mutex);
+std::lock_guard lock(group->mutex);

 if (group->run_state < SPU_THREAD_GROUP_STATUS_WAITING || group->run_state > SPU_THREAD_GROUP_STATUS_RUNNING)
 {
@@ -794,7 +794,7 @@ error_code sys_spu_thread_read_ls(u32 id, u32 lsa, vm::ptr<u64> value, u32 type)

 const auto group = thread->group;

-semaphore_lock lock(group->mutex);
+std::lock_guard lock(group->mutex);

 if (group->run_state < SPU_THREAD_GROUP_STATUS_WAITING || group->run_state > SPU_THREAD_GROUP_STATUS_RUNNING)
 {
@@ -826,7 +826,7 @@ error_code sys_spu_thread_write_spu_mb(u32 id, u32 value)

 const auto group = thread->group;

-semaphore_lock lock(group->mutex);
+std::lock_guard lock(group->mutex);

 if (group->run_state < SPU_THREAD_GROUP_STATUS_WAITING || group->run_state > SPU_THREAD_GROUP_STATUS_RUNNING)
 {
@@ -913,7 +913,7 @@ error_code sys_spu_thread_group_connect_event(u32 id, u32 eq, u32 et)
 return CELL_ESRCH;
 }

-semaphore_lock lock(group->mutex);
+std::lock_guard lock(group->mutex);

 switch (et)
 {
@@ -968,7 +968,7 @@ error_code sys_spu_thread_group_disconnect_event(u32 id, u32 et)
 return CELL_ESRCH;
 }

-semaphore_lock lock(group->mutex);
+std::lock_guard lock(group->mutex);

 switch (et)
 {
@@ -1030,7 +1030,7 @@ error_code sys_spu_thread_connect_event(u32 id, u32 eq, u32 et, u8 spup)
 return CELL_EINVAL;
 }

-semaphore_lock lock(thread->group->mutex);
+std::lock_guard lock(thread->group->mutex);

 auto& port = thread->spup[spup];

@@ -1061,7 +1061,7 @@ error_code sys_spu_thread_disconnect_event(u32 id, u32 et, u8 spup)
 return CELL_EINVAL;
 }

-semaphore_lock lock(thread->group->mutex);
+std::lock_guard lock(thread->group->mutex);

 auto& port = thread->spup[spup];

@@ -1092,7 +1092,7 @@ error_code sys_spu_thread_bind_queue(u32 id, u32 spuq, u32 spuq_num)
 return CELL_EINVAL;
 }

-semaphore_lock lock(thread->group->mutex);
+std::lock_guard lock(thread->group->mutex);

 for (auto& v : thread->spuq)
 {
@@ -1130,7 +1130,7 @@ error_code sys_spu_thread_unbind_queue(u32 id, u32 spuq_num)
 return CELL_ESRCH;
 }

-semaphore_lock lock(thread->group->mutex);
+std::lock_guard lock(thread->group->mutex);

 for (auto& v : thread->spuq)
 {
@@ -1162,7 +1162,7 @@ error_code sys_spu_thread_group_connect_event_all_threads(u32 id, u32 eq, u64 re
 return CELL_EINVAL;
 }

-semaphore_lock lock(group->mutex);
+std::lock_guard lock(group->mutex);

 if (group->run_state < SPU_THREAD_GROUP_STATUS_INITIALIZED)
 {
@@ -1232,7 +1232,7 @@ error_code sys_spu_thread_group_disconnect_event_all_threads(u32 id, u8 spup)
 return CELL_EINVAL;
 }

-semaphore_lock lock(group->mutex);
+std::lock_guard lock(group->mutex);

 for (auto& t : group->threads)
 {
@@ -30,7 +30,7 @@ void lv2_timer::on_task()

 if (_now >= next)
 {
-semaphore_lock lock(mutex);
+std::lock_guard lock(mutex);

 if (const auto queue = port.lock())
 {
@@ -91,7 +91,7 @@ error_code sys_timer_destroy(u32 timer_id)

 const auto timer = idm::withdraw<lv2_obj, lv2_timer>(timer_id, [&](lv2_timer& timer) -> CellError
 {
-semaphore_lock lock(timer.mutex);
+std::lock_guard lock(timer.mutex);

 if (!timer.port.expired())
 {
@@ -120,7 +120,7 @@ error_code sys_timer_get_information(u32 timer_id, vm::ptr<sys_timer_information

 const auto timer = idm::check<lv2_obj, lv2_timer>(timer_id, [&](lv2_timer& timer)
 {
-semaphore_lock lock(timer.mutex);
+std::lock_guard lock(timer.mutex);

 info->next_expire = timer.expire;
 info->period = timer.period;
@@ -155,7 +155,7 @@ error_code _sys_timer_start(u32 timer_id, u64 base_time, u64 period)

 const auto timer = idm::check<lv2_obj, lv2_timer>(timer_id, [&](lv2_timer& timer) -> CellError
 {
-semaphore_lock lock(timer.mutex);
+std::lock_guard lock(timer.mutex);

 if (timer.state != SYS_TIMER_STATE_STOP)
 {
@@ -194,7 +194,7 @@ error_code sys_timer_stop(u32 timer_id)

 const auto timer = idm::check<lv2_obj, lv2_timer>(timer_id, [](lv2_timer& timer)
 {
-semaphore_lock lock(timer.mutex);
+std::lock_guard lock(timer.mutex);

 timer.state = SYS_TIMER_STATE_STOP;
 });
@@ -220,7 +220,7 @@ error_code sys_timer_connect_event_queue(u32 timer_id, u32 queue_id, u64 name, u
 return CELL_ESRCH;
 }

-semaphore_lock lock(timer.mutex);
+std::lock_guard lock(timer.mutex);

 if (!timer.port.expired())
 {
@@ -254,7 +254,7 @@ error_code sys_timer_disconnect_event_queue(u32 timer_id)

 const auto timer = idm::check<lv2_obj, lv2_timer>(timer_id, [](lv2_timer& timer) -> CellError
 {
-semaphore_lock lock(timer.mutex);
+std::lock_guard lock(timer.mutex);

 if (timer.port.expired())
 {
@@ -35,7 +35,7 @@ error_code sys_tty_read(s32 ch, vm::ptr<char> buf, u32 len, vm::ptr<u32> preadle

 if (len > 0)
 {
-std::lock_guard<std::mutex> lock(g_tty_mutex);
+std::lock_guard lock(g_tty_mutex);

 if (g_tty_input[ch].size() > 0)
 {
@@ -21,7 +21,7 @@ public:
 template <typename F>
 static bool add(const K& ipc_key, F&& provider, std::shared_ptr<T>* out = nullptr)
 {
-writer_lock lock(g_ipc.m_mutex);
+std::lock_guard lock(g_ipc.m_mutex);

 // Get object location
 std::weak_ptr<T>& wptr = g_ipc.m_map[ipc_key];
@@ -55,7 +55,7 @@ public:
 // Unregister specified ipc_key, may return true even if the object doesn't exist anymore
 static bool remove(const K& ipc_key)
 {
-writer_lock lock(g_ipc.m_mutex);
+std::lock_guard lock(g_ipc.m_mutex);

 return g_ipc.m_map.erase(ipc_key) != 0;
 }
@@ -63,7 +63,7 @@ public:
 // Unregister specified ipc_key, return the object
 static std::shared_ptr<T> withdraw(const K& ipc_key)
 {
-writer_lock lock(g_ipc.m_mutex);
+std::lock_guard lock(g_ipc.m_mutex);

 const auto found = g_ipc.m_map.find(ipc_key);

@@ -307,7 +307,7 @@ class idm
 using traits = id_manager::id_traits<Type>;

 // Allocate new id
-writer_lock lock(id_manager::g_mutex);
+std::lock_guard lock(id_manager::g_mutex);

 if (auto* place = allocate_id(info, traits::base, traits::step, traits::count))
 {
@@ -576,7 +576,7 @@ public:
 {
 std::shared_ptr<void> ptr;
 {
-writer_lock lock(id_manager::g_mutex);
+std::lock_guard lock(id_manager::g_mutex);

 if (const auto found = find_id<T, Get>(id))
 {
@@ -598,7 +598,7 @@ public:
 {
 std::shared_ptr<void> ptr;
 {
-writer_lock lock(id_manager::g_mutex);
+std::lock_guard lock(id_manager::g_mutex);

 if (const auto found = find_id<T, Get>(id))
 {
@@ -622,7 +622,7 @@ public:

 std::shared_ptr<void> ptr;
 {
-writer_lock lock(id_manager::g_mutex);
+std::lock_guard lock(id_manager::g_mutex);

 if (const auto found = find_id<T, Get>(id))
 {
@@ -649,7 +649,7 @@ public:
 std::shared_ptr<void> ptr;
 FRT ret;
 {
-writer_lock lock(id_manager::g_mutex);
+std::lock_guard lock(id_manager::g_mutex);

 if (const auto found = find_id<T, Get>(id))
 {
@@ -700,7 +700,7 @@ public:
 {
 std::shared_ptr<T> ptr;
 {
-writer_lock lock(id_manager::g_mutex);
+std::lock_guard lock(id_manager::g_mutex);

 auto& pair = g_vec[get_type<T>()];

@@ -728,7 +728,7 @@ public:
 std::shared_ptr<T> ptr;
 std::shared_ptr<void> old;
 {
-writer_lock lock(id_manager::g_mutex);
+std::lock_guard lock(id_manager::g_mutex);

 auto& pair = g_vec[get_type<T>()];

@@ -754,7 +754,7 @@ public:
 {
 std::shared_ptr<T> ptr;
 {
-writer_lock lock(id_manager::g_mutex);
+std::lock_guard lock(id_manager::g_mutex);

 auto& pair = g_vec[get_type<T>()];

@@ -786,7 +786,7 @@ public:
 std::shared_ptr<T> ptr;
 std::shared_ptr<void> old;
 {
-writer_lock lock(id_manager::g_mutex);
+std::lock_guard lock(id_manager::g_mutex);

 auto& pair = g_vec[get_type<T>()];

@@ -820,7 +820,7 @@ public:
 {
 std::shared_ptr<T> ptr;
 {
-writer_lock lock(id_manager::g_mutex);
+std::lock_guard lock(id_manager::g_mutex);

 auto& pair = g_vec[get_type<T>()];

@@ -874,7 +874,7 @@ public:
 {
 std::shared_ptr<void> ptr;
 {
-writer_lock lock(id_manager::g_mutex);
+std::lock_guard lock(id_manager::g_mutex);
 ptr = std::move(g_vec[get_type<T>()].second);
 }

@@ -892,7 +892,7 @@ public:
 {
 std::shared_ptr<void> ptr;
 {
-writer_lock lock(id_manager::g_mutex);
+std::lock_guard lock(id_manager::g_mutex);
 ptr = std::move(g_vec[get_type<T>()].second);
 }

@@ -152,7 +152,7 @@ public:

 void Button(u8 button, bool pressed)
 {
-semaphore_lock lock(mutex);
+std::lock_guard lock(mutex);

 for (u32 p = 0; p < (u32)m_mice.size(); ++p)
 {
@@ -183,7 +183,7 @@ public:

 void Scroll(const s8 rotation)
 {
-semaphore_lock lock(mutex);
+std::lock_guard lock(mutex);

 for (u32 p = 0; p < (u32)m_mice.size(); ++p)
 {
@@ -210,7 +210,7 @@ public:

 void Move(const s32 x_pos_new, const s32 y_pos_new, const bool is_qt_fullscreen = false, s32 x_delta = 0, s32 y_delta = 0)
 {
-semaphore_lock lock(mutex);
+std::lock_guard lock(mutex);

 for (u32 p = 0; p < (u32)m_mice.size(); ++p)
 {
|
@ -347,7 +347,7 @@ public:
|
|||||||
bool busy = false;
|
bool busy = false;
|
||||||
{
|
{
|
||||||
u32 count = 0;
|
u32 count = 0;
|
||||||
writer_lock lock(m_decompiler_mutex);
|
std::lock_guard lock(m_decompiler_mutex);
|
||||||
|
|
||||||
while (!m_decompile_queue.empty())
|
while (!m_decompile_queue.empty())
|
||||||
{
|
{
|
||||||
@ -392,7 +392,7 @@ public:
|
|||||||
pipeline_storage_type pipeline = backend_traits::build_pipeline(link_entry->vp, link_entry->fp, link_entry->props, std::forward<Args>(args)...);
|
pipeline_storage_type pipeline = backend_traits::build_pipeline(link_entry->vp, link_entry->fp, link_entry->props, std::forward<Args>(args)...);
|
||||||
LOG_SUCCESS(RSX, "New program compiled successfully");
|
LOG_SUCCESS(RSX, "New program compiled successfully");
|
||||||
|
|
||||||
writer_lock lock(m_pipeline_mutex);
|
std::lock_guard lock(m_pipeline_mutex);
|
||||||
m_storage[key] = std::move(pipeline);
|
m_storage[key] = std::move(pipeline);
|
||||||
m_link_queue.erase(key);
|
m_link_queue.erase(key);
|
||||||
|
|
||||||
@ -444,7 +444,7 @@ public:
|
|||||||
m_program_compiled_flag = true;
|
m_program_compiled_flag = true;
|
||||||
|
|
||||||
pipeline_storage_type pipeline = backend_traits::build_pipeline(vertex_program, fragment_program, pipelineProperties, std::forward<Args>(args)...);
|
pipeline_storage_type pipeline = backend_traits::build_pipeline(vertex_program, fragment_program, pipelineProperties, std::forward<Args>(args)...);
|
||||||
writer_lock lock(m_pipeline_mutex);
|
std::lock_guard lock(m_pipeline_mutex);
|
||||||
auto &rtn = m_storage[key] = std::move(pipeline);
|
auto &rtn = m_storage[key] = std::move(pipeline);
|
||||||
LOG_SUCCESS(RSX, "New program compiled successfully");
|
LOG_SUCCESS(RSX, "New program compiled successfully");
|
||||||
return rtn;
|
return rtn;
|
||||||
|
@@ -1160,7 +1160,7 @@ namespace rsx
 template <typename ...Args>
 void lock_memory_region(image_storage_type* image, u32 memory_address, u32 memory_size, u32 width, u32 height, u32 pitch, Args&&... extras)
 {
-writer_lock lock(m_cache_mutex);
+std::lock_guard lock(m_cache_mutex);
 section_storage_type& region = find_cached_texture(memory_address, memory_size, false);

 if (region.get_context() != texture_upload_context::framebuffer_storage &&
@@ -1240,7 +1240,7 @@ namespace rsx

 void set_memory_read_flags(u32 memory_address, u32 memory_size, memory_read_flags flags)
 {
-writer_lock lock(m_cache_mutex);
+std::lock_guard lock(m_cache_mutex);

 if (flags != memory_read_flags::flush_always)
 m_flush_always_cache.erase(memory_address);
@@ -1259,7 +1259,7 @@ namespace rsx
 template <typename ...Args>
 bool flush_memory_to_cache(u32 memory_address, u32 memory_size, bool skip_synchronized, u32 allowed_types_mask, Args&&... extra)
 {
-writer_lock lock(m_cache_mutex);
+std::lock_guard lock(m_cache_mutex);
 section_storage_type* region = find_flushable_section(memory_address, memory_size);

 //Check if section was released, usually if cell overwrites a currently bound render target
@@ -1353,7 +1353,7 @@ namespace rsx
 if (!region_intersects_cache(address, range, is_writing))
 return{};

-writer_lock lock(m_cache_mutex);
+std::lock_guard lock(m_cache_mutex);
 return invalidate_range_impl_base(address, range, is_writing, false, allow_flush, std::forward<Args>(extras)...);
 }

@@ -1364,14 +1364,14 @@ namespace rsx
 if (!region_intersects_cache(address, range, is_writing))
 return {};

-writer_lock lock(m_cache_mutex);
+std::lock_guard lock(m_cache_mutex);
 return invalidate_range_impl_base(address, range, is_writing, discard, allow_flush, std::forward<Args>(extras)...);
 }

 template <typename ...Args>
 bool flush_all(thrashed_set& data, Args&&... extras)
 {
-writer_lock lock(m_cache_mutex);
+std::lock_guard lock(m_cache_mutex);

 if (m_cache_update_tag.load(std::memory_order_consume) == data.cache_tag)
 {
@@ -1483,7 +1483,7 @@ namespace rsx

 void purge_dirty()
 {
-writer_lock lock(m_cache_mutex);
+std::lock_guard lock(m_cache_mutex);

 //Reclaims all graphics memory consumed by dirty textures
 std::vector<u32> empty_addresses;
@@ -2033,7 +2033,7 @@ namespace rsx
 }

 //Do direct upload from CPU as the last resort
-writer_lock lock(m_cache_mutex);
+std::lock_guard lock(m_cache_mutex);
 const bool is_swizzled = !(tex.format() & CELL_GCM_TEXTURE_LN);
 auto subresources_layout = get_subresources_layout(tex);

@@ -2575,7 +2575,7 @@ namespace rsx
 {
 if (m_cache_update_tag.load(std::memory_order_consume) != m_flush_always_update_timestamp)
 {
-writer_lock lock(m_cache_mutex);
+std::lock_guard lock(m_cache_mutex);
 bool update_tag = false;

 for (const auto &It : m_flush_always_cache)
@@ -7,7 +7,7 @@

 void data_cache::store_and_protect_data(u64 key, u32 start, size_t size, u8 format, size_t w, size_t h, size_t d, size_t m, ComPtr<ID3D12Resource> data)
 {
-std::lock_guard<shared_mutex> lock(m_mut);
+std::lock_guard lock(m_mut);
 m_address_to_data[key] = std::make_pair(texture_entry(format, w, h, d, m), data);
 protect_data(key, start, size);
 }
@@ -25,7 +25,7 @@ void data_cache::protect_data(u64 key, u32 start, size_t size)
 bool data_cache::invalidate_address(u32 addr)
 {
 // In case 2 threads write to texture memory
-std::lock_guard<shared_mutex> lock(m_mut);
+std::lock_guard lock(m_mut);
 bool handled = false;
 auto It = m_protected_ranges.begin(), E = m_protected_ranges.end();
 for (; It != E;)
@@ -49,7 +49,7 @@ bool data_cache::invalidate_address(u32 addr)

 std::pair<texture_entry, ComPtr<ID3D12Resource> > *data_cache::find_data_if_available(u64 key)
 {
-std::lock_guard<shared_mutex> lock(m_mut);
+std::lock_guard lock(m_mut);
 auto It = m_address_to_data.find(key);
 if (It == m_address_to_data.end())
 return nullptr;
@@ -58,7 +58,7 @@ std::pair<texture_entry, ComPtr<ID3D12Resource> > *data_cache::find_data_if_avai

 void data_cache::unprotect_all()
 {
-std::lock_guard<shared_mutex> lock(m_mut);
+std::lock_guard lock(m_mut);
 for (auto &protectedTexture : m_protected_ranges)
 {
 u32 protectedRangeStart = std::get<1>(protectedTexture), protectedRangeSize = std::get<2>(protectedTexture);
@@ -322,7 +322,7 @@ void GLGSRender::end()
 {
 std::chrono::time_point<steady_clock> textures_start = steady_clock::now();

-std::lock_guard<shared_mutex> lock(m_sampler_mutex);
+std::lock_guard lock(m_sampler_mutex);
 void* unused = nullptr;
 bool update_framebuffer_sourced = false;

@@ -832,7 +832,7 @@ void GLGSRender::on_init_thread()
 GLuint handle = 0;
 auto &query = m_occlusion_query_data[i];
 glGenQueries(1, &handle);

 query.driver_handle = (u64)handle;
 query.pending = false;
 query.active = false;
@@ -1566,7 +1566,7 @@ void GLGSRender::flip(int buffer)
 glViewport(0, 0, m_frame->client_width(), m_frame->client_height());

 // Lock to avoid modification during run-update chain
-std::lock_guard<rsx::overlays::display_manager> lock(*m_overlay_manager);
+std::lock_guard lock(*m_overlay_manager);

 for (const auto& view : m_overlay_manager->get_views())
 {
@@ -1636,7 +1636,7 @@ bool GLGSRender::on_access_violation(u32 address, bool is_writing)
 return false;

 {
-std::lock_guard<shared_mutex> lock(m_sampler_mutex);
+std::lock_guard lock(m_sampler_mutex);
 m_samplers_dirty.store(true);
 }

@@ -1660,7 +1660,7 @@ void GLGSRender::on_invalidate_memory_range(u32 address_base, u32 size)
 {
 m_gl_texture_cache.purge_dirty();
 {
-std::lock_guard<shared_mutex> lock(m_sampler_mutex);
+std::lock_guard lock(m_sampler_mutex);
 m_samplers_dirty.store(true);
 }
 }
@@ -1670,7 +1670,7 @@ void GLGSRender::do_local_task(rsx::FIFO_state state)
 {
 if (!work_queue.empty())
 {
-std::lock_guard<shared_mutex> lock(queue_guard);
+std::lock_guard lock(queue_guard);

 work_queue.remove_if([](work_item &q) { return q.received; });

@@ -1715,7 +1715,7 @@ void GLGSRender::do_local_task(rsx::FIFO_state state)

 work_item& GLGSRender::post_flush_request(u32 address, gl::texture_cache::thrashed_set& flush_data)
 {
-std::lock_guard<shared_mutex> lock(queue_guard);
+std::lock_guard lock(queue_guard);

 work_queue.emplace_back();
 work_item &result = work_queue.back();
@@ -1743,7 +1743,7 @@ void GLGSRender::notify_tile_unbound(u32 tile)
 //m_rtts.invalidate_surface_address(addr, false);

 {
-std::lock_guard<shared_mutex> lock(m_sampler_mutex);
+std::lock_guard lock(m_sampler_mutex);
 m_samplers_dirty.store(true);
 }
 }
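The GLGSRender::flip hunk above also guards *m_overlay_manager directly, showing that the target of std::lock_guard need not be a mutex at all; a class can expose lock()/unlock() and act as its own guard target. A small sketch with an invented view_list class (names are illustrative, not the project's display_manager):

#include <mutex>
#include <vector>

// Illustrative only: the container serializes access to itself by forwarding
// lock()/unlock() to an internal mutex, so callers can write std::lock_guard lock(list).
class view_list
{
	std::mutex m_mutex;
	std::vector<int> m_views;

public:
	void lock() { m_mutex.lock(); }
	void unlock() { m_mutex.unlock(); }

	void add(int view)
	{
		std::lock_guard lock(*this); // guard the object itself
		m_views.push_back(view);
	}
};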
@@ -249,7 +249,7 @@ namespace rsx
 template <typename T>
 T* add(std::unique_ptr<T>& entry, bool remove_existing = true)
 {
-writer_lock lock(m_list_mutex);
+std::lock_guard lock(m_list_mutex);

 T* e = entry.get();
 e->uid = m_uid_ctr.fetch_add(1);
@@ -340,7 +340,7 @@ namespace rsx
 // Deallocate object. Object must first be removed via the remove() functions
 void dispose(const std::vector<u32>& uids)
 {
-writer_lock lock(m_list_mutex);
+std::lock_guard lock(m_list_mutex);

 if (!m_uids_to_remove.empty() || !m_type_ids_to_remove.empty())
 {
@@ -402,7 +402,7 @@ namespace rsx

 if (!m_uids_to_remove.empty() || !m_type_ids_to_remove.empty())
 {
-writer_lock lock(m_list_mutex);
+std::lock_guard lock(m_list_mutex);
 cleanup_internal();
 }
 }
@@ -688,7 +688,7 @@ namespace rsx
 {
 if (m_return_addr != -1)
 {
 // Only one layer is allowed in the call stack.
 LOG_ERROR(RSX, "FIFO: CALL found inside a subroutine. Discarding subroutine");
 internal_get = std::exchange(m_return_addr, -1);
 continue;
@@ -762,7 +762,7 @@ namespace rsx
 auto args = vm::ptr<u32>::make(args_address);
 u32 first_cmd = (cmd & 0xfffc) >> 2;

 // Stop command execution if put will be equal to get ptr during the execution itself
 if (count * 4 + 4 > put - internal_get)
 {
 count = (put - internal_get) / 4 - 1;
@@ -1351,7 +1351,7 @@ namespace rsx
 else
 {
 fmt::throw_exception("Disabled" HERE);
-//std::lock_guard<shared_mutex> lock{ m_mtx_task };
+//std::lock_guard lock(m_mtx_task);

 //internal_task_entry &front = m_internal_tasks.front();

@@ -1369,7 +1369,7 @@ namespace rsx
 {
 if (!m_invalidated_memory_ranges.empty())
 {
-writer_lock lock(m_mtx_task);
+std::lock_guard lock(m_mtx_task);

 for (const auto& range : m_invalidated_memory_ranges)
 {
@@ -1401,7 +1401,7 @@ namespace rsx

 //std::future<void> thread::add_internal_task(std::function<bool()> callback)
 //{
-// std::lock_guard<shared_mutex> lock{ m_mtx_task };
+// std::lock_guard lock(m_mtx_task);
 // m_internal_tasks.emplace_back(callback);

 // return m_internal_tasks.back().promise.get_future();
@@ -2711,7 +2711,7 @@ namespace rsx
 }
 else
 {
 for (const u32 end = ea + (size >> 20); ea < end;)
 {
 offsetTable.ioAddress[ea++] = 0xFFFF;
 offsetTable.eaAddress[io++] = 0xFFFF;
@@ -2719,7 +2719,7 @@ namespace rsx
 }
 }

-writer_lock lock(m_mtx_task);
+std::lock_guard lock(m_mtx_task);
 m_invalidated_memory_ranges.push_back({ base_address, size });
 }
 }
@ -98,7 +98,7 @@ namespace vk
 VkComponentMapping no_alpha = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_ONE };
 return std::make_pair(VK_FORMAT_R8_UNORM, no_alpha);
 }

 case rsx::surface_color_format::g8b8:
 {
 VkComponentMapping gb_rg = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G };
@ -283,7 +283,7 @@ namespace vk
 fmt::throw_exception("Unknown blend op: 0x%x" HERE, (u32)op);
 }
 }


 VkStencilOp get_stencil_op(rsx::stencil_op op)
 {
@ -320,7 +320,7 @@ namespace vk
 case rsx::cull_face::back: return VK_CULL_MODE_BACK_BIT;
 case rsx::cull_face::front: return VK_CULL_MODE_FRONT_BIT;
 case rsx::cull_face::front_and_back: return VK_CULL_MODE_FRONT_AND_BACK;
 default:
 fmt::throw_exception("Unknown cull face value: 0x%x" HERE, (u32)cfv);
 }
 }
@ -486,7 +486,7 @@ namespace
 VKGSRender::VKGSRender() : GSRender()
 {
 u32 instance_handle = m_thread_context.createInstance("RPCS3");

 if (instance_handle > 0)
 {
 m_thread_context.makeCurrentInstance(instance_handle);
@ -568,7 +568,7 @@ VKGSRender::VKGSRender() : GSRender()
 }

 m_current_command_buffer = &m_primary_cb_list[0];

 //Create secondary command_buffer for parallel operations
 m_secondary_command_buffer_pool.create((*m_device));
 m_secondary_command_buffer.create(m_secondary_command_buffer_pool, true);
@ -781,7 +781,7 @@ VKGSRender::~VKGSRender()
 //Device handles/contexts
 m_swapchain->destroy();
 m_thread_context.close();

 #if !defined(_WIN32) && !defined(__APPLE__) && defined(HAVE_VULKAN)
 if (m_display_handle)
 XCloseDisplay(m_display_handle);
@ -792,7 +792,7 @@ bool VKGSRender::on_access_violation(u32 address, bool is_writing)
 {
 vk::texture_cache::thrashed_set result;
 {
-std::lock_guard<shared_mutex> lock(m_secondary_cb_guard);
+std::lock_guard lock(m_secondary_cb_guard);
 result = std::move(m_texture_cache.invalidate_address(address, is_writing, false, m_secondary_command_buffer, m_swapchain->get_graphics_queue()));
 }

@ -800,7 +800,7 @@ bool VKGSRender::on_access_violation(u32 address, bool is_writing)
 return false;

 {
-std::lock_guard<shared_mutex> lock(m_sampler_mutex);
+std::lock_guard lock(m_sampler_mutex);
 m_samplers_dirty.store(true);
 }

@ -818,7 +818,7 @@ bool VKGSRender::on_access_violation(u32 address, bool is_writing)
 //Always submit primary cb to ensure state consistency (flush pending changes such as image transitions)
 vm::temporary_unlock();

-std::lock_guard<shared_mutex> lock(m_flush_queue_mutex);
+std::lock_guard lock(m_flush_queue_mutex);

 m_flush_requests.post(sync_timestamp == 0ull);
 has_queue_ref = true;
@ -876,13 +876,13 @@ bool VKGSRender::on_access_violation(u32 address, bool is_writing)

 void VKGSRender::on_invalidate_memory_range(u32 address_base, u32 size)
 {
-std::lock_guard<shared_mutex> lock(m_secondary_cb_guard);
+std::lock_guard lock(m_secondary_cb_guard);
 if (m_texture_cache.invalidate_range(address_base, size, true, true, false,
 m_secondary_command_buffer, m_swapchain->get_graphics_queue()).violation_handled)
 {
 m_texture_cache.purge_dirty();
 {
-std::lock_guard<shared_mutex> lock(m_sampler_mutex);
+std::lock_guard lock(m_sampler_mutex);
 m_samplers_dirty.store(true);
 }
 }
@ -896,7 +896,7 @@ void VKGSRender::notify_tile_unbound(u32 tile)
 //m_rtts.invalidate_surface_address(addr, false);

 {
-std::lock_guard<shared_mutex> lock(m_sampler_mutex);
+std::lock_guard lock(m_sampler_mutex);
 m_samplers_dirty.store(true);
 }
 }
@ -1187,7 +1187,7 @@ void VKGSRender::end()

 //Load textures
 {
-std::lock_guard<shared_mutex> lock(m_sampler_mutex);
+std::lock_guard lock(m_sampler_mutex);
 bool update_framebuffer_sourced = false;

 if (surface_store_tag != m_rtts.cache_tag)
@ -1842,7 +1842,7 @@ void VKGSRender::flush_command_queue(bool hard_sync)
 {
 //Mark this queue as pending
 m_current_command_buffer->pending = true;

 //Grab next cb in line and make it usable
 m_current_cb_index = (m_current_cb_index + 1) % VK_MAX_ASYNC_CB_COUNT;
 m_current_command_buffer = &m_primary_cb_list[m_current_cb_index];
@ -2058,7 +2058,7 @@ void VKGSRender::do_local_task(rsx::FIFO_state state)
 {
 if (m_flush_requests.pending())
 {
-std::lock_guard<shared_mutex> lock(m_flush_queue_mutex);
+std::lock_guard lock(m_flush_queue_mutex);

 //TODO: Determine if a hard sync is necessary
 //Pipeline barriers later may do a better job synchronizing than wholly stalling the pipeline
@ -2525,7 +2525,7 @@ void VKGSRender::close_and_submit_command_buffer(const std::vector<VkSemaphore>
 m_transform_constants_ring_info.dirty() ||
 m_texture_upload_buffer_ring_info.dirty())
 {
-std::lock_guard<shared_mutex> lock(m_secondary_cb_guard);
+std::lock_guard lock(m_secondary_cb_guard);
 m_secondary_command_buffer.begin();

 m_attrib_ring_info.sync(m_secondary_command_buffer);
@ -3084,7 +3084,7 @@ void VKGSRender::flip(int buffer)
 if (has_overlay)
 {
 // Lock to avoid modification during run-update chain
-std::lock_guard<rsx::overlays::display_manager> lock(*m_overlay_manager);
+std::lock_guard lock(*m_overlay_manager);

 for (const auto& view : m_overlay_manager->get_views())
 {
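The same deduction is what lets the overlay-manager lock above drop its explicit template argument: std::lock_guard accepts any type that satisfies BasicLockable (lock()/unlock()), not only std::mutex. A rough illustration with a hypothetical self-locking class (display_list and its members are invented for the example, not the rsx::overlays::display_manager API):

#include <mutex>

// Hypothetical stand-in for a class that guards its own contents,
// similar in spirit to a display manager holding overlay views.
class display_list
{
public:
	void lock() { m_mutex.lock(); }
	void unlock() { m_mutex.unlock(); }
	void refresh() { /* update views while locked */ }

private:
	std::mutex m_mutex;
};

void update(display_list& overlays)
{
	// CTAD deduces std::lock_guard<display_list>; the object locks itself.
	std::lock_guard lock(overlays);
	overlays.refresh();
}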
@ -37,11 +37,11 @@ static semaphore<> s_qt_mutex{};

 [[noreturn]] extern void report_fatal_error(const std::string& text)
 {
-s_qt_mutex.wait();
+s_qt_mutex.lock();

-if (!s_qt_init.try_wait())
+if (!s_qt_init.try_lock())
 {
-s_init.wait();
+s_init.lock();
 static int argc = 1;
 static char arg1[] = {"ERROR"};
 static char* argv[] = {arg1};
@ -95,8 +95,8 @@ int main(int argc, char** argv)
 QCoreApplication::setAttribute(Qt::AA_DisableWindowContextHelpButton);
 QCoreApplication::setAttribute(Qt::AA_DontCheckOpenGLContextThreadAffinity);

-s_init.post();
-s_qt_mutex.wait();
+s_init.unlock();
+s_qt_mutex.lock();
 rpcs3_app app(argc, argv);

 // Command line args
@ -135,7 +135,7 @@ int main(int argc, char** argv)
 });
 }

-s_qt_init.post();
-s_qt_mutex.post();
+s_qt_init.unlock();
+s_qt_mutex.unlock();
 return app.exec();
 }
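The renames in this file follow the commit's change of semaphore<> to a Lockable-style interface: wait() becomes lock(), post() becomes unlock(), and try_wait() becomes try_lock(), which also makes the type usable with standard scoped-lock helpers. A simplified sketch of a binary semaphore exposing that naming (illustrative only, not the project's sema.h implementation):

#include <mutex>
#include <condition_variable>

// Simplified binary semaphore with a Lockable-style interface,
// illustrating only the wait/post -> lock/unlock renaming.
class lockable_semaphore
{
public:
	void lock()
	{
		std::unique_lock<std::mutex> lk(m_mutex);
		m_cv.wait(lk, [&] { return m_count > 0; });
		--m_count;
	}

	bool try_lock()
	{
		std::lock_guard<std::mutex> lk(m_mutex);
		if (m_count == 0)
			return false;
		--m_count;
		return true;
	}

	void unlock()
	{
		{
			std::lock_guard<std::mutex> lk(m_mutex);
			++m_count;
		}
		m_cv.notify_one();
	}

private:
	std::mutex m_mutex;
	std::condition_variable m_cv;
	int m_count = 1;
};

void critical_section(lockable_semaphore& sema)
{
	// Because the member names now match BasicLockable, std::lock_guard applies directly.
	std::lock_guard lock(sema);
	// ... protected work ...
}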
@ -323,7 +323,7 @@ bool gs_frame::nativeEvent(const QByteArray &eventType, void *message, long *res
 while (wm_event_raised.load(std::memory_order_consume) && !Emu.IsStopped());

 {
-std::lock_guard<std::mutex> lock(wm_event_lock);
+std::lock_guard lock(wm_event_lock);

 // https://bugreports.qt.io/browse/QTBUG-69074?focusedCommentId=409797&page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-409797
 #if (QT_VERSION == QT_VERSION_CHECK(5, 11, 1))
@ -331,7 +331,7 @@ void log_frame::CreateAndConnectActions()
 std::string text = m_tty_input->text().toStdString();

 {
-std::lock_guard<std::mutex> lock(g_tty_mutex);
+std::lock_guard lock(g_tty_mutex);

 if (m_tty_channel == -1)
 {