Mirror of https://github.com/RPCS3/rpcs3.git (synced 2024-11-22 02:32:36 +01:00)

Commit 3aefa2b4e1: ThreadBase rewritten (wip)
Parent: b7a320fbbd
@@ -2,6 +2,11 @@
#include <emmintrin.h>

// temporarily (until noexcept is available); use `noexcept(true)` instead of `noexcept` if necessary
#if defined(_MSC_VER) && _MSC_VER <= 1800
#define noexcept _NOEXCEPT_OP
#endif

#if defined(_MSC_VER)
#define thread_local __declspec(thread)
#elif __APPLE__

@@ -14,7 +14,7 @@ std::unique_ptr<LogManager> g_log_manager;
u32 LogMessage::size() const
{
//1 byte for NULL terminator
return (u32)(sizeof(LogMessage::size_type) + sizeof(LogType) + sizeof(LogSeverity) + sizeof(std::string::value_type) * mText.size() + 1);
return (u32)(sizeof(LogMessage::size_type) + sizeof(LogType) + sizeof(Severity) + sizeof(std::string::value_type) * mText.size() + 1);
}

void LogMessage::serialize(char *output) const

@@ -24,8 +24,8 @@ void LogMessage::serialize(char *output) const
output += sizeof(LogMessage::size_type);
memcpy(output, &mType, sizeof(LogType));
output += sizeof(LogType);
memcpy(output, &mServerity, sizeof(LogSeverity));
output += sizeof(LogSeverity);
memcpy(output, &mServerity, sizeof(Severity));
output += sizeof(Severity);
memcpy(output, mText.c_str(), mText.size() );
output += sizeof(std::string::value_type)*mText.size();
*output = '\0';

@@ -38,13 +38,13 @@ LogMessage LogMessage::deserialize(char *input, u32* size_out)
input += sizeof(LogMessage::size_type);
msg.mType = *(reinterpret_cast<LogType*>(input));
input += sizeof(LogType);
msg.mServerity = *(reinterpret_cast<LogSeverity*>(input));
input += sizeof(LogSeverity);
msg.mServerity = *(reinterpret_cast<Severity*>(input));
input += sizeof(Severity);
if (msgSize > 9000)
{
int wtf = 6;
}
msg.mText.append(input, msgSize - 1 - sizeof(LogSeverity) - sizeof(LogType));
msg.mText.append(input, msgSize - 1 - sizeof(Severity) - sizeof(LogType));
if (size_out){(*size_out) = msgSize;}
return msg;
}

@@ -57,7 +57,7 @@ LogChannel::LogChannel() : LogChannel("unknown")
LogChannel::LogChannel(const std::string& name) :
name(name)
, mEnabled(true)
, mLogLevel(LogSeverityWarning)
, mLogLevel(Severity::Warning)
{}

void LogChannel::log(const LogMessage &msg)

@@ -186,22 +186,22 @@ void LogManager::log(LogMessage msg)
std::string prefix;
switch (msg.mServerity)
{
case LogSeveritySuccess:
case Severity::Success:
prefix = "S ";
break;
case LogSeverityNotice:
case Severity::Notice:
prefix = "! ";
break;
case LogSeverityWarning:
case Severity::Warning:
prefix = "W ";
break;
case LogSeverityError:
case Severity::Error:
prefix = "E ";
break;
}
if (NamedThreadBase* thr = GetCurrentNamedThread())
if (auto thr = get_current_thread_ctrl())
{
prefix += "{" + thr->GetThreadName() + "} ";
prefix += "{" + thr->get_name() + "} ";
}
msg.mText.insert(0, prefix);
msg.mText.append(1,'\n');

@@ -248,12 +248,12 @@ LogChannel &LogManager::getChannel(LogType type)
return mChannels[static_cast<u32>(type)];
}

void log_message(Log::LogType type, Log::LogSeverity sev, const char* text)
void log_message(Log::LogType type, Log::Severity sev, const char* text)
{
log_message(type, sev, std::string(text));
}

void log_message(Log::LogType type, Log::LogSeverity sev, std::string text)
void log_message(Log::LogType type, Log::Severity sev, std::string text)
{
if (g_log_manager)
{

@@ -265,12 +265,12 @@ void log_message(Log::LogType type, Log::LogSeverity sev, std::string text)
else
{
rMessageBox(text,
sev == LogSeverityNotice ? "Notice" :
sev == LogSeverityWarning ? "Warning" :
sev == LogSeveritySuccess ? "Success" :
sev == LogSeverityError ? "Error" : "Unknown",
sev == LogSeverityNotice ? rICON_INFORMATION :
sev == LogSeverityWarning ? rICON_EXCLAMATION :
sev == LogSeverityError ? rICON_ERROR : rICON_INFORMATION);
sev == Severity::Notice ? "Notice" :
sev == Severity::Warning ? "Warning" :
sev == Severity::Success ? "Success" :
sev == Severity::Error ? "Error" : "Unknown",
sev == Severity::Notice ? rICON_INFORMATION :
sev == Severity::Warning ? rICON_EXCLAMATION :
sev == Severity::Error ? rICON_ERROR : rICON_INFORMATION);
}
}
@@ -5,10 +5,10 @@
//first parameter is of type Log::LogType and text is of type std::string

#define LOG_SUCCESS(logType, text, ...) log_message(logType, Log::LogSeveritySuccess, text, ##__VA_ARGS__)
#define LOG_NOTICE(logType, text, ...) log_message(logType, Log::LogSeverityNotice, text, ##__VA_ARGS__)
#define LOG_WARNING(logType, text, ...) log_message(logType, Log::LogSeverityWarning, text, ##__VA_ARGS__)
#define LOG_ERROR(logType, text, ...) log_message(logType, Log::LogSeverityError, text, ##__VA_ARGS__)
#define LOG_SUCCESS(logType, text, ...) log_message(logType, Log::Severity::Success, text, ##__VA_ARGS__)
#define LOG_NOTICE(logType, text, ...) log_message(logType, Log::Severity::Notice, text, ##__VA_ARGS__)
#define LOG_WARNING(logType, text, ...) log_message(logType, Log::Severity::Warning, text, ##__VA_ARGS__)
#define LOG_ERROR(logType, text, ...) log_message(logType, Log::Severity::Error, text, ##__VA_ARGS__)

namespace Log
{

@@ -48,19 +48,19 @@ namespace Log
{ TTY, "TTY: " }
} };

enum LogSeverity : u32
enum class Severity : u32
{
LogSeverityNotice = 0,
LogSeverityWarning,
LogSeveritySuccess,
LogSeverityError,
Notice = 0,
Warning,
Success,
Error,
};

struct LogMessage
{
using size_type = u32;
LogType mType;
LogSeverity mServerity;
Severity mServerity;
std::string mText;

u32 size() const;

@@ -86,7 +86,7 @@ namespace Log
std::string name;
private:
bool mEnabled;
LogSeverity mLogLevel;
Severity mLogLevel;
std::mutex mListenerLock;
std::set<std::shared_ptr<LogListener>> mListeners;
};

@@ -126,10 +126,10 @@ static struct { inline operator Log::LogType() { return Log::LogType::SPU; } } S
static struct { inline operator Log::LogType() { return Log::LogType::ARMv7; } } ARMv7;
static struct { inline operator Log::LogType() { return Log::LogType::TTY; } } TTY;

void log_message(Log::LogType type, Log::LogSeverity sev, const char* text);
void log_message(Log::LogType type, Log::LogSeverity sev, std::string text);
void log_message(Log::LogType type, Log::Severity sev, const char* text);
void log_message(Log::LogType type, Log::Severity sev, std::string text);

template<typename... Args> never_inline void log_message(Log::LogType type, Log::LogSeverity sev, const char* fmt, Args... args)
template<typename... Args> never_inline void log_message(Log::LogType type, Log::Severity sev, const char* fmt, Args... args)
{
log_message(type, sev, fmt::Format(fmt, fmt::do_unveil(args)...));
}
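For context, a minimal usage sketch of the renamed severity API as it is reached through the logging macros above; the channel, message text and value are placeholders, not code from this commit:

// Illustrative only: GENERAL is assumed to be one of the Log::LogType helper objects,
// "unexpected flags" and flags are made up for the example.
// The macro expands to log_message(GENERAL, Log::Severity::Warning, "unexpected flags: 0x%x", flags);
LOG_WARNING(GENERAL, "unexpected flags: 0x%x", flags);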
@@ -277,6 +277,39 @@ namespace fmt
return Format(fmt, do_unveil(args)...);
}

struct exception
{
std::unique_ptr<char[]> message;

template<typename... Args> never_inline safe_buffers exception(const char* file, int line, const char* func, const char* text, Args... args)
{
const std::string data = format(text, args...) + format("\n(in file %s:%d, in function %s)", file, line, func);

message = std::make_unique<char[]>(data.size() + 1);

std::memcpy(message.get(), data.c_str(), data.size() + 1);
}

exception(const exception& other)
{
const std::size_t size = std::strlen(other);

message = std::make_unique<char[]>(size + 1);

std::memcpy(message.get(), other, size + 1);
}

exception(exception&& other)
{
message = std::move(other.message);
}

operator const char*() const
{
return message.get();
}
};

//convert a wxString to a std::string encoded in utf8
//CAUTION, only use this to interface with wxWidgets classes
std::string ToUTF8(const wxString& right);
@@ -1108,7 +1108,7 @@ const PVOID exception_handler = (atexit([]{ RemoveVectoredExceptionHandler(excep
if (pExp->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
(u32)addr64 == addr64 &&
GetCurrentNamedThread() &&
get_current_thread_ctrl() &&
handle_access_violation((u32)addr64, is_writing, pExp->ContextRecord))
{
return EXCEPTION_CONTINUE_EXECUTION;

@@ -1119,6 +1119,13 @@ const PVOID exception_handler = (atexit([]{ RemoveVectoredExceptionHandler(excep
}
}));

const auto exception_filter = SetUnhandledExceptionFilter([](PEXCEPTION_POINTERS pExp) -> LONG
{
_se_translator(pExp->ExceptionRecord->ExceptionCode, pExp);

return EXCEPTION_CONTINUE_SEARCH;
});

#else

void signal_handler(int sig, siginfo_t* info, void* uct)

@@ -1131,7 +1138,7 @@ void signal_handler(int sig, siginfo_t* info, void* uct)
const bool is_writing = ((ucontext_t*)uct)->uc_mcontext.gregs[REG_ERR] & 0x2;
#endif

if ((u32)addr64 == addr64 && GetCurrentNamedThread())
if ((u32)addr64 == addr64 && get_current_thread_ctrl())
{
if (handle_access_violation((u32)addr64, is_writing, (ucontext_t*)uct))
{

@@ -1158,19 +1165,23 @@ const int sigaction_result = []() -> int
#endif

thread_local NamedThreadBase* g_tls_this_thread = nullptr;
std::atomic<u32> g_thread_count(0);
thread_local thread_ctrl_t* g_tls_this_thread = nullptr;

NamedThreadBase* GetCurrentNamedThread()
const thread_ctrl_t* get_current_thread_ctrl()
{
return g_tls_this_thread;
}

void SetCurrentNamedThread(NamedThreadBase* value)
std::string thread_ctrl_t::get_name() const
{
return name();
}

void thread_ctrl_t::set_current()
{
const auto old_value = g_tls_this_thread;

if (old_value == value)
if (old_value == this)
{
return;
}
@ -1180,76 +1191,82 @@ void SetCurrentNamedThread(NamedThreadBase* value)
|
||||
vm::reservation_free();
|
||||
}
|
||||
|
||||
if (value && value->m_tls_assigned.exchange(true))
|
||||
if (true && assigned.exchange(true))
|
||||
{
|
||||
LOG_ERROR(GENERAL, "Thread '%s' was already assigned to g_tls_this_thread of another thread", value->GetThreadName());
|
||||
LOG_ERROR(GENERAL, "Thread '%s' was already assigned to g_tls_this_thread of another thread", get_name());
|
||||
g_tls_this_thread = nullptr;
|
||||
}
|
||||
else
|
||||
{
|
||||
g_tls_this_thread = value;
|
||||
g_tls_this_thread = this;
|
||||
}
|
||||
|
||||
if (old_value)
|
||||
{
|
||||
old_value->m_tls_assigned = false;
|
||||
old_value->assigned = false;
|
||||
}
|
||||
}
|
||||
|
||||
std::string NamedThreadBase::GetThreadName() const
|
||||
thread_t::thread_t(std::function<std::string()> name, std::function<void()> func)
|
||||
{
|
||||
return m_name;
|
||||
start(std::move(name), func);
|
||||
}
|
||||
|
||||
void NamedThreadBase::SetThreadName(const std::string& name)
|
||||
thread_t::~thread_t()
|
||||
{
|
||||
m_name = name;
|
||||
}
|
||||
|
||||
void NamedThreadBase::WaitForAnySignal(u64 time) // wait for Notify() signal or sleep
|
||||
{
|
||||
std::unique_lock<std::mutex> lock(m_signal_mtx);
|
||||
m_signal_cv.wait_for(lock, std::chrono::milliseconds(time));
|
||||
}
|
||||
|
||||
void NamedThreadBase::Notify() // wake up waiting thread or nothing
|
||||
{
|
||||
m_signal_cv.notify_one();
|
||||
}
|
||||
|
||||
ThreadBase::ThreadBase(const std::string& name)
|
||||
: NamedThreadBase(name)
|
||||
, m_executor(nullptr)
|
||||
, m_destroy(false)
|
||||
, m_alive(false)
|
||||
{
|
||||
}
|
||||
|
||||
ThreadBase::~ThreadBase()
|
||||
{
|
||||
if(IsAlive())
|
||||
Stop(false);
|
||||
|
||||
delete m_executor;
|
||||
m_executor = nullptr;
|
||||
}
|
||||
|
||||
void ThreadBase::Start()
|
||||
{
|
||||
if(m_executor) Stop();
|
||||
|
||||
std::lock_guard<std::mutex> lock(m_main_mutex);
|
||||
|
||||
m_destroy = false;
|
||||
m_alive = true;
|
||||
|
||||
m_executor = new std::thread([this]()
|
||||
if (m_thread)
|
||||
{
|
||||
SetCurrentThreadDebugName(GetThreadName().c_str());
|
||||
if (g_tls_this_thread != m_thread.get())
|
||||
{
|
||||
m_thread->m_thread.join();
|
||||
}
|
||||
else
|
||||
{
|
||||
m_thread->m_thread.detach();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
std::string thread_t::get_name() const
|
||||
{
|
||||
if (!m_thread)
|
||||
{
|
||||
throw EXCEPTION("Invalid thread");
|
||||
}
|
||||
|
||||
if (!m_thread->name)
|
||||
{
|
||||
throw EXCEPTION("Invalid name getter");
|
||||
}
|
||||
|
||||
return m_thread->name();
|
||||
}
|
||||
|
||||
std::atomic<u32> g_thread_count{ 0 };
|
||||
|
||||
void thread_t::start(std::function<std::string()> name, std::function<void()> func)
|
||||
{
|
||||
if (m_thread)
|
||||
{
|
||||
throw EXCEPTION("Thread already exists");
|
||||
}
|
||||
|
||||
// create new ctrl and assign it
|
||||
auto ctrl = std::make_shared<thread_ctrl_t>(std::move(name));
|
||||
|
||||
// start thread
|
||||
ctrl->m_thread = std::thread([ctrl, func]()
|
||||
{
|
||||
g_thread_count++;
|
||||
|
||||
SetCurrentThreadDebugName(ctrl->get_name().c_str());
|
||||
|
||||
#if defined(_MSC_VER)
|
||||
auto old_se_translator = _set_se_translator(_se_translator);
|
||||
#endif
|
||||
|
||||
#ifdef _WIN32
|
||||
auto old_se_translator = _set_se_translator(_se_translator);
|
||||
if (!exception_handler)
|
||||
if (!exception_handler || !exception_filter)
|
||||
{
|
||||
LOG_ERROR(GENERAL, "exception_handler not set");
|
||||
return;
|
||||
@ -1262,238 +1279,139 @@ void ThreadBase::Start()
|
||||
}
|
||||
#endif
|
||||
|
||||
SetCurrentNamedThread(this);
|
||||
g_thread_count++;
|
||||
// error handler
|
||||
const auto error = [&](const char* text)
|
||||
{
|
||||
log_message(GENERAL, Emu.IsStopped() ? Log::Severity::Warning : Log::Severity::Error, "Exception: %s", text);
|
||||
Emu.Pause();
|
||||
};
|
||||
|
||||
try
|
||||
{
|
||||
Task();
|
||||
}
|
||||
catch (const char* e)
|
||||
{
|
||||
LOG_ERROR(GENERAL, "Exception: %s", e);
|
||||
DumpInformation();
|
||||
Emu.Pause();
|
||||
}
|
||||
catch (const std::string& e)
|
||||
{
|
||||
LOG_ERROR(GENERAL, "Exception: %s", e);
|
||||
DumpInformation();
|
||||
Emu.Pause();
|
||||
}
|
||||
ctrl->set_current();
|
||||
|
||||
m_alive = false;
|
||||
SetCurrentNamedThread(nullptr);
|
||||
g_thread_count--;
|
||||
if (Ini.HLELogging.GetValue())
|
||||
{
|
||||
LOG_NOTICE(GENERAL, "Thread started");
|
||||
}
|
||||
|
||||
#ifdef _WIN32
|
||||
_set_se_translator(old_se_translator);
|
||||
#endif
|
||||
});
|
||||
}
|
||||
|
||||
void ThreadBase::Stop(bool wait, bool send_destroy)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(m_main_mutex);
|
||||
|
||||
if (send_destroy)
|
||||
m_destroy = true;
|
||||
|
||||
if(!m_executor)
|
||||
return;
|
||||
|
||||
if(wait && m_executor->joinable() && m_alive)
|
||||
{
|
||||
m_executor->join();
|
||||
}
|
||||
else
|
||||
{
|
||||
m_executor->detach();
|
||||
}
|
||||
|
||||
delete m_executor;
|
||||
m_executor = nullptr;
|
||||
}
|
||||
|
||||
bool ThreadBase::Join() const
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(m_main_mutex);
|
||||
if(m_executor->joinable() && m_alive && m_executor != nullptr)
|
||||
{
|
||||
m_executor->join();
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
bool ThreadBase::IsAlive() const
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(m_main_mutex);
|
||||
return m_alive;
|
||||
}
|
||||
|
||||
bool ThreadBase::TestDestroy() const
|
||||
{
|
||||
return m_destroy;
|
||||
}
|
||||
|
||||
thread_t::thread_t(const std::string& name, bool autojoin, std::function<void()> func)
|
||||
: m_name(name)
|
||||
, m_state(TS_NON_EXISTENT)
|
||||
, m_autojoin(autojoin)
|
||||
{
|
||||
start(func);
|
||||
}
|
||||
|
||||
thread_t::thread_t(const std::string& name, std::function<void()> func)
|
||||
: m_name(name)
|
||||
, m_state(TS_NON_EXISTENT)
|
||||
, m_autojoin(false)
|
||||
{
|
||||
start(func);
|
||||
}
|
||||
|
||||
thread_t::thread_t(const std::string& name)
|
||||
: m_name(name)
|
||||
, m_state(TS_NON_EXISTENT)
|
||||
, m_autojoin(false)
|
||||
{
|
||||
}
|
||||
|
||||
thread_t::thread_t()
|
||||
: m_state(TS_NON_EXISTENT)
|
||||
, m_autojoin(false)
|
||||
{
|
||||
}
|
||||
|
||||
void thread_t::set_name(const std::string& name)
|
||||
{
|
||||
m_name = name;
|
||||
}
|
||||
|
||||
thread_t::~thread_t()
|
||||
{
|
||||
if (m_state == TS_JOINABLE)
|
||||
{
|
||||
if (m_autojoin)
|
||||
{
|
||||
m_thr.join();
|
||||
}
|
||||
else
|
||||
{
|
||||
m_thr.detach();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void thread_t::start(std::function<void()> func)
|
||||
{
|
||||
if (m_state.exchange(TS_NON_EXISTENT) == TS_JOINABLE)
|
||||
{
|
||||
m_thr.join(); // forcefully join previously created thread
|
||||
}
|
||||
|
||||
std::string name = m_name;
|
||||
m_thr = std::thread([func, name]()
|
||||
{
|
||||
SetCurrentThreadDebugName(name.c_str());
|
||||
|
||||
#ifdef _WIN32
|
||||
auto old_se_translator = _set_se_translator(_se_translator);
|
||||
#endif
|
||||
|
||||
NamedThreadBase info(name);
|
||||
SetCurrentNamedThread(&info);
|
||||
g_thread_count++;
|
||||
|
||||
if (Ini.HLELogging.GetValue())
|
||||
{
|
||||
LOG_NOTICE(HLE, name + " started");
|
||||
}
|
||||
|
||||
try
|
||||
{
|
||||
func();
|
||||
}
|
||||
catch (const char* e)
|
||||
{
|
||||
LOG_ERROR(GENERAL, "Exception: %s", e);
|
||||
Emu.Pause();
|
||||
error(e);
|
||||
}
|
||||
catch (const std::string& e)
|
||||
{
|
||||
LOG_ERROR(GENERAL, "Exception: %s", e.c_str());
|
||||
Emu.Pause();
|
||||
error(e.c_str());
|
||||
}
|
||||
catch (const fmt::exception& e)
|
||||
{
|
||||
error(e);
|
||||
}
|
||||
|
||||
if (Emu.IsStopped())
|
||||
{
|
||||
LOG_NOTICE(HLE, name + " aborted");
|
||||
LOG_NOTICE(GENERAL, "Thread aborted");
|
||||
}
|
||||
else if (Ini.HLELogging.GetValue())
|
||||
{
|
||||
LOG_NOTICE(HLE, name + " ended");
|
||||
LOG_NOTICE(GENERAL, "Thread ended");
|
||||
}
|
||||
|
||||
SetCurrentNamedThread(nullptr);
|
||||
//ctrl->set_current(false);
|
||||
|
||||
g_thread_count--;
|
||||
|
||||
#ifdef _WIN32
|
||||
ctrl->joinable = false;
|
||||
|
||||
ctrl->join_cv.notify_all();
|
||||
|
||||
#if defined(_MSC_VER)
|
||||
_set_se_translator(old_se_translator);
|
||||
#endif
|
||||
});
|
||||
|
||||
if (m_state.exchange(TS_JOINABLE) == TS_JOINABLE)
|
||||
{
|
||||
assert(!"thread_t::start() failed"); // probably started from another thread
|
||||
}
|
||||
// set
|
||||
m_thread = std::move(ctrl);
|
||||
}
|
||||
|
||||
void thread_t::detach()
|
||||
{
|
||||
if (m_state.exchange(TS_NON_EXISTENT) == TS_JOINABLE)
|
||||
if (!m_thread)
|
||||
{
|
||||
m_thr.detach();
|
||||
throw EXCEPTION("Invalid thread");
|
||||
}
|
||||
else
|
||||
|
||||
const auto ctrl = std::move(m_thread);
|
||||
|
||||
ctrl->m_thread.detach();
|
||||
}
|
||||
|
||||
void thread_t::join(std::unique_lock<std::mutex>& lock)
|
||||
{
|
||||
if (!m_thread)
|
||||
{
|
||||
assert(!"thread_t::detach() failed"); // probably joined or detached
|
||||
throw EXCEPTION("Invalid thread");
|
||||
}
|
||||
|
||||
if (g_tls_this_thread == m_thread.get())
|
||||
{
|
||||
throw EXCEPTION("Deadlock");
|
||||
}
|
||||
|
||||
const auto ctrl = std::move(m_thread);
|
||||
|
||||
// wait for completion
|
||||
while (ctrl->joinable)
|
||||
{
|
||||
CHECK_EMU_STATUS;
|
||||
|
||||
ctrl->join_cv.wait_for(lock, std::chrono::milliseconds(1));
|
||||
}
|
||||
|
||||
ctrl->m_thread.join();
|
||||
}
|
||||
|
||||
void thread_t::join()
|
||||
{
|
||||
if (m_state.exchange(TS_NON_EXISTENT) == TS_JOINABLE)
|
||||
if (!m_thread)
|
||||
{
|
||||
m_thr.join();
|
||||
throw EXCEPTION("Invalid thread");
|
||||
}
|
||||
else
|
||||
|
||||
if (g_tls_this_thread == m_thread.get())
|
||||
{
|
||||
assert(!"thread_t::join() failed"); // probably joined or detached
|
||||
throw EXCEPTION("Deadlock");
|
||||
}
|
||||
|
||||
const auto ctrl = std::move(m_thread);
|
||||
|
||||
ctrl->m_thread.join();
|
||||
}
|
||||
|
||||
bool thread_t::joinable() const
|
||||
bool thread_t::is_current() const
|
||||
{
|
||||
//return m_thr.joinable();
|
||||
return m_state == TS_JOINABLE;
|
||||
if (!m_thread)
|
||||
{
|
||||
throw EXCEPTION("Invalid thread");
|
||||
}
|
||||
|
||||
return g_tls_this_thread == m_thread.get();
|
||||
}
|
||||
|
||||
bool waiter_map_t::is_stopped(u32 addr)
|
||||
void waiter_map_t::check_emu_status(u32 addr)
|
||||
{
|
||||
if (Emu.IsStopped())
|
||||
{
|
||||
LOG_WARNING(Log::HLE, "%s: waiter_op() aborted (addr=0x%x)", name.c_str(), addr);
|
||||
return true;
|
||||
throw EXCEPTION("Aborted (emulation stopped) (%s, addr=0x%x)", name, addr);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
void waiter_map_t::notify(u32 addr)
|
||||
{
|
||||
// signal appropriate condition variable
|
||||
cv[get_hash(addr)].notify_all();
|
||||
// signal an appropriate condition variable
|
||||
cvs[get_hash(addr)].notify_all();
|
||||
}
|
||||
|
||||
const std::function<bool()> SQUEUE_ALWAYS_EXIT = [](){ return true; };
|
||||
|
@@ -1,112 +1,95 @@
#pragma once

class NamedThreadBase
const class thread_ctrl_t* get_current_thread_ctrl();

// named thread control class
class thread_ctrl_t final
{
std::string m_name;
std::condition_variable m_signal_cv;
std::mutex m_signal_mtx;
friend class thread_t;

// thread handler
std::thread m_thread;

// name getter
const std::function<std::string()> name;

// condition variable, notified before thread exit
std::condition_variable join_cv;

// thread status (set to false after execution)
std::atomic<bool> joinable{ true };

// true if TLS of some thread points to owner
std::atomic<bool> assigned{ false };

// assign TLS
void set_current();

public:
std::atomic<bool> m_tls_assigned;

NamedThreadBase(const std::string& name) : m_name(name), m_tls_assigned(false)
thread_ctrl_t(std::function<std::string()> name)
: name(std::move(name))
{
}

NamedThreadBase() : m_tls_assigned(false)
{
}

virtual std::string GetThreadName() const;
virtual void SetThreadName(const std::string& name);

void WaitForAnySignal(u64 time = 1);
void Notify();

virtual void DumpInformation() {}
};

NamedThreadBase* GetCurrentNamedThread();
void SetCurrentNamedThread(NamedThreadBase* value);

class ThreadBase : public NamedThreadBase
{
protected:
std::atomic<bool> m_destroy;
std::atomic<bool> m_alive;
std::thread* m_executor;

mutable std::mutex m_main_mutex;

ThreadBase(const std::string& name);
~ThreadBase();

public:
void Start();
void Stop(bool wait = true, bool send_destroy = true);

bool Join() const;
bool IsAlive() const;
bool TestDestroy() const;

virtual void Task() = 0;
// get thread name
std::string get_name() const;
};

class thread_t
{
enum thread_state_t
{
TS_NON_EXISTENT,
TS_JOINABLE,
};

std::atomic<thread_state_t> m_state;
std::string m_name;
std::thread m_thr;
bool m_autojoin;
// pointer to managed resource (shared with actual thread)
std::shared_ptr<thread_ctrl_t> m_thread;

public:
thread_t(const std::string& name, bool autojoin, std::function<void()> func);
thread_t(const std::string& name, std::function<void()> func);
thread_t(const std::string& name);
thread_t();
~thread_t();
// thread mutex for external use
std::mutex mutex;

thread_t(const thread_t& right) = delete;
thread_t(thread_t&& right) = delete;

thread_t& operator =(const thread_t& right) = delete;
thread_t& operator =(thread_t&& right) = delete;
// thread condition variable for external use
std::condition_variable cv;

public:
void set_name(const std::string& name);
void start(std::function<void()> func);
// initialize in empty state
thread_t() = default;

// create named thread
thread_t(std::function<std::string()> name, std::function<void()> func);

// destructor, joins automatically
virtual ~thread_t();

thread_t(const thread_t&) = delete;

thread_t& operator =(const thread_t&) = delete;

public:
// get thread name
std::string get_name() const;

// create named thread (current state must be empty)
void start(std::function<std::string()> name, std::function<void()> func);

// detach thread -> empty state
void detach();

// join thread (provide locked unique_lock, for example, lv2_lock, for interruptibility) -> empty state
void join(std::unique_lock<std::mutex>& lock);

// join thread -> empty state
void join();
bool joinable() const;
};

class slw_mutex_t
{

};

class slw_recursive_mutex_t
{

};

class slw_shared_mutex_t
{
// check if not empty
bool joinable() const { return m_thread.operator bool(); }

// check whether it is the current running thread
bool is_current() const;
};

struct waiter_map_t
{
static const size_t size = 16;

std::array<std::mutex, size> mutex;
std::array<std::condition_variable, size> cv;
std::array<std::mutex, size> mutexes;
std::array<std::condition_variable, size> cvs;

const std::string name;

@@ -124,33 +107,32 @@ struct waiter_map_t
return addr % size;
}

// check emu status
bool is_stopped(u32 addr);
void check_emu_status(u32 addr);

// wait until waiter_func() returns true, signal_id is an arbitrary number
// wait until pred() returns true, `addr` is an arbitrary number
template<typename F, typename... Args> safe_buffers auto wait_op(u32 addr, F pred, Args&&... args) -> decltype(static_cast<void>(pred(args...)))
{
const u32 hash = get_hash(addr);

// set mutex locker
std::unique_lock<std::mutex> lock(mutex[hash], std::defer_lock);
std::unique_lock<std::mutex> lock(mutexes[hash], std::defer_lock);

while (true)
{
// check the condition
if (pred(args...)) return;

check_emu_status(addr);

// lock the mutex and initialize waiter (only once)
if (!lock) lock.lock();

// wait on appropriate condition variable for 1 ms or until signal arrived
cv[hash].wait_for(lock, std::chrono::milliseconds(1));

if (is_stopped(addr)) return;
// wait on an appropriate cond var for 1 ms or until a signal arrived
cvs[hash].wait_for(lock, std::chrono::milliseconds(1));
}
}

// signal all threads waiting on waiter_op() with the same signal_id (signaling only hints those threads that corresponding conditions are *probably* met)
// signal all threads waiting on wait_op() with the same `addr` (signaling only hints those threads that corresponding conditions are *probably* met)
void notify(u32 addr);
};
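For orientation, a minimal usage sketch of the new thread_t / thread_ctrl_t API defined above; the thread name and the lambda body are placeholders, not code from this commit:

// Illustrative only: start a named worker thread, then join it.
thread_t worker([]{ return std::string("example worker"); }, []
{
	// work runs here; get_current_thread_ctrl()->get_name() would return "example worker"
});

if (worker.joinable())
{
	worker.join(); // leaves the thread_t in the empty state; join(lock) is the interruptible variant
}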
@@ -1,5 +1,7 @@
#pragma once

#include "Emu/Memory/Memory.h"

enum ARMv7InstructionSet
{
ARM,

@@ -120,15 +122,34 @@ struct ARMv7Context
std::array<perf_counter, 6> counters;

u32 PC;
s32 prio;
u32 stack_addr;
u32 stack_size;
u32 hle_func; // current function ID

u32 debug;
std::string debug_str;

void write_pc(u32 value);
u32 read_pc();
u32 get_stack_arg(u32 pos);
void write_pc(u32 value, u32 size)
{
ISET = value & 1 ? Thumb : ARM;
PC = (value & ~1) - size;
}

u32 read_pc()
{
return ISET == ARM ? PC + 8 : PC + 4;
}

u32 get_stack_arg(u32 pos)
{
return vm::psv::read32(SP + sizeof(u32) * (pos - 5));
}

void fast_call(u32 addr);

void write_gpr(u32 n, u32 value)
void write_gpr(u32 n, u32 value, u32 size)
{
assert(n < 16);

@@ -138,7 +159,7 @@ struct ARMv7Context
}
else
{
write_pc(value);
write_pc(value, size);
}
}

@@ -301,4 +322,3 @@ force_inline T cast_from_armv7_gpr(const u32 reg)
{
return cast_armv7_gpr<T>::from_gpr(reg);
}
@ -287,19 +287,17 @@ namespace ARMv7_instrs
|
||||
{
|
||||
if (context.debug & DF_PRINT)
|
||||
{
|
||||
ARMv7Thread& CPU = static_cast<ARMv7Thread&>(context);
|
||||
|
||||
auto pos = context.debug_str.find(' ');
|
||||
if (pos != std::string::npos && pos < 8)
|
||||
{
|
||||
context.debug_str.insert(pos, 8 - pos, ' ');
|
||||
}
|
||||
|
||||
context.fmt_debug_str("0x%08x: %s", CPU.PC, context.debug_str);
|
||||
context.fmt_debug_str("0x%08x: %s", context.PC, context.debug_str);
|
||||
|
||||
LV2_LOCK;
|
||||
|
||||
auto found = g_armv7_dump.find(CPU.PC);
|
||||
auto found = g_armv7_dump.find(context.PC);
|
||||
if (found != g_armv7_dump.end())
|
||||
{
|
||||
if (found->second != context.debug_str)
|
||||
@ -309,7 +307,7 @@ namespace ARMv7_instrs
|
||||
}
|
||||
else
|
||||
{
|
||||
g_armv7_dump[CPU.PC] = context.debug_str;
|
||||
g_armv7_dump[context.PC] = context.debug_str;
|
||||
}
|
||||
}
|
||||
|
||||
@ -640,7 +638,7 @@ void ARMv7_instrs::ADC_IMM(ARMv7Context& context, const ARMv7Code code, const AR
|
||||
{
|
||||
bool carry, overflow;
|
||||
const u32 result = AddWithCarry(context.read_gpr(n), imm32, context.APSR.C, carry, overflow);
|
||||
context.write_gpr(d, result);
|
||||
context.write_gpr(d, result, 4);
|
||||
|
||||
if (set_flags)
|
||||
{
|
||||
@ -695,7 +693,7 @@ void ARMv7_instrs::ADC_REG(ARMv7Context& context, const ARMv7Code code, const AR
|
||||
bool carry, overflow;
|
||||
const u32 shifted = Shift(context.read_gpr(m), shift_t, shift_n, context.APSR.C);
|
||||
const u32 result = AddWithCarry(context.read_gpr(n), shifted, context.APSR.C, carry, overflow);
|
||||
context.write_gpr(d, result);
|
||||
context.write_gpr(d, result, type == T1 ? 2 : 4);
|
||||
|
||||
if (set_flags)
|
||||
{
|
||||
@ -779,7 +777,7 @@ void ARMv7_instrs::ADD_IMM(ARMv7Context& context, const ARMv7Code code, const AR
|
||||
{
|
||||
bool carry, overflow;
|
||||
const u32 result = AddWithCarry(context.read_gpr(n), imm32, false, carry, overflow);
|
||||
context.write_gpr(d, result);
|
||||
context.write_gpr(d, result, type < T3 ? 2 : 4);
|
||||
|
||||
if (set_flags)
|
||||
{
|
||||
@ -851,7 +849,7 @@ void ARMv7_instrs::ADD_REG(ARMv7Context& context, const ARMv7Code code, const AR
|
||||
bool carry, overflow;
|
||||
const u32 shifted = Shift(context.read_gpr(m), shift_t, shift_n, true);
|
||||
const u32 result = AddWithCarry(context.read_gpr(n), shifted, false, carry, overflow);
|
||||
context.write_gpr(d, result);
|
||||
context.write_gpr(d, result, type < T3 ? 2 : 4);
|
||||
|
||||
if (set_flags)
|
||||
{
|
||||
@ -930,7 +928,7 @@ void ARMv7_instrs::ADD_SPI(ARMv7Context& context, const ARMv7Code code, const AR
|
||||
{
|
||||
bool carry, overflow;
|
||||
const u32 result = AddWithCarry(context.SP, imm32, false, carry, overflow);
|
||||
context.write_gpr(d, result);
|
||||
context.write_gpr(d, result, type < T3 ? 2 : 4);
|
||||
|
||||
if (set_flags)
|
||||
{
|
||||
@ -997,7 +995,7 @@ void ARMv7_instrs::ADD_SPR(ARMv7Context& context, const ARMv7Code code, const AR
|
||||
bool carry, overflow;
|
||||
const u32 shifted = Shift(context.read_gpr(m), shift_t, shift_n, context.APSR.C);
|
||||
const u32 result = AddWithCarry(context.SP, shifted, false, carry, overflow);
|
||||
context.write_gpr(d, result);
|
||||
context.write_gpr(d, result, type < T3 ? 2 : 4);
|
||||
|
||||
if (set_flags)
|
||||
{
|
||||
@ -1060,7 +1058,7 @@ void ARMv7_instrs::ADR(ARMv7Context& context, const ARMv7Code code, const ARMv7_
|
||||
|
||||
if (ConditionPassed(context, cond))
|
||||
{
|
||||
context.write_gpr(d, result);
|
||||
context.write_gpr(d, result, type == T1 ? 2 : 4);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1097,7 +1095,7 @@ void ARMv7_instrs::AND_IMM(ARMv7Context& context, const ARMv7Code code, const AR
|
||||
if (ConditionPassed(context, cond))
|
||||
{
|
||||
const u32 result = context.read_gpr(n) & imm32;
|
||||
context.write_gpr(d, result);
|
||||
context.write_gpr(d, result, 4);
|
||||
|
||||
if (set_flags)
|
||||
{
|
||||
@ -1152,7 +1150,7 @@ void ARMv7_instrs::AND_REG(ARMv7Context& context, const ARMv7Code code, const AR
|
||||
bool carry;
|
||||
const u32 shifted = Shift_C(context.read_gpr(m), shift_t, shift_n, context.APSR.C, carry);
|
||||
const u32 result = context.read_gpr(n) & shifted;
|
||||
context.write_gpr(d, result);
|
||||
context.write_gpr(d, result, type == T1 ? 2 : 4);
|
||||
|
||||
if (set_flags)
|
||||
{
|
||||
@ -1260,7 +1258,7 @@ void ARMv7_instrs::B(ARMv7Context& context, const ARMv7Code code, const ARMv7_en
|
||||
|
||||
if (ConditionPassed(context, cond))
|
||||
{
|
||||
static_cast<ARMv7Thread&>(context).SetBranch(context.read_pc() + imm32);
|
||||
context.PC = context.read_pc() + imm32 - (type < T3 ? 2 : 4);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1315,7 +1313,7 @@ void ARMv7_instrs::BIC_IMM(ARMv7Context& context, const ARMv7Code code, const AR
|
||||
if (ConditionPassed(context, cond))
|
||||
{
|
||||
const u32 result = context.read_gpr(n) & ~imm32;
|
||||
context.write_gpr(d, result);
|
||||
context.write_gpr(d, result, 4);
|
||||
|
||||
if (set_flags)
|
||||
{
|
||||
@ -1369,7 +1367,7 @@ void ARMv7_instrs::BIC_REG(ARMv7Context& context, const ARMv7Code code, const AR
|
||||
bool carry;
|
||||
const u32 shifted = Shift_C(context.read_gpr(m), shift_t, shift_n, context.APSR.C, carry);
|
||||
const u32 result = context.read_gpr(n) & ~shifted;
|
||||
context.write_gpr(d, result);
|
||||
context.write_gpr(d, result, type == T1 ? 2 : 4);
|
||||
|
||||
if (set_flags)
|
||||
{
|
||||
@ -1440,14 +1438,12 @@ void ARMv7_instrs::BL(ARMv7Context& context, const ARMv7Code code, const ARMv7_e
|
||||
if (ConditionPassed(context, cond))
|
||||
{
|
||||
context.LR = lr;
|
||||
static_cast<ARMv7Thread&>(context).SetBranch(pc);
|
||||
context.PC = pc - 4;
|
||||
}
|
||||
}
|
||||
|
||||
void ARMv7_instrs::BLX(ARMv7Context& context, const ARMv7Code code, const ARMv7_encoding type)
|
||||
{
|
||||
ARMv7Thread& thread = static_cast<ARMv7Thread&>(context);
|
||||
|
||||
u32 cond, target, newLR;
|
||||
|
||||
switch (type)
|
||||
@ -1455,7 +1451,7 @@ void ARMv7_instrs::BLX(ARMv7Context& context, const ARMv7Code code, const ARMv7_
|
||||
case T1:
|
||||
{
|
||||
cond = context.ITSTATE.advance();
|
||||
newLR = (thread.PC + 2) | 1;
|
||||
newLR = (context.PC + 2) | 1;
|
||||
{
|
||||
const u32 m = (code.data >> 3) & 0xf;
|
||||
reject(m == 15, "UNPREDICTABLE");
|
||||
@ -1468,12 +1464,12 @@ void ARMv7_instrs::BLX(ARMv7Context& context, const ARMv7Code code, const ARMv7_
|
||||
case T2:
|
||||
{
|
||||
cond = context.ITSTATE.advance();
|
||||
newLR = (thread.PC + 4) | 1;
|
||||
newLR = (context.PC + 4) | 1;
|
||||
{
|
||||
const u32 s = (code.data >> 26) & 0x1;
|
||||
const u32 i1 = (code.data >> 13) & 0x1 ^ s ^ 1;
|
||||
const u32 i2 = (code.data >> 11) & 0x1 ^ s ^ 1;
|
||||
target = ~3 & thread.PC + 4 + sign<25, u32>(s << 24 | i2 << 23 | i1 << 22 | (code.data & 0x3ff0000) >> 4 | (code.data & 0x7ff) << 1);
|
||||
target = ~3 & context.PC + 4 + sign<25, u32>(s << 24 | i2 << 23 | i1 << 22 | (code.data & 0x3ff0000) >> 4 | (code.data & 0x7ff) << 1);
|
||||
}
|
||||
|
||||
reject(context.ITSTATE, "UNPREDICTABLE");
|
||||
@ -1482,15 +1478,15 @@ void ARMv7_instrs::BLX(ARMv7Context& context, const ARMv7Code code, const ARMv7_
|
||||
case A1:
|
||||
{
|
||||
cond = code.data >> 28;
|
||||
newLR = thread.PC + 4;
|
||||
newLR = context.PC + 4;
|
||||
target = context.read_gpr(code.data & 0xf);
|
||||
break;
|
||||
}
|
||||
case A2:
|
||||
{
|
||||
cond = 0xe; // always true
|
||||
newLR = thread.PC + 4;
|
||||
target = 1 | thread.PC + 8 + sign<25, u32>((code.data & 0xffffff) << 2 | (code.data & 0x1000000) >> 23);
|
||||
newLR = context.PC + 4;
|
||||
target = 1 | context.PC + 8 + sign<25, u32>((code.data & 0xffffff) << 2 | (code.data & 0x1000000) >> 23);
|
||||
break;
|
||||
}
|
||||
default: throw __FUNCTION__;
|
||||
@ -1514,7 +1510,7 @@ void ARMv7_instrs::BLX(ARMv7Context& context, const ARMv7Code code, const ARMv7_
|
||||
if (ConditionPassed(context, cond))
|
||||
{
|
||||
context.LR = newLR;
|
||||
context.write_pc(target);
|
||||
context.write_pc(target, type == T1 ? 2 : 4);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1549,7 +1545,7 @@ void ARMv7_instrs::BX(ARMv7Context& context, const ARMv7Code code, const ARMv7_e
|
||||
|
||||
if (ConditionPassed(context, cond))
|
||||
{
|
||||
context.write_pc(context.read_gpr(m));
|
||||
context.write_pc(context.read_gpr(m), type == T1 ? 2 : 4);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1581,7 +1577,7 @@ void ARMv7_instrs::CB_Z(ARMv7Context& context, const ARMv7Code code, const ARMv7
|
||||
|
||||
if ((context.read_gpr(n) == 0) ^ nonzero)
|
||||
{
|
||||
static_cast<ARMv7Thread&>(context).SetBranch(context.read_pc() + imm32);
|
||||
context.PC = context.read_pc() + imm32 - 2;
|
||||
}
|
||||
}
|
||||
|
||||
@ -1614,7 +1610,7 @@ void ARMv7_instrs::CLZ(ARMv7Context& context, const ARMv7Code code, const ARMv7_
|
||||
|
||||
if (ConditionPassed(context, cond))
|
||||
{
|
||||
context.write_gpr(d, cntlz32(context.read_gpr(m)));
|
||||
context.write_gpr(d, cntlz32(context.read_gpr(m)), type == T1 ? 2 : 4);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1826,7 +1822,7 @@ void ARMv7_instrs::EOR_IMM(ARMv7Context& context, const ARMv7Code code, const AR
|
||||
if (ConditionPassed(context, cond))
|
||||
{
|
||||
const u32 result = context.read_gpr(n) ^ imm32;
|
||||
context.write_gpr(d, result);
|
||||
context.write_gpr(d, result, 4);
|
||||
|
||||
if (set_flags)
|
||||
{
|
||||
@ -1881,7 +1877,7 @@ void ARMv7_instrs::EOR_REG(ARMv7Context& context, const ARMv7Code code, const AR
|
||||
bool carry;
|
||||
const u32 shifted = Shift_C(context.read_gpr(m), shift_t, shift_n, context.APSR.C, carry);
|
||||
const u32 result = context.read_gpr(n) ^ shifted;
|
||||
context.write_gpr(d, result);
|
||||
context.write_gpr(d, result, type == T1 ? 2 : 4);
|
||||
|
||||
if (set_flags)
|
||||
{
|
||||
@ -1978,13 +1974,13 @@ void ARMv7_instrs::LDM(ARMv7Context& context, const ARMv7Code code, const ARMv7_
|
||||
{
|
||||
if (reg_list & (1 << i))
|
||||
{
|
||||
context.write_gpr(i, *memory++);
|
||||
context.write_gpr(i, *memory++, type == T1 ? 2 : 4);
|
||||
}
|
||||
}
|
||||
|
||||
if (wback)
|
||||
{
|
||||
context.write_gpr(n, memory.addr());
|
||||
context.write_gpr(n, memory.addr(), type == T1 ? 2 : 4);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -2091,11 +2087,11 @@ void ARMv7_instrs::LDR_IMM(ARMv7Context& context, const ARMv7Code code, const AR
|
||||
{
|
||||
const u32 offset_addr = add ? context.read_gpr(n) + imm32 : context.read_gpr(n) - imm32;
|
||||
const u32 addr = index ? offset_addr : context.read_gpr(n);
|
||||
context.write_gpr(t, vm::read32(addr));
|
||||
context.write_gpr(t, vm::read32(addr), type < T3 ? 2 : 4);
|
||||
|
||||
if (wback)
|
||||
{
|
||||
context.write_gpr(n, offset_addr);
|
||||
context.write_gpr(n, offset_addr, type < T3 ? 2 : 4);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -2141,7 +2137,7 @@ void ARMv7_instrs::LDR_LIT(ARMv7Context& context, const ARMv7Code code, const AR
|
||||
if (ConditionPassed(context, cond))
|
||||
{
|
||||
const u32 data = vm::read32(addr);
|
||||
context.write_gpr(t, data);
|
||||
context.write_gpr(t, data, type == T1 ? 2 : 4);
|
||||
}
|
||||
}
|
||||
|
||||
@ -2197,11 +2193,11 @@ void ARMv7_instrs::LDR_REG(ARMv7Context& context, const ARMv7Code code, const AR
|
||||
const u32 offset = Shift(context.read_gpr(m), shift_t, shift_n, context.APSR.C);
|
||||
const u32 offset_addr = add ? context.read_gpr(n) + offset : context.read_gpr(n) - offset;
|
||||
const u32 addr = index ? offset_addr : context.read_gpr(n);
|
||||
context.write_gpr(t, vm::read32(addr));
|
||||
context.write_gpr(t, vm::read32(addr), type == T1 ? 2 : 4);
|
||||
|
||||
if (wback)
|
||||
{
|
||||
context.write_gpr(n, offset_addr);
|
||||
context.write_gpr(n, offset_addr, type == T1 ? 2 : 4);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -2271,11 +2267,11 @@ void ARMv7_instrs::LDRB_IMM(ARMv7Context& context, const ARMv7Code code, const A
|
||||
{
|
||||
const u32 offset_addr = add ? context.read_gpr(n) + imm32 : context.read_gpr(n) - imm32;
|
||||
const u32 addr = index ? offset_addr : context.read_gpr(n);
|
||||
context.write_gpr(t, vm::read8(addr));
|
||||
context.write_gpr(t, vm::read8(addr), type == T1 ? 2 : 4);
|
||||
|
||||
if (wback)
|
||||
{
|
||||
context.write_gpr(n, offset_addr);
|
||||
context.write_gpr(n, offset_addr, type == T1 ? 2 : 4);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -2341,11 +2337,11 @@ void ARMv7_instrs::LDRB_REG(ARMv7Context& context, const ARMv7Code code, const A
|
||||
const u32 offset = Shift(context.read_gpr(m), shift_t, shift_n, context.APSR.C);
|
||||
const u32 offset_addr = add ? context.read_gpr(n) + offset : context.read_gpr(n) - offset;
|
||||
const u32 addr = index ? offset_addr : context.read_gpr(n);
|
||||
context.write_gpr(t, vm::read8(addr));
|
||||
context.write_gpr(t, vm::read8(addr), type == T1 ? 2 : 4);
|
||||
|
||||
if (wback)
|
||||
{
|
||||
context.write_gpr(n, offset_addr);
|
||||
context.write_gpr(n, offset_addr, type == T1 ? 2 : 4);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -2390,12 +2386,12 @@ void ARMv7_instrs::LDRD_IMM(ARMv7Context& context, const ARMv7Code code, const A
|
||||
const u32 offset_addr = add ? context.read_gpr(n) + imm32 : context.read_gpr(n) - imm32;
|
||||
const u32 addr = index ? offset_addr : context.read_gpr(n);
|
||||
const u64 value = vm::read64(addr);
|
||||
context.write_gpr(t, (u32)(value));
|
||||
context.write_gpr(t2, (u32)(value >> 32));
|
||||
context.write_gpr(t, (u32)(value), 4);
|
||||
context.write_gpr(t2, (u32)(value >> 32), 4);
|
||||
|
||||
if (wback)
|
||||
{
|
||||
context.write_gpr(n, offset_addr);
|
||||
context.write_gpr(n, offset_addr, 4);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -2435,8 +2431,8 @@ void ARMv7_instrs::LDRD_LIT(ARMv7Context& context, const ARMv7Code code, const A
|
||||
if (ConditionPassed(context, cond))
|
||||
{
|
||||
const u64 value = vm::read64(addr);
|
||||
context.write_gpr(t, (u32)(value));
|
||||
context.write_gpr(t2, (u32)(value >> 32));
|
||||
context.write_gpr(t, (u32)(value), 4);
|
||||
context.write_gpr(t2, (u32)(value >> 32), 4);
|
||||
}
|
||||
}
|
||||
|
||||
@ -2514,11 +2510,11 @@ void ARMv7_instrs::LDRH_IMM(ARMv7Context& context, const ARMv7Code code, const A
|
||||
{
|
||||
const u32 offset_addr = add ? context.read_gpr(n) + imm32 : context.read_gpr(n) - imm32;
|
||||
const u32 addr = index ? offset_addr : context.read_gpr(n);
|
||||
context.write_gpr(t, vm::read16(addr));
|
||||
context.write_gpr(t, vm::read16(addr), type == T1 ? 2 : 4);
|
||||
|
||||
if (wback)
|
||||
{
|
||||
context.write_gpr(n, offset_addr);
|
||||
context.write_gpr(n, offset_addr, type == T1 ? 2 : 4);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -2596,11 +2592,11 @@ void ARMv7_instrs::LDRSB_IMM(ARMv7Context& context, const ARMv7Code code, const
|
||||
const u32 offset_addr = add ? context.read_gpr(n) + imm32 : context.read_gpr(n) - imm32;
|
||||
const u32 addr = index ? offset_addr : context.read_gpr(n);
|
||||
const s8 value = vm::read8(addr);
|
||||
context.write_gpr(t, value); // sign-extend
|
||||
context.write_gpr(t, value, 4); // sign-extend
|
||||
|
||||
if (wback)
|
||||
{
|
||||
context.write_gpr(n, offset_addr);
|
||||
context.write_gpr(n, offset_addr, 4);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -2685,7 +2681,7 @@ void ARMv7_instrs::LDREX(ARMv7Context& context, const ARMv7Code code, const ARMv
|
||||
u32 value;
|
||||
vm::reservation_acquire(&value, addr, sizeof(value));
|
||||
|
||||
context.write_gpr(t, value);
|
||||
context.write_gpr(t, value, 4);
|
||||
}
|
||||
}
|
||||
|
||||
@ -2760,7 +2756,7 @@ void ARMv7_instrs::LSL_IMM(ARMv7Context& context, const ARMv7Code code, const AR
|
||||
{
|
||||
bool carry;
|
||||
const u32 result = Shift_C(context.read_gpr(m), SRType_LSL, shift_n, context.APSR.C, carry);
|
||||
context.write_gpr(d, result);
|
||||
context.write_gpr(d, result, type == T1 ? 2 : 4);
|
||||
|
||||
if (set_flags)
|
||||
{
|
||||
@ -2810,7 +2806,7 @@ void ARMv7_instrs::LSL_REG(ARMv7Context& context, const ARMv7Code code, const AR
|
||||
{
|
||||
bool carry;
|
||||
const u32 result = Shift_C(context.read_gpr(n), SRType_LSL, (context.read_gpr(m) & 0xff), context.APSR.C, carry);
|
||||
context.write_gpr(d, result);
|
||||
context.write_gpr(d, result, type == T1 ? 2 : 4);
|
||||
|
||||
if (set_flags)
|
||||
{
|
||||
@ -2862,7 +2858,7 @@ void ARMv7_instrs::LSR_IMM(ARMv7Context& context, const ARMv7Code code, const AR
|
||||
{
|
||||
bool carry;
|
||||
const u32 result = Shift_C(context.read_gpr(m), SRType_LSR, shift_n, context.APSR.C, carry);
|
||||
context.write_gpr(d, result);
|
||||
context.write_gpr(d, result, type == T1 ? 2 : 4);
|
||||
|
||||
if (set_flags)
|
||||
{
|
||||
@ -2956,7 +2952,7 @@ void ARMv7_instrs::MOV_IMM(ARMv7Context& context, const ARMv7Code code, const AR
|
||||
if (ConditionPassed(context, cond))
|
||||
{
|
||||
const u32 result = imm32;
|
||||
context.write_gpr(d, result);
|
||||
context.write_gpr(d, result, type == T1 ? 2 : 4);
|
||||
|
||||
if (set_flags)
|
||||
{
|
||||
@ -3018,7 +3014,7 @@ void ARMv7_instrs::MOV_REG(ARMv7Context& context, const ARMv7Code code, const AR
|
||||
if (ConditionPassed(context, cond))
|
||||
{
|
||||
const u32 result = context.read_gpr(m);
|
||||
context.write_gpr(d, result);
|
||||
context.write_gpr(d, result, type < T3 ? 2 : 4);
|
||||
|
||||
if (set_flags)
|
||||
{
|
||||
@ -3056,7 +3052,7 @@ void ARMv7_instrs::MOVT(ARMv7Context& context, const ARMv7Code code, const ARMv7
|
||||
|
||||
if (ConditionPassed(context, cond))
|
||||
{
|
||||
context.write_gpr(d, (context.read_gpr(d) & 0xffff) | (imm16 << 16));
|
||||
context.write_gpr(d, (context.read_gpr(d) & 0xffff) | (imm16 << 16), 4);
|
||||
}
|
||||
}
|
||||
|
||||
@ -3131,7 +3127,7 @@ void ARMv7_instrs::MUL(ARMv7Context& context, const ARMv7Code code, const ARMv7_
|
||||
const u32 op1 = context.read_gpr(n);
|
||||
const u32 op2 = context.read_gpr(m);
|
||||
const u32 result = op1 * op2;
|
||||
context.write_gpr(d, result);
|
||||
context.write_gpr(d, result, type == T1 ? 2 : 4);
|
||||
|
||||
if (set_flags)
|
||||
{
|
||||
@ -3172,7 +3168,7 @@ void ARMv7_instrs::MVN_IMM(ARMv7Context& context, const ARMv7Code code, const AR
|
||||
if (ConditionPassed(context, cond))
|
||||
{
|
||||
const u32 result = ~imm32;
|
||||
context.write_gpr(d, result);
|
||||
context.write_gpr(d, result, 4);
|
||||
|
||||
if (set_flags)
|
||||
{
|
||||
@ -3225,7 +3221,7 @@ void ARMv7_instrs::MVN_REG(ARMv7Context& context, const ARMv7Code code, const AR
|
||||
bool carry;
|
||||
const u32 shifted = Shift_C(context.read_gpr(m), shift_t, shift_n, context.APSR.C, carry);
|
||||
const u32 result = ~shifted;
|
||||
context.write_gpr(d, result);
|
||||
context.write_gpr(d, result, type == T1 ? 2 : 4);
|
||||
|
||||
if (set_flags)
|
||||
{
|
||||
@ -3333,7 +3329,7 @@ void ARMv7_instrs::ORR_IMM(ARMv7Context& context, const ARMv7Code code, const AR
|
||||
if (ConditionPassed(context, cond))
|
||||
{
|
||||
const u32 result = context.read_gpr(n) | imm32;
|
||||
context.write_gpr(d, result);
|
||||
context.write_gpr(d, result, 4);
|
||||
|
||||
if (set_flags)
|
||||
{
|
||||
@ -3388,7 +3384,7 @@ void ARMv7_instrs::ORR_REG(ARMv7Context& context, const ARMv7Code code, const AR
|
||||
bool carry;
|
||||
const u32 shifted = Shift_C(context.read_gpr(m), shift_t, shift_n, context.APSR.C, carry);
|
||||
const u32 result = context.read_gpr(n) | shifted;
|
||||
context.write_gpr(d, result);
|
||||
context.write_gpr(d, result, type == T1 ? 2 : 4);
|
||||
|
||||
if (set_flags)
|
||||
{
|
||||
@ -3484,7 +3480,7 @@ void ARMv7_instrs::POP(ARMv7Context& context, const ARMv7Code code, const ARMv7_
|
||||
{
|
||||
if (reg_list & (1 << i))
|
||||
{
|
||||
context.write_gpr(i, *stack++);
|
||||
context.write_gpr(i, *stack++, type < A1 ? 2 : 4);
|
||||
}
|
||||
}
|
||||
|
||||
@ -3699,7 +3695,7 @@ void ARMv7_instrs::REV(ARMv7Context& context, const ARMv7Code code, const ARMv7_
|
||||
|
||||
if (ConditionPassed(context, cond))
|
||||
{
|
||||
context.write_gpr(d, _byteswap_ulong(context.read_gpr(m)));
|
||||
context.write_gpr(d, _byteswap_ulong(context.read_gpr(m)), type == T1 ? 2 : 4);
|
||||
}
|
||||
}
|
||||
|
||||
@ -3755,7 +3751,7 @@ void ARMv7_instrs::ROR_IMM(ARMv7Context& context, const ARMv7Code code, const AR
|
||||
{
|
||||
bool carry;
|
||||
const u32 result = Shift_C(context.read_gpr(m), SRType_ROR, shift_n, context.APSR.C, carry);
|
||||
context.write_gpr(d, result);
|
||||
context.write_gpr(d, result, 4);
|
||||
|
||||
if (set_flags)
|
||||
{
|
||||
@ -3806,7 +3802,7 @@ void ARMv7_instrs::ROR_REG(ARMv7Context& context, const ARMv7Code code, const AR
|
||||
bool carry;
|
||||
const u32 shift_n = context.read_gpr(m) & 0xff;
|
||||
const u32 result = Shift_C(context.read_gpr(n), SRType_ROR, shift_n, context.APSR.C, carry);
|
||||
context.write_gpr(d, result);
|
||||
context.write_gpr(d, result, type == T1 ? 2 : 4);
|
||||
|
||||
if (set_flags)
|
||||
{
|
||||
@ -3868,7 +3864,7 @@ void ARMv7_instrs::RSB_IMM(ARMv7Context& context, const ARMv7Code code, const AR
|
||||
{
|
||||
bool carry, overflow;
|
||||
const u32 result = AddWithCarry(~context.read_gpr(n), imm32, true, carry, overflow);
|
||||
context.write_gpr(d, result);
|
||||
context.write_gpr(d, result, type == T1 ? 2 : 4);
|
||||
|
||||
if (set_flags)
|
||||
{
|
||||
@ -4311,7 +4307,7 @@ void ARMv7_instrs::STM(ARMv7Context& context, const ARMv7Code code, const ARMv7_
|
||||
|
||||
if (wback)
|
||||
{
|
||||
context.write_gpr(n, memory.addr());
|
||||
context.write_gpr(n, memory.addr(), type == T1 ? 2 : 4);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -4421,7 +4417,7 @@ void ARMv7_instrs::STR_IMM(ARMv7Context& context, const ARMv7Code code, const AR
|
||||
|
||||
if (wback)
|
||||
{
|
||||
context.write_gpr(n, offset_addr);
|
||||
context.write_gpr(n, offset_addr, type < T3 ? 2 : 4);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -4481,7 +4477,7 @@ void ARMv7_instrs::STR_REG(ARMv7Context& context, const ARMv7Code code, const AR
|
||||
|
||||
if (wback)
|
||||
{
|
||||
context.write_gpr(n, offset_addr);
|
||||
context.write_gpr(n, offset_addr, type == T1 ? 2 : 4);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -4552,7 +4548,7 @@ void ARMv7_instrs::STRB_IMM(ARMv7Context& context, const ARMv7Code code, const A
|
||||
|
||||
if (wback)
|
||||
{
|
||||
context.write_gpr(n, offset_addr);
|
||||
context.write_gpr(n, offset_addr, type == T1 ? 2 : 4);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -4612,7 +4608,7 @@ void ARMv7_instrs::STRB_REG(ARMv7Context& context, const ARMv7Code code, const A
|
||||
|
||||
if (wback)
|
||||
{
|
||||
context.write_gpr(n, offset_addr);
|
||||
context.write_gpr(n, offset_addr, type == T1 ? 2 : 4);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -4660,7 +4656,7 @@ void ARMv7_instrs::STRD_IMM(ARMv7Context& context, const ARMv7Code code, const A
|
||||
|
||||
if (wback)
|
||||
{
|
||||
context.write_gpr(n, offset);
|
||||
context.write_gpr(n, offset, 4);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -4740,7 +4736,7 @@ void ARMv7_instrs::STRH_IMM(ARMv7Context& context, const ARMv7Code code, const A
|
||||
|
||||
if (wback)
|
||||
{
|
||||
context.write_gpr(n, offset_addr);
|
||||
context.write_gpr(n, offset_addr, type == T1 ? 2 : 4);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -4800,7 +4796,7 @@ void ARMv7_instrs::STRH_REG(ARMv7Context& context, const ARMv7Code code, const A
|
||||
|
||||
if (wback)
|
||||
{
|
||||
context.write_gpr(n, offset_addr);
|
||||
context.write_gpr(n, offset_addr, type == T1 ? 2 : 4);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -4838,7 +4834,7 @@ void ARMv7_instrs::STREX(ARMv7Context& context, const ARMv7Code code, const ARMv
|
||||
{
|
||||
const u32 addr = context.read_gpr(n) + imm32;
|
||||
const u32 value = context.read_gpr(t);
|
||||
context.write_gpr(d, !vm::reservation_update(addr, &value, sizeof(value)));
|
||||
context.write_gpr(d, !vm::reservation_update(addr, &value, sizeof(value)), 4);
|
||||
}
|
||||
}
|
||||
|
||||
@ -4932,7 +4928,7 @@ void ARMv7_instrs::SUB_IMM(ARMv7Context& context, const ARMv7Code code, const AR
|
||||
{
|
||||
bool carry, overflow;
|
||||
const u32 result = AddWithCarry(context.read_gpr(n), ~imm32, true, carry, overflow);
|
||||
context.write_gpr(d, result);
|
||||
context.write_gpr(d, result, type < T3 ? 2 : 4);
|
||||
|
||||
if (set_flags)
|
||||
{
|
||||
@ -4990,7 +4986,7 @@ void ARMv7_instrs::SUB_REG(ARMv7Context& context, const ARMv7Code code, const AR
|
||||
bool carry, overflow;
|
||||
const u32 shifted = Shift(context.read_gpr(m), shift_t, shift_n, context.APSR.C);
|
||||
const u32 result = AddWithCarry(context.read_gpr(n), ~shifted, true, carry, overflow);
|
||||
context.write_gpr(d, result);
|
||||
context.write_gpr(d, result, type == T1 ? 2 : 4);
|
||||
|
||||
if (set_flags)
|
||||
{
|
||||
@ -5061,7 +5057,7 @@ void ARMv7_instrs::SUB_SPI(ARMv7Context& context, const ARMv7Code code, const AR
|
||||
{
|
||||
bool carry, overflow;
|
||||
const u32 result = AddWithCarry(context.SP, ~imm32, true, carry, overflow);
|
||||
context.write_gpr(d, result);
|
||||
context.write_gpr(d, result, type == T1 ? 2 : 4);
|
||||
|
||||
if (set_flags)
|
||||
{
|
||||
@ -5390,8 +5386,8 @@ void ARMv7_instrs::UMULL(ARMv7Context& context, const ARMv7Code code, const ARMv
|
||||
if (ConditionPassed(context, cond))
|
||||
{
|
||||
const u64 result = (u64)context.read_gpr(n) * (u64)context.read_gpr(m);
|
||||
context.write_gpr(d1, (u32)(result >> 32));
|
||||
context.write_gpr(d0, (u32)(result));
|
||||
context.write_gpr(d1, (u32)(result >> 32), 4);
|
||||
context.write_gpr(d0, (u32)(result), 4);
|
||||
|
||||
if (set_flags)
|
||||
{
|
||||
@ -5581,7 +5577,7 @@ void ARMv7_instrs::UXTB(ARMv7Context& context, const ARMv7Code code, const ARMv7
|
||||
|
||||
if (ConditionPassed(context, cond))
|
||||
{
|
||||
context.write_gpr(d, (context.read_gpr(m) >> rot) & 0xff);
|
||||
context.write_gpr(d, (context.read_gpr(m) >> rot) & 0xff, type < A1 ? 2 : 4);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -3,33 +3,15 @@
#include "Utilities/Log.h"
#include "Emu/Memory/Memory.h"
#include "Emu/System.h"
#include "Emu/IdManager.h"
#include "Emu/CPU/CPUThreadManager.h"
#include "Emu/ARMv7/PSVFuncList.h"

#include "ARMv7Thread.h"
#include "ARMv7Decoder.h"
#include "ARMv7DisAsm.h"
#include "ARMv7Interpreter.h"

void ARMv7Context::write_pc(u32 value)
{
ARMv7Thread& thread = *static_cast<ARMv7Thread*>(this);

ISET = value & 1 ? Thumb : ARM;
thread.SetBranch(value & ~1);
}

u32 ARMv7Context::read_pc()
{
ARMv7Thread& thread = *static_cast<ARMv7Thread*>(this);

return ISET == ARM ? thread.PC + 8 : thread.PC + 4;
}

u32 ARMv7Context::get_stack_arg(u32 pos)
{
return vm::psv::read32(SP + sizeof(u32) * (pos - 5));
}

void ARMv7Context::fast_call(u32 addr)
{
return static_cast<ARMv7Thread*>(this)->FastCall(addr);
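get_stack_arg above reads SP + 4 * (pos - 5), which lines up with the AAPCS rule that arguments 1-4 travel in r0-r3, so argument 5 is the first 32-bit slot on the stack. A small sketch of a combined accessor; the struct and the read helper are hypothetical stand-ins for ARMv7Context and vm::psv::read32:

#include <cstdint>

using u32 = std::uint32_t;

struct ctx_sketch { u32 GPR[16]; u32 SP; }; // hypothetical stand-in for ARMv7Context

// Placeholder for vm::psv::read32 in this sketch.
inline u32 read_stack_u32(u32 /*addr*/) { return 0; }

// Argument positions are 1-based, matching get_stack_arg: 1-4 come from
// r0-r3, 5 and up live on the stack, so position 5 maps to offset 0.
inline u32 get_arg(const ctx_sketch& context, u32 pos)
{
    return pos <= 4
        ? context.GPR[pos - 1]
        : read_stack_u32(context.SP + sizeof(u32) * (pos - 5));
}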
@@ -98,51 +80,73 @@ void armv7_free_tls(u32 thread)
}
}

ARMv7Thread::ARMv7Thread()
: CPUThread(CPU_THREAD_ARMv7)
//, m_arg(0)
//, m_last_instr_size(0)
//, m_last_instr_name("UNK")
ARMv7Thread::ARMv7Thread(const std::string& name)
: CPUThread(CPU_THREAD_ARMv7, name, [this]{ return fmt::format("%s[0x%x] Thread (%s)[0x%08x]", GetTypeString(), GetId(), GetName(), PC); })
, ARMv7Context({})
{
}

ARMv7Thread::~ARMv7Thread()
{
cv.notify_one();
join();

armv7_free_tls(GetId());
}

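The destructor now wakes the worker (cv.notify_one()) and joins it before releasing per-thread resources such as the TLS slot. The general shape of that shutdown handshake, sketched with the standard library; the real code goes through thread_t / CPUThread, so treat this only as an illustration of the ordering:

#include <chrono>
#include <condition_variable>
#include <mutex>
#include <thread>

struct worker_sketch
{
    std::mutex mutex;
    std::condition_variable cv;
    bool stop = false;
    std::thread thread;

    worker_sketch() : thread([this]
    {
        std::unique_lock<std::mutex> lock(mutex);
        while (!stop) cv.wait_for(lock, std::chrono::milliseconds(1));
    })
    {
    }

    ~worker_sketch()
    {
        { std::lock_guard<std::mutex> lock(mutex); stop = true; }
        cv.notify_one(); // wake the worker so it can observe the flag...
        thread.join();   // ...and join before any members are destroyed
    }
};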
void ARMv7Thread::DumpInformation() const
{
if (hle_func)
{
const auto func = get_psv_func_by_nid(hle_func);

LOG_SUCCESS(HLE, "Information: function 0x%x (%s)", hle_func, func ? func->name : "?????????");
}

CPUThread::DumpInformation();
}

void ARMv7Thread::InitRegs()
{
memset(GPR, 0, sizeof(GPR));
APSR.APSR = 0;
IPSR.IPSR = 0;
ISET = PC & 1 ? Thumb : ARM; // select instruction set
SetPc(PC & ~1); // and fix PC
PC = PC & ~1; // and fix PC
ITSTATE.IT = 0;
SP = m_stack_addr + m_stack_size;
SP = stack_addr + stack_size;
TLS = armv7_get_tls(GetId());
debug = DF_DISASM | DF_PRINT;
}

void ARMv7Thread::InitStack()
{
if (!m_stack_addr)
if (!stack_addr)
{
assert(m_stack_size);
m_stack_addr = Memory.Alloc(m_stack_size, 4096);
if (!stack_size)
{
throw EXCEPTION("Invalid stack size");
}

stack_addr = Memory.Alloc(stack_size, 4096);

if (!stack_addr)
{
throw EXCEPTION("Out of stack memory");
}
}
}

void ARMv7Thread::CloseStack()
{
if (m_stack_addr)
if (stack_addr)
{
Memory.Free(m_stack_addr);
m_stack_addr = 0;
Memory.Free(stack_addr);
stack_addr = 0;
}
}

std::string ARMv7Thread::RegsToString()
std::string ARMv7Thread::RegsToString() const
{
std::string result = "Registers:\n=========\n";
for(int i=0; i<15; ++i)
@@ -161,7 +165,7 @@ std::string ARMv7Thread::RegsToString()
return result;
}

std::string ARMv7Thread::ReadRegString(const std::string& reg)
std::string ARMv7Thread::ReadRegString(const std::string& reg) const
{
return "";
}
@@ -171,19 +175,15 @@ bool ARMv7Thread::WriteRegString(const std::string& reg, std::string value)
return true;
}

void ARMv7Thread::DoReset()
{
}

void ARMv7Thread::DoRun()
{
m_dec = nullptr;
m_dec.reset();

switch(Ini.CPUDecoderMode.GetValue())
{
case 0:
case 1:
m_dec = new ARMv7Decoder(*this);
m_dec.reset(new ARMv7Decoder(*this));
break;
default:
LOG_ERROR(PPU, "Invalid CPU decoder mode: %d", Ini.CPUDecoderMode.GetValue());
@@ -191,58 +191,70 @@ void ARMv7Thread::DoRun()
}
}

void ARMv7Thread::DoPause()
void ARMv7Thread::Task()
{
}
if (custom_task)
{
if (m_state.load() && CheckStatus()) return;

void ARMv7Thread::DoResume()
{
}
return custom_task(*this);
}

void ARMv7Thread::DoStop()
{
}
while (true)
{
if (m_state.load() && CheckStatus()) return;

void ARMv7Thread::DoCode()
{
// decode instruction using specified decoder
PC += m_dec->DecodeMemory(PC);
}
}

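Task() now polls the shared state word before every step: when any flag is set it defers to CheckStatus(), which (per the CPUThread changes later in this commit) waits while the thread is paused and reports whether Task() must return. A condensed sketch of that control flow, with placeholder names standing in for m_state, CheckStatus() and DecodeMemory():

#include <atomic>
#include <cstdint>

using u32 = std::uint32_t;
using u64 = std::uint64_t;

enum : u64 { STATE_STOP = 1ull << 0, STATE_PAUSE = 1ull << 1 };

struct interp_sketch
{
    std::atomic<u64> state{ 0 };
    u32 PC = 0;

    u32 decode_one(u32 /*pc*/) { return 4; } // placeholder for m_dec->DecodeMemory(PC)

    // Placeholder for CheckStatus(): returns true when Task() must return.
    bool check_status() { return (state.load() & STATE_STOP) != 0; }

    void task()
    {
        while (true)
        {
            // Cheap fast path: only consult check_status() when some flag is set.
            if (state.load() && check_status()) return;

            PC += decode_one(PC);
        }
    }
};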
void ARMv7Thread::FastCall(u32 addr)
{
auto old_status = m_status;
if (!is_current())
{
throw EXCEPTION("Called from the wrong thread");
}

auto old_PC = PC;
auto old_stack = SP;
auto old_LR = LR;
auto old_thread = GetCurrentNamedThread();

m_status = Running;
PC = addr;
LR = Emu.GetCPUThreadStop();
SetCurrentNamedThread(this);

CPUThread::Task();
try
{
Task();
}
catch (CPUThreadReturn)
{
}

m_status = old_status;
PC = old_PC;
SP = old_stack;

if (SP != old_stack) // SP shouldn't change
{
throw EXCEPTION("Stack inconsistency (addr=0x%x, SP=0x%x, old=0x%x)", addr, SP, old_stack);
}

LR = old_LR;
SetCurrentNamedThread(old_thread);
}

void ARMv7Thread::FastStop()
{
m_status = Stopped;
m_events |= CPU_EVENT_STOP;
throw CPUThreadReturn{};
}

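FastCall now refuses to run from a foreign thread (the is_current() check), saves the context it is about to clobber (PC, SP, LR), points LR at Emu.GetCPUThreadStop() so the nested call ends by throwing CPUThreadReturn (FastStop() above does exactly that), runs Task(), and finally restores the context while verifying that SP came back unchanged. The save/call/verify/restore sequence in miniature, with hypothetical helpers:

#include <cstdint>
#include <stdexcept>

struct fastcall_sketch
{
    std::uint32_t PC = 0, SP = 0x10000, LR = 0;

    void run_until_stop() {} // placeholder for Task() plus the CPUThreadReturn catch

    void fast_call(std::uint32_t addr, std::uint32_t stop_addr)
    {
        const auto old_PC = PC, old_SP = SP, old_LR = LR;

        PC = addr;
        LR = stop_addr;   // "returning" to this address ends the nested call
        run_until_stop();

        PC = old_PC;

        if (SP != old_SP) // a well-behaved callee must restore the stack pointer
        {
            throw std::runtime_error("stack inconsistency after fast call");
        }

        LR = old_LR;
    }
};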
armv7_thread::armv7_thread(u32 entry, const std::string& name, u32 stack_size, s32 prio)
{
thread = Emu.GetCPU().AddThread(CPU_THREAD_ARMv7);
std::shared_ptr<ARMv7Thread> armv7 = Emu.GetIdManager().make_ptr<ARMv7Thread>(name);

thread->SetName(name);
thread->SetEntry(entry);
thread->SetStackSize(stack_size);
thread->SetPrio(prio);
armv7->PC = entry;
armv7->stack_size = stack_size;
armv7->prio = prio;

thread = std::move(armv7);

argc = 0;
}

@@ -2,33 +2,31 @@
#include "Emu/CPU/CPUThread.h"
#include "ARMv7Context.h"

class ARMv7Thread : public CPUThread, public ARMv7Context
class ARMv7Thread final : public CPUThread, public ARMv7Context
{
public:
ARMv7Thread();
~ARMv7Thread();
std::function<void(ARMv7Thread& CPU)> custom_task;

public:
virtual void InitRegs();
virtual void InitStack();
virtual void CloseStack();
ARMv7Thread(const std::string& name);
virtual ~ARMv7Thread() override;

virtual void DumpInformation() const override;
virtual u32 GetPC() const override { return PC; }
virtual u32 GetOffset() const override { return 0; }
virtual void DoRun() override;
virtual void Task() override;

virtual void InitRegs() override;
virtual void InitStack() override;
virtual void CloseStack() override;
u32 GetStackArg(u32 pos);
void FastCall(u32 addr);
void FastStop();
virtual void DoRun();

public:
virtual std::string RegsToString();
virtual std::string ReadRegString(const std::string& reg);
virtual bool WriteRegString(const std::string& reg, std::string value);

protected:
virtual void DoReset();
virtual void DoPause();
virtual void DoResume();
virtual void DoStop();

virtual void DoCode();
virtual std::string RegsToString() const override;
virtual std::string ReadRegString(const std::string& reg) const override;
virtual bool WriteRegString(const std::string& reg, std::string value) override;
};

class armv7_thread : cpu_thread

@ -1,5 +1,6 @@
|
||||
#include "stdafx.h"
|
||||
#include "Emu/System.h"
|
||||
#include "Emu/IdManager.h"
|
||||
#include "Emu/ARMv7/PSVFuncList.h"
|
||||
#include "Emu/ARMv7/PSVObjectList.h"
|
||||
|
||||
@ -45,48 +46,43 @@ s32 sceKernelCreateThread(
|
||||
sceLibKernel.Warning("sceKernelCreateThread(pName=*0x%x, entry=*0x%x, initPriority=%d, stackSize=0x%x, attr=0x%x, cpuAffinityMask=0x%x, pOptParam=*0x%x)",
|
||||
pName, entry, initPriority, stackSize, attr, cpuAffinityMask, pOptParam);
|
||||
|
||||
auto t = Emu.GetCPU().AddThread(CPU_THREAD_ARMv7);
|
||||
auto armv7 = Emu.GetIdManager().make_ptr<ARMv7Thread>(pName.get_ptr());
|
||||
|
||||
auto& armv7 = static_cast<ARMv7Thread&>(*t);
|
||||
armv7->PC = entry.addr();
|
||||
armv7->prio = initPriority;
|
||||
armv7->stack_size = stackSize;
|
||||
armv7->Run();
|
||||
|
||||
armv7.SetEntry(entry.addr());
|
||||
armv7.SetPrio(initPriority);
|
||||
armv7.SetStackSize(stackSize);
|
||||
armv7.SetName(pName.get_ptr());
|
||||
armv7.Run();
|
||||
|
||||
return armv7.GetId();
|
||||
return armv7->GetId();
|
||||
}
|
||||
|
||||
s32 sceKernelStartThread(s32 threadId, u32 argSize, vm::cptr<void> pArgBlock)
|
||||
{
|
||||
sceLibKernel.Warning("sceKernelStartThread(threadId=0x%x, argSize=0x%x, pArgBlock=*0x%x)", threadId, argSize, pArgBlock);
|
||||
|
||||
const auto t = Emu.GetCPU().GetThread(threadId, CPU_THREAD_ARMv7);
|
||||
const auto thread = Emu.GetIdManager().get<ARMv7Thread>(threadId);
|
||||
|
||||
if (!t)
|
||||
if (!thread)
|
||||
{
|
||||
return SCE_KERNEL_ERROR_INVALID_UID;
|
||||
}
|
||||
|
||||
// thread should be in DORMANT state, but it's not possible to check it correctly atm
|
||||
|
||||
if (t->IsAlive())
|
||||
{
|
||||
return SCE_KERNEL_ERROR_NOT_DORMANT;
|
||||
}
|
||||
|
||||
ARMv7Thread& thread = static_cast<ARMv7Thread&>(*t);
|
||||
//if (thread->IsAlive())
|
||||
//{
|
||||
// return SCE_KERNEL_ERROR_NOT_DORMANT;
|
||||
//}
|
||||
|
||||
// push arg block onto the stack
|
||||
const u32 pos = (thread.SP -= argSize);
|
||||
const u32 pos = (thread->SP -= argSize);
|
||||
memcpy(vm::get_ptr<void>(pos), pArgBlock.get_ptr(), argSize);
|
||||
|
||||
// set SceKernelThreadEntry function arguments
|
||||
thread.GPR[0] = argSize;
|
||||
thread.GPR[1] = pos;
|
||||
thread->GPR[0] = argSize;
|
||||
thread->GPR[1] = pos;
|
||||
|
||||
thread.Exec();
|
||||
thread->Exec();
|
||||
return SCE_OK;
|
||||
}
|
||||
|
||||
@ -104,21 +100,21 @@ s32 sceKernelDeleteThread(s32 threadId)
|
||||
{
|
||||
sceLibKernel.Warning("sceKernelDeleteThread(threadId=0x%x)", threadId);
|
||||
|
||||
const auto t = Emu.GetCPU().GetThread(threadId, CPU_THREAD_ARMv7);
|
||||
const auto thread = Emu.GetIdManager().get<ARMv7Thread>(threadId);
|
||||
|
||||
if (!t)
|
||||
if (!thread)
|
||||
{
|
||||
return SCE_KERNEL_ERROR_INVALID_UID;
|
||||
}
|
||||
|
||||
// thread should be in DORMANT state, but it's not possible to check it correctly atm
|
||||
|
||||
if (t->IsAlive())
|
||||
{
|
||||
return SCE_KERNEL_ERROR_NOT_DORMANT;
|
||||
}
|
||||
//if (thread->IsAlive())
|
||||
//{
|
||||
// return SCE_KERNEL_ERROR_NOT_DORMANT;
|
||||
//}
|
||||
|
||||
Emu.GetCPU().RemoveThread(threadId);
|
||||
Emu.GetIdManager().remove<ARMv7Thread>(threadId);
|
||||
return SCE_OK;
|
||||
}
|
||||
|
||||
@ -131,9 +127,10 @@ s32 sceKernelExitDeleteThread(ARMv7Context& context, s32 exitStatus)
|
||||
|
||||
// current thread should be deleted
|
||||
const u32 id = static_cast<ARMv7Thread&>(context).GetId();
|
||||
|
||||
CallAfter([id]()
|
||||
{
|
||||
Emu.GetCPU().RemoveThread(id);
|
||||
Emu.GetIdManager().remove<ARMv7Thread>(id);
|
||||
});
|
||||
|
||||
return SCE_OK;
|
||||
@ -262,32 +259,27 @@ s32 sceKernelWaitThreadEnd(s32 threadId, vm::ptr<s32> pExitStatus, vm::ptr<u32>
|
||||
{
|
||||
sceLibKernel.Warning("sceKernelWaitThreadEnd(threadId=0x%x, pExitStatus=*0x%x, pTimeout=*0x%x)", threadId, pExitStatus, pTimeout);
|
||||
|
||||
const auto t = Emu.GetCPU().GetThread(threadId, CPU_THREAD_ARMv7);
|
||||
const auto thread = Emu.GetIdManager().get<ARMv7Thread>(threadId);
|
||||
|
||||
if (!t)
|
||||
if (!thread)
|
||||
{
|
||||
return SCE_KERNEL_ERROR_INVALID_UID;
|
||||
}
|
||||
|
||||
ARMv7Thread& thread = static_cast<ARMv7Thread&>(*t);
|
||||
|
||||
if (pTimeout)
|
||||
{
|
||||
}
|
||||
|
||||
while (thread.IsAlive())
|
||||
while (thread->IsActive())
|
||||
{
|
||||
if (Emu.IsStopped())
|
||||
{
|
||||
sceLibKernel.Warning("sceKernelWaitThreadEnd(0x%x) aborted", threadId);
|
||||
return SCE_OK;
|
||||
}
|
||||
CHECK_EMU_STATUS;
|
||||
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack
|
||||
}
|
||||
|
||||
if (pExitStatus)
|
||||
{
|
||||
*pExitStatus = thread.GPR[0];
|
||||
*pExitStatus = thread->GPR[0];
|
||||
}
|
||||
|
||||
return SCE_OK;
|
||||
|
@@ -179,24 +179,25 @@ namespace sce_libc_func

std::lock_guard<std::mutex> lock(g_atexit_mutex);

if (!Emu.IsStopped())
CHECK_EMU_STATUS;

for (auto func : decltype(g_atexit)(std::move(g_atexit)))
{
for (auto func : decltype(g_atexit)(std::move(g_atexit)))
{
func(context);
}
func(context);
}

sceLibc.Success("Process finished");
sceLibc.Success("Process finished");

CallAfter([]()
{
Emu.Stop();
});
CallAfter([]()
{
Emu.Stop();
});

while (!Emu.IsStopped())
{
std::this_thread::sleep_for(std::chrono::milliseconds(1));
}
while (true)
{
CHECK_EMU_STATUS;

std::this_thread::sleep_for(std::chrono::milliseconds(1));
}
}

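Both the old and the new exit path drain g_atexit by move-constructing a temporary from it in the range-for header, which leaves the global list empty while the captured handlers run. A stripped-down illustration of that idiom, with generic names in place of the emulator's globals:

#include <functional>
#include <mutex>
#include <vector>

std::mutex g_handlers_mutex;
std::vector<std::function<void()>> g_handlers; // analogous to g_atexit

void run_exit_handlers()
{
    std::lock_guard<std::mutex> lock(g_handlers_mutex);

    // Move-constructing the range expression empties the global list up front;
    // the temporary lives for the whole loop, so every captured handler runs.
    for (auto& func : decltype(g_handlers)(std::move(g_handlers)))
    {
        func();
    }
}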
@@ -70,10 +70,8 @@ void execute_psv_func_by_index(ARMv7Context& context, u32 index)
{
if (auto func = get_psv_func_by_index(index))
{
ARMv7Thread& CPU = static_cast<ARMv7Thread&>(context);

auto old_last_syscall = CPU.m_last_syscall;
CPU.m_last_syscall = func->nid;
const u32 old_func = context.hle_func;
context.hle_func = func->nid;

if (func->func)
{
@@ -90,7 +88,7 @@ void execute_psv_func_by_index(ARMv7Context& context, u32 index)
func->module->on_error(context.GPR[0], func);
}

CPU.m_last_syscall = old_last_syscall;
context.hle_func = old_func;
}
else
{

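Instead of recording the current NID in CPUThread::m_last_syscall, the dispatcher now stashes it in ARMv7Context::hle_func, saving the previous value and restoring it after the call so nested HLE calls unwind correctly; ARMv7Thread::DumpInformation() earlier in this commit reads the same field. A tiny scope-guard sketch of that save/restore (the commit does it with explicit assignments, which behaves the same as long as no exception escapes between them):

#include <cstdint>

// Scope-guard equivalent of the explicit save/restore in the hunk above.
struct hle_scope_sketch
{
    std::uint32_t& slot;  // e.g. context.hle_func
    std::uint32_t saved;

    hle_scope_sketch(std::uint32_t& s, std::uint32_t nid) : slot(s), saved(s) { slot = nid; }
    ~hle_scope_sketch() { slot = saved; } // restored on every exit path
};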
@@ -3,346 +3,208 @@
#include "Utilities/Log.h"
#include "Emu/Memory/Memory.h"
#include "Emu/System.h"
#include "Emu/IdManager.h"
#include "Emu/DbgCommand.h"
#include "Emu/SysCalls/SysCalls.h"
#include "Emu/ARMv7/PSVFuncList.h"

#include "CPUThreadManager.h"
#include "CPUDecoder.h"
#include "CPUThread.h"

CPUThread::CPUThread(CPUThreadType type)
: ThreadBase("CPUThread")
, m_events(0)
CPUThread::CPUThread(CPUThreadType type, const std::string& name, std::function<std::string()> thread_name)
: m_state({ CPU_STATE_STOP })
, m_id(Emu.GetIdManager().get_current_id())
, m_type(type)
, m_stack_size(0)
, m_stack_addr(0)
, m_prio(0)
, m_dec(nullptr)
, m_is_step(false)
, m_is_branch(false)
, m_status(Stopped)
, m_last_syscall(0)
, m_trace_enabled(false)
, m_trace_call_stack(true)
, m_name(name)
{
offset = 0;
start(thread_name, [this]
{
std::unique_lock<std::mutex> lock(mutex);

// check thread status
while (joinable() && IsActive())
{
CHECK_EMU_STATUS;

// check stop status
if (!IsStopped())
{
if (lock) lock.unlock();

try
{
Task();
}
catch (CPUThreadReturn)
{
}
catch (CPUThreadStop)
{
m_state |= CPU_STATE_STOP;
}
catch (CPUThreadExit)
{
m_state |= CPU_STATE_DEAD;
break;
}

cv.notify_one();
continue;
}

if (!lock) lock.lock();

cv.wait_for(lock, std::chrono::milliseconds(1));
}

cv.notify_all();
});

SendDbgCommand(DID_CREATE_THREAD, this);
}

CPUThread::~CPUThread()
{
safe_delete(m_dec);
if (joinable())
{
throw EXCEPTION("Thread not joined");
}

SendDbgCommand(DID_REMOVE_THREAD, this);
}

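The constructor is where "ThreadBase rewritten" shows most clearly: instead of inheriting a virtual driver from ThreadBase, CPUThread now hands thread_t::start() a name functor plus a lambda that owns the whole scheduling loop, waiting on the condition variable while stopped or paused, running Task() otherwise, and translating the control exceptions (CPUThreadReturn / CPUThreadStop / CPUThreadExit) back into state flags. A reduced model of that exception-to-flag translation, with placeholder names:

#include <atomic>
#include <cstdint>

enum : std::uint64_t { STATE_STOP = 1ull << 0, STATE_DEAD = 1ull << 4 };

struct stop_request {}; // analogous to CPUThreadStop
struct exit_request {}; // analogous to CPUThreadExit

// One pass of the scheduling loop: control-flow exceptions thrown by the task
// become state bits instead of escaping into the thread entry point.
inline bool run_once(std::atomic<std::uint64_t>& state, void (*task)())
{
    try
    {
        task();
    }
    catch (const stop_request&)
    {
        state |= STATE_STOP; // parked; Exec() can clear the bit and resume it
    }
    catch (const exit_request&)
    {
        state |= STATE_DEAD; // finished for good
        return false;
    }
    return true;
}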
void CPUThread::DumpInformation()
|
||||
bool CPUThread::IsPaused() const
|
||||
{
|
||||
auto get_syscall_name = [this](u64 syscall) -> std::string
|
||||
{
|
||||
switch (GetType())
|
||||
{
|
||||
case CPU_THREAD_ARMv7:
|
||||
{
|
||||
if ((u32)syscall == syscall)
|
||||
{
|
||||
if (syscall)
|
||||
{
|
||||
if (auto func = get_psv_func_by_nid((u32)syscall))
|
||||
{
|
||||
return func->name;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
return{};
|
||||
}
|
||||
}
|
||||
return (m_state.load() & CPU_STATE_PAUSE) != 0 || Emu.IsPaused();
|
||||
}
|
||||
|
||||
return "unknown function";
|
||||
}
|
||||
|
||||
case CPU_THREAD_PPU:
|
||||
{
|
||||
if (syscall)
|
||||
{
|
||||
return SysCalls::GetFuncName(syscall);
|
||||
}
|
||||
else
|
||||
{
|
||||
return{};
|
||||
}
|
||||
}
|
||||
|
||||
case CPU_THREAD_SPU:
|
||||
case CPU_THREAD_RAW_SPU:
|
||||
default:
|
||||
{
|
||||
if (!syscall)
|
||||
{
|
||||
return{};
|
||||
}
|
||||
|
||||
return "unknown function";
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
LOG_ERROR(GENERAL, "Information: is_alive=%d, m_last_syscall=0x%llx (%s)", IsAlive(), m_last_syscall, get_syscall_name(m_last_syscall));
|
||||
void CPUThread::DumpInformation() const
|
||||
{
|
||||
LOG_WARNING(GENERAL, RegsToString());
|
||||
}
|
||||
|
||||
bool CPUThread::IsRunning() const { return m_status == Running; }
|
||||
bool CPUThread::IsPaused() const { return m_status == Paused; }
|
||||
bool CPUThread::IsStopped() const { return m_status == Stopped; }
|
||||
|
||||
void CPUThread::Close()
|
||||
{
|
||||
ThreadBase::Stop(false);
|
||||
DoStop();
|
||||
|
||||
delete m_dec;
|
||||
m_dec = nullptr;
|
||||
}
|
||||
|
||||
void CPUThread::Reset()
|
||||
{
|
||||
CloseStack();
|
||||
|
||||
SetPc(0);
|
||||
m_is_branch = false;
|
||||
|
||||
m_status = Stopped;
|
||||
|
||||
DoReset();
|
||||
}
|
||||
|
||||
void CPUThread::SetId(const u32 id)
|
||||
{
|
||||
m_id = id;
|
||||
}
|
||||
|
||||
void CPUThread::SetName(const std::string& name)
|
||||
{
|
||||
NamedThreadBase::SetThreadName(name);
|
||||
}
|
||||
|
||||
int CPUThread::ThreadStatus()
|
||||
{
|
||||
if(Emu.IsStopped() || IsStopped() || IsPaused())
|
||||
{
|
||||
return CPUThread_Stopped;
|
||||
}
|
||||
|
||||
if(TestDestroy())
|
||||
{
|
||||
return CPUThread_Break;
|
||||
}
|
||||
|
||||
if(m_is_step)
|
||||
{
|
||||
return CPUThread_Step;
|
||||
}
|
||||
|
||||
if (Emu.IsPaused())
|
||||
{
|
||||
return CPUThread_Sleeping;
|
||||
}
|
||||
|
||||
return CPUThread_Running;
|
||||
}
|
||||
|
||||
void CPUThread::SetEntry(const u32 pc)
|
||||
{
|
||||
entry = pc;
|
||||
}
|
||||
|
||||
void CPUThread::NextPc(u32 instr_size)
|
||||
{
|
||||
if(m_is_branch)
|
||||
{
|
||||
m_is_branch = false;
|
||||
|
||||
SetPc(nPC);
|
||||
}
|
||||
else
|
||||
{
|
||||
PC += instr_size;
|
||||
}
|
||||
}
|
||||
|
||||
void CPUThread::SetBranch(const u32 pc, bool record_branch)
|
||||
{
|
||||
m_is_branch = true;
|
||||
nPC = pc;
|
||||
|
||||
if(m_trace_call_stack && record_branch)
|
||||
CallStackBranch(pc);
|
||||
}
|
||||
|
||||
void CPUThread::SetPc(const u32 pc)
|
||||
{
|
||||
PC = pc;
|
||||
}
|
||||
|
||||
void CPUThread::Run()
|
||||
{
|
||||
if(!IsStopped())
|
||||
Stop();
|
||||
|
||||
Reset();
|
||||
|
||||
SendDbgCommand(DID_START_THREAD, this);
|
||||
|
||||
m_status = Running;
|
||||
|
||||
SetPc(entry);
|
||||
InitStack();
|
||||
InitRegs();
|
||||
DoRun();
|
||||
Emu.CheckStatus();
|
||||
|
||||
SendDbgCommand(DID_STARTED_THREAD, this);
|
||||
}
|
||||
|
||||
void CPUThread::Resume()
|
||||
{
|
||||
if(!IsPaused()) return;
|
||||
|
||||
SendDbgCommand(DID_RESUME_THREAD, this);
|
||||
|
||||
m_status = Running;
|
||||
DoResume();
|
||||
Emu.CheckStatus();
|
||||
m_state &= ~CPU_STATE_PAUSE;
|
||||
|
||||
ThreadBase::Start();
|
||||
cv.notify_one();
|
||||
|
||||
SendDbgCommand(DID_RESUMED_THREAD, this);
|
||||
}
|
||||
|
||||
void CPUThread::Pause()
|
||||
{
|
||||
if(!IsRunning()) return;
|
||||
|
||||
SendDbgCommand(DID_PAUSE_THREAD, this);
|
||||
|
||||
m_status = Paused;
|
||||
DoPause();
|
||||
Emu.CheckStatus();
|
||||
m_state |= CPU_STATE_PAUSE;
|
||||
|
||||
cv.notify_one();
|
||||
|
||||
// ThreadBase::Stop(); // "Abort() called" exception
|
||||
SendDbgCommand(DID_PAUSED_THREAD, this);
|
||||
}
|
||||
|
||||
void CPUThread::Stop()
|
||||
{
|
||||
if(IsStopped()) return;
|
||||
|
||||
SendDbgCommand(DID_STOP_THREAD, this);
|
||||
|
||||
m_status = Stopped;
|
||||
m_events |= CPU_EVENT_STOP;
|
||||
|
||||
if(static_cast<NamedThreadBase*>(this) != GetCurrentNamedThread())
|
||||
if (is_current())
|
||||
{
|
||||
ThreadBase::Stop();
|
||||
throw CPUThreadStop{};
|
||||
}
|
||||
else
|
||||
{
|
||||
m_state |= CPU_STATE_STOP;
|
||||
|
||||
Emu.CheckStatus();
|
||||
cv.notify_one();
|
||||
}
|
||||
|
||||
SendDbgCommand(DID_STOPED_THREAD, this);
|
||||
}
|
||||
|
||||
void CPUThread::Exec()
|
||||
{
|
||||
m_is_step = false;
|
||||
SendDbgCommand(DID_EXEC_THREAD, this);
|
||||
|
||||
if(IsRunning())
|
||||
ThreadBase::Start();
|
||||
m_state &= ~CPU_STATE_STOP;
|
||||
|
||||
cv.notify_one();
|
||||
}
|
||||
|
||||
void CPUThread::ExecOnce()
|
||||
void CPUThread::Exit()
|
||||
{
|
||||
m_is_step = true;
|
||||
SendDbgCommand(DID_EXEC_THREAD, this);
|
||||
|
||||
m_status = Running;
|
||||
ThreadBase::Start();
|
||||
ThreadBase::Stop(true,false);
|
||||
m_status = Paused;
|
||||
SendDbgCommand(DID_PAUSE_THREAD, this);
|
||||
SendDbgCommand(DID_PAUSED_THREAD, this);
|
||||
}
|
||||
|
||||
void CPUThread::Task()
|
||||
{
|
||||
if (Ini.HLELogging.GetValue()) LOG_NOTICE(GENERAL, "%s enter", CPUThread::GetFName().c_str());
|
||||
|
||||
const std::vector<u64>& bp = Emu.GetBreakPoints();
|
||||
|
||||
for (uint i = 0; i<bp.size(); ++i)
|
||||
if (is_current())
|
||||
{
|
||||
if (bp[i] == offset + PC)
|
||||
{
|
||||
Emu.Pause();
|
||||
break;
|
||||
}
|
||||
throw CPUThreadExit{};
|
||||
}
|
||||
else
|
||||
{
|
||||
throw EXCEPTION("Unable to exit another thread");
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<u32> trace;
|
||||
void CPUThread::Step()
|
||||
{
|
||||
m_state.atomic_op([](u64& state)
|
||||
{
|
||||
state |= CPU_STATE_STEP;
|
||||
state &= ~CPU_STATE_PAUSE;
|
||||
});
|
||||
|
||||
cv.notify_one();
|
||||
}
|
||||
|
||||
void CPUThread::Sleep()
|
||||
{
|
||||
m_state ^= CPU_STATE_SLEEP;
|
||||
|
||||
cv.notify_one();
|
||||
}
|
||||
|
||||
void CPUThread::Awake()
|
||||
{
|
||||
m_state ^= CPU_STATE_SLEEP;
|
||||
|
||||
cv.notify_one();
|
||||
}
|
||||
|
||||
bool CPUThread::CheckStatus()
|
||||
{
|
||||
std::unique_lock<std::mutex> lock(mutex, std::defer_lock);
|
||||
|
||||
while (true)
|
||||
{
|
||||
int status = ThreadStatus();
|
||||
CHECK_EMU_STATUS; // check at least once
|
||||
|
||||
if (status == CPUThread_Stopped || status == CPUThread_Break)
|
||||
{
|
||||
break;
|
||||
}
|
||||
if (!IsPaused()) break;
|
||||
|
||||
if (status == CPUThread_Sleeping)
|
||||
{
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack
|
||||
continue;
|
||||
}
|
||||
if (!lock) lock.lock();
|
||||
|
||||
Step();
|
||||
//if (m_trace_enabled)
|
||||
//trace.push_back(PC);
|
||||
NextPc(m_dec->DecodeMemory(PC + offset));
|
||||
|
||||
if (status == CPUThread_Step)
|
||||
{
|
||||
m_is_step = false;
|
||||
break;
|
||||
}
|
||||
|
||||
for (uint i = 0; i < bp.size(); ++i)
|
||||
{
|
||||
if (bp[i] == PC)
|
||||
{
|
||||
Emu.Pause();
|
||||
break;
|
||||
}
|
||||
}
|
||||
cv.wait_for(lock, std::chrono::milliseconds(1));
|
||||
}
|
||||
|
||||
if (trace.size())
|
||||
if (IsStopped())
|
||||
{
|
||||
LOG_NOTICE(GENERAL, "Trace begin (%d elements)", trace.size());
|
||||
|
||||
u32 start = trace[0], prev = trace[0] - 4;
|
||||
|
||||
for (auto& v : trace) //LOG_NOTICE(GENERAL, "PC = 0x%x", v);
|
||||
{
|
||||
if (v - prev != 4 && v - prev != 2)
|
||||
{
|
||||
LOG_NOTICE(GENERAL, "Trace: 0x%08x .. 0x%08x", start, prev);
|
||||
start = v;
|
||||
}
|
||||
prev = v;
|
||||
}
|
||||
|
||||
LOG_NOTICE(GENERAL, "Trace end: 0x%08x .. 0x%08x", start, prev);
|
||||
return true;
|
||||
}
|
||||
|
||||
if (Ini.HLELogging.GetValue()) LOG_NOTICE(GENERAL, "%s leave", CPUThread::GetFName().c_str());
|
||||
if (m_state.load() & CPU_STATE_STEP)
|
||||
{
|
||||
// set PAUSE, but allow to execute once
|
||||
m_state |= CPU_STATE_PAUSE;
|
||||
m_state &= ~CPU_STATE_STEP;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
@@ -1,7 +1,8 @@
#pragma once

#include "Utilities/Thread.h"

enum CPUThreadType : unsigned char
enum CPUThreadType
{
CPU_THREAD_PPU,
CPU_THREAD_SPU,
@@ -9,80 +10,85 @@ enum CPUThreadType : unsigned char
CPU_THREAD_ARMv7,
};

enum CPUThreadStatus
{
CPUThread_Ready,
CPUThread_Running,
CPUThread_Paused,
CPUThread_Stopped,
CPUThread_Sleeping,
CPUThread_Break,
CPUThread_Step,
};

// CPU Thread Events
// CPU Thread State Flags
enum : u64
{
CPU_EVENT_STOP = (1ull << 0),
CPU_STATE_STOP = (1ull << 0), // basic execution state (stopped by default), removed by Exec()
CPU_STATE_PAUSE = (1ull << 1), // paused by debugger (manually or after step execution)
CPU_STATE_SLEEP = (1ull << 2),
CPU_STATE_STEP = (1ull << 3),
CPU_STATE_DEAD = (1ull << 4),
};

// "HLE return" exception event
class CPUThreadReturn{};

// CPUThread::Stop exception event
class CPUThreadStop{};

// CPUThread::Exit exception event
class CPUThreadExit{};

class CPUDecoder;

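The single CPU_EVENT_STOP event gives way to a small bitmask kept in an atomic<u64>: STOP and PAUSE park the thread, SLEEP is flipped by Sleep()/Awake(), STEP requests a single instruction, and DEAD marks the loop as finished. Because the flags share one word, a "may I run?" test is a single atomic load. A sketch of how such flags are typically queried and updated, assuming this layout:

#include <atomic>
#include <cstdint>

using u64 = std::uint64_t;

enum : u64
{
    STATE_STOP  = 1ull << 0,
    STATE_PAUSE = 1ull << 1,
    STATE_STEP  = 1ull << 3,
    STATE_DEAD  = 1ull << 4,
};

struct state_sketch
{
    std::atomic<u64> state{ STATE_STOP };

    bool is_stopped() const { return (state.load() & STATE_STOP) != 0; }
    bool is_active()  const { return (state.load() & STATE_DEAD) == 0; }

    void step() // mirror of Step(): request one instruction, drop the pause
    {
        u64 expected = state.load();
        while (!state.compare_exchange_weak(expected, (expected | STATE_STEP) & ~STATE_PAUSE)) {}
    }
};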
class CPUThread : public ThreadBase
|
||||
class CPUThread : protected thread_t
|
||||
{
|
||||
protected:
|
||||
std::atomic<u64> m_events; // flags
|
||||
atomic<u64> m_state; // thread state flags
|
||||
|
||||
u32 m_status;
|
||||
u32 m_id;
|
||||
u64 m_prio;
|
||||
CPUThreadType m_type;
|
||||
bool m_joinable;
|
||||
bool m_joining;
|
||||
bool m_is_step;
|
||||
std::unique_ptr<CPUDecoder> m_dec;
|
||||
|
||||
u32 m_stack_addr;
|
||||
u32 m_stack_size;
|
||||
|
||||
u64 m_exit_status;
|
||||
|
||||
CPUDecoder* m_dec;
|
||||
|
||||
bool m_trace_call_stack;
|
||||
|
||||
virtual void DumpInformation() override;
|
||||
const u32 m_id;
|
||||
const CPUThreadType m_type;
|
||||
const std::string m_name; // changing m_name would be terribly thread-unsafe in current implementation
|
||||
|
||||
public:
|
||||
void AddEvent(const u64 event) { m_events |= event; }
|
||||
using thread_t::mutex;
|
||||
using thread_t::cv;
|
||||
|
||||
protected:
|
||||
CPUThread(CPUThreadType type, const std::string& name, std::function<std::string()> thread_name);
|
||||
|
||||
public:
|
||||
virtual ~CPUThread() override;
|
||||
|
||||
u32 GetId() const { return m_id; }
|
||||
CPUThreadType GetType() const { return m_type; }
|
||||
std::string GetName() const { return m_name; }
|
||||
|
||||
bool IsActive() const { return (m_state.load() & CPU_STATE_DEAD) == 0; }
|
||||
bool IsStopped() const { return (m_state.load() & CPU_STATE_STOP) != 0; }
|
||||
virtual bool IsPaused() const;
|
||||
|
||||
virtual void DumpInformation() const;
|
||||
virtual u32 GetPC() const = 0;
|
||||
virtual u32 GetOffset() const = 0;
|
||||
virtual void DoRun() = 0;
|
||||
virtual void Task() = 0;
|
||||
|
||||
virtual void InitRegs() = 0;
|
||||
|
||||
virtual void InitStack() = 0;
|
||||
virtual void CloseStack() = 0;
|
||||
|
||||
u32 GetStackAddr() const { return m_stack_addr; }
|
||||
u32 GetStackSize() const { return m_stack_size; }
|
||||
void Run();
|
||||
void Pause();
|
||||
void Resume();
|
||||
void Stop();
|
||||
void Exec();
|
||||
void Exit();
|
||||
void Step(); // set STEP status, don't use
|
||||
void Sleep(); // flip SLEEP status, don't use
|
||||
void Awake(); // flip SLEEP status, don't use
|
||||
bool CheckStatus(); // process m_state flags, returns true if must return from Task()
|
||||
|
||||
void SetStackAddr(u32 stack_addr) { m_stack_addr = stack_addr; }
|
||||
void SetStackSize(u32 stack_size) { m_stack_size = stack_size; }
|
||||
|
||||
void SetId(const u32 id);
|
||||
void SetName(const std::string& name);
|
||||
void SetPrio(const u64 prio) { m_prio = prio; }
|
||||
void SetExitStatus(const u64 status) { m_exit_status = status; }
|
||||
|
||||
u64 GetPrio() const { return m_prio; }
|
||||
u64 GetExitStatus() const { return m_exit_status; }
|
||||
|
||||
std::string GetName() const { return NamedThreadBase::GetThreadName(); }
|
||||
std::string GetFName() const
|
||||
{
|
||||
return fmt::format("%s[0x%x] Thread (%s)", GetTypeString(), m_id, GetName());
|
||||
return fmt::format("%s[0x%x] Thread (%s)", GetTypeString(), m_id, m_name);
|
||||
}
|
||||
|
||||
static std::string CPUThreadTypeToString(CPUThreadType type)
|
||||
static const char* CPUThreadTypeToString(CPUThreadType type)
|
||||
{
|
||||
switch(type)
|
||||
switch (type)
|
||||
{
|
||||
case CPU_THREAD_PPU: return "PPU";
|
||||
case CPU_THREAD_SPU: return "SPU";
|
||||
@ -93,82 +99,38 @@ public:
|
||||
return "Unknown";
|
||||
}
|
||||
|
||||
std::string ThreadStatusToString()
|
||||
const char* ThreadStatusToString()
|
||||
{
|
||||
switch (ThreadStatus())
|
||||
{
|
||||
case CPUThread_Ready: return "Ready";
|
||||
case CPUThread_Running: return "Running";
|
||||
case CPUThread_Paused: return "Paused";
|
||||
case CPUThread_Stopped: return "Stopped";
|
||||
case CPUThread_Sleeping: return "Sleeping";
|
||||
case CPUThread_Break: return "Break";
|
||||
case CPUThread_Step: return "Step";
|
||||
// TODO
|
||||
|
||||
default: return "Unknown status";
|
||||
}
|
||||
//switch (ThreadStatus())
|
||||
//{
|
||||
//case CPUThread_Ready: return "Ready";
|
||||
//case CPUThread_Running: return "Running";
|
||||
//case CPUThread_Paused: return "Paused";
|
||||
//case CPUThread_Stopped: return "Stopped";
|
||||
//case CPUThread_Sleeping: return "Sleeping";
|
||||
//case CPUThread_Break: return "Break";
|
||||
//case CPUThread_Step: return "Step";
|
||||
//}
|
||||
|
||||
return "Unknown";
|
||||
}
|
||||
|
||||
std::string GetTypeString() const { return CPUThreadTypeToString(m_type); }
|
||||
|
||||
virtual std::string GetThreadName() const
|
||||
const char* GetTypeString() const
|
||||
{
|
||||
return fmt::format("%s[0x%08x]", GetFName(), PC);
|
||||
return CPUThreadTypeToString(m_type);
|
||||
}
|
||||
|
||||
CPUDecoder * GetDecoder() { return m_dec; };
|
||||
CPUDecoder* GetDecoder()
|
||||
{
|
||||
return m_dec.get();
|
||||
};
|
||||
|
||||
public:
|
||||
u32 entry;
|
||||
u32 PC;
|
||||
u32 nPC;
|
||||
u32 index;
|
||||
u32 offset;
|
||||
bool m_is_branch;
|
||||
bool m_trace_enabled;
|
||||
u64 m_last_syscall;
|
||||
|
||||
protected:
|
||||
CPUThread(CPUThreadType type);
|
||||
|
||||
public:
|
||||
virtual ~CPUThread();
|
||||
|
||||
int ThreadStatus();
|
||||
|
||||
void NextPc(u32 instr_size);
|
||||
void SetBranch(const u32 pc, bool record_branch = false);
|
||||
void SetPc(const u32 pc);
|
||||
void SetEntry(const u32 entry);
|
||||
|
||||
bool IsRunning() const;
|
||||
bool IsPaused() const;
|
||||
bool IsStopped() const;
|
||||
|
||||
bool IsJoinable() const { return m_joinable; }
|
||||
bool IsJoining() const { return m_joining; }
|
||||
void SetJoinable(bool joinable) { m_joinable = joinable; }
|
||||
void SetJoining(bool joining) { m_joining = joining; }
|
||||
|
||||
u32 GetId() const { return m_id; }
|
||||
CPUThreadType GetType() const { return m_type; }
|
||||
|
||||
void SetCallStackTracing(bool trace_call_stack) { m_trace_call_stack = trace_call_stack; }
|
||||
|
||||
void Reset();
|
||||
void Close();
|
||||
void Run();
|
||||
void Pause();
|
||||
void Resume();
|
||||
void Stop();
|
||||
|
||||
virtual std::string RegsToString() = 0;
|
||||
virtual std::string ReadRegString(const std::string& reg) = 0;
|
||||
virtual std::string RegsToString() const = 0;
|
||||
virtual std::string ReadRegString(const std::string& reg) const = 0;
|
||||
virtual bool WriteRegString(const std::string& reg, std::string value) = 0;
|
||||
|
||||
virtual void Exec();
|
||||
void ExecOnce();
|
||||
|
||||
struct CallStackItem
|
||||
{
|
||||
u32 pc;
|
||||
@ -207,7 +169,7 @@ public:
|
||||
CallStackItem new_item;
|
||||
|
||||
new_item.branch_pc = pc;
|
||||
new_item.pc = PC;
|
||||
new_item.pc = GetPC();
|
||||
|
||||
m_call_stack.push_back(new_item);
|
||||
}
|
||||
@ -216,17 +178,6 @@ public:
|
||||
{
|
||||
return pc + 4;
|
||||
}
|
||||
|
||||
protected:
|
||||
virtual void DoReset()=0;
|
||||
virtual void DoRun()=0;
|
||||
virtual void DoPause()=0;
|
||||
virtual void DoResume()=0;
|
||||
virtual void DoStop()=0;
|
||||
|
||||
protected:
|
||||
virtual void Step() {}
|
||||
virtual void Task();
|
||||
};
|
||||
|
||||
class cpu_thread
|
||||
@ -235,32 +186,32 @@ protected:
|
||||
std::shared_ptr<CPUThread> thread;
|
||||
|
||||
public:
|
||||
u32 get_entry() const
|
||||
{
|
||||
return thread->entry;
|
||||
}
|
||||
//u32 get_entry() const
|
||||
//{
|
||||
// return thread->entry;
|
||||
//}
|
||||
|
||||
virtual cpu_thread& args(std::initializer_list<std::string> values) = 0;
|
||||
|
||||
virtual cpu_thread& run() = 0;
|
||||
|
||||
u64 join()
|
||||
{
|
||||
if (!joinable())
|
||||
throw "thread must be joinable for join";
|
||||
//u64 join()
|
||||
//{
|
||||
// if (!joinable())
|
||||
// throw "thread must be joinable for join";
|
||||
|
||||
thread->SetJoinable(false);
|
||||
// thread->SetJoinable(false);
|
||||
|
||||
while (thread->IsRunning())
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack
|
||||
// while (thread->IsRunning())
|
||||
// std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack
|
||||
|
||||
return thread->GetExitStatus();
|
||||
}
|
||||
// return thread->GetExitStatus();
|
||||
//}
|
||||
|
||||
bool joinable() const
|
||||
{
|
||||
return thread->IsJoinable();
|
||||
}
|
||||
//bool joinable() const
|
||||
//{
|
||||
// return thread->IsJoinable();
|
||||
//}
|
||||
|
||||
u32 get_id() const
|
||||
{
|
||||
|
@ -2,13 +2,13 @@
|
||||
#include "Emu/Memory/Memory.h"
|
||||
#include "Emu/System.h"
|
||||
#include "Emu/DbgCommand.h"
|
||||
|
||||
#include "Emu/IdManager.h"
|
||||
#include "CPUThreadManager.h"
|
||||
|
||||
#include "Emu/Cell/PPUThread.h"
|
||||
#include "Emu/Cell/SPUThread.h"
|
||||
#include "Emu/Cell/RawSPUThread.h"
|
||||
#include "Emu/ARMv7/ARMv7Thread.h"
|
||||
#include "CPUThreadManager.h"
|
||||
|
||||
CPUThreadManager::CPUThreadManager()
|
||||
{
|
||||
@ -16,128 +16,84 @@ CPUThreadManager::CPUThreadManager()
|
||||
|
||||
CPUThreadManager::~CPUThreadManager()
|
||||
{
|
||||
Close();
|
||||
}
|
||||
|
||||
void CPUThreadManager::Close()
|
||||
{
|
||||
while(m_threads.size()) RemoveThread(m_threads[0]->GetId());
|
||||
std::lock_guard<std::mutex> lock(m_mutex);
|
||||
|
||||
for (auto& x : m_raw_spu)
|
||||
{
|
||||
x.reset();
|
||||
}
|
||||
}
|
||||
|
||||
std::shared_ptr<CPUThread> CPUThreadManager::AddThread(CPUThreadType type)
|
||||
std::vector<std::shared_ptr<CPUThread>> CPUThreadManager::GetAllThreads() const
|
||||
{
|
||||
std::vector<std::shared_ptr<CPUThread>> result;
|
||||
|
||||
for (auto& v : Emu.GetIdManager().get_data<PPUThread>())
|
||||
{
|
||||
result.emplace_back(std::static_pointer_cast<CPUThread>(v.data));
|
||||
}
|
||||
|
||||
for (auto& v : Emu.GetIdManager().get_data<SPUThread>())
|
||||
{
|
||||
result.emplace_back(std::static_pointer_cast<CPUThread>(v.data));
|
||||
}
|
||||
|
||||
for (auto& v : Emu.GetIdManager().get_data<RawSPUThread>())
|
||||
{
|
||||
result.emplace_back(std::static_pointer_cast<CPUThread>(v.data));
|
||||
}
|
||||
|
||||
for (auto& v : Emu.GetIdManager().get_data<ARMv7Thread>())
|
||||
{
|
||||
result.emplace_back(std::static_pointer_cast<CPUThread>(v.data));
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
void CPUThreadManager::Exec() const
|
||||
{
|
||||
for (auto& v : Emu.GetIdManager().get_data<PPUThread>())
|
||||
{
|
||||
static_cast<CPUThread*>(v.data.get())->Exec();
|
||||
}
|
||||
|
||||
for (auto& v : Emu.GetIdManager().get_data<ARMv7Thread>())
|
||||
{
|
||||
static_cast<CPUThread*>(v.data.get())->Exec();
|
||||
}
|
||||
}
|
||||
|
||||
std::shared_ptr<RawSPUThread> CPUThreadManager::NewRawSPUThread()
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(m_mutex);
|
||||
|
||||
std::shared_ptr<CPUThread> new_thread;
|
||||
std::shared_ptr<RawSPUThread> result;
|
||||
|
||||
switch(type)
|
||||
for (u32 i = 0; i < m_raw_spu.size(); i++)
|
||||
{
|
||||
case CPU_THREAD_PPU:
|
||||
{
|
||||
new_thread = std::make_shared<PPUThread>();
|
||||
break;
|
||||
}
|
||||
case CPU_THREAD_SPU:
|
||||
{
|
||||
new_thread = std::make_shared<SPUThread>();
|
||||
break;
|
||||
}
|
||||
case CPU_THREAD_RAW_SPU:
|
||||
{
|
||||
for (u32 i = 0; i < m_raw_spu.size(); i++)
|
||||
if (m_raw_spu[i].expired())
|
||||
{
|
||||
if (!m_raw_spu[i])
|
||||
{
|
||||
new_thread = std::make_shared<RawSPUThread>();
|
||||
new_thread->index = i;
|
||||
|
||||
m_raw_spu[i] = new_thread;
|
||||
break;
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
case CPU_THREAD_ARMv7:
|
||||
{
|
||||
new_thread.reset(new ARMv7Thread());
|
||||
break;
|
||||
}
|
||||
default: assert(0);
|
||||
}
|
||||
|
||||
if (new_thread)
|
||||
{
|
||||
new_thread->SetId(Emu.GetIdManager().add(new_thread));
|
||||
|
||||
m_threads.push_back(new_thread);
|
||||
SendDbgCommand(DID_CREATE_THREAD, new_thread.get());
|
||||
}
|
||||
|
||||
return new_thread;
|
||||
}
|
||||
|
||||
void CPUThreadManager::RemoveThread(u32 id)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(m_mutex);
|
||||
|
||||
std::shared_ptr<CPUThread> thr;
|
||||
u32 thread_index = 0;
|
||||
|
||||
for (u32 i = 0; i < m_threads.size(); ++i)
|
||||
{
|
||||
if (m_threads[i]->GetId() != id) continue;
|
||||
|
||||
thr = m_threads[i];
|
||||
thread_index = i;
|
||||
}
|
||||
|
||||
if (thr)
|
||||
{
|
||||
SendDbgCommand(DID_REMOVE_THREAD, thr.get());
|
||||
thr->Close();
|
||||
|
||||
m_threads.erase(m_threads.begin() + thread_index);
|
||||
|
||||
if (thr->GetType() == CPU_THREAD_RAW_SPU)
|
||||
{
|
||||
assert(thr->index < m_raw_spu.size());
|
||||
m_raw_spu[thr->index] = nullptr;
|
||||
m_raw_spu[i] = result = Emu.GetIdManager().make_ptr<RawSPUThread>("RawSPU " + std::to_string(i), i);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Removing the ID should trigger the actual deletion of the thread
|
||||
Emu.GetIdManager().remove<CPUThread>(id);
|
||||
Emu.CheckStatus();
|
||||
return result;
|
||||
}
|
||||
|
||||
std::shared_ptr<CPUThread> CPUThreadManager::GetThread(u32 id)
|
||||
{
|
||||
return Emu.GetIdManager().get<CPUThread>(id);
|
||||
}
|
||||
|
||||
std::shared_ptr<CPUThread> CPUThreadManager::GetThread(u32 id, CPUThreadType type)
|
||||
{
|
||||
const auto res = GetThread(id);
|
||||
|
||||
return res && res->GetType() == type ? res : nullptr;
|
||||
}
|
||||
|
||||
std::shared_ptr<CPUThread> CPUThreadManager::GetRawSPUThread(u32 index)
|
||||
std::shared_ptr<RawSPUThread> CPUThreadManager::GetRawSPUThread(u32 index)
|
||||
{
|
||||
if (index >= m_raw_spu.size())
|
||||
{
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
return m_raw_spu[index];
|
||||
}
|
||||
|
||||
void CPUThreadManager::Exec()
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(m_mutex);
|
||||
|
||||
for(u32 i = 0; i < m_threads.size(); ++i)
|
||||
{
|
||||
m_threads[i]->Exec();
|
||||
}
|
||||
return m_raw_spu[index].lock();
|
||||
}
|
||||
|
@@ -1,14 +1,13 @@
#pragma once

class CPUThread;
enum CPUThreadType : unsigned char;
class RawSPUThread;

class CPUThreadManager
class CPUThreadManager final
{
std::mutex m_mutex;

std::vector<std::shared_ptr<CPUThread>> m_threads;
std::array<std::shared_ptr<CPUThread>, 5> m_raw_spu;
std::array<std::weak_ptr<RawSPUThread>, 5> m_raw_spu;

public:
CPUThreadManager();
@@ -16,16 +15,11 @@ public:

void Close();

std::shared_ptr<CPUThread> AddThread(CPUThreadType type);
std::vector<std::shared_ptr<CPUThread>> GetAllThreads() const;

void RemoveThread(u32 id);
void Exec() const;

std::vector<std::shared_ptr<CPUThread>> GetThreads() { std::lock_guard<std::mutex> lock(m_mutex); return m_threads; }
std::shared_ptr<RawSPUThread> NewRawSPUThread();

std::shared_ptr<CPUThread> GetThread(u32 id);
std::shared_ptr<CPUThread> GetThread(u32 id, CPUThreadType type);
std::shared_ptr<CPUThread> GetRawSPUThread(u32 index);

void Exec();
void Task();
std::shared_ptr<RawSPUThread> GetRawSPUThread(u32 index);
};

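m_raw_spu changes from shared_ptr, which kept every Raw SPU alive inside the manager, to weak_ptr: ownership now lives with the ID manager, and the five fixed slots only remember which index is occupied. GetRawSPUThread() therefore has to lock() its slot, and NewRawSPUThread() (in the .cpp hunks above this header) reuses any slot whose thread has expired. A compact sketch of that slot-reuse pattern:

#include <array>
#include <memory>

struct raw_spu_sketch { unsigned index; explicit raw_spu_sketch(unsigned i) : index(i) {} };

std::array<std::weak_ptr<raw_spu_sketch>, 5> g_slots;

std::shared_ptr<raw_spu_sketch> new_raw_spu()
{
    for (unsigned i = 0; i < g_slots.size(); i++)
    {
        if (g_slots[i].expired()) // slot is free (never used or already destroyed)
        {
            auto thread = std::make_shared<raw_spu_sketch>(i);
            g_slots[i] = thread;  // the registry does not extend the lifetime
            return thread;
        }
    }
    return nullptr; // all five Raw SPUs are in use
}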
@ -1462,7 +1462,7 @@ void ppu_interpreter::BC(PPUThread& CPU, ppu_opcode_t op)
|
||||
if (ctr_ok && cond_ok)
|
||||
{
|
||||
const u32 nextLR = CPU.PC + 4;
|
||||
CPU.SetBranch(PPUOpcodes::branchTarget((op.aa ? 0 : CPU.PC), op.simm16), op.lk);
|
||||
CPU.PC = PPUOpcodes::branchTarget((op.aa ? 0 : CPU.PC), op.simm16) - 4;
|
||||
if (op.lk) CPU.LR = nextLR;
|
||||
}
|
||||
}
|
||||
@ -1485,7 +1485,7 @@ void ppu_interpreter::SC(PPUThread& CPU, ppu_opcode_t op)
|
||||
void ppu_interpreter::B(PPUThread& CPU, ppu_opcode_t op)
|
||||
{
|
||||
const u32 nextLR = CPU.PC + 4;
|
||||
CPU.SetBranch(PPUOpcodes::branchTarget(op.aa ? 0 : CPU.PC, op.ll), op.lk);
|
||||
CPU.PC = PPUOpcodes::branchTarget(op.aa ? 0 : CPU.PC, op.ll) - 4;
|
||||
if (op.lk) CPU.LR = nextLR;
|
||||
}
|
||||
|
||||
@ -1509,7 +1509,7 @@ void ppu_interpreter::BCLR(PPUThread& CPU, ppu_opcode_t op)
|
||||
if (ctr_ok && cond_ok)
|
||||
{
|
||||
const u32 nextLR = CPU.PC + 4;
|
||||
CPU.SetBranch(PPUOpcodes::branchTarget(0, (u32)CPU.LR), true);
|
||||
CPU.PC = PPUOpcodes::branchTarget(0, (u32)CPU.LR) - 4;
|
||||
if (op.lk) CPU.LR = nextLR;
|
||||
}
|
||||
}
|
||||
@ -1572,7 +1572,7 @@ void ppu_interpreter::BCCTR(PPUThread& CPU, ppu_opcode_t op)
|
||||
if (op.bo & 0x10 || CPU.IsCR(op.bi) == ((op.bo & 0x8) != 0))
|
||||
{
|
||||
const u32 nextLR = CPU.PC + 4;
|
||||
CPU.SetBranch(PPUOpcodes::branchTarget(0, (u32)CPU.CTR), true);
|
||||
CPU.PC = PPUOpcodes::branchTarget(0, (u32)CPU.CTR) - 4;
|
||||
if (op.lk) CPU.LR = nextLR;
|
||||
}
|
||||
}
|
||||
|
@ -2226,7 +2226,7 @@ private:
|
||||
if (CheckCondition(bo, bi))
|
||||
{
|
||||
const u32 nextLR = CPU.PC + 4;
|
||||
CPU.SetBranch(branchTarget((aa ? 0 : CPU.PC), bd), lk);
|
||||
CPU.PC = branchTarget((aa ? 0 : CPU.PC), bd) - 4;
|
||||
if(lk) CPU.LR = nextLR;
|
||||
}
|
||||
}
|
||||
@ -2247,7 +2247,7 @@ private:
|
||||
void B(s32 ll, u32 aa, u32 lk)
|
||||
{
|
||||
const u32 nextLR = CPU.PC + 4;
|
||||
CPU.SetBranch(branchTarget(aa ? 0 : CPU.PC, ll), lk);
|
||||
CPU.PC = branchTarget(aa ? 0 : CPU.PC, ll) - 4;
|
||||
if(lk) CPU.LR = nextLR;
|
||||
}
|
||||
void MCRF(u32 crfd, u32 crfs)
|
||||
@ -2259,7 +2259,7 @@ private:
|
||||
if (CheckCondition(bo, bi))
|
||||
{
|
||||
const u32 nextLR = CPU.PC + 4;
|
||||
CPU.SetBranch(branchTarget(0, (u32)CPU.LR), true);
|
||||
CPU.PC = branchTarget(0, (u32)CPU.LR) - 4;
|
||||
if(lk) CPU.LR = nextLR;
|
||||
}
|
||||
}
|
||||
@ -2312,7 +2312,7 @@ private:
|
||||
if(bo & 0x10 || CPU.IsCR(bi) == ((bo & 0x8) != 0))
|
||||
{
|
||||
const u32 nextLR = CPU.PC + 4;
|
||||
CPU.SetBranch(branchTarget(0, (u32)CPU.CTR), true);
|
||||
CPU.PC = branchTarget(0, (u32)CPU.CTR) - 4;
|
||||
if(lk) CPU.LR = nextLR;
|
||||
}
|
||||
}
|
||||
|
@ -3,6 +3,7 @@
|
||||
#include "Utilities/Log.h"
|
||||
#include "Emu/Memory/Memory.h"
|
||||
#include "Emu/System.h"
|
||||
#include "Emu/IdManager.h"
|
||||
#include "Emu/Cell/PPUThread.h"
|
||||
#include "Emu/SysCalls/SysCalls.h"
|
||||
#include "Emu/SysCalls/Modules.h"
|
||||
@ -489,47 +490,39 @@ void fill_ppu_exec_map(u32 addr, u32 size)
|
||||
}
|
||||
}
|
||||
|
||||
PPUThread::PPUThread() : CPUThread(CPU_THREAD_PPU)
|
||||
PPUThread::PPUThread(const std::string& name)
|
||||
: CPUThread(CPU_THREAD_PPU, name, [this]{ return fmt::format("%s[0x%x] Thread (%s)[0x%08x]", GetTypeString(), GetId(), GetName(), PC); })
|
||||
{
|
||||
Reset();
|
||||
InitRotateMask();
|
||||
}
|
||||
|
||||
PPUThread::~PPUThread()
|
||||
{
|
||||
ppu_free_tls(GetId());
|
||||
cv.notify_one();
|
||||
join();
|
||||
|
||||
ppu_free_tls(m_id);
|
||||
}
|
||||
|
||||
void PPUThread::DoReset()
|
||||
void PPUThread::DumpInformation() const
|
||||
{
|
||||
//reset regs
|
||||
memset(VPR, 0, sizeof(VPR));
|
||||
memset(FPR, 0, sizeof(FPR));
|
||||
memset(GPR, 0, sizeof(GPR));
|
||||
memset(SPRG, 0, sizeof(SPRG));
|
||||
if (hle_code < 0)
|
||||
{
|
||||
LOG_SUCCESS(HLE, "Information: syscall %lld (%s)", ~hle_code, SysCalls::GetFuncName(hle_code));
|
||||
}
|
||||
|
||||
CR.CR = 0;
|
||||
LR = 0;
|
||||
CTR = 0;
|
||||
TB = 0;
|
||||
XER.XER = 0;
|
||||
FPSCR.FPSCR = 0;
|
||||
VSCR.VSCR = 0;
|
||||
VRSAVE = 0;
|
||||
if (hle_code > 0)
|
||||
{
|
||||
LOG_SUCCESS(HLE, "Information: function 0x%llx (%s)", hle_code, SysCalls::GetFuncName(hle_code));
|
||||
}
|
||||
|
||||
CPUThread::DumpInformation();
|
||||
}
|
||||
|
||||
void PPUThread::InitRegs()
|
||||
{
|
||||
const u32 pc = entry ? vm::read32(entry).value() : 0;
|
||||
const u32 rtoc = entry ? vm::read32(entry + 4).value() : 0;
|
||||
|
||||
SetPc(pc);
|
||||
|
||||
GPR[1] = align(m_stack_addr + m_stack_size, 0x200) - 0x200;
|
||||
GPR[2] = rtoc;
|
||||
//GPR[11] = entry;
|
||||
//GPR[12] = Emu.GetMallocPageSize();
|
||||
GPR[13] = ppu_get_tls(GetId()) + 0x7000; // 0x7000 is usually subtracted from r13 to access first TLS element (details are not clear)
|
||||
GPR[1] = align(stack_addr + stack_size, 0x200) - 0x200;
|
||||
GPR[13] = ppu_get_tls(m_id) + 0x7000; // 0x7000 is subtracted from r13 to access first TLS element
|
||||
|
||||
LR = 0;
|
||||
CTR = PC;
|
||||
@ -540,32 +533,40 @@ void PPUThread::InitRegs()
|
||||
|
||||
void PPUThread::InitStack()
|
||||
{
|
||||
if (!m_stack_addr)
|
||||
if (!stack_addr)
|
||||
{
|
||||
assert(m_stack_size);
|
||||
m_stack_addr = Memory.StackMem.AllocAlign(m_stack_size, 4096);
|
||||
if (!stack_size)
|
||||
{
|
||||
throw EXCEPTION("Invalid stack size");
|
||||
}
|
||||
|
||||
stack_addr = Memory.StackMem.AllocAlign(stack_size, 4096);
|
||||
|
||||
if (!stack_addr)
|
||||
{
|
||||
throw EXCEPTION("Out of stack memory");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void PPUThread::CloseStack()
|
||||
{
|
||||
if (m_stack_addr)
|
||||
if (stack_addr)
|
||||
{
|
||||
Memory.StackMem.Free(m_stack_addr);
|
||||
m_stack_addr = 0;
|
||||
Memory.StackMem.Free(stack_addr);
|
||||
stack_addr = 0;
|
||||
}
|
||||
}
|
||||
|
||||
void PPUThread::DoRun()
|
||||
{
|
||||
m_dec = nullptr;
|
||||
m_dec.reset();
|
||||
|
||||
switch (auto mode = Ini.CPUDecoderMode.GetValue())
|
||||
{
|
||||
case 0: // original interpreter
|
||||
{
|
||||
auto ppui = new PPUInterpreter(*this);
|
||||
m_dec = new PPUDecoder(ppui);
|
||||
m_dec.reset(new PPUDecoder(new PPUInterpreter(*this)));
|
||||
break;
|
||||
}
|
||||
|
||||
@ -575,18 +576,18 @@ void PPUThread::DoRun()
|
||||
}
|
||||
|
||||
case 2:
|
||||
{
|
||||
#ifdef PPU_LLVM_RECOMPILER
|
||||
SetCallStackTracing(false);
|
||||
if (!m_dec) {
|
||||
m_dec = new ppu_recompiler_llvm::ExecutionEngine(*this);
|
||||
}
|
||||
m_dec.reset(new ppu_recompiler_llvm::ExecutionEngine(*this));
|
||||
#else
|
||||
LOG_ERROR(PPU, "This image does not include PPU JIT (LLVM)");
|
||||
Emu.Pause();
|
||||
#endif
|
||||
break;
|
||||
break;
|
||||
}
|
||||
|
||||
//case 3: m_dec = new PPURecompiler(*this); break;
|
||||
//case 3: m_dec.reset(new PPURecompiler(*this)); break;
|
||||
|
||||
default:
|
||||
{
|
||||
@ -596,20 +597,6 @@ void PPUThread::DoRun()
|
||||
}
|
||||
}
|
||||
|
||||
void PPUThread::DoResume()
|
||||
{
|
||||
}
|
||||
|
||||
void PPUThread::DoPause()
|
||||
{
|
||||
}
|
||||
|
||||
void PPUThread::DoStop()
|
||||
{
|
||||
delete m_dec;
|
||||
m_dec = nullptr;
|
||||
}
|
||||
|
||||
bool FPRdouble::IsINF(PPCdouble d)
|
||||
{
|
||||
return ((u64&)d & 0x7FFFFFFFFFFFFFFFULL) == 0x7FF0000000000000ULL;
|
||||
@ -652,42 +639,45 @@ u64 PPUThread::GetStackArg(s32 i)
|
||||
|
||||
void PPUThread::FastCall2(u32 addr, u32 rtoc)
|
||||
{
|
||||
auto old_status = m_status;
|
||||
if (!is_current())
|
||||
{
|
||||
throw EXCEPTION("Called from the wrong thread");
|
||||
}
|
||||
|
||||
auto old_PC = PC;
|
||||
auto old_stack = GPR[1];
|
||||
auto old_rtoc = GPR[2];
|
||||
auto old_LR = LR;
|
||||
auto old_thread = GetCurrentNamedThread();
|
||||
auto old_task = decltype(custom_task)();
|
||||
|
||||
m_status = Running;
|
||||
PC = addr;
|
||||
GPR[2] = rtoc;
|
||||
LR = Emu.GetCPUThreadStop();
|
||||
SetCurrentNamedThread(this);
|
||||
custom_task.swap(old_task);
|
||||
|
||||
Task();
|
||||
try
|
||||
{
|
||||
Task();
|
||||
}
|
||||
catch (CPUThreadReturn)
|
||||
{
|
||||
}
|
||||
|
||||
m_status = old_status;
|
||||
PC = old_PC;
|
||||
|
||||
if (GPR[1] != old_stack && !Emu.IsStopped()) // GPR[1] shouldn't change
|
||||
if (GPR[1] != old_stack) // GPR[1] shouldn't change
|
||||
{
|
||||
LOG_ERROR(PPU, "PPUThread::FastCall2(0x%x,0x%x): stack inconsistency (SP=0x%llx, old=0x%llx)", addr, rtoc, GPR[1], old_stack);
|
||||
GPR[1] = old_stack;
|
||||
throw EXCEPTION("Stack inconsistency (addr=0x%x, rtoc=0x%x, SP=0x%llx, old=0x%llx)", addr, rtoc, GPR[1], old_stack);
|
||||
}
|
||||
|
||||
GPR[2] = old_rtoc;
|
||||
LR = old_LR;
|
||||
SetCurrentNamedThread(old_thread);
|
||||
custom_task.swap(old_task);
|
||||
}
|
||||
|
||||
void PPUThread::FastStop()
|
||||
{
|
||||
m_status = Stopped;
|
||||
m_events |= CPU_EVENT_STOP;
|
||||
throw CPUThreadReturn{};
|
||||
}
|
||||
|
||||
void PPUThread::Task()
|
||||
@ -696,54 +686,59 @@ void PPUThread::Task()
|
||||
|
||||
if (custom_task)
|
||||
{
|
||||
if (CheckStatus()) return;
|
||||
|
||||
return custom_task(*this);
|
||||
}
|
||||
|
||||
if (m_dec)
|
||||
{
|
||||
return CPUThread::Task();
|
||||
}
|
||||
|
||||
while (true)
|
||||
{
|
||||
// get interpreter function
|
||||
const auto func = g_ppu_inter_func_list[*(u32*)((u8*)g_ppu_exec_map + PC)];
|
||||
|
||||
if (m_events)
|
||||
while (true)
|
||||
{
|
||||
// process events
|
||||
if (Emu.IsStopped())
|
||||
{
|
||||
return;
|
||||
}
|
||||
if (m_state.load() && CheckStatus()) return;
|
||||
|
||||
if (m_events & CPU_EVENT_STOP && (IsStopped() || IsPaused()))
|
||||
{
|
||||
m_events &= ~CPU_EVENT_STOP;
|
||||
return;
|
||||
}
|
||||
// decode instruction using specified decoder
|
||||
m_dec->DecodeMemory(PC);
|
||||
|
||||
// next instruction
|
||||
PC += 4;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
while (true)
|
||||
{
|
||||
if (m_state.load() && CheckStatus()) return;
|
||||
|
||||
// read opcode
|
||||
const ppu_opcode_t opcode = { vm::read32(PC) };
|
||||
// get interpreter function
|
||||
const auto func = g_ppu_inter_func_list[*(u32*)((u8*)g_ppu_exec_map + PC)];
|
||||
|
||||
// call interpreter function
|
||||
func(*this, opcode);
|
||||
// read opcode
|
||||
const ppu_opcode_t opcode = { vm::read32(PC) };
|
||||
|
||||
// next instruction
|
||||
//PC += 4;
|
||||
NextPc(4);
|
||||
// call interpreter function
|
||||
func(*this, opcode);
|
||||
|
||||
// next instruction
|
||||
PC += 4;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ppu_thread::ppu_thread(u32 entry, const std::string& name, u32 stack_size, u32 prio)
|
||||
ppu_thread::ppu_thread(u32 entry, const std::string& name, u32 stack_size, s32 prio)
|
||||
{
|
||||
thread = Emu.GetCPU().AddThread(CPU_THREAD_PPU);
|
||||
auto ppu = Emu.GetIdManager().make_ptr<PPUThread>(name);
|
||||
|
||||
thread->SetName(name);
|
||||
thread->SetEntry(entry);
|
||||
thread->SetStackSize(stack_size ? stack_size : Emu.GetPrimaryStackSize());
|
||||
thread->SetPrio(prio ? prio : Emu.GetPrimaryPrio());
|
||||
if (entry)
|
||||
{
|
||||
ppu->PC = vm::read32(entry);
|
||||
ppu->GPR[2] = vm::read32(entry + 4); // rtoc
|
||||
}
|
||||
|
||||
ppu->stack_size = stack_size ? stack_size : Emu.GetPrimaryStackSize();
|
||||
ppu->prio = prio ? prio : Emu.GetPrimaryPrio();
|
||||
|
||||
thread = std::move(ppu);
|
||||
|
||||
argc = 0;
|
||||
}
|
||||
|
@ -467,16 +467,16 @@ struct FPRdouble
|
||||
static int Cmp(PPCdouble a, PPCdouble b);
|
||||
};
|
||||
|
||||
class PPUThread : public CPUThread
|
||||
class PPUThread final : public CPUThread
|
||||
{
|
||||
public:
|
||||
PPCdouble FPR[32]; //Floating Point Register
|
||||
FPSCRhdr FPSCR; //Floating Point Status and Control Register
|
||||
u64 GPR[32]; //General-Purpose Register
|
||||
u128 VPR[32];
|
||||
u32 vpcr;
|
||||
PPCdouble FPR[32]{}; //Floating Point Register
|
||||
FPSCRhdr FPSCR{}; //Floating Point Status and Control Register
|
||||
u64 GPR[32]{}; //General-Purpose Register
|
||||
u128 VPR[32]{};
|
||||
u32 vpcr = 0;
|
||||
|
||||
CRhdr CR; //Condition Register
|
||||
CRhdr CR{}; //Condition Register
|
||||
//CR0
|
||||
// 0 : LT - Negative (is negative)
|
||||
// : 0 - Result is not negative
|
||||
@ -507,7 +507,7 @@ public:
|
||||
|
||||
//SPR : Special-Purpose Registers
|
||||
|
||||
XERhdr XER; //SPR 0x001 : Fixed-Point Expection Register
|
||||
XERhdr XER{}; //SPR 0x001 : Fixed-Point Expection Register
|
||||
// 0 : SO - Summary overflow
|
||||
// : 0 - No overflow occurred
|
||||
// : 1 - Overflow occurred
|
||||
@ -521,26 +521,45 @@ public:
|
||||
// 25 - 31 : TBC
|
||||
// Transfer-byte count
|
||||
|
||||
MSRhdr MSR; //Machine State Register
|
||||
PVRhdr PVR; //Processor Version Register
|
||||
MSRhdr MSR{}; //Machine State Register
|
||||
PVRhdr PVR{}; //Processor Version Register
|
||||
|
||||
VSCRhdr VSCR; // Vector Status and Control Register
|
||||
VSCRhdr VSCR{}; // Vector Status and Control Register
|
||||
|
||||
u64 LR; //SPR 0x008 : Link Register
|
||||
u64 CTR; //SPR 0x009 : Count Register
|
||||
u64 LR = 0; //SPR 0x008 : Link Register
|
||||
u64 CTR = 0; //SPR 0x009 : Count Register
|
||||
|
||||
u32 VRSAVE; //SPR 0x100: VR Save/Restore Register (32 bits)
|
||||
u32 VRSAVE = 0; //SPR 0x100: VR Save/Restore Register (32 bits)
|
||||
|
||||
u64 SPRG[8]; //SPR 0x110 - 0x117 : SPR General-Purpose Registers
|
||||
u64 SPRG[8]{}; //SPR 0x110 - 0x117 : SPR General-Purpose Registers
|
||||
|
||||
//TBR : Time-Base Registers
|
||||
u64 TB; //TBR 0x10C - 0x10D
|
||||
u64 TB = 0; //TBR 0x10C - 0x10D
|
||||
|
||||
u32 PC = 0;
|
||||
s32 prio = 0; // thread priority
|
||||
u32 stack_addr = 0; // stack address
|
||||
u32 stack_size = 0; // stack size
|
||||
bool is_joinable = true;
|
||||
bool is_joining = false;
|
||||
|
||||
s64 hle_code = 0; // current syscall (inverted value) or function id (positive value)
|
||||
|
||||
std::function<void(PPUThread& CPU)> custom_task;
|
||||
|
||||
public:
|
||||
PPUThread();
|
||||
virtual ~PPUThread();
|
||||
PPUThread(const std::string& name);
|
||||
virtual ~PPUThread() override;
|
||||
|
||||
virtual void DumpInformation() const override;
|
||||
virtual u32 GetPC() const override { return PC; }
|
||||
virtual u32 GetOffset() const override { return 0; }
|
||||
virtual void DoRun() override;
|
||||
virtual void Task() override;
|
||||
|
||||
virtual void InitRegs() override;
|
||||
virtual void InitStack() override;
|
||||
virtual void CloseStack() override;
|
||||
|
||||
inline u8 GetCR(const u8 n) const
|
||||
{
|
||||
@ -693,7 +712,7 @@ public:
|
||||
FPSCR.FI = val;
|
||||
}
|
||||
|
||||
virtual std::string RegsToString()
|
||||
virtual std::string RegsToString() const
|
||||
{
|
||||
std::string ret = "Registers:\n=========\n";
|
||||
|
||||
@ -721,7 +740,7 @@ public:
|
||||
return ret;
|
||||
}
|
||||
|
||||
virtual std::string ReadRegString(const std::string& reg)
|
||||
virtual std::string ReadRegString(const std::string& reg) const
|
||||
{
|
||||
std::string::size_type first_brk = reg.find('[');
|
||||
if (first_brk != std::string::npos)
|
||||
@ -807,20 +826,9 @@ public:
|
||||
}
|
||||
|
||||
public:
|
||||
virtual void InitRegs() override;
|
||||
virtual void InitStack() override;
|
||||
virtual void CloseStack() override;
|
||||
virtual void Task() override;
|
||||
u64 GetStackArg(s32 i);
|
||||
void FastCall2(u32 addr, u32 rtoc);
|
||||
void FastStop();
|
||||
virtual void DoRun() override;
|
||||
|
||||
protected:
|
||||
virtual void DoReset() override;
|
||||
virtual void DoPause() override;
|
||||
virtual void DoResume() override;
|
||||
virtual void DoStop() override;
|
||||
};
|
||||
|
||||
class ppu_thread : cpu_thread
|
||||
@ -831,7 +839,7 @@ class ppu_thread : cpu_thread
|
||||
vm::_ptr_base<be_t<u64>> envp;
|
||||
|
||||
public:
|
||||
ppu_thread(u32 entry, const std::string& name = "", u32 stack_size = 0, u32 prio = 0);
|
||||
ppu_thread(u32 entry, const std::string& name = "", u32 stack_size = 0, s32 prio = 0);
|
||||
|
||||
cpu_thread& args(std::initializer_list<std::string> values) override;
|
||||
cpu_thread& run() override;
|
||||
|
@@ -8,13 +8,18 @@

thread_local spu_mfc_arg_t raw_spu_mfc[8] = {};

RawSPUThread::RawSPUThread(CPUThreadType type)
: SPUThread(type)
RawSPUThread::RawSPUThread(const std::string& name, u32 index)
: SPUThread(CPU_THREAD_RAW_SPU, name, index, RAW_SPU_BASE_ADDR + RAW_SPU_OFFSET * index)
{
Memory.Map(offset, 0x40000);
}

RawSPUThread::~RawSPUThread()
{
cv.notify_one();
join();

Memory.Unmap(offset);
}

void RawSPUThread::start()
@@ -36,11 +41,7 @@ void RawSPUThread::start()

if (do_start)
{
// starting thread directly in SIGSEGV handler may cause problems
Emu.GetCallbackManager().Async([this](PPUThread& PPU)
{
FastRun();
});
Exec();
}
}

@@ -178,7 +179,7 @@ bool RawSPUThread::WriteReg(const u32 addr, const u32 value)
else if (value == SPU_RUNCNTL_STOP_REQUEST)
{
status &= ~SPU_STATUS_RUNNING;
FastStop();
Stop();
}
else
{

@@ -14,10 +14,10 @@ force_inline static u32 GetRawSPURegAddrByNum(int num, int offset)
return RAW_SPU_OFFSET * num + RAW_SPU_BASE_ADDR + RAW_SPU_PROB_OFFSET + offset;
}

class RawSPUThread : public SPUThread
class RawSPUThread final : public SPUThread
{
public:
RawSPUThread(CPUThreadType type = CPU_THREAD_RAW_SPU);
RawSPUThread(const std::string& name, u32 index);
virtual ~RawSPUThread();

void start();

@ -279,7 +279,7 @@ void spu_interpreter::BIZ(SPUThread& CPU, spu_opcode_t op)

if (CPU.GPR[op.rt]._u32[3] == 0)
{
CPU.SetBranch(SPUOpcodes::branchTarget(CPU.GPR[op.ra]._u32[3], 0));
CPU.PC = SPUOpcodes::branchTarget(CPU.GPR[op.ra]._u32[3], 0) - 4;
}
}
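The same SetBranch-to-PC rewrite repeats for every branch opcode in the hunks that follow. The "- 4" offset only makes sense together with the dispatch loop introduced later in this patch (SPUThread::Task); a minimal sketch of that loop, using names from the patch, purely for illustration:

// Illustrative sketch (not part of the patch): the interpreter loop advances PC
// unconditionally after each handler, so a taken branch stores target - 4 and
// lands exactly on target at the next iteration.
while (true)
{
    const spu_opcode_t opcode = { vm::read32(PC + offset) };  // fetch
    g_spu_inter_func_list[opcode.opcode](*this, opcode);      // execute; may set PC = target - 4
    PC += 4;                                                  // unconditional advance
}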
|
||||
|
||||
@ -292,7 +292,7 @@ void spu_interpreter::BINZ(SPUThread& CPU, spu_opcode_t op)
|
||||
|
||||
if (CPU.GPR[op.rt]._u32[3] != 0)
|
||||
{
|
||||
CPU.SetBranch(SPUOpcodes::branchTarget(CPU.GPR[op.ra]._u32[3], 0));
|
||||
CPU.PC = SPUOpcodes::branchTarget(CPU.GPR[op.ra]._u32[3], 0) - 4;
|
||||
}
|
||||
}
|
||||
|
||||
@ -305,7 +305,7 @@ void spu_interpreter::BIHZ(SPUThread& CPU, spu_opcode_t op)
|
||||
|
||||
if (CPU.GPR[op.rt]._u16[6] == 0)
|
||||
{
|
||||
CPU.SetBranch(SPUOpcodes::branchTarget(CPU.GPR[op.ra]._u32[3], 0));
|
||||
CPU.PC = SPUOpcodes::branchTarget(CPU.GPR[op.ra]._u32[3], 0) - 4;
|
||||
}
|
||||
}
|
||||
|
||||
@ -318,7 +318,7 @@ void spu_interpreter::BIHNZ(SPUThread& CPU, spu_opcode_t op)
|
||||
|
||||
if (CPU.GPR[op.rt]._u16[6] != 0)
|
||||
{
|
||||
CPU.SetBranch(SPUOpcodes::branchTarget(CPU.GPR[op.ra]._u32[3], 0));
|
||||
CPU.PC = SPUOpcodes::branchTarget(CPU.GPR[op.ra]._u32[3], 0) - 4;
|
||||
}
|
||||
}
|
||||
|
||||
@ -339,7 +339,7 @@ void spu_interpreter::BI(SPUThread& CPU, spu_opcode_t op)
|
||||
throw __FUNCTION__;
|
||||
}
|
||||
|
||||
CPU.SetBranch(SPUOpcodes::branchTarget(CPU.GPR[op.ra]._u32[3], 0));
|
||||
CPU.PC = SPUOpcodes::branchTarget(CPU.GPR[op.ra]._u32[3], 0) - 4;
|
||||
}
|
||||
|
||||
void spu_interpreter::BISL(SPUThread& CPU, spu_opcode_t op)
|
||||
@ -351,7 +351,7 @@ void spu_interpreter::BISL(SPUThread& CPU, spu_opcode_t op)
|
||||
|
||||
const u32 target = SPUOpcodes::branchTarget(CPU.GPR[op.ra]._u32[3], 0);
|
||||
CPU.GPR[op.rt] = u128::from32r(CPU.PC + 4);
|
||||
CPU.SetBranch(target);
|
||||
CPU.PC = target - 4;
|
||||
}
|
||||
|
||||
void spu_interpreter::IRET(SPUThread& CPU, spu_opcode_t op)
|
||||
@ -931,7 +931,7 @@ void spu_interpreter::BRZ(SPUThread& CPU, spu_opcode_t op)
|
||||
{
|
||||
if (CPU.GPR[op.rt]._u32[3] == 0)
|
||||
{
|
||||
CPU.SetBranch(SPUOpcodes::branchTarget(CPU.PC, op.i16));
|
||||
CPU.PC = SPUOpcodes::branchTarget(CPU.PC, op.i16) - 4;
|
||||
}
|
||||
}
|
||||
|
||||
@ -944,7 +944,7 @@ void spu_interpreter::BRNZ(SPUThread& CPU, spu_opcode_t op)
|
||||
{
|
||||
if (CPU.GPR[op.rt]._u32[3] != 0)
|
||||
{
|
||||
CPU.SetBranch(SPUOpcodes::branchTarget(CPU.PC, op.i16));
|
||||
CPU.PC = SPUOpcodes::branchTarget(CPU.PC, op.i16) - 4;
|
||||
}
|
||||
}
|
||||
|
||||
@ -952,7 +952,7 @@ void spu_interpreter::BRHZ(SPUThread& CPU, spu_opcode_t op)
|
||||
{
|
||||
if (CPU.GPR[op.rt]._u16[6] == 0)
|
||||
{
|
||||
CPU.SetBranch(SPUOpcodes::branchTarget(CPU.PC, op.i16));
|
||||
CPU.PC = SPUOpcodes::branchTarget(CPU.PC, op.i16) - 4;
|
||||
}
|
||||
}
|
||||
|
||||
@ -960,7 +960,7 @@ void spu_interpreter::BRHNZ(SPUThread& CPU, spu_opcode_t op)
|
||||
{
|
||||
if (CPU.GPR[op.rt]._u16[6] != 0)
|
||||
{
|
||||
CPU.SetBranch(SPUOpcodes::branchTarget(CPU.PC, op.i16));
|
||||
CPU.PC = SPUOpcodes::branchTarget(CPU.PC, op.i16) - 4;
|
||||
}
|
||||
}
|
||||
|
||||
@ -971,7 +971,7 @@ void spu_interpreter::STQR(SPUThread& CPU, spu_opcode_t op)
|
||||
|
||||
void spu_interpreter::BRA(SPUThread& CPU, spu_opcode_t op)
|
||||
{
|
||||
CPU.SetBranch(SPUOpcodes::branchTarget(0, op.i16));
|
||||
CPU.PC = SPUOpcodes::branchTarget(0, op.i16) - 4;
|
||||
}
|
||||
|
||||
void spu_interpreter::LQA(SPUThread& CPU, spu_opcode_t op)
|
||||
@ -983,12 +983,12 @@ void spu_interpreter::BRASL(SPUThread& CPU, spu_opcode_t op)
|
||||
{
|
||||
const u32 target = SPUOpcodes::branchTarget(0, op.i16);
|
||||
CPU.GPR[op.rt] = u128::from32r(CPU.PC + 4);
|
||||
CPU.SetBranch(target);
|
||||
CPU.PC = target - 4;
|
||||
}
|
||||
|
||||
void spu_interpreter::BR(SPUThread& CPU, spu_opcode_t op)
|
||||
{
|
||||
CPU.SetBranch(SPUOpcodes::branchTarget(CPU.PC, op.i16));
|
||||
CPU.PC = SPUOpcodes::branchTarget(CPU.PC, op.i16) - 4;
|
||||
}
|
||||
|
||||
void spu_interpreter::FSMBI(SPUThread& CPU, spu_opcode_t op)
|
||||
@ -1000,7 +1000,7 @@ void spu_interpreter::BRSL(SPUThread& CPU, spu_opcode_t op)
|
||||
{
|
||||
const u32 target = SPUOpcodes::branchTarget(CPU.PC, op.i16);
|
||||
CPU.GPR[op.rt] = u128::from32r(CPU.PC + 4);
|
||||
CPU.SetBranch(target);
|
||||
CPU.PC = target - 4;
|
||||
}
|
||||
|
||||
void spu_interpreter::LQR(SPUThread& CPU, spu_opcode_t op)
|
||||
|
@ -325,7 +325,7 @@ private:
|
||||
if (CPU.GPR[rt]._u32[3] == 0)
|
||||
{
|
||||
LOG5_OPCODE("taken (0x%x)", target);
|
||||
CPU.SetBranch(target);
|
||||
CPU.PC = target - 4;
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -344,7 +344,7 @@ private:
|
||||
if (CPU.GPR[rt]._u32[3] != 0)
|
||||
{
|
||||
LOG5_OPCODE("taken (0x%x)", target);
|
||||
CPU.SetBranch(target);
|
||||
CPU.PC = target - 4;
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -363,7 +363,7 @@ private:
|
||||
if (CPU.GPR[rt]._u16[6] == 0)
|
||||
{
|
||||
LOG5_OPCODE("taken (0x%x)", target);
|
||||
CPU.SetBranch(target);
|
||||
CPU.PC = target - 4;
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -382,7 +382,7 @@ private:
|
||||
if (CPU.GPR[rt]._u16[6] != 0)
|
||||
{
|
||||
LOG5_OPCODE("taken (0x%x)", target);
|
||||
CPU.SetBranch(target);
|
||||
CPU.PC = target - 4;
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -409,7 +409,7 @@ private:
|
||||
|
||||
u32 target = branchTarget(CPU.GPR[ra]._u32[3], 0);
|
||||
LOG5_OPCODE("branch (0x%x)", target);
|
||||
CPU.SetBranch(target);
|
||||
CPU.PC = target - 4;
|
||||
}
|
||||
void BISL(u32 intr, u32 rt, u32 ra)
|
||||
{
|
||||
@ -422,7 +422,7 @@ private:
|
||||
u32 target = branchTarget(CPU.GPR[ra]._u32[3], 0);
|
||||
CPU.GPR[rt] = u128::from32r(CPU.PC + 4);
|
||||
LOG5_OPCODE("branch (0x%x)", target);
|
||||
CPU.SetBranch(target);
|
||||
CPU.PC = target - 4;
|
||||
}
|
||||
void IRET(u32 ra)
|
||||
{
|
||||
@ -1536,7 +1536,7 @@ private:
|
||||
if (CPU.GPR[rt]._u32[3] == 0)
|
||||
{
|
||||
LOG5_OPCODE("taken (0x%x)", target);
|
||||
CPU.SetBranch(target);
|
||||
CPU.PC = target - 4;
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -1555,7 +1555,7 @@ private:
|
||||
if (CPU.GPR[rt]._u32[3] != 0)
|
||||
{
|
||||
LOG5_OPCODE("taken (0x%x)", target);
|
||||
CPU.SetBranch(target);
|
||||
CPU.PC = target - 4;
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -1568,7 +1568,7 @@ private:
|
||||
if (CPU.GPR[rt]._u16[6] == 0)
|
||||
{
|
||||
LOG5_OPCODE("taken (0x%x)", target);
|
||||
CPU.SetBranch(target);
|
||||
CPU.PC = target - 4;
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -1581,7 +1581,7 @@ private:
|
||||
if (CPU.GPR[rt]._u16[6] != 0)
|
||||
{
|
||||
LOG5_OPCODE("taken (0x%x)", target);
|
||||
CPU.SetBranch(target);
|
||||
CPU.PC = target - 4;
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -1598,7 +1598,7 @@ private:
|
||||
{
|
||||
u32 target = branchTarget(0, i16);
|
||||
LOG5_OPCODE("branch (0x%x)", target);
|
||||
CPU.SetBranch(target);
|
||||
CPU.PC = target - 4;
|
||||
}
|
||||
void LQA(u32 rt, s32 i16)
|
||||
{
|
||||
@ -1611,13 +1611,13 @@ private:
|
||||
u32 target = branchTarget(0, i16);
|
||||
CPU.GPR[rt] = u128::from32r(CPU.PC + 4);
|
||||
LOG5_OPCODE("branch (0x%x)", target);
|
||||
CPU.SetBranch(target);
|
||||
CPU.PC = target - 4;
|
||||
}
|
||||
void BR(s32 i16)
|
||||
{
|
||||
u32 target = branchTarget(CPU.PC, i16);
|
||||
LOG5_OPCODE("branch (0x%x)", target);
|
||||
CPU.SetBranch(target);
|
||||
CPU.PC = target - 4;
|
||||
}
|
||||
void FSMBI(u32 rt, s32 i16)
|
||||
{
|
||||
@ -1640,7 +1640,7 @@ private:
|
||||
u32 target = branchTarget(CPU.PC, i16);
|
||||
CPU.GPR[rt] = u128::from32r(CPU.PC + 4);
|
||||
LOG5_OPCODE("branch (0x%x)", target);
|
||||
CPU.SetBranch(target);
|
||||
CPU.PC = target - 4;
|
||||
}
|
||||
void LQR(u32 rt, s32 i16)
|
||||
{
|
||||
|
@ -225,9 +225,9 @@ u32 SPURecompilerCore::DecodeMemory(const u32 address)
|
||||
return 0;
|
||||
}
|
||||
|
||||
const auto func = asmjit_cast<u32(*)(SPUThread& _cpu, be_t<u32>* _ls, const void* _imm, const void* _g_imm)>(entry[pos].pointer);
|
||||
const auto func = asmjit_cast<u32(*)(SPUThread* _cpu, be_t<u32>* _ls, const void* _imm, const void* _g_imm)>(entry[pos].pointer);
|
||||
|
||||
u32 res = func(CPU, _ls, imm_table.data(), &g_spu_imm);
|
||||
u32 res = func(&CPU, _ls, imm_table.data(), &g_spu_imm);
|
||||
|
||||
if (res & 0x1000000)
|
||||
{
|
||||
@ -247,7 +247,7 @@ u32 SPURecompilerCore::DecodeMemory(const u32 address)
|
||||
}
|
||||
else
|
||||
{
|
||||
CPU.SetBranch((u64)res << 2);
|
||||
CPU.PC = (res << 2) - 4;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
@ -272,24 +272,22 @@ u32 SPURecompilerCore::DecodeMemory(const u32 address)

#define LOG4_OPCODE(...) //c.addComment(fmt::Format("SPU info: "__FUNCTION__"(): "__VA_ARGS__).c_str())

#define WRAPPER_BEGIN(a0, a1, a2, a3) struct opwr_##a0 \
#define WRAPPER_BEGIN(a0, a1, a2) struct opwr_##a0 \
{ \
static void opcode(u32 a0, u32 a1, u32 a2, u32 a3) \
{ \
SPUThread& CPU = *(SPUThread*)GetCurrentNamedThread();
static void opcode(SPUThread* CPU, u32 a0, u32 a1, u32 a2) \
{

#define WRAPPER_END(a0, a1, a2, a3) /*LOG2_OPCODE();*/ } \
#define WRAPPER_END(a0, a1, a2) /*LOG2_OPCODE();*/ } \
}; \
/*XmmRelease();*/ \
if (#a0[0] == 'r') XmmInvalidate(a0); \
if (#a1[0] == 'r') XmmInvalidate(a1); \
if (#a2[0] == 'r') XmmInvalidate(a2); \
if (#a3[0] == 'r') XmmInvalidate(a3); \
X86CallNode* call##a0 = c.call(imm_ptr(reinterpret_cast<void*>(&opwr_##a0::opcode)), kFuncConvHost, FuncBuilder4<void, u32, u32, u32, u32>()); \
call##a0->setArg(0, imm_u(a0)); \
call##a0->setArg(1, imm_u(a1)); \
call##a0->setArg(2, imm_u(a2)); \
call##a0->setArg(3, imm_u(a3)); \
X86CallNode* call##a0 = c.call(imm_ptr(reinterpret_cast<void*>(&opwr_##a0::opcode)), kFuncConvHost, FuncBuilder4<void, SPUThread*, u32, u32, u32>()); \
call##a0->setArg(0, *cpu_var); \
call##a0->setArg(1, imm_u(a0)); \
call##a0->setArg(2, imm_u(a1)); \
call##a0->setArg(3, imm_u(a2)); \
LOG3_OPCODE(/*#a0"=%d, "#a1"=%d, "#a2"=%d, "#a3"=%d", a0, a1, a2, a3*/);
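In short, the recompiled code now passes the SPUThread pointer as the first argument of every generated helper call instead of re-resolving it from thread-local storage inside the helper. A hedged before/after sketch of the idea (helper name and body are illustrative only, not from the patch):

// Before: each wrapper rediscovered the current SPU context via TLS on every call.
static void op_helper(u32 rt, u32 ra, u32 rb, u32 /*unused*/)
{
    SPUThread& CPU = *(SPUThread*)GetCurrentNamedThread(); // per-opcode TLS lookup
    CPU.GPR[rt] = CPU.GPR[ra];                             // ...do the work
}

// After: the JIT-emitted call forwards the context it already holds in cpu_var.
static void op_helper(SPUThread* CPU, u32 rt, u32 ra, u32 rb)
{
    CPU->GPR[rt] = CPU->GPR[ra];                           // no TLS lookup needed
}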
|
||||
|
||||
const SPURecompiler::XmmLink& SPURecompiler::XmmAlloc(s8 pref) // get empty xmm register
|
||||
@ -505,16 +503,16 @@ void SPURecompiler::STOP(u32 code)
|
||||
{
|
||||
struct STOP_wrapper
|
||||
{
|
||||
static void STOP(u32 code)
|
||||
static void STOP(SPUThread* CPU, u32 code)
|
||||
{
|
||||
SPUThread& CPU = *(SPUThread*)GetCurrentNamedThread();
|
||||
CPU.stop_and_signal(code);
|
||||
CPU->stop_and_signal(code);
|
||||
LOG2_OPCODE();
|
||||
}
|
||||
};
|
||||
c.mov(cpu_dword(PC), CPU.PC);
|
||||
X86CallNode* call = c.call(imm_ptr(reinterpret_cast<void*>(&STOP_wrapper::STOP)), kFuncConvHost, FuncBuilder1<void, u32>());
|
||||
call->setArg(0, imm_u(code));
|
||||
X86CallNode* call = c.call(imm_ptr(reinterpret_cast<void*>(&STOP_wrapper::STOP)), kFuncConvHost, FuncBuilder2<void, SPUThread*, u32>());
|
||||
call->setArg(0, *cpu_var);
|
||||
call->setArg(1, imm_u(code));
|
||||
c.mov(*pos_var, (CPU.PC >> 2) + 1);
|
||||
do_finalize = true;
|
||||
LOG_OPCODE();
|
||||
@ -550,18 +548,18 @@ void SPURecompiler::MFSPR(u32 rt, u32 sa)
|
||||
void SPURecompiler::RDCH(u32 rt, u32 ra)
|
||||
{
|
||||
c.mov(cpu_dword(PC), CPU.PC);
|
||||
WRAPPER_BEGIN(rt, ra, yy, zz);
|
||||
CPU.GPR[rt] = u128::from32r(CPU.get_ch_value(ra));
|
||||
WRAPPER_END(rt, ra, 0, 0);
|
||||
WRAPPER_BEGIN(rt, ra, zz);
|
||||
CPU->GPR[rt] = u128::from32r(CPU->get_ch_value(ra));
|
||||
WRAPPER_END(rt, ra, 0);
|
||||
// TODO
|
||||
}
|
||||
|
||||
void SPURecompiler::RCHCNT(u32 rt, u32 ra)
|
||||
{
|
||||
c.mov(cpu_dword(PC), CPU.PC);
|
||||
WRAPPER_BEGIN(rt, ra, yy, zz);
|
||||
CPU.GPR[rt] = u128::from32r(CPU.get_ch_count(ra));
|
||||
WRAPPER_END(rt, ra, 0, 0);
|
||||
WRAPPER_BEGIN(rt, ra, zz);
|
||||
CPU->GPR[rt] = u128::from32r(CPU->get_ch_count(ra));
|
||||
WRAPPER_END(rt, ra, 0);
|
||||
// TODO
|
||||
}
|
||||
|
||||
@ -964,9 +962,9 @@ void SPURecompiler::MTSPR(u32 rt, u32 sa)
|
||||
void SPURecompiler::WRCH(u32 ra, u32 rt)
|
||||
{
|
||||
c.mov(cpu_dword(PC), CPU.PC);
|
||||
WRAPPER_BEGIN(ra, rt, yy, zz);
|
||||
CPU.set_ch_value(ra, CPU.GPR[rt]._u32[3]);
|
||||
WRAPPER_END(ra, rt, 0, 0);
|
||||
WRAPPER_BEGIN(ra, rt, yy);
|
||||
CPU->set_ch_value(ra, CPU->GPR[rt]._u32[3]);
|
||||
WRAPPER_END(ra, rt, 0);
|
||||
// TODO
|
||||
|
||||
/*XmmInvalidate(rt);
|
||||
@ -2208,23 +2206,23 @@ void SPURecompiler::SFX(u32 rt, u32 ra, u32 rb)
|
||||
|
||||
void SPURecompiler::CGX(u32 rt, u32 ra, u32 rb) //nf
|
||||
{
|
||||
WRAPPER_BEGIN(rt, ra, rb, zz);
|
||||
WRAPPER_BEGIN(rt, ra, rb);
|
||||
for (int w = 0; w < 4; w++)
|
||||
CPU.GPR[rt]._u32[w] = ((u64)CPU.GPR[ra]._u32[w] + (u64)CPU.GPR[rb]._u32[w] + (u64)(CPU.GPR[rt]._u32[w] & 1)) >> 32;
|
||||
WRAPPER_END(rt, ra, rb, 0);
|
||||
CPU->GPR[rt]._u32[w] = ((u64)CPU->GPR[ra]._u32[w] + (u64)CPU->GPR[rb]._u32[w] + (u64)(CPU->GPR[rt]._u32[w] & 1)) >> 32;
|
||||
WRAPPER_END(rt, ra, rb);
|
||||
}
|
||||
|
||||
void SPURecompiler::BGX(u32 rt, u32 ra, u32 rb) //nf
|
||||
{
|
||||
WRAPPER_BEGIN(rt, ra, rb, zz);
|
||||
WRAPPER_BEGIN(rt, ra, rb);
|
||||
s64 nResult;
|
||||
|
||||
for (int w = 0; w < 4; w++)
|
||||
{
|
||||
nResult = (u64)CPU.GPR[rb]._u32[w] - (u64)CPU.GPR[ra]._u32[w] - (u64)(1 - (CPU.GPR[rt]._u32[w] & 1));
|
||||
CPU.GPR[rt]._u32[w] = nResult < 0 ? 0 : 1;
|
||||
nResult = (u64)CPU->GPR[rb]._u32[w] - (u64)CPU->GPR[ra]._u32[w] - (u64)(1 - (CPU->GPR[rt]._u32[w] & 1));
|
||||
CPU->GPR[rt]._u32[w] = nResult < 0 ? 0 : 1;
|
||||
}
|
||||
WRAPPER_END(rt, ra, rb, 0);
|
||||
WRAPPER_END(rt, ra, rb);
|
||||
}
|
||||
|
||||
void SPURecompiler::MPYHHA(u32 rt, u32 ra, u32 rb)
|
||||
|
@ -50,22 +50,56 @@ public:
|
||||
}
|
||||
}
|
||||
|
||||
force_inline spu_inter_func_t operator [] (u32 opcode) const
|
||||
force_inline spu_inter_func_t operator [](u32 opcode) const
|
||||
{
|
||||
return funcs[opcode >> 21];
|
||||
}
|
||||
}
|
||||
g_spu_inter_func_list;
|
||||
|
||||
SPUThread::SPUThread(CPUThreadType type) : CPUThread(type)
|
||||
SPUThread::SPUThread(CPUThreadType type, const std::string& name, u32 index, u32 offset)
|
||||
: CPUThread(type, name, [this]{ return fmt::format("%s[0x%x] Thread (%s)[0x%08x]", GetTypeString(), GetId(), GetName(), PC); })
|
||||
, index(index)
|
||||
, offset(offset)
|
||||
{
|
||||
assert(type == CPU_THREAD_SPU || type == CPU_THREAD_RAW_SPU);
|
||||
}
|
||||
|
||||
Reset();
|
||||
SPUThread::SPUThread(const std::string& name, u32 index, u32 offset)
|
||||
: CPUThread(CPU_THREAD_SPU, name, [this]{ return fmt::format("%s[0x%x] Thread (%s)[0x%08x]", GetTypeString(), GetId(), GetName(), PC); })
|
||||
, index(index)
|
||||
, offset(offset)
|
||||
{
|
||||
}
|
||||
|
||||
SPUThread::~SPUThread()
|
||||
{
|
||||
if (m_type == CPU_THREAD_SPU)
|
||||
{
|
||||
cv.notify_one();
|
||||
join();
|
||||
}
|
||||
else if (joinable())
|
||||
{
|
||||
throw EXCEPTION("Thread not joined");
|
||||
}
|
||||
}
|
||||
|
||||
bool SPUThread::IsPaused() const
|
||||
{
|
||||
if (const auto group = tg.lock())
|
||||
{
|
||||
if (group->state == SPU_THREAD_GROUP_STATUS_WAITING || group->state == SPU_THREAD_GROUP_STATUS_SUSPENDED)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return CPUThread::IsPaused();
|
||||
}
|
||||
|
||||
void SPUThread::DumpInformation() const
|
||||
{
|
||||
CPUThread::DumpInformation();
|
||||
}
|
||||
|
||||
void SPUThread::Task()
|
||||
@ -74,49 +108,43 @@ void SPUThread::Task()
|
||||
|
||||
if (m_custom_task)
|
||||
{
|
||||
return m_custom_task(*this);
|
||||
if (CheckStatus()) return;
|
||||
|
||||
m_custom_task(*this);
|
||||
}
|
||||
|
||||
if (m_dec)
|
||||
{
|
||||
return CPUThread::Task();
|
||||
}
|
||||
|
||||
while (true)
|
||||
{
|
||||
// read opcode
|
||||
const spu_opcode_t opcode = { vm::read32(PC + offset) };
|
||||
|
||||
// get interpreter function
|
||||
const auto func = g_spu_inter_func_list[opcode.opcode];
|
||||
|
||||
if (m_events)
|
||||
while (true)
|
||||
{
|
||||
// process events
|
||||
if (Emu.IsStopped())
|
||||
{
|
||||
return;
|
||||
}
|
||||
if (m_state.load() && CheckStatus()) return;
|
||||
|
||||
if (m_events & CPU_EVENT_STOP && (IsStopped() || IsPaused()))
|
||||
{
|
||||
m_events &= ~CPU_EVENT_STOP;
|
||||
return;
|
||||
}
|
||||
// decode instruction using specified decoder
|
||||
m_dec->DecodeMemory(PC + offset);
|
||||
|
||||
// next instruction
|
||||
PC += 4;
|
||||
}
|
||||
|
||||
// call interpreter function
|
||||
func(*this, opcode);
|
||||
|
||||
// next instruction
|
||||
//PC += 4;
|
||||
NextPc(4);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
while (true)
|
||||
{
|
||||
if (m_state.load() && CheckStatus()) return;
|
||||
|
||||
void SPUThread::DoReset()
|
||||
{
|
||||
InitRegs();
|
||||
// read opcode
|
||||
const spu_opcode_t opcode = { vm::read32(PC + offset) };
|
||||
|
||||
// get interpreter function
|
||||
const auto func = g_spu_inter_func_list[opcode.opcode];
|
||||
|
||||
// call interpreter function
|
||||
func(*this, opcode);
|
||||
|
||||
// next instruction
|
||||
PC += 4;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void SPUThread::InitRegs()
|
||||
@ -160,8 +188,7 @@ void SPUThread::InitRegs()
|
||||
|
||||
void SPUThread::InitStack()
|
||||
{
|
||||
m_stack_size = 0x2000; // this value is wrong
|
||||
m_stack_addr = offset + 0x40000 - m_stack_size; // stack is the part of SPU Local Storage
|
||||
// nothing to do
|
||||
}
|
||||
|
||||
void SPUThread::CloseStack()
|
||||
@ -177,7 +204,7 @@ void SPUThread::DoRun()
|
||||
{
|
||||
case 0: // original interpreter
|
||||
{
|
||||
m_dec = new SPUDecoder(*new SPUInterpreter(*this));
|
||||
m_dec.reset(new SPUDecoder(*new SPUInterpreter(*this)));
|
||||
break;
|
||||
}
|
||||
|
||||
@ -189,7 +216,7 @@ void SPUThread::DoRun()
|
||||
|
||||
case 2:
|
||||
{
|
||||
m_dec = new SPURecompilerCore(*this);
|
||||
m_dec.reset(new SPURecompilerCore(*this));
|
||||
break;
|
||||
}
|
||||
|
||||
@ -201,27 +228,13 @@ void SPUThread::DoRun()
|
||||
}
|
||||
}
|
||||
|
||||
void SPUThread::DoResume()
|
||||
{
|
||||
}
|
||||
|
||||
void SPUThread::DoPause()
|
||||
{
|
||||
}
|
||||
|
||||
void SPUThread::DoStop()
|
||||
{
|
||||
delete m_dec;
|
||||
m_dec = nullptr;
|
||||
}
|
||||
|
||||
void SPUThread::DoClose()
|
||||
{
|
||||
}
|
||||
|
||||
void SPUThread::FastCall(u32 ls_addr)
|
||||
{
|
||||
// can't be called from another thread (because it doesn't make sense)
|
||||
if (!is_current())
|
||||
{
|
||||
throw EXCEPTION("Called from the wrong thread");
|
||||
}
|
||||
|
||||
write32(0x0, 2);
|
||||
|
||||
auto old_PC = PC;
|
||||
@ -229,12 +242,17 @@ void SPUThread::FastCall(u32 ls_addr)
|
||||
auto old_stack = GPR[1]._u32[3]; // only saved and restored (may be wrong)
|
||||
auto old_task = decltype(m_custom_task)();
|
||||
|
||||
m_status = Running;
|
||||
PC = ls_addr;
|
||||
GPR[0]._u32[3] = 0x0;
|
||||
m_custom_task.swap(old_task);
|
||||
|
||||
SPUThread::Task();
|
||||
try
|
||||
{
|
||||
Task();
|
||||
}
|
||||
catch (CPUThreadReturn)
|
||||
{
|
||||
}
|
||||
|
||||
PC = old_PC;
|
||||
GPR[0]._u32[3] = old_LR;
|
||||
@ -242,18 +260,6 @@ void SPUThread::FastCall(u32 ls_addr)
|
||||
m_custom_task.swap(old_task);
|
||||
}
|
||||
|
||||
void SPUThread::FastStop()
|
||||
{
|
||||
m_status = Stopped;
|
||||
m_events |= CPU_EVENT_STOP;
|
||||
}
|
||||
|
||||
void SPUThread::FastRun()
|
||||
{
|
||||
m_status = Running;
|
||||
Exec();
|
||||
}
|
||||
|
||||
void SPUThread::do_dma_transfer(u32 cmd, spu_mfc_arg_t args)
|
||||
{
|
||||
if (cmd & (MFC_BARRIER_MASK | MFC_FENCE_MASK))
|
||||
@ -418,7 +424,7 @@ void SPUThread::process_mfc_cmd(u32 cmd)
|
||||
vm::reservation_acquire(vm::get_ptr(offset + ch_mfc_args.lsa), vm::cast(ch_mfc_args.ea), 128, [this]()
|
||||
{
|
||||
ch_event_stat |= SPU_EVENT_LR;
|
||||
Notify();
|
||||
cv.notify_one();
|
||||
});
|
||||
|
||||
ch_atomic_stat.push_uncond(MFC_GETLLAR_SUCCESS);
|
||||
@ -517,6 +523,7 @@ u32 SPUThread::get_ch_value(u32 ch)
|
||||
case SPU_RdInMbox:
|
||||
{
|
||||
u32 result, count;
|
||||
|
||||
while (!ch_in_mbox.pop(result, count) && !Emu.IsStopped())
|
||||
{
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack
|
||||
@ -985,10 +992,9 @@ void SPUThread::stop_and_signal(u32 code)
|
||||
status &= ~SPU_STATUS_RUNNING;
|
||||
});
|
||||
|
||||
FastStop();
|
||||
|
||||
int2.set(SPU_INT2_STAT_SPU_STOP_AND_SIGNAL_INT);
|
||||
return;
|
||||
|
||||
return Stop();
|
||||
}
|
||||
|
||||
switch (code)
|
||||
@ -1001,8 +1007,7 @@ void SPUThread::stop_and_signal(u32 code)
|
||||
|
||||
case 0x002:
|
||||
{
|
||||
FastStop();
|
||||
return;
|
||||
throw CPUThreadReturn{};
|
||||
}
|
||||
|
||||
case 0x003:
|
||||
@ -1013,7 +1018,7 @@ void SPUThread::stop_and_signal(u32 code)
|
||||
auto return_to_caller = iter->second(*this);
|
||||
if (return_to_caller)
|
||||
{
|
||||
SetBranch(GPR[0]._u32[3] & 0x3fffc);
|
||||
PC = (GPR[0]._u32[3] & 0x3fffc) - 4;
|
||||
}
|
||||
return;
|
||||
}
|
||||
@ -1072,7 +1077,9 @@ void SPUThread::stop_and_signal(u32 code)
|
||||
return ch_in_mbox.push_uncond(CELL_ECANCELED);
|
||||
}
|
||||
|
||||
if (Emu.IsStopped())
|
||||
CHECK_EMU_STATUS;
|
||||
|
||||
if (IsStopped())
|
||||
{
|
||||
LOG_WARNING(SPU, "sys_spu_thread_receive_event(spuq=0x%x) aborted", spuq);
|
||||
return;
|
||||
@ -1126,11 +1133,9 @@ void SPUThread::stop_and_signal(u32 code)
|
||||
|
||||
for (auto t : group->threads)
|
||||
{
|
||||
if (t)
|
||||
if (t && t.get() != this)
|
||||
{
|
||||
auto& spu = static_cast<SPUThread&>(*t);
|
||||
|
||||
spu.FastStop();
|
||||
t->Stop();
|
||||
}
|
||||
}
|
||||
|
||||
@ -1138,7 +1143,8 @@ void SPUThread::stop_and_signal(u32 code)
|
||||
group->exit_status = value;
|
||||
group->join_state |= SPU_TGJSF_GROUP_EXIT;
|
||||
group->join_cv.notify_one();
|
||||
return;
|
||||
|
||||
return Stop();
|
||||
}
|
||||
|
||||
case 0x102:
|
||||
@ -1159,8 +1165,8 @@ void SPUThread::stop_and_signal(u32 code)
|
||||
LV2_LOCK;
|
||||
|
||||
status |= SPU_STATUS_STOPPED_BY_STOP;
|
||||
FastStop();
|
||||
return;
|
||||
|
||||
return Stop();
|
||||
}
|
||||
}
|
||||
|
||||
@ -1191,10 +1197,9 @@ void SPUThread::halt()
|
||||
status &= ~SPU_STATUS_RUNNING;
|
||||
});
|
||||
|
||||
FastStop();
|
||||
|
||||
int2.set(SPU_INT2_STAT_SPU_HALT_OR_STEP_INT);
|
||||
return;
|
||||
|
||||
return Stop();
|
||||
}
|
||||
|
||||
status |= SPU_STATUS_STOPPED_BY_HALT;
|
||||
@ -1203,10 +1208,9 @@ void SPUThread::halt()
|
||||
|
||||
spu_thread::spu_thread(u32 entry, const std::string& name, u32 stack_size, u32 prio)
|
||||
{
|
||||
thread = Emu.GetCPU().AddThread(CPU_THREAD_SPU);
|
||||
auto spu = Emu.GetIdManager().make_ptr<SPUThread>(name, 0, 0x10000);
|
||||
|
||||
thread->SetName(name);
|
||||
thread->SetEntry(entry);
|
||||
thread->SetStackSize(stack_size ? stack_size : Emu.GetPrimaryStackSize());
|
||||
thread->SetPrio(prio ? prio : Emu.GetPrimaryPrio());
|
||||
spu->PC = entry;
|
||||
|
||||
thread = std::move(spu);
|
||||
}
|
||||
|
@ -544,6 +544,10 @@ public:
|
||||
std::array<std::pair<u32, std::weak_ptr<lv2_event_queue_t>>, 32> spuq; // Event Queue Keys for SPU Thread
|
||||
std::weak_ptr<lv2_event_queue_t> spup[64]; // SPU Ports
|
||||
|
||||
u32 PC = 0;
|
||||
const u32 index; // SPU index
|
||||
const u32 offset; // SPU LS offset
|
||||
|
||||
void write_snr(bool number, u32 value)
|
||||
{
|
||||
if (!number)
|
||||
@ -626,11 +630,28 @@ public:
|
||||
|
||||
std::function<void(SPUThread& SPU)> m_custom_task;
|
||||
|
||||
public:
|
||||
SPUThread(CPUThreadType type = CPU_THREAD_SPU);
|
||||
virtual ~SPUThread();
|
||||
protected:
|
||||
SPUThread(CPUThreadType type, const std::string& name, u32 index, u32 offset);
|
||||
|
||||
virtual std::string RegsToString()
|
||||
public:
|
||||
SPUThread(const std::string& name, u32 index, u32 offset);
|
||||
virtual ~SPUThread() override;
|
||||
|
||||
virtual bool IsPaused() const override;
|
||||
|
||||
virtual void DumpInformation() const override;
|
||||
virtual u32 GetPC() const override { return PC; }
|
||||
virtual u32 GetOffset() const override { return offset; }
|
||||
virtual void DoRun() override;
|
||||
virtual void Task() override;
|
||||
|
||||
virtual void InitRegs() override;
|
||||
virtual void InitStack() override;
|
||||
virtual void CloseStack() override;
|
||||
|
||||
void FastCall(u32 ls_addr);
|
||||
|
||||
virtual std::string RegsToString() const
|
||||
{
|
||||
std::string ret = "Registers:\n=========\n";
|
||||
|
||||
@ -639,7 +660,7 @@ public:
|
||||
return ret;
|
||||
}
|
||||
|
||||
virtual std::string ReadRegString(const std::string& reg)
|
||||
virtual std::string ReadRegString(const std::string& reg) const
|
||||
{
|
||||
std::string::size_type first_brk = reg.find('[');
|
||||
if (first_brk != std::string::npos)
|
||||
@ -679,23 +700,6 @@ public:
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
public:
|
||||
virtual void InitRegs();
|
||||
virtual void InitStack();
|
||||
virtual void CloseStack();
|
||||
virtual void Task();
|
||||
void FastCall(u32 ls_addr);
|
||||
void FastStop();
|
||||
void FastRun();
|
||||
|
||||
protected:
|
||||
virtual void DoReset();
|
||||
virtual void DoRun();
|
||||
virtual void DoPause();
|
||||
virtual void DoResume();
|
||||
virtual void DoStop();
|
||||
virtual void DoClose();
|
||||
};
|
||||
|
||||
class spu_thread : cpu_thread
|
||||
|
@ -32,7 +32,6 @@ enum DbgCommand
|
||||
DID_EXEC_THREAD,
|
||||
DID_REGISTRED_CALLBACK,
|
||||
DID_UNREGISTRED_CALLBACK,
|
||||
DID_EXIT_THR_SYSCALL,
|
||||
|
||||
DID_LAST_COMMAND,
|
||||
};
|
||||
|
@ -30,7 +30,12 @@ public:
|
||||
{
|
||||
}
|
||||
|
||||
ID_data_t(const ID_data_t& right) = delete;
|
||||
ID_data_t(const ID_data_t& right)
|
||||
: data(right.data)
|
||||
, info(right.info)
|
||||
, type(right.type)
|
||||
{
|
||||
}
|
||||
|
||||
ID_data_t& operator =(const ID_data_t& right) = delete;
|
||||
|
||||
@ -73,7 +78,7 @@ public:
|
||||
// schedule unlocking
|
||||
std::lock_guard<std::mutex> lock(m_mutex, std::adopt_lock);
|
||||
|
||||
throw "Invalid get_cur_id() usage";
|
||||
throw EXCEPTION("Current ID is not available");
|
||||
}
|
||||
|
||||
return m_cur_id;
|
||||
@ -97,25 +102,40 @@ public:
return m_cur_id++;
}

// add new ID of specified type with specified constructor arguments (passed to std::make_shared<>)
template<typename T, typename... Args> u32 make(Args&&... args)
// add new ID of specified type with specified constructor arguments (returns object)
template<typename T, typename... Args, typename = std::enable_if_t<std::is_constructible<T, Args...>::value>> std::shared_ptr<T> make_ptr(Args&&... args)
{
std::lock_guard<std::mutex> lock(m_mutex);

const u32 type = ID_type<T>::type;

m_id_map.emplace(m_cur_id, ID_data_t(std::make_shared<T>(args...), type));
auto ptr = std::make_shared<T>(std::forward<Args>(args)...);

m_id_map.emplace(m_cur_id++, ID_data_t(ptr, type));

return std::move(ptr);
}

// add new ID of specified type with specified constructor arguments (returns id)
template<typename T, typename... Args> std::enable_if_t<std::is_constructible<T, Args...>::value, u32> make(Args&&... args)
{
std::lock_guard<std::mutex> lock(m_mutex);

const u32 type = ID_type<T>::type;

m_id_map.emplace(m_cur_id, ID_data_t(std::make_shared<T>(std::forward<Args>(args)...), type));

return m_cur_id++;
}

template<typename T> std::shared_ptr<T> get(u32 id)
// load ID created with type Original, optionally static_cast to T
template<typename T, typename Orig = T> auto get(u32 id) -> decltype(std::shared_ptr<T>(static_cast<T*>(std::declval<Orig*>())))
{
std::lock_guard<std::mutex> lock(m_mutex);

auto f = m_id_map.find(id);

if (f == m_id_map.end() || f->second.info != typeid(T))
if (f == m_id_map.end() || f->second.info != typeid(Orig))
{
return nullptr;
}
@ -139,7 +159,24 @@ public:
return true;
}
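A usage sketch of the new interface (caller code is illustrative, but it mirrors the SPUThread/PPUThread call sites elsewhere in this patch): make_ptr<>() registers the object and hands back the shared_ptr directly, make<>() returns only the id, and get<T, Orig>() looks an id up by its stored concrete type while returning a possibly up-cast pointer.

// Illustrative only: create an SPU thread object under an id, then fetch it later.
auto spu = Emu.GetIdManager().make_ptr<SPUThread>("SomeThread", 0, 0x10000); // stored under typeid(SPUThread)
spu->PC = entry; // 'entry' as in the spu_thread constructor later in this patch

const u32 id = 0x42; // placeholder: whatever id was handed out for the object

// The entry must have been created as SPUThread, but the caller may ask for a base-class view.
std::shared_ptr<CPUThread> cpu = Emu.GetIdManager().get<CPUThread, SPUThread>(id);
if (!cpu) { /* wrong type or stale id */ }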
|
||||
|
||||
u32 get_count_by_type(u32 type)
|
||||
template<typename T> u32 get_count()
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(m_mutex);
|
||||
|
||||
u32 result = 0;
|
||||
|
||||
for (auto& v : m_id_map)
|
||||
{
|
||||
if (v.second.info == typeid(T))
|
||||
{
|
||||
result++;
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
u32 get_count(u32 type)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(m_mutex);
|
||||
|
||||
@ -156,7 +193,24 @@ public:
|
||||
return result;
|
||||
}
|
||||
|
||||
std::set<u32> get_IDs_by_type(u32 type)
|
||||
template<typename T> std::set<u32> get_IDs()
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(m_mutex);
|
||||
|
||||
std::set<u32> result;
|
||||
|
||||
for (auto& v : m_id_map)
|
||||
{
|
||||
if (v.second.info == typeid(T))
|
||||
{
|
||||
result.insert(v.first);
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
std::set<u32> get_IDs(u32 type)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(m_mutex);
|
||||
|
||||
@ -172,4 +226,38 @@ public:
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
template<typename T> std::vector<ID_data_t> get_data()
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(m_mutex);
|
||||
|
||||
std::vector<ID_data_t> result;
|
||||
|
||||
for (auto& v : m_id_map)
|
||||
{
|
||||
if (v.second.info == typeid(T))
|
||||
{
|
||||
result.emplace_back(v.second);
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
std::vector<ID_data_t> get_data(u32 type)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(m_mutex);
|
||||
|
||||
std::vector<ID_data_t> result;
|
||||
|
||||
for (auto& v : m_id_map)
|
||||
{
|
||||
if (v.second.type == type)
|
||||
{
|
||||
result.emplace_back(v.second);
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
};
|
||||
|
@ -2,11 +2,6 @@
|
||||
|
||||
#include "MemoryBlock.h"
|
||||
|
||||
using std::nullptr_t;
|
||||
|
||||
#define safe_delete(x) do {delete (x);(x)=nullptr;} while(0)
|
||||
#define safe_free(x) do {free(x);(x)=nullptr;} while(0)
|
||||
|
||||
enum MemoryType
|
||||
{
|
||||
Memory_PS3,
|
||||
|
@ -79,13 +79,12 @@ namespace vm

class reservation_mutex_t
{
std::atomic<NamedThreadBase*> m_owner;
atomic<const thread_ctrl_t*> m_owner{};
std::condition_variable m_cv;
std::mutex m_cv_mutex;

public:
reservation_mutex_t()
: m_owner(nullptr)
{
}

@ -93,10 +92,9 @@ namespace vm

never_inline void lock()
{
NamedThreadBase* owner = GetCurrentNamedThread();
NamedThreadBase* old = nullptr;
auto owner = get_current_thread_ctrl();

while (!m_owner.compare_exchange_strong(old, owner))
while (auto old = m_owner.compare_and_swap(nullptr, owner))
{
std::unique_lock<std::mutex> cv_lock(m_cv_mutex);

@ -115,9 +113,9 @@ namespace vm

never_inline void unlock()
{
NamedThreadBase* owner = GetCurrentNamedThread();
auto owner = get_current_thread_ctrl();

if (!m_owner.compare_exchange_strong(owner, nullptr))
if (!m_owner.compare_and_swap_test(owner, nullptr))
{
throw __FUNCTION__;
}
@ -131,7 +129,7 @@ namespace vm
};

std::function<void()> g_reservation_cb = nullptr;
NamedThreadBase* g_reservation_owner = nullptr;
const thread_ctrl_t* g_reservation_owner = nullptr;

u32 g_reservation_addr = 0;
u32 g_reservation_size = 0;
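The rewritten lock()/unlock() pair above is essentially an owner-pointer mutex. A simplified, hedged sketch using plain std::atomic (the project's own atomic<> wrapper and the condition-variable wait are omitted here):

#include <atomic>

// Simplified illustration only, not the project's implementation.
struct owner_mutex
{
    std::atomic<const thread_ctrl_t*> owner{ nullptr };

    void lock(const thread_ctrl_t* me)
    {
        const thread_ctrl_t* expected = nullptr;
        while (!owner.compare_exchange_strong(expected, me)) // install ourselves if free
        {
            expected = nullptr; // held by someone else: the real code waits on m_cv here
        }
    }

    void unlock(const thread_ctrl_t* me)
    {
        const thread_ctrl_t* expected = me;
        if (!owner.compare_exchange_strong(expected, nullptr)) // only the owner may release
        {
            throw EXCEPTION("reservation mutex unlocked by a non-owner"); // same failure mode as the patch
        }
    }
};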
|
||||
@ -232,7 +230,7 @@ namespace vm
|
||||
// set additional information
|
||||
g_reservation_addr = addr;
|
||||
g_reservation_size = size;
|
||||
g_reservation_owner = GetCurrentNamedThread();
|
||||
g_reservation_owner = get_current_thread_ctrl();
|
||||
g_reservation_cb = callback;
|
||||
|
||||
// copy data
|
||||
@ -254,7 +252,7 @@ namespace vm
|
||||
|
||||
std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
|
||||
|
||||
if (g_reservation_owner != GetCurrentNamedThread() || g_reservation_addr != addr || g_reservation_size != size)
|
||||
if (g_reservation_owner != get_current_thread_ctrl() || g_reservation_addr != addr || g_reservation_size != size)
|
||||
{
|
||||
// atomic update failed
|
||||
return false;
|
||||
@ -306,7 +304,7 @@ namespace vm
|
||||
{
|
||||
std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
|
||||
|
||||
if (g_reservation_owner == GetCurrentNamedThread())
|
||||
if (g_reservation_owner == get_current_thread_ctrl())
|
||||
{
|
||||
_reservation_break(g_reservation_addr);
|
||||
}
|
||||
@ -320,7 +318,7 @@ namespace vm
|
||||
std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
|
||||
|
||||
// break previous reservation
|
||||
if (g_reservation_owner != GetCurrentNamedThread() || g_reservation_addr != addr || g_reservation_size != size)
|
||||
if (g_reservation_owner != get_current_thread_ctrl() || g_reservation_addr != addr || g_reservation_size != size)
|
||||
{
|
||||
if (g_reservation_owner)
|
||||
{
|
||||
@ -334,7 +332,7 @@ namespace vm
|
||||
// set additional information
|
||||
g_reservation_addr = addr;
|
||||
g_reservation_size = size;
|
||||
g_reservation_owner = GetCurrentNamedThread();
|
||||
g_reservation_owner = get_current_thread_ctrl();
|
||||
g_reservation_cb = nullptr;
|
||||
|
||||
// may not be necessary
|
||||
@ -638,9 +636,9 @@ namespace vm
context.GPR[1] -= align(size, 8); // room minimal possible size
context.GPR[1] &= ~(align_v - 1); // fix stack alignment

if (context.GPR[1] < CPU.GetStackAddr())
if (context.GPR[1] < context.stack_addr)
{
LOG_ERROR(PPU, "vm::stack_push(0x%x,%d): stack overflow (SP=0x%llx, stack=*0x%x)", size, align_v, context.GPR[1], CPU.GetStackAddr());
LOG_ERROR(PPU, "vm::stack_push(0x%x,%d): stack overflow (SP=0x%llx, stack=*0x%x)", size, align_v, context.GPR[1], context.stack_addr);
context.GPR[1] = old_pos;
return 0;
}
@ -665,9 +663,9 @@ namespace vm
context.SP -= align(size, 4); // room minimal possible size
context.SP &= ~(align_v - 1); // fix stack alignment

if (context.SP < CPU.GetStackAddr())
if (context.SP < context.stack_addr)
{
LOG_ERROR(ARMv7, "vm::stack_push(0x%x,%d): stack overflow (SP=0x%x, stack=*0x%x)", size, align_v, context.SP, CPU.GetStackAddr());
LOG_ERROR(ARMv7, "vm::stack_push(0x%x,%d): stack overflow (SP=0x%x, stack=*0x%x)", size, align_v, context.SP, context.stack_addr);
context.SP = old_pos;
return 0;
}
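Worked example of the two stack adjustments above, assuming align(x, n) rounds x up to a multiple of n (as its use here implies) and that align_v is a power of two: with size = 0x13, align_v = 0x10 and SP = 0x10000, the subtraction removes align(0x13, 8) = 0x18 giving 0xFFE8, and the mask ~(0x10 - 1) then rounds the pointer down to 0xFFE0. The returned block is therefore 16-byte aligned and spans 0x20 bytes, at least the requested size.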
|
||||
|
@ -554,9 +554,12 @@ namespace vm
|
||||
|
||||
stackvar(stackvar&& r) = delete;

~stackvar()
~stackvar() noexcept(false) // allow exceptions
{
stack_pop(m_thread, m_data.addr, m_data.old_pos);
if (!std::uncaught_exception()) // don't call during stack unwinding
{
stack_pop(m_thread, m_data.addr, m_data.old_pos);
}
}
|
||||
|
||||
stackvar& operator = (const stackvar& r)
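Since C++11, destructors are noexcept by default, so a destructor that can throw must opt out explicitly, and throwing while another exception is already unwinding would call std::terminate(), hence the std::uncaught_exception() guard. A minimal, hedged sketch of the same pattern (type and helper below are illustrative stand-ins, not from the patch):

#include <exception>

void release_guest_frame(); // hypothetical stand-in for stack_pop(); may throw

struct guest_stack_frame
{
    ~guest_stack_frame() noexcept(false) // opt out of the implicit noexcept
    {
        if (!std::uncaught_exception())  // skip the release while unwinding, or terminate() would follow
        {
            release_guest_frame();
        }
    }
};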
|
||||
|
@ -119,35 +119,33 @@ void GLFragmentDecompilerThread::Task()
|
||||
}
|
||||
|
||||
GLFragmentProgram::GLFragmentProgram()
|
||||
: m_decompiler_thread(nullptr)
|
||||
, id(0)
|
||||
{
|
||||
}
|
||||
|
||||
GLFragmentProgram::~GLFragmentProgram()
|
||||
{
|
||||
if (m_decompiler_thread)
|
||||
{
|
||||
Wait();
|
||||
if (m_decompiler_thread->IsAlive())
|
||||
{
|
||||
m_decompiler_thread->Stop();
|
||||
}
|
||||
//if (m_decompiler_thread)
|
||||
//{
|
||||
// Wait();
|
||||
// if (m_decompiler_thread->IsAlive())
|
||||
// {
|
||||
// m_decompiler_thread->Stop();
|
||||
// }
|
||||
|
||||
delete m_decompiler_thread;
|
||||
m_decompiler_thread = nullptr;
|
||||
}
|
||||
// delete m_decompiler_thread;
|
||||
// m_decompiler_thread = nullptr;
|
||||
//}
|
||||
|
||||
Delete();
|
||||
}
|
||||
|
||||
void GLFragmentProgram::Wait()
|
||||
{
|
||||
if (m_decompiler_thread && m_decompiler_thread->IsAlive())
|
||||
{
|
||||
m_decompiler_thread->Join();
|
||||
}
|
||||
}
|
||||
//void GLFragmentProgram::Wait()
|
||||
//{
|
||||
// if (m_decompiler_thread && m_decompiler_thread->IsAlive())
|
||||
// {
|
||||
// m_decompiler_thread->Join();
|
||||
// }
|
||||
//}
|
||||
|
||||
void GLFragmentProgram::Decompile(RSXFragmentProgram& prog)
|
||||
{
|
||||
@ -163,23 +161,23 @@ void GLFragmentProgram::Decompile(RSXFragmentProgram& prog)
|
||||
}
|
||||
}
|
||||
|
||||
void GLFragmentProgram::DecompileAsync(RSXFragmentProgram& prog)
|
||||
{
|
||||
if (m_decompiler_thread)
|
||||
{
|
||||
Wait();
|
||||
if (m_decompiler_thread->IsAlive())
|
||||
{
|
||||
m_decompiler_thread->Stop();
|
||||
}
|
||||
|
||||
delete m_decompiler_thread;
|
||||
m_decompiler_thread = nullptr;
|
||||
}
|
||||
|
||||
m_decompiler_thread = new GLFragmentDecompilerThread(shader, parr, prog.addr, prog.size, prog.ctrl);
|
||||
m_decompiler_thread->Start();
|
||||
}
|
||||
//void GLFragmentProgram::DecompileAsync(RSXFragmentProgram& prog)
|
||||
//{
|
||||
// if (m_decompiler_thread)
|
||||
// {
|
||||
// Wait();
|
||||
// if (m_decompiler_thread->IsAlive())
|
||||
// {
|
||||
// m_decompiler_thread->Stop();
|
||||
// }
|
||||
//
|
||||
// delete m_decompiler_thread;
|
||||
// m_decompiler_thread = nullptr;
|
||||
// }
|
||||
//
|
||||
// m_decompiler_thread = new GLFragmentDecompilerThread(shader, parr, prog.addr, prog.size, prog.ctrl);
|
||||
// m_decompiler_thread->Start();
|
||||
//}
|
||||
|
||||
void GLFragmentProgram::Compile()
|
||||
{
|
||||
|
@ -4,16 +4,17 @@
|
||||
#include "Utilities/Thread.h"
|
||||
#include "OpenGL.h"
|
||||
|
||||
struct GLFragmentDecompilerThread : public ThreadBase, public FragmentProgramDecompiler
|
||||
struct GLFragmentDecompilerThread : public FragmentProgramDecompiler
|
||||
{
|
||||
std::string& m_shader;
|
||||
ParamArray& m_parrDummy;
|
||||
public:
|
||||
GLFragmentDecompilerThread(std::string& shader, ParamArray& parr, u32 addr, u32& size, u32 ctrl)
|
||||
: ThreadBase("Fragment Shader Decompiler Thread"), FragmentProgramDecompiler(addr, size, ctrl)
|
||||
: FragmentProgramDecompiler(addr, size, ctrl)
|
||||
, m_shader(shader)
|
||||
, m_parrDummy(parr)
|
||||
{}
|
||||
{
|
||||
}
|
||||
|
||||
void Task();
|
||||
|
||||
@ -41,7 +42,7 @@ public:
|
||||
~GLFragmentProgram();
|
||||
|
||||
ParamArray parr;
|
||||
u32 id;
|
||||
u32 id = 0;
|
||||
std::string shader;
|
||||
std::vector<size_t> FragmentConstantOffsetCache;
|
||||
|
||||
@ -51,23 +52,10 @@ public:
|
||||
*/
|
||||
void Decompile(RSXFragmentProgram& prog);
|
||||
|
||||
/**
|
||||
* Asynchronously decompile a fragment shader located in the PS3's Memory.
|
||||
* When this function is called you must call Wait() before GetShaderText() will return valid data.
|
||||
* @param prog RSXShaderProgram specifying the location and size of the shader in memory
|
||||
*/
|
||||
void DecompileAsync(RSXFragmentProgram& prog);
|
||||
|
||||
/** Wait for the decompiler task to complete decompilation. */
|
||||
void Wait();
|
||||
|
||||
/** Compile the decompiled fragment shader into a format we can use with OpenGL. */
|
||||
void Compile();
|
||||
|
||||
private:
|
||||
/** Threaded fragment shader decompiler responsible for decompiling this program */
|
||||
GLFragmentDecompilerThread* m_decompiler_thread;
|
||||
|
||||
/** Deletes the shader and any stored information */
|
||||
void Delete();
|
||||
};
|
||||
|
@ -794,6 +794,11 @@ GLGSRender::GLGSRender()
|
||||
|
||||
GLGSRender::~GLGSRender()
|
||||
{
|
||||
if (joinable())
|
||||
{
|
||||
throw EXCEPTION("Thread not joined");
|
||||
}
|
||||
|
||||
m_frame->Close();
|
||||
m_frame->DeleteContext(m_context);
|
||||
}
|
||||
@ -814,7 +819,8 @@ extern CellGcmContextData current_context;
|
||||
|
||||
void GLGSRender::Close()
|
||||
{
|
||||
Stop();
|
||||
cv.notify_one();
|
||||
join();
|
||||
|
||||
if (m_frame->IsShown())
|
||||
{
|
||||
|
@ -134,9 +134,7 @@ typedef GSFrameBase*(*GetGSFrameCb)();
|
||||
|
||||
void SetGetGSFrameCallback(GetGSFrameCb value);
|
||||
|
||||
class GLGSRender //TODO: find out why this used to inherit from wxWindow
|
||||
: //public wxWindow
|
||||
/*,*/ public GSRender
|
||||
class GLGSRender final : public GSRender
|
||||
{
|
||||
private:
|
||||
std::vector<u8> m_vdata;
|
||||
@ -167,7 +165,7 @@ public:
|
||||
bool is_intel_vendor;
|
||||
|
||||
GLGSRender();
|
||||
virtual ~GLGSRender();
|
||||
virtual ~GLGSRender() override;
|
||||
|
||||
private:
|
||||
void EnableVertexData(bool indexed_draw = false);
|
||||
|
@ -131,35 +131,33 @@ void GLVertexDecompilerThread::Task()
|
||||
}
|
||||
|
||||
GLVertexProgram::GLVertexProgram()
|
||||
: m_decompiler_thread(nullptr)
|
||||
, id(0)
|
||||
{
|
||||
}
|
||||
|
||||
GLVertexProgram::~GLVertexProgram()
|
||||
{
|
||||
if (m_decompiler_thread)
|
||||
{
|
||||
Wait();
|
||||
if (m_decompiler_thread->IsAlive())
|
||||
{
|
||||
m_decompiler_thread->Stop();
|
||||
}
|
||||
//if (m_decompiler_thread)
|
||||
//{
|
||||
// Wait();
|
||||
// if (m_decompiler_thread->IsAlive())
|
||||
// {
|
||||
// m_decompiler_thread->Stop();
|
||||
// }
|
||||
|
||||
delete m_decompiler_thread;
|
||||
m_decompiler_thread = nullptr;
|
||||
}
|
||||
// delete m_decompiler_thread;
|
||||
// m_decompiler_thread = nullptr;
|
||||
//}
|
||||
|
||||
Delete();
|
||||
}
|
||||
|
||||
void GLVertexProgram::Wait()
|
||||
{
|
||||
if (m_decompiler_thread && m_decompiler_thread->IsAlive())
|
||||
{
|
||||
m_decompiler_thread->Join();
|
||||
}
|
||||
}
|
||||
//void GLVertexProgram::Wait()
|
||||
//{
|
||||
// if (m_decompiler_thread && m_decompiler_thread->IsAlive())
|
||||
// {
|
||||
// m_decompiler_thread->Join();
|
||||
// }
|
||||
//}
|
||||
|
||||
void GLVertexProgram::Decompile(RSXVertexProgram& prog)
|
||||
{
|
||||
@ -167,23 +165,23 @@ void GLVertexProgram::Decompile(RSXVertexProgram& prog)
|
||||
decompiler.Task();
|
||||
}
|
||||
|
||||
void GLVertexProgram::DecompileAsync(RSXVertexProgram& prog)
|
||||
{
|
||||
if (m_decompiler_thread)
|
||||
{
|
||||
Wait();
|
||||
if (m_decompiler_thread->IsAlive())
|
||||
{
|
||||
m_decompiler_thread->Stop();
|
||||
}
|
||||
|
||||
delete m_decompiler_thread;
|
||||
m_decompiler_thread = nullptr;
|
||||
}
|
||||
|
||||
m_decompiler_thread = new GLVertexDecompilerThread(prog.data, shader, parr);
|
||||
m_decompiler_thread->Start();
|
||||
}
|
||||
//void GLVertexProgram::DecompileAsync(RSXVertexProgram& prog)
|
||||
//{
|
||||
// if (m_decompiler_thread)
|
||||
// {
|
||||
// Wait();
|
||||
// if (m_decompiler_thread->IsAlive())
|
||||
// {
|
||||
// m_decompiler_thread->Stop();
|
||||
// }
|
||||
//
|
||||
// delete m_decompiler_thread;
|
||||
// m_decompiler_thread = nullptr;
|
||||
// }
|
||||
//
|
||||
// m_decompiler_thread = new GLVertexDecompilerThread(prog.data, shader, parr);
|
||||
// m_decompiler_thread->Start();
|
||||
//}
|
||||
|
||||
void GLVertexProgram::Compile()
|
||||
{
|
||||
|
@ -4,7 +4,7 @@
|
||||
#include "Utilities/Thread.h"
|
||||
#include "OpenGL.h"
|
||||
|
||||
struct GLVertexDecompilerThread : public ThreadBase, public VertexProgramDecompiler
|
||||
struct GLVertexDecompilerThread : public VertexProgramDecompiler
|
||||
{
|
||||
std::string &m_shader;
|
||||
protected:
|
||||
@ -20,11 +20,12 @@ protected:
|
||||
virtual void insertMainEnd(std::stringstream &OS) override;
|
||||
public:
|
||||
GLVertexDecompilerThread(std::vector<u32>& data, std::string& shader, ParamArray& parr)
|
||||
: ThreadBase("Vertex Shader Decompiler Thread"), VertexProgramDecompiler(data), m_shader(shader)
|
||||
: VertexProgramDecompiler(data)
|
||||
, m_shader(shader)
|
||||
{
|
||||
}
|
||||
|
||||
virtual void Task() override;
|
||||
void Task();
|
||||
};
|
||||
|
||||
class GLVertexProgram
|
||||
@ -34,15 +35,12 @@ public:
|
||||
~GLVertexProgram();
|
||||
|
||||
ParamArray parr;
|
||||
u32 id;
|
||||
u32 id = 0;
|
||||
std::string shader;
|
||||
|
||||
void Decompile(RSXVertexProgram& prog);
|
||||
void DecompileAsync(RSXVertexProgram& prog);
|
||||
void Wait();
|
||||
void Compile();
|
||||
|
||||
private:
|
||||
GLVertexDecompilerThread* m_decompiler_thread;
|
||||
void Delete();
|
||||
};
|
||||
|
@ -3,8 +3,12 @@
|
||||
|
||||
struct GSRender : public RSXThread
|
||||
{
|
||||
virtual ~GSRender()
|
||||
virtual ~GSRender() override
|
||||
{
|
||||
if (joinable())
|
||||
{
|
||||
throw EXCEPTION("Thread not joined");
|
||||
}
|
||||
}
|
||||
|
||||
virtual void Close()=0;
|
||||
|
@ -1,8 +1,7 @@
|
||||
#pragma once
|
||||
#include "Emu/RSX/GSRender.h"
|
||||
|
||||
class NullGSRender
|
||||
: public GSRender
|
||||
class NullGSRender final : public GSRender
|
||||
{
|
||||
public:
|
||||
|
||||
@ -10,8 +9,10 @@ public:
|
||||
{
|
||||
}
|
||||
|
||||
virtual ~NullGSRender()
|
||||
virtual ~NullGSRender() override
|
||||
{
|
||||
cv.notify_one();
|
||||
join();
|
||||
}
|
||||
|
||||
private:
|
||||
|
@ -2456,13 +2456,13 @@ void RSXThread::Task()
|
||||
|
||||
m_last_flip_time = get_system_time() - 1000000;
|
||||
|
||||
thread_t vblank("VBlank thread", true /* autojoin */, [this]()
|
||||
thread_t vblank(WRAP_EXPR("VBlank thread"), [this]()
|
||||
{
|
||||
const u64 start_time = get_system_time();
|
||||
|
||||
m_vblank_count = 0;
|
||||
|
||||
while (!TestDestroy() && !Emu.IsStopped())
|
||||
while (joinable() && !Emu.IsStopped())
|
||||
{
|
||||
if (get_system_time() - start_time > m_vblank_count * 1000000 / 60)
|
||||
{
|
||||
@ -2483,13 +2483,8 @@ void RSXThread::Task()
|
||||
}
|
||||
});
|
||||
|
||||
while (!TestDestroy()) try
|
||||
while (joinable() && !Emu.IsStopped())
|
||||
{
|
||||
if (Emu.IsStopped())
|
||||
{
|
||||
LOG_WARNING(RSX, "RSX thread aborted");
|
||||
break;
|
||||
}
|
||||
std::lock_guard<std::mutex> lock(m_cs_main);
|
||||
|
||||
inc = 1;
|
||||
@ -2571,16 +2566,6 @@ void RSXThread::Task()
|
||||
value += (count + 1) * 4;
|
||||
});
|
||||
}
|
||||
catch (const std::string& e)
|
||||
{
|
||||
LOG_ERROR(RSX, "Exception: %s", e.c_str());
|
||||
Emu.Pause();
|
||||
}
|
||||
catch (const char* e)
|
||||
{
|
||||
LOG_ERROR(RSX, "Exception: %s", e);
|
||||
Emu.Pause();
|
||||
}
|
||||
|
||||
LOG_NOTICE(RSX, "RSX thread ended");
|
||||
|
||||
@ -2602,7 +2587,8 @@ void RSXThread::Init(const u32 ioAddress, const u32 ioSize, const u32 ctrlAddres
|
||||
m_used_gcm_commands.clear();
|
||||
|
||||
OnInit();
|
||||
ThreadBase::Start();
|
||||
|
||||
start(WRAP_EXPR("RSXThread"), WRAP_EXPR(Task()));
|
||||
}
|
||||
|
||||
u32 RSXThread::ReadIO32(u32 addr)
|
||||
|
@ -90,7 +90,7 @@ struct RSXTransformConstant
|
||||
}
|
||||
};
|
||||
|
||||
class RSXThread : public ThreadBase
|
||||
class RSXThread : protected thread_t
|
||||
{
|
||||
public:
|
||||
static const uint m_textures_count = 16;
|
||||
@ -449,8 +449,7 @@ public:
|
||||
|
||||
protected:
|
||||
RSXThread()
|
||||
: ThreadBase("RSXThread")
|
||||
, m_ctrl(nullptr)
|
||||
: m_ctrl(nullptr)
|
||||
, m_shader_ctrl(0x40)
|
||||
, m_flip_status(0)
|
||||
, m_flip_mode(CELL_GCM_DISPLAY_VSYNC)
|
||||
@ -551,7 +550,13 @@ protected:
|
||||
Reset();
|
||||
}
|
||||
|
||||
virtual ~RSXThread() {}
|
||||
virtual ~RSXThread() override
|
||||
{
|
||||
if (joinable())
|
||||
{
|
||||
throw EXCEPTION("Thread not joined");
|
||||
}
|
||||
}
|
||||
|
||||
void Reset()
|
||||
{
|
||||
|
@ -2,35 +2,50 @@
|
||||
#include "Utilities/Log.h"
|
||||
#include "Emu/Memory/Memory.h"
|
||||
#include "Emu/System.h"
|
||||
#include "Emu/CPU/CPUThreadManager.h"
|
||||
#include "Emu/IdManager.h"
|
||||
|
||||
#include "Emu/Cell/PPUThread.h"
|
||||
#include "Emu/ARMv7/ARMv7Thread.h"
|
||||
#include "Emu/CPU/CPUThreadManager.h"
|
||||
#include "Callback.h"
|
||||
|
||||
void CallbackManager::Register(const std::function<s32(PPUThread& PPU)>& func)
|
||||
void CallbackManager::Register(std::function<s32(PPUThread& PPU)> func)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(m_mutex);
|
||||
|
||||
m_cb_list.push_back([=](CPUThread& CPU) -> s32
|
||||
{
|
||||
assert(CPU.GetType() == CPU_THREAD_PPU);
|
||||
if (CPU.GetType() != CPU_THREAD_PPU) throw EXCEPTION("PPU thread expected");
|
||||
return func(static_cast<PPUThread&>(CPU));
|
||||
});
|
||||
}
|
||||
|
||||
void CallbackManager::Async(const std::function<void(PPUThread& PPU)>& func)
|
||||
void CallbackManager::Async(std::function<void(PPUThread& PPU)> func)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(m_mutex);
|
||||
|
||||
m_async_list.push_back([=](CPUThread& CPU)
|
||||
{
|
||||
assert(CPU.GetType() == CPU_THREAD_PPU);
|
||||
if (CPU.GetType() != CPU_THREAD_PPU) throw EXCEPTION("PPU thread expected");
|
||||
func(static_cast<PPUThread&>(CPU));
|
||||
});
|
||||
|
||||
m_cv.notify_one();
|
||||
}
|
||||
|
||||
//void CallbackManager::Async(std::function<void(ARMv7Context& context)> func)
|
||||
//{
|
||||
// std::lock_guard<std::mutex> lock(m_mutex);
|
||||
//
|
||||
// m_async_list.push_back([=](CPUThread& CPU)
|
||||
// {
|
||||
// if (CPU.GetType() != CPU_THREAD_ARMv7) throw EXCEPTION("ARMv7 thread expected");
|
||||
// func(static_cast<ARMv7Thread&>(CPU));
|
||||
// });
|
||||
//
|
||||
// m_cv.notify_one();
|
||||
//}
|
||||
|
||||
bool CallbackManager::Check(CPUThread& CPU, s32& result)
{
std::function<s32(CPUThread& CPU)> func;
@ -40,7 +55,7 @@ bool CallbackManager::Check(CPUThread& CPU, s32& result)

if (m_cb_list.size())
{
func = m_cb_list[0];
func = std::move(m_cb_list.front());
m_cb_list.erase(m_cb_list.begin());
}
}
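Moving m_cb_list.front() out before erasing it avoids copying the std::function while the mutex is held; the async list below is drained the same way. A minimal hedged sketch of the take-under-lock, run-outside-lock idiom (illustrative only):

// Pop the oldest entry under the lock, invoke it after unlocking.
std::function<s32(CPUThread&)> func;
{
    std::lock_guard<std::mutex> lock(m_mutex);
    if (!m_cb_list.empty())
    {
        func = std::move(m_cb_list.front()); // steal instead of copying
        m_cb_list.erase(m_cb_list.begin());
    }
}
if (func) result = func(CPU); // run without holding the mutex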
|
||||
@ -52,56 +67,56 @@ void CallbackManager::Init()
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(m_mutex);
|
||||
|
||||
if (Memory.PSV.RAM.GetStartAddr())
|
||||
auto task = [this](CPUThread& CPU)
|
||||
{
|
||||
m_cb_thread = Emu.GetCPU().AddThread(CPU_THREAD_ARMv7);
|
||||
m_cb_thread->SetName("Callback Thread");
|
||||
m_cb_thread->SetEntry(0);
|
||||
m_cb_thread->SetPrio(1001);
|
||||
m_cb_thread->SetStackSize(0x10000);
|
||||
m_cb_thread->InitStack();
|
||||
m_cb_thread->InitRegs();
|
||||
static_cast<ARMv7Thread&>(*m_cb_thread).DoRun();
|
||||
}
|
||||
else
|
||||
{
|
||||
m_cb_thread = Emu.GetCPU().AddThread(CPU_THREAD_PPU);
|
||||
m_cb_thread->SetName("Callback Thread");
|
||||
m_cb_thread->SetEntry(0);
|
||||
m_cb_thread->SetPrio(1001);
|
||||
m_cb_thread->SetStackSize(0x10000);
|
||||
m_cb_thread->InitStack();
|
||||
m_cb_thread->InitRegs();
|
||||
static_cast<PPUThread&>(*m_cb_thread).DoRun();
|
||||
}
|
||||
|
||||
thread_t cb_async_thread("CallbackManager thread", [this]()
|
||||
{
|
||||
SetCurrentNamedThread(&*m_cb_thread);
|
||||
|
||||
std::unique_lock<std::mutex> lock(m_mutex);
|
||||
|
||||
while (!Emu.IsStopped())
|
||||
while (!CPU.CheckStatus())
|
||||
{
|
||||
std::function<void(CPUThread& CPU)> func;
|
||||
|
||||
if (m_async_list.size())
|
||||
{
|
||||
func = m_async_list[0];
|
||||
func = std::move(m_async_list.front());
|
||||
m_async_list.erase(m_async_list.begin());
|
||||
}
|
||||
|
||||
if (func)
|
||||
{
|
||||
lock.unlock();
|
||||
if (lock) lock.unlock();
|
||||
|
||||
func(*m_cb_thread);
|
||||
lock.lock();
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!lock) lock.lock();
|
||||
|
||||
m_cv.wait_for(lock, std::chrono::milliseconds(1));
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
if (Memory.PSV.RAM.GetStartAddr())
|
||||
{
|
||||
auto thread = Emu.GetIdManager().make_ptr<ARMv7Thread>("Callback Thread");
|
||||
|
||||
thread->prio = 1001;
|
||||
thread->stack_size = 0x10000;
|
||||
thread->custom_task = task;
|
||||
thread->Run();
|
||||
|
||||
m_cb_thread = thread;
|
||||
}
|
||||
else
|
||||
{
|
||||
auto thread = Emu.GetIdManager().make_ptr<PPUThread>("Callback Thread");
|
||||
|
||||
thread->prio = 1001;
|
||||
thread->stack_size = 0x10000;
|
||||
thread->custom_task = task;
|
||||
thread->Run();
|
||||
|
||||
m_cb_thread = thread;
|
||||
}
|
||||
}
|
||||
|
||||
void CallbackManager::Clear()
|
||||
@ -112,7 +127,7 @@ void CallbackManager::Clear()
|
||||
m_async_list.clear();
|
||||
}
|
||||
|
||||
u64 CallbackManager::AddPauseCallback(const std::function<PauseResumeCB>& func)
|
||||
u64 CallbackManager::AddPauseCallback(std::function<PauseResumeCB> func)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(m_mutex);
|
||||
|
||||
|
@ -24,9 +24,10 @@ class CallbackManager
|
||||
std::vector<PauseResumeCBS> m_pause_cb_list;
|
||||
|
||||
public:
|
||||
void Register(const std::function<s32(PPUThread& CPU)>& func); // register callback (called in Check() method)
|
||||
void Register(std::function<s32(PPUThread& CPU)> func); // register callback (called in Check() method)
|
||||
|
||||
void Async(const std::function<void(PPUThread& CPU)>& func); // register callback for callback thread (called immediately)
|
||||
void Async(std::function<void(PPUThread& CPU)> func); // register callback for callback thread (called immediately)
|
||||
//void Async(std::function<void(ARMv7Context& context)> func);
|
||||
|
||||
bool Check(CPUThread& CPU, s32& result); // call one callback registered by Register() method
|
||||
|
||||
@ -34,7 +35,7 @@ public:
|
||||
|
||||
void Clear();
|
||||
|
||||
u64 AddPauseCallback(const std::function<PauseResumeCB>& func); // register callback for pausing/resuming emulation events
|
||||
u64 AddPauseCallback(std::function<PauseResumeCB> func); // register callback for pausing/resuming emulation events
|
||||
void RemovePauseCallback(const u64 tag); // unregister callback (uses the result of AddPauseCallback() function)
|
||||
void RunPauseCallbacks(const bool is_paused);
|
||||
};
|
||||
|
@ -2,7 +2,7 @@
|
||||
#include "Modules.h"
|
||||
#include "SysCalls.h"
|
||||
|
||||
std::string SysCalls::GetFuncName(const u64 fid)
|
||||
std::string SysCalls::GetFuncName(const s64 fid)
|
||||
{
|
||||
// check syscalls
|
||||
switch (~fid)
|
||||
|
@ -110,28 +110,28 @@ void execute_ppu_func_by_index(PPUThread& CPU, u32 index)
}

// save old syscall/NID value
auto old_last_syscall = CPU.m_last_syscall;
const auto last_code = CPU.hle_code;

// branch directly to the LLE function
if (index & EIF_USE_BRANCH)
{
// for example, FastCall2 can't work with functions which do user level context switch

if (old_last_syscall)
if (last_code)
{
CPU.m_last_syscall = func->id;
CPU.hle_code = func->id;
throw "Unfortunately, this function cannot be called from the callback.";
}

if (!func->lle_func)
{
CPU.m_last_syscall = func->id;
CPU.hle_code = func->id;
throw "Wrong usage: LLE function not set.";
}

if (func->flags & MFF_FORCED_HLE)
{
CPU.m_last_syscall = func->id;
CPU.hle_code = func->id;
throw "Wrong usage: Forced HLE enabled.";
}

@ -142,20 +142,20 @@ void execute_ppu_func_by_index(PPUThread& CPU, u32 index)

if (index & EIF_PERFORM_BLR)
{
CPU.m_last_syscall = func->id;
throw "TODO: Branch with link";
CPU.hle_code = func->id;
throw EXCEPTION("TODO: Branch with link (%s)", SysCalls::GetFuncName(func->id));
// CPU.LR = CPU.PC + 4;
}

const auto data = vm::get_ptr<be_t<u32>>(func->lle_func.addr());
CPU.SetBranch(data[0]);
CPU.PC = data[0] - 4;
CPU.GPR[2] = data[1]; // set rtoc

return;
}

// change current syscall/NID value
CPU.m_last_syscall = func->id;
CPU.hle_code = func->id;

if (func->lle_func && !(func->flags & MFF_FORCED_HLE))
{
@ -200,10 +200,10 @@ void execute_ppu_func_by_index(PPUThread& CPU, u32 index)
if (index & EIF_PERFORM_BLR)
{
// return if necessary
CPU.SetBranch(vm::cast(CPU.LR & ~3), true);
CPU.PC = vm::cast(CPU.LR & ~3) - 4;
}

CPU.m_last_syscall = old_last_syscall;
CPU.hle_code = last_code;
}
else
{
@ -29,7 +29,6 @@ AudioDecoder::AudioDecoder(s32 type, u32 addr, u32 size, vm::ptr<CellAdecCbMsg>
, memBias(0)
, cbFunc(func)
, cbArg(arg)
, adecCb(nullptr)
, is_closed(false)
, is_finished(false)
, just_started(false)
@ -223,16 +222,10 @@ void adecOpen(u32 adec_id) // TODO: call from the constructor

adec.id = adec_id;

adec.adecCb = static_cast<PPUThread*>(Emu.GetCPU().AddThread(CPU_THREAD_PPU).get());
adec.adecCb->SetName(fmt::format("AudioDecoder[0x%x] Callback", adec_id));
adec.adecCb->SetEntry(0);
adec.adecCb->SetPrio(1001);
adec.adecCb->SetStackSize(0x10000);
adec.adecCb->InitStack();
adec.adecCb->InitRegs();
adec.adecCb->DoRun();

thread_t t(fmt::format("AudioDecoder[0x%x] Thread", adec_id), [sptr]()
adec.adecCb = Emu.GetIdManager().make_ptr<PPUThread>(fmt::format("Demuxer[0x%x] Thread", adec_id));
adec.adecCb->prio = 1001;
adec.adecCb->stack_size = 0x10000;
adec.adecCb->custom_task = [sptr](PPUThread& CPU)
{
AudioDecoder& adec = *sptr;
AdecTask& task = adec.task;
@ -277,7 +270,7 @@ void adecOpen(u32 adec_id) // TODO: call from the constructor
{
// TODO: finalize
cellAdec.Warning("adecEndSeq:");
adec.cbFunc(*adec.adecCb, adec.id, CELL_ADEC_MSG_TYPE_SEQDONE, CELL_OK, adec.cbArg);
adec.cbFunc(CPU, adec.id, CELL_ADEC_MSG_TYPE_SEQDONE, CELL_OK, adec.cbArg);

adec.just_finished = true;
break;
@ -453,12 +446,12 @@ void adecOpen(u32 adec_id) // TODO: call from the constructor
if (adec.frames.push(frame, &adec.is_closed))
{
frame.data = nullptr; // to prevent destruction
adec.cbFunc(*adec.adecCb, adec.id, CELL_ADEC_MSG_TYPE_PCMOUT, CELL_OK, adec.cbArg);
adec.cbFunc(CPU, adec.id, CELL_ADEC_MSG_TYPE_PCMOUT, CELL_OK, adec.cbArg);
}
}
}

adec.cbFunc(*adec.adecCb, adec.id, CELL_ADEC_MSG_TYPE_AUDONE, task.au.auInfo_addr, adec.cbArg);
adec.cbFunc(CPU, adec.id, CELL_ADEC_MSG_TYPE_AUDONE, task.au.auInfo_addr, adec.cbArg);
break;
}

@ -475,7 +468,11 @@ void adecOpen(u32 adec_id) // TODO: call from the constructor
}

adec.is_finished = true;
});

};

adec.adecCb->Run();
adec.adecCb->Exec();
}

bool adecCheckType(s32 type)
@ -580,7 +577,7 @@ s32 cellAdecClose(u32 handle)
std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack
}

if (adec->adecCb) Emu.GetCPU().RemoveThread(adec->adecCb->GetId());
Emu.GetIdManager().remove<PPUThread>(adec->adecCb->GetId());
Emu.GetIdManager().remove<AudioDecoder>(handle);
return CELL_OK;
}
@ -1145,7 +1145,7 @@ public:
u32 sample_rate;
bool use_ats_headers;

PPUThread* adecCb;
std::shared_ptr<PPUThread> adecCb;

AudioDecoder(s32 type, u32 addr, u32 size, vm::ptr<CellAdecCbMsg> func, u32 arg);
@ -48,9 +48,17 @@ s32 cellAudioInit()
memset(vm::get_ptr<void>(g_audio.buffer), 0, AUDIO_PORT_OFFSET * AUDIO_PORT_COUNT);
memset(vm::get_ptr<void>(g_audio.indexes), 0, sizeof(u64) * AUDIO_PORT_COUNT);

// start audio thread
g_audio.audio_thread.start([]()
// check thread status
if (g_audio.thread.joinable())
{
g_audio.thread.join();
}

// start audio thread
g_audio.thread.start(WRAP_EXPR("Audio Thread"), []()
{
std::unique_lock<std::mutex> lock(g_audio.thread.mutex);

const bool do_dump = Ini.AudioDumpToFile.GetValue();

AudioDumper m_dump;
@ -73,7 +81,7 @@ s32 cellAudioInit()

squeue_t<float*, BUFFER_NUM - 1> out_queue;

thread_t iat("Internal Audio Thread", true /* autojoin */, [&out_queue]()
thread_t iat(WRAP_EXPR("Internal Audio Thread"), [&out_queue]()
{
const bool use_u16 = Ini.AudioConvertToU16.GetValue();

@ -131,9 +139,8 @@ s32 cellAudioInit()

u64 last_pause_time;
std::atomic<u64> added_time(0);
NamedThreadBase* audio_thread = GetCurrentNamedThread();

PauseCallbackRegisterer pcb(Emu.GetCallbackManager(), [&last_pause_time, &added_time, audio_thread](bool is_paused)
PauseCallbackRegisterer pcb(Emu.GetCallbackManager(), [&last_pause_time, &added_time](bool is_paused)
{
if (is_paused)
{
@ -142,7 +149,7 @@ s32 cellAudioInit()
else
{
added_time += get_system_time() - last_pause_time;
audio_thread->Notify();
g_audio.thread.cv.notify_one();
}
});

@ -150,7 +157,7 @@ s32 cellAudioInit()
{
if (Emu.IsPaused())
{
GetCurrentNamedThread()->WaitForAnySignal();
g_audio.thread.cv.wait_for(lock, std::chrono::milliseconds(1));
continue;
}

@ -167,7 +174,7 @@ s32 cellAudioInit()
const u64 expected_time = g_audio.counter * AUDIO_SAMPLES * MHZ / 48000;
if (expected_time >= stamp0 - g_audio.start_time)
{
std::this_thread::sleep_for(std::chrono::milliseconds(1));
g_audio.thread.cv.wait_for(lock, std::chrono::milliseconds(1));
continue;
}

@ -365,8 +372,6 @@ s32 cellAudioInit()
//const u64 stamp2 = get_system_time();

{
std::lock_guard<std::mutex> lock(g_audio.mutex);

// update indices:

auto indexes = vm::ptr<u64>::make(g_audio.indexes);
@ -438,7 +443,8 @@ s32 cellAudioQuit()
return CELL_AUDIO_ERROR_NOT_INIT;
}

g_audio.audio_thread.join();
g_audio.thread.cv.notify_one();
g_audio.thread.join();
g_audio.state.exchange(AUDIO_STATE_NOT_INITIALIZED);
return CELL_OK;
}
@ -672,7 +678,7 @@ s32 cellAudioGetPortTimestamp(u32 portNum, u64 tag, vm::ptr<u64> stamp)

// TODO: check tag (CELL_AUDIO_ERROR_TAG_NOT_FOUND error)

std::lock_guard<std::mutex> lock(g_audio.mutex);
std::lock_guard<std::mutex> lock(g_audio.thread.mutex);

*stamp = g_audio.start_time + (port.counter + (tag - port.tag)) * 256000000 / 48000;

@ -705,7 +711,7 @@ s32 cellAudioGetPortBlockTag(u32 portNum, u64 blockNo, vm::ptr<u64> tag)
return CELL_AUDIO_ERROR_PARAM;
}

std::lock_guard<std::mutex> lock(g_audio.mutex);
std::lock_guard<std::mutex> lock(g_audio.thread.mutex);

u64 tag_base = port.tag;
if (tag_base % port.block > blockNo)
@ -801,7 +807,7 @@ s32 cellAudioSetNotifyEventQueue(u64 key)
return CELL_AUDIO_ERROR_NOT_INIT;
}

std::lock_guard<std::mutex> lock(g_audio.mutex);
std::lock_guard<std::mutex> lock(g_audio.thread.mutex);

for (auto k : g_audio.keys) // check for duplicates
{
@ -834,7 +840,7 @@ s32 cellAudioRemoveNotifyEventQueue(u64 key)
return CELL_AUDIO_ERROR_NOT_INIT;
}

std::lock_guard<std::mutex> lock(g_audio.mutex);
std::lock_guard<std::mutex> lock(g_audio.thread.mutex);

for (auto i = g_audio.keys.begin(); i != g_audio.keys.end(); i++)
{
@ -123,9 +123,8 @@ struct AudioPortConfig

struct AudioConfig //custom structure
{
std::mutex mutex;
atomic<AudioState> state;
thread_t audio_thread;
thread_t thread;

AudioPortConfig ports[AUDIO_PORT_COUNT];
u32 buffer; // 1 MB memory for audio ports
@ -134,9 +133,7 @@ struct AudioConfig //custom structure
u64 start_time;
std::vector<u64> keys;

AudioConfig() : audio_thread("Audio Thread")
{
}
AudioConfig() = default;

u32 open_port()
{
@ -305,16 +305,10 @@ void dmuxOpen(u32 dmux_id) // TODO: call from the constructor

dmux.id = dmux_id;

dmux.dmuxCb = static_cast<PPUThread*>(Emu.GetCPU().AddThread(CPU_THREAD_PPU).get());
dmux.dmuxCb->SetName(fmt::format("Demuxer[0x%x] Callback", dmux_id));
dmux.dmuxCb->SetEntry(0);
dmux.dmuxCb->SetPrio(1001);
dmux.dmuxCb->SetStackSize(0x10000);
dmux.dmuxCb->InitStack();
dmux.dmuxCb->InitRegs();
dmux.dmuxCb->DoRun();

thread_t t(fmt::format("Demuxer[0x%x] Thread", dmux_id), [sptr]()
dmux.dmuxCb = Emu.GetIdManager().make_ptr<PPUThread>(fmt::format("Demuxer[0x%x] Thread", dmux_id));
dmux.dmuxCb->prio = 1001;
dmux.dmuxCb->stack_size = 0x10000;
dmux.dmuxCb->custom_task = [sptr](PPUThread& CPU)
{
Demuxer& dmux = *sptr;

@ -352,7 +346,7 @@ void dmuxOpen(u32 dmux_id) // TODO: call from the constructor
auto dmuxMsg = vm::ptr<CellDmuxMsg>::make(dmux.memAddr + (cb_add ^= 16));
dmuxMsg->msgType = CELL_DMUX_MSG_TYPE_DEMUX_DONE;
dmuxMsg->supplementalInfo = stream.userdata;
dmux.cbFunc(*dmux.dmuxCb, dmux.id, dmuxMsg, dmux.cbArg);
dmux.cbFunc(CPU, dmux.id, dmuxMsg, dmux.cbArg);

dmux.is_working = false;

@ -505,7 +499,7 @@ void dmuxOpen(u32 dmux_id) // TODO: call from the constructor
auto esMsg = vm::ptr<CellDmuxEsMsg>::make(dmux.memAddr + (cb_add ^= 16));
esMsg->msgType = CELL_DMUX_ES_MSG_TYPE_AU_FOUND;
esMsg->supplementalInfo = stream.userdata;
es.cbFunc(*dmux.dmuxCb, dmux.id, es.id, esMsg, es.cbArg);
es.cbFunc(CPU, dmux.id, es.id, esMsg, es.cbArg);
}
}
else
@ -570,7 +564,7 @@ void dmuxOpen(u32 dmux_id) // TODO: call from the constructor
auto esMsg = vm::ptr<CellDmuxEsMsg>::make(dmux.memAddr + (cb_add ^= 16));
esMsg->msgType = CELL_DMUX_ES_MSG_TYPE_AU_FOUND;
esMsg->supplementalInfo = stream.userdata;
es.cbFunc(*dmux.dmuxCb, dmux.id, es.id, esMsg, es.cbArg);
es.cbFunc(CPU, dmux.id, es.id, esMsg, es.cbArg);
}

if (pes.has_ts)
@ -646,7 +640,7 @@ void dmuxOpen(u32 dmux_id) // TODO: call from the constructor
auto dmuxMsg = vm::ptr<CellDmuxMsg>::make(dmux.memAddr + (cb_add ^= 16));
dmuxMsg->msgType = CELL_DMUX_MSG_TYPE_DEMUX_DONE;
dmuxMsg->supplementalInfo = stream.userdata;
dmux.cbFunc(*dmux.dmuxCb, dmux.id, dmuxMsg, dmux.cbArg);
dmux.cbFunc(CPU, dmux.id, dmuxMsg, dmux.cbArg);

stream = {};

@ -734,7 +728,7 @@ void dmuxOpen(u32 dmux_id) // TODO: call from the constructor
auto esMsg = vm::ptr<CellDmuxEsMsg>::make(dmux.memAddr + (cb_add ^= 16));
esMsg->msgType = CELL_DMUX_ES_MSG_TYPE_AU_FOUND;
esMsg->supplementalInfo = stream.userdata;
es.cbFunc(*dmux.dmuxCb, dmux.id, es.id, esMsg, es.cbArg);
es.cbFunc(CPU, dmux.id, es.id, esMsg, es.cbArg);
}

if (es.raw_data.size())
@ -746,7 +740,7 @@ void dmuxOpen(u32 dmux_id) // TODO: call from the constructor
auto esMsg = vm::ptr<CellDmuxEsMsg>::make(dmux.memAddr + (cb_add ^= 16));
esMsg->msgType = CELL_DMUX_ES_MSG_TYPE_FLUSH_DONE;
esMsg->supplementalInfo = stream.userdata;
es.cbFunc(*dmux.dmuxCb, dmux.id, es.id, esMsg, es.cbArg);
es.cbFunc(CPU, dmux.id, es.id, esMsg, es.cbArg);
break;
}

@ -769,7 +763,10 @@ void dmuxOpen(u32 dmux_id) // TODO: call from the constructor
}

dmux.is_finished = true;
});
};

dmux.dmuxCb->Run();
dmux.dmuxCb->Exec();
}

s32 cellDmuxQueryAttr(vm::cptr<CellDmuxType> type, vm::ptr<CellDmuxAttr> attr)
@ -876,7 +873,7 @@ s32 cellDmuxClose(u32 handle)
std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack
}

if (dmux->dmuxCb) Emu.GetCPU().RemoveThread(dmux->dmuxCb->GetId());
Emu.GetIdManager().remove<PPUThread>(dmux->dmuxCb->GetId());
Emu.GetIdManager().remove<Demuxer>(handle);
return CELL_OK;
}
@ -408,7 +408,7 @@ public:
std::atomic<bool> is_running;
std::atomic<bool> is_working;

PPUThread* dmuxCb;
std::shared_ptr<PPUThread> dmuxCb;

Demuxer(u32 addr, u32 size, vm::ptr<CellDmuxCbMsg> func, u32 arg)
: is_finished(false)
@ -419,7 +419,6 @@ public:
, memSize(size)
, cbFunc(func)
, cbArg(arg)
, dmuxCb(nullptr)
{
}
};
@ -493,10 +493,9 @@ s32 cellFsStReadStart(u32 fd, u64 offset, u64 size)
offset = std::min<u64>(file->file->GetSize(), offset);
size = std::min<u64>(file->file->GetSize() - offset, size);

file->st_thread.set_name(fmt::format("FS ST Thread[0x%x]", fd));
file->st_read_size = size;

file->st_thread.start([=]()
file->st_thread.start([=]{ return fmt::format("FS ST Thread[0x%x]", fd); }, [=]()
{
std::unique_lock<std::mutex> lock(file->mutex);

@ -935,7 +934,7 @@ s32 cellFsAioRead(vm::ptr<CellFsAio> aio, vm::ptr<s32> id, fs_aio_cb_t func)

const s32 xid = (*id = ++g_fs_aio_id);

thread_t("FS AIO Read Thread", [=]{ fsAio(aio, false, xid, func); }).detach();
thread_t(WRAP_EXPR("FS AIO Read Thread"), [=]{ fsAio(aio, false, xid, func); }).detach();

return CELL_OK;
}
@ -948,7 +947,7 @@ s32 cellFsAioWrite(vm::ptr<CellFsAio> aio, vm::ptr<s32> id, fs_aio_cb_t func)

const s32 xid = (*id = ++g_fs_aio_id);

thread_t("FS AIO Write Thread", [=]{ fsAio(aio, true, xid, func); }).detach();
thread_t(WRAP_EXPR("FS AIO Write Thread"), [=]{ fsAio(aio, true, xid, func); }).detach();

return CELL_OK;
}
@ -105,7 +105,7 @@ s32 cellMsgDialogOpen2(u32 type, vm::cptr<char> msgString, vm::ptr<CellMsgDialog

std::string msg = msgString.get_ptr();

thread_t t("MsgDialog Thread", [type, msg, callback, userData, extParam]()
thread_t(WRAP_EXPR("MsgDialog Thread"), [type, msg, callback, userData, extParam]()
{
switch (type & CELL_MSGDIALOG_TYPE_SE_TYPE)
{
@ -161,7 +161,8 @@ s32 cellMsgDialogOpen2(u32 type, vm::cptr<char> msgString, vm::ptr<CellMsgDialog
g_msg_dialog->Destroy();
g_msg_dialog->state = msgDialogNone;
});
});

}).detach();

return CELL_OK;
}
@ -156,9 +156,9 @@ s32 _cellSpursTasksetAttributeInitialize(vm::ptr<CellSpursTasksetAttribute> attr
//
// SPURS task functions
//
s32 spursCreateTask(vm::ptr<CellSpursTaskset> taskset, vm::ptr<u32> task_id, vm::ptr<u32> elf_addr, vm::ptr<u32> context_addr, u32 context_size, vm::ptr<CellSpursTaskLsPattern> ls_pattern, vm::ptr<CellSpursTaskArgument> arg);
s32 spursCreateTask(vm::ptr<CellSpursTaskset> taskset, vm::ptr<u32> task_id, vm::cptr<void> elf, vm::cptr<void> context, u32 size, vm::ptr<CellSpursTaskLsPattern> ls_pattern, vm::ptr<CellSpursTaskArgument> arg);
s32 spursTaskStart(PPUThread& CPU, vm::ptr<CellSpursTaskset> taskset, u32 taskId);
s32 cellSpursCreateTask(PPUThread& CPU, vm::ptr<CellSpursTaskset> taskset, vm::ptr<u32> taskId, u32 elf_addr, u32 context_addr, u32 context_size, vm::ptr<CellSpursTaskLsPattern> lsPattern, vm::ptr<CellSpursTaskArgument> argument);
s32 cellSpursCreateTask(PPUThread& CPU, vm::ptr<CellSpursTaskset> taskset, vm::ptr<u32> taskId, vm::cptr<void> elf, vm::cptr<void> context, u32 size, vm::ptr<CellSpursTaskLsPattern> lsPattern, vm::ptr<CellSpursTaskArgument> argument);
s32 _cellSpursSendSignal(PPUThread& CPU, vm::ptr<CellSpursTaskset> taskset, u32 taskId);
s32 cellSpursCreateTaskWithAttribute();
s32 cellSpursTaskExitCodeGet();
@ -303,16 +303,14 @@ s32 spursCreateLv2EventQueue(PPUThread& CPU, vm::ptr<CellSpurs> spurs, vm::ptr<u
};

sys_event_queue_attribute_initialize(attr);
memcpy(attr->name, name.get_ptr(), sizeof(attr->name));
auto rc = sys_event_queue_create(queueId, attr, SYS_EVENT_QUEUE_LOCAL, size);
if (rc != CELL_OK)
std::memcpy(attr->name, name.get_ptr(), sizeof(attr->name));
if (s32 rc = sys_event_queue_create(queueId, attr, SYS_EVENT_QUEUE_LOCAL, size))
{
return rc;
}

vm::stackvar<u8> _port(CPU);
rc = spursAttachLv2EventQueue(CPU, spurs, *queueId, _port, 1 /*isDynamic*/, true /*spursCreated*/);
if (rc != CELL_OK)
if (s32 rc = spursAttachLv2EventQueue(CPU, spurs, *queueId, _port, 1 /*isDynamic*/, true /*spursCreated*/))
{
sys_event_queue_destroy(*queueId, SYS_EVENT_QUEUE_DESTROY_FORCE);
}
@ -873,7 +871,7 @@ s32 spursStopEventHelper(PPUThread& CPU, vm::ptr<CellSpurs> spurs)
return CELL_SPURS_CORE_ERROR_STAT;
}

if (sys_ppu_thread_join(spurs->ppu1, vm::stackvar<be_t<u64>>(CPU)) != CELL_OK)
if (sys_ppu_thread_join(CPU, spurs->ppu1, vm::stackvar<be_t<u64>>(CPU)) != CELL_OK)
{
return CELL_SPURS_CORE_ERROR_STAT;
}
@ -932,7 +930,7 @@ s32 spursJoinHandlerThread(PPUThread& CPU, vm::ptr<CellSpurs> spurs)
return CELL_SPURS_CORE_ERROR_STAT;
}

if (s32 rc = sys_ppu_thread_join(spurs->ppu0, vm::stackvar<be_t<u64>>(CPU)))
if (s32 rc = sys_ppu_thread_join(CPU, spurs->ppu0, vm::stackvar<be_t<u64>>(CPU)))
{
throw __FUNCTION__;
}
@ -1171,7 +1169,7 @@ s32 spursInit(
return rollback(), rc;
}

const auto spuThread = std::static_pointer_cast<SPUThread>(Emu.GetCPU().GetThread(spurs->spus[num] = spuThreadId.value()));
const auto spuThread = Emu.GetIdManager().get<SPUThread>(spurs->spus[num] = spuThreadId.value());

// entry point cannot be initialized immediately because SPU LS will be rewritten by sys_spu_thread_group_start()
spuThread->m_custom_task = [spurs](SPUThread& SPU)
@ -1288,21 +1286,7 @@ s32 cellSpursInitialize(PPUThread& CPU, vm::ptr<CellSpurs> spurs, s32 nSpus, s32
{
cellSpurs.Warning("cellSpursInitialize(spurs=*0x%x, nSpus=%d, spuPriority=%d, ppuPriority=%d, exitIfNoWork=%d)", spurs, nSpus, spuPriority, ppuPriority, exitIfNoWork);

return spursInit(
CPU,
spurs,
0,
0,
nSpus,
spuPriority,
ppuPriority,
exitIfNoWork ? SAF_EXIT_IF_NO_WORK : SAF_NONE,
vm::null,
0,
0,
vm::null,
0,
0);
return spursInit(CPU, spurs, 0, 0, nSpus, spuPriority, ppuPriority, exitIfNoWork ? SAF_EXIT_IF_NO_WORK : SAF_NONE, vm::null, 0, 0, vm::null, 0, 0);
}

/// Initialise SPURS
@ -3521,43 +3505,43 @@ s32 cellSpursShutdownTaskset(vm::ptr<CellSpursTaskset> taskset)
return CELL_OK;
}

s32 spursCreateTask(vm::ptr<CellSpursTaskset> taskset, vm::ptr<u32> task_id, vm::ptr<u32> elf_addr, vm::ptr<u32> context_addr, u32 context_size, vm::ptr<CellSpursTaskLsPattern> ls_pattern, vm::ptr<CellSpursTaskArgument> arg)
s32 spursCreateTask(vm::ptr<CellSpursTaskset> taskset, vm::ptr<u32> task_id, vm::cptr<void> elf, vm::cptr<void> context, u32 size, vm::ptr<CellSpursTaskLsPattern> ls_pattern, vm::ptr<CellSpursTaskArgument> arg)
{
if (!taskset || !elf_addr)
if (!taskset || !elf)
{
return CELL_SPURS_TASK_ERROR_NULL_POINTER;
}

if (elf_addr % 16)
if (elf % 16)
{
return CELL_SPURS_TASK_ERROR_ALIGN;
}

auto sdk_version = spursGetSdkVersion();
if (sdk_version < 0x27FFFF)
if (spursGetSdkVersion() < 0x27FFFF)
{
if (context_addr % 16)
if (context % 16)
{
return CELL_SPURS_TASK_ERROR_ALIGN;
}
}
else
{
if (context_addr % 128)
if (context % 128)
{
return CELL_SPURS_TASK_ERROR_ALIGN;
}
}

u32 alloc_ls_blocks = 0;
if (context_addr)

if (context)
{
if (context_size < CELL_SPURS_TASK_EXECUTION_CONTEXT_SIZE)
if (size < CELL_SPURS_TASK_EXECUTION_CONTEXT_SIZE)
{
return CELL_SPURS_TASK_ERROR_INVAL;
}

alloc_ls_blocks = context_size > 0x3D400 ? 0x7A : ((context_size - 0x400) >> 11);
alloc_ls_blocks = size > 0x3D400 ? 0x7A : ((size - 0x400) >> 11);
if (ls_pattern)
{
u128 ls_pattern_128 = u128::from64r(ls_pattern->_u64[0], ls_pattern->_u64[1]);
@ -3607,8 +3591,8 @@ s32 spursCreateTask(vm::ptr<CellSpursTaskset> taskset, vm::ptr<u32> task_id, vm:
return CELL_SPURS_TASK_ERROR_AGAIN;
}

taskset->task_info[tmp_task_id].elf_addr.set(elf_addr.addr());
taskset->task_info[tmp_task_id].context_save_storage_and_alloc_ls_blocks = (context_addr.addr() | alloc_ls_blocks);
taskset->task_info[tmp_task_id].elf = elf;
taskset->task_info[tmp_task_id].context_save_storage_and_alloc_ls_blocks = (context.addr() | alloc_ls_blocks);
taskset->task_info[tmp_task_id].args = *arg;
if (ls_pattern)
{
@ -3642,11 +3626,9 @@ s32 spursTaskStart(PPUThread& CPU, vm::ptr<CellSpursTaskset> taskset, u32 taskId
return CELL_OK;
}

s32 cellSpursCreateTask(PPUThread& CPU, vm::ptr<CellSpursTaskset> taskset, vm::ptr<u32> taskId, u32 elf_addr, u32 context_addr, u32 context_size,
vm::ptr<CellSpursTaskLsPattern> lsPattern, vm::ptr<CellSpursTaskArgument> argument)
s32 cellSpursCreateTask(PPUThread& CPU, vm::ptr<CellSpursTaskset> taskset, vm::ptr<u32> taskId, vm::cptr<void> elf, vm::cptr<void> context, u32 size, vm::ptr<CellSpursTaskLsPattern> lsPattern, vm::ptr<CellSpursTaskArgument> argument)
{
cellSpurs.Warning("cellSpursCreateTask(taskset=*0x%x, taskID=*0x%x, elf_addr=0x%x, context_addr=0x%x, context_size=%d, lsPattern_addr=0x%x, argument_addr=0x%x)",
taskset.addr(), taskId.addr(), elf_addr, context_addr, context_size, lsPattern.addr(), argument.addr());
cellSpurs.Warning("cellSpursCreateTask(taskset=*0x%x, taskID=*0x%x, elf=*0x%x, context=*0x%x, size=0x%x, lsPattern=*0x%x, argument=*0x%x)", taskset, taskId, elf, context, size, lsPattern, argument);

if (!taskset)
{
@ -3659,7 +3641,7 @@ s32 cellSpursCreateTask(PPUThread& CPU, vm::ptr<CellSpursTaskset> taskset, vm::p
}

vm::stackvar<be_t<u32>> tmpTaskId(CPU);
auto rc = spursCreateTask(taskset, tmpTaskId, vm::ptr<u32>::make(elf_addr), vm::ptr<u32>::make(context_addr), context_size, lsPattern, argument);
auto rc = spursCreateTask(taskset, tmpTaskId, elf, context, size, lsPattern, argument);
if (rc != CELL_OK)
{
return rc;
@ -3732,7 +3714,7 @@ s32 cellSpursCreateTaskWithAttribute()

s32 cellSpursTasksetAttributeSetName(vm::ptr<CellSpursTasksetAttribute> attr, vm::cptr<char> name)
{
cellSpurs.Warning("%s(attr=0x%x, name=0x%x)", __FUNCTION__, attr.addr(), name.addr());
cellSpurs.Warning("cellSpursTasksetAttributeSetName(attr=*0x%x, name=*0x%x)", attr, name);

if (!attr || !name)
{
@ -3750,7 +3732,7 @@ s32 cellSpursTasksetAttributeSetName(vm::ptr<CellSpursTasksetAttribute> attr, vm

s32 cellSpursTasksetAttributeSetTasksetSize(vm::ptr<CellSpursTasksetAttribute> attr, u32 size)
{
cellSpurs.Warning("%s(attr=0x%x, size=0x%x)", __FUNCTION__, attr.addr(), size);
cellSpurs.Warning("cellSpursTasksetAttributeSetTasksetSize(attr=*0x%x, size=0x%x)", attr, size);

if (!attr)
{
@ -3773,7 +3755,7 @@ s32 cellSpursTasksetAttributeSetTasksetSize(vm::ptr<CellSpursTasksetAttribute> a

s32 cellSpursTasksetAttributeEnableClearLS(vm::ptr<CellSpursTasksetAttribute> attr, s32 enable)
{
cellSpurs.Warning("%s(attr=0x%x, enable=%d)", __FUNCTION__, attr.addr(), enable);
cellSpurs.Warning("cellSpursTasksetAttributeEnableClearLS(attr=*0x%x, enable=%d)", attr, enable);

if (!attr)
{
@ -3791,7 +3773,7 @@ s32 cellSpursTasksetAttributeEnableClearLS(vm::ptr<CellSpursTasksetAttribute> at

s32 _cellSpursTasksetAttribute2Initialize(vm::ptr<CellSpursTasksetAttribute2> attribute, u32 revision)
{
cellSpurs.Warning("_cellSpursTasksetAttribute2Initialize(attribute_addr=0x%x, revision=%d)", attribute.addr(), revision);
cellSpurs.Warning("_cellSpursTasksetAttribute2Initialize(attribute=*0x%x, revision=%d)", attribute, revision);

memset(attribute.get_ptr(), 0, sizeof(CellSpursTasksetAttribute2));
attribute->revision = revision;
@ -3859,7 +3841,7 @@ s32 cellSpursTaskAttributeSetExitCodeContainer()

s32 _cellSpursTaskAttribute2Initialize(vm::ptr<CellSpursTaskAttribute2> attribute, u32 revision)
{
cellSpurs.Warning("_cellSpursTaskAttribute2Initialize(attribute_addr=0x%x, revision=%d)", attribute.addr(), revision);
cellSpurs.Warning("_cellSpursTaskAttribute2Initialize(attribute=*0x%x, revision=%d)", attribute, revision);

attribute->revision = revision;
attribute->sizeContext = 0;
@ -3939,7 +3921,7 @@ s32 cellSpursCreateTask2WithBinInfo()

s32 cellSpursTasksetSetExceptionEventHandler(vm::ptr<CellSpursTaskset> taskset, vm::ptr<CellSpursTasksetExceptionEventHandler> handler, vm::ptr<u64> arg)
{
cellSpurs.Warning("%s(taskset=0x5x, handler=0x%x, arg=0x%x)", __FUNCTION__, taskset.addr(), handler.addr(), arg.addr());
cellSpurs.Warning("cellSpursTasksetSetExceptionEventHandler(taskset=*0x%x, handler=*0x%x, arg=*0x%x)", taskset, handler, arg);

if (!taskset || !handler)
{
@ -3968,7 +3950,7 @@ s32 cellSpursTasksetSetExceptionEventHandler(vm::ptr<CellSpursTaskset> taskset,

s32 cellSpursTasksetUnsetExceptionEventHandler(vm::ptr<CellSpursTaskset> taskset)
{
cellSpurs.Warning("%s(taskset=0x%x)", __FUNCTION__, taskset.addr());
cellSpurs.Warning("cellSpursTasksetUnsetExceptionEventHandler(taskset=*0x%x)", taskset);

if (!taskset)
{
@ -4012,7 +3994,7 @@ s32 cellSpursLookUpTasksetAddress(PPUThread& CPU, vm::ptr<CellSpurs> spurs, vm::

s32 cellSpursTasksetGetSpursAddress(vm::cptr<CellSpursTaskset> taskset, vm::ptr<u32> spurs)
{
cellSpurs.Warning("%s(taskset=0x%x, spurs=0x%x)", __FUNCTION__, taskset.addr(), spurs.addr());
cellSpurs.Warning("cellSpursTasksetGetSpursAddress(taskset=*0x%x, spurs=**0x%x)", taskset, spurs);

if (!taskset || !spurs)
{
@ -4041,8 +4023,8 @@ s32 cellSpursGetTasksetInfo()

s32 _cellSpursTasksetAttributeInitialize(vm::ptr<CellSpursTasksetAttribute> attribute, u32 revision, u32 sdk_version, u64 args, vm::cptr<u8> priority, u32 max_contention)
{
cellSpurs.Warning("%s(attribute=0x%x, revision=%d, skd_version=%d, args=0x%llx, priority=0x%x, max_contention=%d)",
__FUNCTION__, attribute.addr(), revision, sdk_version, args, priority.addr(), max_contention);
cellSpurs.Warning("_cellSpursTasksetAttributeInitialize(attribute=*0x%x, revision=%d, skd_version=0x%x, args=0x%llx, priority=*0x%x, max_contention=%d)",
attribute, revision, sdk_version, args, priority, max_contention);

if (!attribute)
{
@ -735,7 +735,7 @@ struct set_alignment(128) CellSpursTaskset
struct TaskInfo
{
CellSpursTaskArgument args; // 0x00
vm::bptr<u64, u64> elf_addr; // 0x10
vm::bcptr<void, u64> elf; // 0x10
be_t<u64> context_save_storage_and_alloc_ls_blocks; // 0x18 This is (context_save_storage_addr | allocated_ls_blocks)
CellSpursTaskLsPattern ls_pattern; // 0x20
};
@ -109,7 +109,7 @@ u32 cellSpursModulePollStatus(SPUThread & spu, u32 * status) {
/// Exit current workload
void cellSpursModuleExit(SPUThread & spu) {
auto ctxt = vm::get_ptr<SpursKernelContext>(spu.offset + 0x100);
spu.SetBranch(ctxt->exitToKernelAddr);
spu.PC = ctxt->exitToKernelAddr - 4;
throw SpursModuleExit();
}

@ -506,7 +506,7 @@ void spursKernelDispatchWorkload(SPUThread & spu, u64 widAndPollStatus) {
spu.GPR[3]._u32[3] = 0x100;
spu.GPR[4]._u64[1] = wklInfo->arg;
spu.GPR[5]._u32[3] = pollStatus;
spu.SetBranch(0xA00);
spu.PC = 0xA00 - 4;
}

/// SPURS kernel workload exit
@ -606,8 +606,10 @@ bool spursSysServiceEntry(SPUThread & spu) {
void spursSysServiceIdleHandler(SPUThread & spu, SpursKernelContext * ctxt) {
bool shouldExit;

std::unique_lock<std::mutex> lock(spu.mutex, std::defer_lock);

while (true) {
vm::reservation_acquire(vm::get_ptr(spu.offset + 0x100), vm::cast(ctxt->spurs.addr()), 128, [&spu](){ spu.Notify(); });
vm::reservation_acquire(vm::get_ptr(spu.offset + 0x100), vm::cast(ctxt->spurs.addr()), 128, [&spu](){ spu.cv.notify_one(); });
auto spurs = vm::get_ptr<CellSpurs>(spu.offset + 0x100);

// Find the number of SPUs that are idling in this SPURS instance
@ -674,8 +676,10 @@ void spursSysServiceIdleHandler(SPUThread & spu, SpursKernelContext * ctxt) {
// If all SPUs are idling and the exit_if_no_work flag is set then the SPU thread group must exit. Otherwise wait for external events.
if (spuIdling && shouldExit == false && foundReadyWorkload == false) {
// The system service blocks by making a reservation and waiting on the lock line reservation lost event.
spu.WaitForAnySignal(1);
if (Emu.IsStopped()) throw SpursModuleExit();
if (!lock) lock.lock();
spu.cv.wait_for(lock, std::chrono::milliseconds(1));
continue;
}

if (vm::reservation_update(vm::cast(ctxt->spurs.addr()), vm::get_ptr(spu.offset + 0x100), 128) && (shouldExit || foundReadyWorkload)) {
@ -1127,9 +1131,10 @@ bool spursTasksetSyscallEntry(SPUThread & spu) {
spu.GPR[3]._u32[3] = spursTasksetProcessSyscall(spu, spu.GPR[3]._u32[3], spu.GPR[4]._u32[3]);

// Resume the previously executing task if the syscall did not cause a context switch
if (spu.m_is_branch == false) {
spursTasksetResumeTask(spu);
}
throw __FUNCTION__;
//if (spu.m_is_branch == false) {
// spursTasksetResumeTask(spu);
//}
}

catch (SpursModuleExit) {
@ -1149,7 +1154,7 @@ void spursTasksetResumeTask(SPUThread & spu) {
spu.GPR[80 + i] = ctxt->savedContextR80ToR127[i];
}

spu.SetBranch(spu.GPR[0]._u32[3]);
spu.PC = spu.GPR[0]._u32[3] - 4;
}

/// Start a task
@ -1165,7 +1170,7 @@ void spursTasksetStartTask(SPUThread & spu, CellSpursTaskArgument & taskArgs) {
spu.GPR[i].clear();
}

spu.SetBranch(ctxt->savedContextLr.value()._u32[3]);
spu.PC = ctxt->savedContextLr.value()._u32[3] - 4;
}

/// Process a request and update the state of the taskset
@ -1457,8 +1462,8 @@ void spursTasksetDispatch(SPUThread & spu) {
// DMA in the task info for the selected task
memcpy(vm::get_ptr(spu.offset + 0x2780), &ctxt->taskset->task_info[taskId], sizeof(CellSpursTaskset::TaskInfo));
auto taskInfo = vm::get_ptr<CellSpursTaskset::TaskInfo>(spu.offset + 0x2780);
auto elfAddr = taskInfo->elf_addr.addr().value();
taskInfo->elf_addr.set(taskInfo->elf_addr.addr() & 0xFFFFFFFFFFFFFFF8ull);
auto elfAddr = taskInfo->elf.addr().value();
taskInfo->elf.set(taskInfo->elf.addr() & 0xFFFFFFFFFFFFFFF8ull);

// Trace - Task: Incident=dispatch
CellSpursTracePacket pkt;
@ -1475,7 +1480,7 @@ void spursTasksetDispatch(SPUThread & spu) {

u32 entryPoint;
u32 lowestLoadAddr;
if (spursTasksetLoadElf(spu, &entryPoint, &lowestLoadAddr, taskInfo->elf_addr.addr(), false) != CELL_OK) {
if (spursTasksetLoadElf(spu, &entryPoint, &lowestLoadAddr, taskInfo->elf.addr(), false) != CELL_OK) {
assert(!"spursTaskLoadElf() failed");
spursHalt(spu);
}
@ -1516,7 +1521,7 @@ void spursTasksetDispatch(SPUThread & spu) {
if (ls_pattern != u128::from64r(0x03FFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull)) {
// Load the ELF
u32 entryPoint;
if (spursTasksetLoadElf(spu, &entryPoint, nullptr, taskInfo->elf_addr.addr(), true) != CELL_OK) {
if (spursTasksetLoadElf(spu, &entryPoint, nullptr, taskInfo->elf.addr(), true) != CELL_OK) {
assert(!"spursTasksetLoadElf() failed");
spursHalt(spu);
}
@ -39,7 +39,6 @@ VideoDecoder::VideoDecoder(s32 type, u32 profile, u32 addr, u32 size, vm::ptr<Ce
, codec(nullptr)
, input_format(nullptr)
, ctx(nullptr)
, vdecCb(nullptr)
{
av_register_all();
avcodec_register_all();
@ -215,16 +214,10 @@ void vdecOpen(u32 vdec_id) // TODO: call from the constructor

vdec.id = vdec_id;

vdec.vdecCb = static_cast<PPUThread*>(Emu.GetCPU().AddThread(CPU_THREAD_PPU).get());
vdec.vdecCb->SetName(fmt::format("VideoDecoder[0x%x] Callback", vdec_id));
vdec.vdecCb->SetEntry(0);
vdec.vdecCb->SetPrio(1001);
vdec.vdecCb->SetStackSize(0x10000);
vdec.vdecCb->InitStack();
vdec.vdecCb->InitRegs();
vdec.vdecCb->DoRun();

thread_t t(fmt::format("VideoDecoder[0x%x] Thread", vdec_id), [sptr]()
vdec.vdecCb = Emu.GetIdManager().make_ptr<PPUThread>(fmt::format("VideoDecoder[0x%x] Thread", vdec_id));
vdec.vdecCb->prio = 1001;
vdec.vdecCb->stack_size = 0x10000;
vdec.vdecCb->custom_task = [sptr](PPUThread& CPU)
{
VideoDecoder& vdec = *sptr;
VdecTask& task = vdec.task;
@ -547,7 +540,10 @@ void vdecOpen(u32 vdec_id) // TODO: call from the constructor
}

vdec.is_finished = true;
});
};

vdec.vdecCb->Run();
vdec.vdecCb->Exec();
}

s32 cellVdecQueryAttr(vm::cptr<CellVdecType> type, vm::ptr<CellVdecAttr> attr)
@ -606,7 +602,7 @@ s32 cellVdecClose(u32 handle)
std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack
}

if (vdec->vdecCb) Emu.GetCPU().RemoveThread(vdec->vdecCb->GetId());
Emu.GetIdManager().remove<PPUThread>(vdec->vdecCb->GetId());
Emu.GetIdManager().remove<VideoDecoder>(handle);
return CELL_OK;
}
@ -729,7 +729,7 @@ public:
u32 frc_set; // frame rate overwriting
AVRational rfr, afr;

PPUThread* vdecCb;
std::shared_ptr<PPUThread> vdecCb;

VideoDecoder(s32 type, u32 profile, u32 addr, u32 size, vm::ptr<CellVdecCbMsg> func, u32 arg);
@ -2,6 +2,7 @@
#include "Utilities/Log.h"
#include "Emu/Memory/Memory.h"
#include "Emu/System.h"
#include "Emu/IdManager.h"
#include "Emu/SysCalls/Modules.h"
#include "Emu/SysCalls/CB_FUNC.h"
#include "Emu/Cell/PPUInstrTable.h"
@ -328,21 +329,13 @@ int cellSurMixerCreate(vm::cptr<CellSurMixerConfig> config)

libmixer.Warning("*** surMixer created (ch1=%d, ch2=%d, ch6=%d, ch8=%d)", config->chStrips1, config->chStrips2, config->chStrips6, config->chStrips8);

thread_t t("Surmixer Thread", []()
auto ppu = Emu.GetIdManager().make_ptr<PPUThread>("Surmixer Thread");
ppu->prio = 1001;
ppu->stack_size = 0x10000;
ppu->custom_task = [](PPUThread& CPU)
{
AudioPortConfig& port = g_audio.ports[g_surmx.audio_port];

auto cb_thread = Emu.GetCPU().AddThread(CPU_THREAD_PPU);

auto& ppu = static_cast<PPUThread&>(*cb_thread);
ppu.SetName("Surmixer Callback Thread");
ppu.SetEntry(0);
ppu.SetPrio(1001);
ppu.SetStackSize(0x10000);
ppu.InitStack();
ppu.InitRegs();
ppu.DoRun();

while (port.state.load() != AUDIO_PORT_STATE_CLOSED && !Emu.IsStopped())
{
if (mixcount > (port.tag + 0)) // adding positive value (1-15): preemptive buffer filling (hack)
@ -358,7 +351,7 @@ int cellSurMixerCreate(vm::cptr<CellSurMixerConfig> config)
memset(mixdata, 0, sizeof(mixdata));
if (surMixerCb)
{
surMixerCb(ppu, surMixerCbArg, (u32)mixcount, 256);
surMixerCb(CPU, surMixerCbArg, (u32)mixcount, 256);
}

//u64 stamp1 = get_system_time();
@ -463,9 +456,15 @@ int cellSurMixerCreate(vm::cptr<CellSurMixerConfig> config)
ssp.clear();
}

Emu.GetCPU().RemoveThread(ppu.GetId());
surMixerCb.set(0);
});

const u32 id = CPU.GetId();

CallAfter([id]()
{
Emu.GetIdManager().remove<PPUThread>(id);
});
};

return CELL_OK;
}
@ -228,7 +228,7 @@ s32 sys_lwmutex_lock(PPUThread& CPU, vm::ptr<sys_lwmutex_t> lwmutex, u64 timeout
// locking succeeded
auto old = lwmutex->vars.owner.exchange(tid);

if (old != lwmutex_reserved && !Emu.IsStopped())
if (old != lwmutex_reserved)
{
sysPrxForUser.Fatal("sys_lwmutex_lock(lwmutex=*0x%x): locking failed (owner=0x%x)", lwmutex, old);
}
@ -299,7 +299,7 @@ s32 sys_lwmutex_trylock(PPUThread& CPU, vm::ptr<sys_lwmutex_t> lwmutex)
// locking succeeded
auto old = lwmutex->vars.owner.exchange(tid);

if (old != lwmutex_reserved && !Emu.IsStopped())
if (old != lwmutex_reserved)
{
sysPrxForUser.Fatal("sys_lwmutex_trylock(lwmutex=*0x%x): locking failed (owner=0x%x)", lwmutex, old);
}
@ -588,7 +588,7 @@ s32 sys_lwcond_wait(PPUThread& CPU, vm::ptr<sys_lwcond_t> lwcond, u64 timeout)
const auto old = lwmutex->vars.owner.exchange(tid);
lwmutex->recursive_count = recursive_value;

if (old != lwmutex_reserved && !Emu.IsStopped())
if (old != lwmutex_reserved)
{
sysPrxForUser.Fatal("sys_lwcond_wait(lwcond=*0x%x): locking failed (lwmutex->owner=0x%x)", lwcond, old);
}
@ -617,7 +617,7 @@ s32 sys_lwcond_wait(PPUThread& CPU, vm::ptr<sys_lwcond_t> lwcond, u64 timeout)
const auto old = lwmutex->vars.owner.exchange(tid);
lwmutex->recursive_count = recursive_value;

if (old != lwmutex_reserved && !Emu.IsStopped())
if (old != lwmutex_reserved)
{
sysPrxForUser.Fatal("sys_lwcond_wait(lwcond=*0x%x): locking failed after timeout (lwmutex->owner=0x%x)", lwcond, old);
}
@ -1189,13 +1189,9 @@ void sys_spinlock_lock(vm::ptr<atomic_be_t<u32>> lock)
// prx: exchange with 0xabadcafe, repeat until exchanged with 0
while (lock->exchange(0xabadcafe).data())
{
g_sys_spinlock_wm.wait_op(lock.addr(), [lock](){ return lock->load().data() == 0; });
g_sys_spinlock_wm.wait_op(lock.addr(), WRAP_EXPR(!lock->load().data()));

if (Emu.IsStopped())
{
sysPrxForUser.Warning("sys_spinlock_lock(lock=*0x%x) aborted", lock);
break;
}
CHECK_EMU_STATUS;
}
}
@ -897,12 +897,11 @@ void SysCalls::DoSyscall(PPUThread& CPU, u64 code)
{
if (code >= 1024)
{
CPU.m_last_syscall = code;
throw "Invalid syscall number";
throw EXCEPTION("Invalid syscall number (0x%llx)", code);
}

auto old_last_syscall = CPU.m_last_syscall;
CPU.m_last_syscall = ~code;
auto last_code = CPU.hle_code;
CPU.hle_code = ~code;

if (Ini.HLELogging.GetValue())
{
@ -916,5 +915,5 @@ void SysCalls::DoSyscall(PPUThread& CPU, u64 code)
LOG_NOTICE(PPU, "Syscall %lld finished: %s -> 0x%llx", code, SysCalls::GetFuncName(~code), CPU.GPR[3]);
}

CPU.m_last_syscall = old_last_syscall;
CPU.hle_code = last_code;
}
@ -25,5 +25,5 @@ class SysCalls
{
public:
static void DoSyscall(PPUThread& CPU, u64 code);
static std::string GetFuncName(const u64 fid);
static std::string GetFuncName(const s64 fid);
};
@ -145,13 +145,13 @@ u32 sleep_queue_t::signal(u32 protocol)
{
std::lock_guard<std::mutex> lock(m_mutex);

u64 highest_prio = ~0ull;
s32 highest_prio = INT32_MAX;
u64 sel = ~0ull;
for (auto& v : m_waiting)
{
if (const auto t = Emu.GetCPU().GetThread(v))
if (const auto t = Emu.GetIdManager().get<PPUThread>(v))
{
const u64 prio = t->GetPrio();
const s32 prio = t->prio;
if (prio < highest_prio)
{
highest_prio = prio;
@ -163,7 +163,7 @@ s32 sys_cond_wait(PPUThread& CPU, u32 cond_id, u64 timeout)
return CELL_ESRCH;
}

const auto thread = Emu.GetCPU().GetThread(CPU.GetId());
const auto thread = Emu.GetIdManager().get<PPUThread>(CPU.GetId());

if (cond->mutex->owner.owner_before(thread) || thread.owner_before(cond->mutex->owner)) // check equality
{
@ -161,7 +161,6 @@ s32 sys_event_queue_receive(PPUThread& CPU, u32 equeue_id, vm::ptr<sys_event_t>
{
if (queue->cancelled)
{
queue->waiters--;
return CELL_ECANCELED;
}
@ -47,9 +47,9 @@ s32 sys_interrupt_tag_destroy(u32 intrtag)
return CELL_OK;
}

s32 sys_interrupt_thread_establish(vm::ptr<u32> ih, u32 intrtag, u64 intrthread, u64 arg)
s32 sys_interrupt_thread_establish(vm::ptr<u32> ih, u32 intrtag, u32 intrthread, u64 arg)
{
sys_interrupt.Warning("sys_interrupt_thread_establish(ih=*0x%x, intrtag=0x%x, intrthread=%lld, arg=0x%llx)", ih, intrtag, intrthread, arg);
sys_interrupt.Warning("sys_interrupt_thread_establish(ih=*0x%x, intrtag=0x%x, intrthread=0x%x, arg=0x%llx)", ih, intrtag, intrthread, arg);

const u32 class_id = intrtag >> 8;

@ -71,7 +71,7 @@ s32 sys_interrupt_thread_establish(vm::ptr<u32> ih, u32 intrtag, u64 intrthread,

// CELL_ESTAT is not returned (can't detect exact condition)

const auto it = Emu.GetCPU().GetThread((u32)intrthread);
const auto it = Emu.GetIdManager().get<PPUThread>(intrthread);

if (!it)
{
@ -104,9 +104,8 @@ s32 sys_interrupt_thread_establish(vm::ptr<u32> ih, u32 intrtag, u64 intrthread,

ppu.custom_task = [t, &tag, arg](PPUThread& CPU)
{
const auto func = vm::ptr<void(u64 arg)>::make(CPU.entry);
const auto pc = vm::read32(func.addr());
const auto rtoc = vm::read32(func.addr() + 4);
const auto pc = CPU.PC;
const auto rtoc = CPU.GPR[2];

std::unique_lock<std::mutex> cond_lock(tag.handler_mutex);

@ -115,9 +114,14 @@ s32 sys_interrupt_thread_establish(vm::ptr<u32> ih, u32 intrtag, u64 intrthread,
// call interrupt handler until int status is clear
if (tag.stat.load())
{
//func(CPU, arg);
CPU.GPR[3] = arg;
CPU.FastCall2(pc, rtoc);
try
{
CPU.GPR[3] = arg;
CPU.FastCall2(pc, rtoc);
}
catch (CPUThreadReturn)
{
}
}

tag.cond.wait_for(cond_lock, std::chrono::milliseconds(1));
@ -156,7 +160,7 @@ void sys_interrupt_thread_eoi(PPUThread& CPU)
sys_interrupt.Log("sys_interrupt_thread_eoi()");

// TODO: maybe it should actually unwind the stack (ensure that all the automatic objects are finalized)?
CPU.GPR[1] = align(CPU.GetStackAddr() + CPU.GetStackSize(), 0x200) - 0x200; // supercrutch (just to hide error messages)
CPU.GPR[1] = align(CPU.stack_addr + CPU.stack_size, 0x200) - 0x200; // supercrutch (just to hide error messages)

CPU.FastStop();
}
@ -18,6 +18,6 @@ REG_ID_TYPE(lv2_int_handler_t, 0x0B); // SYS_INTR_SERVICE_HANDLE_OBJECT

// SysCalls
s32 sys_interrupt_tag_destroy(u32 intrtag);
s32 sys_interrupt_thread_establish(vm::ptr<u32> ih, u32 intrtag, u64 intrthread, u64 arg);
s32 sys_interrupt_thread_establish(vm::ptr<u32> ih, u32 intrtag, u32 intrthread, u64 arg);
s32 _sys_interrupt_thread_disestablish(u32 ih, vm::ptr<u64> r13);
void sys_interrupt_thread_eoi(PPUThread& CPU);
@ -95,7 +95,7 @@ s32 sys_mutex_lock(PPUThread& CPU, u32 mutex_id, u64 timeout)
return CELL_ESRCH;
}

const auto thread = Emu.GetCPU().GetThread(CPU.GetId(), CPU_THREAD_PPU);
const auto thread = Emu.GetIdManager().get<PPUThread>(CPU.GetId());

if (!mutex->owner.owner_before(thread) && !thread.owner_before(mutex->owner)) // check equality
{
@ -153,7 +153,7 @@ s32 sys_mutex_trylock(PPUThread& CPU, u32 mutex_id)
return CELL_ESRCH;
}

const auto thread = Emu.GetCPU().GetThread(CPU.GetId());
const auto thread = Emu.GetIdManager().get<PPUThread>(CPU.GetId());

if (!mutex->owner.owner_before(thread) && !thread.owner_before(mutex->owner)) // check equality
{
@ -195,7 +195,7 @@ s32 sys_mutex_unlock(PPUThread& CPU, u32 mutex_id)
return CELL_ESRCH;
}

const auto thread = Emu.GetCPU().GetThread(CPU.GetId());
const auto thread = Emu.GetIdManager().get<PPUThread>(CPU.GetId());

if (mutex->owner.owner_before(thread) || thread.owner_before(mutex->owner)) // check inequality
{
@ -2,7 +2,8 @@
#include "Emu/Memory/Memory.h"
#include "Emu/System.h"
#include "Emu/SysCalls/SysCalls.h"
#include "Emu/SysCalls/CB_FUNC.h"
#include "Emu/IdManager.h"
#include "Emu/DbgCommand.h"

#include "Emu/CPU/CPUThreadManager.h"
#include "Emu/Cell/PPUThread.h"
@ -14,17 +15,19 @@ void _sys_ppu_thread_exit(PPUThread& CPU, u64 errorcode)
{
sys_ppu_thread.Log("_sys_ppu_thread_exit(errorcode=0x%llx)", errorcode);

CPU.SetExitStatus(errorcode);
CPU.Stop();
LV2_LOCK;

if (!CPU.IsJoinable())
if (!CPU.is_joinable)
{
const u32 id = CPU.GetId();

CallAfter([id]()
{
Emu.GetCPU().RemoveThread(id);
Emu.GetIdManager().remove<PPUThread>(id);
});
}

CPU.Exit();
}

void sys_ppu_thread_yield()
@ -34,29 +37,46 @@ void sys_ppu_thread_yield()
std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack
}

s32 sys_ppu_thread_join(u32 thread_id, vm::ptr<u64> vptr)
s32 sys_ppu_thread_join(PPUThread& CPU, u32 thread_id, vm::ptr<u64> vptr)
{
sys_ppu_thread.Warning("sys_ppu_thread_join(thread_id=0x%x, vptr=*0x%x)", thread_id, vptr);

const auto t = Emu.GetCPU().GetThread(thread_id);
LV2_LOCK;

if (!t)
const auto thread = Emu.GetIdManager().get<PPUThread>(thread_id);

if (!thread)
{
return CELL_ESRCH;
}

while (t->IsAlive())
if (!thread->is_joinable || thread->is_joining)
{
if (Emu.IsStopped())
{
sys_ppu_thread.Warning("sys_ppu_thread_join(%d) aborted", thread_id);
return CELL_OK;
}
std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack
return CELL_EINVAL;
}

*vptr = t->GetExitStatus();
Emu.GetCPU().RemoveThread(thread_id);
if (&CPU == thread.get())
{
return CELL_EDEADLK;
}

// mark joining
thread->is_joining = true;

// join thread
while (thread->IsActive())
{
CHECK_EMU_STATUS;

thread->cv.wait_for(lv2_lock, std::chrono::milliseconds(1));
}

// get exit status from the register
*vptr = thread->GPR[3];

// cleanup
Emu.GetIdManager().remove<PPUThread>(thread->GetId());

return CELL_OK;
}

@ -64,19 +84,27 @@ s32 sys_ppu_thread_detach(u32 thread_id)
{
sys_ppu_thread.Warning("sys_ppu_thread_detach(thread_id=0x%x)", thread_id);

const auto t = Emu.GetCPU().GetThread(thread_id);
LV2_LOCK;

if (!t)
const auto thread = Emu.GetIdManager().get<PPUThread>(thread_id);

if (!thread)
{
return CELL_ESRCH;
}

if (!t->IsJoinable())
if (!thread->is_joinable)
{
return CELL_EINVAL;
}

t->SetJoinable(false);
if (thread->is_joining)
{
return CELL_EBUSY;
}

// "detach"
thread->is_joinable = false;

return CELL_OK;
}
@ -85,21 +113,30 @@ void sys_ppu_thread_get_join_state(PPUThread& CPU, vm::ptr<s32> isjoinable)
{
sys_ppu_thread.Warning("sys_ppu_thread_get_join_state(isjoinable=*0x%x)", isjoinable);

*isjoinable = CPU.IsJoinable();
LV2_LOCK;

*isjoinable = CPU.is_joinable;
}

s32 sys_ppu_thread_set_priority(u32 thread_id, s32 prio)
{
sys_ppu_thread.Log("sys_ppu_thread_set_priority(thread_id=0x%x, prio=%d)", thread_id, prio);

const auto t = Emu.GetCPU().GetThread(thread_id);
LV2_LOCK;

if (!t)
const auto thread = Emu.GetIdManager().get<PPUThread>(thread_id);

if (!thread)
{
return CELL_ESRCH;
}

t->SetPrio(prio);
if (prio < 0 || prio > 3071)
{
return CELL_EINVAL;
}

thread->prio = prio;

return CELL_OK;
}
@ -108,14 +145,16 @@ s32 sys_ppu_thread_get_priority(u32 thread_id, vm::ptr<s32> priop)
{
sys_ppu_thread.Log("sys_ppu_thread_get_priority(thread_id=0x%x, priop=*0x%x)", thread_id, priop);

const auto t = Emu.GetCPU().GetThread(thread_id);
LV2_LOCK;

if (!t)
const auto thread = Emu.GetIdManager().get<PPUThread>(thread_id);

if (!thread)
{
return CELL_ESRCH;
}

*priop = static_cast<s32>(t->GetPrio());
*priop = thread->prio;

return CELL_OK;
}
@ -124,66 +163,71 @@ s32 sys_ppu_thread_get_stack_information(PPUThread& CPU, vm::ptr<sys_ppu_thread_
{
sys_ppu_thread.Log("sys_ppu_thread_get_stack_information(sp=*0x%x)", sp);

sp->pst_addr = CPU.GetStackAddr();
sp->pst_size = CPU.GetStackSize();
sp->pst_addr = CPU.stack_addr;
sp->pst_size = CPU.stack_size;

return CELL_OK;
}

s32 sys_ppu_thread_stop(u32 thread_id)
{
sys_ppu_thread.Error("sys_ppu_thread_stop(thread_id=0x%x)", thread_id);
sys_ppu_thread.Todo("sys_ppu_thread_stop(thread_id=0x%x)", thread_id);

const auto t = Emu.GetCPU().GetThread(thread_id);
LV2_LOCK;

if (!t)
const auto thread = Emu.GetIdManager().get<PPUThread>(thread_id);

if (!thread)
{
return CELL_ESRCH;
}

t->Stop();
//t->Stop();

return CELL_OK;
}

s32 sys_ppu_thread_restart(u32 thread_id)
{
sys_ppu_thread.Error("sys_ppu_thread_restart(thread_id=0x%x)", thread_id);
sys_ppu_thread.Todo("sys_ppu_thread_restart(thread_id=0x%x)", thread_id);

const auto t = Emu.GetCPU().GetThread(thread_id);
LV2_LOCK;

if (!t)
const auto thread = Emu.GetIdManager().get<PPUThread>(thread_id);

if (!thread)
{
return CELL_ESRCH;
}

t->Stop();
t->Run();
//t->Stop();
//t->Run();

return CELL_OK;
}

u32 ppu_thread_create(u32 entry, u64 arg, s32 prio, u32 stacksize, bool is_joinable, bool is_interrupt, std::string name, std::function<void(PPUThread&)> task)
{
const auto new_thread = Emu.GetCPU().AddThread(CPU_THREAD_PPU);
const auto ppu = Emu.GetIdManager().make_ptr<PPUThread>(name);

auto& ppu = static_cast<PPUThread&>(*new_thread);
ppu->prio = prio;
ppu->stack_size = stacksize < 0x4000 ? 0x4000 : stacksize; // (hack) adjust minimal stack size
ppu->custom_task = task;
ppu->Run();

ppu.SetEntry(entry);
ppu.SetPrio(prio);
ppu.SetStackSize(stacksize < 0x4000 ? 0x4000 : stacksize); // (hack) adjust minimal stack size
ppu.SetJoinable(is_joinable);
ppu.SetName(name);
ppu.custom_task = task;
ppu.Run();
if (entry)
{
ppu->PC = vm::read32(entry);
ppu->GPR[2] = vm::read32(entry + 4); // rtoc
}

if (!is_interrupt)
{
ppu.GPR[3] = arg;
ppu.Exec();
ppu->GPR[3] = arg;
ppu->Exec();
}

return ppu.GetId();
return ppu->GetId();
}

s32 _sys_ppu_thread_create(vm::ptr<u64> thread_id, vm::ptr<ppu_thread_param_t> param, u64 arg, u64 unk, s32 prio, u32 stacksize, u64 flags, vm::cptr<char> threadname)
@ -191,6 +235,8 @@ s32 _sys_ppu_thread_create(vm::ptr<u64> thread_id, vm::ptr<ppu_thread_param_t> p
sys_ppu_thread.Warning("_sys_ppu_thread_create(thread_id=*0x%x, param=*0x%x, arg=0x%llx, unk=0x%llx, prio=%d, stacksize=0x%x, flags=0x%llx, threadname=*0x%x)",
thread_id, param, arg, unk, prio, stacksize, flags, threadname);

LV2_LOCK;

if (prio < 0 || prio > 3071)
{
return CELL_EINVAL;
@ -204,26 +250,25 @@ s32 _sys_ppu_thread_create(vm::ptr<u64> thread_id, vm::ptr<ppu_thread_param_t> p
return CELL_EPERM;
}

const auto new_thread = Emu.GetCPU().AddThread(CPU_THREAD_PPU);
const auto ppu = Emu.GetIdManager().make_ptr<PPUThread>(threadname ? threadname.get_ptr() : "");

auto& ppu = static_cast<PPUThread&>(*new_thread);
ppu->prio = prio;
ppu->stack_size = stacksize < 0x4000 ? 0x4000 : stacksize; // (hack) adjust minimal stack size
ppu->Run();

ppu.SetEntry(param->entry);
ppu.SetPrio(prio);
ppu.SetStackSize(stacksize < 0x4000 ? 0x4000 : stacksize); // (hack) adjust minimal stack size
ppu.SetJoinable(is_joinable);
ppu.SetName(threadname ? threadname.get_ptr() : "");
ppu.Run();
ppu->PC = vm::read32(param->entry);
ppu->GPR[2] = vm::read32(param->entry + 4); // rtoc
ppu->GPR[3] = arg;
ppu->GPR[4] = unk; // actually unknown

ppu.GPR[3] = arg;
ppu.GPR[4] = unk; // actually unknown
ppu->is_joinable = is_joinable;

if (u32 tls = param->tls) // hack
{
ppu.GPR[13] = tls;
ppu->GPR[13] = tls;
}

*thread_id = ppu.GetId();
*thread_id = ppu->GetId();

return CELL_OK;
}
@ -232,30 +277,32 @@ s32 sys_ppu_thread_start(u32 thread_id)
{
sys_ppu_thread.Warning("sys_ppu_thread_start(thread_id=0x%x)", thread_id);

const auto t = Emu.GetCPU().GetThread(thread_id, CPU_THREAD_PPU);
LV2_LOCK;

if (!t)
const auto thread = Emu.GetIdManager().get<PPUThread>(thread_id);

if (!thread)
{
return CELL_ESRCH;
}

t->Exec();
thread->Exec();

return CELL_OK;
}

s32 sys_ppu_thread_rename(u32 thread_id, vm::cptr<char> name)
{
sys_ppu_thread.Error("sys_ppu_thread_rename(thread_id=0x%x, name=*0x%x)", thread_id, name);
sys_ppu_thread.Todo("sys_ppu_thread_rename(thread_id=0x%x, name=*0x%x)", thread_id, name);

const auto t = Emu.GetCPU().GetThread(thread_id, CPU_THREAD_PPU);
LV2_LOCK;

if (!t)
const auto thread = Emu.GetIdManager().get<PPUThread>(thread_id);

if (!thread)
{
return CELL_ESRCH;
}

t->SetThreadName(name.get_ptr());

return CELL_OK;
}
@ -35,7 +35,7 @@ u32 ppu_thread_create(u32 entry, u64 arg, s32 prio, u32 stacksize, bool is_joina
|
||||
// SysCalls
|
||||
void _sys_ppu_thread_exit(PPUThread& CPU, u64 errorcode);
|
||||
void sys_ppu_thread_yield();
|
||||
s32 sys_ppu_thread_join(u32 thread_id, vm::ptr<u64> vptr);
|
||||
s32 sys_ppu_thread_join(PPUThread& CPU, u32 thread_id, vm::ptr<u64> vptr);
|
||||
s32 sys_ppu_thread_detach(u32 thread_id);
|
||||
void sys_ppu_thread_get_join_state(PPUThread& CPU, vm::ptr<s32> isjoinable);
|
||||
s32 sys_ppu_thread_set_priority(u32 thread_id, s32 prio);
|
||||
|
@ -229,7 +229,7 @@ s32 sys_process_get_number_of_object(u32 object, vm::ptr<u32> nump)
|
||||
case SYS_LWCOND_OBJECT:
|
||||
case SYS_EVENT_FLAG_OBJECT:
|
||||
{
|
||||
*nump = Emu.GetIdManager().get_count_by_type(object);
|
||||
*nump = Emu.GetIdManager().get_count(object);
|
||||
return CELL_OK;
|
||||
}
|
||||
}
|
||||
@ -262,7 +262,7 @@ s32 sys_process_get_id(u32 object, vm::ptr<u32> buffer, u32 size, vm::ptr<u32> s
|
||||
case SYS_LWCOND_OBJECT:
|
||||
case SYS_EVENT_FLAG_OBJECT:
|
||||
{
|
||||
const auto objects = Emu.GetIdManager().get_IDs_by_type(object);
|
||||
const auto objects = Emu.GetIdManager().get_IDs(object);
|
||||
|
||||
u32 i = 0;
|
||||
|
||||
|
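The two sys_process hunks above, together with the PPU-thread changes earlier in this commit, replace the old per-type lookups (AddThread, GetThread, get_count_by_type, get_IDs_by_type) with templated id-manager calls. A minimal usage sketch of that pattern follows; it is assembled only from calls visible in this diff, and lv2_sample_t plus the two function names are made up for illustration.

// Hypothetical syscall pair illustrating the id-manager pattern used throughout this commit.
s32 sys_sample_create(vm::ptr<u32> id, u32 arg)
{
	LV2_LOCK;

	// make<T>() constructs the object and returns its new ID
	*id = Emu.GetIdManager().make<lv2_sample_t>(arg);

	return CELL_OK;
}

s32 sys_sample_destroy(u32 id)
{
	LV2_LOCK;

	// get<T>() yields a null handle if the ID does not exist or has the wrong type
	const auto sample = Emu.GetIdManager().get<lv2_sample_t>(id);

	if (!sample)
	{
		return CELL_ESRCH;
	}

	Emu.GetIdManager().remove<lv2_sample_t>(id);

	return CELL_OK;
}
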
@ -92,26 +92,21 @@ s32 sys_spu_image_open(vm::ptr<sys_spu_image> img, vm::cptr<char> path)
return CELL_OK;
}

u32 spu_thread_initialize(u32 group_id, u32 spu_num, vm::ptr<sys_spu_image> img, const std::string& name, u32 option, u64 a1, u64 a2, u64 a3, u64 a4, std::function<void(SPUThread&)> task)
u32 spu_thread_initialize(u32 group_id, u32 spu_num, vm::ptr<sys_spu_image> img, const std::string& name, u32 option, u64 a1, u64 a2, u64 a3, u64 a4, std::function<void(SPUThread&)> task = nullptr)
{
if (option)
{
sys_spu.Todo("Unsupported SPU Thread options (0x%x)", option);
}

const auto t = Emu.GetCPU().AddThread(CPU_THREAD_SPU);
const auto spu = Emu.GetIdManager().make_ptr<SPUThread>(name, spu_num, Memory.MainMem.AllocAlign(0x40000));

auto& spu = static_cast<SPUThread&>(*t);

spu.index = spu_num;
spu.offset = Memory.MainMem.AllocAlign(256 * 1024);
spu.SetName(name);
spu.m_custom_task = task;
spu->m_custom_task = task;

const auto group = Emu.GetIdManager().get<spu_group_t>(group_id);

spu.tg = group;
group->threads[spu_num] = t;
spu->tg = group;
group->threads[spu_num] = spu;
group->args[spu_num] = { a1, a2, a3, a4 };
group->images[spu_num] = img;

@ -131,7 +126,7 @@ u32 spu_thread_initialize(u32 group_id, u32 spu_num, vm::ptr<sys_spu_image> img,
group->state = SPU_THREAD_GROUP_STATUS_INITIALIZED;
}

return spu.GetId();
return spu->GetId();
}

s32 sys_spu_thread_initialize(vm::ptr<u32> thread, u32 group_id, u32 spu_num, vm::ptr<sys_spu_image> img, vm::ptr<sys_spu_thread_attribute> attr, vm::ptr<sys_spu_thread_argument> arg)
@ -167,23 +162,21 @@ s32 sys_spu_thread_set_argument(u32 id, vm::ptr<sys_spu_thread_argument> arg)

LV2_LOCK;

const auto t = Emu.GetCPU().GetThread(id, CPU_THREAD_SPU);
const auto thread = Emu.GetIdManager().get<SPUThread>(id);

if (!t)
if (!thread)
{
return CELL_ESRCH;
}

auto& spu = static_cast<SPUThread&>(*t);
const auto group = thread->tg.lock();

const auto group = spu.tg.lock();
assert(thread->index < group->threads.size());

assert(spu.index < group->threads.size());

group->args[spu.index].arg1 = arg->arg1;
group->args[spu.index].arg2 = arg->arg2;
group->args[spu.index].arg3 = arg->arg3;
group->args[spu.index].arg4 = arg->arg4;
group->args[thread->index].arg1 = arg->arg1;
group->args[thread->index].arg2 = arg->arg2;
group->args[thread->index].arg3 = arg->arg3;
group->args[thread->index].arg4 = arg->arg4;

return CELL_OK;
}
@ -194,36 +187,20 @@ s32 sys_spu_thread_get_exit_status(u32 id, vm::ptr<u32> status)

LV2_LOCK;

const auto t = Emu.GetCPU().GetThread(id, CPU_THREAD_SPU);
const auto thread = Emu.GetIdManager().get<SPUThread>(id);

if (!t)
if (!thread)
{
return CELL_ESRCH;
}

auto& spu = static_cast<SPUThread&>(*t);
// TODO: check CELL_ESTAT condition

u32 res;
if (!spu.IsStopped() || !spu.ch_out_mbox.pop(res)) // TODO: Is it possible to get the same status twice? If so, we shouldn't use destructive read
{
return CELL_ESTAT;
}

*status = res;
*status = thread->ch_out_mbox.pop_uncond();

return CELL_OK;
}

u32 spu_thread_group_create(const std::string& name, u32 num, s32 prio, s32 type, u32 container)
{
if (type)
{
sys_spu.Todo("Unsupported SPU Thread Group type (0x%x)", type);
}

return Emu.GetIdManager().make<spu_group_t>(name, num, prio, type, container);
}

s32 sys_spu_thread_group_create(vm::ptr<u32> id, u32 num, s32 prio, vm::ptr<sys_spu_thread_group_attribute> attr)
{
sys_spu.Warning("sys_spu_thread_group_create(id=*0x%x, num=%d, prio=%d, attr=*0x%x)", id, num, prio, attr);
@ -235,7 +212,13 @@ s32 sys_spu_thread_group_create(vm::ptr<u32> id, u32 num, s32 prio, vm::ptr<sys_
return CELL_EINVAL;
}

*id = spu_thread_group_create(std::string(attr->name.get_ptr(), attr->nsize - 1), num, prio, attr->type, attr->ct);
if (attr->type.data())
{
sys_spu.Todo("Unsupported SPU Thread Group type (0x%x)", attr->type);
}

*id = Emu.GetIdManager().make<spu_group_t>(std::string{ attr->name.get_ptr(), attr->nsize - 1 }, num, prio, attr->type, attr->ct);

return CELL_OK;
}

@ -265,7 +248,7 @@ s32 sys_spu_thread_group_destroy(u32 id)
auto& spu = static_cast<SPUThread&>(*t);

Memory.MainMem.Free(spu.offset);
Emu.GetCPU().RemoveThread(spu.GetId());
Emu.GetIdManager().remove<SPUThread>(spu.GetId());

t.reset();
}
@ -314,8 +297,8 @@ s32 sys_spu_thread_group_start(u32 id)
// TODO: use segment info
memcpy(vm::get_ptr<void>(spu.offset), vm::get_ptr<void>(image->addr), 256 * 1024);

spu.SetEntry(image->entry_point);
spu.Run();
spu.PC = image->entry_point;
spu.GPR[3] = u128::from64(0, args.arg1);
spu.GPR[4] = u128::from64(0, args.arg2);
spu.GPR[5] = u128::from64(0, args.arg3);
@ -386,9 +369,7 @@ s32 sys_spu_thread_group_suspend(u32 id)
{
if (t)
{
auto& spu = static_cast<SPUThread&>(*t);

spu.FastStop();
t->Sleep(); // trigger m_state check
}
}

@ -433,9 +414,7 @@ s32 sys_spu_thread_group_resume(u32 id)
{
if (t)
{
auto& spu = static_cast<SPUThread&>(*t);

spu.FastRun();
t->Awake(); // trigger m_state check
}
}

@ -472,7 +451,7 @@ s32 sys_spu_thread_group_terminate(u32 id, s32 value)
LV2_LOCK;

// seems the id can be either SPU Thread Group or SPU Thread
auto thread = Emu.GetCPU().GetThread(id, CPU_THREAD_SPU);
const auto thread = Emu.GetIdManager().get<SPUThread>(id);
auto group = Emu.GetIdManager().get<spu_group_t>(id);

if (!group && !thread)
@ -520,7 +499,7 @@ s32 sys_spu_thread_group_terminate(u32 id, s32 value)
auto& spu = static_cast<SPUThread&>(*t);

spu.status.exchange(SPU_STATUS_STOPPED);
spu.FastStop();
spu.Stop();
}
}

@ -566,7 +545,7 @@ s32 sys_spu_thread_group_join(u32 id, vm::ptr<u32> cause, vm::ptr<u32> status)
{
auto& spu = static_cast<SPUThread&>(*t);

if (!(spu.status.load() & SPU_STATUS_STOPPED_BY_STOP))
if ((spu.status.load() & SPU_STATUS_STOPPED_BY_STOP) == 0)
{
stopped = false;
break;
@ -622,31 +601,33 @@ s32 sys_spu_thread_write_ls(u32 id, u32 address, u64 value, u32 type)
{
sys_spu.Log("sys_spu_thread_write_ls(id=0x%x, address=0x%x, value=0x%llx, type=%d)", id, address, value, type);

const auto t = Emu.GetCPU().GetThread(id, CPU_THREAD_SPU);
LV2_LOCK;

if (!t)
const auto thread = Emu.GetIdManager().get<SPUThread>(id);

if (!thread)
{
return CELL_ESRCH;
}

if (!t->IsRunning())
{
return CELL_ESTAT;
}

if (address >= 0x40000 || address + type > 0x40000 || address % type) // check range and alignment
{
return CELL_EINVAL;
}

auto& spu = static_cast<SPUThread&>(*t);
const auto group = thread->tg.lock();

if ((group->state < SPU_THREAD_GROUP_STATUS_WAITING) || (group->state > SPU_THREAD_GROUP_STATUS_RUNNING))
{
return CELL_ESTAT;
}

switch (type)
{
case 1: spu.write8(address, (u8)value); break;
case 2: spu.write16(address, (u16)value); break;
case 4: spu.write32(address, (u32)value); break;
case 8: spu.write64(address, value); break;
case 1: thread->write8(address, (u8)value); break;
case 2: thread->write16(address, (u16)value); break;
case 4: thread->write32(address, (u32)value); break;
case 8: thread->write64(address, value); break;
default: return CELL_EINVAL;
}

@ -657,31 +638,33 @@ s32 sys_spu_thread_read_ls(u32 id, u32 address, vm::ptr<u64> value, u32 type)
{
sys_spu.Log("sys_spu_thread_read_ls(id=0x%x, address=0x%x, value=*0x%x, type=%d)", id, address, value, type);

const auto t = Emu.GetCPU().GetThread(id, CPU_THREAD_SPU);
LV2_LOCK;

if (!t)
const auto thread = Emu.GetIdManager().get<SPUThread>(id);

if (!thread)
{
return CELL_ESRCH;
}

if (!t->IsRunning())
{
return CELL_ESTAT;
}

if (address >= 0x40000 || address + type > 0x40000 || address % type) // check range and alignment
{
return CELL_EINVAL;
}

auto& spu = static_cast<SPUThread&>(*t);
const auto group = thread->tg.lock();

if ((group->state < SPU_THREAD_GROUP_STATUS_WAITING) || (group->state > SPU_THREAD_GROUP_STATUS_RUNNING))
{
return CELL_ESTAT;
}

switch (type)
{
case 1: *value = spu.read8(address); break;
case 2: *value = spu.read16(address); break;
case 4: *value = spu.read32(address); break;
case 8: *value = spu.read64(address); break;
case 1: *value = thread->read8(address); break;
case 2: *value = thread->read16(address); break;
case 4: *value = thread->read32(address); break;
case 8: *value = thread->read64(address); break;
default: return CELL_EINVAL;
}

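Note that both sys_spu_thread_write_ls and sys_spu_thread_read_ls now validate the owning thread group's state instead of the old IsRunning() check. A small sketch of that gate as a standalone predicate follows; the wrapper function and its parameter type are hypothetical, while the constants and the tg.lock() access come straight from the hunks above.

// Hypothetical helper expressing the state gate used by the LS read/write syscalls above.
// Access is allowed only while the group is between WAITING and RUNNING (inclusive).
static bool spu_group_accessible(const std::shared_ptr<spu_group_t>& group)
{
	return group->state >= SPU_THREAD_GROUP_STATUS_WAITING && group->state <= SPU_THREAD_GROUP_STATUS_RUNNING;
}

// Usage, mirroring the diff:
//   const auto group = thread->tg.lock();
//   if (!spu_group_accessible(group)) return CELL_ESTAT;
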
@ -692,16 +675,23 @@ s32 sys_spu_thread_write_spu_mb(u32 id, u32 value)
{
sys_spu.Warning("sys_spu_thread_write_spu_mb(id=0x%x, value=0x%x)", id, value);

const auto t = Emu.GetCPU().GetThread(id, CPU_THREAD_SPU);
LV2_LOCK;

if (!t)
const auto thread = Emu.GetIdManager().get<SPUThread>(id);

if (!thread)
{
return CELL_ESRCH;
}

auto& spu = static_cast<SPUThread&>(*t);
const auto group = thread->tg.lock();

spu.ch_in_mbox.push_uncond(value);
if ((group->state < SPU_THREAD_GROUP_STATUS_WAITING) || (group->state > SPU_THREAD_GROUP_STATUS_RUNNING))
{
return CELL_ESTAT;
}

thread->ch_in_mbox.push_uncond(value);

return CELL_OK;
}
@ -710,9 +700,11 @@ s32 sys_spu_thread_set_spu_cfg(u32 id, u64 value)
{
sys_spu.Warning("sys_spu_thread_set_spu_cfg(id=0x%x, value=0x%x)", id, value);

const auto t = Emu.GetCPU().GetThread(id, CPU_THREAD_SPU);
LV2_LOCK;

if (!t)
const auto thread = Emu.GetIdManager().get<SPUThread>(id);

if (!thread)
{
return CELL_ESRCH;
}
@ -722,9 +714,7 @@ s32 sys_spu_thread_set_spu_cfg(u32 id, u64 value)
return CELL_EINVAL;
}

auto& spu = static_cast<SPUThread&>(*t);

spu.snr_config = value;
thread->snr_config = value;

return CELL_OK;
}
@ -733,16 +723,16 @@ s32 sys_spu_thread_get_spu_cfg(u32 id, vm::ptr<u64> value)
{
sys_spu.Warning("sys_spu_thread_get_spu_cfg(id=0x%x, value=*0x%x)", id, value);

const auto t = Emu.GetCPU().GetThread(id, CPU_THREAD_SPU);
LV2_LOCK;

if (!t)
const auto thread = Emu.GetIdManager().get<SPUThread>(id);

if (!thread)
{
return CELL_ESRCH;
}

auto& spu = static_cast<SPUThread&>(*t);

*value = spu.snr_config;
*value = thread->snr_config;

return CELL_OK;
}
@ -751,9 +741,11 @@ s32 sys_spu_thread_write_snr(u32 id, u32 number, u32 value)
{
sys_spu.Log("sys_spu_thread_write_snr(id=0x%x, number=%d, value=0x%x)", id, number, value);

const auto t = Emu.GetCPU().GetThread(id, CPU_THREAD_SPU);
LV2_LOCK;

if (!t)
const auto thread = Emu.GetIdManager().get<SPUThread>(id);

if (!thread)
{
return CELL_ESRCH;
}
@ -763,9 +755,14 @@ s32 sys_spu_thread_write_snr(u32 id, u32 number, u32 value)
return CELL_EINVAL;
}

auto& spu = static_cast<SPUThread&>(*t);
const auto group = thread->tg.lock();

spu.write_snr(number ? true : false, value);
if ((group->state < SPU_THREAD_GROUP_STATUS_WAITING) || (group->state > SPU_THREAD_GROUP_STATUS_RUNNING))
{
return CELL_ESTAT;
}

thread->write_snr(number, value);

return CELL_OK;
}
@ -895,23 +892,21 @@ s32 sys_spu_thread_connect_event(u32 id, u32 eq, u32 et, u8 spup)

LV2_LOCK;

const auto t = Emu.GetCPU().GetThread(id, CPU_THREAD_SPU);
const auto thread = Emu.GetIdManager().get<SPUThread>(id);
const auto queue = Emu.GetIdManager().get<lv2_event_queue_t>(eq);

if (!t || !queue)
if (!thread || !queue)
{
return CELL_ESRCH;
}

auto& spu = static_cast<SPUThread&>(*t);

if (et != SYS_SPU_THREAD_EVENT_USER || spup > 63 || queue->type != SYS_PPU_QUEUE)
{
sys_spu.Error("sys_spu_thread_connect_event(): invalid arguments (et=%d, spup=%d, queue->type=%d)", et, spup, queue->type);
return CELL_EINVAL;
}

auto& port = spu.spup[spup];
auto& port = thread->spup[spup];

if (!port.expired())
{
@ -929,22 +924,20 @@ s32 sys_spu_thread_disconnect_event(u32 id, u32 et, u8 spup)

LV2_LOCK;

const auto t = Emu.GetCPU().GetThread(id, CPU_THREAD_SPU);
const auto thread = Emu.GetIdManager().get<SPUThread>(id);

if (!t)
if (!thread)
{
return CELL_ESRCH;
}

auto& spu = static_cast<SPUThread&>(*t);

if (et != SYS_SPU_THREAD_EVENT_USER || spup > 63)
{
sys_spu.Error("sys_spu_thread_disconnect_event(): invalid arguments (et=%d, spup=%d)", et, spup);
return CELL_EINVAL;
}

auto& port = spu.spup[spup];
auto& port = thread->spup[spup];

if (port.expired())
{
@ -962,22 +955,20 @@ s32 sys_spu_thread_bind_queue(u32 id, u32 spuq, u32 spuq_num)

LV2_LOCK;

const auto t = Emu.GetCPU().GetThread(id, CPU_THREAD_SPU);
const auto thread = Emu.GetIdManager().get<SPUThread>(id);
const auto queue = Emu.GetIdManager().get<lv2_event_queue_t>(spuq);

if (!t || !queue)
if (!thread || !queue)
{
return CELL_ESRCH;
}

auto& spu = static_cast<SPUThread&>(*t);

if (queue->type != SYS_SPU_QUEUE)
{
return CELL_EINVAL;
}

for (auto& v : spu.spuq)
for (auto& v : thread->spuq)
{
if (auto q = v.second.lock())
{
@ -988,7 +979,7 @@ s32 sys_spu_thread_bind_queue(u32 id, u32 spuq, u32 spuq_num)
}
}

for (auto& v : spu.spuq)
for (auto& v : thread->spuq)
{
if (v.second.expired())
{
@ -1008,16 +999,14 @@ s32 sys_spu_thread_unbind_queue(u32 id, u32 spuq_num)

LV2_LOCK;

const auto t = Emu.GetCPU().GetThread(id, CPU_THREAD_SPU);
const auto thread = Emu.GetIdManager().get<SPUThread>(id);

if (!t)
if (!thread)
{
return CELL_ESRCH;
}

auto& spu = static_cast<SPUThread&>(*t);

for (auto& v : spu.spuq)
for (auto& v : thread->spuq)
{
if (v.first == spuq_num && !v.second.expired())
{
@ -1142,18 +1131,16 @@ s32 sys_raw_spu_create(vm::ptr<u32> id, vm::ptr<void> attr)

LV2_LOCK;

const auto t = Emu.GetCPU().AddThread(CPU_THREAD_RAW_SPU);
const auto thread = Emu.GetCPU().NewRawSPUThread();

if (!t)
if (!thread)
{
return CELL_EAGAIN;
}

Memory.Map(t->offset = RAW_SPU_BASE_ADDR + RAW_SPU_OFFSET * t->index, 0x40000);
thread->Run();

t->Run();

*id = t->index;
*id = thread->index;

return CELL_OK;
}
@ -1164,20 +1151,16 @@ s32 sys_raw_spu_destroy(u32 id)

LV2_LOCK;

const auto t = Emu.GetCPU().GetRawSPUThread(id);
const auto thread = Emu.GetCPU().GetRawSPUThread(id);

if (!t)
if (!thread)
{
return CELL_ESRCH;
}

auto& spu = static_cast<RawSPUThread&>(*t);

// TODO: check if busy

Memory.Unmap(spu.offset);

Emu.GetCPU().RemoveThread(t->GetId());
Emu.GetIdManager().remove<RawSPUThread>(thread->GetId());

return CELL_OK;
}

@ -214,8 +214,6 @@ u32 LoadSpuImage(vfsStream& stream, u32& spu_ep);

// Aux
s32 spu_image_import(sys_spu_image& img, u32 src, u32 type);
u32 spu_thread_group_create(const std::string& name, u32 num, s32 prio, s32 type, u32 container);
u32 spu_thread_initialize(u32 group, u32 spu_num, vm::ptr<sys_spu_image> img, const std::string& name, u32 option, u64 a1, u64 a2, u64 a3, u64 a4, std::function<void(SPUThread&)> task = nullptr);

// SysCalls
s32 sys_spu_initialize(u32 max_usable_spu, u32 max_raw_spu);

@ -16,16 +16,21 @@ lv2_timer_t::lv2_timer_t()
: start(0)
, period(0)
, state(SYS_TIMER_STATE_STOP)
, thread(fmt::format("Timer[0x%x] Thread", Emu.GetIdManager().get_current_id()))
{
thread.start([this]()
{
LV2_LOCK;
auto name = fmt::format("Timer[0x%x] Thread", Emu.GetIdManager().get_current_id());

while (thread.joinable() && !Emu.IsStopped())
thread.start([name]{ return name; }, [this]()
{
std::unique_lock<std::mutex> lock(thread.mutex);

while (thread.joinable())
{
CHECK_EMU_STATUS;

if (state == SYS_TIMER_STATE_RUN)
{
LV2_LOCK;

if (get_system_time() >= start)
{
const auto queue = port.lock();
@ -48,14 +53,14 @@ lv2_timer_t::lv2_timer_t()
}
}

cv.wait_for(lv2_lock, std::chrono::milliseconds(1));
thread.cv.wait_for(lock, std::chrono::milliseconds(1));
}
});
}

lv2_timer_t::~lv2_timer_t()
{
cv.notify_all();
thread.cv.notify_one();
thread.join();
}

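The reworked constructor above hands the worker a lazily-evaluated name (the first lambda passed to thread_t::start) and sleeps on the thread's own mutex/cv pair instead of the global LV2 lock. A stripped-down sketch of the same pattern follows; it relies only on the thread_t members exercised in this diff (start, mutex, cv, joinable, join), and everything else is illustrative.

// Hypothetical periodic worker using the thread_t pattern from lv2_timer_t above.
struct periodic_worker_t
{
	thread_t thread;

	periodic_worker_t()
	{
		thread.start([]{ return std::string("Sample Worker Thread"); }, [this]()
		{
			std::unique_lock<std::mutex> lock(thread.mutex);

			while (thread.joinable())
			{
				CHECK_EMU_STATUS; // throws when the emulator is being stopped

				// ... periodic work goes here ...

				thread.cv.wait_for(lock, std::chrono::milliseconds(1));
			}
		});
	}

	~periodic_worker_t()
	{
		thread.cv.notify_one(); // wake the loop so it can re-check joinable() and emu state
		thread.join();
	}
};
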
@ -88,8 +93,6 @@ s32 sys_timer_destroy(u32 timer_id)

Emu.GetIdManager().remove<lv2_timer_t>(timer_id);

lv2_lock.unlock();

return CELL_OK;
}

@ -108,7 +111,7 @@ s32 sys_timer_get_information(u32 timer_id, vm::ptr<sys_timer_information_t> inf

info->next_expiration_time = timer->start;

info->period = timer->period;
info->period = timer->period;
info->timer_state = timer->state;

return CELL_OK;
@ -160,10 +163,11 @@ s32 _sys_timer_start(u32 timer_id, u64 base_time, u64 period)

// sys_timer_start_periodic() will use current time (TODO: is it correct?)

timer->start = base_time ? base_time : start_time + period;
timer->start = base_time ? base_time : start_time + period;
timer->period = period;
timer->state = SYS_TIMER_STATE_RUN;
timer->cv.notify_one();
timer->state = SYS_TIMER_STATE_RUN;

timer->thread.cv.notify_one();

return CELL_OK;
}
@ -205,10 +209,10 @@ s32 sys_timer_connect_event_queue(u32 timer_id, u32 queue_id, u64 name, u64 data
return CELL_EISCONN;
}

timer->port = queue; // connect event queue
timer->port = queue; // connect event queue
timer->source = name ? name : ((u64)process_getpid() << 32) | timer_id;
timer->data1 = data1;
timer->data2 = data2;
timer->data1 = data1;
timer->data2 = data2;

return CELL_OK;
}

@ -30,7 +30,6 @@ struct lv2_timer_t
u64 period; // period (oneshot if 0)

std::atomic<u32> state; // timer state
std::condition_variable cv;

thread_t thread; // timer thread

@ -97,46 +97,6 @@ void Emulator::SetTitle(const std::string& title)
m_title = title;
}

void Emulator::CheckStatus()
{
//auto threads = GetCPU().GetThreads();

//if (!threads.size())
//{
// Stop();
// return;
//}

//bool AllPaused = true;

//for (auto& t : threads)
//{
// if (t->IsPaused()) continue;
// AllPaused = false;
// break;
//}

//if (AllPaused)
//{
// Pause();
// return;
//}

//bool AllStopped = true;

//for (auto& t : threads)
//{
// if (t->IsStopped()) continue;
// AllStopped = false;
// break;
//}

//if (AllStopped)
//{
// Pause();
//}
}

bool Emulator::BootGame(const std::string& path, bool direct)
{
static const char* elf_path[6] =
@ -180,6 +140,8 @@ bool Emulator::BootGame(const std::string& path, bool direct)

void Emulator::Load()
{
m_status = Ready;

GetModuleManager().Init();

if (!fs::is_file(m_path)) return;
@ -288,8 +250,6 @@ void Emulator::Load()

LoadPoints(BreakPointsDBName);

m_status = Ready;

GetGSManager().Init();
GetCallbackManager().Init();
GetAudioManager().Init();
@ -327,8 +287,13 @@ void Emulator::Pause()
if (!IsRunning()) return;
SendDbgCommand(DID_PAUSE_EMU);

if (sync_bool_compare_and_swap((volatile u32*)&m_status, Running, Paused))
if (sync_bool_compare_and_swap(&m_status, Running, Paused))
{
for (auto& t : GetCPU().GetAllThreads())
{
t->Sleep(); // trigger status check
}

SendDbgCommand(DID_PAUSED_EMU);

GetCallbackManager().RunPauseCallbacks(true);
@ -342,7 +307,10 @@ void Emulator::Resume()

m_status = Running;

CheckStatus();
for (auto& t : GetCPU().GetAllThreads())
{
t->Awake(); // trigger status check
}

SendDbgCommand(DID_RESUMED_EMU);

@ -359,13 +327,9 @@ void Emulator::Stop()

m_status = Stopped;

for (auto& t : GetCPU().GetAllThreads())
{
auto threads = GetCPU().GetThreads();

for (auto& t : threads)
{
t->AddEvent(CPU_EVENT_STOP);
}
t->Pause(); // trigger status check
}

while (g_thread_count)

@ -2,7 +2,7 @@

#include "Loader/Loader.h"

enum Status
enum Status : u32
{
Running,
Paused,
@ -34,7 +34,7 @@ private:
u32 m_sdk_version = 0x360001;
u32 m_malloc_pagesize = 0x100000;
u32 m_primary_stacksize = 0x100000;
u32 m_primary_prio = 0x50;
s32 m_primary_prio = 0x50;

public:
EmuInfo()
@ -51,7 +51,7 @@ class Emulator
Interpreter,
};

volatile uint m_status;
volatile u32 m_status;
uint m_mode;

u32 m_rsx_callback;
@ -146,7 +146,7 @@ public:
m_info.m_tls_memsz = memsz;
}

void SetParams(u32 sdk_ver, u32 malloc_pagesz, u32 stacksz, u32 prio)
void SetParams(u32 sdk_ver, u32 malloc_pagesz, u32 stacksz, s32 prio)
{
m_info.m_sdk_version = sdk_ver;
m_info.m_malloc_pagesize = malloc_pagesz;
@ -171,12 +171,11 @@ public:
u32 GetMallocPageSize() { return m_info.m_malloc_pagesize; }
u32 GetSDKVersion() { return m_info.m_sdk_version; }
u32 GetPrimaryStackSize() { return m_info.m_primary_stacksize; }
u32 GetPrimaryPrio() { return m_info.m_primary_prio; }
s32 GetPrimaryPrio() { return m_info.m_primary_prio; }

u32 GetRSXCallback() const { return m_rsx_callback; }
u32 GetCPUThreadStop() const { return m_cpu_thr_stop; }

void CheckStatus();
bool BootGame(const std::string& path, bool direct = false);

void Load();
@ -197,7 +196,9 @@ public:
using lv2_lock_type = std::unique_lock<std::mutex>;

#define LV2_LOCK lv2_lock_type lv2_lock(Emu.GetCoreMutex())
#define CHECK_LV2_LOCK(x) assert((x).owns_lock() && (x).mutex() == &Emu.GetCoreMutex())
#define LV2_DEFER_LOCK lv2_lock_type lv2_lock
#define CHECK_LV2_LOCK(x) if (!(x).owns_lock() || (x).mutex() != &Emu.GetCoreMutex()) throw EXCEPTION("Invalid LV2_LOCK (locked=%d)", (x).owns_lock())
#define CHECK_EMU_STATUS if (Emu.IsStopped()) throw EXCEPTION("Aborted (emulation stopped)")

extern Emulator Emu;

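With this hunk CHECK_LV2_LOCK throws instead of asserting, and LV2_DEFER_LOCK declares the lv2_lock object without acquiring the core mutex, so a callee can verify that its caller actually holds the lock. A brief sketch of how these macros compose; the function names are illustrative only.

// Hypothetical helper that must only be called with the LV2 lock held.
void notify_sample_waiters(lv2_lock_type& lv2_lock)
{
	CHECK_LV2_LOCK(lv2_lock); // throws EXCEPTION("Invalid LV2_LOCK ...") if the lock is not held

	// ... wake waiting threads under the core mutex ...
}

s32 sys_sample_signal(u32 id)
{
	LV2_LOCK; // declares and acquires lv2_lock on Emu.GetCoreMutex()

	notify_sample_waiters(lv2_lock);

	return CELL_OK;
}
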
@ -76,16 +76,16 @@ struct wxWriter : Log::LogListener
{
switch (msg.mServerity)
{
case Log::LogSeverityNotice:
case Log::Severity::Notice:
llogcon->SetDefaultStyle(m_color_white);
break;
case Log::LogSeverityWarning:
case Log::Severity::Warning:
llogcon->SetDefaultStyle(m_color_yellow);
break;
case Log::LogSeverityError:
case Log::Severity::Error:
llogcon->SetDefaultStyle(m_color_red);
break;
case Log::LogSeveritySuccess:
case Log::Severity::Success:
llogcon->SetDefaultStyle(m_color_green);
break;
default:

@ -93,10 +93,6 @@ public:
case DID_RESUME_EMU:
m_btn_run->SetLabel("Pause");
break;

case DID_EXIT_THR_SYSCALL:
Emu.GetCPU().RemoveThread(((CPUThread*)event.GetClientData())->GetId());
break;
}

UpdateUI();

@ -73,7 +73,7 @@ InstructionEditorDialog::InstructionEditorDialog(wxPanel *parent, u64 _pc, CPUTh
s_panel_margin_x->AddSpacer(12);

this->Connect(wxEVT_COMMAND_TEXT_UPDATED, wxCommandEventHandler(InstructionEditorDialog::updatePreview));
t2_instr->SetValue(wxString::Format("%08x", vm::ps3::read32(CPU->offset + pc).value()));
t2_instr->SetValue(wxString::Format("%08x", vm::ps3::read32(CPU->GetOffset() + pc).value()));

this->SetSizerAndFit(s_panel_margin_x);

@ -83,7 +83,7 @@ InstructionEditorDialog::InstructionEditorDialog(wxPanel *parent, u64 _pc, CPUTh
if (!t2_instr->GetValue().ToULong(&opcode, 16))
wxMessageBox("This instruction could not be parsed.\nNo changes were made.","Error");
else
vm::ps3::write32(CPU->offset + pc, (u32)opcode);
vm::ps3::write32(CPU->GetOffset() + pc, (u32)opcode);
}
}

@ -108,9 +108,8 @@ void InterpreterDisAsmFrame::UpdateUnitList()
{
m_choice_units->Freeze();
m_choice_units->Clear();
auto thrs = Emu.GetCPU().GetThreads();

for (auto& t : thrs)
for (auto& t : Emu.GetCPU().GetAllThreads())
{
m_choice_units->Append(t->GetFName(), t.get());
}
@ -248,10 +247,10 @@ void InterpreterDisAsmFrame::ShowAddr(const u64 addr)
}
else
{
disasm->offset = vm::get_ptr<u8>(CPU->offset);
disasm->offset = vm::get_ptr<u8>(CPU->GetOffset());
for(uint i=0, count = 4; i<m_item_count; ++i, PC += count)
{
if(!vm::check_addr(CPU->offset + PC, 4))
if(!vm::check_addr(CPU->GetOffset() + PC, 4))
{
m_list->SetItem(i, 0, wxString(IsBreakPoint(PC) ? ">>> " : " ") + wxString::Format("[%08llx] illegal address", PC));
count = 4;
@ -259,7 +258,7 @@ void InterpreterDisAsmFrame::ShowAddr(const u64 addr)
}

disasm->dump_pc = PC;
count = decoder->DecodeMemory(CPU->offset + PC);
count = decoder->DecodeMemory(CPU->GetOffset() + PC);

if(IsBreakPoint(PC))
{
@ -272,7 +271,7 @@ void InterpreterDisAsmFrame::ShowAddr(const u64 addr)

wxColour colour;

if((!CPU->IsRunning() || !Emu.IsRunning()) && PC == CPU->PC)
if(CPU->IsPaused() && PC == CPU->GetPC())
{
colour = wxColour("Green");
}
@ -456,11 +455,11 @@ void InterpreterDisAsmFrame::Show_Val(wxCommandEvent& WXUNUSED(event))

diag->SetSizerAndFit( s_panel );

if(CPU) p_pc->SetValue(wxString::Format("%x", CPU->PC));
if(CPU) p_pc->SetValue(wxString::Format("%x", CPU->GetPC()));

if(diag->ShowModal() == wxID_OK)
{
unsigned long pc = CPU ? CPU->PC : 0x0;
unsigned long pc = CPU ? CPU->GetPC() : 0x0;
p_pc->GetValue().ToULong(&pc, 16);
Emu.GetMarkedPoints().push_back(pc);
remove_markedPC.push_back(Emu.GetMarkedPoints().size()-1);
@ -470,10 +469,9 @@ void InterpreterDisAsmFrame::Show_Val(wxCommandEvent& WXUNUSED(event))

void InterpreterDisAsmFrame::Show_PC(wxCommandEvent& WXUNUSED(event))
{
if(CPU) ShowAddr(CentrePc(CPU->PC));
if(CPU) ShowAddr(CentrePc(CPU->GetPC()));
}

extern bool dump_enable;
void InterpreterDisAsmFrame::DoRun(wxCommandEvent& WXUNUSED(event))
{
if(!CPU) return;
@ -496,7 +494,7 @@ void InterpreterDisAsmFrame::DoPause(wxCommandEvent& WXUNUSED(event))

void InterpreterDisAsmFrame::DoStep(wxCommandEvent& WXUNUSED(event))
{
if(CPU) CPU->ExecOnce();
if(CPU) CPU->Step();
}

void InterpreterDisAsmFrame::InstrKey(wxListEvent& event)

@ -77,11 +77,11 @@ void KernelExplorer::Update()
// TODO: FileSystem

// Semaphores
if (u32 count = Emu.GetIdManager().get_count_by_type(SYS_SEMAPHORE_OBJECT))
if (u32 count = Emu.GetIdManager().get_count(SYS_SEMAPHORE_OBJECT))
{
sprintf(name, "Semaphores (%d)", count);
const auto& node = m_tree->AppendItem(root, name);
for (const auto id : Emu.GetIdManager().get_IDs_by_type(SYS_SEMAPHORE_OBJECT))
for (const auto id : Emu.GetIdManager().get_IDs(SYS_SEMAPHORE_OBJECT))
{
const auto sem = Emu.GetIdManager().get<lv2_sema_t>(id);
sprintf(name, "Semaphore: ID = 0x%x '%s', Count = %d, Max Count = %d, Waiters = %d", id, &name64(sem->name), sem->value.load(), sem->max, sem->waiters.load());
@ -90,11 +90,11 @@ void KernelExplorer::Update()
}

// Mutexes
if (u32 count = Emu.GetIdManager().get_count_by_type(SYS_MUTEX_OBJECT))
if (u32 count = Emu.GetIdManager().get_count(SYS_MUTEX_OBJECT))
{
sprintf(name, "Mutexes (%d)", count);
const auto& node = m_tree->AppendItem(root, name);
for (const auto id : Emu.GetIdManager().get_IDs_by_type(SYS_MUTEX_OBJECT))
for (const auto id : Emu.GetIdManager().get_IDs(SYS_MUTEX_OBJECT))
{
const auto mutex = Emu.GetIdManager().get<lv2_mutex_t>(id);
sprintf(name, "Mutex: ID = 0x%x '%s'", id, &name64(mutex->name));
@ -103,11 +103,11 @@ void KernelExplorer::Update()
}

// Light Weight Mutexes
if (u32 count = Emu.GetIdManager().get_count_by_type(SYS_LWMUTEX_OBJECT))
if (u32 count = Emu.GetIdManager().get_count(SYS_LWMUTEX_OBJECT))
{
sprintf(name, "Lightweight Mutexes (%d)", count);
const auto& node = m_tree->AppendItem(root, name);
for (const auto id : Emu.GetIdManager().get_IDs_by_type(SYS_LWMUTEX_OBJECT))
for (const auto id : Emu.GetIdManager().get_IDs(SYS_LWMUTEX_OBJECT))
{
const auto lwm = Emu.GetIdManager().get<lv2_lwmutex_t>(id);
sprintf(name, "Lightweight Mutex: ID = 0x%x '%s'", id, &name64(lwm->name));
@ -116,11 +116,11 @@ void KernelExplorer::Update()
}

// Condition Variables
if (u32 count = Emu.GetIdManager().get_count_by_type(SYS_COND_OBJECT))
if (u32 count = Emu.GetIdManager().get_count(SYS_COND_OBJECT))
{
sprintf(name, "Condition Variables (%d)", count);
const auto& node = m_tree->AppendItem(root, name);
for (const auto id : Emu.GetIdManager().get_IDs_by_type(SYS_COND_OBJECT))
for (const auto id : Emu.GetIdManager().get_IDs(SYS_COND_OBJECT))
{
const auto cond = Emu.GetIdManager().get<lv2_cond_t>(id);
sprintf(name, "Condition Variable: ID = 0x%x '%s'", id, &name64(cond->name));
@ -129,11 +129,11 @@ void KernelExplorer::Update()
}

// Light Weight Condition Variables
if (u32 count = Emu.GetIdManager().get_count_by_type(SYS_LWCOND_OBJECT))
if (u32 count = Emu.GetIdManager().get_count(SYS_LWCOND_OBJECT))
{
sprintf(name, "Lightweight Condition Variables (%d)", count);
const auto& node = m_tree->AppendItem(root, name);
for (const auto id : Emu.GetIdManager().get_IDs_by_type(SYS_LWCOND_OBJECT))
for (const auto id : Emu.GetIdManager().get_IDs(SYS_LWCOND_OBJECT))
{
const auto lwc = Emu.GetIdManager().get<lv2_lwcond_t>(id);
sprintf(name, "Lightweight Condition Variable: ID = 0x%x '%s'", id, &name64(lwc->name));
@ -142,11 +142,11 @@ void KernelExplorer::Update()
}

// Event Queues
if (u32 count = Emu.GetIdManager().get_count_by_type(SYS_EVENT_QUEUE_OBJECT))
if (u32 count = Emu.GetIdManager().get_count(SYS_EVENT_QUEUE_OBJECT))
{
sprintf(name, "Event Queues (%d)", count);
const auto& node = m_tree->AppendItem(root, name);
for (const auto id : Emu.GetIdManager().get_IDs_by_type(SYS_EVENT_QUEUE_OBJECT))
for (const auto id : Emu.GetIdManager().get_IDs(SYS_EVENT_QUEUE_OBJECT))
{
const auto queue = Emu.GetIdManager().get<lv2_event_queue_t>(id);
sprintf(name, "Event Queue: ID = 0x%x '%s', Key = %#llx", id, &name64(queue->name), queue->key);
@ -155,11 +155,11 @@ void KernelExplorer::Update()
}

// Event Ports
if (u32 count = Emu.GetIdManager().get_count_by_type(SYS_EVENT_PORT_OBJECT))
if (u32 count = Emu.GetIdManager().get_count(SYS_EVENT_PORT_OBJECT))
{
sprintf(name, "Event Ports (%d)", count);
const auto& node = m_tree->AppendItem(root, name);
for (const auto id : Emu.GetIdManager().get_IDs_by_type(SYS_EVENT_PORT_OBJECT))
for (const auto id : Emu.GetIdManager().get_IDs(SYS_EVENT_PORT_OBJECT))
{
const auto port = Emu.GetIdManager().get<lv2_event_port_t>(id);
sprintf(name, "Event Port: ID = 0x%x, Name = %#llx", id, port->name);
@ -168,13 +168,13 @@ void KernelExplorer::Update()
}

// Modules
if (u32 count = Emu.GetIdManager().get_count_by_type(SYS_PRX_OBJECT))
if (u32 count = Emu.GetIdManager().get_count(SYS_PRX_OBJECT))
{
sprintf(name, "Modules (%d)", count);
const auto& node = m_tree->AppendItem(root, name);
//sprintf(name, "Segment List (%l)", 2 * objects.size()); // TODO: Assuming 2 segments per PRX file is not good
//m_tree->AppendItem(node, name);
for (const auto& id : Emu.GetIdManager().get_IDs_by_type(SYS_PRX_OBJECT))
for (const auto& id : Emu.GetIdManager().get_IDs(SYS_PRX_OBJECT))
{
sprintf(name, "PRX: ID = 0x%x", id);
m_tree->AppendItem(node, name);
@ -182,11 +182,11 @@ void KernelExplorer::Update()
}

// Memory Containers
if (u32 count = Emu.GetIdManager().get_count_by_type(SYS_MEM_OBJECT))
if (u32 count = Emu.GetIdManager().get_count(SYS_MEM_OBJECT))
{
sprintf(name, "Memory Containers (%d)", count);
const auto& node = m_tree->AppendItem(root, name);
for (const auto& id : Emu.GetIdManager().get_IDs_by_type(SYS_MEM_OBJECT))
for (const auto& id : Emu.GetIdManager().get_IDs(SYS_MEM_OBJECT))
{
sprintf(name, "Memory Container: ID = 0x%x", id);
m_tree->AppendItem(node, name);
@ -194,11 +194,11 @@ void KernelExplorer::Update()
}

// Event Flags
if (u32 count = Emu.GetIdManager().get_count_by_type(SYS_EVENT_FLAG_OBJECT))
if (u32 count = Emu.GetIdManager().get_count(SYS_EVENT_FLAG_OBJECT))
{
sprintf(name, "Event Flags (%d)", count);
const auto& node = m_tree->AppendItem(root, name);
for (const auto& id : Emu.GetIdManager().get_IDs_by_type(SYS_EVENT_FLAG_OBJECT))
for (const auto& id : Emu.GetIdManager().get_IDs(SYS_EVENT_FLAG_OBJECT))
{
sprintf(name, "Event Flag: ID = 0x%x", id);
m_tree->AppendItem(node, name);

@ -13,8 +13,7 @@
#include "define_new_memleakdetect.h"
#endif

// This header should be frontend-agnostic, so don't assume wx includes everything
#pragma warning( disable : 4800 )
#pragma warning( disable : 4351 )

#include <cstdio>
#include <cstring>
@ -53,6 +52,8 @@ using s16 = std::int16_t;
using s32 = std::int32_t;
using s64 = std::int64_t;

using b8 = std::uint8_t;

using f32 = float;
using f64 = double;

@ -109,6 +110,7 @@ template<typename T> struct ID_type;
#define CHECK_SIZE_ALIGN(type, size, align) CHECK_SIZE(type, size); CHECK_ALIGN(type, align)

#define WRAP_EXPR(expr) [&]{ return (expr); }
#define EXCEPTION(text, ...) fmt::exception(__FILE__, __LINE__, __FUNCTION__, text, ##__VA_ARGS__)

#define _PRGNAME_ "RPCS3"
#define _PRGVER_ "0.0.0.5"