mirror of https://github.com/RPCS3/rpcs3.git
Replace verify() with ensure() with auto src location.
The expression ensure(x) returns x. Usage of the comma operator is removed.
parent 38745e5782
commit e055d16b2c
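The shape of the change, as a minimal before/after sketch (cond_a and cond_b are illustrative placeholders, not code from this commit): the old verify() took an explicit "cause" HERE location macro and chained extra conditions through an overloaded comma operator; the new ensure() takes one expression per call, captures the call site automatically through __builtin_LINE()/__builtin_FILE()-style default arguments, and returns its argument so a checked value can stay inline in a larger expression.

// before: explicit location macro, comma-operator chain
verify("context" HERE), cond_a, cond_b;

// after: one check per ensure(), source location captured automatically
ensure(cond_a);
ensure(cond_b);

// pass-through use, as in libio_sys_config_init() below
cfg->stack_addr = ensure(vm::alloc(0x2000, vm::stack, 4096));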
@@ -1,4 +1,4 @@
 #include "stdafx.h"
 #include "Config.h"
 #include "Utilities/types.h"

@@ -1,4 +1,4 @@
 #include "File.h"
 #include "mutex.h"
 #include "StrFmt.h"
 #include "BEType.h"
@@ -41,10 +41,10 @@ static std::unique_ptr<wchar_t[]> to_wchar(const std::string& source)
 std::memcpy(buffer.get() + 32768 + 4, L"UNC\\", 4 * sizeof(wchar_t));
 }

-verify("to_wchar" HERE), MultiByteToWideChar(CP_UTF8, 0, source.c_str(), size, buffer.get() + 32768 + (unc ? 8 : 4), size);
+ensure(MultiByteToWideChar(CP_UTF8, 0, source.c_str(), size, buffer.get() + 32768 + (unc ? 8 : 4), size)); // "to_wchar"

 // Canonicalize wide path (replace '/', ".", "..", \\ repetitions, etc)
-verify("to_wchar" HERE), GetFullPathNameW(buffer.get() + 32768, 32768, buffer.get(), nullptr) - 1 < 32768 - 1;
+ensure(GetFullPathNameW(buffer.get() + 32768, 32768, buffer.get(), nullptr) - 1 < 32768 - 1); // "to_wchar"

 return buffer;
 }
@@ -63,7 +63,7 @@ static void to_utf8(std::string& out, const wchar_t* source)
 const int result = WideCharToMultiByte(CP_UTF8, 0, source, static_cast<int>(length) + 1, &out.front(), buf_size, NULL, NULL);

 // Fix the size
-out.resize(verify("to_utf8" HERE, result) - 1);
+out.resize(ensure(result) - 1);
 }

 static time_t to_time(const ULARGE_INTEGER& ft)
@@ -315,7 +315,7 @@ std::shared_ptr<fs::device_base> fs::get_virtual_device(const std::string& path)

 std::shared_ptr<fs::device_base> fs::set_virtual_device(const std::string& name, const std::shared_ptr<device_base>& device)
 {
-verify(HERE), name.starts_with("//"), name[2] != '/';
+ensure(name.starts_with("//") && name[2] != '/');

 return get_device_manager().set_device(name, device);
 }
@@ -355,7 +355,7 @@ bool fs::stat(const std::string& path, stat_t& info)
 if (!GetFileAttributesExW(to_wchar(std::string(epath) + '/').get(), GetFileExInfoStandard, &attrs))
 {
 g_tls_error = to_error(GetLastError());
 return false;
 }

 info.is_directory = true; // Handle drives as directories
@@ -404,7 +404,7 @@ bool fs::stat(const std::string& path, stat_t& info)
 if (const DWORD err = GetLastError(); err != ERROR_NO_MORE_FILES)
 {
 g_tls_error = to_error(err);
 return false;
 }

 g_tls_error = fs::error::noent;
@@ -990,7 +990,7 @@ fs::file::file(const std::string& path, bs_t<open_mode> mode)
 stat_t stat() override
 {
 FILE_BASIC_INFO basic_info;
-verify("file::stat" HERE), GetFileInformationByHandleEx(m_handle, FileBasicInfo, &basic_info, sizeof(FILE_BASIC_INFO));
+ensure(GetFileInformationByHandleEx(m_handle, FileBasicInfo, &basic_info, sizeof(FILE_BASIC_INFO))); // "file::stat"

 stat_t info;
 info.is_directory = (basic_info.FileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0;
@@ -1008,7 +1008,7 @@ fs::file::file(const std::string& path, bs_t<open_mode> mode)

 void sync() override
 {
-verify("file::sync" HERE), FlushFileBuffers(m_handle);
+ensure(FlushFileBuffers(m_handle)); // "file::sync"
 }

 bool trunc(u64 length) override
@@ -1031,7 +1031,7 @@ fs::file::file(const std::string& path, bs_t<open_mode> mode)
 const int size = narrow<int>(count, "file::read" HERE);

 DWORD nread;
-verify("file::read" HERE), ReadFile(m_handle, buffer, size, &nread, NULL);
+ensure(ReadFile(m_handle, buffer, size, &nread, NULL)); // "file::read"

 return nread;
 }
@@ -1042,7 +1042,7 @@ fs::file::file(const std::string& path, bs_t<open_mode> mode)
 const int size = narrow<int>(count, "file::write" HERE);

 DWORD nwritten;
-verify("file::write" HERE), WriteFile(m_handle, buffer, size, &nwritten, NULL);
+ensure(WriteFile(m_handle, buffer, size, &nwritten, NULL)); // "file::write"

 return nwritten;
 }
@@ -1070,7 +1070,7 @@ fs::file::file(const std::string& path, bs_t<open_mode> mode)
 u64 size() override
 {
 LARGE_INTEGER size;
-verify("file::size" HERE), GetFileSizeEx(m_handle, &size);
+ensure(GetFileSizeEx(m_handle, &size)); // "file::size"

 return size.QuadPart;
 }
@@ -1119,7 +1119,7 @@ fs::file::file(const std::string& path, bs_t<open_mode> mode)
 if (mode & fs::trunc && mode & (fs::lock + fs::unread) && mode & fs::write)
 {
 // Postpone truncation in order to avoid using O_TRUNC on a locked file
-verify(HERE), ::ftruncate(fd, 0) == 0;
+ensure(::ftruncate(fd, 0) == 0);
 }

 class unix_file final : public file_base
@@ -1140,7 +1140,7 @@ fs::file::file(const std::string& path, bs_t<open_mode> mode)
 stat_t stat() override
 {
 struct ::stat file_info;
-verify("file::stat" HERE), ::fstat(m_fd, &file_info) == 0;
+ensure(::fstat(m_fd, &file_info) == 0); // "file::stat"

 stat_t info;
 info.is_directory = S_ISDIR(file_info.st_mode);
@@ -1158,7 +1158,7 @@ fs::file::file(const std::string& path, bs_t<open_mode> mode)

 void sync() override
 {
-verify("file::sync" HERE), ::fsync(m_fd) == 0;
+ensure(::fsync(m_fd) == 0); // "file::sync"
 }

 bool trunc(u64 length) override
@@ -1175,7 +1175,7 @@ fs::file::file(const std::string& path, bs_t<open_mode> mode)
 u64 read(void* buffer, u64 count) override
 {
 const auto result = ::read(m_fd, buffer, count);
-verify("file::read" HERE), result != -1;
+ensure(result != -1); // "file::read"

 return result;
 }
@@ -1183,7 +1183,7 @@ fs::file::file(const std::string& path, bs_t<open_mode> mode)
 u64 write(const void* buffer, u64 count) override
 {
 const auto result = ::write(m_fd, buffer, count);
-verify("file::write" HERE), result != -1;
+ensure(result != -1); // "file::write"

 return result;
 }
@@ -1210,7 +1210,7 @@ fs::file::file(const std::string& path, bs_t<open_mode> mode)
 u64 size() override
 {
 struct ::stat file_info;
-verify("file::size" HERE), ::fstat(m_fd, &file_info) == 0;
+ensure(::fstat(m_fd, &file_info) == 0); // "file::size"

 return file_info.st_size;
 }
@@ -1226,7 +1226,7 @@ fs::file::file(const std::string& path, bs_t<open_mode> mode)
 static_assert(offsetof(iovec, iov_len) == offsetof(iovec_clone, iov_len), "Weird iovec::iov_len offset");

 const auto result = ::writev(m_fd, reinterpret_cast<const iovec*>(buffers), buf_count);
-verify("file::write_gather" HERE), result != -1;
+ensure(result != -1); // "file::write_gather"

 return result;
 }
@@ -1386,7 +1386,7 @@ bool fs::dir::open(const std::string& path)
 add_entry(found);
 }

-verify("dir::read" HERE), ERROR_NO_MORE_FILES == GetLastError();
+ensure(ERROR_NO_MORE_FILES == GetLastError()); // "dir::read"
 FindClose(handle);
 }

@@ -1,4 +1,4 @@
 #include "types.h"
 #include "JIT.h"
 #include "StrFmt.h"
 #include "File.h"

@@ -1,4 +1,4 @@
 #include "StrFmt.h"
 #include "BEType.h"
 #include "StrUtil.h"
 #include "cfmt.h"
@@ -249,7 +249,7 @@ namespace fmt
 thread_ctrl::emergency_exit(msg);
 }

-void raw_verify_error(const char* msg, const fmt_type_info* sup, u64 arg)
+void raw_verify_error(const src_loc& loc)
 {
 std::string out{"Verification failed"};

@@ -257,26 +257,31 @@ namespace fmt
 #ifdef _WIN32
 if (DWORD error = GetLastError())
 {
-fmt::append(out, " (e=%#x)", error);
+fmt::append(out, " (e=%#x):", error);
 }
 #else
 if (int error = errno)
 {
-fmt::append(out, " (e=%d)", error);
+fmt::append(out, " (e=%d):", error);
 }
 #endif

-if (sup)
+if (loc.col != umax)
 {
-out += " (";
-sup->fmt_string(out, arg); // Print value
-out += ")";
+fmt::append(out, "\n(in file %s:%s[:%s]", loc.file, loc.line, loc.col);
 }
+else
+{
+fmt::append(out, "\n(in file %s:%s", loc.file, loc.line);
+}

-if (msg)
+if (loc.func && *loc.func)
 {
-out += ": ";
-out += msg;
+fmt::append(out, ", in function %s)", loc.func);
 }
+else
+{
+out += ')';
+}

 thread_ctrl::emergency_exit(out);
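Given the format strings above, a failed check now reports the captured source location instead of a formatted value. The resulting message has roughly this shape (the path, line, column, and errno value here are illustrative, and the column part appears only when the compiler provides __builtin_COLUMN):

Verification failed (e=2):
(in file Utilities/File.cpp:1178[:2], in function read)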
@@ -1,4 +1,4 @@
 #include "stdafx.h"
 #include "Emu/System.h"
 #include "Emu/Cell/SPUThread.h"
 #include "Emu/Cell/PPUThread.h"
@@ -1872,7 +1872,7 @@ void thread_base::start()
 // Receive "that" native thread handle, sent "this" thread_base
 const u64 _self = reinterpret_cast<u64>(atomic_storage<thread_base*>::load(*tls));
 m_thread.release(_self);
-verify(HERE), _self != reinterpret_cast<u64>(this);
+ensure(_self != reinterpret_cast<u64>(this));
 atomic_storage<thread_base*>::store(*tls, this);
 s_thread_pool[pos].notify_one();
 return;
@@ -1880,9 +1880,10 @@ void thread_base::start()

 #ifdef _WIN32
 m_thread = ::_beginthreadex(nullptr, 0, entry_point, this, CREATE_SUSPENDED, nullptr);
-verify("thread_ctrl::start" HERE), m_thread, ::ResumeThread(reinterpret_cast<HANDLE>(+m_thread)) != -1;
+ensure(m_thread);
+ensure(::ResumeThread(reinterpret_cast<HANDLE>(+m_thread)) != -1);
 #else
-verify("thread_ctrl::start" HERE), pthread_create(reinterpret_cast<pthread_t*>(&m_thread.raw()), nullptr, entry_point, this) == 0;
+ensure(pthread_create(reinterpret_cast<pthread_t*>(&m_thread.raw()), nullptr, entry_point, this) == 0);
 #endif
 }

@@ -1,4 +1,4 @@
 #include "cond.h"
 #include "sync.h"
 #include "lockless.h"

@@ -9,7 +9,7 @@
 void cond_variable::imp_wait(u32 _old, u64 _timeout) noexcept
 {
 // Not supposed to fail
-verify(HERE), _old;
+ensure(_old);

 // Wait with timeout
 m_value.wait(_old, c_signal_mask, atomic_wait_timeout{_timeout > max_timeout ? UINT64_MAX : _timeout * 1000});

@@ -2,7 +2,7 @@

 void shared_mutex::imp_lock_shared(u32 val)
 {
-verify("shared_mutex underflow" HERE), val < c_err;
+ensure(val < c_err); // "shared_mutex underflow"

 for (int i = 0; i < 10; i++)
 {
@@ -23,14 +23,14 @@ void shared_mutex::imp_lock_shared(u32 val)
 return;
 }

-verify("shared_mutex overflow" HERE), (old % c_sig) + c_one < c_sig;
+ensure((old % c_sig) + c_one < c_sig); // "shared_mutex overflow"
 imp_wait();
 lock_downgrade();
 }

 void shared_mutex::imp_unlock_shared(u32 old)
 {
-verify("shared_mutex underflow" HERE), old - 1 < c_err;
+ensure(old - 1 < c_err); // "shared_mutex underflow"

 // Check reader count, notify the writer if necessary
 if ((old - 1) % c_one == 0)
@@ -71,7 +71,7 @@ void shared_mutex::imp_signal()

 void shared_mutex::imp_lock(u32 val)
 {
-verify("shared_mutex underflow" HERE), val < c_err;
+ensure(val < c_err); // "shared_mutex underflow"

 for (int i = 0; i < 10; i++)
 {
@@ -90,13 +90,13 @@ void shared_mutex::imp_lock(u32 val)
 return;
 }

-verify("shared_mutex overflow" HERE), (old % c_sig) + c_one < c_sig;
+ensure((old % c_sig) + c_one < c_sig); // "shared_mutex overflow"
 imp_wait();
 }

 void shared_mutex::imp_unlock(u32 old)
 {
-verify("shared_mutex underflow" HERE), old - c_one < c_err;
+ensure(old - c_one < c_err); // "shared_mutex underflow"

 // 1) Notify the next writer if necessary
 // 2) Notify all readers otherwise if necessary (currently indistinguishable from writers)
@@ -121,7 +121,7 @@ void shared_mutex::imp_lock_upgrade()
 // Convert to writer lock
 const u32 old = m_value.fetch_add(c_one - 1);

-verify("shared_mutex overflow" HERE), (old % c_sig) + c_one - 1 < c_sig;
+ensure((old % c_sig) + c_one - 1 < c_sig); // "shared_mutex overflow"

 if (old % c_one == 1)
 {

@@ -52,7 +52,7 @@ void semaphore_base::imp_wait()

 void semaphore_base::imp_post(s32 _old)
 {
-verify("semaphore_base: overflow" HERE), _old < 0;
+ensure(_old < 0); // "semaphore_base: overflow"

 m_value.notify_one();
 }

@@ -1,4 +1,4 @@
 #include "sysinfo.h"
 #include "StrFmt.h"
 #include "File.h"
 #include "Emu/system_config.h"

@@ -178,8 +178,8 @@ namespace std
 #endif

 using steady_clock = std::conditional<
 std::chrono::high_resolution_clock::is_steady,
 std::chrono::high_resolution_clock, std::chrono::steady_clock>::type;

 // Get integral type from type size
 template <std::size_t N>
@@ -224,7 +224,7 @@ using get_sint_t = typename get_int_impl<N>::stype;
 template <typename T>
 std::remove_cvref_t<T> as_rvalue(T&& obj)
 {
 return std::forward<T>(obj);
 }

 // Formatting helper, type-specific preprocessing for improving safety and functionality
@@ -605,8 +605,8 @@ union alignas(2) f16
 // See http://stackoverflow.com/a/26779139
 // The conversion doesn't handle NaN/Inf
 u32 raw = ((_u16 & 0x8000) << 16) | // Sign (just moved)
 (((_u16 & 0x7c00) + 0x1C000) << 13) | // Exponent ( exp - 15 + 127)
 ((_u16 & 0x03FF) << 13); // Mantissa

 return std::bit_cast<f32>(raw);
 }
@@ -760,75 +760,41 @@ constexpr u64 operator""_u64(const char* s, std::size_t /*length*/)
 }
 }

+#if !defined(__INTELLISENSE__) && !__has_builtin(__builtin_COLUMN) && !defined(_MSC_VER)
+constexpr unsigned __builtin_COLUMN()
+{
+return -1;
+}
+#endif
+
+struct src_loc
+{
+u32 line;
+u32 col;
+const char* file;
+const char* func;
+};
+
 namespace fmt
 {
 [[noreturn]] void raw_error(const char* msg);
-[[noreturn]] void raw_verify_error(const char* msg, const fmt_type_info* sup, u64 arg);
+[[noreturn]] void raw_verify_error(const src_loc& loc);
 [[noreturn]] void raw_narrow_error(const char* msg, const fmt_type_info* sup, u64 arg);
 }

-struct verify_func
+template <typename T>
+constexpr decltype(auto) ensure(T&& arg,
+u32 line = __builtin_LINE(),
+u32 col = __builtin_COLUMN(),
+const char* file = __builtin_FILE(),
+const char* func = __builtin_FUNCTION()) noexcept
 {
-template <typename T>
-bool operator()(T&& value) const
+if (std::forward<T>(arg)) [[likely]]
 {
-if (std::forward<T>(value))
-{
-return true;
-}
-
-return false;
-}
-};
-
-template <uint N>
-struct verify_impl
-{
-const char* cause;
-
-template <typename T>
-auto operator,(T&& value) const
-{
-// Verification (can be safely disabled)
-if (!verify_func()(std::forward<T>(value)))
-{
-fmt::raw_verify_error(cause, nullptr, N);
-}
-
-return verify_impl<N + 1>{cause};
-}
-};
-
-// Verification helper, checks several conditions delimited with comma operator
-inline auto verify(const char* cause)
-{
-return verify_impl<0>{cause};
-}
-
-// Verification helper (returns value or lvalue reference, may require to use verify_move instead)
-template <typename F = verify_func, typename T>
-inline T verify(const char* cause, T&& value, F&& pred = F())
-{
-if (!pred(std::forward<T>(value)))
-{
-using unref = std::remove_const_t<std::remove_reference_t<T>>;
-fmt::raw_verify_error(cause, fmt::get_type_info<fmt_unveil_t<unref>>(), fmt_unveil<unref>::get(value));
+return std::forward<T>(arg);
 }

-return std::forward<T>(value);
-}
-
-// Verification helper (must be used in return expression or in place of std::move)
-template <typename F = verify_func, typename T>
-inline std::remove_reference_t<T>&& verify_move(const char* cause, T&& value, F&& pred = F())
-{
-if (!pred(std::forward<T>(value)))
-{
-using unref = std::remove_const_t<std::remove_reference_t<T>>;
-fmt::raw_verify_error(cause, fmt::get_type_info<fmt_unveil_t<unref>>(), fmt_unveil<unref>::get(value));
-}
-
-return std::move(value);
+fmt::raw_verify_error({line, col, file, func});
 }

 // narrow() function details
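Two details of the replacement above are worth noting. First, ensure() forwards its argument back to the caller, so one helper covers both statement-style and expression-style checks; both uses appear in hunks of this commit:

const auto result = ::read(m_fd, buffer, count);
ensure(result != -1);            // statement form
out.resize(ensure(result) - 1);  // expression form: the checked value is reused

Second, the constexpr __builtin_COLUMN() stub defined for compilers without the intrinsic returns -1, which wraps to the maximum value of the unsigned src_loc::col field; that is why raw_verify_error() only prints the column component when loc.col != umax.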
@@ -33,7 +33,8 @@ void AudioDumper::WriteData(const void* buffer, u32 size)
 {
 if (GetCh())
 {
-verify(HERE), size, m_output.write(buffer, size) == size;
+ensure(size);
+ensure(m_output.write(buffer, size) == size);
 m_header.Size += size;
 m_header.RIFF.Size += size;
 }

@@ -464,7 +464,7 @@ void cpu_thread::operator()()
 if (progress == umax && std::exchange(wait_set, false))
 {
 // Operation finished: need to clean wait flag
-verify(HERE), !_cpu->check_state();
+ensure(!_cpu->check_state());
 return;
 }
 });
@@ -484,7 +484,7 @@ void cpu_thread::operator()()

 if (progress == umax && std::exchange(wait_set, false))
 {
-verify(HERE), !_cpu->check_state();
+ensure(!_cpu->check_state());
 return;
 }
 };
@@ -693,7 +693,7 @@ bool cpu_thread::check_state() noexcept
 cpu_counter::add(this);
 }

-verify(HERE), cpu_can_stop || !retval;
+ensure(cpu_can_stop || !retval);
 return retval;
 }

@@ -859,7 +859,7 @@ std::string cpu_thread::dump_misc() const
 bool cpu_thread::suspend_work::push(cpu_thread* _this) noexcept
 {
 // Can't allow pre-set wait bit (it'd be a problem)
-verify(HERE), !_this || !(_this->state & cpu_flag::wait);
+ensure(!_this || !(_this->state & cpu_flag::wait));

 do
 {
@@ -998,7 +998,7 @@ bool cpu_thread::suspend_work::push(cpu_thread* _this) noexcept
 }

 // Finalization (last increment)
-verify(HERE), g_suspend_counter++ & 1;
+ensure(g_suspend_counter++ & 1);

 cpu_counter::for_all_cpu(copy2, [&](cpu_thread* cpu)
 {

@@ -85,7 +85,7 @@ llvm::Value* cpu_translator::bitcast(llvm::Value* val, llvm::Type* type)

 if (const auto c1 = llvm::dyn_cast<llvm::Constant>(val))
 {
-return verify(HERE, llvm::ConstantFoldCastOperand(llvm::Instruction::BitCast, c1, type, m_module->getDataLayout()));
+return ensure(llvm::ConstantFoldCastOperand(llvm::Instruction::BitCast, c1, type, m_module->getDataLayout()));
 }

 return m_ir->CreateBitCast(val, type);
@@ -203,8 +203,8 @@ llvm::Constant* cpu_translator::make_const_vector<v128>(v128 v, llvm::Type* t)
 return llvm::ConstantInt::get(t, llvm::APInt(128, llvm::makeArrayRef(reinterpret_cast<const u64*>(v._bytes), 2)));
 }

-verify(HERE), t->isVectorTy();
-verify(HERE), 128 == t->getScalarSizeInBits() * llvm::cast<llvm::VectorType>(t)->getNumElements();
+ensure(t->isVectorTy());
+ensure(128 == t->getScalarSizeInBits() * llvm::cast<llvm::VectorType>(t)->getNumElements());

 const auto sct = t->getScalarType();

@@ -811,7 +811,7 @@ void ElementaryStream::push_au(u32 size, u64 dts, u64 pts, u64 userdata, bool ra
 u32 addr;
 {
 std::lock_guard lock(m_mutex);
-verify(HERE), !is_full(size);
+ensure(!is_full(size));

 if (put + size + 128 > memAddr + memSize)
 {
@@ -852,7 +852,7 @@ void ElementaryStream::push_au(u32 size, u64 dts, u64 pts, u64 userdata, bool ra
 put_count++;
 }

-verify(HERE), entries.push(addr, &dmux->is_closed);
+ensure(entries.push(addr, &dmux->is_closed));
 }

 void ElementaryStream::push(DemuxerStream& stream, u32 size)

@@ -177,7 +177,7 @@ error_code cellHddGameCheck(ppu_thread& ppu, u32 version, vm::cptr<char> dirName
 std::string game_dir = dirName.get_ptr();

 // TODO: Find error code
-verify(HERE), game_dir.size() == 9;
+ensure(game_dir.size() == 9);

 const std::string dir = "/dev_hdd0/game/" + game_dir;

@@ -383,7 +383,7 @@ error_code _cellGcmInitBody(ppu_thread& ppu, vm::pptr<CellGcmContextData> contex
 // Create contexts
 const auto area = vm::reserve_map(vm::rsx_context, 0, 0x10000000, 0x403);
 const u32 rsx_ctxaddr = area ? area->alloc(0x400000) : 0;
-verify(HERE), rsx_ctxaddr != 0;
+ensure(rsx_ctxaddr);

 g_defaultCommandBufferBegin = ioAddress;
 g_defaultCommandBufferFragmentCount = cmdSize / (32 * 1024);
@@ -990,7 +990,7 @@ error_code cellGcmMapEaIoAddressWithFlags(ppu_thread& ppu, u32 ea, u32 io, u32 s
 {
 cellGcmSys.warning("cellGcmMapEaIoAddressWithFlags(ea=0x%x, io=0x%x, size=0x%x, flags=0x%x)", ea, io, size, flags);

-verify(HERE), flags == 2 /*CELL_GCM_IOMAP_FLAG_STRICT_ORDERING*/;
+ensure(flags == 2 /*CELL_GCM_IOMAP_FLAG_STRICT_ORDERING*/);

 const auto cfg = g_fxo->get<gcm_config>();
 std::lock_guard lock(cfg->gcmio_mutex);
@@ -1374,7 +1374,7 @@ static std::pair<u32, u32> getNextCommandBufferBeginEnd(u32 current)
 static u32 getOffsetFromAddress(u32 address)
 {
 const u32 upper = g_fxo->get<gcm_config>()->offsetTable.ioAddress[address >> 20]; // 12 bits
-verify(HERE), (upper != 0xFFFF);
+ensure(upper != 0xFFFF);
 return (upper << 20) | (address & 0xFFFFF);
 }

@@ -363,7 +363,7 @@ bool microphone_device::has_data() const

 u32 microphone_device::capture_audio()
 {
-verify(HERE), sample_size > 0;
+ensure(sample_size > 0);

 u32 num_samples = inbuf_size / sample_size;

@@ -412,7 +412,7 @@ void microphone_device::get_raw(const u32 num_samples)
 }
 break;
 case microphone_handler::singstar:
-verify(HERE), sample_size == 4;
+ensure(sample_size == 4);

 // Mixing the 2 mics as if channels
 if (input_devices.size() == 2)
@@ -466,7 +466,7 @@ void microphone_device::get_dsp(const u32 num_samples)
 }
 break;
 case microphone_handler::singstar:
-verify(HERE), sample_size == 4;
+ensure(sample_size == 4);

 // Mixing the 2 mics as if channels
 if (input_devices.size() == 2)

@@ -37,7 +37,7 @@ void fmt_class_string<CellPamfError>::format(std::string& out, u64 arg)
 error_code pamfStreamTypeToEsFilterId(u8 type, u8 ch, CellCodecEsFilterId& pEsFilterId)
 {
 // convert type and ch to EsFilterId
-verify(HERE), (ch < 16);
+ensure(ch < 16);
 pEsFilterId.supplementalInfo1 = type == CELL_PAMF_STREAM_TYPE_AVC;
 pEsFilterId.supplementalInfo2 = 0;

@@ -137,7 +137,7 @@ error_code pamfStreamTypeToEsFilterId(u8 type, u8 ch, CellCodecEsFilterId& pEsFi
 u8 pamfGetStreamType(vm::ptr<CellPamfReader> pSelf, u32 stream)
 {
 // TODO: get stream type correctly
-verify(HERE), (stream < pSelf->pAddr->stream_count);
+ensure(stream < pSelf->pAddr->stream_count);
 auto& header = pSelf->pAddr->stream_headers[stream];

 switch (header.type)
@@ -158,7 +158,7 @@ u8 pamfGetStreamType(vm::ptr<CellPamfReader> pSelf, u32 stream)
 u8 pamfGetStreamChannel(vm::ptr<CellPamfReader> pSelf, u32 stream)
 {
 // TODO: get stream channel correctly
-verify(HERE), (stream < pSelf->pAddr->stream_count);
+ensure(stream < pSelf->pAddr->stream_count);
 auto& header = pSelf->pAddr->stream_headers[stream];

 switch (header.type)
@@ -166,29 +166,34 @@ u8 pamfGetStreamChannel(vm::ptr<CellPamfReader> pSelf, u32 stream)
 case 0x1b: // AVC
 case 0x02: // M2V
 {
-verify(HERE), (header.fid_major & 0xf0) == 0xe0, header.fid_minor == 0;
+ensure((header.fid_major & 0xf0) == 0xe0);
+ensure(!header.fid_minor);
 return header.fid_major % 16;
 }

 case 0xdc: // ATRAC3PLUS
 {
-verify(HERE), header.fid_major == 0xbd, (header.fid_minor & 0xf0) == 0;
+ensure((header.fid_major == 0xbd));
+ensure((header.fid_minor & 0xf0) == 0);
 return header.fid_minor % 16;
 }

 case 0x80: // LPCM
 {
-verify(HERE), header.fid_major == 0xbd, (header.fid_minor & 0xf0) == 0x40;
+ensure((header.fid_major == 0xbd));
+ensure((header.fid_minor & 0xf0) == 0x40);
 return header.fid_minor % 16;
 }
 case 0x81: // AC3
 {
-verify(HERE), header.fid_major == 0xbd, (header.fid_minor & 0xf0) == 0x30;
+ensure((header.fid_major == 0xbd));
+ensure((header.fid_minor & 0xf0) == 0x30);
 return header.fid_minor % 16;
 }
 case 0xdd:
 {
-verify(HERE), header.fid_major == 0xbd, (header.fid_minor & 0xf0) == 0x20;
+ensure((header.fid_major == 0xbd));
+ensure((header.fid_minor & 0xf0) == 0x20);
 return header.fid_minor % 16;
 }
 }
@@ -473,7 +478,7 @@ error_code cellPamfReaderGetEsFilterId(vm::ptr<CellPamfReader> pSelf, vm::ptr<Ce

 // always returns CELL_OK

-verify(HERE), static_cast<u32>(pSelf->stream) < pSelf->pAddr->stream_count;
+ensure(static_cast<u32>(pSelf->stream) < pSelf->pAddr->stream_count);
 auto& header = pSelf->pAddr->stream_headers[pSelf->stream];
 pEsFilterId->filterIdMajor = header.fid_major;
 pEsFilterId->filterIdMinor = header.fid_minor;
@@ -486,7 +491,7 @@ error_code cellPamfReaderGetStreamInfo(vm::ptr<CellPamfReader> pSelf, vm::ptr<vo
 {
 cellPamf.warning("cellPamfReaderGetStreamInfo(pSelf=*0x%x, pInfo=*0x%x, size=%d)", pSelf, pInfo, size);

-verify(HERE), static_cast<u32>(pSelf->stream) < pSelf->pAddr->stream_count;
+ensure(static_cast<u32>(pSelf->stream) < pSelf->pAddr->stream_count);
 auto& header = pSelf->pAddr->stream_headers[pSelf->stream];
 const u8 type = pamfGetStreamType(pSelf, pSelf->stream);
 const u8 ch = pamfGetStreamChannel(pSelf, pSelf->stream);

@@ -462,7 +462,8 @@ public:

 while (u32 res = m_sync.atomic_op([&pos](squeue_sync_var_t& sync) -> u32
 {
-verify(HERE), sync.count <= sq_size, sync.position < sq_size;
+ensure(sync.count <= sq_size);
+ensure(sync.position < sq_size);

 if (sync.push_lock)
 {
@@ -491,7 +492,9 @@ public:

 m_sync.atomic_op([](squeue_sync_var_t& sync)
 {
-verify(HERE), sync.count <= sq_size, sync.position < sq_size, !!sync.push_lock;
+ensure(sync.count <= sq_size);
+ensure(sync.position < sq_size);
+ensure(!!sync.push_lock);
 sync.push_lock = 0;
 sync.count++;
 });
@@ -522,7 +525,8 @@ public:

 while (u32 res = m_sync.atomic_op([&pos](squeue_sync_var_t& sync) -> u32
 {
-verify(HERE), sync.count <= sq_size, sync.position < sq_size;
+ensure(sync.count <= sq_size);
+ensure(sync.position < sq_size);

 if (!sync.count)
 {
@@ -551,7 +555,9 @@ public:

 m_sync.atomic_op([](squeue_sync_var_t& sync)
 {
-verify(HERE), sync.count <= sq_size, sync.position < sq_size, !!sync.pop_lock;
+ensure(sync.count <= sq_size);
+ensure(sync.position < sq_size);
+ensure(!!sync.pop_lock);
 sync.pop_lock = 0;
 sync.position++;
 sync.count--;
@@ -583,12 +589,13 @@ public:

 bool peek(T& data, u32 start_pos, const std::function<bool()>& test_exit)
 {
-verify(HERE), start_pos < sq_size;
+ensure(start_pos < sq_size);
 u32 pos = 0;

 while (u32 res = m_sync.atomic_op([&pos, start_pos](squeue_sync_var_t& sync) -> u32
 {
-verify(HERE), sync.count <= sq_size, sync.position < sq_size;
+ensure(sync.count <= sq_size);
+ensure(sync.position < sq_size);

 if (sync.count <= start_pos)
 {
@@ -617,7 +624,9 @@ public:

 m_sync.atomic_op([](squeue_sync_var_t& sync)
 {
-verify(HERE), sync.count <= sq_size, sync.position < sq_size, !!sync.pop_lock;
+ensure(sync.count <= sq_size);
+ensure(sync.position < sq_size);
+ensure(!!sync.pop_lock);
 sync.pop_lock = 0;
 });

@@ -656,7 +665,7 @@ public:
 public:
 T& operator [] (u32 index)
 {
-verify(HERE), index < m_count;
+ensure(index < m_count);
 index += m_pos;
 index = index < sq_size ? index : index - sq_size;
 return m_data[index];
@@ -669,7 +678,8 @@ public:

 while (m_sync.atomic_op([&pos, &count](squeue_sync_var_t& sync) -> u32
 {
-verify(HERE), sync.count <= sq_size, sync.position < sq_size;
+ensure(sync.count <= sq_size);
+ensure(sync.position < sq_size);

 if (sync.pop_lock || sync.push_lock)
 {
@@ -691,7 +701,10 @@ public:

 m_sync.atomic_op([](squeue_sync_var_t& sync)
 {
-verify(HERE), sync.count <= sq_size, sync.position < sq_size, !!sync.pop_lock, !!sync.push_lock;
+ensure(sync.count <= sq_size);
+ensure(sync.position < sq_size);
+ensure(!!sync.pop_lock);
+ensure(!!sync.push_lock);
 sync.pop_lock = 0;
 sync.push_lock = 0;
 });
@@ -704,7 +717,8 @@ public:
 {
 while (m_sync.atomic_op([](squeue_sync_var_t& sync) -> u32
 {
-verify(HERE), sync.count <= sq_size, sync.position < sq_size;
+ensure(sync.count <= sq_size);
+ensure(sync.position < sq_size);

 if (sync.pop_lock || sync.push_lock)
 {

@@ -836,7 +836,7 @@ error_code cellSailPlayerCreateDescriptor(vm::ptr<CellSailPlayer> pSelf, s32 str
 u32 buffer = vm::alloc(size, vm::main);
 auto bufPtr = vm::cptr<PamfHeader>::make(buffer);
 PamfHeader *buf = const_cast<PamfHeader*>(bufPtr.get_ptr());
-verify(HERE), f.read(buf, size) == size;
+ensure(f.read(buf, size) == size);
 u32 sp_ = vm::alloc(sizeof(CellPamfReader), vm::main);
 auto sp = vm::ptr<CellPamfReader>::make(sp_);
 u32 reader = cellPamfReaderInitialize(sp, bufPtr, size, 0);

@@ -714,7 +714,7 @@ void _spurs::handler_entry(ppu_thread& ppu, vm::ptr<CellSpurs> spurs)

 if ((spurs->flags1 & SF1_EXIT_IF_NO_WORK) == 0)
 {
-verify(HERE), (spurs->handlerExiting == 1);
+ensure((spurs->handlerExiting == 1));

 return sys_ppu_thread_exit(ppu, 0);
 }
@@ -790,16 +790,16 @@ s32 _spurs::wakeup_shutdown_completion_waiter(ppu_thread& ppu, vm::ptr<CellSpurs
 {
 wklF->hook(ppu, spurs, wid, wklF->hookArg);

-verify(HERE), (wklEvent->load() & 0x01);
-verify(HERE), (wklEvent->load() & 0x02);
-verify(HERE), (wklEvent->load() & 0x20) == 0;
+ensure((wklEvent->load() & 0x01));
+ensure((wklEvent->load() & 0x02));
+ensure((wklEvent->load() & 0x20) == 0);
 wklEvent->fetch_or(0x20);
 }

 s32 rc = CELL_OK;
 if (!wklF->hook || wklEvent->load() & 0x10)
 {
-verify(HERE), (wklF->x28 == 2u);
+ensure((wklF->x28 == 2u));
 rc = sys_semaphore_post(ppu, static_cast<u32>(wklF->sem), 1);
 }

@@ -2335,8 +2335,8 @@ s32 _spurs::add_workload(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::ptr<u32>
 u32 index = wnum & 0xf;
 if (wnum <= 15)
 {
-verify(HERE), (spurs->wklCurrentContention[wnum] & 0xf) == 0;
-verify(HERE), (spurs->wklPendingContention[wnum] & 0xf) == 0;
+ensure((spurs->wklCurrentContention[wnum] & 0xf) == 0);
+ensure((spurs->wklPendingContention[wnum] & 0xf) == 0);
 spurs->wklState1[wnum] = SPURS_WKL_STATE_PREPARING;
 spurs->wklStatus1[wnum] = 0;
 spurs->wklEvent1[wnum] = 0;
@@ -2371,8 +2371,8 @@ s32 _spurs::add_workload(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::ptr<u32>
 }
 else
 {
-verify(HERE), (spurs->wklCurrentContention[index] & 0xf0) == 0;
-verify(HERE), (spurs->wklPendingContention[index] & 0xf0) == 0;
+ensure((spurs->wklCurrentContention[index] & 0xf0) == 0);
+ensure((spurs->wklPendingContention[index] & 0xf0) == 0);
 spurs->wklState2[index] = SPURS_WKL_STATE_PREPARING;
 spurs->wklStatus2[index] = 0;
 spurs->wklEvent2[index] = 0;
@@ -2447,7 +2447,7 @@ s32 _spurs::add_workload(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::ptr<u32>
 (wnum < CELL_SPURS_MAX_WORKLOAD ? op.wklState1[wnum] : op.wklState2[wnum % 16]) = SPURS_WKL_STATE_RUNNABLE;
 });

-verify(HERE), (res_wkl <= 31);
+ensure((res_wkl <= 31));
 vm::light_op(spurs->sysSrvMsgUpdateWorkload, [](atomic_t<u8>& v){ v.release(0xff); });
 vm::light_op(spurs->sysSrvMessage, [](atomic_t<u8>& v){ v.release(0xff); });
 return CELL_OK;
@@ -2612,7 +2612,7 @@ s32 cellSpursWaitForWorkloadShutdown(ppu_thread& ppu, vm::ptr<CellSpurs> spurs,

 if (wait_sema)
 {
-verify(HERE), sys_semaphore_wait(ppu, static_cast<u32>(info.sem), 0) == 0;
+ensure(sys_semaphore_wait(ppu, static_cast<u32>(info.sem), 0) == 0);
 }

 // Reverified
@@ -2657,7 +2657,7 @@ s32 cellSpursRemoveWorkload(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid)

 if (spurs->wklFlagReceiver == wid)
 {
-verify(HERE), ppu_execute<&_cellSpursWorkloadFlagReceiver>(ppu, spurs, wid, 0) == 0;
+ensure(ppu_execute<&_cellSpursWorkloadFlagReceiver>(ppu, spurs, wid, 0) == 0);
 }

 s32 rc;

@@ -959,7 +959,7 @@ error_code _cellSyncLFQueueGetPushPointer(ppu_thread& ppu, vm::ptr<CellSyncLFQue
 }
 }

-verify(HERE), sys_event_queue_receive(ppu, queue->m_eq_id, vm::null, 0) == CELL_OK;
+ensure(sys_event_queue_receive(ppu, queue->m_eq_id, vm::null, 0) == CELL_OK);
 var1 = 1;
 }
 }
@@ -1051,7 +1051,7 @@ error_code _cellSyncLFQueueCompletePushPointer(ppu_thread& ppu, vm::ptr<CellSync

 if (var9 > 1 && static_cast<u32>(var8) > 1)
 {
-verify(HERE), (16 - var2 <= 1);
+ensure((16 - var2 <= 1));
 }

 s32 var11 = (pack >> 10) & 0x1f;
@@ -1083,11 +1083,11 @@ error_code _cellSyncLFQueueCompletePushPointer(ppu_thread& ppu, vm::ptr<CellSync

 if (queue->push2.compare_and_swap_test(old, push2))
 {
-verify(HERE), (var2 + var4 < 16);
+ensure((var2 + var4 < 16));
 if (var6 != umax)
 {
-verify(HERE), (queue->push3.compare_and_swap_test(old2, push3));
-verify(HERE), (fpSendSignal);
+ensure((queue->push3.compare_and_swap_test(old2, push3)));
+ensure((fpSendSignal));
 return not_an_error(fpSendSignal(ppu, vm::cast(queue->m_eaSignal.addr(), HERE), var6));
 }
 else
@@ -1258,7 +1258,7 @@ error_code _cellSyncLFQueueGetPopPointer(ppu_thread& ppu, vm::ptr<CellSyncLFQueu
 }
 }

-verify(HERE), (sys_event_queue_receive(ppu, queue->m_eq_id, vm::null, 0) == CELL_OK);
+ensure((sys_event_queue_receive(ppu, queue->m_eq_id, vm::null, 0) == CELL_OK));
 var1 = 1;
 }
 }
@@ -1356,7 +1356,7 @@ error_code _cellSyncLFQueueCompletePopPointer(ppu_thread& ppu, vm::ptr<CellSyncL

 if (var9 > 1 && static_cast<u32>(var8) > 1)
 {
-verify(HERE), (16 - var2 <= 1);
+ensure((16 - var2 <= 1));
 }

 s32 var11 = (pack >> 10) & 0x1f;
@@ -1386,8 +1386,8 @@ error_code _cellSyncLFQueueCompletePopPointer(ppu_thread& ppu, vm::ptr<CellSyncL
 {
 if (var6 != umax)
 {
-verify(HERE), (queue->pop3.compare_and_swap_test(old2, pop3));
-verify(HERE), (fpSendSignal);
+ensure((queue->pop3.compare_and_swap_test(old2, pop3)));
+ensure((fpSendSignal));
 return not_an_error(fpSendSignal(ppu, vm::cast(queue->m_eaSignal.addr(), HERE), var6));
 }
 else

@@ -576,7 +576,7 @@ static error_code vdecQueryAttr(s32 type, u32 profile, u32 spec_addr /* may be 0

 attr->decoderVerLower = decoderVerLower;
 attr->decoderVerUpper = 0x4840010;
-attr->memSize = !spec_addr ? verify(HERE, memSize) : 4 * 1024 * 1024;
+attr->memSize = !spec_addr ? ensure(memSize) : 4 * 1024 * 1024;
 attr->cmdDepth = 4;
 return CELL_OK;
 }
@@ -973,7 +973,7 @@ error_code cellVdecGetPicItem(u32 handle, vm::pptr<CellVdecPicItem> picItem)
 info->codecType = vdec->type;
 info->startAddr = 0x00000123; // invalid value (no address for picture)
 const int buffer_size = av_image_get_buffer_size(vdec->ctx->pix_fmt, vdec->ctx->width, vdec->ctx->height, 1);
-verify(HERE), (buffer_size >= 0);
+ensure(buffer_size >= 0);
 info->size = align<u32>(buffer_size, 128);
 info->auNum = 1;
 info->auPts[0].lower = static_cast<u32>(pts);

@@ -238,7 +238,7 @@ error_code cellVideoOutGetConfiguration(u32 videoOut, vm::ptr<CellVideoOutConfig
 config->aspect = g_video_out_aspect_id.at(g_cfg.video.aspect_ratio);

 CellVideoOutResolution res;
-verify("Invalid video configuration" HERE), _IntGetResolutionInfo(config->resolutionId, &res) == CELL_OK;
+ensure(_IntGetResolutionInfo(config->resolutionId, &res) == CELL_OK); // "Invalid video configuration"

 config->pitch = 4 * res.width;
 }

@@ -1,4 +1,4 @@
 #include "stdafx.h"
 #include "Emu/System.h"
 #include "Emu/VFS.h"
 #include "Emu/Cell/PPUModule.h"

@@ -34,12 +34,12 @@ struct sys_lwmutex_locker
 : ppu(ppu)
 , mutex(mutex)
 {
-verify(HERE), sys_lwmutex_lock(ppu, mutex, 0) == CELL_OK;
+ensure(sys_lwmutex_lock(ppu, mutex, 0) == CELL_OK);
 }

 ~sys_lwmutex_locker() noexcept(false)
 {
-verify(HERE), sys_lwmutex_unlock(ppu, mutex) == CELL_OK;
+ensure(sys_lwmutex_unlock(ppu, mutex) == CELL_OK);
 }
 };

@@ -34,7 +34,7 @@ extern void libio_sys_config_init()
 if (cfg->init_ctr++ == 0)
 {
 // Belongs to "_cfg_evt_hndlr" thread (8k stack)
-cfg->stack_addr = verify(HERE, vm::alloc(0x2000, vm::stack, 4096));
+cfg->stack_addr = (ensure(vm::alloc(0x2000, vm::stack, 4096)));
 }
 }
@@ -46,7 +46,7 @@ extern void libio_sys_config_end()

 if (cfg->init_ctr-- == 1)
 {
-verify(HERE), vm::dealloc(std::exchange(cfg->stack_addr, 0), vm::stack);
+ensure(vm::dealloc(std::exchange(cfg->stack_addr, 0), vm::stack));
 }
 }

@@ -92,7 +92,7 @@ s16 __sys_look_ctype_table(s32 ch)
 {
 sysPrxForUser.trace("__sys_look_ctype_table(ch=%d)", ch);

-verify("__sys_look_ctype_table" HERE), ch >= -1 && ch <= 127;
+ensure(ch >= -1 && ch <= 127); // "__sys_look_ctype_table"

 return s_ctype_table[ch + 1];
 }
@@ -101,7 +101,7 @@ s32 _sys_tolower(s32 ch)
 {
 sysPrxForUser.trace("_sys_tolower(ch=%d)", ch);

-verify("_sys_tolower" HERE), ch >= -1 && ch <= 127;
+ensure(ch >= -1 && ch <= 127); // "_sys_tolower"

 return s_ctype_table[ch + 1] & 1 ? ch + 0x20 : ch;
 }
@@ -110,7 +110,7 @@ s32 _sys_toupper(s32 ch)
 {
 sysPrxForUser.trace("_sys_toupper(ch=%d)", ch);

-verify("_sys_toupper" HERE), ch >= -1 && ch <= 127;
+ensure(ch >= -1 && ch <= 127); // "_sys_toupper"

 return s_ctype_table[ch + 1] & 2 ? ch - 0x20 : ch;
 }

@@ -172,7 +172,7 @@ void sys_ppu_thread_exit(ppu_thread& ppu, u64 val)
 sysPrxForUser.trace("sys_ppu_thread_exit(val=0x%llx)", val);

 // Call registered atexit functions
-verify(HERE), !sys_lwmutex_lock(ppu, g_ppu_atexit_lwm, 0);
+ensure(!sys_lwmutex_lock(ppu, g_ppu_atexit_lwm, 0));

 for (auto ptr : *g_ppu_atexit)
 {
@@ -182,7 +182,7 @@ void sys_ppu_thread_exit(ppu_thread& ppu, u64 val)
 }
 }

-verify(HERE), !sys_lwmutex_unlock(ppu, g_ppu_atexit_lwm);
+ensure(!sys_lwmutex_unlock(ppu, g_ppu_atexit_lwm));

 // Deallocate TLS
 ppu_free_tls(vm::cast(ppu.gpr[13], HERE) - 0x7030);
@@ -239,7 +239,7 @@ void sys_ppu_thread_once(ppu_thread& ppu, vm::ptr<s32> once_ctrl, vm::ptr<void()
 {
 sysPrxForUser.notice("sys_ppu_thread_once(once_ctrl=*0x%x, init=*0x%x)", once_ctrl, init);

-verify(HERE), sys_mutex_lock(ppu, *g_ppu_once_mutex, 0) == CELL_OK;
+ensure(sys_mutex_lock(ppu, *g_ppu_once_mutex, 0) == CELL_OK);

 if (*once_ctrl == SYS_PPU_THREAD_ONCE_INIT)
 {
@@ -248,7 +248,7 @@ void sys_ppu_thread_once(ppu_thread& ppu, vm::ptr<s32> once_ctrl, vm::ptr<void()
 *once_ctrl = SYS_PPU_THREAD_DONE_INIT;
 }

-verify(HERE), sys_mutex_unlock(ppu, *g_ppu_once_mutex) == CELL_OK;
+ensure(sys_mutex_unlock(ppu, *g_ppu_once_mutex) == CELL_OK);
 }

 error_code sys_interrupt_thread_disestablish(ppu_thread& ppu, u32 ih)

@@ -1244,13 +1244,13 @@ struct ppu_acontext

 r.imin = (min + ~mask) & mask;
 r.imax = max & mask;
-verify("Impossible range" HERE), r.imin <= r.imax;
+ensure(r.imin <= r.imax); // "Impossible range"
 }
 else
 {
 r.imin = min & mask;
 r.imax = (max + ~mask) & mask;
-verify("Impossible range" HERE), r.imin >= r.imax;
+ensure(r.imin >= r.imax); // "Impossible range"
 }

 // Fix const values

@@ -2162,7 +2162,7 @@ bool ppu_interpreter::VSPLTB(ppu_thread& ppu, ppu_opcode_t op)
 bool ppu_interpreter::VSPLTH(ppu_thread& ppu, ppu_opcode_t op)
 {
 auto& d = ppu.vr[op.vd];
-verify(HERE), (op.vuimm < 8);
+ensure((op.vuimm < 8));

 u16 hword = ppu.vr[op.vb]._u16[7 - op.vuimm];

@@ -2212,7 +2212,7 @@ bool ppu_interpreter::VSPLTISW(ppu_thread& ppu, ppu_opcode_t op)
 bool ppu_interpreter::VSPLTW(ppu_thread& ppu, ppu_opcode_t op)
 {
 auto& d = ppu.vr[op.vd];
-verify(HERE), (op.vuimm < 4);
+ensure((op.vuimm < 4));

 u32 word = ppu.vr[op.vb]._u32[3 - op.vuimm];

@@ -1624,7 +1624,7 @@ void ppu_load_exec(const ppu_exec_object& elf)
 if (prog.p_type == 0x1u /* LOAD */ && prog.p_memsz && (prog.p_flags & 0x2) == 0u /* W */)
 {
 // Set memory protection to read-only when necessary
-verify(HERE), vm::page_protect(addr, ::align(size, 0x1000), 0, 0, vm::page_writable);
+ensure(vm::page_protect(addr, ::align(size, 0x1000), 0, 0, vm::page_writable));
 }
 }
 }

@@ -1,4 +1,4 @@
 #include "stdafx.h"
 #include "Utilities/sysinfo.h"
 #include "Utilities/JIT.h"
 #include "Crypto/sha1.h"
@@ -1166,7 +1166,7 @@ extern void sse_cellbe_stvrx_v0(u64 addr, __m128i a);

 void ppu_trap(ppu_thread& ppu, u64 addr)
 {
-verify(HERE), (addr & (~u64{UINT32_MAX} | 0x3)) == 0;
+ensure((addr & (~u64{UINT32_MAX} | 0x3)) == 0);
 ppu.cia = static_cast<u32>(addr);

 u32 add = static_cast<u32>(g_cfg.core.stub_ppu_traps) * 4;

@@ -264,12 +264,11 @@ Value* PPUTranslator::GetAddr(u64 _add)

 Type* PPUTranslator::ScaleType(Type* type, s32 pow2)
 {
-verify(HERE), (type->getScalarType()->isIntegerTy());
-verify(HERE), pow2 > -32, pow2 < 32;
+ensure(type->getScalarType()->isIntegerTy());
+ensure(pow2 > -32 && pow2 < 32);

 uint scaled = type->getScalarSizeInBits();

-verify(HERE), (scaled & (scaled - 1)) == 0;
+ensure((scaled & (scaled - 1)) == 0);

 if (pow2 > 0)
 {
@@ -280,7 +279,7 @@ Type* PPUTranslator::ScaleType(Type* type, s32 pow2)
 scaled >>= -pow2;
 }

-verify(HERE), (scaled != 0);
+ensure(scaled);
 const auto new_type = m_ir->getIntNTy(scaled);
 const auto vec_type = dyn_cast<VectorType>(type);
 return vec_type ? VectorType::get(new_type, vec_type->getNumElements(), false) : cast<Type>(new_type);

@@ -289,7 +289,7 @@ spu_function_t spu_recompiler::compile(spu_program&& _func)
 const u32 starta = start & -64;
 const u32 enda = ::align(end, 64);
 const u32 sizea = (enda - starta) / 64;
-verify(HERE), sizea;
+ensure(sizea);

 // Initialize pointers
 c->lea(x86::rax, x86::qword_ptr(label_code));
@@ -370,7 +370,7 @@ spu_function_t spu_recompiler::compile(spu_program&& _func)
 const u32 starta = start & -32;
 const u32 enda = ::align(end, 32);
 const u32 sizea = (enda - starta) / 32;
-verify(HERE), sizea;
+ensure(sizea);

 if (sizea == 1)
 {
@@ -492,7 +492,7 @@ spu_function_t spu_recompiler::compile(spu_program&& _func)
 const u32 starta = start & -32;
 const u32 enda = ::align(end, 32);
 const u32 sizea = (enda - starta) / 32;
-verify(HERE), sizea;
+ensure(sizea);

 if (sizea == 1)
 {
@@ -1154,7 +1154,7 @@ void spu_recompiler::branch_indirect(spu_opcode_t op, bool jt, bool ret)
 const u32 end = instr_labels.rbegin()->first + 4;

 // Load local indirect jump address, check local bounds
-verify(HERE), start == m_base;
+ensure(start == m_base);
 Label fail = c->newLabel();
 c->mov(qw1->r32(), *addr);
 c->sub(qw1->r32(), pc0->r32());

@@ -708,7 +708,7 @@ spu_function_t spu_runtime::rebuild_ubertrampoline(u32 id_inst)
 // Write jump instruction with rel32 immediate
 auto make_jump = [&](u8 op, auto target)
 {
-verify("Asm overflow" HERE), raw + 8 <= wxptr + size0 * 22 + 16;
+ensure(raw + 8 <= wxptr + size0 * 22 + 16);

 // Fallback to dispatch if no target
 const u64 taddr = target ? reinterpret_cast<u64>(target) : reinterpret_cast<u64>(tr_dispatch);
@@ -716,13 +716,13 @@ spu_function_t spu_runtime::rebuild_ubertrampoline(u32 id_inst)
 // Compute the distance
 const s64 rel = taddr - reinterpret_cast<u64>(raw) - (op != 0xe9 ? 6 : 5);

-verify(HERE), rel >= INT32_MIN, rel <= INT32_MAX;
+ensure(rel >= INT32_MIN && rel <= INT32_MAX);

 if (op != 0xe9)
 {
 // First jcc byte
 *raw++ = 0x0f;
-verify(HERE), (op >> 4) == 0x8;
+ensure((op >> 4) == 0x8);
 }

 *raw++ = op;
@@ -757,7 +757,7 @@ spu_function_t spu_runtime::rebuild_ubertrampoline(u32 id_inst)
 u32 size2 = w.size - size1;
 std::advance(it2, w.size / 2);

-while (verify("spu_runtime::work::level overflow" HERE, w.level != 0xffff))
+while (ensure(w.level < UINT16_MAX))
 {
 it = it2;
 size1 = w.size - size2;
@@ -844,7 +844,7 @@ spu_function_t spu_runtime::rebuild_ubertrampoline(u32 id_inst)
 break;
 }

-verify(HERE), it != w.beg;
+ensure(it != w.beg);
 size1--;
 size2++;
 }
@@ -857,7 +857,7 @@ spu_function_t spu_runtime::rebuild_ubertrampoline(u32 id_inst)
 }

 // Emit 32-bit comparison
-verify("Asm overflow" HERE), raw + 12 <= wxptr + size0 * 22 + 16;
+ensure(raw + 12 <= wxptr + size0 * 22 + 16); // "Asm overflow"

 if (w.from != w.level)
 {
@@ -1512,7 +1512,7 @@ spu_program spu_recompiler_base::analyse(const be_t<u32>* ls, u32 entry_point)
 jt_abs.clear();
 }

-verify(HERE), jt_abs.size() != jt_rel.size();
+ensure(jt_abs.size() != jt_rel.size());
 }

 if (jt_abs.size() >= jt_rel.size())
@@ -1939,7 +1939,7 @@ spu_program spu_recompiler_base::analyse(const be_t<u32>* ls, u32 entry_point)
 }
 else if (u32& raw_val = result.data[new_size])
 {
-verify(HERE), raw_val == std::bit_cast<u32, be_t<u32>>(data);
+ensure(raw_val == std::bit_cast<u32, be_t<u32>>(data));
 }
 else
 {
@@ -3428,7 +3428,7 @@ class spu_llvm_recompiler : public spu_recompiler_base, public cpu_translator
 else if (!callee)
 {
 // Create branch patchpoint if chunk == nullptr
-verify(HERE), m_finfo, !m_finfo->fn || m_function == m_finfo->chunk;
+ensure(m_finfo && (!m_finfo->fn || m_function == m_finfo->chunk));

 // Register under a unique linkable name
 const std::string ppname = fmt::format("%s-pp-%u", m_hash, m_pp_id++);
@@ -3448,7 +3448,7 @@ class spu_llvm_recompiler : public spu_recompiler_base, public cpu_translator
 base_pc = m_ir->getInt32(0);
 }

-verify(HERE), callee;
+ensure(callee);
 auto call = m_ir->CreateCall(callee, {m_thread, m_lsptr, base_pc ? base_pc : m_base_pc});
 auto func = m_finfo ? m_finfo->chunk : llvm::dyn_cast<llvm::Function>(callee.getCallee());
 call->setCallingConv(func->getCallingConv());
@@ -3484,7 +3484,7 @@ class spu_llvm_recompiler : public spu_recompiler_base, public cpu_translator
 r3 = get_reg_fixed<u32[4]>(3).value;
 }

-const auto _call = m_ir->CreateCall(verify(HERE, fn), {m_thread, m_lsptr, m_base_pc, sp, r3});
+const auto _call = m_ir->CreateCall(ensure(fn), {m_thread, m_lsptr, m_base_pc, sp, r3});

 _call->setCallingConv(fn->getCallingConv());

@@ -3590,7 +3590,7 @@ class spu_llvm_recompiler : public spu_recompiler_base, public cpu_translator

 if (absolute)
 {
-verify(HERE), !m_finfo->fn;
+ensure(!m_finfo->fn);

 const auto next = llvm::BasicBlock::Create(m_context, "", m_function);
 const auto fail = llvm::BasicBlock::Create(m_context, "", m_function);
@@ -3632,7 +3632,7 @@ class spu_llvm_recompiler : public spu_recompiler_base, public cpu_translator

 if (absolute)
 {
-verify(HERE), !m_finfo->fn;
+ensure(!m_finfo->fn);

 m_ir->CreateStore(m_ir->getInt32(target), spu_ptr<u32>(&spu_thread::pc), true);
 }
@@ -3646,7 +3646,7 @@ class spu_llvm_recompiler : public spu_recompiler_base, public cpu_translator
 return result;
 }

-verify(HERE), !absolute;
+ensure(!absolute);

 auto& result = m_blocks[target].block;

@@ -3790,7 +3790,7 @@ class spu_llvm_recompiler : public spu_recompiler_base, public cpu_translator

 llvm::Value* double_to_xfloat(llvm::Value* val)
 {
-verify("double_to_xfloat" HERE), val, val->getType() == get_type<f64[4]>();
+ensure(val && val->getType() == get_type<f64[4]>());

 const auto d = double_as_uint64(val);
 const auto s = m_ir->CreateAnd(m_ir->CreateLShr(d, 32), 0x80000000);
@@ -3801,7 +3801,7 @@ class spu_llvm_recompiler : public spu_recompiler_base, public cpu_translator

 llvm::Value* xfloat_to_double(llvm::Value* val)
 {
-verify("xfloat_to_double" HERE), val, val->getType() == get_type<u32[4]>();
+ensure(val && val->getType() == get_type<u32[4]>());

 const auto x = m_ir->CreateZExt(val, get_type<u64[4]>());
 const auto s = m_ir->CreateShl(m_ir->CreateAnd(x, 0x80000000), 32);
@@ -3815,7 +3815,7 @@ class spu_llvm_recompiler : public spu_recompiler_base, public cpu_translator
 // Clamp double values to ±Smax, flush values smaller than ±Smin to positive zero
 llvm::Value* xfloat_in_double(llvm::Value* val)
 {
-verify("xfloat_in_double" HERE), val, val->getType() == get_type<f64[4]>();
+ensure(val && val->getType() == get_type<f64[4]>());

 const auto smax = uint64_as_double(splat<u64[4]>(0x47ffffffe0000000).eval(m_ir));
 const auto smin = uint64_as_double(splat<u64[4]>(0x3810000000000000).eval(m_ir));
@@ -4002,7 +4002,7 @@ class spu_llvm_recompiler : public spu_recompiler_base, public cpu_translator
 llvm::StoreInst* dummy{};

 // Check
-verify(HERE), !m_block || m_regmod[m_pos / 4] == index;
+ensure(!m_block || m_regmod[m_pos / 4] == index);

 // Test for special case
 const bool is_xfloat = value->getType() == get_type<f64[4]>();
@@ -4562,7 +4562,7 @@ public:

 m_ir->SetInsertPoint(cblock);

-verify(HERE), bfound->second.block_end->getTerminator();
+ensure(bfound->second.block_end->getTerminator());
 }

 _phi->addIncoming(value, bfound->second.block_end);
@@ -4668,7 +4668,7 @@ public:
 m_ir->CreateBr(add_block(target));
 }

-verify(HERE), m_block->block_end;
+ensure(m_block->block_end);
 }
 }

@@ -335,7 +335,7 @@ namespace spu
 busy_wait(count);
 }

-verify(HERE), !spu.check_state();
+ensure(!spu.check_state());
 }

 atomic_instruction_table[pc_offset]++;
@@ -1540,7 +1540,7 @@ void spu_thread::cpu_return()
 {
 ch_in_mbox.clear();

-if (verify(HERE, group->running--) == 1)
+if (ensure(group->running)-- == 1)
 {
 {
 std::lock_guard lock(group->mutex);
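A subtle case in the hunk above: the old form decremented inside the check, verify(HERE, group->running--) == 1, while the new form checks first and then decrements the reference that ensure() forwards back, ensure(group->running)-- == 1. Both compare the pre-decrement value against 1, but the nonzero assertion now runs before the decrement instead of on the value the decrement returned; and if group->running is an atomic counter, the load for the check and the decrement are now two separate atomic operations rather than one.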
@ -1712,12 +1712,12 @@ spu_thread::spu_thread(lv2_spu_group* group, u32 index, std::string_view name, u

if (!group)
{
verify(HERE), vm::get(vm::spu)->falloc(RAW_SPU_BASE_ADDR + RAW_SPU_OFFSET * index, SPU_LS_SIZE, &shm);
ensure(vm::get(vm::spu)->falloc(RAW_SPU_BASE_ADDR + RAW_SPU_OFFSET * index, SPU_LS_SIZE, &shm));
}
else
{
// 0x1000 indicates falloc to allocate page with no access rights in base memory
verify(HERE), vm::get(vm::spu)->falloc(SPU_FAKE_BASE_ADDR + SPU_LS_SIZE * (cpu_thread::id & 0xffffff), SPU_LS_SIZE, &shm, 0x1000);
ensure(vm::get(vm::spu)->falloc(SPU_FAKE_BASE_ADDR + SPU_LS_SIZE * (cpu_thread::id & 0xffffff), SPU_LS_SIZE, &shm, 0x1000));
}

vm::writer_lock(0);
@ -1726,7 +1726,7 @@ spu_thread::spu_thread(lv2_spu_group* group, u32 index, std::string_view name, u
{
// Map LS mirrors
const auto ptr = addr + (i * SPU_LS_SIZE);
verify(HERE), shm->map_critical(ptr) == ptr;
ensure(shm->map_critical(ptr) == ptr);
}

// Use the middle mirror
@ -3592,7 +3592,7 @@ u32 spu_thread::get_ch_count(u32 ch)
default: break;
}

verify(HERE), ch < 128u;
ensure(ch < 128u);
spu_log.error("Unknown/illegal channel in RCHCNT (ch=%s)", spu_ch_name[ch]);
return 0; // Default count
}
@ -4310,7 +4310,7 @@ bool spu_thread::stop_and_signal(u32 code)
if (is_stopped())
{
// The thread group cannot be stopped while waiting for an event
verify(HERE), !(state & cpu_flag::stop);
ensure(!(state & cpu_flag::stop));
return false;
}

@ -128,7 +128,7 @@ error_code sys_cond_signal_all(ppu_thread& ppu, u32 cond_id)
{
if (cond.mutex->try_own(*cpu, cpu->id))
{
verify(HERE), !std::exchange(result, cpu);
ensure(!std::exchange(result, cpu));
}
}

@ -169,7 +169,7 @@ error_code sys_cond_signal_to(ppu_thread& ppu, u32 cond_id, u32 thread_id)
{
if (cpu->id == thread_id)
{
verify(HERE), cond.unqueue(cond.sq, cpu);
ensure(cond.unqueue(cond.sq, cpu));

cond.waiters--;

@ -296,7 +296,7 @@ error_code sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout)
}

// Verify ownership
verify(HERE), cond->mutex->owner >> 1 == ppu.id;
ensure(cond->mutex->owner >> 1 == ppu.id);

// Restore the recursive value
cond->mutex->lock_count.release(static_cast<u32>(cond.ret));

@ -207,7 +207,7 @@ struct lv2_file::file_view : fs::file_base
const u64 old_pos = m_file->file.pos();
const u64 new_pos = m_file->file.seek(m_off + m_pos);
const u64 result = m_file->file.read(buffer, size);
verify(HERE), old_pos == m_file->file.seek(old_pos);
ensure(old_pos == m_file->file.seek(old_pos));

m_pos += result;
return result;
@ -1306,7 +1306,7 @@ error_code sys_fs_fcntl(ppu_thread& ppu, u32 fd, u32 op, vm::ptr<void> _arg, u32
? file->op_read(arg->buf, arg->size)
: file->op_write(arg->buf, arg->size);

verify(HERE), old_pos == file->file.seek(old_pos);
ensure(old_pos == file->file.seek(old_pos));

arg->out_code = CELL_OK;
return CELL_OK;

@ -129,7 +129,7 @@ error_code _sys_lwcond_signal(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u6

if (mode != 2)
{
verify(HERE), !mutex->signaled;
ensure(!mutex->signaled);
std::lock_guard lock(mutex->mutex);

if (mode == 3 && !mutex->sq.empty()) [[unlikely]]
@ -140,7 +140,7 @@ error_code _sys_lwcond_signal(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u6
}
else if (mode == 1)
{
verify(HERE), mutex->add_waiter(result);
ensure(mutex->add_waiter(result));
result = nullptr;
}
}
@ -229,9 +229,9 @@ error_code _sys_lwcond_signal_all(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id

if (mode == 1)
{
verify(HERE), !mutex->signaled;
ensure(!mutex->signaled);
std::lock_guard lock(mutex->mutex);
verify(HERE), mutex->add_waiter(cpu);
ensure(mutex->add_waiter(cpu));
}
else
{

@ -61,7 +61,7 @@ error_code sys_memory_allocate(cpu_thread& cpu, u32 size, u64 flags, vm::ptr<u32
{
if (u32 addr = area->alloc(size, nullptr, align))
{
verify(HERE), !g_fxo->get<sys_memory_address_table>()->addrs[addr >> 16].exchange(dct);
ensure(!g_fxo->get<sys_memory_address_table>()->addrs[addr >> 16].exchange(dct));

if (alloc_addr)
{
@ -132,7 +132,7 @@ error_code sys_memory_allocate_from_container(cpu_thread& cpu, u32 size, u32 cid
{
if (u32 addr = area->alloc(size))
{
verify(HERE), !g_fxo->get<sys_memory_address_table>()->addrs[addr >> 16].exchange(ct.ptr.get());
ensure(!g_fxo->get<sys_memory_address_table>()->addrs[addr >> 16].exchange(ct.ptr.get()));

if (alloc_addr)
{
@ -164,7 +164,7 @@ error_code sys_memory_free(cpu_thread& cpu, u32 addr)
return {CELL_EINVAL, addr};
}

const auto size = verify(HERE, vm::dealloc(addr));
const auto size = (ensure(vm::dealloc(addr)));
reader_lock{id_manager::g_mutex}, ct->used -= size;
return CELL_OK;
}

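Note how the value-returning verifies translate: `verify(HERE, x)` used to evaluate to x, and ensure(x) keeps that property, so assignments like the sys_memory_free hunk above carry over directly. A small hedged example with the ensure_sketch helper from earlier and a hypothetical stand-in for vm::dealloc:

// dealloc_sketch is a hypothetical stand-in for vm::dealloc():
// it returns the freed size, or 0 on failure.
unsigned dealloc_sketch(unsigned addr)
{
	return addr ? 4096 : 0; // pretend every non-null address frees one page
}

void free_example(unsigned addr)
{
	// Aborts if dealloc_sketch() returned 0; otherwise yields the size,
	// just like the value-returning verify(HERE, x) form it replaces.
	const auto size = ensure_sketch(dealloc_sketch(addr));
	(void)size; // e.g. subtract from a usage counter afterwards
}
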
@ -1392,7 +1392,7 @@ error_code sys_net_bnet_accept(ppu_thread& ppu, s32 s, vm::ptr<sys_net_sockaddr>

if (addr)
{
verify(HERE), native_addr.ss_family == AF_INET;
ensure(native_addr.ss_family == AF_INET);

vm::ptr<sys_net_sockaddr_in> paddr = vm::cast(addr.addr());

@ -1825,7 +1825,7 @@ error_code sys_net_bnet_getpeername(ppu_thread& ppu, s32 s, vm::ptr<sys_net_sock

if (::getpeername(sock.socket, reinterpret_cast<struct sockaddr*>(&native_addr), &native_addrlen) == 0)
{
verify(HERE), native_addr.ss_family == AF_INET;
ensure(native_addr.ss_family == AF_INET);

return {};
}
@ -1883,7 +1883,7 @@ error_code sys_net_bnet_getsockname(ppu_thread& ppu, s32 s, vm::ptr<sys_net_sock

if (::getsockname(sock.socket, reinterpret_cast<struct sockaddr*>(&native_addr), &native_addrlen) == 0)
{
verify(HERE), native_addr.ss_family == AF_INET;
ensure(native_addr.ss_family == AF_INET);

return {};
}
@ -2494,7 +2494,7 @@ error_code sys_net_bnet_recvfrom(ppu_thread& ppu, s32 s, vm::ptr<void> buf, u32
// addr is set earlier for P2P socket
if (addr && type != SYS_NET_SOCK_DGRAM_P2P && type != SYS_NET_SOCK_STREAM_P2P)
{
verify(HERE), native_addr.ss_family == AF_INET;
ensure(native_addr.ss_family == AF_INET);

vm::ptr<sys_net_sockaddr_in> paddr = vm::cast(addr.addr());

@ -2661,7 +2661,7 @@ error_code sys_net_bnet_sendto(ppu_thread& ppu, s32 s, vm::cptr<void> buf, u32 l
if (nph->is_dns(s))
{
const s32 ret_analyzer = nph->analyze_dns_packet(s, reinterpret_cast<const u8*>(_buf.data()), len);


// If we're not connected just never send the packet and pretend we did
if (!nph->get_net_status())
{
@ -2816,7 +2816,7 @@ error_code sys_net_bnet_setsockopt(ppu_thread& ppu, s32 s, s32 level, s32 optnam
}

return {};
}
}

if (level == SYS_NET_SOL_SOCKET)
{

@ -1,4 +1,4 @@
#include "stdafx.h"
#include "stdafx.h"
#include "sys_ppu_thread.h"

#include "Emu/IdManager.h"
@ -159,7 +159,7 @@ error_code sys_ppu_thread_join(ppu_thread& ppu, u32 thread_id, vm::ptr<u64> vptr
const u64 vret = thread->gpr[3];

// Cleanup
verify(HERE), idm::remove_verify<named_thread<ppu_thread>>(thread_id, std::move(thread.ptr));
ensure(idm::remove_verify<named_thread<ppu_thread>>(thread_id, std::move(thread.ptr)));

if (!vptr)
{
@ -221,7 +221,7 @@ error_code sys_ppu_thread_detach(ppu_thread& ppu, u32 thread_id)

if (thread.ret == CELL_EAGAIN)
{
verify(HERE), idm::remove<named_thread<ppu_thread>>(thread_id);
ensure(idm::remove<named_thread<ppu_thread>>(thread_id));
}

return CELL_OK;

@ -428,7 +428,7 @@ error_code _sys_prx_start_module(ppu_thread& ppu, u32 id, u64 flags, vm::ptr<sys
case SYS_PRX_RESIDENT:
{
// No error code on invalid state, so throw on unexpected state
verify(HERE), prx->state.compare_and_swap_test(PRX_STATE_STARTING, PRX_STATE_STARTED);
ensure(prx->state.compare_and_swap_test(PRX_STATE_STARTING, PRX_STATE_STARTED));
return CELL_OK;
}
default:
@ -506,7 +506,7 @@ error_code _sys_prx_stop_module(ppu_thread& ppu, u32 id, u64 flags, vm::ptr<sys_
case 0:
{
// No error code on invalid state, so throw on unexpected state
verify(HERE), prx->state.compare_and_swap_test(PRX_STATE_STOPPING, PRX_STATE_STOPPED);
ensure(prx->state.compare_and_swap_test(PRX_STATE_STOPPING, PRX_STATE_STOPPED));
return CELL_OK;
}
case 1:

@ -466,7 +466,7 @@ error_code sys_rsx_context_attribute(u32 context_id, u32 package_id, u64 a3, u64
if ((a4 & 0x80000000) != 0)
{
// NOTE: There currently seem to only be 2 active heads on PS3
verify(HERE), a3 < 2;
ensure(a3 < 2);

// last half byte gives buffer, 0xf seems to trigger just last queued
u8 idx_check = a4 & 0xf;
@ -506,7 +506,7 @@ error_code sys_rsx_context_attribute(u32 context_id, u32 package_id, u64 a3, u64
case 0x103: // Display Queue
{
// NOTE: There currently seem to only be 2 active heads on PS3
verify(HERE), a3 < 2;
ensure(a3 < 2);

driverInfo.head[a3].lastQueuedBufferId = static_cast<u32>(a4);
driverInfo.head[a3].flipFlags |= 0x40000000 | (1 << a4);
@ -565,7 +565,7 @@ error_code sys_rsx_context_attribute(u32 context_id, u32 package_id, u64 a3, u64
}

// NOTE: There currently seem to only be 2 active heads on PS3
verify(HERE), a3 < 2;
ensure(a3 < 2);

driverInfo.head[a3].flipFlags.atomic_op([&](be_t<u32>& flipStatus)
{
@ -584,7 +584,7 @@ error_code sys_rsx_context_attribute(u32 context_id, u32 package_id, u64 a3, u64
//a5 high bits = ret.pitch = (pitch / 0x100) << 8;
//a5 low bits = ret.format = base | ((base + ((size - 1) / 0x10000)) << 13) | (comp << 26) | (1 << 30);

verify(HERE), a3 < std::size(render->tiles);
ensure(a3 < std::size(render->tiles));

if (!render->is_fifo_idle())
{
@ -626,7 +626,7 @@ error_code sys_rsx_context_attribute(u32 context_id, u32 package_id, u64 a3, u64
}

// Hardcoded value in gcm
verify(HERE), !!(a5 & (1 << 30));
ensure(a5 & (1 << 30));
}

std::lock_guard lock(rsx_cfg->mutex);
@ -669,7 +669,7 @@ error_code sys_rsx_context_attribute(u32 context_id, u32 package_id, u64 a3, u64
//a6 high = status0 = (zcullDir << 1) | (zcullFormat << 2) | ((sFunc & 0xF) << 12) | (sRef << 16) | (sMask << 24);
//a6 low = status1 = (0x2000 << 0) | (0x20 << 16);

verify(HERE), a3 < std::size(render->zculls);
ensure(a3 < std::size(render->zculls));

if (!render->is_fifo_idle())
{
@ -699,7 +699,8 @@ error_code sys_rsx_context_attribute(u32 context_id, u32 package_id, u64 a3, u64
}

// Hardcoded values in gcm
verify(HERE), !!(a4 & (1ull << 32)), (a6 & 0xFFFFFFFF) == 0u + ((0x2000 << 0) | (0x20 << 16));
ensure(a4 & (1ull << 32));
ensure((a6 & 0xFFFFFFFF) == 0u + ((0x2000 << 0) | (0x20 << 16)));
}

std::lock_guard lock(rsx_cfg->mutex);
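Where a verify chained several comma-separated conditions, the commit splits it into one ensure() per condition, as in the hunk above, so a failure pinpoints the exact predicate; the `!!` coercion also disappears because ensure() tests truthiness itself. A hedged sketch of the same split, again using the hypothetical ensure_sketch helper:

void check_flags_sketch(unsigned long long a4, unsigned long long a6)
{
	// One assertion per condition: a failure now identifies the exact
	// predicate, and no boolean coercion of the bit test is needed.
	ensure_sketch(a4 & (1ull << 32));
	ensure_sketch((a6 & 0xFFFFFFFF) == ((0x2000 << 0) | (0x20 << 16)));
}
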
@ -752,7 +753,7 @@ error_code sys_rsx_context_attribute(u32 context_id, u32 package_id, u64 a3, u64
case 0xFED: // hack: vblank command
{
// NOTE: There currently seem to only be 2 active heads on PS3
verify(HERE), a3 < 2;
ensure(a3 < 2);

// todo: this is wrong and should be 'second' vblank handler and freq, but since currently everything is reported as being 59.94, this should be fine
vm::_ref<u32>(render->device_addr + 0x30) = 1;

@ -259,7 +259,7 @@ error_code sys_rwlock_runlock(ppu_thread& ppu, u32 rw_lock_id)
{
rwlock->owner = 0;

verify(HERE), rwlock->rq.empty();
ensure(rwlock->rq.empty());
}
}
}

@ -150,13 +150,13 @@ error_code sys_semaphore_wait(ppu_thread& ppu, u32 sem_id, u64 timeout)
break;
}

verify(HERE), 0 > sem->val.fetch_op([](s32& val)
ensure(0 > sem->val.fetch_op([](s32& val)
{
if (val < 0)
{
val++;
}
});
}));

ppu.gpr[3] = CELL_ETIMEDOUT;
break;
@ -255,7 +255,7 @@ error_code sys_semaphore_post(ppu_thread& ppu, u32 sem_id, s32 count)

for (s32 i = 0; i < to_awake; i++)
{
sem->append(verify(HERE, sem->schedule<ppu_thread>(sem->sq, sem->protocol)));
sem->append((ensure(sem->schedule<ppu_thread>(sem->sq, sem->protocol))));
}

if (to_awake > 0)

@ -289,7 +289,7 @@ error_code _sys_spu_image_close(ppu_thread& ppu, vm::ptr<sys_spu_image> img)
return CELL_ESRCH;
}

verify(HERE), vm::dealloc(handle->segs.addr(), vm::main);
ensure(vm::dealloc(handle->segs.addr(), vm::main));
return CELL_OK;
}

@ -403,7 +403,7 @@ error_code sys_spu_thread_initialize(ppu_thread& ppu, vm::ptr<u32> thread, u32 g

const u32 tid = (inited << 24) | (group_id & 0xffffff);

verify(HERE), idm::import<named_thread<spu_thread>>([&]()
ensure(idm::import<named_thread<spu_thread>>([&]()
{
std::string full_name = fmt::format("SPU[0x%07x] ", tid);

@ -416,7 +416,7 @@ error_code sys_spu_thread_initialize(ppu_thread& ppu, vm::ptr<u32> thread, u32 g
group->threads[inited] = spu;
group->threads_map[spu_num] = static_cast<s8>(inited);
return spu;
});
}));

*thread = tid;

@ -682,7 +682,7 @@ error_code sys_spu_thread_group_destroy(ppu_thread& ppu, u32 id)
if (auto thread = t.get())
{
// Deallocate LS
verify(HERE), vm::get(vm::spu)->dealloc(SPU_FAKE_BASE_ADDR + SPU_LS_SIZE * (thread->id & 0xffffff), &thread->shm);
ensure(vm::get(vm::spu)->dealloc(SPU_FAKE_BASE_ADDR + SPU_LS_SIZE * (thread->id & 0xffffff), &thread->shm));

// Remove ID from IDM (destruction will occur in group destructor)
idm::remove<named_thread<spu_thread>>(thread->id);
@ -1848,7 +1848,7 @@ error_code sys_raw_spu_create(ppu_thread& ppu, vm::ptr<u32> id, vm::ptr<void> at

const u32 tid = idm::make<named_thread<spu_thread>>(fmt::format("RawSPU[0x%x] ", index), nullptr, index, "", index);

spu_thread::g_raw_spu_id[index] = verify("RawSPU ID" HERE, tid);
spu_thread::g_raw_spu_id[index] = (ensure(tid));

*id = index;

@ -1901,7 +1901,7 @@ error_code sys_isolated_spu_create(ppu_thread& ppu, vm::ptr<u32> id, vm::ptr<voi
thread->gpr[5] = v128::from64(0, arg3);
thread->gpr[6] = v128::from64(0, arg4);

spu_thread::g_raw_spu_id[index] = verify("IsoSPU ID" HERE, thread->id);
spu_thread::g_raw_spu_id[index] = (ensure(thread->id));

sys_spu_image img;
img.load(obj);
@ -1910,7 +1910,7 @@ error_code sys_isolated_spu_create(ppu_thread& ppu, vm::ptr<u32> id, vm::ptr<voi
img.deploy(thread->ls, image_info->segs.get_ptr(), image_info->nsegs);

thread->write_reg(ls_addr + RAW_SPU_PROB_OFFSET + SPU_NPC_offs, image_info->e_entry);
verify(HERE), idm::remove_verify<lv2_obj, lv2_spu_image>(img.entry_point, std::move(image_info));
ensure(idm::remove_verify<lv2_obj, lv2_spu_image>(img.entry_point, std::move(image_info)));

*id = index;
return CELL_OK;

@ -97,7 +97,7 @@ error_code sys_ss_access_control_engine(u64 pkg_id, u64 a2, u64 a3)
return CELL_ESRCH;
}

verify(HERE), a2 == static_cast<u64>(process_getpid());
ensure(a2 == static_cast<u64>(process_getpid()));
vm::write64(vm::cast(a3), authid);
break;
}

@ -165,7 +165,7 @@ public:

static void set_priority(cpu_thread& thread, s32 prio)
{
verify(HERE), prio + 512u < 3712;
ensure(prio + 512u < 3712);
awake(&thread, prio);
}

@ -130,7 +130,7 @@ u64 get_timebased_time()
{
#ifdef _WIN32
LARGE_INTEGER count;
verify(HERE), QueryPerformanceCounter(&count);
ensure(QueryPerformanceCounter(&count));

const u64 time = count.QuadPart;
const u64 freq = s_time_aux_info.perf_freq;
@ -138,7 +138,7 @@ u64 get_timebased_time()
return (time / freq * g_timebase_freq + time % freq * g_timebase_freq / freq) * g_cfg.core.clocks_scale / 100u;
#else
struct timespec ts;
verify(HERE), ::clock_gettime(CLOCK_MONOTONIC, &ts) == 0;
ensure(::clock_gettime(CLOCK_MONOTONIC, &ts) == 0);

return (static_cast<u64>(ts.tv_sec) * g_timebase_freq + static_cast<u64>(ts.tv_nsec) * g_timebase_freq / 1000000000ull) * g_cfg.core.clocks_scale / 100u;
#endif
@ -151,7 +151,7 @@ u64 get_system_time()
{
#ifdef _WIN32
LARGE_INTEGER count;
verify(HERE), QueryPerformanceCounter(&count);
ensure(QueryPerformanceCounter(&count));

const u64 time = count.QuadPart;
const u64 freq = s_time_aux_info.perf_freq;
@ -159,7 +159,7 @@ u64 get_system_time()
const u64 result = time / freq * 1000000ull + (time % freq) * 1000000ull / freq;
#else
struct timespec ts;
verify(HERE), ::clock_gettime(CLOCK_MONOTONIC, &ts) == 0;
ensure(::clock_gettime(CLOCK_MONOTONIC, &ts) == 0);

const u64 result = static_cast<u64>(ts.tv_sec) * 1000000ull + static_cast<u64>(ts.tv_nsec) / 1000u;
#endif
@ -196,7 +196,7 @@ error_code sys_time_get_current_time(vm::ptr<s64> sec, vm::ptr<s64> nsec)

#ifdef _WIN32
LARGE_INTEGER count;
verify(HERE), QueryPerformanceCounter(&count);
ensure(QueryPerformanceCounter(&count));

const u64 diff_base = count.QuadPart - s_time_aux_info.start_time;

@ -219,7 +219,7 @@ error_code sys_time_get_current_time(vm::ptr<s64> sec, vm::ptr<s64> nsec)
*nsec = time % 1000000000ull;
#else
struct timespec ts;
verify(HERE), ::clock_gettime(CLOCK_REALTIME, &ts) == 0;
ensure(::clock_gettime(CLOCK_REALTIME, &ts) == 0);

if (g_cfg.core.clocks_scale == 100)
{

@ -472,7 +472,7 @@ error_code sys_usbd_initialize(ppu_thread& ppu, vm::ptr<u32> handle)
std::lock_guard lock(usbh->mutex);

// Must not occur (lv2 allows multiple handles, cellUsbd does not)
verify("sys_usbd Initialized twice" HERE), !usbh->is_init.exchange(true);
ensure(!usbh->is_init.exchange(true));

*handle = 0x115B;

@ -76,7 +76,7 @@ error_code sys_vm_memory_map(ppu_thread& ppu, u32 vsize, u32 psize, u32 cid, u64
if (const auto area = vm::find_map(0x10000000, 0x10000000, 2 | (flag & SYS_MEMORY_PAGE_SIZE_MASK)))
{
// Alloc all memory (shall not fail)
verify(HERE), area->alloc(vsize);
ensure(area->alloc(vsize));
vm::lock_sudo(area->addr, vsize);

idm::make<sys_vm_t>(area->addr, vsize, ct, psize);
@ -117,7 +117,7 @@ error_code sys_vm_unmap(ppu_thread& ppu, u32 addr)
const auto vmo = idm::withdraw<sys_vm_t>(sys_vm_t::find_id(addr), [&](sys_vm_t& vmo)
{
// Free block
verify(HERE), vm::unmap(addr);
ensure(vm::unmap(addr));

// Return memory
vmo.ct->used -= vmo.psize;

@ -75,7 +75,7 @@ void usb_device_skylander::control_transfer(u8 bmRequestType, u8 bRequest, u16 w
{
case 'A':
// Activate command
verify(HERE), buf_size == 2;
ensure(buf_size == 2);
q_result = {0x41, buf[1], 0xFF, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00};

@ -83,7 +83,7 @@ void usb_device_skylander::control_transfer(u8 bmRequestType, u8 bRequest, u16 w
break;
case 'C':
// Set LEDs colour
verify(HERE), buf_size == 4;
ensure(buf_size == 4);
break;
case 'M':
q_result[0] = 0x4D;
@ -92,7 +92,7 @@ void usb_device_skylander::control_transfer(u8 bmRequestType, u8 bRequest, u16 w
break;
case 'Q':
// Queries a block
verify(HERE), buf_size == 3;
ensure(buf_size == 3);

q_result[0] = 'Q';
q_result[1] = 0x10;
@ -107,18 +107,18 @@ void usb_device_skylander::control_transfer(u8 bmRequestType, u8 bRequest, u16 w
break;
case 'R':
// Reset
verify(HERE), buf_size == 2;
ensure(buf_size == 2);
q_result = {
0x52, 0x02, 0x0A, 0x03, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
q_queries.push(q_result);
break;
case 'S':
// ?
verify(HERE), buf_size == 1;
ensure(buf_size == 1);
break;
case 'W':
// Write a block
verify(HERE), buf_size == 19;
ensure(buf_size == 19);
q_result[0] = 'W';
q_result[1] = 0x10;
q_result[2] = buf[2];
@ -147,7 +147,7 @@ void usb_device_skylander::control_transfer(u8 bmRequestType, u8 bRequest, u16 w

void usb_device_skylander::interrupt_transfer(u32 buf_size, u8* buf, u32 endpoint, UsbTransfer* transfer)
{
verify(HERE), buf_size == 0x20;
ensure(buf_size == 0x20);

transfer->fake = true;
transfer->expected_count = buf_size;

@ -1,4 +1,4 @@
#include "stdafx.h"
#include "stdafx.h"
#include "vm_locking.h"
#include "vm_ptr.h"
#include "vm_ref.h"
@ -686,7 +686,7 @@ namespace vm
// 1. To simplify range_lock logic
// 2. To make sure it never overlaps with 32-bit addresses
// Also check that it's aligned (lowest 16 bits)
verify(HERE), (shm_self & 0xffff'8000'0000'ffff) == range_locked;
ensure((shm_self & 0xffff'8000'0000'ffff) == range_locked);

// Find another mirror and map it as shareable too
for (auto& ploc : g_locations)
@ -716,7 +716,7 @@ namespace vm
u64 shm_self = reinterpret_cast<u64>(shm->get()) ^ range_locked;

// Check (see above)
verify(HERE), (shm_self & 0xffff'8000'0000'ffff) == range_locked;
ensure((shm_self & 0xffff'8000'0000'ffff) == range_locked);

// Map range as shareable
for (u32 i = addr / 65536; i < addr / 65536 + size / 65536; i++)
@ -884,7 +884,7 @@ namespace vm
else
{
// Must be consistent
verify(HERE), is_exec == !!(g_pages[i] & page_executable);
ensure(is_exec == !!(g_pages[i] & page_executable));
}

size += 4096;
@ -1049,8 +1049,8 @@ namespace vm
{
perf_meter<"PAGE_LCK"_u64> perf;

verify("lock_sudo" HERE), addr % 4096 == 0;
verify("lock_sudo" HERE), size % 4096 == 0;
ensure(addr % 4096 == 0);
ensure(size % 4096 == 0);

if (!utils::memory_lock(g_sudo_addr + addr, size))
{
@ -1075,8 +1075,8 @@ namespace vm
if (this->flags & 0x10)
{
// Mark overflow/underflow guard pages as allocated
verify(HERE), !g_pages[addr / 4096].exchange(page_allocated);
verify(HERE), !g_pages[addr / 4096 + size / 4096 - 1].exchange(page_allocated);
ensure(!g_pages[addr / 4096].exchange(page_allocated));
ensure(!g_pages[addr / 4096 + size / 4096 - 1].exchange(page_allocated));
}

// Map "real" memory pages; provide a function to search for mirrors with private member access
@ -1208,7 +1208,7 @@ namespace vm
std::shared_ptr<utils::shm> shm;

if (m_common)
verify(HERE), !src;
ensure(!src);
else if (src)
shm = *src;
else
@ -1265,7 +1265,7 @@ namespace vm
std::shared_ptr<utils::shm> shm;

if (m_common)
verify(HERE), !src;
ensure(!src);
else if (src)
shm = *src;
else
@ -1306,12 +1306,12 @@ namespace vm
if (flags & 0x10)
{
// Clear guard pages
verify(HERE), g_pages[addr / 4096 - 1].exchange(0) == page_allocated;
verify(HERE), g_pages[addr / 4096 + size / 4096].exchange(0) == page_allocated;
ensure(g_pages[addr / 4096 - 1].exchange(0) == page_allocated);
ensure(g_pages[addr / 4096 + size / 4096].exchange(0) == page_allocated);
}

// Unmap "real" memory pages
verify(HERE), size == _page_unmap(addr, size, found->second.second.get());
ensure(size == _page_unmap(addr, size, found->second.second.get()));

// Clear stack guards
if (flags & 0x10)

@ -34,7 +34,7 @@ namespace rsx

// 'fake' initialize usermemory
sys_memory_allocate(*this, buffer_size, SYS_MEMORY_PAGE_SIZE_1M, contextInfo.ptr(&rsx_context::user_addr));
verify(HERE), (user_mem_addr = contextInfo->user_addr) != 0;
ensure((user_mem_addr = contextInfo->user_addr) != 0);

if (sys_rsx_device_map(*this, contextInfo.ptr(&rsx_context::dev_addr), vm::null, 0x8) != CELL_OK)
fmt::throw_exception("Capture Replay: sys_rsx_device_map failed!");

@ -7,7 +7,7 @@

void CgBinaryDisasm::AddCodeAsm(const std::string& code)
{
verify(HERE), (m_opcode < 70);
ensure((m_opcode < 70));
std::string op_name;

if (dst.dest_reg == 63)
@ -221,7 +221,7 @@ template<typename T> std::string CgBinaryDisasm::GetSrcDisAsm(T src)
{
ret += swizzle;
}


if (src.neg) ret = "-" + ret;
if (src.abs) ret = "|" + ret + "|";

@ -232,7 +232,7 @@ void CgBinaryDisasm::TaskFP()
{
m_size = 0;
u32* data = reinterpret_cast<u32*>(&m_buffer[m_offset]);
verify(HERE), ((m_buffer_size - m_offset) % sizeof(u32) == 0);
ensure(((m_buffer_size - m_offset) % sizeof(u32) == 0));
for (u32 i = 0; i < (m_buffer_size - m_offset) / sizeof(u32); i++)
{
// Get BE data
@ -481,7 +481,7 @@ void CgBinaryDisasm::TaskFP()
break;
}

verify(HERE), m_step % sizeof(u32) == 0;
ensure(m_step % sizeof(u32) == 0);
data += m_step / sizeof(u32);
}
}

@ -348,7 +348,7 @@ public:
m_offset = prog.ucode;

u32* vdata = reinterpret_cast<u32*>(&m_buffer[m_offset]);
verify(HERE), (m_buffer_size - m_offset) % sizeof(u32) == 0;
ensure((m_buffer_size - m_offset) % sizeof(u32) == 0);
for (u32 i = 0; i < (m_buffer_size - m_offset) / sizeof(u32); i++)
{
vdata[i] = std::bit_cast<u32, be_t<u32>>(vdata[i]);

@ -6,13 +6,13 @@

void CgBinaryDisasm::AddScaCodeDisasm(const std::string& code)
{
verify(HERE), (m_sca_opcode < 21);
ensure((m_sca_opcode < 21));
m_arb_shader += rsx_vp_sca_op_names[m_sca_opcode] + code + " ";
}

void CgBinaryDisasm::AddVecCodeDisasm(const std::string& code)
{
verify(HERE), (m_vec_opcode < 26);
ensure((m_vec_opcode < 26));
m_arb_shader += rsx_vp_vec_op_names[m_vec_opcode] + code + " ";
}

@ -298,7 +298,7 @@ void CgBinaryDisasm::AddCodeCondDisasm(const std::string& dst, const std::string
{
swizzle.clear();
}


std::string cond = fmt::format("%s%s", cond_string_table[d0.cond], swizzle.c_str());
AddCodeDisasm(dst + "(" + cond + ") " + ", " + src + ";");
}

@ -581,7 +581,7 @@ namespace

void write_vertex_array_data_to_buffer(gsl::span<std::byte> raw_dst_span, gsl::span<const std::byte> src_ptr, u32 count, rsx::vertex_base_type type, u32 vector_element_count, u32 attribute_src_stride, u8 dst_stride, bool swap_endianness)
{
verify(HERE), (vector_element_count > 0);
ensure((vector_element_count > 0));
const u32 src_read_stride = rsx::get_vertex_type_size_on_host(type, vector_element_count);

bool use_stream_no_stride = false;
@ -1042,7 +1042,7 @@ namespace
T min_index = invalid_index;
T max_index = 0;

verify(HERE), (dst.size() >= 3 * (src.size() - 2));
ensure((dst.size() >= 3 * (src.size() - 2)));

u32 dst_idx = 0;
u32 src_idx = 0;
@ -1093,7 +1093,7 @@ namespace
T min_index = index_limit<T>();
T max_index = 0;

verify(HERE), (4 * dst.size_bytes() >= 6 * src.size_bytes());
ensure((4 * dst.size_bytes() >= 6 * src.size_bytes()));

u32 dst_idx = 0;
u8 set_size = 0;

@ -126,7 +126,7 @@ void FragmentProgramDecompiler::SetDst(std::string code, u32 flags)

u32 reg_index = dst.fp16 ? dst.dest_reg >> 1 : dst.dest_reg;

verify(HERE), reg_index < temp_registers.size();
ensure(reg_index < temp_registers.size());

if (dst.opcode == RSX_FP_OPCODE_MOV &&
src0.reg_type == RSX_FP_REGISTER_TYPE_TEMP &&
@ -174,7 +174,7 @@ std::string FragmentProgramDecompiler::GetMask()
{
std::string ret;
ret.reserve(5);


static constexpr std::string_view dst_mask = "xyzw";

ret += '.';
@ -1266,7 +1266,7 @@ std::string FragmentProgramDecompiler::Decompile()

if (dst.end) break;

verify(HERE), m_offset % sizeof(u32) == 0;
ensure(m_offset % sizeof(u32) == 0);
data += m_offset / sizeof(u32);
}

@ -66,7 +66,7 @@ struct temp_register
bool requires_gather(u8 channel) const
{
//Data fetched from the single precision register requires merging of the two half registers
verify(HERE), channel < 4;
ensure(channel < 4);
if (aliased_h0 && channel < 2)
{
return last_write_half[channel];

@ -53,13 +53,13 @@ vertex_program_utils::vertex_program_metadata vertex_program_utils::analyse_vert

while (true)
{
verify(HERE), current_instruction < 512;
ensure(current_instruction < 512);

if (result.instruction_mask[current_instruction])
{
if (!fast_exit)
{
if (!has_printed_error)
{
// This can be harmless if a dangling RET was encountered before
rsx_log.error("vp_analyser: Possible infinite loop detected");
@ -198,7 +198,7 @@ vertex_program_utils::vertex_program_metadata vertex_program_utils::analyse_vert

if (!has_branch_instruction)
{
verify(HERE), instruction_range.first == entry;
ensure(instruction_range.first == entry);
std::memcpy(dst_prog.data.data(), data + (instruction_range.first * 4), result.ucode_length);
}
else

@ -403,7 +403,7 @@ public:
if (I == m_fragment_shader_cache.end())
return;

verify(HERE), (dst_buffer.size_bytes() >= ::narrow<int>(I->second.FragmentConstantOffsetCache.size()) * 16u);
ensure((dst_buffer.size_bytes() >= ::narrow<int>(I->second.FragmentConstantOffsetCache.size()) * 16u));

f32* dst = dst_buffer.data();
alignas(16) f32 tmp[4];

@ -204,7 +204,7 @@ public:

auto var_blocks = fmt::split(simple_var, { "." });

verify(HERE), (!var_blocks.empty());
ensure((!var_blocks.empty()));

name = prefix + var_blocks[0];

@ -930,7 +930,7 @@ namespace rsx
}

// Mipmap, height and width aren't allowed to be zero
return verify("Texture params" HERE, result) * (cubemap ? 6 : 1);
return (ensure(result) * (cubemap ? 6 : 1));
}

size_t get_placed_texture_storage_size(const rsx::fragment_texture& texture, size_t row_pitch_alignment, size_t mipmap_alignment)

@ -76,7 +76,7 @@ std::string VertexProgramDecompiler::GetDST(bool is_sca)
if (!ret.empty())
{
// Double assignment. Only possible for vector ops
verify(HERE), !is_sca;
ensure(!is_sca);
ret += " = ";
}

@ -507,7 +507,7 @@ std::string VertexProgramDecompiler::Decompile()
if (m_prog.entry != m_prog.base_address)
{
jump_position = find_jump_lvl(m_prog.entry - m_prog.base_address);
verify(HERE), jump_position != UINT32_MAX;
ensure(jump_position != UINT32_MAX);
}

AddCode(fmt::format("int jump_position = %u;", jump_position));

@ -146,7 +146,7 @@ namespace rsx
}
}

verify(HERE), region.target == Traits::get(sink);
ensure(region.target == Traits::get(sink));
orphaned_surfaces.push_back(region.target);
data[new_address] = std::move(sink);
};
@ -169,7 +169,7 @@ namespace rsx
}

// One-time data validity test
verify(HERE), prev_surface;
ensure(prev_surface);
if (prev_surface->read_barrier(cmd); !prev_surface->test())
{
return;
@ -360,7 +360,7 @@ namespace rsx
if (ignore) continue;

this_address = surface->base_addr;
verify(HERE), this_address;
ensure(this_address);
}

const auto parent_region = surface->get_normalized_memory_area();
@ -405,7 +405,9 @@ namespace rsx
auto &storage = surface->is_depth_surface() ? m_depth_stencil_storage : m_render_targets_storage;
auto &object = storage[e.first];

verify(HERE), !src_offset.x, !src_offset.y, object;
ensure(!src_offset.x);
ensure(!src_offset.y);
ensure(object);
if (!surface->old_contents.empty()) [[unlikely]]
{
surface->read_barrier(cmd);
@ -531,7 +533,7 @@ namespace rsx

if (!new_surface)
{
verify(HERE), store;
ensure(store);
new_surface_storage = Traits::create_new_surface(address, format, width, height, pitch, antialias, std::forward<Args>(extra_params)...);
new_surface = Traits::get(new_surface_storage);
allocate_rsx_memory(new_surface);
@ -590,7 +592,8 @@ namespace rsx
(*primary_storage)[address] = std::move(new_surface_storage);
}

verify(HERE), !old_surface_storage, new_surface->get_spp() == get_format_sample_count(antialias);
ensure(!old_surface_storage);
ensure(new_surface->get_spp() == get_format_sample_count(antialias));
return new_surface;
}

@ -602,7 +605,7 @@ namespace rsx

void free_rsx_memory(surface_type surface)
{
verify("Surface memory double free" HERE), surface->has_refs();
ensure(surface->has_refs()); // "Surface memory double free"

if (const auto memory_size = surface->get_memory_range().length();
m_active_memory_used >= memory_size) [[likely]]
@ -976,7 +979,7 @@ namespace rsx
if (write_tag == cache_tag && m_skip_write_updates)
{
// Nothing to do
verify(HERE), !m_invalidate_on_write;
ensure(!m_invalidate_on_write);
return;
}

@ -1051,7 +1054,7 @@ namespace rsx
free_resource_list(m_render_targets_storage);
free_resource_list(m_depth_stencil_storage);

verify(HERE), m_active_memory_used == 0;
ensure(m_active_memory_used == 0);

m_bound_depth_stencil = std::make_pair(0, nullptr);
m_bound_render_targets_config = { 0, 0 };

@ -106,13 +106,13 @@ namespace rsx

areai src_rect() const
{
verify(HERE), width;
ensure(width);
return { src_x, src_y, src_x + width, src_y + height };
}

areai dst_rect() const
{
verify(HERE), width;
ensure(width);
return { dst_x, dst_y, dst_x + u16(width * transfer_scale_x + 0.5f), dst_y + u16(height * transfer_scale_y + 0.5f) };
}
};
@ -349,7 +349,8 @@ namespace rsx
#else
void queue_tag(u32 address)
{
verify(HERE), native_pitch, rsx_pitch;
ensure(native_pitch);
ensure(rsx_pitch);

base_addr = address;

@ -444,7 +445,7 @@ namespace rsx
template<typename T>
void set_old_contents(T* other)
{
verify(HERE), old_contents.empty();
ensure(old_contents.empty());

if (!other || other->get_rsx_pitch() != this->get_rsx_pitch())
{
@ -460,7 +461,8 @@ namespace rsx
void set_old_contents_region(const T& region, bool normalized)
{
// NOTE: This method will not perform pitch verification!
verify(HERE), region.source, region.source != static_cast<decltype(region.source)>(this);
ensure(region.source);
ensure(region.source != static_cast<decltype(region.source)>(this));

old_contents.push_back(region.template cast<image_storage_type>());
auto &slice = old_contents.back();
@ -621,7 +623,7 @@ namespace rsx
if (spp == 1 || sample_layout == rsx::surface_sample_layout::ps3)
return;

verify(HERE), access_type != rsx::surface_access::write;
ensure(access_type != rsx::surface_access::write);
transform_samples_to_pixels(region);
}
};

@ -519,7 +519,7 @@ namespace rsx
{
for (auto* section : _set)
{
verify(HERE), section->is_flushed() || section->is_dirty();
ensure(section->is_flushed() || section->is_dirty());

section->discard(/*set_dirty*/ false);
}
@ -708,7 +708,7 @@ namespace rsx
{
if (section1 == section2) count++;
}
verify(HERE), count == 1;
ensure(count == 1);
}
#endif //TEXTURE_CACHE_DEBUG

@ -739,7 +739,7 @@ namespace rsx
// Fast code-path for keeping the fault range protection when not flushing anything
if (cause.keep_fault_range_protection() && cause.skip_flush() && !trampled_set.sections.empty())
{
verify(HERE), cause != invalidation_cause::committed_as_fbo;
ensure(cause != invalidation_cause::committed_as_fbo);

// We discard all sections fully inside fault_range
for (auto &obj : trampled_set.sections)
@ -1172,7 +1172,7 @@ namespace rsx
auto* region_ptr = find_cached_texture(rsx_range, RSX_GCM_FORMAT_IGNORED, false, false);
if (region_ptr && region_ptr->is_locked() && region_ptr->get_context() == texture_upload_context::framebuffer_storage)
{
verify(HERE), region_ptr->get_protection() == utils::protection::no;
ensure(region_ptr->get_protection() == utils::protection::no);
region_ptr->discard(false);
}
}
@ -1198,9 +1198,9 @@ namespace rsx
if (!region.is_dirty())
{
if (flags == memory_read_flags::flush_once)
verify(HERE), m_flush_always_cache.find(memory_range) == m_flush_always_cache.end();
ensure(m_flush_always_cache.find(memory_range) == m_flush_always_cache.end());
else
verify(HERE), m_flush_always_cache[memory_range] == &region;
ensure(m_flush_always_cache[memory_range] == &region);
}
#endif // TEXTURE_CACHE_DEBUG
return;
@ -1215,9 +1215,9 @@ namespace rsx
#ifdef TEXTURE_CACHE_DEBUG
const auto &memory_range = section.get_section_range();
if (flags == memory_read_flags::flush_once)
verify(HERE), m_flush_always_cache[memory_range] == &section;
ensure(m_flush_always_cache[memory_range] == &section);
else
verify(HERE), m_flush_always_cache.find(memory_range) == m_flush_always_cache.end();
ensure(m_flush_always_cache.find(memory_range) == m_flush_always_cache.end());
#endif
update_flush_always_cache(section, flags == memory_read_flags::flush_always);
}
@ -2169,7 +2169,7 @@ namespace rsx
surf->get_surface_height(rsx::surface_metrics::pixels) != surf->height())
{
// Must go through a scaling operation due to resolution scaling being present
verify(HERE), g_cfg.video.resolution_scale_percent != 100;
ensure(g_cfg.video.resolution_scale_percent != 100);
use_null_region = false;
}
}
@ -2410,7 +2410,7 @@ namespace rsx
}
else
{
verify(HERE), src_is_render_target;
ensure(src_is_render_target);
src_is_depth = (typeless_info.src_is_typeless) ? false : src_subres.is_depth;
}
}
@ -2611,7 +2611,7 @@ namespace rsx

if (!cached_dest && !dst_is_render_target)
{
verify(HERE), !dest_texture;
ensure(!dest_texture);

// Need to calculate the minimum required size that will fit the data, anchored on the rsx_address
// If the application starts off with an 'inseted' section, the guessed dimensions may not fit!
@ -2698,7 +2698,7 @@ namespace rsx
}
}

verify(HERE), cached_dest || dst_is_render_target;
ensure(cached_dest || dst_is_render_target);

// Invalidate any cached subresources in modified range
notify_surface_changed(dst_range);
@ -2710,7 +2710,7 @@ namespace rsx
{
// Validate modified range
u32 mem_offset = dst_address - cached_dest->get_section_base();
verify(HERE), (mem_offset + dst_payload_length) <= cached_dest->get_section_size();
ensure((mem_offset + dst_payload_length) <= cached_dest->get_section_size());

lock.upgrade();

@ -2749,7 +2749,7 @@ namespace rsx
else
{
// Unlikely situation, but the only one which would allow re-upload from CPU to overlap this section.
verify(HERE), !found->is_flushable();
ensure(!found->is_flushable());
found->discard(true);
}
}
@ -2844,7 +2844,7 @@ namespace rsx
auto& section = *(It.second);
if (section.get_protection() != utils::protection::no)
{
verify(HERE), section.exists();
ensure(section.exists());
AUDIT(section.get_context() == texture_upload_context::framebuffer_storage);
AUDIT(section.get_memory_read_flags() == memory_read_flags::flush_always);

@ -71,7 +71,7 @@ namespace rsx {
// Initialized to utils::protection::rw
static constexpr size_t num_pages = 0x1'0000'0000 / 4096;
per_page_info_t _info[num_pages]{0};


static_assert(static_cast<u32>(utils::protection::rw) == 0, "utils::protection::rw must have value 0 for the above constructor to work");

static constexpr size_t rsx_address_to_index(u32 address)

@ -298,10 +298,10 @@ namespace rsx
src_y += delta;
dst_y += delta;

verify(HERE), dst_y == slice_begin;
ensure(dst_y == slice_begin);
}

verify(HERE), dst_y >= slice_begin;
ensure(dst_y >= slice_begin);

const auto h = std::min(section_end, slice_end) - dst_y;
dst_y = (dst_y - slice_begin);
@ -538,7 +538,7 @@ namespace rsx
}

// Always make sure the conflict is resolved!
verify(HERE), is_gcm_depth_format(attr2.gcm_format) == is_depth;
ensure(is_gcm_depth_format(attr2.gcm_format) == is_depth);
}

if (extended_dimension == rsx::texture_dimension_extended::texture_dimension_2d ||
@ -546,7 +546,7 @@ namespace rsx
{
if (extended_dimension == rsx::texture_dimension_extended::texture_dimension_1d)
{
verify(HERE), attr.height == 1;
ensure(attr.height == 1);
}

if ((surface_is_rop_target && g_cfg.video.strict_rendering_mode) ||
@ -574,7 +574,7 @@ namespace rsx
rsx::texture_dimension_extended::texture_dimension_3d, decoded_remap };
}

verify(HERE), extended_dimension == rsx::texture_dimension_extended::texture_dimension_cubemap;
ensure(extended_dimension == rsx::texture_dimension_extended::texture_dimension_cubemap);

return{ texptr->get_surface(rsx::surface_access::read), deferred_request_command::cubemap_unwrap,
attr2, {},
@ -591,7 +591,7 @@ namespace rsx
u32 encoded_remap, const texture_channel_remap_t& decoded_remap,
int select_hint = -1)
{
verify(HERE), (select_hint & 0x1) == select_hint;
ensure((select_hint & 0x1) == select_hint);

bool is_depth = (select_hint == 0) ? fbos.back().is_depth : local.back()->is_depth_texture();
bool aspect_mismatch = false;
@ -679,7 +679,7 @@ namespace rsx

if (extended_dimension == rsx::texture_dimension_extended::texture_dimension_1d)
{
verify(HERE), attr.height == 1;
ensure(attr.height == 1);
}

if (!fbos.empty())

@ -351,7 +351,7 @@ namespace rsx

void initialize(u32 _index, ranged_storage_type *storage)
{
verify(HERE), m_storage == nullptr && storage != nullptr;
ensure(m_storage == nullptr && storage != nullptr);
AUDIT(index < num_blocks);

m_storage = storage;
@ -1052,7 +1052,7 @@ namespace rsx

void initialize(ranged_storage_block_type *block)
{
verify(HERE), m_block == nullptr && m_tex_cache == nullptr && m_storage == nullptr;
ensure(m_block == nullptr && m_tex_cache == nullptr && m_storage == nullptr);
m_block = block;
m_storage = &block->get_storage();
m_tex_cache = &block->get_texture_cache();

@ -52,7 +52,7 @@ void GLGSRender::set_scissor(bool clip_viewport)

void GLGSRender::on_init_thread()
{
verify(HERE), m_frame;
ensure(m_frame);

// NOTES: All contexts have to be created before any is bound to a thread
// This allows context sharing to work (both GLRCs passed to wglShareLists have to be idle or you get ERROR_BUSY)
@ -552,7 +552,7 @@ void GLGSRender::clear_surface(u32 arg)

if ((arg & 0x3) != 0x3 && !require_mem_load && ds->dirty())
{
verify(HERE), mask;
ensure(mask);

// Only one aspect was cleared. Make sure to memory initialize the other before removing dirty flag
if (arg == 1)
@ -651,7 +651,7 @@ bool GLGSRender::load_program()
if (m_graphics_state & rsx::pipeline_state::invalidate_pipeline_bits)
{
get_current_fragment_program(fs_sampler_state);
verify(HERE), current_fragment_program.valid;
ensure(current_fragment_program.valid);

get_current_vertex_program(vs_sampler_state);

@ -701,7 +701,7 @@ bool GLGSRender::load_program()
}
else
{
verify(HERE), m_program;
ensure(m_program);
m_program->sync();
}
}
@ -1061,7 +1061,7 @@ void GLGSRender::begin_occlusion_query(rsx::reports::occlusion_query_info* query

void GLGSRender::end_occlusion_query(rsx::reports::occlusion_query_info* query)
{
verify(HERE), query->active;
ensure(query->active);
glEndQuery(GL_ANY_SAMPLES_PASSED);
}

@ -553,7 +553,7 @@ namespace gl
}
}

verify("Incompatible source and destination format!" HERE), real_src->aspect() == real_dst->aspect();
ensure(real_src->aspect() == real_dst->aspect());

const bool is_depth_copy = (real_src->aspect() != image_aspect::color);
const filter interp = (linear_interpolation && !is_depth_copy) ? filter::linear : filter::nearest;

@ -115,7 +115,7 @@ namespace gl

bool check_signaled() const
{
verify(HERE), m_value != nullptr;
ensure(m_value);

if (signaled)
return true;
@ -145,7 +145,7 @@ namespace gl

bool wait_for_signal()
{
verify(HERE), m_value != nullptr;
ensure(m_value);

if (signaled == GL_FALSE)
{
@ -195,7 +195,7 @@ namespace gl

void server_wait_sync() const
{
verify(HERE), m_value != nullptr;
ensure(m_value != nullptr);
glWaitSync(m_value, 0, GL_TIMEOUT_IGNORED);
}
};
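The fence conversions above show that ensure() accepts anything contextually convertible to bool, so `ensure(m_value)` and `ensure(m_value != nullptr)` assert the same thing for a pointer-like handle. A hedged illustration with the ensure_sketch helper from earlier (the names are hypothetical):

void fence_example()
{
	int dummy = 0;
	void* sync_handle = &dummy; // hypothetical non-null sync object handle

	// Both spellings are equivalent: the pointer itself is the condition.
	ensure_sketch(sync_handle);
	ensure_sketch(sync_handle != nullptr);
}
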
@ -721,7 +721,7 @@ namespace gl

void data(GLsizeiptr size, const void* data_ = nullptr, GLenum usage = GL_STREAM_DRAW)
{
verify(HERE), m_memory_type != memory_type::local;
ensure(m_memory_type != memory_type::local);

target target_ = current_target();
save_binding_state save(target_, *this);
@ -731,7 +731,7 @@ namespace gl

GLubyte* map(access access_)
{
verify(HERE), m_memory_type == memory_type::host_visible;
ensure(m_memory_type == memory_type::host_visible);

bind(current_target());
return reinterpret_cast<GLubyte*>(glMapBuffer(static_cast<GLenum>(current_target()), static_cast<GLenum>(access_)));
@ -739,7 +739,7 @@ namespace gl

void unmap()
{
verify(HERE), m_memory_type == memory_type::host_visible;
ensure(m_memory_type == memory_type::host_visible);
glUnmapBuffer(static_cast<GLenum>(current_target()));
}

@ -794,7 +794,7 @@ namespace gl
glBufferStorage(static_cast<GLenum>(m_target), size, data, buffer_storage_flags);
m_memory_mapping = glMapBufferRange(static_cast<GLenum>(m_target), 0, size, GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT | GL_MAP_COHERENT_BIT);

verify(HERE), m_memory_mapping != nullptr;
ensure(m_memory_mapping != nullptr);
m_data_loc = 0;
m_size = ::narrow<u32>(size);
}
@ -894,7 +894,7 @@ namespace gl

void reserve_storage_on_heap(u32 alloc_size) override
{
verify (HERE), m_memory_mapping == nullptr;
ensure(m_memory_mapping == nullptr);

u32 offset = m_data_loc;
if (m_data_loc) offset = align(offset, 256);
@ -927,7 +927,7 @@ namespace gl
m_alignment_offset = ::narrow<u32>(diff_bytes);
}

verify(HERE), m_mapped_bytes >= alloc_size;
ensure(m_mapped_bytes >= alloc_size);
}

std::pair<void*, u32> alloc_from_heap(u32 alloc_size, u16 alignment) override
@ -994,7 +994,7 @@ namespace gl

void update(buffer *_buffer, u32 offset, u32 range, GLenum format = GL_R8UI)
{
verify(HERE), _buffer->size() >= (offset + range);
ensure(_buffer->size() >= (offset + range));
m_buffer = _buffer;
m_offset = offset;
m_range = range;
@ -1777,7 +1777,7 @@ namespace gl
if (aspect_flags & image_aspect::stencil)
{
constexpr u32 depth_stencil_mask = (image_aspect::depth | image_aspect::stencil);
verify("Invalid aspect mask combination" HERE), (aspect_flags & depth_stencil_mask) != depth_stencil_mask;
ensure((aspect_flags & depth_stencil_mask) != depth_stencil_mask); // "Invalid aspect mask combination"

glBindTexture(m_target, m_id);
glTexParameteri(m_target, GL_DEPTH_STENCIL_TEXTURE_MODE, GL_STENCIL_INDEX);
@ -1880,7 +1880,7 @@ public:
}
}

verify(HERE), aspect() & aspect_flags;
ensure(aspect() & aspect_flags);
auto mapping = apply_swizzle_remap(get_native_component_layout(), remap);
auto view = std::make_unique<texture_view>(this, mapping.data(), aspect_flags);
auto result = view.get();
@ -2110,7 +2110,7 @@ public:
{
save_binding_state save(m_parent);

verify(HERE), rhs.get_target() == texture::target::texture2D;
ensure(rhs.get_target() == texture::target::texture2D);
m_parent.m_resource_bindings[m_id] = rhs.id();
glFramebufferTexture2D(GL_FRAMEBUFFER, m_id, GL_TEXTURE_2D, rhs.id(), 0);
}
@ -2315,7 +2315,7 @@ public:
return *this;
}

verify(HERE), !m_init_fence.is_empty(); // Do not attempt to compile a shader_view!!
ensure(!m_init_fence.is_empty()); // Do not attempt to compile a shader_view!!
m_init_fence.server_wait_sync();

glCompileShader(m_id);

@ -116,7 +116,7 @@ namespace gl
}
}

verify(HERE), num_worker_threads >= 1;
ensure(num_worker_threads >= 1);

// Create the thread pool
g_pipe_compilers = std::make_unique<named_thread_group<pipe_compiler>>("RSX.W", num_worker_threads);
@ -136,7 +136,7 @@ namespace gl

pipe_compiler* get_pipe_compiler()
{
verify(HERE), g_pipe_compilers;
ensure(g_pipe_compilers);
int thread_index = g_compiler_index++;

return g_pipe_compilers.get()->begin() + (thread_index % g_num_pipe_compilers);

@ -191,7 +191,7 @@ void GLGSRender::init_buffers(rsx::framebuffer_creation_context context, bool sk
auto rtt = std::get<1>(m_rtts.m_bound_render_targets[i]);
color_targets[i] = rtt->id();

verify("Pitch mismatch!" HERE), rtt->get_rsx_pitch() == m_framebuffer_layout.actual_color_pitch[i];
ensure(rtt->get_rsx_pitch() == m_framebuffer_layout.actual_color_pitch[i]); // "Pitch mismatch!"
m_surface_info[i].address = m_framebuffer_layout.color_addresses[i];
m_surface_info[i].pitch = m_framebuffer_layout.actual_color_pitch[i];
m_surface_info[i].width = m_framebuffer_layout.width;
@ -220,7 +220,7 @@ void GLGSRender::init_buffers(rsx::framebuffer_creation_context context, bool sk
auto ds = std::get<1>(m_rtts.m_bound_depth_stencil);
depth_stencil_target = ds->id();

verify("Pitch mismatch!" HERE), std::get<1>(m_rtts.m_bound_depth_stencil)->get_rsx_pitch() == m_framebuffer_layout.actual_zeta_pitch;
ensure(std::get<1>(m_rtts.m_bound_depth_stencil)->get_rsx_pitch() == m_framebuffer_layout.actual_zeta_pitch); // "Pitch mismatch!"

m_depth_surface_info.address = m_framebuffer_layout.zeta_address;
m_depth_surface_info.pitch = m_framebuffer_layout.actual_zeta_pitch;
@ -528,7 +528,7 @@ void gl::render_target::memory_barrier(gl::command_context& cmd, rsx::surface_ac
if (get_internal_format() == src_texture->get_internal_format())
{
// Copy data from old contents onto this one
verify(HERE), src_bpp == dst_bpp;
ensure(src_bpp == dst_bpp);
}
else
{

@ -124,7 +124,7 @@ namespace gl

static inline gl::render_target* as_rtt(gl::texture* t)
{
return verify(HERE, dynamic_cast<gl::render_target*>(t));
return ensure(dynamic_cast<gl::render_target*>(t));
}
}

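The as_rtt() conversion above leans on the pass-through return once more: ensure() aborts on a null dynamic_cast result and otherwise forwards the non-null pointer to the caller. A hedged sketch of the shape, with hypothetical types and the ensure_sketch helper defined earlier:

struct base_texture { virtual ~base_texture() = default; };
struct render_target_sketch : base_texture {};

render_target_sketch* as_rtt_sketch(base_texture* t)
{
	// Aborts when t is not actually a render target; otherwise the cast
	// result flows straight through as the return value.
	return ensure_sketch(dynamic_cast<render_target_sketch*>(t));
}
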
@ -375,7 +375,7 @@ namespace gl
|
||||
if (reference_mask & (1 << i))
|
||||
{
|
||||
auto sampler_state = static_cast<gl::texture_cache::sampled_image_descriptor*>(descriptors[i].get());
|
||||
verify(HERE), sampler_state;
|
||||
ensure(sampler_state);
|
||||
|
||||
int pool_id = static_cast<int>(sampler_state->image_type);
|
||||
auto& pool = allocator.pools[pool_id];
|
||||
|
@ -122,7 +122,7 @@ namespace gl
|
||||
{
|
||||
if (!enabled) return;
|
||||
|
||||
verify(HERE), initialized;
|
||||
ensure(initialized);
|
||||
|
||||
std::vector<GLint> offsets;
|
||||
std::vector<GLsizei> counts;
|
||||
|
@ -503,7 +503,7 @@ namespace gl
|
||||
}
|
||||
else if (pack_info.type == GL_FLOAT)
|
||||
{
|
||||
verify(HERE), mem_info->image_size_in_bytes == (mem_info->image_size_in_texels * 4);
|
||||
ensure(mem_info->image_size_in_bytes == (mem_info->image_size_in_texels * 4));
|
||||
mem_info->memory_required = (mem_info->image_size_in_texels * 6);
|
||||
initialize_scratch_mem();
|
||||
|
||||
@ -513,7 +513,7 @@ namespace gl
|
||||
}
|
||||
else if (pack_info.type == GL_FLOAT_32_UNSIGNED_INT_24_8_REV)
|
||||
{
|
||||
verify(HERE), mem_info->image_size_in_bytes == (mem_info->image_size_in_texels * 8);
|
||||
ensure(mem_info->image_size_in_bytes == (mem_info->image_size_in_texels * 8));
|
||||
mem_info->memory_required = (mem_info->image_size_in_texels * 12);
|
||||
initialize_scratch_mem();
|
||||
|
||||
|
@ -104,7 +104,7 @@ namespace gl
|
||||
synchronized = false;
|
||||
sync_timestamp = 0ull;
|
||||
|
||||
verify(HERE), rsx_pitch;
|
||||
ensure(rsx_pitch);
|
||||
|
||||
this->rsx_pitch = rsx_pitch;
|
||||
this->width = w;
|
||||
@ -327,7 +327,7 @@ namespace gl
|
||||
|
||||
m_fence.wait_for_signal();
|
||||
|
||||
verify(HERE), (offset + size) <= pbo.size();
|
||||
ensure(offset + GLsizeiptr{size} <= pbo.size());
|
||||
pbo.bind(buffer::target::pixel_pack);
|
||||
|
||||
return glMapBufferRange(GL_PIXEL_PACK_BUFFER, offset, size, GL_MAP_READ_BIT);
|
||||
@ -352,15 +352,15 @@ namespace gl
case gl::texture::type::ubyte:
{
// byte swapping does not work on byte types, use uint_8_8_8_8 for rgba8 instead to avoid penalty
verify(HERE), !pack_unpack_swap_bytes;
ensure(!pack_unpack_swap_bytes);
break;
}
case gl::texture::type::uint_24_8:
{
// Swap bytes on D24S8 does not swap the whole dword, just shuffles the 3 bytes for D24
// In this regard, D24S8 is the same structure on both PC and PS3, but the endianness of the whole block is reversed on PS3
verify(HERE), pack_unpack_swap_bytes == false;
verify(HERE), real_pitch == (width * 4);
ensure(pack_unpack_swap_bytes == false);
ensure(real_pitch == (width * 4));
if (rsx_pitch == real_pitch) [[likely]]
{
stream_data_to_memory_swapped_u32<true>(dst, dst, valid_length / 4, 4);

@ -708,7 +708,7 @@ namespace gl
}
else
{
verify(HERE), dst_image->get_target() == gl::texture::target::texture2D;
ensure(dst_image->get_target() == gl::texture::target::texture2D);

auto _blitter = gl::g_hw_blitter;
const areai src_rect = { src_x, src_y, src_x + src_w, src_y + src_h };

@ -958,7 +958,7 @@ namespace gl
const auto swizzle = get_component_mapping(gcm_format, flags);
auto image = static_cast<gl::viewable_image*>(section.get_raw_texture());

verify(HERE), image != nullptr;
ensure(image);
image->set_native_component_layout(swizzle);

section.set_view_flags(flags);
@ -23,7 +23,7 @@ namespace
{
// This is an emulated buffer, so our indices only range from 0->original_vertex_array_length
const auto element_count = get_index_count(primitive_mode, vertex_count);
verify(HERE), !gl::is_primitive_native(primitive_mode);
ensure(!gl::is_primitive_native(primitive_mode));

auto mapping = dst.alloc_from_heap(element_count * sizeof(u16), 256);
auto mapped_buffer = static_cast<char*>(mapping.first);

@ -199,7 +199,7 @@ gl::vertex_upload_info GLGSRender::set_vertex_buffer()

if (auto cached = m_vertex_cache->find_vertex_range(storage_address, GL_R8UI, required.first))
{
verify(HERE), cached->local_address == storage_address;
ensure(cached->local_address == storage_address);

in_cache = true;
upload_info.persistent_mapping_offset = cached->offset_in_heap;

@ -224,7 +224,7 @@ gl::vertex_upload_info GLGSRender::set_vertex_buffer()

if (!m_persistent_stream_view.in_range(upload_info.persistent_mapping_offset, required.first, upload_info.persistent_mapping_offset))
{
verify(HERE), m_max_texbuffer_size < m_attrib_ring_buffer->size();
ensure(m_max_texbuffer_size < m_attrib_ring_buffer->size());
const size_t view_size = ((upload_info.persistent_mapping_offset + m_max_texbuffer_size) > m_attrib_ring_buffer->size()) ?
(m_attrib_ring_buffer->size() - upload_info.persistent_mapping_offset) : m_max_texbuffer_size;

@ -241,7 +241,7 @@ gl::vertex_upload_info GLGSRender::set_vertex_buffer()

if (!m_volatile_stream_view.in_range(upload_info.volatile_mapping_offset, required.second, upload_info.volatile_mapping_offset))
{
verify(HERE), m_max_texbuffer_size < m_attrib_ring_buffer->size();
ensure(m_max_texbuffer_size < m_attrib_ring_buffer->size());
const size_t view_size = ((upload_info.volatile_mapping_offset + m_max_texbuffer_size) > m_attrib_ring_buffer->size()) ?
(m_attrib_ring_buffer->size() - upload_info.volatile_mapping_offset) : m_max_texbuffer_size;
@ -103,7 +103,7 @@ namespace rsx
{
const auto row = (index / num_columns);
const auto col = (index % num_columns);
verify(HERE), row < num_rows && col < num_columns;
ensure(row < num_rows && col < num_columns);

auto& _cell = m_grid[index++];
_cell.button_flag = props.type_flags;

@ -185,11 +185,11 @@ namespace rsx
}
}

verify(HERE), num_shift_layers_by_charset.size();
ensure(num_shift_layers_by_charset.size());

for (u32 layer = 0; layer < num_shift_layers_by_charset.size(); ++layer)
{
verify(HERE), num_shift_layers_by_charset[layer];
ensure(num_shift_layers_by_charset[layer]);
}

// Reset to first shift layer in the first charset, because the panel changed and we don't know if the layers are similar between panels.

@ -346,7 +346,7 @@ namespace rsx
while (true)
{
const auto current_index = (start_index + count);
verify(HERE), current_index <= index_limit;
ensure(current_index <= index_limit);

if (m_grid[current_index].flags & border_flags::right)
{
@ -183,7 +183,7 @@ namespace rsx
return;
}

verify(HERE), !m_remaining_commands;
ensure(!m_remaining_commands);
const u32 count = (m_cmd >> 18) & 0x7ff;

if (!count)

@ -281,12 +281,13 @@ namespace rsx
else
{
// Not enabled, check if we should try enabling
verify(HERE), total_draw_count > 2000;
ensure(total_draw_count > 2000);
if (fifo_hint != load_unoptimizable)
{
// If its set to unoptimizable, we already tried and it did not work
// If it resets to load low (usually after some kind of loading screen) we can try again
verify("Incorrect initial state" HERE), begin_end_ctr == 0, num_collapsed == 0;
ensure(begin_end_ctr == 0); // "Incorrect initial state"
ensure(num_collapsed == 0);
enabled = true;
}
}
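Where the old verify() chained several conditions behind one message via the comma operator, the migration emits one ensure() per condition, which is why this hunk grows from 12 to 13 lines. A reconstruction of the comma-operator mechanism the old idiom relied on (a sketch, not the exact RPCS3 implementation):

#include <cstdio>
#include <cstdlib>

struct verify_impl
{
    const char* cause;

    template <typename T>
    const verify_impl& operator,(const T& value) const
    {
        if (!value)
        {
            // Every ", condition" appended to the statement re-enters this operator
            std::fprintf(stderr, "verification failed: %s\n", cause);
            std::abort();
        }
        return *this;
    }
};

inline verify_impl verify(const char* cause = "") { return { cause }; }

// Usage mirroring the old call shape: verify("Incorrect initial state"), a == 0, b == 0;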
@ -589,13 +590,13 @@ namespace rsx
case FIFO::EMIT_END:
{
// Emit end command to close existing scope
//verify(HERE), in_begin_end;
//ensure(in_begin_end);
methods[NV4097_SET_BEGIN_END](this, NV4097_SET_BEGIN_END, 0);
break;
}
case FIFO::EMIT_BARRIER:
{
//verify(HERE), in_begin_end;
//ensure(in_begin_end);
methods[NV4097_SET_BEGIN_END](this, NV4097_SET_BEGIN_END, 0);
methods[NV4097_SET_BEGIN_END](this, NV4097_SET_BEGIN_END, m_flattener.get_primitive());
break;

@ -317,7 +317,7 @@ struct RSXFragmentProgram

void clone_data() const
{
verify(HERE), ucode_length;
ensure(ucode_length);
data.deep_copy(ucode_length);
}
};
@ -6,7 +6,6 @@
#include "rsx_utils.h"

#include <thread>
#include <atomic>

namespace rsx
{

@ -139,7 +138,7 @@ namespace rsx
// Backend callback
void dma_manager::backend_ctrl(u32 request_code, void* args)
{
verify(HERE), g_cfg.video.multithreaded_rsx;
ensure(g_cfg.video.multithreaded_rsx);

g_fxo->get<dma_thread>()->m_enqueued_count++;
g_fxo->get<dma_thread>()->m_work_queue.push(request_code, args);

@ -192,20 +191,20 @@ namespace rsx

void dma_manager::set_mem_fault_flag()
{
verify("Access denied" HERE), is_current_thread();
ensure(is_current_thread()); // "Access denied"
m_mem_fault_flag.release(true);
}

void dma_manager::clear_mem_fault_flag()
{
verify("Access denied" HERE), is_current_thread();
ensure(is_current_thread()); // "Access denied"
m_mem_fault_flag.release(false);
}

// Fault recovery
utils::address_range dma_manager::get_fault_range(bool writing) const
{
const auto m_current_job = verify(HERE, g_fxo->get<dma_thread>()->m_current_job);
const auto m_current_job = (ensure(g_fxo->get<dma_thread>()->m_current_job));

void *address = nullptr;
u32 range = m_current_job->length;

@ -216,11 +215,11 @@ namespace rsx
address = (writing) ? m_current_job->dst : m_current_job->src;
break;
case vector_copy:
verify(HERE), writing;
ensure(writing);
address = m_current_job->dst;
break;
case index_emulate:
verify(HERE), writing;
ensure(writing);
address = m_current_job->dst;
range = get_index_count(static_cast<rsx::primitive_type>(m_current_job->aux_param0), m_current_job->length);
break;

@ -232,7 +231,7 @@ namespace rsx
const uintptr_t addr = uintptr_t(address);
const uintptr_t base = uintptr_t(vm::g_base_addr);

verify(HERE), addr > base;
ensure(addr > base);
return utils::address_range::start_length(u32(addr - base), range);
}
}
@ -75,7 +75,7 @@ namespace rsx
else
max_mipmap_count = floor_log2(static_cast<u32>(std::max(width(), height()))) + 1;

return std::min(verify(HERE, mipmap()), max_mipmap_count);
return std::min(ensure(mipmap()), max_mipmap_count);
}

rsx::texture_wrap_mode fragment_texture::wrap_s() const

@ -368,7 +368,7 @@ namespace rsx
u16 vertex_texture::get_exact_mipmap_count() const
{
const u16 max_mipmap_count = floor_log2(static_cast<u32>(std::max(width(), height()))) + 1;
return std::min(verify(HERE, mipmap()), max_mipmap_count);
return std::min(ensure(mipmap()), max_mipmap_count);
}

std::pair<std::array<u8, 4>, std::array<u8, 4>> vertex_texture::decoded_remap() const
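Since ensure(x) evaluates to x, it also slots into value positions such as the std::min() calls above, rejecting a zero mipmap count while passing the value through. A small sketch reusing the ensure() helper outlined earlier (clamp_mipmaps() and its arguments are illustrative, not from this codebase):

#include <algorithm>
#include <cstdint>

std::uint16_t clamp_mipmaps(std::uint16_t reported, std::uint16_t max_count)
{
    // reported == 0 aborts inside ensure(); otherwise it flows into std::min unchanged
    return std::min(ensure(reported), max_count);
}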
@ -185,7 +185,7 @@ namespace rsx
}
fmt::throw_exception("Wrong vector size" HERE);
case vertex_base_type::cmp: return 4;
case vertex_base_type::ub256: verify(HERE), (size == 4); return sizeof(u8) * 4;
case vertex_base_type::ub256: ensure(size == 4); return sizeof(u8) * 4;
default:
break;
}

@ -348,7 +348,7 @@ namespace rsx
{
// In this mode, it is possible to skip the cond render while the backend is still processing data.
// The backend guarantees that any draw calls emitted during this time will NOT generate any ROP writes
verify(HERE), !cond_render_ctrl.hw_cond_active;
ensure(!cond_render_ctrl.hw_cond_active);

// Pending evaluation, use hardware test
begin_conditional_rendering(cond_render_ctrl.eval_sources);

@ -357,7 +357,7 @@ namespace rsx
{
// NOTE: eval_sources list is reversed with newest query first
zcull_ctrl->read_barrier(this, cond_render_ctrl.eval_address, cond_render_ctrl.eval_sources.front());
verify(HERE), !cond_render_ctrl.eval_pending();
ensure(!cond_render_ctrl.eval_pending());
}
}
@ -1184,7 +1184,7 @@ namespace rsx
}
}

verify(HERE), layout.color_addresses[index];
ensure(layout.color_addresses[index]);

const auto packed_pitch = (layout.width * color_texel_size);
if (packed_render)

@ -1581,7 +1581,7 @@ namespace rsx
if (!(m_graphics_state & rsx::pipeline_state::vertex_program_dirty))
return;

verify(HERE), !(m_graphics_state & rsx::pipeline_state::vertex_program_ucode_dirty);
ensure(!(m_graphics_state & rsx::pipeline_state::vertex_program_ucode_dirty));
current_vertex_program.output_mask = rsx::method_registers.vertex_attrib_output_mask();

for (u32 textures_ref = current_vp_metadata.referenced_textures_mask, i = 0; textures_ref; textures_ref >>= 1, ++i)

@ -1767,7 +1767,7 @@ namespace rsx
if (!(m_graphics_state & rsx::pipeline_state::fragment_program_dirty))
return;

verify(HERE), !(m_graphics_state & rsx::pipeline_state::fragment_program_ucode_dirty);
ensure(!(m_graphics_state & rsx::pipeline_state::fragment_program_ucode_dirty));

m_graphics_state &= ~(rsx::pipeline_state::fragment_program_dirty);

@ -2457,7 +2457,7 @@ namespace rsx

//TODO: On sync every sub-unit should finish any pending tasks
//Might cause zcull lockup due to zombie 'unclaimed reports' which are not forcefully removed currently
//verify (HERE), async_tasks_pending.load() == 0;
//ensure(async_tasks_pending.load() == 0);
}

void thread::sync_hint(FIFO_hint /*hint*/, void* args)
@ -2875,7 +2875,7 @@ namespace rsx
{
// Frame was not queued before flipping
on_frame_end(buffer, true);
verify(HERE), m_queued_flip.pop(buffer);
ensure(m_queued_flip.pop(buffer));
}

double limit = 0.;

@ -2976,13 +2976,13 @@ namespace rsx

if (state)
{
verify(HERE), unit_enabled && m_current_task == nullptr;
ensure(unit_enabled && m_current_task == nullptr);
allocate_new_query(ptimer);
begin_occlusion_query(m_current_task);
}
else
{
verify(HERE), m_current_task;
ensure(m_current_task);
if (m_current_task->num_draws)
{
end_occlusion_query(m_current_task);
@ -3106,7 +3106,7 @@ namespace rsx
{
// Not the last one in the chain, forward the writing operation to the last writer
// Usually comes from truncated queries caused by disabling the testing
verify(HERE), It->query;
ensure(It->query);

It->forwarder = forwarder;
It->query->owned = true;

@ -3228,7 +3228,7 @@ namespace rsx

void ZCULL_control::write(vm::addr_t sink, u64 timestamp, u32 type, u32 value)
{
verify(HERE), sink;
ensure(sink);

switch (type)
{

@ -3323,7 +3323,7 @@ namespace rsx

if (query)
{
verify(HERE), query->pending;
ensure(query->pending);

const bool implemented = (writer.type == CELL_GCM_ZPASS_PIXEL_CNT || writer.type == CELL_GCM_ZCULL_STATS3);
if (implemented && !result && query->num_draws)
@ -3354,13 +3354,13 @@ namespace rsx

if (!has_unclaimed)
{
verify(HERE), processed == m_pending_writes.size();
ensure(processed == m_pending_writes.size());
m_pending_writes.clear();
}
else
{
auto remaining = m_pending_writes.size() - processed;
verify(HERE), remaining > 0;
ensure(remaining > 0);

if (remaining == 1)
{

@ -3414,7 +3414,7 @@ namespace rsx
if (It->query->num_draws && It->query->sync_tag > m_sync_tag)
{
ptimer->sync_hint(FIFO_hint::hint_zcull_sync, It->query);
verify(HERE), It->query->sync_tag <= m_sync_tag;
ensure(It->query->sync_tag <= m_sync_tag);
}

break;

@ -3439,7 +3439,7 @@ namespace rsx
if (elapsed > max_zcull_delay_us)
{
ptimer->sync_hint(FIFO_hint::hint_zcull_sync, front.query);
verify(HERE), front.query->sync_tag <= m_sync_tag;
ensure(front.query->sync_tag <= m_sync_tag);
}

return;

@ -3475,7 +3475,7 @@ namespace rsx

if (query)
{
verify(HERE), query->pending;
ensure(query->pending);

const bool implemented = (writer.type == CELL_GCM_ZPASS_PIXEL_CNT || writer.type == CELL_GCM_ZCULL_STATS3);
if (force_read)

@ -3612,7 +3612,7 @@ namespace rsx
if (query->sync_tag > m_sync_tag) [[unlikely]]
{
ptimer->sync_hint(FIFO_hint::hint_zcull_sync, query);
verify(HERE), m_sync_tag >= query->sync_tag;
ensure(m_sync_tag >= query->sync_tag);
}
}
@ -3733,7 +3733,7 @@ namespace rsx
{
if (hw_cond_active)
{
verify(HERE), enabled;
ensure(enabled);
pthr->end_conditional_rendering();
}

@ -3747,7 +3747,7 @@ namespace rsx
{
if (hw_cond_active)
{
verify(HERE), enabled;
ensure(enabled);
pthr->end_conditional_rendering();
}

@ -3765,7 +3765,7 @@ namespace rsx
{
if (hw_cond_active)
{
verify(HERE), enabled;
ensure(enabled);
pthr->end_conditional_rendering();
}

@ -286,7 +286,7 @@ namespace rsx
}
}

verify(HERE), _max_index >= _min_index;
ensure(_max_index >= _min_index);
return { _min_index, (_max_index - _min_index) + 1 };
}
};
@ -183,7 +183,7 @@ namespace vk
declare_inputs();
}

verify(HERE), m_used_descriptors < VK_MAX_COMPUTE_TASKS;
ensure(m_used_descriptors < VK_MAX_COMPUTE_TASKS);

VkDescriptorSetAllocateInfo alloc_info = {};
alloc_info.descriptorPool = m_descriptor_pool;

@ -351,7 +351,7 @@ namespace vk

void set_parameters(VkCommandBuffer cmd, const u32* params, u8 count)
{
verify(HERE), use_push_constants;
ensure(use_push_constants);
vkCmdPushConstants(cmd, m_pipeline_layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, count * 4, params);
}

@ -460,7 +460,7 @@ namespace vk
u32 parameters[4] = { data_length, zeta_offset - data_offset, stencil_offset - data_offset, 0 };
set_parameters(cmd, parameters, 4);

verify(HERE), stencil_offset > data_offset;
ensure(stencil_offset > data_offset);
m_ssbo_length = stencil_offset + (data_length / 4) - data_offset;
cs_shuffle_base::run(cmd, data, data_length, data_offset);
}

@ -751,7 +751,7 @@ namespace vk

cs_deswizzle_3d()
{
verify("Unsupported block type" HERE), (sizeof(_BlockType) & 3) == 0;
ensure((sizeof(_BlockType) & 3) == 0); // "Unsupported block type"

ssbo_count = 2;
use_push_constants = true;
@ -24,7 +24,7 @@ namespace vk
return inheritance_info.parent->map_range(range);
}

verify(HERE), range.start >= base_address;
ensure(range.start >= base_address);
u32 start = range.start;
start -= base_address;
return allocated_memory->map(start, range.length());

@ -44,7 +44,8 @@ namespace vk

void dma_block::init(const render_device& dev, u32 addr, size_t size)
{
verify(HERE), size, !(size % s_dma_block_length);
ensure(size);
ensure(!(size % s_dma_block_length));
base_address = addr;

allocated_memory = std::make_unique<vk::buffer>(dev, size,

@ -113,7 +114,7 @@ namespace vk
if (!inheritance_info.parent)
{
auto bit_offset = page_offset / s_bytes_per_entry;
verify(HERE), (bit_offset + bits.size()) <= page_info.size();
ensure(bit_offset + bits.size() <= page_info.size());
std::memcpy(page_info.data() + bit_offset, bits.data(), bits.size());
}
else

@ -149,7 +150,8 @@ namespace vk
return inheritance_info.parent->get(range);
}

verify(HERE), range.start >= base_address, range.end <= end();
ensure(range.start >= base_address);
ensure(range.end <= end());

// mark_dirty(range);
return { (range.start - base_address), allocated_memory.get() };

@ -173,7 +175,7 @@ namespace vk

void dma_block::set_parent(command_buffer& cmd, dma_block* parent)
{
verify(HERE), parent;
ensure(parent);
if (inheritance_info.parent == parent)
{
// Nothing to do

@ -201,7 +203,7 @@ namespace vk

void dma_block::extend(command_buffer& cmd, const render_device &dev, size_t new_size)
{
verify(HERE), allocated_memory;
ensure(allocated_memory);
if (new_size <= allocated_memory->size())
return;

@ -308,7 +310,7 @@ namespace vk
}
}

verify(HERE), block_head;
ensure(block_head);
return block_head->get(map_range);
}
@ -387,15 +387,15 @@ void VKGSRender::bind_texture_env()
//case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
break;
case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
verify(HERE), sampler_state->upload_context == rsx::texture_upload_context::blit_engine_dst;
ensure(sampler_state->upload_context == rsx::texture_upload_context::blit_engine_dst);
raw->change_layout(*m_current_command_buffer, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
break;
case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
verify(HERE), sampler_state->upload_context == rsx::texture_upload_context::blit_engine_src;
ensure(sampler_state->upload_context == rsx::texture_upload_context::blit_engine_src);
raw->change_layout(*m_current_command_buffer, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
break;
case VK_IMAGE_LAYOUT_GENERAL:
verify(HERE), sampler_state->upload_context == rsx::texture_upload_context::framebuffer_storage;
ensure(sampler_state->upload_context == rsx::texture_upload_context::framebuffer_storage);
if (!sampler_state->is_cyclic_reference)
{
// This was used in a cyclic ref before, but is missing a barrier

@ -426,7 +426,7 @@ void VKGSRender::bind_texture_env()
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
verify(HERE), sampler_state->upload_context == rsx::texture_upload_context::framebuffer_storage;
ensure(sampler_state->upload_context == rsx::texture_upload_context::framebuffer_storage);
raw->change_layout(*m_current_command_buffer, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
break;
}

@ -527,15 +527,15 @@ void VKGSRender::bind_texture_env()
//case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
break;
case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
verify(HERE), sampler_state->upload_context == rsx::texture_upload_context::blit_engine_dst;
ensure(sampler_state->upload_context == rsx::texture_upload_context::blit_engine_dst);
raw->change_layout(*m_current_command_buffer, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
break;
case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
verify(HERE), sampler_state->upload_context == rsx::texture_upload_context::blit_engine_src;
ensure(sampler_state->upload_context == rsx::texture_upload_context::blit_engine_src);
raw->change_layout(*m_current_command_buffer, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
break;
case VK_IMAGE_LAYOUT_GENERAL:
verify(HERE), sampler_state->upload_context == rsx::texture_upload_context::framebuffer_storage;
ensure(sampler_state->upload_context == rsx::texture_upload_context::framebuffer_storage);
if (!sampler_state->is_cyclic_reference)
{
// Custom barrier, see similar block in FS stage

@ -565,7 +565,7 @@ void VKGSRender::bind_texture_env()
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
verify(HERE), sampler_state->upload_context == rsx::texture_upload_context::framebuffer_storage;
ensure(sampler_state->upload_context == rsx::texture_upload_context::framebuffer_storage);
raw->change_layout(*m_current_command_buffer, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
break;
}

@ -635,15 +635,15 @@ void VKGSRender::bind_interpreter_texture_env()
//case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
break;
case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
verify(HERE), sampler_state->upload_context == rsx::texture_upload_context::blit_engine_dst;
ensure(sampler_state->upload_context == rsx::texture_upload_context::blit_engine_dst);
raw->change_layout(*m_current_command_buffer, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
break;
case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
verify(HERE), sampler_state->upload_context == rsx::texture_upload_context::blit_engine_src;
ensure(sampler_state->upload_context == rsx::texture_upload_context::blit_engine_src);
raw->change_layout(*m_current_command_buffer, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
break;
case VK_IMAGE_LAYOUT_GENERAL:
verify(HERE), sampler_state->upload_context == rsx::texture_upload_context::framebuffer_storage;
ensure(sampler_state->upload_context == rsx::texture_upload_context::framebuffer_storage);
if (!sampler_state->is_cyclic_reference)
{
// This was used in a cyclic ref before, but is missing a barrier

@ -674,7 +674,8 @@ void VKGSRender::bind_interpreter_texture_env()
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
verify(HERE), sampler_state->upload_context == rsx::texture_upload_context::framebuffer_storage, !sampler_state->is_cyclic_reference;
ensure(sampler_state->upload_context == rsx::texture_upload_context::framebuffer_storage);
ensure(!sampler_state->is_cyclic_reference);
raw->change_layout(*m_current_command_buffer, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
break;
}
@ -794,7 +795,7 @@ void VKGSRender::emit_geometry(u32 sub_index)
// Update vertex fetch parameters
update_vertex_env(sub_index, upload_info);

verify(HERE), m_vertex_layout_storage;
ensure(m_vertex_layout_storage);
if (update_descriptors)
{
m_program->bind_uniform(persistent_buffer, binding_table.vertex_buffers_first_bind_slot, m_current_frame->descriptor_set);

@ -910,7 +911,7 @@ void VKGSRender::end()
m_current_frame->used_descriptors = 0;
}

verify(HERE), !m_current_frame->swap_command_buffer;
ensure(!m_current_frame->swap_command_buffer);

m_current_frame->flags &= ~frame_context_state::dirty;
}
@ -164,7 +164,7 @@ void VKFragmentDecompilerThread::insertConstants(std::stringstream & OS)
}
}

verify("Too many sampler descriptors!" HERE), location <= m_binding_table.vertex_textures_first_bind_slot;
ensure(location <= m_binding_table.vertex_textures_first_bind_slot); // "Too many sampler descriptors!"

std::string constants_block;
for (const ParamType& PT : m_parr.params[PF_PARAM_UNIFORM])
@ -275,7 +275,7 @@ namespace
idx++;
}

verify(HERE), idx == binding_table.total_descriptor_bindings;
ensure(idx == binding_table.total_descriptor_bindings);

std::array<VkPushConstantRange, 1> push_constants;
push_constants[0].offset = 0;

@ -671,7 +671,7 @@ bool VKGSRender::on_access_violation(u32 address, bool is_writing)
if (g_fxo->get<rsx::dma_manager>()->is_current_thread())
{
// The offloader thread cannot handle flush requests
verify(HERE), !(m_queue_status & flush_queue_state::deadlock);
ensure(!(m_queue_status & flush_queue_state::deadlock));

m_offloader_fault_range = g_fxo->get<rsx::dma_manager>()->get_fault_range(is_writing);
m_offloader_fault_cause = (is_writing) ? rsx::invalidation_cause::write : rsx::invalidation_cause::read;

@ -794,7 +794,7 @@ void VKGSRender::notify_tile_unbound(u32 tile)

void VKGSRender::check_heap_status(u32 flags)
{
verify(HERE), flags;
ensure(flags);

bool heap_critical;
if (flags == VK_HEAP_CHECK_ALL)

@ -917,7 +917,7 @@ void VKGSRender::check_descriptors()
{
// Ease resource pressure if the number of draw calls becomes too high or we are running low on memory resources
const auto required_descriptors = rsx::method_registers.current_draw_clause.pass_count();
verify(HERE), required_descriptors < DESCRIPTOR_MAX_DRAW_CALLS;
ensure(required_descriptors < DESCRIPTOR_MAX_DRAW_CALLS);
if ((required_descriptors + m_current_frame->used_descriptors) > DESCRIPTOR_MAX_DRAW_CALLS)
{
// Should hard sync before resetting descriptors for spec compliance

@ -932,7 +932,7 @@ VkDescriptorSet VKGSRender::allocate_descriptor_set()
{
if (!m_shader_interpreter.is_interpreter(m_program)) [[likely]]
{
verify(HERE), m_current_frame->used_descriptors < DESCRIPTOR_MAX_DRAW_CALLS;
ensure(m_current_frame->used_descriptors < DESCRIPTOR_MAX_DRAW_CALLS);

VkDescriptorSetAllocateInfo alloc_info = {};
alloc_info.descriptorPool = m_current_frame->descriptor_pool;

@ -1113,7 +1113,7 @@ void VKGSRender::clear_surface(u32 mask)

if ((mask & 0x3) != 0x3 && !require_mem_load && ds->state_flags & rsx::surface_state_flags::erase_bkgnd)
{
verify(HERE), depth_stencil_mask;
ensure(depth_stencil_mask);

if (!g_cfg.video.read_depth_buffer)
{

@ -1349,7 +1349,7 @@ void VKGSRender::flush_command_queue(bool hard_sync)

void VKGSRender::sync_hint(rsx::FIFO_hint hint, void* args)
{
verify(HERE), args;
ensure(args);
rsx::thread::sync_hint(hint, args);

// Occlusion queries not enabled, do nothing

@ -1470,7 +1470,7 @@ bool VKGSRender::load_program()
if (m_graphics_state & rsx::pipeline_state::invalidate_pipeline_bits)
{
get_current_fragment_program(fs_sampler_state);
verify(HERE), current_fragment_program.valid;
ensure(current_fragment_program.valid);

get_current_vertex_program(vs_sampler_state);
@ -1871,7 +1871,7 @@ void VKGSRender::update_vertex_env(u32 id, const vk::vertex_upload_info& vertex_

if (!m_vertex_layout_storage || !m_vertex_layout_storage->in_range(offset32, range32, base_offset))
{
verify("Incompatible driver (MacOS?)" HERE), m_texbuffer_view_size >= m_vertex_layout_stream_info.range;
ensure(m_texbuffer_view_size >= m_vertex_layout_stream_info.range);

if (m_vertex_layout_storage)
m_current_frame->buffer_views_to_clean.push_back(std::move(m_vertex_layout_storage));

@ -1914,7 +1914,7 @@ void VKGSRender::init_buffers(rsx::framebuffer_creation_context context, bool)

void VKGSRender::close_and_submit_command_buffer(vk::fence* pFence, VkSemaphore wait_semaphore, VkSemaphore signal_semaphore, VkPipelineStageFlags pipeline_stage_flags)
{
verify("Recursive calls to submit the current commandbuffer will cause a deadlock" HERE), !m_queue_status.test_and_set(flush_queue_state::flushing);
ensure(!m_queue_status.test_and_set(flush_queue_state::flushing));

// Workaround for deadlock occuring during RSX offloader fault
// TODO: Restructure command submission infrastructure to avoid this condition

@ -1960,7 +1960,7 @@ void VKGSRender::close_and_submit_command_buffer(vk::fence* pFence, VkSemaphore
#if 0 // Currently unreachable
if (m_current_command_buffer->flags & vk::command_buffer::cb_has_conditional_render)
{
verify(HERE), m_render_pass_open;
ensure(m_render_pass_open);
m_device->cmdEndConditionalRenderingEXT(*m_current_command_buffer);
}
#endif

@ -1987,7 +1987,7 @@ void VKGSRender::close_and_submit_command_buffer(vk::fence* pFence, VkSemaphore

if (force_flush)
{
verify(HERE), m_current_command_buffer->submit_fence->flushed;
ensure(m_current_command_buffer->submit_fence->flushed);
}

m_queue_status.clear(flush_queue_state::flushing);
@ -2087,7 +2087,7 @@ void VKGSRender::prepare_rtts(rsx::framebuffer_creation_context context)

m_surface_info[index].address = m_framebuffer_layout.color_addresses[index];
m_surface_info[index].pitch = m_framebuffer_layout.actual_color_pitch[index];
verify("Pitch mismatch!" HERE), surface->rsx_pitch == m_framebuffer_layout.actual_color_pitch[index];
ensure(surface->rsx_pitch == m_framebuffer_layout.actual_color_pitch[index]);

m_texture_cache.notify_surface_changed(m_surface_info[index].get_memory_range(m_framebuffer_layout.aa_factors));
m_draw_buffers.push_back(index);

@ -2101,7 +2101,7 @@ void VKGSRender::prepare_rtts(rsx::framebuffer_creation_context context)

m_depth_surface_info.address = m_framebuffer_layout.zeta_address;
m_depth_surface_info.pitch = m_framebuffer_layout.actual_zeta_pitch;
verify("Pitch mismatch!" HERE), ds->rsx_pitch == m_framebuffer_layout.actual_zeta_pitch;
ensure(ds->rsx_pitch == m_framebuffer_layout.actual_zeta_pitch);

m_texture_cache.notify_surface_changed(m_depth_surface_info.get_memory_range(m_framebuffer_layout.aa_factors));
}

@ -2258,7 +2258,7 @@ bool VKGSRender::scaled_image_from_memory(rsx::blit_src_info& src, rsx::blit_dst

void VKGSRender::begin_occlusion_query(rsx::reports::occlusion_query_info* query)
{
verify(HERE), !m_occlusion_query_active;
ensure(!m_occlusion_query_active);

query->result = 0;
//query->sync_timestamp = get_system_time();

@ -2269,7 +2269,7 @@ void VKGSRender::begin_occlusion_query(rsx::reports::occlusion_query_info* query

void VKGSRender::end_occlusion_query(rsx::reports::occlusion_query_info* query)
{
verify(HERE), query == m_active_query_info;
ensure(query == m_active_query_info);

// NOTE: flushing the queue is very expensive, do not flush just because query stopped
if (m_current_command_buffer->flags & vk::command_buffer::cb_has_open_query)

@ -2360,7 +2360,7 @@ void VKGSRender::discard_occlusion_query(rsx::reports::occlusion_query_info* que

void VKGSRender::emergency_query_cleanup(vk::command_buffer* commands)
{
verify("Command list mismatch" HERE), commands == static_cast<vk::command_buffer*>(m_current_command_buffer);
ensure(commands == static_cast<vk::command_buffer*>(m_current_command_buffer));

if (m_current_command_buffer->flags & vk::command_buffer::cb_has_open_query)
{

@ -2372,7 +2372,7 @@ void VKGSRender::emergency_query_cleanup(vk::command_buffer* commands)

void VKGSRender::begin_conditional_rendering(const std::vector<rsx::reports::occlusion_query_info*>& sources)
{
verify(HERE), !sources.empty();
ensure(!sources.empty());

// Flag check whether to calculate all entries or only one
bool partial_eval;

@ -2474,7 +2474,7 @@ void VKGSRender::begin_conditional_rendering(const std::vector<rsx::reports::occ
if (dst_offset)
{
// Fast path should have been caught above
verify(HERE), dst_offset > 4;
ensure(dst_offset > 4);

if (!partial_eval)
{
@ -468,7 +468,7 @@ namespace vk

vk::mem_allocator_base* get_current_mem_allocator()
{
verify (HERE, g_current_renderer);
ensure(g_current_renderer);
return g_current_renderer->get_allocator();
}

@ -919,7 +919,7 @@ namespace vk

void advance_frame_counter()
{
verify(HERE), g_num_processed_frames <= g_num_total_frames;
ensure(g_num_processed_frames <= g_num_total_frames);
g_num_total_frames++;
}

@ -1011,7 +1011,7 @@ namespace vk
void do_query_cleanup(vk::command_buffer& cmd)
{
auto renderer = dynamic_cast<VKGSRender*>(rsx::get_current_renderer());
verify(HERE), renderer;
ensure(renderer);

renderer->emergency_query_cleanup(&cmd);
}
@ -601,7 +601,7 @@ namespace vk
}
else
{
verify(HERE), pdev;
ensure(pdev);
if (vkEnumerateDeviceExtensionProperties(pdev, layer_name, &count, nullptr) != VK_SUCCESS)
return;
}

@ -680,7 +680,7 @@ private:
}

auto getPhysicalDeviceFeatures2KHR = reinterpret_cast<PFN_vkGetPhysicalDeviceFeatures2KHR>(vkGetInstanceProcAddr(parent, "vkGetPhysicalDeviceFeatures2KHR"));
verify("vkGetInstanceProcAddress failed to find entry point!" HERE), getPhysicalDeviceFeatures2KHR;
ensure(getPhysicalDeviceFeatures2KHR); // "vkGetInstanceProcAddress failed to find entry point!"
getPhysicalDeviceFeatures2KHR(dev, &features2);

shader_types_support.allow_float64 = !!features2.features.shaderFloat64;

@ -1372,7 +1372,7 @@ private:
}

// Check for hanging queries to avoid driver hang
verify("close and submit of commandbuffer with a hanging query!" HERE), (flags & cb_has_open_query) == 0;
ensure((flags & cb_has_open_query) == 0); // "close and submit of commandbuffer with a hanging query!"

if (!pfence)
{

@ -1547,7 +1547,7 @@ private:

void pop_layout(VkCommandBuffer cmd)
{
verify(HERE), !m_layout_stack.empty();
ensure(!m_layout_stack.empty());

auto layout = m_layout_stack.top();
m_layout_stack.pop();

@ -1559,7 +1559,7 @@ private:
if (current_layout == new_layout)
return;

verify(HERE), m_layout_stack.empty();
ensure(m_layout_stack.empty());
change_image_layout(cmd, this, new_layout);
}

@ -1736,7 +1736,7 @@ private:

const auto range = vk::get_image_subresource_range(0, 0, info.arrayLayers, info.mipLevels, aspect() & mask);

verify(HERE), range.aspectMask;
ensure(range.aspectMask);
auto view = std::make_unique<vk::image_view>(*get_current_renderer(), this, VK_IMAGE_VIEW_TYPE_MAX_ENUM, real_mapping, range);

auto result = view.get();

@ -3190,7 +3190,7 @@ public:

void create(const vk::render_device &dev, VkDescriptorPoolSize *sizes, u32 size_descriptors_count, u32 max_sets, u8 subpool_count)
{
verify(HERE), subpool_count;
ensure(subpool_count);

VkDescriptorPoolCreateInfo infos = {};
infos.flags = 0;

@ -3542,7 +3542,7 @@ public:

VkShaderModule compile()
{
verify(HERE), m_handle == VK_NULL_HANDLE;
ensure(m_handle == VK_NULL_HANDLE);

if (!vk::compile_glsl_to_spv(m_source, type, m_compiled))
{

@ -3737,7 +3737,8 @@ public:
{
if (!dirty_ranges.empty())
{
verify (HERE), shadow, heap;
ensure(shadow);
ensure(heap);
vkCmdCopyBuffer(cmd, shadow->value, heap->value, ::size32(dirty_ranges), dirty_ranges.data());
dirty_ranges.clear();
@ -270,7 +270,7 @@ namespace vk
else
program = build_pipeline(key, pass);

verify(HERE), m_used_descriptors < VK_OVERLAY_MAX_DRAW_CALLS;
ensure(m_used_descriptors < VK_OVERLAY_MAX_DRAW_CALLS);

VkDescriptorSetAllocateInfo alloc_info = {};
alloc_info.descriptorPool = m_descriptor_pool;
@ -108,7 +108,7 @@ namespace vk
vp.scissorCount = 1;

VkPipelineMultisampleStateCreateInfo ms = create_info.state.ms;
verify("Multisample state mismatch!" HERE), ms.rasterizationSamples == VkSampleCountFlagBits((create_info.renderpass_key >> 16) & 0xF);
ensure(ms.rasterizationSamples == VkSampleCountFlagBits((create_info.renderpass_key >> 16) & 0xF)); // "Multisample state mismatch!"
if (ms.rasterizationSamples != VK_SAMPLE_COUNT_1_BIT)
{
// Update the sample mask pointer

@ -160,7 +160,7 @@ namespace vk
const std::vector<glsl::program_input>& vs_inputs, const std::vector<glsl::program_input>& fs_inputs)
{
// It is very inefficient to defer this as all pointers need to be saved
verify(HERE), flags == COMPILE_INLINE;
ensure(flags == COMPILE_INLINE);
return int_compile_graphics_pipe(create_info, pipe_layout, vs_inputs, fs_inputs);
}

@ -204,10 +204,10 @@ namespace vk
}
}

verify(HERE), num_worker_threads >= 1;
ensure(num_worker_threads >= 1);

const vk::render_device* dev = vk::get_current_renderer();
verify("Cannot initialize pipe compiler before creating a logical device" HERE), dev;
ensure(dev); // "Cannot initialize pipe compiler before creating a logical device"

// Create the thread pool
g_pipe_compilers = std::make_unique<named_thread_group<pipe_compiler>>("RSX.W", num_worker_threads);

@ -227,7 +227,7 @@ namespace vk

pipe_compiler* get_pipe_compiler()
{
verify(HERE), g_pipe_compilers;
ensure(g_pipe_compilers);
int thread_index = g_compiler_index++;

return g_pipe_compilers.get()->begin() + (thread_index % g_num_pipe_compilers);
Some files were not shown because too many files have changed in this diff