1
0
mirror of https://github.com/RPCS3/rpcs3.git synced 2024-11-22 02:32:36 +01:00

Savestates Support For PS3 Emulation (#10478)

This commit is contained in:
Elad Ashkenazi 2022-07-04 16:02:17 +03:00 committed by GitHub
parent 969b9eb89d
commit fcd297ffb2
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
154 changed files with 4948 additions and 635 deletions

View File

@ -71,6 +71,10 @@ namespace fs
s64 atime;
s64 mtime;
s64 ctime;
using enable_bitcopy = std::true_type;
constexpr bool operator==(const stat_t&) const = default;
};
// Helper, layout is equal to iovec struct
@ -105,6 +109,8 @@ namespace fs
: stat_t{}
{
}
using enable_bitcopy = std::false_type;
};
// Directory handle base
@ -681,9 +687,10 @@ namespace fs
T obj;
u64 pos;
container_stream(T&& obj)
container_stream(T&& obj, const stat_t& init_stat = {})
: obj(std::forward<T>(obj))
, pos(0)
, m_stat(init_stat)
{
}
@ -694,6 +701,7 @@ namespace fs
bool trunc(u64 length) override
{
obj.resize(length);
update_time(true);
return true;
}
@ -708,6 +716,7 @@ namespace fs
{
std::copy(obj.cbegin() + pos, obj.cbegin() + pos + max, static_cast<value_type*>(buffer));
pos = pos + max;
update_time();
return max;
}
}
@ -743,6 +752,7 @@ namespace fs
obj.insert(obj.end(), src + overlap, src + size);
pos += size;
if (size) update_time(true);
return size;
}
@ -767,13 +777,33 @@ namespace fs
{
return obj.size();
}
// fs::file override: report this in-memory stream's synthetic stat record
// (timestamps are maintained by update_time(), not by any real filesystem).
stat_t stat() override
{
return m_stat;
}
private:
stat_t m_stat{};
// Advance the stream's synthetic timestamps.
// atime is bumped on every access; when 'write' is set, mtime is advanced
// so it never trails atime, and ctime mirrors mtime.
// TODO: Accurate timestamps
void update_time(bool write = false)
{
	m_stat.atime++;

	if (write)
	{
		// Equivalent to the former "std::max(m_stat.atime, ++m_stat.mtime)"
		// but without mutating mtime inside the call argument and then
		// assigning through std::max's reference return.
		m_stat.mtime = std::max(m_stat.atime, m_stat.mtime + 1);
		m_stat.ctime = m_stat.mtime;
	}
}
};
template <typename T>
file make_stream(T&& container = T{})
file make_stream(T&& container = T{}, const stat_t& stat = stat_t{})
{
file result;
result.reset(std::make_unique<container_stream<T>>(std::forward<T>(container)));
result.reset(std::make_unique<container_stream<T>>(std::forward<T>(container), stat));
return result;
}

View File

@ -26,11 +26,17 @@ struct loaded_npdrm_keys
}
// TODO: Check if correct for ELF files usage
u128 last_key() const
u128 last_key(usz backwards = 0) const
{
backwards++;
const usz pos = dec_keys_pos;
return pos ? dec_keys[(pos - 1) % std::size(dec_keys)].load() : u128{};
return pos >= backwards ? dec_keys[(pos - backwards) % std::size(dec_keys)].load() : u128{};
}
SAVESTATE_INIT_POS(2);
loaded_npdrm_keys() = default;
loaded_npdrm_keys(utils::serial& ar);
void save(utils::serial& ar);
};
struct NPD_HEADER

View File

@ -49,6 +49,7 @@ void fmt_class_string<cpu_flag>::format(std::string& out, u64 arg)
case cpu_flag::pause: return "p";
case cpu_flag::suspend: return "s";
case cpu_flag::ret: return "ret";
case cpu_flag::again: return "a";
case cpu_flag::signal: return "sig";
case cpu_flag::memory: return "mem";
case cpu_flag::pending: return "pend";
@ -720,7 +721,7 @@ bool cpu_thread::check_state() noexcept
}
// Atomically clean wait flag and escape
if (!(flags & (cpu_flag::exit + cpu_flag::ret + cpu_flag::stop)))
if (!is_stopped(flags) && flags.none_of(cpu_flag::ret))
{
// Check pause flags which hold thread inside check_state (ignore suspend/debug flags on cpu_flag::temp)
if (flags & (cpu_flag::pause + cpu_flag::memory) || (cpu_can_stop && flags & (cpu_flag::dbg_global_pause + cpu_flag::dbg_pause + cpu_flag::suspend)))

View File

@ -19,6 +19,7 @@ enum class cpu_flag : u32
pause, // Thread suspended by suspend_all technique
suspend, // Thread suspended
ret, // Callback return requested
again, // Thread must complete the syscall after deserialization
signal, // Thread received a signal (HLE)
memory, // Thread must unlock memory mutex
pending, // Thread has postponed work
@ -34,7 +35,7 @@ enum class cpu_flag : u32
// Test stopped state
constexpr bool is_stopped(bs_t<cpu_flag> state)
{
return !!(state & (cpu_flag::stop + cpu_flag::exit));
return !!(state & (cpu_flag::stop + cpu_flag::exit + cpu_flag::again));
}
// Test paused state

View File

@ -88,6 +88,8 @@ enum : u32
struct alignas(16) spu_mfc_cmd
{
ENABLE_BITWISE_SERIALIZATION;
MFC cmd;
u8 tag;
u16 size;

View File

@ -342,6 +342,52 @@ void audio_port::tag(s32 offset)
prev_touched_tag_nr = -1;
}
// Savestate constructor: restores cellAudio state from the serialization
// stream. Delegates to the default constructor first so normal member
// initialization runs before deserialization.
cell_audio_thread::cell_audio_thread(utils::serial& ar)
: cell_audio_thread()
{
ar(init);
// Module was not initialized at save time: nothing else was serialized
if (!init)
{
return;
}
ar(key_count, event_period);
// save() stores keys.size() first; resize(ar) presumably consumes that
// count from the stream — see the matching write in save()
keys.resize(ar);
for (key_info& k : keys)
{
ar(k.start_period, k.flags, k.source);
// Event queues are serialized by ID; resolve back to the live object
k.port = lv2_event_queue::load_ptr(ar, k.port);
}
ar(ports);
}
// Serialize cellAudio state into a savestate. Field order must stay in sync
// with the deserializing constructor.
void cell_audio_thread::save(utils::serial& ar)
{
ar(init);
if (!init)
{
// Not initialized: only the init flag is stored
return;
}
USING_SERIALIZATION_VERSION(cellAudio);
ar(key_count, event_period);
// Element count first, so loading can size the container before reading
ar(keys.size());
for (const key_info& k : keys)
{
ar(k.start_period, k.flags, k.source);
// Store the event queue by ID rather than by raw pointer
lv2_event_queue::save_ptr(ar, k.port.get());
}
ar(ports);
}
std::tuple<u32, u32, u32, u32> cell_audio_thread::count_port_buffer_tags()
{
AUDIT(cfg.buffering_enabled);
@ -615,6 +661,11 @@ void cell_audio_thread::operator()()
thread_ctrl::scoped_priority high_prio(+1);
while (Emu.IsPaused())
{
thread_ctrl::wait_for(5000);
}
u32 untouched_expected = 0;
// Main cellAudio loop

View File

@ -189,6 +189,16 @@ struct audio_port
f32 last_tag_value[PORT_BUFFER_TAG_COUNT] = { 0 };
void tag(s32 offset = 0);
audio_port() = default;
// Handle copy ctor of atomic var
audio_port(const audio_port& r)
{
std::memcpy(this, &r, sizeof(r));
}
ENABLE_BITWISE_SERIALIZATION;
};
struct cell_audio_config
@ -366,7 +376,7 @@ public:
atomic_t<audio_backend_update> m_update_configuration = audio_backend_update::NONE;
shared_mutex mutex{};
atomic_t<u32> init = 0;
atomic_t<u8> init = 0;
u32 key_count = 0;
u8 event_period = 0;
@ -390,10 +400,14 @@ public:
bool m_backend_failed = false;
bool m_audio_should_restart = false;
cell_audio_thread();
void operator()();
SAVESTATE_INIT_POS(9);
cell_audio_thread();
cell_audio_thread(utils::serial& ar);
void save(utils::serial& ar);
audio_port* open_port();
static constexpr auto thread_name = "cellAudio Thread"sv;

View File

@ -131,6 +131,25 @@ static const char* get_camera_attr_name(s32 value)
return nullptr;
}
// Savestate constructor: utils::serial appears to be bidirectional (save()
// branches on ar.is_writing()), so the same save() routine performs the load.
camera_context::camera_context(utils::serial& ar)
{
save(ar);
}
// Serialize or deserialize cellCamera state (direction decided by ar).
void camera_context::save(utils::serial& ar)
{
ar(init);
// Not initialized: nothing beyond the init flag is transferred
if (!init)
{
return;
}
// The version tag is only emitted on the write path
USING_SERIALIZATION_VERSION_COND(ar.is_writing(), cellCamera);
ar(notify_data_map, start_timestamp, read_mode, is_streaming, is_attached, is_open, info, attr, frame_num);
}
static bool check_dev_num(s32 dev_num)
{
return dev_num == 0;

View File

@ -375,6 +375,8 @@ struct CellCameraInfoEx
be_t<u32> container;
be_t<s32> read_mode;
vm::bptr<u8> pbuf[2];
ENABLE_BITWISE_SERIALIZATION;
};
struct CellCameraReadEx
@ -392,6 +394,8 @@ class camera_context
{
u64 source;
u64 flag;
ENABLE_BITWISE_SERIALIZATION;
};
public:
@ -433,14 +437,23 @@ public:
struct attr_t
{
u32 v1, v2;
ENABLE_BITWISE_SERIALIZATION;
};
attr_t attr[500]{};
atomic_t<bool> has_new_frame = false;
atomic_t<u32> frame_num = 0;
atomic_t<u32> frame_timestamp = 0;
atomic_t<u32> bytes_read = 0;
atomic_t<u32> init = 0;
atomic_t<u8> init = 0;
SAVESTATE_INIT_POS(16);
camera_context() = default;
camera_context(utils::serial& ar);
void save(utils::serial& ar);
static constexpr auto thread_name = "Camera Thread"sv;

View File

@ -165,6 +165,7 @@ public:
static const u32 id_base = 1;
static const u32 id_step = 1;
static const u32 id_count = 1023;
SAVESTATE_INIT_POS(34);
ElementaryStream(Demuxer* dmux, u32 addr, u32 size, u32 fidMajor, u32 fidMinor, u32 sup1, u32 sup2, vm::ptr<CellDmuxCbEsMsg> cbFunc, u32 cbArg, u32 spec);

View File

@ -94,7 +94,7 @@ public:
static constexpr auto thread_name = "Gem Thread"sv;
atomic_t<u32> state = 0;
atomic_t<u8> state = 0;
struct gem_color
{
@ -131,6 +131,8 @@ public:
u64 calibration_start_us{0}; // The start timestamp of the calibration in microseconds
static constexpr u64 calibration_time_us = 500000; // The calibration supposedly takes 0.5 seconds (500000 microseconds)
ENABLE_BITWISE_SERIALIZATION;
};
CellGemAttribute attribute = {};
@ -202,6 +204,30 @@ public:
controllers[gem_num].port = 7u - gem_num;
}
}
gem_config_data() = default;
SAVESTATE_INIT_POS(15);
// Serialize or deserialize cellGem state (utils::serial is used for both
// directions; the ctor below simply calls this with a reading archive).
void save(utils::serial& ar)
{
ar(state);
// Not initialized: only the state flag is transferred
if (!state)
{
return;
}
// Version tag is only emitted when writing
USING_SERIALIZATION_VERSION_COND(ar.is_writing(), cellGem);
ar(attribute, vc_attribute, status_flags, enable_pitch_correction, inertial_counter, controllers
, connected_controllers, update_started, camera_frame, memory_ptr, start_timestamp);
}
// Savestate constructor: save() doubles as the loader since the archive
// knows its own direction.
gem_config_data(utils::serial& ar)
{
save(ar);
}
};
static inline int32_t cellGemGetVideoConvertSize(s32 output_format)
@ -854,7 +880,7 @@ error_code cellGemEnd(ppu_thread& ppu)
if (gem.state.compare_and_swap_test(1, 0))
{
if (u32 addr = gem.memory_ptr)
if (u32 addr = std::exchange(gem.memory_ptr, 0))
{
sys_memory_free(ppu, addr);
}

View File

@ -171,6 +171,8 @@ struct CellGemAttribute
be_t<u32> memory_ptr;
be_t<u32> spurs_addr;
u8 spu_priorities[8];
ENABLE_BITWISE_SERIALIZATION;
};
struct CellGemCameraState
@ -180,6 +182,8 @@ struct CellGemCameraState
be_t<f32> gain;
be_t<f32> pitch_angle;
be_t<f32> pitch_angle_estimate;
ENABLE_BITWISE_SERIALIZATION;
};
struct CellGemExtPortData
@ -270,4 +274,6 @@ struct CellGemVideoConvertAttribute
vm::bptr<u8> buffer_memory;
vm::bptr<u8> video_data_out;
u8 alpha;
ENABLE_BITWISE_SERIALIZATION;
};

View File

@ -110,6 +110,7 @@ struct CellJpgDecSubHandle
static const u32 id_base = 1;
static const u32 id_step = 1;
static const u32 id_count = 1023;
SAVESTATE_INIT_POS(35);
u32 fd;
u64 fileSize;

View File

@ -1,5 +1,6 @@
#include "stdafx.h"
#include "Emu/IdManager.h"
#include "Emu/System.h"
#include "Emu/Cell/PPUModule.h"
#include "Emu/Io/KeyboardHandler.h"
@ -31,6 +32,33 @@ void fmt_class_string<CellKbError>::format(std::string& out, u64 arg)
});
}
// Savestate-aware constructor: ar is null on a regular boot and non-null
// when restoring from a savestate.
KeyboardHandlerBase::KeyboardHandlerBase(utils::serial* ar)
{
if (!ar)
{
return;
}
(*ar)(m_info.max_connect);
if (m_info.max_connect)
{
// Defer Init() until deserialization completes — presumably it depends
// on emulator state restored later in the process (TODO confirm)
Emu.DeferDeserialization([this]()
{
Init(m_info.max_connect);
init.init();
});
}
}
// Store max_connect, or 0 if the handler was never initialized (0 makes the
// loading constructor skip the deferred Init()).
void KeyboardHandlerBase::save(utils::serial& ar)
{
const auto inited = init.access();
ar(inited ? m_info.max_connect : 0);
}
error_code cellKbInit(u32 max_connect)
{
sys_io.warning("cellKbInit(max_connect=%d)", max_connect);

View File

@ -356,7 +356,7 @@ public:
std::unordered_map<s32, microphone_device> mic_list;
shared_mutex mutex;
atomic_t<u32> init = 0;
atomic_t<u8> init = 0;
static constexpr auto thread_name = "Microphone Thread"sv;

View File

@ -1,5 +1,6 @@
#include "stdafx.h"
#include "Emu/IdManager.h"
#include "Emu/System.h"
#include "Emu/Cell/PPUModule.h"
#include "Emu/Io/MouseHandler.h"
@ -32,6 +33,32 @@ void fmt_class_string<CellMouseError>::format(std::string& out, u64 arg)
});
}
// Savestate-aware constructor: ar is null on a regular boot and non-null
// when restoring from a savestate. Mirrors KeyboardHandlerBase.
MouseHandlerBase::MouseHandlerBase(utils::serial* ar)
{
if (!ar)
{
return;
}
(*ar)(m_info.max_connect);
if (m_info.max_connect)
{
// Defer Init() until deserialization completes — presumably it depends
// on emulator state restored later in the process (TODO confirm)
Emu.DeferDeserialization([this]()
{
Init(m_info.max_connect);
init.init();
});
}
}
// Store max_connect, or 0 if the handler was never initialized (0 makes the
// loading constructor skip the deferred Init()).
void MouseHandlerBase::save(utils::serial& ar)
{
const auto inited = init.access();
ar(inited ? m_info.max_connect : 0);
}
error_code cellMouseInit(u32 max_connect)
{
sys_io.warning("cellMouseInit(max_connect=%d)", max_connect);

View File

@ -74,10 +74,32 @@ struct music_state
std::shared_ptr<music_handler_base> handler;
music_selection_context current_selection_context;
SAVESTATE_INIT_POS(16);
music_state()
{
handler = Emu.GetCallbacks().get_music_handler();
}
music_state(utils::serial& ar)
: music_state()
{
save(ar);
}
// Serialize or deserialize cellMusic state (direction decided by ar).
void save(utils::serial& ar)
{
ar(func);
// No callback registered at save time: nothing else was stored
if (!func)
{
return;
}
// Version tag is only emitted when writing
USING_SERIALIZATION_VERSION_COND(ar.is_writing(), cellMusic);
ar(userData);
}
};
error_code cell_music_select_contents()

View File

@ -50,6 +50,18 @@ void fmt_class_string<CellPadFilterError>::format(std::string& out, u64 arg)
});
}
// Savestate constructor: members are deserialized in declaration order,
// matching the write order in save().
pad_info::pad_info(utils::serial& ar)
: max_connect(ar)
, port_setting(ar)
{
}
// Serialize pad state; order must match the deserializing constructor.
void pad_info::save(utils::serial& ar)
{
ar(max_connect, port_setting);
}
error_code cellPadInit(u32 max_connect)
{
sys_io.warning("cellPadInit(max_connect=%d)", max_connect);

View File

@ -3,6 +3,7 @@
#include "Emu/Io/pad_types.h"
#include <array>
#include "util/types.hpp"
enum CellPadError : u32
{
@ -197,6 +198,12 @@ struct pad_info
{
atomic_t<u32> max_connect = 0;
std::array<u32, CELL_PAD_MAX_PORT_NUM> port_setting{ 0 };
SAVESTATE_INIT_POS(11);
pad_info() = default;
pad_info(utils::serial& ar);
void save(utils::serial& ar);
};
error_code cellPadGetData(u32 port_no, vm::ptr<CellPadData> data);

View File

@ -81,6 +81,8 @@ struct search_content_t
CellSearchVideoListInfo video_list;
CellSearchVideoSceneInfo scene;
} data;
ENABLE_BITWISE_SERIALIZATION;
};
using content_id_type = std::pair<u64, std::shared_ptr<search_content_t>>;
@ -90,6 +92,8 @@ struct content_id_map
std::unordered_map<u64, std::shared_ptr<search_content_t>> map;
shared_mutex mutex;
SAVESTATE_INIT_POS(36);
};
struct search_object_t
@ -98,6 +102,7 @@ struct search_object_t
static const u32 id_base = 1;
static const u32 id_step = 1;
static const u32 id_count = 1024; // TODO
SAVESTATE_INIT_POS(36.1);
std::vector<content_id_type> content_ids;
};

View File

@ -68,6 +68,8 @@ struct sysutil_cb_manager
struct alignas(8) registered_cb
{
ENABLE_BITWISE_SERIALIZATION;
vm::ptr<CellSysutilCallback> callback;
vm::ptr<void> user_data;
};
@ -78,6 +80,21 @@ struct sysutil_cb_manager
atomic_t<bool> draw_cb_started{};
atomic_t<u64> read_counter{0};
SAVESTATE_INIT_POS(13);
sysutil_cb_manager() = default;
// Savestate constructor: restore the registered sysutil callbacks.
sysutil_cb_manager(utils::serial& ar)
{
ar(callbacks);
}
// Serialize the registered sysutil callbacks.
void save(utils::serial& ar)
{
// Saving while a dispatched callback is pending is unsupported
ensure(!registered);
ar(callbacks);
}
};
extern void sysutil_register_cb(std::function<s32(ppu_thread&)>&& cb)

View File

@ -7,6 +7,7 @@
#include "Emu/Cell/lv2/sys_process.h"
#include "sysPrxForUser.h"
#include "util/media_utils.h"
#include "util/init_mutex.hpp"
#ifdef _MSC_VER
#pragma warning(push, 0)
@ -173,6 +174,7 @@ struct vdec_context final
static const u32 id_base = 0xf0000000;
static const u32 id_step = 0x00000100;
static const u32 id_count = 1024;
SAVESTATE_INIT_POS(24);
u32 handle = 0;
@ -627,7 +629,43 @@ struct vdec_context final
}
};
static void vdecEntry(ppu_thread& ppu, u32 vid)
// Gate for vdec_context creation, used by the savestate logic below.
// Starts in the initialized ("creation allowed") state.
struct vdec_creation_lock
{
stx::init_mutex locked;
vdec_creation_lock()
{
locked.init();
}
};
// Attempt to block creation of new vdec contexts (savestate preparation).
// Returns false when a vdec context already exists: the lock is re-armed
// via set_init() and creation remains allowed. Returns true otherwise,
// leaving the lock reset so vdecOpen() takes the cpu_flag::again path.
extern bool try_lock_vdec_context_creation()
{
	auto& lock = g_fxo->get<vdec_creation_lock>();
	auto reset = lock.locked.reset();

	if (reset)
	{
		// Probe the ID manager for any live decoder context
		bool context_exists = false;

		idm::select<vdec_context>([&](u32, vdec_context&)
		{
			context_exists = true;
		});

		if (context_exists)
		{
			reset.set_init();
			return false;
		}
	}

	return true;
}
extern void vdecEntry(ppu_thread& ppu, u32 vid)
{
idm::get<vdec_context>(vid)->exec(ppu, vid);
@ -856,9 +894,20 @@ static error_code vdecOpen(ppu_thread& ppu, T type, U res, vm::cptr<CellVdecCb>
}
// Create decoder context
const u32 vid = idm::make<vdec_context>(type->codecType, type->profileLevel, res->memAddr, res->memSize, cb->cbFunc, cb->cbArg);
std::shared_ptr<vdec_context> vdec;
if (auto access = g_fxo->get<vdec_creation_lock>().locked.access(); access)
{
vdec = idm::make_ptr<vdec_context>(type->codecType, type->profileLevel, res->memAddr, res->memSize, cb->cbFunc, cb->cbArg);
}
else
{
ppu.state += cpu_flag::again;
return {};
}
const u32 vid = idm::last_id();
auto vdec = idm::get<vdec_context>(vid);
ensure(vdec);
vdec->handle = vid;

View File

@ -46,6 +46,12 @@ void voice_manager::reset()
queue_keys.clear();
}
// Serialize or deserialize cellVoice state (direction decided by ar); the
// version tag is only emitted on the write path.
void voice_manager::save(utils::serial& ar)
{
USING_SERIALIZATION_VERSION_COND(ar.is_writing(), cellVoice);
ar(id_ctr, port_source, ports, queue_keys, voice_service_started);
}
error_code cellVoiceConnectIPortToOPort(u32 ips, u32 ops)
{
cellVoice.todo("cellVoiceConnectIPortToOPort(ips=%d, ops=%d)", ips, ops);

View File

@ -182,6 +182,8 @@ struct voice_manager
{
s32 state = CELLVOICE_PORTSTATE_NULL;
CellVoicePortParam info;
ENABLE_BITWISE_SERIALIZATION;
};
// See cellVoiceCreatePort
@ -210,4 +212,10 @@ struct voice_manager
void reset();
shared_mutex mtx;
atomic_t<bool> is_init{ false };
SAVESTATE_INIT_POS(17);
voice_manager() = default;
voice_manager(utils::serial& ar) { save(ar); }
void save(utils::serial& ar);
};

View File

@ -340,6 +340,7 @@ public:
static const u32 id_base = 1;
static const u32 id_step = 1;
static const u32 id_count = 1023;
SAVESTATE_INIT_POS(23);
const bool to_rgba;

View File

@ -47,6 +47,11 @@ struct sns_fb_handle_t
static const u32 id_step = 1;
static const u32 id_count = SCE_NP_SNS_FB_HANDLE_SLOT_MAX + 1;
static const u32 invalid = SCE_NP_SNS_FB_INVALID_HANDLE;
SAVESTATE_INIT_POS(20);
sns_fb_handle_t() = default;
sns_fb_handle_t(utils::serial&){}
void save(utils::serial&){}
};
// Initialization parameters for functionalities coordinated with Facebook

View File

@ -33,10 +33,51 @@ struct trophy_context_t
static const u32 id_base = 1;
static const u32 id_step = 1;
static const u32 id_count = 4;
SAVESTATE_INIT_POS(42);
std::string trp_name;
std::unique_ptr<TROPUSRLoader> tropusr;
bool read_only = false;
trophy_context_t() = default;
// Savestate constructor: restores a trophy context by name. TROPUSR data is
// not stored in the savestate — it is reloaded from disk instead.
trophy_context_t(utils::serial& ar)
: trp_name(ar.operator std::string())
{
std::string trophy_path = vfs::get(Emu.GetDir() + "TROPDIR/" + trp_name + "/TROPHY.TRP");
fs::file trp_stream(trophy_path);
if (!trp_stream)
{
// Fallback
trophy_path = vfs::get("/dev_bdvd/PS3_GAME/TROPDIR/" + trp_name + "/TROPHY.TRP");
trp_stream.open(trophy_path);
}
// This bool mirrors save() writing tropusr.operator bool(): false means no
// TROPUSR data was loaded at save time, so only read_only follows
if (!ar.operator bool())
{
ar(read_only);
return;
}
ar(read_only);
// A missing TRP file is tolerated only in state inspection mode
if (!trp_stream && g_cfg.savestate.state_inspection_mode)
{
return;
}
const std::string trophyPath = "/dev_hdd0/home/" + Emu.GetUsr() + "/trophy/" + trp_name;
tropusr = std::make_unique<TROPUSRLoader>();
const std::string trophyUsrPath = trophyPath + "/TROPUSR.DAT";
const std::string trophyConfPath = trophyPath + "/TROPCONF.SFM";
ensure(tropusr->Load(trophyUsrPath, trophyConfPath).success);
}
// Store the context name, whether TROPUSR data is loaded, and the read_only
// flag; order must match the deserializing constructor.
void save(utils::serial& ar)
{
ar(trp_name, tropusr.operator bool(), read_only);
}
};
struct trophy_handle_t
@ -44,8 +85,21 @@ struct trophy_handle_t
static const u32 id_base = 1;
static const u32 id_step = 1;
static const u32 id_count = 4;
SAVESTATE_INIT_POS(43);
bool is_aborted = false;
trophy_handle_t() = default;
trophy_handle_t(utils::serial& ar)
: is_aborted(ar)
{
}
void save(utils::serial& ar)
{
ar(is_aborted);
}
};
struct sce_np_trophy_manager
@ -103,6 +157,25 @@ struct sce_np_trophy_manager
return res;
}
SAVESTATE_INIT_POS(12);
sce_np_trophy_manager() = default;
sce_np_trophy_manager(utils::serial& ar)
: is_initialized(ar)
{
}
// Serialize the trophy manager; the version tag is only recorded when the
// manager was actually initialized.
void save(utils::serial& ar)
{
ar(is_initialized);
if (is_initialized)
{
USING_SERIALIZATION_VERSION(sceNpTrophy);
}
}
};
template<>

View File

@ -11,6 +11,7 @@ struct HeapInfo
static const u32 id_base = 1;
static const u32 id_step = 1;
static const u32 id_count = 1023;
SAVESTATE_INIT_POS(22);
const std::string name;

View File

@ -18,6 +18,7 @@ struct memory_pool_t
static const u32 id_base = 1;
static const u32 id_step = 1;
static const u32 id_count = 1023;
SAVESTATE_INIT_POS(21);
u32 mutexid;
u32 condid;

View File

@ -1,8 +1,10 @@
#include "stdafx.h"
#include "PPUFunction.h"
#include "Utilities/JIT.h"
#include "util/serialization.hpp"
#include "PPUModule.h"
#include "PPUInterpreter.h"
// Get function name by FNID
extern std::string ppu_get_function_name(const std::string& _module, u32 fnid)
@ -1943,6 +1945,16 @@ auto gen_ghc_cpp_trampoline(ppu_intrp_func_t fn_target)
#error "Not implemented!"
#endif
// Savestate constructor: restore the allocation address of the HLE function
// array.
ppu_function_manager::ppu_function_manager(utils::serial& ar)
: addr(ar)
{
}
// Store the allocation address of the HLE function array.
void ppu_function_manager::save(utils::serial& ar)
{
ar(addr);
}
std::vector<ppu_intrp_func_t>& ppu_function_manager::access(bool ghc)
{
static std::vector<ppu_intrp_func_t> list

View File

@ -115,6 +115,7 @@ namespace ppu_func_detail
static FORCE_INLINE void put_result(ppu_thread& ppu, const T& result)
{
if (ppu.state & cpu_flag::again) return;
ppu.gpr[3] = ppu_gpr_cast(result);
}
};
@ -126,6 +127,7 @@ namespace ppu_func_detail
static FORCE_INLINE void put_result(ppu_thread& ppu, const T& result)
{
if (ppu.state & cpu_flag::again) return;
ppu.fpr[1] = static_cast<T>(result);
}
};
@ -137,6 +139,7 @@ namespace ppu_func_detail
static FORCE_INLINE void put_result(ppu_thread& ppu, const T& result)
{
if (ppu.state & cpu_flag::again) return;
ppu.vr[2] = result;
}
};
@ -299,6 +302,9 @@ public:
// Allocation address
u32 addr = 0;
void save(utils::serial& ar);
ppu_function_manager(utils::serial& ar);
};
template<typename T, T Func>

View File

@ -4,6 +4,7 @@
#include "Utilities/bin_patch.h"
#include "Utilities/StrUtil.h"
#include "Utilities/address_range.h"
#include "util/serialization.hpp"
#include "Crypto/sha1.h"
#include "Crypto/unself.h"
#include "Loader/ELF.h"
@ -152,7 +153,7 @@ struct ppu_linkage_info
};
// Initialize static modules.
static void ppu_initialize_modules(ppu_linkage_info* link)
static void ppu_initialize_modules(ppu_linkage_info* link, utils::serial* ar = nullptr)
{
if (!link->modules.empty())
{
@ -280,7 +281,10 @@ static void ppu_initialize_modules(ppu_linkage_info* link)
u32& hle_funcs_addr = g_fxo->get<ppu_function_manager>().addr;
// Allocate memory for the array (must be called after fixed allocations)
hle_funcs_addr = vm::alloc(::size32(hle_funcs) * 8, vm::main);
if (!hle_funcs_addr)
hle_funcs_addr = vm::alloc(::size32(hle_funcs) * 8, vm::main);
else
vm::page_protect(hle_funcs_addr, utils::align(::size32(hle_funcs) * 8, 0x1000), 0, vm::page_writable);
// Initialize as PPU executable code
ppu_register_range(hle_funcs_addr, ::size32(hle_funcs) * 8);
@ -319,6 +323,71 @@ static void ppu_initialize_modules(ppu_linkage_info* link)
ppu_loader.trace("Registered static module: %s", _module->name);
}
// Persists the addresses of HLE module variables across savestates.
// save() writes, per module: name, variable count, then (id, addr) pairs,
// terminated by an empty name. The loading constructor consumes the same
// layout and patches the addresses back into the live module registry.
struct hle_vars_save
{
hle_vars_save() = default;
hle_vars_save(const hle_vars_save&) = delete;
hle_vars_save& operator =(const hle_vars_save&) = delete;
// Loading path: restore variable addresses recorded by save()
hle_vars_save(utils::serial& ar)
{
auto& manager = ppu_module_manager::get();
while (true)
{
const std::string name = ar.operator std::string();
if (name.empty())
{
// Null termination
break;
}
const auto _module = manager.at(name);
auto& variable = _module->variables;
for (u32 i = 0, end = ar.operator usz(); i < end; i++)
{
auto* ptr = &variable.at(ar.operator u32());
ptr->addr = ar.operator u32();
// The registry entry must exist for every saved variable id
ensure(!!ptr->var);
}
}
}
void save(utils::serial& ar)
{
for (auto& pair : ppu_module_manager::get())
{
const auto _module = pair.second;
ar(_module->name);
ar(_module->variables.size());
for (auto& variable : _module->variables)
{
ar(variable.first, variable.second.addr);
}
}
// Null terminator
ar(std::string{});
}
};
if (ar)
{
g_fxo->init<hle_vars_save>(*ar);
}
else
{
g_fxo->init<hle_vars_save>();
}
for (auto& pair : ppu_module_manager::get())
{
const auto _module = pair.second;
@ -345,7 +414,11 @@ static void ppu_initialize_modules(ppu_linkage_info* link)
ppu_loader.trace("** &0x%08X: %s (size=0x%x, align=0x%x)", variable.first, variable.second.name, variable.second.size, variable.second.align);
// Allocate HLE variable
if (variable.second.size >= 0x10000 || variable.second.align >= 0x10000)
if (ar)
{
// Already loaded
}
else if (variable.second.size >= 0x10000 || variable.second.align >= 0x10000)
{
variable.second.addr = vm::alloc(variable.second.size, vm::main, std::max<u32>(variable.second.align, 0x10000));
}
@ -790,6 +863,49 @@ void ppu_manual_load_imports_exports(u32 imports_start, u32 imports_size, u32 ex
ppu_load_imports(_main.relocs, &link, imports_start, imports_start + imports_size);
}
// For savestates
// Returns true if addr falls inside a non-writable segment of the main PPU
// module — presumably so such memory can be skipped when saving, since it is
// reloadable from the executable (TODO confirm with callers). Always false
// in state inspection mode.
extern bool is_memory_read_only_of_executable(u32 addr)
{
if (g_cfg.savestate.state_inspection_mode)
{
return false;
}
const auto _main = g_fxo->try_get<ppu_module>();
ensure(_main);
for (const auto& seg : _main->segs)
{
// Skip unallocated segments and writable ones
if (!seg.addr || (seg.flags & 0x2) /* W */)
continue;
if (addr >= seg.addr && addr < (seg.addr + seg.size))
return true;
}
return false;
}
// Initialize the HLE function manager, either fresh (ar == nullptr) or from
// a savestate. When 'full' is set, all HLE modules are also initialized,
// which requires a non-null archive.
void init_ppu_functions(utils::serial* ar, bool full = false)
{
g_fxo->need<ppu_linkage_info>();
if (ar)
{
// The restored function-table address must refer to valid memory
ensure(vm::check_addr(g_fxo->init<ppu_function_manager>(*ar)->addr));
}
else
g_fxo->init<ppu_function_manager>();
if (full)
{
ensure(ar);
// Initialize HLE modules
ppu_initialize_modules(&g_fxo->get<ppu_linkage_info>(), ar);
}
}
static void ppu_check_patch_spu_images(const ppu_segment& seg)
{
const std::string_view seg_view{vm::get_super_ptr<char>(seg.addr), seg.size};
@ -894,7 +1010,7 @@ static void ppu_check_patch_spu_images(const ppu_segment& seg)
void try_spawn_ppu_if_exclusive_program(const ppu_module& m)
{
// If only PRX/OVL has been loaded at Emu.BootGame(), launch a single PPU thread so its memory can be viewed
if (Emu.IsReady() && g_fxo->get<ppu_module>().segs.empty())
if (Emu.IsReady() && g_fxo->get<ppu_module>().segs.empty() && !Emu.DeserialManager())
{
ppu_thread_params p
{
@ -911,7 +1027,7 @@ void try_spawn_ppu_if_exclusive_program(const ppu_module& m)
}
}
std::shared_ptr<lv2_prx> ppu_load_prx(const ppu_prx_object& elf, const std::string& path, s64 file_offset)
std::shared_ptr<lv2_prx> ppu_load_prx(const ppu_prx_object& elf, const std::string& path, s64 file_offset, utils::serial* ar)
{
if (elf != elf_error::ok)
{
@ -919,7 +1035,7 @@ std::shared_ptr<lv2_prx> ppu_load_prx(const ppu_prx_object& elf, const std::stri
}
// Create new PRX object
const auto prx = idm::make_ptr<lv2_obj, lv2_prx>();
const auto prx = !ar ? idm::make_ptr<lv2_obj, lv2_prx>() : std::make_shared<lv2_prx>();
// Access linkage information object
auto& link = g_fxo->get<ppu_linkage_info>();
@ -957,15 +1073,16 @@ std::shared_ptr<lv2_prx> ppu_load_prx(const ppu_prx_object& elf, const std::stri
//const u32 init_addr = ::narrow<u32>(prog.p_vaddr);
// Alloc segment memory
const u32 addr = vm::alloc(mem_size, vm::main);
// Or use saved address
const u32 addr = !ar ? vm::alloc(mem_size, vm::main) : ar->operator u32();
if (!addr)
if (!vm::check_addr(addr))
{
fmt::throw_exception("vm::alloc() failed (size=0x%x)", mem_size);
}
// Copy segment data
std::memcpy(vm::base(addr), prog.bin.data(), file_size);
if (!ar) std::memcpy(vm::base(addr), prog.bin.data(), file_size);
ppu_loader.warning("**** Loaded to 0x%x...0x%x (size=0x%x)", addr, addr + mem_size - 1, mem_size);
// Hash segment
@ -1068,6 +1185,11 @@ std::shared_ptr<lv2_prx> ppu_load_prx(const ppu_prx_object& elf, const std::stri
const u64 rdata = _rel.data = data_base + rel.ptr.addr();
prx->relocs.emplace_back(_rel);
if (ar)
{
break;
}
switch (rtype)
{
case 1: // R_PPC64_ADDR32
@ -1293,7 +1415,7 @@ void ppu_unload_prx(const lv2_prx& prx)
}
}
bool ppu_load_exec(const ppu_exec_object& elf)
bool ppu_load_exec(const ppu_exec_object& elf, utils::serial* ar)
{
if (elf != elf_error::ok)
{
@ -1316,8 +1438,7 @@ bool ppu_load_exec(const ppu_exec_object& elf)
}
}
g_fxo->need<ppu_linkage_info>();
g_fxo->need<ppu_function_manager>();
init_ppu_functions(ar, false);
// Set for delayed initialization in ppu_initialize()
auto& _main = g_fxo->get<ppu_module>();
@ -1390,7 +1511,17 @@ bool ppu_load_exec(const ppu_exec_object& elf)
return false;
}
if (!vm::falloc(addr, size, vm::main))
const bool already_loaded = ar && (_seg.flags & 0x2);
if (already_loaded)
{
if (!vm::check_addr(addr, vm::page_readable, size))
{
ppu_loader.fatal("ppu_load_exec(): Archived PPU executable memory has not been found! (addr=0x%x, memsz=0x%x)", addr, size);
return false;
}
}
else if (!vm::falloc(addr, size, vm::main))
{
ppu_loader.error("vm::falloc(vm::main) failed (addr=0x%x, memsz=0x%x)", addr, size); // TODO
@ -1402,7 +1533,18 @@ bool ppu_load_exec(const ppu_exec_object& elf)
}
// Copy segment data, hash it
std::memcpy(vm::base(addr), prog.bin.data(), prog.bin.size());
if (!already_loaded)
{
std::memcpy(vm::base(addr), prog.bin.data(), prog.bin.size());
}
else
{
// For backwards compatibility: already loaded memory will always be writable
const u32 size0 = utils::align(size + addr % 0x10000, 0x10000);
const u32 addr0 = addr & -0x10000;
vm::page_protect(addr0, size0, 0, vm::page_writable | vm::page_readable, vm::page_executable);
}
sha1_update(&sha, reinterpret_cast<const uchar*>(&prog.p_vaddr), sizeof(prog.p_vaddr));
sha1_update(&sha, reinterpret_cast<const uchar*>(&prog.p_memsz), sizeof(prog.p_memsz));
sha1_update(&sha, prog.bin.data(), prog.bin.size());
@ -1476,7 +1618,7 @@ bool ppu_load_exec(const ppu_exec_object& elf)
}
// Initialize HLE modules
ppu_initialize_modules(&link);
ppu_initialize_modules(&link, ar);
// Embedded SPU elf patching
for (const auto& seg : _main.segs)
@ -1641,6 +1783,46 @@ bool ppu_load_exec(const ppu_exec_object& elf)
}
}
// Initialize memory stats (according to sdk version)
u32 mem_size;
if (g_ps3_process_info.get_cellos_appname() == "vsh.self"sv)
{
// Because vsh.self comes before any generic application, more memory is available to it
mem_size = 0xF000000;
}
else if (sdk_version > 0x0021FFFF)
{
mem_size = 0xD500000;
}
else if (sdk_version > 0x00192FFF)
{
mem_size = 0xD300000;
}
else if (sdk_version > 0x0018FFFF)
{
mem_size = 0xD100000;
}
else if (sdk_version > 0x0017FFFF)
{
mem_size = 0xD000000;
}
else if (sdk_version > 0x00154FFF)
{
mem_size = 0xCC00000;
}
else
{
mem_size = 0xC800000;
}
if (g_cfg.core.debug_console_mode)
{
// TODO: Check for all sdk versions
mem_size += 0xC000000;
}
if (!ar) g_fxo->init<lv2_memory_container>(mem_size);
// Initialize process
std::vector<std::shared_ptr<lv2_prx>> loaded_modules;
@ -1658,9 +1840,9 @@ bool ppu_load_exec(const ppu_exec_object& elf)
load_libs.emplace("libsysmodule.sprx");
}
if (g_ps3_process_info.get_cellos_appname() == "vsh.self"sv)
if (ar || g_ps3_process_info.get_cellos_appname() == "vsh.self"sv)
{
// Cannot be used with vsh.self (it self-manages itself)
// Cannot be used with vsh.self or savestates (they manage this themselves)
load_libs.clear();
}
@ -1676,6 +1858,25 @@ bool ppu_load_exec(const ppu_exec_object& elf)
// Program entry
u32 entry = 0;
// Set path (TODO)
_main.name.clear();
_main.path = vfs::get(Emu.argv[0]);
// Analyse executable (TODO)
_main.analyse(0, static_cast<u32>(elf.header.e_entry), end, applied);
// Validate analyser results (not required)
_main.validate(0);
// Set SDK version
g_ps3_process_info.sdk_ver = sdk_version;
// Set ppc fixed allocations segment permission
g_ps3_process_info.ppc_seg = ppc_seg;
void init_fxo_for_exec(utils::serial* ar, bool full);
init_fxo_for_exec(ar, false);
if (!load_libs.empty())
{
for (const auto& name : load_libs)
@ -1686,7 +1887,7 @@ bool ppu_load_exec(const ppu_exec_object& elf)
{
ppu_loader.warning("Loading library: %s", name);
auto prx = ppu_load_prx(obj, lle_dir + name, 0);
auto prx = ppu_load_prx(obj, lle_dir + name, 0, nullptr);
if (prx->funcs.empty())
{
@ -1715,21 +1916,11 @@ bool ppu_load_exec(const ppu_exec_object& elf)
}
}
// Set path (TODO)
_main.name.clear();
_main.path = vfs::get(Emu.argv[0]);
// Analyse executable (TODO)
_main.analyse(0, static_cast<u32>(elf.header.e_entry), end, applied);
// Validate analyser results (not required)
_main.validate(0);
// Set SDK version
g_ps3_process_info.sdk_ver = sdk_version;
// Set ppc fixed allocations segment permission
g_ps3_process_info.ppc_seg = ppc_seg;
if (ar)
{
error_handler.errored = false;
return true;
}
if (ppc_seg != 0x0)
{
@ -1804,44 +1995,6 @@ bool ppu_load_exec(const ppu_exec_object& elf)
ppu->gpr[1] -= Emu.data.size();
}
// Initialize memory stats (according to sdk version)
u32 mem_size;
if (g_ps3_process_info.get_cellos_appname() == "vsh.self"sv)
{
// Because vsh.self comes before any generic application, more memory is available to it
mem_size = 0xF000000;
}
else if (sdk_version > 0x0021FFFF)
{
mem_size = 0xD500000;
}
else if (sdk_version > 0x00192FFF)
{
mem_size = 0xD300000;
}
else if (sdk_version > 0x0018FFFF)
{
mem_size = 0xD100000;
}
else if (sdk_version > 0x0017FFFF)
{
mem_size = 0xD000000;
}
else if (sdk_version > 0x00154FFF)
{
mem_size = 0xCC00000;
}
else
{
mem_size = 0xC800000;
}
if (g_cfg.core.debug_console_mode)
{
// TODO: Check for all sdk versions
mem_size += 0xC000000;
}
if (Emu.init_mem_containers)
{
// Refer to sys_process_exit2 for explanation
@ -1913,7 +2066,7 @@ bool ppu_load_exec(const ppu_exec_object& elf)
return true;
}
std::pair<std::shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_exec_object& elf, const std::string& path, s64 file_offset)
std::pair<std::shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_exec_object& elf, const std::string& path, s64 file_offset, utils::serial* ar)
{
if (elf != elf_error::ok)
{
@ -1975,7 +2128,17 @@ std::pair<std::shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_ex
if (prog.bin.size() > size || prog.bin.size() != prog.p_filesz)
fmt::throw_exception("Invalid binary size (0x%llx, memsz=0x%x)", prog.bin.size(), size);
if (!vm::get(vm::any, 0x30000000)->falloc(addr, size))
const bool already_loaded = ar /*&& !!(_seg.flags & 0x2)*/;
if (already_loaded)
{
if (!vm::check_addr(addr, vm::page_readable, size))
{
ppu_loader.fatal("ppu_load_overlay(): Archived PPU overlay memory has not been found! (addr=0x%x, memsz=0x%x)", addr, size);
return {nullptr, CELL_EABORT};
}
}
else if (!vm::get(vm::any, 0x30000000)->falloc(addr, size))
{
ppu_loader.error("ppu_load_overlay(): vm::falloc() failed (addr=0x%x, memsz=0x%x)", addr, size);
@ -1990,7 +2153,7 @@ std::pair<std::shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_ex
}
// Copy segment data, hash it
std::memcpy(vm::base(addr), prog.bin.data(), prog.bin.size());
if (!already_loaded) std::memcpy(vm::base(addr), prog.bin.data(), prog.bin.size());
sha1_update(&sha, reinterpret_cast<const uchar*>(&prog.p_vaddr), sizeof(prog.p_vaddr));
sha1_update(&sha, reinterpret_cast<const uchar*>(&prog.p_memsz), sizeof(prog.p_memsz));
sha1_update(&sha, prog.bin.data(), prog.bin.size());
@ -2158,9 +2321,11 @@ std::pair<std::shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_ex
// Validate analyser results (not required)
ovlm->validate(0);
idm::import_existing<lv2_obj, lv2_overlay>(ovlm);
try_spawn_ppu_if_exclusive_program(*ovlm);
if (!ar)
{
idm::import_existing<lv2_obj, lv2_overlay>(ovlm);
try_spawn_ppu_if_exclusive_program(*ovlm);
}
return {std::move(ovlm), {}};
}

View File

@ -1,6 +1,7 @@
#include "stdafx.h"
#include "Utilities/JIT.h"
#include "Utilities/StrUtil.h"
#include "util/serialization.hpp"
#include "Crypto/sha1.h"
#include "Crypto/unself.h"
#include "Loader/ELF.h"
@ -138,13 +139,28 @@ void fmt_class_string<typename ppu_thread::call_history_t>::format(std::string&
extern const ppu_decoder<ppu_itype> g_ppu_itype{};
extern const ppu_decoder<ppu_iname> g_ppu_iname{};
// Serialization overload for the PPU condition register bit array.
// The 32 individual CR bits are stored in the savestate as one packed
// 32-bit word to keep the on-disk representation compact.
template <>
bool serialize<ppu_thread::cr_bits>(utils::serial& ar, typename ppu_thread::cr_bits& o)
{
	if (!ar.is_writing())
	{
		// Loading: expand the packed word back into the per-bit array
		o.unpack(ar);
		return true;
	}

	// Saving: emit the packed 32-bit representation
	ar(o.pack());
	return true;
}
extern void ppu_initialize();
extern void ppu_finalize(const ppu_module& info);
extern bool ppu_initialize(const ppu_module& info, bool = false);
static void ppu_initialize2(class jit_compiler& jit, const ppu_module& module_part, const std::string& cache_path, const std::string& obj_name);
extern std::pair<std::shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_exec_object&, const std::string& path, s64 file_offset);
extern std::pair<std::shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_exec_object&, const std::string& path, s64 file_offset, utils::serial* = nullptr);
extern void ppu_unload_prx(const lv2_prx&);
extern std::shared_ptr<lv2_prx> ppu_load_prx(const ppu_prx_object&, const std::string&, s64 file_offset);
extern std::shared_ptr<lv2_prx> ppu_load_prx(const ppu_prx_object&, const std::string&, s64 file_offset, utils::serial* = nullptr);
extern void ppu_execute_syscall(ppu_thread& ppu, u64 code);
static void ppu_break(ppu_thread&, ppu_opcode_t, be_t<u32>*, ppu_intrp_func*);
@ -1351,6 +1367,12 @@ void ppu_thread::cpu_task()
cmd_pop(1), func(*this, {}, vm::_ptr<u32>(cia - 4), &ppu_ret);
break;
}
case ppu_cmd::cia_call:
{
loaded_from_savestate = true;
cmd_pop(), fast_call(std::exchange(cia, 0), gpr[2]);
break;
}
case ppu_cmd::initialize:
{
#ifdef __APPLE__
@ -1358,19 +1380,8 @@ void ppu_thread::cpu_task()
#endif
cmd_pop();
while (!g_fxo->get<rsx::thread>().is_inited && !is_stopped())
{
// Wait for RSX to be initialized
thread_ctrl::wait_on(g_fxo->get<rsx::thread>().is_inited, false);
}
ppu_initialize(), spu_cache::initialize();
// Wait until the progress dialog is closed.
// We don't want to open a cell dialog while a native progress dialog is still open.
thread_ctrl::wait_on<atomic_wait::op_ne>(g_progr_ptotal, 0);
g_fxo->get<progress_dialog_workaround>().skip_the_progress_dialog = true;
#ifdef __APPLE__
pthread_jit_write_protect_np(true);
#endif
@ -1380,6 +1391,24 @@ void ppu_thread::cpu_task()
asm("DSB ISH");
#endif
// Wait until the progress dialog is closed.
// We don't want to open a cell dialog while a native progress dialog is still open.
thread_ctrl::wait_on<atomic_wait::op_ne>(g_progr_ptotal, 0);
g_fxo->get<progress_dialog_workaround>().skip_the_progress_dialog = true;
// Sadly we can't postpone initializing guest time because we need to run PPU threads
// (the farther it's postponed, the more guest time accuracy is lost)
Emu.FixGuestTime();
// Check if this is the only PPU left to initialize (savestates related)
if (lv2_obj::is_scheduler_ready())
{
if (Emu.IsStarting())
{
Emu.FinalizeRunRequest();
}
}
break;
}
case ppu_cmd::sleep:
@ -1484,6 +1513,7 @@ ppu_thread::ppu_thread(const ppu_thread_params& param, std::string_view name, u3
, joiner(detached != 0 ? ppu_join_status::detached : ppu_join_status::joinable)
, entry_func(param.entry)
, start_time(get_guest_system_time())
, is_interrupt_thread(detached < 0)
, ppu_tname(make_single<std::string>(name))
{
gpr[1] = stack_addr + stack_size - ppu_stack_start_offset;
@ -1520,6 +1550,195 @@ ppu_thread::ppu_thread(const ppu_thread_params& param, std::string_view name, u3
#endif
}
// Global flag (installed via g_fxo) that disables PPU module precompilation.
// It is created and set while restoring a PPU thread from a savestate, and
// checked at the top of ppu_precompile() which returns early when it is set.
struct disable_precomp_t
{
	atomic_t<bool> disable = false;
};
void vdecEntry(ppu_thread& ppu, u32 vid);
// Returns whether this PPU thread's state may be captured in a savestate.
// Excluded: threads that have already exited, and HLE VDEC worker threads
// (identified by their current instruction address matching vdecEntry).
bool ppu_thread::savable() const
{
	// Short-circuit: only resolve the vdecEntry HLE address for live threads
	return joiner != ppu_join_status::exited
		&& cia != g_fxo->get<ppu_function_manager>().func_addr(FIND_FUNC(vdecEntry));
}
// Serializes (or deserializes, depending on the mode of 'ar') the PPU register
// and scheduling state shared by save() and the savestate constructor.
// The field order here defines the savestate binary layout — do not reorder.
void ppu_thread::serialize_common(utils::serial& ar)
{
	ar(gpr, fpr, cr, fpscr.bits, lr, ctr, vrsave, cia, xer, sat, nj, prio, optional_syscall_state);

	// Vector registers are serialized through their raw byte representation
	for (v128& reg : vr)
		ar(reg._bytes);
}
// Constructs a PPU thread from a savestate stream.
// The read order (member initializers first, then the body) must exactly
// mirror the write order in ppu_thread::save().
ppu_thread::ppu_thread(utils::serial& ar)
	: cpu_thread(idm::last_id()) // last_id() is provided to the constructor by the IDM during deserialization
	, stack_size(ar)
	, stack_addr(ar)
	, joiner(ar.operator ppu_join_status())
	, entry_func(std::bit_cast<ppu_func_opd_t, u64>(ar))
	, is_interrupt_thread(ar)
{
	// One-per-emulation flag pair: ensures only a single restored PPU thread
	// queues the ppu_cmd::initialize sequence while the others wait on it
	struct init_pushed
	{
		bool pushed = false;
		atomic_t<bool> inited = false;
	};

	serialize_common(ar);

	// Restore jm_mask
	jm_mask = nj ? 0x7F800000 : 0x7fff'ffff;

	// Queue the interrupt-handler entry point for interrupt threads
	auto queue_intr_entry = [&]()
	{
		if (is_interrupt_thread)
		{
			void ppu_interrupt_thread_entry(ppu_thread&, ppu_opcode_t, be_t<u32>*, struct ppu_intrp_func*);

			cmd_list
			({
				{ ppu_cmd::ptr_call, 0 },
				std::bit_cast<u64>(&ppu_interrupt_thread_entry)
			});
		}
	};

	// Scheduling status captured at save time (see lv2_obj::ppu_state)
	switch (const u32 status = ar.operator u32())
	{
	case PPU_THREAD_STATUS_IDLE:
	{
		// Thread had not started yet: Emulator::Run must not remove the stop flag
		stop_flag_removal_protection = true;
		break;
	}
	case PPU_THREAD_STATUS_RUNNABLE:
	case PPU_THREAD_STATUS_ONPROC:
	{
		lv2_obj::awake(this);
		[[fallthrough]];
	}
	case PPU_THREAD_STATUS_SLEEP:
	{
		if (std::exchange(g_fxo->get<init_pushed>().pushed, true))
		{
			// Initialization was already queued by another thread: wait for it to complete
			cmd_list
			({
				{ppu_cmd::ptr_call, 0}, +[](ppu_thread& ppu) -> bool
				{
					while (!Emu.IsStopped() && !g_fxo->get<init_pushed>().inited)
					{
						thread_ctrl::wait_on(g_fxo->get<init_pushed>().inited, false);
					}
					return false;
				}
			});
		}
		else
		{
			// First restored thread: disable PPU precompilation, queue initialization,
			// then signal the other waiting threads once it is done
			g_fxo->init<disable_precomp_t>();
			g_fxo->get<disable_precomp_t>().disable = true;

			cmd_push({ppu_cmd::initialize, 0});
			cmd_list
			({
				{ppu_cmd::ptr_call, 0}, +[](ppu_thread&) -> bool
				{
					auto& inited = g_fxo->get<init_pushed>().inited;
					inited = true;
					inited.notify_all();
					return true;
				}
			});
		}

		if (status == PPU_THREAD_STATUS_SLEEP)
		{
			// Re-execute the interrupted syscall; its number is taken from gpr[11]
			// (loaded_from_savestate is set so the syscall knows it is being replayed)
			cmd_list
			({
				{ppu_cmd::ptr_call, 0},

				+[](ppu_thread& ppu) -> bool
				{
					ppu.loaded_from_savestate = true;
					ppu_execute_syscall(ppu, ppu.gpr[11]);
					ppu.loaded_from_savestate = false;
					return true;
				}
			});

			// Register this thread so the scheduler waits for it to go back to sleep
			lv2_obj::set_future_sleep(this);
		}

		queue_intr_entry();

		// Resume guest execution from the saved CIA
		cmd_push({ppu_cmd::cia_call, 0});
		break;
	}
	case PPU_THREAD_STATUS_ZOMBIE:
	{
		state += cpu_flag::exit;
		break;
	}
	case PPU_THREAD_STATUS_STOP:
	{
		queue_intr_entry();
		break;
	}
	}

	// Trigger the scheduler
	state += cpu_flag::suspend;

	if (!g_use_rtm)
	{
		state += cpu_flag::memory;
	}

	ppu_tname = make_single<std::string>(ar.operator std::string());
}
// Writes this PPU thread into a savestate stream.
// The write order must exactly mirror the read order in
// ppu_thread::ppu_thread(utils::serial&).
void ppu_thread::save(utils::serial& ar)
{
	const u64 entry = std::bit_cast<u64>(entry_func);

	ppu_join_status _joiner = joiner;
	if (_joiner >= ppu_join_status::max)
	{
		// Joining thread should recover this member properly
		_joiner = ppu_join_status::joinable;
	}

	if (state & cpu_flag::again)
	{
		// Syscall must be restarted after load: restore the argument registers
		// (gpr[3..]) from syscall_args — presumably captured at syscall entry,
		// TODO confirm — and rewind CIA to the syscall instruction
		std::memcpy(&gpr[3], syscall_args, sizeof(syscall_args));
		cia -= 4;
	}

	ar(stack_size, stack_addr, _joiner, entry, is_interrupt_thread);
	serialize_common(ar);

	// Capture the scheduling status (consumed by the savestate constructor)
	ppu_thread_status status = lv2_obj::ppu_state(this, false);

	if (status == PPU_THREAD_STATUS_SLEEP && cpu_flag::again - state)
	{
		// Hack for sys_fs
		status = PPU_THREAD_STATUS_RUNNABLE;
	}

	ar(status);
	ar(*ppu_tname.load());
}
ppu_thread::thread_name_t::operator std::string() const
{
std::string thread_name = fmt::format("PPU[0x%x]", _this->id);
@ -1596,7 +1815,7 @@ be_t<u64>* ppu_thread::get_stack_arg(s32 i, u64 align)
return vm::_ptr<u64>(vm::cast((gpr[1] + 0x30 + 0x8 * (i - 1)) & (0 - align)));
}
void ppu_thread::fast_call(u32 addr, u32 rtoc)
void ppu_thread::fast_call(u32 addr, u64 rtoc)
{
const auto old_cia = cia;
const auto old_rtoc = gpr[2];
@ -1604,11 +1823,17 @@ void ppu_thread::fast_call(u32 addr, u32 rtoc)
const auto old_func = current_function;
const auto old_fmt = g_tls_log_prefix;
interrupt_thread_executing = true;
cia = addr;
gpr[2] = rtoc;
lr = g_fxo->get<ppu_function_manager>().func_addr(1) + 4; // HLE stop address
current_function = nullptr;
if (std::exchange(loaded_from_savestate, false))
{
lr = old_lr;
}
g_tls_log_prefix = []
{
const auto _this = static_cast<ppu_thread*>(get_current_cpu_thread());
@ -1643,15 +1868,21 @@ void ppu_thread::fast_call(u32 addr, u32 rtoc)
cpu_on_stop();
current_function = old_func;
}
else
else if (old_cia)
{
state -= cpu_flag::ret;
if (state & cpu_flag::exit)
{
ppu_log.error("HLE callstack savestate is not implemented!");
}
cia = old_cia;
gpr[2] = old_rtoc;
lr = old_lr;
current_function = old_func;
g_tls_log_prefix = old_fmt;
}
current_function = old_func;
g_tls_log_prefix = old_fmt;
state -= cpu_flag::ret;
};
exec_task();
@ -2466,6 +2697,13 @@ namespace
};
}
// Wraps a file handle in a file_view proxy that applies a fixed base offset.
// Ownership of _file is transferred into the returned fs::file.
extern fs::file make_file_view(fs::file&& _file, u64 offset)
{
	fs::file view;
	view.reset(std::make_unique<file_view>(std::move(_file), offset));
	return view;
}
extern void ppu_finalize(const ppu_module& info)
{
// Get cache path for this executable
@ -2503,13 +2741,18 @@ extern void ppu_finalize(const ppu_module& info)
#endif
}
extern void ppu_precompile(std::vector<std::string>& dir_queue, std::vector<lv2_prx*>* loaded_prx)
extern void ppu_precompile(std::vector<std::string>& dir_queue, std::vector<ppu_module*>* loaded_modules)
{
if (g_cfg.core.ppu_decoder != ppu_decoder_type::llvm)
{
return;
}
if (auto dis = g_fxo->try_get<disable_precomp_t>(); dis && dis->disable)
{
return;
}
// Make sure we only have one '/' at the end and remove duplicates.
for (std::string& dir : dir_queue)
{
@ -2560,53 +2803,48 @@ extern void ppu_precompile(std::vector<std::string>& dir_queue, std::vector<lv2_
std::string upper = fmt::to_upper(entry.name);
// Skip already loaded modules or HLEd ones
auto is_ignored = [&](s64 offset) -> bool
{
if (dir_queue[i] != firmware_sprx_path)
{
return false;
}
if (loaded_modules)
{
if (std::any_of(loaded_modules->begin(), loaded_modules->end(), [&](ppu_module* obj)
{
return obj->name == entry.name;
}))
{
return true;
}
}
if (g_cfg.core.libraries_control.get_set().count(entry.name + ":lle"))
{
// Force LLE
return false;
}
else if (g_cfg.core.libraries_control.get_set().count(entry.name + ":hle"))
{
// Force HLE
return true;
}
extern const std::map<std::string_view, int> g_prx_list;
// Use list
return g_prx_list.count(entry.name) && g_prx_list.at(entry.name) != 0;
};
// Check .sprx filename
if (upper.ends_with(".SPRX") && entry.name != "libfs_utility_init.sprx"sv)
{
// Skip already loaded modules or HLEd ones
if (dir_queue[i] == firmware_sprx_path)
if (is_ignored(0))
{
bool ignore = false;
if (loaded_prx)
{
for (auto* obj : *loaded_prx)
{
if (obj->name == entry.name)
{
ignore = true;
break;
}
}
if (ignore)
{
continue;
}
}
if (g_cfg.core.libraries_control.get_set().count(entry.name + ":lle"))
{
// Force LLE
ignore = false;
}
else if (g_cfg.core.libraries_control.get_set().count(entry.name + ":hle"))
{
// Force HLE
ignore = true;
}
else
{
extern const std::map<std::string_view, int> g_prx_list;
// Use list
ignore = g_prx_list.count(entry.name) && g_prx_list.at(entry.name) != 0;
}
if (ignore)
{
continue;
}
continue;
}
// Get full path
@ -2800,8 +3038,6 @@ extern void ppu_precompile(std::vector<std::string>& dir_queue, std::vector<lv2_
extern void ppu_initialize()
{
auto& _main = g_fxo->get<ppu_module>();
if (!g_fxo->is_init<ppu_module>())
{
return;
@ -2812,6 +3048,8 @@ extern void ppu_initialize()
return;
}
auto& _main = g_fxo->get<ppu_module>();
scoped_progress_dialog progr = "Scanning PPU modules...";
bool compile_main = false;
@ -2822,20 +3060,39 @@ extern void ppu_initialize()
compile_main = ppu_initialize(_main, true);
}
std::vector<lv2_prx*> prx_list;
std::vector<ppu_module*> module_list;
idm::select<lv2_obj, lv2_prx>([&](u32, lv2_prx& prx)
const std::string firmware_sprx_path = vfs::get("/dev_flash/sys/external/");
// If empty we have no indication for firmware cache state, check everything
bool compile_fw = true;
idm::select<lv2_obj, lv2_prx>([&](u32, lv2_prx& _module)
{
prx_list.emplace_back(&prx);
if (_module.path.starts_with(firmware_sprx_path))
{
// Postpone testing
compile_fw = false;
}
module_list.emplace_back(&_module);
});
// If empty we have no indication for cache state, check everything
bool compile_fw = prx_list.empty();
idm::select<lv2_obj, lv2_overlay>([&](u32, lv2_overlay& _module)
{
module_list.emplace_back(&_module);
});
// Check preloaded libraries cache
for (auto ptr : prx_list)
if (!compile_fw)
{
compile_fw |= ppu_initialize(*ptr, true);
for (auto ptr : module_list)
{
if (ptr->path.starts_with(firmware_sprx_path))
{
compile_fw |= ppu_initialize(*ptr, true);
}
}
}
std::vector<std::string> dir_queue;
@ -2871,7 +3128,7 @@ extern void ppu_initialize()
dir_queue.insert(std::end(dir_queue), std::begin(dirs), std::end(dirs));
}
ppu_precompile(dir_queue, &prx_list);
ppu_precompile(dir_queue, &module_list);
if (Emu.IsStopped())
{
@ -2885,7 +3142,7 @@ extern void ppu_initialize()
}
// Initialize preloaded libraries
for (auto ptr : prx_list)
for (auto ptr : module_list)
{
if (Emu.IsStopped())
{

View File

@ -20,6 +20,7 @@ enum class ppu_cmd : u32
hle_call, // Execute function by index (arg)
ptr_call, // Execute function by pointer
opd_call, // Execute function by provided rtoc and address (unlike lle_call, does not read memory)
cia_call, // Execute from current CIA, no GPR modification applied
initialize, // ppu_initialize()
sleep,
reset_stack, // resets stack address
@ -140,10 +141,15 @@ public:
virtual void cpu_on_stop() override;
virtual ~ppu_thread() override;
ppu_thread(const ppu_thread_params&, std::string_view name, u32 prio, int detached = 0);
SAVESTATE_INIT_POS(3);
ppu_thread(const ppu_thread_params&, std::string_view name, u32 prio, int detached = 0);
ppu_thread(utils::serial& ar);
ppu_thread(const ppu_thread&) = delete;
ppu_thread& operator=(const ppu_thread&) = delete;
bool savable() const;
void serialize_common(utils::serial& ar);
void save(utils::serial& ar);
using cpu_thread::operator=;
@ -180,8 +186,8 @@ public:
{
for (u8& b : bits)
{
b = value & 0x1;
value >>= 1;
b = !!(value & (1u << 31));
value <<= 1;
}
}
};
@ -215,6 +221,8 @@ public:
// Fixed-Point Exception Register (abstract representation)
struct
{
ENABLE_BITWISE_SERIALIZATION;
bool so{}; // Summary Overflow
bool ov{}; // Overflow
bool ca{}; // Carry
@ -266,6 +274,8 @@ public:
const char* current_function{}; // Current function name for diagnosis, optimized for speed.
const char* last_function{}; // Sticky copy of current_function, is not cleared on function return
const bool is_interrupt_thread; // True for interrupts-handler threads
// Thread name
atomic_ptr<std::string> ppu_tname;
@ -307,9 +317,15 @@ public:
operator std::string() const;
} thread_name{ this };
// For savestates
bool stop_flag_removal_protection = false; // If set, Emulator::Run won't remove stop flag
bool loaded_from_savestate = false; // Indicates the thread had just started straight from savestate load
u64 optional_syscall_state{};
bool interrupt_thread_executing = false;
be_t<u64>* get_stack_arg(s32 i, u64 align = alignof(u64));
void exec_task();
void fast_call(u32 addr, u32 rtoc);
void fast_call(u32 addr, u64 rtoc);
static std::pair<vm::addr_t, u32> stack_push(u32 size, u32 align_v);
static void stack_pop_verbose(u32 addr, u32 size) noexcept;

View File

@ -5544,11 +5544,7 @@ public:
spu_runtime::g_escape(_spu);
}
if (_spu->test_stopped())
{
_spu->pc += 4;
spu_runtime::g_escape(_spu);
}
static_cast<void>(_spu->test_stopped());
}
void STOP(spu_opcode_t op) //
@ -5591,12 +5587,7 @@ public:
spu_runtime::g_escape(_spu);
}
if (_spu->test_stopped())
{
_spu->pc += 4;
spu_runtime::g_escape(_spu);
}
static_cast<void>(_spu->test_stopped());
return static_cast<u32>(result & 0xffffffff);
}
@ -5614,12 +5605,7 @@ public:
{
_spu->state += cpu_flag::wait;
std::this_thread::yield();
if (_spu->test_stopped())
{
_spu->pc += 4;
spu_runtime::g_escape(_spu);
}
static_cast<void>(_spu->test_stopped());
}
return res;
@ -5655,6 +5641,8 @@ public:
val0 = m_ir->CreateTrunc(val0, get_type<u32>());
m_ir->CreateCondBr(cond, done, wait);
m_ir->SetInsertPoint(wait);
update_pc();
m_block->store.fill(nullptr);
const auto val1 = call("spu_read_channel", &exec_rdch, m_thread, m_ir->getInt32(op.ra));
m_ir->CreateBr(done);
m_ir->SetInsertPoint(done);
@ -5685,6 +5673,7 @@ public:
case SPU_RdInMbox:
{
update_pc();
m_block->store.fill(nullptr);
res.value = call("spu_read_in_mbox", &exec_read_in_mbox, m_thread);
break;
}
@ -5731,6 +5720,7 @@ public:
case SPU_RdEventStat:
{
update_pc();
m_block->store.fill(nullptr);
res.value = call("spu_read_events", &exec_read_events, m_thread);
break;
}
@ -5744,6 +5734,7 @@ public:
default:
{
update_pc();
m_block->store.fill(nullptr);
res.value = call("spu_read_channel", &exec_rdch, m_thread, m_ir->getInt32(op.ra));
break;
}
@ -5954,6 +5945,7 @@ public:
m_ir->CreateCondBr(m_ir->CreateICmpNE(m_ir->CreateLoad(spu_ptr<u32>(&spu_thread::ch_tag_upd)), m_ir->getInt32(MFC_TAG_UPDATE_IMMEDIATE)), _mfc, next);
m_ir->SetInsertPoint(_mfc);
update_pc();
m_block->store.fill(nullptr);
call("spu_write_channel", &exec_wrch, m_thread, m_ir->getInt32(op.ra), val.value);
m_ir->CreateBr(next);
m_ir->SetInsertPoint(next);
@ -6106,6 +6098,7 @@ public:
m_ir->SetInsertPoint(next);
m_ir->CreateStore(ci, spu_ptr<u8>(&spu_thread::ch_mfc_cmd, &spu_mfc_cmd::cmd));
update_pc();
m_block->store.fill(nullptr);
call("spu_exec_mfc_cmd", &exec_mfc_cmd, m_thread);
return;
}
@ -6373,6 +6366,7 @@ public:
m_ir->CreateCondBr(m_ir->CreateICmpNE(_old, _new), _mfc, next);
m_ir->SetInsertPoint(_mfc);
update_pc();
m_block->store.fill(nullptr);
call("spu_list_unstall", &exec_list_unstall, m_thread, eval(val & 0x1f).value);
m_ir->CreateBr(next);
m_ir->SetInsertPoint(next);
@ -6396,6 +6390,7 @@ public:
}
update_pc();
m_block->store.fill(nullptr);
call("spu_write_channel", &exec_wrch, m_thread, m_ir->getInt32(op.ra), val.value);
}
@ -6416,6 +6411,7 @@ public:
{
m_block->block_end = m_ir->GetInsertBlock();
update_pc(m_pos + 4);
m_block->store.fill(nullptr);
tail_chunk(m_dispatch);
}
}

View File

@ -27,11 +27,13 @@
#include <cfenv>
#include <thread>
#include <shared_mutex>
#include <span>
#include "util/vm.hpp"
#include "util/asm.hpp"
#include "util/v128.hpp"
#include "util/simd.hpp"
#include "util/sysinfo.hpp"
#include "util/serialization.hpp"
using spu_rdata_t = decltype(spu_thread::rdata);
@ -1623,63 +1625,28 @@ spu_thread::~spu_thread()
shm->unmap(ls + SPU_LS_SIZE);
shm->unmap(ls);
shm->unmap(ls - SPU_LS_SIZE);
utils::memory_release(ls - SPU_LS_SIZE * 2, SPU_LS_SIZE * 5);
perf_log.notice("Perf stats for transactions: success %u, failure %u", stx, ftx);
perf_log.notice("Perf stats for PUTLLC reload: successs %u, failure %u", last_succ, last_fail);
}
u8* spu_thread::map_ls(utils::shm& shm)
{
vm::writer_lock mlock;
const auto ls = static_cast<u8*>(ensure(utils::memory_reserve(SPU_LS_SIZE * 5, nullptr, true))) + SPU_LS_SIZE * 2;
ensure(shm.map_critical(ls - SPU_LS_SIZE).first && shm.map_critical(ls).first && shm.map_critical(ls + SPU_LS_SIZE).first);
return ls;
}
spu_thread::spu_thread(lv2_spu_group* group, u32 index, std::string_view name, u32 lv2_id, bool is_isolated, u32 option)
: cpu_thread(idm::last_id())
, group(group)
, index(index)
, shm(std::make_shared<utils::shm>(SPU_LS_SIZE))
, ls([&]()
{
if (!group)
{
ensure(vm::get(vm::spu)->falloc(vm_offset(), SPU_LS_SIZE, &shm, vm::page_size_64k));
}
else
{
// alloc_hidden indicates falloc to allocate page with no access rights in base memory
ensure(vm::get(vm::spu)->falloc(vm_offset(), SPU_LS_SIZE, &shm, static_cast<u64>(vm::page_size_64k) | static_cast<u64>(vm::alloc_hidden)));
}
// Try to guess free area
const auto start = vm::g_free_addr + SPU_LS_SIZE * (cpu_thread::id & 0xffffff) * 12;
u32 total = 0;
// Map LS and its mirrors
for (u64 addr = reinterpret_cast<u64>(start); addr < 0x8000'0000'0000;)
{
if (auto ptr = shm->try_map(reinterpret_cast<u8*>(addr)))
{
if (++total == 3)
{
// Use the middle mirror
return ptr - SPU_LS_SIZE;
}
addr += SPU_LS_SIZE;
}
else
{
// Reset, cleanup and start again
for (u32 i = 1; i <= total; i++)
{
shm->unmap(reinterpret_cast<u8*>(addr - i * SPU_LS_SIZE));
}
total = 0;
addr += 0x10000;
}
}
fmt::throw_exception("Failed to map SPU LS memory");
}())
, thread_type(group ? spu_type::threaded : is_isolated ? spu_type::isolated : spu_type::raw)
, shm(std::make_shared<utils::shm>(SPU_LS_SIZE))
, ls(map_ls(*this->shm))
, option(option)
, lv2_id(lv2_id)
, spu_tname(make_single<std::string>(name))
@ -1688,12 +1655,26 @@ spu_thread::spu_thread(lv2_spu_group* group, u32 index, std::string_view name, u
{
jit = spu_recompiler_base::make_asmjit_recompiler();
}
if (g_cfg.core.spu_decoder == spu_decoder_type::llvm)
else if (g_cfg.core.spu_decoder == spu_decoder_type::llvm)
{
jit = spu_recompiler_base::make_fast_llvm_recompiler();
}
if (g_cfg.core.mfc_debug)
{
utils::memory_commit(vm::g_stat_addr + vm_offset(), SPU_LS_SIZE);
}
if (!group)
{
ensure(vm::get(vm::spu)->falloc(vm_offset(), SPU_LS_SIZE, &shm, vm::page_size_64k));
}
else
{
// alloc_hidden indicates falloc to allocate page with no access rights in base memory
ensure(vm::get(vm::spu)->falloc(vm_offset(), SPU_LS_SIZE, &shm, static_cast<u64>(vm::page_size_64k) | static_cast<u64>(vm::alloc_hidden)));
}
if (g_cfg.core.spu_decoder == spu_decoder_type::asmjit || g_cfg.core.spu_decoder == spu_decoder_type::llvm)
{
if (g_cfg.core.spu_block_size != spu_block_size_type::safe)
@ -1716,6 +1697,165 @@ spu_thread::spu_thread(lv2_spu_group* group, u32 index, std::string_view name, u
range_lock = vm::alloc_range_lock();
}
// Serializes (or deserializes, depending on the mode of 'ar') the SPU state
// shared by save() and the savestate constructor.
// The field order here defines the savestate binary layout — do not reorder.
void spu_thread::serialize_common(utils::serial& ar)
{
	// GPRs are serialized through their raw byte representation
	for (v128& reg : gpr)
		ar(reg._bytes);

	ar(pc, ch_mfc_cmd, mfc_size, mfc_barrier, mfc_fence, mfc_prxy_cmd, mfc_prxy_mask, mfc_prxy_write_state.all
	, srr0
	, ch_tag_upd
	, ch_tag_mask
	, ch_tag_stat.data
	, ch_stall_mask
	, ch_stall_stat.data
	, ch_atomic_stat.data
	, ch_out_mbox.data
	, ch_out_intr_mbox.data
	, snr_config
	, ch_snr1.data
	, ch_snr2.data
	, ch_events.raw().all
	, interrupts_enabled
	, run_ctrl
	, exit_status.data
	, status_npc.raw().status);

	// mfc_size was serialized above, so on load the number of pending MFC
	// queue entries is already known before the entries themselves are read
	std::for_each_n(mfc_queue, mfc_size, [&](spu_mfc_cmd& cmd) { ar(cmd); });
}
// Constructs an SPU thread from a savestate stream.
// The read order (member initializers first, then the body) must exactly
// mirror the write order in spu_thread::save().
spu_thread::spu_thread(utils::serial& ar, lv2_spu_group* group)
	: cpu_thread(idm::last_id()) // last_id() is provided to the constructor by the IDM during deserialization
	, group(group)
	, index(ar)
	// Threaded type is implied by the owning group; otherwise a saved u8 selects isolated vs raw
	, thread_type(group ? spu_type::threaded : ar.operator u8() ? spu_type::isolated : spu_type::raw)
	// Reuse the LS shared-memory block already restored into the SPU address range
	, shm(ensure(vm::get(vm::spu)->peek(vm_offset()).second))
	, ls(map_ls(*this->shm))
	, option(ar)
	, lv2_id(ar)
	, spu_tname(make_single<std::string>(ar.operator std::string()))
{
	// Select the recompiler backend (mirrors the regular constructor)
	if (g_cfg.core.spu_decoder == spu_decoder_type::asmjit)
	{
		jit = spu_recompiler_base::make_asmjit_recompiler();
	}
	else if (g_cfg.core.spu_decoder == spu_decoder_type::llvm)
	{
		jit = spu_recompiler_base::make_fast_llvm_recompiler();
	}

	if (g_cfg.core.mfc_debug)
	{
		utils::memory_commit(vm::g_stat_addr + vm_offset(), SPU_LS_SIZE);
	}

	if (g_cfg.core.spu_decoder != spu_decoder_type::_static && g_cfg.core.spu_decoder != spu_decoder_type::dynamic)
	{
		if (g_cfg.core.spu_block_size != spu_block_size_type::safe)
		{
			// Initialize stack mirror
			std::memset(stack_mirror.data(), 0xff, sizeof(stack_mirror));
		}
	}

	if (get_type() >= spu_type::raw)
	{
		cpu_init();
	}

	range_lock = vm::alloc_range_lock();

	serialize_common(ar);

	{
		// Restore the incoming mailbox: saved count followed by the valid values
		u32 vals[4]{};
		const u8 count = ar;
		ar(std::span(vals, count));
		ch_in_mbox.set_values(count, vals[0], vals[1], vals[2], vals[3]);
	}

	// Rebuild NPC from the saved PC; the low bit holds the interrupt-enable flag
	status_npc.raw().npc = pc | u8{interrupts_enabled};

	if (get_type() == spu_type::threaded)
	{
		// Resolve saved event queue IDs back into object pointers (may be null)
		for (auto& pair : spuq)
		{
			ar(pair.first);
			pair.second = idm::get_unlocked<lv2_obj, lv2_event_queue>(ar.operator u32());
		}

		for (auto& q : spup)
		{
			q = idm::get_unlocked<lv2_obj, lv2_event_queue>(ar.operator u32());
		}
	}
	else
	{
		// Raw/isolated SPU: restore interrupt controllers and register globally
		for (spu_int_ctrl_t& ctrl : int_ctrl)
		{
			ar(ctrl.mask, ctrl.stat);
			ctrl.tag = idm::get_unlocked<lv2_obj, lv2_int_tag>(ar.operator u32());
		}

		g_raw_spu_ctr++;
		g_raw_spu_id[index] = id;
	}

	// Matches the "state contained cpu_flag::stop" bool written by save()
	ar(stop_flag_removal_protection);
}
// Writes this SPU thread into a savestate stream.
// The write order must exactly mirror the read order in
// spu_thread::spu_thread(utils::serial&, lv2_spu_group*).
void spu_thread::save(utils::serial& ar)
{
	USING_SERIALIZATION_VERSION(spu);

	if (raddr)
	{
		// Lose reservation at savestate load with an event if one existed at savestate save
		set_events(SPU_EVENT_LR);
	}

	ar(index);

	if (get_type() != spu_type::threaded)
	{
		// Distinguish isolated from raw SPUs (threaded is implied by the owning group on load)
		ar(u8{get_type() == spu_type::isolated});
	}

	ar(option, lv2_id, *spu_tname.load());

	serialize_common(ar);

	{
		// Save the incoming mailbox: count followed by only the valid values
		u32 vals[4]{};
		const u8 count = ch_in_mbox.try_read(vals);
		ar(count, std::span(vals, count));
	}

	if (get_type() == spu_type::threaded)
	{
		// Event queue connections are stored as raw IDs (0 if the object is dead),
		// resolved back into pointers on load
		for (const auto& [key, q] : spuq)
		{
			ar(key);
			ar(lv2_obj::check(q) ? q->id : 0);
		}

		for (auto& p : spup)
		{
			ar(lv2_obj::check(p) ? p->id : 0);
		}
	}
	else
	{
		for (const spu_int_ctrl_t& ctrl : int_ctrl)
		{
			ar(ctrl.mask, ctrl.stat, lv2_obj::check(ctrl.tag) ? ctrl.tag->id : 0);
		}
	}

	// Consumed as stop_flag_removal_protection by the savestate constructor
	ar(!!(state & cpu_flag::stop));
}
void spu_thread::push_snr(u32 number, u32 value)
{
// Get channel
@ -3408,7 +3548,8 @@ bool spu_thread::process_mfc_cmd()
std::memcpy(dump.data, _ptr<u8>(ch_mfc_cmd.lsa & 0x3ff80), 128);
}
return !test_stopped();
static_cast<void>(test_stopped());
return true;
}
case MFC_PUTLLUC_CMD:
{
@ -3422,7 +3563,8 @@ bool spu_thread::process_mfc_cmd()
do_putlluc(ch_mfc_cmd);
ch_atomic_stat.set_value(MFC_PUTLLUC_SUCCESS);
return !test_stopped();
static_cast<void>(test_stopped());
return true;
}
case MFC_PUTQLLUC_CMD:
{
@ -4161,6 +4303,11 @@ bool spu_thread::set_ch_value(u32 ch, u32 value)
spu_log.warning("sys_spu_thread_send_event(spup=%d, data0=0x%x, data1=0x%x): error (%s)", spup, (value & 0x00ffffff), data, res);
}
if (res == CELL_EAGAIN)
{
return false;
}
ch_in_mbox.set_values(1, res);
return true;
}
@ -4183,6 +4330,12 @@ bool spu_thread::set_ch_value(u32 ch, u32 value)
// TODO: check passing spup value
if (auto res = queue ? queue->send(SYS_SPU_THREAD_EVENT_USER_KEY, lv2_id, (u64{spup} << 32) | (value & 0x00ffffff), data) : CELL_ENOTCONN)
{
if (res == CELL_EAGAIN)
{
ch_out_mbox.set_value(data);
return false;
}
spu_log.warning("sys_spu_thread_throw_event(spup=%d, data0=0x%x, data1=0x%x) failed (error=%s)", spup, (value & 0x00ffffff), data, res);
}
@ -4207,6 +4360,12 @@ bool spu_thread::set_ch_value(u32 ch, u32 value)
// Use the syscall to set flag
const auto res = ch_in_mbox.get_count() ? CELL_EBUSY : 0u + sys_event_flag_set(*this, data, 1ull << flag);
if (res == CELL_EAGAIN)
{
ch_out_mbox.set_value(data);
return false;
}
if (res == CELL_EBUSY)
{
spu_log.warning("sys_event_flag_set_bit(value=0x%x (flag=%d)): In_MBox is not empty (%d)", value, flag, ch_in_mbox.get_count());
@ -4230,7 +4389,12 @@ bool spu_thread::set_ch_value(u32 ch, u32 value)
spu_log.trace("sys_event_flag_set_bit_impatient(id=%d, value=0x%x (flag=%d))", data, value, flag);
// Use the syscall to set flag
sys_event_flag_set(*this, data, 1ull << flag);
if (sys_event_flag_set(*this, data, 1ull << flag) + 0u == CELL_EAGAIN)
{
ch_out_mbox.set_value(data);
return false;
}
return true;
}
else
@ -4570,7 +4734,7 @@ bool spu_thread::stop_and_signal(u32 code)
u32 spuq = 0;
if (!ch_out_mbox.try_pop(spuq))
if (!ch_out_mbox.try_read(spuq))
{
fmt::throw_exception("sys_spu_thread_receive_event(): Out_MBox is empty");
}
@ -4581,6 +4745,20 @@ bool spu_thread::stop_and_signal(u32 code)
return ch_in_mbox.set_values(1, CELL_EBUSY), true;
}
struct clear_mbox
{
spu_thread& _this;
~clear_mbox() noexcept
{
if (cpu_flag::again - _this.state)
{
u32 val = 0;
_this.ch_out_mbox.try_pop(val);
}
}
} clear{*this};
spu_log.trace("sys_spu_thread_receive_event(spuq=0x%x)", spuq);
if (!group->has_scheduler_context /*|| group->type & 0xf00*/)
@ -4606,6 +4784,7 @@ bool spu_thread::stop_and_signal(u32 code)
if (is_stopped(old))
{
state += cpu_flag::again;
return false;
}
@ -4624,6 +4803,7 @@ bool spu_thread::stop_and_signal(u32 code)
if (is_stopped())
{
state += cpu_flag::again;
return false;
}
@ -4675,16 +4855,26 @@ bool spu_thread::stop_and_signal(u32 code)
while (auto old = state.fetch_sub(cpu_flag::signal))
{
if (is_stopped(old))
{
return false;
}
if (old & cpu_flag::signal)
{
break;
}
if (is_stopped(old))
{
std::lock_guard qlock(queue->mutex);
old = state.fetch_sub(cpu_flag::signal);
if (old & cpu_flag::signal)
{
break;
}
state += cpu_flag::again;
return false;
}
thread_ctrl::wait_on(state, old);
}
@ -4797,6 +4987,7 @@ bool spu_thread::stop_and_signal(u32 code)
if (is_stopped(old))
{
ch_out_mbox.set_value(value);
return false;
}

View File

@ -699,6 +699,13 @@ public:
using cpu_thread::operator=;
SAVESTATE_INIT_POS(5);
spu_thread(utils::serial& ar, lv2_spu_group* group = nullptr);
void serialize_common(utils::serial& ar);
void save(utils::serial& ar);
bool savable() const { return get_type() != spu_type::threaded; } // Threaded SPUs are saved as part of the SPU group
u32 pc = 0;
u32 dbg_step_pc = 0;
@ -802,9 +809,9 @@ public:
atomic_t<u32> last_exit_status; // Value to be written in exit_status after checking group termination
lv2_spu_group* const group; // SPU Thread Group (access by the spu threads in the group only! From other threads obtain a shared pointer to group using group ID)
const u32 index; // SPU index
const spu_type thread_type;
std::shared_ptr<utils::shm> shm; // SPU memory
const std::add_pointer_t<u8> ls; // SPU LS pointer
const spu_type thread_type;
const u32 option; // sys_spu_thread_initialize option
const u32 lv2_id; // The actual id that is used by syscalls
@ -847,6 +854,7 @@ public:
std::array<atomic_t<bool>, SPU_LS_SIZE / 4> local_breakpoints{};
atomic_t<bool> has_active_local_bps = false;
u32 current_bp_pc = umax;
bool stop_flag_removal_protection = false;
void push_snr(u32 number, u32 value);
static void do_dma_transfer(spu_thread* _this, const spu_mfc_cmd& args, u8* ls);
@ -898,6 +906,8 @@ public:
return group ? SPU_FAKE_BASE_ADDR + SPU_LS_SIZE * (id & 0xffffff) : RAW_SPU_BASE_ADDR + RAW_SPU_OFFSET * index;
}
static u8* map_ls(utils::shm& shm);
// Returns true if reservation existed but was just discovered to be lost
// It is safe to use on any address, even if not directly accessed by SPU (so it's slower)
bool reservation_check(u32 addr, const decltype(rdata)& data) const;

View File

@ -1190,6 +1190,7 @@ DECLARE(lv2_obj::g_mutex);
DECLARE(lv2_obj::g_ppu);
DECLARE(lv2_obj::g_pending);
DECLARE(lv2_obj::g_waiting);
DECLARE(lv2_obj::g_to_sleep);
thread_local DECLARE(lv2_obj::g_to_awake);
@ -1256,6 +1257,32 @@ void lv2_obj::sleep_unlocked(cpu_thread& thread, u64 timeout)
// Find and remove the thread
if (!unqueue(g_ppu, ppu))
{
if (unqueue(g_to_sleep, ppu))
{
ppu->start_time = start_time;
std::string out = fmt::format("Threads (%d):", g_to_sleep.size());
for (auto thread : g_to_sleep)
{
fmt::append(out, " 0x%x,", thread->id);
}
ppu_log.warning("%s", out);
if (g_to_sleep.empty())
{
// All threads are ready, wake threads
Emu.CallFromMainThread([]
{
if (Emu.IsStarting())
{
// It uses lv2_obj::g_mutex, run it on main thread
Emu.FinalizeRunRequest();
}
});
}
}
// Already sleeping
ppu_log.trace("sleep(): called on already sleeping thread.");
return;
@ -1438,11 +1465,12 @@ void lv2_obj::cleanup()
g_ppu.clear();
g_pending.clear();
g_waiting.clear();
g_to_sleep.clear();
}
void lv2_obj::schedule_all()
{
if (g_pending.empty())
if (g_pending.empty() && g_to_sleep.empty())
{
// Wake up threads
for (usz i = 0, x = std::min<usz>(g_cfg.core.ppu_threads, g_ppu.size()); i < x; i++)
@ -1486,7 +1514,7 @@ ppu_thread_status lv2_obj::ppu_state(ppu_thread* ppu, bool lock_idm, bool lock_l
opt_lock[0].emplace(id_manager::g_mutex);
}
if (ppu->state & cpu_flag::stop)
if (!Emu.IsReady() ? ppu->state.all_of(cpu_flag::stop) : ppu->stop_flag_removal_protection)
{
return PPU_THREAD_STATUS_IDLE;
}
@ -1507,6 +1535,11 @@ ppu_thread_status lv2_obj::ppu_state(ppu_thread* ppu, bool lock_idm, bool lock_l
if (it == g_ppu.end())
{
if (!ppu->interrupt_thread_executing)
{
return PPU_THREAD_STATUS_STOP;
}
return PPU_THREAD_STATUS_SLEEP;
}
@ -1517,3 +1550,14 @@ ppu_thread_status lv2_obj::ppu_state(ppu_thread* ppu, bool lock_idm, bool lock_l
return PPU_THREAD_STATUS_ONPROC;
}
void lv2_obj::set_future_sleep(ppu_thread* ppu)
{
g_to_sleep.emplace_back(ppu);
}
bool lv2_obj::is_scheduler_ready()
{
reader_lock lock(g_mutex);
return g_to_sleep.empty();
}

View File

@ -1,14 +1,72 @@
#include "stdafx.h"
#include "sys_cond.h"
#include "util/serialization.hpp"
#include "Emu/IdManager.h"
#include "Emu/IPC.h"
#include "Emu/System.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
LOG_CHANNEL(sys_cond);
// Deserialize a condition variable from a savestate.
// NOTE: the owning mutex may not have been deserialized yet, in which case
// 'mutex' stays null here and is resolved later in on_id_create().
lv2_cond::lv2_cond(utils::serial& ar)
: key(ar)
, name(ar)
, mtx_id(ar)
, mutex(idm::get_unlocked<lv2_obj, lv2_mutex>(mtx_id)) // May be nullptr
{
}
// Link this cond to its mutex (incrementing the mutex's cond_count).
// Returns CELL_ESRCH if the mutex has been destroyed.
CellError lv2_cond::on_id_create()
{
exists++;
// Shared linking step, used both immediately and from the deferred path below
static auto do_it = [](lv2_cond* _this) -> CellError
{
if (lv2_obj::check(_this->mutex))
{
_this->mutex->cond_count++;
return {};
}
// Mutex has been destroyed, cannot create conditional variable
return CELL_ESRCH;
};
if (mutex)
{
return do_it(this);
}
// 'mutex' can only be null during savestate deserialization (see ctor above)
ensure(!!Emu.DeserialManager());
Emu.DeferDeserialization([this]()
{
if (!mutex)
{
// By now every lv2_mutex must exist; failure here is a savestate bug
mutex = ensure(idm::get_unlocked<lv2_obj, lv2_mutex>(mtx_id));
}
// Defer function
ensure(CellError{} == do_it(this));
});
return {};
}
// Load one condition variable from the savestate and register it by IPC key
std::shared_ptr<void> lv2_cond::load(utils::serial& ar)
{
auto cond = std::make_shared<lv2_cond>(ar);
return lv2_obj::load(cond->key, cond);
}
// Serialize: field order must mirror the deserializing constructor above
void lv2_cond::save(utils::serial& ar)
{
ar(key, name, mtx_id);
}
error_code sys_cond_create(ppu_thread& ppu, vm::ptr<u32> cond_id, u32 mutex_id, vm::ptr<sys_cond_attribute_t> attr)
{
ppu.state += cpu_flag::wait;
@ -81,7 +139,7 @@ error_code sys_cond_signal(ppu_thread& ppu, u32 cond_id)
sys_cond.trace("sys_cond_signal(cond_id=0x%x)", cond_id);
const auto cond = idm::check<lv2_obj, lv2_cond>(cond_id, [](lv2_cond& cond)
const auto cond = idm::check<lv2_obj, lv2_cond>(cond_id, [&](lv2_cond& cond)
{
if (cond.waiters)
{
@ -89,6 +147,12 @@ error_code sys_cond_signal(ppu_thread& ppu, u32 cond_id)
if (const auto cpu = cond.schedule<ppu_thread>(cond.sq, cond.mutex->protocol))
{
if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
{
ppu.state += cpu_flag::again;
return;
}
// TODO: Is EBUSY returned after reqeueing, on sys_cond_destroy?
cond.waiters--;
@ -114,12 +178,21 @@ error_code sys_cond_signal_all(ppu_thread& ppu, u32 cond_id)
sys_cond.trace("sys_cond_signal_all(cond_id=0x%x)", cond_id);
const auto cond = idm::check<lv2_obj, lv2_cond>(cond_id, [](lv2_cond& cond)
const auto cond = idm::check<lv2_obj, lv2_cond>(cond_id, [&](lv2_cond& cond)
{
if (cond.waiters)
{
std::lock_guard lock(cond.mutex->mutex);
for (auto cpu : cond.sq)
{
if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
{
ppu.state += cpu_flag::again;
return;
}
}
cpu_thread* result = nullptr;
cond.waiters -= ::size32(cond.sq);
@ -167,6 +240,12 @@ error_code sys_cond_signal_to(ppu_thread& ppu, u32 cond_id, u32 thread_id)
{
if (cpu->id == thread_id)
{
if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
{
ppu.state += cpu_flag::again;
return 0;
}
ensure(cond.unqueue(cond.sq, cpu));
cond.waiters--;
@ -208,19 +287,33 @@ error_code sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout)
const auto cond = idm::get<lv2_obj, lv2_cond>(cond_id, [&](lv2_cond& cond) -> s64
{
if (cond.mutex->owner >> 1 != ppu.id)
if (!ppu.loaded_from_savestate && cond.mutex->owner >> 1 != ppu.id)
{
return -1;
}
std::lock_guard lock(cond.mutex->mutex);
// Register waiter
cond.sq.emplace_back(&ppu);
cond.waiters++;
if (ppu.loaded_from_savestate && ppu.optional_syscall_state & 1)
{
// Mutex sleep
ensure(!cond.mutex->try_own(ppu, ppu.id));
}
else
{
// Register waiter
cond.sq.emplace_back(&ppu);
cond.waiters++;
}
if (ppu.loaded_from_savestate)
{
cond.sleep(ppu, timeout);
return static_cast<u32>(ppu.optional_syscall_state >> 32);
}
// Unlock the mutex
const auto count = cond.mutex->lock_count.exchange(0);
const u32 count = cond.mutex->lock_count.exchange(0);
if (const auto cpu = cond.mutex->reown<ppu_thread>())
{
@ -246,16 +339,28 @@ error_code sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout)
while (auto state = ppu.state.fetch_sub(cpu_flag::signal))
{
if (is_stopped(state))
{
return {};
}
if (state & cpu_flag::signal)
{
break;
}
if (is_stopped(state))
{
std::lock_guard lock(cond->mutex->mutex);
const bool cond_sleep = std::find(cond->sq.begin(), cond->sq.end(), &ppu) != cond->sq.end();
const bool mutex_sleep = std::find(cond->mutex->sq.begin(), cond->mutex->sq.end(), &ppu) != cond->mutex->sq.end();
if (!cond_sleep && !mutex_sleep)
{
break;
}
ppu.optional_syscall_state = u32{mutex_sleep} | (u64{static_cast<u32>(cond.ret)} << 32);
ppu.state += cpu_flag::again;
return {};
}
if (timeout)
{
if (lv2_obj::wait_timeout(timeout, &ppu))
@ -263,7 +368,7 @@ error_code sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout)
// Wait for rescheduling
if (ppu.check_state())
{
return {};
continue;
}
std::lock_guard lock(cond->mutex->mutex);

View File

@ -38,18 +38,11 @@ struct lv2_cond final : lv2_obj
{
}
CellError on_id_create()
{
if (mutex->exists)
{
mutex->cond_count++;
exists++;
return {};
}
lv2_cond(utils::serial& ar);
static std::shared_ptr<void> load(utils::serial& ar);
void save(utils::serial& ar);
// Mutex has been destroyed, cannot create conditional variable
return CELL_ESRCH;
}
CellError on_id_create();
};
class ppu_thread;

View File

@ -169,6 +169,7 @@ public:
static const u32 id_base = 0x41000000;
static const u32 id_step = 0x100;
static const u32 id_count = 2048;
SAVESTATE_INIT_POS(37);
private:
u32 idm_id;
@ -219,6 +220,7 @@ public:
static const u32 id_base = 0x43000000;
static const u32 id_step = 0x100;
static const u32 id_count = 2048;
SAVESTATE_INIT_POS(38);
private:
// IDM data
@ -283,6 +285,7 @@ public:
static const u32 id_base = 0x42000000;
static const u32 id_step = 0x100;
static const u32 id_count = 2048;
SAVESTATE_INIT_POS(39);
private:
// IDM data

View File

@ -3,6 +3,7 @@
#include "Emu/IdManager.h"
#include "Emu/IPC.h"
#include "Emu/System.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
@ -21,6 +22,78 @@ lv2_event_queue::lv2_event_queue(u32 protocol, s32 type, s32 size, u64 name, u64
{
}
// Deserialize an event queue from a savestate (called during IDM insertion,
// hence idm::last_id() for the id). Pending events are restored afterwards.
lv2_event_queue::lv2_event_queue(utils::serial& ar) noexcept
: id(idm::last_id())
, protocol(ar)
, type(ar)
, size(ar)
, name(ar)
, key(ar)
{
ar(events);
}
// Load one event queue from the savestate and register it by IPC key
std::shared_ptr<void> lv2_event_queue::load(utils::serial& ar)
{
auto queue = std::make_shared<lv2_event_queue>(ar);
return lv2_obj::load(queue->key, queue);
}
// Serialize: field order must mirror the deserializing constructor above
void lv2_event_queue::save(utils::serial& ar)
{
ar(protocol, type, size, name, key, events);
}
// Serialize a queue reference as its IDM id; 0 encodes a dead/absent queue
void lv2_event_queue::save_ptr(utils::serial& ar, lv2_event_queue* q)
{
if (!lv2_obj::check(q))
{
ar(u32{0});
return;
}
ar(q->id);
}
// Resolve a serialized queue id back to a queue pointer.
// If the target queue has not been deserialized yet, resolution is deferred:
// the caller's 'queue' member is patched later and nullptr is returned now.
std::shared_ptr<lv2_event_queue> lv2_event_queue::load_ptr(utils::serial& ar, std::shared_ptr<lv2_event_queue>& queue)
{
const u32 id = ar.operator u32();
if (!id)
{
// A null reference was saved (see save_ptr)
return nullptr;
}
if (auto q = idm::get_unlocked<lv2_obj, lv2_event_queue>(id))
{
// Already initialized
return q;
}
Emu.DeferDeserialization([id, &queue]()
{
// Defer resolving
queue = ensure(idm::get_unlocked<lv2_obj, lv2_event_queue>(id));
});
// Null until resolved
return nullptr;
}
// Deserialize an event port; its queue reference may be resolved lazily
lv2_event_port::lv2_event_port(utils::serial& ar)
: type(ar)
, name(ar)
, queue(lv2_event_queue::load_ptr(ar, queue))
{
}
// Serialize an event port, storing its bound queue by id
void lv2_event_port::save(utils::serial& ar)
{
ar(type, name);
lv2_event_queue::save_ptr(ar, queue.get());
}
std::shared_ptr<lv2_event_queue> lv2_event_queue::find(u64 ipc_key)
{
if (ipc_key == SYS_EVENT_QUEUE_LOCAL)
@ -60,6 +133,20 @@ CellError lv2_event_queue::send(lv2_event event)
// Store event in registers
auto& ppu = static_cast<ppu_thread&>(*schedule<ppu_thread>(sq, protocol));
if (ppu.state & cpu_flag::again)
{
if (auto cpu = get_current_cpu_thread())
{
cpu->state += cpu_flag::again;
cpu->state += cpu_flag::exit;
}
sys_event.warning("Ignored event!");
// Fake error for abort
return CELL_EAGAIN;
}
std::tie(ppu.gpr[4], ppu.gpr[5], ppu.gpr[6], ppu.gpr[7]) = event;
awake(&ppu);
@ -68,6 +155,19 @@ CellError lv2_event_queue::send(lv2_event event)
{
// Store event in In_MBox
auto& spu = static_cast<spu_thread&>(*schedule<spu_thread>(sq, protocol));
if (spu.state & cpu_flag::again)
{
if (auto cpu = get_current_cpu_thread())
{
cpu->state += cpu_flag::exit + cpu_flag::again;
}
sys_event.warning("Ignored event!");
// Fake error for abort
return CELL_EAGAIN;
}
const u32 data1 = static_cast<u32>(std::get<1>(event));
const u32 data2 = static_cast<u32>(std::get<2>(event));
@ -331,11 +431,18 @@ error_code sys_event_queue_receive(ppu_thread& ppu, u32 equeue_id, vm::ptr<sys_e
// If cancelled, gpr[3] will be non-zero. Other registers must contain event data.
while (auto state = ppu.state.fetch_sub(cpu_flag::signal))
{
if (is_stopped(state) || state & cpu_flag::signal)
if (state & cpu_flag::signal)
{
break;
}
if (is_stopped(state))
{
extern void signal_gcm_intr_thread_offline(lv2_event_queue&);
signal_gcm_intr_thread_offline(*queue);
return {};
}
if (timeout)
{
if (lv2_obj::wait_timeout(timeout, &ppu))
@ -343,7 +450,7 @@ error_code sys_event_queue_receive(ppu_thread& ppu, u32 equeue_id, vm::ptr<sys_e
// Wait for rescheduling
if (ppu.check_state())
{
return {};
continue;
}
std::lock_guard lock(queue->mutex);
@ -559,6 +666,11 @@ error_code sys_event_port_send(u32 eport_id, u64 data1, u64 data2, u64 data3)
if (port.ret)
{
if (port.ret == CELL_EAGAIN)
{
return CELL_OK;
}
if (port.ret == CELL_EBUSY)
{
return not_an_error(CELL_EBUSY);

View File

@ -93,6 +93,12 @@ struct lv2_event_queue final : public lv2_obj
lv2_event_queue(u32 protocol, s32 type, s32 size, u64 name, u64 ipc_key) noexcept;
lv2_event_queue(utils::serial& ar) noexcept;
static std::shared_ptr<void> load(utils::serial& ar);
void save(utils::serial& ar);
static void save_ptr(utils::serial&, lv2_event_queue*);
static std::shared_ptr<lv2_event_queue> load_ptr(utils::serial& ar, std::shared_ptr<lv2_event_queue>& queue);
CellError send(lv2_event);
CellError send(u64 source, u64 d1, u64 d2, u64 d3)
@ -102,10 +108,6 @@ struct lv2_event_queue final : public lv2_obj
// Get event queue by its global key
static std::shared_ptr<lv2_event_queue> find(u64 ipc_key);
// Check queue ptr validity (use 'exists' member)
static bool check(const std::weak_ptr<lv2_event_queue>&);
static bool check(const std::shared_ptr<lv2_event_queue>&);
};
struct lv2_event_port final : lv2_obj
@ -122,6 +124,9 @@ struct lv2_event_port final : lv2_obj
, name(name)
{
}
lv2_event_port(utils::serial& ar);
void save(utils::serial& ar);
};
class ppu_thread;

View File

@ -11,6 +11,26 @@
LOG_CHANNEL(sys_event_flag);
// Deserialize an event flag from a savestate
lv2_event_flag::lv2_event_flag(utils::serial& ar)
: protocol(ar)
, key(ar)
, type(ar)
, name(ar)
{
// Current bit pattern is restored after the const fields
ar(pattern);
}
// Load one event flag from the savestate and register it by IPC key
std::shared_ptr<void> lv2_event_flag::load(utils::serial& ar)
{
auto eflag = std::make_shared<lv2_event_flag>(ar);
return lv2_obj::load(eflag->key, eflag);
}
// Serialize: field order must mirror the deserializing constructor above
void lv2_event_flag::save(utils::serial& ar)
{
ar(protocol, key, type, name, pattern);
}
error_code sys_event_flag_create(ppu_thread& ppu, vm::ptr<u32> id, vm::ptr<sys_event_flag_attribute_t> attr, u64 init)
{
ppu.state += cpu_flag::wait;
@ -171,16 +191,24 @@ error_code sys_event_flag_wait(ppu_thread& ppu, u32 id, u64 bitptn, u32 mode, vm
while (auto state = ppu.state.fetch_sub(cpu_flag::signal))
{
if (is_stopped(state))
{
return {};
}
if (state & cpu_flag::signal)
{
break;
}
if (is_stopped(state))
{
std::lock_guard lock(flag->mutex);
if (std::find(flag->sq.begin(), flag->sq.end(), &ppu) == flag->sq.end())
{
break;
}
ppu.state += cpu_flag::again;
return {};
}
if (timeout)
{
if (lv2_obj::wait_timeout(timeout, &ppu))
@ -188,7 +216,7 @@ error_code sys_event_flag_wait(ppu_thread& ppu, u32 id, u64 bitptn, u32 mode, vm
// Wait for rescheduling
if (ppu.check_state())
{
return {};
continue;
}
std::lock_guard lock(flag->mutex);
@ -275,6 +303,17 @@ error_code sys_event_flag_set(cpu_thread& cpu, u32 id, u64 bitptn)
{
std::lock_guard lock(flag->mutex);
for (auto ppu : flag->sq)
{
if (ppu->state & cpu_flag::again)
{
cpu.state += cpu_flag::again;
// Fake error for abort
return not_an_error(CELL_EAGAIN);
}
}
// Sort sleep queue in required order
if (flag->protocol != SYS_SYNC_FIFO)
{
@ -379,6 +418,15 @@ error_code sys_event_flag_cancel(ppu_thread& ppu, u32 id, vm::ptr<u32> num)
{
std::lock_guard lock(flag->mutex);
for (auto cpu : flag->sq)
{
if (cpu->state & cpu_flag::again)
{
ppu.state += cpu_flag::again;
return {};
}
}
// Get current pattern
const u64 pattern = flag->pattern;
@ -403,10 +451,7 @@ error_code sys_event_flag_cancel(ppu_thread& ppu, u32 id, vm::ptr<u32> num)
}
}
if (ppu.test_stopped())
{
return 0;
}
static_cast<void>(ppu.test_stopped());
if (num) *num = value;
return CELL_OK;

View File

@ -54,6 +54,10 @@ struct lv2_event_flag final : lv2_obj
{
}
lv2_event_flag(utils::serial& ar);
static std::shared_ptr<void> load(utils::serial& ar);
void save(utils::serial& ar);
// Check mode arg
static bool check_mode(u32 mode)
{

View File

@ -14,6 +14,7 @@
#include "Utilities/StrUtil.h"
#include <charconv>
#include <span>
LOG_CHANNEL(sys_fs);
@ -199,6 +200,12 @@ lv2_fs_mount_point* lv2_fs_object::get_mp(std::string_view filename)
return &g_mp_sys_dev_root;
}
// Deserialize common fs-object state: only the path is stored; the mount
// point is re-resolved from it. The unnamed bool merely disambiguates this
// overload for derived classes.
lv2_fs_object::lv2_fs_object(utils::serial& ar, bool)
: name(ar)
, mp(get_mp(name.data()))
{
}
u64 lv2_file::op_read(const fs::file& file, vm::ptr<void> buf, u64 size)
{
// Copy data from intermediate buffer (avoid passing vm pointer to a native API)
@ -246,6 +253,136 @@ u64 lv2_file::op_write(const fs::file& file, vm::cptr<void> buf, u64 size)
return result;
}
lv2_file::lv2_file(utils::serial& ar)
: lv2_fs_object(ar, false)
, mode(ar)
, flags(ar)
, type(ar)
{
ar(lock);
be_t<u64> arg = 0;
u64 size = 0;
switch (type)
{
case lv2_file_type::regular: break;
case lv2_file_type::sdata: arg = 0x18000000010, size = 8; break; // TODO: Fix
case lv2_file_type::edata: arg = 0x2, size = 8; break;
}
const std::string retrieve_real = ar;
open_result_t res = lv2_file::open(retrieve_real, flags & CELL_FS_O_ACCMODE, mode, size ? &arg : nullptr, size);
file = std::move(res.file);
real_path = std::move(res.real_path);
g_fxo->get<loaded_npdrm_keys>().npdrm_fds.raw() += type != lv2_file_type::regular;
if (ar.operator bool()) // see lv2_file::save in_mem
{
std::vector<u8> buf = ar;
const fs::stat_t stat = ar;
file = fs::make_stream<std::vector<u8>>(std::move(buf), stat);
}
if (!file)
{
sys_fs.error("Failed to load %s for savestates", name.data());
ar.pos += sizeof(u64);
ensure(!!g_cfg.savestate.state_inspection_mode);
return;
}
file.seek(ar);
}
// Serialize an open file descriptor. If the on-disk file no longer matches
// this descriptor (deleted or replaced while open), embed the contents into
// the savestate instead ("in_mem" path, mirrored by the constructor above).
void lv2_file::save(utils::serial& ar)
{
USING_SERIALIZATION_VERSION(lv2_fs);
ar(name, mode, flags, type, lock, vfs::retrieve(real_path));
if (!(mp->flags & lv2_mp_flag::read_only) && flags & CELL_FS_O_ACCMODE)
{
// Ensure accurate timestamps and content on disk
file.sync();
}
// UNIX allows deletion of files while descriptors are still opened
// descriptors shall keep the data in memory in this case
const bool in_mem = [&]()
{
if (mp->flags & lv2_mp_flag::read_only)
{
return false;
}
fs::file test{real_path};
if (!test) return true;
// Stat mismatch means the descriptor no longer matches the disk file
return test.stat() != file.stat();
}();
ar(in_mem);
if (in_mem)
{
ar(file.to_vector<u8>());
ar(file.stat());
}
// Current file position (restored via file.seek(ar) on load)
ar(file.pos());
}
// Deserialize a directory descriptor: a VLE entry count followed by
// (name, stat) per entry, then the current read position.
lv2_dir::lv2_dir(utils::serial& ar)
: lv2_fs_object(ar, false)
, entries([&]
{
std::vector<fs::dir_entry> entries;
u64 size = 0;
ar.deserialize_vle(size);
entries.resize(size);
for (auto& entry : entries)
{
// dir_entry derives from stat_t; name and stat are stored separately
ar(entry.name, static_cast<fs::stat_t&>(entry));
}
return entries;
}())
, pos(ar)
{
}
// Serialize a directory descriptor (exact mirror of the constructor above)
void lv2_dir::save(utils::serial& ar)
{
USING_SERIALIZATION_VERSION(lv2_fs);
ar(name);
ar.serialize_vle(entries.size());
for (auto& entry : entries)
{
ar(entry.name, static_cast<const fs::stat_t&>(entry));
}
ar(pos);
}
// Deserialize NPDRM key state; utils::serial is bidirectional, so save()
// below performs the load when the archive is in reading mode.
loaded_npdrm_keys::loaded_npdrm_keys(utils::serial& ar)
{
save(ar);
}
// (De)serialize the decryption key array, bounded by the stored count
void loaded_npdrm_keys::save(utils::serial& ar)
{
ar(dec_keys_pos);
ar(std::span(dec_keys, std::min<usz>(std::size(dec_keys), dec_keys_pos)));
}
struct lv2_file::file_view : fs::file_base
{
const std::shared_ptr<lv2_file> m_file;
@ -2281,10 +2418,7 @@ error_code sys_fs_fget_block_size(ppu_thread& ppu, u32 fd, vm::ptr<u64> sector_s
return CELL_EBADF;
}
if (ppu.is_stopped())
{
return {};
}
static_cast<void>(ppu.test_stopped());
// TODO
*sector_size = file->mp->sector_size;
@ -2335,10 +2469,7 @@ error_code sys_fs_get_block_size(ppu_thread& ppu, vm::cptr<char> path, vm::ptr<u
return {CELL_EIO, path}; // ???
}
if (ppu.is_stopped())
{
return {};
}
static_cast<void>(ppu.test_stopped());
// TODO
*sector_size = mp->sector_size;

View File

@ -163,20 +163,23 @@ struct lv2_fs_object
static const u32 id_base = 3;
static const u32 id_step = 1;
static const u32 id_count = 255 - id_base;
// Mount Point
const std::add_pointer_t<lv2_fs_mount_point> mp;
SAVESTATE_INIT_POS(40);
// File Name (max 1055)
const std::array<char, 0x420> name;
// Mount Point
const std::add_pointer_t<lv2_fs_mount_point> mp;
protected:
lv2_fs_object(lv2_fs_mount_point* mp, std::string_view filename)
: mp(mp)
, name(get_name(filename))
: name(get_name(filename))
, mp(mp)
{
}
lv2_fs_object(utils::serial& ar, bool dummy);
public:
lv2_fs_object(const lv2_fs_object&) = delete;
@ -197,10 +200,14 @@ public:
name[filename.size()] = 0;
return name;
}
void save(utils::serial&) {}
};
struct lv2_file final : lv2_fs_object
{
static constexpr u32 id_type = 1;
fs::file file;
const s32 mode;
const s32 flags;
@ -241,6 +248,9 @@ struct lv2_file final : lv2_fs_object
{
}
lv2_file(utils::serial& ar);
void save(utils::serial& ar);
struct open_raw_result_t
{
CellError error;
@ -285,6 +295,8 @@ struct lv2_file final : lv2_fs_object
struct lv2_dir final : lv2_fs_object
{
static constexpr u32 id_type = 2;
const std::vector<fs::dir_entry> entries;
// Current reading position
@ -296,6 +308,9 @@ struct lv2_dir final : lv2_fs_object
{
}
lv2_dir(utils::serial& ar);
void save(utils::serial& ar);
// Read next
const fs::dir_entry* dir_read()
{

View File

@ -2,6 +2,7 @@
#include "sys_interrupt.h"
#include "Emu/IdManager.h"
#include "Emu/System.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
@ -11,18 +12,59 @@
LOG_CHANNEL(sys_interrupt);
lv2_int_tag::lv2_int_tag() noexcept
: id(idm::last_id())
: lv2_obj{1}
, id(idm::last_id())
{
exists.release(1);
}
// Deserialize an interrupt tag. The handler (lv2_int_serv) is referenced by
// id; if it has not been deserialized yet, its resolution is deferred.
lv2_int_tag::lv2_int_tag(utils::serial& ar) noexcept
: lv2_obj{1}
, id(idm::last_id())
, handler([&]()
{
const u32 id = ar;
auto ptr = idm::get_unlocked<lv2_obj, lv2_int_serv>(id);
if (!ptr && id)
{
// Handler not available yet: patch it after all objects are loaded
Emu.DeferDeserialization([id, &handler = this->handler]()
{
handler = ensure(idm::get_unlocked<lv2_obj, lv2_int_serv>(id));
});
}
return ptr;
}())
{
}
// Serialize the handler reference by id (0 if the handler is dead/absent)
void lv2_int_tag::save(utils::serial& ar)
{
ar(lv2_obj::check(handler) ? handler->id : 0);
}
lv2_int_serv::lv2_int_serv(const std::shared_ptr<named_thread<ppu_thread>>& thread, u64 arg1, u64 arg2) noexcept
: id(idm::last_id())
: lv2_obj{1}
, id(idm::last_id())
, thread(thread)
, arg1(arg1)
, arg2(arg2)
{
exists.release(1);
}
// Deserialize an interrupt service: the handler thread is looked up by id,
// followed by its two interrupt arguments.
lv2_int_serv::lv2_int_serv(utils::serial& ar) noexcept
: lv2_obj{1}
, id(idm::last_id())
, thread(idm::get_unlocked<named_thread<ppu_thread>>(ar))
, arg1(ar)
, arg2(ar)
{
}
// Serialize the service thread by id (0 if it no longer exists) and its args
void lv2_int_serv::save(utils::serial& ar)
{
ar(thread && idm::check_unlocked<named_thread<ppu_thread>>(thread->id) ? thread->id : 0, arg1, arg2);
}
void ppu_interrupt_thread_entry(ppu_thread&, ppu_opcode_t, be_t<u32>*, struct ppu_intrp_func*);
@ -197,6 +239,8 @@ void sys_interrupt_thread_eoi(ppu_thread& ppu)
ppu.state += cpu_flag::ret;
lv2_obj::sleep(ppu);
ppu.interrupt_thread_executing = false;
}
void ppu_interrupt_thread_entry(ppu_thread& ppu, ppu_opcode_t, be_t<u32>*, struct ppu_intrp_func*)

View File

@ -6,7 +6,7 @@
class ppu_thread;
struct lv2_int_tag final : lv2_obj
struct lv2_int_tag final : public lv2_obj
{
static const u32 id_base = 0x0a000000;
@ -14,9 +14,11 @@ struct lv2_int_tag final : lv2_obj
std::shared_ptr<struct lv2_int_serv> handler;
lv2_int_tag() noexcept;
lv2_int_tag(utils::serial& ar) noexcept;
void save(utils::serial& ar);
};
struct lv2_int_serv final : lv2_obj
struct lv2_int_serv final : public lv2_obj
{
static const u32 id_base = 0x0b000000;
@ -26,6 +28,8 @@ struct lv2_int_serv final : lv2_obj
const u64 arg2;
lv2_int_serv(const std::shared_ptr<named_thread<ppu_thread>>& thread, u64 arg1, u64 arg2) noexcept;
lv2_int_serv(utils::serial& ar) noexcept;
void save(utils::serial& ar);
void exec() const;
void join() const;

View File

@ -4,10 +4,10 @@
struct lv2_io_buf
{
using id_type = lv2_io_buf;
static const u32 id_base = 0x44000000;
static const u32 id_step = 1;
static const u32 id_count = 2048;
SAVESTATE_INIT_POS(41);
const u32 block_count;
const u32 block_size;

View File

@ -9,6 +9,20 @@
LOG_CHANNEL(sys_lwcond);
// Deserialize a lightweight condition variable from a savestate.
// Explicit operator calls pin the exact wire types of the stored fields.
lv2_lwcond::lv2_lwcond(utils::serial& ar)
: name(ar.operator be_t<u64>())
, lwid(ar)
, protocol(ar)
, control(ar.operator decltype(control)())
{
}
// Serialize: field order must mirror the deserializing constructor above
void lv2_lwcond::save(utils::serial& ar)
{
USING_SERIALIZATION_VERSION(lv2_sync);
ar(name, lwid, protocol, control);
}
error_code _sys_lwcond_create(ppu_thread& ppu, vm::ptr<u32> lwcond_id, u32 lwmutex_id, vm::ptr<sys_lwcond_t> control, u64 name)
{
ppu.state += cpu_flag::wait;
@ -115,12 +129,25 @@ error_code _sys_lwcond_signal(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u6
{
std::lock_guard lock(cond.mutex);
if (cpu)
{
if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
{
ppu.state += cpu_flag::again;
return 0;
}
}
auto result = cpu ? cond.unqueue(cond.sq, cpu) :
cond.schedule<ppu_thread>(cond.sq, cond.protocol);
if (result)
{
cond.waiters--;
if (static_cast<ppu_thread*>(result)->state & cpu_flag::again)
{
ppu.state += cpu_flag::again;
return 0;
}
if (mode == 2)
{
@ -137,6 +164,12 @@ error_code _sys_lwcond_signal(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u6
// Respect ordering of the sleep queue
mutex->sq.emplace_back(result);
result = mutex->schedule<ppu_thread>(mutex->sq, mutex->protocol);
if (static_cast<ppu_thread*>(result)->state & cpu_flag::again)
{
ppu.state += cpu_flag::again;
return 0;
}
}
else if (mode == 1)
{
@ -145,6 +178,8 @@ error_code _sys_lwcond_signal(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u6
}
}
cond.waiters--;
if (result)
{
cond.awake(result);
@ -218,6 +253,15 @@ error_code _sys_lwcond_signal_all(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id
u32 result = 0;
for (auto cpu : cond.sq)
{
if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
{
ppu.state += cpu_flag::again;
return 0;
}
}
while (const auto cpu = cond.schedule<ppu_thread>(cond.sq, cond.protocol))
{
cond.waiters--;
@ -291,16 +335,32 @@ error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id
std::lock_guard lock(cond.mutex);
// Add a waiter
cond.waiters++;
cond.sq.emplace_back(&ppu);
if (ppu.loaded_from_savestate && ppu.optional_syscall_state)
{
// Special: loading state from the point of waiting on lwmutex sleep queue
std::lock_guard lock2(mutex->mutex);
mutex->sq.emplace_back(&ppu);
}
else
{
// Add a waiter
cond.waiters++;
cond.sq.emplace_back(&ppu);
}
if (!ppu.loaded_from_savestate)
{
std::lock_guard lock2(mutex->mutex);
// Process lwmutex sleep queue
if (const auto cpu = mutex->schedule<ppu_thread>(mutex->sq, mutex->protocol))
{
if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
{
ppu.state += cpu_flag::again;
return;
}
cond.append(cpu);
}
else
@ -318,18 +378,36 @@ error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id
return CELL_ESRCH;
}
if (ppu.state & cpu_flag::again)
{
return CELL_OK;
}
while (auto state = ppu.state.fetch_sub(cpu_flag::signal))
{
if (is_stopped(state))
{
return {};
}
if (state & cpu_flag::signal)
{
break;
}
if (is_stopped(state))
{
reader_lock lock(cond->mutex);
reader_lock lock2(mutex->mutex);
const bool cond_sleep = std::find(cond->sq.begin(), cond->sq.end(), &ppu) != cond->sq.end();
const bool mutex_sleep = std::find(mutex->sq.begin(), mutex->sq.end(), &ppu) != mutex->sq.end();
if (!cond_sleep && !mutex_sleep)
{
break;
}
ppu.optional_syscall_state = +mutex_sleep;
ppu.state += cpu_flag::again;
break;
}
if (timeout)
{
if (lv2_obj::wait_timeout(timeout, &ppu))
@ -337,7 +415,7 @@ error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id
// Wait for rescheduling
if (ppu.check_state())
{
return {};
continue;
}
std::lock_guard lock(cond->mutex);

View File

@ -41,6 +41,9 @@ struct lv2_lwcond final : lv2_obj
, control(control)
{
}
lv2_lwcond(utils::serial& ar);
void save(utils::serial& ar);
};
// Aux

View File

@ -8,6 +8,19 @@
LOG_CHANNEL(sys_lwmutex);
// Deserialize a lightweight mutex from a savestate.
// Explicit operator calls pin the exact wire types of the stored fields.
lv2_lwmutex::lv2_lwmutex(utils::serial& ar)
: protocol(ar)
, control(ar.operator decltype(control)())
, name(ar.operator be_t<u64>())
, signaled(ar)
{
}
// Serialize: field order must mirror the deserializing constructor above
void lv2_lwmutex::save(utils::serial& ar)
{
ar(protocol, control, name, signaled);
}
error_code _sys_lwmutex_create(ppu_thread& ppu, vm::ptr<u32> lwmutex_id, u32 protocol, vm::ptr<sys_lwmutex_t> control, s32 has_name, u64 name)
{
ppu.state += cpu_flag::wait;
@ -102,6 +115,7 @@ error_code _sys_lwmutex_destroy(ppu_thread& ppu, u32 lwmutex_id)
if (ppu.is_stopped())
{
ppu.state += cpu_flag::again;
return {};
}
@ -170,16 +184,24 @@ error_code _sys_lwmutex_lock(ppu_thread& ppu, u32 lwmutex_id, u64 timeout)
while (auto state = ppu.state.fetch_sub(cpu_flag::signal))
{
if (is_stopped(state))
{
return {};
}
if (state & cpu_flag::signal)
{
break;
}
if (is_stopped(state))
{
std::lock_guard lock(mutex->mutex);
if (std::find(mutex->sq.begin(), mutex->sq.end(), &ppu) == mutex->sq.end())
{
break;
}
ppu.state += cpu_flag::again;
return {};
}
if (timeout)
{
if (lv2_obj::wait_timeout(timeout, &ppu))
@ -187,7 +209,7 @@ error_code _sys_lwmutex_lock(ppu_thread& ppu, u32 lwmutex_id, u64 timeout)
// Wait for rescheduling
if (ppu.check_state())
{
return {};
continue;
}
std::lock_guard lock(mutex->mutex);
@ -257,6 +279,12 @@ error_code _sys_lwmutex_unlock(ppu_thread& ppu, u32 lwmutex_id)
if (const auto cpu = mutex.schedule<ppu_thread>(mutex.sq, mutex.protocol))
{
if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
{
ppu.state += cpu_flag::again;
return;
}
mutex.awake(cpu);
return;
}
@ -284,6 +312,12 @@ error_code _sys_lwmutex_unlock2(ppu_thread& ppu, u32 lwmutex_id)
if (const auto cpu = mutex.schedule<ppu_thread>(mutex.sq, mutex.protocol))
{
if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
{
ppu.state += cpu_flag::again;
return;
}
static_cast<ppu_thread*>(cpu)->gpr[3] = CELL_EBUSY;
mutex.awake(cpu);
return;

View File

@ -71,6 +71,9 @@ struct lv2_lwmutex final : lv2_obj
{
}
lv2_lwmutex(utils::serial& ar);
void save(utils::serial& ar);
// Add a waiter
void add_waiter(cpu_thread* cpu)
{

View File

@ -12,18 +12,77 @@
LOG_CHANNEL(sys_memory);
//
static shared_mutex s_memstats_mtx;
// Construct a memory container of the given size; only instances created
// through IDM get a real id, others use SYS_MEMORY_CONTAINER_ID_INVALID.
lv2_memory_container::lv2_memory_container(u32 size, bool from_idm) noexcept
: size(size)
, id{from_idm ? idm::last_id() : SYS_MEMORY_CONTAINER_ID_INVALID}
{
}
//
static shared_mutex s_memstats_mtx;
// Deserialize a memory container (size, then currently used amount)
lv2_memory_container::lv2_memory_container(utils::serial& ar, bool from_idm) noexcept
: size(ar)
, id{from_idm ? idm::last_id() : SYS_MEMORY_CONTAINER_ID_INVALID}
, used(ar)
{
}
std::shared_ptr<void> lv2_memory_container::load(utils::serial& ar)
{
// Use idm::last_id() only for the instances at IDM
return std::make_shared<lv2_memory_container>(stx::exact_t<utils::serial&>(ar), true);
}
// Serialize: field order must mirror the deserializing constructor above
void lv2_memory_container::save(utils::serial& ar)
{
ar(size, used);
}
// Resolve a memory container by id: the invalid id designates the global
// default container held in fixed objects, anything else is an IDM lookup.
lv2_memory_container* lv2_memory_container::search(u32 id)
{
	if (id == SYS_MEMORY_CONTAINER_ID_INVALID)
	{
		// Global default container
		return &g_fxo->get<lv2_memory_container>();
	}

	return idm::check<lv2_memory_container>(id);
}
// Maps each 64K address slot to the memory container that owns it.
// Serialized sparsely: only non-null slots are stored as (slot, container id).
struct sys_memory_address_table
{
atomic_t<lv2_memory_container*> addrs[65536]{};
sys_memory_address_table() = default;
// Must be restored after the containers themselves (hence init_pos + 0.1)
SAVESTATE_INIT_POS(id_manager::id_map<lv2_memory_container>::savestate_init_pos + 0.1);
sys_memory_address_table(utils::serial& ar)
{
// First: address, second: container ID (SYS_MEMORY_CONTAINER_ID_INVALID for global FXO memory container)
std::unordered_map<u16, u32> mm;
ar(mm);
for (const auto& [addr, id] : mm)
{
addrs[addr] = ensure(lv2_memory_container::search(id));
}
}
void save(utils::serial& ar)
{
// Collect only occupied slots; the slot index is the array offset
std::unordered_map<u16, u32> mm;
for (auto& ctr : addrs)
{
if (const auto ptr = +ctr)
{
mm[static_cast<u16>(&ctr - addrs)] = ptr->id;
}
}
ar(mm);
}
};
// Todo: fix order of error checks

View File

@ -70,7 +70,13 @@ struct lv2_memory_container
const lv2_mem_container_id id; // ID of the container in if placed at IDM, otherwise SYS_MEMORY_CONTAINER_ID_INVALID
atomic_t<u32> used{}; // Amount of "physical" memory currently used
SAVESTATE_INIT_POS(1);
lv2_memory_container(u32 size, bool from_idm = false) noexcept;
lv2_memory_container(utils::serial& ar, bool from_idm = false) noexcept;
static std::shared_ptr<void> load(utils::serial& ar);
void save(utils::serial& ar);
static lv2_memory_container* search(u32 id);
// Try to get specified amount of "physical" memory
// Values greater than UINT32_MAX will fail

View File

@ -8,6 +8,8 @@
#include "sys_sync.h"
#include "sys_process.h"
#include <span>
#include "util/vm.hpp"
LOG_CHANNEL(sys_mmapper);
@ -42,6 +44,32 @@ lv2_memory::lv2_memory(u32 size, u32 align, u64 flags, u64 key, bool pshared, lv
#endif
}
// Deserialize a shared memory object. If the memory was mapped at save time
// the existing shm is recovered from the vm block at the saved address,
// otherwise the raw contents were embedded and are restored into a fresh shm.
lv2_memory::lv2_memory(utils::serial& ar)
: size(ar)
, align(ar)
, flags(ar)
, key(ar)
, pshared(ar)
, ct(lv2_memory_container::search(ar.operator u32()))
, shm([&](u32 addr)
{
if (addr)
{
// Was mapped: reuse the shm already registered in the vm block
return ensure(vm::get(vm::any, addr)->peek(addr).second);
}
// Was not mapped: contents follow inline in the savestate
const auto _shm = std::make_shared<utils::shm>(size, 1);
ar(std::span(_shm->map_self(), size));
return _shm;
}(ar.operator u32()))
, counter(ar)
{
#ifndef _WIN32
// Optimization that's useless on Windows :puke:
utils::memory_lock(shm->map_self(), size);
#endif
}
CellError lv2_memory::on_id_create()
{
if (!exists && !ct->take(size))
@ -53,6 +81,40 @@ CellError lv2_memory::on_id_create()
return {};
}
// Load a shared memory object from the savestate and register it by IPC key
std::shared_ptr<void> lv2_memory::load(utils::serial& ar)
{
auto mem = std::make_shared<lv2_memory>(ar);
// The container charge already happened before saving
mem->exists++; // Disable on_id_create()
std::shared_ptr<void> ptr = lv2_obj::load(mem->key, mem, +mem->pshared);
mem->exists--;
return ptr;
}
// Serialize a shared memory object; embeds raw contents only when unmapped
// (counter == 0), otherwise stores the mapping address (see the ctor above).
void lv2_memory::save(utils::serial& ar)
{
USING_SERIALIZATION_VERSION(lv2_memory);
ar(size, align, flags, key, pshared, ct->id);
ar(counter ? vm::get_shm_addr(shm) : 0);
if (!counter)
{
ar(std::span(shm->map_self(), size));
}
ar(counter);
}
// (De)serialize the registered page-fault notification entries
page_fault_notification_entries::page_fault_notification_entries(utils::serial& ar)
{
ar(entries);
}
void page_fault_notification_entries::save(utils::serial& ar)
{
ar(entries);
}
template <bool exclusive = false>
error_code create_lv2_shm(bool pshared, u64 ipc_key, u64 size, u32 align, u64 flags, lv2_memory_container* ct)
{

View File

@ -30,6 +30,10 @@ struct lv2_memory : lv2_obj
lv2_memory(u32 size, u32 align, u64 flags, u64 key, bool pshared, lv2_memory_container* ct);
lv2_memory(utils::serial& ar);
static std::shared_ptr<void> load(utils::serial& ar);
void save(utils::serial& ar);
CellError on_id_create();
};
@ -54,6 +58,8 @@ enum : u64
struct page_fault_notification_entry
{
ENABLE_BITWISE_SERIALIZATION;
u32 start_addr; // Starting address of region to monitor.
u32 event_queue_id; // Queue to be notified.
u32 port_id; // Port used to notify the queue.
@ -64,6 +70,10 @@ struct page_fault_notification_entries
{
std::vector<page_fault_notification_entry> entries;
shared_mutex mutex;
page_fault_notification_entries() = default;
page_fault_notification_entries(utils::serial& ar);
void save(utils::serial& ar);
};
struct page_fault_event_entries

View File

@ -9,6 +9,27 @@
LOG_CHANNEL(sys_mutex);
// Restore a mutex from a savestate stream (order mirrors lv2_mutex::save)
lv2_mutex::lv2_mutex(utils::serial& ar)
	: protocol(ar)
	, recursive(ar)
	, adaptive(ar)
	, key(ar)
	, name(ar)
{
	// Runtime state restored after the const-ish attributes
	ar(lock_count, owner);
}
// Deserialize one mutex and register it (shared by IPC key when applicable)
std::shared_ptr<void> lv2_mutex::load(utils::serial& ar)
{
	auto restored = std::make_shared<lv2_mutex>(ar);
	std::shared_ptr<void> result = lv2_obj::load(restored->key, restored);
	return result;
}
// Serialize mutex state (counterpart of the deserialization constructor).
void lv2_mutex::save(utils::serial& ar)
{
	// 'owner & -2' clears the least significant bit so no stale
	// waiters-present flag is carried into the savestate
	ar(protocol, recursive, adaptive, key, name, lock_count, owner & -2);
}
error_code sys_mutex_create(ppu_thread& ppu, vm::ptr<u32> mutex_id, vm::ptr<sys_mutex_attribute_t> attr)
{
ppu.state += cpu_flag::wait;
@ -154,11 +175,24 @@ error_code sys_mutex_lock(ppu_thread& ppu, u32 mutex_id, u64 timeout)
while (auto state = ppu.state.fetch_sub(cpu_flag::signal))
{
if (is_stopped(state) || state & cpu_flag::signal)
if (state & cpu_flag::signal)
{
break;
}
if (is_stopped(state))
{
std::lock_guard lock(mutex->mutex);
if (std::find(mutex->sq.begin(), mutex->sq.end(), &ppu) == mutex->sq.end())
{
break;
}
ppu.state += cpu_flag::again;
return {};
}
if (timeout)
{
if (lv2_obj::wait_timeout(timeout, &ppu))
@ -166,7 +200,7 @@ error_code sys_mutex_lock(ppu_thread& ppu, u32 mutex_id, u64 timeout)
// Wait for rescheduling
if (ppu.check_state())
{
return {};
continue;
}
std::lock_guard lock(mutex->mutex);

View File

@ -46,6 +46,10 @@ struct lv2_mutex final : lv2_obj
{
}
lv2_mutex(utils::serial& ar);
static std::shared_ptr<void> load(utils::serial& ar);
void save(utils::serial& ar);
CellError try_lock(u32 id)
{
const u32 value = owner;

View File

@ -240,6 +240,89 @@ void fmt_class_string<struct in_addr>::format(std::string& out, u64 arg)
fmt::append(out, "%u.%u.%u.%u", data[0], data[1], data[2], data[3]);
}
// Restore base socket state from a savestate.
// 'type' was already consumed by lv2_socket::load() to pick the derived
// class, hence it is passed in explicitly instead of being read here.
lv2_socket::lv2_socket(utils::serial& ar, lv2_socket_type _type)
	: family(ar)
	, type(_type)
	, protocol(ar)
	, so_nbio(ar)
	, so_error(ar)
	, so_tcp_maxseg(ar)
#ifdef _WIN32
	, so_reuseaddr(ar)
	, so_reuseport(ar)
{
#else
{
	// Try to match structure between different platforms
	// (skip the 8 bytes the Win32 build stores for so_reuseaddr/so_reuseport)
	ar.pos += 8;
#endif
	lv2_id = idm::last_id();
	ar(last_bound_addr);
}
// Factory: reads the saved socket type tag and reconstructs the matching
// derived socket class from the savestate stream, then re-binds it if it
// was bound at save time.
std::shared_ptr<lv2_socket> lv2_socket::load(utils::serial& ar)
{
	const lv2_socket_type type{ar};

	std::shared_ptr<lv2_socket> sock_lv2;

	switch (type)
	{
	case SYS_NET_SOCK_STREAM:
	case SYS_NET_SOCK_DGRAM:
	{
		auto lv2_native = std::make_shared<lv2_socket_native>(ar, type);
		ensure(lv2_native->create_socket() >= 0);
		sock_lv2 = std::move(lv2_native);
		break;
	}
	case SYS_NET_SOCK_RAW: sock_lv2 = std::make_shared<lv2_socket_raw>(ar, type); break;
	case SYS_NET_SOCK_DGRAM_P2P: sock_lv2 = std::make_shared<lv2_socket_p2p>(ar, type); break;
	case SYS_NET_SOCK_STREAM_P2P: sock_lv2 = std::make_shared<lv2_socket_p2ps>(ar, type); break;
	}

	// Guard against an unknown/corrupted type tag in the savestate: without
	// this, the dereferences below would crash on a null pointer
	ensure(sock_lv2);

	if (std::memcmp(&sock_lv2->last_bound_addr, std::array<u8, 16>{}.data(), 16))
	{
		// A non-zero saved address means the socket was bound - rebind it.
		// NOTE: It is allowed fail
		sock_lv2->bind(sock_lv2->last_bound_addr);
	}

	return sock_lv2;
}
// Serialize socket state.
// save_only_this_class=true: invoked from a derived class' save() and emits
// only the base-class fields. Otherwise this writes the type tag and
// dispatches to the derived serializer (matching lv2_socket::load()).
void lv2_socket::save(utils::serial& ar, bool save_only_this_class)
{
	USING_SERIALIZATION_VERSION(lv2_net);

	if (save_only_this_class)
	{
		ar(family, protocol, so_nbio, so_error, so_tcp_maxseg);
#ifdef _WIN32
		ar(so_reuseaddr, so_reuseport);
#else
		// Placeholder bytes keep the layout identical to the Win32 build
		ar(std::array<char, 8>{});
#endif
		ar(last_bound_addr);
		return;
	}

	ar(type);

	switch (type)
	{
	case SYS_NET_SOCK_STREAM:
	case SYS_NET_SOCK_DGRAM:
	{
		static_cast<lv2_socket_native*>(this)->save(ar);
		break;
	}
	case SYS_NET_SOCK_RAW: static_cast<lv2_socket_raw*>(this)->save(ar); break;
	case SYS_NET_SOCK_DGRAM_P2P: static_cast<lv2_socket_p2p*>(this)->save(ar); break;
	case SYS_NET_SOCK_STREAM_P2P: static_cast<lv2_socket_p2ps*>(this)->save(ar); break;
	}
}
void sys_net_dump_data(std::string_view desc, const u8* data, s32 len)
{
if (sys_net_dump.enabled == logs::level::trace)
@ -360,10 +443,7 @@ error_code sys_net_bnet_accept(ppu_thread& ppu, s32 s, vm::ptr<sys_net_sockaddr>
}
}
if (ppu.is_stopped())
{
return {};
}
static_cast<void>(ppu.test_stopped());
if (addr)
{
@ -402,7 +482,7 @@ error_code sys_net_bnet_bind(ppu_thread& ppu, s32 s, vm::cptr<sys_net_sockaddr>
const auto sock = idm::check<lv2_socket>(s, [&](lv2_socket& sock) -> s32
{
return sock.bind(sn_addr, s);
return sock.bind(sn_addr);
});
if (!sock)
@ -779,10 +859,7 @@ error_code sys_net_bnet_recvfrom(ppu_thread& ppu, s32 s, vm::ptr<void> buf, u32
}
}
if (ppu.is_stopped())
{
return {};
}
static_cast<void>(ppu.test_stopped());
if (result == -SYS_NET_EWOULDBLOCK)
{

View File

@ -269,6 +269,8 @@ struct sys_net_pollfd
// sockaddr prefixed with sys_net_
struct sys_net_sockaddr
{
ENABLE_BITWISE_SERIALIZATION;
u8 sa_len;
u8 sa_family;
char sa_data[14];
@ -277,6 +279,8 @@ struct sys_net_sockaddr
// sockaddr_dl prefixed with sys_net_
struct sys_net_sockaddr_dl
{
ENABLE_BITWISE_SERIALIZATION;
u8 sdl_len;
u8 sdl_family;
be_t<u16> sdl_index;
@ -290,6 +294,8 @@ struct sys_net_sockaddr_dl
// sockaddr_in prefixed with sys_net_
struct sys_net_sockaddr_in
{
ENABLE_BITWISE_SERIALIZATION;
u8 sin_len;
u8 sin_family;
be_t<u16> sin_port;
@ -300,6 +306,8 @@ struct sys_net_sockaddr_in
// sockaddr_in_p2p prefixed with sys_net_
struct sys_net_sockaddr_in_p2p
{
ENABLE_BITWISE_SERIALIZATION;
u8 sin_len;
u8 sin_family;
be_t<u16> sin_port;

View File

@ -49,7 +49,13 @@ public:
};
public:
SAVESTATE_INIT_POS(7); // Dependency on RPCN
lv2_socket(lv2_socket_family family, lv2_socket_type type, lv2_ip_protocol protocol);
lv2_socket(utils::serial&){}
lv2_socket(utils::serial&, lv2_socket_type type);
static std::shared_ptr<lv2_socket> load(utils::serial& ar);
void save(utils::serial&, bool save_only_this_class = false);
virtual ~lv2_socket() = default;
std::unique_lock<shared_mutex> lock();
@ -73,7 +79,7 @@ public:
public:
virtual std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> accept(bool is_lock = true) = 0;
virtual s32 bind(const sys_net_sockaddr& addr, s32 ps3_id) = 0;
virtual s32 bind(const sys_net_sockaddr& addr) = 0;
virtual std::optional<s32> connect(const sys_net_sockaddr& addr) = 0;
virtual s32 connect_followup() = 0;
@ -102,6 +108,8 @@ public:
static const u32 id_count = 1000;
protected:
lv2_socket(utils::serial&, bool);
shared_mutex mutex;
u32 lv2_id = 0;
@ -130,4 +138,6 @@ protected:
// Tracks connect for WSAPoll workaround
bool connecting = false;
#endif
sys_net_sockaddr last_bound_addr{};
};

View File

@ -13,6 +13,32 @@ lv2_socket_native::lv2_socket_native(lv2_socket_family family, lv2_socket_type t
{
}
// Restore a native (STREAM/DGRAM) socket from a savestate. Reads the base
// fields first, then the Win32-only options (or their placeholder bytes).
lv2_socket_native::lv2_socket_native(utils::serial& ar, lv2_socket_type type)
	: lv2_socket(ar, type)
{
#ifdef _WIN32
	ar(so_reuseaddr, so_reuseport);
#else
	// Consume the 8 placeholder bytes written by non-Win32 save(); warn if a
	// Win32-produced savestate carried real option values we cannot apply
	std::array<char, 8> dummy{};
	ar(dummy);

	if (dummy != std::array<char, 8>{})
	{
		sys_net.error("[Native] Savestate tried to load Win32 specific data, compatibility may be affected");
	}
#endif
}
// Serialize native socket state: base-class fields followed by the
// Win32-only options (placeholder zero bytes on other platforms so the
// stream layout stays identical across OSes).
void lv2_socket_native::save(utils::serial& ar)
{
	static_cast<lv2_socket*>(this)->save(ar, true);
#ifdef _WIN32
	ar(so_reuseaddr, so_reuseport);
#else
	ar(std::array<char, 8>{});
#endif
}
lv2_socket_native::~lv2_socket_native()
{
std::lock_guard lock(mutex);
@ -94,7 +120,7 @@ std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> lv2_socket_
return {false, {}, {}, {}};
}
s32 lv2_socket_native::bind(const sys_net_sockaddr& addr, [[maybe_unused]] s32 ps3_id)
s32 lv2_socket_native::bind(const sys_net_sockaddr& addr)
{
std::lock_guard lock(mutex);
@ -110,6 +136,7 @@ s32 lv2_socket_native::bind(const sys_net_sockaddr& addr, [[maybe_unused]] s32 p
if (::bind(socket, reinterpret_cast<struct sockaddr*>(&native_addr), native_addr_len) == 0)
{
last_bound_addr = addr;
return CELL_OK;
}
return -get_last_error(false);

View File

@ -31,11 +31,13 @@ class lv2_socket_native final : public lv2_socket
{
public:
lv2_socket_native(lv2_socket_family family, lv2_socket_type type, lv2_ip_protocol protocol);
lv2_socket_native(utils::serial& ar, lv2_socket_type type);
void save(utils::serial& ar);
~lv2_socket_native();
s32 create_socket();
std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> accept(bool is_lock = true) override;
s32 bind(const sys_net_sockaddr& addr, s32 ps3_id) override;
s32 bind(const sys_net_sockaddr& addr) override;
std::optional<s32> connect(const sys_net_sockaddr& addr) override;
s32 connect_followup() override;

View File

@ -11,6 +11,36 @@ lv2_socket_p2p::lv2_socket_p2p(lv2_socket_family family, lv2_socket_type type, l
{
}
// Restore a P2P datagram socket from a savestate.
// save() writes: base fields, port, vport, then ONE deque of pending
// datagrams - so exactly one deque must be read here.
lv2_socket_p2p::lv2_socket_p2p(utils::serial& ar, lv2_socket_type type)
	: lv2_socket(ar, type)
{
	ar(port, vport);

	// Read the queued datagrams exactly once. The previous code
	// list-initialized the deque from 'ar' (which prefers the
	// initializer_list constructor, converting 'ar' into a single element)
	// and then deserialized it a second time, desynchronizing the stream.
	std::deque<std::pair<sys_net_sockaddr_in_p2p, std::vector<u8>>> data_dequeue;
	ar(data_dequeue);

	// Refill the std::queue member from the serialized deque
	for (; !data_dequeue.empty(); data_dequeue.pop_front())
	{
		data.push(data_dequeue.front());
	}
}
// Serialize P2P socket state on top of the base socket fields.
// NOTE(review): this drains the 'data' queue while saving, leaving the live
// socket without its pending datagrams - acceptable only if emulation stops
// after saving; confirm behavior for save-and-continue.
void lv2_socket_p2p::save(utils::serial& ar)
{
	static_cast<lv2_socket*>(this)->save(ar, true);
	ar(port, vport);

	// std::queue is not directly serializable - copy out through a deque
	std::deque<std::pair<sys_net_sockaddr_in_p2p, std::vector<u8>>> data_dequeue;

	for (; !data.empty(); data.pop())
	{
		data_dequeue.push_back(data.front());
	}

	ar(data_dequeue);
}
void lv2_socket_p2p::handle_new_data(sys_net_sockaddr_in_p2p p2p_addr, std::vector<u8> p2p_data)
{
std::lock_guard lock(mutex);
@ -69,7 +99,7 @@ s32 lv2_socket_p2p::listen([[maybe_unused]] s32 backlog)
return {};
}
s32 lv2_socket_p2p::bind(const sys_net_sockaddr& addr, s32 ps3_id)
s32 lv2_socket_p2p::bind(const sys_net_sockaddr& addr)
{
const auto* psa_in_p2p = reinterpret_cast<const sys_net_sockaddr_in_p2p*>(&addr);
u16 p2p_port = psa_in_p2p->sin_port;
@ -118,7 +148,7 @@ s32 lv2_socket_p2p::bind(const sys_net_sockaddr& addr, s32 ps3_id)
}
}
pport.bound_p2p_vports.insert(std::make_pair(p2p_vport, ps3_id));
pport.bound_p2p_vports.insert(std::make_pair(p2p_vport, lv2_id));
}
}
@ -127,6 +157,7 @@ s32 lv2_socket_p2p::bind(const sys_net_sockaddr& addr, s32 ps3_id)
port = p2p_port;
vport = p2p_vport;
socket = real_socket;
last_bound_addr = addr;
}
return CELL_OK;

View File

@ -6,9 +6,11 @@ class lv2_socket_p2p : public lv2_socket
{
public:
lv2_socket_p2p(lv2_socket_family family, lv2_socket_type type, lv2_ip_protocol protocol);
lv2_socket_p2p(utils::serial& ar, lv2_socket_type type);
void save(utils::serial& ar);
std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> accept(bool is_lock = true) override;
s32 bind(const sys_net_sockaddr& addr, s32 ps3_id) override;
s32 bind(const sys_net_sockaddr& addr) override;
std::optional<s32> connect(const sys_net_sockaddr& addr) override;
s32 connect_followup() override;

View File

@ -238,6 +238,18 @@ lv2_socket_p2ps::lv2_socket_p2ps(socket_type socket, u16 port, u16 vport, u32 op
status = p2ps_stream_status::stream_connected;
}
// Restore a P2P stream socket: base/P2P fields via the parent constructor,
// then the stream-specific state (order mirrors lv2_socket_p2ps::save)
lv2_socket_p2ps::lv2_socket_p2ps(utils::serial& ar, lv2_socket_type type)
	: lv2_socket_p2p(ar, type)
{
	ar(status, max_backlog, backlog, op_port, op_vport, op_addr, data_beg_seq, received_data, cur_seq);
}
// Serialize P2P stream socket state: parent (base + P2P) fields first,
// then the stream-specific members in the same order the constructor reads
void lv2_socket_p2ps::save(utils::serial& ar)
{
	static_cast<lv2_socket_p2p*>(this)->save(ar);
	ar(status, max_backlog, backlog, op_port, op_vport, op_addr, data_beg_seq, received_data, cur_seq);
}
bool lv2_socket_p2ps::handle_connected(p2ps_encapsulated_tcp* tcp_header, u8* data, ::sockaddr_storage* op_addr)
{
std::lock_guard lock(mutex);
@ -414,7 +426,7 @@ bool lv2_socket_p2ps::handle_listening(p2ps_encapsulated_tcp* tcp_header, [[mayb
send_u2s_packet(std::move(packet), reinterpret_cast<::sockaddr_in*>(op_addr), send_hdr.seq, true);
}
backlog.push(new_sock_id);
backlog.push_back(new_sock_id);
if (events.test_and_reset(lv2_socket::poll_t::read))
{
bs_t<lv2_socket::poll_t> read_event = lv2_socket::poll_t::read;
@ -501,7 +513,7 @@ std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> lv2_socket_
}
auto p2ps_client = backlog.front();
backlog.pop();
backlog.pop_front();
sys_net_sockaddr ps3_addr{};
auto* paddr = reinterpret_cast<sys_net_sockaddr_in_p2p*>(&ps3_addr);
@ -519,7 +531,7 @@ std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> lv2_socket_
return {true, p2ps_client, {}, ps3_addr};
}
s32 lv2_socket_p2ps::bind(const sys_net_sockaddr& addr, s32 ps3_id)
s32 lv2_socket_p2ps::bind(const sys_net_sockaddr& addr)
{
const auto* psa_in_p2p = reinterpret_cast<const sys_net_sockaddr_in_p2p*>(&addr);
@ -563,7 +575,7 @@ s32 lv2_socket_p2ps::bind(const sys_net_sockaddr& addr, s32 ps3_id)
{
p2p_vport++;
}
pport.bound_p2p_streams.emplace((static_cast<u64>(p2p_vport) << 32), ps3_id);
pport.bound_p2p_streams.emplace((static_cast<u64>(p2p_vport) << 32), lv2_id);
}
else
{
@ -572,12 +584,13 @@ s32 lv2_socket_p2ps::bind(const sys_net_sockaddr& addr, s32 ps3_id)
{
return -SYS_NET_EADDRINUSE;
}
pport.bound_p2p_streams.emplace(key, ps3_id);
pport.bound_p2p_streams.emplace(key, lv2_id);
}
port = p2p_port;
vport = p2p_vport;
socket = real_socket;
last_bound_addr = addr;
}
}

View File

@ -59,6 +59,8 @@ class lv2_socket_p2ps final : public lv2_socket_p2p
public:
lv2_socket_p2ps(lv2_socket_family family, lv2_socket_type type, lv2_ip_protocol protocol);
lv2_socket_p2ps(socket_type socket, u16 port, u16 vport, u32 op_addr, u16 op_port, u16 op_vport, u64 cur_seq, u64 data_beg_seq);
lv2_socket_p2ps(utils::serial& ar, lv2_socket_type type);
void save(utils::serial& ar);
p2ps_stream_status get_status() const;
void set_status(p2ps_stream_status new_status);
@ -67,7 +69,7 @@ public:
void send_u2s_packet(std::vector<u8> data, const ::sockaddr_in* dst, u32 seq, bool require_ack);
std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> accept(bool is_lock = true) override;
s32 bind(const sys_net_sockaddr& addr, s32 ps3_id) override;
s32 bind(const sys_net_sockaddr& addr) override;
std::optional<s32> connect(const sys_net_sockaddr& addr) override;
@ -90,7 +92,7 @@ protected:
p2ps_stream_status status = p2ps_stream_status::stream_closed;
usz max_backlog = 0; // set on listen
std::queue<s32> backlog;
std::deque<s32> backlog;
u16 op_port = 0, op_vport = 0;
u32 op_addr = 0;

View File

@ -8,13 +8,23 @@ lv2_socket_raw::lv2_socket_raw(lv2_socket_family family, lv2_socket_type type, l
{
}
// Restore a raw socket from a savestate; raw sockets carry no extra state
// beyond the lv2_socket base fields
lv2_socket_raw::lv2_socket_raw(utils::serial& ar, lv2_socket_type type)
	: lv2_socket(ar, type)
{
}
// Serialize raw socket state: nothing beyond the lv2_socket base fields
void lv2_socket_raw::save(utils::serial& ar)
{
	// Base-qualified call (equivalent to casting 'this' - save is non-virtual)
	lv2_socket::save(ar, true);
}
std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> lv2_socket_raw::accept([[maybe_unused]] bool is_lock)
{
sys_net.todo("lv2_socket_raw::accept");
return {};
}
s32 lv2_socket_raw::bind([[maybe_unused]] const sys_net_sockaddr& addr, [[maybe_unused]] s32 ps3_id)
s32 lv2_socket_raw::bind([[maybe_unused]] const sys_net_sockaddr& addr)
{
sys_net.todo("lv2_socket_raw::bind");
return {};

View File

@ -6,9 +6,11 @@ class lv2_socket_raw final : public lv2_socket
{
public:
lv2_socket_raw(lv2_socket_family family, lv2_socket_type type, lv2_ip_protocol protocol);
lv2_socket_raw(utils::serial& ar, lv2_socket_type type);
void save(utils::serial& ar);
std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> accept(bool is_lock = true) override;
s32 bind(const sys_net_sockaddr& addr, s32 ps3_id) override;
s32 bind(const sys_net_sockaddr& addr) override;
std::optional<s32> connect(const sys_net_sockaddr& addr) override;
s32 connect_followup() override;

View File

@ -12,7 +12,7 @@
#include "sys_overlay.h"
#include "sys_fs.h"
extern std::pair<std::shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_exec_object&, const std::string& path, s64 file_offset);
extern std::pair<std::shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_exec_object&, const std::string& path, s64 file_offset, utils::serial* ar = nullptr);
extern bool ppu_initialize(const ppu_module&, bool = false);
extern void ppu_finalize(const ppu_module&);
@ -61,6 +61,38 @@ static error_code overlay_load_module(vm::ptr<u32> ovlmid, const std::string& vp
return CELL_OK;
}
fs::file make_file_view(fs::file&&, u64);
// Recreate an overlay module from a savestate by re-reading and re-parsing
// the original (S)ELF from the VFS path stored by save().
std::shared_ptr<void> lv2_overlay::load(utils::serial& ar)
{
	const std::string path = vfs::get(ar.operator std::string());
	const s64 offset = ar;

	std::shared_ptr<lv2_overlay> ovlm;

	// Strip the "_x%x" suffix appended to paths of files loaded at an offset
	fs::file file{path.substr(0, path.size() - (offset ? fmt::format("_x%x", offset).size() : 0))};

	if (file)
	{
		// Use the last NPDRM key to decrypt SELFs; passing &ar lets the
		// loader restore its internal state from the savestate stream
		u128 klic = g_fxo->get<loaded_npdrm_keys>().last_key();
		file = make_file_view(std::move(file), offset);
		ovlm = ppu_load_overlay(ppu_exec_object{ decrypt_self(std::move(file), reinterpret_cast<u8*>(&klic)) }, path, 0, &ar).first;
		ensure(ovlm);
	}
	else
	{
		// A missing file is tolerated only in state inspection mode
		ensure(g_cfg.savestate.state_inspection_mode.get());
	}

	// std::move is intentional here: it converts shared_ptr<lv2_overlay> to
	// shared_ptr<void> without an extra refcount copy (types differ, no NRVO)
	return std::move(ovlm);
}
// Serialize an overlay module: only the VFS path and file offset are saved;
// load() re-parses the ELF from disk instead of storing the image
void lv2_overlay::save(utils::serial& ar)
{
	USING_SERIALIZATION_VERSION(lv2_prx_overlay);
	ar(vfs::retrieve(path), offset);
}
error_code sys_overlay_load_module(vm::ptr<u32> ovlmid, vm::cptr<char> path, u64 flags, vm::ptr<u32> entry)
{
sys_overlay.warning("sys_overlay_load_module(ovlmid=*0x%x, path=%s, flags=0x%x, entry=*0x%x)", ovlmid, path, flags, entry);

View File

@ -8,6 +8,11 @@ struct lv2_overlay final : lv2_obj, ppu_module
static const u32 id_base = 0x25000000;
u32 entry;
lv2_overlay() = default;
lv2_overlay(utils::serial& ar){}
static std::shared_ptr<void> load(utils::serial& ar);
void save(utils::serial& ar);
};
error_code sys_overlay_load_module(vm::ptr<u32> ovlmid, vm::cptr<char> path, u64 flags, vm::ptr<u32> entry);

View File

@ -192,10 +192,12 @@ error_code sys_ppu_thread_join(ppu_thread& ppu, u32 thread_id, vm::ptr<u64> vptr
if (thread->joiner != ppu_join_status::exited)
{
// Thread aborted, log it later
ppu.state += cpu_flag::exit;
ppu.state += cpu_flag::again;
return {};
}
static_cast<void>(ppu.test_stopped());
// Get the exit status from the register
const u64 vret = thread->gpr[3];

View File

@ -15,7 +15,7 @@
#include "sys_memory.h"
#include <span>
extern std::shared_ptr<lv2_prx> ppu_load_prx(const ppu_prx_object&, const std::string&, s64);
extern std::shared_ptr<lv2_prx> ppu_load_prx(const ppu_prx_object&, const std::string&, s64, utils::serial* = nullptr);
extern void ppu_unload_prx(const lv2_prx& prx);
extern bool ppu_initialize(const ppu_module&, bool = false);
extern void ppu_finalize(const ppu_module&);
@ -291,6 +291,76 @@ static error_code prx_load_module(const std::string& vpath, u64 flags, vm::ptr<s
return not_an_error(idm::last_id());
}
fs::file make_file_view(fs::file&& _file, u64 offset);
// Recreate a PRX module from a savestate: re-parse the original file when
// available, otherwise (state inspection mode only) build an HLE stub and
// recover partial segment information from the stream.
std::shared_ptr<void> lv2_prx::load(utils::serial& ar)
{
	const std::string path = vfs::get(ar.operator std::string());
	const s64 offset = ar;
	const u32 state = ar;

	usz seg_count = 0;
	ar.deserialize_vle(seg_count);

	std::shared_ptr<lv2_prx> prx;

	// Fallback: construct an empty HLE module carrying only path/name
	auto hle_load = [&]()
	{
		prx = std::make_shared<lv2_prx>();
		prx->path = path;
		prx->name = path.substr(path.find_last_of(fs::delim) + 1);
	};

	if (seg_count)
	{
		// Strip the "_x%x" suffix appended to paths of files loaded at an offset
		fs::file file{path.substr(0, path.size() - (offset ? fmt::format("_x%x", offset).size() : 0))};

		if (file)
		{
			u128 klic = g_fxo->get<loaded_npdrm_keys>().last_key();
			file = make_file_view(std::move(file), offset);
			prx = ppu_load_prx(ppu_prx_object{ decrypt_self(std::move(file), reinterpret_cast<u8*>(&klic)) }, path, 0, &ar);
			ensure(prx);
		}
		else
		{
			ensure(g_cfg.savestate.state_inspection_mode.get());

			hle_load();

			// Partially recover information
			// NOTE(review): reads seg_count addresses, but save() emits an
			// address only for type-0x1 non-empty segments - verify these
			// counts always match (they do if all saved segments are 0x1)
			for (usz i = 0; i < seg_count; i++)
			{
				auto& seg = prx->segs.emplace_back();
				seg.addr = ar;
				seg.size = 1; // TODO
			}
		}
	}
	else
	{
		hle_load();
	}

	prx->state = state;

	// Intentional move: converts shared_ptr<lv2_prx> to shared_ptr<void>
	return std::move(prx);
}
// Serialize a PRX module: VFS path, offset, state, total segment count,
// then the addresses of loadable segments.
// NOTE(review): the vle count is segs.size() while addresses are emitted
// only for type-0x1 non-empty segments; load()'s recovery path reads
// 'seg_count' addresses - confirm the two can never diverge.
void lv2_prx::save(utils::serial& ar)
{
	USING_SERIALIZATION_VERSION(lv2_prx_overlay);

	ar(vfs::retrieve(path), offset, state);

	// Save segments count
	ar.serialize_vle(segs.size());

	for (const ppu_segment& seg : segs)
	{
		if (seg.type == 0x1u && seg.size) ar(seg.addr);
	}
}
error_code sys_prx_get_ppu_guid(ppu_thread& ppu)
{
ppu.state += cpu_flag::wait;

View File

@ -189,6 +189,11 @@ struct lv2_prx final : lv2_obj, ppu_module
char module_info_name[28];
u8 module_info_version[2];
be_t<u16> module_info_attributes;
lv2_prx() noexcept = default;
lv2_prx(utils::serial&) {}
static std::shared_ptr<void> load(utils::serial&);
void save(utils::serial& ar);
};
enum : u64

View File

@ -76,12 +76,37 @@ void rsx::thread::send_event(u64 data1, u64 event_flags, u64 data3) const
error = sys_event_port_send(rsx_event_port, data1, event_flags, data3);
}
if (error && error + 0u != CELL_ENOTCONN)
if (!Emu.IsPaused() && error && error + 0u != CELL_ENOTCONN)
{
fmt::throw_exception("rsx::thread::send_event() Failed to send event! (error=%x)", +error);
}
}
// Returns true on success of receiving the event
// Marks the current CPU thread for re-execution (cpu_flag::again) if it is
// waiting on the given event queue, and - when that queue is the RSX driver's
// handler queue - flags the GCM interrupt thread as offline so pending
// intr-thread commands abort cleanly (savestate shutdown path).
void signal_gcm_intr_thread_offline(lv2_event_queue& q)
{
	const auto render = rsx::get_current_renderer();
	const auto cpu = cpu_thread::get_current();

	static shared_mutex s_dummy;

	// 'render' may be null (no RSX context) - lock a dummy mutex in that case
	std::scoped_lock lock_rsx(render ? render->sys_rsx_mtx : s_dummy, q.mutex);

	if (std::find(q.sq.begin(), q.sq.end(), cpu) == q.sq.end())
	{
		return;
	}

	cpu->state += cpu_flag::again;

	// Null check added: the original dereferenced 'render' here even though
	// the lock expression above explicitly allows it to be null
	if (!render || !vm::check_addr(render->driver_info) || vm::_ref<RsxDriverInfo>(render->driver_info).handler_queue != q.id)
	{
		return;
	}

	render->gcm_intr_thread_offline = true;
}
error_code sys_rsx_device_open(cpu_thread& cpu)
{
cpu.state += cpu_flag::wait;
@ -487,7 +512,16 @@ error_code sys_rsx_context_attribute(u32 context_id, u32 package_id, u64 a3, u64
}
}
render->request_emu_flip(flip_idx);
if (!render->request_emu_flip(flip_idx))
{
if (auto cpu = get_current_cpu_thread())
{
cpu->state += cpu_flag::exit;
cpu->state += cpu_flag::again;
}
return {};
}
}
break;
@ -745,6 +779,19 @@ error_code sys_rsx_context_attribute(u32 context_id, u32 package_id, u64 a3, u64
break;
case 0xFEC: // hack: flip event notification
{
reader_lock lock(render->sys_rsx_mtx);
if (render->gcm_intr_thread_offline)
{
if (auto cpu = get_current_cpu_thread())
{
cpu->state += cpu_flag::exit;
cpu->state += cpu_flag::again;
}
break;
}
// we only ever use head 1 for now
driverInfo.head[1].flipFlags |= 0x80000000;
@ -757,6 +804,7 @@ error_code sys_rsx_context_attribute(u32 context_id, u32 package_id, u64 a3, u64
render->send_event(0, SYS_RSX_EVENT_FLIP_BASE << 1, 0);
break;
}
case 0xFED: // hack: vblank command
{
@ -769,6 +817,13 @@ error_code sys_rsx_context_attribute(u32 context_id, u32 package_id, u64 a3, u64
// NOTE: There currently seem to only be 2 active heads on PS3
ensure(a3 < 2);
reader_lock lock(render->sys_rsx_mtx);
if (render->gcm_intr_thread_offline)
{
break;
}
// todo: this is wrong and should be 'second' vblank handler and freq, but since currently everything is reported as being 59.94, this should be fine
driverInfo.head[a3].lastSecondVTime.atomic_op([&](be_t<u64>& time)
{
@ -796,12 +851,27 @@ error_code sys_rsx_context_attribute(u32 context_id, u32 package_id, u64 a3, u64
}
case 0xFEF: // hack: user command
{
reader_lock lock(render->sys_rsx_mtx);
if (render->gcm_intr_thread_offline)
{
if (auto cpu = get_current_cpu_thread())
{
cpu->state += cpu_flag::exit;
cpu->state += cpu_flag::again;
}
break;
}
// 'custom' invalid package id for now
// as i think we need custom lv1 interrupts to handle this accurately
// this also should probly be set by rsxthread
driverInfo.userCmdParam = static_cast<u32>(a4);
render->send_event(0, SYS_RSX_EVENT_USER_CMD, 0);
break;
}
default:
return CELL_EINVAL;

View File

@ -120,6 +120,8 @@ struct RsxDisplayInfo
be_t<u32> width{0};
be_t<u32> height{0};
ENABLE_BITWISE_SERIALIZATION;
bool valid() const
{
return height != 0u && width != 0u;

View File

@ -9,6 +9,26 @@
LOG_CHANNEL(sys_rwlock);
// Restore a reader-writer lock from a savestate (order mirrors save())
lv2_rwlock::lv2_rwlock(utils::serial& ar)
	: protocol(ar)
	, key(ar)
	, name(ar)
{
	// Runtime owner state is restored after the attributes
	ar(owner);
}
// Deserialize one rwlock and register it (shared by IPC key when applicable)
std::shared_ptr<void> lv2_rwlock::load(utils::serial& ar)
{
	auto restored = std::make_shared<lv2_rwlock>(ar);
	std::shared_ptr<void> result = lv2_obj::load(restored->key, restored);
	return result;
}
// Serialize rwlock state (counterpart of the deserialization constructor)
void lv2_rwlock::save(utils::serial& ar)
{
	USING_SERIALIZATION_VERSION(lv2_sync);
	ar(protocol, key, name, owner);
}
error_code sys_rwlock_create(ppu_thread& ppu, vm::ptr<u32> rw_lock_id, vm::ptr<sys_rwlock_attribute_t> attr)
{
ppu.state += cpu_flag::wait;
@ -130,9 +150,22 @@ error_code sys_rwlock_rlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout)
while (auto state = ppu.state.fetch_sub(cpu_flag::signal))
{
if (state & cpu_flag::signal)
{
break;
}
if (is_stopped(state))
{
return {};
std::lock_guard lock(rwlock->mutex);
if (std::find(rwlock->rq.begin(), rwlock->rq.end(), &ppu) == rwlock->rq.end())
{
break;
}
ppu.state += cpu_flag::again;
break;
}
if (state & cpu_flag::signal)
@ -147,7 +180,7 @@ error_code sys_rwlock_rlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout)
// Wait for rescheduling
if (ppu.check_state())
{
return {};
continue;
}
std::lock_guard lock(rwlock->mutex);
@ -257,6 +290,12 @@ error_code sys_rwlock_runlock(ppu_thread& ppu, u32 rw_lock_id)
{
if (const auto cpu = rwlock->schedule<ppu_thread>(rwlock->wq, rwlock->protocol))
{
if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
{
ppu.state += cpu_flag::again;
return {};
}
rwlock->owner = cpu->id << 1 | !rwlock->wq.empty() | !rwlock->rq.empty();
rwlock->awake(cpu);
@ -337,16 +376,24 @@ error_code sys_rwlock_wlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout)
while (auto state = ppu.state.fetch_sub(cpu_flag::signal))
{
if (is_stopped(state))
{
return {};
}
if (state & cpu_flag::signal)
{
break;
}
if (is_stopped(state))
{
std::lock_guard lock(rwlock->mutex);
if (std::find(rwlock->wq.begin(), rwlock->wq.end(), &ppu) == rwlock->wq.end())
{
break;
}
ppu.state += cpu_flag::again;
break;
}
if (timeout)
{
if (lv2_obj::wait_timeout(timeout, &ppu))
@ -354,7 +401,7 @@ error_code sys_rwlock_wlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout)
// Wait for rescheduling
if (ppu.check_state())
{
return {};
continue;
}
std::lock_guard lock(rwlock->mutex);
@ -461,6 +508,12 @@ error_code sys_rwlock_wunlock(ppu_thread& ppu, u32 rw_lock_id)
if (auto cpu = rwlock->schedule<ppu_thread>(rwlock->wq, rwlock->protocol))
{
if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
{
ppu.state += cpu_flag::again;
return {};
}
rwlock->owner = cpu->id << 1 | !rwlock->wq.empty() | !rwlock->rq.empty();
rwlock->awake(cpu);

View File

@ -38,6 +38,10 @@ struct lv2_rwlock final : lv2_obj
, name(name)
{
}
lv2_rwlock(utils::serial& ar);
static std::shared_ptr<void> load(utils::serial& ar);
void save(utils::serial& ar);
};
// Aux

View File

@ -9,6 +9,27 @@
LOG_CHANNEL(sys_semaphore);
// Restore a semaphore from a savestate (order mirrors lv2_sema::save)
lv2_sema::lv2_sema(utils::serial& ar)
	: protocol(ar)
	, key(ar)
	, name(ar)
	, max(ar)
{
	// Current value restored after the attributes
	ar(val);
}
// Deserialize one semaphore and register it (shared by IPC key when applicable)
std::shared_ptr<void> lv2_sema::load(utils::serial& ar)
{
	auto restored = std::make_shared<lv2_sema>(ar);
	std::shared_ptr<void> result = lv2_obj::load(restored->key, restored);
	return result;
}
// Serialize semaphore state.
void lv2_sema::save(utils::serial& ar)
{
	USING_SERIALIZATION_VERSION(lv2_sync);
	// Clamp the saved value to >= 0: a negative value encodes waiter count,
	// which must not persist into the savestate
	ar(protocol, key, name, max, std::max<s32>(+val, 0));
}
error_code sys_semaphore_create(ppu_thread& ppu, vm::ptr<u32> sem_id, vm::ptr<sys_semaphore_attribute_t> attr, s32 initial_val, s32 max_val)
{
ppu.state += cpu_flag::wait;
@ -46,10 +67,7 @@ error_code sys_semaphore_create(ppu_thread& ppu, vm::ptr<u32> sem_id, vm::ptr<sy
return error;
}
if (ppu.test_stopped())
{
return {};
}
static_cast<void>(ppu.test_stopped());
*sem_id = idm::last_id();
return CELL_OK;
@ -129,16 +147,24 @@ error_code sys_semaphore_wait(ppu_thread& ppu, u32 sem_id, u64 timeout)
while (auto state = ppu.state.fetch_sub(cpu_flag::signal))
{
if (is_stopped(state))
{
return {};
}
if (state & cpu_flag::signal)
{
break;
}
if (is_stopped(state))
{
std::lock_guard lock(sem->mutex);
if (std::find(sem->sq.begin(), sem->sq.end(), &ppu) == sem->sq.end())
{
break;
}
ppu.state += cpu_flag::again;
return {};
}
if (timeout)
{
if (lv2_obj::wait_timeout(timeout, &ppu))
@ -146,7 +172,7 @@ error_code sys_semaphore_wait(ppu_thread& ppu, u32 sem_id, u64 timeout)
// Wait for rescheduling
if (ppu.check_state())
{
return {};
continue;
}
std::lock_guard lock(sem->mutex);
@ -240,6 +266,15 @@ error_code sys_semaphore_post(ppu_thread& ppu, u32 sem_id, s32 count)
{
std::lock_guard lock(sem->mutex);
for (auto cpu : sem->sq)
{
if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
{
ppu.state += cpu_flag::again;
return {};
}
}
const auto [val, ok] = sem->val.fetch_op([&](s32& val)
{
if (count + 0u <= sem->max + 0u - val)
@ -294,10 +329,7 @@ error_code sys_semaphore_get_value(ppu_thread& ppu, u32 sem_id, vm::ptr<s32> cou
return CELL_EFAULT;
}
if (ppu.test_stopped())
{
return {};
}
static_cast<void>(ppu.test_stopped());
*count = sema.ret;
return CELL_OK;

View File

@ -40,6 +40,10 @@ struct lv2_sema final : lv2_obj
, val(value)
{
}
lv2_sema(utils::serial& ar);
static std::shared_ptr<void> load(utils::serial& ar);
void save(utils::serial& ar);
};
// Aux

View File

@ -194,6 +194,131 @@ void sys_spu_image::deploy(u8* loc, std::span<const sys_spu_segment> segs)
spu_log.notice("Loaded SPU image: %s (<- %u)%s", hash, applied.size(), dump);
}
// Restore an SPU thread group from a savestate.
// The read order must exactly mirror lv2_spu_group::save().
lv2_spu_group::lv2_spu_group(utils::serial& ar) noexcept
	: name(ar.operator std::string())
	, id(idm::last_id())
	, max_num(ar)
	, mem_size(ar)
	, type(ar) // SPU Thread Group Type
	, ct(lv2_memory_container::search(ar)) // container saved by id
	, has_scheduler_context(ar.operator u8())
	, max_run(ar)
	, init(ar)
	, prio(ar)
	, run_state(ar.operator spu_group_status())
	, exit_status(ar)
{
	for (auto& thread : threads)
	{
		// Each slot is prefixed by a u8 presence flag (see save())
		if (ar.operator u8())
		{
			// Restore the global id allocator state, then the thread itself,
			// and re-register it under its original id
			ar(id_manager::g_id);
			thread = std::make_shared<named_thread<spu_thread>>(ar, this);
			idm::import_existing<named_thread<spu_thread>>(thread, idm::last_id());
			running += !thread->stop_flag_removal_protection;
		}
	}

	ar(threads_map);
	ar(imgs);
	ar(args);

	// Event queues were saved by id (0 if absent/invalid)
	for (auto ep : {&ep_run, &ep_exception, &ep_sysmodule})
	{
		*ep = idm::get_unlocked<lv2_obj, lv2_event_queue>(ar.operator u32());
	}

	switch (run_state)
	{
	// Commented stuff are handled by different means currently
	//case SPU_THREAD_GROUP_STATUS_NOT_INITIALIZED:
	//case SPU_THREAD_GROUP_STATUS_INITIALIZED:
	//case SPU_THREAD_GROUP_STATUS_READY:
	//case SPU_THREAD_GROUP_STATUS_WAITING:
	case SPU_THREAD_GROUP_STATUS_SUSPENDED:
	{
		// Suspend all SPU threads
		for (const auto& thread : threads)
		{
			if (thread)
			{
				thread->state += cpu_flag::suspend;
			}
		}

		break;
	}
	//case SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED:
	//case SPU_THREAD_GROUP_STATUS_RUNNING:
	//case SPU_THREAD_GROUP_STATUS_STOPPED:
	//case SPU_THREAD_GROUP_STATUS_UNKNOWN:
	default:
	{
		break;
	}
	}
}
// Serialize an SPU thread group.
// Read back by lv2_spu_group::lv2_spu_group(utils::serial&) - keep in sync.
void lv2_spu_group::save(utils::serial& ar)
{
	USING_SERIALIZATION_VERSION(spu);

	spu_group_status _run_state = run_state;

	// Transient wait states cannot be restored directly - map them to the
	// stable state the waiting SPU will recover into on load
	switch (_run_state)
	{
	// Commented stuff are handled by different means currently
	//case SPU_THREAD_GROUP_STATUS_NOT_INITIALIZED:
	//case SPU_THREAD_GROUP_STATUS_INITIALIZED:
	//case SPU_THREAD_GROUP_STATUS_READY:

	// Waiting SPU should recover this
	case SPU_THREAD_GROUP_STATUS_WAITING: _run_state = SPU_THREAD_GROUP_STATUS_RUNNING; break;
	case SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED: _run_state = SPU_THREAD_GROUP_STATUS_SUSPENDED; break;

	//case SPU_THREAD_GROUP_STATUS_RUNNING:
	//case SPU_THREAD_GROUP_STATUS_STOPPED:
	//case SPU_THREAD_GROUP_STATUS_UNKNOWN:
	default:
	{
		break;
	}
	}

	ar(name, max_num, mem_size, type, ct->id, has_scheduler_context, max_run, init, prio, _run_state, exit_status);

	for (const auto& thread : threads)
	{
		// u8 presence flag per slot, then the thread id and its full state
		ar(u8{thread.operator bool()});

		if (thread)
		{
			ar(thread->id);
			thread->save(ar);
		}
	}

	ar(threads_map);
	ar(imgs);
	ar(args);

	// Event queues are saved by id (0 when absent/invalid)
	for (auto ep : {&ep_run, &ep_exception, &ep_sysmodule})
	{
		ar(lv2_obj::check(*ep) ? (*ep)->id : 0);
	}
}
// Restore an SPU image descriptor from a savestate (order mirrors save())
lv2_spu_image::lv2_spu_image(utils::serial& ar)
	: e_entry(ar)
	, segs(ar.operator decltype(segs)()) // segments use bitwise serialization
	, nsegs(ar)
{
}
// Serialize an SPU image descriptor (counterpart of the constructor above)
void lv2_spu_image::save(utils::serial& ar)
{
	ar(e_entry, segs, nsegs);
}
// Get spu thread ptr, returns group ptr as well for refcounting
std::pair<named_thread<spu_thread>*, std::shared_ptr<lv2_spu_group>> lv2_spu_group::get_thread(u32 id)
{
@ -1270,11 +1395,24 @@ error_code sys_spu_thread_group_join(ppu_thread& ppu, u32 id, vm::ptr<u32> cause
{
const auto state = ppu.state.fetch_sub(cpu_flag::signal);
if (is_stopped(state) || state & cpu_flag::signal)
if (state & cpu_flag::signal)
{
break;
}
if (is_stopped(state))
{
std::lock_guard lock(group->mutex);
if (!group->waiter)
{
break;
}
ppu.state += cpu_flag::again;
break;
}
thread_ctrl::wait_on(ppu.state, state);
}
}

View File

@ -114,6 +114,8 @@ struct sys_spu_thread_argument
struct sys_spu_segment
{
ENABLE_BITWISE_SERIALIZATION;
be_t<s32> type; // copy, fill, info
be_t<u32> ls; // local storage address
be_t<u32> size;
@ -248,6 +250,9 @@ struct lv2_spu_image : lv2_obj
, nsegs(nsegs)
{
}
lv2_spu_image(utils::serial& ar);
void save(utils::serial& ar);
};
struct sys_spu_thread_group_syscall_253_info
@ -283,8 +288,8 @@ struct lv2_spu_group
atomic_t<spu_group_status> run_state; // SPU Thread Group State
atomic_t<s32> exit_status; // SPU Thread Group Exit Status
atomic_t<u32> join_state; // flags used to detect exit cause and signal
atomic_t<u32> running; // Number of running threads
atomic_t<u64> stop_count;
atomic_t<u32> running = 0; // Number of running threads
atomic_t<u64> stop_count = 0;
class ppu_thread* waiter = nullptr;
bool set_terminate = false;
@ -297,7 +302,7 @@ struct lv2_spu_group
std::shared_ptr<lv2_event_queue> ep_exception; // TODO: SYS_SPU_THREAD_GROUP_EVENT_EXCEPTION
std::shared_ptr<lv2_event_queue> ep_sysmodule; // TODO: SYS_SPU_THREAD_GROUP_EVENT_SYSTEM_MODULE
lv2_spu_group(std::string name, u32 num, s32 prio, s32 type, lv2_memory_container* ct, bool uses_scheduler, u32 mem_size)
lv2_spu_group(std::string name, u32 num, s32 prio, s32 type, lv2_memory_container* ct, bool uses_scheduler, u32 mem_size) noexcept
: name(std::move(name))
, id(idm::last_id())
, max_num(num)
@ -311,13 +316,16 @@ struct lv2_spu_group
, run_state(SPU_THREAD_GROUP_STATUS_NOT_INITIALIZED)
, exit_status(0)
, join_state(0)
, running(0)
, stop_count(0)
// TODO: args()
{
threads_map.fill(-1);
}
SAVESTATE_INIT_POS(8); // Dependency on SPUs
lv2_spu_group(utils::serial& ar) noexcept;
void save(utils::serial& ar);
CellError send_run_event(u64 data1, u64 data2, u64 data3) const
{
return ep_run ? ep_run->send(SYS_SPU_THREAD_GROUP_EVENT_RUN_KEY, data1, data2, data3) : CELL_ENOTCONN;

View File

@ -26,6 +26,7 @@ struct lv2_storage
static const u32 id_base = 0x45000000;
static const u32 id_step = 1;
static const u32 id_count = 2048;
SAVESTATE_INIT_POS(45);
const u64 device_id;
const fs::file file;

View File

@ -84,6 +84,7 @@ private:
}
public:
SAVESTATE_INIT_POS(4); // Dependency on PPUs
// Existence validation (workaround for shared-ptr ref-counting)
atomic_t<u32> exists = 0;
@ -133,7 +134,10 @@ public:
if (protocol == SYS_SYNC_FIFO)
{
const auto res = queue.front();
queue.pop_front();
if (res->state.none_of(cpu_flag::again))
queue.pop_front();
return res;
}
@ -152,7 +156,10 @@ public:
}
const auto res = *it;
queue.erase(it);
if (res->state.none_of(cpu_flag::again))
queue.erase(it);
return res;
}
@ -192,6 +199,10 @@ public:
g_to_awake.emplace_back(thread);
}
// Serialization related
static void set_future_sleep(ppu_thread* ppu);
static bool is_scheduler_ready();
static void cleanup();
template <typename T>
@ -314,6 +325,24 @@ public:
}
}
template <typename T>
static std::shared_ptr<T> load(u64 ipc_key, std::shared_ptr<T> make, u64 pshared = -1)
{
if (pshared == umax ? ipc_key != 0 : pshared != 0)
{
g_fxo->need<ipc_manager<T, u64>>();
make = g_fxo->get<ipc_manager<T, u64>>().add(ipc_key, [&]()
{
return make;
}, true).second;
}
// Ensure no error
ensure(!make->on_id_create());
return make;
}
template <bool IsUsleep = false, bool Scale = true>
static bool wait_timeout(u64 usec, cpu_thread* const cpu = {})
{
@ -420,5 +449,8 @@ private:
// Scheduler queue for timeouts (wait until -> thread)
static std::deque<std::pair<u64, class cpu_thread*>> g_waiting;
// Threads which must call lv2_obj::sleep before the scheduler starts
static std::deque<class ppu_thread*> g_to_sleep;
static void schedule_all();
};

View File

@ -166,11 +166,24 @@ u64 get_timebased_time()
}
// Add an offset to get_timebased_time to avoid leaking PC's uptime into the game
void initalize_timebased_time()
// As if PS3 starts at value 0 (base time) when the game boots
// If none-zero arg is specified it will become the base time (for savestates)
void initialize_timebased_time(u64 timebased_init, bool reset)
{
timebase_offset = 0;
timebase_offset = get_timebased_time();
systemtime_offset = timebase_offset / (g_timebase_freq / 1000000);
if (reset)
{
// We simply want to zero-out these values
systemtime_offset = 0;
return;
}
const u64 current = get_timebased_time();
timebased_init = get_timebased_time() - timebased_init;
timebase_offset = timebased_init;
systemtime_offset = timebased_init / (g_timebase_freq / 1000000);
}
// Returns some relative time in microseconds, don't change this fact
@ -201,7 +214,6 @@ u64 get_system_time()
u64 get_guest_system_time(u64 time)
{
const u64 result = (time != umax ? time : get_system_time()) * g_cfg.core.clocks_scale / 100;
ensure(result >= systemtime_offset);
return result - systemtime_offset;
}

View File

@ -6,6 +6,7 @@
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/Cell/timers.hpp"
#include "Emu/System.h"
#include "sys_event.h"
#include "sys_process.h"
@ -23,6 +24,24 @@ struct lv2_timer_thread
static constexpr auto thread_name = "Timer Thread"sv;
};
// Savestate deserialization: members are read in the order lv2_timer::save()
// writes them.
lv2_timer::lv2_timer(utils::serial& ar)
	: lv2_obj{1}
	, state(ar)
	// Event queue port is restored via load_ptr (counterpart of save_ptr in save())
	, port(lv2_event_queue::load_ptr(ar, port))
	, source(ar)
	, data1(ar)
	, data2(ar)
	, expire(ar)
	, period(ar)
{
}
// Savestate serialization counterpart of lv2_timer(utils::serial&):
// same field order as the constructor's member-initializer list.
void lv2_timer::save(utils::serial& ar)
{
	USING_SERIALIZATION_VERSION(lv2_sync);

	ar(state);
	lv2_event_queue::save_ptr(ar, port.get());
	ar(source, data1, data2, expire, period);
}
u64 lv2_timer::check()
{
while (thread_ctrl::state() != thread_state::aborting)
@ -34,6 +53,7 @@ u64 lv2_timer::check()
const u64 _now = get_guest_system_time();
u64 next = expire;
// If aborting, perform the last accurate check for event
if (_now >= next)
{
std::lock_guard lock(mutex);
@ -73,7 +93,19 @@ u64 lv2_timer::check()
void lv2_timer_thread::operator()()
{
u64 sleep_time = umax;
{
decltype(timers) vec;
idm::select<lv2_obj, lv2_timer>([&vec](u32 id, lv2_timer&)
{
vec.emplace_back(idm::get_unlocked<lv2_obj, lv2_timer>(id));
});
std::lock_guard lock(mutex);
timers = std::move(vec);
}
u64 sleep_time = 0;
while (thread_ctrl::state() != thread_state::aborting)
{
@ -87,6 +119,12 @@ void lv2_timer_thread::operator()()
sleep_time = umax;
if (Emu.IsPaused())
{
sleep_time = 10000;
continue;
}
reader_lock lock(mutex);
for (const auto& timer : timers)
@ -115,6 +153,7 @@ error_code sys_timer_create(ppu_thread& ppu, vm::ptr<u32> timer_id)
auto& thread = g_fxo->get<named_thread<lv2_timer_thread>>();
{
std::lock_guard lock(thread.mutex);
lv2_obj::unqueue(thread.timers, ptr);
thread.timers.emplace_back(std::move(ptr));
}
@ -344,7 +383,7 @@ error_code sys_timer_sleep(ppu_thread& ppu, u32 sleep_time)
{
ppu.state += cpu_flag::wait;
sys_timer.trace("sys_timer_sleep(sleep_time=%d) -> sys_timer_usleep()", sleep_time);
sys_timer.warning("sys_timer_sleep(sleep_time=%d)", sleep_time);
return sys_timer_usleep(ppu, sleep_time * u64{1000000});
}
@ -359,7 +398,10 @@ error_code sys_timer_usleep(ppu_thread& ppu, u64 sleep_time)
{
lv2_obj::sleep(ppu, sleep_time);
lv2_obj::wait_timeout<true>(sleep_time);
if (!lv2_obj::wait_timeout<true>(sleep_time))
{
ppu.state += cpu_flag::again;
}
}
else
{

View File

@ -60,6 +60,9 @@ struct lv2_timer : lv2_obj
info.period = 0;
}
}
lv2_timer(utils::serial& ar);
void save(utils::serial& ar);
};
class ppu_thread;

View File

@ -63,6 +63,18 @@ public:
usb_handler_thread();
~usb_handler_thread();
SAVESTATE_INIT_POS(14);
// Savestate constructor: perform regular initialization, then restore
// whether the handler had been initialized (stored as a single u8 flag)
usb_handler_thread(utils::serial& ar) : usb_handler_thread()
{
	is_init = !!ar.operator u8();
}
// Savestate serialization: only the init flag is persisted (one byte)
void save(utils::serial& ar)
{
	ar(u8{is_init.load()});
}
// Thread loop
void operator()();
@ -850,18 +862,26 @@ error_code sys_usbd_receive_event(ppu_thread& ppu, u32 handle, vm::ptr<u64> arg1
while (auto state = ppu.state.fetch_sub(cpu_flag::signal))
{
if (is_stopped(state))
{
sys_usbd.trace("sys_usbd_receive_event: aborting");
return {};
}
if (state & cpu_flag::signal)
{
sys_usbd.trace("Received event(queued): arg1=0x%x arg2=0x%x arg3=0x%x", ppu.gpr[4], ppu.gpr[5], ppu.gpr[6]);
break;
}
if (is_stopped(state))
{
std::lock_guard lock(usbh.mutex);
if (std::find(usbh.sq.begin(), usbh.sq.end(), &ppu) == usbh.sq.end())
{
break;
}
ppu.state += cpu_flag::again;
sys_usbd.trace("sys_usbd_receive_event: aborting");
return {};
}
thread_ctrl::wait_on(ppu.state, state);
}

View File

@ -17,6 +17,12 @@ sys_vm_t::sys_vm_t(u32 _addr, u32 vsize, lv2_memory_container* ct, u32 psize)
g_ids[addr >> 28].release(idm::last_id());
}
// Savestate serialization for a sys_vm mapping.
// Fields are written in the order sys_vm_t(utils::serial&) reads them back
// (container is stored by its id).
void sys_vm_t::save(utils::serial& ar)
{
	USING_SERIALIZATION_VERSION(lv2_vm);
	ar(ct->id, addr, size, psize);
}
sys_vm_t::~sys_vm_t()
{
// Free ID
@ -30,6 +36,17 @@ struct sys_vm_global_t
atomic_t<u32> total_vsize = 0;
};
// Savestate constructor: restore the mapping from serialized state
// (counterpart of sys_vm_t::save()).
sys_vm_t::sys_vm_t(utils::serial& ar)
	// Container is restored by looking up the saved id
	: ct(lv2_memory_container::search(ar))
	, addr(ar)
	, size(ar)
	, psize(ar)
{
	// Same id bookkeeping as the regular constructor
	g_ids[addr >> 28].release(idm::last_id());

	// Re-account the restored virtual size in the global tracker
	g_fxo->need<sys_vm_global_t>();
	g_fxo->get<sys_vm_global_t>().total_vsize += size;
}
error_code sys_vm_memory_map(ppu_thread& ppu, u32 vsize, u32 psize, u32 cid, u64 flag, u64 policy, vm::ptr<u32> addr)
{
ppu.state += cpu_flag::wait;

View File

@ -41,6 +41,11 @@ struct sys_vm_t
sys_vm_t(u32 addr, u32 vsize, lv2_memory_container* ct, u32 psize);
~sys_vm_t();
SAVESTATE_INIT_POS(10);
sys_vm_t(utils::serial& ar);
void save(utils::serial& ar);
static std::array<atomic_t<u32>, id_count> g_ids;
static u32 find_id(u32 addr)

View File

@ -4,6 +4,5 @@
u64 convert_to_timebased_time(u64 time);
u64 get_timebased_time();
void initalize_timebased_time();
u64 get_system_time();
u64 get_guest_system_time(u64 time = umax);

View File

@ -4,15 +4,40 @@
shared_mutex id_manager::g_mutex;
thread_local DECLARE(idm::g_id);
idm::map_data* idm::allocate_id(std::vector<map_data>& vec, u32 type_id, u32 base, u32 step, u32 count, std::pair<u32, u32> invl_range)
namespace id_manager
{
thread_local u32 g_id = 0;
}
// Accessor for the global typeinfo registry used by savestate (de)serialization.
// A function-local static ("magic static") gives thread-safe on-first-use
// initialization regardless of translation-unit init order.
std::vector<std::pair<u128, id_manager::typeinfo>>& id_manager::get_typeinfo_map()
{
	// Magic static
	static std::vector<std::pair<u128, id_manager::typeinfo>> s_map;
	return s_map;
}
idm::map_data* idm::allocate_id(std::vector<map_data>& vec, u32 type_id, u32 dst_id, u32 base, u32 step, u32 count, std::pair<u32, u32> invl_range)
{
if (const u32 index = id_manager::get_index(dst_id, base, step, count, invl_range); index < count)
{
// Fixed position construction
ensure(index < vec.size());
if (vec[index].second)
{
return nullptr;
}
id_manager::g_id = dst_id;
vec[index] = {id_manager::id_key(dst_id, type_id), nullptr};
return &vec[index];
}
if (vec.size() < count)
{
// Try to emplace back
const u32 _next = base + step * ::size32(vec);
g_id = _next;
id_manager::g_id = _next;
vec.emplace_back(id_manager::id_key(_next, type_id), nullptr);
return &vec.back();
}
@ -27,7 +52,7 @@ idm::map_data* idm::allocate_id(std::vector<map_data>& vec, u32 type_id, u32 ba
{
// Incremenet ID invalidation counter
const u32 id = next | ((ptr->first + (1u << invl_range.first)) & (invl_range.second ? (((1u << invl_range.second) - 1) << invl_range.first) : 0));
g_id = id;
id_manager::g_id = id;
ptr->first = id_manager::id_key(id, type_id);
return ptr;
}

View File

@ -5,7 +5,10 @@
#include <memory>
#include <vector>
#include <map>
#include <typeinfo>
#include "util/serialization.hpp"
#include "util/fixed_typemap.hpp"
extern stx::manual_typemap<void, 0x20'00000, 128> g_fixed_typemap;
@ -35,6 +38,9 @@ namespace id_manager
template <typename T>
concept IdmCompatible = requires () { T::id_base, T::id_step, T::id_count; };
// Last allocated ID for constructors
extern thread_local u32 g_id;
// ID traits
template <typename T>
struct id_traits
@ -57,53 +63,155 @@ namespace id_manager
static_assert(!invl_range.second || (u64{invl_range.second} + invl_range.first <= 32 /*....*/ ));
};
class typeinfo
// Translate an ID value to its index inside a type's ID range.
// Returns 'count' (an out-of-range index) when the ID does not belong to the
// range, so callers can validate with 'index < count'.
static constexpr u32 get_index(u32 id, u32 base, u32 step, u32 count, std::pair<u32, u32> invl_range)
{
	// Mask out the invalidation-counter bits embedded inside the ID
	u32 mask_out = ((1u << invl_range.second) - 1) << invl_range.first;

	// Note: if id is lower than base, diff / step will be higher than count
	u32 diff = (id & ~mask_out) - base;

	if (diff % step)
	{
		// id is invalid, return invalid index
		return count;
	}

	// Get actual index
	return diff / step;
}
// ID traits
// Default loader: construct T directly from the serialization stream.
// exact_t prevents unwanted implicit conversions when selecting the ctor.
template <typename T, typename = void>
struct id_traits_load_func
{
	static constexpr std::shared_ptr<void>(*load)(utils::serial&) = [](utils::serial& ar) -> std::shared_ptr<void> { return std::make_shared<T>(stx::exact_t<utils::serial&>(ar)); };
};

// Specialization: types that define a static T::load use it instead.
template <typename T>
struct id_traits_load_func<T, std::void_t<decltype(&T::load)>>
{
	static constexpr std::shared_ptr<void>(*load)(utils::serial&) = &T::load;
};

// Default savability predicate: every object is saved.
template <typename T, typename = void>
struct id_traits_savable_func
{
	static constexpr bool(*savable)(void*) = [](void*) -> bool { return true; };
};

// Specialization: types that define T::savable() decide per-object.
template <typename T>
struct id_traits_savable_func<T, std::void_t<decltype(&T::savable)>>
{
	static constexpr bool(*savable)(void* ptr) = [](void* ptr) -> bool { return static_cast<const T*>(ptr)->savable(); };
};

// Placeholder type used by make_typeinfo() for types that are not
// savestate-compatible; provides inert serialization hooks and minimal traits.
struct dummy_construct
{
	dummy_construct() {}
	dummy_construct(utils::serial&){}
	void save(utils::serial&) {}

	static constexpr u32 id_base = 1, id_step = 1, id_count = 1;
	static constexpr double savestate_init_pos = 0;
};
struct typeinfo;
// Use a vector instead of map to reduce header dependencies in this commonly used header
std::vector<std::pair<u128, typeinfo>>& get_typeinfo_map();
struct typeinfo
{
public:
std::shared_ptr<void>(*load)(utils::serial&);
void(*save)(utils::serial&, void*);
bool(*savable)(void* ptr);
u32 base;
u32 step;
u32 count;
std::pair<u32, u32> invl_range;
// Get type index
template <typename T>
static inline u32 get_index()
{
return registered<T>::index;
return stx::typeindex<id_manager::typeinfo, T>();
}
// Get type count
static inline u32 get_count()
// Unique type ID within the same container: we use id_base if nothing else was specified
template <typename T>
static consteval u32 get_type()
{
return add_type(0);
return T::id_base;
}
// Specified type ID for containers which their types may be sharing an overlapping IDs range
template <typename T> requires requires () { u32{T::id_type}; }
static consteval u32 get_type()
{
return T::id_type;
}
// Build (and, for serializable types, register) the typeinfo record for T.
// Registration key is {type id, savestate position}; duplicate registrations
// must describe an identical record.
template <typename T>
static typeinfo make_typeinfo()
{
	typeinfo info{};

	// C == T only when T is idm-compatible AND constructible from the stream;
	// otherwise fall back to the inert dummy_construct placeholder.
	using C = std::conditional_t<IdmCompatible<T> && std::is_constructible_v<T, stx::exact_t<utils::serial&>>, T, dummy_construct>;
	using Type = std::conditional_t<IdmCompatible<T>, T, dummy_construct>;

	if constexpr (std::is_same_v<C, T>)
	{
		info =
		{
			+id_traits_load_func<C>::load,
			+[](utils::serial& ar, void* obj) { static_cast<C*>(obj)->save(ar); },
			+id_traits_savable_func<C>::savable,
			id_traits<C>::base, id_traits<C>::step, id_traits<C>::count, id_traits<C>::invl_range,
		};

		// Composite key: high 64 bits = type id, low 64 bits = init position
		const u128 key = u128{get_type<C>()} << 64 | std::bit_cast<u64>(C::savestate_init_pos);

		for (const auto& tinfo : get_typeinfo_map())
		{
			if (!(tinfo.first ^ key))
			{
				// Already registered: the stored record must match exactly
				ensure(!std::memcmp(&info, &tinfo.second, sizeof(info)));
				return info;
			}
		}

		// id_base must be unique within all the objects with the same initialization position by definition of id_map with multiple types
		get_typeinfo_map().emplace_back(key, info);
	}
	else
	{
		// Non-serializable type: traits only, no load/save/savable callbacks
		info =
		{
			nullptr,
			nullptr,
			nullptr,
			id_traits<Type>::base, id_traits<Type>::step, id_traits<Type>::count, id_traits<Type>::invl_range,
		};
	}

	return info;
}
};
template <typename T>
const u32 typeinfo::registered<T>::index = typeinfo::add_type(1);
// ID value with additional type stored
class id_key
{
u32 m_value; // ID value
u32 m_type; // True object type
u32 m_value; // ID value
u32 m_base; // ID base (must be unique for each type in the same container)
public:
id_key() = default;
id_key(u32 value, u32 type)
: m_value(value)
, m_type(type)
, m_base(type)
{
}
@ -114,7 +222,7 @@ namespace id_manager
u32 type() const
{
return m_type;
return m_base;
}
operator u32() const
@ -135,6 +243,86 @@ namespace id_manager
vec.reserve(T::id_count);
}
// Order it directly afterward the source type's position
static constexpr double savestate_init_pos = std::bit_cast<double>(std::bit_cast<u64>(T::savestate_init_pos) + 1);
// Savestate deserialization: reconstruct every object in this id_map.
// Reads the object count, then for each object its id and type id, looks up
// the registered typeinfo and invokes its load callback on the stream.
id_map(utils::serial& ar)
{
	vec.resize(T::id_count);

	u32 i = ar.operator u32();

	ensure(i <= T::id_count);

	while (--i != umax)
	{
		// ID, type hash
		const u32 id = ar;

		// Same composite key layout as make_typeinfo(): {type id, init position}
		const u128 type_init_pos = u128{u32{ar}} << 64 | std::bit_cast<u64>(T::savestate_init_pos);
		const typeinfo* info = nullptr;

		// Search load functions for the one of this type (see make_typeinfo() for explanation about key composition reasoning)
		for (const auto& typeinfo : get_typeinfo_map())
		{
			if (!(typeinfo.first ^ type_init_pos))
			{
				info = std::addressof(typeinfo.second);
			}
		}

		ensure(info);

		// Construct each object from information collected

		// Simulate construction semantics (idm::last_id() value)
		g_id = id;

		// Place the object at the slot derived from its id; slot must be free
		auto& obj = vec[get_index(id, info->base, info->step, info->count, info->invl_range)];
		ensure(!obj.second);
		obj.first = id_key(id, static_cast<u32>(static_cast<u64>(type_init_pos >> 64)));
		obj.second = info->load(ar);
	}
}
// Savestate serialization: write every savable object with its id and type id.
// The object count is written first as a placeholder and patched at the end,
// because some objects may be skipped by their savable() predicate.
void save(utils::serial& ar)
{
	u32 obj_count = 0;
	usz obj_count_offs = ar.data.size();

	// To be patched at the end of the function
	ar(obj_count);

	for (const auto& p : vec)
	{
		if (!p.second) continue;

		const u128 type_init_pos = u128{p.first.type()} << 64 | std::bit_cast<u64>(T::savestate_init_pos);
		const typeinfo* info = nullptr;

		// Search load functions for the one of this type (see make_typeinfo() for explanation about key composition reasoning)
		for (const auto& typeinfo : get_typeinfo_map())
		{
			if (!(typeinfo.first ^ type_init_pos))
			{
				// std::exchange doubles as a uniqueness assertion: only one match allowed
				ensure(!std::exchange(info, std::addressof(typeinfo.second)));
			}
		}

		// Save each object with needed information
		if (info && info->savable(p.second.get()))
		{
			ar(p.first.value(), p.first.type());
			info->save(ar, p.second.get());
			obj_count++;
		}
	}

	// Patch object count
	std::memcpy(ar.data.data() + obj_count_offs, &obj_count, sizeof(obj_count));
}
template <bool dummy = false> requires (std::is_assignable_v<T&, thread_state>)
id_map& operator=(thread_state state)
{
@ -163,33 +351,12 @@ namespace id_manager
// Object manager for emulated process. Multiple objects of specified arbitrary type are given unique IDs.
class idm
{
// Last allocated ID for constructors
static thread_local u32 g_id;
template <typename T>
static inline u32 get_type()
{
return id_manager::typeinfo::get_index<T>();
}
template <typename T>
static constexpr u32 get_index(u32 id)
{
using traits = id_manager::id_traits<T>;
constexpr u32 mask_out = ((1u << traits::invl_range.second) - 1) << traits::invl_range.first;
// Note: if id is lower than base, diff / step will be higher than count
u32 diff = (id & ~mask_out) - traits::base;
if (diff % traits::step)
{
// id is invalid, return invalid index
return traits::count;
}
// Get actual index
return diff / traits::step;
return id_manager::get_index(id, traits::base, traits::step, traits::count, traits::invl_range);
}
// Helper
@ -259,7 +426,7 @@ class idm
using map_data = std::pair<id_manager::id_key, std::shared_ptr<void>>;
// Prepare new ID (returns nullptr if out of resources)
static map_data* allocate_id(std::vector<map_data>& vec, u32 type_id, u32 base, u32 step, u32 count, std::pair<u32, u32> invl_range);
static map_data* allocate_id(std::vector<map_data>& vec, u32 type_id, u32 dst_id, u32 base, u32 step, u32 count, std::pair<u32, u32> invl_range);
// Find ID (additionally check type if types are not equal)
template <typename T, typename Type>
@ -297,21 +464,24 @@ class idm
return nullptr;
}
// Allocate new ID and assign the object from the provider()
// Allocate new ID (or use fixed ID) and assign the object from the provider()
template <typename T, typename Type, typename F>
static map_data* create_id(F&& provider)
static map_data* create_id(F&& provider, u32 id = id_manager::id_traits<Type>::invalid)
{
static_assert(PtrSame<T, Type>, "Invalid ID type combination");
// ID traits
using traits = id_manager::id_traits<Type>;
// Ensure make_typeinfo() is used for this type
stx::typedata<id_manager::typeinfo, Type>();
// Allocate new id
std::lock_guard lock(id_manager::g_mutex);
auto& map = g_fxo->get<id_manager::id_map<T>>();
if (auto* place = allocate_id(map.vec, get_type<Type>(), traits::base, traits::step, traits::count, traits::invl_range))
if (auto* place = allocate_id(map.vec, get_type<Type>(), id, traits::base, traits::step, traits::count, traits::invl_range))
{
// Get object, store it
place->second = provider();
@ -338,7 +508,14 @@ public:
// Get last ID (updated in create_id/allocate_id)
static inline u32 last_id()
{
return g_id;
return id_manager::g_id;
}
// Get type ID that is meant to be unique within the same container
template <typename T>
static consteval u32 get_type()
{
return id_manager::typeinfo::get_type<T>();
}
// Add a new ID of specified type with specified constructor arguments (returns object or nullptr)
@ -367,9 +544,9 @@ public:
// Add a new ID for an object returned by provider()
template <typename T, typename Made = T, typename F> requires (std::is_invocable_v<F&&>)
static inline u32 import(F&& provider)
static inline u32 import(F&& provider, u32 id = id_manager::id_traits<Made>::invalid)
{
if (auto pair = create_id<T, Made>(std::forward<F>(provider)))
if (auto pair = create_id<T, Made>(std::forward<F>(provider), id))
{
return pair->first;
}
@ -379,9 +556,9 @@ public:
// Add a new ID for an existing object provided (returns new id)
template <typename T, typename Made = T>
static inline u32 import_existing(std::shared_ptr<T> ptr)
static inline u32 import_existing(std::shared_ptr<T> ptr, u32 id = id_manager::id_traits<Made>::invalid)
{
return import<T, Made>([&] { return std::move(ptr); });
return import<T, Made>([&] { return std::move(ptr); }, id);
}
// Access the ID record without locking (unsafe)

View File

@ -77,6 +77,11 @@ public:
virtual void Init(const u32 max_connect) = 0;
virtual ~KeyboardHandlerBase() = default;
KeyboardHandlerBase(utils::serial* ar);
KeyboardHandlerBase(utils::serial& ar) : KeyboardHandlerBase(&ar) {}
void save(utils::serial& ar);
SAVESTATE_INIT_POS(19);
void Key(u32 code, bool pressed);
void SetIntercepted(bool intercepted);

View File

@ -129,6 +129,13 @@ public:
virtual void Init(const u32 max_connect) = 0;
virtual ~MouseHandlerBase() = default;
SAVESTATE_INIT_POS(18);
MouseHandlerBase(const MouseHandlerBase&) = delete;
MouseHandlerBase(utils::serial* ar);
MouseHandlerBase(utils::serial& ar) : MouseHandlerBase(&ar) {}
void save(utils::serial& ar);
void Button(u8 button, bool pressed)
{
std::lock_guard lock(mutex);

View File

@ -4,6 +4,8 @@
class NullKeyboardHandler final : public KeyboardHandlerBase
{
using KeyboardHandlerBase::KeyboardHandlerBase;
public:
void Init(const u32 max_connect) override
{

View File

@ -4,6 +4,8 @@
class NullMouseHandler final : public MouseHandlerBase
{
using MouseHandlerBase::MouseHandlerBase;
public:
void Init(const u32 max_connect) override
{

View File

@ -13,13 +13,17 @@
#include "Emu/Cell/SPURecompiler.h"
#include "Emu/perf_meter.hpp"
#include <deque>
#include <span>
#include "util/vm.hpp"
#include "util/asm.hpp"
#include "util/simd.hpp"
#include "util/serialization.hpp"
LOG_CHANNEL(vm_log, "VM");
void ppu_remove_hle_instructions(u32 addr, u32 size);
extern bool is_memory_read_only_of_executable(u32 addr);
namespace vm
{
@ -883,7 +887,7 @@ namespace vm
// Notify rsx to invalidate range
// Note: This must be done *before* memory gets unmapped while holding the vm lock, otherwise
// the RSX might try to call VirtualProtect on memory that is already unmapped
if (auto& rsxthr = g_fxo->get<rsx::thread>(); g_fxo->is_init<rsx::thread>())
if (auto& rsxthr = g_fxo->get<rsx::thread>(); !Emu.IsPaused() && g_fxo->is_init<rsx::thread>())
{
rsxthr.on_notify_memory_unmapped(addr, size);
}
@ -1145,8 +1149,14 @@ namespace vm
return flags;
}
static u64 init_block_id()
{
static atomic_t<u64> s_id = 1;
return s_id++;
}
block_t::block_t(u32 addr, u32 size, u64 flags)
: m_id([](){ static atomic_t<u64> s_id = 1; return s_id++; }())
: m_id(init_block_id())
, addr(addr)
, size(size)
, flags(process_block_flags(flags))
@ -1450,6 +1460,210 @@ namespace vm
return imp_used(lock);
}
// Collect (shm pointer, sample mapped address) pairs for every shared mapping
// in this block. Preallocated blocks are skipped: their contents are written
// inline by block_t::save() rather than via the shared-memory table.
void block_t::get_shared_memory(std::vector<std::pair<utils::shm*, u32>>& shared)
{
	auto& m_map = (m.*block_map)();

	if (!(flags & preallocated))
	{
		shared.reserve(shared.size() + m_map.size());

		for (const auto& [addr, shm] : m_map)
		{
			shared.emplace_back(shm.second.get(), addr);
		}
	}
}
// Find the mapped address of the given shared memory handle inside this block.
// Returns 0 when not found (preallocated blocks hold no shared mappings).
u32 block_t::get_shm_addr(const std::shared_ptr<utils::shm>& shared)
{
	auto& map = (m.*block_map)();

	if (flags & preallocated)
	{
		return 0;
	}

	for (auto& [mapped_addr, entry] : map)
	{
		if (entry.second == shared)
		{
			return mapped_addr;
		}
	}

	return 0;
}
static bool check_cache_line_zero(const void* ptr)
{
const auto p = reinterpret_cast<const v128*>(ptr);
const v128 _1 = p[0] | p[1];
const v128 _2 = p[2] | p[3];
const v128 _3 = p[4] | p[5];
const v128 _4 = p[6] | p[7];
const v128 _5 = _1 | _2;
const v128 _6 = _3 | _4;
const v128 _7 = _5 | _6;
return _7 == v128{};
}
// Serialize 'size' bytes in 1024-byte chunks, omitting all-zero 128-byte lines.
// Each chunk is prefixed by a bitmap byte: bit N set = line N was written.
// Write path only; 'size' must be a multiple of 1024.
static void save_memory_bytes(utils::serial& ar, const u8* ptr, usz size)
{
	AUDIT(ar.is_writing() && !(size % 1024));

	for (; size; ptr += 128 * 8, size -= 128 * 8)
	{
		ar(u8{}); // bitmap of 1024 bytes (bit is 128-byte)

		u8 bitmap = 0, count = 0;

		for (usz i = 0, end = std::min<usz>(size, 128 * 8); i < end; i += 128)
		{
			if (!check_cache_line_zero(ptr + i))
			{
				bitmap |= 1u << (i / 128);
				count++;
				ar(std::span(ptr + i, 128));
			}
		}

		// Patch bitmap with correct value
		// (it was appended as a zero placeholder before the lines were known)
		*std::prev(&ar.data.back(), count * 128) = bitmap;
	}
}
// Counterpart of save_memory_bytes(): restore non-zero 128-byte lines per
// each 1024-byte chunk's bitmap byte; absent lines are left untouched.
// 'size' must be a multiple of 128.
static void load_memory_bytes(utils::serial& ar, u8* ptr, usz size)
{
	// Read path: the serializer must NOT be in writing mode
	// (the original asserted ar.is_writing(), copy-pasted from the save path)
	AUDIT(!ar.is_writing() && !(size % 128));

	for (; size; ptr += 128 * 8, size -= 128 * 8)
	{
		// Bitmap of this chunk: bit = 128-byte line present in the stream
		const u8 bitmap{ar};

		for (usz i = 0, end = std::min<usz>(size, 128 * 8); i < end; i += 128)
		{
			if (bitmap & (1u << (i / 128)))
			{
				ar(std::span(ptr + i, 128));
			}
		}
	}
}
// Serialize this block: header (addr, size, flags), then each mapping as
// {page flags, addr, size} followed by either inline contents (preallocated)
// or an index into the shared-memory table. A zero flags byte terminates.
void block_t::save(utils::serial& ar, std::map<utils::shm*, usz>& shared)
{
	auto& m_map = (m.*block_map)();

	ar(addr, size, flags);

	for (const auto& [addr, shm] : m_map)
	{
		// Assume first page flags represent all the map
		ar(g_pages[addr / 4096 + !!(flags & stack_guarded)]);

		ar(addr);
		ar(shm.first);

		if (flags & preallocated)
		{
			// Do not save read-only memory which comes from the executable
			// Because it couldn't have changed
			// NOTE(review): ar.data.back() is read AFTER addr and size were
			// appended, so it holds the last byte of the size value, not the
			// page flags byte — verify this is the intended check
			if (!(ar.data.back() & page_writable) && is_memory_read_only_of_executable(addr))
			{
				// Revert changes
				ar.data.resize(ar.data.size() - (sizeof(u32) * 2 + sizeof(memory_page)));
				vm_log.success("Removed read-only memory block of the executable from savestate. (addr=0x%x, size=0x%x)", addr, shm.first);
				continue;
			}

			// Save raw binary image
			const u32 guard_size = flags & stack_guarded ? 0x1000 : 0;
			save_memory_bytes(ar, vm::get_super_ptr<const u8>(addr + guard_size), shm.first - guard_size * 2);
		}
		else
		{
			// Save index of shm
			ar(shared[shm.second.get()]);
		}
	}

	// Terminator
	ar(u8{0});
}
// Savestate constructor: rebuild a memory block from the stream written by
// block_t::save(). Mappings are re-created through try_alloc() (same path as
// alloc()/falloc()); preallocated blocks also restore their raw contents.
block_t::block_t(utils::serial& ar, std::vector<std::shared_ptr<utils::shm>>& shared)
	: m_id(init_block_id())
	, addr(ar)
	, size(ar)
	, flags(ar)
{
	if (flags & preallocated)
	{
		// Recreate the single backing allocation and both mappings (user + super)
		m_common = std::make_shared<utils::shm>(size);
		m_common->map_critical(vm::base(addr), utils::protection::no);
		m_common->map_critical(vm::get_super_ptr(addr));
		lock_sudo(addr, size);
	}

	auto& m_map = (m.*block_map)();

	std::shared_ptr<utils::shm> null_shm;

	while (true)
	{
		const u8 flags0 = ar;

		if (!(flags0 & page_allocated))
		{
			// Terminator found
			break;
		}

		const u32 addr0 = ar;
		const u32 size0 = ar;

		u64 pflags = 0;

		if (flags0 & page_executable)
		{
			pflags |= alloc_executable;
		}

		if (~flags0 & page_writable)
		{
			pflags |= alloc_unwritable;
		}

		if (~flags0 & page_readable)
		{
			pflags |= alloc_hidden;
		}

		// NOTE(review): the two checks below test the block-level 'flags', not
		// the per-mapping 'flags0' — presumably intentional (page size is a
		// property of the block), confirm against alloc()/falloc()
		if ((flags & page_size_64k) == page_size_64k)
		{
			pflags |= page_64k_size;
		}
		else if (!(flags & (page_size_mask & ~page_size_1m)))
		{
			pflags |= page_1m_size;
		}

		// Map the memory through the same method as alloc() and falloc()
		// Copy the shared handle unconditionally
		ensure(try_alloc(addr0, pflags, size0, ::as_rvalue(flags & preallocated ? null_shm : shared[ar.operator usz()])));

		if (flags & preallocated)
		{
			// Load binary image
			const u32 guard_size = flags & stack_guarded ? 0x1000 : 0;
			load_memory_bytes(ar, vm::get_super_ptr<u8>(addr0 + guard_size), size0 - guard_size * 2);
		}
	}
}
// Helper: unmap the given block; returns the result of block_t::unmap()
bool _unmap_block(const std::shared_ptr<block_t>& block)
{
	return block->unmap();
}
static bool _test_map(u32 addr, u32 size)
{
const auto range = utils::address_range::start_length(addr, size);
@ -1623,7 +1837,7 @@ namespace vm
result.first = std::move(*it);
g_locations.erase(it);
ensure(result.first->unmap());
ensure(_unmap_block(result.first));
result.second = true;
return result;
}
@ -1764,7 +1978,7 @@ namespace vm
for (auto& block : g_locations)
{
if (block) block->unmap();
if (block) _unmap_block(block);
}
g_locations.clear();
@ -1783,6 +1997,106 @@ namespace vm
std::memset(g_range_lock_set, 0, sizeof(g_range_lock_set));
g_range_lock_bits = 0;
}
// Serialize the whole virtual memory state: the shared-memory table (with
// contents) first, then every location (block) with its mappings.
void save(utils::serial& ar)
{
	// Shared memory lookup, sample address is saved for easy memory copy
	// Just need one address for this optimization
	std::vector<std::pair<utils::shm*, u32>> shared;

	for (auto& loc : g_locations)
	{
		if (loc) loc->get_shared_memory(shared);
	}

	// NOTE(review): std::unique only removes *adjacent* duplicates; if the same
	// shm appears at non-adjacent positions (e.g. mapped by two different
	// blocks) its entries survive and the contents are saved twice — verify
	shared.erase(std::unique(shared.begin(), shared.end(), [](auto& a, auto& b) { return a.first == b.first; }), shared.end());

	// Index of each shm inside 'shared', used by block_t::save()
	std::map<utils::shm*, usz> shared_map;

	for (auto& p : shared)
	{
		shared_map.emplace(p.first, &p - shared.data());
	}

	// TODO: proper serialization of std::map
	ar(static_cast<usz>(shared_map.size()));

	for (const auto& [shm, addr] : shared)
	{
		// Save shared memory
		ar(shm->flags());

		// TODO: string_view serialization (even with load function, so the loaded address points to a position of the stream's buffer)
		ar(shm->size());
		save_memory_bytes(ar, vm::get_super_ptr<u8>(addr), shm->size());
	}

	// TODO: Serialize std::vector directly
	ar(g_locations.size());

	for (auto& loc : g_locations)
	{
		// Presence byte, then the block itself
		const u8 has = loc.operator bool();
		ar(has);

		if (loc)
		{
			loc->save(ar, shared_map);
		}
	}
}
// Restore the whole virtual memory state written by vm::save(): rebuild the
// shared-memory table (with contents), tear down existing locations, then
// reconstruct every block from the stream.
void load(utils::serial& ar)
{
	std::vector<std::shared_ptr<utils::shm>> shared;
	shared.resize(ar.operator usz());

	for (auto& shm : shared)
	{
		// Load shared memory
		const u32 flags = ar;
		const u64 size = ar;
		shm = std::make_shared<utils::shm>(size, flags);

		// Load binary image
		// elad335: I'm not proud about it as well.. (ideal situation is to not call map_self())
		load_memory_bytes(ar, shm->map_self(), shm->size());
	}

	// Drop any currently mapped blocks before reconstruction
	for (auto& block : g_locations)
	{
		if (block) _unmap_block(block);
	}

	g_locations.clear();
	g_locations.resize(ar.operator usz());

	for (auto& loc : g_locations)
	{
		// Presence byte written by save(); empty slots stay null
		const u8 has = ar;

		if (has)
		{
			loc = std::make_shared<block_t>(ar, shared);
		}
	}

	g_range_lock = 0;
}
// Search every location for the given shared memory handle and return its
// mapped address; returns 0 when no block maps it.
u32 get_shm_addr(const std::shared_ptr<utils::shm>& shared)
{
	for (auto& loc : g_locations)
	{
		if (!loc)
		{
			continue;
		}

		if (const u32 addr = loc->get_shm_addr(shared))
		{
			return addr;
		}
	}

	return 0;
}
}
void fmt_class_string<vm::_ptr_base<const void, u32>>::format(std::string& out, u64 arg)

Some files were not shown because too many files have changed in this diff Show More