1
0
mirror of https://github.com/RPCS3/rpcs3.git synced 2024-11-22 10:42:36 +01:00

VSH Improvements (#13172)

* sys_prx: Implement PRX LIB register syscall

* VSH: partial log spam fix

* sys_process reboot fix

* Implement sys_memory_container_destroy_parent_with_childs

* sys_net: Implement SO_RCVTIMEO/SO_SENDTIMEO

* VSH: Implement sys_rsx_context_free

* PPU LLVM: distinguish PPU cache exec also by address

Fixes referencing multiple PRX.

* UI: Do not report size of apps inside /dev_flash
This commit is contained in:
Elad Ashkenazi 2023-01-09 19:03:01 +02:00 committed by GitHub
parent 8ec1a5627d
commit 0946e5945f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
19 changed files with 268 additions and 58 deletions

View File

@ -2876,7 +2876,7 @@ extern void ppu_finalize(const ppu_module& info)
fmt::append(cache_path, "ppu-%s-%s/", fmt::base57(info.sha1), info.path.substr(info.path.find_last_of('/') + 1));
#ifdef LLVM_AVAILABLE
g_fxo->get<jit_module_manager>().remove(cache_path + info.name);
g_fxo->get<jit_module_manager>().remove(cache_path + info.name + "_" + std::to_string(info.segs[0].addr));
#endif
}
@ -3425,7 +3425,7 @@ bool ppu_initialize(const ppu_module& info, bool check_only)
};
// Permanently loaded compiled PPU modules (name -> data)
jit_module& jit_mod = g_fxo->get<jit_module_manager>().get(cache_path + info.name);
jit_module& jit_mod = g_fxo->get<jit_module_manager>().get(cache_path + info.name + "_" + std::to_string(info.segs[0].addr));
// Compiler instance (deferred initialization)
std::shared_ptr<jit_compiler>& jit = jit_mod.pjit;

View File

@ -394,7 +394,7 @@ const std::array<std::pair<ppu_intrp_func_t, std::string_view>, 1024> g_ppu_sysc
BIND_SYSC(sys_memory_container_destroy), //342 (0x156)
BIND_SYSC(sys_memory_container_get_size), //343 (0x157)
NULL_FUNC(sys_memory_budget_set), //344 (0x158)
null_func,//BIND_SYSC(sys_memory_...), //345 (0x159)
BIND_SYSC(sys_memory_container_destroy_parent_with_childs), //345 (0x159)
null_func,//BIND_SYSC(sys_memory_...), //346 (0x15A)
uns_func, //347 (0x15B) UNS
BIND_SYSC(sys_memory_allocate), //348 (0x15C)

View File

@ -110,8 +110,6 @@ error_code sys_hid_manager_513(u64 a1, u64 a2, vm::ptr<void> buf, u64 buf_size)
error_code sys_hid_manager_514(u32 pkg_id, vm::ptr<void> buf, u64 buf_size)
{
sys_hid.todo("sys_hid_manager_514(pkg_id=0x%x, buf=*0x%x, buf_size=0x%llx)", pkg_id, buf, buf_size);
if (pkg_id == 0xE)
{
sys_hid.trace("sys_hid_manager_514(pkg_id=0x%x, buf=*0x%x, buf_size=0x%llx)", pkg_id, buf, buf_size);
@ -161,10 +159,11 @@ error_code sys_hid_manager_read(u32 handle, u32 pkg_id, vm::ptr<void> buf, u64 b
return CELL_EFAULT;
}
(pkg_id == 2 || pkg_id == 0x81 ? sys_hid.trace : sys_hid.todo)
("sys_hid_manager_read(handle=0x%x, pkg_id=0x%x, buf=*0x%x, buf_size=0x%llx)", handle, pkg_id, buf, buf_size);
if (pkg_id == 2)
{
sys_hid.trace("sys_hid_manager_read(handle=0x%x, pkg_id=0x%x, buf=*0x%x, buf_size=0x%llx)", handle, pkg_id, buf, buf_size);
// cellPadGetData
// it returns just button array from 'CellPadData'
//auto data = vm::static_ptr_cast<u16[64]>(buf);
@ -179,8 +178,6 @@ error_code sys_hid_manager_read(u32 handle, u32 pkg_id, vm::ptr<void> buf, u64 b
}
else if (pkg_id == 0x81)
{
sys_hid.trace("sys_hid_manager_read(handle=0x%x, pkg_id=0x%x, buf=*0x%x, buf_size=0x%llx)", handle, pkg_id, buf, buf_size);
// cellPadGetDataExtra?
vm::var<CellPadData> tmpData;
if ((cellPadGetData(0, +tmpData) == CELL_OK) && tmpData->len > 0)
@ -191,7 +188,5 @@ error_code sys_hid_manager_read(u32 handle, u32 pkg_id, vm::ptr<void> buf, u64 b
}
}
sys_hid.todo("sys_hid_manager_read(handle=0x%x, pkg_id=0x%x, buf=*0x%x, buf_size=0x%llx)", handle, pkg_id, buf, buf_size);
return CELL_OK;
}

View File

@ -402,3 +402,17 @@ error_code sys_memory_container_get_size(cpu_thread& cpu, vm::ptr<sys_memory_inf
return CELL_OK;
}
// Syscall 345: destroy a memory container together with its child containers.
// cid: id of the parent container to destroy.
// must_0: reserved argument — callers must pass 0, anything else is rejected.
// mc_child: out pointer for child-container info (currently unused — TODO confirm real LV2 semantics).
error_code sys_memory_container_destroy_parent_with_childs(cpu_thread& cpu, u32 cid, u32 must_0, vm::ptr<u32> mc_child)
{
sys_memory.warning("sys_memory_container_destroy_parent_with_childs(cid=0x%x, must_0=%d, mc_child=*0x%x)", cid, must_0, mc_child);
// Reserved argument must be zero
if (must_0)
{
return CELL_EINVAL;
}
// Multi-process is not supported yet so child containers mean nothing at the moment
// Simply destroy parent
return sys_memory_container_destroy(cpu, cid);
}

View File

@ -137,3 +137,4 @@ error_code sys_memory_get_user_memory_stat(cpu_thread& cpu, vm::ptr<sys_memory_u
error_code sys_memory_container_create(cpu_thread& cpu, vm::ptr<u32> cid, u32 size);
error_code sys_memory_container_destroy(cpu_thread& cpu, u32 cid);
error_code sys_memory_container_get_size(cpu_thread& cpu, vm::ptr<sys_memory_info_t> mem_info, u32 cid);
error_code sys_memory_container_destroy_parent_with_childs(cpu_thread& cpu, u32 cid, u32 must_0, vm::ptr<u32> mc_child);

View File

@ -256,6 +256,14 @@ lv2_socket::lv2_socket(utils::serial& ar, lv2_socket_type _type)
// Try to match structure between different platforms
ar.pos += 8;
#endif
const s32 version = GET_SERIALIZATION_VERSION(lv2_net);
if (version >= 2)
{
ar(so_rcvtimeo, so_sendtimeo);
}
lv2_id = idm::last_id();
ar(last_bound_addr);
@ -282,7 +290,7 @@ std::shared_ptr<lv2_socket> lv2_socket::load(utils::serial& ar)
case SYS_NET_SOCK_STREAM_P2P: sock_lv2 = std::make_shared<lv2_socket_p2ps>(ar, type); break;
}
if (std::memcmp(&sock_lv2->last_bound_addr, std::array<u8, 16>{}.data(), 16))
if (std::memcmp(&sock_lv2->last_bound_addr, std::array<u8, 16>{}.data(), 16))
{
// NOTE: It is allowed to fail
sock_lv2->bind(sock_lv2->last_bound_addr);
@ -303,6 +311,7 @@ void lv2_socket::save(utils::serial& ar, bool save_only_this_class)
#else
ar(std::array<char, 8>{});
#endif
ar(so_rcvtimeo, so_sendtimeo);
ar(last_bound_addr);
return;
}
@ -830,6 +839,13 @@ error_code sys_net_bnet_recvfrom(ppu_thread& ppu, s32 s, vm::ptr<void> buf, u32
}
}
if (sock.so_rcvtimeo && get_guest_system_time() - ppu.start_time > sock.so_rcvtimeo)
{
result = -SYS_NET_EWOULDBLOCK;
lv2_obj::awake(&ppu);
return true;
}
sock.set_poll_event(lv2_socket::poll_t::read);
return false;
});
@ -1030,6 +1046,14 @@ error_code sys_net_bnet_sendto(ppu_thread& ppu, s32 s, vm::cptr<void> buf, u32 l
return true;
}
}
if (sock.so_sendtimeo && get_guest_system_time() - ppu.start_time > sock.so_sendtimeo)
{
result = -SYS_NET_EWOULDBLOCK;
lv2_obj::awake(&ppu);
return true;
}
sock.set_poll_event(lv2_socket::poll_t::write);
return false;
});

View File

@ -106,7 +106,7 @@ void lv2_socket::handle_events(const pollfd& native_pfd, [[maybe_unused]] bool u
if (native_pfd.revents & POLLERR && events.test_and_reset(lv2_socket::poll_t::error))
events_happening += lv2_socket::poll_t::error;
if (events_happening)
if (events_happening || (!queue.empty() && (so_rcvtimeo || so_sendtimeo)))
{
std::lock_guard lock(mutex);
#ifdef _WIN32
@ -114,7 +114,7 @@ void lv2_socket::handle_events(const pollfd& native_pfd, [[maybe_unused]] bool u
set_connecting(false);
#endif
for (auto it = queue.begin(); events_happening && it != queue.end();)
for (auto it = queue.begin(); it != queue.end();)
{
if (it->second(events_happening))
{

View File

@ -150,4 +150,8 @@ protected:
#endif
sys_net_sockaddr last_bound_addr{};
public:
u64 so_rcvtimeo = 0;
u64 so_sendtimeo = 0;
};

View File

@ -634,13 +634,18 @@ s32 lv2_socket_native::setsockopt(s32 level, s32 optname, const std::vector<u8>&
native_opt = optname == SYS_NET_SO_SNDTIMEO ? SO_SNDTIMEO : SO_RCVTIMEO;
native_val = &native_timeo;
native_len = sizeof(native_timeo);
const int tv_sec = ::narrow<int>(reinterpret_cast<const sys_net_timeval*>(optval.data())->tv_sec);
const int tv_usec = ::narrow<int>(reinterpret_cast<const sys_net_timeval*>(optval.data())->tv_usec);
#ifdef _WIN32
native_timeo = ::narrow<int>(reinterpret_cast<const sys_net_timeval*>(optval.data())->tv_sec) * 1000;
native_timeo += ::narrow<int>(reinterpret_cast<const sys_net_timeval*>(optval.data())->tv_usec) / 1000;
native_timeo = tv_sec * 1000;
native_timeo += tv_usec / 1000;
#else
native_timeo.tv_sec = ::narrow<int>(reinterpret_cast<const sys_net_timeval*>(optval.data())->tv_sec);
native_timeo.tv_usec = ::narrow<int>(reinterpret_cast<const sys_net_timeval*>(optval.data())->tv_usec);
native_timeo.tv_sec = tv_sec;
native_timeo.tv_usec = tv_usec;
#endif
// TODO: Overflow detection?
(optname == SYS_NET_SO_SNDTIMEO ? so_sendtimeo : so_rcvtimeo) = tv_usec + tv_sec * 1000000;
break;
}
case SYS_NET_SO_LINGER:

View File

@ -409,7 +409,10 @@ void lv2_exitspawn(ppu_thread& ppu, std::vector<std::string>& argv, std::vector<
{
ppu.state += cpu_flag::wait;
Emu.CallFromMainThread([argv = std::move(argv), envp = std::move(envp), data = std::move(data)]() mutable
// sys_sm_shutdown
const bool is_real_reboot = (ppu.gpr[11] == 379);
Emu.CallFromMainThread([is_real_reboot, argv = std::move(argv), envp = std::move(envp), data = std::move(data)]() mutable
{
sys_process.success("Process finished -> %s", argv[0]);
@ -428,8 +431,14 @@ void lv2_exitspawn(ppu_thread& ppu, std::vector<std::string>& argv, std::vector<
using namespace id_manager;
auto func = [old_size = g_fxo->get<lv2_memory_container>().size, vec = (reader_lock{g_mutex}, g_fxo->get<id_map<lv2_memory_container>>().vec)](u32 sdk_suggested_mem) mutable
auto func = [is_real_reboot, old_size = g_fxo->get<lv2_memory_container>().size, vec = (reader_lock{g_mutex}, g_fxo->get<id_map<lv2_memory_container>>().vec)](u32 sdk_suggested_mem) mutable
{
if (is_real_reboot)
{
// Do not save containers on actual reboot
vec.clear();
}
// Save LV2 memory containers
g_fxo->init<id_map<lv2_memory_container>>()->vec = std::move(vec);

View File

@ -19,6 +19,7 @@ extern std::shared_ptr<lv2_prx> ppu_load_prx(const ppu_prx_object&, const std::s
extern void ppu_unload_prx(const lv2_prx& prx);
extern bool ppu_initialize(const ppu_module&, bool = false);
extern void ppu_finalize(const ppu_module&);
extern void ppu_manual_load_imports_exports(u32 imports_start, u32 imports_size, u32 exports_start, u32 exports_size, std::basic_string<bool>& loaded_flags);
LOG_CHANNEL(sys_prx);
@ -741,8 +742,6 @@ error_code _sys_prx_unload_module(ppu_thread& ppu, u32 id, u64 flags, vm::ptr<sy
return CELL_OK;
}
void ppu_manual_load_imports_exports(u32 imports_start, u32 imports_size, u32 exports_start, u32 exports_size, std::basic_string<bool>& loaded_flags);
void lv2_prx::load_exports()
{
if (exports_end <= exports_start)
@ -858,7 +857,14 @@ error_code _sys_prx_register_library(ppu_thread& ppu, vm::ptr<void> library)
{
ppu.state += cpu_flag::wait;
sys_prx.todo("_sys_prx_register_library(library=*0x%x)", library);
sys_prx.notice("_sys_prx_register_library(library=*0x%x)", library);
if (!vm::check_addr(library.addr()))
{
return CELL_EFAULT;
}
ppu_manual_load_imports_exports(0, 0, library.addr(), 0x1c, *std::make_unique<std::basic_string<bool>>());
return CELL_OK;
}

View File

@ -279,21 +279,52 @@ error_code sys_rsx_context_allocate(cpu_thread& cpu, vm::ptr<u32> context_id, vm
* lv2 SysCall 671 (0x29F): sys_rsx_context_free
* @param context_id (IN): RSX context generated by sys_rsx_context_allocate to free the context.
*/
error_code sys_rsx_context_free(cpu_thread& cpu, u32 context_id)
error_code sys_rsx_context_free(ppu_thread& ppu, u32 context_id)
{
cpu.state += cpu_flag::wait;
ppu.state += cpu_flag::wait;
sys_rsx.todo("sys_rsx_context_free(context_id=0x%x)", context_id);
const auto render = rsx::get_current_renderer();
rsx::eng_lock fifo_lock(render);
std::scoped_lock lock(render->sys_rsx_mtx);
if (context_id != 0x55555555 || !render->dma_address)
const u32 dma_address = render->dma_address;
render->dma_address = 0;
if (context_id != 0x55555555 || !dma_address || render->state & cpu_flag::ret)
{
return CELL_EINVAL;
}
g_fxo->get<rsx::vblank_thread>() = thread_state::finished;
const u32 queue_id = vm::_ptr<RsxDriverInfo>(render->driver_info)->handler_queue;
render->state += cpu_flag::ret;
while (render->state & cpu_flag::ret)
{
thread_ctrl::wait_for(1000);
}
sys_event_port_disconnect(ppu, render->rsx_event_port);
sys_event_port_destroy(ppu, render->rsx_event_port);
sys_event_queue_destroy(ppu, queue_id, SYS_EVENT_QUEUE_DESTROY_FORCE);
render->label_addr = 0;
render->driver_info = 0;
render->main_mem_size = 0;
render->rsx_event_port = 0;
render->display_buffers_count = 0;
render->current_display_buffer = 0;
render->ctrl = nullptr;
render->rsx_thread_running = false;
render->serialized = false;
ensure(vm::get(vm::rsx_context)->dealloc(dma_address));
return CELL_OK;
}
@ -437,13 +468,46 @@ error_code sys_rsx_context_attribute(u32 context_id, u32 package_id, u64 a3, u64
{
case 0x001: // FIFO
{
rsx::eng_lock rlock(render);
const u64 get = static_cast<u32>(a3);
const u64 put = static_cast<u32>(a4);
vm::_ref<atomic_be_t<u64>>(render->dma_address + ::offset32(&RsxDmaControl::put)).release(put << 32 | get);
render->fifo_ctrl->set_get(static_cast<u32>(get));
render->last_known_code_start = get;
render->sync_point_request.release(true);
const u64 get_put = put << 32 | get;
bool changed_value = false;
{
rsx::eng_lock rlock(render);
std::lock_guard lock(render->sys_rsx_mtx);
render->fifo_ctrl->abort();
while (render->new_get_put == umax)
{
if (render->new_get_put.compare_and_swap_test(u64{umax}, get_put))
{
changed_value = true;
break;
}
// Assume CAS can fail spuriously here
}
}
// Wait for the first store to complete (or be aborted)
while (render->new_get_put != umax)
{
if (Emu.IsStopped() && changed_value)
{
// Abort
if (render->new_get_put.compare_and_swap_test(get_put, u64{umax}))
{
if (auto cpu = cpu_thread::get_current())
{
cpu->state += cpu_flag::again;
break;
}
}
}
thread_ctrl::wait_for(1000);
}
break;
}

View File

@ -134,7 +134,7 @@ error_code sys_rsx_device_close(cpu_thread& cpu);
error_code sys_rsx_memory_allocate(cpu_thread& cpu, vm::ptr<u32> mem_handle, vm::ptr<u64> mem_addr, u32 size, u64 flags, u64 a5, u64 a6, u64 a7);
error_code sys_rsx_memory_free(cpu_thread& cpu, u32 mem_handle);
error_code sys_rsx_context_allocate(cpu_thread& cpu, vm::ptr<u32> context_id, vm::ptr<u64> lpar_dma_control, vm::ptr<u64> lpar_driver_info, vm::ptr<u64> lpar_reports, u64 mem_ctx, u64 system_mode);
error_code sys_rsx_context_free(cpu_thread& cpu, u32 context_id);
error_code sys_rsx_context_free(ppu_thread& ppu, u32 context_id);
error_code sys_rsx_context_iomap(cpu_thread& cpu, u32 context_id, u32 io, u32 ea, u32 size, u64 flags);
error_code sys_rsx_context_iounmap(cpu_thread& cpu, u32 context_id, u32 io, u32 size);
error_code sys_rsx_context_attribute(u32 context_id, u32 package_id, u64 a3, u64 a4, u64 a5, u64 a6);

View File

@ -129,7 +129,7 @@ void sys_spu_image::free() const
}
}
void sys_spu_image::deploy(u8* loc, std::span<const sys_spu_segment> segs)
void sys_spu_image::deploy(u8* loc, std::span<const sys_spu_segment> segs, bool is_verbose)
{
// Segment info dump
std::string dump;
@ -192,7 +192,7 @@ void sys_spu_image::deploy(u8* loc, std::span<const sys_spu_segment> segs)
applied += g_fxo->get<patch_engine>().apply(Emu.GetTitleID() + '-' + hash, loc);
}
spu_log.notice("Loaded SPU image: %s (<- %u)%s", hash, applied.size(), dump);
(is_verbose ? spu_log.notice : sys_spu.trace)("Loaded SPU image: %s (<- %u)%s", hash, applied.size(), dump);
}
lv2_spu_group::lv2_spu_group(utils::serial& ar) noexcept
@ -1061,7 +1061,7 @@ error_code sys_spu_thread_group_start(ppu_thread& ppu, u32 id)
auto& args = group->args[thread->lv2_id >> 24];
auto& img = group->imgs[thread->lv2_id >> 24];
sys_spu_image::deploy(thread->ls, std::span(img.second.data(), img.second.size()));
sys_spu_image::deploy(thread->ls, std::span(img.second.data(), img.second.size()), group->stop_count < 5);
thread->cpu_init();
thread->gpr[3] = v128::from64(0, args[0]);

View File

@ -227,7 +227,7 @@ struct sys_spu_image
void load(const fs::file& stream);
void free() const;
static void deploy(u8* loc, std::span<const sys_spu_segment> segs);
static void deploy(u8* loc, std::span<const sys_spu_segment> segs, bool is_verbose = true);
};
enum : u32

View File

@ -546,7 +546,7 @@ namespace rsx
if (dma_address)
{
ctrl = vm::_ptr<RsxDmaControl>(dma_address);
m_rsx_thread_exiting = false;
rsx_thread_running = true;
}
if (g_cfg.savestate.start_paused)
@ -712,7 +712,14 @@ namespace rsx
thread_ctrl::wait_for(1000);
}
on_task();
do
{
on_task();
state -= cpu_flag::ret;
}
while (!is_stopped());
on_exit();
}
@ -778,8 +785,11 @@ namespace rsx
rsx::overlays::reset_performance_overlay();
g_fxo->get<rsx::dma_manager>().init();
on_init_thread();
if (!is_initialized)
{
g_fxo->get<rsx::dma_manager>().init();
on_init_thread();
}
is_initialized = true;
is_initialized.notify_all();
@ -797,10 +807,16 @@ namespace rsx
const u64 event_flags = unsent_gcm_events.exchange(0);
Emu.CallFromMainThread([]{ Emu.RunPPU(); });
if (Emu.IsStarting())
{
Emu.CallFromMainThread([]
{
Emu.RunPPU();
});
}
// Wait for startup (TODO)
while (m_rsx_thread_exiting || Emu.IsPaused())
while (!rsx_thread_running || Emu.IsPaused())
{
// Execute backend-local tasks first
do_local_task(performance_counters.state);
@ -835,7 +851,7 @@ namespace rsx
return;
}
g_fxo->init<named_thread>("VBlank Thread", [this]()
g_fxo->get<vblank_thread>().set_thread(std::make_shared<named_thread<std::function<void()>>>("VBlank Thread", [this]()
{
// See sys_timer_usleep for details
#ifdef __linux__
@ -882,7 +898,7 @@ namespace rsx
vblank_rate = g_cfg.video.vblank_rate;
vblank_period = 1'000'000 + u64{g_cfg.video.vblank_ntsc.get()} * 1000;
}
post_vblank_event(post_event_time);
}
}
@ -909,7 +925,16 @@ namespace rsx
start_time = rsx::uclock() - start_time;
}
}
});
}));
struct join_vblank
{
~join_vblank() noexcept
{
g_fxo->get<vblank_thread>() = thread_state::finished;
}
} join_vblank_obj{};
// Raise priority above other threads
thread_ctrl::scoped_priority high_prio(+1);
@ -925,6 +950,11 @@ namespace rsx
if (external_interrupt_lock)
{
wait_pause();
if (!rsx_thread_running)
{
return;
}
}
// Note a possible rollback address
@ -967,6 +997,7 @@ namespace rsx
do_local_task(rsx::FIFO_state::lock_wait);
g_fxo->get<rsx::dma_manager>().join();
g_fxo->get<vblank_thread>() = thread_state::finished;
state += cpu_flag::exit;
}
@ -1266,6 +1297,21 @@ namespace rsx
m_invalidated_memory_range = utils::address_range::start_end(0x2 << 28, constants::local_mem_base + local_mem_size - 1);
handle_invalidated_memory_range();
}
else if (new_get_put != umax && state != FIFO_state::lock_wait)
{
const u64 get_put = new_get_put.exchange(u64{umax});
// Recheck in case aborted externally
if (get_put != umax)
{
vm::_ref<atomic_be_t<u64>>(dma_address + ::offset32(&RsxDmaControl::put)).release(get_put);
fifo_ctrl->set_get(static_cast<u32>(get_put));
fifo_ctrl->abort();
fifo_ret_addr = RSX_CALL_STACK_EMPTY;
last_known_code_start = static_cast<u32>(get_put);
sync_point_request.release(true);
}
}
}
std::array<u32, 4> thread::get_color_surface_addresses() const
@ -2387,11 +2433,12 @@ namespace rsx
dma_address = ctrlAddress;
ctrl = vm::_ptr<RsxDmaControl>(ctrlAddress);
flip_status = CELL_GCM_DISPLAY_FLIP_STATUS_DONE;
fifo_ret_addr = RSX_CALL_STACK_EMPTY;
vm::write32(device_addr + 0x30, 1);
std::memset(display_buffers, 0, sizeof(display_buffers));
m_rsx_thread_exiting = false;
rsx_thread_running = true;
}
std::pair<u32, u32> thread::calculate_memory_requirements(const vertex_input_layout& layout, u32 first_vertex, u32 vertex_count)
@ -3098,7 +3145,7 @@ namespace rsx
// we must block until RSX has invalidated the memory
// or lock m_mtx_task and do it ourselves
if (m_rsx_thread_exiting)
if (!rsx_thread_running)
return;
reader_lock lock(m_mtx_task);
@ -3117,7 +3164,7 @@ namespace rsx
void thread::on_notify_memory_unmapped(u32 address, u32 size)
{
if (!m_rsx_thread_exiting && address < rsx::constants::local_mem_base)
if (rsx_thread_running && address < rsx::constants::local_mem_base)
{
if (!isHLE)
{
@ -3245,7 +3292,7 @@ namespace rsx
external_interrupt_ack.store(true);
while (external_interrupt_lock)
while (external_interrupt_lock && (cpu_flag::ret - state))
{
// TODO: Investigate non busy-spinning method
utils::pause();
@ -3253,7 +3300,7 @@ namespace rsx
external_interrupt_ack.store(false);
}
while (external_interrupt_lock);
while (external_interrupt_lock && (cpu_flag::ret - state));
}
u32 thread::get_load()
@ -3734,4 +3781,19 @@ namespace rsx
frame_times.push_back(frame_time_t{preempt_count, current_time, current_tsc});
}
}
// Install a new vblank worker thread; the previously held thread (if any) is
// swapped out into the argument and released when it goes out of scope.
void vblank_thread::set_thread(std::shared_ptr<named_thread<std::function<void()>>> thread)
{
std::swap(m_thread, thread);
}
// Forward a thread_state assignment (e.g. thread_state::finished) to the
// managed thread; no-op when no thread is currently installed.
vblank_thread& vblank_thread::operator=(thread_state state)
{
if (m_thread)
{
*m_thread = state;
}
return *this;
}
} // namespace rsx

View File

@ -29,7 +29,7 @@
#include "Emu/system_config.h"
extern atomic_t<bool> g_user_asked_for_frame_capture;
extern atomic_t<bool> g_disable_frame_limit;
extern atomic_t<bool> g_disable_frame_limit;
extern rsx::frame_trace_data frame_debug;
extern rsx::frame_capture_data frame_capture;
@ -75,7 +75,7 @@ namespace rsx
{
added_wait |= !self->state.test_and_set(cpu_flag::wait);
}
if (!self || self->id_type() != 0x55u)
{
IsFullLock ? mutex_.lock() : mutex_.lock_shared();
@ -450,6 +450,20 @@ namespace rsx
}
};
class vblank_thread
{
std::shared_ptr<named_thread<std::function<void()>>> m_thread;
public:
vblank_thread() = default;
vblank_thread(const vblank_thread&) = delete;
void set_thread(std::shared_ptr<named_thread<std::function<void()>>> thread);
vblank_thread& operator=(thread_state);
vblank_thread& operator=(const vblank_thread&) = delete;
};
struct backend_configuration
{
bool supports_multidraw; // Draw call batching
@ -489,7 +503,6 @@ namespace rsx
void cpu_task() override;
protected:
atomic_t<bool> m_rsx_thread_exiting{ true };
std::array<push_buffer_vertex_info, 16> vertex_push_buffers;
std::vector<u32> element_push_buffer;
@ -504,6 +517,7 @@ namespace rsx
// FIFO
public:
std::unique_ptr<FIFO::FIFO_control> fifo_ctrl;
atomic_t<bool> rsx_thread_running{ false };
std::vector<std::pair<u32, u32>> dump_callstack_list() const override;
protected:
@ -541,6 +555,7 @@ namespace rsx
u32 dma_address{0};
rsx_iomap_table iomap_table;
u32 restore_point = 0;
atomic_t<u64> new_get_put = u64{umax};
u32 dbg_step_pc = 0;
u32 last_known_code_start = 0;
atomic_t<u32> external_interrupt_lock{ 0 };

View File

@ -40,7 +40,7 @@ SERIALIZATION_VER(ppu, 1, 1)
SERIALIZATION_VER(spu, 2, 1, 2 /*spu_limits_t ctor*/)
SERIALIZATION_VER(lv2_sync, 3, 1)
SERIALIZATION_VER(lv2_vm, 4, 1)
SERIALIZATION_VER(lv2_net, 5, 1)
SERIALIZATION_VER(lv2_net, 5, 1, 2/*RECV/SEND timeout*/)
SERIALIZATION_VER(lv2_fs, 6, 1)
SERIALIZATION_VER(lv2_prx_overlay, 7, 1, 2/*PRX dynamic exports*/, 3/*Conditionally Loaded Local Exports*/)
SERIALIZATION_VER(lv2_memory, 8, 1)
@ -57,7 +57,7 @@ namespace np
}
#ifdef _MSC_VER
// Compiler bug: the lambda function body does not seem to inherit the used namespace, at least for the function declaration
// Compiler bug: the lambda function body does not seem to inherit the used namespace, at least for the function declaration
SERIALIZATION_VER(rsx, 10)
SERIALIZATION_VER(sceNp, 11)
#endif

View File

@ -828,9 +828,20 @@ void game_list_frame::OnRefreshFinished()
m_size_watcher_cancel = std::make_shared<atomic_t<bool>>(false);
m_size_watcher.setFuture(QtConcurrent::map(m_game_data, [this, cancel = m_size_watcher_cancel](const game_info& game) -> void
m_size_watcher.setFuture(QtConcurrent::map(m_game_data, [this, cancel = m_size_watcher_cancel, dev_flash = g_cfg_vfs.get_dev_flash()](const game_info& game) -> void
{
if (game) game->info.size_on_disk = fs::get_dir_size(game->info.path, 1, cancel.get());
if (game)
{
if (game->info.path.starts_with(dev_flash))
{
// Do not report size of apps inside /dev_flash (it does not make sense to do so)
game->info.size_on_disk = 0;
}
else
{
game->info.size_on_disk = fs::get_dir_size(game->info.path, 1, cancel.get());
}
}
}));
}