From 49c6c2c52910101b9ea8df7ab41d00103601c1ea Mon Sep 17 00:00:00 2001 From: kd-11 Date: Tue, 13 Jun 2023 22:29:08 +0300 Subject: [PATCH] vk: Do not hard-sync on first sign of fragmentation - It is very likely that the resource would be available if we just waited a while for the GPU queue to clear. - Instead of also discarding the current work, first check if we can get by without a hard sync. --- rpcs3/Emu/RSX/VK/VKGSRender.cpp | 9 ++++++++- rpcs3/Emu/RSX/VK/VKGSRender.h | 2 +- rpcs3/Emu/RSX/VK/VKGSRenderTypes.hpp | 8 ++++++++ rpcs3/Emu/RSX/VK/VKHelpers.cpp | 4 ++-- rpcs3/Emu/RSX/VK/vkutils/descriptors.cpp | 24 ++++++++++-------------- 5 files changed, 29 insertions(+), 18 deletions(-) diff --git a/rpcs3/Emu/RSX/VK/VKGSRender.cpp b/rpcs3/Emu/RSX/VK/VKGSRender.cpp index e337165980..28d595d61d 100644 --- a/rpcs3/Emu/RSX/VK/VKGSRender.cpp +++ b/rpcs3/Emu/RSX/VK/VKGSRender.cpp @@ -1202,8 +1202,15 @@ bool VKGSRender::on_vram_exhausted(rsx::problem_severity severity) return any_cache_relieved; } -void VKGSRender::on_descriptor_pool_fragmentation() +void VKGSRender::on_descriptor_pool_fragmentation(bool is_fatal) { + if (!is_fatal) + { + // It is very likely that the release is simply in progress (enqueued) + m_primary_cb_list.wait_all(); + return; + } + // Just flush everything. Unless the hardware is very deficient, this should happen very rarely. 
flush_command_queue(true, true); } diff --git a/rpcs3/Emu/RSX/VK/VKGSRender.h b/rpcs3/Emu/RSX/VK/VKGSRender.h index b8de04eb23..10afb04bdb 100644 --- a/rpcs3/Emu/RSX/VK/VKGSRender.h +++ b/rpcs3/Emu/RSX/VK/VKGSRender.h @@ -264,7 +264,7 @@ public: bool on_vram_exhausted(rsx::problem_severity severity); // Handle pool creation failure due to fragmentation - void on_descriptor_pool_fragmentation(); + void on_descriptor_pool_fragmentation(bool is_fatal); // Conditional rendering void begin_conditional_rendering(const std::vector<rsx::reports::occlusion_query_info*>& sources) override; diff --git a/rpcs3/Emu/RSX/VK/VKGSRenderTypes.hpp b/rpcs3/Emu/RSX/VK/VKGSRenderTypes.hpp index 10d48f848b..71bdfd82bb 100644 --- a/rpcs3/Emu/RSX/VK/VKGSRenderTypes.hpp +++ b/rpcs3/Emu/RSX/VK/VKGSRenderTypes.hpp @@ -345,6 +345,14 @@ namespace vk } } + void wait_all() + { + for (auto& cb : m_cb_list) + { + cb.wait(); + } + } + inline command_buffer_chunk* next() { const auto result_id = ++m_current_index % Count; diff --git a/rpcs3/Emu/RSX/VK/VKHelpers.cpp b/rpcs3/Emu/RSX/VK/VKHelpers.cpp index f6d3ea417e..c8403de156 100644 --- a/rpcs3/Emu/RSX/VK/VKHelpers.cpp +++ b/rpcs3/Emu/RSX/VK/VKHelpers.cpp @@ -269,11 +269,11 @@ namespace vk renderer->emergency_query_cleanup(&cmd); } - void on_descriptor_pool_fragmentation() + void on_descriptor_pool_fragmentation(bool is_fatal) { if (auto vkthr = dynamic_cast<VKGSRender*>(rsx::get_current_renderer())) { - vkthr->on_descriptor_pool_fragmentation(); + vkthr->on_descriptor_pool_fragmentation(is_fatal); } } } diff --git a/rpcs3/Emu/RSX/VK/vkutils/descriptors.cpp b/rpcs3/Emu/RSX/VK/vkutils/descriptors.cpp index d7ec16a58a..c96b5b145f 100644 --- a/rpcs3/Emu/RSX/VK/vkutils/descriptors.cpp +++ b/rpcs3/Emu/RSX/VK/vkutils/descriptors.cpp @@ -5,7 +5,7 @@ namespace vk { // Error handler callback - extern void on_descriptor_pool_fragmentation(); + extern void on_descriptor_pool_fragmentation(bool fatal); namespace descriptors { @@ -228,38 +228,32 @@ namespace vk m_current_subpool_offset = 0; 
m_current_subpool_index = umax; - // Only attempt recovery once. Can be bumped up if we have a more complex setup in future. - int retries = 1; + const int max_retries = 2; + int retries = max_retries; - while (m_current_subpool_index == umax) + do { for (u32 index = 0; index < m_device_subpools.size(); ++index) { if (!m_device_subpools[index].busy) { m_current_subpool_index = index; - break; + goto done; // Nested break } } - if (m_current_subpool_index != umax) - { - // We found something, exit early - break; - } - VkDescriptorPool subpool = VK_NULL_HANDLE; if (VkResult result = vkCreateDescriptorPool(*m_owner, &m_create_info, nullptr, &subpool)) { if (retries-- && (result == VK_ERROR_FRAGMENTATION_EXT)) { rsx_log.warning("Descriptor pool creation failed with fragmentation error. Will attempt to recover."); - vk::on_descriptor_pool_fragmentation(); + vk::on_descriptor_pool_fragmentation(!retries); continue; } vk::die_with_error(result); - break; + fmt::throw_exception("Unreachable"); } // New subpool created successfully @@ -272,8 +266,10 @@ namespace vk }); m_current_subpool_index = m_device_subpools.size() - 1; - } + } while (m_current_subpool_index == umax); + + done: m_device_subpools[m_current_subpool_index].busy = VK_TRUE; m_current_pool_handle = m_device_subpools[m_current_subpool_index].handle; }