
vk: Do not hard-sync on first sign of fragmentation

- It is very likely that the resource would be available if we just waited a while for the GPU queue to clear.
- Instead of also discarding the current work, first check if we can get by without a hard sync.
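
In short, descriptor pool creation failure now gets a soft recovery attempt before any work is discarded, and only escalates to a full hard sync when that is not enough. Below is a minimal, self-contained sketch of that two-tier flow; try_create_pool, wait_for_gpu_queue and hard_flush are hypothetical stand-ins (not RPCS3 functions), corresponding roughly to vkCreateDescriptorPool, m_primary_cb_list.wait_all() and flush_command_queue(true, true) in the diffs below.

#include <cstdio>

// Hedged sketch of the two-tier recovery described above. None of these names
// exist in RPCS3; they only model the control flow introduced by this commit.
static int g_failures_left = 2; // pretend the first two attempts hit fragmentation

static bool try_create_pool()    { return g_failures_left-- <= 0; }
static void wait_for_gpu_queue() { std::puts("soft recovery: wait for enqueued releases"); }
static void hard_flush()         { std::puts("hard recovery: full flush + sync"); }

static bool allocate_with_recovery()
{
    for (int attempt = 0; attempt < 3; ++attempt)
    {
        if (try_create_pool())
            return true;              // pool created, nothing to recover

        if (attempt == 0)
            wait_for_gpu_queue();     // 1st failure: releases are probably just in flight
        else
            hard_flush();             // 2nd failure: escalate to a hard sync
    }
    return false;                     // still fragmented after both tiers
}

int main()
{
    std::printf("allocated: %d\n", allocate_with_recovery());
}

The actual wiring is in the hunks below: the renderer's handler now takes an is_fatal flag, the command buffer chain gains wait_all() for the soft path, and the descriptor pool's retry loop decides when that flag flips to true.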
kd-11 authored on 2023-06-13 22:29:08 +03:00; committed by kd-11
parent 29f3eec957
commit 49c6c2c529
5 changed files with 29 additions and 18 deletions


@@ -1202,8 +1202,15 @@ bool VKGSRender::on_vram_exhausted(rsx::problem_severity severity)
 	return any_cache_relieved;
 }
 
-void VKGSRender::on_descriptor_pool_fragmentation()
+void VKGSRender::on_descriptor_pool_fragmentation(bool is_fatal)
 {
+	if (!is_fatal)
+	{
+		// It is very likely that the release is simply in progress (enqueued)
+		m_primary_cb_list.wait_all();
+		return;
+	}
+
 	// Just flush everything. Unless the hardware is very deficient, this should happen very rarely.
 	flush_command_queue(true, true);
 }


@@ -264,7 +264,7 @@
 	bool on_vram_exhausted(rsx::problem_severity severity);
 
 	// Handle pool creation failure due to fragmentation
-	void on_descriptor_pool_fragmentation();
+	void on_descriptor_pool_fragmentation(bool is_fatal);
 
 	// Conditional rendering
 	void begin_conditional_rendering(const std::vector<rsx::reports::occlusion_query_info*>& sources) override;


@@ -345,6 +345,14 @@ namespace vk
 			}
 		}
 
+		void wait_all()
+		{
+			for (auto& cb : m_cb_list)
+			{
+				cb.wait();
+			}
+		}
+
 		inline command_buffer_chunk* next()
 		{
 			const auto result_id = ++m_current_index % Count;


@@ -269,11 +269,11 @@ namespace vk
 		renderer->emergency_query_cleanup(&cmd);
 	}
 
-	void on_descriptor_pool_fragmentation()
+	void on_descriptor_pool_fragmentation(bool is_fatal)
 	{
 		if (auto vkthr = dynamic_cast<VKGSRender*>(rsx::get_current_renderer()))
 		{
-			vkthr->on_descriptor_pool_fragmentation();
+			vkthr->on_descriptor_pool_fragmentation(is_fatal);
 		}
 	}
 }


@@ -5,7 +5,7 @@
 namespace vk
 {
 	// Error handler callback
-	extern void on_descriptor_pool_fragmentation();
+	extern void on_descriptor_pool_fragmentation(bool fatal);
 
 	namespace descriptors
 	{
@@ -228,38 +228,32 @@ namespace vk
 		m_current_subpool_offset = 0;
 		m_current_subpool_index = umax;
 
-		// Only attempt recovery once. Can be bumped up if we have a more complex setup in future.
-		int retries = 1;
+		const int max_retries = 2;
+		int retries = max_retries;
 
-		while (m_current_subpool_index == umax)
+		do
 		{
 			for (u32 index = 0; index < m_device_subpools.size(); ++index)
 			{
 				if (!m_device_subpools[index].busy)
 				{
 					m_current_subpool_index = index;
-					break;
+					goto done; // Nested break
 				}
 			}
 
-			if (m_current_subpool_index != umax)
-			{
-				// We found something, exit early
-				break;
-			}
-
 			VkDescriptorPool subpool = VK_NULL_HANDLE;
 			if (VkResult result = vkCreateDescriptorPool(*m_owner, &m_create_info, nullptr, &subpool))
 			{
 				if (retries-- && (result == VK_ERROR_FRAGMENTATION_EXT))
 				{
 					rsx_log.warning("Descriptor pool creation failed with fragmentation error. Will attempt to recover.");
-					vk::on_descriptor_pool_fragmentation();
+					vk::on_descriptor_pool_fragmentation(!retries);
 					continue;
 				}
 
 				vk::die_with_error(result);
-				break;
+				fmt::throw_exception("Unreachable");
 			}
 
 			// New subpool created successfully
@@ -272,8 +266,10 @@
 			});
 
 			m_current_subpool_index = m_device_subpools.size() - 1;
-		}
+
+		} while (m_current_subpool_index == umax);
 
+done:
 		m_device_subpools[m_current_subpool_index].busy = VK_TRUE;
 		m_current_pool_handle = m_device_subpools[m_current_subpool_index].handle;
 	}
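
A note on how retries maps to is_fatal in the loop above: with max_retries = 2, the first VK_ERROR_FRAGMENTATION_EXT failure calls on_descriptor_pool_fragmentation(false) (soft wait), the second calls it with true (hard flush), and a third failure skips the retry branch and reaches vk::die_with_error. The standalone check below only walks that countdown; it is illustrative and not RPCS3 code.

#include <cassert>

// Mirrors the `if (retries-- && ...)` / `!retries` pair from the hunk above.
int main()
{
    const int max_retries = 2;
    int retries = max_retries;

    // 1st fragmentation failure: retries-- yields 2 (retry taken), retries is now 1,
    // so !retries == false -> non-fatal recovery (wait for the GPU queue).
    bool retried = retries-- != 0;
    bool is_fatal = (retries == 0);
    assert(retried && !is_fatal);

    // 2nd failure: retries-- yields 1 (retry taken), retries is now 0,
    // so !retries == true -> fatal recovery (full command queue flush).
    retried = retries-- != 0;
    is_fatal = (retries == 0);
    assert(retried && is_fatal);

    // 3rd failure: retries-- yields 0, the retry branch is not taken and the
    // real code falls through to vk::die_with_error(result).
    retried = retries-- != 0;
    assert(!retried);
    return 0;
}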