mirror of
https://github.com/RPCS3/rpcs3.git
synced 2024-11-26 04:32:35 +01:00
rsx/vk: Improve recovery during OOM situations
- Do not spill when running on an IGP with only one heap, as it will just crash anyway.
- Do not handle collapse operations when OOM. This will likely just crash, and there are better ways to handle old surfaces.
- Spill or remove everything not in the current working set.
- TODO: MSAA spill without VRAM allocations.
This commit is contained in:
parent
5940247200
commit
d53f2f10fb
@ -1087,12 +1087,12 @@ namespace rsx
|
||||
return true;
|
||||
}
|
||||
|
||||
virtual bool can_collapse_surface(const surface_storage_type&)
|
||||
virtual bool can_collapse_surface(const surface_storage_type&, problem_severity)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
virtual bool handle_memory_pressure(command_list_type cmd, problem_severity /*severity*/)
|
||||
virtual bool handle_memory_pressure(command_list_type cmd, problem_severity severity)
|
||||
{
|
||||
auto process_list_function = [&](std::unordered_map<u32, surface_storage_type>& data)
|
||||
{
|
||||
@ -1102,7 +1102,7 @@ namespace rsx
|
||||
if (surface->dirty())
|
||||
{
|
||||
// Force memory barrier to release some resources
|
||||
if (can_collapse_surface(It->second))
|
||||
if (can_collapse_surface(It->second, severity))
|
||||
{
|
||||
// NOTE: Do not call memory_barrier under fatal conditions as it can create allocations!
|
||||
// It would be safer to leave the resources hanging around and spill them instead
|
||||
@ -1121,6 +1121,7 @@ namespace rsx
|
||||
}
|
||||
};
|
||||
|
||||
ensure(severity >= rsx::problem_severity::moderate);
|
||||
const auto old_usage = m_active_memory_used;
|
||||
|
||||
// Try and find old surfaces to remove
|
||||
|
@ -863,7 +863,10 @@ bool VKGSRender::on_vram_exhausted(rsx::problem_severity severity)
|
||||
if (severity >= rsx::problem_severity::moderate)
|
||||
{
|
||||
// Check if we need to spill
|
||||
if (severity >= rsx::problem_severity::fatal && m_rtts.is_overallocated())
|
||||
const auto mem_info = m_device->get_memory_mapping();
|
||||
if (severity >= rsx::problem_severity::fatal && // Only spill for fatal errors
|
||||
mem_info.device_local != mem_info.host_visible_coherent && // Do not spill if it is an IGP, there is nowhere to spill to
|
||||
m_rtts.is_overallocated()) // Surface cache must be over-allocated by the design quota
|
||||
{
|
||||
// Queue a VRAM spill operation.
|
||||
m_rtts.spill_unused_memory();
|
||||
|
@ -35,15 +35,48 @@ namespace vk
|
||||
return quota * 0x100000;
|
||||
}
|
||||
|
||||
bool surface_cache::can_collapse_surface(const std::unique_ptr<vk::render_target>& surface)
|
||||
bool surface_cache::can_collapse_surface(const std::unique_ptr<vk::render_target>& surface, rsx::problem_severity severity)
|
||||
{
|
||||
if (surface->samples() == 1)
|
||||
{
|
||||
// No internal allocations needed for non-MSAA images
|
||||
return true;
|
||||
}
|
||||
|
||||
// MSAA surface, check if we have the memory for this...
|
||||
return vk::vmm_determine_memory_load_severity() < rsx::problem_severity::fatal;
|
||||
if (severity < rsx::problem_severity::fatal &&
|
||||
vk::vmm_determine_memory_load_severity() < rsx::problem_severity::fatal)
|
||||
{
|
||||
// We may be able to allocate what we need.
|
||||
return true;
|
||||
}
|
||||
|
||||
// Check if we need to do any allocations. Do not collapse in such a situation otherwise
|
||||
if (!surface->resolve_surface)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
else
|
||||
{
|
||||
// Resolve target does exist. Scan through the entire collapse chain
|
||||
for (auto& region : surface->old_contents)
|
||||
{
|
||||
if (region.source->samples() == 1)
|
||||
{
|
||||
// Not MSAA
|
||||
continue;
|
||||
}
|
||||
|
||||
if (vk::as_rtt(region.source)->resolve_surface)
|
||||
{
|
||||
// Has a resolve target.
|
||||
continue;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool surface_cache::handle_memory_pressure(vk::command_buffer& cmd, rsx::problem_severity severity)
|
||||
@ -52,15 +85,14 @@ namespace vk
|
||||
|
||||
if (severity >= rsx::problem_severity::fatal)
|
||||
{
|
||||
std::vector<std::unique_ptr<vk::viewable_image>> resolve_target_cache;
|
||||
std::vector<vk::render_target*> deferred_spills;
|
||||
auto gc = vk::get_resource_manager();
|
||||
|
||||
// Drop MSAA resolve/unresolve caches. Only trigger when a hard sync is guaranteed to follow else it will cause even more problems!
|
||||
// 2-pass to ensure resources are available where they are most needed
|
||||
auto relieve_memory_pressure = [&](const auto& list)
|
||||
{
|
||||
// 2-pass to ensure resources are available where they are most needed
|
||||
std::vector<std::unique_ptr<vk::viewable_image>> resolve_target_cache;
|
||||
std::vector<vk::render_target*> deferred_spills;
|
||||
auto gc = vk::get_resource_manager();
|
||||
|
||||
// 1. Scan the list and spill resources that can be spilled immediately if requested. Also gather resources from those that don't need it.
|
||||
for (auto& surface : list)
|
||||
{
|
||||
auto& rtt = surface.second;
|
||||
@ -81,30 +113,52 @@ namespace vk
|
||||
if (rtt->resolve_surface || rtt->samples() == 1)
|
||||
{
|
||||
// Can spill immediately. Do it.
|
||||
rtt->spill(cmd, resolve_target_cache);
|
||||
ensure(rtt->spill(cmd, resolve_target_cache));
|
||||
any_released |= true;
|
||||
continue;
|
||||
}
|
||||
|
||||
deferred_spills.push_back(rtt.get());
|
||||
}
|
||||
|
||||
// 2. We should have enough discarded reusable memory for the second pass.
|
||||
for (auto& surface : deferred_spills)
|
||||
{
|
||||
surface->spill(cmd, resolve_target_cache);
|
||||
any_released |= true;
|
||||
}
|
||||
|
||||
// 3. Discard the now-useless resolve cache memory
|
||||
for (auto& data : resolve_target_cache)
|
||||
{
|
||||
gc->dispose(data);
|
||||
}
|
||||
};
|
||||
|
||||
// 1. Spill and strip any 'invalidated resources'. At this point it doesn't matter and we donate to the resolve cache which is a plus.
|
||||
for (auto& surface : invalidated_resources)
|
||||
{
|
||||
// Only spill anything with references. Other surfaces already marked for removal should be inevitably deleted when it is time to free_invalidated
|
||||
if (surface->has_refs() && (surface->resolve_surface || surface->samples() == 1))
|
||||
{
|
||||
ensure(surface->spill(cmd, resolve_target_cache));
|
||||
any_released |= true;
|
||||
}
|
||||
else if (surface->resolve_surface)
|
||||
{
|
||||
ensure(!surface->has_refs());
|
||||
resolve_target_cache.emplace_back(std::move(surface->resolve_surface));
|
||||
surface->msaa_flags |= rsx::surface_state_flags::require_resolve;
|
||||
any_released |= true;
|
||||
}
|
||||
else if (surface->has_refs())
|
||||
{
|
||||
deferred_spills.push_back(surface.get());
|
||||
}
|
||||
}
|
||||
|
||||
// 2. Scan the list and spill resources that can be spilled immediately if requested. Also gather resources from those that don't need it.
|
||||
relieve_memory_pressure(m_render_targets_storage);
|
||||
relieve_memory_pressure(m_depth_stencil_storage);
|
||||
|
||||
// 3. Write to system heap everything marked to spill
|
||||
for (auto& surface : deferred_spills)
|
||||
{
|
||||
any_released |= surface->spill(cmd, resolve_target_cache);
|
||||
}
|
||||
|
||||
// 4. Cleanup; removes all the resources used up here that are no longer needed for the moment
|
||||
for (auto& data : resolve_target_cache)
|
||||
{
|
||||
gc->dispose(data);
|
||||
}
|
||||
}
|
||||
|
||||
return any_released;
|
||||
@ -467,7 +521,7 @@ namespace vk
|
||||
return result;
|
||||
}
|
||||
|
||||
void render_target::spill(vk::command_buffer& cmd, std::vector<std::unique_ptr<vk::viewable_image>>& resolve_cache)
|
||||
bool render_target::spill(vk::command_buffer& cmd, std::vector<std::unique_ptr<vk::viewable_image>>& resolve_cache)
|
||||
{
|
||||
ensure(value);
|
||||
|
||||
@ -523,7 +577,7 @@ namespace vk
|
||||
// TODO: Spill to DMA buf
|
||||
// For now, just skip this one if we don't have the capacity for it
|
||||
rsx_log.warning("Could not spill memory due to resolve failure. Will ignore spilling for the moment.");
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
@ -559,6 +613,7 @@ namespace vk
|
||||
|
||||
ensure(!memory && !value && views.empty() && !resolve_surface);
|
||||
spill_request_tag = 0ull;
|
||||
return true;
|
||||
}
|
||||
|
||||
void render_target::unspill(vk::command_buffer& cmd)
|
||||
|
@ -67,7 +67,7 @@ namespace vk
|
||||
VkImageAspectFlags mask = VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT) override;
|
||||
|
||||
// Memory management
|
||||
void spill(vk::command_buffer& cmd, std::vector<std::unique_ptr<vk::viewable_image>>& resolve_cache);
|
||||
bool spill(vk::command_buffer& cmd, std::vector<std::unique_ptr<vk::viewable_image>>& resolve_cache);
|
||||
|
||||
// Synchronization
|
||||
void texture_barrier(vk::command_buffer& cmd);
|
||||
@ -408,7 +408,7 @@ namespace vk
|
||||
void destroy();
|
||||
bool spill_unused_memory();
|
||||
bool is_overallocated();
|
||||
bool can_collapse_surface(const std::unique_ptr<vk::render_target>& surface) override;
|
||||
bool can_collapse_surface(const std::unique_ptr<vk::render_target>& surface, rsx::problem_severity severity) override;
|
||||
bool handle_memory_pressure(vk::command_buffer& cmd, rsx::problem_severity severity) override;
|
||||
void free_invalidated(vk::command_buffer& cmd, rsx::problem_severity memory_pressure);
|
||||
};
|
||||
|
Loading…
Reference in New Issue
Block a user