Mirror of https://github.com/RPCS3/rpcs3.git (synced 2024-11-26 12:42:41 +01:00)
rsx: Trim the number of in-flight invalidated resources (temp cache)
- This drastically improves memory allocation behavior. Holding too many invalidated resources can lead to cascading overallocation, as old resources hold references to even older ones and nothing ever gets deleted.
commit 12f213ffad (parent 66e1cf96e2)
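To make the failure mode in the commit message concrete: each invalidated surface can keep references to the surfaces it superseded, so an unbroken chain pins every older allocation at once, and only a trim/collapse pass breaks the chain. Below is a minimal, self-contained C++ sketch of that effect; fake_surface and chain_bytes are hypothetical stand-ins, not the real RPCS3 classes, and the "collapse" is just a reference drop standing in for the real work.

#include <cstddef>
#include <cstdio>
#include <list>
#include <memory>

// Hypothetical stand-in for an invalidated surface: it may still reference
// the older surfaces whose contents it has not yet absorbed ("old_contents").
struct fake_surface
{
    std::size_t vram_bytes = 8ull * 1024 * 1024;            // pretend each surface owns 8 MiB
    std::list<std::shared_ptr<fake_surface>> old_contents;  // references to even older surfaces
};

// Memory pinned by one surface plus everything it still references.
static std::size_t chain_bytes(const std::shared_ptr<fake_surface>& s)
{
    std::size_t total = s->vram_bytes;
    for (const auto& older : s->old_contents)
        total += chain_bytes(older);
    return total;
}

int main()
{
    constexpr std::size_t max_invalidated_resources_count = 256; // same idea as the new cap

    std::list<std::shared_ptr<fake_surface>> invalidated_resources;
    std::shared_ptr<fake_surface> previous;

    // Simulate many frames that invalidate surfaces without ever resolving them.
    for (int frame = 0; frame < 1000; ++frame)
    {
        auto s = std::make_shared<fake_surface>();
        if (previous)
            s->old_contents.push_back(previous); // chain grows: new -> old -> older -> ...
        previous = s;
        invalidated_resources.push_back(s);

        // Cap-triggered trim, standing in for trim_invalidated_resources():
        // "collapsing" here just drops the back-references so older memory can die.
        if (invalidated_resources.size() > max_invalidated_resources_count)
        {
            for (auto& surface : invalidated_resources)
                surface->old_contents.clear();
        }
    }

    std::printf("deepest live chain pins ~%zu MiB\n",
                chain_bytes(invalidated_resources.back()) >> 20);
}

Without the cap-triggered trim, the chain from the newest surface back to the oldest would pin roughly 1000 x 8 MiB in this toy model; with it, only a couple of generations stay alive at any time.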
@@ -74,6 +74,7 @@ namespace rsx
     std::vector<surface_type> superseded_surfaces;

     std::list<surface_storage_type> invalidated_resources;
+    const u64 max_invalidated_resources_count = 256ull;
     u64 cache_tag = 1ull; // Use 1 as the start since 0 is default tag on new surfaces
     u64 write_tag = 1ull;
@@ -1361,7 +1362,25 @@ namespace rsx
         return true;
     }

-    virtual bool handle_memory_pressure(command_list_type cmd, problem_severity severity)
+    void trim_invalidated_resources(command_list_type cmd, problem_severity severity)
     {
+        // It is possible to have stale invalidated resources holding references to other invalidated resources.
+        // This can bloat the VRAM usage significantly especially if the references are never collapsed.
+        for (auto& surface : invalidated_resources)
+        {
+            if (!surface->has_refs() || surface->old_contents.empty())
+            {
+                continue;
+            }
+
+            if (can_collapse_surface(surface, severity))
+            {
+                surface->memory_barrier(cmd, rsx::surface_access::transfer_read);
+            }
+        }
+    }
+
+    void collapse_dirty_surfaces(command_list_type cmd, problem_severity severity)
+    {
         auto process_list_function = [&](surface_ranged_map& data, const utils::address_range& range)
         {
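As the comments in the loop above suggest, the collapse (the memory_barrier with transfer_read access) is what folds a surface's pending old_contents into the surface itself, after which the older surfaces are no longer referenced and can be released. The following is a rough conceptual model of that ownership effect only, with a hypothetical mock_surface type and a plain data copy in place of the real GPU-side transfer; it is not the actual memory_barrier implementation.

#include <cstdint>
#include <list>
#include <memory>
#include <vector>

// Hypothetical, heavily simplified model of a render target with pending
// "old contents". The real code performs GPU transfers; the point here is
// only the ownership effect of a collapse.
struct mock_surface
{
    std::vector<std::uint8_t> pixels;
    std::list<std::shared_ptr<mock_surface>> old_contents;

    void collapse()
    {
        for (auto& older : old_contents)
        {
            // Stand-in for the transfer: fold the older data into this surface.
            pixels.insert(pixels.end(), older->pixels.begin(), older->pixels.end());
        }
        // Dropping the references is what lets the older surfaces be destroyed.
        old_contents.clear();
    }
};

// Usage sketch: before the collapse the old surface is kept alive by the new
// one; afterwards its last owner is gone and its memory is freed.
int main()
{
    auto old_rtt = std::make_shared<mock_surface>();
    old_rtt->pixels.resize(4 * 1024 * 1024);

    auto new_rtt = std::make_shared<mock_surface>();
    new_rtt->old_contents.push_back(old_rtt);

    old_rtt.reset();      // external owner gone, but the chain still pins it
    new_rtt->collapse();  // now the old surface is actually released
}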
@@ -1390,14 +1409,54 @@ namespace rsx
             }
         };

+        process_list_function(m_render_targets_storage, m_render_targets_memory_range);
+        process_list_function(m_depth_stencil_storage, m_depth_stencil_memory_range);
+    }
+
+    virtual bool handle_memory_pressure(command_list_type cmd, problem_severity severity)
+    {
+        ensure(severity >= rsx::problem_severity::moderate);
         const auto old_usage = m_active_memory_used;

         // Try and find old surfaces to remove
-        process_list_function(m_render_targets_storage, m_render_targets_memory_range);
-        process_list_function(m_depth_stencil_storage, m_depth_stencil_memory_range);
+        collapse_dirty_surfaces(cmd, severity);
+
+        // Check invalidated resources as they can have long dependency chains
+        if (invalidated_resources.size() > max_invalidated_resources_count ||
+            severity >= rsx::problem_severity::severe)
+        {
+            trim_invalidated_resources(cmd, severity);
+        }

         return (m_active_memory_used < old_usage);
     }
+
+    void run_cleanup_internal(
+        command_list_type cmd,
+        rsx::problem_severity memory_pressure,
+        u32 max_surface_store_memory_mb,
+        std::function<void(command_list_type)> pre_task_callback)
+    {
+        if (check_memory_usage(max_surface_store_memory_mb * 0x100000))
+        {
+            pre_task_callback(cmd);
+
+            const auto severity = std::max(memory_pressure, rsx::problem_severity::moderate);
+            handle_memory_pressure(cmd, severity);
+        }
+        else if (invalidated_resources.size() > max_invalidated_resources_count)
+        {
+            pre_task_callback(cmd);
+
+            // Check invalidated resources as they can have long dependency chains
+            trim_invalidated_resources(cmd, memory_pressure);
+
+            if ((invalidated_resources.size() + 16u) > max_invalidated_resources_count)
+            {
+                // We didn't release enough resources, scan the active RTTs as well
+                collapse_dirty_surfaces(cmd, memory_pressure);
+            }
+        }
+    }
 };
 }
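run_cleanup_internal centralizes the two triggers that the GL and Vulkan backends previously implemented separately: an absolute memory budget for the surface store (MB converted to bytes via 0x100000) and the cap on in-flight invalidated resources, with roughly 16 entries of headroom before escalating to the active render targets. A hedged restatement of that decision flow as a standalone function follows; the names and the enum are illustrative, not the real interface.

#include <cstddef>
#include <cstdint>

// Illustrative only: mirrors the branch structure of run_cleanup_internal above
// with plain numbers instead of the real surface-store state.
enum class cleanup_action { nothing, full_pressure_pass, trim_only, trim_then_collapse };

cleanup_action decide_cleanup(std::size_t surface_store_bytes,
                              std::size_t invalidated_count,
                              std::size_t invalidated_count_after_trim,
                              std::uint32_t max_surface_store_memory_mb,
                              std::size_t max_invalidated_resources_count = 256)
{
    // Trigger 1: the surface store exceeds its byte budget (MB * 0x100000).
    if (surface_store_bytes > std::size_t{max_surface_store_memory_mb} * 0x100000)
    {
        return cleanup_action::full_pressure_pass; // handle_memory_pressure at >= moderate severity
    }

    // Trigger 2: too many invalidated resources are still in flight.
    if (invalidated_count > max_invalidated_resources_count)
    {
        // In the helper above the trim runs first; if the list is still within
        // ~16 entries of the cap afterwards, the active RTTs are scanned as well.
        if (invalidated_count_after_trim + 16u > max_invalidated_resources_count)
        {
            return cleanup_action::trim_then_collapse;
        }
        return cleanup_action::trim_only;
    }

    return cleanup_action::nothing;
}

The backend hunks below call the real helper with a 256 MiB budget (GL) and a 300 MiB budget (Vulkan), plus a pre_task_callback that lets the Vulkan path make sure its command buffer is recording before any cleanup work is emitted.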
@@ -401,7 +401,7 @@ void GLGSRender::flip(const rsx::display_flip_info_t& info)
     m_gl_texture_cache.on_frame_end();
     m_vertex_cache->purge();

-    auto removed_textures = m_rtts.free_invalidated(cmd);
+    auto removed_textures = m_rtts.trim(cmd);
     m_framebuffer_cache.remove_if([&](auto& fbo)
     {
         if (fbo.unused_check_count() >= 2) return true; // Remove if stale
@@ -390,13 +390,9 @@ struct gl_render_targets : public rsx::surface_store<gl_render_target_traits>
         invalidated_resources.clear();
     }

-    std::vector<GLuint> free_invalidated(gl::command_context& cmd)
+    std::vector<GLuint> trim(gl::command_context& cmd)
     {
-        // Do not allow more than 256M of RSX memory to be used by RTTs
-        if (check_memory_usage(256 * 0x100000))
-        {
-            handle_memory_pressure(cmd, rsx::problem_severity::moderate);
-        }
+        run_cleanup_internal(cmd, rsx::problem_severity::moderate, 256, [](gl::command_context&) {});

         std::vector<GLuint> removed;
         invalidated_resources.remove_if([&](auto &rtt)
@@ -1151,7 +1151,7 @@ bool VKGSRender::on_vram_exhausted(rsx::problem_severity severity)
     if (m_rtts.handle_memory_pressure(*m_current_command_buffer, severity))
     {
         surface_cache_relieved = true;
-        m_rtts.free_invalidated(*m_current_command_buffer, severity);
+        m_rtts.trim(*m_current_command_buffer, severity);
     }

     const bool any_cache_relieved = (texture_cache_relieved || surface_cache_relieved);
@@ -125,7 +125,7 @@ void VKGSRender::advance_queued_frames()
     vk::vmm_check_memory_usage();

     // m_rtts storage is double buffered and should be safe to tag on frame boundary
-    m_rtts.free_invalidated(*m_current_command_buffer, vk::vmm_determine_memory_load_severity());
+    m_rtts.trim(*m_current_command_buffer, vk::vmm_determine_memory_load_severity());

     // Texture cache is also double buffered to prevent use-after-free
     m_texture_cache.on_frame_end();
@@ -168,20 +168,15 @@ namespace vk
         return any_released;
     }

-    void surface_cache::free_invalidated(vk::command_buffer& cmd, rsx::problem_severity memory_pressure)
+    void surface_cache::trim(vk::command_buffer& cmd, rsx::problem_severity memory_pressure)
     {
-        // Do not allow more than 300M of RSX memory to be used by RTTs.
-        // The actual boundary is 256M but we need to give some overallocation for performance reasons.
-        if (check_memory_usage(300 * 0x100000))
+        run_cleanup_internal(cmd, rsx::problem_severity::moderate, 300, [](vk::command_buffer& cmd)
         {
             if (!cmd.is_recording())
             {
                 cmd.begin();
             }
-
-            const auto severity = std::max(memory_pressure, rsx::problem_severity::moderate);
-            handle_memory_pressure(cmd, severity);
-        }
+        });

         const u64 last_finished_frame = vk::get_last_completed_frame_id();
         invalidated_resources.remove_if([&](std::unique_ptr<vk::render_target>& rtt)
@@ -195,13 +190,10 @@ namespace vk
             return false;
         }

-        if (memory_pressure >= rsx::problem_severity::severe)
+        if (rtt->resolve_surface && memory_pressure >= rsx::problem_severity::moderate)
         {
-            if (rtt->resolve_surface)
-            {
-                // We do not need to keep resolve targets around if things are bad.
-                vk::get_resource_manager()->dispose(rtt->resolve_surface);
-            }
+            // We do not need to keep resolve targets around.
+            vk::get_resource_manager()->dispose(rtt->resolve_surface);
         }

         if (rtt->frame_tag >= last_finished_frame)
@@ -642,7 +642,7 @@ namespace vk
     bool is_overallocated();
     bool can_collapse_surface(const std::unique_ptr<vk::render_target>& surface, rsx::problem_severity severity) override;
     bool handle_memory_pressure(vk::command_buffer& cmd, rsx::problem_severity severity) override;
-    void free_invalidated(vk::command_buffer& cmd, rsx::problem_severity memory_pressure);
+    void trim(vk::command_buffer& cmd, rsx::problem_severity memory_pressure);
 };
 }