
rsx: Fix sampler descriptor updates for framebuffer resources

- Each descriptor now manages its own lifetime instead of relying on a global timestamp check
- Fixes a situation where the same object could remain active for a long time without being updated
kd-11 2020-12-14 20:24:28 +03:00 committed by kd-11
parent e82bef425f
commit 0ef5743261
9 changed files with 97 additions and 47 deletions
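
In brief, the change replaces one renderer-wide "surface store tag" with a tag snapshot held by each sampled-image descriptor. Below is a minimal, hedged sketch of that idea with simplified stand-in types (the real ones are the sampled-image descriptor and surface store shown in the hunks that follow; the real tag comes from rsx::get_shared_tag()):

    #include <cstdint>
    #include <cstdio>

    // Stand-in for the surface store: bumping cache_tag marks a new
    // "generation" of framebuffer memory.
    struct surface_cache_t { std::uint64_t cache_tag = 1; };

    // Stand-in for a descriptor that samples a framebuffer resource.
    struct descriptor_t
    {
        std::uint64_t surface_cache_tag = 0; // snapshot taken when the descriptor was built

        void on_upload(const surface_cache_t& cache) { surface_cache_tag = cache.cache_tag; }

        // Expired only when the cache generation has moved past our snapshot.
        // The real is_expired() additionally tries a "fast sync": if the
        // referenced surface still exists at ref_address, it just refreshes
        // the snapshot instead of forcing a re-upload.
        bool is_expired(const surface_cache_t& cache) const
        {
            return surface_cache_tag != cache.cache_tag;
        }
    };

    int main()
    {
        surface_cache_t cache;
        descriptor_t desc;
        desc.on_upload(cache);
        std::printf("%d\n", desc.is_expired(cache)); // 0: still current
        ++cache.cache_tag;                           // surface store layout changed
        std::printf("%d\n", desc.is_expired(cache)); // 1: this descriptor must re-validate
    }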


@@ -69,6 +69,8 @@ namespace rsx
rsx::texture_dimension_extended image_type = texture_dimension_extended::texture_dimension_2d;
rsx::format_class format_class = RSX_FORMAT_CLASS_UNDEFINED;
bool is_cyclic_reference = false;
u32 ref_address = 0;
u64 surface_cache_tag = 0;
f32 scale_x = 1.f;
f32 scale_y = 1.f;


@@ -796,8 +796,6 @@ namespace rsx
{
invalidate(It->second);
m_render_targets_storage.erase(It);
cache_tag = rsx::get_shared_tag();
return;
}
}
@@ -808,8 +806,6 @@ namespace rsx
{
invalidate(It->second);
m_depth_stencil_storage.erase(It);
cache_tag = rsx::get_shared_tag();
return;
}
}
@@ -1033,11 +1029,6 @@ namespace rsx
}
}
void notify_memory_structure_changed()
{
cache_tag = rsx::get_shared_tag();
}
void invalidate_all()
{
// Unbind and invalidate all resources


@@ -263,6 +263,37 @@ namespace rsx
{
return (image_handle || external_subresource_desc.op != deferred_request_command::nop);
}
/**
* Returns a boolean true/false if the descriptor is expired
* Optionally returns a second variable that contains the surface reference.
* The surface reference can be used to insert a texture barrier or inject a deferred resource
*/
template <typename surface_store_type>
std::pair<bool, typename surface_store_type::surface_type> is_expired(surface_store_type& surface_cache)
{
if (upload_context != rsx::texture_upload_context::framebuffer_storage ||
surface_cache_tag == surface_cache.cache_tag)
{
return { false, nullptr };
}
// Expired, but may still be valid. Check if the texture is still accessible
auto ref_image = image_handle ? image_handle->image() : external_subresource_desc.external_handle;
if (ref_image)
{
if (auto as_rtt = dynamic_cast<typename surface_store_type::surface_type>(ref_image);
as_rtt && as_rtt == surface_cache.get_surface_at(ref_address))
{
// Fast sync
surface_cache_tag = surface_cache.cache_tag;
is_cyclic_reference = surface_cache.address_is_bound(ref_address);
return { false, as_rtt };
}
}
return { true, nullptr };
}
};
@@ -322,7 +353,7 @@ namespace rsx
const std::vector<rsx::subresource_layout>& subresource_layout, rsx::texture_dimension_extended type, bool swizzled) = 0;
virtual section_storage_type* create_nul_section(commandbuffer_type&, const address_range &rsx_range, bool memory_load) = 0;
virtual void enforce_surface_creation_type(section_storage_type& section, u32 gcm_format, texture_create_flags expected) = 0;
virtual void insert_texture_barrier(commandbuffer_type&, image_storage_type* tex) = 0;
virtual void insert_texture_barrier(commandbuffer_type&, image_storage_type* tex, bool strong_ordering = true) = 0;
virtual image_view_type generate_cubemap_from_images(commandbuffer_type&, u32 gcm_format, u16 size, const std::vector<copy_region_descriptor>& sources, const texture_channel_remap_t& remap_vector) = 0;
virtual image_view_type generate_3d_from_2d_images(commandbuffer_type&, u32 gcm_format, u16 width, u16 height, u16 depth, const std::vector<copy_region_descriptor>& sources, const texture_channel_remap_t& remap_vector) = 0;
virtual image_view_type generate_atlas_from_images(commandbuffer_type&, u32 gcm_format, u16 width, u16 height, const std::vector<copy_region_descriptor>& sections_to_copy, const texture_channel_remap_t& remap_vector) = 0;
@@ -1743,6 +1774,39 @@ namespace rsx
return {};
}
template <typename surface_store_type>
bool test_if_descriptor_expired(commandbuffer_type& cmd, surface_store_type& surface_cache, sampled_image_descriptor* descriptor)
{
auto result = descriptor->is_expired(surface_cache);
if (result.second && descriptor->is_cyclic_reference)
{
/* NOTE: All cyclic descriptors updated via fast update must have a barrier check
* It is possible for the following sequence of events to break common-sense tests
* 1. Cyclic ref occurs normally in upload_texture
* 2. Surface is swapped out, but texture is not updated
* 3. Surface is swapped back in. Surface cache resets layout to optimal rasterization layout
* 4. During bind, the surface is converted to shader layout because it is not in GENERAL layout
*/
if (!g_cfg.video.strict_rendering_mode)
{
insert_texture_barrier(cmd, result.second, false);
}
else if (descriptor->image_handle)
{
descriptor->external_subresource_desc.external_handle = descriptor->image_handle->image();
descriptor->external_subresource_desc.op = deferred_request_command::copy_image_dynamic;
descriptor->external_subresource_desc.do_not_cache = true;
}
else
{
// Force reupload
return true;
}
}
return result.first;
}
template <typename RsxTextureType, typename surface_store_type, typename ...Args>
sampled_image_descriptor upload_texture(commandbuffer_type& cmd, RsxTextureType& tex, surface_store_type& m_rtts, Args&&... extras)
{
@@ -1847,6 +1911,9 @@ namespace rsx
result.external_subresource_desc.cache_range = lookup_range;
}
result.ref_address = attributes.address;
result.surface_cache_tag = m_rtts.cache_tag;
if (subsurface_count == 1)
{
return result;
@@ -2727,7 +2794,6 @@ namespace rsx
// NOTE: This doesn't work very well in case of Cell access
// Need to lock the affected memory range and actually attach this subres to a locked_region
dst_subres.surface->on_write_copy(rsx::get_shared_tag(), false, raster_type);
m_rtts.notify_memory_structure_changed();
// Reset this object's synchronization status if it is locked
lock.upgrade();
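
Before the renderer hunks, it may help to isolate the refresh decision that test_if_descriptor_expired (above) makes for a still-valid cyclic descriptor. A hedged, self-contained sketch of just that branch logic (strict_mode stands in for g_cfg.video.strict_rendering_mode; the enum is illustrative, not an rpcs3 type):

    #include <cstdio>

    enum class action { texture_barrier, deferred_dynamic_copy, force_reupload };

    action refresh_cyclic_descriptor(bool strict_mode, bool has_image_handle)
    {
        if (!strict_mode)
            return action::texture_barrier;        // cheap: weak barrier on the live surface
        if (has_image_handle)
            return action::deferred_dynamic_copy;  // re-copy the surface, marked do_not_cache
        return action::force_reupload;             // no usable handle: rebuild the descriptor
    }

    int main()
    {
        std::printf("%d\n", static_cast<int>(refresh_cyclic_descriptor(false, true)));  // barrier
        std::printf("%d\n", static_cast<int>(refresh_cyclic_descriptor(true,  true)));  // dynamic copy
        std::printf("%d\n", static_cast<int>(refresh_cyclic_descriptor(true,  false))); // reupload
    }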


@@ -274,16 +274,8 @@ void GLGSRender::load_texture_env()
{
// Load textures
gl::command_context cmd{ gl_state };
bool update_framebuffer_sourced = false;
std::lock_guard lock(m_sampler_mutex);
if (surface_store_tag != m_rtts.cache_tag)
{
update_framebuffer_sourced = true;
surface_store_tag = m_rtts.cache_tag;
}
for (u32 textures_ref = current_fp_metadata.referenced_textures_mask, i = 0; textures_ref; textures_ref >>= 1, ++i)
{
if (!(textures_ref & 1))
@@ -292,11 +284,9 @@ void GLGSRender::load_texture_env()
if (!fs_sampler_state[i])
fs_sampler_state[i] = std::make_unique<gl::texture_cache::sampled_image_descriptor>();
if (m_samplers_dirty || m_textures_dirty[i] ||
(update_framebuffer_sourced && fs_sampler_state[i]->upload_context == rsx::texture_upload_context::framebuffer_storage))
{
auto sampler_state = static_cast<gl::texture_cache::sampled_image_descriptor*>(fs_sampler_state[i].get());
if (m_samplers_dirty || m_textures_dirty[i] || m_gl_texture_cache.test_if_descriptor_expired(cmd, m_rtts, sampler_state))
{
if (rsx::method_registers.fragment_textures[i].enabled())
{
*sampler_state = m_gl_texture_cache.upload_texture(cmd, rsx::method_registers.fragment_textures[i], m_rtts);
@@ -321,11 +311,9 @@ void GLGSRender::load_texture_env()
if (!vs_sampler_state[i])
vs_sampler_state[i] = std::make_unique<gl::texture_cache::sampled_image_descriptor>();
if (m_samplers_dirty || m_vertex_textures_dirty[i] ||
(update_framebuffer_sourced && vs_sampler_state[i]->upload_context == rsx::texture_upload_context::framebuffer_storage))
{
auto sampler_state = static_cast<gl::texture_cache::sampled_image_descriptor*>(vs_sampler_state[i].get());
if (m_samplers_dirty || m_vertex_textures_dirty[i] || m_gl_texture_cache.test_if_descriptor_expired(cmd, m_rtts, sampler_state))
{
if (rsx::method_registers.vertex_textures[i].enabled())
{
*sampler_state = m_gl_texture_cache.upload_texture(cmd, rsx::method_registers.vertex_textures[i], m_rtts);


@@ -132,7 +132,6 @@ private:
gl::vao m_vao;
shared_mutex m_sampler_mutex;
u64 surface_store_tag = 0;
atomic_t<bool> m_samplers_dirty = {true};
std::array<std::unique_ptr<rsx::sampled_image_descriptor_base>, rsx::limits::fragment_textures_count> fs_sampler_state = {};
std::array<std::unique_ptr<rsx::sampled_image_descriptor_base>, rsx::limits::vertex_textures_count> vs_sampler_state = {};


@@ -964,7 +964,7 @@ namespace gl
section.set_view_flags(flags);
}
void insert_texture_barrier(gl::command_context&, gl::texture*) override
void insert_texture_barrier(gl::command_context&, gl::texture*, bool) override
{
auto &caps = gl::get_driver_caps();


@@ -141,18 +141,21 @@ void VKGSRender::update_draw_state()
void VKGSRender::load_texture_env()
{
//Load textures
bool update_framebuffer_sourced = false;
// Load textures
bool check_for_cyclic_refs = false;
auto check_surface_cache_sampler = [&](auto descriptor)
{
if (!m_texture_cache.test_if_descriptor_expired(*m_current_command_buffer, m_rtts, descriptor))
{
check_for_cyclic_refs |= descriptor->is_cyclic_reference;
return true;
}
return false;
};
std::lock_guard lock(m_sampler_mutex);
if (surface_store_tag != m_rtts.cache_tag) [[unlikely]]
{
update_framebuffer_sourced = true;
surface_store_tag = m_rtts.cache_tag;
}
for (u32 textures_ref = current_fp_metadata.referenced_textures_mask, i = 0; textures_ref; textures_ref >>= 1, ++i)
{
if (!(textures_ref & 1))
@@ -161,11 +164,9 @@ void VKGSRender::load_texture_env()
if (!fs_sampler_state[i])
fs_sampler_state[i] = std::make_unique<vk::texture_cache::sampled_image_descriptor>();
if (m_samplers_dirty || m_textures_dirty[i] ||
(update_framebuffer_sourced && fs_sampler_state[i]->upload_context == rsx::texture_upload_context::framebuffer_storage))
{
auto sampler_state = static_cast<vk::texture_cache::sampled_image_descriptor*>(fs_sampler_state[i].get());
if (m_samplers_dirty || m_textures_dirty[i] || !check_surface_cache_sampler(sampler_state))
{
if (rsx::method_registers.fragment_textures[i].enabled())
{
check_heap_status(VK_HEAP_CHECK_TEXTURE_UPLOAD_STORAGE);
@@ -301,11 +302,9 @@ void VKGSRender::load_texture_env()
if (!vs_sampler_state[i])
vs_sampler_state[i] = std::make_unique<vk::texture_cache::sampled_image_descriptor>();
if (m_samplers_dirty || m_vertex_textures_dirty[i] ||
(update_framebuffer_sourced && vs_sampler_state[i]->upload_context == rsx::texture_upload_context::framebuffer_storage))
{
auto sampler_state = static_cast<vk::texture_cache::sampled_image_descriptor*>(vs_sampler_state[i].get());
if (m_samplers_dirty || m_vertex_textures_dirty[i] || !check_surface_cache_sampler(sampler_state))
{
if (rsx::method_registers.vertex_textures[i].enabled())
{
check_heap_status(VK_HEAP_CHECK_TEXTURE_UPLOAD_STORAGE);


@@ -371,7 +371,6 @@ private:
u64 m_cond_render_sync_tag = 0;
shared_mutex m_sampler_mutex;
u64 surface_store_tag = 0;
atomic_t<bool> m_samplers_dirty = { true };
std::unique_ptr<vk::sampler> m_stencil_mirror_sampler;
std::array<std::unique_ptr<rsx::sampled_image_descriptor_base>, rsx::limits::fragment_textures_count> fs_sampler_state = {};


@@ -1300,8 +1300,14 @@ namespace vk
section.set_view_flags(expected_flags);
}
void insert_texture_barrier(vk::command_buffer& cmd, vk::image* tex) override
void insert_texture_barrier(vk::command_buffer& cmd, vk::image* tex, bool strong_ordering) override
{
if (!strong_ordering && tex->current_layout == VK_IMAGE_LAYOUT_GENERAL)
{
// A previous barrier already exists, do nothing
return;
}
vk::as_rtt(tex)->texture_barrier(cmd);
}
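
The Vulkan override above gains a fast path: when ordering requirements are weak and the image is already in GENERAL layout (meaning a previous barrier covered it), the new barrier is elided. A self-contained sketch of just that decision, with simplified stand-in types rather than real Vulkan or rpcs3 ones:

    #include <cstdio>

    enum class layout { general, shader_read, color_attachment };

    struct image { layout current_layout = layout::color_attachment; };

    void texture_barrier(image& img)
    {
        img.current_layout = layout::general; // the real code records a pipeline barrier here
        std::puts("barrier inserted");
    }

    void insert_texture_barrier(image& img, bool strong_ordering)
    {
        if (!strong_ordering && img.current_layout == layout::general)
        {
            // A previous barrier already put the image in GENERAL layout;
            // a weakly-ordered request can reuse it and skip a new barrier.
            return;
        }
        texture_barrier(img);
    }

    int main()
    {
        image img;
        insert_texture_barrier(img, false); // first call: inserts a barrier
        insert_texture_barrier(img, false); // weak ordering, already GENERAL: elided
        insert_texture_barrier(img, true);  // strong ordering: always barriers
    }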