
rsx: Surface cache restructuring

- Further improve aliased data preservation by unconditionally scanning the secondary surface pool for overlaps.
  It is possible for cache aliasing to occur when doing a memory split.
- Also sets up for the RCB/RDB implementation
kd-11 2019-08-06 14:46:21 +03:00 committed by kd-11
parent 5817e2a359
commit dfe709d464
11 changed files with 256 additions and 186 deletions
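
For context, the "unconditional scanning" in the first bullet refers to the change in surface_store.h below: the secondary pool (depth when allocating color, and vice versa) is now always searched for an overlapping entry, and the overlap holding the newest data becomes the inherit source. A minimal sketch of that policy with hypothetical, simplified types (surface_t, pool_t, and preserve_aliased_overlap are illustrative, not the actual rsx::surface_store internals):

#include <cstdint>
#include <map>
#include <memory>
#include <vector>

using u32 = std::uint32_t;
using u64 = std::uint64_t;

struct surface_t { u64 last_use_tag = 0; };               // stand-in for render_target_descriptor
using pool_t = std::map<u32, std::unique_ptr<surface_t>>; // stand-in for one surface pool

// Always scan the other pool; prefer whichever overlap carries the newest data.
surface_t* preserve_aliased_overlap(pool_t& secondary,
                                    std::vector<std::unique_ptr<surface_t>>& invalidated,
                                    u32 address, surface_t* old_surface)
{
    const auto it = secondary.find(address);
    if (it == secondary.end())
        return old_surface;

    surface_t* aliased = it->second.get();
    if (!old_surface || old_surface->last_use_tag < aliased->last_use_tag)
        old_surface = aliased; // newest contents win

    invalidated.push_back(std::move(it->second)); // keep the object alive for inheritance
    secondary.erase(it);
    return old_surface;
}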


@@ -896,4 +896,57 @@ u32 get_remap_encoding(const std::pair<std::array<u8, 4>, std::array<u8, 4>>& re
encode |= (remap.second[2] << 12);
encode |= (remap.second[3] << 14);
return encode;
}
std::pair<u32, bool> get_compatible_gcm_format(rsx::surface_color_format format)
{
switch (format)
{
case rsx::surface_color_format::r5g6b5:
return{ CELL_GCM_TEXTURE_R5G6B5, false };
case rsx::surface_color_format::x8r8g8b8_z8r8g8b8:
case rsx::surface_color_format::x8r8g8b8_o8r8g8b8:
case rsx::surface_color_format::a8r8g8b8:
return{ CELL_GCM_TEXTURE_A8R8G8B8, true }; //verified
case rsx::surface_color_format::x8b8g8r8_o8b8g8r8:
case rsx::surface_color_format::x8b8g8r8_z8b8g8r8:
case rsx::surface_color_format::a8b8g8r8:
return{ CELL_GCM_TEXTURE_A8R8G8B8, false };
case rsx::surface_color_format::w16z16y16x16:
return{ CELL_GCM_TEXTURE_W16_Z16_Y16_X16_FLOAT, true };
case rsx::surface_color_format::w32z32y32x32:
return{ CELL_GCM_TEXTURE_W32_Z32_Y32_X32_FLOAT, true };
case rsx::surface_color_format::x1r5g5b5_o1r5g5b5:
case rsx::surface_color_format::x1r5g5b5_z1r5g5b5:
return{ CELL_GCM_TEXTURE_A1R5G5B5, false };
case rsx::surface_color_format::b8:
return{ CELL_GCM_TEXTURE_B8, false };
case rsx::surface_color_format::g8b8:
return{ CELL_GCM_TEXTURE_G8B8, true };
case rsx::surface_color_format::x32:
return{ CELL_GCM_TEXTURE_X32_FLOAT, true }; //verified
default:
fmt::throw_exception("Unhandled surface format 0x%x", (u32)format);
}
}
std::pair<u32, bool> get_compatible_gcm_format(rsx::surface_depth_format format)
{
switch (format)
{
case rsx::surface_depth_format::z16:
return{ CELL_GCM_TEXTURE_DEPTH16, true };
case rsx::surface_depth_format::z24s8:
return{ CELL_GCM_TEXTURE_DEPTH24_D8, true };
default:
ASSUME(0);
}
}


@@ -146,3 +146,9 @@ u32 get_format_packed_pitch(u32 format, u16 width, bool border = false, bool swi
* Reverse encoding
*/
u32 get_remap_encoding(const std::pair<std::array<u8, 4>, std::array<u8, 4>>& remap);
/**
* Get GCM texel layout. Returns <format, byteswapped>
*/
std::pair<u32, bool> get_compatible_gcm_format(rsx::surface_color_format format);
std::pair<u32, bool> get_compatible_gcm_format(rsx::surface_depth_format format);
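
A hypothetical call site showing how the returned <format, byteswapped> pair is consumed (it mirrors the usage added to VKGSRender::prepare_rtts later in this commit; surface is assumed to be a bound render target):

auto info = get_compatible_gcm_format(surface->get_surface_color_format());
const u32 gcm_format  = info.first;  // e.g. CELL_GCM_TEXTURE_A8R8G8B8
const bool swap_bytes = info.second; // whether texel data must be byteswapped on upload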


@@ -311,7 +311,7 @@ namespace rsx
if (ignore) continue;
this_address = surface->memory_tag_samples[0].first;
this_address = surface->base_addr;
verify(HERE), this_address;
}
@@ -363,6 +363,7 @@ namespace rsx
surface_storage_type new_surface_storage;
surface_type old_surface = nullptr;
surface_type new_surface = nullptr;
bool do_intersection_test = true;
bool store = true;
address_range *storage_bounds;
@@ -404,10 +405,13 @@
{
// Preserve memory outside the area to be inherited if needed
split_surface_region<depth>(command_list, address, Traits::get(surface), (u16)width, (u16)height, bpp, antialias);
old_surface = Traits::get(surface);
}
old_surface = Traits::get(surface);
// This will be unconditionally moved to the invalidated list shortly
Traits::notify_surface_invalidated(surface);
old_surface_storage = std::move(surface);
primary_storage->erase(It);
}
}
@@ -428,10 +432,9 @@ namespace rsx
new_surface_storage = std::move(surface);
Traits::notify_surface_reused(new_surface_storage);
if (old_surface)
if (old_surface_storage)
{
// Exchange this surface with the invalidated one
Traits::notify_surface_invalidated(old_surface_storage);
surface = std::move(old_surface_storage);
}
else
@@ -449,10 +452,9 @@ namespace rsx
}
// Check for stale storage
if (old_surface != nullptr && new_surface == nullptr)
if (old_surface_storage)
{
// This was already determined to be invalid and is excluded from testing above
Traits::notify_surface_invalidated(old_surface_storage);
invalidated_resources.push_back(std::move(old_surface_storage));
}
@@ -463,36 +465,46 @@ namespace rsx
new_surface = Traits::get(new_surface_storage);
}
if (!old_surface)
// Remove and preserve if possible any overlapping/replaced surface from the other pool
auto aliased_surface = secondary_storage->find(address);
if (aliased_surface != secondary_storage->end())
{
// Remove and preserve if possible any overlapping/replaced surface from the other pool
auto aliased_surface = secondary_storage->find(address);
if (aliased_surface != secondary_storage->end())
if (Traits::surface_is_pitch_compatible(aliased_surface->second, pitch))
{
if (Traits::surface_is_pitch_compatible(aliased_surface->second, pitch))
{
old_surface = Traits::get(aliased_surface->second);
split_surface_region<!depth>(command_list, address, old_surface, (u16)width, (u16)height, bpp, antialias);
}
auto surface = Traits::get(aliased_surface->second);
split_surface_region<!depth>(command_list, address, surface, (u16)width, (u16)height, bpp, antialias);
Traits::notify_surface_invalidated(aliased_surface->second);
invalidated_resources.push_back(std::move(aliased_surface->second));
secondary_storage->erase(aliased_surface);
if (!old_surface || old_surface->last_use_tag < surface->last_use_tag)
{
// TODO: This can leak data outside inherited bounds
old_surface = surface;
}
}
Traits::notify_surface_invalidated(aliased_surface->second);
invalidated_resources.push_back(std::move(aliased_surface->second));
secondary_storage->erase(aliased_surface);
}
bool do_intersection_test = true;
// Check if old_surface is 'new' and hopefully avoid intersection
if (old_surface && old_surface->last_use_tag >= write_tag)
if (old_surface)
{
const auto new_area = new_surface->get_normalized_memory_area();
const auto old_area = old_surface->get_normalized_memory_area();
if (new_area.x2 <= old_area.x2 && new_area.y2 <= old_area.y2)
if (old_surface->last_use_tag < new_surface->last_use_tag)
{
do_intersection_test = false;
new_surface->set_old_contents(old_surface);
// Can happen when aliasing occurs due to a memory split; unlikely, but possible in theory
old_surface = nullptr;
}
else if (old_surface->last_use_tag >= write_tag)
{
const auto new_area = new_surface->get_normalized_memory_area();
const auto old_area = old_surface->get_normalized_memory_area();
if (new_area.x2 <= old_area.x2 && new_area.y2 <= old_area.y2)
{
do_intersection_test = false;
new_surface->set_old_contents(old_surface);
}
}
}
@@ -507,7 +519,7 @@ namespace rsx
(*primary_storage)[address] = std::move(new_surface_storage);
}
verify(HERE), new_surface->get_spp() == get_format_sample_count(antialias);
verify(HERE), !old_surface_storage, new_surface->get_spp() == get_format_sample_count(antialias);
return new_surface;
}
@@ -831,7 +843,7 @@ namespace rsx
return result;
}
void on_write(u32 address = 0)
void on_write(bool color, bool z, u32 address = 0)
{
if (!address)
{
@@ -839,14 +851,17 @@
{
if (m_invalidate_on_write)
{
for (int i = m_bound_render_targets_config.first, count = 0;
count < m_bound_render_targets_config.second;
++i, ++count)
if (color)
{
m_bound_render_targets[i].second->on_invalidate_children();
for (int i = m_bound_render_targets_config.first, count = 0;
count < m_bound_render_targets_config.second;
++i, ++count)
{
m_bound_render_targets[i].second->on_invalidate_children();
}
}
if (m_bound_depth_stencil.first)
if (z && m_bound_depth_stencil.first)
{
m_bound_depth_stencil.second->on_invalidate_children();
}
@@ -860,33 +875,39 @@ namespace rsx
}
// Tag all available surfaces
for (int i = m_bound_render_targets_config.first, count = 0;
count < m_bound_render_targets_config.second;
++i, ++count)
if (color)
{
m_bound_render_targets[i].second->on_write(write_tag);
for (int i = m_bound_render_targets_config.first, count = 0;
count < m_bound_render_targets_config.second;
++i, ++count)
{
m_bound_render_targets[i].second->on_write(write_tag);
}
}
if (m_bound_depth_stencil.first)
if (z && m_bound_depth_stencil.first)
{
m_bound_depth_stencil.second->on_write(write_tag);
}
}
else
{
for (int i = m_bound_render_targets_config.first, count = 0;
count < m_bound_render_targets_config.second;
++i, ++count)
if (color)
{
if (m_bound_render_targets[i].first != address)
for (int i = m_bound_render_targets_config.first, count = 0;
count < m_bound_render_targets_config.second;
++i, ++count)
{
continue;
}
if (m_bound_render_targets[i].first != address)
{
continue;
}
m_bound_render_targets[i].second->on_write(write_tag);
m_bound_render_targets[i].second->on_write(write_tag);
}
}
if (m_bound_depth_stencil.first == address)
if (z && m_bound_depth_stencil.first == address)
{
m_bound_depth_stencil.second->on_write(write_tag);
}


@@ -6,6 +6,8 @@
#include "TextureUtils.h"
#include "../rsx_utils.h"
#define ENABLE_SURFACE_CACHE_DEBUG 0
namespace rsx
{
enum surface_state_flags : u32
@@ -128,7 +130,13 @@ namespace rsx
struct render_target_descriptor
{
u64 last_use_tag = 0; // tag indicating when this block was last confirmed to have been written to
std::array<std::pair<u32, u64>, 5> memory_tag_samples;
u64 base_addr = 0;
#if (ENABLE_SURFACE_CACHE_DEBUG)
u64 memory_hash = 0;
#else
std::array<std::pair<u32, u64>, 3> memory_tag_samples;
#endif
std::vector<deferred_clipped_region<image_storage_type>> old_contents;
@@ -286,27 +294,102 @@ namespace rsx
return (state_flags != rsx::surface_state_flags::ready) || !old_contents.empty();
}
bool test() const
#if (ENABLE_SURFACE_CACHE_DEBUG)
u64 hash_block() const
{
if (dirty())
const auto padding = (rsx_pitch - native_pitch) / 8;
const auto row_length = (native_pitch) / 8;
auto num_rows = (surface_height * samples_y);
auto ptr = reinterpret_cast<u64*>(vm::g_sudo_addr + base_addr);
auto col = row_length;
u64 result = 0;
while (num_rows--)
{
// TODO
// Should RCB or mem-sync (inherit previous mem) to init memory
LOG_TODO(RSX, "Resource used before memory initialization");
while (col--)
{
result ^= *ptr++;
}
ptr += padding;
col = row_length;
}
// Tags are tested in an X pattern
for (const auto &tag : memory_tag_samples)
{
if (!tag.first)
break;
return result;
}
if (tag.second != *reinterpret_cast<u64*>(vm::g_sudo_addr + tag.first))
void queue_tag(u32 address)
{
base_addr = address;
}
void sync_tag()
{
memory_hash = hash_block();
}
void shuffle_tag()
{
memory_hash = ~memory_hash;
}
bool test() const
{
return hash_block() == memory_hash;
}
#else
void queue_tag(u32 address)
{
base_addr = address;
const u32 size_x = (native_pitch > 8)? (native_pitch - 8) : 0u;
const u32 size_y = u32(surface_height * samples_y) - 1u;
const position2u samples[] =
{
// NOTE: Sorted by probability to catch dirty flag
{0, 0},
{size_x, size_y},
{size_x / 2, size_y / 2},
// Auxiliary, highly unlikely to ever catch anything
// NOTE: Currently unused as length of samples is truncated to 3
{size_x, 0},
{0, size_y},
};
for (int n = 0; n < memory_tag_samples.size(); ++n)
{
const auto sample_offset = (samples[n].y * rsx_pitch) + samples[n].x;
memory_tag_samples[n].first = (sample_offset + base_addr);
}
}
void sync_tag()
{
for (auto &e : memory_tag_samples)
{
e.second = *reinterpret_cast<u64*>(vm::g_sudo_addr + e.first);
}
}
void shuffle_tag()
{
// Corrupt the stored sample so the next test() fails (mirrors memory_hash = ~memory_hash above)
memory_tag_samples[0].second = ~memory_tag_samples[0].second;
}
bool test()
{
for (auto &e : memory_tag_samples)
{
if (e.second != *reinterpret_cast<u64*>(vm::g_sudo_addr + e.first))
return false;
}
return true;
}
#endif
void clear_rw_barrier()
{
@@ -415,51 +498,6 @@ namespace rsx
}
}
void queue_tag(u32 address)
{
for (unsigned i = 0; i < memory_tag_samples.size(); ++i)
{
if (LIKELY(i))
memory_tag_samples[i].first = 0;
else
memory_tag_samples[i].first = address; // Top left
}
const u32 pitch = get_native_pitch();
if (UNLIKELY(pitch < 16))
{
// Not enough area to gather samples if pitch is too small
return;
}
// Top right corner
memory_tag_samples[1].first = address + pitch - 8;
if (const u32 h = get_surface_height(); h > 1)
{
// Last row
const u32 pitch2 = get_rsx_pitch();
const u32 last_row_offset = pitch2 * (h - 1);
memory_tag_samples[2].first = address + last_row_offset; // Bottom left corner
memory_tag_samples[3].first = address + last_row_offset + pitch - 8; // Bottom right corner
// Centroid
const u32 center_row_offset = pitch2 * (h / 2);
memory_tag_samples[4].first = address + center_row_offset + pitch / 2;
}
}
void sync_tag()
{
for (auto &tag : memory_tag_samples)
{
if (!tag.first)
break;
tag.second = *reinterpret_cast<u64*>(vm::g_sudo_addr + tag.first);
}
}
void on_write(u64 write_tag = 0, rsx::surface_state_flags resolve_flags = surface_state_flags::require_resolve)
{
if (write_tag)
@@ -516,7 +554,7 @@ namespace rsx
rsx::address_range get_memory_range() const
{
const u32 internal_height = get_surface_height(rsx::surface_metrics::samples);
return rsx::address_range::start_length(memory_tag_samples[0].first, internal_height * get_rsx_pitch());
return rsx::address_range::start_length(base_addr, internal_height * get_rsx_pitch());
}
template <typename T>

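In the non-debug path above, the dirty check reduces to sampling one u64 at up to three fixed points of the surface (top-left, bottom-right, center) and re-reading them in test(). A standalone sketch of the scheme with hypothetical names (guest_base stands in for vm::g_sudo_addr, the host mapping of guest memory):

#include <cstdint>
#include <cstring>

using u8  = std::uint8_t;
using u32 = std::uint32_t;
using u64 = std::uint64_t;

struct tag_sample { u32 addr; u64 value; };

// Record and capture the three probe points; size_x is the last whole u64 in a row.
void queue_and_sync(tag_sample (&s)[3], const u8* guest_base, u32 base_addr,
                    u32 rsx_pitch, u32 native_pitch, u32 rows)
{
    const u32 size_x = (native_pitch > 8) ? native_pitch - 8 : 0;
    const u32 size_y = rows - 1;
    const u32 offsets[3] = {
        0,                                     // top-left: most likely to catch writes
        size_y * rsx_pitch + size_x,           // bottom-right
        (size_y / 2) * rsx_pitch + size_x / 2, // center
    };
    for (int n = 0; n < 3; ++n)
    {
        s[n].addr = base_addr + offsets[n];
        std::memcpy(&s[n].value, guest_base + s[n].addr, sizeof(u64));
    }
}

// test(): memory is presumed untouched iff all three samples still match.
bool samples_match(const tag_sample (&s)[3], const u8* guest_base)
{
    for (const auto& e : s)
    {
        u64 current;
        std::memcpy(&current, guest_base + e.addr, sizeof(u64));
        if (current != e.value)
            return false;
    }
    return true;
}

Three probes cannot prove the block is clean, of course; the ENABLE_SURFACE_CACHE_DEBUG path exists to validate the heuristic by XOR-hashing every row while skipping the (rsx_pitch - native_pitch) padding between rows.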

@@ -1609,8 +1609,6 @@ namespace rsx
return;
}
section.surface->read_barrier(cmd);
// How much of this slice to read?
int rebased = int(section.dst_y) - slice_begin;
const auto src_x = section.src_x;
@@ -2444,8 +2442,6 @@
if (src_is_render_target)
{
src_subres.surface->read_barrier(cmd);
const auto surf = src_subres.surface;
const auto bpp = surf->get_bpp();
if (bpp != src_bpp)
@@ -2460,9 +2456,6 @@
if (dst_is_render_target)
{
// Full barrier is required in case of partial transfers
dst_subres.surface->read_barrier(cmd);
auto bpp = dst_subres.surface->get_bpp();
if (bpp != dst_bpp)
{


@@ -626,7 +626,7 @@ void GLGSRender::end()
}
} while (rsx::method_registers.current_draw_clause.next());
m_rtts.on_write();
m_rtts.on_write(rsx::method_registers.color_write_enabled(), rsx::method_registers.depth_write_enabled());
m_attrib_ring_buffer->notify();
m_index_ring_buffer->notify();
@@ -1152,7 +1152,7 @@ void GLGSRender::clear_surface(u32 arg)
if (require_mem_load) ds->write_barrier(cmd);
// Memory has been initialized
m_rtts.on_write(std::get<0>(m_rtts.m_bound_depth_stencil));
m_rtts.on_write(false, true);
}
}
@@ -1189,7 +1189,7 @@ void GLGSRender::clear_surface(u32 arg)
if (const auto address = rtt.first)
{
if (require_mem_load) rtt.second->write_barrier(cmd);
m_rtts.on_write(address);
m_rtts.on_write(true, false, address);
}
}


@@ -122,51 +122,6 @@ namespace vk
}
}
std::pair<u32, bool> get_compatible_gcm_format(rsx::surface_color_format color_format)
{
switch (color_format)
{
case rsx::surface_color_format::r5g6b5:
return{ CELL_GCM_TEXTURE_R5G6B5, false };
case rsx::surface_color_format::a8r8g8b8:
return{ CELL_GCM_TEXTURE_A8R8G8B8, true }; //verified
case rsx::surface_color_format::a8b8g8r8:
return{ CELL_GCM_TEXTURE_A8R8G8B8, false };
case rsx::surface_color_format::x8b8g8r8_o8b8g8r8:
case rsx::surface_color_format::x8b8g8r8_z8b8g8r8:
return{ CELL_GCM_TEXTURE_A8R8G8B8, true };
case rsx::surface_color_format::x8r8g8b8_z8r8g8b8:
case rsx::surface_color_format::x8r8g8b8_o8r8g8b8:
return{ CELL_GCM_TEXTURE_A8R8G8B8, false };
case rsx::surface_color_format::w16z16y16x16:
return{ CELL_GCM_TEXTURE_W16_Z16_Y16_X16_FLOAT, true };
case rsx::surface_color_format::w32z32y32x32:
return{ CELL_GCM_TEXTURE_W32_Z32_Y32_X32_FLOAT, true };
case rsx::surface_color_format::x1r5g5b5_o1r5g5b5:
case rsx::surface_color_format::x1r5g5b5_z1r5g5b5:
return{ CELL_GCM_TEXTURE_A1R5G5B5, false };
case rsx::surface_color_format::b8:
return{ CELL_GCM_TEXTURE_B8, false };
case rsx::surface_color_format::g8b8:
return{ CELL_GCM_TEXTURE_G8B8, true };
case rsx::surface_color_format::x32:
return{ CELL_GCM_TEXTURE_X32_FLOAT, true }; //verified
default:
return{ CELL_GCM_TEXTURE_A8R8G8B8, false };
}
}
VkLogicOp get_logic_op(rsx::logic_op op)
{
switch (op)
@@ -1778,7 +1733,7 @@ void VKGSRender::end()
close_render_pass();
vk::leave_uninterruptible();
m_rtts.on_write();
m_rtts.on_write(rsx::method_registers.color_write_enabled(), rsx::method_registers.depth_write_enabled());
rsx::thread::end();
}
@@ -2079,7 +2034,7 @@ void VKGSRender::clear_surface(u32 mask)
if (const auto address = rtt.first)
{
if (require_mem_load) rtt.second->write_barrier(*m_current_command_buffer);
m_rtts.on_write(address);
m_rtts.on_write(true, false, address);
}
}
}
@@ -2088,10 +2043,10 @@ void VKGSRender::clear_surface(u32 mask)
if (depth_stencil_mask)
{
if (const auto address = m_rtts.m_bound_depth_stencil.first)
if (m_rtts.m_bound_depth_stencil.first)
{
if (require_mem_load) m_rtts.m_bound_depth_stencil.second->write_barrier(*m_current_command_buffer);
m_rtts.on_write(address);
m_rtts.on_write(false, true);
clear_descriptors.push_back({ (VkImageAspectFlags)depth_stencil_mask, 0, depth_stencil_clear_values });
}
}
@@ -2762,22 +2717,9 @@ void VKGSRender::update_vertex_env(u32 id, const vk::vertex_upload_info& vertex_
m_vertex_layout_ring_info.unmap();
}
void VKGSRender::init_buffers(rsx::framebuffer_creation_context context, bool skip_reading)
void VKGSRender::init_buffers(rsx::framebuffer_creation_context context, bool)
{
prepare_rtts(context);
if (!skip_reading)
{
read_buffers();
}
}
void VKGSRender::read_buffers()
{
}
void VKGSRender::write_buffers()
{
}
void VKGSRender::close_and_submit_command_buffer(VkFence fence, VkSemaphore wait_semaphore, VkSemaphore signal_semaphore, VkPipelineStageFlags pipeline_stage_flags)
@@ -2948,7 +2890,7 @@ void VKGSRender::prepare_rtts(rsx::framebuffer_creation_context context)
flush_command_queue();
}
const auto color_fmt_info = vk::get_compatible_gcm_format(layout.color_format);
const auto color_fmt_info = get_compatible_gcm_format(layout.color_format);
for (u8 index : m_draw_buffers)
{
if (!m_surface_info[index].address || !m_surface_info[index].pitch) continue;
@@ -3003,7 +2945,7 @@ void VKGSRender::prepare_rtts(rsx::framebuffer_creation_context context)
{
if (!g_cfg.video.write_color_buffers) continue;
auto info = vk::get_compatible_gcm_format(surface->get_surface_color_format());
auto info = get_compatible_gcm_format(surface->get_surface_color_format());
gcm_format = info.first;
swap_bytes = info.second;
}


@@ -458,8 +458,6 @@ private:
public:
void init_buffers(rsx::framebuffer_creation_context context, bool skip_reading = false);
void read_buffers();
void write_buffers();
void set_viewport();
void set_scissor(bool clip_viewport);
void bind_viewport();


@@ -19,6 +19,10 @@ namespace vk
std::unordered_map<u32, std::unique_ptr<image>> g_typeless_textures;
std::unordered_map<u32, std::unique_ptr<vk::compute_task>> g_compute_tasks;
// General purpose upload heap
// TODO: Clean this up and integrate cleanly with VKGSRender
data_heap g_upload_heap;
// Garbage collection
std::vector<std::unique_ptr<image>> g_deleted_typeless_textures;
@@ -219,6 +223,16 @@ namespace vk
return g_scratch_buffer.get();
}
data_heap* get_upload_heap()
{
if (!g_upload_heap.heap)
{
g_upload_heap.create(VK_BUFFER_USAGE_TRANSFER_SRC_BIT, 64 * 0x100000, "auxiliary upload heap");
}
return &g_upload_heap;
}
void acquire_global_submit_lock()
{
g_submit_mutex.lock();
@@ -241,6 +255,8 @@ namespace vk
{
vk::reset_compute_tasks();
vk::reset_resolve_resources();
g_upload_heap.reset_allocation_stats();
}
void destroy_global_resources()
@@ -254,6 +270,7 @@ namespace vk
g_null_texture.reset();
g_null_image_view.reset();
g_scratch_buffer.reset();
g_upload_heap.destroy();
g_typeless_textures.clear();
g_deleted_typeless_textures.clear();
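
A hypothetical consumer of the new lazily created heap (64 * 0x100000 is 64 MiB; the alloc/map/unmap calls are assumed to follow the existing vk::data_heap ring-buffer interface used elsewhere in this commit):

// First call creates the transfer-source heap; subsequent calls reuse it.
vk::data_heap* upload_heap = vk::get_upload_heap();
const size_t offset = upload_heap->alloc<256>(image_linear_size); // aligned suballocation
void* mapped = upload_heap->map(offset, image_linear_size);
// ... write texel data into 'mapped' ...
upload_heap->unmap();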


@@ -118,6 +118,7 @@ namespace vk
image_view* null_image_view(vk::command_buffer&);
image* get_typeless_helper(VkFormat format, u32 requested_width, u32 requested_height);
buffer* get_scratch_buffer();
data_heap* get_upload_heap();
memory_type_mapping get_memory_mapping(const physical_device& dev);
gpu_formats_support get_optimal_tiling_supported_formats(const physical_device& dev);
@@ -140,7 +141,7 @@ namespace vk
*/
void copy_mipmaped_image_using_buffer(VkCommandBuffer cmd, vk::image* dst_image,
const std::vector<rsx_subresource_layout>& subresource_layout, int format, bool is_swizzled, u16 mipmap_count,
VkImageAspectFlags flags, vk::data_heap &upload_heap);
VkImageAspectFlags flags, vk::data_heap &upload_heap, u32 heap_align = 256);
//Other texture management helpers
void change_image_layout(VkCommandBuffer cmd, VkImage image, VkImageLayout current_layout, VkImageLayout new_layout, const VkImageSubresourceRange& range);


@@ -510,7 +510,7 @@ namespace vk
void copy_mipmaped_image_using_buffer(VkCommandBuffer cmd, vk::image* dst_image,
const std::vector<rsx_subresource_layout>& subresource_layout, int format, bool is_swizzled, u16 mipmap_count,
VkImageAspectFlags flags, vk::data_heap &upload_heap)
VkImageAspectFlags flags, vk::data_heap &upload_heap, u32 heap_align)
{
u32 mipmap_level = 0;
u32 block_in_pixel = get_format_block_size_in_texel(format);
@@ -518,7 +518,8 @@
for (const rsx_subresource_layout &layout : subresource_layout)
{
u32 row_pitch = align(layout.width_in_block * block_size_in_bytes, 256);
u32 row_pitch = (((layout.width_in_block * block_size_in_bytes) + heap_align - 1) / heap_align) * heap_align;
if (heap_align != 256) verify(HERE), row_pitch == heap_align;
u32 image_linear_size = row_pitch * layout.height_in_block * layout.depth;
//Map with extra padding bytes in case of realignment
@@ -527,7 +528,7 @@
VkBuffer buffer_handle = upload_heap.heap->value;
gsl::span<gsl::byte> mapped{ (gsl::byte*)mapped_buffer, ::narrow<int>(image_linear_size) };
upload_texture_subresource(mapped, layout, format, is_swizzled, false, 256);
upload_texture_subresource(mapped, layout, format, is_swizzled, false, heap_align);
upload_heap.unmap();
VkBufferImageCopy copy_info = {};
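
The generalized row pitch is a plain round-up-to-multiple: with width_in_block * block_size_in_bytes = 600 and heap_align = 256, ((600 + 255) / 256) * 256 = 768, identical to the old align(600, 256); passing heap_align equal to the exact packed pitch yields row_pitch == heap_align, which the verify above asserts. As a generic helper (sketch, not part of the commit):

#include <cstdint>

using u32 = std::uint32_t;

// Round value up to the next multiple of alignment; alignment need not be a
// power of two, matching the generalized heap_align parameter.
constexpr u32 align_up(u32 value, u32 alignment)
{
    return ((value + alignment - 1) / alignment) * alignment;
}

static_assert(align_up(600, 256) == 768);
static_assert(align_up(600, 600) == 600); // heap_align == exact pitch case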