
vulkan: Use simpler texture object

Vincent Lejeune 2016-03-16 00:42:40 +01:00
parent c6bd5d09f8
commit 36aace57ca
8 changed files with 337 additions and 200 deletions
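In short, the all-in-one vk::texture (create / change_layout / destroy with built-in staging) is retired in favour of a thin vk::image wrapper — raw VkImage handle, its VkImageCreateInfo and a bound memory block — plus a separate vk::image_view, with uploads staged through a ring-buffer heap. A minimal sketch of the new pattern using the constructors added in this commit; dev, mem_mapping, cmd, width and height are illustrative placeholders:

    // Create the image; the ctor allocates a vk::memory_block of the requested type and binds it.
    auto img = std::make_unique<vk::image>(dev, mem_mapping.device_local,
        VK_IMAGE_TYPE_2D, VK_FORMAT_B8G8R8A8_UNORM,
        width, height, 1,          // extent
        1, 1,                      // mip levels, array layers
        VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_TILING_OPTIMAL,
        VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT, 0);

    // Views are built from the raw handle and the stored create info.
    auto view = std::make_unique<vk::image_view>(dev, img->value, VK_IMAGE_VIEW_TYPE_2D,
        img->info.format, vk::default_component_map(), vk::default_image_subresource_range());

    // Layout transitions are now explicit calls instead of texture::change_layout().
    change_image_layout(cmd, img->value, VK_IMAGE_LAYOUT_UNDEFINED,
        VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_ASPECT_COLOR_BIT);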

View File

@ -39,6 +39,32 @@ VkFormat get_compatible_depth_surface_format(const gpu_formats_support &support,
throw EXCEPTION("Invalid format (0x%x)", format);
}
std::tuple<VkFilter, VkSamplerMipmapMode> get_min_filter_and_mip(u8 min_filter)
{
switch (min_filter)
{
case CELL_GCM_TEXTURE_NEAREST: return std::make_tuple(VK_FILTER_NEAREST, VK_SAMPLER_MIPMAP_MODE_NEAREST);
case CELL_GCM_TEXTURE_LINEAR: return std::make_tuple(VK_FILTER_LINEAR, VK_SAMPLER_MIPMAP_MODE_NEAREST);
case CELL_GCM_TEXTURE_NEAREST_NEAREST: return std::make_tuple(VK_FILTER_NEAREST, VK_SAMPLER_MIPMAP_MODE_NEAREST);
case CELL_GCM_TEXTURE_LINEAR_NEAREST: return std::make_tuple(VK_FILTER_LINEAR, VK_SAMPLER_MIPMAP_MODE_NEAREST);
case CELL_GCM_TEXTURE_NEAREST_LINEAR: return std::make_tuple(VK_FILTER_NEAREST, VK_SAMPLER_MIPMAP_MODE_LINEAR);
case CELL_GCM_TEXTURE_LINEAR_LINEAR: return std::make_tuple(VK_FILTER_LINEAR, VK_SAMPLER_MIPMAP_MODE_LINEAR);
case CELL_GCM_TEXTURE_CONVOLUTION_MIN: return std::make_tuple(VK_FILTER_LINEAR, VK_SAMPLER_MIPMAP_MODE_LINEAR);
}
throw EXCEPTION("Invalid min filter (0x%x)", min_filter);
}
VkFilter get_mag_filter(u8 mag_filter)
{
switch (mag_filter)
{
case CELL_GCM_TEXTURE_NEAREST: return VK_FILTER_NEAREST;
case CELL_GCM_TEXTURE_LINEAR: return VK_FILTER_LINEAR;
case CELL_GCM_TEXTURE_CONVOLUTION_MAG: return VK_FILTER_LINEAR;
}
throw EXCEPTION("Invalid mag filter (0x%x)", mag_filter);
}
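For context, these two helpers (together with vk_wrap_mode and max_aniso below) feed the per-texture sampler that VKGSRender::end() now builds each draw; a condensed sketch of that call, with dev and tex standing in for *m_device and textures[i]:

    VkFilter min_filter;
    VkSamplerMipmapMode mip_mode;
    std::tie(min_filter, mip_mode) = vk::get_min_filter_and_mip(tex.min_filter());

    vk::sampler sampler(dev,
        vk::vk_wrap_mode(tex.wrap_s()), vk::vk_wrap_mode(tex.wrap_t()), vk::vk_wrap_mode(tex.wrap_r()),
        !!(tex.format() & CELL_GCM_TEXTURE_UN),        // unnormalized coordinates
        tex.bias(), vk::max_aniso(tex.max_aniso()), tex.min_lod(), tex.max_lod(),
        min_filter, vk::get_mag_filter(tex.mag_filter()), mip_mode);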
VkSamplerAddressMode vk_wrap_mode(u32 gcm_wrap)
{
switch (gcm_wrap)

View File

@ -1,5 +1,6 @@
#pragma once
#include "VKHelpers.h"
#include <tuple>
namespace vk
{
@ -11,6 +12,9 @@ namespace vk
gpu_formats_support get_optimal_tiling_supported_formats(VkPhysicalDevice physical_device);
VkFormat get_compatible_depth_surface_format(const gpu_formats_support &support, rsx::surface_depth_format format);
std::tuple<VkFilter, VkSamplerMipmapMode> get_min_filter_and_mip(u8 min_filter);
VkFilter get_mag_filter(u8 mag_filter);
VkSamplerAddressMode vk_wrap_mode(u32 gcm_wrap);
float max_aniso(u32 gcm_aniso);
VkComponentMapping get_component_mapping(u32 format, u8 swizzle_mask);

View File

@ -415,6 +415,8 @@ VKGSRender::VKGSRender() : GSRender(frame_type::Vulkan)
m_uniform_buffer.reset(new vk::buffer(*m_device, RING_BUFFER_SIZE, m_memory_type_mapping.host_visible_coherent, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, 0));
m_index_buffer_ring_info.init(RING_BUFFER_SIZE);
m_index_buffer.reset(new vk::buffer(*m_device, RING_BUFFER_SIZE, m_memory_type_mapping.host_visible_coherent, VK_BUFFER_USAGE_INDEX_BUFFER_BIT, 0));
m_texture_upload_buffer_ring_info.init(8 * RING_BUFFER_SIZE);
m_texture_upload_buffer.reset(new vk::buffer(*m_device, 8 * RING_BUFFER_SIZE, m_memory_type_mapping.host_visible_coherent, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, 0));
m_render_passes = get_precomputed_render_passes(*m_device, m_optimal_tiling_supported_formats);
@ -464,15 +466,19 @@ VKGSRender::~VKGSRender()
m_index_buffer.release();
m_uniform_buffer.release();
m_attrib_buffers.release();
m_texture_upload_buffer.release();
null_buffer.release();
null_buffer_view.release();
m_buffer_view_to_clean.clear();
m_sampler_to_clean.clear();
m_framebuffer_to_clean.clear();
for (auto &render_pass : m_render_passes)
if (render_pass)
vkDestroyRenderPass(*m_device, render_pass, nullptr);
m_rtts.destroy();
vkFreeDescriptorSets(*m_device, descriptor_pool, 1, &descriptor_sets);
vkDestroyPipelineLayout(*m_device, pipeline_layout, nullptr);
vkDestroyDescriptorSetLayout(*m_device, descriptor_layouts, nullptr);
@ -551,26 +557,28 @@ void VKGSRender::end()
(u8)vk::get_draw_buffers(rsx::to_surface_target(rsx::method_registers[NV4097_SET_SURFACE_COLOR_TARGET])).size());
VkRenderPass current_render_pass = m_render_passes[idx];
vk::texture *texture0 = nullptr;
for (int i = 0; i < rsx::limits::textures_count; ++i)
{
if (m_program->has_uniform("tex" + std::to_string(i)))
{
if (!textures[i].enabled())
{
m_program->bind_uniform({ vk::null_sampler(), vk::null_image_view(), VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL}, "tex" + std::to_string(i), descriptor_sets);
m_program->bind_uniform({ vk::null_sampler(), vk::null_image_view(), VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL }, "tex" + std::to_string(i), descriptor_sets);
continue;
}
vk::image_view *texture0 = m_texture_cache.upload_texture(m_command_buffer, textures[i], m_rtts, m_memory_type_mapping, m_texture_upload_buffer_ring_info, m_texture_upload_buffer.get());
vk::texture &tex = (texture0)? (*texture0): m_texture_cache.upload_texture(m_command_buffer, textures[i], m_rtts);
vk::sampler sampler(*m_device,
VkFilter min_filter;
VkSamplerMipmapMode mip_mode;
std::tie(min_filter, mip_mode) = vk::get_min_filter_and_mip(textures[i].min_filter());
m_sampler_to_clean.push_back(std::make_unique<vk::sampler>(
*m_device,
vk::vk_wrap_mode(textures[i].wrap_s()), vk::vk_wrap_mode(textures[i].wrap_t()), vk::vk_wrap_mode(textures[i].wrap_r()),
!!(textures[i].format() & CELL_GCM_TEXTURE_UN),
textures[i].bias(), vk::max_aniso(textures[i].max_aniso()), textures[i].min_lod(), textures[i].max_lod(),
VK_FILTER_LINEAR, VK_FILTER_LINEAR, VK_SAMPLER_MIPMAP_MODE_NEAREST
);
m_program->bind_uniform({ sampler.value, tex, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL }, "tex" + std::to_string(i), descriptor_sets);
texture0 = &tex;
min_filter, vk::get_mag_filter(textures[i].mag_filter()), mip_mode
));
m_program->bind_uniform({ m_sampler_to_clean.back()->value, texture0->value, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL }, "tex" + std::to_string(i), descriptor_sets);
}
}
@ -733,22 +741,19 @@ void VKGSRender::clear_surface(u32 mask)
{
if (std::get<1>(m_rtts.m_bound_render_targets[i]) == nullptr) continue;
VkImage color_image = (*std::get<1>(m_rtts.m_bound_render_targets[i]));
VkImageLayout old_layout = std::get<1>(m_rtts.m_bound_render_targets[i])->get_layout();
std::get<1>(m_rtts.m_bound_render_targets[i])->change_layout(m_command_buffer, VK_IMAGE_LAYOUT_GENERAL);
VkImage color_image = std::get<1>(m_rtts.m_bound_render_targets[i])->value;
change_image_layout(m_command_buffer, color_image, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_ASPECT_COLOR_BIT);
vkCmdClearColorImage(m_command_buffer, color_image, VK_IMAGE_LAYOUT_GENERAL, &color_clear_values.color, 1, &range);
std::get<1>(m_rtts.m_bound_render_targets[i])->change_layout(m_command_buffer, old_layout);
change_image_layout(m_command_buffer, color_image, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_ASPECT_COLOR_BIT);
}
}
if (mask & 0x3)
{
VkImageLayout old_layout = std::get<1>(m_rtts.m_bound_depth_stencil)->get_layout();
std::get<1>(m_rtts.m_bound_depth_stencil)->change_layout(m_command_buffer, VK_IMAGE_LAYOUT_GENERAL);
vkCmdClearDepthStencilImage(m_command_buffer, (*std::get<1>(m_rtts.m_bound_depth_stencil)), VK_IMAGE_LAYOUT_GENERAL, &depth_stencil_clear_values.depthStencil, 1, &depth_range);
std::get<1>(m_rtts.m_bound_depth_stencil)->change_layout(m_command_buffer, old_layout);
VkImage depth_stencil_image = std::get<1>(m_rtts.m_bound_depth_stencil)->value;
change_image_layout(m_command_buffer, depth_stencil_image, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_ASPECT_DEPTH_BIT);
vkCmdClearDepthStencilImage(m_command_buffer, std::get<1>(m_rtts.m_bound_depth_stencil)->value, VK_IMAGE_LAYOUT_GENERAL, &depth_stencil_clear_values.depthStencil, 1, &depth_range);
change_image_layout(m_command_buffer, depth_stencil_image, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, VK_IMAGE_ASPECT_DEPTH_BIT);
}
if (!was_recording)
@ -1062,16 +1067,16 @@ void VKGSRender::prepare_rtts()
clip_horizontal, clip_vertical,
rsx::to_surface_target(rsx::method_registers[NV4097_SET_SURFACE_COLOR_TARGET]),
get_color_surface_addresses(), get_zeta_surface_address(),
(*m_device), &m_command_buffer, m_optimal_tiling_supported_formats);
(*m_device), &m_command_buffer, m_optimal_tiling_supported_formats, m_memory_type_mapping);
//Bind created rtts as current fbo...
std::vector<u8> draw_buffers = vk::get_draw_buffers(rsx::to_surface_target(rsx::method_registers[NV4097_SET_SURFACE_COLOR_TARGET]));
std::vector<std::unique_ptr<vk::image_view> > fbo_images;
std::vector<std::unique_ptr<vk::image_view>> fbo_images;
for (u8 index: draw_buffers)
{
vk::texture *raw = std::get<1>(m_rtts.m_bound_render_targets[index]);
vk::image *raw = std::get<1>(m_rtts.m_bound_render_targets[index]);
VkImageSubresourceRange subres = {};
subres.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
@ -1080,14 +1085,14 @@ void VKGSRender::prepare_rtts()
subres.layerCount = 1;
subres.levelCount = 1;
fbo_images.push_back(std::make_unique<vk::image_view>(*m_device, *raw, VK_IMAGE_VIEW_TYPE_2D, raw->get_format(), vk::default_component_map(), subres));
fbo_images.push_back(std::make_unique<vk::image_view>(*m_device, raw->value, VK_IMAGE_VIEW_TYPE_2D, raw->info.format, vk::default_component_map(), subres));
}
m_draw_buffers_count = fbo_images.size();
if (std::get<1>(m_rtts.m_bound_depth_stencil) != nullptr)
{
vk::texture *raw = (std::get<1>(m_rtts.m_bound_depth_stencil));
vk::image *raw = (std::get<1>(m_rtts.m_bound_depth_stencil));
VkImageSubresourceRange subres = {};
subres.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
@ -1096,7 +1101,7 @@ void VKGSRender::prepare_rtts()
subres.layerCount = 1;
subres.levelCount = 1;
fbo_images.push_back(std::make_unique<vk::image_view>(*m_device, *raw, VK_IMAGE_VIEW_TYPE_2D, raw->get_format(), vk::default_component_map(), subres));
fbo_images.push_back(std::make_unique<vk::image_view>(*m_device, raw->value, VK_IMAGE_VIEW_TYPE_2D, raw->info.format, vk::default_component_map(), subres));
}
size_t idx = vk::get_render_pass_location(vk::get_compatible_surface_format(m_surface.color_format), vk::get_compatible_depth_surface_format(m_optimal_tiling_supported_formats, m_surface.depth_format), (u8)draw_buffers.size());
@ -1190,9 +1195,9 @@ void VKGSRender::flip(int buffer)
VkImage image_to_flip = nullptr;
if (std::get<1>(m_rtts.m_bound_render_targets[0]) != nullptr)
image_to_flip = (*std::get<1>(m_rtts.m_bound_render_targets[0]));
image_to_flip = std::get<1>(m_rtts.m_bound_render_targets[0])->value;
else
image_to_flip = (*std::get<1>(m_rtts.m_bound_render_targets[1]));
image_to_flip = std::get<1>(m_rtts.m_bound_render_targets[1])->value;
VkImage target_image = m_swap_chain->get_swap_chain_image(m_current_present_image);
vk::copy_scaled_image(m_command_buffer, image_to_flip, target_image, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
@ -1232,6 +1237,7 @@ void VKGSRender::flip(int buffer)
m_uniform_buffer_ring_info.m_get_pos = m_uniform_buffer_ring_info.get_current_put_pos_minus_one();
m_index_buffer_ring_info.m_get_pos = m_index_buffer_ring_info.get_current_put_pos_minus_one();
m_attrib_ring_info.m_get_pos = m_attrib_ring_info.get_current_put_pos_minus_one();
m_texture_upload_buffer_ring_info.m_get_pos = m_texture_upload_buffer_ring_info.get_current_put_pos_minus_one();
if (m_present_semaphore)
{
vkDestroySemaphore((*m_device), m_present_semaphore, nullptr);
@ -1239,10 +1245,11 @@ void VKGSRender::flip(int buffer)
}
//Feed back damaged resources to the main texture cache for management...
m_texture_cache.merge_dirty_textures(m_rtts.invalidated_resources);
// m_texture_cache.merge_dirty_textures(m_rtts.invalidated_resources);
m_rtts.invalidated_resources.clear();
m_buffer_view_to_clean.clear();
m_sampler_to_clean.clear();
m_draw_calls = 0;
dirty_frame = true;

View File

@ -12,87 +12,6 @@
#pragma comment(lib, "VKstatic.1.lib")
namespace vk
{
// TODO: factorize between backends
class data_heap
{
/**
* Does alloc cross get position ?
*/
template<int Alignement>
bool can_alloc(size_t size) const
{
size_t alloc_size = align(size, Alignement);
size_t aligned_put_pos = align(m_put_pos, Alignement);
if (aligned_put_pos + alloc_size < m_size)
{
// range before get
if (aligned_put_pos + alloc_size < m_get_pos)
return true;
// range after get
if (aligned_put_pos > m_get_pos)
return true;
return false;
}
else
{
// ..]....[..get..
if (aligned_put_pos < m_get_pos)
return false;
// ..get..]...[...
// Actually all resources extending beyond heap space starts at 0
if (alloc_size > m_get_pos)
return false;
return true;
}
}
size_t m_size;
size_t m_put_pos; // Start of free space
public:
data_heap() = default;
~data_heap() = default;
data_heap(const data_heap&) = delete;
data_heap(data_heap&&) = delete;
size_t m_get_pos; // End of free space
void init(size_t heap_size)
{
m_size = heap_size;
m_put_pos = 0;
m_get_pos = heap_size - 1;
}
template<int Alignement>
size_t alloc(size_t size)
{
if (!can_alloc<Alignement>(size)) throw EXCEPTION("Working buffer not big enough");
size_t alloc_size = align(size, Alignement);
size_t aligned_put_pos = align(m_put_pos, Alignement);
if (aligned_put_pos + alloc_size < m_size)
{
m_put_pos = aligned_put_pos + alloc_size;
return aligned_put_pos;
}
else
{
m_put_pos = alloc_size;
return 0;
}
}
/**
* return current putpos - 1
*/
size_t get_current_put_pos_minus_one() const
{
return (m_put_pos - 1 > 0) ? m_put_pos - 1 : m_size - 1;
}
};
}
class VKGSRender : public GSRender
{
private:
@ -130,6 +49,8 @@ private:
std::unique_ptr<vk::buffer> m_uniform_buffer;
vk::data_heap m_index_buffer_ring_info;
std::unique_ptr<vk::buffer> m_index_buffer;
vk::data_heap m_texture_upload_buffer_ring_info;
std::unique_ptr<vk::buffer> m_texture_upload_buffer;
//Vulkan internals
u32 m_current_present_image = 0xFFFF;
@ -152,6 +73,7 @@ private:
std::vector<std::unique_ptr<vk::buffer_view> > m_buffer_view_to_clean;
std::vector<std::unique_ptr<vk::framebuffer> > m_framebuffer_to_clean;
std::vector<std::unique_ptr<vk::sampler> > m_sampler_to_clean;
u32 m_draw_calls = 0;

View File

@ -14,6 +14,7 @@
#include "Emu/state.h"
#include "VulkanAPI.h"
#include "../GCM.h"
#include "../Common/TextureUtils.h"
namespace rsx
{
@ -332,6 +333,60 @@ namespace vk
}
};
struct image
{
VkImage value;
VkImageCreateInfo info = {};
std::shared_ptr<vk::memory_block> memory;
image(VkDevice dev, uint32_t memory_type_index,
VkImageType image_type,
VkFormat format,
uint32_t width, uint32_t height, uint32_t depth,
VkDeviceSize mipmaps, VkDeviceSize layers,
VkSampleCountFlagBits samples,
VkImageLayout initial_layout,
VkImageTiling tiling,
VkImageUsageFlags usage,
VkImageCreateFlags image_flags)
: m_device(dev)
{
info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
info.imageType = image_type;
info.format = format;
info.extent = { width, height, depth };
info.mipLevels = mipmaps;
info.arrayLayers = layers;
info.samples = samples;
info.tiling = tiling;
info.usage = usage;
info.flags = image_flags;
info.initialLayout = initial_layout;
info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
CHECK_RESULT(vkCreateImage(m_device, &info, nullptr, &value));
VkMemoryRequirements memory_req;
vkGetImageMemoryRequirements(m_device, value, &memory_req);
memory = std::make_shared<vk::memory_block>(m_device, memory_req.size, memory_type_index);
CHECK_RESULT(vkBindImageMemory(m_device, value, memory->memory, 0));
}
// TODO: Ctor that uses a provided memory heap
~image()
{
vkDestroyImage(m_device, value, nullptr);
}
image(const image&) = delete;
image(image&&) = delete;
private:
VkDevice m_device;
};
struct image_view
{
VkImageView value;
@ -1202,8 +1257,9 @@ namespace vk
struct bound_sampler
{
VkImageView image_view = nullptr;
VkSampler sampler = nullptr;
VkFormat format;
VkImage image;
VkComponentMapping mapping;
};
struct bound_buffer
@ -1251,4 +1307,94 @@ namespace vk
void bind_uniform(const VkBufferView &buffer_view, const std::string &binding_name, VkDescriptorSet &descriptor_set);
};
}
// TODO: factorize between backends
class data_heap
{
/**
* Does alloc cross get position ?
*/
template<int Alignement>
bool can_alloc(size_t size) const
{
size_t alloc_size = align(size, Alignement);
size_t aligned_put_pos = align(m_put_pos, Alignement);
if (aligned_put_pos + alloc_size < m_size)
{
// range before get
if (aligned_put_pos + alloc_size < m_get_pos)
return true;
// range after get
if (aligned_put_pos > m_get_pos)
return true;
return false;
}
else
{
// ..]....[..get..
if (aligned_put_pos < m_get_pos)
return false;
// ..get..]...[...
// Actually all resources extending beyond heap space starts at 0
if (alloc_size > m_get_pos)
return false;
return true;
}
}
size_t m_size;
size_t m_put_pos; // Start of free space
public:
data_heap() = default;
~data_heap() = default;
data_heap(const data_heap&) = delete;
data_heap(data_heap&&) = delete;
size_t m_get_pos; // End of free space
void init(size_t heap_size)
{
m_size = heap_size;
m_put_pos = 0;
m_get_pos = heap_size - 1;
}
template<int Alignement>
size_t alloc(size_t size)
{
if (!can_alloc<Alignement>(size)) throw EXCEPTION("Working buffer not big enough");
size_t alloc_size = align(size, Alignement);
size_t aligned_put_pos = align(m_put_pos, Alignement);
if (aligned_put_pos + alloc_size < m_size)
{
m_put_pos = aligned_put_pos + alloc_size;
return aligned_put_pos;
}
else
{
m_put_pos = alloc_size;
return 0;
}
}
/**
* return current putpos - 1
*/
size_t get_current_put_pos_minus_one() const
{
return (m_put_pos - 1 > 0) ? m_put_pos - 1 : m_size - 1;
}
};
/**
* Allocate enough space in upload_buffer and write all mipmap/layer data into the subbuffer.
* Then copy all layers into dst_image.
* dst_image must be in TRANSFER_DST_OPTIMAL layout and upload_buffer must have the TRANSFER_SRC_BIT usage flag.
*/
void copy_mipmaped_image_using_buffer(VkCommandBuffer cmd, VkImage dst_image,
const std::vector<rsx_subresource_layout> subresource_layout, int format, bool is_swizzled,
vk::data_heap &upload_heap, vk::buffer* upload_buffer);
}
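Taken together, the data_heap above and this declaration drive the new staging path; a sketch of the flow as used elsewhere in this diff (RING_BUFFER_SIZE, image_linear_size and upload_buffer refer to the VKGSRender / VKTexture code shown in the other files):

    vk::data_heap upload_heap;
    upload_heap.init(8 * RING_BUFFER_SIZE);                      // staging range, reserved once at startup

    // Per subresource: take a 512-byte aligned slice, write the texel data, record the buffer-to-image copy.
    size_t offset_in_buffer = upload_heap.alloc<512>(image_linear_size);
    void *mapped = upload_buffer->map(offset_in_buffer, image_linear_size);
    // ... upload_texture_subresource(...) writes into 'mapped' ...
    upload_buffer->unmap();

    // At flip time the whole consumed range is recycled in one step:
    upload_heap.m_get_pos = upload_heap.get_current_put_pos_minus_one();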

View File

@ -10,41 +10,49 @@ namespace rsx
{
struct vk_render_target_traits
{
using surface_storage_type = vk::texture ;
using surface_type = vk::texture*;
using surface_storage_type = std::unique_ptr<vk::image>;
using surface_type = vk::image*;
using command_list_type = vk::command_buffer*;
using download_buffer_object = void*;
static vk::texture create_new_surface(u32 address, surface_color_format format, size_t width, size_t height, vk::render_device &device, vk::command_buffer *cmd, const vk::gpu_formats_support &support)
static std::unique_ptr<vk::image> create_new_surface(u32 address, surface_color_format format, size_t width, size_t height, vk::render_device &device, vk::command_buffer *cmd, const vk::gpu_formats_support &support, const vk::memory_type_mapping &mem_mapping)
{
VkFormat requested_format = vk::get_compatible_surface_format(format);
vk::texture rtt;
rtt.create(device, requested_format, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT|VK_IMAGE_USAGE_TRANSFER_SRC_BIT|VK_IMAGE_USAGE_SAMPLED_BIT, width, height, 1, true);
rtt.change_layout(*cmd, VK_IMAGE_LAYOUT_GENERAL);
std::unique_ptr<vk::image> rtt;
rtt.reset(new vk::image(device, mem_mapping.device_local,
VK_IMAGE_TYPE_2D,
requested_format,
width, height, 1, 1, 1,
VK_SAMPLE_COUNT_1_BIT,
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_TILING_OPTIMAL,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT|VK_IMAGE_USAGE_TRANSFER_SRC_BIT|VK_IMAGE_USAGE_SAMPLED_BIT,
0));
change_image_layout(*cmd, rtt->value, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_ASPECT_COLOR_BIT);
//Clear new surface
VkClearColorValue clear_color;
VkImageSubresourceRange range = vk::default_image_subresource_range();
clear_color.float32[0] = 0.f;
clear_color.float32[1] = 0.f;
clear_color.float32[2] = 0.f;
clear_color.float32[3] = 0.f;
vkCmdClearColorImage(*cmd, rtt, rtt.get_layout(), &clear_color, 1, &range);
rtt.change_layout(*cmd, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
vkCmdClearColorImage(*cmd, rtt->value, VK_IMAGE_LAYOUT_GENERAL, &clear_color, 1, &range);
change_image_layout(*cmd, rtt->value, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_ASPECT_COLOR_BIT);
return rtt;
}
static vk::texture create_new_surface(u32 address, surface_depth_format format, size_t width, size_t height, vk::render_device &device, vk::command_buffer *cmd, const vk::gpu_formats_support &support)
static std::unique_ptr<vk::image> create_new_surface(u32 address, surface_depth_format format, size_t width, size_t height, vk::render_device &device, vk::command_buffer *cmd, const vk::gpu_formats_support &support, const vk::memory_type_mapping &mem_mapping)
{
VkFormat requested_format = vk::get_compatible_depth_surface_format(support, format);
vk::texture rtt;
rtt.create(device, requested_format, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT|VK_IMAGE_USAGE_SAMPLED_BIT, width, height, 1, true);
rtt.change_layout(*cmd, VK_IMAGE_LAYOUT_GENERAL);
std::unique_ptr<vk::image> ds;
ds.reset(new vk::image(device, mem_mapping.device_local,
VK_IMAGE_TYPE_2D, requested_format, width, height, 1, 1, 1, VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT|VK_IMAGE_USAGE_SAMPLED_BIT, 0));
change_image_layout(*cmd, ds->value, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT);
//Clear new surface..
VkClearDepthStencilValue clear_depth = {};
@ -61,54 +69,52 @@ namespace rsx
clear_depth.depth = 1.f;
clear_depth.stencil = 0;
vkCmdClearDepthStencilImage(*cmd, rtt, rtt.get_layout(), &clear_depth, 1, &range);
rtt.change_layout(*cmd, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
return rtt;
vkCmdClearDepthStencilImage(*cmd, ds->value, VK_IMAGE_LAYOUT_GENERAL, &clear_depth, 1, &range);
change_image_layout(*cmd, ds->value, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT);
return ds;
}
static void prepare_rtt_for_drawing(vk::command_buffer* pcmd, vk::texture *surface)
static void prepare_rtt_for_drawing(vk::command_buffer* pcmd, vk::image *surface)
{
surface->change_layout(*pcmd, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
// surface->change_layout(*pcmd, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
}
static void prepare_rtt_for_sampling(vk::command_buffer* pcmd, vk::texture *surface)
static void prepare_rtt_for_sampling(vk::command_buffer* pcmd, vk::image *surface)
{
surface->change_layout(*pcmd, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
// surface->change_layout(*pcmd, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
}
static void prepare_ds_for_drawing(vk::command_buffer* pcmd, vk::texture *surface)
static void prepare_ds_for_drawing(vk::command_buffer* pcmd, vk::image *surface)
{
surface->change_layout(*pcmd, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
// surface->change_layout(*pcmd, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
}
static void prepare_ds_for_sampling(vk::command_buffer* pcmd, vk::texture *surface)
static void prepare_ds_for_sampling(vk::command_buffer* pcmd, vk::image *surface)
{
surface->change_layout(*pcmd, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
// surface->change_layout(*pcmd, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
}
static bool rtt_has_format_width_height(const vk::texture &rtt, surface_color_format format, size_t width, size_t height)
static bool rtt_has_format_width_height(const std::unique_ptr<vk::image> &rtt, surface_color_format format, size_t width, size_t height)
{
VkFormat fmt = vk::get_compatible_surface_format(format);
vk::texture &tex = const_cast<vk::texture&>(rtt);
if (tex.get_format() == fmt &&
tex.width() == width &&
tex.height() == height)
if (rtt->info.format == fmt &&
rtt->info.extent.width == width &&
rtt->info.extent.height == height)
return true;
return false;
}
static bool ds_has_format_width_height(const vk::texture &ds, surface_depth_format format, size_t width, size_t height)
static bool ds_has_format_width_height(const std::unique_ptr<vk::image> &ds, surface_depth_format format, size_t width, size_t height)
{
// TODO: check format
//VkFormat fmt = vk::get_compatible_depth_surface_format(format);
vk::texture &tex = const_cast<vk::texture&>(ds);
if (//tex.get_format() == fmt &&
tex.width() == width &&
tex.height() == height)
ds->info.extent.width == width &&
ds->info.extent.height == height)
return true;
return false;
@ -138,13 +144,19 @@ namespace rsx
{
}
static vk::texture *get(const vk::texture &tex)
static vk::image *get(const std::unique_ptr<vk::image> &tex)
{
return const_cast<vk::texture*>(&tex);
return tex.get();
}
};
struct vk_render_targets : public rsx::surface_store<vk_render_target_traits>
{
void destroy()
{
m_render_targets_storage.clear();
m_depth_stencil_storage.clear();
}
};
}

View File

@ -4,7 +4,6 @@
#include "../RSXThread.h"
#include "../RSXTexture.h"
#include "../rsx_utils.h"
#include "../Common/TextureUtils.h"
#include "VKFormats.h"
namespace vk
@ -36,8 +35,8 @@ namespace vk
subres.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
subres.baseArrayLayer = 0;
subres.baseMipLevel = 0;
subres.layerCount = 1;
subres.levelCount = 1;
subres.layerCount = 100;
subres.levelCount = 100;
return subres;
}
@ -129,6 +128,39 @@ namespace vk
copy_image(cmd, isrc, idst, srcLayout, dstLayout, width, height, mipmaps, aspect);
}
void copy_mipmaped_image_using_buffer(VkCommandBuffer cmd, VkImage dst_image,
const std::vector<rsx_subresource_layout> subresource_layout, int format, bool is_swizzled,
vk::data_heap &upload_heap, vk::buffer* upload_buffer)
{
u32 mipmap_level = 0;
u32 block_in_pixel = get_format_block_size_in_texel(format);
u8 block_size_in_bytes = get_format_block_size_in_bytes(format);
for (const rsx_subresource_layout &layout : subresource_layout)
{
u32 row_pitch = align(layout.width_in_block * block_size_in_bytes, 256);
size_t image_linear_size = row_pitch * layout.height_in_block * layout.depth;
size_t offset_in_buffer = upload_heap.alloc<512>(image_linear_size);
void *mapped_buffer = upload_buffer->map(offset_in_buffer, image_linear_size);
gsl::span<gsl::byte> mapped{ (gsl::byte*)mapped_buffer, gsl::narrow<int>(image_linear_size) };
upload_texture_subresource(mapped, layout, format, is_swizzled, 256);
upload_buffer->unmap();
VkBufferImageCopy copy_info = {};
copy_info.bufferOffset = offset_in_buffer;
copy_info.imageExtent.height = layout.height_in_block * block_in_pixel;
copy_info.imageExtent.width = layout.width_in_block * block_in_pixel;
copy_info.imageExtent.depth = layout.depth;
copy_info.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copy_info.imageSubresource.layerCount = 1;
copy_info.imageSubresource.mipLevel = mipmap_level;
copy_info.bufferRowLength = block_in_pixel * row_pitch / block_size_in_bytes;
vkCmdCopyBufferToImage(cmd, upload_buffer->value, dst_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &copy_info);
mipmap_level++;
}
}
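As a concrete illustration of the pitch arithmetic above (a hypothetical DXT1 mip level, not taken from the commit):

    // 256x256 DXT1: 4x4 blocks, 8 bytes per block
    // block_in_pixel = 4, block_size_in_bytes = 8, width_in_block = height_in_block = 64
    // row_pitch         = align(64 * 8, 256)   = 512 bytes
    // image_linear_size = 512 * 64 * 1         = 32768 bytes staged in upload_heap
    // bufferRowLength   = 4 * 512 / 8          = 256 texels (the padded row re-expressed in pixels)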
texture::texture(vk::swap_chain_image &img)
{
m_image_contents = img;

View File

@ -15,8 +15,8 @@ namespace vk
u16 height;
u16 depth;
u16 mipmaps;
vk::texture uploaded_texture;
std::unique_ptr<vk::image_view> uploaded_image_view;
std::unique_ptr<vk::image> uploaded_texture;
u64 protected_rgn_start;
u64 protected_rgn_end;
@ -32,6 +32,8 @@ namespace vk
std::vector<cached_texture_object> m_cache;
u32 num_dirty_textures = 0;
std::vector<std::unique_ptr<vk::image_view> > m_temporary_image_view;
bool lock_memory_region(u32 start, u32 size)
{
static const u32 memory_page_size = 4096;
@ -98,7 +100,7 @@ namespace vk
{
if (tex.exists)
{
tex.uploaded_texture.destroy();
tex.uploaded_texture.reset();
tex.exists = false;
}
@ -106,8 +108,7 @@ namespace vk
}
}
cached_texture_object object;
m_cache.push_back(object);
m_cache.push_back(cached_texture_object());
return m_cache[m_cache.size() - 1];
}
@ -133,7 +134,7 @@ namespace vk
{
if (tex.dirty && tex.exists)
{
tex.uploaded_texture.destroy();
tex.uploaded_texture.reset();
tex.exists = false;
}
}
@ -148,19 +149,10 @@ namespace vk
void destroy()
{
for (cached_texture_object &tex : m_cache)
{
if (tex.exists)
{
tex.uploaded_texture.destroy();
tex.exists = false;
}
}
m_cache.resize(0);
}
vk::texture& upload_texture(command_buffer cmd, rsx::texture &tex, rsx::vk_render_targets &m_rtts)
vk::image_view* upload_texture(command_buffer cmd, rsx::texture &tex, rsx::vk_render_targets &m_rtts, const vk::memory_type_mapping &memory_type_mapping, data_heap& upload_heap, vk::buffer* upload_buffer)
{
if (num_dirty_textures > 32)
{
@ -175,21 +167,27 @@ namespace vk
const u32 range = (u32)get_texture_size(tex);
//First check if it exists as an rtt...
vk::texture *rtt_texture = nullptr;
vk::image *rtt_texture = nullptr;
if (rtt_texture = m_rtts.get_texture_from_render_target_if_applicable(texaddr))
{
return *rtt_texture;
m_temporary_image_view.push_back(std::make_unique<vk::image_view>(*vk::get_current_renderer(), rtt_texture->value, VK_IMAGE_VIEW_TYPE_2D, rtt_texture->info.format,
vk::default_component_map(),
vk::default_image_subresource_range()));
return m_temporary_image_view.back().get();
}
if (rtt_texture = m_rtts.get_texture_from_depth_stencil_if_applicable(texaddr))
{
return *rtt_texture;
m_temporary_image_view.push_back(std::make_unique<vk::image_view>(*vk::get_current_renderer(), rtt_texture->value, VK_IMAGE_VIEW_TYPE_2D, rtt_texture->info.format,
vk::default_component_map(),
vk::default_image_subresource_range()));
return m_temporary_image_view.back().get();
}
cached_texture_object& cto = find_cached_texture(texaddr, range, true, tex.width(), tex.height(), tex.mipmap());
if (cto.exists && !cto.dirty)
{
return cto.uploaded_texture;
return cto.uploaded_image_view.get();
}
u32 raw_format = tex.format();
@ -198,21 +196,32 @@ namespace vk
VkComponentMapping mapping = vk::get_component_mapping(format, tex.remap());
VkFormat vk_format = get_compatible_sampler_format(format);
cto.uploaded_texture.create(*vk::get_current_renderer(), vk_format, VK_IMAGE_USAGE_SAMPLED_BIT, tex.width(), tex.height(), tex.mipmap(), false, mapping);
cto.uploaded_texture.init(tex, cmd);
cto.uploaded_texture.change_layout(cmd, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
cto.uploaded_texture = std::make_unique<vk::image>(*vk::get_current_renderer(), memory_type_mapping.device_local,
VK_IMAGE_TYPE_2D,
vk_format,
tex.width(), tex.height(), 1, tex.mipmap(), 1, VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT, 0);
change_image_layout(cmd, cto.uploaded_texture->value, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_ASPECT_COLOR_BIT);
cto.uploaded_image_view = std::make_unique<vk::image_view>(*vk::get_current_renderer(), cto.uploaded_texture->value, VK_IMAGE_VIEW_TYPE_2D, vk_format,
vk::get_component_mapping(tex.format() & ~(CELL_GCM_TEXTURE_LN | CELL_GCM_TEXTURE_UN), tex.remap()),
vk::default_image_subresource_range());
copy_mipmaped_image_using_buffer(cmd, cto.uploaded_texture->value, get_subresources_layout(tex), format, !(tex.format() & CELL_GCM_TEXTURE_LN), upload_heap, upload_buffer);
change_image_layout(cmd, cto.uploaded_texture->value, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_IMAGE_ASPECT_COLOR_BIT);
cto.exists = true;
cto.dirty = false;
cto.native_rsx_address = texaddr;
cto.native_rsx_size = range;
cto.width = cto.uploaded_texture.width();
cto.height = cto.uploaded_texture.height();
cto.mipmaps = cto.uploaded_texture.mipmaps();
cto.width = tex.width();
cto.height = tex.height();
cto.mipmaps = tex.mipmap();
lock_object(cto);
return cto.uploaded_texture;
return cto.uploaded_image_view.get();
}
bool invalidate_address(u32 rsx_address)
@ -239,28 +248,7 @@ namespace vk
void flush(vk::command_buffer &cmd)
{
//Finish all pending transactions for any cache managed textures..
for (cached_texture_object &tex : m_cache)
{
if (tex.dirty || !tex.exists) continue;
tex.uploaded_texture.flush(cmd);
}
}
void merge_dirty_textures(std::list<vk::texture> dirty_textures)
{
for (vk::texture &tex : dirty_textures)
{
cached_texture_object cto;
cto.uploaded_texture = tex;
cto.locked = false;
cto.exists = true;
cto.dirty = true;
cto.native_rsx_address = 0;
num_dirty_textures++;
m_cache.push_back(cto);
}
m_temporary_image_view.clear();
}
};
}