vk: Handle corner cases

- Fix up the flush sequence in DMA handling (WCB, Write Color Buffers); see the submission-order sketch after the last hunk below
- Do not request resource sharing if the queue families are not different! (see the sharing-mode sketch after the first hunk)
Author: kd-11, 2022-07-24 20:39:15 +03:00 (committed by kd-11)
parent d846142f0c
commit 5af50cfd55

@@ -965,9 +965,18 @@ namespace vk
 		}
 	}

-	const rsx::flags32_t create_flags = g_fxo->get<AsyncTaskScheduler>().is_host_mode()
-		? (texture_create_flags::shareable | texture_create_flags::do_not_reuse)
-		: 0;
+	const bool upload_async = rsx::get_current_renderer()->get_backend_config().supports_asynchronous_compute;
+	rsx::flags32_t create_flags = 0;
+
+	if (upload_async && g_fxo->get<AsyncTaskScheduler>().is_host_mode())
+	{
+		create_flags |= texture_create_flags::do_not_reuse;
+		if (m_device->get_graphics_queue() != m_device->get_transfer_queue())
+		{
+			create_flags |= texture_create_flags::shareable;
+		}
+	}

 	auto section = create_new_texture(cmd, rsx_range, width, height, depth, mipmaps, pitch, gcm_format, context, type, swizzled,
 		rsx::component_order::default_, create_flags);
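
Note on the hunk above: Vulkan only needs a non-exclusive sharing mode when an image is actually accessed by more than one queue family, which is why `shareable` is now requested only when the graphics and transfer queues differ. A minimal sketch of how such a flag might translate into a `VkImageCreateInfo` (the helper and its parameters are hypothetical, not RPCS3's actual code; only the Vulkan API names are real):

#include <vulkan/vulkan.h>
#include <cstdint>

// Hypothetical helper: choose a sharing mode for an image that may be touched
// by both the graphics and transfer queues. Concurrent sharing only matters
// when the two queues belong to different families; otherwise exclusive
// ownership is valid and typically faster.
VkImageCreateInfo make_image_info(uint32_t graphics_family, uint32_t transfer_family,
                                  const uint32_t* families /* caller-owned array of both indices */)
{
    VkImageCreateInfo info{};
    info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    // ... imageType, format, extent, usage, etc. omitted for brevity ...

    if (graphics_family != transfer_family)
    {
        // Two distinct families will access the image: request concurrent sharing.
        info.sharingMode = VK_SHARING_MODE_CONCURRENT;
        info.queueFamilyIndexCount = 2;
        info.pQueueFamilyIndices = families;
    }
    else
    {
        // Same family: exclusive ownership, no queue family index list needed.
        info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    }
    return info;
}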
@@ -984,8 +993,7 @@ namespace vk
 	}

 	rsx::flags32_t upload_command_flags = initialize_image_layout | upload_contents_inline;
-	if (context == rsx::texture_upload_context::shader_read &&
-		rsx::get_current_renderer()->get_backend_config().supports_asynchronous_compute)
+	if (context == rsx::texture_upload_context::shader_read && upload_async)
 	{
 		upload_command_flags |= upload_contents_async;
 	}
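
Note: only `shader_read` uploads are routed to the async queue; other upload contexts presumably must stay ordered with graphics work already recorded in the primary command buffer. A simplified sketch of the gating logic, using stand-in enums rather than RPCS3's real definitions:

#include <cstdint>

using flags32_t = uint32_t;

// Simplified stand-ins for the flags used in the hunk above.
enum upload_command_flags : flags32_t
{
    initialize_image_layout = 1 << 0,
    upload_contents_inline  = 1 << 1,
    upload_contents_async   = 1 << 2,
};

enum class texture_upload_context { shader_read, blit_engine_src, blit_engine_dst, framebuffer_storage };

// Async upload is only taken when the backend supports it and nothing in the
// current primary command buffer depends on the texture contents; plain
// shader_read uploads qualify.
flags32_t build_upload_flags(texture_upload_context context, bool upload_async)
{
    flags32_t flags = initialize_image_layout | upload_contents_inline;
    if (context == texture_upload_context::shader_read && upload_async)
    {
        flags |= upload_contents_async;
    }
    return flags;
}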
@@ -1123,41 +1131,36 @@ namespace vk
 	{
 		// Flush any pending async jobs in case of blockers
 		// TODO: Context-level manager should handle this logic
-		auto& async_scheduler = g_fxo->get<AsyncTaskScheduler>();
+		auto async_scheduler = g_fxo->try_get<AsyncTaskScheduler>();
 		vk::semaphore* async_sema = nullptr;

-		if (async_scheduler.is_recording())
+		if (async_scheduler && async_scheduler->is_recording())
 		{
-			if (async_scheduler.is_host_mode())
+			if (async_scheduler->is_host_mode())
 			{
-				async_sema = async_scheduler.get_sema();
+				async_sema = async_scheduler->get_sema();
 			}
 			else
 			{
 				vk::queue_submit_t submit_info{};
-				async_scheduler.flush(submit_info, VK_TRUE);
+				async_scheduler->flush(submit_info, VK_TRUE);
 			}
 		}

 		// Primary access command queue, must restart it after
 		vk::fence submit_fence(*m_device);
 		vk::queue_submit_t submit_info{ m_submit_queue, &submit_fence };

 		if (async_sema)
 		{
-			submit_info.queue_signal(*async_sema);
+			vk::queue_submit_t submit_info2{};
+			submit_info2.queue_signal(*async_sema);
+			async_scheduler->flush(submit_info2, VK_FALSE);
+
+			submit_info.wait_on(*async_sema, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
 		}

 		cmd.submit(submit_info, VK_TRUE);
-
-		if (async_sema)
-		{
-			vk::queue_submit_t submit_info2{};
-			submit_info2.wait_on(*async_sema, VK_PIPELINE_STAGE_TRANSFER_BIT);
-			async_scheduler.flush(submit_info2, VK_FALSE);
-		}
 		vk::wait_for_fence(&submit_fence, GENERAL_WAIT_TIMEOUT);
 		CHECK_RESULT(vkResetCommandBuffer(cmd, 0));
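
The flush-sequence fix in the hunk above reverses the semaphore dependency: previously the primary submit signalled `async_sema` and the async scheduler was flushed afterwards, waiting on it at the transfer stage; now the async scheduler is flushed first and signals the semaphore, and the primary submission waits on it at the fragment-shader stage, so work on the transfer queue completes before the primary queue consumes it. In raw Vulkan terms, a sketch of the ordering only (not RPCS3's `vk::queue_submit_t` wrapper; all handles assumed created elsewhere, error handling omitted):

#include <vulkan/vulkan.h>

// Sketch: submit the async (transfer) work first, signalling a semaphore,
// then make the primary (graphics) submission wait on that semaphore.
void submit_in_fixed_order(VkQueue transfer_queue, VkQueue graphics_queue,
                           VkCommandBuffer async_cmd, VkCommandBuffer primary_cmd,
                           VkSemaphore async_sema, VkFence submit_fence)
{
    // 1. Async/transfer submission: signals async_sema on completion.
    VkSubmitInfo async_submit{};
    async_submit.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    async_submit.commandBufferCount = 1;
    async_submit.pCommandBuffers = &async_cmd;
    async_submit.signalSemaphoreCount = 1;
    async_submit.pSignalSemaphores = &async_sema;
    vkQueueSubmit(transfer_queue, 1, &async_submit, VK_NULL_HANDLE);

    // 2. Primary submission: blocks its fragment-shader stage until the
    //    transfer queue has signalled async_sema, then signals the fence.
    const VkPipelineStageFlags wait_stage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
    VkSubmitInfo primary_submit{};
    primary_submit.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    primary_submit.waitSemaphoreCount = 1;
    primary_submit.pWaitSemaphores = &async_sema;
    primary_submit.pWaitDstStageMask = &wait_stage;
    primary_submit.commandBufferCount = 1;
    primary_submit.pCommandBuffers = &primary_cmd;
    vkQueueSubmit(graphics_queue, 1, &primary_submit, submit_fence);
}

With this ordering the fence signalled by the primary submission also implies the async work has finished, which is what lets the caller safely wait on a single fence before resetting the command buffer.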