diff --git a/backends/vulkan/runtime/api/Context.cpp b/backends/vulkan/runtime/api/Context.cpp
index 326391424df..80aef97fc04 100644
--- a/backends/vulkan/runtime/api/Context.cpp
+++ b/backends/vulkan/runtime/api/Context.cpp
@@ -226,13 +226,14 @@ void Context::submit_cmd_to_gpu(VkFence fence_handle, const bool final_use) {
   }
 }
 
-void Context::flush() {
+void Context::wait_for_queue() {
   VK_CHECK(vkQueueWaitIdle(queue().handle));
+}
 
+void Context::clear_resources() {
   command_pool_.flush();
   descriptor_pool_.flush();
 
-  // If there is an existing command buffer, invalidate it
   if (cmd_) {
     cmd_.invalidate();
   }
@@ -243,6 +244,11 @@
   images_to_clear_.clear();
 }
 
+void Context::flush() {
+  wait_for_queue();
+  clear_resources();
+}
+
 bool available() {
   return context();
 }
diff --git a/backends/vulkan/runtime/api/Context.h b/backends/vulkan/runtime/api/Context.h
index 5764cb6a894..49003435e3b 100644
--- a/backends/vulkan/runtime/api/Context.h
+++ b/backends/vulkan/runtime/api/Context.h
@@ -232,6 +232,10 @@ class Context final {
     return cmd_;
   }
 
+  void wait_for_queue();
+
+  void clear_resources();
+
   void flush();
 
 #if defined(VK_KHR_pipeline_executable_properties) && \
diff --git a/backends/vulkan/runtime/graph/ComputeGraph.cpp b/backends/vulkan/runtime/graph/ComputeGraph.cpp
index 3a5aabb1c7d..bb2df30a174 100644
--- a/backends/vulkan/runtime/graph/ComputeGraph.cpp
+++ b/backends/vulkan/runtime/graph/ComputeGraph.cpp
@@ -183,13 +183,22 @@ ComputeGraph::ComputeGraph(GraphConfig config)
 }
 
 ComputeGraph::~ComputeGraph() {
-  values_.clear();
+  // Wait for all currently executing commands to complete before cleaning up.
+  // If wait_for_queue() throws an exception, still proceed with cleanup.
+  try {
+    context_->wait_for_queue();
+  } catch (...) {
+  }
 
-  prepack_nodes_.clear();
-  execute_nodes_.clear();
-  clear_deferred_cmds();
+  // Wrap in try/catch to ensure that destructor does not throw
+  try {
+    values_.clear();
 
-  context_->flush();
+    prepack_nodes_.clear();
+    execute_nodes_.clear();
+    clear_deferred_cmds();
+  } catch (...) {
+  }
 }
 
 std::vector<int64_t> ComputeGraph::extract_int_or_symint_list(
diff --git a/backends/vulkan/runtime/vk_api/Pipeline.cpp b/backends/vulkan/runtime/vk_api/Pipeline.cpp
index 6fa85924223..522c4b8589b 100644
--- a/backends/vulkan/runtime/vk_api/Pipeline.cpp
+++ b/backends/vulkan/runtime/vk_api/Pipeline.cpp
@@ -459,7 +459,22 @@ void ComputePipelineCache::create_pipelines(
     const std::unordered_set<ComputePipeline::Descriptor>& descriptors) {
   std::lock_guard<std::mutex> lock(cache_mutex_);
 
-  const auto num_pipelines = descriptors.size();
+  // Filter out descriptors already in cache to avoid creating duplicate
+  // pipelines and to ensure correct indexing between created pipelines and
+  // cache insertion.
+  std::vector<ComputePipeline::Descriptor> keys_to_create;
+  keys_to_create.reserve(descriptors.size());
+  for (const auto& key : descriptors) {
+    if (cache_.find(key) == cache_.cend()) {
+      keys_to_create.push_back(key);
+    }
+  }
+
+  if (keys_to_create.empty()) {
+    return;
+  }
+
+  const auto num_pipelines = keys_to_create.size();
 
   std::vector<VkPipeline> pipelines(num_pipelines);
   std::vector<std::vector<VkSpecializationMapEntry>> map_entries;
@@ -474,7 +489,7 @@ void ComputePipelineCache::create_pipelines(
   std::vector<VkComputePipelineCreateInfo> create_infos;
   create_infos.reserve(num_pipelines);
 
-  for (auto& key : descriptors) {
+  for (const auto& key : keys_to_create) {
     map_entries.push_back(key.specialization_constants.generate_map_entries());
 
     specialization_infos.push_back(VkSpecializationInfo{
@@ -513,14 +528,10 @@
       nullptr,
       pipelines.data()));
 
-  uint32_t i = 0;
-  for (auto& key : descriptors) {
-    auto it = cache_.find(key);
-    if (it != cache_.cend()) {
-      continue;
-    }
-    cache_.insert({key, ComputePipelineCache::Value(device_, pipelines[i])});
-    ++i;
+  for (size_t i = 0; i < keys_to_create.size(); ++i) {
+    cache_.insert(
+        {keys_to_create[i],
+         ComputePipelineCache::Value(device_, pipelines[i])});
   }
 }