scripts: Fix memory leak from vkResetDescriptorPool
In the handle-wrapping code, track which descriptor sets are allocated
from each pool, and remove the wrapped-handle entries for implicitly
freed descriptor sets when a descriptor pool is reset or destroyed.
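
For reference, a minimal standalone sketch of the bookkeeping added here,
using plain integer handles in place of VkDescriptorPool/VkDescriptorSet and
a stand-in for the layer's unique_id_mapping table; the TrackAllocation,
TrackFree, and TrackPoolReset helper names are illustrative only and are not
part of the generated code:

    // Simplified model of the new bookkeeping (illustrative only).
    #include <cstdint>
    #include <unordered_map>
    #include <unordered_set>

    using PoolHandle = std::uint64_t;   // stands in for VkDescriptorPool
    using SetHandle  = std::uint64_t;   // stands in for VkDescriptorSet

    // Wrapped handle -> driver handle (stands in for unique_id_mapping).
    std::unordered_map<std::uint64_t, std::uint64_t> unique_id_mapping;
    // Pool -> descriptor sets currently allocated from it (the new map).
    std::unordered_map<PoolHandle, std::unordered_set<SetHandle>> pool_descriptor_sets_map;

    // vkAllocateDescriptorSets: remember which pool each wrapped set came from.
    void TrackAllocation(PoolHandle pool, SetHandle wrapped_set) {
        pool_descriptor_sets_map[pool].insert(wrapped_set);
    }

    // vkFreeDescriptorSets: the app freed the set explicitly.
    void TrackFree(PoolHandle pool, SetHandle wrapped_set) {
        pool_descriptor_sets_map[pool].erase(wrapped_set);
        unique_id_mapping.erase(wrapped_set);
    }

    // vkResetDescriptorPool / vkDestroyDescriptorPool: every set allocated
    // from the pool is implicitly freed, so its wrapped-handle entry must
    // be dropped as well.
    void TrackPoolReset(PoolHandle pool) {
        for (SetHandle set : pool_descriptor_sets_map[pool]) {
            unique_id_mapping.erase(set);
        }
        pool_descriptor_sets_map[pool].clear();  // erase(pool) entirely on destroy
    }

    int main() {
        unique_id_mapping[100] = 42;  // wrapped set 100 -> driver handle 42
        TrackAllocation(/*pool=*/1, /*wrapped_set=*/100);
        TrackPoolReset(1);            // without the cleanup, entry 100 would leak
        return static_cast<int>(unique_id_mapping.count(100));  // 0
    }

The generated dispatch functions below do the same work under dispatch_lock,
keying the map by the still-wrapped pool handle while passing the unwrapped
local handle down to the driver.
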
Change-Id: Idb473562a7b5e571281de991e0fccc5ea92a9b57
diff --git a/scripts/layer_chassis_dispatch_generator.py b/scripts/layer_chassis_dispatch_generator.py
index 4711c75..0d3be16 100644
--- a/scripts/layer_chassis_dispatch_generator.py
+++ b/scripts/layer_chassis_dispatch_generator.py
@@ -442,6 +442,13 @@
const VkAllocationCallbacks *pAllocator) {
if (!wrap_handles) return layer_data->device_dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator);
std::unique_lock<std::mutex> lock(dispatch_lock);
+
+ // remove references to implicitly freed descriptor sets
+ for (auto descriptor_set : layer_data->pool_descriptor_sets_map[descriptorPool]) {
+ unique_id_mapping.erase(reinterpret_cast<uint64_t &>(descriptor_set));
+ }
+ layer_data->pool_descriptor_sets_map.erase(descriptorPool);
+
uint64_t descriptorPool_id = reinterpret_cast<uint64_t &>(descriptorPool);
descriptorPool = (VkDescriptorPool)unique_id_mapping[descriptorPool_id];
unique_id_mapping.erase(descriptorPool_id);
@@ -452,11 +459,20 @@
VkResult DispatchResetDescriptorPool(ValidationObject *layer_data, VkDevice device, VkDescriptorPool descriptorPool,
VkDescriptorPoolResetFlags flags) {
if (!wrap_handles) return layer_data->device_dispatch_table.ResetDescriptorPool(device, descriptorPool, flags);
+ VkDescriptorPool local_descriptor_pool = VK_NULL_HANDLE;
{
std::lock_guard<std::mutex> lock(dispatch_lock);
- descriptorPool = layer_data->Unwrap(descriptorPool);
+ local_descriptor_pool = layer_data->Unwrap(descriptorPool);
}
- VkResult result = layer_data->device_dispatch_table.ResetDescriptorPool(device, descriptorPool, flags);
+ VkResult result = layer_data->device_dispatch_table.ResetDescriptorPool(device, local_descriptor_pool, flags);
+ if (VK_SUCCESS == result) {
+ std::lock_guard<std::mutex> lock(dispatch_lock);
+ // remove references to implicitly freed descriptor sets
+ for (auto descriptor_set : layer_data->pool_descriptor_sets_map[descriptorPool]) {
+ unique_id_mapping.erase(reinterpret_cast<uint64_t &>(descriptor_set));
+ }
+ layer_data->pool_descriptor_sets_map[descriptorPool].clear();
+ }
return result;
}
@@ -486,8 +502,10 @@
}
if (VK_SUCCESS == result) {
std::lock_guard<std::mutex> lock(dispatch_lock);
+ auto &pool_descriptor_sets = layer_data->pool_descriptor_sets_map[pAllocateInfo->descriptorPool];
for (uint32_t index0 = 0; index0 < pAllocateInfo->descriptorSetCount; index0++) {
pDescriptorSets[index0] = layer_data->WrapNew(pDescriptorSets[index0]);
+ pool_descriptor_sets.insert(pDescriptorSets[index0]);
}
}
return result;
@@ -498,9 +516,10 @@
if (!wrap_handles)
return layer_data->device_dispatch_table.FreeDescriptorSets(device, descriptorPool, descriptorSetCount, pDescriptorSets);
VkDescriptorSet *local_pDescriptorSets = NULL;
+ VkDescriptorPool local_descriptor_pool = VK_NULL_HANDLE;
{
std::lock_guard<std::mutex> lock(dispatch_lock);
- descriptorPool = layer_data->Unwrap(descriptorPool);
+ local_descriptor_pool = layer_data->Unwrap(descriptorPool);
if (pDescriptorSets) {
local_pDescriptorSets = new VkDescriptorSet[descriptorSetCount];
for (uint32_t index0 = 0; index0 < descriptorSetCount; ++index0) {
@@ -508,13 +527,15 @@
}
}
}
- VkResult result = layer_data->device_dispatch_table.FreeDescriptorSets(device, descriptorPool, descriptorSetCount,
+ VkResult result = layer_data->device_dispatch_table.FreeDescriptorSets(device, local_descriptor_pool, descriptorSetCount,
(const VkDescriptorSet *)local_pDescriptorSets);
if (local_pDescriptorSets) delete[] local_pDescriptorSets;
if ((VK_SUCCESS == result) && (pDescriptorSets)) {
std::unique_lock<std::mutex> lock(dispatch_lock);
+ auto &pool_descriptor_sets = layer_data->pool_descriptor_sets_map[descriptorPool];
for (uint32_t index0 = 0; index0 < descriptorSetCount; index0++) {
VkDescriptorSet handle = pDescriptorSets[index0];
+ pool_descriptor_sets.erase(handle);
uint64_t unique_id = reinterpret_cast<uint64_t &>(handle);
unique_id_mapping.erase(unique_id);
}
diff --git a/scripts/layer_chassis_generator.py b/scripts/layer_chassis_generator.py
index 1e18937..5655675 100644
--- a/scripts/layer_chassis_generator.py
+++ b/scripts/layer_chassis_generator.py
@@ -221,6 +221,8 @@
// Map of wrapped swapchain handles to arrays of wrapped swapchain image IDs
// Each swapchain has an immutable list of wrapped swapchain image IDs -- always return these IDs if they exist
std::unordered_map<VkSwapchainKHR, std::vector<VkImage>> swapchain_wrapped_image_handle_map;
+ // Map of wrapped descriptor pools to set of wrapped descriptor sets allocated from each pool
+ std::unordered_map<VkDescriptorPool, std::unordered_set<VkDescriptorSet>> pool_descriptor_sets_map;
// Unwrap a handle. Must hold lock.