vulkan: Updated vulkan.h for revision 79 -- Bug #13464

Implemented the changes required to add fine-grained synchronization to
vkBindObjectMemory and related APIs: object memory is now bound through a
queue (vkQueueBindObjectMemory and related entry points), so the tests and
the test framework pass the owning Device/VkQueue down to the bind and
unbind calls.
diff --git a/tests/blit_tests.cpp b/tests/blit_tests.cpp
index 1118dc9..756e1b3 100644
--- a/tests/blit_tests.cpp
+++ b/tests/blit_tests.cpp
@@ -793,7 +793,7 @@
     err = vkAllocMemory(dev_.obj(), &mem_info, &event_mem);
     ASSERT_VK_SUCCESS(err);
 
-    err = vkBindObjectMemory(event, 0, event_mem, 0);
+    err = vkQueueBindObjectMemory(queue_.obj(), event, 0, event_mem, 0);
     ASSERT_VK_SUCCESS(err);
 
     err = vkResetEvent(event);
@@ -871,7 +871,7 @@
     bufs[2].unmap();
 
     // All done with event memory, clean up
-    err = vkBindObjectMemory(event, 0, VK_NULL_HANDLE, 0);
+    err = vkQueueBindObjectMemory(queue_.obj(), event, 0, VK_NULL_HANDLE, 0);
     ASSERT_VK_SUCCESS(err);
 
     err = vkDestroyObject(event);
diff --git a/tests/image_tests.cpp b/tests/image_tests.cpp
index be0c81a..d2b2e69 100644
--- a/tests/image_tests.cpp
+++ b/tests/image_tests.cpp
@@ -243,7 +243,8 @@
         ASSERT_VK_SUCCESS(err);
 
         /* bind memory */
-        err = vkBindObjectMemory(m_image, i, m_image_mem[i], 0);
+        VkQueue queue = m_device->graphics_queues()[0]->obj();
+        err = vkQueueBindObjectMemory(queue, m_image, i, m_image_mem[i], 0);
         ASSERT_VK_SUCCESS(err);
     }
 }
@@ -252,7 +253,8 @@
 {
     VkResult err;
     // All done with image memory, clean up
-    ASSERT_VK_SUCCESS(vkBindObjectMemory(m_image, 0, VK_NULL_HANDLE, 0));
+    VkQueue queue = m_device->graphics_queues()[0]->obj();
+    ASSERT_VK_SUCCESS(vkQueueBindObjectMemory(queue, m_image, 0, VK_NULL_HANDLE, 0));
 
     for (uint32_t i = 0 ; i < m_num_mem; i++) {
         err = vkFreeMemory(m_image_mem[i]);
diff --git a/tests/init.cpp b/tests/init.cpp
index 5599446..554b9c8 100644
--- a/tests/init.cpp
+++ b/tests/init.cpp
@@ -253,7 +253,8 @@
     err = vkAllocMemory(device(), &mem_info, &event_mem);
     ASSERT_VK_SUCCESS(err);
 
-    err = vkBindObjectMemory(event, 0, event_mem, 0);
+    VkQueue queue = m_device->graphics_queues()[0]->obj();
+    err = vkQueueBindObjectMemory(queue, event, 0, event_mem, 0);
     ASSERT_VK_SUCCESS(err);
 
     err = vkResetEvent(event);
@@ -271,7 +272,7 @@
     // TODO: Test actual synchronization with command buffer event.
 
     // All done with event memory, clean up
-    err = vkBindObjectMemory(event, 0, VK_NULL_HANDLE, 0);
+    err = vkQueueBindObjectMemory(queue, event, 0, VK_NULL_HANDLE, 0);
     ASSERT_VK_SUCCESS(err);
 
     err = vkDestroyObject(event);
@@ -384,7 +385,8 @@
     err = vkAllocMemory(device(), &mem_info, &query_mem);
     ASSERT_VK_SUCCESS(err);
 
-    err = vkBindObjectMemory(query_pool, 0, query_mem, 0);
+    VkQueue queue = m_device->graphics_queues()[0]->obj();
+    err = vkQueueBindObjectMemory(queue, query_pool, 0, query_mem, 0);
     ASSERT_VK_SUCCESS(err);
 
     // TODO: Test actual synchronization with command buffer event.
@@ -409,7 +411,7 @@
     }
 
     // All done with QueryPool memory, clean up
-    err = vkBindObjectMemory(query_pool, 0, VK_NULL_HANDLE, 0);
+    err = vkQueueBindObjectMemory(queue, query_pool, 0, VK_NULL_HANDLE, 0);
     ASSERT_VK_SUCCESS(err);
 
     err = vkDestroyObject(query_pool);
@@ -424,6 +426,7 @@
 
     const VkPhysicalGpuQueueProperties props = device->gpu().queue_properties()[queue_node_index];
     for (que_idx = 0; que_idx < props.queueCount; que_idx++) {
+        // TODO: Need to add support for separate MEMMGR and work queues, including synchronization
         err = vkGetDeviceQueue(device->obj(), queue_node_index, que_idx, &queue);
         ASSERT_EQ(VK_SUCCESS, err) << "vkGetDeviceQueue: " << qname << " queue #" << que_idx << ": Failed with error: " << vk_result_string(err);
     }
@@ -463,6 +466,7 @@
 //                    VK_QUEUE_GRAPHICS_BIT                                  = 0x00000001,   // Queue supports graphics operations
 //                    VK_QUEUE_COMPUTE_BIT                                   = 0x00000002,   // Queue supports compute operations
 //                    VK_QUEUE_DMA_BIT                                       = 0x00000004,   // Queue supports DMA operations
+//                    VK_QUEUE_MEMMGR_BIT                                    = 0x00000008,   // Queue supports MEMMGR operations
 //                    VK_QUEUE_EXTENDED_BIT                                  = 0x80000000    // Extended queue
 //                } VkQueueFlags;
 
@@ -649,7 +653,8 @@
     err = vkAllocMemory(device(), &mem_info, &image_mem);
     ASSERT_VK_SUCCESS(err);
 
-    err = vkBindObjectMemory(image, 0, image_mem, 0);
+    VkQueue queue = m_device->graphics_queues()[0]->obj();
+    err = vkQueueBindObjectMemory(queue, image, 0, image_mem, 0);
     ASSERT_VK_SUCCESS(err);
 
 //    typedef struct VkImageViewCreateInfo_
@@ -692,7 +697,7 @@
     // TODO: Test image memory.
 
     // All done with image memory, clean up
-    ASSERT_VK_SUCCESS(vkBindObjectMemory(image, 0, VK_NULL_HANDLE, 0));
+    ASSERT_VK_SUCCESS(vkQueueBindObjectMemory(queue, image, 0, VK_NULL_HANDLE, 0));
 
     ASSERT_VK_SUCCESS(vkFreeMemory(image_mem));
 
diff --git a/tests/vkrenderframework.cpp b/tests/vkrenderframework.cpp
index d3060de..7e5aa68 100644
--- a/tests/vkrenderframework.cpp
+++ b/tests/vkrenderframework.cpp
@@ -93,7 +93,7 @@
     }
     while (!m_renderTargets.empty()) {
         vkDestroyObject(m_renderTargets.back()->targetView());
-        vkBindObjectMemory(m_renderTargets.back()->image(), 0, VK_NULL_HANDLE, 0);
+        vkQueueBindObjectMemory(m_device->m_queue, m_renderTargets.back()->image(), 0, VK_NULL_HANDLE, 0);
         vkDestroyObject(m_renderTargets.back()->image());
         vkFreeMemory(m_renderTargets.back()->memory());
         m_renderTargets.pop_back();
@@ -369,7 +369,7 @@
     m_layout_chain.init(*m_device, layouts);
 
     // create VkDescriptorSet
-    m_set = alloc_sets(VK_DESCRIPTOR_SET_USAGE_STATIC, m_layout);
+    m_set = alloc_sets(*m_device, VK_DESCRIPTOR_SET_USAGE_STATIC, m_layout);
 
     // build the update array
     vector<const void *> update_array;
diff --git a/tests/vktestbinding.cpp b/tests/vktestbinding.cpp
index fa4d4fb..eedeef7 100644
--- a/tests/vktestbinding.cpp
+++ b/tests/vktestbinding.cpp
@@ -25,16 +25,18 @@
 #include "vktestbinding.h"
 
 namespace {
-
 #define DERIVED_OBJECT_INIT(create_func, ...)                       \
     do {                                                            \
         obj_type obj;                                               \
-        if (EXPECT(create_func(__VA_ARGS__, &obj) == VK_SUCCESS))  \
+        if (EXPECT(create_func(__VA_ARGS__, &obj) == VK_SUCCESS))   \
             base_type::init(obj);                                   \
     } while (0)
 
 #define STRINGIFY(x) #x
 #define EXPECT(expr) ((expr) ? true : expect_failure(STRINGIFY(expr), __FILE__, __LINE__, __FUNCTION__))
+#define DEV_INIT(device) dev_ = &device;
+
+
 
 vk_testing::ErrorCallback error_callback;
 
@@ -222,7 +224,7 @@
         return;
 
     if(bound) {
-        unbind_memory();
+        unbind_memory(*dev_);
     }
 
     if (internal_mems_) {
@@ -237,28 +239,31 @@
         EXPECT(vkDestroyObject(obj()) == VK_SUCCESS);
 }
 
-void Object::bind_memory(uint32_t alloc_idx, const GpuMemory &mem, VkGpuSize mem_offset)
+void Object::bind_memory(const Device &dev, uint32_t alloc_idx, const GpuMemory &mem, VkGpuSize mem_offset)
 {
     bound = true;
-    EXPECT(vkBindObjectMemory(obj(), alloc_idx, mem.obj(), mem_offset) == VK_SUCCESS);
+    VkQueue queue = dev.graphics_queues()[0]->obj();
+    EXPECT(vkQueueBindObjectMemory(queue, obj(), alloc_idx, mem.obj(), mem_offset) == VK_SUCCESS);
 }
 
-void Object::bind_memory(uint32_t alloc_idx, VkGpuSize offset, VkGpuSize size,
+void Object::bind_memory(const Device &dev, uint32_t alloc_idx, VkGpuSize offset, VkGpuSize size,
                          const GpuMemory &mem, VkGpuSize mem_offset)
 {
     bound = true;
-    EXPECT(!alloc_idx && vkBindObjectMemoryRange(obj(), 0, offset, size, mem.obj(), mem_offset) == VK_SUCCESS);
+    VkQueue queue = dev.graphics_queues()[0]->obj();
+    EXPECT(!alloc_idx && vkQueueBindObjectMemoryRange(queue, obj(), 0, offset, size, mem.obj(), mem_offset) == VK_SUCCESS);
 }
 
-void Object::unbind_memory(uint32_t alloc_idx)
+void Object::unbind_memory(const Device &dev, uint32_t alloc_idx)
 {
-    EXPECT(vkBindObjectMemory(obj(), alloc_idx, VK_NULL_HANDLE, 0) == VK_SUCCESS);
+    VkQueue queue = dev.graphics_queues()[0]->obj();
+    EXPECT(vkQueueBindObjectMemory(queue, obj(), alloc_idx, VK_NULL_HANDLE, 0) == VK_SUCCESS);
 }
 
-void Object::unbind_memory()
+void Object::unbind_memory(const Device &dev)
 {
     for (uint32_t i = 0; i < mem_alloc_count_; i++)
-        unbind_memory(i);
+        unbind_memory(dev, i);
 }
 
 void Object::alloc_memory(const Device &dev)
@@ -275,11 +280,11 @@
         info = GpuMemory::alloc_info(mem_reqs[i], next_info);
         primary_mem_ = &internal_mems_[i];
         internal_mems_[i].init(dev, info);
-        bind_memory(i, internal_mems_[i], 0);
+        bind_memory(dev, i, internal_mems_[i], 0);
     }
 }
 
-void Object::alloc_memory(const std::vector<VkGpuMemory> &mems)
+void Object::alloc_memory(const Device &dev, const std::vector<VkGpuMemory> &mems)
 {
     if (!EXPECT(!internal_mems_) || !mem_alloc_count_)
         return;
@@ -294,7 +299,7 @@
         primary_mem_ = &internal_mems_[i];
 
         internal_mems_[i].init(mems[i]);
-        bind_memory(i, internal_mems_[i], 0);
+        bind_memory(dev, i, internal_mems_[i], 0);
     }
 }
 
@@ -397,6 +402,7 @@
         VkQueue queue;
 
         for (int j = 0; j < queue_props[i].queueCount; j++) {
+            // TODO: Need to add support for separate MEMMGR and work queues, including synchronization
             err = vkGetDeviceQueue(obj(), i, j, &queue);
             EXPECT(err == VK_SUCCESS);
 
@@ -580,17 +586,20 @@
 
 void Semaphore::init(const Device &dev, const VkSemaphoreCreateInfo &info)
 {
+    DEV_INIT(dev);
     DERIVED_OBJECT_INIT(vkCreateSemaphore, dev.obj(), &info);
     alloc_memory(dev);
 }
 
 void Semaphore::init(const Device &dev, const VkSemaphoreOpenInfo &info)
 {
+    DEV_INIT(dev);
     DERIVED_OBJECT_INIT(vkOpenSharedSemaphore, dev.obj(), &info);
 }
 
 void Event::init(const Device &dev, const VkEventCreateInfo &info)
 {
+    DEV_INIT(dev);
     DERIVED_OBJECT_INIT(vkCreateEvent, dev.obj(), &info);
     alloc_memory(dev);
 }
@@ -607,6 +616,7 @@
 
 void QueryPool::init(const Device &dev, const VkQueryPoolCreateInfo &info)
 {
+    DEV_INIT(dev);
     DERIVED_OBJECT_INIT(vkCreateQueryPool, dev.obj(), &info);
     alloc_memory(dev);
 }
@@ -633,6 +643,7 @@
 
 void Buffer::init_no_mem(const Device &dev, const VkBufferCreateInfo &info)
 {
+    DEV_INIT(dev);
     DERIVED_OBJECT_INIT(vkCreateBuffer, dev.obj(), &info);
     create_info_ = info;
 }
@@ -651,6 +662,7 @@
 
 void Image::init_no_mem(const Device &dev, const VkImageCreateInfo &info)
 {
+    DEV_INIT(dev);
     DERIVED_OBJECT_INIT(vkCreateImage, dev.obj(), &info);
     init_info(dev, info);
 }
@@ -663,7 +675,7 @@
     Object::init(img);
 
     init_info(dev, original_info);
-    alloc_memory(std::vector<VkGpuMemory>(1, mem));
+    alloc_memory(dev, std::vector<VkGpuMemory>(1, mem));
 }
 
 void Image::init_info(const Device &dev, const VkImageCreateInfo &info)
@@ -678,10 +690,11 @@
     }
 }
 
-void Image::bind_memory(uint32_t alloc_idx, const VkImageMemoryBindInfo &info,
+void Image::bind_memory(const Device &dev, uint32_t alloc_idx, const VkImageMemoryBindInfo &info,
                         const GpuMemory &mem, VkGpuSize mem_offset)
 {
-    EXPECT(!alloc_idx && vkBindImageMemoryRange(obj(), 0, &info, mem.obj(), mem_offset) == VK_SUCCESS);
+    VkQueue queue = dev.graphics_queues()[0]->obj();
+    EXPECT(!alloc_idx && vkQueueBindImageMemoryRange(queue, obj(), 0, &info, mem.obj(), mem_offset) == VK_SUCCESS);
 }
 
 VkSubresourceLayout Image::subresource_layout(const VkImageSubresource &subres) const
@@ -723,6 +736,7 @@
 
 void Shader::init(const Device &dev, const VkShaderCreateInfo &info)
 {
+    DEV_INIT(dev);
     DERIVED_OBJECT_INIT(vkCreateShader, dev.obj(), &info);
 }
 
@@ -738,6 +752,7 @@
 
 void Pipeline::init(const Device &dev, const VkGraphicsPipelineCreateInfo &info)
 {
+    DEV_INIT(dev);
     DERIVED_OBJECT_INIT(vkCreateGraphicsPipeline, dev.obj(), &info);
     alloc_memory(dev);
 }
@@ -747,18 +762,21 @@
         const VkGraphicsPipelineCreateInfo &info,
         const VkPipeline basePipeline)
 {
+    DEV_INIT(dev);
     DERIVED_OBJECT_INIT(vkCreateGraphicsPipelineDerivative, dev.obj(), &info, basePipeline);
     alloc_memory(dev);
 }
 
 void Pipeline::init(const Device &dev, const VkComputePipelineCreateInfo &info)
 {
+    DEV_INIT(dev);
     DERIVED_OBJECT_INIT(vkCreateComputePipeline, dev.obj(), &info);
     alloc_memory(dev);
 }
 
 void Pipeline::init(const Device&dev, size_t size, const void *data)
 {
+    DEV_INIT(dev);
     DERIVED_OBJECT_INIT(vkLoadPipeline, dev.obj(), size, data);
     alloc_memory(dev);
 }
@@ -769,6 +787,7 @@
         const void *data,
         const VkPipeline basePipeline)
 {
+    DEV_INIT(dev);
     DERIVED_OBJECT_INIT(vkLoadPipelineDerivative, dev.obj(), size, data, basePipeline);
     alloc_memory(dev);
 }
@@ -783,18 +802,21 @@
 
 void Sampler::init(const Device &dev, const VkSamplerCreateInfo &info)
 {
+    DEV_INIT(dev);
     DERIVED_OBJECT_INIT(vkCreateSampler, dev.obj(), &info);
     alloc_memory(dev);
 }
 
 void DescriptorSetLayout::init(const Device &dev, const VkDescriptorSetLayoutCreateInfo &info)
 {
+    DEV_INIT(dev);
     DERIVED_OBJECT_INIT(vkCreateDescriptorSetLayout, dev.obj(), &info);
     alloc_memory(dev);
 }
 
 void DescriptorSetLayoutChain::init(const Device &dev, const std::vector<const DescriptorSetLayout *> &layouts)
 {
+    DEV_INIT(dev);
     const std::vector<VkDescriptorSetLayout> layout_objs = make_objects<VkDescriptorSetLayout>(layouts);
 
     DERIVED_OBJECT_INIT(vkCreateDescriptorSetLayoutChain, dev.obj(), layout_objs.size(), &layout_objs[0]);
@@ -804,6 +826,7 @@
 void DescriptorPool::init(const Device &dev, VkDescriptorPoolUsage usage,
                           uint32_t max_sets, const VkDescriptorPoolCreateInfo &info)
 {
+    DEV_INIT(dev);
     DERIVED_OBJECT_INIT(vkCreateDescriptorPool, dev.obj(), usage, max_sets, &info);
     alloc_memory(dev);
 }
@@ -813,7 +836,7 @@
     EXPECT(vkResetDescriptorPool(obj()) == VK_SUCCESS);
 }
 
-std::vector<DescriptorSet *> DescriptorPool::alloc_sets(VkDescriptorSetUsage usage, const std::vector<const DescriptorSetLayout *> &layouts)
+std::vector<DescriptorSet *> DescriptorPool::alloc_sets(const Device &dev, VkDescriptorSetUsage usage, const std::vector<const DescriptorSetLayout *> &layouts)
 {
     const std::vector<VkDescriptorSetLayout> layout_objs = make_objects<VkDescriptorSetLayout>(layouts);
 
@@ -830,20 +853,21 @@
     sets.reserve(set_count);
     for (std::vector<VkDescriptorSet>::const_iterator it = set_objs.begin(); it != set_objs.end(); it++) {
         // do descriptor sets need memories bound?
-        sets.push_back(new DescriptorSet(*it));
+        DescriptorSet *descriptorSet = new DescriptorSet(*it);
+        descriptorSet->dev_ = &dev;
+        sets.push_back(descriptorSet);
     }
-
     return sets;
 }
 
-std::vector<DescriptorSet *> DescriptorPool::alloc_sets(VkDescriptorSetUsage usage, const DescriptorSetLayout &layout, uint32_t count)
+std::vector<DescriptorSet *> DescriptorPool::alloc_sets(const Device &dev, VkDescriptorSetUsage usage, const DescriptorSetLayout &layout, uint32_t count)
 {
-    return alloc_sets(usage, std::vector<const DescriptorSetLayout *>(count, &layout));
+    return alloc_sets(dev, usage, std::vector<const DescriptorSetLayout *>(count, &layout));
 }
 
-DescriptorSet *DescriptorPool::alloc_sets(VkDescriptorSetUsage usage, const DescriptorSetLayout &layout)
+DescriptorSet *DescriptorPool::alloc_sets(const Device &dev, VkDescriptorSetUsage usage, const DescriptorSetLayout &layout)
 {
-    std::vector<DescriptorSet *> set = alloc_sets(usage, layout, 1);
+    std::vector<DescriptorSet *> set = alloc_sets(dev, usage, layout, 1);
     return (set.empty()) ? NULL : set[0];
 }
 
@@ -884,6 +908,7 @@
 
 void CmdBuffer::init(const Device &dev, const VkCmdBufferCreateInfo &info)
 {
+    DEV_INIT(dev);
     DERIVED_OBJECT_INIT(vkCreateCommandBuffer, dev.obj(), &info);
 }
 
diff --git a/tests/vktestbinding.h b/tests/vktestbinding.h
index cec874e..f036a88 100644
--- a/tests/vktestbinding.h
+++ b/tests/vktestbinding.h
@@ -127,13 +127,13 @@
 public:
     const VkObject &obj() const { return reinterpret_cast<const VkObject &>(BaseObject::obj()); }
 
-    // vkBindObjectMemory()
-    void bind_memory(uint32_t alloc_idx, const GpuMemory &mem, VkGpuSize mem_offset);
-    void unbind_memory(uint32_t alloc_idx);
-    void unbind_memory();
+    // vkQueueBindObjectMemory()
+    void bind_memory(const Device &dev, uint32_t alloc_idx, const GpuMemory &mem, VkGpuSize mem_offset);
+    void unbind_memory(const Device &dev, uint32_t alloc_idx);
+    void unbind_memory(const Device &dev);
 
-    // vkBindObjectMemoryRange()
-    void bind_memory(uint32_t alloc_idx, VkGpuSize offset, VkGpuSize size,
+    // vkQueueBindObjectMemoryRange()
+    void bind_memory(const Device &dev, uint32_t alloc_idx, VkGpuSize offset, VkGpuSize size,
                      const GpuMemory &mem, VkGpuSize mem_offset);
 
     // Unless an object is initialized with init_no_mem(), memories are
@@ -147,6 +147,7 @@
           void *map()       { return map(0); }
 
     void unmap() const;
+    const Device* dev_;
 
 protected:
     explicit Object() : mem_alloc_count_(0), internal_mems_(NULL), primary_mem_(NULL), bound(false) {}
@@ -161,7 +162,7 @@
 
     // allocate and bind internal memories
     void alloc_memory(const Device &dev);
-    void alloc_memory(const std::vector<VkGpuMemory> &mems);
+    void alloc_memory(const Device &dev, const std::vector<VkGpuMemory> &mems);
 
 private:
     void cleanup();
@@ -207,7 +208,7 @@
     const PhysicalGpu &gpu() const { return gpu_; }
 
     // vkGetDeviceQueue()
-    const std::vector<Queue *> &graphics_queues() { return queues_[GRAPHICS]; }
+    const std::vector<Queue *> &graphics_queues() const { return queues_[GRAPHICS]; }
     const std::vector<Queue *> &compute_queues() { return queues_[COMPUTE]; }
     const std::vector<Queue *> &dma_queues() { return queues_[DMA]; }
     uint32_t graphics_queue_node_index_;
@@ -398,8 +399,8 @@
     // vkOpenPeerImage()
     void init(const Device &dev, const VkPeerImageOpenInfo &info, const VkImageCreateInfo &original_info);
 
-    // vkBindImageMemoryRange()
-    void bind_memory(uint32_t alloc_idx, const VkImageMemoryBindInfo &info,
+    // vkQueueBindImageMemoryRange()
+    void bind_memory(const Device &dev, uint32_t alloc_idx, const VkImageMemoryBindInfo &info,
                      const GpuMemory &mem, VkGpuSize mem_offset);
 
     // vkGetImageSubresourceInfo()
@@ -523,9 +524,9 @@
     void reset();
 
     // vkAllocDescriptorSets()
-    std::vector<DescriptorSet *> alloc_sets(VkDescriptorSetUsage usage, const std::vector<const DescriptorSetLayout *> &layouts);
-    std::vector<DescriptorSet *> alloc_sets(VkDescriptorSetUsage usage, const DescriptorSetLayout &layout, uint32_t count);
-    DescriptorSet *alloc_sets(VkDescriptorSetUsage usage, const DescriptorSetLayout &layout);
+    std::vector<DescriptorSet *> alloc_sets(const Device &dev, VkDescriptorSetUsage usage, const std::vector<const DescriptorSetLayout *> &layouts);
+    std::vector<DescriptorSet *> alloc_sets(const Device &dev, VkDescriptorSetUsage usage, const DescriptorSetLayout &layout, uint32_t count);
+    DescriptorSet *alloc_sets(const Device &dev, VkDescriptorSetUsage usage, const DescriptorSetLayout &layout);
 
     // vkClearDescriptorSets()
     void clear_sets(const std::vector<DescriptorSet *> &sets);