layers: object_tracker - replace loader_platform mutex with std::mutex

Change-Id: I5f9ed9dc92daccaada895808e3fea3189ef81212
diff --git a/layers/object_tracker.h b/layers/object_tracker.h
index 664bf61..2e61a15 100644
--- a/layers/object_tracker.h
+++ b/layers/object_tracker.h
@@ -27,6 +27,8 @@
  * Author: Tobin Ehlis <tobin@lunarg.com>
  */
 
+#include <mutex>
+
 #include "vulkan/vk_layer.h"
 #include "vk_layer_extension_utils.h"
 #include "vk_enum_string_helper.h"
@@ -99,8 +101,7 @@
 static unordered_map<uint64_t, OBJTRACK_NODE *> swapchainImageMap;
 
 static long long unsigned int object_track_index = 0;
-static int objLockInitialized = 0;
-static loader_platform_thread_mutex objLock;
+static std::mutex global_lock;
 
 // Objects stored in a global map w/ struct containing basic info
 // unordered_map<const void*, OBJTRACK_NODE*> objMap;
@@ -330,16 +331,6 @@
 static void init_object_tracker(layer_data *my_data, const VkAllocationCallbacks *pAllocator) {
 
     layer_debug_actions(my_data->report_data, my_data->logging_callback, pAllocator, "lunarg_object_tracker");
-
-    if (!objLockInitialized) {
-        // TODO/TBD: Need to delete this mutex sometime.  How???  One
-        // suggestion is to call this during vkCreateInstance(), and then we
-        // can clean it up during vkDestroyInstance().  However, that requires
-        // that the layer have per-instance locks.  We need to come back and
-        // address this soon.
-        loader_platform_thread_create_mutex(&objLock);
-        objLockInitialized = 1;
-    }
 }
 
 //
@@ -647,15 +638,15 @@
 void explicit_GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice gpu, uint32_t *pCount, VkQueueFamilyProperties *pProperties) {
     get_dispatch_table(object_tracker_instance_table_map, gpu)->GetPhysicalDeviceQueueFamilyProperties(gpu, pCount, pProperties);
 
-    loader_platform_thread_lock_mutex(&objLock);
-    if (pProperties != NULL)
+    std::lock_guard<std::mutex> lock(global_lock);
+    if (pProperties != NULL) {
         setGpuQueueInfoState(*pCount, pProperties);
-    loader_platform_thread_unlock_mutex(&objLock);
+    }
 }
 
 VkResult explicit_CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                                VkDevice *pDevice) {
-    loader_platform_thread_lock_mutex(&objLock);
+    std::lock_guard<std::mutex> lock(global_lock);
     VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
 
     assert(chain_info->u.pLayerInfo);
@@ -663,7 +654,6 @@
     PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
     PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(NULL, "vkCreateDevice");
     if (fpCreateDevice == NULL) {
-        loader_platform_thread_unlock_mutex(&objLock);
         return VK_ERROR_INITIALIZATION_FAILED;
     }
 
@@ -672,7 +662,6 @@
 
     VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
     if (result != VK_SUCCESS) {
-        loader_platform_thread_unlock_mutex(&objLock);
         return result;
     }
 
@@ -689,21 +678,20 @@
         create_device((VkInstance)pNewObjNode->belongsTo, *pDevice, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT);
     }
 
-    loader_platform_thread_unlock_mutex(&objLock);
     return result;
 }
 
 VkResult explicit_EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
                                            VkPhysicalDevice *pPhysicalDevices) {
     VkBool32 skipCall = VK_FALSE;
-    loader_platform_thread_lock_mutex(&objLock);
+    std::unique_lock<std::mutex> lock(global_lock);
     skipCall |= validate_instance(instance, instance, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, false);
-    loader_platform_thread_unlock_mutex(&objLock);
+    lock.unlock();
     if (skipCall)
         return VK_ERROR_VALIDATION_FAILED_EXT;
     VkResult result = get_dispatch_table(object_tracker_instance_table_map, instance)
                           ->EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
-    loader_platform_thread_lock_mutex(&objLock);
+    lock.lock();
     if (result == VK_SUCCESS) {
         if (pPhysicalDevices) {
             for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
@@ -711,30 +699,29 @@
             }
         }
     }
-    loader_platform_thread_unlock_mutex(&objLock);
+    lock.unlock();
     return result;
 }
 
 void explicit_GetDeviceQueue(VkDevice device, uint32_t queueNodeIndex, uint32_t queueIndex, VkQueue *pQueue) {
-    loader_platform_thread_lock_mutex(&objLock);
+    std::unique_lock<std::mutex> lock(global_lock);
     validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
-    loader_platform_thread_unlock_mutex(&objLock);
+    lock.unlock();
 
     get_dispatch_table(object_tracker_device_table_map, device)->GetDeviceQueue(device, queueNodeIndex, queueIndex, pQueue);
 
-    loader_platform_thread_lock_mutex(&objLock);
+    lock.lock();
     addQueueInfo(queueNodeIndex, *pQueue);
     create_queue(device, *pQueue, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT);
-    loader_platform_thread_unlock_mutex(&objLock);
 }
 
 VkResult explicit_MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
                             void **ppData) {
     VkBool32 skipCall = VK_FALSE;
-    loader_platform_thread_lock_mutex(&objLock);
+    std::unique_lock<std::mutex> lock(global_lock);
     skipCall |= set_device_memory_status(device, mem, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, OBJSTATUS_GPU_MEM_MAPPED);
     skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
-    loader_platform_thread_unlock_mutex(&objLock);
+    lock.unlock();
     if (skipCall == VK_TRUE)
         return VK_ERROR_VALIDATION_FAILED_EXT;
 
@@ -746,10 +733,10 @@
 
 void explicit_UnmapMemory(VkDevice device, VkDeviceMemory mem) {
     VkBool32 skipCall = VK_FALSE;
-    loader_platform_thread_lock_mutex(&objLock);
+    std::unique_lock<std::mutex> lock(global_lock);
     skipCall |= reset_device_memory_status(device, mem, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, OBJSTATUS_GPU_MEM_MAPPED);
     skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
-    loader_platform_thread_unlock_mutex(&objLock);
+    lock.unlock();
     if (skipCall == VK_TRUE)
         return;
 
@@ -757,7 +744,7 @@
 }
 
 VkResult explicit_QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
-    loader_platform_thread_lock_mutex(&objLock);
+    std::unique_lock<std::mutex> lock(global_lock);
     validateQueueFlags(queue, "QueueBindSparse");
 
     for (uint32_t i = 0; i < bindInfoCount; i++) {
@@ -768,8 +755,7 @@
         for (uint32_t j = 0; j < pBindInfo[i].imageBindCount; j++)
             validate_image(queue, pBindInfo[i].pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, false);
     }
-
-    loader_platform_thread_unlock_mutex(&objLock);
+    lock.unlock();
 
     VkResult result =
         get_dispatch_table(object_tracker_device_table_map, queue)->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
@@ -779,10 +765,10 @@
 VkResult explicit_AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pAllocateInfo,
                                          VkCommandBuffer *pCommandBuffers) {
     VkBool32 skipCall = VK_FALSE;
-    loader_platform_thread_lock_mutex(&objLock);
+    std::unique_lock<std::mutex> lock(global_lock);
     skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
     skipCall |= validate_command_pool(device, pAllocateInfo->commandPool, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, false);
-    loader_platform_thread_unlock_mutex(&objLock);
+    lock.unlock();
 
     if (skipCall) {
         return VK_ERROR_VALIDATION_FAILED_EXT;
@@ -791,12 +777,12 @@
     VkResult result =
         get_dispatch_table(object_tracker_device_table_map, device)->AllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers);
 
-    loader_platform_thread_lock_mutex(&objLock);
+    lock.lock();
     for (uint32_t i = 0; i < pAllocateInfo->commandBufferCount; i++) {
         alloc_command_buffer(device, pAllocateInfo->commandPool, pCommandBuffers[i], VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                              pAllocateInfo->level);
     }
-    loader_platform_thread_unlock_mutex(&objLock);
+    lock.unlock();
 
     return result;
 }
@@ -804,7 +790,7 @@
 VkResult explicit_AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                          VkDescriptorSet *pDescriptorSets) {
     VkBool32 skipCall = VK_FALSE;
-    loader_platform_thread_lock_mutex(&objLock);
+    std::unique_lock<std::mutex> lock(global_lock);
     skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
     skipCall |=
         validate_descriptor_pool(device, pAllocateInfo->descriptorPool, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, false);
@@ -812,7 +798,7 @@
         skipCall |= validate_descriptor_set_layout(device, pAllocateInfo->pSetLayouts[i],
                                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, false);
     }
-    loader_platform_thread_unlock_mutex(&objLock);
+    lock.unlock();
     if (skipCall)
         return VK_ERROR_VALIDATION_FAILED_EXT;
 
@@ -820,12 +806,12 @@
         get_dispatch_table(object_tracker_device_table_map, device)->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
 
     if (VK_SUCCESS == result) {
-        loader_platform_thread_lock_mutex(&objLock);
+        lock.lock();
         for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
             alloc_descriptor_set(device, pAllocateInfo->descriptorPool, pDescriptorSets[i],
                                  VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT);
         }
-        loader_platform_thread_unlock_mutex(&objLock);
+        lock.unlock();
     }
 
     return result;
@@ -833,24 +819,23 @@
 
 void explicit_FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
                                  const VkCommandBuffer *pCommandBuffers) {
-    loader_platform_thread_lock_mutex(&objLock);
+    std::unique_lock<std::mutex> lock(global_lock);
     validate_command_pool(device, commandPool, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, false);
     validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
-    loader_platform_thread_unlock_mutex(&objLock);
+    lock.unlock();
 
     get_dispatch_table(object_tracker_device_table_map, device)
         ->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
 
-    loader_platform_thread_lock_mutex(&objLock);
+    lock.lock();
     for (uint32_t i = 0; i < commandBufferCount; i++) {
         free_command_buffer(device, commandPool, *pCommandBuffers);
         pCommandBuffers++;
     }
-    loader_platform_thread_unlock_mutex(&objLock);
 }
 
 void explicit_DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
-    loader_platform_thread_lock_mutex(&objLock);
+    std::unique_lock<std::mutex> lock(global_lock);
     // A swapchain's images are implicitly deleted when the swapchain is deleted.
     // Remove this swapchain's images from our map of such images.
     unordered_map<uint64_t, OBJTRACK_NODE *>::iterator itr = swapchainImageMap.begin();
@@ -863,52 +848,51 @@
         }
     }
     destroy_swapchain_khr(device, swapchain);
-    loader_platform_thread_unlock_mutex(&objLock);
+    lock.unlock();
 
     get_dispatch_table(object_tracker_device_table_map, device)->DestroySwapchainKHR(device, swapchain, pAllocator);
 }
 
 void explicit_FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
-    loader_platform_thread_lock_mutex(&objLock);
+    std::unique_lock<std::mutex> lock(global_lock);
     validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
-    loader_platform_thread_unlock_mutex(&objLock);
+    lock.unlock();
 
     get_dispatch_table(object_tracker_device_table_map, device)->FreeMemory(device, mem, pAllocator);
 
-    loader_platform_thread_lock_mutex(&objLock);
+    lock.lock();
     destroy_device_memory(device, mem);
-    loader_platform_thread_unlock_mutex(&objLock);
 }
 
 VkResult explicit_FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
                                      const VkDescriptorSet *pDescriptorSets) {
-    loader_platform_thread_lock_mutex(&objLock);
+    std::unique_lock<std::mutex> lock(global_lock);
     validate_descriptor_pool(device, descriptorPool, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, false);
     validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
-    loader_platform_thread_unlock_mutex(&objLock);
+    lock.unlock();
     VkResult result = get_dispatch_table(object_tracker_device_table_map, device)
                           ->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
 
-    loader_platform_thread_lock_mutex(&objLock);
+    lock.lock();
     for (uint32_t i = 0; i < count; i++) {
         free_descriptor_set(device, descriptorPool, *pDescriptorSets++);
     }
-    loader_platform_thread_unlock_mutex(&objLock);
+    lock.unlock();
     return result;
 }
 
 void explicit_DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
     VkBool32 skipCall = VK_FALSE;
-    loader_platform_thread_lock_mutex(&objLock);
+    std::unique_lock<std::mutex> lock(global_lock);
     skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
     skipCall |= validate_descriptor_pool(device, descriptorPool, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, false);
-    loader_platform_thread_unlock_mutex(&objLock);
+    lock.unlock();
     if (skipCall) {
         return;
     }
     // A DescriptorPool's descriptor sets are implicitly deleted when the pool is deleted.
     // Remove this pool's descriptor sets from our descriptorSet map.
-    loader_platform_thread_lock_mutex(&objLock);
+    lock.lock();
     unordered_map<uint64_t, OBJTRACK_NODE *>::iterator itr = VkDescriptorSetMap.begin();
     while (itr != VkDescriptorSetMap.end()) {
         OBJTRACK_NODE *pNode = (*itr).second;
@@ -918,20 +902,20 @@
         }
     }
     destroy_descriptor_pool(device, descriptorPool);
-    loader_platform_thread_unlock_mutex(&objLock);
+    lock.unlock();
     get_dispatch_table(object_tracker_device_table_map, device)->DestroyDescriptorPool(device, descriptorPool, pAllocator);
 }
 
 void explicit_DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
     VkBool32 skipCall = VK_FALSE;
-    loader_platform_thread_lock_mutex(&objLock);
+    std::unique_lock<std::mutex> lock(global_lock);
     skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
     skipCall |= validate_command_pool(device, commandPool, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, false);
-    loader_platform_thread_unlock_mutex(&objLock);
+    lock.unlock();
     if (skipCall) {
         return;
     }
-    loader_platform_thread_lock_mutex(&objLock);
+    lock.lock();
     // A CommandPool's command buffers are implicitly deleted when the pool is deleted.
     // Remove this pool's cmdBuffers from our cmd buffer map.
     unordered_map<uint64_t, OBJTRACK_NODE *>::iterator itr = VkCommandBufferMap.begin();
@@ -945,15 +929,15 @@
         }
     }
     destroy_command_pool(device, commandPool);
-    loader_platform_thread_unlock_mutex(&objLock);
+    lock.unlock();
     get_dispatch_table(object_tracker_device_table_map, device)->DestroyCommandPool(device, commandPool, pAllocator);
 }
 
 VkResult explicit_GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
     VkBool32 skipCall = VK_FALSE;
-    loader_platform_thread_lock_mutex(&objLock);
+    std::unique_lock<std::mutex> lock(global_lock);
     skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
-    loader_platform_thread_unlock_mutex(&objLock);
+    lock.unlock();
     if (skipCall)
         return VK_ERROR_VALIDATION_FAILED_EXT;
 
@@ -961,11 +945,11 @@
                           ->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
 
     if (pSwapchainImages != NULL) {
-        loader_platform_thread_lock_mutex(&objLock);
+        lock.lock();
         for (uint32_t i = 0; i < *pCount; i++) {
             create_swapchain_image_obj(device, pSwapchainImages[i], swapchain);
         }
-        loader_platform_thread_unlock_mutex(&objLock);
+        lock.unlock();
     }
     return result;
 }
@@ -975,7 +959,7 @@
                                           const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
                                           VkPipeline *pPipelines) {
     VkBool32 skipCall = VK_FALSE;
-    loader_platform_thread_lock_mutex(&objLock);
+    std::unique_lock<std::mutex> lock(global_lock);
     skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
     if (pCreateInfos) {
         for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
@@ -1004,18 +988,18 @@
     if (pipelineCache) {
         skipCall |= validate_pipeline_cache(device, pipelineCache, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT, false);
     }
-    loader_platform_thread_unlock_mutex(&objLock);
+    lock.unlock();
     if (skipCall)
         return VK_ERROR_VALIDATION_FAILED_EXT;
     VkResult result = get_dispatch_table(object_tracker_device_table_map, device)
                           ->CreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
-    loader_platform_thread_lock_mutex(&objLock);
+    lock.lock();
     if (result == VK_SUCCESS) {
         for (uint32_t idx2 = 0; idx2 < createInfoCount; ++idx2) {
             create_pipeline(device, pPipelines[idx2], VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT);
         }
     }
-    loader_platform_thread_unlock_mutex(&objLock);
+    lock.unlock();
     return result;
 }
 
@@ -1024,7 +1008,7 @@
                                          const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
                                          VkPipeline *pPipelines) {
     VkBool32 skipCall = VK_FALSE;
-    loader_platform_thread_lock_mutex(&objLock);
+    std::unique_lock<std::mutex> lock(global_lock);
     skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
     if (pCreateInfos) {
         for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
@@ -1045,17 +1029,17 @@
     if (pipelineCache) {
         skipCall |= validate_pipeline_cache(device, pipelineCache, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT, false);
     }
-    loader_platform_thread_unlock_mutex(&objLock);
+    lock.unlock();
     if (skipCall)
         return VK_ERROR_VALIDATION_FAILED_EXT;
     VkResult result = get_dispatch_table(object_tracker_device_table_map, device)
                           ->CreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
-    loader_platform_thread_lock_mutex(&objLock);
+    lock.lock();
     if (result == VK_SUCCESS) {
         for (uint32_t idx1 = 0; idx1 < createInfoCount; ++idx1) {
             create_pipeline(device, pPipelines[idx1], VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT);
         }
     }
-    loader_platform_thread_unlock_mutex(&objLock);
+    lock.unlock();
     return result;
 }
diff --git a/vk-layer-generate.py b/vk-layer-generate.py
index 0f32807..16654e1 100755
--- a/vk-layer-generate.py
+++ b/vk-layer-generate.py
@@ -962,7 +962,7 @@
         gedi_txt.append('VkInstance instance,')
         gedi_txt.append('const VkAllocationCallbacks* pAllocator)')
         gedi_txt.append('{')
-        gedi_txt.append('    loader_platform_thread_lock_mutex(&objLock);')
+        gedi_txt.append('    std::unique_lock<std::mutex> lock(global_lock);')
         gedi_txt.append('    validate_instance(instance, instance, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, false);')
         gedi_txt.append('')
         gedi_txt.append('    destroy_instance(instance, instance);')
@@ -1010,14 +1010,9 @@
         gedi_txt.append('    layer_data_map.erase(pInstanceTable);')
         gedi_txt.append('')
         gedi_txt.append('    instanceExtMap.erase(pInstanceTable);')
-        gedi_txt.append('    loader_platform_thread_unlock_mutex(&objLock);')
+        gedi_txt.append('    lock.unlock();')
         # The loader holds a mutex that protects this from other threads
         gedi_txt.append('    object_tracker_instance_table_map.erase(key);')
-        gedi_txt.append('    if (object_tracker_instance_table_map.empty()) {')
-        gedi_txt.append('        // Release mutex when destroying last instance.')
-        gedi_txt.append('        loader_platform_thread_delete_mutex(&objLock);')
-        gedi_txt.append('        objLockInitialized = 0;')
-        gedi_txt.append('    }')
         gedi_txt.append('}')
         gedi_txt.append('')
         return "\n".join(gedi_txt)
@@ -1029,7 +1024,7 @@
         gedd_txt.append('VkDevice device,')
         gedd_txt.append('const VkAllocationCallbacks* pAllocator)')
         gedd_txt.append('{')
-        gedd_txt.append('    loader_platform_thread_lock_mutex(&objLock);')
+        gedd_txt.append('    std::unique_lock<std::mutex> lock(global_lock);')
         gedd_txt.append('    validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);')
         gedd_txt.append('')
         gedd_txt.append('    destroy_device(device, device);')
@@ -1053,7 +1048,7 @@
         gedd_txt.append("    // Clean up Queue's MemRef Linked Lists")
         gedd_txt.append('    destroyQueueMemRefLists();')
         gedd_txt.append('')
-        gedd_txt.append('    loader_platform_thread_unlock_mutex(&objLock);')
+        gedd_txt.append('    lock.unlock();')
         gedd_txt.append('')
         gedd_txt.append('    dispatch_key key = get_dispatch_key(device);')
         gedd_txt.append('    VkLayerDispatchTable *pDisp = get_dispatch_table(object_tracker_device_table_map, device);')
@@ -1287,11 +1282,12 @@
                 typ = proto.params[-1].ty.strip('*').replace('const ', '');
                 name = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', typ)
                 name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', name).lower()[3:]
-                create_line =  '    loader_platform_thread_lock_mutex(&objLock);\n'
-                create_line += '    if (result == VK_SUCCESS) {\n'
-                create_line += '        create_%s(%s, *%s, %s);\n' % (name, param0_name, proto.params[-1].name, obj_type_mapping[typ])
+                create_line =  '    {\n'
+                create_line += '        std::lock_guard<std::mutex> lock(global_lock);\n'
+                create_line += '        if (result == VK_SUCCESS) {\n'
+                create_line += '            create_%s(%s, *%s, %s);\n' % (name, param0_name, proto.params[-1].name, obj_type_mapping[typ])
+                create_line += '        }\n'
                 create_line += '    }\n'
-                create_line += '    loader_platform_thread_unlock_mutex(&objLock);\n'
             if 'FreeCommandBuffers' in proto.name:
                 typ = proto.params[-1].ty.strip('*').replace('const ', '');
                 name = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', typ)
@@ -1307,19 +1303,23 @@
                 name = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', typ)
                 name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', name).lower()[3:]
                 funcs.append('%s\n' % self.lineinfo.get())
-                destroy_line =  '    loader_platform_thread_lock_mutex(&objLock);\n'
-                destroy_line += '    destroy_%s(%s, %s);\n' % (name, param0_name, proto.params[-2].name)
-                destroy_line += '    loader_platform_thread_unlock_mutex(&objLock);\n'
+                destroy_line =  '    {\n'
+                destroy_line += '        std::lock_guard<std::mutex> lock(global_lock);\n'
+                destroy_line += '        destroy_%s(%s, %s);\n' % (name, param0_name, proto.params[-2].name)
+                destroy_line += '    }\n'
             indent = '    '
             if len(struct_uses) > 0:
                 using_line += '%sVkBool32 skipCall = VK_FALSE;\n' % (indent)
                 if not mutex_unlock:
-                    using_line += '%sloader_platform_thread_lock_mutex(&objLock);\n' % (indent)
+                    using_line += '%s{\n' % (indent)
+                    indent += '    '
+                    using_line += '%sstd::lock_guard<std::mutex> lock(global_lock);\n' % (indent)
                     mutex_unlock = True
                 using_line += '// objects to validate: %s\n' % str(sorted(struct_uses))
-                using_line += self._gen_obj_validate_code(struct_uses, obj_type_mapping, proto.name, valid_null_object_names, param0_name, '    ', '', 0)
+                using_line += self._gen_obj_validate_code(struct_uses, obj_type_mapping, proto.name, valid_null_object_names, param0_name, indent, '', 0)
             if mutex_unlock:
-                using_line += '%sloader_platform_thread_unlock_mutex(&objLock);\n' % (indent)
+                indent = indent[4:]
+                using_line += '%s}\n' % (indent)
             if len(struct_uses) > 0:
                 using_line += '    if (skipCall)\n'
                 if proto.ret == "VkBool32":