| Jeff Bolz | fdd0d85 | 2019-02-03 21:55:12 -0600 | [diff] [blame] | 1 | /* Copyright (c) 2015-2019 The Khronos Group Inc. |
| 2 | * Copyright (c) 2015-2019 Valve Corporation |
| 3 | * Copyright (c) 2015-2019 LunarG, Inc. |
| 4 | * Copyright (C) 2015-2019 Google Inc. |
| Mark Lobodzinski | 63902f0 | 2018-09-21 10:36:44 -0600 | [diff] [blame] | 5 | * |
| 6 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 7 | * you may not use this file except in compliance with the License. |
| 8 | * You may obtain a copy of the License at |
| 9 | * |
| 10 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 11 | * |
| 12 | * Unless required by applicable law or agreed to in writing, software |
| 13 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 15 | * See the License for the specific language governing permissions and |
| 16 | * limitations under the License. |
| 17 | * |
| 18 | * Author: Mark Lobodzinski <mark@lunarg.com> |
| 19 | * Author: Jon Ashburn <jon@lunarg.com> |
| 20 | * Author: Tobin Ehlis <tobine@google.com> |
| 21 | */ |
| 22 | |
// shared_mutex support added in MSVC 2015 update 2
#if defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918
#include <shared_mutex>
// Reader/writer lock: many concurrent readers, one exclusive writer.
using object_lifetime_mutex_t = std::shared_mutex;
using read_object_lifetime_mutex_t = std::shared_lock<object_lifetime_mutex_t>;
using write_object_lifetime_mutex_t = std::unique_lock<object_lifetime_mutex_t>;
#else
// Fallback for toolchains without shared_mutex: a plain mutex, so "read" locks
// are exclusive too -- still correct, just less concurrent.
using object_lifetime_mutex_t = std::mutex;
using read_object_lifetime_mutex_t = std::unique_lock<object_lifetime_mutex_t>;
using write_object_lifetime_mutex_t = std::unique_lock<object_lifetime_mutex_t>;
#endif
| 34 | |
// Suppress unused warning on Linux
#if defined(__GNUC__)
#define DECORATE_UNUSED __attribute__((unused))
#else
#define DECORATE_UNUSED
#endif

// Layer-internal (non-spec, "UNASSIGNED") message IDs reported by the object
// tracker. Not every translation unit that includes this header uses all of
// them, hence DECORATE_UNUSED.
// clang-format off
static const char DECORATE_UNUSED *kVUID_ObjectTracker_Info = "UNASSIGNED-ObjectTracker-Info";
static const char DECORATE_UNUSED *kVUID_ObjectTracker_InternalError = "UNASSIGNED-ObjectTracker-InternalError";
static const char DECORATE_UNUSED *kVUID_ObjectTracker_ObjectLeak = "UNASSIGNED-ObjectTracker-ObjectLeak";
static const char DECORATE_UNUSED *kVUID_ObjectTracker_UnknownObject = "UNASSIGNED-ObjectTracker-UnknownObject";
// clang-format on

// Macro is local to this header; undefine so it cannot leak to includers.
#undef DECORATE_UNUSED

// Global counter defined elsewhere -- NOTE(review): presumably a monotonically
// increasing index for tracked objects; confirm at its definition site.
extern uint64_t object_track_index;
| 52 | |
// Object Status -- used to track state of individual objects
typedef VkFlags ObjectStatusFlags;
enum ObjectStatusFlagBits {
    OBJSTATUS_NONE = 0x00000000,                      // No status is set
    OBJSTATUS_FENCE_IS_SUBMITTED = 0x00000001,        // Fence has been submitted
    OBJSTATUS_VIEWPORT_BOUND = 0x00000002,            // Viewport state object has been bound
    OBJSTATUS_RASTER_BOUND = 0x00000004,              // Raster state object has been bound
    OBJSTATUS_COLOR_BLEND_BOUND = 0x00000008,         // Color blend state object has been bound
    OBJSTATUS_DEPTH_STENCIL_BOUND = 0x00000010,       // Depth/stencil state object has been bound
    OBJSTATUS_GPU_MEM_MAPPED = 0x00000020,            // Memory object is currently mapped
    OBJSTATUS_COMMAND_BUFFER_SECONDARY = 0x00000040,  // Command Buffer is of type SECONDARY
    OBJSTATUS_CUSTOM_ALLOCATOR = 0x00000080,          // Allocated with custom allocator
};
| 66 | |
// Object and state information structure: one node per tracked Vulkan handle.
struct ObjTrackState {
    uint64_t handle;                                               // Object handle (new)
    VulkanObjectType object_type;                                  // Object type identifier
    ObjectStatusFlags status;                                      // Object state (ObjectStatusFlagBits)
    uint64_t parent_object;                                        // Parent object
    std::unique_ptr<std::unordered_set<uint64_t> > child_objects;  // Child objects (used for VkDescriptorPool only)
};
| 75 | |
// Track Queue information
struct ObjTrackQueueInfo {
    uint32_t queue_node_index;  // NOTE(review): presumably the queue family index -- confirm against AddQueueInfo callers
    VkQueue queue;              // Dispatchable queue handle
};

// Map from 64-bit handle to its tracking node; one such map exists per object type.
typedef std::unordered_map<uint64_t, ObjTrackState *> object_map_type;
| 83 | |
// Validation object that records every Vulkan handle created through the API so
// it can report invalid/unknown handles, handles used on the wrong device,
// create/destroy allocator mismatches, and (via the undestroyed-object paths)
// leaks at teardown.
class ObjectLifetimes : public ValidationObject {
  public:
    // Override chassis read/write locks for this validation object
    // This override takes a deferred lock. i.e. it is not acquired.
    // This class does its own locking with a shared mutex.
    virtual std::unique_lock<std::mutex> write_lock() {
        return std::unique_lock<std::mutex>(validation_object_mutex, std::defer_lock);
    }

    // Guards the tracking maps below; shared (reader/writer) where the toolchain supports it.
    object_lifetime_mutex_t object_lifetime_mutex;
    write_object_lifetime_mutex_t write_shared_lock() { return write_object_lifetime_mutex_t(object_lifetime_mutex); }
    read_object_lifetime_mutex_t read_shared_lock() { return read_object_lifetime_mutex_t(object_lifetime_mutex); }

    // Count of live tracked objects, per type and overall.
    uint64_t num_objects[kVulkanObjectTypeMax + 1];
    uint64_t num_total_objects;
    // Vector of unordered_maps per object type to hold ObjTrackState info
    std::vector<object_map_type> object_map;
    // Special-case map for swapchain images
    std::unordered_map<uint64_t, ObjTrackState *> swapchainImageMap;
    // Map of queue information structures, one per queue
    std::unordered_map<VkQueue, ObjTrackQueueInfo *> queue_info_map;

    std::vector<VkQueueFamilyProperties> queue_family_properties;

    // Constructor for object lifetime tracking
    ObjectLifetimes() : num_objects{}, num_total_objects(0), object_map{} { object_map.resize(kVulkanObjectTypeMax + 1); }

    // Declarations only; definitions live in the corresponding .cpp.
    bool DeviceReportUndestroyedObjects(VkDevice device, VulkanObjectType object_type, const std::string &error_code);
    void DeviceDestroyUndestroyedObjects(VkDevice device, VulkanObjectType object_type);
    void CreateQueue(VkDevice device, VkQueue vkObj);
    void AddQueueInfo(VkDevice device, uint32_t queue_node_index, VkQueue queue);
    void ValidateQueueFlags(VkQueue queue, const char *function);
    void AllocateCommandBuffer(VkDevice device, const VkCommandPool command_pool, const VkCommandBuffer command_buffer,
                               VkCommandBufferLevel level);
    void AllocateDescriptorSet(VkDevice device, VkDescriptorPool descriptor_pool, VkDescriptorSet descriptor_set);
    void CreateSwapchainImageObject(VkDevice dispatchable_object, VkImage swapchain_image, VkSwapchainKHR swapchain);
    bool ReportUndestroyedObjects(VkDevice device, const std::string &error_code);
    void DestroyUndestroyedObjects(VkDevice device);
    bool ValidateDeviceObject(const VulkanTypedHandle &device_typed, const char *invalid_handle_code,
                              const char *wrong_device_code);
    void DestroyQueueDataStructures(VkDevice device);
    bool ValidateCommandBuffer(VkDevice device, VkCommandPool command_pool, VkCommandBuffer command_buffer);
    bool ValidateDescriptorSet(VkDevice device, VkDescriptorPool descriptor_pool, VkDescriptorSet descriptor_set);
    bool ValidateSamplerObjects(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo);
    template <typename DispObj>
    bool ValidateDescriptorWrite(DispObj disp, VkWriteDescriptorSet const *desc, bool isPush);

    // Find this layer's ObjectLifetimes instance within a chassis dispatch
    // vector; returns nullptr when the object tracker is not in the chain.
    ObjectLifetimes *GetObjectLifetimeData(std::vector<ValidationObject *> &object_dispatch) {
        for (auto layer_object : object_dispatch) {
            if (layer_object->container_type == LayerObjectTypeObjectTracker) {
                return (reinterpret_cast<ObjectLifetimes *>(layer_object));
            }
        }
        return nullptr;
    };

    // Returns true ("skip" the call) only when a validation error is logged;
    // returns false when the object is valid, null-and-allowed, or the error
    // is suppressed via kVUIDUndefined.
    //   null_allowed        -- VK_NULL_HANDLE is legal for this parameter
    //   invalid_handle_code -- VUID reported when the handle is unknown everywhere
    //   wrong_device_code   -- VUID reported when the handle belongs to another device
    template <typename T1, typename T2>
    bool ValidateObject(T1 dispatchable_object, T2 object, VulkanObjectType object_type, bool null_allowed,
                        const char *invalid_handle_code, const char *wrong_device_code) {
        if (null_allowed && (object == VK_NULL_HANDLE)) {
            return false;
        }
        auto object_handle = HandleToUint64(object);

        // Devices have their own dedicated validation path.
        if (object_type == kVulkanObjectTypeDevice) {
            return ValidateDeviceObject(VulkanTypedHandle(object, object_type), invalid_handle_code, wrong_device_code);
        }

        VkDebugReportObjectTypeEXT debug_object_type = get_debug_report_enum[object_type];

        // Look for object in object map
        if (object_map[object_type].find(object_handle) == object_map[object_type].end()) {
            // If object is an image, also look for it in the swapchain image map
            if ((object_type != kVulkanObjectTypeImage) || (swapchainImageMap.find(object_handle) == swapchainImageMap.end())) {
                // Object not found, look for it in other device object maps
                for (auto other_device_data : layer_data_map) {
                    for (auto layer_object_data : other_device_data.second->object_dispatch) {
                        if (layer_object_data->container_type == LayerObjectTypeObjectTracker) {
                            auto object_lifetime_data = reinterpret_cast<ObjectLifetimes *>(layer_object_data);
                            if (object_lifetime_data && (object_lifetime_data != this)) {
                                if (object_lifetime_data->object_map[object_type].find(object_handle) !=
                                        object_lifetime_data->object_map[object_type].end() ||
                                    (object_type == kVulkanObjectTypeImage &&
                                     object_lifetime_data->swapchainImageMap.find(object_handle) !=
                                         object_lifetime_data->swapchainImageMap.end())) {
                                    // Object found on other device, report an error if object has a device parent error code
                                    if ((wrong_device_code != kVUIDUndefined) && (object_type != kVulkanObjectTypeSurfaceKHR)) {
                                        return log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, debug_object_type, object_handle,
                                                       wrong_device_code,
                                                       "Object 0x%" PRIxLEAST64
                                                       " was not created, allocated or retrieved from the correct device.",
                                                       object_handle);
                                    } else {
                                        return false;
                                    }
                                }
                            }
                        }
                    }
                }
                // Report an error if object was not found anywhere
                return log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, debug_object_type, object_handle, invalid_handle_code,
                               "Invalid %s Object 0x%" PRIxLEAST64 ".", object_string[object_type], object_handle);
            }
        }
        return false;
    }

    // Begin tracking a newly created/allocated handle. No-op if the handle is
    // already tracked. Records whether custom allocation callbacks were used so
    // ValidateDestroyObject can detect create/destroy allocator mismatches.
    template <typename T1, typename T2>
    void CreateObject(T1 dispatchable_object, T2 object, VulkanObjectType object_type, const VkAllocationCallbacks *pAllocator) {
        uint64_t object_handle = HandleToUint64(object);
        bool custom_allocator = (pAllocator != nullptr);
        if (!object_map[object_type].count(object_handle)) {
            ObjTrackState *pNewObjNode = new ObjTrackState;
            pNewObjNode->object_type = object_type;
            pNewObjNode->status = custom_allocator ? OBJSTATUS_CUSTOM_ALLOCATOR : OBJSTATUS_NONE;
            pNewObjNode->handle = object_handle;

            object_map[object_type][object_handle] = pNewObjNode;
            num_objects[object_type]++;
            num_total_objects++;

            // Descriptor pools additionally track the sets allocated from them.
            if (object_type == kVulkanObjectTypeDescriptorPool) {
                pNewObjNode->child_objects.reset(new std::unordered_set<uint64_t>);
            }
        }
    }

    // Remove a handle from the tracking map and decrement the counters.
    // Preconditions (asserted): handle is non-null and currently tracked.
    template <typename T1>
    void DestroyObjectSilently(T1 object, VulkanObjectType object_type) {
        auto object_handle = HandleToUint64(object);
        assert(object_handle != VK_NULL_HANDLE);

        auto item = object_map[object_type].find(object_handle);
        assert(item != object_map[object_type].end());

        ObjTrackState *pNode = item->second;
        assert(num_total_objects > 0);

        num_total_objects--;
        assert(num_objects[pNode->object_type] > 0);

        num_objects[pNode->object_type]--;

        delete pNode;
        object_map[object_type].erase(item);
    }

    // Destruction hook: stop tracking 'object'. Unlike DestroyObjectSilently,
    // this tolerates null or never-tracked handles (silently does nothing).
    template <typename T1, typename T2>
    void RecordDestroyObject(T1 dispatchable_object, T2 object, VulkanObjectType object_type) {
        auto object_handle = HandleToUint64(object);
        if (object_handle != VK_NULL_HANDLE) {
            auto item = object_map[object_type].find(object_handle);
            if (item != object_map[object_type].end()) {
                DestroyObjectSilently(object, object_type);
            }
        }
    }

    // Validate allocator consistency at destroy time. Returns true when a
    // mismatch error was logged (custom allocator at create but not destroy, or
    // vice versa); passing kVUIDUndefined for a code suppresses that check.
    // This check only verifies presence/absence of callbacks -- it cannot
    // verify the callbacks themselves are compatible.
    template <typename T1, typename T2>
    bool ValidateDestroyObject(T1 dispatchable_object, T2 object, VulkanObjectType object_type,
                               const VkAllocationCallbacks *pAllocator, const char *expected_custom_allocator_code,
                               const char *expected_default_allocator_code) {
        auto object_handle = HandleToUint64(object);
        bool custom_allocator = pAllocator != nullptr;
        VkDebugReportObjectTypeEXT debug_object_type = get_debug_report_enum[object_type];
        bool skip = false;

        if (object_handle != VK_NULL_HANDLE) {
            auto item = object_map[object_type].find(object_handle);
            if (item != object_map[object_type].end()) {
                ObjTrackState *pNode = item->second;
                auto allocated_with_custom = (pNode->status & OBJSTATUS_CUSTOM_ALLOCATOR) ? true : false;
                if (allocated_with_custom && !custom_allocator && expected_custom_allocator_code != kVUIDUndefined) {
                    // This check only verifies that custom allocation callbacks were provided to both Create and Destroy calls,
                    // it cannot verify that these allocation callbacks are compatible with each other.
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, debug_object_type, object_handle,
                                    expected_custom_allocator_code,
                                    "Custom allocator not specified while destroying %s obj 0x%" PRIxLEAST64
                                    " but specified at creation.",
                                    object_string[object_type], object_handle);
                } else if (!allocated_with_custom && custom_allocator && expected_default_allocator_code != kVUIDUndefined) {
                    skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, debug_object_type, object_handle,
                                    expected_default_allocator_code,
                                    "Custom allocator specified while destroying %s obj 0x%" PRIxLEAST64
                                    " but not specified at creation.",
                                    object_string[object_type], object_handle);
                }
            }
        }
        return skip;
    }

    // NOTE(review): this include is intentionally placed inside the class body --
    // presumably object_tracker.h declares the generated per-entry-point member
    // functions for this class; confirm before moving it to file scope.
#include "object_tracker.h"
};