/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and/or associated documentation files (the "Materials"), to
 * deal in the Materials without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Materials, and to permit persons to whom the Materials
 * are furnished to do so, subject to the following conditions:
 *
 * The above copyright notice(s) and this permission notice shall be included
 * in all copies or substantial portions of the Materials.
 *
 * The Materials are Confidential Information as defined by the Khronos
 * Membership Agreement until designated non-confidential by Khronos, at which
 * point this condition clause shall be removed.
 *
 * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 *
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE
 * USE OR OTHER DEALINGS IN THE MATERIALS
 *
 * Author: Tobin Ehlis <tobine@google.com>
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <assert.h>

#include "vulkan/vulkan.h"
#include "vk_loader_platform.h"

#include <vector>
#include <unordered_map>

#include "vulkan/vk_layer.h"
#include "vk_layer_config.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_logging.h"
#include "vk_layer_extension_utils.h"

struct layer_data {
    bool wsi_enabled;

    layer_data() :
        wsi_enabled(false)
    {};
};

struct instExts {
    bool wsi_enabled;
    bool xlib_enabled;
    bool xcb_enabled;
    bool wayland_enabled;
    bool mir_enabled;
    bool android_enabled;
    bool win32_enabled;
};

static std::unordered_map<void*, struct instExts> instanceExtMap;
static std::unordered_map<void*, layer_data *> layer_data_map;
static device_table_map unique_objects_device_table_map;
static instance_table_map unique_objects_instance_table_map;
// Structure to wrap returned non-dispatchable objects to guarantee they have unique handles
// address of struct will be used as the unique handle
struct VkUniqueObject
{
    uint64_t actualObject;
};

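// Illustrative only: these two helpers are not called by the explicit_* functions below (which
// open-code the same casts), but they show the wrapping convention this layer relies on.
// The driver's handle is stashed in a heap-allocated VkUniqueObject, and the address of that
// wrapper is what the application sees as its handle.
template <typename HandleType>
static HandleType wrapUniqueHandle(HandleType driverHandle)
{
    VkUniqueObject* pWrapper = new VkUniqueObject();
    pWrapper->actualObject = (uint64_t)driverHandle; // remember the driver's handle
    return (HandleType)pWrapper;                     // hand the wrapper's address back to the app
}

template <typename HandleType>
static HandleType unwrapUniqueHandle(HandleType appHandle)
{
    // Recover the driver's handle before dispatching down the chain
    return (HandleType)((VkUniqueObject*)appHandle)->actualObject;
}
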
// Handle CreateInstance
static void createInstanceRegisterExtensions(const VkInstanceCreateInfo* pCreateInfo, VkInstance instance)
{
    uint32_t i;
    VkLayerInstanceDispatchTable *pDisp = get_dispatch_table(unique_objects_instance_table_map, instance);
    PFN_vkGetInstanceProcAddr gpa = pDisp->GetInstanceProcAddr;
    pDisp->GetPhysicalDeviceSurfaceSupportKHR = (PFN_vkGetPhysicalDeviceSurfaceSupportKHR) gpa(instance, "vkGetPhysicalDeviceSurfaceSupportKHR");
    pDisp->GetPhysicalDeviceSurfaceCapabilitiesKHR = (PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR) gpa(instance, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR");
    pDisp->GetPhysicalDeviceSurfaceFormatsKHR = (PFN_vkGetPhysicalDeviceSurfaceFormatsKHR) gpa(instance, "vkGetPhysicalDeviceSurfaceFormatsKHR");
    pDisp->GetPhysicalDeviceSurfacePresentModesKHR = (PFN_vkGetPhysicalDeviceSurfacePresentModesKHR) gpa(instance, "vkGetPhysicalDeviceSurfacePresentModesKHR");
#ifdef VK_USE_PLATFORM_WIN32_KHR
    pDisp->CreateWin32SurfaceKHR = (PFN_vkCreateWin32SurfaceKHR) gpa(instance, "vkCreateWin32SurfaceKHR");
    pDisp->GetPhysicalDeviceWin32PresentationSupportKHR = (PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR) gpa(instance, "vkGetPhysicalDeviceWin32PresentationSupportKHR");
#endif // VK_USE_PLATFORM_WIN32_KHR
#ifdef VK_USE_PLATFORM_XCB_KHR
    pDisp->CreateXcbSurfaceKHR = (PFN_vkCreateXcbSurfaceKHR) gpa(instance, "vkCreateXcbSurfaceKHR");
    pDisp->GetPhysicalDeviceXcbPresentationSupportKHR = (PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR) gpa(instance, "vkGetPhysicalDeviceXcbPresentationSupportKHR");
#endif // VK_USE_PLATFORM_XCB_KHR
#ifdef VK_USE_PLATFORM_XLIB_KHR
    pDisp->CreateXlibSurfaceKHR = (PFN_vkCreateXlibSurfaceKHR) gpa(instance, "vkCreateXlibSurfaceKHR");
    pDisp->GetPhysicalDeviceXlibPresentationSupportKHR = (PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR) gpa(instance, "vkGetPhysicalDeviceXlibPresentationSupportKHR");
#endif // VK_USE_PLATFORM_XLIB_KHR
#ifdef VK_USE_PLATFORM_MIR_KHR
    pDisp->CreateMirSurfaceKHR = (PFN_vkCreateMirSurfaceKHR) gpa(instance, "vkCreateMirSurfaceKHR");
    pDisp->GetPhysicalDeviceMirPresentationSupportKHR = (PFN_vkGetPhysicalDeviceMirPresentationSupportKHR) gpa(instance, "vkGetPhysicalDeviceMirPresentationSupportKHR");
#endif // VK_USE_PLATFORM_MIR_KHR
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
    pDisp->CreateWaylandSurfaceKHR = (PFN_vkCreateWaylandSurfaceKHR) gpa(instance, "vkCreateWaylandSurfaceKHR");
    pDisp->GetPhysicalDeviceWaylandPresentationSupportKHR = (PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR) gpa(instance, "vkGetPhysicalDeviceWaylandPresentationSupportKHR");
#endif // VK_USE_PLATFORM_WAYLAND_KHR
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    pDisp->CreateAndroidSurfaceKHR = (PFN_vkCreateAndroidSurfaceKHR) gpa(instance, "vkCreateAndroidSurfaceKHR");
#endif // VK_USE_PLATFORM_ANDROID_KHR

    instanceExtMap[pDisp] = {};
    for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SURFACE_EXTENSION_NAME) == 0)
            instanceExtMap[pDisp].wsi_enabled = true;
#ifdef VK_USE_PLATFORM_XLIB_KHR
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XLIB_SURFACE_EXTENSION_NAME) == 0)
            instanceExtMap[pDisp].xlib_enabled = true;
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XCB_SURFACE_EXTENSION_NAME) == 0)
            instanceExtMap[pDisp].xcb_enabled = true;
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME) == 0)
            instanceExtMap[pDisp].wayland_enabled = true;
#endif
#ifdef VK_USE_PLATFORM_MIR_KHR
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_MIR_SURFACE_EXTENSION_NAME) == 0)
            instanceExtMap[pDisp].mir_enabled = true;
#endif
#ifdef VK_USE_PLATFORM_ANDROID_KHR
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_ANDROID_SURFACE_EXTENSION_NAME) == 0)
            instanceExtMap[pDisp].android_enabled = true;
#endif
#ifdef VK_USE_PLATFORM_WIN32_KHR
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WIN32_SURFACE_EXTENSION_NAME) == 0)
            instanceExtMap[pDisp].win32_enabled = true;
#endif
    }
}

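// explicit_CreateInstance follows the standard layer-creation pattern: fetch the next
// GetInstanceProcAddr from the VK_LAYER_LINK_INFO chain, advance the chain for the layers
// beneath this one, call down to the next vkCreateInstance, then initialize this layer's
// instance dispatch table and record which WSI extensions the application enabled.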
VkResult
explicit_CreateInstance(
    const VkInstanceCreateInfo *pCreateInfo,
    const VkAllocationCallbacks *pAllocator,
    VkInstance *pInstance)
{
    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance) fpGetInstanceProcAddr(NULL, "vkCreateInstance");
    if (fpCreateInstance == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
    if (result != VK_SUCCESS) {
        return result;
    }

    initInstanceTable(*pInstance, fpGetInstanceProcAddr, unique_objects_instance_table_map);

    createInstanceRegisterExtensions(pCreateInfo, *pInstance);

    return result;
}

// Handle CreateDevice
static void createDeviceRegisterExtensions(const VkDeviceCreateInfo* pCreateInfo, VkDevice device)
{
    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkLayerDispatchTable *pDisp = get_dispatch_table(unique_objects_device_table_map, device);
    PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
    pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR) gpa(device, "vkCreateSwapchainKHR");
    pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR) gpa(device, "vkDestroySwapchainKHR");
    pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR) gpa(device, "vkGetSwapchainImagesKHR");
    pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR) gpa(device, "vkAcquireNextImageKHR");
    pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR) gpa(device, "vkQueuePresentKHR");
    my_device_data->wsi_enabled = false;
    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
            my_device_data->wsi_enabled = true;
    }
}

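// explicit_CreateDevice mirrors explicit_CreateInstance, but walks the device-level
// VK_LAYER_LINK_INFO chain and initializes this layer's device dispatch table instead.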
VkResult
explicit_CreateDevice(
    VkPhysicalDevice gpu,
    const VkDeviceCreateInfo *pCreateInfo,
    const VkAllocationCallbacks *pAllocator,
    VkDevice *pDevice)
{
    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice) fpGetInstanceProcAddr(NULL, "vkCreateDevice");
    if (fpCreateDevice == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
    if (result != VK_SUCCESS) {
        return result;
    }

    // Setup layer's device dispatch table
    initDeviceTable(*pDevice, fpGetDeviceProcAddr, unique_objects_device_table_map);

    createDeviceRegisterExtensions(pCreateInfo, *pDevice);

    return result;
}

VkResult explicit_QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence)
{
// UNWRAP USES:
// 0 : fence,VkFence
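// The pattern used below (and by the remaining entry points in this file):
//   1. Save each wrapped handle passed in by the app, then overwrite it in place with the
//      driver handle stored in VkUniqueObject::actualObject.
//   2. Call down the dispatch chain with the unwrapped handles.
//   3. Restore the saved wrapped handles so the caller's structures are left unchanged.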
    if (VK_NULL_HANDLE != fence) {
        fence = (VkFence)((VkUniqueObject*)fence)->actualObject;
    }
// waitSemaphoreCount : pSubmits[submitCount]->pWaitSemaphores,VkSemaphore
    std::vector<VkSemaphore> original_pWaitSemaphores = {};
// signalSemaphoreCount : pSubmits[submitCount]->pSignalSemaphores,VkSemaphore
    std::vector<VkSemaphore> original_pSignalSemaphores = {};
    if (pSubmits) {
        for (uint32_t index0=0; index0<submitCount; ++index0) {
            if (pSubmits[index0].pWaitSemaphores) {
                for (uint32_t index1=0; index1<pSubmits[index0].waitSemaphoreCount; ++index1) {
                    VkSemaphore** ppSemaphore = (VkSemaphore**)&(pSubmits[index0].pWaitSemaphores);
                    original_pWaitSemaphores.push_back(pSubmits[index0].pWaitSemaphores[index1]);
                    (*ppSemaphore)[index1] = (VkSemaphore)((VkUniqueObject*)pSubmits[index0].pWaitSemaphores[index1])->actualObject;
                }
            }
            if (pSubmits[index0].pSignalSemaphores) {
                for (uint32_t index1=0; index1<pSubmits[index0].signalSemaphoreCount; ++index1) {
                    VkSemaphore** ppSemaphore = (VkSemaphore**)&(pSubmits[index0].pSignalSemaphores);
                    original_pSignalSemaphores.push_back(pSubmits[index0].pSignalSemaphores[index1]);
                    (*ppSemaphore)[index1] = (VkSemaphore)((VkUniqueObject*)pSubmits[index0].pSignalSemaphores[index1])->actualObject;
                }
            }
        }
    }
    VkResult result = get_dispatch_table(unique_objects_device_table_map, queue)->QueueSubmit(queue, submitCount, pSubmits, fence);
    if (pSubmits) {
        for (uint32_t index0=0; index0<submitCount; ++index0) {
            if (pSubmits[index0].pWaitSemaphores) {
                for (uint32_t index1=0; index1<pSubmits[index0].waitSemaphoreCount; ++index1) {
                    VkSemaphore** ppSemaphore = (VkSemaphore**)&(pSubmits[index0].pWaitSemaphores);
                    (*ppSemaphore)[index1] = original_pWaitSemaphores[index1];
                }
            }
            if (pSubmits[index0].pSignalSemaphores) {
                for (uint32_t index1=0; index1<pSubmits[index0].signalSemaphoreCount; ++index1) {
                    VkSemaphore** ppSemaphore = (VkSemaphore**)&(pSubmits[index0].pSignalSemaphores);
                    (*ppSemaphore)[index1] = original_pSignalSemaphores[index1];
                }
            }
        }
    }
    return result;
}

VkResult explicit_QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence fence)
{
// UNWRAP USES:
// 0 : pBindInfo[bindInfoCount]->pBufferBinds[bufferBindCount]->buffer,VkBuffer, pBindInfo[bindInfoCount]->pBufferBinds[bufferBindCount]->pBinds[bindCount]->memory,VkDeviceMemory, pBindInfo[bindInfoCount]->pImageOpaqueBinds[imageOpaqueBindCount]->image,VkImage, pBindInfo[bindInfoCount]->pImageOpaqueBinds[imageOpaqueBindCount]->pBinds[bindCount]->memory,VkDeviceMemory, pBindInfo[bindInfoCount]->pImageBinds[imageBindCount]->image,VkImage, pBindInfo[bindInfoCount]->pImageBinds[imageBindCount]->pBinds[bindCount]->memory,VkDeviceMemory
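// Same save/unwrap/dispatch/restore pattern as explicit_QueueSubmit, applied here to the
// buffer, image, device-memory, and semaphore handles referenced by each VkBindSparseInfo.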
    std::vector<VkBuffer> original_buffer = {};
    std::vector<VkDeviceMemory> original_memory1 = {};
    std::vector<VkImage> original_image1 = {};
    std::vector<VkDeviceMemory> original_memory2 = {};
    std::vector<VkImage> original_image2 = {};
    std::vector<VkDeviceMemory> original_memory3 = {};
    std::vector<VkSemaphore> original_pWaitSemaphores = {};
    std::vector<VkSemaphore> original_pSignalSemaphores = {};
    if (pBindInfo) {
        for (uint32_t index0=0; index0<bindInfoCount; ++index0) {
            if (pBindInfo[index0].pBufferBinds) {
                for (uint32_t index1=0; index1<pBindInfo[index0].bufferBindCount; ++index1) {
                    if (pBindInfo[index0].pBufferBinds[index1].buffer) {
                        VkBuffer* pBuffer = (VkBuffer*)&(pBindInfo[index0].pBufferBinds[index1].buffer);
                        original_buffer.push_back(pBindInfo[index0].pBufferBinds[index1].buffer);
                        *(pBuffer) = (VkBuffer)((VkUniqueObject*)pBindInfo[index0].pBufferBinds[index1].buffer)->actualObject;
                    }
                    if (pBindInfo[index0].pBufferBinds[index1].pBinds) {
                        for (uint32_t index2=0; index2<pBindInfo[index0].pBufferBinds[index1].bindCount; ++index2) {
                            if (pBindInfo[index0].pBufferBinds[index1].pBinds[index2].memory) {
                                VkDeviceMemory* pDeviceMemory = (VkDeviceMemory*)&(pBindInfo[index0].pBufferBinds[index1].pBinds[index2].memory);
                                original_memory1.push_back(pBindInfo[index0].pBufferBinds[index1].pBinds[index2].memory);
                                *(pDeviceMemory) = (VkDeviceMemory)((VkUniqueObject*)pBindInfo[index0].pBufferBinds[index1].pBinds[index2].memory)->actualObject;
                            }
                        }
                    }
                }
            }
            if (pBindInfo[index0].pImageOpaqueBinds) {
                for (uint32_t index1=0; index1<pBindInfo[index0].imageOpaqueBindCount; ++index1) {
                    if (pBindInfo[index0].pImageOpaqueBinds[index1].image) {
                        VkImage* pImage = (VkImage*)&(pBindInfo[index0].pImageOpaqueBinds[index1].image);
                        original_image1.push_back(pBindInfo[index0].pImageOpaqueBinds[index1].image);
                        *(pImage) = (VkImage)((VkUniqueObject*)pBindInfo[index0].pImageOpaqueBinds[index1].image)->actualObject;
                    }
                    if (pBindInfo[index0].pImageOpaqueBinds[index1].pBinds) {
                        for (uint32_t index2=0; index2<pBindInfo[index0].pImageOpaqueBinds[index1].bindCount; ++index2) {
                            if (pBindInfo[index0].pImageOpaqueBinds[index1].pBinds[index2].memory) {
                                VkDeviceMemory* pDeviceMemory = (VkDeviceMemory*)&(pBindInfo[index0].pImageOpaqueBinds[index1].pBinds[index2].memory);
                                original_memory2.push_back(pBindInfo[index0].pImageOpaqueBinds[index1].pBinds[index2].memory);
                                *(pDeviceMemory) = (VkDeviceMemory)((VkUniqueObject*)pBindInfo[index0].pImageOpaqueBinds[index1].pBinds[index2].memory)->actualObject;
                            }
                        }
                    }
                }
            }
            if (pBindInfo[index0].pImageBinds) {
                for (uint32_t index1=0; index1<pBindInfo[index0].imageBindCount; ++index1) {
                    if (pBindInfo[index0].pImageBinds[index1].image) {
                        VkImage* pImage = (VkImage*)&(pBindInfo[index0].pImageBinds[index1].image);
                        original_image2.push_back(pBindInfo[index0].pImageBinds[index1].image);
                        *(pImage) = (VkImage)((VkUniqueObject*)pBindInfo[index0].pImageBinds[index1].image)->actualObject;
                    }
                    if (pBindInfo[index0].pImageBinds[index1].pBinds) {
                        for (uint32_t index2=0; index2<pBindInfo[index0].pImageBinds[index1].bindCount; ++index2) {
                            if (pBindInfo[index0].pImageBinds[index1].pBinds[index2].memory) {
                                VkDeviceMemory* pDeviceMemory = (VkDeviceMemory*)&(pBindInfo[index0].pImageBinds[index1].pBinds[index2].memory);
                                original_memory3.push_back(pBindInfo[index0].pImageBinds[index1].pBinds[index2].memory);
                                *(pDeviceMemory) = (VkDeviceMemory)((VkUniqueObject*)pBindInfo[index0].pImageBinds[index1].pBinds[index2].memory)->actualObject;
                            }
                        }
                    }
                }
            }
            if (pBindInfo[index0].pWaitSemaphores) {
                for (uint32_t index1=0; index1<pBindInfo[index0].waitSemaphoreCount; ++index1) {
                    VkSemaphore** ppSemaphore = (VkSemaphore**)&(pBindInfo[index0].pWaitSemaphores);
                    original_pWaitSemaphores.push_back(pBindInfo[index0].pWaitSemaphores[index1]);
                    (*ppSemaphore)[index1] = (VkSemaphore)((VkUniqueObject*)pBindInfo[index0].pWaitSemaphores[index1])->actualObject;
                }
            }
            if (pBindInfo[index0].pSignalSemaphores) {
                for (uint32_t index1=0; index1<pBindInfo[index0].signalSemaphoreCount; ++index1) {
                    VkSemaphore** ppSemaphore = (VkSemaphore**)&(pBindInfo[index0].pSignalSemaphores);
                    original_pSignalSemaphores.push_back(pBindInfo[index0].pSignalSemaphores[index1]);
                    (*ppSemaphore)[index1] = (VkSemaphore)((VkUniqueObject*)pBindInfo[index0].pSignalSemaphores[index1])->actualObject;
                }
            }
        }
    }
    if (VK_NULL_HANDLE != fence) {
        fence = (VkFence)((VkUniqueObject*)fence)->actualObject;
    }
    VkResult result = get_dispatch_table(unique_objects_device_table_map, queue)->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
    if (pBindInfo) {
        for (uint32_t index0=0; index0<bindInfoCount; ++index0) {
            if (pBindInfo[index0].pBufferBinds) {
                for (uint32_t index1=0; index1<pBindInfo[index0].bufferBindCount; ++index1) {
                    if (pBindInfo[index0].pBufferBinds[index1].buffer) {
                        VkBuffer* pBuffer = (VkBuffer*)&(pBindInfo[index0].pBufferBinds[index1].buffer);
                        *(pBuffer) = original_buffer[index1];
                    }
                    if (pBindInfo[index0].pBufferBinds[index1].pBinds) {
                        for (uint32_t index2=0; index2<pBindInfo[index0].pBufferBinds[index1].bindCount; ++index2) {
                            if (pBindInfo[index0].pBufferBinds[index1].pBinds[index2].memory) {
                                VkDeviceMemory* pDeviceMemory = (VkDeviceMemory*)&(pBindInfo[index0].pBufferBinds[index1].pBinds[index2].memory);
                                *(pDeviceMemory) = original_memory1[index2];
                            }
                        }
                    }
                }
            }
            if (pBindInfo[index0].pImageOpaqueBinds) {
                for (uint32_t index1=0; index1<pBindInfo[index0].imageOpaqueBindCount; ++index1) {
                    if (pBindInfo[index0].pImageOpaqueBinds[index1].image) {
                        VkImage* pImage = (VkImage*)&(pBindInfo[index0].pImageOpaqueBinds[index1].image);
                        *(pImage) = original_image1[index1];
                    }
                    if (pBindInfo[index0].pImageOpaqueBinds[index1].pBinds) {
                        for (uint32_t index2=0; index2<pBindInfo[index0].pImageOpaqueBinds[index1].bindCount; ++index2) {
                            if (pBindInfo[index0].pImageOpaqueBinds[index1].pBinds[index2].memory) {
                                VkDeviceMemory* pDeviceMemory = (VkDeviceMemory*)&(pBindInfo[index0].pImageOpaqueBinds[index1].pBinds[index2].memory);
                                *(pDeviceMemory) = original_memory2[index2];
                            }
                        }
                    }
                }
            }
            if (pBindInfo[index0].pImageBinds) {
                for (uint32_t index1=0; index1<pBindInfo[index0].imageBindCount; ++index1) {
                    if (pBindInfo[index0].pImageBinds[index1].image) {
                        VkImage* pImage = (VkImage*)&(pBindInfo[index0].pImageBinds[index1].image);
                        *(pImage) = original_image2[index1];
                    }
                    if (pBindInfo[index0].pImageBinds[index1].pBinds) {
                        for (uint32_t index2=0; index2<pBindInfo[index0].pImageBinds[index1].bindCount; ++index2) {
                            if (pBindInfo[index0].pImageBinds[index1].pBinds[index2].memory) {
                                VkDeviceMemory* pDeviceMemory = (VkDeviceMemory*)&(pBindInfo[index0].pImageBinds[index1].pBinds[index2].memory);
                                *(pDeviceMemory) = original_memory3[index2];
                            }
                        }
                    }
                }
            }
            if (pBindInfo[index0].pWaitSemaphores) {
                for (uint32_t index1=0; index1<pBindInfo[index0].waitSemaphoreCount; ++index1) {
                    VkSemaphore** ppSemaphore = (VkSemaphore**)&(pBindInfo[index0].pWaitSemaphores);
                    (*ppSemaphore)[index1] = original_pWaitSemaphores[index1];
                }
            }
            if (pBindInfo[index0].pSignalSemaphores) {
                for (uint32_t index1=0; index1<pBindInfo[index0].signalSemaphoreCount; ++index1) {
                    VkSemaphore** ppSemaphore = (VkSemaphore**)&(pBindInfo[index0].pSignalSemaphores);
                    (*ppSemaphore)[index1] = original_pSignalSemaphores[index1];
                }
            }
        }
    }
    return result;
}

VkResult explicit_CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines)
{
// UNWRAP USES:
// 0 : pipelineCache,VkPipelineCache, pCreateInfos[createInfoCount]->stage[0]->module,VkShaderModule, pCreateInfos[createInfoCount]->layout,VkPipelineLayout, pCreateInfos[createInfoCount]->basePipelineHandle,VkPipeline
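// In addition to unwrapping the handles referenced by each create info, the pipelines returned
// by the driver are themselves wrapped in freshly allocated VkUniqueObject instances before
// being handed back to the application (see the VK_SUCCESS block at the end of this function).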
    if (VK_NULL_HANDLE != pipelineCache) {
        pipelineCache = (VkPipelineCache)((VkUniqueObject*)pipelineCache)->actualObject;
    }
    std::vector<VkShaderModule> original_module = {};
    std::vector<VkPipelineLayout> original_layout = {};
    std::vector<VkPipeline> original_basePipelineHandle = {};
    if (pCreateInfos) {
        for (uint32_t index0=0; index0<createInfoCount; ++index0) {
            if (pCreateInfos[index0].stage.module) {
                VkShaderModule* pShaderModule = (VkShaderModule*)&(pCreateInfos[index0].stage.module);
                original_module.push_back(pCreateInfos[index0].stage.module);
                *(pShaderModule) = (VkShaderModule)((VkUniqueObject*)pCreateInfos[index0].stage.module)->actualObject;
            }
            if (pCreateInfos[index0].layout) {
                VkPipelineLayout* pPipelineLayout = (VkPipelineLayout*)&(pCreateInfos[index0].layout);
                original_layout.push_back(pCreateInfos[index0].layout);
                *(pPipelineLayout) = (VkPipelineLayout)((VkUniqueObject*)pCreateInfos[index0].layout)->actualObject;
            }
            if (pCreateInfos[index0].basePipelineHandle) {
                VkPipeline* pPipeline = (VkPipeline*)&(pCreateInfos[index0].basePipelineHandle);
                original_basePipelineHandle.push_back(pCreateInfos[index0].basePipelineHandle);
                *(pPipeline) = (VkPipeline)((VkUniqueObject*)pCreateInfos[index0].basePipelineHandle)->actualObject;
            }
        }
    }
    VkResult result = get_dispatch_table(unique_objects_device_table_map, device)->CreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
    if (pCreateInfos) {
        for (uint32_t index0=0; index0<createInfoCount; ++index0) {
            if (pCreateInfos[index0].stage.module) {
                VkShaderModule* pShaderModule = (VkShaderModule*)&(pCreateInfos[index0].stage.module);
                *(pShaderModule) = original_module[index0];
            }
            if (pCreateInfos[index0].layout) {
                VkPipelineLayout* pPipelineLayout = (VkPipelineLayout*)&(pCreateInfos[index0].layout);
                *(pPipelineLayout) = original_layout[index0];
            }
            if (pCreateInfos[index0].basePipelineHandle) {
                VkPipeline* pPipeline = (VkPipeline*)&(pCreateInfos[index0].basePipelineHandle);
                *(pPipeline) = original_basePipelineHandle[index0];
            }
        }
    }
    if (VK_SUCCESS == result) {
        VkUniqueObject* pUO = NULL;
        for (uint32_t i=0; i<createInfoCount; ++i) {
            pUO = new VkUniqueObject();
            pUO->actualObject = (uint64_t)pPipelines[i];
            pPipelines[i] = (VkPipeline)pUO;
        }
    }
    return result;
}

VkResult explicit_CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines)
{
// UNWRAP USES:
// 0 : pipelineCache,VkPipelineCache, pCreateInfos[createInfoCount]->pStages[stageCount]->module,VkShaderModule, pCreateInfos[createInfoCount]->layout,VkPipelineLayout, pCreateInfos[createInfoCount]->renderPass,VkRenderPass, pCreateInfos[createInfoCount]->basePipelineHandle,VkPipeline
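// Same handling as explicit_CreateComputePipelines, with the per-stage shader modules and the
// renderPass unwrapped as well; successfully created pipelines are likewise wrapped on return.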
    if (VK_NULL_HANDLE != pipelineCache) {
        pipelineCache = (VkPipelineCache)((VkUniqueObject*)pipelineCache)->actualObject;
    }
    std::vector<VkShaderModule> original_module = {};
    std::vector<VkPipelineLayout> original_layout = {};
    std::vector<VkRenderPass> original_renderPass = {};
    std::vector<VkPipeline> original_basePipelineHandle = {};
    if (pCreateInfos) {
        for (uint32_t index0=0; index0<createInfoCount; ++index0) {
            if (pCreateInfos[index0].pStages) {
                for (uint32_t index1=0; index1<pCreateInfos[index0].stageCount; ++index1) {
                    if (pCreateInfos[index0].pStages[index1].module) {
                        VkShaderModule* pShaderModule = (VkShaderModule*)&(pCreateInfos[index0].pStages[index1].module);
                        original_module.push_back(pCreateInfos[index0].pStages[index1].module);
                        *(pShaderModule) = (VkShaderModule)((VkUniqueObject*)pCreateInfos[index0].pStages[index1].module)->actualObject;
                    }
                }
            }
            if (pCreateInfos[index0].layout) {
                VkPipelineLayout* pPipelineLayout = (VkPipelineLayout*)&(pCreateInfos[index0].layout);
                original_layout.push_back(pCreateInfos[index0].layout);
                *(pPipelineLayout) = (VkPipelineLayout)((VkUniqueObject*)pCreateInfos[index0].layout)->actualObject;
            }
            if (pCreateInfos[index0].renderPass) {
                VkRenderPass* pRenderPass = (VkRenderPass*)&(pCreateInfos[index0].renderPass);
                original_renderPass.push_back(pCreateInfos[index0].renderPass);
                *(pRenderPass) = (VkRenderPass)((VkUniqueObject*)pCreateInfos[index0].renderPass)->actualObject;
            }
            if (pCreateInfos[index0].basePipelineHandle) {
                VkPipeline* pPipeline = (VkPipeline*)&(pCreateInfos[index0].basePipelineHandle);
                original_basePipelineHandle.push_back(pCreateInfos[index0].basePipelineHandle);
                *(pPipeline) = (VkPipeline)((VkUniqueObject*)pCreateInfos[index0].basePipelineHandle)->actualObject;
            }
        }
    }
    VkResult result = get_dispatch_table(unique_objects_device_table_map, device)->CreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
    if (pCreateInfos) {
        for (uint32_t index0=0; index0<createInfoCount; ++index0) {
            if (pCreateInfos[index0].pStages) {
                for (uint32_t index1=0; index1<pCreateInfos[index0].stageCount; ++index1) {
                    if (pCreateInfos[index0].pStages[index1].module) {
                        VkShaderModule* pShaderModule = (VkShaderModule*)&(pCreateInfos[index0].pStages[index1].module);
                        *(pShaderModule) = original_module[index1];
                    }
                }
            }
            if (pCreateInfos[index0].layout) {
                VkPipelineLayout* pPipelineLayout = (VkPipelineLayout*)&(pCreateInfos[index0].layout);
                *(pPipelineLayout) = original_layout[index0];
            }
            if (pCreateInfos[index0].renderPass) {
                VkRenderPass* pRenderPass = (VkRenderPass*)&(pCreateInfos[index0].renderPass);
                *(pRenderPass) = original_renderPass[index0];
            }
            if (pCreateInfos[index0].basePipelineHandle) {
                VkPipeline* pPipeline = (VkPipeline*)&(pCreateInfos[index0].basePipelineHandle);
                *(pPipeline) = original_basePipelineHandle[index0];
            }
        }
    }
    if (VK_SUCCESS == result) {
        VkUniqueObject* pUO = NULL;
        for (uint32_t i=0; i<createInfoCount; ++i) {
            pUO = new VkUniqueObject();
            pUO->actualObject = (uint64_t)pPipelines[i];
            pPipelines[i] = (VkPipeline)pUO;
        }
    }
    return result;
}

VkResult explicit_GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pSwapchainImageCount, VkImage* pSwapchainImages)
{
// UNWRAP USES:
// 0 : swapchain,VkSwapchainKHR, pSwapchainImages,VkImage
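// The wrapped swapchain is unwrapped before calling down the chain; each image the driver
// returns is then wrapped like any other non-dispatchable handle produced by this layer.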
    if (VK_NULL_HANDLE != swapchain) {
        swapchain = (VkSwapchainKHR)((VkUniqueObject*)swapchain)->actualObject;
    }
    VkResult result = get_dispatch_table(unique_objects_device_table_map, device)->GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);
    // TODO : Need to add corresponding code to delete these images
    if (VK_SUCCESS == result) {
        if ((*pSwapchainImageCount > 0) && pSwapchainImages) {
            std::vector<VkUniqueObject*> uniqueImages = {};
            for (uint32_t i=0; i<*pSwapchainImageCount; ++i) {
                uniqueImages.push_back(new VkUniqueObject());
                uniqueImages[i]->actualObject = (uint64_t)pSwapchainImages[i];
                pSwapchainImages[i] = (VkImage)uniqueImages[i];
            }
        }
    }
    return result;
}