/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "VulkanManager.h"

#include "Properties.h"
#include "RenderThread.h"
#include "renderstate/RenderState.h"
#include "utils/FatVector.h"

#include <GrBackendSurface.h>
#include <GrContext.h>
#include <GrTypes.h>
#include <vk/GrVkExtensions.h>
#include <vk/GrVkTypes.h>

namespace android {
namespace uirenderer {
namespace renderthread {

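// These macros cache Vulkan entry points in member function pointers so later calls skip the
// loader's dispatch overhead. For illustration, GET_INST_PROC(CreateDevice) expands to:
//   mCreateDevice = (PFN_vkCreateDevice)vkGetInstanceProcAddr(mInstance, "vkCreateDevice");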
#define GET_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(VK_NULL_HANDLE, "vk" #F)
#define GET_INST_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(mInstance, "vk" #F)
#define GET_DEV_PROC(F) m##F = (PFN_vk##F)vkGetDeviceProcAddr(mDevice, "vk" #F)

VulkanManager::VulkanManager(RenderThread& thread) : mRenderThread(thread) {}

void VulkanManager::destroy() {
    mRenderThread.setGrContext(nullptr);

    // We don't need to explicitly free the command buffer since it automatically gets freed when
    // we delete the VkCommandPool below.
    mDummyCB = VK_NULL_HANDLE;

    if (VK_NULL_HANDLE != mCommandPool) {
        mDestroyCommandPool(mDevice, mCommandPool, nullptr);
        mCommandPool = VK_NULL_HANDLE;
    }

    if (mDevice != VK_NULL_HANDLE) {
        mDeviceWaitIdle(mDevice);
        mDestroyDevice(mDevice, nullptr);
    }

    if (mInstance != VK_NULL_HANDLE) {
        mDestroyInstance(mInstance, nullptr);
    }

    mGraphicsQueue = VK_NULL_HANDLE;
    mPresentQueue = VK_NULL_HANDLE;
    mDevice = VK_NULL_HANDLE;
    mPhysicalDevice = VK_NULL_HANDLE;
    mInstance = VK_NULL_HANDLE;
}

bool VulkanManager::setupDevice(GrVkExtensions& grExtensions, VkPhysicalDeviceFeatures2& features) {
    VkResult err;

    constexpr VkApplicationInfo app_info = {
        VK_STRUCTURE_TYPE_APPLICATION_INFO,  // sType
        nullptr,                             // pNext
        "android framework",                 // pApplicationName
        0,                                   // applicationVersion
        "android framework",                 // pEngineName
        0,                                   // engineVersion
        VK_MAKE_VERSION(1, 1, 0),            // apiVersion
    };

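    // Vulkan's usual two-call enumeration pattern follows: query the count with a null output
    // pointer, allocate, then call again to fill the array. A minimal sketch of the idiom:
    //   uint32_t count = 0;
    //   vkEnumerateInstanceExtensionProperties(nullptr, &count, nullptr);
    //   std::vector<VkExtensionProperties> props(count);
    //   vkEnumerateInstanceExtensionProperties(nullptr, &count, props.data());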
    std::vector<const char*> instanceExtensions;
    {
        GET_PROC(EnumerateInstanceExtensionProperties);

        uint32_t extensionCount = 0;
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
        if (VK_SUCCESS != err) {
            return false;
        }
        std::unique_ptr<VkExtensionProperties[]> extensions(
                new VkExtensionProperties[extensionCount]);
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions.get());
        if (VK_SUCCESS != err) {
            return false;
        }
        bool hasKHRSurfaceExtension = false;
        bool hasKHRAndroidSurfaceExtension = false;
        for (uint32_t i = 0; i < extensionCount; ++i) {
            instanceExtensions.push_back(extensions[i].extensionName);
            if (!strcmp(extensions[i].extensionName, VK_KHR_SURFACE_EXTENSION_NAME)) {
                hasKHRSurfaceExtension = true;
            }
            if (!strcmp(extensions[i].extensionName, VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
                hasKHRAndroidSurfaceExtension = true;
            }
        }
        if (!hasKHRSurfaceExtension || !hasKHRAndroidSurfaceExtension) {
            this->destroy();
            return false;
        }
    }

    const VkInstanceCreateInfo instance_create = {
        VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,  // sType
        nullptr,                                 // pNext
        0,                                       // flags
        &app_info,                               // pApplicationInfo
        0,                                       // enabledLayerNameCount
        nullptr,                                 // ppEnabledLayerNames
        (uint32_t) instanceExtensions.size(),    // enabledExtensionNameCount
        instanceExtensions.data(),               // ppEnabledExtensionNames
    };

    GET_PROC(CreateInstance);
    err = mCreateInstance(&instance_create, nullptr, &mInstance);
    if (err < 0) {
        this->destroy();
        return false;
    }

    GET_INST_PROC(DestroyInstance);
    GET_INST_PROC(EnumeratePhysicalDevices);
    GET_INST_PROC(GetPhysicalDeviceProperties);
    GET_INST_PROC(GetPhysicalDeviceQueueFamilyProperties);
    GET_INST_PROC(GetPhysicalDeviceFeatures2);
    GET_INST_PROC(CreateDevice);
    GET_INST_PROC(EnumerateDeviceExtensionProperties);
    GET_INST_PROC(CreateAndroidSurfaceKHR);
    GET_INST_PROC(DestroySurfaceKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfaceSupportKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfacePresentModesKHR);

    uint32_t gpuCount;
    err = mEnumeratePhysicalDevices(mInstance, &gpuCount, nullptr);
    if (err) {
        this->destroy();
        return false;
    }
    if (!gpuCount) {
        this->destroy();
        return false;
    }
    // Just return the first physical device instead of getting the whole array, since there
    // should only be one device on Android.
    gpuCount = 1;
    err = mEnumeratePhysicalDevices(mInstance, &gpuCount, &mPhysicalDevice);
    // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
    if (err && VK_INCOMPLETE != err) {
        this->destroy();
        return false;
    }

    VkPhysicalDeviceProperties physDeviceProperties;
    mGetPhysicalDeviceProperties(mPhysicalDevice, &physDeviceProperties);
    if (physDeviceProperties.apiVersion < VK_MAKE_VERSION(1, 1, 0)) {
        this->destroy();
        return false;
    }

    // query to get the initial queue props size
    uint32_t queueCount;
    mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, nullptr);
    if (!queueCount) {
        this->destroy();
        return false;
    }

    // now get the actual queue props
    std::unique_ptr<VkQueueFamilyProperties[]> queueProps(new VkQueueFamilyProperties[queueCount]);
    mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, queueProps.get());

    // iterate to find the graphics queue
    mGraphicsQueueIndex = queueCount;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            mGraphicsQueueIndex = i;
            break;
        }
    }
    if (mGraphicsQueueIndex == queueCount) {
        this->destroy();
        return false;
    }

    // All physical devices and queue families on Android must be capable of presentation with
    // any native window, so just use the first one.
    mPresentQueueIndex = 0;

    std::vector<const char*> deviceExtensions;
    {
        uint32_t extensionCount = 0;
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                nullptr);
        if (VK_SUCCESS != err) {
            this->destroy();
            return false;
        }
        std::unique_ptr<VkExtensionProperties[]> extensions(
                new VkExtensionProperties[extensionCount]);
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                extensions.get());
        if (VK_SUCCESS != err) {
            this->destroy();
            return false;
        }
        bool hasKHRSwapchainExtension = false;
        for (uint32_t i = 0; i < extensionCount; ++i) {
            deviceExtensions.push_back(extensions[i].extensionName);
            if (!strcmp(extensions[i].extensionName, VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
                hasKHRSwapchainExtension = true;
            }
        }
        if (!hasKHRSwapchainExtension) {
            this->destroy();
            return false;
        }
    }

    auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return vkGetDeviceProcAddr(device, proc_name);
        }
        return vkGetInstanceProcAddr(instance, proc_name);
    };
    grExtensions.init(getProc, mInstance, mPhysicalDevice, instanceExtensions.size(),
            instanceExtensions.data(), deviceExtensions.size(), deviceExtensions.data());

    if (!grExtensions.hasExtension(VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME, 1)) {
        this->destroy();
        return false;
    }

    memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
    features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    features.pNext = nullptr;

    // Set up all extension feature structs we may want to use.
    void** tailPNext = &features.pNext;
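    // tailPNext always points at the pNext field of the last struct in the chain, so each
    // extension struct is appended in O(1). For illustration, after the one append below:
    //   features.pNext == blend, blend->pNext == nullptr, tailPNext == &blend->pNext.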

    if (grExtensions.hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
        VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend;
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) malloc(
                sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
        LOG_ALWAYS_FATAL_IF(!blend);
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
        blend->pNext = nullptr;
        *tailPNext = blend;
        tailPNext = &blend->pNext;
    }

    // query to get the physical device features
    mGetPhysicalDeviceFeatures2(mPhysicalDevice, &features);
    // Disable robustBufferAccess: it looks like it would slow things down, and we can't depend
    // on it being supported on all platforms.
    features.features.robustBufferAccess = VK_FALSE;

    float queuePriorities[1] = { 0.0 };

    const VkDeviceQueueCreateInfo queueInfo[2] = {
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,  // sType
            nullptr,                                     // pNext
            0,                                           // VkDeviceQueueCreateFlags
            mGraphicsQueueIndex,                         // queueFamilyIndex
            1,                                           // queueCount
            queuePriorities,                             // pQueuePriorities
        },
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,  // sType
            nullptr,                                     // pNext
            0,                                           // VkDeviceQueueCreateFlags
            mPresentQueueIndex,                          // queueFamilyIndex
            1,                                           // queueCount
            queuePriorities,                             // pQueuePriorities
        }
    };
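    // Vulkan forbids duplicate queueFamilyIndex entries in pQueueCreateInfos, so only request a
    // second queue create info when the present family actually differs from the graphics family.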
    uint32_t queueInfoCount = (mPresentQueueIndex != mGraphicsQueueIndex) ? 2 : 1;

    const VkDeviceCreateInfo deviceInfo = {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,  // sType
        &features,                             // pNext
        0,                                     // VkDeviceCreateFlags
        queueInfoCount,                        // queueCreateInfoCount
        queueInfo,                             // pQueueCreateInfos
        0,                                     // layerCount
        nullptr,                               // ppEnabledLayerNames
        (uint32_t) deviceExtensions.size(),    // extensionCount
        deviceExtensions.data(),               // ppEnabledExtensionNames
        nullptr,                               // ppEnabledFeatures
    };

    err = mCreateDevice(mPhysicalDevice, &deviceInfo, nullptr, &mDevice);
    if (err) {
        this->destroy();
        return false;
    }

    GET_DEV_PROC(GetDeviceQueue);
    GET_DEV_PROC(DeviceWaitIdle);
    GET_DEV_PROC(DestroyDevice);
    GET_DEV_PROC(CreateSwapchainKHR);
    GET_DEV_PROC(DestroySwapchainKHR);
    GET_DEV_PROC(GetSwapchainImagesKHR);
    GET_DEV_PROC(AcquireNextImageKHR);
    GET_DEV_PROC(QueuePresentKHR);
    GET_DEV_PROC(CreateCommandPool);
    GET_DEV_PROC(DestroyCommandPool);
    GET_DEV_PROC(AllocateCommandBuffers);
    GET_DEV_PROC(FreeCommandBuffers);
    GET_DEV_PROC(ResetCommandBuffer);
    GET_DEV_PROC(BeginCommandBuffer);
    GET_DEV_PROC(EndCommandBuffer);
    GET_DEV_PROC(CmdPipelineBarrier);
    GET_DEV_PROC(QueueSubmit);
    GET_DEV_PROC(QueueWaitIdle);
    GET_DEV_PROC(CreateSemaphore);
    GET_DEV_PROC(DestroySemaphore);
    GET_DEV_PROC(ImportSemaphoreFdKHR);
    GET_DEV_PROC(GetSemaphoreFdKHR);
    GET_DEV_PROC(CreateFence);
    GET_DEV_PROC(DestroyFence);
    GET_DEV_PROC(WaitForFences);
    GET_DEV_PROC(ResetFences);

    return true;
}

static void free_features_extensions_structs(const VkPhysicalDeviceFeatures2& features) {
    // All Vulkan structs that could be part of the features chain will start with the
    // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
    // so we can get access to the pNext for the next struct.
    struct CommonVulkanHeader {
        VkStructureType sType;
        void* pNext;
    };

    void* pNext = features.pNext;
    while (pNext) {
        void* current = pNext;
        pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
        free(current);
    }
}

void VulkanManager::initialize() {
    if (mDevice != VK_NULL_HANDLE) {
        return;
    }

    GET_PROC(EnumerateInstanceVersion);
    uint32_t instanceVersion = 0;
    LOG_ALWAYS_FATAL_IF(mEnumerateInstanceVersion(&instanceVersion));
    LOG_ALWAYS_FATAL_IF(instanceVersion < VK_MAKE_VERSION(1, 1, 0));

    GrVkExtensions extensions;
    VkPhysicalDeviceFeatures2 features;
    LOG_ALWAYS_FATAL_IF(!this->setupDevice(extensions, features));

    mGetDeviceQueue(mDevice, mGraphicsQueueIndex, 0, &mGraphicsQueue);

    auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return vkGetDeviceProcAddr(device, proc_name);
        }
        return vkGetInstanceProcAddr(instance, proc_name);
    };

    GrVkBackendContext backendContext;
    backendContext.fInstance = mInstance;
    backendContext.fPhysicalDevice = mPhysicalDevice;
    backendContext.fDevice = mDevice;
    backendContext.fQueue = mGraphicsQueue;
    backendContext.fGraphicsQueueIndex = mGraphicsQueueIndex;
    backendContext.fInstanceVersion = instanceVersion;
    backendContext.fVkExtensions = &extensions;
    backendContext.fDeviceFeatures2 = &features;
    backendContext.fGetProc = std::move(getProc);

    // create the command pool for the command buffers
    if (VK_NULL_HANDLE == mCommandPool) {
        VkCommandPoolCreateInfo commandPoolInfo;
        memset(&commandPoolInfo, 0, sizeof(VkCommandPoolCreateInfo));
        commandPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
        // this needs to be on the render queue
        commandPoolInfo.queueFamilyIndex = mGraphicsQueueIndex;
        commandPoolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
        SkDEBUGCODE(VkResult res =) mCreateCommandPool(mDevice, &commandPoolInfo, nullptr,
                &mCommandPool);
        SkASSERT(VK_SUCCESS == res);
    }
    LOG_ALWAYS_FATAL_IF(mCommandPool == VK_NULL_HANDLE);

    if (!setupDummyCommandBuffer()) {
        this->destroy();
        return;
    }
    LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);

    mGetDeviceQueue(mDevice, mPresentQueueIndex, 0, &mPresentQueue);

    GrContextOptions options;
    options.fDisableDistanceFieldPaths = true;
    // TODO: get a string describing the SPIR-V compiler version and use it here
    mRenderThread.cacheManager().configureContext(&options, nullptr, 0);
    sk_sp<GrContext> grContext(GrContext::MakeVulkan(backendContext, options));
    LOG_ALWAYS_FATAL_IF(!grContext.get());
    mRenderThread.setGrContext(grContext);

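    // Release the extension feature structs malloc'd in setupDevice(); the chain is no longer
    // needed once the GrContext has been created.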
    free_features_extensions_structs(features);

    if (Properties::enablePartialUpdates && Properties::useBufferAge) {
        mSwapBehavior = SwapBehavior::BufferAge;
    }
}

// Returns the next BackbufferInfo to use for the next draw. The function will make sure all
// previous uses have finished before returning.
VulkanSurface::BackbufferInfo* VulkanManager::getAvailableBackbuffer(VulkanSurface* surface) {
    SkASSERT(surface->mBackbuffers);

    ++surface->mCurrentBackbufferIndex;
    if (surface->mCurrentBackbufferIndex > surface->mImageCount) {
        surface->mCurrentBackbufferIndex = 0;
    }
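    // createBuffers() allocates mImageCount + 1 backbuffers, so valid indices run through
    // [0, mImageCount] and the wrap above cycles over all of them.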

    VulkanSurface::BackbufferInfo* backbuffer =
            surface->mBackbuffers + surface->mCurrentBackbufferIndex;

    // Before we reuse a backbuffer, make sure its fences have all signaled so that we can safely
    // reuse its command buffers.
    VkResult res = mWaitForFences(mDevice, 2, backbuffer->mUsageFences, true, UINT64_MAX);
    if (res != VK_SUCCESS) {
        return nullptr;
    }

    return backbuffer;
}

SkSurface* VulkanManager::getBackbufferSurface(VulkanSurface* surface) {
    VulkanSurface::BackbufferInfo* backbuffer = getAvailableBackbuffer(surface);
    SkASSERT(backbuffer);

    VkResult res;

    res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
    SkASSERT(VK_SUCCESS == res);

    // The acquire will signal the attached mAcquireSemaphore. We use this to know the image has
    // finished presenting and that it is safe to begin sending new commands to the returned image.
    res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
            backbuffer->mAcquireSemaphore, VK_NULL_HANDLE, &backbuffer->mImageIndex);

    if (VK_ERROR_SURFACE_LOST_KHR == res) {
        // need to figure out how to create a new vkSurface without the platformData*
        // maybe use attach somehow? but need a Window
        return nullptr;
    }
    if (VK_ERROR_OUT_OF_DATE_KHR == res) {
        // tear the swapchain down and try again
        if (!createSwapchain(surface)) {
            return nullptr;
        }
        backbuffer = getAvailableBackbuffer(surface);
        res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
        SkASSERT(VK_SUCCESS == res);

        // acquire the image
        res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
                backbuffer->mAcquireSemaphore, VK_NULL_HANDLE, &backbuffer->mImageIndex);

        if (VK_SUCCESS != res) {
            return nullptr;
        }
    }

    // set up the layout transition from the initial layout to color attachment
    VkImageLayout layout = surface->mImageInfos[backbuffer->mImageIndex].mImageLayout;
    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout || VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout);
    VkPipelineStageFlags srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkAccessFlags srcAccessMask = 0;
    VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
            VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;

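    // The barrier below both transitions the image to COLOR_ATTACHMENT_OPTIMAL and, when the
    // queue families differ, transfers ownership of the image from the present queue family
    // back to the graphics queue family.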
    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,     // sType
        NULL,                                       // pNext
        srcAccessMask,                              // srcAccessMask
        dstAccessMask,                              // dstAccessMask
        layout,                                     // oldLayout
        VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,   // newLayout
        mPresentQueueIndex,                         // srcQueueFamilyIndex
        mGraphicsQueueIndex,                        // dstQueueFamilyIndex
        surface->mImages[backbuffer->mImageIndex],  // image
        {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}     // subresourceRange
    };
    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[0], 0);

    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[0], &info);

    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[0], srcStageMask, dstStageMask, 0, 0,
            nullptr, 0, nullptr, 1, &imageMemoryBarrier);

    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[0]);

    VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    // insert the layout transition into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 1;
    // Wait to make sure the acquire semaphore set above has signaled.
    submitInfo.pWaitSemaphores = &backbuffer->mAcquireSemaphore;
    submitInfo.pWaitDstStageMask = &waitDstStageFlags;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[0];
    submitInfo.signalSemaphoreCount = 0;

    // Attach the first fence to this submission so we can track when the command buffer finishes.
    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[0]);

    // We need to notify Skia that we changed the layout of the wrapped VkImage
    sk_sp<SkSurface> skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface;
    GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
            SkSurface::kFlushRead_BackendHandleAccess);
    if (!backendRT.isValid()) {
        SkASSERT(backendRT.isValid());
        return nullptr;
    }
    backendRT.setVkImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);

    surface->mBackbuffer = std::move(skSurface);
    return surface->mBackbuffer.get();
}

void VulkanManager::destroyBuffers(VulkanSurface* surface) {
    if (surface->mBackbuffers) {
        for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
            mWaitForFences(mDevice, 2, surface->mBackbuffers[i].mUsageFences, true, UINT64_MAX);
            surface->mBackbuffers[i].mImageIndex = -1;
            mDestroySemaphore(mDevice, surface->mBackbuffers[i].mAcquireSemaphore, nullptr);
            mDestroySemaphore(mDevice, surface->mBackbuffers[i].mRenderSemaphore, nullptr);
            mFreeCommandBuffers(mDevice, mCommandPool, 2,
                    surface->mBackbuffers[i].mTransitionCmdBuffers);
            mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[0], 0);
            mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[1], 0);
        }
    }

    delete[] surface->mBackbuffers;
    surface->mBackbuffers = nullptr;
    delete[] surface->mImageInfos;
    surface->mImageInfos = nullptr;
    delete[] surface->mImages;
    surface->mImages = nullptr;
}

void VulkanManager::destroySurface(VulkanSurface* surface) {
    // Make sure all submit commands have finished before starting to destroy objects.
    if (VK_NULL_HANDLE != mPresentQueue) {
        mQueueWaitIdle(mPresentQueue);
    }
    mDeviceWaitIdle(mDevice);

    destroyBuffers(surface);

    if (VK_NULL_HANDLE != surface->mSwapchain) {
        mDestroySwapchainKHR(mDevice, surface->mSwapchain, nullptr);
        surface->mSwapchain = VK_NULL_HANDLE;
    }

    if (VK_NULL_HANDLE != surface->mVkSurface) {
        mDestroySurfaceKHR(mInstance, surface->mVkSurface, nullptr);
        surface->mVkSurface = VK_NULL_HANDLE;
    }
    delete surface;
}

void VulkanManager::createBuffers(VulkanSurface* surface, VkFormat format, VkExtent2D extent) {
    mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, nullptr);
    SkASSERT(surface->mImageCount);
    surface->mImages = new VkImage[surface->mImageCount];
    mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, surface->mImages);

    SkSurfaceProps props(0, kUnknown_SkPixelGeometry);

    // set up initial image layouts and create surfaces
    surface->mImageInfos = new VulkanSurface::ImageInfo[surface->mImageCount];
    for (uint32_t i = 0; i < surface->mImageCount; ++i) {
        GrVkImageInfo info;
        info.fImage = surface->mImages[i];
        info.fAlloc = GrVkAlloc();
        info.fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
        info.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
        info.fFormat = format;
        info.fLevelCount = 1;

        GrBackendRenderTarget backendRT(extent.width, extent.height, 0, 0, info);

        VulkanSurface::ImageInfo& imageInfo = surface->mImageInfos[i];
        imageInfo.mSurface = SkSurface::MakeFromBackendRenderTarget(
                mRenderThread.getGrContext(), backendRT, kTopLeft_GrSurfaceOrigin,
                surface->mColorMode == ColorMode::WideColorGamut ? kRGBA_F16_SkColorType
                                                                 : kRGBA_8888_SkColorType,
                nullptr, &props);
    }

    SkASSERT(mCommandPool != VK_NULL_HANDLE);

    // set up the backbuffers
    VkSemaphoreCreateInfo semaphoreInfo;
    memset(&semaphoreInfo, 0, sizeof(VkSemaphoreCreateInfo));
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    VkCommandBufferAllocateInfo commandBuffersInfo;
    memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
    commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    commandBuffersInfo.pNext = nullptr;
    commandBuffersInfo.commandPool = mCommandPool;
    commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    commandBuffersInfo.commandBufferCount = 2;
    VkFenceCreateInfo fenceInfo;
    memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
    fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    fenceInfo.pNext = nullptr;
    fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;
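    // The fences are created already signaled so that the first wait on them in
    // getAvailableBackbuffer() succeeds immediately, before any work has been submitted.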

    // we create one additional backbuffer structure here, because we want to
    // give the command buffers they contain a chance to finish before we cycle back
    surface->mBackbuffers = new VulkanSurface::BackbufferInfo[surface->mImageCount + 1];
    for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
        SkDEBUGCODE(VkResult res);
        surface->mBackbuffers[i].mImageIndex = -1;
        SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
                &surface->mBackbuffers[i].mAcquireSemaphore);
        SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
                &surface->mBackbuffers[i].mRenderSemaphore);
        SkDEBUGCODE(res =) mAllocateCommandBuffers(mDevice, &commandBuffersInfo,
                surface->mBackbuffers[i].mTransitionCmdBuffers);
        SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
                &surface->mBackbuffers[i].mUsageFences[0]);
        SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
                &surface->mBackbuffers[i].mUsageFences[1]);
        SkASSERT(VK_SUCCESS == res);
    }
    surface->mCurrentBackbufferIndex = surface->mImageCount;
}

bool VulkanManager::createSwapchain(VulkanSurface* surface) {
    // check for capabilities
    VkSurfaceCapabilitiesKHR caps;
    VkResult res = mGetPhysicalDeviceSurfaceCapabilitiesKHR(mPhysicalDevice,
            surface->mVkSurface, &caps);
    if (VK_SUCCESS != res) {
        return false;
    }

    uint32_t surfaceFormatCount;
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
            &surfaceFormatCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    FatVector<VkSurfaceFormatKHR, 4> surfaceFormats(surfaceFormatCount);
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
            &surfaceFormatCount, surfaceFormats.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    uint32_t presentModeCount;
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
            surface->mVkSurface, &presentModeCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    FatVector<VkPresentModeKHR, VK_PRESENT_MODE_RANGE_SIZE_KHR> presentModes(presentModeCount);
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
            surface->mVkSurface, &presentModeCount, presentModes.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    VkExtent2D extent = caps.currentExtent;
    // clamp width to handle a currentExtent of -1 and protect us from broken hints
    if (extent.width < caps.minImageExtent.width) {
        extent.width = caps.minImageExtent.width;
    }
    SkASSERT(extent.width <= caps.maxImageExtent.width);
    // clamp height
    if (extent.height < caps.minImageExtent.height) {
        extent.height = caps.minImageExtent.height;
    }
    SkASSERT(extent.height <= caps.maxImageExtent.height);

    uint32_t imageCount = caps.minImageCount + 2;
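    // minImageCount + 2 is the usual request for triple buffering: one image on screen, one
    // queued for presentation, and one being rendered.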
    if (caps.maxImageCount > 0 && imageCount > caps.maxImageCount) {
        // Application must settle for fewer images than desired:
        imageCount = caps.maxImageCount;
    }

    // Currently Skia requires the images to be color attachments and support all transfer
    // operations.
    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
            VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
            VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    SkASSERT((caps.supportedUsageFlags & usageFlags) == usageFlags);
    SkASSERT(caps.supportedTransforms & caps.currentTransform);
    SkASSERT(caps.supportedCompositeAlpha &
            (VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR | VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR));
    VkCompositeAlphaFlagBitsKHR composite_alpha =
            (caps.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR)
                    ? VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR
                    : VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;

    VkFormat surfaceFormat = VK_FORMAT_R8G8B8A8_UNORM;
    VkColorSpaceKHR colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
    if (surface->mColorMode == ColorMode::WideColorGamut) {
        surfaceFormat = VK_FORMAT_R16G16B16A16_SFLOAT;
        colorSpace = VK_COLOR_SPACE_EXTENDED_SRGB_NONLINEAR_EXT;
    }
    bool foundSurfaceFormat = false;
    for (uint32_t i = 0; i < surfaceFormatCount; ++i) {
        if (surfaceFormat == surfaceFormats[i].format
                && colorSpace == surfaceFormats[i].colorSpace) {
            foundSurfaceFormat = true;
            break;
        }
    }

    if (!foundSurfaceFormat) {
        return false;
    }

    // FIFO is always available and will match what we do on GL, so just pick that here.
    VkPresentModeKHR mode = VK_PRESENT_MODE_FIFO_KHR;

    VkSwapchainCreateInfoKHR swapchainCreateInfo;
    memset(&swapchainCreateInfo, 0, sizeof(VkSwapchainCreateInfoKHR));
    swapchainCreateInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
    swapchainCreateInfo.surface = surface->mVkSurface;
    swapchainCreateInfo.minImageCount = imageCount;
    swapchainCreateInfo.imageFormat = surfaceFormat;
    swapchainCreateInfo.imageColorSpace = colorSpace;
    swapchainCreateInfo.imageExtent = extent;
    swapchainCreateInfo.imageArrayLayers = 1;
    swapchainCreateInfo.imageUsage = usageFlags;

    uint32_t queueFamilies[] = {mGraphicsQueueIndex, mPresentQueueIndex};
    if (mGraphicsQueueIndex != mPresentQueueIndex) {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
        swapchainCreateInfo.queueFamilyIndexCount = 2;
        swapchainCreateInfo.pQueueFamilyIndices = queueFamilies;
    } else {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
        swapchainCreateInfo.queueFamilyIndexCount = 0;
        swapchainCreateInfo.pQueueFamilyIndices = nullptr;
    }

    swapchainCreateInfo.preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
    swapchainCreateInfo.compositeAlpha = composite_alpha;
    swapchainCreateInfo.presentMode = mode;
    swapchainCreateInfo.clipped = true;
    swapchainCreateInfo.oldSwapchain = surface->mSwapchain;

    res = mCreateSwapchainKHR(mDevice, &swapchainCreateInfo, nullptr, &surface->mSwapchain);
    if (VK_SUCCESS != res) {
        return false;
    }

    // destroy the old swapchain
    if (swapchainCreateInfo.oldSwapchain != VK_NULL_HANDLE) {
        mDeviceWaitIdle(mDevice);

        destroyBuffers(surface);

        mDestroySwapchainKHR(mDevice, swapchainCreateInfo.oldSwapchain, nullptr);
    }

    createBuffers(surface, surfaceFormat, extent);

    return true;
}

VulkanSurface* VulkanManager::createSurface(ANativeWindow* window, ColorMode colorMode) {
    initialize();

    if (!window) {
        return nullptr;
    }

    VulkanSurface* surface = new VulkanSurface(colorMode);

    VkAndroidSurfaceCreateInfoKHR surfaceCreateInfo;
    memset(&surfaceCreateInfo, 0, sizeof(VkAndroidSurfaceCreateInfoKHR));
    surfaceCreateInfo.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
    surfaceCreateInfo.pNext = nullptr;
    surfaceCreateInfo.flags = 0;
    surfaceCreateInfo.window = window;

    VkResult res = mCreateAndroidSurfaceKHR(mInstance, &surfaceCreateInfo, nullptr,
            &surface->mVkSurface);
    if (VK_SUCCESS != res) {
        delete surface;
        return nullptr;
    }

    SkDEBUGCODE(VkBool32 supported; res = mGetPhysicalDeviceSurfaceSupportKHR(
            mPhysicalDevice, mPresentQueueIndex, surface->mVkSurface, &supported);
            // All physical devices and queue families on Android must be capable of
            // presentation with any native window.
            SkASSERT(VK_SUCCESS == res && supported););

    if (!createSwapchain(surface)) {
        destroySurface(surface);
        return nullptr;
    }

    return surface;
}

// Helper to know which src stage flags we need to set when transitioning to the present layout
static VkPipelineStageFlags layoutToPipelineSrcStageFlags(const VkImageLayout layout) {
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_TRANSFER_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
            VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
    } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        return VK_PIPELINE_STAGE_HOST_BIT;
    }

    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
    return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
}

// Helper to know which src access mask we need to set when transitioning to the present layout
static VkAccessFlags layoutToSrcAccessMask(const VkImageLayout layout) {
    VkAccessFlags flags = 0;
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT |
                VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_HOST_WRITE_BIT |
                VK_ACCESS_HOST_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        flags = VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        flags = VK_ACCESS_SHADER_READ_BIT;
    }
    return flags;
}

void VulkanManager::swapBuffers(VulkanSurface* surface) {
    if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
        ATRACE_NAME("Finishing GPU work");
        mDeviceWaitIdle(mDevice);
    }

    SkASSERT(surface->mBackbuffers);
    VulkanSurface::BackbufferInfo* backbuffer =
            surface->mBackbuffers + surface->mCurrentBackbufferIndex;

    SkSurface* skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface.get();
    GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
            SkSurface::kFlushRead_BackendHandleAccess);
    SkASSERT(backendRT.isValid());

    GrVkImageInfo imageInfo;
    SkAssertResult(backendRT.getVkImageInfo(&imageInfo));

    // Check to make sure we never change the actual wrapped image
    SkASSERT(imageInfo.fImage == surface->mImages[backbuffer->mImageIndex]);

    // We need to transition the image to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR and make sure that all
    // previous work is complete before presenting. So we first add the necessary barrier here.
    VkImageLayout layout = imageInfo.fImageLayout;
    VkPipelineStageFlags srcStageMask = layoutToPipelineSrcStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = layoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = 0;

    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,     // sType
        NULL,                                       // pNext
        srcAccessMask,                              // srcAccessMask
        dstAccessMask,                              // dstAccessMask
        layout,                                     // oldLayout
        VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,            // newLayout
        mGraphicsQueueIndex,                        // srcQueueFamilyIndex
        mPresentQueueIndex,                         // dstQueueFamilyIndex
        surface->mImages[backbuffer->mImageIndex],  // image
        {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}     // subresourceRange
    };

    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[1], 0);
    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[1], &info);
    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[1], srcStageMask, dstStageMask, 0, 0,
            nullptr, 0, nullptr, 1, &imageMemoryBarrier);
    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[1]);

    surface->mImageInfos[backbuffer->mImageIndex].mImageLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

    // insert the layout transition into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 0;
    submitInfo.pWaitDstStageMask = 0;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[1];
    submitInfo.signalSemaphoreCount = 1;
    // When this command buffer finishes we will signal this semaphore so that we know it is now
    // safe to present the image to the screen.
    submitInfo.pSignalSemaphores = &backbuffer->mRenderSemaphore;

    // Attach the second fence to this submission so we can track when the command buffer finishes.
    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[1]);

    // Submit the present operation to the present queue. We use a semaphore here to make sure all
    // rendering to the image is complete and that the layout has been changed to present on the
    // graphics queue.
    const VkPresentInfoKHR presentInfo = {
        VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,  // sType
        NULL,                                // pNext
        1,                                   // waitSemaphoreCount
        &backbuffer->mRenderSemaphore,       // pWaitSemaphores
        1,                                   // swapchainCount
        &surface->mSwapchain,                // pSwapchains
        &backbuffer->mImageIndex,            // pImageIndices
        NULL                                 // pResults
    };

    mQueuePresentKHR(mPresentQueue, &presentInfo);

    surface->mBackbuffer.reset();
    surface->mImageInfos[backbuffer->mImageIndex].mLastUsed = surface->mCurrentTime;
    surface->mImageInfos[backbuffer->mImageIndex].mInvalid = false;
    surface->mCurrentTime++;
}

int VulkanManager::getAge(VulkanSurface* surface) {
    SkASSERT(surface->mBackbuffers);
    VulkanSurface::BackbufferInfo* backbuffer =
            surface->mBackbuffers + surface->mCurrentBackbufferIndex;
    if (mSwapBehavior == SwapBehavior::Discard ||
            surface->mImageInfos[backbuffer->mImageIndex].mInvalid) {
        return 0;
    }
    uint16_t lastUsed = surface->mImageInfos[backbuffer->mImageIndex].mLastUsed;
    return surface->mCurrentTime - lastUsed;
}

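// The "dummy" command buffer is intentionally empty: Vulkan can only wait on or signal
// semaphores as part of a queue submission, so fenceWait() and createReleaseFence() below
// submit this no-op buffer purely to attach their semaphores to the graphics queue.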
bool VulkanManager::setupDummyCommandBuffer() {
    if (mDummyCB != VK_NULL_HANDLE) {
        return true;
    }

    VkCommandBufferAllocateInfo commandBuffersInfo;
    memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
    commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    commandBuffersInfo.pNext = nullptr;
    commandBuffersInfo.commandPool = mCommandPool;
    commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    commandBuffersInfo.commandBufferCount = 1;

    VkResult err = mAllocateCommandBuffers(mDevice, &commandBuffersInfo, &mDummyCB);
    if (err != VK_SUCCESS) {
        // It is probably unnecessary to set this back to VK_NULL_HANDLE, but we set it anyway to
        // make sure the driver didn't set a value and then return a failure.
        mDummyCB = VK_NULL_HANDLE;
        return false;
    }

    VkCommandBufferBeginInfo beginInfo;
    memset(&beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
    beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    beginInfo.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;

    mBeginCommandBuffer(mDummyCB, &beginInfo);
    mEndCommandBuffer(mDummyCB);
    return true;
}

status_t VulkanManager::fenceWait(sp<Fence>& fence) {
    if (!hasVkContext()) {
        ALOGE("VulkanManager::fenceWait: VkDevice not initialized");
        return INVALID_OPERATION;
    }

    // Block the GPU on the fence.
    int fenceFd = fence->dup();
    if (fenceFd == -1) {
        ALOGE("VulkanManager::fenceWait: error dup'ing fence fd: %d", errno);
        return -errno;
    }

    VkSemaphoreCreateInfo semaphoreInfo;
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    VkSemaphore semaphore;
    VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
    if (VK_SUCCESS != err) {
        ALOGE("Failed to create import semaphore, err: %d", err);
        return UNKNOWN_ERROR;
    }
    VkImportSemaphoreFdInfoKHR importInfo;
    importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
    importInfo.pNext = nullptr;
    importInfo.semaphore = semaphore;
    importInfo.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
    importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
    importInfo.fd = fenceFd;

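    // A successful SYNC_FD import transfers ownership of fenceFd to the driver, which becomes
    // responsible for closing it; we must not touch the fd after this call.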
    err = mImportSemaphoreFdKHR(mDevice, &importInfo);
    if (VK_SUCCESS != err) {
        ALOGE("Failed to import semaphore, err: %d", err);
        return UNKNOWN_ERROR;
    }

    LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);

    VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;

    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 1;
    // Wait to make sure the fence semaphore imported above has signaled.
    submitInfo.pWaitSemaphores = &semaphore;
    submitInfo.pWaitDstStageMask = &waitDstStageFlags;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &mDummyCB;
    submitInfo.signalSemaphoreCount = 0;

    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, VK_NULL_HANDLE);

    // On Android, when we import a semaphore it is imported using temporary permanence. That
    // means that as soon as we queue the semaphore for a wait it reverts to its previous
    // permanent state before importing. This means it will now be in an idle state with no
    // pending signal or wait operations, so it is safe to immediately delete it.
    mDestroySemaphore(mDevice, semaphore, nullptr);
    return OK;
}

status_t VulkanManager::createReleaseFence(sp<Fence>& nativeFence) {
    if (!hasVkContext()) {
        ALOGE("VulkanManager::createReleaseFence: VkDevice not initialized");
        return INVALID_OPERATION;
    }

    VkExportSemaphoreCreateInfo exportInfo;
    exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
    exportInfo.pNext = nullptr;
    exportInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    VkSemaphoreCreateInfo semaphoreInfo;
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = &exportInfo;
    semaphoreInfo.flags = 0;
    VkSemaphore semaphore;
    VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
    if (VK_SUCCESS != err) {
        ALOGE("VulkanManager::createReleaseFence: Failed to create semaphore");
        return INVALID_OPERATION;
    }

    LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);

    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 0;
    submitInfo.pWaitSemaphores = nullptr;
    submitInfo.pWaitDstStageMask = nullptr;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &mDummyCB;
    submitInfo.signalSemaphoreCount = 1;
    submitInfo.pSignalSemaphores = &semaphore;

    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, VK_NULL_HANDLE);

    VkSemaphoreGetFdInfoKHR getFdInfo;
    getFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
    getFdInfo.pNext = nullptr;
    getFdInfo.semaphore = semaphore;
    getFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    int fenceFd = 0;

    err = mGetSemaphoreFdKHR(mDevice, &getFdInfo, &fenceFd);
    if (VK_SUCCESS != err) {
        ALOGE("VulkanManager::createReleaseFence: Failed to get semaphore Fd");
        return INVALID_OPERATION;
    }
    nativeFence = new Fence(fenceFd);

    // Exporting a semaphore with copy transference via vkGetSemaphoreFdKHR has the same effect as
    // destroying the semaphore and creating a new one with the same handle, and the payload's
    // ownership is moved to the fd we created. Thus the semaphore is in a state where we can
    // delete it, and we don't need to wait on the command buffer we submitted to finish.
    mDestroySemaphore(mDevice, semaphore, nullptr);

    return OK;
}

} /* namespace renderthread */
} /* namespace uirenderer */
} /* namespace android */