blob: d84ec8508ee431c0e920119c8736e18afd3d27e5 [file] [log] [blame]
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001/*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "VulkanManager.h"
18
Stan Iliev305e13a2018-11-13 11:14:48 -050019#include <gui/Surface.h>
20
Greg Danielcd558522016-11-17 13:31:40 -050021#include "Properties.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050022#include "RenderThread.h"
Greg Daniel45ec62b2017-01-04 14:27:00 -050023#include "renderstate/RenderState.h"
Ben Wagnereec27d52017-01-11 15:32:07 -050024#include "utils/FatVector.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050025
Greg Danielac2d2322017-07-12 11:30:15 -040026#include <GrBackendSurface.h>
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050027#include <GrContext.h>
28#include <GrTypes.h>
Greg Daniela227dbb2018-08-20 09:19:48 -040029#include <GrTypes.h>
30#include <vk/GrVkExtensions.h>
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050031#include <vk/GrVkTypes.h>
32
33namespace android {
34namespace uirenderer {
35namespace renderthread {
36
// Helpers that resolve Vulkan entry points into the corresponding m<Name> member
// function pointers:
//   GET_PROC      - global (instance-independent) procs, looked up with a null instance
//   GET_INST_PROC - instance-level procs, looked up against mInstance
//   GET_DEV_PROC  - device-level procs, looked up against mDevice
#define GET_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(VK_NULL_HANDLE, "vk" #F)
#define GET_INST_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(mInstance, "vk" #F)
#define GET_DEV_PROC(F) m##F = (PFN_vk##F)vkGetDeviceProcAddr(mDevice, "vk" #F)
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050040
// Remembers the owning RenderThread; initialize() later installs the Vulkan-backed
// GrContext on it and destroy() clears it again.
VulkanManager::VulkanManager(RenderThread& thread) : mRenderThread(thread) {}
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050042
// Tears down all Vulkan state owned by this manager. Order matters: the GrContext is
// released first (it may still reference the device), then the command pool, then the
// device (after draining it with DeviceWaitIdle), and finally the instance. Safe to
// call when partially initialized - each handle is checked against VK_NULL_HANDLE.
void VulkanManager::destroy() {
    mRenderThread.setGrContext(nullptr);

    // We don't need to explicitly free the command buffer since it automatically gets freed when we
    // delete the VkCommandPool below.
    mDummyCB = VK_NULL_HANDLE;

    if (VK_NULL_HANDLE != mCommandPool) {
        mDestroyCommandPool(mDevice, mCommandPool, nullptr);
        mCommandPool = VK_NULL_HANDLE;
    }

    if (mDevice != VK_NULL_HANDLE) {
        // Drain all queues before destroying the device so no submitted work is in flight.
        mDeviceWaitIdle(mDevice);
        mDestroyDevice(mDevice, nullptr);
    }

    if (mInstance != VK_NULL_HANDLE) {
        mDestroyInstance(mInstance, nullptr);
    }

    // Reset all cached handles so a subsequent initialize() starts from a clean slate.
    mGraphicsQueue = VK_NULL_HANDLE;
    mPresentQueue = VK_NULL_HANDLE;
    mDevice = VK_NULL_HANDLE;
    mPhysicalDevice = VK_NULL_HANDLE;
    mInstance = VK_NULL_HANDLE;
}
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050070
// Brings up the Vulkan instance and logical device used for rendering.
//
// On success:
//   - mInstance / mPhysicalDevice / mDevice and the GET_*_PROC function pointers are valid,
//   - grExtensions holds the enabled instance + device extension lists,
//   - features holds the physical-device feature chain (sType/pNext initialized here).
// The extension-feature structs chained off features.pNext are heap-allocated (malloc)
// and must be released by the caller via free_features_extensions_structs().
//
// Returns false after cleaning up via destroy() on any failure.
bool VulkanManager::setupDevice(GrVkExtensions& grExtensions, VkPhysicalDeviceFeatures2& features) {
    VkResult err;

    constexpr VkApplicationInfo app_info = {
        VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
        nullptr, // pNext
        "android framework", // pApplicationName
        0, // applicationVersion
        "android framework", // pEngineName
        0, // engineVerison
        VK_MAKE_VERSION(1, 1, 0), // apiVersion
    };

    // Enumerate and enable every available instance extension, but require the two
    // surface extensions we cannot render without.
    std::vector<const char*> instanceExtensions;
    {
        GET_PROC(EnumerateInstanceExtensionProperties);

        uint32_t extensionCount = 0;
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
        if (VK_SUCCESS != err) {
            return false;
        }
        std::unique_ptr<VkExtensionProperties[]> extensions(
                new VkExtensionProperties[extensionCount]);
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions.get());
        if (VK_SUCCESS != err) {
            return false;
        }
        // NOTE: instanceExtensions stores pointers into `extensions`; those strings only
        // need to stay alive through mCreateInstance/grExtensions.init below.
        bool hasKHRSurfaceExtension = false;
        bool hasKHRAndroidSurfaceExtension = false;
        for (uint32_t i = 0; i < extensionCount; ++i) {
            instanceExtensions.push_back(extensions[i].extensionName);
            if (!strcmp(extensions[i].extensionName, VK_KHR_SURFACE_EXTENSION_NAME)) {
                hasKHRSurfaceExtension = true;
            }
            if (!strcmp(extensions[i].extensionName,VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
                hasKHRAndroidSurfaceExtension = true;
            }
        }
        if (!hasKHRSurfaceExtension || !hasKHRAndroidSurfaceExtension) {
            this->destroy();
            return false;
        }
    }

    const VkInstanceCreateInfo instance_create = {
        VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
        nullptr, // pNext
        0, // flags
        &app_info, // pApplicationInfo
        0, // enabledLayerNameCount
        nullptr, // ppEnabledLayerNames
        (uint32_t) instanceExtensions.size(), // enabledExtensionNameCount
        instanceExtensions.data(), // ppEnabledExtensionNames
    };

    GET_PROC(CreateInstance);
    err = mCreateInstance(&instance_create, nullptr, &mInstance);
    if (err < 0) {
        this->destroy();
        return false;
    }

    // With a live instance we can resolve instance-level entry points.
    GET_INST_PROC(DestroyInstance);
    GET_INST_PROC(EnumeratePhysicalDevices);
    GET_INST_PROC(GetPhysicalDeviceProperties);
    GET_INST_PROC(GetPhysicalDeviceQueueFamilyProperties);
    GET_INST_PROC(GetPhysicalDeviceFeatures2);
    GET_INST_PROC(CreateDevice);
    GET_INST_PROC(EnumerateDeviceExtensionProperties);
    GET_INST_PROC(CreateAndroidSurfaceKHR);
    GET_INST_PROC(DestroySurfaceKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfaceSupportKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfacePresentModesKHR);

    uint32_t gpuCount;
    err = mEnumeratePhysicalDevices(mInstance, &gpuCount, nullptr);
    if (err) {
        this->destroy();
        return false;
    }
    if (!gpuCount) {
        this->destroy();
        return false;
    }
    // Just returning the first physical device instead of getting the whole array. Since there
    // should only be one device on android.
    gpuCount = 1;
    err = mEnumeratePhysicalDevices(mInstance, &gpuCount, &mPhysicalDevice);
    // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
    if (err && VK_INCOMPLETE != err) {
        this->destroy();
        return false;
    }

    // The rest of this setup (GetPhysicalDeviceFeatures2 etc.) requires Vulkan 1.1.
    VkPhysicalDeviceProperties physDeviceProperties;
    mGetPhysicalDeviceProperties(mPhysicalDevice, &physDeviceProperties);
    if (physDeviceProperties.apiVersion < VK_MAKE_VERSION(1, 1, 0)) {
        this->destroy();
        return false;
    }

    // query to get the initial queue props size
    uint32_t queueCount;
    mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, nullptr);
    if (!queueCount) {
        this->destroy();
        return false;
    }

    // now get the actual queue props
    std::unique_ptr<VkQueueFamilyProperties[]> queueProps(new VkQueueFamilyProperties[queueCount]);
    mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, queueProps.get());

    // iterate to find the graphics queue
    mGraphicsQueueIndex = queueCount;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            mGraphicsQueueIndex = i;
            break;
        }
    }
    if (mGraphicsQueueIndex == queueCount) {
        this->destroy();
        return false;
    }

    // All physical devices and queue families on Android must be capable of
    // presentation with any native window. So just use the first one.
    mPresentQueueIndex = 0;

    // Enumerate and enable every available device extension, but require swapchain support.
    std::vector<const char*> deviceExtensions;
    {
        uint32_t extensionCount = 0;
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                nullptr);
        if (VK_SUCCESS != err) {
            this->destroy();
            return false;
        }
        std::unique_ptr<VkExtensionProperties[]> extensions(
                new VkExtensionProperties[extensionCount]);
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                extensions.get());
        if (VK_SUCCESS != err) {
            this->destroy();
            return false;
        }
        bool hasKHRSwapchainExtension = false;
        for (uint32_t i = 0; i < extensionCount; ++i) {
            deviceExtensions.push_back(extensions[i].extensionName);
            if (!strcmp(extensions[i].extensionName, VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
                hasKHRSwapchainExtension = true;
            }
        }
        if (!hasKHRSwapchainExtension) {
            this->destroy();
            return false;
        }
    }

    auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return vkGetDeviceProcAddr(device, proc_name);
        }
        return vkGetInstanceProcAddr(instance, proc_name);
    };
    grExtensions.init(getProc, mInstance, mPhysicalDevice, instanceExtensions.size(),
            instanceExtensions.data(), deviceExtensions.size(), deviceExtensions.data());

    // External semaphore FDs are required for cross-process sync (see Import/GetSemaphoreFdKHR
    // procs resolved below).
    if (!grExtensions.hasExtension(VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME, 1)) {
        this->destroy();
        return false;
    }

    memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
    features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    features.pNext = nullptr;

    // Setup all extension feature structs we may want to use. Each struct is malloc'd and
    // chained onto features.pNext; the caller frees the chain with
    // free_features_extensions_structs().
    void** tailPNext = &features.pNext;

    if (grExtensions.hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
        VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend;
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) malloc(
                sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
        LOG_ALWAYS_FATAL_IF(!blend);
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
        blend->pNext = nullptr;
        *tailPNext = blend;
        tailPNext = &blend->pNext;
    }

    // query to get the physical device features
    mGetPhysicalDeviceFeatures2(mPhysicalDevice, &features);
    // this looks like it would slow things down,
    // and we can't depend on it on all platforms
    features.features.robustBufferAccess = VK_FALSE;

    float queuePriorities[1] = { 0.0 };

    const VkDeviceQueueCreateInfo queueInfo[2] = {
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr, // pNext
            0, // VkDeviceQueueCreateFlags
            mGraphicsQueueIndex, // queueFamilyIndex
            1, // queueCount
            queuePriorities, // pQueuePriorities
        },
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr, // pNext
            0, // VkDeviceQueueCreateFlags
            mPresentQueueIndex, // queueFamilyIndex
            1, // queueCount
            queuePriorities, // pQueuePriorities
        }
    };
    // Only request a second queue when present and graphics use different families.
    uint32_t queueInfoCount = (mPresentQueueIndex != mGraphicsQueueIndex) ? 2 : 1;

    const VkDeviceCreateInfo deviceInfo = {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
        &features, // pNext
        0, // VkDeviceCreateFlags
        queueInfoCount, // queueCreateInfoCount
        queueInfo, // pQueueCreateInfos
        0, // layerCount
        nullptr, // ppEnabledLayerNames
        (uint32_t) deviceExtensions.size(), // extensionCount
        deviceExtensions.data(), // ppEnabledExtensionNames
        nullptr, // ppEnabledFeatures
    };

    err = mCreateDevice(mPhysicalDevice, &deviceInfo, nullptr, &mDevice);
    if (err) {
        this->destroy();
        return false;
    }

    // With a live device we can resolve device-level entry points.
    GET_DEV_PROC(GetDeviceQueue);
    GET_DEV_PROC(DeviceWaitIdle);
    GET_DEV_PROC(DestroyDevice);
    GET_DEV_PROC(CreateSwapchainKHR);
    GET_DEV_PROC(DestroySwapchainKHR);
    GET_DEV_PROC(GetSwapchainImagesKHR);
    GET_DEV_PROC(AcquireNextImageKHR);
    GET_DEV_PROC(QueuePresentKHR);
    GET_DEV_PROC(CreateCommandPool);
    GET_DEV_PROC(DestroyCommandPool);
    GET_DEV_PROC(AllocateCommandBuffers);
    GET_DEV_PROC(FreeCommandBuffers);
    GET_DEV_PROC(ResetCommandBuffer);
    GET_DEV_PROC(BeginCommandBuffer);
    GET_DEV_PROC(EndCommandBuffer);
    GET_DEV_PROC(CmdPipelineBarrier);
    GET_DEV_PROC(GetDeviceQueue);
    GET_DEV_PROC(QueueSubmit);
    GET_DEV_PROC(QueueWaitIdle);
    GET_DEV_PROC(DeviceWaitIdle);
    GET_DEV_PROC(CreateSemaphore);
    GET_DEV_PROC(DestroySemaphore);
    GET_DEV_PROC(ImportSemaphoreFdKHR);
    GET_DEV_PROC(GetSemaphoreFdKHR);
    GET_DEV_PROC(CreateFence);
    GET_DEV_PROC(DestroyFence);
    GET_DEV_PROC(WaitForFences);
    GET_DEV_PROC(ResetFences);

    return true;
}
344
Greg Daniela227dbb2018-08-20 09:19:48 -0400345static void free_features_extensions_structs(const VkPhysicalDeviceFeatures2& features) {
346 // All Vulkan structs that could be part of the features chain will start with the
347 // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
348 // so we can get access to the pNext for the next struct.
349 struct CommonVulkanHeader {
350 VkStructureType sType;
351 void* pNext;
352 };
353
354 void* pNext = features.pNext;
355 while (pNext) {
356 void* current = pNext;
357 pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
358 free(current);
359 }
360}
361
// One-shot initialization of the Vulkan backend: verifies a 1.1 instance is available,
// creates instance/device via setupDevice(), builds the command pool and dummy command
// buffer, and hands everything to Skia via GrContext::MakeVulkan. Idempotent - returns
// immediately if a device already exists. Any hard failure aborts via LOG_ALWAYS_FATAL_IF
// (except setupDummyCommandBuffer, which tears down and returns).
void VulkanManager::initialize() {
    if (mDevice != VK_NULL_HANDLE) {
        return;
    }

    GET_PROC(EnumerateInstanceVersion);
    uint32_t instanceVersion = 0;
    LOG_ALWAYS_FATAL_IF(mEnumerateInstanceVersion(&instanceVersion));
    LOG_ALWAYS_FATAL_IF(instanceVersion < VK_MAKE_VERSION(1, 1, 0));

    // Stack lifetime is fine here: backendContext only borrows these, and
    // GrContext::MakeVulkan is called before this function returns.
    GrVkExtensions extensions;
    VkPhysicalDeviceFeatures2 features;
    LOG_ALWAYS_FATAL_IF(!this->setupDevice(extensions, features));

    mGetDeviceQueue(mDevice, mGraphicsQueueIndex, 0, &mGraphicsQueue);

    auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return vkGetDeviceProcAddr(device, proc_name);
        }
        return vkGetInstanceProcAddr(instance, proc_name);
    };

    GrVkBackendContext backendContext;
    backendContext.fInstance = mInstance;
    backendContext.fPhysicalDevice = mPhysicalDevice;
    backendContext.fDevice = mDevice;
    backendContext.fQueue = mGraphicsQueue;
    backendContext.fGraphicsQueueIndex = mGraphicsQueueIndex;
    backendContext.fInstanceVersion = instanceVersion;
    backendContext.fVkExtensions = &extensions;
    backendContext.fDeviceFeatures2 = &features;
    backendContext.fGetProc = std::move(getProc);

    // create the command pool for the command buffers
    if (VK_NULL_HANDLE == mCommandPool) {
        VkCommandPoolCreateInfo commandPoolInfo;
        memset(&commandPoolInfo, 0, sizeof(VkCommandPoolCreateInfo));
        commandPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
        // this needs to be on the render queue
        commandPoolInfo.queueFamilyIndex = mGraphicsQueueIndex;
        commandPoolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
        SkDEBUGCODE(VkResult res =) mCreateCommandPool(mDevice, &commandPoolInfo, nullptr,
                &mCommandPool);
        SkASSERT(VK_SUCCESS == res);
    }
    LOG_ALWAYS_FATAL_IF(mCommandPool == VK_NULL_HANDLE);

    if (!setupDummyCommandBuffer()) {
        this->destroy();
        return;
    }
    LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);


    mGetDeviceQueue(mDevice, mPresentQueueIndex, 0, &mPresentQueue);

    GrContextOptions options;
    options.fDisableDistanceFieldPaths = true;
    // TODO: get a string describing the SPIR-V compiler version and use it here
    mRenderThread.cacheManager().configureContext(&options, nullptr, 0);
    sk_sp<GrContext> grContext(GrContext::MakeVulkan(backendContext, options));
    LOG_ALWAYS_FATAL_IF(!grContext.get());
    mRenderThread.setGrContext(grContext);

    // Skia has copied what it needs; release the malloc'd feature chain from setupDevice().
    free_features_extensions_structs(features);

    if (Properties::enablePartialUpdates && Properties::useBufferAge) {
        mSwapBehavior = SwapBehavior::BufferAge;
    }
}
433
// Returns the next BackbufferInfo to use for the next draw. The function will make sure all
// previous uses have finished before returning. Returns nullptr if waiting on the
// backbuffer's usage fences fails - callers must handle that.
VulkanSurface::BackbufferInfo* VulkanManager::getAvailableBackbuffer(VulkanSurface* surface) {
    SkASSERT(surface->mBackbuffers);

    // mBackbuffers holds mImageCount + 1 entries (see createBuffers), so the index
    // legitimately cycles through [0, mImageCount] inclusive.
    ++surface->mCurrentBackbufferIndex;
    if (surface->mCurrentBackbufferIndex > surface->mImageCount) {
        surface->mCurrentBackbufferIndex = 0;
    }

    VulkanSurface::BackbufferInfo* backbuffer =
            surface->mBackbuffers + surface->mCurrentBackbufferIndex;

    // Before we reuse a backbuffer, make sure its fences have all signaled so that we can safely
    // reuse its commands buffers.
    VkResult res = mWaitForFences(mDevice, 2, backbuffer->mUsageFences, true, UINT64_MAX);
    if (res != VK_SUCCESS) {
        return nullptr;
    }

    return backbuffer;
}
456
Stan Iliev305e13a2018-11-13 11:14:48 -0500457SkSurface* VulkanManager::getBackbufferSurface(VulkanSurface** surfaceOut) {
458 // Recreate VulkanSurface, if ANativeWindow has been resized.
459 VulkanSurface* surface = *surfaceOut;
460 int windowWidth = 0, windowHeight = 0;
461 ANativeWindow* window = surface->mNativeWindow;
462 window->query(window, NATIVE_WINDOW_WIDTH, &windowWidth);
463 window->query(window, NATIVE_WINDOW_HEIGHT, &windowHeight);
464 if (windowWidth != surface->mWindowWidth || windowHeight != surface->mWindowHeight) {
465 ColorMode colorMode = surface->mColorMode;
466 destroySurface(surface);
467 *surfaceOut = createSurface(window, colorMode);
468 surface = *surfaceOut;
469 }
470
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500471 VulkanSurface::BackbufferInfo* backbuffer = getAvailableBackbuffer(surface);
472 SkASSERT(backbuffer);
473
474 VkResult res;
475
Greg Daniel2ff202712018-06-14 11:50:10 -0400476 res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500477 SkASSERT(VK_SUCCESS == res);
478
479 // The acquire will signal the attached mAcquireSemaphore. We use this to know the image has
480 // finished presenting and that it is safe to begin sending new commands to the returned image.
Greg Daniel2ff202712018-06-14 11:50:10 -0400481 res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
John Reck1bcacfd2017-11-03 10:12:19 -0700482 backbuffer->mAcquireSemaphore, VK_NULL_HANDLE,
483 &backbuffer->mImageIndex);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500484
485 if (VK_ERROR_SURFACE_LOST_KHR == res) {
486 // need to figure out how to create a new vkSurface without the platformData*
487 // maybe use attach somehow? but need a Window
488 return nullptr;
489 }
490 if (VK_ERROR_OUT_OF_DATE_KHR == res) {
491 // tear swapchain down and try again
492 if (!createSwapchain(surface)) {
493 return nullptr;
494 }
Greg Daniel45ec62b2017-01-04 14:27:00 -0500495 backbuffer = getAvailableBackbuffer(surface);
Greg Daniel2ff202712018-06-14 11:50:10 -0400496 res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
Greg Daniel45ec62b2017-01-04 14:27:00 -0500497 SkASSERT(VK_SUCCESS == res);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500498
499 // acquire the image
Greg Daniel2ff202712018-06-14 11:50:10 -0400500 res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
John Reck1bcacfd2017-11-03 10:12:19 -0700501 backbuffer->mAcquireSemaphore, VK_NULL_HANDLE,
502 &backbuffer->mImageIndex);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500503
504 if (VK_SUCCESS != res) {
505 return nullptr;
506 }
507 }
508
509 // set up layout transfer from initial to color attachment
Greg Danielcd558522016-11-17 13:31:40 -0500510 VkImageLayout layout = surface->mImageInfos[backbuffer->mImageIndex].mImageLayout;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500511 SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout || VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout);
Greg Daniel8a2a7542018-10-04 13:46:55 -0400512 VkPipelineStageFlags srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500513 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
Greg Daniel8a2a7542018-10-04 13:46:55 -0400514 VkAccessFlags srcAccessMask = 0;
515 VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
516 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500517
518 VkImageMemoryBarrier imageMemoryBarrier = {
John Reck1bcacfd2017-11-03 10:12:19 -0700519 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
520 NULL, // pNext
521 srcAccessMask, // outputMask
522 dstAccessMask, // inputMask
523 layout, // oldLayout
524 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // newLayout
525 mPresentQueueIndex, // srcQueueFamilyIndex
Greg Daniel2ff202712018-06-14 11:50:10 -0400526 mGraphicsQueueIndex, // dstQueueFamilyIndex
John Reck1bcacfd2017-11-03 10:12:19 -0700527 surface->mImages[backbuffer->mImageIndex], // image
528 {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1} // subresourceRange
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500529 };
530 mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[0], 0);
531
532 VkCommandBufferBeginInfo info;
533 memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
534 info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
535 info.flags = 0;
536 mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[0], &info);
537
John Reck1bcacfd2017-11-03 10:12:19 -0700538 mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[0], srcStageMask, dstStageMask, 0, 0,
539 nullptr, 0, nullptr, 1, &imageMemoryBarrier);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500540
541 mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[0]);
542
543 VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
544 // insert the layout transfer into the queue and wait on the acquire
545 VkSubmitInfo submitInfo;
546 memset(&submitInfo, 0, sizeof(VkSubmitInfo));
547 submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
548 submitInfo.waitSemaphoreCount = 1;
549 // Wait to make sure aquire semaphore set above has signaled.
550 submitInfo.pWaitSemaphores = &backbuffer->mAcquireSemaphore;
551 submitInfo.pWaitDstStageMask = &waitDstStageFlags;
552 submitInfo.commandBufferCount = 1;
553 submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[0];
554 submitInfo.signalSemaphoreCount = 0;
555
556 // Attach first fence to submission here so we can track when the command buffer finishes.
Greg Daniel2ff202712018-06-14 11:50:10 -0400557 mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[0]);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500558
559 // We need to notify Skia that we changed the layout of the wrapped VkImage
Greg Danielcd558522016-11-17 13:31:40 -0500560 sk_sp<SkSurface> skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface;
Greg Daniel1834a8c2018-04-12 12:22:43 -0400561 GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
562 SkSurface::kFlushRead_BackendHandleAccess);
563 if (!backendRT.isValid()) {
564 SkASSERT(backendRT.isValid());
565 return nullptr;
566 }
567 backendRT.setVkImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500568
569 surface->mBackbuffer = std::move(skSurface);
570 return surface->mBackbuffer.get();
571}
572
573void VulkanManager::destroyBuffers(VulkanSurface* surface) {
574 if (surface->mBackbuffers) {
575 for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400576 mWaitForFences(mDevice, 2, surface->mBackbuffers[i].mUsageFences, true, UINT64_MAX);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500577 surface->mBackbuffers[i].mImageIndex = -1;
Greg Daniel2ff202712018-06-14 11:50:10 -0400578 mDestroySemaphore(mDevice, surface->mBackbuffers[i].mAcquireSemaphore, nullptr);
579 mDestroySemaphore(mDevice, surface->mBackbuffers[i].mRenderSemaphore, nullptr);
580 mFreeCommandBuffers(mDevice, mCommandPool, 2,
581 surface->mBackbuffers[i].mTransitionCmdBuffers);
582 mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[0], 0);
583 mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[1], 0);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500584 }
585 }
586
587 delete[] surface->mBackbuffers;
588 surface->mBackbuffers = nullptr;
Greg Danielcd558522016-11-17 13:31:40 -0500589 delete[] surface->mImageInfos;
590 surface->mImageInfos = nullptr;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500591 delete[] surface->mImages;
592 surface->mImages = nullptr;
593}
594
// Destroys a VulkanSurface and everything it owns: drains the present queue and device,
// releases the backbuffers, then the swapchain, then the VkSurfaceKHR, and finally
// deletes the surface object itself.
void VulkanManager::destroySurface(VulkanSurface* surface) {
    // Make sure all submit commands have finished before starting to destroy objects.
    if (VK_NULL_HANDLE != mPresentQueue) {
        mQueueWaitIdle(mPresentQueue);
    }
    mDeviceWaitIdle(mDevice);

    destroyBuffers(surface);

    if (VK_NULL_HANDLE != surface->mSwapchain) {
        mDestroySwapchainKHR(mDevice, surface->mSwapchain, nullptr);
        surface->mSwapchain = VK_NULL_HANDLE;
    }

    if (VK_NULL_HANDLE != surface->mVkSurface) {
        mDestroySurfaceKHR(mInstance, surface->mVkSurface, nullptr);
        surface->mVkSurface = VK_NULL_HANDLE;
    }
    delete surface;
}
615
// Builds the per-surface rendering state for a freshly created swapchain: fetches the
// swapchain images, wraps each one in an SkSurface (via GrBackendRenderTarget), and
// allocates the ring of BackbufferInfo structs (semaphores, transition command buffers,
// and usage fences) used to pace frame submission.
void VulkanManager::createBuffers(VulkanSurface* surface, VkFormat format, VkExtent2D extent) {
    mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, nullptr);
    SkASSERT(surface->mImageCount);
    surface->mImages = new VkImage[surface->mImageCount];
    mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, surface->mImages);

    SkSurfaceProps props(0, kUnknown_SkPixelGeometry);

    // set up initial image layouts and create surfaces
    surface->mImageInfos = new VulkanSurface::ImageInfo[surface->mImageCount];
    for (uint32_t i = 0; i < surface->mImageCount; ++i) {
        GrVkImageInfo info;
        info.fImage = surface->mImages[i];
        // Swapchain images are owned by the swapchain, so no separate allocation to track.
        info.fAlloc = GrVkAlloc();
        info.fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
        info.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
        info.fFormat = format;
        info.fLevelCount = 1;

        GrBackendRenderTarget backendRT(extent.width, extent.height, 0, 0, info);

        VulkanSurface::ImageInfo& imageInfo = surface->mImageInfos[i];
        // Wide-gamut surfaces render at F16; everything else at 8888.
        imageInfo.mSurface = SkSurface::MakeFromBackendRenderTarget(
                mRenderThread.getGrContext(), backendRT, kTopLeft_GrSurfaceOrigin,
                surface->mColorMode == ColorMode::WideColorGamut ? kRGBA_F16_SkColorType
                                                                 : kRGBA_8888_SkColorType, nullptr, &props);
    }

    SkASSERT(mCommandPool != VK_NULL_HANDLE);

    // set up the backbuffers
    VkSemaphoreCreateInfo semaphoreInfo;
    memset(&semaphoreInfo, 0, sizeof(VkSemaphoreCreateInfo));
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    VkCommandBufferAllocateInfo commandBuffersInfo;
    memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
    commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    commandBuffersInfo.pNext = nullptr;
    commandBuffersInfo.commandPool = mCommandPool;
    commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    commandBuffersInfo.commandBufferCount = 2;
    VkFenceCreateInfo fenceInfo;
    memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
    fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    fenceInfo.pNext = nullptr;
    // Fences start signaled so the first wait in getAvailableBackbuffer() passes immediately.
    fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;

    // we create one additional backbuffer structure here, because we want to
    // give the command buffers they contain a chance to finish before we cycle back
    surface->mBackbuffers = new VulkanSurface::BackbufferInfo[surface->mImageCount + 1];
    for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
        SkDEBUGCODE(VkResult res);
        surface->mBackbuffers[i].mImageIndex = -1;
        SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
                &surface->mBackbuffers[i].mAcquireSemaphore);
        SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
                &surface->mBackbuffers[i].mRenderSemaphore);
        SkDEBUGCODE(res =) mAllocateCommandBuffers(mDevice, &commandBuffersInfo,
                surface->mBackbuffers[i].mTransitionCmdBuffers);
        SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
                &surface->mBackbuffers[i].mUsageFences[0]);
        SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
                &surface->mBackbuffers[i].mUsageFences[1]);
        SkASSERT(VK_SUCCESS == res);
    }
    surface->mCurrentBackbufferIndex = surface->mImageCount;
}
685
686bool VulkanManager::createSwapchain(VulkanSurface* surface) {
687 // check for capabilities
688 VkSurfaceCapabilitiesKHR caps;
Greg Daniel2ff202712018-06-14 11:50:10 -0400689 VkResult res = mGetPhysicalDeviceSurfaceCapabilitiesKHR(mPhysicalDevice,
John Reck1bcacfd2017-11-03 10:12:19 -0700690 surface->mVkSurface, &caps);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500691 if (VK_SUCCESS != res) {
692 return false;
693 }
694
695 uint32_t surfaceFormatCount;
Greg Daniel2ff202712018-06-14 11:50:10 -0400696 res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
John Reck1bcacfd2017-11-03 10:12:19 -0700697 &surfaceFormatCount, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500698 if (VK_SUCCESS != res) {
699 return false;
700 }
701
Ben Wagnereec27d52017-01-11 15:32:07 -0500702 FatVector<VkSurfaceFormatKHR, 4> surfaceFormats(surfaceFormatCount);
Greg Daniel2ff202712018-06-14 11:50:10 -0400703 res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
John Reck1bcacfd2017-11-03 10:12:19 -0700704 &surfaceFormatCount, surfaceFormats.data());
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500705 if (VK_SUCCESS != res) {
706 return false;
707 }
708
709 uint32_t presentModeCount;
Greg Daniel2ff202712018-06-14 11:50:10 -0400710 res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
John Reck1bcacfd2017-11-03 10:12:19 -0700711 surface->mVkSurface, &presentModeCount, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500712 if (VK_SUCCESS != res) {
713 return false;
714 }
715
Ben Wagnereec27d52017-01-11 15:32:07 -0500716 FatVector<VkPresentModeKHR, VK_PRESENT_MODE_RANGE_SIZE_KHR> presentModes(presentModeCount);
Greg Daniel2ff202712018-06-14 11:50:10 -0400717 res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
John Reck1bcacfd2017-11-03 10:12:19 -0700718 surface->mVkSurface, &presentModeCount,
719 presentModes.data());
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500720 if (VK_SUCCESS != res) {
721 return false;
722 }
723
724 VkExtent2D extent = caps.currentExtent;
725 // clamp width; to handle currentExtent of -1 and protect us from broken hints
726 if (extent.width < caps.minImageExtent.width) {
727 extent.width = caps.minImageExtent.width;
728 }
729 SkASSERT(extent.width <= caps.maxImageExtent.width);
730 // clamp height
731 if (extent.height < caps.minImageExtent.height) {
732 extent.height = caps.minImageExtent.height;
733 }
734 SkASSERT(extent.height <= caps.maxImageExtent.height);
Stan Iliev305e13a2018-11-13 11:14:48 -0500735 surface->mWindowWidth = extent.width;
736 surface->mWindowHeight = extent.height;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500737
738 uint32_t imageCount = caps.minImageCount + 2;
739 if (caps.maxImageCount > 0 && imageCount > caps.maxImageCount) {
740 // Application must settle for fewer images than desired:
741 imageCount = caps.maxImageCount;
742 }
743
744 // Currently Skia requires the images to be color attchments and support all transfer
745 // operations.
746 VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
747 VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
748 VK_IMAGE_USAGE_TRANSFER_DST_BIT;
749 SkASSERT((caps.supportedUsageFlags & usageFlags) == usageFlags);
750 SkASSERT(caps.supportedTransforms & caps.currentTransform);
John Reck1bcacfd2017-11-03 10:12:19 -0700751 SkASSERT(caps.supportedCompositeAlpha &
752 (VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR | VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR));
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500753 VkCompositeAlphaFlagBitsKHR composite_alpha =
John Reck1bcacfd2017-11-03 10:12:19 -0700754 (caps.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR)
755 ? VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR
756 : VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500757
Stan Iliev79351f32018-09-19 14:23:49 -0400758 VkFormat surfaceFormat = VK_FORMAT_R8G8B8A8_UNORM;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500759 VkColorSpaceKHR colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
Stan Iliev79351f32018-09-19 14:23:49 -0400760 if (surface->mColorMode == ColorMode::WideColorGamut) {
761 surfaceFormat = VK_FORMAT_R16G16B16A16_SFLOAT;
762 colorSpace = VK_COLOR_SPACE_EXTENDED_SRGB_NONLINEAR_EXT;
763 }
764 bool foundSurfaceFormat = false;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500765 for (uint32_t i = 0; i < surfaceFormatCount; ++i) {
Stan Iliev79351f32018-09-19 14:23:49 -0400766 if (surfaceFormat == surfaceFormats[i].format
767 && colorSpace == surfaceFormats[i].colorSpace) {
768 foundSurfaceFormat = true;
769 break;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500770 }
771 }
772
Stan Iliev79351f32018-09-19 14:23:49 -0400773 if (!foundSurfaceFormat) {
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500774 return false;
775 }
776
Greg Daniel8a2a7542018-10-04 13:46:55 -0400777 // FIFO is always available and will match what we do on GL so just pick that here.
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500778 VkPresentModeKHR mode = VK_PRESENT_MODE_FIFO_KHR;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500779
780 VkSwapchainCreateInfoKHR swapchainCreateInfo;
781 memset(&swapchainCreateInfo, 0, sizeof(VkSwapchainCreateInfoKHR));
782 swapchainCreateInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
783 swapchainCreateInfo.surface = surface->mVkSurface;
784 swapchainCreateInfo.minImageCount = imageCount;
785 swapchainCreateInfo.imageFormat = surfaceFormat;
786 swapchainCreateInfo.imageColorSpace = colorSpace;
787 swapchainCreateInfo.imageExtent = extent;
788 swapchainCreateInfo.imageArrayLayers = 1;
789 swapchainCreateInfo.imageUsage = usageFlags;
790
Greg Daniel2ff202712018-06-14 11:50:10 -0400791 uint32_t queueFamilies[] = {mGraphicsQueueIndex, mPresentQueueIndex};
792 if (mGraphicsQueueIndex != mPresentQueueIndex) {
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500793 swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
794 swapchainCreateInfo.queueFamilyIndexCount = 2;
795 swapchainCreateInfo.pQueueFamilyIndices = queueFamilies;
796 } else {
797 swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
798 swapchainCreateInfo.queueFamilyIndexCount = 0;
799 swapchainCreateInfo.pQueueFamilyIndices = nullptr;
800 }
801
802 swapchainCreateInfo.preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
803 swapchainCreateInfo.compositeAlpha = composite_alpha;
804 swapchainCreateInfo.presentMode = mode;
805 swapchainCreateInfo.clipped = true;
806 swapchainCreateInfo.oldSwapchain = surface->mSwapchain;
807
Greg Daniel2ff202712018-06-14 11:50:10 -0400808 res = mCreateSwapchainKHR(mDevice, &swapchainCreateInfo, nullptr, &surface->mSwapchain);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500809 if (VK_SUCCESS != res) {
810 return false;
811 }
812
813 // destroy the old swapchain
814 if (swapchainCreateInfo.oldSwapchain != VK_NULL_HANDLE) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400815 mDeviceWaitIdle(mDevice);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500816
817 destroyBuffers(surface);
818
Greg Daniel2ff202712018-06-14 11:50:10 -0400819 mDestroySwapchainKHR(mDevice, swapchainCreateInfo.oldSwapchain, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500820 }
821
822 createBuffers(surface, surfaceFormat, extent);
823
824 return true;
825}
826
Stan Iliev79351f32018-09-19 14:23:49 -0400827VulkanSurface* VulkanManager::createSurface(ANativeWindow* window, ColorMode colorMode) {
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500828 initialize();
829
830 if (!window) {
831 return nullptr;
832 }
833
Stan Iliev305e13a2018-11-13 11:14:48 -0500834 VulkanSurface* surface = new VulkanSurface(colorMode, window);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500835
836 VkAndroidSurfaceCreateInfoKHR surfaceCreateInfo;
837 memset(&surfaceCreateInfo, 0, sizeof(VkAndroidSurfaceCreateInfoKHR));
838 surfaceCreateInfo.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
839 surfaceCreateInfo.pNext = nullptr;
840 surfaceCreateInfo.flags = 0;
841 surfaceCreateInfo.window = window;
842
Greg Daniel2ff202712018-06-14 11:50:10 -0400843 VkResult res = mCreateAndroidSurfaceKHR(mInstance, &surfaceCreateInfo, nullptr,
844 &surface->mVkSurface);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500845 if (VK_SUCCESS != res) {
846 delete surface;
847 return nullptr;
848 }
849
John Reck1bcacfd2017-11-03 10:12:19 -0700850 SkDEBUGCODE(VkBool32 supported; res = mGetPhysicalDeviceSurfaceSupportKHR(
Greg Daniel2ff202712018-06-14 11:50:10 -0400851 mPhysicalDevice, mPresentQueueIndex, surface->mVkSurface, &supported);
852 // All physical devices and queue families on Android must be capable of
853 // presentation with any native window.
854 SkASSERT(VK_SUCCESS == res && supported););
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500855
856 if (!createSwapchain(surface)) {
857 destroySurface(surface);
858 return nullptr;
859 }
860
861 return surface;
862}
863
864// Helper to know which src stage flags we need to set when transitioning to the present layout
Greg Daniel8a2a7542018-10-04 13:46:55 -0400865static VkPipelineStageFlags layoutToPipelineSrcStageFlags(const VkImageLayout layout) {
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500866 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
867 return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
868 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
869 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
870 return VK_PIPELINE_STAGE_TRANSFER_BIT;
Greg Daniel8a2a7542018-10-04 13:46:55 -0400871 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
872 return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
873 } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
874 VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout) {
875 return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
876 } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
877 return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500878 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
879 return VK_PIPELINE_STAGE_HOST_BIT;
880 }
881
882 SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
883 return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
884}
885
886// Helper to know which src access mask we need to set when transitioning to the present layout
887static VkAccessFlags layoutToSrcAccessMask(const VkImageLayout layout) {
888 VkAccessFlags flags = 0;
889 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
890 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
John Reck1bcacfd2017-11-03 10:12:19 -0700891 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT |
892 VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_HOST_WRITE_BIT |
893 VK_ACCESS_HOST_READ_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500894 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
895 flags = VK_ACCESS_HOST_WRITE_BIT;
896 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
897 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
898 } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
899 flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
900 } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
901 flags = VK_ACCESS_TRANSFER_WRITE_BIT;
902 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
903 flags = VK_ACCESS_TRANSFER_READ_BIT;
904 } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
905 flags = VK_ACCESS_SHADER_READ_BIT;
906 }
907 return flags;
908}
909
// Presents the current backbuffer of |surface|.
//
// Records and submits (on the graphics queue) a barrier that transitions the
// current swapchain image to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR and transfers
// queue-family ownership to the present queue, signaling the backbuffer's
// render semaphore and second usage fence. Then queues the present on the
// present queue, waiting on that semaphore, and updates the bookkeeping that
// getAge() reads.
void VulkanManager::swapBuffers(VulkanSurface* surface) {
    // Debug property: stall the CPU until the GPU is completely finished.
    if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
        ATRACE_NAME("Finishing GPU work");
        mDeviceWaitIdle(mDevice);
    }

    SkASSERT(surface->mBackbuffers);
    VulkanSurface::BackbufferInfo* backbuffer =
            surface->mBackbuffers + surface->mCurrentBackbufferIndex;

    // Pull the Vulkan image info back out of the SkSurface that wraps the
    // swapchain image, so we know its current layout.
    SkSurface* skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface.get();
    GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
            SkSurface::kFlushRead_BackendHandleAccess);
    SkASSERT(backendRT.isValid());

    GrVkImageInfo imageInfo;
    SkAssertResult(backendRT.getVkImageInfo(&imageInfo));

    // Check to make sure we never change the actually wrapped image
    SkASSERT(imageInfo.fImage == surface->mImages[backbuffer->mImageIndex]);

    // We need to transition the image to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR and make sure that all
    // previous work is complete for before presenting. So we first add the necessary barrier here.
    VkImageLayout layout = imageInfo.fImageLayout;
    VkPipelineStageFlags srcStageMask = layoutToPipelineSrcStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = layoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = 0;

    // The barrier also releases the image from the graphics queue family to
    // the present queue family (src/dstQueueFamilyIndex below).
    VkImageMemoryBarrier imageMemoryBarrier = {
            VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,     // sType
            NULL,                                       // pNext
            srcAccessMask,                              // outputMask
            dstAccessMask,                              // inputMask
            layout,                                     // oldLayout
            VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,            // newLayout
            mGraphicsQueueIndex,                        // srcQueueFamilyIndex
            mPresentQueueIndex,                         // dstQueueFamilyIndex
            surface->mImages[backbuffer->mImageIndex],  // image
            {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}     // subresourceRange
    };

    // Record the barrier into the backbuffer's second transition command
    // buffer (index [1]; index [0] is presumably used on acquire — confirm).
    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[1], 0);
    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[1], &info);
    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[1], srcStageMask, dstStageMask, 0, 0,
                        nullptr, 0, nullptr, 1, &imageMemoryBarrier);
    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[1]);

    // Remember the layout we just transitioned the image to.
    surface->mImageInfos[backbuffer->mImageIndex].mImageLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 0;
    submitInfo.pWaitDstStageMask = 0;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[1];
    submitInfo.signalSemaphoreCount = 1;
    // When this command buffer finishes we will signal this semaphore so that we know it is now
    // safe to present the image to the screen.
    submitInfo.pSignalSemaphores = &backbuffer->mRenderSemaphore;

    // Attach second fence to submission here so we can track when the command buffer finishes.
    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[1]);

    // Submit present operation to present queue. We use a semaphore here to make sure all rendering
    // to the image is complete and that the layout has been change to present on the graphics
    // queue.
    const VkPresentInfoKHR presentInfo = {
            VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,  // sType
            NULL,                                // pNext
            1,                                   // waitSemaphoreCount
            &backbuffer->mRenderSemaphore,       // pWaitSemaphores
            1,                                   // swapchainCount
            &surface->mSwapchain,                // pSwapchains
            &backbuffer->mImageIndex,            // pImageIndices
            NULL                                 // pResults
    };

    mQueuePresentKHR(mPresentQueue, &presentInfo);

    // Update buffer-age bookkeeping consumed by getAge().
    surface->mBackbuffer.reset();
    surface->mImageInfos[backbuffer->mImageIndex].mLastUsed = surface->mCurrentTime;
    surface->mImageInfos[backbuffer->mImageIndex].mInvalid = false;
    surface->mCurrentTime++;
}
1001
1002int VulkanManager::getAge(VulkanSurface* surface) {
Greg Daniel74ea2012017-11-10 11:32:58 -05001003 SkASSERT(surface->mBackbuffers);
John Reck1bcacfd2017-11-03 10:12:19 -07001004 VulkanSurface::BackbufferInfo* backbuffer =
1005 surface->mBackbuffers + surface->mCurrentBackbufferIndex;
1006 if (mSwapBehavior == SwapBehavior::Discard ||
1007 surface->mImageInfos[backbuffer->mImageIndex].mInvalid) {
Greg Danielcd558522016-11-17 13:31:40 -05001008 return 0;
1009 }
1010 uint16_t lastUsed = surface->mImageInfos[backbuffer->mImageIndex].mLastUsed;
1011 return surface->mCurrentTime - lastUsed;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001012}
1013
Greg Daniel26e0dca2018-09-18 10:33:19 -04001014bool VulkanManager::setupDummyCommandBuffer() {
1015 if (mDummyCB != VK_NULL_HANDLE) {
1016 return true;
1017 }
1018
1019 VkCommandBufferAllocateInfo commandBuffersInfo;
1020 memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
1021 commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
1022 commandBuffersInfo.pNext = nullptr;
1023 commandBuffersInfo.commandPool = mCommandPool;
1024 commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
1025 commandBuffersInfo.commandBufferCount = 1;
1026
1027 VkResult err = mAllocateCommandBuffers(mDevice, &commandBuffersInfo, &mDummyCB);
1028 if (err != VK_SUCCESS) {
1029 // It is probably unnecessary to set this back to VK_NULL_HANDLE, but we set it anyways to
1030 // make sure the driver didn't set a value and then return a failure.
1031 mDummyCB = VK_NULL_HANDLE;
1032 return false;
1033 }
1034
1035 VkCommandBufferBeginInfo beginInfo;
1036 memset(&beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
1037 beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
1038 beginInfo.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
1039
1040 mBeginCommandBuffer(mDummyCB, &beginInfo);
1041 mEndCommandBuffer(mDummyCB);
1042 return true;
1043}
1044
Stan Iliev564ca3e2018-09-04 22:00:00 +00001045status_t VulkanManager::fenceWait(sp<Fence>& fence) {
Greg Daniel26e0dca2018-09-18 10:33:19 -04001046 if (!hasVkContext()) {
1047 ALOGE("VulkanManager::fenceWait: VkDevice not initialized");
1048 return INVALID_OPERATION;
1049 }
1050
Stan Iliev7a081272018-10-26 17:54:18 -04001051 // Block GPU on the fence.
1052 int fenceFd = fence->dup();
1053 if (fenceFd == -1) {
1054 ALOGE("VulkanManager::fenceWait: error dup'ing fence fd: %d", errno);
1055 return -errno;
Stan Iliev564ca3e2018-09-04 22:00:00 +00001056 }
Stan Iliev7a081272018-10-26 17:54:18 -04001057
1058 VkSemaphoreCreateInfo semaphoreInfo;
1059 semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
1060 semaphoreInfo.pNext = nullptr;
1061 semaphoreInfo.flags = 0;
1062 VkSemaphore semaphore;
1063 VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
1064 if (VK_SUCCESS != err) {
1065 ALOGE("Failed to create import semaphore, err: %d", err);
1066 return UNKNOWN_ERROR;
1067 }
1068 VkImportSemaphoreFdInfoKHR importInfo;
1069 importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
1070 importInfo.pNext = nullptr;
1071 importInfo.semaphore = semaphore;
1072 importInfo.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
1073 importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
1074 importInfo.fd = fenceFd;
1075
1076 err = mImportSemaphoreFdKHR(mDevice, &importInfo);
1077 if (VK_SUCCESS != err) {
1078 ALOGE("Failed to import semaphore, err: %d", err);
1079 return UNKNOWN_ERROR;
1080 }
1081
1082 LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);
1083
1084 VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
1085
1086 VkSubmitInfo submitInfo;
1087 memset(&submitInfo, 0, sizeof(VkSubmitInfo));
1088 submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
1089 submitInfo.waitSemaphoreCount = 1;
1090 // Wait to make sure aquire semaphore set above has signaled.
1091 submitInfo.pWaitSemaphores = &semaphore;
1092 submitInfo.pWaitDstStageMask = &waitDstStageFlags;
1093 submitInfo.commandBufferCount = 1;
1094 submitInfo.pCommandBuffers = &mDummyCB;
1095 submitInfo.signalSemaphoreCount = 0;
1096
1097 mQueueSubmit(mGraphicsQueue, 1, &submitInfo, VK_NULL_HANDLE);
1098
1099 // On Android when we import a semaphore, it is imported using temporary permanence. That
1100 // means as soon as we queue the semaphore for a wait it reverts to its previous permanent
1101 // state before importing. This means it will now be in an idle state with no pending
1102 // signal or wait operations, so it is safe to immediately delete it.
1103 mDestroySemaphore(mDevice, semaphore, nullptr);
Stan Iliev564ca3e2018-09-04 22:00:00 +00001104 return OK;
1105}
1106
1107status_t VulkanManager::createReleaseFence(sp<Fence>& nativeFence) {
Greg Daniel26e0dca2018-09-18 10:33:19 -04001108 if (!hasVkContext()) {
1109 ALOGE("VulkanManager::createReleaseFence: VkDevice not initialized");
1110 return INVALID_OPERATION;
1111 }
1112
Greg Daniel26e0dca2018-09-18 10:33:19 -04001113 VkExportSemaphoreCreateInfo exportInfo;
1114 exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
1115 exportInfo.pNext = nullptr;
1116 exportInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
1117
1118 VkSemaphoreCreateInfo semaphoreInfo;
1119 semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
1120 semaphoreInfo.pNext = &exportInfo;
1121 semaphoreInfo.flags = 0;
1122 VkSemaphore semaphore;
1123 VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
1124 if (VK_SUCCESS != err) {
1125 ALOGE("VulkanManager::createReleaseFence: Failed to create semaphore");
1126 return INVALID_OPERATION;
1127 }
1128
1129 LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);
1130
1131 VkSubmitInfo submitInfo;
1132 memset(&submitInfo, 0, sizeof(VkSubmitInfo));
1133 submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
1134 submitInfo.waitSemaphoreCount = 0;
1135 submitInfo.pWaitSemaphores = nullptr;
1136 submitInfo.pWaitDstStageMask = nullptr;
1137 submitInfo.commandBufferCount = 1;
1138 submitInfo.pCommandBuffers = &mDummyCB;
1139 submitInfo.signalSemaphoreCount = 1;
1140 submitInfo.pSignalSemaphores = &semaphore;
1141
1142 mQueueSubmit(mGraphicsQueue, 1, &submitInfo, VK_NULL_HANDLE);
1143
1144 VkSemaphoreGetFdInfoKHR getFdInfo;
1145 getFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
1146 getFdInfo.pNext = nullptr;
1147 getFdInfo.semaphore = semaphore;
1148 getFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
1149
1150 int fenceFd = 0;
1151
1152 err = mGetSemaphoreFdKHR(mDevice, &getFdInfo, &fenceFd);
1153 if (VK_SUCCESS != err) {
1154 ALOGE("VulkanManager::createReleaseFence: Failed to get semaphore Fd");
1155 return INVALID_OPERATION;
1156 }
1157 nativeFence = new Fence(fenceFd);
1158
1159 // Exporting a semaphore with copy transference via vkGetSemahporeFdKHR, has the same effect of
1160 // destroying the semaphore and creating a new one with the same handle, and the payloads
1161 // ownership is move to the Fd we created. Thus the semahpore is in a state that we can delete
1162 // it and we don't need to wait on the command buffer we submitted to finish.
1163 mDestroySemaphore(mDevice, semaphore, nullptr);
1164
Stan Iliev564ca3e2018-09-04 22:00:00 +00001165 return OK;
1166}
1167
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001168} /* namespace renderthread */
1169} /* namespace uirenderer */
1170} /* namespace android */