blob: e1f8307343a7b2578a98ba7102174cf3e0ddbe2b [file] [log] [blame]
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001/*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "VulkanManager.h"
18
Stan Iliev305e13a2018-11-13 11:14:48 -050019#include <gui/Surface.h>
20
Greg Danielcd558522016-11-17 13:31:40 -050021#include "Properties.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050022#include "RenderThread.h"
Greg Daniel45ec62b2017-01-04 14:27:00 -050023#include "renderstate/RenderState.h"
Ben Wagnereec27d52017-01-11 15:32:07 -050024#include "utils/FatVector.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050025
Greg Danielac2d2322017-07-12 11:30:15 -040026#include <GrBackendSurface.h>
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050027#include <GrContext.h>
28#include <GrTypes.h>
Greg Daniela227dbb2018-08-20 09:19:48 -040029#include <GrTypes.h>
30#include <vk/GrVkExtensions.h>
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050031#include <vk/GrVkTypes.h>
32
33namespace android {
34namespace uirenderer {
35namespace renderthread {
36
Greg Daniel2ff202712018-06-14 11:50:10 -040037#define GET_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(VK_NULL_HANDLE, "vk" #F)
38#define GET_INST_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(mInstance, "vk" #F)
39#define GET_DEV_PROC(F) m##F = (PFN_vk##F)vkGetDeviceProcAddr(mDevice, "vk" #F)
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050040
John Reck1bcacfd2017-11-03 10:12:19 -070041VulkanManager::VulkanManager(RenderThread& thread) : mRenderThread(thread) {}
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050042
43void VulkanManager::destroy() {
Greg Daniel45ec62b2017-01-04 14:27:00 -050044 mRenderThread.setGrContext(nullptr);
45
Greg Daniel26e0dca2018-09-18 10:33:19 -040046 // We don't need to explicitly free the command buffer since it automatically gets freed when we
47 // delete the VkCommandPool below.
48 mDummyCB = VK_NULL_HANDLE;
49
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050050 if (VK_NULL_HANDLE != mCommandPool) {
Greg Daniel2ff202712018-06-14 11:50:10 -040051 mDestroyCommandPool(mDevice, mCommandPool, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050052 mCommandPool = VK_NULL_HANDLE;
53 }
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050054
Greg Daniel2ff202712018-06-14 11:50:10 -040055 if (mDevice != VK_NULL_HANDLE) {
56 mDeviceWaitIdle(mDevice);
57 mDestroyDevice(mDevice, nullptr);
John Reck1bcacfd2017-11-03 10:12:19 -070058 }
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050059
Greg Daniel2ff202712018-06-14 11:50:10 -040060 if (mInstance != VK_NULL_HANDLE) {
61 mDestroyInstance(mInstance, nullptr);
62 }
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050063
Greg Daniel2ff202712018-06-14 11:50:10 -040064 mGraphicsQueue = VK_NULL_HANDLE;
65 mPresentQueue = VK_NULL_HANDLE;
66 mDevice = VK_NULL_HANDLE;
67 mPhysicalDevice = VK_NULL_HANDLE;
68 mInstance = VK_NULL_HANDLE;
69}
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050070
Greg Daniela227dbb2018-08-20 09:19:48 -040071bool VulkanManager::setupDevice(GrVkExtensions& grExtensions, VkPhysicalDeviceFeatures2& features) {
Greg Daniel2ff202712018-06-14 11:50:10 -040072 VkResult err;
73
74 constexpr VkApplicationInfo app_info = {
75 VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
76 nullptr, // pNext
77 "android framework", // pApplicationName
78 0, // applicationVersion
79 "android framework", // pEngineName
80 0, // engineVerison
Greg Daniel96259622018-10-01 14:42:56 -040081 VK_MAKE_VERSION(1, 1, 0), // apiVersion
Greg Daniel2ff202712018-06-14 11:50:10 -040082 };
83
84 std::vector<const char*> instanceExtensions;
85 {
86 GET_PROC(EnumerateInstanceExtensionProperties);
87
88 uint32_t extensionCount = 0;
89 err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
90 if (VK_SUCCESS != err) {
91 return false;
92 }
93 std::unique_ptr<VkExtensionProperties[]> extensions(
94 new VkExtensionProperties[extensionCount]);
95 err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions.get());
96 if (VK_SUCCESS != err) {
97 return false;
98 }
99 bool hasKHRSurfaceExtension = false;
100 bool hasKHRAndroidSurfaceExtension = false;
101 for (uint32_t i = 0; i < extensionCount; ++i) {
102 instanceExtensions.push_back(extensions[i].extensionName);
103 if (!strcmp(extensions[i].extensionName, VK_KHR_SURFACE_EXTENSION_NAME)) {
104 hasKHRSurfaceExtension = true;
105 }
106 if (!strcmp(extensions[i].extensionName,VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
107 hasKHRAndroidSurfaceExtension = true;
108 }
109 }
110 if (!hasKHRSurfaceExtension || !hasKHRAndroidSurfaceExtension) {
111 this->destroy();
112 return false;
113 }
114 }
115
116 const VkInstanceCreateInfo instance_create = {
117 VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
118 nullptr, // pNext
119 0, // flags
120 &app_info, // pApplicationInfo
121 0, // enabledLayerNameCount
122 nullptr, // ppEnabledLayerNames
123 (uint32_t) instanceExtensions.size(), // enabledExtensionNameCount
124 instanceExtensions.data(), // ppEnabledExtensionNames
125 };
126
127 GET_PROC(CreateInstance);
128 err = mCreateInstance(&instance_create, nullptr, &mInstance);
129 if (err < 0) {
130 this->destroy();
131 return false;
132 }
133
134 GET_INST_PROC(DestroyInstance);
135 GET_INST_PROC(EnumeratePhysicalDevices);
Greg Daniel96259622018-10-01 14:42:56 -0400136 GET_INST_PROC(GetPhysicalDeviceProperties);
Greg Daniel2ff202712018-06-14 11:50:10 -0400137 GET_INST_PROC(GetPhysicalDeviceQueueFamilyProperties);
Greg Daniela227dbb2018-08-20 09:19:48 -0400138 GET_INST_PROC(GetPhysicalDeviceFeatures2);
Greg Daniel2ff202712018-06-14 11:50:10 -0400139 GET_INST_PROC(CreateDevice);
140 GET_INST_PROC(EnumerateDeviceExtensionProperties);
141 GET_INST_PROC(CreateAndroidSurfaceKHR);
142 GET_INST_PROC(DestroySurfaceKHR);
143 GET_INST_PROC(GetPhysicalDeviceSurfaceSupportKHR);
144 GET_INST_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
145 GET_INST_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
146 GET_INST_PROC(GetPhysicalDeviceSurfacePresentModesKHR);
147
148 uint32_t gpuCount;
149 err = mEnumeratePhysicalDevices(mInstance, &gpuCount, nullptr);
150 if (err) {
151 this->destroy();
152 return false;
153 }
154 if (!gpuCount) {
155 this->destroy();
156 return false;
157 }
158 // Just returning the first physical device instead of getting the whole array. Since there
159 // should only be one device on android.
160 gpuCount = 1;
161 err = mEnumeratePhysicalDevices(mInstance, &gpuCount, &mPhysicalDevice);
162 // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
163 if (err && VK_INCOMPLETE != err) {
164 this->destroy();
165 return false;
166 }
167
Greg Daniel96259622018-10-01 14:42:56 -0400168 VkPhysicalDeviceProperties physDeviceProperties;
169 mGetPhysicalDeviceProperties(mPhysicalDevice, &physDeviceProperties);
170 if (physDeviceProperties.apiVersion < VK_MAKE_VERSION(1, 1, 0)) {
171 this->destroy();
172 return false;
173 }
174
Greg Daniel2ff202712018-06-14 11:50:10 -0400175 // query to get the initial queue props size
176 uint32_t queueCount;
177 mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, nullptr);
178 if (!queueCount) {
179 this->destroy();
180 return false;
181 }
182
183 // now get the actual queue props
184 std::unique_ptr<VkQueueFamilyProperties[]> queueProps(new VkQueueFamilyProperties[queueCount]);
185 mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, queueProps.get());
186
187 // iterate to find the graphics queue
188 mGraphicsQueueIndex = queueCount;
189 for (uint32_t i = 0; i < queueCount; i++) {
190 if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
191 mGraphicsQueueIndex = i;
192 break;
193 }
194 }
195 if (mGraphicsQueueIndex == queueCount) {
196 this->destroy();
197 return false;
198 }
199
200 // All physical devices and queue families on Android must be capable of
201 // presentation with any native window. So just use the first one.
202 mPresentQueueIndex = 0;
203
204 std::vector<const char*> deviceExtensions;
205 {
206 uint32_t extensionCount = 0;
207 err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
208 nullptr);
209 if (VK_SUCCESS != err) {
210 this->destroy();
211 return false;
212 }
213 std::unique_ptr<VkExtensionProperties[]> extensions(
214 new VkExtensionProperties[extensionCount]);
215 err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
216 extensions.get());
217 if (VK_SUCCESS != err) {
218 this->destroy();
219 return false;
220 }
221 bool hasKHRSwapchainExtension = false;
222 for (uint32_t i = 0; i < extensionCount; ++i) {
223 deviceExtensions.push_back(extensions[i].extensionName);
224 if (!strcmp(extensions[i].extensionName, VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
225 hasKHRSwapchainExtension = true;
226 }
227 }
228 if (!hasKHRSwapchainExtension) {
229 this->destroy();
230 return false;
231 }
232 }
233
Greg Daniela227dbb2018-08-20 09:19:48 -0400234 auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
235 if (device != VK_NULL_HANDLE) {
236 return vkGetDeviceProcAddr(device, proc_name);
237 }
238 return vkGetInstanceProcAddr(instance, proc_name);
239 };
240 grExtensions.init(getProc, mInstance, mPhysicalDevice, instanceExtensions.size(),
241 instanceExtensions.data(), deviceExtensions.size(), deviceExtensions.data());
242
Greg Daniel26e0dca2018-09-18 10:33:19 -0400243 if (!grExtensions.hasExtension(VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME, 1)) {
244 this->destroy();
245 return false;
246 }
247
Greg Daniela227dbb2018-08-20 09:19:48 -0400248 memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
249 features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
250 features.pNext = nullptr;
251
252 // Setup all extension feature structs we may want to use.
253 void** tailPNext = &features.pNext;
254
255 if (grExtensions.hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
256 VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend;
257 blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) malloc(
258 sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
259 LOG_ALWAYS_FATAL_IF(!blend);
260 blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
261 blend->pNext = nullptr;
262 *tailPNext = blend;
263 tailPNext = &blend->pNext;
264 }
265
Greg Daniel05036172018-11-28 17:08:04 -0500266 VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature;
267 ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*) malloc(
268 sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
269 LOG_ALWAYS_FATAL_IF(!ycbcrFeature);
270 ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
271 ycbcrFeature->pNext = nullptr;
272 *tailPNext = ycbcrFeature;
273 tailPNext = &ycbcrFeature->pNext;
274
Greg Daniela227dbb2018-08-20 09:19:48 -0400275 // query to get the physical device features
276 mGetPhysicalDeviceFeatures2(mPhysicalDevice, &features);
Greg Daniel2ff202712018-06-14 11:50:10 -0400277 // this looks like it would slow things down,
278 // and we can't depend on it on all platforms
Greg Daniela227dbb2018-08-20 09:19:48 -0400279 features.features.robustBufferAccess = VK_FALSE;
Greg Daniel2ff202712018-06-14 11:50:10 -0400280
281 float queuePriorities[1] = { 0.0 };
282
283 const VkDeviceQueueCreateInfo queueInfo[2] = {
284 {
285 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
286 nullptr, // pNext
287 0, // VkDeviceQueueCreateFlags
288 mGraphicsQueueIndex, // queueFamilyIndex
289 1, // queueCount
290 queuePriorities, // pQueuePriorities
291 },
292 {
293 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
294 nullptr, // pNext
295 0, // VkDeviceQueueCreateFlags
296 mPresentQueueIndex, // queueFamilyIndex
297 1, // queueCount
298 queuePriorities, // pQueuePriorities
299 }
300 };
301 uint32_t queueInfoCount = (mPresentQueueIndex != mGraphicsQueueIndex) ? 2 : 1;
302
303 const VkDeviceCreateInfo deviceInfo = {
304 VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
Greg Daniela227dbb2018-08-20 09:19:48 -0400305 &features, // pNext
Greg Daniel2ff202712018-06-14 11:50:10 -0400306 0, // VkDeviceCreateFlags
307 queueInfoCount, // queueCreateInfoCount
308 queueInfo, // pQueueCreateInfos
309 0, // layerCount
310 nullptr, // ppEnabledLayerNames
311 (uint32_t) deviceExtensions.size(), // extensionCount
312 deviceExtensions.data(), // ppEnabledExtensionNames
Greg Daniela227dbb2018-08-20 09:19:48 -0400313 nullptr, // ppEnabledFeatures
Greg Daniel2ff202712018-06-14 11:50:10 -0400314 };
315
316 err = mCreateDevice(mPhysicalDevice, &deviceInfo, nullptr, &mDevice);
317 if (err) {
318 this->destroy();
319 return false;
320 }
321
322 GET_DEV_PROC(GetDeviceQueue);
323 GET_DEV_PROC(DeviceWaitIdle);
324 GET_DEV_PROC(DestroyDevice);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500325 GET_DEV_PROC(CreateSwapchainKHR);
326 GET_DEV_PROC(DestroySwapchainKHR);
327 GET_DEV_PROC(GetSwapchainImagesKHR);
328 GET_DEV_PROC(AcquireNextImageKHR);
329 GET_DEV_PROC(QueuePresentKHR);
330 GET_DEV_PROC(CreateCommandPool);
331 GET_DEV_PROC(DestroyCommandPool);
332 GET_DEV_PROC(AllocateCommandBuffers);
333 GET_DEV_PROC(FreeCommandBuffers);
334 GET_DEV_PROC(ResetCommandBuffer);
335 GET_DEV_PROC(BeginCommandBuffer);
336 GET_DEV_PROC(EndCommandBuffer);
337 GET_DEV_PROC(CmdPipelineBarrier);
338 GET_DEV_PROC(GetDeviceQueue);
339 GET_DEV_PROC(QueueSubmit);
340 GET_DEV_PROC(QueueWaitIdle);
341 GET_DEV_PROC(DeviceWaitIdle);
342 GET_DEV_PROC(CreateSemaphore);
343 GET_DEV_PROC(DestroySemaphore);
Greg Daniel26e0dca2018-09-18 10:33:19 -0400344 GET_DEV_PROC(ImportSemaphoreFdKHR);
345 GET_DEV_PROC(GetSemaphoreFdKHR);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500346 GET_DEV_PROC(CreateFence);
347 GET_DEV_PROC(DestroyFence);
348 GET_DEV_PROC(WaitForFences);
349 GET_DEV_PROC(ResetFences);
350
Greg Daniel2ff202712018-06-14 11:50:10 -0400351 return true;
352}
353
Greg Daniela227dbb2018-08-20 09:19:48 -0400354static void free_features_extensions_structs(const VkPhysicalDeviceFeatures2& features) {
355 // All Vulkan structs that could be part of the features chain will start with the
356 // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
357 // so we can get access to the pNext for the next struct.
358 struct CommonVulkanHeader {
359 VkStructureType sType;
360 void* pNext;
361 };
362
363 void* pNext = features.pNext;
364 while (pNext) {
365 void* current = pNext;
366 pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
367 free(current);
368 }
369}
370
Greg Daniel2ff202712018-06-14 11:50:10 -0400371void VulkanManager::initialize() {
372 if (mDevice != VK_NULL_HANDLE) {
373 return;
374 }
375
Greg Daniela227dbb2018-08-20 09:19:48 -0400376 GET_PROC(EnumerateInstanceVersion);
377 uint32_t instanceVersion = 0;
378 LOG_ALWAYS_FATAL_IF(mEnumerateInstanceVersion(&instanceVersion));
379 LOG_ALWAYS_FATAL_IF(instanceVersion < VK_MAKE_VERSION(1, 1, 0));
380
381 GrVkExtensions extensions;
382 VkPhysicalDeviceFeatures2 features;
383 LOG_ALWAYS_FATAL_IF(!this->setupDevice(extensions, features));
Greg Daniel2ff202712018-06-14 11:50:10 -0400384
385 mGetDeviceQueue(mDevice, mGraphicsQueueIndex, 0, &mGraphicsQueue);
386
Greg Daniel2ff202712018-06-14 11:50:10 -0400387 auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
388 if (device != VK_NULL_HANDLE) {
389 return vkGetDeviceProcAddr(device, proc_name);
390 }
391 return vkGetInstanceProcAddr(instance, proc_name);
392 };
Greg Daniel2ff202712018-06-14 11:50:10 -0400393
394 GrVkBackendContext backendContext;
395 backendContext.fInstance = mInstance;
396 backendContext.fPhysicalDevice = mPhysicalDevice;
397 backendContext.fDevice = mDevice;
398 backendContext.fQueue = mGraphicsQueue;
399 backendContext.fGraphicsQueueIndex = mGraphicsQueueIndex;
Greg Daniela227dbb2018-08-20 09:19:48 -0400400 backendContext.fInstanceVersion = instanceVersion;
401 backendContext.fVkExtensions = &extensions;
402 backendContext.fDeviceFeatures2 = &features;
Greg Daniel4aa58672018-07-13 13:10:36 -0400403 backendContext.fGetProc = std::move(getProc);
Greg Daniel2ff202712018-06-14 11:50:10 -0400404
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500405 // create the command pool for the command buffers
406 if (VK_NULL_HANDLE == mCommandPool) {
407 VkCommandPoolCreateInfo commandPoolInfo;
408 memset(&commandPoolInfo, 0, sizeof(VkCommandPoolCreateInfo));
409 commandPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
410 // this needs to be on the render queue
Greg Daniel2ff202712018-06-14 11:50:10 -0400411 commandPoolInfo.queueFamilyIndex = mGraphicsQueueIndex;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500412 commandPoolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
Greg Daniel2ff202712018-06-14 11:50:10 -0400413 SkDEBUGCODE(VkResult res =) mCreateCommandPool(mDevice, &commandPoolInfo, nullptr,
414 &mCommandPool);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500415 SkASSERT(VK_SUCCESS == res);
416 }
Greg Daniel26e0dca2018-09-18 10:33:19 -0400417 LOG_ALWAYS_FATAL_IF(mCommandPool == VK_NULL_HANDLE);
418
419 if (!setupDummyCommandBuffer()) {
420 this->destroy();
421 return;
422 }
423 LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);
424
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500425
Greg Daniel2ff202712018-06-14 11:50:10 -0400426 mGetDeviceQueue(mDevice, mPresentQueueIndex, 0, &mPresentQueue);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500427
Stan Ilievd495f432017-10-09 15:49:32 -0400428 GrContextOptions options;
429 options.fDisableDistanceFieldPaths = true;
Yichi Chen9f959552018-03-29 21:21:54 +0800430 // TODO: get a string describing the SPIR-V compiler version and use it here
431 mRenderThread.cacheManager().configureContext(&options, nullptr, 0);
Greg Daniel2ff202712018-06-14 11:50:10 -0400432 sk_sp<GrContext> grContext(GrContext::MakeVulkan(backendContext, options));
Greg Daniel660d6ec2017-12-08 11:44:27 -0500433 LOG_ALWAYS_FATAL_IF(!grContext.get());
434 mRenderThread.setGrContext(grContext);
Greg Daniela227dbb2018-08-20 09:19:48 -0400435
436 free_features_extensions_structs(features);
437
Greg Danielcd558522016-11-17 13:31:40 -0500438 if (Properties::enablePartialUpdates && Properties::useBufferAge) {
439 mSwapBehavior = SwapBehavior::BufferAge;
440 }
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500441}
442
443// Returns the next BackbufferInfo to use for the next draw. The function will make sure all
444// previous uses have finished before returning.
445VulkanSurface::BackbufferInfo* VulkanManager::getAvailableBackbuffer(VulkanSurface* surface) {
446 SkASSERT(surface->mBackbuffers);
447
448 ++surface->mCurrentBackbufferIndex;
449 if (surface->mCurrentBackbufferIndex > surface->mImageCount) {
450 surface->mCurrentBackbufferIndex = 0;
451 }
452
John Reck1bcacfd2017-11-03 10:12:19 -0700453 VulkanSurface::BackbufferInfo* backbuffer =
454 surface->mBackbuffers + surface->mCurrentBackbufferIndex;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500455
456 // Before we reuse a backbuffer, make sure its fences have all signaled so that we can safely
457 // reuse its commands buffers.
Greg Daniel2ff202712018-06-14 11:50:10 -0400458 VkResult res = mWaitForFences(mDevice, 2, backbuffer->mUsageFences, true, UINT64_MAX);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500459 if (res != VK_SUCCESS) {
460 return nullptr;
461 }
462
463 return backbuffer;
464}
465
Stan Iliev305e13a2018-11-13 11:14:48 -0500466SkSurface* VulkanManager::getBackbufferSurface(VulkanSurface** surfaceOut) {
467 // Recreate VulkanSurface, if ANativeWindow has been resized.
468 VulkanSurface* surface = *surfaceOut;
469 int windowWidth = 0, windowHeight = 0;
470 ANativeWindow* window = surface->mNativeWindow;
471 window->query(window, NATIVE_WINDOW_WIDTH, &windowWidth);
472 window->query(window, NATIVE_WINDOW_HEIGHT, &windowHeight);
473 if (windowWidth != surface->mWindowWidth || windowHeight != surface->mWindowHeight) {
474 ColorMode colorMode = surface->mColorMode;
Stan Iliev987a80c2018-12-04 10:07:21 -0500475 sk_sp<SkColorSpace> colorSpace = surface->mColorSpace;
Stan Iliev305e13a2018-11-13 11:14:48 -0500476 destroySurface(surface);
Stan Iliev987a80c2018-12-04 10:07:21 -0500477 *surfaceOut = createSurface(window, colorMode, colorSpace);
Stan Iliev305e13a2018-11-13 11:14:48 -0500478 surface = *surfaceOut;
479 }
480
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500481 VulkanSurface::BackbufferInfo* backbuffer = getAvailableBackbuffer(surface);
482 SkASSERT(backbuffer);
483
484 VkResult res;
485
Greg Daniel2ff202712018-06-14 11:50:10 -0400486 res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500487 SkASSERT(VK_SUCCESS == res);
488
489 // The acquire will signal the attached mAcquireSemaphore. We use this to know the image has
490 // finished presenting and that it is safe to begin sending new commands to the returned image.
Greg Daniel2ff202712018-06-14 11:50:10 -0400491 res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
John Reck1bcacfd2017-11-03 10:12:19 -0700492 backbuffer->mAcquireSemaphore, VK_NULL_HANDLE,
493 &backbuffer->mImageIndex);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500494
495 if (VK_ERROR_SURFACE_LOST_KHR == res) {
496 // need to figure out how to create a new vkSurface without the platformData*
497 // maybe use attach somehow? but need a Window
498 return nullptr;
499 }
500 if (VK_ERROR_OUT_OF_DATE_KHR == res) {
501 // tear swapchain down and try again
502 if (!createSwapchain(surface)) {
503 return nullptr;
504 }
Greg Daniel45ec62b2017-01-04 14:27:00 -0500505 backbuffer = getAvailableBackbuffer(surface);
Greg Daniel2ff202712018-06-14 11:50:10 -0400506 res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
Greg Daniel45ec62b2017-01-04 14:27:00 -0500507 SkASSERT(VK_SUCCESS == res);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500508
509 // acquire the image
Greg Daniel2ff202712018-06-14 11:50:10 -0400510 res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
John Reck1bcacfd2017-11-03 10:12:19 -0700511 backbuffer->mAcquireSemaphore, VK_NULL_HANDLE,
512 &backbuffer->mImageIndex);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500513
514 if (VK_SUCCESS != res) {
515 return nullptr;
516 }
517 }
518
519 // set up layout transfer from initial to color attachment
Greg Danielcd558522016-11-17 13:31:40 -0500520 VkImageLayout layout = surface->mImageInfos[backbuffer->mImageIndex].mImageLayout;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500521 SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout || VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout);
Greg Daniel8a2a7542018-10-04 13:46:55 -0400522 VkPipelineStageFlags srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500523 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
Greg Daniel8a2a7542018-10-04 13:46:55 -0400524 VkAccessFlags srcAccessMask = 0;
525 VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
526 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500527
528 VkImageMemoryBarrier imageMemoryBarrier = {
John Reck1bcacfd2017-11-03 10:12:19 -0700529 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
530 NULL, // pNext
531 srcAccessMask, // outputMask
532 dstAccessMask, // inputMask
533 layout, // oldLayout
534 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // newLayout
535 mPresentQueueIndex, // srcQueueFamilyIndex
Greg Daniel2ff202712018-06-14 11:50:10 -0400536 mGraphicsQueueIndex, // dstQueueFamilyIndex
John Reck1bcacfd2017-11-03 10:12:19 -0700537 surface->mImages[backbuffer->mImageIndex], // image
538 {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1} // subresourceRange
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500539 };
540 mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[0], 0);
541
542 VkCommandBufferBeginInfo info;
543 memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
544 info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
545 info.flags = 0;
546 mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[0], &info);
547
John Reck1bcacfd2017-11-03 10:12:19 -0700548 mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[0], srcStageMask, dstStageMask, 0, 0,
549 nullptr, 0, nullptr, 1, &imageMemoryBarrier);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500550
551 mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[0]);
552
553 VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
554 // insert the layout transfer into the queue and wait on the acquire
555 VkSubmitInfo submitInfo;
556 memset(&submitInfo, 0, sizeof(VkSubmitInfo));
557 submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
558 submitInfo.waitSemaphoreCount = 1;
559 // Wait to make sure aquire semaphore set above has signaled.
560 submitInfo.pWaitSemaphores = &backbuffer->mAcquireSemaphore;
561 submitInfo.pWaitDstStageMask = &waitDstStageFlags;
562 submitInfo.commandBufferCount = 1;
563 submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[0];
564 submitInfo.signalSemaphoreCount = 0;
565
566 // Attach first fence to submission here so we can track when the command buffer finishes.
Greg Daniel2ff202712018-06-14 11:50:10 -0400567 mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[0]);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500568
569 // We need to notify Skia that we changed the layout of the wrapped VkImage
Greg Danielcd558522016-11-17 13:31:40 -0500570 sk_sp<SkSurface> skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface;
Greg Daniel1834a8c2018-04-12 12:22:43 -0400571 GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
572 SkSurface::kFlushRead_BackendHandleAccess);
573 if (!backendRT.isValid()) {
574 SkASSERT(backendRT.isValid());
575 return nullptr;
576 }
577 backendRT.setVkImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500578
579 surface->mBackbuffer = std::move(skSurface);
580 return surface->mBackbuffer.get();
581}
582
583void VulkanManager::destroyBuffers(VulkanSurface* surface) {
584 if (surface->mBackbuffers) {
585 for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400586 mWaitForFences(mDevice, 2, surface->mBackbuffers[i].mUsageFences, true, UINT64_MAX);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500587 surface->mBackbuffers[i].mImageIndex = -1;
Greg Daniel2ff202712018-06-14 11:50:10 -0400588 mDestroySemaphore(mDevice, surface->mBackbuffers[i].mAcquireSemaphore, nullptr);
589 mDestroySemaphore(mDevice, surface->mBackbuffers[i].mRenderSemaphore, nullptr);
590 mFreeCommandBuffers(mDevice, mCommandPool, 2,
591 surface->mBackbuffers[i].mTransitionCmdBuffers);
592 mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[0], 0);
593 mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[1], 0);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500594 }
595 }
596
597 delete[] surface->mBackbuffers;
598 surface->mBackbuffers = nullptr;
Greg Danielcd558522016-11-17 13:31:40 -0500599 delete[] surface->mImageInfos;
600 surface->mImageInfos = nullptr;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500601 delete[] surface->mImages;
602 surface->mImages = nullptr;
603}
604
605void VulkanManager::destroySurface(VulkanSurface* surface) {
606 // Make sure all submit commands have finished before starting to destroy objects.
607 if (VK_NULL_HANDLE != mPresentQueue) {
608 mQueueWaitIdle(mPresentQueue);
609 }
Greg Daniel2ff202712018-06-14 11:50:10 -0400610 mDeviceWaitIdle(mDevice);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500611
612 destroyBuffers(surface);
613
614 if (VK_NULL_HANDLE != surface->mSwapchain) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400615 mDestroySwapchainKHR(mDevice, surface->mSwapchain, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500616 surface->mSwapchain = VK_NULL_HANDLE;
617 }
618
619 if (VK_NULL_HANDLE != surface->mVkSurface) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400620 mDestroySurfaceKHR(mInstance, surface->mVkSurface, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500621 surface->mVkSurface = VK_NULL_HANDLE;
622 }
623 delete surface;
624}
625
626void VulkanManager::createBuffers(VulkanSurface* surface, VkFormat format, VkExtent2D extent) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400627 mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500628 SkASSERT(surface->mImageCount);
629 surface->mImages = new VkImage[surface->mImageCount];
Greg Daniel2ff202712018-06-14 11:50:10 -0400630 mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, surface->mImages);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500631
632 SkSurfaceProps props(0, kUnknown_SkPixelGeometry);
633
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500634 // set up initial image layouts and create surfaces
Greg Danielcd558522016-11-17 13:31:40 -0500635 surface->mImageInfos = new VulkanSurface::ImageInfo[surface->mImageCount];
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500636 for (uint32_t i = 0; i < surface->mImageCount; ++i) {
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500637 GrVkImageInfo info;
638 info.fImage = surface->mImages[i];
Greg Danielc9a89452018-02-23 13:16:12 -0500639 info.fAlloc = GrVkAlloc();
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500640 info.fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
641 info.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
642 info.fFormat = format;
643 info.fLevelCount = 1;
644
Greg Danielac2d2322017-07-12 11:30:15 -0400645 GrBackendRenderTarget backendRT(extent.width, extent.height, 0, 0, info);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500646
Greg Danielcd558522016-11-17 13:31:40 -0500647 VulkanSurface::ImageInfo& imageInfo = surface->mImageInfos[i];
John Reck1bcacfd2017-11-03 10:12:19 -0700648 imageInfo.mSurface = SkSurface::MakeFromBackendRenderTarget(
Greg Danielc9da8e82018-03-21 10:50:24 -0400649 mRenderThread.getGrContext(), backendRT, kTopLeft_GrSurfaceOrigin,
Stan Iliev79351f32018-09-19 14:23:49 -0400650 surface->mColorMode == ColorMode::WideColorGamut ? kRGBA_F16_SkColorType
Stan Iliev987a80c2018-12-04 10:07:21 -0500651 : kRGBA_8888_SkColorType, surface->mColorSpace, &props);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500652 }
653
654 SkASSERT(mCommandPool != VK_NULL_HANDLE);
655
656 // set up the backbuffers
657 VkSemaphoreCreateInfo semaphoreInfo;
658 memset(&semaphoreInfo, 0, sizeof(VkSemaphoreCreateInfo));
659 semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
660 semaphoreInfo.pNext = nullptr;
661 semaphoreInfo.flags = 0;
662 VkCommandBufferAllocateInfo commandBuffersInfo;
663 memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
664 commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
665 commandBuffersInfo.pNext = nullptr;
666 commandBuffersInfo.commandPool = mCommandPool;
667 commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
668 commandBuffersInfo.commandBufferCount = 2;
669 VkFenceCreateInfo fenceInfo;
670 memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
671 fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
672 fenceInfo.pNext = nullptr;
673 fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;
674
675 // we create one additional backbuffer structure here, because we want to
676 // give the command buffers they contain a chance to finish before we cycle back
677 surface->mBackbuffers = new VulkanSurface::BackbufferInfo[surface->mImageCount + 1];
678 for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
679 SkDEBUGCODE(VkResult res);
680 surface->mBackbuffers[i].mImageIndex = -1;
Greg Daniel2ff202712018-06-14 11:50:10 -0400681 SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
John Reck1bcacfd2017-11-03 10:12:19 -0700682 &surface->mBackbuffers[i].mAcquireSemaphore);
Greg Daniel2ff202712018-06-14 11:50:10 -0400683 SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
John Reck1bcacfd2017-11-03 10:12:19 -0700684 &surface->mBackbuffers[i].mRenderSemaphore);
Greg Daniel2ff202712018-06-14 11:50:10 -0400685 SkDEBUGCODE(res =) mAllocateCommandBuffers(mDevice, &commandBuffersInfo,
John Reck1bcacfd2017-11-03 10:12:19 -0700686 surface->mBackbuffers[i].mTransitionCmdBuffers);
Greg Daniel2ff202712018-06-14 11:50:10 -0400687 SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
John Reck1bcacfd2017-11-03 10:12:19 -0700688 &surface->mBackbuffers[i].mUsageFences[0]);
Greg Daniel2ff202712018-06-14 11:50:10 -0400689 SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
John Reck1bcacfd2017-11-03 10:12:19 -0700690 &surface->mBackbuffers[i].mUsageFences[1]);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500691 SkASSERT(VK_SUCCESS == res);
692 }
693 surface->mCurrentBackbufferIndex = surface->mImageCount;
694}
695
696bool VulkanManager::createSwapchain(VulkanSurface* surface) {
697 // check for capabilities
698 VkSurfaceCapabilitiesKHR caps;
Greg Daniel2ff202712018-06-14 11:50:10 -0400699 VkResult res = mGetPhysicalDeviceSurfaceCapabilitiesKHR(mPhysicalDevice,
John Reck1bcacfd2017-11-03 10:12:19 -0700700 surface->mVkSurface, &caps);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500701 if (VK_SUCCESS != res) {
702 return false;
703 }
704
705 uint32_t surfaceFormatCount;
Greg Daniel2ff202712018-06-14 11:50:10 -0400706 res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
John Reck1bcacfd2017-11-03 10:12:19 -0700707 &surfaceFormatCount, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500708 if (VK_SUCCESS != res) {
709 return false;
710 }
711
Ben Wagnereec27d52017-01-11 15:32:07 -0500712 FatVector<VkSurfaceFormatKHR, 4> surfaceFormats(surfaceFormatCount);
Greg Daniel2ff202712018-06-14 11:50:10 -0400713 res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
John Reck1bcacfd2017-11-03 10:12:19 -0700714 &surfaceFormatCount, surfaceFormats.data());
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500715 if (VK_SUCCESS != res) {
716 return false;
717 }
718
719 uint32_t presentModeCount;
Greg Daniel2ff202712018-06-14 11:50:10 -0400720 res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
John Reck1bcacfd2017-11-03 10:12:19 -0700721 surface->mVkSurface, &presentModeCount, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500722 if (VK_SUCCESS != res) {
723 return false;
724 }
725
Ben Wagnereec27d52017-01-11 15:32:07 -0500726 FatVector<VkPresentModeKHR, VK_PRESENT_MODE_RANGE_SIZE_KHR> presentModes(presentModeCount);
Greg Daniel2ff202712018-06-14 11:50:10 -0400727 res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
John Reck1bcacfd2017-11-03 10:12:19 -0700728 surface->mVkSurface, &presentModeCount,
729 presentModes.data());
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500730 if (VK_SUCCESS != res) {
731 return false;
732 }
733
734 VkExtent2D extent = caps.currentExtent;
735 // clamp width; to handle currentExtent of -1 and protect us from broken hints
736 if (extent.width < caps.minImageExtent.width) {
737 extent.width = caps.minImageExtent.width;
738 }
739 SkASSERT(extent.width <= caps.maxImageExtent.width);
740 // clamp height
741 if (extent.height < caps.minImageExtent.height) {
742 extent.height = caps.minImageExtent.height;
743 }
744 SkASSERT(extent.height <= caps.maxImageExtent.height);
Stan Iliev305e13a2018-11-13 11:14:48 -0500745 surface->mWindowWidth = extent.width;
746 surface->mWindowHeight = extent.height;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500747
748 uint32_t imageCount = caps.minImageCount + 2;
749 if (caps.maxImageCount > 0 && imageCount > caps.maxImageCount) {
750 // Application must settle for fewer images than desired:
751 imageCount = caps.maxImageCount;
752 }
753
754 // Currently Skia requires the images to be color attchments and support all transfer
755 // operations.
756 VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
757 VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
758 VK_IMAGE_USAGE_TRANSFER_DST_BIT;
759 SkASSERT((caps.supportedUsageFlags & usageFlags) == usageFlags);
760 SkASSERT(caps.supportedTransforms & caps.currentTransform);
John Reck1bcacfd2017-11-03 10:12:19 -0700761 SkASSERT(caps.supportedCompositeAlpha &
762 (VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR | VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR));
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500763 VkCompositeAlphaFlagBitsKHR composite_alpha =
John Reck1bcacfd2017-11-03 10:12:19 -0700764 (caps.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR)
765 ? VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR
766 : VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500767
Stan Iliev79351f32018-09-19 14:23:49 -0400768 VkFormat surfaceFormat = VK_FORMAT_R8G8B8A8_UNORM;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500769 VkColorSpaceKHR colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
Stan Iliev79351f32018-09-19 14:23:49 -0400770 if (surface->mColorMode == ColorMode::WideColorGamut) {
771 surfaceFormat = VK_FORMAT_R16G16B16A16_SFLOAT;
772 colorSpace = VK_COLOR_SPACE_EXTENDED_SRGB_NONLINEAR_EXT;
773 }
774 bool foundSurfaceFormat = false;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500775 for (uint32_t i = 0; i < surfaceFormatCount; ++i) {
Stan Iliev79351f32018-09-19 14:23:49 -0400776 if (surfaceFormat == surfaceFormats[i].format
777 && colorSpace == surfaceFormats[i].colorSpace) {
778 foundSurfaceFormat = true;
779 break;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500780 }
781 }
782
Stan Iliev79351f32018-09-19 14:23:49 -0400783 if (!foundSurfaceFormat) {
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500784 return false;
785 }
786
Greg Daniel8a2a7542018-10-04 13:46:55 -0400787 // FIFO is always available and will match what we do on GL so just pick that here.
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500788 VkPresentModeKHR mode = VK_PRESENT_MODE_FIFO_KHR;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500789
790 VkSwapchainCreateInfoKHR swapchainCreateInfo;
791 memset(&swapchainCreateInfo, 0, sizeof(VkSwapchainCreateInfoKHR));
792 swapchainCreateInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
793 swapchainCreateInfo.surface = surface->mVkSurface;
794 swapchainCreateInfo.minImageCount = imageCount;
795 swapchainCreateInfo.imageFormat = surfaceFormat;
796 swapchainCreateInfo.imageColorSpace = colorSpace;
797 swapchainCreateInfo.imageExtent = extent;
798 swapchainCreateInfo.imageArrayLayers = 1;
799 swapchainCreateInfo.imageUsage = usageFlags;
800
Greg Daniel2ff202712018-06-14 11:50:10 -0400801 uint32_t queueFamilies[] = {mGraphicsQueueIndex, mPresentQueueIndex};
802 if (mGraphicsQueueIndex != mPresentQueueIndex) {
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500803 swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
804 swapchainCreateInfo.queueFamilyIndexCount = 2;
805 swapchainCreateInfo.pQueueFamilyIndices = queueFamilies;
806 } else {
807 swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
808 swapchainCreateInfo.queueFamilyIndexCount = 0;
809 swapchainCreateInfo.pQueueFamilyIndices = nullptr;
810 }
811
812 swapchainCreateInfo.preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
813 swapchainCreateInfo.compositeAlpha = composite_alpha;
814 swapchainCreateInfo.presentMode = mode;
815 swapchainCreateInfo.clipped = true;
816 swapchainCreateInfo.oldSwapchain = surface->mSwapchain;
817
Greg Daniel2ff202712018-06-14 11:50:10 -0400818 res = mCreateSwapchainKHR(mDevice, &swapchainCreateInfo, nullptr, &surface->mSwapchain);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500819 if (VK_SUCCESS != res) {
820 return false;
821 }
822
823 // destroy the old swapchain
824 if (swapchainCreateInfo.oldSwapchain != VK_NULL_HANDLE) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400825 mDeviceWaitIdle(mDevice);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500826
827 destroyBuffers(surface);
828
Greg Daniel2ff202712018-06-14 11:50:10 -0400829 mDestroySwapchainKHR(mDevice, swapchainCreateInfo.oldSwapchain, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500830 }
831
832 createBuffers(surface, surfaceFormat, extent);
833
834 return true;
835}
836
Stan Iliev987a80c2018-12-04 10:07:21 -0500837VulkanSurface* VulkanManager::createSurface(ANativeWindow* window, ColorMode colorMode,
838 sk_sp<SkColorSpace> surfaceColorSpace) {
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500839 initialize();
840
841 if (!window) {
842 return nullptr;
843 }
844
Stan Iliev987a80c2018-12-04 10:07:21 -0500845 VulkanSurface* surface = new VulkanSurface(colorMode, window, surfaceColorSpace);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500846
847 VkAndroidSurfaceCreateInfoKHR surfaceCreateInfo;
848 memset(&surfaceCreateInfo, 0, sizeof(VkAndroidSurfaceCreateInfoKHR));
849 surfaceCreateInfo.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
850 surfaceCreateInfo.pNext = nullptr;
851 surfaceCreateInfo.flags = 0;
852 surfaceCreateInfo.window = window;
853
Greg Daniel2ff202712018-06-14 11:50:10 -0400854 VkResult res = mCreateAndroidSurfaceKHR(mInstance, &surfaceCreateInfo, nullptr,
855 &surface->mVkSurface);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500856 if (VK_SUCCESS != res) {
857 delete surface;
858 return nullptr;
859 }
860
John Reck1bcacfd2017-11-03 10:12:19 -0700861 SkDEBUGCODE(VkBool32 supported; res = mGetPhysicalDeviceSurfaceSupportKHR(
Greg Daniel2ff202712018-06-14 11:50:10 -0400862 mPhysicalDevice, mPresentQueueIndex, surface->mVkSurface, &supported);
863 // All physical devices and queue families on Android must be capable of
864 // presentation with any native window.
865 SkASSERT(VK_SUCCESS == res && supported););
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500866
867 if (!createSwapchain(surface)) {
868 destroySurface(surface);
869 return nullptr;
870 }
871
872 return surface;
873}
874
875// Helper to know which src stage flags we need to set when transitioning to the present layout
Greg Daniel8a2a7542018-10-04 13:46:55 -0400876static VkPipelineStageFlags layoutToPipelineSrcStageFlags(const VkImageLayout layout) {
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500877 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
878 return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
879 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
880 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
881 return VK_PIPELINE_STAGE_TRANSFER_BIT;
Greg Daniel8a2a7542018-10-04 13:46:55 -0400882 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
883 return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
884 } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
885 VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout) {
886 return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
887 } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
888 return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500889 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
890 return VK_PIPELINE_STAGE_HOST_BIT;
891 }
892
893 SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
894 return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
895}
896
897// Helper to know which src access mask we need to set when transitioning to the present layout
898static VkAccessFlags layoutToSrcAccessMask(const VkImageLayout layout) {
899 VkAccessFlags flags = 0;
900 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
901 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
John Reck1bcacfd2017-11-03 10:12:19 -0700902 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT |
903 VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_HOST_WRITE_BIT |
904 VK_ACCESS_HOST_READ_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500905 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
906 flags = VK_ACCESS_HOST_WRITE_BIT;
907 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
908 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
909 } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
910 flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
911 } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
912 flags = VK_ACCESS_TRANSFER_WRITE_BIT;
913 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
914 flags = VK_ACCESS_TRANSFER_READ_BIT;
915 } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
916 flags = VK_ACCESS_SHADER_READ_BIT;
917 }
918 return flags;
919}
920
// Presents the current backbuffer: records a barrier transitioning the swapchain
// image to PRESENT_SRC (and transferring queue-family ownership from the graphics
// queue to the present queue), submits it signaling the render semaphore, then
// queues the present waiting on that semaphore. Also updates the per-image
// bookkeeping (layout, last-used time, validity) used by getAge().
void VulkanManager::swapBuffers(VulkanSurface* surface) {
    if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
        // Debug property: serialize against the GPU before presenting.
        ATRACE_NAME("Finishing GPU work");
        mDeviceWaitIdle(mDevice);
    }

    SkASSERT(surface->mBackbuffers);
    VulkanSurface::BackbufferInfo* backbuffer =
            surface->mBackbuffers + surface->mCurrentBackbufferIndex;

    // Pull the Vulkan image info back out of the SkSurface wrapping this backbuffer
    // so we can see the layout Skia left the image in.
    SkSurface* skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface.get();
    GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
            SkSurface::kFlushRead_BackendHandleAccess);
    SkASSERT(backendRT.isValid());

    GrVkImageInfo imageInfo;
    SkAssertResult(backendRT.getVkImageInfo(&imageInfo));

    // Check to make sure we never change the actually wrapped image
    SkASSERT(imageInfo.fImage == surface->mImages[backbuffer->mImageIndex]);

    // We need to transition the image to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR and make sure that all
    // previous work is complete for before presenting. So we first add the necessary barrier here.
    VkImageLayout layout = imageInfo.fImageLayout;
    VkPipelineStageFlags srcStageMask = layoutToPipelineSrcStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = layoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = 0;

    // Barrier also performs the graphics->present queue-family ownership transfer
    // (src/dstQueueFamilyIndex below); a no-op when both indices are equal.
    VkImageMemoryBarrier imageMemoryBarrier = {
            VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,    // sType
            NULL,                                      // pNext
            srcAccessMask,                             // outputMask
            dstAccessMask,                             // inputMask
            layout,                                    // oldLayout
            VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,           // newLayout
            mGraphicsQueueIndex,                       // srcQueueFamilyIndex
            mPresentQueueIndex,                        // dstQueueFamilyIndex
            surface->mImages[backbuffer->mImageIndex], // image
            {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}    // subresourceRange
    };

    // Re-record the second transition command buffer with just this barrier.
    // NOTE(review): reuse presumably relies on mUsageFences[1] having been waited
    // on before this backbuffer was handed out — confirm against the acquire path.
    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[1], 0);
    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[1], &info);
    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[1], srcStageMask, dstStageMask, 0, 0,
                        nullptr, 0, nullptr, 1, &imageMemoryBarrier);
    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[1]);

    // Remember the layout we left the image in for the next acquire of this image.
    surface->mImageInfos[backbuffer->mImageIndex].mImageLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 0;
    submitInfo.pWaitDstStageMask = 0;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[1];
    submitInfo.signalSemaphoreCount = 1;
    // When this command buffer finishes we will signal this semaphore so that we know it is now
    // safe to present the image to the screen.
    submitInfo.pSignalSemaphores = &backbuffer->mRenderSemaphore;

    // Attach second fence to submission here so we can track when the command buffer finishes.
    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[1]);

    // Submit present operation to present queue. We use a semaphore here to make sure all rendering
    // to the image is complete and that the layout has been change to present on the graphics
    // queue.
    const VkPresentInfoKHR presentInfo = {
            VK_STRUCTURE_TYPE_PRESENT_INFO_KHR, // sType
            NULL,                               // pNext
            1,                                  // waitSemaphoreCount
            &backbuffer->mRenderSemaphore,      // pWaitSemaphores
            1,                                  // swapchainCount
            &surface->mSwapchain,               // pSwapchains
            &backbuffer->mImageIndex,           // pImageIndices
            NULL                                // pResults
    };

    mQueuePresentKHR(mPresentQueue, &presentInfo);

    // Frame bookkeeping: drop our SkSurface ref, stamp when this image was shown,
    // mark its contents valid, and advance the logical frame clock (used by getAge).
    surface->mBackbuffer.reset();
    surface->mImageInfos[backbuffer->mImageIndex].mLastUsed = surface->mCurrentTime;
    surface->mImageInfos[backbuffer->mImageIndex].mInvalid = false;
    surface->mCurrentTime++;
}
1012
1013int VulkanManager::getAge(VulkanSurface* surface) {
Greg Daniel74ea2012017-11-10 11:32:58 -05001014 SkASSERT(surface->mBackbuffers);
John Reck1bcacfd2017-11-03 10:12:19 -07001015 VulkanSurface::BackbufferInfo* backbuffer =
1016 surface->mBackbuffers + surface->mCurrentBackbufferIndex;
1017 if (mSwapBehavior == SwapBehavior::Discard ||
1018 surface->mImageInfos[backbuffer->mImageIndex].mInvalid) {
Greg Danielcd558522016-11-17 13:31:40 -05001019 return 0;
1020 }
1021 uint16_t lastUsed = surface->mImageInfos[backbuffer->mImageIndex].mLastUsed;
1022 return surface->mCurrentTime - lastUsed;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001023}
1024
Greg Daniel26e0dca2018-09-18 10:33:19 -04001025bool VulkanManager::setupDummyCommandBuffer() {
1026 if (mDummyCB != VK_NULL_HANDLE) {
1027 return true;
1028 }
1029
1030 VkCommandBufferAllocateInfo commandBuffersInfo;
1031 memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
1032 commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
1033 commandBuffersInfo.pNext = nullptr;
1034 commandBuffersInfo.commandPool = mCommandPool;
1035 commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
1036 commandBuffersInfo.commandBufferCount = 1;
1037
1038 VkResult err = mAllocateCommandBuffers(mDevice, &commandBuffersInfo, &mDummyCB);
1039 if (err != VK_SUCCESS) {
1040 // It is probably unnecessary to set this back to VK_NULL_HANDLE, but we set it anyways to
1041 // make sure the driver didn't set a value and then return a failure.
1042 mDummyCB = VK_NULL_HANDLE;
1043 return false;
1044 }
1045
1046 VkCommandBufferBeginInfo beginInfo;
1047 memset(&beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
1048 beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
1049 beginInfo.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
1050
1051 mBeginCommandBuffer(mDummyCB, &beginInfo);
1052 mEndCommandBuffer(mDummyCB);
1053 return true;
1054}
1055
Stan Iliev564ca3e2018-09-04 22:00:00 +00001056status_t VulkanManager::fenceWait(sp<Fence>& fence) {
Greg Daniel26e0dca2018-09-18 10:33:19 -04001057 if (!hasVkContext()) {
1058 ALOGE("VulkanManager::fenceWait: VkDevice not initialized");
1059 return INVALID_OPERATION;
1060 }
1061
Stan Iliev7a081272018-10-26 17:54:18 -04001062 // Block GPU on the fence.
1063 int fenceFd = fence->dup();
1064 if (fenceFd == -1) {
1065 ALOGE("VulkanManager::fenceWait: error dup'ing fence fd: %d", errno);
1066 return -errno;
Stan Iliev564ca3e2018-09-04 22:00:00 +00001067 }
Stan Iliev7a081272018-10-26 17:54:18 -04001068
1069 VkSemaphoreCreateInfo semaphoreInfo;
1070 semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
1071 semaphoreInfo.pNext = nullptr;
1072 semaphoreInfo.flags = 0;
1073 VkSemaphore semaphore;
1074 VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
1075 if (VK_SUCCESS != err) {
1076 ALOGE("Failed to create import semaphore, err: %d", err);
1077 return UNKNOWN_ERROR;
1078 }
1079 VkImportSemaphoreFdInfoKHR importInfo;
1080 importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
1081 importInfo.pNext = nullptr;
1082 importInfo.semaphore = semaphore;
1083 importInfo.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
1084 importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
1085 importInfo.fd = fenceFd;
1086
1087 err = mImportSemaphoreFdKHR(mDevice, &importInfo);
1088 if (VK_SUCCESS != err) {
1089 ALOGE("Failed to import semaphore, err: %d", err);
1090 return UNKNOWN_ERROR;
1091 }
1092
1093 LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);
1094
1095 VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
1096
1097 VkSubmitInfo submitInfo;
1098 memset(&submitInfo, 0, sizeof(VkSubmitInfo));
1099 submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
1100 submitInfo.waitSemaphoreCount = 1;
1101 // Wait to make sure aquire semaphore set above has signaled.
1102 submitInfo.pWaitSemaphores = &semaphore;
1103 submitInfo.pWaitDstStageMask = &waitDstStageFlags;
1104 submitInfo.commandBufferCount = 1;
1105 submitInfo.pCommandBuffers = &mDummyCB;
1106 submitInfo.signalSemaphoreCount = 0;
1107
1108 mQueueSubmit(mGraphicsQueue, 1, &submitInfo, VK_NULL_HANDLE);
1109
1110 // On Android when we import a semaphore, it is imported using temporary permanence. That
1111 // means as soon as we queue the semaphore for a wait it reverts to its previous permanent
1112 // state before importing. This means it will now be in an idle state with no pending
1113 // signal or wait operations, so it is safe to immediately delete it.
1114 mDestroySemaphore(mDevice, semaphore, nullptr);
Stan Iliev564ca3e2018-09-04 22:00:00 +00001115 return OK;
1116}
1117
1118status_t VulkanManager::createReleaseFence(sp<Fence>& nativeFence) {
Greg Daniel26e0dca2018-09-18 10:33:19 -04001119 if (!hasVkContext()) {
1120 ALOGE("VulkanManager::createReleaseFence: VkDevice not initialized");
1121 return INVALID_OPERATION;
1122 }
1123
Greg Daniel26e0dca2018-09-18 10:33:19 -04001124 VkExportSemaphoreCreateInfo exportInfo;
1125 exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
1126 exportInfo.pNext = nullptr;
1127 exportInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
1128
1129 VkSemaphoreCreateInfo semaphoreInfo;
1130 semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
1131 semaphoreInfo.pNext = &exportInfo;
1132 semaphoreInfo.flags = 0;
1133 VkSemaphore semaphore;
1134 VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
1135 if (VK_SUCCESS != err) {
1136 ALOGE("VulkanManager::createReleaseFence: Failed to create semaphore");
1137 return INVALID_OPERATION;
1138 }
1139
1140 LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);
1141
1142 VkSubmitInfo submitInfo;
1143 memset(&submitInfo, 0, sizeof(VkSubmitInfo));
1144 submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
1145 submitInfo.waitSemaphoreCount = 0;
1146 submitInfo.pWaitSemaphores = nullptr;
1147 submitInfo.pWaitDstStageMask = nullptr;
1148 submitInfo.commandBufferCount = 1;
1149 submitInfo.pCommandBuffers = &mDummyCB;
1150 submitInfo.signalSemaphoreCount = 1;
1151 submitInfo.pSignalSemaphores = &semaphore;
1152
1153 mQueueSubmit(mGraphicsQueue, 1, &submitInfo, VK_NULL_HANDLE);
1154
1155 VkSemaphoreGetFdInfoKHR getFdInfo;
1156 getFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
1157 getFdInfo.pNext = nullptr;
1158 getFdInfo.semaphore = semaphore;
1159 getFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
1160
1161 int fenceFd = 0;
1162
1163 err = mGetSemaphoreFdKHR(mDevice, &getFdInfo, &fenceFd);
1164 if (VK_SUCCESS != err) {
1165 ALOGE("VulkanManager::createReleaseFence: Failed to get semaphore Fd");
1166 return INVALID_OPERATION;
1167 }
1168 nativeFence = new Fence(fenceFd);
1169
1170 // Exporting a semaphore with copy transference via vkGetSemahporeFdKHR, has the same effect of
1171 // destroying the semaphore and creating a new one with the same handle, and the payloads
1172 // ownership is move to the Fd we created. Thus the semahpore is in a state that we can delete
1173 // it and we don't need to wait on the command buffer we submitted to finish.
1174 mDestroySemaphore(mDevice, semaphore, nullptr);
1175
Stan Iliev564ca3e2018-09-04 22:00:00 +00001176 return OK;
1177}
1178
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001179} /* namespace renderthread */
1180} /* namespace uirenderer */
1181} /* namespace android */