blob: 4be8bd9a863e7df7af4ab8306e1c12837a6fe113 [file] [log] [blame]
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001/*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "VulkanManager.h"
18
Stan Iliev305e13a2018-11-13 11:14:48 -050019#include <gui/Surface.h>
20
Greg Danielcd558522016-11-17 13:31:40 -050021#include "Properties.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050022#include "RenderThread.h"
Greg Daniel45ec62b2017-01-04 14:27:00 -050023#include "renderstate/RenderState.h"
Ben Wagnereec27d52017-01-11 15:32:07 -050024#include "utils/FatVector.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050025
Greg Danielac2d2322017-07-12 11:30:15 -040026#include <GrBackendSurface.h>
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050027#include <GrContext.h>
28#include <GrTypes.h>
Greg Daniela227dbb2018-08-20 09:19:48 -040029#include <GrTypes.h>
30#include <vk/GrVkExtensions.h>
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050031#include <vk/GrVkTypes.h>
32
33namespace android {
34namespace uirenderer {
35namespace renderthread {
36
// Helpers that resolve Vulkan entry points into the matching m<Name> member
// function pointers.
//   GET_PROC      — global-level functions, looked up with a null instance.
//   GET_INST_PROC — instance-level functions, looked up via mInstance.
//   GET_DEV_PROC  — device-level functions, looked up via mDevice.
#define GET_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(VK_NULL_HANDLE, "vk" #F)
#define GET_INST_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(mInstance, "vk" #F)
#define GET_DEV_PROC(F) m##F = (PFN_vk##F)vkGetDeviceProcAddr(mDevice, "vk" #F)
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050040
John Reck1bcacfd2017-11-03 10:12:19 -070041VulkanManager::VulkanManager(RenderThread& thread) : mRenderThread(thread) {}
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050042
// Tears down all Vulkan state owned by this manager. Safe to call multiple times and
// on a partially-initialized manager: each handle is checked against VK_NULL_HANDLE
// before being destroyed, and all handles are reset to VK_NULL_HANDLE at the end.
// Destruction order matters: pool before device, device before instance.
void VulkanManager::destroy() {
    // Drop the GrContext first so Skia releases its Vulkan resources while the
    // device is still alive.
    mRenderThread.setGrContext(nullptr);

    // We don't need to explicitly free the command buffer since it automatically gets freed when we
    // delete the VkCommandPool below.
    mDummyCB = VK_NULL_HANDLE;

    if (VK_NULL_HANDLE != mCommandPool) {
        mDestroyCommandPool(mDevice, mCommandPool, nullptr);
        mCommandPool = VK_NULL_HANDLE;
    }

    if (mDevice != VK_NULL_HANDLE) {
        // Wait for all queues to drain before destroying the device.
        mDeviceWaitIdle(mDevice);
        mDestroyDevice(mDevice, nullptr);
    }

    if (mInstance != VK_NULL_HANDLE) {
        mDestroyInstance(mInstance, nullptr);
    }

    // Queues are owned by the device; clearing the handles is sufficient.
    mGraphicsQueue = VK_NULL_HANDLE;
    mPresentQueue = VK_NULL_HANDLE;
    mDevice = VK_NULL_HANDLE;
    mPhysicalDevice = VK_NULL_HANDLE;
    mInstance = VK_NULL_HANDLE;
}
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050070
Greg Daniela227dbb2018-08-20 09:19:48 -040071bool VulkanManager::setupDevice(GrVkExtensions& grExtensions, VkPhysicalDeviceFeatures2& features) {
Greg Daniel2ff202712018-06-14 11:50:10 -040072 VkResult err;
73
74 constexpr VkApplicationInfo app_info = {
75 VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
76 nullptr, // pNext
77 "android framework", // pApplicationName
78 0, // applicationVersion
79 "android framework", // pEngineName
80 0, // engineVerison
Greg Daniel96259622018-10-01 14:42:56 -040081 VK_MAKE_VERSION(1, 1, 0), // apiVersion
Greg Daniel2ff202712018-06-14 11:50:10 -040082 };
83
84 std::vector<const char*> instanceExtensions;
85 {
86 GET_PROC(EnumerateInstanceExtensionProperties);
87
88 uint32_t extensionCount = 0;
89 err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
90 if (VK_SUCCESS != err) {
91 return false;
92 }
93 std::unique_ptr<VkExtensionProperties[]> extensions(
94 new VkExtensionProperties[extensionCount]);
95 err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions.get());
96 if (VK_SUCCESS != err) {
97 return false;
98 }
99 bool hasKHRSurfaceExtension = false;
100 bool hasKHRAndroidSurfaceExtension = false;
101 for (uint32_t i = 0; i < extensionCount; ++i) {
102 instanceExtensions.push_back(extensions[i].extensionName);
103 if (!strcmp(extensions[i].extensionName, VK_KHR_SURFACE_EXTENSION_NAME)) {
104 hasKHRSurfaceExtension = true;
105 }
106 if (!strcmp(extensions[i].extensionName,VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
107 hasKHRAndroidSurfaceExtension = true;
108 }
109 }
110 if (!hasKHRSurfaceExtension || !hasKHRAndroidSurfaceExtension) {
111 this->destroy();
112 return false;
113 }
114 }
115
116 const VkInstanceCreateInfo instance_create = {
117 VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
118 nullptr, // pNext
119 0, // flags
120 &app_info, // pApplicationInfo
121 0, // enabledLayerNameCount
122 nullptr, // ppEnabledLayerNames
123 (uint32_t) instanceExtensions.size(), // enabledExtensionNameCount
124 instanceExtensions.data(), // ppEnabledExtensionNames
125 };
126
127 GET_PROC(CreateInstance);
128 err = mCreateInstance(&instance_create, nullptr, &mInstance);
129 if (err < 0) {
130 this->destroy();
131 return false;
132 }
133
134 GET_INST_PROC(DestroyInstance);
135 GET_INST_PROC(EnumeratePhysicalDevices);
Greg Daniel96259622018-10-01 14:42:56 -0400136 GET_INST_PROC(GetPhysicalDeviceProperties);
Greg Daniel2ff202712018-06-14 11:50:10 -0400137 GET_INST_PROC(GetPhysicalDeviceQueueFamilyProperties);
Greg Daniela227dbb2018-08-20 09:19:48 -0400138 GET_INST_PROC(GetPhysicalDeviceFeatures2);
Greg Daniel2ff202712018-06-14 11:50:10 -0400139 GET_INST_PROC(CreateDevice);
140 GET_INST_PROC(EnumerateDeviceExtensionProperties);
141 GET_INST_PROC(CreateAndroidSurfaceKHR);
142 GET_INST_PROC(DestroySurfaceKHR);
143 GET_INST_PROC(GetPhysicalDeviceSurfaceSupportKHR);
144 GET_INST_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
145 GET_INST_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
146 GET_INST_PROC(GetPhysicalDeviceSurfacePresentModesKHR);
147
148 uint32_t gpuCount;
149 err = mEnumeratePhysicalDevices(mInstance, &gpuCount, nullptr);
150 if (err) {
151 this->destroy();
152 return false;
153 }
154 if (!gpuCount) {
155 this->destroy();
156 return false;
157 }
158 // Just returning the first physical device instead of getting the whole array. Since there
159 // should only be one device on android.
160 gpuCount = 1;
161 err = mEnumeratePhysicalDevices(mInstance, &gpuCount, &mPhysicalDevice);
162 // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
163 if (err && VK_INCOMPLETE != err) {
164 this->destroy();
165 return false;
166 }
167
Greg Daniel96259622018-10-01 14:42:56 -0400168 VkPhysicalDeviceProperties physDeviceProperties;
169 mGetPhysicalDeviceProperties(mPhysicalDevice, &physDeviceProperties);
170 if (physDeviceProperties.apiVersion < VK_MAKE_VERSION(1, 1, 0)) {
171 this->destroy();
172 return false;
173 }
174
Greg Daniel2ff202712018-06-14 11:50:10 -0400175 // query to get the initial queue props size
176 uint32_t queueCount;
177 mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, nullptr);
178 if (!queueCount) {
179 this->destroy();
180 return false;
181 }
182
183 // now get the actual queue props
184 std::unique_ptr<VkQueueFamilyProperties[]> queueProps(new VkQueueFamilyProperties[queueCount]);
185 mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, queueProps.get());
186
187 // iterate to find the graphics queue
188 mGraphicsQueueIndex = queueCount;
189 for (uint32_t i = 0; i < queueCount; i++) {
190 if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
191 mGraphicsQueueIndex = i;
192 break;
193 }
194 }
195 if (mGraphicsQueueIndex == queueCount) {
196 this->destroy();
197 return false;
198 }
199
200 // All physical devices and queue families on Android must be capable of
201 // presentation with any native window. So just use the first one.
202 mPresentQueueIndex = 0;
203
204 std::vector<const char*> deviceExtensions;
205 {
206 uint32_t extensionCount = 0;
207 err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
208 nullptr);
209 if (VK_SUCCESS != err) {
210 this->destroy();
211 return false;
212 }
213 std::unique_ptr<VkExtensionProperties[]> extensions(
214 new VkExtensionProperties[extensionCount]);
215 err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
216 extensions.get());
217 if (VK_SUCCESS != err) {
218 this->destroy();
219 return false;
220 }
221 bool hasKHRSwapchainExtension = false;
222 for (uint32_t i = 0; i < extensionCount; ++i) {
223 deviceExtensions.push_back(extensions[i].extensionName);
224 if (!strcmp(extensions[i].extensionName, VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
225 hasKHRSwapchainExtension = true;
226 }
227 }
228 if (!hasKHRSwapchainExtension) {
229 this->destroy();
230 return false;
231 }
232 }
233
Greg Daniela227dbb2018-08-20 09:19:48 -0400234 auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
235 if (device != VK_NULL_HANDLE) {
236 return vkGetDeviceProcAddr(device, proc_name);
237 }
238 return vkGetInstanceProcAddr(instance, proc_name);
239 };
240 grExtensions.init(getProc, mInstance, mPhysicalDevice, instanceExtensions.size(),
241 instanceExtensions.data(), deviceExtensions.size(), deviceExtensions.data());
242
Greg Daniel26e0dca2018-09-18 10:33:19 -0400243 if (!grExtensions.hasExtension(VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME, 1)) {
244 this->destroy();
245 return false;
246 }
247
Greg Daniela227dbb2018-08-20 09:19:48 -0400248 memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
249 features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
250 features.pNext = nullptr;
251
252 // Setup all extension feature structs we may want to use.
253 void** tailPNext = &features.pNext;
254
255 if (grExtensions.hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
256 VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend;
257 blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) malloc(
258 sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
259 LOG_ALWAYS_FATAL_IF(!blend);
260 blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
261 blend->pNext = nullptr;
262 *tailPNext = blend;
263 tailPNext = &blend->pNext;
264 }
265
Greg Daniel05036172018-11-28 17:08:04 -0500266 VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature;
267 ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*) malloc(
268 sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
269 LOG_ALWAYS_FATAL_IF(!ycbcrFeature);
270 ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
271 ycbcrFeature->pNext = nullptr;
272 *tailPNext = ycbcrFeature;
273 tailPNext = &ycbcrFeature->pNext;
274
Greg Daniela227dbb2018-08-20 09:19:48 -0400275 // query to get the physical device features
276 mGetPhysicalDeviceFeatures2(mPhysicalDevice, &features);
Greg Daniel2ff202712018-06-14 11:50:10 -0400277 // this looks like it would slow things down,
278 // and we can't depend on it on all platforms
Greg Daniela227dbb2018-08-20 09:19:48 -0400279 features.features.robustBufferAccess = VK_FALSE;
Greg Daniel2ff202712018-06-14 11:50:10 -0400280
281 float queuePriorities[1] = { 0.0 };
282
283 const VkDeviceQueueCreateInfo queueInfo[2] = {
284 {
285 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
286 nullptr, // pNext
287 0, // VkDeviceQueueCreateFlags
288 mGraphicsQueueIndex, // queueFamilyIndex
289 1, // queueCount
290 queuePriorities, // pQueuePriorities
291 },
292 {
293 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
294 nullptr, // pNext
295 0, // VkDeviceQueueCreateFlags
296 mPresentQueueIndex, // queueFamilyIndex
297 1, // queueCount
298 queuePriorities, // pQueuePriorities
299 }
300 };
301 uint32_t queueInfoCount = (mPresentQueueIndex != mGraphicsQueueIndex) ? 2 : 1;
302
303 const VkDeviceCreateInfo deviceInfo = {
304 VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
Greg Daniela227dbb2018-08-20 09:19:48 -0400305 &features, // pNext
Greg Daniel2ff202712018-06-14 11:50:10 -0400306 0, // VkDeviceCreateFlags
307 queueInfoCount, // queueCreateInfoCount
308 queueInfo, // pQueueCreateInfos
309 0, // layerCount
310 nullptr, // ppEnabledLayerNames
311 (uint32_t) deviceExtensions.size(), // extensionCount
312 deviceExtensions.data(), // ppEnabledExtensionNames
Greg Daniela227dbb2018-08-20 09:19:48 -0400313 nullptr, // ppEnabledFeatures
Greg Daniel2ff202712018-06-14 11:50:10 -0400314 };
315
316 err = mCreateDevice(mPhysicalDevice, &deviceInfo, nullptr, &mDevice);
317 if (err) {
318 this->destroy();
319 return false;
320 }
321
322 GET_DEV_PROC(GetDeviceQueue);
323 GET_DEV_PROC(DeviceWaitIdle);
324 GET_DEV_PROC(DestroyDevice);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500325 GET_DEV_PROC(CreateSwapchainKHR);
326 GET_DEV_PROC(DestroySwapchainKHR);
327 GET_DEV_PROC(GetSwapchainImagesKHR);
328 GET_DEV_PROC(AcquireNextImageKHR);
329 GET_DEV_PROC(QueuePresentKHR);
330 GET_DEV_PROC(CreateCommandPool);
331 GET_DEV_PROC(DestroyCommandPool);
332 GET_DEV_PROC(AllocateCommandBuffers);
333 GET_DEV_PROC(FreeCommandBuffers);
334 GET_DEV_PROC(ResetCommandBuffer);
335 GET_DEV_PROC(BeginCommandBuffer);
336 GET_DEV_PROC(EndCommandBuffer);
337 GET_DEV_PROC(CmdPipelineBarrier);
338 GET_DEV_PROC(GetDeviceQueue);
339 GET_DEV_PROC(QueueSubmit);
340 GET_DEV_PROC(QueueWaitIdle);
341 GET_DEV_PROC(DeviceWaitIdle);
342 GET_DEV_PROC(CreateSemaphore);
343 GET_DEV_PROC(DestroySemaphore);
Greg Daniel26e0dca2018-09-18 10:33:19 -0400344 GET_DEV_PROC(ImportSemaphoreFdKHR);
345 GET_DEV_PROC(GetSemaphoreFdKHR);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500346 GET_DEV_PROC(CreateFence);
347 GET_DEV_PROC(DestroyFence);
348 GET_DEV_PROC(WaitForFences);
349 GET_DEV_PROC(ResetFences);
350
Greg Daniel2ff202712018-06-14 11:50:10 -0400351 return true;
352}
353
Greg Daniela227dbb2018-08-20 09:19:48 -0400354static void free_features_extensions_structs(const VkPhysicalDeviceFeatures2& features) {
355 // All Vulkan structs that could be part of the features chain will start with the
356 // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
357 // so we can get access to the pNext for the next struct.
358 struct CommonVulkanHeader {
359 VkStructureType sType;
360 void* pNext;
361 };
362
363 void* pNext = features.pNext;
364 while (pNext) {
365 void* current = pNext;
366 pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
367 free(current);
368 }
369}
370
// One-time (idempotent) Vulkan bring-up for this RenderThread: verifies the loader
// supports Vulkan 1.1, creates instance/device via setupDevice(), creates the Skia
// GrContext over the Vulkan backend, and allocates the shared command pool and
// dummy command buffer. Fatal (LOG_ALWAYS_FATAL) if any required step fails.
void VulkanManager::initialize() {
    // Already initialized — setupDevice() is what populates mDevice.
    if (mDevice != VK_NULL_HANDLE) {
        return;
    }

    // Require a 1.1 loader before anything else; the app_info in setupDevice
    // requests apiVersion 1.1 as well.
    GET_PROC(EnumerateInstanceVersion);
    uint32_t instanceVersion = 0;
    LOG_ALWAYS_FATAL_IF(mEnumerateInstanceVersion(&instanceVersion));
    LOG_ALWAYS_FATAL_IF(instanceVersion < VK_MAKE_VERSION(1, 1, 0));

    GrVkExtensions extensions;
    // features.pNext is a malloc'd chain owned by us; released below via
    // free_features_extensions_structs() once Skia has consumed it.
    VkPhysicalDeviceFeatures2 features;
    LOG_ALWAYS_FATAL_IF(!this->setupDevice(extensions, features));

    mGetDeviceQueue(mDevice, mGraphicsQueueIndex, 0, &mGraphicsQueue);

    // Skia resolves its own Vulkan entry points through this callback.
    auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return vkGetDeviceProcAddr(device, proc_name);
        }
        return vkGetInstanceProcAddr(instance, proc_name);
    };

    GrVkBackendContext backendContext;
    backendContext.fInstance = mInstance;
    backendContext.fPhysicalDevice = mPhysicalDevice;
    backendContext.fDevice = mDevice;
    backendContext.fQueue = mGraphicsQueue;
    backendContext.fGraphicsQueueIndex = mGraphicsQueueIndex;
    backendContext.fInstanceVersion = instanceVersion;
    backendContext.fVkExtensions = &extensions;
    backendContext.fDeviceFeatures2 = &features;
    backendContext.fGetProc = std::move(getProc);

    // create the command pool for the command buffers
    if (VK_NULL_HANDLE == mCommandPool) {
        VkCommandPoolCreateInfo commandPoolInfo;
        memset(&commandPoolInfo, 0, sizeof(VkCommandPoolCreateInfo));
        commandPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
        // this needs to be on the render queue
        commandPoolInfo.queueFamilyIndex = mGraphicsQueueIndex;
        commandPoolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
        SkDEBUGCODE(VkResult res =) mCreateCommandPool(mDevice, &commandPoolInfo, nullptr,
                                                       &mCommandPool);
        SkASSERT(VK_SUCCESS == res);
    }
    LOG_ALWAYS_FATAL_IF(mCommandPool == VK_NULL_HANDLE);

    if (!setupDummyCommandBuffer()) {
        this->destroy();
        return;
    }
    LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);


    mGetDeviceQueue(mDevice, mPresentQueueIndex, 0, &mPresentQueue);

    GrContextOptions options;
    options.fDisableDistanceFieldPaths = true;
    // TODO: get a string describing the SPIR-V compiler version and use it here
    mRenderThread.cacheManager().configureContext(&options, nullptr, 0);
    sk_sp<GrContext> grContext(GrContext::MakeVulkan(backendContext, options));
    LOG_ALWAYS_FATAL_IF(!grContext.get());
    mRenderThread.setGrContext(grContext);

    // Skia has copied what it needs; release the malloc'd feature chain.
    free_features_extensions_structs(features);

    if (Properties::enablePartialUpdates && Properties::useBufferAge) {
        mSwapBehavior = SwapBehavior::BufferAge;
    }
}
442
443// Returns the next BackbufferInfo to use for the next draw. The function will make sure all
444// previous uses have finished before returning.
445VulkanSurface::BackbufferInfo* VulkanManager::getAvailableBackbuffer(VulkanSurface* surface) {
446 SkASSERT(surface->mBackbuffers);
447
448 ++surface->mCurrentBackbufferIndex;
449 if (surface->mCurrentBackbufferIndex > surface->mImageCount) {
450 surface->mCurrentBackbufferIndex = 0;
451 }
452
John Reck1bcacfd2017-11-03 10:12:19 -0700453 VulkanSurface::BackbufferInfo* backbuffer =
454 surface->mBackbuffers + surface->mCurrentBackbufferIndex;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500455
456 // Before we reuse a backbuffer, make sure its fences have all signaled so that we can safely
457 // reuse its commands buffers.
Greg Daniel2ff202712018-06-14 11:50:10 -0400458 VkResult res = mWaitForFences(mDevice, 2, backbuffer->mUsageFences, true, UINT64_MAX);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500459 if (res != VK_SUCCESS) {
460 return nullptr;
461 }
462
463 return backbuffer;
464}
465
// Acquires the next swapchain image for *surfaceOut and returns an SkSurface wrapping
// it, ready for rendering. Recreates the VulkanSurface if the native window was
// resized, and rebuilds the swapchain on VK_ERROR_OUT_OF_DATE_KHR. Also records and
// submits a pipeline barrier transitioning the image to COLOR_ATTACHMENT_OPTIMAL
// (and transferring queue ownership present -> graphics). Returns nullptr on failure;
// *surfaceOut may be replaced with a newly created surface.
SkSurface* VulkanManager::getBackbufferSurface(VulkanSurface** surfaceOut) {
    // Recreate VulkanSurface, if ANativeWindow has been resized.
    VulkanSurface* surface = *surfaceOut;
    int windowWidth = 0, windowHeight = 0;
    ANativeWindow* window = surface->mNativeWindow;
    window->query(window, NATIVE_WINDOW_WIDTH, &windowWidth);
    window->query(window, NATIVE_WINDOW_HEIGHT, &windowHeight);
    if (windowWidth != surface->mWindowWidth || windowHeight != surface->mWindowHeight) {
        // Carry the color configuration over to the replacement surface.
        ColorMode colorMode = surface->mColorMode;
        sk_sp<SkColorSpace> colorSpace = surface->mColorSpace;
        destroySurface(surface);
        *surfaceOut = createSurface(window, colorMode, colorSpace);
        surface = *surfaceOut;
    }

    VulkanSurface::BackbufferInfo* backbuffer = getAvailableBackbuffer(surface);
    SkASSERT(backbuffer);

    VkResult res;

    // Fences were signaled (waited on in getAvailableBackbuffer); re-arm them for
    // this frame's submissions.
    res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
    SkASSERT(VK_SUCCESS == res);

    // The acquire will signal the attached mAcquireSemaphore. We use this to know the image has
    // finished presenting and that it is safe to begin sending new commands to the returned image.
    res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
                               backbuffer->mAcquireSemaphore, VK_NULL_HANDLE,
                               &backbuffer->mImageIndex);

    if (VK_ERROR_SURFACE_LOST_KHR == res) {
        // need to figure out how to create a new vkSurface without the platformData*
        // maybe use attach somehow? but need a Window
        return nullptr;
    }
    if (VK_ERROR_OUT_OF_DATE_KHR == res) {
        // tear swapchain down and try again
        if (!createSwapchain(surface)) {
            return nullptr;
        }
        backbuffer = getAvailableBackbuffer(surface);
        res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
        SkASSERT(VK_SUCCESS == res);

        // acquire the image
        res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
                                   backbuffer->mAcquireSemaphore, VK_NULL_HANDLE,
                                   &backbuffer->mImageIndex);

        if (VK_SUCCESS != res) {
            return nullptr;
        }
    }

    // set up layout transfer from initial to color attachment
    VkImageLayout layout = surface->mImageInfos[backbuffer->mImageIndex].mImageLayout;
    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout || VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout);
    VkPipelineStageFlags srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkAccessFlags srcAccessMask = 0;
    VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
                                  VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;

    VkImageMemoryBarrier imageMemoryBarrier = {
            VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,    // sType
            NULL,                                      // pNext
            srcAccessMask,                             // outputMask
            dstAccessMask,                             // inputMask
            layout,                                    // oldLayout
            VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,  // newLayout
            mPresentQueueIndex,                        // srcQueueFamilyIndex
            mGraphicsQueueIndex,                       // dstQueueFamilyIndex
            surface->mImages[backbuffer->mImageIndex], // image
            {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}    // subresourceRange
    };
    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[0], 0);

    // Record the barrier into the first transition command buffer of this slot.
    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[0], &info);

    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[0], srcStageMask, dstStageMask, 0, 0,
                        nullptr, 0, nullptr, 1, &imageMemoryBarrier);

    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[0]);

    VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 1;
    // Wait to make sure acquire semaphore set above has signaled.
    submitInfo.pWaitSemaphores = &backbuffer->mAcquireSemaphore;
    submitInfo.pWaitDstStageMask = &waitDstStageFlags;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[0];
    submitInfo.signalSemaphoreCount = 0;

    // Attach first fence to submission here so we can track when the command buffer finishes.
    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[0]);

    // We need to notify Skia that we changed the layout of the wrapped VkImage
    sk_sp<SkSurface> skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface;
    GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
            SkSurface::kFlushRead_BackendHandleAccess);
    if (!backendRT.isValid()) {
        SkASSERT(backendRT.isValid());
        return nullptr;
    }
    backendRT.setVkImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);

    surface->mBackbuffer = std::move(skSurface);
    return surface->mBackbuffer.get();
}
582
// Destroys the per-swapchain-image resources of a VulkanSurface: the backbuffer ring
// (semaphores, transition command buffers, usage fences) and the image-info/image
// arrays. Waits on each slot's fences first so nothing is destroyed while in flight.
// Safe to call when the arrays were never allocated (mBackbuffers may be null;
// delete[] of null is a no-op).
void VulkanManager::destroyBuffers(VulkanSurface* surface) {
    if (surface->mBackbuffers) {
        // The ring holds mImageCount + 1 slots (see createBuffers).
        for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
            mWaitForFences(mDevice, 2, surface->mBackbuffers[i].mUsageFences, true, UINT64_MAX);
            surface->mBackbuffers[i].mImageIndex = -1;
            mDestroySemaphore(mDevice, surface->mBackbuffers[i].mAcquireSemaphore, nullptr);
            mDestroySemaphore(mDevice, surface->mBackbuffers[i].mRenderSemaphore, nullptr);
            mFreeCommandBuffers(mDevice, mCommandPool, 2,
                    surface->mBackbuffers[i].mTransitionCmdBuffers);
            mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[0], 0);
            mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[1], 0);
        }
    }

    delete[] surface->mBackbuffers;
    surface->mBackbuffers = nullptr;
    delete[] surface->mImageInfos;
    surface->mImageInfos = nullptr;
    // mImages themselves are owned by the swapchain; we only free our array of handles.
    delete[] surface->mImages;
    surface->mImages = nullptr;
}
604
// Fully destroys a VulkanSurface: drains the queues, releases per-image buffers,
// then the swapchain and the VkSurfaceKHR, and finally deletes the object itself.
void VulkanManager::destroySurface(VulkanSurface* surface) {
    // Make sure all submit commands have finished before starting to destroy objects.
    if (VK_NULL_HANDLE != mPresentQueue) {
        mQueueWaitIdle(mPresentQueue);
    }
    mDeviceWaitIdle(mDevice);

    destroyBuffers(surface);

    if (VK_NULL_HANDLE != surface->mSwapchain) {
        mDestroySwapchainKHR(mDevice, surface->mSwapchain, nullptr);
        surface->mSwapchain = VK_NULL_HANDLE;
    }

    if (VK_NULL_HANDLE != surface->mVkSurface) {
        mDestroySurfaceKHR(mInstance, surface->mVkSurface, nullptr);
        surface->mVkSurface = VK_NULL_HANDLE;
    }
    delete surface;
}
625
// Populates a VulkanSurface after its swapchain has been (re)created: fetches the
// swapchain images, wraps each in an SkSurface render target, and builds the
// backbuffer ring (semaphores, transition command buffers, signaled usage fences).
// `format` and `extent` must match the swapchain the images came from.
void VulkanManager::createBuffers(VulkanSurface* surface, VkFormat format, VkExtent2D extent) {
    // First call gets the image count, second fills the array.
    mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, nullptr);
    SkASSERT(surface->mImageCount);
    surface->mImages = new VkImage[surface->mImageCount];
    mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, surface->mImages);

    SkSurfaceProps props(0, kUnknown_SkPixelGeometry);

    // set up initial image layouts and create surfaces
    surface->mImageInfos = new VulkanSurface::ImageInfo[surface->mImageCount];
    for (uint32_t i = 0; i < surface->mImageCount; ++i) {
        GrVkImageInfo info;
        info.fImage = surface->mImages[i];
        // Swapchain images have no external allocation we manage.
        info.fAlloc = GrVkAlloc();
        info.fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
        info.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
        info.fFormat = format;
        info.fLevelCount = 1;

        GrBackendRenderTarget backendRT(extent.width, extent.height, 0, 0, info);

        VulkanSurface::ImageInfo& imageInfo = surface->mImageInfos[i];
        // Wide-gamut surfaces render in F16; everything else in 8888.
        imageInfo.mSurface = SkSurface::MakeFromBackendRenderTarget(
                mRenderThread.getGrContext(), backendRT, kTopLeft_GrSurfaceOrigin,
                surface->mColorMode == ColorMode::WideColorGamut ? kRGBA_F16_SkColorType
                : kRGBA_8888_SkColorType, surface->mColorSpace, &props);
    }

    SkASSERT(mCommandPool != VK_NULL_HANDLE);

    // set up the backbuffers
    VkSemaphoreCreateInfo semaphoreInfo;
    memset(&semaphoreInfo, 0, sizeof(VkSemaphoreCreateInfo));
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    VkCommandBufferAllocateInfo commandBuffersInfo;
    memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
    commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    commandBuffersInfo.pNext = nullptr;
    commandBuffersInfo.commandPool = mCommandPool;
    commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    commandBuffersInfo.commandBufferCount = 2;
    VkFenceCreateInfo fenceInfo;
    memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
    fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    fenceInfo.pNext = nullptr;
    // Fences start signaled so the first wait in getAvailableBackbuffer succeeds.
    fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;

    // we create one additional backbuffer structure here, because we want to
    // give the command buffers they contain a chance to finish before we cycle back
    surface->mBackbuffers = new VulkanSurface::BackbufferInfo[surface->mImageCount + 1];
    for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
        SkDEBUGCODE(VkResult res);
        surface->mBackbuffers[i].mImageIndex = -1;
        SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
                                            &surface->mBackbuffers[i].mAcquireSemaphore);
        SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
                                            &surface->mBackbuffers[i].mRenderSemaphore);
        SkDEBUGCODE(res =) mAllocateCommandBuffers(mDevice, &commandBuffersInfo,
                                                   surface->mBackbuffers[i].mTransitionCmdBuffers);
        SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
                                        &surface->mBackbuffers[i].mUsageFences[0]);
        SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
                                        &surface->mBackbuffers[i].mUsageFences[1]);
        SkASSERT(VK_SUCCESS == res);
    }
    // Start past the end so the first getAvailableBackbuffer wraps to slot 0.
    surface->mCurrentBackbufferIndex = surface->mImageCount;
}
695
// Creates (or recreates) the VkSwapchainKHR for |surface| from the current surface
// capabilities, then (re)builds the per-image buffers via createBuffers(). Returns false if
// any surface query fails or if the requested format/colorspace pair is not supported.
// When an old swapchain exists it is passed as oldSwapchain and destroyed afterwards.
bool VulkanManager::createSwapchain(VulkanSurface* surface) {
    // check for capabilities
    VkSurfaceCapabilitiesKHR caps;
    VkResult res = mGetPhysicalDeviceSurfaceCapabilitiesKHR(mPhysicalDevice,
            surface->mVkSurface, &caps);
    if (VK_SUCCESS != res) {
        return false;
    }

    // Query the supported surface formats: first the count, then the list.
    uint32_t surfaceFormatCount;
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
            &surfaceFormatCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    FatVector<VkSurfaceFormatKHR, 4> surfaceFormats(surfaceFormatCount);
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
            &surfaceFormatCount, surfaceFormats.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    // Query present modes. NOTE(review): the resulting list is only used to validate that the
    // queries succeed; FIFO is unconditionally selected below since it is always available.
    uint32_t presentModeCount;
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
            surface->mVkSurface, &presentModeCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    FatVector<VkPresentModeKHR, VK_PRESENT_MODE_RANGE_SIZE_KHR> presentModes(presentModeCount);
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
            surface->mVkSurface, &presentModeCount,
            presentModes.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    VkExtent2D extent = caps.currentExtent;
    // clamp width; to handle currentExtent of -1 (0xFFFFFFFF, "size determined by swapchain")
    // and protect us from broken hints
    if (extent.width < caps.minImageExtent.width) {
        extent.width = caps.minImageExtent.width;
    }
    SkASSERT(extent.width <= caps.maxImageExtent.width);
    // clamp height
    if (extent.height < caps.minImageExtent.height) {
        extent.height = caps.minImageExtent.height;
    }
    SkASSERT(extent.height <= caps.maxImageExtent.height);
    // Remember the dimensions we built the swapchain for, so callers can detect resizes.
    surface->mWindowWidth = extent.width;
    surface->mWindowHeight = extent.height;

    // Triple-buffer by default, but never request fewer than the driver's minimum.
    uint32_t imageCount = std::max<uint32_t>(3, caps.minImageCount);
    if (caps.maxImageCount > 0 && imageCount > caps.maxImageCount) {
        // Application must settle for fewer images than desired:
        imageCount = caps.maxImageCount;
    }

    // Currently Skia requires the images to be color attachments and support all transfer
    // operations.
    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    SkASSERT((caps.supportedUsageFlags & usageFlags) == usageFlags);
    SkASSERT(caps.supportedTransforms & caps.currentTransform);
    SkASSERT(caps.supportedCompositeAlpha &
             (VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR | VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR));
    // Prefer INHERIT (compositor decides) and fall back to OPAQUE.
    VkCompositeAlphaFlagBitsKHR composite_alpha =
            (caps.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR)
                    ? VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR
                    : VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;

    // Pick 8888/sRGB by default; wide color gamut uses F16 with extended sRGB.
    VkFormat surfaceFormat = VK_FORMAT_R8G8B8A8_UNORM;
    VkColorSpaceKHR colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
    if (surface->mColorMode == ColorMode::WideColorGamut) {
        surfaceFormat = VK_FORMAT_R16G16B16A16_SFLOAT;
        colorSpace = VK_COLOR_SPACE_EXTENDED_SRGB_NONLINEAR_EXT;
    }
    // Verify the device actually advertises the chosen format/colorspace pair.
    bool foundSurfaceFormat = false;
    for (uint32_t i = 0; i < surfaceFormatCount; ++i) {
        if (surfaceFormat == surfaceFormats[i].format
                && colorSpace == surfaceFormats[i].colorSpace) {
            foundSurfaceFormat = true;
            break;
        }
    }

    if (!foundSurfaceFormat) {
        return false;
    }

    // FIFO is always available and will match what we do on GL so just pick that here.
    VkPresentModeKHR mode = VK_PRESENT_MODE_FIFO_KHR;

    VkSwapchainCreateInfoKHR swapchainCreateInfo;
    memset(&swapchainCreateInfo, 0, sizeof(VkSwapchainCreateInfoKHR));
    swapchainCreateInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
    swapchainCreateInfo.surface = surface->mVkSurface;
    swapchainCreateInfo.minImageCount = imageCount;
    swapchainCreateInfo.imageFormat = surfaceFormat;
    swapchainCreateInfo.imageColorSpace = colorSpace;
    swapchainCreateInfo.imageExtent = extent;
    swapchainCreateInfo.imageArrayLayers = 1;
    swapchainCreateInfo.imageUsage = usageFlags;

    // If graphics and present live on different queue families the images must be shared
    // concurrently between them; otherwise exclusive ownership is cheaper.
    uint32_t queueFamilies[] = {mGraphicsQueueIndex, mPresentQueueIndex};
    if (mGraphicsQueueIndex != mPresentQueueIndex) {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
        swapchainCreateInfo.queueFamilyIndexCount = 2;
        swapchainCreateInfo.pQueueFamilyIndices = queueFamilies;
    } else {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
        swapchainCreateInfo.queueFamilyIndexCount = 0;
        swapchainCreateInfo.pQueueFamilyIndices = nullptr;
    }

    swapchainCreateInfo.preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
    swapchainCreateInfo.compositeAlpha = composite_alpha;
    swapchainCreateInfo.presentMode = mode;
    swapchainCreateInfo.clipped = true;
    // Pass the current swapchain (possibly VK_NULL_HANDLE) so the driver can recycle resources.
    swapchainCreateInfo.oldSwapchain = surface->mSwapchain;

    res = mCreateSwapchainKHR(mDevice, &swapchainCreateInfo, nullptr, &surface->mSwapchain);
    if (VK_SUCCESS != res) {
        return false;
    }

    // destroy the old swapchain
    if (swapchainCreateInfo.oldSwapchain != VK_NULL_HANDLE) {
        // Drain the device so no in-flight work still references the old images.
        mDeviceWaitIdle(mDevice);

        destroyBuffers(surface);

        mDestroySwapchainKHR(mDevice, swapchainCreateInfo.oldSwapchain, nullptr);
    }

    createBuffers(surface, surfaceFormat, extent);

    // The window content is not updated (frozen) until a buffer of the window size is received.
    // This prevents temporary stretching of the window after it is resized, but before the first
    // buffer with new size is enqueued.
    native_window_set_scaling_mode(surface->mNativeWindow, NATIVE_WINDOW_SCALING_MODE_FREEZE);

    return true;
}
841
// Creates a VulkanSurface wrapping |window|: initializes the Vulkan context if needed,
// creates the VkSurfaceKHR for the native window, and builds the initial swapchain.
// Returns nullptr (after cleaning up) on any failure; the caller owns the returned pointer
// and must release it via destroySurface().
VulkanSurface* VulkanManager::createSurface(ANativeWindow* window, ColorMode colorMode,
        sk_sp<SkColorSpace> surfaceColorSpace) {
    // Lazily bring up instance/device state; safe to call repeatedly.
    initialize();

    if (!window) {
        return nullptr;
    }

    VulkanSurface* surface = new VulkanSurface(colorMode, window, surfaceColorSpace);

    VkAndroidSurfaceCreateInfoKHR surfaceCreateInfo;
    memset(&surfaceCreateInfo, 0, sizeof(VkAndroidSurfaceCreateInfoKHR));
    surfaceCreateInfo.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
    surfaceCreateInfo.pNext = nullptr;
    surfaceCreateInfo.flags = 0;
    surfaceCreateInfo.window = window;

    VkResult res = mCreateAndroidSurfaceKHR(mInstance, &surfaceCreateInfo, nullptr,
            &surface->mVkSurface);
    if (VK_SUCCESS != res) {
        // No Vulkan objects were created on the surface yet, so plain delete is enough here.
        delete surface;
        return nullptr;
    }

    // Debug-only sanity check that the present queue can present to this surface.
    SkDEBUGCODE(VkBool32 supported; res = mGetPhysicalDeviceSurfaceSupportKHR(
            mPhysicalDevice, mPresentQueueIndex, surface->mVkSurface, &supported);
    // All physical devices and queue families on Android must be capable of
    // presentation with any native window.
    SkASSERT(VK_SUCCESS == res && supported););

    if (!createSwapchain(surface)) {
        // destroySurface also releases the VkSurfaceKHR created above.
        destroySurface(surface);
        return nullptr;
    }

    return surface;
}
879
880// Helper to know which src stage flags we need to set when transitioning to the present layout
Greg Daniel8a2a7542018-10-04 13:46:55 -0400881static VkPipelineStageFlags layoutToPipelineSrcStageFlags(const VkImageLayout layout) {
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500882 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
883 return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
884 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
885 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
886 return VK_PIPELINE_STAGE_TRANSFER_BIT;
Greg Daniel8a2a7542018-10-04 13:46:55 -0400887 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
888 return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
889 } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
890 VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout) {
891 return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
892 } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
893 return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500894 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
895 return VK_PIPELINE_STAGE_HOST_BIT;
896 }
897
898 SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
899 return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
900}
901
902// Helper to know which src access mask we need to set when transitioning to the present layout
903static VkAccessFlags layoutToSrcAccessMask(const VkImageLayout layout) {
904 VkAccessFlags flags = 0;
905 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
906 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
John Reck1bcacfd2017-11-03 10:12:19 -0700907 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT |
908 VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_HOST_WRITE_BIT |
909 VK_ACCESS_HOST_READ_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500910 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
911 flags = VK_ACCESS_HOST_WRITE_BIT;
912 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
913 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
914 } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
915 flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
916 } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
917 flags = VK_ACCESS_TRANSFER_WRITE_BIT;
918 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
919 flags = VK_ACCESS_TRANSFER_READ_BIT;
920 } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
921 flags = VK_ACCESS_SHADER_READ_BIT;
922 }
923 return flags;
924}
925
// Presents the current backbuffer of |surface|: records a queue-family/layout transition of
// the swapchain image to PRESENT_SRC on the graphics queue, then queues the present on the
// present queue, synchronized through the backbuffer's render semaphore.
void VulkanManager::swapBuffers(VulkanSurface* surface) {
    // Debug option: serialize CPU and GPU completely before presenting.
    if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
        ATRACE_NAME("Finishing GPU work");
        mDeviceWaitIdle(mDevice);
    }

    SkASSERT(surface->mBackbuffers);
    VulkanSurface::BackbufferInfo* backbuffer =
            surface->mBackbuffers + surface->mCurrentBackbufferIndex;

    // Pull the Vulkan image info back out of the SkSurface so we transition the exact image
    // Skia rendered into (kFlushRead flushes pending Skia work on it).
    SkSurface* skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface.get();
    GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
            SkSurface::kFlushRead_BackendHandleAccess);
    SkASSERT(backendRT.isValid());

    GrVkImageInfo imageInfo;
    SkAssertResult(backendRT.getVkImageInfo(&imageInfo));

    // Check to make sure we never change the actually wrapped image
    SkASSERT(imageInfo.fImage == surface->mImages[backbuffer->mImageIndex]);

    // We need to transition the image to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR and make sure that all
    // previous work is complete before presenting. So we first add the necessary barrier here.
    VkImageLayout layout = imageInfo.fImageLayout;
    VkPipelineStageFlags srcStageMask = layoutToPipelineSrcStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = layoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = 0;

    // Barrier also performs the queue-family ownership transfer graphics -> present.
    VkImageMemoryBarrier imageMemoryBarrier = {
            VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
            NULL, // pNext
            srcAccessMask, // outputMask
            dstAccessMask, // inputMask
            layout, // oldLayout
            VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, // newLayout
            mGraphicsQueueIndex, // srcQueueFamilyIndex
            mPresentQueueIndex, // dstQueueFamilyIndex
            surface->mImages[backbuffer->mImageIndex], // image
            {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1} // subresourceRange
    };

    // Record the transition into the second (present-side) transition command buffer.
    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[1], 0);
    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[1], &info);
    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[1], srcStageMask, dstStageMask, 0, 0,
                        nullptr, 0, nullptr, 1, &imageMemoryBarrier);
    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[1]);

    // Track the layout we left the image in for the next frame's transition.
    surface->mImageInfos[backbuffer->mImageIndex].mImageLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 0;
    submitInfo.pWaitDstStageMask = 0;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[1];
    submitInfo.signalSemaphoreCount = 1;
    // When this command buffer finishes we will signal this semaphore so that we know it is now
    // safe to present the image to the screen.
    submitInfo.pSignalSemaphores = &backbuffer->mRenderSemaphore;

    // Attach second fence to submission here so we can track when the command buffer finishes.
    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[1]);

    // Submit present operation to present queue. We use a semaphore here to make sure all
    // rendering to the image is complete and that the layout has been changed to present on the
    // graphics queue.
    const VkPresentInfoKHR presentInfo = {
            VK_STRUCTURE_TYPE_PRESENT_INFO_KHR, // sType
            NULL, // pNext
            1, // waitSemaphoreCount
            &backbuffer->mRenderSemaphore, // pWaitSemaphores
            1, // swapchainCount
            &surface->mSwapchain, // pSwapchains
            &backbuffer->mImageIndex, // pImageIndices
            NULL // pResults
    };

    mQueuePresentKHR(mPresentQueue, &presentInfo);

    // Release the SkSurface reference and record usage time for getAge()'s buffer-age logic.
    surface->mBackbuffer.reset();
    surface->mImageInfos[backbuffer->mImageIndex].mLastUsed = surface->mCurrentTime;
    surface->mImageInfos[backbuffer->mImageIndex].mInvalid = false;
    surface->mCurrentTime++;
}
1017
1018int VulkanManager::getAge(VulkanSurface* surface) {
Greg Daniel74ea2012017-11-10 11:32:58 -05001019 SkASSERT(surface->mBackbuffers);
John Reck1bcacfd2017-11-03 10:12:19 -07001020 VulkanSurface::BackbufferInfo* backbuffer =
1021 surface->mBackbuffers + surface->mCurrentBackbufferIndex;
1022 if (mSwapBehavior == SwapBehavior::Discard ||
1023 surface->mImageInfos[backbuffer->mImageIndex].mInvalid) {
Greg Danielcd558522016-11-17 13:31:40 -05001024 return 0;
1025 }
1026 uint16_t lastUsed = surface->mImageInfos[backbuffer->mImageIndex].mLastUsed;
1027 return surface->mCurrentTime - lastUsed;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001028}
1029
Greg Daniel26e0dca2018-09-18 10:33:19 -04001030bool VulkanManager::setupDummyCommandBuffer() {
1031 if (mDummyCB != VK_NULL_HANDLE) {
1032 return true;
1033 }
1034
1035 VkCommandBufferAllocateInfo commandBuffersInfo;
1036 memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
1037 commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
1038 commandBuffersInfo.pNext = nullptr;
1039 commandBuffersInfo.commandPool = mCommandPool;
1040 commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
1041 commandBuffersInfo.commandBufferCount = 1;
1042
1043 VkResult err = mAllocateCommandBuffers(mDevice, &commandBuffersInfo, &mDummyCB);
1044 if (err != VK_SUCCESS) {
1045 // It is probably unnecessary to set this back to VK_NULL_HANDLE, but we set it anyways to
1046 // make sure the driver didn't set a value and then return a failure.
1047 mDummyCB = VK_NULL_HANDLE;
1048 return false;
1049 }
1050
1051 VkCommandBufferBeginInfo beginInfo;
1052 memset(&beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
1053 beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
1054 beginInfo.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
1055
1056 mBeginCommandBuffer(mDummyCB, &beginInfo);
1057 mEndCommandBuffer(mDummyCB);
1058 return true;
1059}
1060
Stan Iliev564ca3e2018-09-04 22:00:00 +00001061status_t VulkanManager::fenceWait(sp<Fence>& fence) {
Greg Daniel26e0dca2018-09-18 10:33:19 -04001062 if (!hasVkContext()) {
1063 ALOGE("VulkanManager::fenceWait: VkDevice not initialized");
1064 return INVALID_OPERATION;
1065 }
1066
Stan Iliev7a081272018-10-26 17:54:18 -04001067 // Block GPU on the fence.
1068 int fenceFd = fence->dup();
1069 if (fenceFd == -1) {
1070 ALOGE("VulkanManager::fenceWait: error dup'ing fence fd: %d", errno);
1071 return -errno;
Stan Iliev564ca3e2018-09-04 22:00:00 +00001072 }
Stan Iliev7a081272018-10-26 17:54:18 -04001073
1074 VkSemaphoreCreateInfo semaphoreInfo;
1075 semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
1076 semaphoreInfo.pNext = nullptr;
1077 semaphoreInfo.flags = 0;
1078 VkSemaphore semaphore;
1079 VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
1080 if (VK_SUCCESS != err) {
1081 ALOGE("Failed to create import semaphore, err: %d", err);
1082 return UNKNOWN_ERROR;
1083 }
1084 VkImportSemaphoreFdInfoKHR importInfo;
1085 importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
1086 importInfo.pNext = nullptr;
1087 importInfo.semaphore = semaphore;
1088 importInfo.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
1089 importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
1090 importInfo.fd = fenceFd;
1091
1092 err = mImportSemaphoreFdKHR(mDevice, &importInfo);
1093 if (VK_SUCCESS != err) {
1094 ALOGE("Failed to import semaphore, err: %d", err);
1095 return UNKNOWN_ERROR;
1096 }
1097
1098 LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);
1099
1100 VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
1101
1102 VkSubmitInfo submitInfo;
1103 memset(&submitInfo, 0, sizeof(VkSubmitInfo));
1104 submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
1105 submitInfo.waitSemaphoreCount = 1;
1106 // Wait to make sure aquire semaphore set above has signaled.
1107 submitInfo.pWaitSemaphores = &semaphore;
1108 submitInfo.pWaitDstStageMask = &waitDstStageFlags;
1109 submitInfo.commandBufferCount = 1;
1110 submitInfo.pCommandBuffers = &mDummyCB;
1111 submitInfo.signalSemaphoreCount = 0;
1112
1113 mQueueSubmit(mGraphicsQueue, 1, &submitInfo, VK_NULL_HANDLE);
1114
1115 // On Android when we import a semaphore, it is imported using temporary permanence. That
1116 // means as soon as we queue the semaphore for a wait it reverts to its previous permanent
1117 // state before importing. This means it will now be in an idle state with no pending
1118 // signal or wait operations, so it is safe to immediately delete it.
1119 mDestroySemaphore(mDevice, semaphore, nullptr);
Stan Iliev564ca3e2018-09-04 22:00:00 +00001120 return OK;
1121}
1122
1123status_t VulkanManager::createReleaseFence(sp<Fence>& nativeFence) {
Greg Daniel26e0dca2018-09-18 10:33:19 -04001124 if (!hasVkContext()) {
1125 ALOGE("VulkanManager::createReleaseFence: VkDevice not initialized");
1126 return INVALID_OPERATION;
1127 }
1128
Greg Daniel26e0dca2018-09-18 10:33:19 -04001129 VkExportSemaphoreCreateInfo exportInfo;
1130 exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
1131 exportInfo.pNext = nullptr;
1132 exportInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
1133
1134 VkSemaphoreCreateInfo semaphoreInfo;
1135 semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
1136 semaphoreInfo.pNext = &exportInfo;
1137 semaphoreInfo.flags = 0;
1138 VkSemaphore semaphore;
1139 VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
1140 if (VK_SUCCESS != err) {
1141 ALOGE("VulkanManager::createReleaseFence: Failed to create semaphore");
1142 return INVALID_OPERATION;
1143 }
1144
1145 LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);
1146
1147 VkSubmitInfo submitInfo;
1148 memset(&submitInfo, 0, sizeof(VkSubmitInfo));
1149 submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
1150 submitInfo.waitSemaphoreCount = 0;
1151 submitInfo.pWaitSemaphores = nullptr;
1152 submitInfo.pWaitDstStageMask = nullptr;
1153 submitInfo.commandBufferCount = 1;
1154 submitInfo.pCommandBuffers = &mDummyCB;
1155 submitInfo.signalSemaphoreCount = 1;
1156 submitInfo.pSignalSemaphores = &semaphore;
1157
1158 mQueueSubmit(mGraphicsQueue, 1, &submitInfo, VK_NULL_HANDLE);
1159
1160 VkSemaphoreGetFdInfoKHR getFdInfo;
1161 getFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
1162 getFdInfo.pNext = nullptr;
1163 getFdInfo.semaphore = semaphore;
1164 getFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
1165
1166 int fenceFd = 0;
1167
1168 err = mGetSemaphoreFdKHR(mDevice, &getFdInfo, &fenceFd);
1169 if (VK_SUCCESS != err) {
1170 ALOGE("VulkanManager::createReleaseFence: Failed to get semaphore Fd");
1171 return INVALID_OPERATION;
1172 }
1173 nativeFence = new Fence(fenceFd);
1174
1175 // Exporting a semaphore with copy transference via vkGetSemahporeFdKHR, has the same effect of
1176 // destroying the semaphore and creating a new one with the same handle, and the payloads
1177 // ownership is move to the Fd we created. Thus the semahpore is in a state that we can delete
1178 // it and we don't need to wait on the command buffer we submitted to finish.
1179 mDestroySemaphore(mDevice, semaphore, nullptr);
1180
Stan Iliev564ca3e2018-09-04 22:00:00 +00001181 return OK;
1182}
1183
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001184} /* namespace renderthread */
1185} /* namespace uirenderer */
1186} /* namespace android */