blob: aa7a141f6da367d880da5d15848c096ce7b6609a [file] [log] [blame]
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001/*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "VulkanManager.h"
18
Stan Iliev305e13a2018-11-13 11:14:48 -050019#include <gui/Surface.h>
20
Greg Danielcd558522016-11-17 13:31:40 -050021#include "Properties.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050022#include "RenderThread.h"
Greg Daniel45ec62b2017-01-04 14:27:00 -050023#include "renderstate/RenderState.h"
Ben Wagnereec27d52017-01-11 15:32:07 -050024#include "utils/FatVector.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050025
Greg Danielac2d2322017-07-12 11:30:15 -040026#include <GrBackendSurface.h>
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050027#include <GrContext.h>
28#include <GrTypes.h>
Greg Daniela227dbb2018-08-20 09:19:48 -040029#include <GrTypes.h>
30#include <vk/GrVkExtensions.h>
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050031#include <vk/GrVkTypes.h>
32
33namespace android {
34namespace uirenderer {
35namespace renderthread {
36
Greg Daniel2ff202712018-06-14 11:50:10 -040037#define GET_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(VK_NULL_HANDLE, "vk" #F)
38#define GET_INST_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(mInstance, "vk" #F)
39#define GET_DEV_PROC(F) m##F = (PFN_vk##F)vkGetDeviceProcAddr(mDevice, "vk" #F)
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050040
// Binds this manager to the RenderThread it serves. No Vulkan objects are
// created here; all device/instance setup is deferred to initialize().
VulkanManager::VulkanManager(RenderThread& thread) : mRenderThread(thread) {}
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050042
// Tears down all Vulkan state owned by this manager, in reverse creation order:
// GrContext, command pool, device, then instance. Safe to call on a
// partially-initialized manager: each handle is checked before being destroyed.
void VulkanManager::destroy() {
    // Release the GrContext before destroying the device that backs it.
    mRenderThread.setGrContext(nullptr);

    // We don't need to explicitly free the command buffer since it automatically gets freed when we
    // delete the VkCommandPool below.
    mDummyCB = VK_NULL_HANDLE;

    if (VK_NULL_HANDLE != mCommandPool) {
        mDestroyCommandPool(mDevice, mCommandPool, nullptr);
        mCommandPool = VK_NULL_HANDLE;
    }

    if (mDevice != VK_NULL_HANDLE) {
        // Drain all in-flight work on every queue before the device goes away.
        mDeviceWaitIdle(mDevice);
        mDestroyDevice(mDevice, nullptr);
    }

    if (mInstance != VK_NULL_HANDLE) {
        mDestroyInstance(mInstance, nullptr);
    }

    // Null out every cached handle so a subsequent initialize() starts clean.
    mGraphicsQueue = VK_NULL_HANDLE;
    mPresentQueue = VK_NULL_HANDLE;
    mDevice = VK_NULL_HANDLE;
    mPhysicalDevice = VK_NULL_HANDLE;
    mInstance = VK_NULL_HANDLE;
}
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050070
Greg Daniela227dbb2018-08-20 09:19:48 -040071bool VulkanManager::setupDevice(GrVkExtensions& grExtensions, VkPhysicalDeviceFeatures2& features) {
Greg Daniel2ff202712018-06-14 11:50:10 -040072 VkResult err;
73
74 constexpr VkApplicationInfo app_info = {
75 VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
76 nullptr, // pNext
77 "android framework", // pApplicationName
78 0, // applicationVersion
79 "android framework", // pEngineName
80 0, // engineVerison
Greg Daniel96259622018-10-01 14:42:56 -040081 VK_MAKE_VERSION(1, 1, 0), // apiVersion
Greg Daniel2ff202712018-06-14 11:50:10 -040082 };
83
84 std::vector<const char*> instanceExtensions;
85 {
86 GET_PROC(EnumerateInstanceExtensionProperties);
87
88 uint32_t extensionCount = 0;
89 err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
90 if (VK_SUCCESS != err) {
91 return false;
92 }
93 std::unique_ptr<VkExtensionProperties[]> extensions(
94 new VkExtensionProperties[extensionCount]);
95 err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions.get());
96 if (VK_SUCCESS != err) {
97 return false;
98 }
99 bool hasKHRSurfaceExtension = false;
100 bool hasKHRAndroidSurfaceExtension = false;
101 for (uint32_t i = 0; i < extensionCount; ++i) {
102 instanceExtensions.push_back(extensions[i].extensionName);
103 if (!strcmp(extensions[i].extensionName, VK_KHR_SURFACE_EXTENSION_NAME)) {
104 hasKHRSurfaceExtension = true;
105 }
106 if (!strcmp(extensions[i].extensionName,VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
107 hasKHRAndroidSurfaceExtension = true;
108 }
109 }
110 if (!hasKHRSurfaceExtension || !hasKHRAndroidSurfaceExtension) {
111 this->destroy();
112 return false;
113 }
114 }
115
116 const VkInstanceCreateInfo instance_create = {
117 VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
118 nullptr, // pNext
119 0, // flags
120 &app_info, // pApplicationInfo
121 0, // enabledLayerNameCount
122 nullptr, // ppEnabledLayerNames
123 (uint32_t) instanceExtensions.size(), // enabledExtensionNameCount
124 instanceExtensions.data(), // ppEnabledExtensionNames
125 };
126
127 GET_PROC(CreateInstance);
128 err = mCreateInstance(&instance_create, nullptr, &mInstance);
129 if (err < 0) {
130 this->destroy();
131 return false;
132 }
133
134 GET_INST_PROC(DestroyInstance);
135 GET_INST_PROC(EnumeratePhysicalDevices);
Greg Daniel96259622018-10-01 14:42:56 -0400136 GET_INST_PROC(GetPhysicalDeviceProperties);
Greg Daniel2ff202712018-06-14 11:50:10 -0400137 GET_INST_PROC(GetPhysicalDeviceQueueFamilyProperties);
Greg Daniela227dbb2018-08-20 09:19:48 -0400138 GET_INST_PROC(GetPhysicalDeviceFeatures2);
Greg Daniel2ff202712018-06-14 11:50:10 -0400139 GET_INST_PROC(CreateDevice);
140 GET_INST_PROC(EnumerateDeviceExtensionProperties);
141 GET_INST_PROC(CreateAndroidSurfaceKHR);
142 GET_INST_PROC(DestroySurfaceKHR);
143 GET_INST_PROC(GetPhysicalDeviceSurfaceSupportKHR);
144 GET_INST_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
145 GET_INST_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
146 GET_INST_PROC(GetPhysicalDeviceSurfacePresentModesKHR);
147
148 uint32_t gpuCount;
149 err = mEnumeratePhysicalDevices(mInstance, &gpuCount, nullptr);
150 if (err) {
151 this->destroy();
152 return false;
153 }
154 if (!gpuCount) {
155 this->destroy();
156 return false;
157 }
158 // Just returning the first physical device instead of getting the whole array. Since there
159 // should only be one device on android.
160 gpuCount = 1;
161 err = mEnumeratePhysicalDevices(mInstance, &gpuCount, &mPhysicalDevice);
162 // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
163 if (err && VK_INCOMPLETE != err) {
164 this->destroy();
165 return false;
166 }
167
Greg Daniel96259622018-10-01 14:42:56 -0400168 VkPhysicalDeviceProperties physDeviceProperties;
169 mGetPhysicalDeviceProperties(mPhysicalDevice, &physDeviceProperties);
170 if (physDeviceProperties.apiVersion < VK_MAKE_VERSION(1, 1, 0)) {
171 this->destroy();
172 return false;
173 }
174
Greg Daniel2ff202712018-06-14 11:50:10 -0400175 // query to get the initial queue props size
176 uint32_t queueCount;
177 mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, nullptr);
178 if (!queueCount) {
179 this->destroy();
180 return false;
181 }
182
183 // now get the actual queue props
184 std::unique_ptr<VkQueueFamilyProperties[]> queueProps(new VkQueueFamilyProperties[queueCount]);
185 mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, queueProps.get());
186
187 // iterate to find the graphics queue
188 mGraphicsQueueIndex = queueCount;
189 for (uint32_t i = 0; i < queueCount; i++) {
190 if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
191 mGraphicsQueueIndex = i;
192 break;
193 }
194 }
195 if (mGraphicsQueueIndex == queueCount) {
196 this->destroy();
197 return false;
198 }
199
200 // All physical devices and queue families on Android must be capable of
201 // presentation with any native window. So just use the first one.
202 mPresentQueueIndex = 0;
203
204 std::vector<const char*> deviceExtensions;
205 {
206 uint32_t extensionCount = 0;
207 err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
208 nullptr);
209 if (VK_SUCCESS != err) {
210 this->destroy();
211 return false;
212 }
213 std::unique_ptr<VkExtensionProperties[]> extensions(
214 new VkExtensionProperties[extensionCount]);
215 err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
216 extensions.get());
217 if (VK_SUCCESS != err) {
218 this->destroy();
219 return false;
220 }
221 bool hasKHRSwapchainExtension = false;
222 for (uint32_t i = 0; i < extensionCount; ++i) {
223 deviceExtensions.push_back(extensions[i].extensionName);
224 if (!strcmp(extensions[i].extensionName, VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
225 hasKHRSwapchainExtension = true;
226 }
227 }
228 if (!hasKHRSwapchainExtension) {
229 this->destroy();
230 return false;
231 }
232 }
233
Greg Daniela227dbb2018-08-20 09:19:48 -0400234 auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
235 if (device != VK_NULL_HANDLE) {
236 return vkGetDeviceProcAddr(device, proc_name);
237 }
238 return vkGetInstanceProcAddr(instance, proc_name);
239 };
240 grExtensions.init(getProc, mInstance, mPhysicalDevice, instanceExtensions.size(),
241 instanceExtensions.data(), deviceExtensions.size(), deviceExtensions.data());
242
Greg Daniel26e0dca2018-09-18 10:33:19 -0400243 if (!grExtensions.hasExtension(VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME, 1)) {
244 this->destroy();
245 return false;
246 }
247
Greg Daniela227dbb2018-08-20 09:19:48 -0400248 memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
249 features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
250 features.pNext = nullptr;
251
252 // Setup all extension feature structs we may want to use.
253 void** tailPNext = &features.pNext;
254
255 if (grExtensions.hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
256 VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend;
257 blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) malloc(
258 sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
259 LOG_ALWAYS_FATAL_IF(!blend);
260 blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
261 blend->pNext = nullptr;
262 *tailPNext = blend;
263 tailPNext = &blend->pNext;
264 }
265
Greg Daniel05036172018-11-28 17:08:04 -0500266 VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature;
267 ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*) malloc(
268 sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
269 LOG_ALWAYS_FATAL_IF(!ycbcrFeature);
270 ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
271 ycbcrFeature->pNext = nullptr;
272 *tailPNext = ycbcrFeature;
273 tailPNext = &ycbcrFeature->pNext;
274
Greg Daniela227dbb2018-08-20 09:19:48 -0400275 // query to get the physical device features
276 mGetPhysicalDeviceFeatures2(mPhysicalDevice, &features);
Greg Daniel2ff202712018-06-14 11:50:10 -0400277 // this looks like it would slow things down,
278 // and we can't depend on it on all platforms
Greg Daniela227dbb2018-08-20 09:19:48 -0400279 features.features.robustBufferAccess = VK_FALSE;
Greg Daniel2ff202712018-06-14 11:50:10 -0400280
281 float queuePriorities[1] = { 0.0 };
282
283 const VkDeviceQueueCreateInfo queueInfo[2] = {
284 {
285 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
286 nullptr, // pNext
287 0, // VkDeviceQueueCreateFlags
288 mGraphicsQueueIndex, // queueFamilyIndex
289 1, // queueCount
290 queuePriorities, // pQueuePriorities
291 },
292 {
293 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
294 nullptr, // pNext
295 0, // VkDeviceQueueCreateFlags
296 mPresentQueueIndex, // queueFamilyIndex
297 1, // queueCount
298 queuePriorities, // pQueuePriorities
299 }
300 };
301 uint32_t queueInfoCount = (mPresentQueueIndex != mGraphicsQueueIndex) ? 2 : 1;
302
303 const VkDeviceCreateInfo deviceInfo = {
304 VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
Greg Daniela227dbb2018-08-20 09:19:48 -0400305 &features, // pNext
Greg Daniel2ff202712018-06-14 11:50:10 -0400306 0, // VkDeviceCreateFlags
307 queueInfoCount, // queueCreateInfoCount
308 queueInfo, // pQueueCreateInfos
309 0, // layerCount
310 nullptr, // ppEnabledLayerNames
311 (uint32_t) deviceExtensions.size(), // extensionCount
312 deviceExtensions.data(), // ppEnabledExtensionNames
Greg Daniela227dbb2018-08-20 09:19:48 -0400313 nullptr, // ppEnabledFeatures
Greg Daniel2ff202712018-06-14 11:50:10 -0400314 };
315
316 err = mCreateDevice(mPhysicalDevice, &deviceInfo, nullptr, &mDevice);
317 if (err) {
318 this->destroy();
319 return false;
320 }
321
322 GET_DEV_PROC(GetDeviceQueue);
323 GET_DEV_PROC(DeviceWaitIdle);
324 GET_DEV_PROC(DestroyDevice);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500325 GET_DEV_PROC(CreateSwapchainKHR);
326 GET_DEV_PROC(DestroySwapchainKHR);
327 GET_DEV_PROC(GetSwapchainImagesKHR);
328 GET_DEV_PROC(AcquireNextImageKHR);
329 GET_DEV_PROC(QueuePresentKHR);
330 GET_DEV_PROC(CreateCommandPool);
331 GET_DEV_PROC(DestroyCommandPool);
332 GET_DEV_PROC(AllocateCommandBuffers);
333 GET_DEV_PROC(FreeCommandBuffers);
334 GET_DEV_PROC(ResetCommandBuffer);
335 GET_DEV_PROC(BeginCommandBuffer);
336 GET_DEV_PROC(EndCommandBuffer);
337 GET_DEV_PROC(CmdPipelineBarrier);
338 GET_DEV_PROC(GetDeviceQueue);
339 GET_DEV_PROC(QueueSubmit);
340 GET_DEV_PROC(QueueWaitIdle);
341 GET_DEV_PROC(DeviceWaitIdle);
342 GET_DEV_PROC(CreateSemaphore);
343 GET_DEV_PROC(DestroySemaphore);
Greg Daniel26e0dca2018-09-18 10:33:19 -0400344 GET_DEV_PROC(ImportSemaphoreFdKHR);
345 GET_DEV_PROC(GetSemaphoreFdKHR);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500346 GET_DEV_PROC(CreateFence);
347 GET_DEV_PROC(DestroyFence);
348 GET_DEV_PROC(WaitForFences);
349 GET_DEV_PROC(ResetFences);
350
Greg Daniel2ff202712018-06-14 11:50:10 -0400351 return true;
352}
353
Greg Daniela227dbb2018-08-20 09:19:48 -0400354static void free_features_extensions_structs(const VkPhysicalDeviceFeatures2& features) {
355 // All Vulkan structs that could be part of the features chain will start with the
356 // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
357 // so we can get access to the pNext for the next struct.
358 struct CommonVulkanHeader {
359 VkStructureType sType;
360 void* pNext;
361 };
362
363 void* pNext = features.pNext;
364 while (pNext) {
365 void* current = pNext;
366 pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
367 free(current);
368 }
369}
370
// One-time Vulkan bootstrap for the render thread: verifies a 1.1 instance is
// available, builds the instance/device via setupDevice(), creates the command
// pool and dummy command buffer, and hands a GrVkBackendContext to Skia to
// create the GrContext. Idempotent — returns immediately if already initialized.
void VulkanManager::initialize() {
    // A non-null device means a previous call completed; nothing to do.
    if (mDevice != VK_NULL_HANDLE) {
        return;
    }

    // The loader must report Vulkan 1.1+; anything less is fatal for this renderer.
    GET_PROC(EnumerateInstanceVersion);
    uint32_t instanceVersion = 0;
    LOG_ALWAYS_FATAL_IF(mEnumerateInstanceVersion(&instanceVersion));
    LOG_ALWAYS_FATAL_IF(instanceVersion < VK_MAKE_VERSION(1, 1, 0));

    GrVkExtensions extensions;
    VkPhysicalDeviceFeatures2 features;
    LOG_ALWAYS_FATAL_IF(!this->setupDevice(extensions, features));

    mGetDeviceQueue(mDevice, mGraphicsQueueIndex, 0, &mGraphicsQueue);

    // Skia resolves any Vulkan entry points it needs through this callback:
    // device-level lookup when a device is supplied, instance-level otherwise.
    auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return vkGetDeviceProcAddr(device, proc_name);
        }
        return vkGetInstanceProcAddr(instance, proc_name);
    };

    // Package everything Skia needs to wrap our instance/device in a GrContext.
    GrVkBackendContext backendContext;
    backendContext.fInstance = mInstance;
    backendContext.fPhysicalDevice = mPhysicalDevice;
    backendContext.fDevice = mDevice;
    backendContext.fQueue = mGraphicsQueue;
    backendContext.fGraphicsQueueIndex = mGraphicsQueueIndex;
    backendContext.fInstanceVersion = instanceVersion;
    backendContext.fVkExtensions = &extensions;
    backendContext.fDeviceFeatures2 = &features;
    backendContext.fGetProc = std::move(getProc);

    // create the command pool for the command buffers
    if (VK_NULL_HANDLE == mCommandPool) {
        VkCommandPoolCreateInfo commandPoolInfo;
        memset(&commandPoolInfo, 0, sizeof(VkCommandPoolCreateInfo));
        commandPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
        // this needs to be on the render queue
        commandPoolInfo.queueFamilyIndex = mGraphicsQueueIndex;
        commandPoolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
        SkDEBUGCODE(VkResult res =) mCreateCommandPool(mDevice, &commandPoolInfo, nullptr,
                &mCommandPool);
        SkASSERT(VK_SUCCESS == res);
    }
    LOG_ALWAYS_FATAL_IF(mCommandPool == VK_NULL_HANDLE);

    if (!setupDummyCommandBuffer()) {
        this->destroy();
        return;
    }
    LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);


    mGetDeviceQueue(mDevice, mPresentQueueIndex, 0, &mPresentQueue);

    GrContextOptions options;
    options.fDisableDistanceFieldPaths = true;
    // TODO: get a string describing the SPIR-V compiler version and use it here
    mRenderThread.cacheManager().configureContext(&options, nullptr, 0);
    sk_sp<GrContext> grContext(GrContext::MakeVulkan(backendContext, options));
    LOG_ALWAYS_FATAL_IF(!grContext.get());
    mRenderThread.setGrContext(grContext);

    // MakeVulkan has consumed the feature structs; release the malloc'd
    // extension-feature chain built by setupDevice().
    free_features_extensions_structs(features);

    if (Properties::enablePartialUpdates && Properties::useBufferAge) {
        mSwapBehavior = SwapBehavior::BufferAge;
    }
}
442
443// Returns the next BackbufferInfo to use for the next draw. The function will make sure all
444// previous uses have finished before returning.
445VulkanSurface::BackbufferInfo* VulkanManager::getAvailableBackbuffer(VulkanSurface* surface) {
446 SkASSERT(surface->mBackbuffers);
447
448 ++surface->mCurrentBackbufferIndex;
449 if (surface->mCurrentBackbufferIndex > surface->mImageCount) {
450 surface->mCurrentBackbufferIndex = 0;
451 }
452
John Reck1bcacfd2017-11-03 10:12:19 -0700453 VulkanSurface::BackbufferInfo* backbuffer =
454 surface->mBackbuffers + surface->mCurrentBackbufferIndex;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500455
456 // Before we reuse a backbuffer, make sure its fences have all signaled so that we can safely
457 // reuse its commands buffers.
Greg Daniel2ff202712018-06-14 11:50:10 -0400458 VkResult res = mWaitForFences(mDevice, 2, backbuffer->mUsageFences, true, UINT64_MAX);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500459 if (res != VK_SUCCESS) {
460 return nullptr;
461 }
462
463 return backbuffer;
464}
465
// Acquires the next swapchain image for *surfaceOut and returns the SkSurface
// wrapping it, ready for rendering. Recreates the VulkanSurface on window
// resize and the swapchain on VK_ERROR_OUT_OF_DATE_KHR. Records and submits a
// queue-ownership/layout transition (present -> color attachment) before
// returning. Returns nullptr on unrecoverable acquire failures.
SkSurface* VulkanManager::getBackbufferSurface(VulkanSurface** surfaceOut) {
    // Recreate VulkanSurface, if ANativeWindow has been resized.
    VulkanSurface* surface = *surfaceOut;
    int windowWidth = 0, windowHeight = 0;
    ANativeWindow* window = surface->mNativeWindow;
    window->query(window, NATIVE_WINDOW_WIDTH, &windowWidth);
    window->query(window, NATIVE_WINDOW_HEIGHT, &windowHeight);
    if (windowWidth != surface->mWindowWidth || windowHeight != surface->mWindowHeight) {
        // Preserve the color configuration across the destroy/create cycle.
        ColorMode colorMode = surface->mColorMode;
        sk_sp<SkColorSpace> colorSpace = surface->mColorSpace;
        SkColorSpace::Gamut colorGamut = surface->mColorGamut;
        SkColorType colorType = surface->mColorType;
        destroySurface(surface);
        *surfaceOut = createSurface(window, colorMode, colorSpace, colorGamut, colorType);
        surface = *surfaceOut;
    }

    VulkanSurface::BackbufferInfo* backbuffer = getAvailableBackbuffer(surface);
    SkASSERT(backbuffer);

    VkResult res;

    // Fences were verified signaled by getAvailableBackbuffer; re-arm them for reuse.
    res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
    SkASSERT(VK_SUCCESS == res);

    // The acquire will signal the attached mAcquireSemaphore. We use this to know the image has
    // finished presenting and that it is safe to begin sending new commands to the returned image.
    res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
            backbuffer->mAcquireSemaphore, VK_NULL_HANDLE,
            &backbuffer->mImageIndex);

    if (VK_ERROR_SURFACE_LOST_KHR == res) {
        // need to figure out how to create a new vkSurface without the platformData*
        // maybe use attach somehow? but need a Window
        return nullptr;
    }
    if (VK_ERROR_OUT_OF_DATE_KHR == res) {
        // tear swapchain down and try again
        if (!createSwapchain(surface)) {
            return nullptr;
        }
        backbuffer = getAvailableBackbuffer(surface);
        res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
        SkASSERT(VK_SUCCESS == res);

        // acquire the image
        res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
                backbuffer->mAcquireSemaphore, VK_NULL_HANDLE,
                &backbuffer->mImageIndex);

        if (VK_SUCCESS != res) {
            return nullptr;
        }
    }

    // set up layout transfer from initial to color attachment
    VkImageLayout layout = surface->mImageInfos[backbuffer->mImageIndex].mImageLayout;
    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout || VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout);
    VkPipelineStageFlags srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkAccessFlags srcAccessMask = 0;
    VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
            VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;

    // Barrier also transfers queue-family ownership from the present queue back
    // to the graphics queue.
    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,    // sType
        NULL,                                      // pNext
        srcAccessMask,                             // outputMask
        dstAccessMask,                             // inputMask
        layout,                                    // oldLayout
        VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,  // newLayout
        mPresentQueueIndex,                        // srcQueueFamilyIndex
        mGraphicsQueueIndex,                       // dstQueueFamilyIndex
        surface->mImages[backbuffer->mImageIndex], // image
        {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}    // subresourceRange
    };
    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[0], 0);

    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[0], &info);

    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[0], srcStageMask, dstStageMask, 0, 0,
            nullptr, 0, nullptr, 1, &imageMemoryBarrier);

    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[0]);

    VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 1;
    // Wait to make sure acquire semaphore set above has signaled.
    submitInfo.pWaitSemaphores = &backbuffer->mAcquireSemaphore;
    submitInfo.pWaitDstStageMask = &waitDstStageFlags;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[0];
    submitInfo.signalSemaphoreCount = 0;

    // Attach first fence to submission here so we can track when the command buffer finishes.
    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[0]);

    // We need to notify Skia that we changed the layout of the wrapped VkImage
    sk_sp<SkSurface> skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface;
    GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
            SkSurface::kFlushRead_BackendHandleAccess);
    if (!backendRT.isValid()) {
        SkASSERT(backendRT.isValid());
        return nullptr;
    }
    backendRT.setVkImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);

    surface->mBackbuffer = std::move(skSurface);
    return surface->mBackbuffer.get();
}
584
585void VulkanManager::destroyBuffers(VulkanSurface* surface) {
586 if (surface->mBackbuffers) {
587 for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400588 mWaitForFences(mDevice, 2, surface->mBackbuffers[i].mUsageFences, true, UINT64_MAX);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500589 surface->mBackbuffers[i].mImageIndex = -1;
Greg Daniel2ff202712018-06-14 11:50:10 -0400590 mDestroySemaphore(mDevice, surface->mBackbuffers[i].mAcquireSemaphore, nullptr);
591 mDestroySemaphore(mDevice, surface->mBackbuffers[i].mRenderSemaphore, nullptr);
592 mFreeCommandBuffers(mDevice, mCommandPool, 2,
593 surface->mBackbuffers[i].mTransitionCmdBuffers);
594 mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[0], 0);
595 mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[1], 0);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500596 }
597 }
598
599 delete[] surface->mBackbuffers;
600 surface->mBackbuffers = nullptr;
Greg Danielcd558522016-11-17 13:31:40 -0500601 delete[] surface->mImageInfos;
602 surface->mImageInfos = nullptr;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500603 delete[] surface->mImages;
604 surface->mImages = nullptr;
605}
606
607void VulkanManager::destroySurface(VulkanSurface* surface) {
608 // Make sure all submit commands have finished before starting to destroy objects.
609 if (VK_NULL_HANDLE != mPresentQueue) {
610 mQueueWaitIdle(mPresentQueue);
611 }
Greg Daniel2ff202712018-06-14 11:50:10 -0400612 mDeviceWaitIdle(mDevice);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500613
614 destroyBuffers(surface);
615
616 if (VK_NULL_HANDLE != surface->mSwapchain) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400617 mDestroySwapchainKHR(mDevice, surface->mSwapchain, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500618 surface->mSwapchain = VK_NULL_HANDLE;
619 }
620
621 if (VK_NULL_HANDLE != surface->mVkSurface) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400622 mDestroySurfaceKHR(mInstance, surface->mVkSurface, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500623 surface->mVkSurface = VK_NULL_HANDLE;
624 }
625 delete surface;
626}
627
// Builds the per-image state for a freshly created swapchain: fetches the
// swapchain images, wraps each one in an SkSurface, and allocates
// mImageCount + 1 backbuffer slots with their semaphores, command buffers, and
// (pre-signaled) usage fences.
void VulkanManager::createBuffers(VulkanSurface* surface, VkFormat format, VkExtent2D extent) {
    // First call gets the image count, second fills the array.
    mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, nullptr);
    SkASSERT(surface->mImageCount);
    surface->mImages = new VkImage[surface->mImageCount];
    mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, surface->mImages);

    SkSurfaceProps props(0, kUnknown_SkPixelGeometry);

    // set up initial image layouts and create surfaces
    surface->mImageInfos = new VulkanSurface::ImageInfo[surface->mImageCount];
    for (uint32_t i = 0; i < surface->mImageCount; ++i) {
        // Describe the raw swapchain image to Skia; it was created with
        // undefined layout and optimal tiling.
        GrVkImageInfo info;
        info.fImage = surface->mImages[i];
        info.fAlloc = GrVkAlloc();
        info.fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
        info.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
        info.fFormat = format;
        info.fLevelCount = 1;

        GrBackendRenderTarget backendRT(extent.width, extent.height, 0, 0, info);

        VulkanSurface::ImageInfo& imageInfo = surface->mImageInfos[i];
        imageInfo.mSurface = SkSurface::MakeFromBackendRenderTarget(
                mRenderThread.getGrContext(), backendRT, kTopLeft_GrSurfaceOrigin,
                surface->mColorType, surface->mColorSpace, &props);
    }

    SkASSERT(mCommandPool != VK_NULL_HANDLE);

    // set up the backbuffers
    VkSemaphoreCreateInfo semaphoreInfo;
    memset(&semaphoreInfo, 0, sizeof(VkSemaphoreCreateInfo));
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    VkCommandBufferAllocateInfo commandBuffersInfo;
    memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
    commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    commandBuffersInfo.pNext = nullptr;
    commandBuffersInfo.commandPool = mCommandPool;
    commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    commandBuffersInfo.commandBufferCount = 2;
    VkFenceCreateInfo fenceInfo;
    memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
    fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    fenceInfo.pNext = nullptr;
    // Fences start signaled so the first getAvailableBackbuffer() wait succeeds
    // immediately.
    fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;

    // we create one additional backbuffer structure here, because we want to
    // give the command buffers they contain a chance to finish before we cycle back
    surface->mBackbuffers = new VulkanSurface::BackbufferInfo[surface->mImageCount + 1];
    for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
        SkDEBUGCODE(VkResult res);
        surface->mBackbuffers[i].mImageIndex = -1;
        SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
                &surface->mBackbuffers[i].mAcquireSemaphore);
        SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
                &surface->mBackbuffers[i].mRenderSemaphore);
        SkDEBUGCODE(res =) mAllocateCommandBuffers(mDevice, &commandBuffersInfo,
                surface->mBackbuffers[i].mTransitionCmdBuffers);
        SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
                &surface->mBackbuffers[i].mUsageFences[0]);
        SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
                &surface->mBackbuffers[i].mUsageFences[1]);
        SkASSERT(VK_SUCCESS == res);
    }
    surface->mCurrentBackbufferIndex = surface->mImageCount;
}
696
// Creates (or recreates) the VkSwapchainKHR for the given surface and its
// backing buffers. Queries the surface capabilities/formats/present modes,
// picks a format matching the surface's color type and color mode, builds the
// swapchain, tears down the previous one if present, and allocates the new
// per-image buffers. Returns false on any Vulkan failure or if no matching
// surface format is exposed by the driver.
bool VulkanManager::createSwapchain(VulkanSurface* surface) {
    // check for capabilities
    VkSurfaceCapabilitiesKHR caps;
    VkResult res = mGetPhysicalDeviceSurfaceCapabilitiesKHR(mPhysicalDevice,
            surface->mVkSurface, &caps);
    if (VK_SUCCESS != res) {
        return false;
    }

    // Standard two-call query pattern: first get the count, then the data.
    uint32_t surfaceFormatCount;
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
            &surfaceFormatCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    // FatVector keeps the common small-count case off the heap.
    FatVector<VkSurfaceFormatKHR, 4> surfaceFormats(surfaceFormatCount);
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
            &surfaceFormatCount, surfaceFormats.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    uint32_t presentModeCount;
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
            surface->mVkSurface, &presentModeCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    FatVector<VkPresentModeKHR, VK_PRESENT_MODE_RANGE_SIZE_KHR> presentModes(presentModeCount);
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
            surface->mVkSurface, &presentModeCount,
            presentModes.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    VkExtent2D extent = caps.currentExtent;
    // clamp width; to handle currentExtent of -1 and protect us from broken hints
    if (extent.width < caps.minImageExtent.width) {
        extent.width = caps.minImageExtent.width;
    }
    SkASSERT(extent.width <= caps.maxImageExtent.width);
    // clamp height
    if (extent.height < caps.minImageExtent.height) {
        extent.height = caps.minImageExtent.height;
    }
    SkASSERT(extent.height <= caps.maxImageExtent.height);
    surface->mWindowWidth = extent.width;
    surface->mWindowHeight = extent.height;

    // Ask for triple buffering (or the driver's minimum if it is larger).
    uint32_t imageCount = std::max<uint32_t>(3, caps.minImageCount);
    if (caps.maxImageCount > 0 && imageCount > caps.maxImageCount) {
        // Application must settle for fewer images than desired:
        imageCount = caps.maxImageCount;
    }

    // Currently Skia requires the images to be color attachments and support all transfer
    // operations.
    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    SkASSERT((caps.supportedUsageFlags & usageFlags) == usageFlags);
    SkASSERT(caps.supportedTransforms & caps.currentTransform);
    SkASSERT(caps.supportedCompositeAlpha &
             (VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR | VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR));
    // Prefer INHERIT (compositor decides) when available, otherwise OPAQUE.
    VkCompositeAlphaFlagBitsKHR composite_alpha =
            (caps.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR)
                    ? VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR
                    : VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;

    // Default to 8888/sRGB; switch to F16 when the surface was created with an
    // F16 color type (wide color content).
    VkFormat surfaceFormat = VK_FORMAT_R8G8B8A8_UNORM;
    VkColorSpaceKHR colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
    if (surface->mColorType == SkColorType::kRGBA_F16_SkColorType) {
        surfaceFormat = VK_FORMAT_R16G16B16A16_SFLOAT;
    }

    if (surface->mColorMode == ColorMode::WideColorGamut) {
        if (surface->mColorGamut == SkColorSpace::Gamut::kSRGB_Gamut) {
            colorSpace = VK_COLOR_SPACE_EXTENDED_SRGB_NONLINEAR_EXT;
        } else if (surface->mColorGamut == SkColorSpace::Gamut::kDCIP3_D65_Gamut) {
            colorSpace = VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT;
        } else {
            LOG_ALWAYS_FATAL("Unreachable: unsupported wide color space.");
        }
    }

    // The chosen format/color-space pair must be one the driver actually
    // advertises for this surface; otherwise bail out.
    bool foundSurfaceFormat = false;
    for (uint32_t i = 0; i < surfaceFormatCount; ++i) {
        if (surfaceFormat == surfaceFormats[i].format
                && colorSpace == surfaceFormats[i].colorSpace) {
            foundSurfaceFormat = true;
            break;
        }
    }

    if (!foundSurfaceFormat) {
        return false;
    }

    // FIFO is always available and will match what we do on GL so just pick that here.
    VkPresentModeKHR mode = VK_PRESENT_MODE_FIFO_KHR;

    VkSwapchainCreateInfoKHR swapchainCreateInfo;
    memset(&swapchainCreateInfo, 0, sizeof(VkSwapchainCreateInfoKHR));
    swapchainCreateInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
    swapchainCreateInfo.surface = surface->mVkSurface;
    swapchainCreateInfo.minImageCount = imageCount;
    swapchainCreateInfo.imageFormat = surfaceFormat;
    swapchainCreateInfo.imageColorSpace = colorSpace;
    swapchainCreateInfo.imageExtent = extent;
    swapchainCreateInfo.imageArrayLayers = 1;
    swapchainCreateInfo.imageUsage = usageFlags;

    // If graphics and present come from different queue families the images
    // must be shared concurrently between the two families.
    uint32_t queueFamilies[] = {mGraphicsQueueIndex, mPresentQueueIndex};
    if (mGraphicsQueueIndex != mPresentQueueIndex) {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
        swapchainCreateInfo.queueFamilyIndexCount = 2;
        swapchainCreateInfo.pQueueFamilyIndices = queueFamilies;
    } else {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
        swapchainCreateInfo.queueFamilyIndexCount = 0;
        swapchainCreateInfo.pQueueFamilyIndices = nullptr;
    }

    swapchainCreateInfo.preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
    swapchainCreateInfo.compositeAlpha = composite_alpha;
    swapchainCreateInfo.presentMode = mode;
    swapchainCreateInfo.clipped = true;
    // Passing the old swapchain lets the driver reuse resources during resize.
    swapchainCreateInfo.oldSwapchain = surface->mSwapchain;

    res = mCreateSwapchainKHR(mDevice, &swapchainCreateInfo, nullptr, &surface->mSwapchain);
    if (VK_SUCCESS != res) {
        return false;
    }

    // destroy the old swapchain
    if (swapchainCreateInfo.oldSwapchain != VK_NULL_HANDLE) {
        // Wait for all in-flight work before freeing the old buffers.
        mDeviceWaitIdle(mDevice);

        destroyBuffers(surface);

        mDestroySwapchainKHR(mDevice, swapchainCreateInfo.oldSwapchain, nullptr);
    }

    createBuffers(surface, surfaceFormat, extent);

    // The window content is not updated (frozen) until a buffer of the window size is received.
    // This prevents temporary stretching of the window after it is resized, but before the first
    // buffer with new size is enqueued.
    native_window_set_scaling_mode(surface->mNativeWindow, NATIVE_WINDOW_SCALING_MODE_FREEZE);

    return true;
}
852
Stan Iliev987a80c2018-12-04 10:07:21 -0500853VulkanSurface* VulkanManager::createSurface(ANativeWindow* window, ColorMode colorMode,
Peiyong Lin3bff1352018-12-11 07:56:07 -0800854 sk_sp<SkColorSpace> surfaceColorSpace,
855 SkColorSpace::Gamut surfaceColorGamut,
856 SkColorType surfaceColorType) {
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500857 initialize();
858
859 if (!window) {
860 return nullptr;
861 }
862
Peiyong Lin3bff1352018-12-11 07:56:07 -0800863 VulkanSurface* surface = new VulkanSurface(colorMode, window, surfaceColorSpace,
864 surfaceColorGamut, surfaceColorType);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500865
866 VkAndroidSurfaceCreateInfoKHR surfaceCreateInfo;
867 memset(&surfaceCreateInfo, 0, sizeof(VkAndroidSurfaceCreateInfoKHR));
868 surfaceCreateInfo.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
869 surfaceCreateInfo.pNext = nullptr;
870 surfaceCreateInfo.flags = 0;
871 surfaceCreateInfo.window = window;
872
Greg Daniel2ff202712018-06-14 11:50:10 -0400873 VkResult res = mCreateAndroidSurfaceKHR(mInstance, &surfaceCreateInfo, nullptr,
874 &surface->mVkSurface);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500875 if (VK_SUCCESS != res) {
876 delete surface;
877 return nullptr;
878 }
879
John Reck1bcacfd2017-11-03 10:12:19 -0700880 SkDEBUGCODE(VkBool32 supported; res = mGetPhysicalDeviceSurfaceSupportKHR(
Greg Daniel2ff202712018-06-14 11:50:10 -0400881 mPhysicalDevice, mPresentQueueIndex, surface->mVkSurface, &supported);
882 // All physical devices and queue families on Android must be capable of
883 // presentation with any native window.
884 SkASSERT(VK_SUCCESS == res && supported););
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500885
886 if (!createSwapchain(surface)) {
887 destroySurface(surface);
888 return nullptr;
889 }
890
891 return surface;
892}
893
894// Helper to know which src stage flags we need to set when transitioning to the present layout
Greg Daniel8a2a7542018-10-04 13:46:55 -0400895static VkPipelineStageFlags layoutToPipelineSrcStageFlags(const VkImageLayout layout) {
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500896 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
897 return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
898 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
899 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
900 return VK_PIPELINE_STAGE_TRANSFER_BIT;
Greg Daniel8a2a7542018-10-04 13:46:55 -0400901 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
902 return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
903 } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
904 VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout) {
905 return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
906 } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
907 return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500908 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
909 return VK_PIPELINE_STAGE_HOST_BIT;
910 }
911
912 SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
913 return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
914}
915
916// Helper to know which src access mask we need to set when transitioning to the present layout
917static VkAccessFlags layoutToSrcAccessMask(const VkImageLayout layout) {
918 VkAccessFlags flags = 0;
919 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
920 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
John Reck1bcacfd2017-11-03 10:12:19 -0700921 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT |
922 VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_HOST_WRITE_BIT |
923 VK_ACCESS_HOST_READ_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500924 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
925 flags = VK_ACCESS_HOST_WRITE_BIT;
926 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
927 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
928 } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
929 flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
930 } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
931 flags = VK_ACCESS_TRANSFER_WRITE_BIT;
932 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
933 flags = VK_ACCESS_TRANSFER_READ_BIT;
934 } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
935 flags = VK_ACCESS_SHADER_READ_BIT;
936 }
937 return flags;
938}
939
// Presents the current backbuffer of the given surface: records and submits a
// queue-family/layout transition of the swapchain image to PRESENT_SRC_KHR,
// then queues the present. Also updates the per-image bookkeeping
// (mImageLayout, mLastUsed, mInvalid) used by getAge() for damage tracking.
void VulkanManager::swapBuffers(VulkanSurface* surface) {
    if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
        // Debug property: serialize CPU and GPU each frame.
        ATRACE_NAME("Finishing GPU work");
        mDeviceWaitIdle(mDevice);
    }

    SkASSERT(surface->mBackbuffers);
    VulkanSurface::BackbufferInfo* backbuffer =
            surface->mBackbuffers + surface->mCurrentBackbufferIndex;

    // Pull the wrapped Vulkan image state back out of the SkSurface; the
    // kFlushRead access flushes Skia's pending work on it.
    SkSurface* skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface.get();
    GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
            SkSurface::kFlushRead_BackendHandleAccess);
    SkASSERT(backendRT.isValid());

    GrVkImageInfo imageInfo;
    SkAssertResult(backendRT.getVkImageInfo(&imageInfo));

    // Check to make sure we never change the actually wrapped image
    SkASSERT(imageInfo.fImage == surface->mImages[backbuffer->mImageIndex]);

    // We need to transition the image to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR and make sure that all
    // previous work is complete for before presenting. So we first add the necessary barrier here.
    VkImageLayout layout = imageInfo.fImageLayout;
    VkPipelineStageFlags srcStageMask = layoutToPipelineSrcStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = layoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = 0;

    // The barrier also transfers queue-family ownership from the graphics
    // queue to the present queue (they may be the same family).
    VkImageMemoryBarrier imageMemoryBarrier = {
            VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,  // sType
            NULL,  // pNext
            srcAccessMask,  // outputMask
            dstAccessMask,  // inputMask
            layout,  // oldLayout
            VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,  // newLayout
            mGraphicsQueueIndex,  // srcQueueFamilyIndex
            mPresentQueueIndex,  // dstQueueFamilyIndex
            surface->mImages[backbuffer->mImageIndex],  // image
            {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}  // subresourceRange
    };

    // Record the transition into the second per-backbuffer command buffer
    // (index [0] is used for the acquire-side transition).
    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[1], 0);
    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[1], &info);
    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[1], srcStageMask, dstStageMask, 0, 0,
                        nullptr, 0, nullptr, 1, &imageMemoryBarrier);
    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[1]);

    surface->mImageInfos[backbuffer->mImageIndex].mImageLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 0;
    submitInfo.pWaitDstStageMask = 0;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[1];
    submitInfo.signalSemaphoreCount = 1;
    // When this command buffer finishes we will signal this semaphore so that we know it is now
    // safe to present the image to the screen.
    submitInfo.pSignalSemaphores = &backbuffer->mRenderSemaphore;

    // Attach second fence to submission here so we can track when the command buffer finishes.
    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[1]);

    // Submit present operation to present queue. We use a semaphore here to make sure all rendering
    // to the image is complete and that the layout has been change to present on the graphics
    // queue.
    const VkPresentInfoKHR presentInfo = {
            VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,  // sType
            NULL,  // pNext
            1,  // waitSemaphoreCount
            &backbuffer->mRenderSemaphore,  // pWaitSemaphores
            1,  // swapchainCount
            &surface->mSwapchain,  // pSwapchains
            &backbuffer->mImageIndex,  // pImageIndices
            NULL  // pResults
    };

    mQueuePresentKHR(mPresentQueue, &presentInfo);

    // Release our reference and stamp the image for age/damage tracking.
    surface->mBackbuffer.reset();
    surface->mImageInfos[backbuffer->mImageIndex].mLastUsed = surface->mCurrentTime;
    surface->mImageInfos[backbuffer->mImageIndex].mInvalid = false;
    surface->mCurrentTime++;
}
1031
1032int VulkanManager::getAge(VulkanSurface* surface) {
Greg Daniel74ea2012017-11-10 11:32:58 -05001033 SkASSERT(surface->mBackbuffers);
John Reck1bcacfd2017-11-03 10:12:19 -07001034 VulkanSurface::BackbufferInfo* backbuffer =
1035 surface->mBackbuffers + surface->mCurrentBackbufferIndex;
1036 if (mSwapBehavior == SwapBehavior::Discard ||
1037 surface->mImageInfos[backbuffer->mImageIndex].mInvalid) {
Greg Danielcd558522016-11-17 13:31:40 -05001038 return 0;
1039 }
1040 uint16_t lastUsed = surface->mImageInfos[backbuffer->mImageIndex].mLastUsed;
1041 return surface->mCurrentTime - lastUsed;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001042}
1043
Greg Daniel26e0dca2018-09-18 10:33:19 -04001044bool VulkanManager::setupDummyCommandBuffer() {
1045 if (mDummyCB != VK_NULL_HANDLE) {
1046 return true;
1047 }
1048
1049 VkCommandBufferAllocateInfo commandBuffersInfo;
1050 memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
1051 commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
1052 commandBuffersInfo.pNext = nullptr;
1053 commandBuffersInfo.commandPool = mCommandPool;
1054 commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
1055 commandBuffersInfo.commandBufferCount = 1;
1056
1057 VkResult err = mAllocateCommandBuffers(mDevice, &commandBuffersInfo, &mDummyCB);
1058 if (err != VK_SUCCESS) {
1059 // It is probably unnecessary to set this back to VK_NULL_HANDLE, but we set it anyways to
1060 // make sure the driver didn't set a value and then return a failure.
1061 mDummyCB = VK_NULL_HANDLE;
1062 return false;
1063 }
1064
1065 VkCommandBufferBeginInfo beginInfo;
1066 memset(&beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
1067 beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
1068 beginInfo.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
1069
1070 mBeginCommandBuffer(mDummyCB, &beginInfo);
1071 mEndCommandBuffer(mDummyCB);
1072 return true;
1073}
1074
Stan Iliev564ca3e2018-09-04 22:00:00 +00001075status_t VulkanManager::fenceWait(sp<Fence>& fence) {
Greg Daniel26e0dca2018-09-18 10:33:19 -04001076 if (!hasVkContext()) {
1077 ALOGE("VulkanManager::fenceWait: VkDevice not initialized");
1078 return INVALID_OPERATION;
1079 }
1080
Stan Iliev7a081272018-10-26 17:54:18 -04001081 // Block GPU on the fence.
1082 int fenceFd = fence->dup();
1083 if (fenceFd == -1) {
1084 ALOGE("VulkanManager::fenceWait: error dup'ing fence fd: %d", errno);
1085 return -errno;
Stan Iliev564ca3e2018-09-04 22:00:00 +00001086 }
Stan Iliev7a081272018-10-26 17:54:18 -04001087
1088 VkSemaphoreCreateInfo semaphoreInfo;
1089 semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
1090 semaphoreInfo.pNext = nullptr;
1091 semaphoreInfo.flags = 0;
1092 VkSemaphore semaphore;
1093 VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
1094 if (VK_SUCCESS != err) {
1095 ALOGE("Failed to create import semaphore, err: %d", err);
1096 return UNKNOWN_ERROR;
1097 }
1098 VkImportSemaphoreFdInfoKHR importInfo;
1099 importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
1100 importInfo.pNext = nullptr;
1101 importInfo.semaphore = semaphore;
1102 importInfo.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
1103 importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
1104 importInfo.fd = fenceFd;
1105
1106 err = mImportSemaphoreFdKHR(mDevice, &importInfo);
1107 if (VK_SUCCESS != err) {
1108 ALOGE("Failed to import semaphore, err: %d", err);
1109 return UNKNOWN_ERROR;
1110 }
1111
1112 LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);
1113
1114 VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
1115
1116 VkSubmitInfo submitInfo;
1117 memset(&submitInfo, 0, sizeof(VkSubmitInfo));
1118 submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
1119 submitInfo.waitSemaphoreCount = 1;
1120 // Wait to make sure aquire semaphore set above has signaled.
1121 submitInfo.pWaitSemaphores = &semaphore;
1122 submitInfo.pWaitDstStageMask = &waitDstStageFlags;
1123 submitInfo.commandBufferCount = 1;
1124 submitInfo.pCommandBuffers = &mDummyCB;
1125 submitInfo.signalSemaphoreCount = 0;
1126
1127 mQueueSubmit(mGraphicsQueue, 1, &submitInfo, VK_NULL_HANDLE);
1128
1129 // On Android when we import a semaphore, it is imported using temporary permanence. That
1130 // means as soon as we queue the semaphore for a wait it reverts to its previous permanent
1131 // state before importing. This means it will now be in an idle state with no pending
1132 // signal or wait operations, so it is safe to immediately delete it.
1133 mDestroySemaphore(mDevice, semaphore, nullptr);
Stan Iliev564ca3e2018-09-04 22:00:00 +00001134 return OK;
1135}
1136
1137status_t VulkanManager::createReleaseFence(sp<Fence>& nativeFence) {
Greg Daniel26e0dca2018-09-18 10:33:19 -04001138 if (!hasVkContext()) {
1139 ALOGE("VulkanManager::createReleaseFence: VkDevice not initialized");
1140 return INVALID_OPERATION;
1141 }
1142
Greg Daniel26e0dca2018-09-18 10:33:19 -04001143 VkExportSemaphoreCreateInfo exportInfo;
1144 exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
1145 exportInfo.pNext = nullptr;
1146 exportInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
1147
1148 VkSemaphoreCreateInfo semaphoreInfo;
1149 semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
1150 semaphoreInfo.pNext = &exportInfo;
1151 semaphoreInfo.flags = 0;
1152 VkSemaphore semaphore;
1153 VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
1154 if (VK_SUCCESS != err) {
1155 ALOGE("VulkanManager::createReleaseFence: Failed to create semaphore");
1156 return INVALID_OPERATION;
1157 }
1158
1159 LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);
1160
1161 VkSubmitInfo submitInfo;
1162 memset(&submitInfo, 0, sizeof(VkSubmitInfo));
1163 submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
1164 submitInfo.waitSemaphoreCount = 0;
1165 submitInfo.pWaitSemaphores = nullptr;
1166 submitInfo.pWaitDstStageMask = nullptr;
1167 submitInfo.commandBufferCount = 1;
1168 submitInfo.pCommandBuffers = &mDummyCB;
1169 submitInfo.signalSemaphoreCount = 1;
1170 submitInfo.pSignalSemaphores = &semaphore;
1171
1172 mQueueSubmit(mGraphicsQueue, 1, &submitInfo, VK_NULL_HANDLE);
1173
1174 VkSemaphoreGetFdInfoKHR getFdInfo;
1175 getFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
1176 getFdInfo.pNext = nullptr;
1177 getFdInfo.semaphore = semaphore;
1178 getFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
1179
1180 int fenceFd = 0;
1181
1182 err = mGetSemaphoreFdKHR(mDevice, &getFdInfo, &fenceFd);
1183 if (VK_SUCCESS != err) {
1184 ALOGE("VulkanManager::createReleaseFence: Failed to get semaphore Fd");
1185 return INVALID_OPERATION;
1186 }
1187 nativeFence = new Fence(fenceFd);
1188
1189 // Exporting a semaphore with copy transference via vkGetSemahporeFdKHR, has the same effect of
1190 // destroying the semaphore and creating a new one with the same handle, and the payloads
1191 // ownership is move to the Fd we created. Thus the semahpore is in a state that we can delete
1192 // it and we don't need to wait on the command buffer we submitted to finish.
1193 mDestroySemaphore(mDevice, semaphore, nullptr);
1194
Stan Iliev564ca3e2018-09-04 22:00:00 +00001195 return OK;
1196}
1197
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001198} /* namespace renderthread */
1199} /* namespace uirenderer */
1200} /* namespace android */