blob: 9a6df75fedd9dc69155990427ab15f01a2f6a1e9 [file] [log] [blame]
/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
17#include "VulkanManager.h"
18
Stan Iliev305e13a2018-11-13 11:14:48 -050019#include <gui/Surface.h>
20
Greg Danielcd558522016-11-17 13:31:40 -050021#include "Properties.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050022#include "RenderThread.h"
Greg Daniel45ec62b2017-01-04 14:27:00 -050023#include "renderstate/RenderState.h"
Ben Wagnereec27d52017-01-11 15:32:07 -050024#include "utils/FatVector.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050025
Greg Danielac2d2322017-07-12 11:30:15 -040026#include <GrBackendSurface.h>
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050027#include <GrContext.h>
28#include <GrTypes.h>
Greg Daniela227dbb2018-08-20 09:19:48 -040029#include <GrTypes.h>
30#include <vk/GrVkExtensions.h>
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050031#include <vk/GrVkTypes.h>
32
33namespace android {
34namespace uirenderer {
35namespace renderthread {
36
Greg Daniel2ff202712018-06-14 11:50:10 -040037#define GET_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(VK_NULL_HANDLE, "vk" #F)
38#define GET_INST_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(mInstance, "vk" #F)
39#define GET_DEV_PROC(F) m##F = (PFN_vk##F)vkGetDeviceProcAddr(mDevice, "vk" #F)
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050040
John Reck1bcacfd2017-11-03 10:12:19 -070041VulkanManager::VulkanManager(RenderThread& thread) : mRenderThread(thread) {}
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050042
43void VulkanManager::destroy() {
Greg Daniel45ec62b2017-01-04 14:27:00 -050044 mRenderThread.setGrContext(nullptr);
45
Greg Daniel26e0dca2018-09-18 10:33:19 -040046 // We don't need to explicitly free the command buffer since it automatically gets freed when we
47 // delete the VkCommandPool below.
48 mDummyCB = VK_NULL_HANDLE;
49
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050050 if (VK_NULL_HANDLE != mCommandPool) {
Greg Daniel2ff202712018-06-14 11:50:10 -040051 mDestroyCommandPool(mDevice, mCommandPool, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050052 mCommandPool = VK_NULL_HANDLE;
53 }
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050054
Greg Daniel2ff202712018-06-14 11:50:10 -040055 if (mDevice != VK_NULL_HANDLE) {
56 mDeviceWaitIdle(mDevice);
57 mDestroyDevice(mDevice, nullptr);
John Reck1bcacfd2017-11-03 10:12:19 -070058 }
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050059
Greg Daniel2ff202712018-06-14 11:50:10 -040060 if (mInstance != VK_NULL_HANDLE) {
61 mDestroyInstance(mInstance, nullptr);
62 }
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050063
Greg Daniel2ff202712018-06-14 11:50:10 -040064 mGraphicsQueue = VK_NULL_HANDLE;
65 mPresentQueue = VK_NULL_HANDLE;
66 mDevice = VK_NULL_HANDLE;
67 mPhysicalDevice = VK_NULL_HANDLE;
68 mInstance = VK_NULL_HANDLE;
69}
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050070
// Creates the VkInstance and VkDevice and loads every Vulkan entry point the
// manager needs, populating |grExtensions| and |features| for Skia's backend
// context. Returns false (after cleaning up via destroy()) if any required
// extension, queue, or API version is missing. Requires Vulkan 1.1 from both
// the loader and the physical device.
bool VulkanManager::setupDevice(GrVkExtensions& grExtensions, VkPhysicalDeviceFeatures2& features) {
    VkResult err;

    constexpr VkApplicationInfo app_info = {
        VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
        nullptr, // pNext
        "android framework", // pApplicationName
        0, // applicationVersion
        "android framework", // pEngineName
        0, // engineVerison
        VK_MAKE_VERSION(1, 1, 0), // apiVersion
    };

    std::vector<const char*> instanceExtensions;
    {
        // Enable every available instance extension, but require at least the
        // two needed to present to an ANativeWindow.
        GET_PROC(EnumerateInstanceExtensionProperties);

        uint32_t extensionCount = 0;
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
        if (VK_SUCCESS != err) {
            return false;
        }
        std::unique_ptr<VkExtensionProperties[]> extensions(
                new VkExtensionProperties[extensionCount]);
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions.get());
        if (VK_SUCCESS != err) {
            return false;
        }
        bool hasKHRSurfaceExtension = false;
        bool hasKHRAndroidSurfaceExtension = false;
        for (uint32_t i = 0; i < extensionCount; ++i) {
            // NOTE: instanceExtensions stores pointers into |extensions|; they
            // are only consumed below, before |extensions| goes out of scope.
            instanceExtensions.push_back(extensions[i].extensionName);
            if (!strcmp(extensions[i].extensionName, VK_KHR_SURFACE_EXTENSION_NAME)) {
                hasKHRSurfaceExtension = true;
            }
            if (!strcmp(extensions[i].extensionName,VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
                hasKHRAndroidSurfaceExtension = true;
            }
        }
        if (!hasKHRSurfaceExtension || !hasKHRAndroidSurfaceExtension) {
            this->destroy();
            return false;
        }
    }

    const VkInstanceCreateInfo instance_create = {
        VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
        nullptr, // pNext
        0, // flags
        &app_info, // pApplicationInfo
        0, // enabledLayerNameCount
        nullptr, // ppEnabledLayerNames
        (uint32_t) instanceExtensions.size(), // enabledExtensionNameCount
        instanceExtensions.data(), // ppEnabledExtensionNames
    };

    GET_PROC(CreateInstance);
    err = mCreateInstance(&instance_create, nullptr, &mInstance);
    if (err < 0) {
        this->destroy();
        return false;
    }

    // Instance-level entry points can only be resolved once mInstance exists.
    GET_INST_PROC(DestroyInstance);
    GET_INST_PROC(EnumeratePhysicalDevices);
    GET_INST_PROC(GetPhysicalDeviceProperties);
    GET_INST_PROC(GetPhysicalDeviceQueueFamilyProperties);
    GET_INST_PROC(GetPhysicalDeviceFeatures2);
    GET_INST_PROC(CreateDevice);
    GET_INST_PROC(EnumerateDeviceExtensionProperties);
    GET_INST_PROC(CreateAndroidSurfaceKHR);
    GET_INST_PROC(DestroySurfaceKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfaceSupportKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfacePresentModesKHR);

    uint32_t gpuCount;
    err = mEnumeratePhysicalDevices(mInstance, &gpuCount, nullptr);
    if (err) {
        this->destroy();
        return false;
    }
    if (!gpuCount) {
        this->destroy();
        return false;
    }
    // Just returning the first physical device instead of getting the whole array. Since there
    // should only be one device on android.
    gpuCount = 1;
    err = mEnumeratePhysicalDevices(mInstance, &gpuCount, &mPhysicalDevice);
    // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
    if (err && VK_INCOMPLETE != err) {
        this->destroy();
        return false;
    }

    // The device itself must also report at least Vulkan 1.1.
    VkPhysicalDeviceProperties physDeviceProperties;
    mGetPhysicalDeviceProperties(mPhysicalDevice, &physDeviceProperties);
    if (physDeviceProperties.apiVersion < VK_MAKE_VERSION(1, 1, 0)) {
        this->destroy();
        return false;
    }

    // query to get the initial queue props size
    uint32_t queueCount;
    mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, nullptr);
    if (!queueCount) {
        this->destroy();
        return false;
    }

    // now get the actual queue props
    std::unique_ptr<VkQueueFamilyProperties[]> queueProps(new VkQueueFamilyProperties[queueCount]);
    mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, queueProps.get());

    // iterate to find the graphics queue
    mGraphicsQueueIndex = queueCount;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            mGraphicsQueueIndex = i;
            break;
        }
    }
    if (mGraphicsQueueIndex == queueCount) {
        this->destroy();
        return false;
    }

    // All physical devices and queue families on Android must be capable of
    // presentation with any native window. So just use the first one.
    mPresentQueueIndex = 0;

    std::vector<const char*> deviceExtensions;
    {
        // Enable every available device extension; VK_KHR_swapchain is the
        // only one that is strictly required.
        uint32_t extensionCount = 0;
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                nullptr);
        if (VK_SUCCESS != err) {
            this->destroy();
            return false;
        }
        std::unique_ptr<VkExtensionProperties[]> extensions(
                new VkExtensionProperties[extensionCount]);
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                extensions.get());
        if (VK_SUCCESS != err) {
            this->destroy();
            return false;
        }
        bool hasKHRSwapchainExtension = false;
        for (uint32_t i = 0; i < extensionCount; ++i) {
            deviceExtensions.push_back(extensions[i].extensionName);
            if (!strcmp(extensions[i].extensionName, VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
                hasKHRSwapchainExtension = true;
            }
        }
        if (!hasKHRSwapchainExtension) {
            this->destroy();
            return false;
        }
    }

    // Resolver handed to Skia: prefer device-level entry points when a device
    // is supplied, falling back to instance-level lookup.
    auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return vkGetDeviceProcAddr(device, proc_name);
        }
        return vkGetInstanceProcAddr(instance, proc_name);
    };
    grExtensions.init(getProc, mInstance, mPhysicalDevice, instanceExtensions.size(),
            instanceExtensions.data(), deviceExtensions.size(), deviceExtensions.data());

    if (!grExtensions.hasExtension(VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME, 1)) {
        this->destroy();
        return false;
    }

    memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
    features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    features.pNext = nullptr;

    // Setup all extension feature structs we may want to use.
    // Each struct below is malloc'd and linked into features.pNext; the chain
    // is later released by free_features_extensions_structs().
    void** tailPNext = &features.pNext;

    if (grExtensions.hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
        VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend;
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) malloc(
                sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
        LOG_ALWAYS_FATAL_IF(!blend);
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
        blend->pNext = nullptr;
        *tailPNext = blend;
        tailPNext = &blend->pNext;
    }

    VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature;
    ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*) malloc(
            sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
    LOG_ALWAYS_FATAL_IF(!ycbcrFeature);
    ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
    ycbcrFeature->pNext = nullptr;
    *tailPNext = ycbcrFeature;
    tailPNext = &ycbcrFeature->pNext;

    // query to get the physical device features
    mGetPhysicalDeviceFeatures2(mPhysicalDevice, &features);
    // this looks like it would slow things down,
    // and we can't depend on it on all platforms
    features.features.robustBufferAccess = VK_FALSE;

    float queuePriorities[1] = { 0.0 };

    const VkDeviceQueueCreateInfo queueInfo[2] = {
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr, // pNext
            0, // VkDeviceQueueCreateFlags
            mGraphicsQueueIndex, // queueFamilyIndex
            1, // queueCount
            queuePriorities, // pQueuePriorities
        },
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr, // pNext
            0, // VkDeviceQueueCreateFlags
            mPresentQueueIndex, // queueFamilyIndex
            1, // queueCount
            queuePriorities, // pQueuePriorities
        }
    };
    // Only request a second queue when present and graphics come from
    // different families.
    uint32_t queueInfoCount = (mPresentQueueIndex != mGraphicsQueueIndex) ? 2 : 1;

    const VkDeviceCreateInfo deviceInfo = {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
        &features, // pNext
        0, // VkDeviceCreateFlags
        queueInfoCount, // queueCreateInfoCount
        queueInfo, // pQueueCreateInfos
        0, // layerCount
        nullptr, // ppEnabledLayerNames
        (uint32_t) deviceExtensions.size(), // extensionCount
        deviceExtensions.data(), // ppEnabledExtensionNames
        nullptr, // ppEnabledFeatures
    };

    err = mCreateDevice(mPhysicalDevice, &deviceInfo, nullptr, &mDevice);
    if (err) {
        this->destroy();
        return false;
    }

    // Device-level entry points can only be resolved once mDevice exists.
    GET_DEV_PROC(GetDeviceQueue);
    GET_DEV_PROC(DeviceWaitIdle);
    GET_DEV_PROC(DestroyDevice);
    GET_DEV_PROC(CreateSwapchainKHR);
    GET_DEV_PROC(DestroySwapchainKHR);
    GET_DEV_PROC(GetSwapchainImagesKHR);
    GET_DEV_PROC(AcquireNextImageKHR);
    GET_DEV_PROC(QueuePresentKHR);
    GET_DEV_PROC(CreateCommandPool);
    GET_DEV_PROC(DestroyCommandPool);
    GET_DEV_PROC(AllocateCommandBuffers);
    GET_DEV_PROC(FreeCommandBuffers);
    GET_DEV_PROC(ResetCommandBuffer);
    GET_DEV_PROC(BeginCommandBuffer);
    GET_DEV_PROC(EndCommandBuffer);
    GET_DEV_PROC(CmdPipelineBarrier);
    GET_DEV_PROC(GetDeviceQueue);
    GET_DEV_PROC(QueueSubmit);
    GET_DEV_PROC(QueueWaitIdle);
    GET_DEV_PROC(DeviceWaitIdle);
    GET_DEV_PROC(CreateSemaphore);
    GET_DEV_PROC(DestroySemaphore);
    GET_DEV_PROC(ImportSemaphoreFdKHR);
    GET_DEV_PROC(GetSemaphoreFdKHR);
    GET_DEV_PROC(CreateFence);
    GET_DEV_PROC(DestroyFence);
    GET_DEV_PROC(WaitForFences);
    GET_DEV_PROC(ResetFences);

    return true;
}
353
Greg Daniela227dbb2018-08-20 09:19:48 -0400354static void free_features_extensions_structs(const VkPhysicalDeviceFeatures2& features) {
355 // All Vulkan structs that could be part of the features chain will start with the
356 // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
357 // so we can get access to the pNext for the next struct.
358 struct CommonVulkanHeader {
359 VkStructureType sType;
360 void* pNext;
361 };
362
363 void* pNext = features.pNext;
364 while (pNext) {
365 void* current = pNext;
366 pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
367 free(current);
368 }
369}
370
// One-time initialization: verifies the loader supports Vulkan 1.1, creates
// instance/device via setupDevice(), builds the GrVkBackendContext for Skia,
// creates the shared command pool and dummy command buffer, and installs the
// resulting GrContext on the RenderThread. Idempotent — returns immediately
// if a device already exists. Any unrecoverable failure is fatal.
void VulkanManager::initialize() {
    if (mDevice != VK_NULL_HANDLE) {
        return;
    }

    GET_PROC(EnumerateInstanceVersion);
    uint32_t instanceVersion = 0;
    LOG_ALWAYS_FATAL_IF(mEnumerateInstanceVersion(&instanceVersion));
    LOG_ALWAYS_FATAL_IF(instanceVersion < VK_MAKE_VERSION(1, 1, 0));

    // |extensions| and |features| are stack locals: backendContext points at
    // them below, so they must outlive the GrContext::MakeVulkan call.
    GrVkExtensions extensions;
    VkPhysicalDeviceFeatures2 features;
    LOG_ALWAYS_FATAL_IF(!this->setupDevice(extensions, features));

    mGetDeviceQueue(mDevice, mGraphicsQueueIndex, 0, &mGraphicsQueue);

    // Resolver handed to Skia: device-level lookup when possible, otherwise
    // instance-level.
    auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return vkGetDeviceProcAddr(device, proc_name);
        }
        return vkGetInstanceProcAddr(instance, proc_name);
    };

    GrVkBackendContext backendContext;
    backendContext.fInstance = mInstance;
    backendContext.fPhysicalDevice = mPhysicalDevice;
    backendContext.fDevice = mDevice;
    backendContext.fQueue = mGraphicsQueue;
    backendContext.fGraphicsQueueIndex = mGraphicsQueueIndex;
    backendContext.fInstanceVersion = instanceVersion;
    backendContext.fVkExtensions = &extensions;
    backendContext.fDeviceFeatures2 = &features;
    backendContext.fGetProc = std::move(getProc);

    // create the command pool for the command buffers
    if (VK_NULL_HANDLE == mCommandPool) {
        VkCommandPoolCreateInfo commandPoolInfo;
        memset(&commandPoolInfo, 0, sizeof(VkCommandPoolCreateInfo));
        commandPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
        // this needs to be on the render queue
        commandPoolInfo.queueFamilyIndex = mGraphicsQueueIndex;
        commandPoolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
        SkDEBUGCODE(VkResult res =) mCreateCommandPool(mDevice, &commandPoolInfo, nullptr,
                &mCommandPool);
        SkASSERT(VK_SUCCESS == res);
    }
    LOG_ALWAYS_FATAL_IF(mCommandPool == VK_NULL_HANDLE);

    if (!setupDummyCommandBuffer()) {
        this->destroy();
        return;
    }
    LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);


    mGetDeviceQueue(mDevice, mPresentQueueIndex, 0, &mPresentQueue);

    GrContextOptions options;
    options.fDisableDistanceFieldPaths = true;
    // TODO: get a string describing the SPIR-V compiler version and use it here
    mRenderThread.cacheManager().configureContext(&options, nullptr, 0);
    sk_sp<GrContext> grContext(GrContext::MakeVulkan(backendContext, options));
    LOG_ALWAYS_FATAL_IF(!grContext.get());
    mRenderThread.setGrContext(grContext);

    // Skia has consumed the feature chain; release the malloc'd extension
    // structs that setupDevice() linked into features.pNext.
    free_features_extensions_structs(features);

    if (Properties::enablePartialUpdates && Properties::useBufferAge) {
        mSwapBehavior = SwapBehavior::BufferAge;
    }
}
442
443// Returns the next BackbufferInfo to use for the next draw. The function will make sure all
444// previous uses have finished before returning.
445VulkanSurface::BackbufferInfo* VulkanManager::getAvailableBackbuffer(VulkanSurface* surface) {
446 SkASSERT(surface->mBackbuffers);
447
448 ++surface->mCurrentBackbufferIndex;
449 if (surface->mCurrentBackbufferIndex > surface->mImageCount) {
450 surface->mCurrentBackbufferIndex = 0;
451 }
452
John Reck1bcacfd2017-11-03 10:12:19 -0700453 VulkanSurface::BackbufferInfo* backbuffer =
454 surface->mBackbuffers + surface->mCurrentBackbufferIndex;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500455
456 // Before we reuse a backbuffer, make sure its fences have all signaled so that we can safely
457 // reuse its commands buffers.
Greg Daniel2ff202712018-06-14 11:50:10 -0400458 VkResult res = mWaitForFences(mDevice, 2, backbuffer->mUsageFences, true, UINT64_MAX);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500459 if (res != VK_SUCCESS) {
460 return nullptr;
461 }
462
463 return backbuffer;
464}
465
// Acquires the next swapchain image and returns an SkSurface wrapping it,
// ready for rendering. Recreates the whole VulkanSurface if the native window
// was resized, retries the acquire once after rebuilding the swapchain on
// VK_ERROR_OUT_OF_DATE_KHR, and records/submits a pipeline barrier that
// transitions the image into COLOR_ATTACHMENT_OPTIMAL. Returns nullptr on
// unrecoverable acquire failure. May replace *surfaceOut.
SkSurface* VulkanManager::getBackbufferSurface(VulkanSurface** surfaceOut) {
    // Recreate VulkanSurface, if ANativeWindow has been resized.
    VulkanSurface* surface = *surfaceOut;
    int windowWidth = 0, windowHeight = 0;
    ANativeWindow* window = surface->mNativeWindow;
    window->query(window, NATIVE_WINDOW_WIDTH, &windowWidth);
    window->query(window, NATIVE_WINDOW_HEIGHT, &windowHeight);
    if (windowWidth != surface->mWindowWidth || windowHeight != surface->mWindowHeight) {
        ColorMode colorMode = surface->mColorMode;
        destroySurface(surface);
        *surfaceOut = createSurface(window, colorMode);
        surface = *surfaceOut;
    }

    VulkanSurface::BackbufferInfo* backbuffer = getAvailableBackbuffer(surface);
    SkASSERT(backbuffer);

    VkResult res;

    // Fences were signaled (getAvailableBackbuffer waited on them); reset so
    // this frame's submissions can signal them again.
    res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
    SkASSERT(VK_SUCCESS == res);

    // The acquire will signal the attached mAcquireSemaphore. We use this to know the image has
    // finished presenting and that it is safe to begin sending new commands to the returned image.
    res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
            backbuffer->mAcquireSemaphore, VK_NULL_HANDLE,
            &backbuffer->mImageIndex);

    if (VK_ERROR_SURFACE_LOST_KHR == res) {
        // need to figure out how to create a new vkSurface without the platformData*
        // maybe use attach somehow? but need a Window
        return nullptr;
    }
    if (VK_ERROR_OUT_OF_DATE_KHR == res) {
        // tear swapchain down and try again
        if (!createSwapchain(surface)) {
            return nullptr;
        }
        backbuffer = getAvailableBackbuffer(surface);
        res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
        SkASSERT(VK_SUCCESS == res);

        // acquire the image
        res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
                backbuffer->mAcquireSemaphore, VK_NULL_HANDLE,
                &backbuffer->mImageIndex);

        if (VK_SUCCESS != res) {
            return nullptr;
        }
    }

    // set up layout transfer from initial to color attachment
    VkImageLayout layout = surface->mImageInfos[backbuffer->mImageIndex].mImageLayout;
    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout || VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout);
    VkPipelineStageFlags srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkAccessFlags srcAccessMask = 0;
    VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
            VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;

    // Barrier also transfers queue-family ownership from the present queue
    // back to the graphics queue.
    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
        NULL, // pNext
        srcAccessMask, // outputMask
        dstAccessMask, // inputMask
        layout, // oldLayout
        VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // newLayout
        mPresentQueueIndex, // srcQueueFamilyIndex
        mGraphicsQueueIndex, // dstQueueFamilyIndex
        surface->mImages[backbuffer->mImageIndex], // image
        {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1} // subresourceRange
    };
    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[0], 0);

    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[0], &info);

    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[0], srcStageMask, dstStageMask, 0, 0,
            nullptr, 0, nullptr, 1, &imageMemoryBarrier);

    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[0]);

    VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 1;
    // Wait to make sure acquire semaphore set above has signaled.
    submitInfo.pWaitSemaphores = &backbuffer->mAcquireSemaphore;
    submitInfo.pWaitDstStageMask = &waitDstStageFlags;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[0];
    submitInfo.signalSemaphoreCount = 0;

    // Attach first fence to submission here so we can track when the command buffer finishes.
    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[0]);

    // We need to notify Skia that we changed the layout of the wrapped VkImage
    sk_sp<SkSurface> skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface;
    GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
            SkSurface::kFlushRead_BackendHandleAccess);
    if (!backendRT.isValid()) {
        SkASSERT(backendRT.isValid());
        return nullptr;
    }
    backendRT.setVkImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);

    surface->mBackbuffer = std::move(skSurface);
    return surface->mBackbuffer.get();
}
581
582void VulkanManager::destroyBuffers(VulkanSurface* surface) {
583 if (surface->mBackbuffers) {
584 for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400585 mWaitForFences(mDevice, 2, surface->mBackbuffers[i].mUsageFences, true, UINT64_MAX);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500586 surface->mBackbuffers[i].mImageIndex = -1;
Greg Daniel2ff202712018-06-14 11:50:10 -0400587 mDestroySemaphore(mDevice, surface->mBackbuffers[i].mAcquireSemaphore, nullptr);
588 mDestroySemaphore(mDevice, surface->mBackbuffers[i].mRenderSemaphore, nullptr);
589 mFreeCommandBuffers(mDevice, mCommandPool, 2,
590 surface->mBackbuffers[i].mTransitionCmdBuffers);
591 mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[0], 0);
592 mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[1], 0);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500593 }
594 }
595
596 delete[] surface->mBackbuffers;
597 surface->mBackbuffers = nullptr;
Greg Danielcd558522016-11-17 13:31:40 -0500598 delete[] surface->mImageInfos;
599 surface->mImageInfos = nullptr;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500600 delete[] surface->mImages;
601 surface->mImages = nullptr;
602}
603
604void VulkanManager::destroySurface(VulkanSurface* surface) {
605 // Make sure all submit commands have finished before starting to destroy objects.
606 if (VK_NULL_HANDLE != mPresentQueue) {
607 mQueueWaitIdle(mPresentQueue);
608 }
Greg Daniel2ff202712018-06-14 11:50:10 -0400609 mDeviceWaitIdle(mDevice);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500610
611 destroyBuffers(surface);
612
613 if (VK_NULL_HANDLE != surface->mSwapchain) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400614 mDestroySwapchainKHR(mDevice, surface->mSwapchain, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500615 surface->mSwapchain = VK_NULL_HANDLE;
616 }
617
618 if (VK_NULL_HANDLE != surface->mVkSurface) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400619 mDestroySurfaceKHR(mInstance, surface->mVkSurface, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500620 surface->mVkSurface = VK_NULL_HANDLE;
621 }
622 delete surface;
623}
624
// Queries the swapchain images, wraps each in an SkSurface (choosing F16 vs
// 8888 color type from the surface's color mode), and allocates the ring of
// mImageCount + 1 BackbufferInfo slots with their semaphores, transition
// command buffers, and pre-signaled usage fences.
void VulkanManager::createBuffers(VulkanSurface* surface, VkFormat format, VkExtent2D extent) {
    // First call sizes the array, second call fills it.
    mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, nullptr);
    SkASSERT(surface->mImageCount);
    surface->mImages = new VkImage[surface->mImageCount];
    mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, surface->mImages);

    SkSurfaceProps props(0, kUnknown_SkPixelGeometry);

    // set up initial image layouts and create surfaces
    surface->mImageInfos = new VulkanSurface::ImageInfo[surface->mImageCount];
    for (uint32_t i = 0; i < surface->mImageCount; ++i) {
        GrVkImageInfo info;
        info.fImage = surface->mImages[i];
        // Swapchain images are driver-owned; no GrVkAlloc backing.
        info.fAlloc = GrVkAlloc();
        info.fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
        info.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
        info.fFormat = format;
        info.fLevelCount = 1;

        GrBackendRenderTarget backendRT(extent.width, extent.height, 0, 0, info);

        VulkanSurface::ImageInfo& imageInfo = surface->mImageInfos[i];
        imageInfo.mSurface = SkSurface::MakeFromBackendRenderTarget(
                mRenderThread.getGrContext(), backendRT, kTopLeft_GrSurfaceOrigin,
                surface->mColorMode == ColorMode::WideColorGamut ? kRGBA_F16_SkColorType
                : kRGBA_8888_SkColorType, nullptr, &props);
    }

    SkASSERT(mCommandPool != VK_NULL_HANDLE);

    // set up the backbuffers
    VkSemaphoreCreateInfo semaphoreInfo;
    memset(&semaphoreInfo, 0, sizeof(VkSemaphoreCreateInfo));
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    VkCommandBufferAllocateInfo commandBuffersInfo;
    memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
    commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    commandBuffersInfo.pNext = nullptr;
    commandBuffersInfo.commandPool = mCommandPool;
    commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    commandBuffersInfo.commandBufferCount = 2;
    VkFenceCreateInfo fenceInfo;
    memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
    fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    fenceInfo.pNext = nullptr;
    // Created signaled so the very first wait in getAvailableBackbuffer
    // succeeds immediately.
    fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;

    // we create one additional backbuffer structure here, because we want to
    // give the command buffers they contain a chance to finish before we cycle back
    surface->mBackbuffers = new VulkanSurface::BackbufferInfo[surface->mImageCount + 1];
    for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
        SkDEBUGCODE(VkResult res);
        surface->mBackbuffers[i].mImageIndex = -1;
        SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
                &surface->mBackbuffers[i].mAcquireSemaphore);
        SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
                &surface->mBackbuffers[i].mRenderSemaphore);
        SkDEBUGCODE(res =) mAllocateCommandBuffers(mDevice, &commandBuffersInfo,
                surface->mBackbuffers[i].mTransitionCmdBuffers);
        SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
                &surface->mBackbuffers[i].mUsageFences[0]);
        SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
                &surface->mBackbuffers[i].mUsageFences[1]);
        SkASSERT(VK_SUCCESS == res);
    }
    surface->mCurrentBackbufferIndex = surface->mImageCount;
}
694
// Queries the surface's capabilities, picks a pixel format and present mode, and
// (re)creates the VkSwapchainKHR for |surface|. If an old swapchain existed it is
// handed to the driver via oldSwapchain and then destroyed (after a device-wide
// idle) together with its buffers. Returns false if any Vulkan query/creation call
// fails or the surface does not advertise the requested format/color-space pair.
bool VulkanManager::createSwapchain(VulkanSurface* surface) {
    // check for capabilities
    VkSurfaceCapabilitiesKHR caps;
    VkResult res = mGetPhysicalDeviceSurfaceCapabilitiesKHR(mPhysicalDevice,
            surface->mVkSurface, &caps);
    if (VK_SUCCESS != res) {
        return false;
    }

    // Supported surface formats, standard Vulkan two-call idiom (count, then data).
    uint32_t surfaceFormatCount;
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
            &surfaceFormatCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    FatVector<VkSurfaceFormatKHR, 4> surfaceFormats(surfaceFormatCount);
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
            &surfaceFormatCount, surfaceFormats.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    // Supported present modes, same two-call idiom.
    uint32_t presentModeCount;
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
            surface->mVkSurface, &presentModeCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    FatVector<VkPresentModeKHR, VK_PRESENT_MODE_RANGE_SIZE_KHR> presentModes(presentModeCount);
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
            surface->mVkSurface, &presentModeCount,
            presentModes.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    VkExtent2D extent = caps.currentExtent;
    // clamp width; to handle currentExtent of -1 and protect us from broken hints
    if (extent.width < caps.minImageExtent.width) {
        extent.width = caps.minImageExtent.width;
    }
    SkASSERT(extent.width <= caps.maxImageExtent.width);
    // clamp height
    if (extent.height < caps.minImageExtent.height) {
        extent.height = caps.minImageExtent.height;
    }
    SkASSERT(extent.height <= caps.maxImageExtent.height);
    surface->mWindowWidth = extent.width;
    surface->mWindowHeight = extent.height;

    // Ask for two images above the minimum; a maxImageCount of 0 means "no limit".
    uint32_t imageCount = caps.minImageCount + 2;
    if (caps.maxImageCount > 0 && imageCount > caps.maxImageCount) {
        // Application must settle for fewer images than desired:
        imageCount = caps.maxImageCount;
    }

    // Currently Skia requires the images to be color attchments and support all transfer
    // operations.
    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    SkASSERT((caps.supportedUsageFlags & usageFlags) == usageFlags);
    SkASSERT(caps.supportedTransforms & caps.currentTransform);
    SkASSERT(caps.supportedCompositeAlpha &
             (VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR | VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR));
    // Prefer INHERIT (compositor decides how alpha is treated), fall back to OPAQUE.
    VkCompositeAlphaFlagBitsKHR composite_alpha =
            (caps.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR)
                    ? VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR
                    : VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;

    // Format selection: F16 + extended-sRGB for wide color gamut, else 8888 sRGB.
    VkFormat surfaceFormat = VK_FORMAT_R8G8B8A8_UNORM;
    VkColorSpaceKHR colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
    if (surface->mColorMode == ColorMode::WideColorGamut) {
        surfaceFormat = VK_FORMAT_R16G16B16A16_SFLOAT;
        colorSpace = VK_COLOR_SPACE_EXTENDED_SRGB_NONLINEAR_EXT;
    }
    // The surface must advertise the exact (format, colorSpace) pair we selected.
    bool foundSurfaceFormat = false;
    for (uint32_t i = 0; i < surfaceFormatCount; ++i) {
        if (surfaceFormat == surfaceFormats[i].format
                && colorSpace == surfaceFormats[i].colorSpace) {
            foundSurfaceFormat = true;
            break;
        }
    }

    if (!foundSurfaceFormat) {
        return false;
    }

    // FIFO is always available and will match what we do on GL so just pick that here.
    VkPresentModeKHR mode = VK_PRESENT_MODE_FIFO_KHR;

    VkSwapchainCreateInfoKHR swapchainCreateInfo;
    memset(&swapchainCreateInfo, 0, sizeof(VkSwapchainCreateInfoKHR));
    swapchainCreateInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
    swapchainCreateInfo.surface = surface->mVkSurface;
    swapchainCreateInfo.minImageCount = imageCount;
    swapchainCreateInfo.imageFormat = surfaceFormat;
    swapchainCreateInfo.imageColorSpace = colorSpace;
    swapchainCreateInfo.imageExtent = extent;
    swapchainCreateInfo.imageArrayLayers = 1;
    swapchainCreateInfo.imageUsage = usageFlags;

    // If graphics and present live on different queue families the images must be
    // shared (CONCURRENT); otherwise EXCLUSIVE ownership is cheaper.
    uint32_t queueFamilies[] = {mGraphicsQueueIndex, mPresentQueueIndex};
    if (mGraphicsQueueIndex != mPresentQueueIndex) {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
        swapchainCreateInfo.queueFamilyIndexCount = 2;
        swapchainCreateInfo.pQueueFamilyIndices = queueFamilies;
    } else {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
        swapchainCreateInfo.queueFamilyIndexCount = 0;
        swapchainCreateInfo.pQueueFamilyIndices = nullptr;
    }

    swapchainCreateInfo.preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
    swapchainCreateInfo.compositeAlpha = composite_alpha;
    swapchainCreateInfo.presentMode = mode;
    swapchainCreateInfo.clipped = true;
    // Passing the previous swapchain lets the driver recycle its resources.
    swapchainCreateInfo.oldSwapchain = surface->mSwapchain;

    res = mCreateSwapchainKHR(mDevice, &swapchainCreateInfo, nullptr, &surface->mSwapchain);
    if (VK_SUCCESS != res) {
        return false;
    }

    // destroy the old swapchain
    if (swapchainCreateInfo.oldSwapchain != VK_NULL_HANDLE) {
        // Make sure no in-flight work still references the old images.
        mDeviceWaitIdle(mDevice);

        destroyBuffers(surface);

        mDestroySwapchainKHR(mDevice, swapchainCreateInfo.oldSwapchain, nullptr);
    }

    createBuffers(surface, surfaceFormat, extent);

    return true;
}
835
Stan Iliev79351f32018-09-19 14:23:49 -0400836VulkanSurface* VulkanManager::createSurface(ANativeWindow* window, ColorMode colorMode) {
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500837 initialize();
838
839 if (!window) {
840 return nullptr;
841 }
842
Stan Iliev305e13a2018-11-13 11:14:48 -0500843 VulkanSurface* surface = new VulkanSurface(colorMode, window);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500844
845 VkAndroidSurfaceCreateInfoKHR surfaceCreateInfo;
846 memset(&surfaceCreateInfo, 0, sizeof(VkAndroidSurfaceCreateInfoKHR));
847 surfaceCreateInfo.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
848 surfaceCreateInfo.pNext = nullptr;
849 surfaceCreateInfo.flags = 0;
850 surfaceCreateInfo.window = window;
851
Greg Daniel2ff202712018-06-14 11:50:10 -0400852 VkResult res = mCreateAndroidSurfaceKHR(mInstance, &surfaceCreateInfo, nullptr,
853 &surface->mVkSurface);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500854 if (VK_SUCCESS != res) {
855 delete surface;
856 return nullptr;
857 }
858
John Reck1bcacfd2017-11-03 10:12:19 -0700859 SkDEBUGCODE(VkBool32 supported; res = mGetPhysicalDeviceSurfaceSupportKHR(
Greg Daniel2ff202712018-06-14 11:50:10 -0400860 mPhysicalDevice, mPresentQueueIndex, surface->mVkSurface, &supported);
861 // All physical devices and queue families on Android must be capable of
862 // presentation with any native window.
863 SkASSERT(VK_SUCCESS == res && supported););
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500864
865 if (!createSwapchain(surface)) {
866 destroySurface(surface);
867 return nullptr;
868 }
869
870 return surface;
871}
872
873// Helper to know which src stage flags we need to set when transitioning to the present layout
Greg Daniel8a2a7542018-10-04 13:46:55 -0400874static VkPipelineStageFlags layoutToPipelineSrcStageFlags(const VkImageLayout layout) {
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500875 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
876 return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
877 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
878 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
879 return VK_PIPELINE_STAGE_TRANSFER_BIT;
Greg Daniel8a2a7542018-10-04 13:46:55 -0400880 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
881 return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
882 } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
883 VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout) {
884 return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
885 } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
886 return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500887 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
888 return VK_PIPELINE_STAGE_HOST_BIT;
889 }
890
891 SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
892 return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
893}
894
895// Helper to know which src access mask we need to set when transitioning to the present layout
896static VkAccessFlags layoutToSrcAccessMask(const VkImageLayout layout) {
897 VkAccessFlags flags = 0;
898 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
899 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
John Reck1bcacfd2017-11-03 10:12:19 -0700900 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT |
901 VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_HOST_WRITE_BIT |
902 VK_ACCESS_HOST_READ_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500903 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
904 flags = VK_ACCESS_HOST_WRITE_BIT;
905 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
906 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
907 } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
908 flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
909 } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
910 flags = VK_ACCESS_TRANSFER_WRITE_BIT;
911 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
912 flags = VK_ACCESS_TRANSFER_READ_BIT;
913 } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
914 flags = VK_ACCESS_SHADER_READ_BIT;
915 }
916 return flags;
917}
918
// Presents the current backbuffer of |surface|. Records and submits a command
// buffer that transitions the image to PRESENT_SRC_KHR (including the
// graphics->present queue-family transfer), signaling the backbuffer's render
// semaphore; the present then waits on that semaphore. Also updates the per-image
// bookkeeping (mLastUsed / mInvalid / mCurrentTime) that getAge() relies on.
void VulkanManager::swapBuffers(VulkanSurface* surface) {
    if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
        ATRACE_NAME("Finishing GPU work");
        mDeviceWaitIdle(mDevice);
    }

    SkASSERT(surface->mBackbuffers);
    VulkanSurface::BackbufferInfo* backbuffer =
            surface->mBackbuffers + surface->mCurrentBackbufferIndex;

    // Ask Skia for the wrapped render target; kFlushRead forces pending Skia work
    // on the surface to be flushed before we read its backend state.
    SkSurface* skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface.get();
    GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
            SkSurface::kFlushRead_BackendHandleAccess);
    SkASSERT(backendRT.isValid());

    GrVkImageInfo imageInfo;
    SkAssertResult(backendRT.getVkImageInfo(&imageInfo));

    // Check to make sure we never change the actually wrapped image
    SkASSERT(imageInfo.fImage == surface->mImages[backbuffer->mImageIndex]);

    // We need to transition the image to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR and make sure that all
    // previous work is complete for before presenting. So we first add the necessary barrier here.
    VkImageLayout layout = imageInfo.fImageLayout;
    VkPipelineStageFlags srcStageMask = layoutToPipelineSrcStageFlags(layout);
    // Nothing executes after the barrier in this command buffer, so the dst side
    // only needs BOTTOM_OF_PIPE with no accesses.
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = layoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = 0;

    // Barrier doubles as the queue-family ownership transfer from the graphics
    // queue to the present queue.
    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,    // sType
        NULL,                                      // pNext
        srcAccessMask,                             // outputMask
        dstAccessMask,                             // inputMask
        layout,                                    // oldLayout
        VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,           // newLayout
        mGraphicsQueueIndex,                       // srcQueueFamilyIndex
        mPresentQueueIndex,                        // dstQueueFamilyIndex
        surface->mImages[backbuffer->mImageIndex], // image
        {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}    // subresourceRange
    };

    // Command buffer [1] is dedicated to the pre-present transition (presumably [0]
    // handles the post-acquire transition elsewhere — outside this view).
    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[1], 0);
    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[1], &info);
    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[1], srcStageMask, dstStageMask, 0, 0,
                        nullptr, 0, nullptr, 1, &imageMemoryBarrier);
    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[1]);

    surface->mImageInfos[backbuffer->mImageIndex].mImageLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 0;
    submitInfo.pWaitDstStageMask = 0;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[1];
    submitInfo.signalSemaphoreCount = 1;
    // When this command buffer finishes we will signal this semaphore so that we know it is now
    // safe to present the image to the screen.
    submitInfo.pSignalSemaphores = &backbuffer->mRenderSemaphore;

    // Attach second fence to submission here so we can track when the command buffer finishes.
    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[1]);

    // Submit present operation to present queue. We use a semaphore here to make sure all rendering
    // to the image is complete and that the layout has been change to present on the graphics
    // queue.
    const VkPresentInfoKHR presentInfo = {
        VK_STRUCTURE_TYPE_PRESENT_INFO_KHR, // sType
        NULL, // pNext
        1, // waitSemaphoreCount
        &backbuffer->mRenderSemaphore, // pWaitSemaphores
        1, // swapchainCount
        &surface->mSwapchain, // pSwapchains
        &backbuffer->mImageIndex, // pImageIndices
        NULL // pResults
    };

    mQueuePresentKHR(mPresentQueue, &presentInfo);

    // Record when this image was presented so getAge() can compute buffer age.
    surface->mBackbuffer.reset();
    surface->mImageInfos[backbuffer->mImageIndex].mLastUsed = surface->mCurrentTime;
    surface->mImageInfos[backbuffer->mImageIndex].mInvalid = false;
    surface->mCurrentTime++;
}
1010
1011int VulkanManager::getAge(VulkanSurface* surface) {
Greg Daniel74ea2012017-11-10 11:32:58 -05001012 SkASSERT(surface->mBackbuffers);
John Reck1bcacfd2017-11-03 10:12:19 -07001013 VulkanSurface::BackbufferInfo* backbuffer =
1014 surface->mBackbuffers + surface->mCurrentBackbufferIndex;
1015 if (mSwapBehavior == SwapBehavior::Discard ||
1016 surface->mImageInfos[backbuffer->mImageIndex].mInvalid) {
Greg Danielcd558522016-11-17 13:31:40 -05001017 return 0;
1018 }
1019 uint16_t lastUsed = surface->mImageInfos[backbuffer->mImageIndex].mLastUsed;
1020 return surface->mCurrentTime - lastUsed;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001021}
1022
Greg Daniel26e0dca2018-09-18 10:33:19 -04001023bool VulkanManager::setupDummyCommandBuffer() {
1024 if (mDummyCB != VK_NULL_HANDLE) {
1025 return true;
1026 }
1027
1028 VkCommandBufferAllocateInfo commandBuffersInfo;
1029 memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
1030 commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
1031 commandBuffersInfo.pNext = nullptr;
1032 commandBuffersInfo.commandPool = mCommandPool;
1033 commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
1034 commandBuffersInfo.commandBufferCount = 1;
1035
1036 VkResult err = mAllocateCommandBuffers(mDevice, &commandBuffersInfo, &mDummyCB);
1037 if (err != VK_SUCCESS) {
1038 // It is probably unnecessary to set this back to VK_NULL_HANDLE, but we set it anyways to
1039 // make sure the driver didn't set a value and then return a failure.
1040 mDummyCB = VK_NULL_HANDLE;
1041 return false;
1042 }
1043
1044 VkCommandBufferBeginInfo beginInfo;
1045 memset(&beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
1046 beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
1047 beginInfo.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
1048
1049 mBeginCommandBuffer(mDummyCB, &beginInfo);
1050 mEndCommandBuffer(mDummyCB);
1051 return true;
1052}
1053
Stan Iliev564ca3e2018-09-04 22:00:00 +00001054status_t VulkanManager::fenceWait(sp<Fence>& fence) {
Greg Daniel26e0dca2018-09-18 10:33:19 -04001055 if (!hasVkContext()) {
1056 ALOGE("VulkanManager::fenceWait: VkDevice not initialized");
1057 return INVALID_OPERATION;
1058 }
1059
Stan Iliev7a081272018-10-26 17:54:18 -04001060 // Block GPU on the fence.
1061 int fenceFd = fence->dup();
1062 if (fenceFd == -1) {
1063 ALOGE("VulkanManager::fenceWait: error dup'ing fence fd: %d", errno);
1064 return -errno;
Stan Iliev564ca3e2018-09-04 22:00:00 +00001065 }
Stan Iliev7a081272018-10-26 17:54:18 -04001066
1067 VkSemaphoreCreateInfo semaphoreInfo;
1068 semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
1069 semaphoreInfo.pNext = nullptr;
1070 semaphoreInfo.flags = 0;
1071 VkSemaphore semaphore;
1072 VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
1073 if (VK_SUCCESS != err) {
1074 ALOGE("Failed to create import semaphore, err: %d", err);
1075 return UNKNOWN_ERROR;
1076 }
1077 VkImportSemaphoreFdInfoKHR importInfo;
1078 importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
1079 importInfo.pNext = nullptr;
1080 importInfo.semaphore = semaphore;
1081 importInfo.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
1082 importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
1083 importInfo.fd = fenceFd;
1084
1085 err = mImportSemaphoreFdKHR(mDevice, &importInfo);
1086 if (VK_SUCCESS != err) {
1087 ALOGE("Failed to import semaphore, err: %d", err);
1088 return UNKNOWN_ERROR;
1089 }
1090
1091 LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);
1092
1093 VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
1094
1095 VkSubmitInfo submitInfo;
1096 memset(&submitInfo, 0, sizeof(VkSubmitInfo));
1097 submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
1098 submitInfo.waitSemaphoreCount = 1;
1099 // Wait to make sure aquire semaphore set above has signaled.
1100 submitInfo.pWaitSemaphores = &semaphore;
1101 submitInfo.pWaitDstStageMask = &waitDstStageFlags;
1102 submitInfo.commandBufferCount = 1;
1103 submitInfo.pCommandBuffers = &mDummyCB;
1104 submitInfo.signalSemaphoreCount = 0;
1105
1106 mQueueSubmit(mGraphicsQueue, 1, &submitInfo, VK_NULL_HANDLE);
1107
1108 // On Android when we import a semaphore, it is imported using temporary permanence. That
1109 // means as soon as we queue the semaphore for a wait it reverts to its previous permanent
1110 // state before importing. This means it will now be in an idle state with no pending
1111 // signal or wait operations, so it is safe to immediately delete it.
1112 mDestroySemaphore(mDevice, semaphore, nullptr);
Stan Iliev564ca3e2018-09-04 22:00:00 +00001113 return OK;
1114}
1115
1116status_t VulkanManager::createReleaseFence(sp<Fence>& nativeFence) {
Greg Daniel26e0dca2018-09-18 10:33:19 -04001117 if (!hasVkContext()) {
1118 ALOGE("VulkanManager::createReleaseFence: VkDevice not initialized");
1119 return INVALID_OPERATION;
1120 }
1121
Greg Daniel26e0dca2018-09-18 10:33:19 -04001122 VkExportSemaphoreCreateInfo exportInfo;
1123 exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
1124 exportInfo.pNext = nullptr;
1125 exportInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
1126
1127 VkSemaphoreCreateInfo semaphoreInfo;
1128 semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
1129 semaphoreInfo.pNext = &exportInfo;
1130 semaphoreInfo.flags = 0;
1131 VkSemaphore semaphore;
1132 VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
1133 if (VK_SUCCESS != err) {
1134 ALOGE("VulkanManager::createReleaseFence: Failed to create semaphore");
1135 return INVALID_OPERATION;
1136 }
1137
1138 LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);
1139
1140 VkSubmitInfo submitInfo;
1141 memset(&submitInfo, 0, sizeof(VkSubmitInfo));
1142 submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
1143 submitInfo.waitSemaphoreCount = 0;
1144 submitInfo.pWaitSemaphores = nullptr;
1145 submitInfo.pWaitDstStageMask = nullptr;
1146 submitInfo.commandBufferCount = 1;
1147 submitInfo.pCommandBuffers = &mDummyCB;
1148 submitInfo.signalSemaphoreCount = 1;
1149 submitInfo.pSignalSemaphores = &semaphore;
1150
1151 mQueueSubmit(mGraphicsQueue, 1, &submitInfo, VK_NULL_HANDLE);
1152
1153 VkSemaphoreGetFdInfoKHR getFdInfo;
1154 getFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
1155 getFdInfo.pNext = nullptr;
1156 getFdInfo.semaphore = semaphore;
1157 getFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
1158
1159 int fenceFd = 0;
1160
1161 err = mGetSemaphoreFdKHR(mDevice, &getFdInfo, &fenceFd);
1162 if (VK_SUCCESS != err) {
1163 ALOGE("VulkanManager::createReleaseFence: Failed to get semaphore Fd");
1164 return INVALID_OPERATION;
1165 }
1166 nativeFence = new Fence(fenceFd);
1167
1168 // Exporting a semaphore with copy transference via vkGetSemahporeFdKHR, has the same effect of
1169 // destroying the semaphore and creating a new one with the same handle, and the payloads
1170 // ownership is move to the Fd we created. Thus the semahpore is in a state that we can delete
1171 // it and we don't need to wait on the command buffer we submitted to finish.
1172 mDestroySemaphore(mDevice, semaphore, nullptr);
1173
Stan Iliev564ca3e2018-09-04 22:00:00 +00001174 return OK;
1175}
1176
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001177} /* namespace renderthread */
1178} /* namespace uirenderer */
1179} /* namespace android */