blob: ce2dddc6904918d60c0a35b1154206fc3ffc28a5 [file] [log] [blame]
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001/*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "VulkanManager.h"
18
Stan Iliev305e13a2018-11-13 11:14:48 -050019#include <gui/Surface.h>
20
Greg Danielcd558522016-11-17 13:31:40 -050021#include "Properties.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050022#include "RenderThread.h"
Greg Daniel45ec62b2017-01-04 14:27:00 -050023#include "renderstate/RenderState.h"
Ben Wagnereec27d52017-01-11 15:32:07 -050024#include "utils/FatVector.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050025
Greg Danielac2d2322017-07-12 11:30:15 -040026#include <GrBackendSurface.h>
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050027#include <GrContext.h>
28#include <GrTypes.h>
Greg Daniela227dbb2018-08-20 09:19:48 -040029#include <GrTypes.h>
30#include <vk/GrVkExtensions.h>
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050031#include <vk/GrVkTypes.h>
32
33namespace android {
34namespace uirenderer {
35namespace renderthread {
36
Bo Liu7b8c1eb2019-01-08 20:17:55 -080037static void free_features_extensions_structs(const VkPhysicalDeviceFeatures2& features) {
38 // All Vulkan structs that could be part of the features chain will start with the
39 // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
40 // so we can get access to the pNext for the next struct.
41 struct CommonVulkanHeader {
42 VkStructureType sType;
43 void* pNext;
44 };
45
46 void* pNext = features.pNext;
47 while (pNext) {
48 void* current = pNext;
49 pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
50 free(current);
51 }
52}
53
// Bind a Vulkan entry point to the matching m<Name> member function pointer:
//  - GET_PROC      resolves global (instance-independent) procs,
//  - GET_INST_PROC resolves instance-level procs from mInstance,
//  - GET_DEV_PROC  resolves device-level procs from mDevice.
#define GET_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(VK_NULL_HANDLE, "vk" #F)
#define GET_INST_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(mInstance, "vk" #F)
#define GET_DEV_PROC(F) m##F = (PFN_vk##F)vkGetDeviceProcAddr(mDevice, "vk" #F)
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050057
// Tears down every Vulkan object owned by the manager, in dependency order
// (command pool before device, device before instance). Safe to call on a
// partially-initialized manager: each handle is checked against VK_NULL_HANDLE.
void VulkanManager::destroy() {
    // We don't need to explicitly free the command buffer since it automatically gets freed when we
    // delete the VkCommandPool below.
    mDummyCB = VK_NULL_HANDLE;

    if (VK_NULL_HANDLE != mCommandPool) {
        mDestroyCommandPool(mDevice, mCommandPool, nullptr);
        mCommandPool = VK_NULL_HANDLE;
    }

    if (mDevice != VK_NULL_HANDLE) {
        // Drain all queues before destroying the device so no submitted work is in flight.
        mDeviceWaitIdle(mDevice);
        mDestroyDevice(mDevice, nullptr);
    }

    if (mInstance != VK_NULL_HANDLE) {
        mDestroyInstance(mInstance, nullptr);
    }

    mGraphicsQueue = VK_NULL_HANDLE;
    mPresentQueue = VK_NULL_HANDLE;
    mDevice = VK_NULL_HANDLE;
    mPhysicalDevice = VK_NULL_HANDLE;
    mInstance = VK_NULL_HANDLE;
    // The *Owner vectors hold the VkExtensionProperties storage that the name lists
    // point into (see setupDevice), so both must be cleared together.
    mInstanceExtensionsOwner.clear();
    mInstanceExtensions.clear();
    mDeviceExtensionsOwner.clear();
    mDeviceExtensions.clear();
    // Release the malloc'd feature structs chained onto pNext by setupDevice.
    free_features_extensions_structs(mPhysicalDeviceFeatures2);
    mPhysicalDeviceFeatures2 = {};
}
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050089
// Creates the VkInstance/VkDevice pair used for all HWUI rendering and resolves every
// Vulkan entry point this class calls (the m* function-pointer members). On return it
// has also populated:
//   - grExtensions: instance + device extension info later handed to Skia,
//   - features: the physical-device feature chain (extension feature structs are
//     malloc'd here and released later by free_features_extensions_structs()).
// Every failure is fatal (LOG_ALWAYS_FATAL_IF) — HWUI cannot run without Vulkan.
void VulkanManager::setupDevice(GrVkExtensions& grExtensions, VkPhysicalDeviceFeatures2& features) {
    VkResult err;

    constexpr VkApplicationInfo app_info = {
        VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
        nullptr,                            // pNext
        "android framework",                // pApplicationName
        0,                                  // applicationVersion
        "android framework",                // pEngineName
        0,                                  // engineVersion
        mAPIVersion,                        // apiVersion
    };

    {
        // Enumerate and enable every available instance extension. The property structs
        // stay alive in mInstanceExtensionsOwner because mInstanceExtensions stores raw
        // pointers into their extensionName arrays.
        GET_PROC(EnumerateInstanceExtensionProperties);

        uint32_t extensionCount = 0;
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        mInstanceExtensionsOwner.resize(extensionCount);
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount,
                                                    mInstanceExtensionsOwner.data());
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        bool hasKHRSurfaceExtension = false;
        bool hasKHRAndroidSurfaceExtension = false;
        for (const VkExtensionProperties& extension : mInstanceExtensionsOwner) {
            mInstanceExtensions.push_back(extension.extensionName);
            if (!strcmp(extension.extensionName, VK_KHR_SURFACE_EXTENSION_NAME)) {
                hasKHRSurfaceExtension = true;
            }
            if (!strcmp(extension.extensionName, VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
                hasKHRAndroidSurfaceExtension = true;
            }
        }
        // Both surface extensions are required to present to an ANativeWindow.
        LOG_ALWAYS_FATAL_IF(!hasKHRSurfaceExtension || !hasKHRAndroidSurfaceExtension);
    }

    const VkInstanceCreateInfo instance_create = {
        VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,  // sType
        nullptr,                                 // pNext
        0,                                       // flags
        &app_info,                               // pApplicationInfo
        0,                                       // enabledLayerNameCount
        nullptr,                                 // ppEnabledLayerNames
        (uint32_t) mInstanceExtensions.size(),   // enabledExtensionNameCount
        mInstanceExtensions.data(),              // ppEnabledExtensionNames
    };

    GET_PROC(CreateInstance);
    err = mCreateInstance(&instance_create, nullptr, &mInstance);
    LOG_ALWAYS_FATAL_IF(err < 0);

    // Instance-level entry points can only be resolved once mInstance exists.
    GET_INST_PROC(DestroyInstance);
    GET_INST_PROC(EnumeratePhysicalDevices);
    GET_INST_PROC(GetPhysicalDeviceProperties);
    GET_INST_PROC(GetPhysicalDeviceQueueFamilyProperties);
    GET_INST_PROC(GetPhysicalDeviceFeatures2);
    GET_INST_PROC(CreateDevice);
    GET_INST_PROC(EnumerateDeviceExtensionProperties);
    GET_INST_PROC(CreateAndroidSurfaceKHR);
    GET_INST_PROC(DestroySurfaceKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfaceSupportKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfacePresentModesKHR);

    uint32_t gpuCount;
    LOG_ALWAYS_FATAL_IF(mEnumeratePhysicalDevices(mInstance, &gpuCount, nullptr));
    LOG_ALWAYS_FATAL_IF(!gpuCount);
    // Just returning the first physical device instead of getting the whole array. Since there
    // should only be one device on android.
    gpuCount = 1;
    err = mEnumeratePhysicalDevices(mInstance, &gpuCount, &mPhysicalDevice);
    // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
    LOG_ALWAYS_FATAL_IF(err && VK_INCOMPLETE != err);

    // The rest of this setup (GetPhysicalDeviceFeatures2, feature chains) requires Vulkan 1.1.
    VkPhysicalDeviceProperties physDeviceProperties;
    mGetPhysicalDeviceProperties(mPhysicalDevice, &physDeviceProperties);
    LOG_ALWAYS_FATAL_IF(physDeviceProperties.apiVersion < VK_MAKE_VERSION(1, 1, 0));

    // query to get the initial queue props size
    uint32_t queueCount;
    mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, nullptr);
    LOG_ALWAYS_FATAL_IF(!queueCount);

    // now get the actual queue props
    std::unique_ptr<VkQueueFamilyProperties[]> queueProps(new VkQueueFamilyProperties[queueCount]);
    mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, queueProps.get());

    // iterate to find the first queue family that supports graphics
    mGraphicsQueueIndex = queueCount;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            mGraphicsQueueIndex = i;
            break;
        }
    }
    LOG_ALWAYS_FATAL_IF(mGraphicsQueueIndex == queueCount);

    // All physical devices and queue families on Android must be capable of
    // presentation with any native window. So just use the first one.
    mPresentQueueIndex = 0;

    {
        // Enumerate and enable every available device extension; as above, the name
        // pointers in mDeviceExtensions point into mDeviceExtensionsOwner storage.
        uint32_t extensionCount = 0;
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                                                  nullptr);
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        mDeviceExtensionsOwner.resize(extensionCount);
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                                                  mDeviceExtensionsOwner.data());
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        bool hasKHRSwapchainExtension = false;
        for (const VkExtensionProperties& extension : mDeviceExtensionsOwner) {
            mDeviceExtensions.push_back(extension.extensionName);
            if (!strcmp(extension.extensionName, VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
                hasKHRSwapchainExtension = true;
            }
        }
        LOG_ALWAYS_FATAL_IF(!hasKHRSwapchainExtension);
    }

    // Resolver handed to Skia's GrVkExtensions: prefer device-level lookup when a
    // device is available, otherwise fall back to instance-level lookup.
    auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return vkGetDeviceProcAddr(device, proc_name);
        }
        return vkGetInstanceProcAddr(instance, proc_name);
    };

    grExtensions.init(getProc, mInstance, mPhysicalDevice, mInstanceExtensions.size(),
            mInstanceExtensions.data(), mDeviceExtensions.size(), mDeviceExtensions.data());

    // External semaphore FDs are required (used via ImportSemaphoreFdKHR/GetSemaphoreFdKHR below).
    LOG_ALWAYS_FATAL_IF(!grExtensions.hasExtension(VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME, 1));

    memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
    features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    features.pNext = nullptr;

    // Setup all extension feature structs we may want to use. Each struct is malloc'd
    // and appended to the features.pNext chain via tailPNext; the whole chain is later
    // released by free_features_extensions_structs().
    void** tailPNext = &features.pNext;

    if (grExtensions.hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
        VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend;
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) malloc(
                sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
        LOG_ALWAYS_FATAL_IF(!blend);
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
        blend->pNext = nullptr;
        *tailPNext = blend;
        tailPNext = &blend->pNext;
    }

    VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature;
    ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*) malloc(
            sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
    LOG_ALWAYS_FATAL_IF(!ycbcrFeature);
    ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
    ycbcrFeature->pNext = nullptr;
    *tailPNext = ycbcrFeature;
    tailPNext = &ycbcrFeature->pNext;

    // query to get the physical device features
    mGetPhysicalDeviceFeatures2(mPhysicalDevice, &features);
    // this looks like it would slow things down,
    // and we can't depend on it on all platforms
    features.features.robustBufferAccess = VK_FALSE;

    float queuePriorities[1] = { 0.0 };

    void* queueNextPtr = nullptr;

    VkDeviceQueueGlobalPriorityCreateInfoEXT queuePriorityCreateInfo;

    // If a context priority was requested and the driver supports EXT_global_priority,
    // chain the priority request onto the queue create info.
    if (Properties::contextPriority != 0
            && grExtensions.hasExtension(VK_EXT_GLOBAL_PRIORITY_EXTENSION_NAME, 2)) {
        memset(&queuePriorityCreateInfo, 0, sizeof(VkDeviceQueueGlobalPriorityCreateInfoEXT));
        queuePriorityCreateInfo.sType =
                VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT;
        queuePriorityCreateInfo.pNext = nullptr;
        switch (Properties::contextPriority) {
            case EGL_CONTEXT_PRIORITY_LOW_IMG:
                queuePriorityCreateInfo.globalPriority = VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT;
                break;
            case EGL_CONTEXT_PRIORITY_MEDIUM_IMG:
                queuePriorityCreateInfo.globalPriority = VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT;
                break;
            case EGL_CONTEXT_PRIORITY_HIGH_IMG:
                queuePriorityCreateInfo.globalPriority = VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT;
                break;
            default:
                LOG_ALWAYS_FATAL("Unsupported context priority");
        }
        queueNextPtr = &queuePriorityCreateInfo;
    }

    const VkDeviceQueueCreateInfo queueInfo[2] = {
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            queueNextPtr,                               // pNext
            0,                                          // VkDeviceQueueCreateFlags
            mGraphicsQueueIndex,                        // queueFamilyIndex
            1,                                          // queueCount
            queuePriorities,                            // pQueuePriorities
        },
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            queueNextPtr,                               // pNext
            0,                                          // VkDeviceQueueCreateFlags
            mPresentQueueIndex,                         // queueFamilyIndex
            1,                                          // queueCount
            queuePriorities,                            // pQueuePriorities
        }
    };
    // Only request a second queue when present and graphics use different families.
    uint32_t queueInfoCount = (mPresentQueueIndex != mGraphicsQueueIndex) ? 2 : 1;

    const VkDeviceCreateInfo deviceInfo = {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,  // sType
        &features,                             // pNext
        0,                                     // VkDeviceCreateFlags
        queueInfoCount,                        // queueCreateInfoCount
        queueInfo,                             // pQueueCreateInfos
        0,                                     // layerCount
        nullptr,                               // ppEnabledLayerNames
        (uint32_t) mDeviceExtensions.size(),   // extensionCount
        mDeviceExtensions.data(),              // ppEnabledExtensionNames
        nullptr,                               // ppEnabledFeatures
    };

    LOG_ALWAYS_FATAL_IF(mCreateDevice(mPhysicalDevice, &deviceInfo, nullptr, &mDevice));

    // Device-level entry points can only be resolved once mDevice exists.
    GET_DEV_PROC(GetDeviceQueue);
    GET_DEV_PROC(DeviceWaitIdle);
    GET_DEV_PROC(DestroyDevice);
    GET_DEV_PROC(CreateSwapchainKHR);
    GET_DEV_PROC(DestroySwapchainKHR);
    GET_DEV_PROC(GetSwapchainImagesKHR);
    GET_DEV_PROC(AcquireNextImageKHR);
    GET_DEV_PROC(QueuePresentKHR);
    GET_DEV_PROC(CreateCommandPool);
    GET_DEV_PROC(DestroyCommandPool);
    GET_DEV_PROC(AllocateCommandBuffers);
    GET_DEV_PROC(FreeCommandBuffers);
    GET_DEV_PROC(ResetCommandBuffer);
    GET_DEV_PROC(BeginCommandBuffer);
    GET_DEV_PROC(EndCommandBuffer);
    GET_DEV_PROC(CmdPipelineBarrier);
    GET_DEV_PROC(GetDeviceQueue); // NOTE(review): duplicate of the first GET_DEV_PROC above; harmless re-resolution.
    GET_DEV_PROC(QueueSubmit);
    GET_DEV_PROC(QueueWaitIdle);
    GET_DEV_PROC(DeviceWaitIdle); // NOTE(review): duplicate; harmless re-resolution.
    GET_DEV_PROC(CreateSemaphore);
    GET_DEV_PROC(DestroySemaphore);
    GET_DEV_PROC(ImportSemaphoreFdKHR);
    GET_DEV_PROC(GetSemaphoreFdKHR);
    GET_DEV_PROC(CreateFence);
    GET_DEV_PROC(DestroyFence);
    GET_DEV_PROC(WaitForFences);
    GET_DEV_PROC(ResetFences);
}
349
// One-time initialization of the Vulkan backend: verifies the loader supports
// Vulkan 1.1, creates the instance/device via setupDevice(), and sets up the
// command pool, dummy command buffer, and graphics/present queues. Idempotent —
// returns immediately if a device already exists.
void VulkanManager::initialize() {
    if (mDevice != VK_NULL_HANDLE) {
        return;
    }

    // vkEnumerateInstanceVersion only exists on Vulkan 1.1+ loaders; both calls are fatal on failure.
    GET_PROC(EnumerateInstanceVersion);
    uint32_t instanceVersion;
    LOG_ALWAYS_FATAL_IF(mEnumerateInstanceVersion(&instanceVersion));
    LOG_ALWAYS_FATAL_IF(instanceVersion < VK_MAKE_VERSION(1, 1, 0));

    this->setupDevice(mExtensions, mPhysicalDeviceFeatures2);

    mGetDeviceQueue(mDevice, mGraphicsQueueIndex, 0, &mGraphicsQueue);

    // create the command pool for the command buffers
    if (VK_NULL_HANDLE == mCommandPool) {
        VkCommandPoolCreateInfo commandPoolInfo;
        memset(&commandPoolInfo, 0, sizeof(VkCommandPoolCreateInfo));
        commandPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
        // this needs to be on the render queue
        commandPoolInfo.queueFamilyIndex = mGraphicsQueueIndex;
        commandPoolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
        SkDEBUGCODE(VkResult res =) mCreateCommandPool(mDevice, &commandPoolInfo, nullptr,
                                                       &mCommandPool);
        SkASSERT(VK_SUCCESS == res);
    }
    LOG_ALWAYS_FATAL_IF(mCommandPool == VK_NULL_HANDLE);

    if (!setupDummyCommandBuffer()) {
        this->destroy();
        // Pass through will crash on next line.
    }
    LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);

    mGetDeviceQueue(mDevice, mPresentQueueIndex, 0, &mPresentQueue);

    // Buffer-age-based partial updates are only enabled when both properties allow it.
    if (Properties::enablePartialUpdates && Properties::useBufferAge) {
        mSwapBehavior = SwapBehavior::BufferAge;
    }
}
390
Stan Iliev898123b2019-02-14 14:57:44 -0500391sk_sp<GrContext> VulkanManager::createContext(const GrContextOptions& options) {
Stan Iliev981afe72019-02-13 14:24:33 -0500392 auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
393 if (device != VK_NULL_HANDLE) {
394 return vkGetDeviceProcAddr(device, proc_name);
395 }
396 return vkGetInstanceProcAddr(instance, proc_name);
397 };
398
399 GrVkBackendContext backendContext;
400 backendContext.fInstance = mInstance;
401 backendContext.fPhysicalDevice = mPhysicalDevice;
402 backendContext.fDevice = mDevice;
403 backendContext.fQueue = mGraphicsQueue;
404 backendContext.fGraphicsQueueIndex = mGraphicsQueueIndex;
405 backendContext.fMaxAPIVersion = mAPIVersion;
406 backendContext.fVkExtensions = &mExtensions;
407 backendContext.fDeviceFeatures2 = &mPhysicalDeviceFeatures2;
408 backendContext.fGetProc = std::move(getProc);
409
410 return GrContext::MakeVulkan(backendContext, options);
411}
412
// Packages the manager's Vulkan handles and enabled-extension lists into a
// VkFunctorInitParams value. The extension-name pointers and the features
// pointer reference storage owned by this VulkanManager (mInstanceExtensions,
// mDeviceExtensions, mPhysicalDeviceFeatures2), so they are only valid while
// the manager is alive and not destroy()ed.
VkFunctorInitParams VulkanManager::getVkFunctorInitParams() const {
    return VkFunctorInitParams{
            .instance = mInstance,
            .physical_device = mPhysicalDevice,
            .device = mDevice,
            .queue = mGraphicsQueue,
            .graphics_queue_index = mGraphicsQueueIndex,
            .api_version = mAPIVersion,
            .enabled_instance_extension_names = mInstanceExtensions.data(),
            .enabled_instance_extension_names_length =
                    static_cast<uint32_t>(mInstanceExtensions.size()),
            .enabled_device_extension_names = mDeviceExtensions.data(),
            .enabled_device_extension_names_length =
                    static_cast<uint32_t>(mDeviceExtensions.size()),
            .device_features_2 = &mPhysicalDeviceFeatures2,
    };
}
430
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500431// Returns the next BackbufferInfo to use for the next draw. The function will make sure all
432// previous uses have finished before returning.
433VulkanSurface::BackbufferInfo* VulkanManager::getAvailableBackbuffer(VulkanSurface* surface) {
434 SkASSERT(surface->mBackbuffers);
435
436 ++surface->mCurrentBackbufferIndex;
437 if (surface->mCurrentBackbufferIndex > surface->mImageCount) {
438 surface->mCurrentBackbufferIndex = 0;
439 }
440
John Reck1bcacfd2017-11-03 10:12:19 -0700441 VulkanSurface::BackbufferInfo* backbuffer =
442 surface->mBackbuffers + surface->mCurrentBackbufferIndex;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500443
444 // Before we reuse a backbuffer, make sure its fences have all signaled so that we can safely
445 // reuse its commands buffers.
Greg Daniel2ff202712018-06-14 11:50:10 -0400446 VkResult res = mWaitForFences(mDevice, 2, backbuffer->mUsageFences, true, UINT64_MAX);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500447 if (res != VK_SUCCESS) {
448 return nullptr;
449 }
450
451 return backbuffer;
452}
453
// Maps a swapchain pre-transform (the rotation/mirror the presentation engine will
// apply) to the SkMatrix HWUI must pre-apply so rendered content appears upright.
// width/height are the window dimensions used to re-anchor rotated/mirrored content
// at the origin. Any unrecognized transform bit is fatal.
static SkMatrix getPreTransformMatrix(int width, int height,
                                      VkSurfaceTransformFlagBitsKHR transform) {
    switch (transform) {
        case VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR:
            return SkMatrix::I();
        case VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR:
            return SkMatrix::MakeAll(0, -1, height, 1, 0, 0, 0, 0, 1);
        case VK_SURFACE_TRANSFORM_ROTATE_180_BIT_KHR:
            return SkMatrix::MakeAll(-1, 0, width, 0, -1, height, 0, 0, 1);
        case VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR:
            return SkMatrix::MakeAll(0, 1, 0, -1, 0, width, 0, 0, 1);
        case VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_BIT_KHR:
            return SkMatrix::MakeAll(-1, 0, width, 0, 1, 0, 0, 0, 1);
        case VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR:
            return SkMatrix::MakeAll(0, -1, height, -1, 0, width, 0, 0, 1);
        case VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_180_BIT_KHR:
            return SkMatrix::MakeAll(1, 0, 0, 0, -1, height, 0, 0, 1);
        case VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR:
            return SkMatrix::MakeAll(0, 1, 0, 1, 0, 0, 0, 0, 1);
        default:
            LOG_ALWAYS_FATAL("Unsupported pre transform of swapchain.");
    }
    // Unreachable: the default case above is fatal; kept to satisfy the compiler.
    return SkMatrix::I();
}
478
479
// Acquires the next swapchain image for drawing and returns the SkSurface wrapping it.
// Steps: (1) recreate the VulkanSurface if the ANativeWindow was resized, (2) pick an
// idle backbuffer slot and acquire a swapchain image (recreating the swapchain once on
// OUT_OF_DATE/SUBOPTIMAL), (3) submit a pipeline barrier transitioning the image to
// COLOR_ATTACHMENT_OPTIMAL and tell Skia about the new layout. Returns nullptr on any
// unrecoverable acquire failure; *surfaceOut may be replaced when the window resized.
SkSurface* VulkanManager::getBackbufferSurface(VulkanSurface** surfaceOut) {
    // Recreate VulkanSurface, if ANativeWindow has been resized.
    VulkanSurface* surface = *surfaceOut;
    int windowWidth = 0, windowHeight = 0;
    ANativeWindow* window = surface->mNativeWindow;
    window->query(window, NATIVE_WINDOW_WIDTH, &windowWidth);
    window->query(window, NATIVE_WINDOW_HEIGHT, &windowHeight);
    if (windowWidth != surface->mWindowWidth || windowHeight != surface->mWindowHeight) {
        // Carry the old surface's configuration over to the replacement.
        ColorMode colorMode = surface->mColorMode;
        sk_sp<SkColorSpace> colorSpace = surface->mColorSpace;
        SkColorType colorType = surface->mColorType;
        GrContext* grContext = surface->mGrContext;
        destroySurface(surface);
        *surfaceOut = createSurface(window, colorMode, colorSpace, colorType, grContext);
        surface = *surfaceOut;
        if (!surface) {
            return nullptr;
        }
    }

    VulkanSurface::BackbufferInfo* backbuffer = getAvailableBackbuffer(surface);
    SkASSERT(backbuffer);

    VkResult res;

    // Un-signal both usage fences; they are re-signaled by the submits that use this slot.
    res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
    SkASSERT(VK_SUCCESS == res);

    // The acquire will signal the attached mAcquireSemaphore. We use this to know the image has
    // finished presenting and that it is safe to begin sending new commands to the returned image.
    res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
                               backbuffer->mAcquireSemaphore, VK_NULL_HANDLE,
                               &backbuffer->mImageIndex);

    if (VK_ERROR_SURFACE_LOST_KHR == res) {
        // need to figure out how to create a new vkSurface without the platformData*
        // maybe use attach somehow? but need a Window
        return nullptr;
    }
    if (VK_ERROR_OUT_OF_DATE_KHR == res || VK_SUBOPTIMAL_KHR == res) {
        // tear swapchain down and try again (a single retry; any further failure bails out)
        if (!createSwapchain(surface)) {
            return nullptr;
        }
        backbuffer = getAvailableBackbuffer(surface);
        res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
        SkASSERT(VK_SUCCESS == res);

        // acquire the image
        res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
                                   backbuffer->mAcquireSemaphore, VK_NULL_HANDLE,
                                   &backbuffer->mImageIndex);

        if (VK_SUCCESS != res) {
            return nullptr;
        }
    }

    // set up layout transfer from initial to color attachment
    VkImageLayout layout = surface->mImageInfos[backbuffer->mImageIndex].mImageLayout;
    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout || VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout);
    VkPipelineStageFlags srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkAccessFlags srcAccessMask = 0;
    VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
                                  VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;

    // The barrier also transfers queue-family ownership from the present queue back
    // to the graphics queue when the two differ.
    VkImageMemoryBarrier imageMemoryBarrier = {
            VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,    // sType
            NULL,                                      // pNext
            srcAccessMask,                             // outputMask
            dstAccessMask,                             // inputMask
            layout,                                    // oldLayout
            VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,  // newLayout
            mPresentQueueIndex,                        // srcQueueFamilyIndex
            mGraphicsQueueIndex,                       // dstQueueFamilyIndex
            surface->mImages[backbuffer->mImageIndex], // image
            {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}    // subresourceRange
    };
    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[0], 0);

    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[0], &info);

    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[0], srcStageMask, dstStageMask, 0, 0,
                        nullptr, 0, nullptr, 1, &imageMemoryBarrier);

    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[0]);

    VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 1;
    // Wait to make sure acquire semaphore set above has signaled.
    submitInfo.pWaitSemaphores = &backbuffer->mAcquireSemaphore;
    submitInfo.pWaitDstStageMask = &waitDstStageFlags;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[0];
    submitInfo.signalSemaphoreCount = 0;

    // Attach first fence to submission here so we can track when the command buffer finishes.
    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[0]);

    // We need to notify Skia that we changed the layout of the wrapped VkImage
    sk_sp<SkSurface> skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface;
    GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
            SkSurface::kFlushRead_BackendHandleAccess);
    if (!backendRT.isValid()) {
        SkASSERT(backendRT.isValid());
        return nullptr;
    }
    backendRT.setVkImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);

    // Refresh the pre-rotation matrix in case the swapchain transform changed.
    surface->mPreTransform = getPreTransformMatrix(surface->windowWidth(),
                                                   surface->windowHeight(),
                                                   surface->mTransform);

    surface->mBackbuffer = std::move(skSurface);
    return surface->mBackbuffer.get();
}
605
606void VulkanManager::destroyBuffers(VulkanSurface* surface) {
607 if (surface->mBackbuffers) {
608 for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400609 mWaitForFences(mDevice, 2, surface->mBackbuffers[i].mUsageFences, true, UINT64_MAX);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500610 surface->mBackbuffers[i].mImageIndex = -1;
Greg Daniel2ff202712018-06-14 11:50:10 -0400611 mDestroySemaphore(mDevice, surface->mBackbuffers[i].mAcquireSemaphore, nullptr);
612 mDestroySemaphore(mDevice, surface->mBackbuffers[i].mRenderSemaphore, nullptr);
613 mFreeCommandBuffers(mDevice, mCommandPool, 2,
614 surface->mBackbuffers[i].mTransitionCmdBuffers);
615 mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[0], 0);
616 mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[1], 0);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500617 }
618 }
619
620 delete[] surface->mBackbuffers;
621 surface->mBackbuffers = nullptr;
Greg Danielcd558522016-11-17 13:31:40 -0500622 delete[] surface->mImageInfos;
623 surface->mImageInfos = nullptr;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500624 delete[] surface->mImages;
625 surface->mImages = nullptr;
626}
627
628void VulkanManager::destroySurface(VulkanSurface* surface) {
629 // Make sure all submit commands have finished before starting to destroy objects.
630 if (VK_NULL_HANDLE != mPresentQueue) {
631 mQueueWaitIdle(mPresentQueue);
632 }
Greg Daniel2ff202712018-06-14 11:50:10 -0400633 mDeviceWaitIdle(mDevice);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500634
635 destroyBuffers(surface);
636
637 if (VK_NULL_HANDLE != surface->mSwapchain) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400638 mDestroySwapchainKHR(mDevice, surface->mSwapchain, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500639 surface->mSwapchain = VK_NULL_HANDLE;
640 }
641
642 if (VK_NULL_HANDLE != surface->mVkSurface) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400643 mDestroySurfaceKHR(mInstance, surface->mVkSurface, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500644 surface->mVkSurface = VK_NULL_HANDLE;
645 }
646 delete surface;
647}
648
// Queries the swapchain's images, wraps each one in an SkSurface so Skia can
// render to it, and allocates the per-frame backbuffer bookkeeping (semaphores,
// fences, and layout-transition command buffers). Called after (re)creating the
// swapchain in createSwapchain().
void VulkanManager::createBuffers(VulkanSurface* surface, VkFormat format, VkExtent2D extent) {
    // Standard two-call pattern: first get the count, then fetch the images.
    mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, nullptr);
    SkASSERT(surface->mImageCount);
    surface->mImages = new VkImage[surface->mImageCount];
    mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, surface->mImages);

    SkSurfaceProps props(0, kUnknown_SkPixelGeometry);

    // set up initial image layouts and create surfaces
    surface->mImageInfos = new VulkanSurface::ImageInfo[surface->mImageCount];
    for (uint32_t i = 0; i < surface->mImageCount; ++i) {
        GrVkImageInfo info;
        info.fImage = surface->mImages[i];
        // Swapchain images are owned by the swapchain, not allocated by us, so
        // there is no backing GrVkAlloc.
        info.fAlloc = GrVkAlloc();
        info.fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
        info.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
        info.fFormat = format;
        info.fLevelCount = 1;

        // Third/fourth arguments (sample count, stencil bits) are zero: Skia
        // renders directly into the single-sampled swapchain image.
        GrBackendRenderTarget backendRT(extent.width, extent.height, 0, 0, info);

        VulkanSurface::ImageInfo& imageInfo = surface->mImageInfos[i];
        imageInfo.mSurface = SkSurface::MakeFromBackendRenderTarget(
                surface->mGrContext, backendRT, kTopLeft_GrSurfaceOrigin,
                surface->mColorType, surface->mColorSpace, &props);
    }

    SkASSERT(mCommandPool != VK_NULL_HANDLE);

    // set up the backbuffers
    VkSemaphoreCreateInfo semaphoreInfo;
    memset(&semaphoreInfo, 0, sizeof(VkSemaphoreCreateInfo));
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    VkCommandBufferAllocateInfo commandBuffersInfo;
    memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
    commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    commandBuffersInfo.pNext = nullptr;
    commandBuffersInfo.commandPool = mCommandPool;
    commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    // Two command buffers per backbuffer: one used on acquire, one used for the
    // present-time layout transition (see swapBuffers()).
    commandBuffersInfo.commandBufferCount = 2;
    VkFenceCreateInfo fenceInfo;
    memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
    fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    fenceInfo.pNext = nullptr;
    // Created pre-signaled so the first wait on a brand-new backbuffer returns
    // immediately.
    fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;

    // we create one additional backbuffer structure here, because we want to
    // give the command buffers they contain a chance to finish before we cycle back
    surface->mBackbuffers = new VulkanSurface::BackbufferInfo[surface->mImageCount + 1];
    for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
        // res only exists in debug builds; the SkASSERT below checks the last call.
        SkDEBUGCODE(VkResult res);
        surface->mBackbuffers[i].mImageIndex = -1;
        SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
                                            &surface->mBackbuffers[i].mAcquireSemaphore);
        SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
                                            &surface->mBackbuffers[i].mRenderSemaphore);
        SkDEBUGCODE(res =) mAllocateCommandBuffers(mDevice, &commandBuffersInfo,
                                                   surface->mBackbuffers[i].mTransitionCmdBuffers);
        SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
                                        &surface->mBackbuffers[i].mUsageFences[0]);
        SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
                                        &surface->mBackbuffers[i].mUsageFences[1]);
        SkASSERT(VK_SUCCESS == res);
    }
    // Start at the extra (mImageCount-th) backbuffer slot.
    surface->mCurrentBackbufferIndex = surface->mImageCount;
}
717
// Creates (or recreates) the VkSwapchainKHR for |surface|: negotiates surface
// capabilities, pre-transform, extent, image count, format/color space, and
// present mode, then builds the swapchain and its per-image buffers.
// Returns false on any failure; an existing old swapchain is passed as
// oldSwapchain and destroyed after the new one is created.
bool VulkanManager::createSwapchain(VulkanSurface* surface) {
    // check for capabilities
    VkSurfaceCapabilitiesKHR caps;
    VkResult res = mGetPhysicalDeviceSurfaceCapabilitiesKHR(mPhysicalDevice,
                                                            surface->mVkSurface, &caps);
    if (VK_SUCCESS != res) {
        return false;
    }

    // Two-call pattern for the supported surface formats.
    uint32_t surfaceFormatCount;
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
                                              &surfaceFormatCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    FatVector<VkSurfaceFormatKHR, 4> surfaceFormats(surfaceFormatCount);
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
                                              &surfaceFormatCount, surfaceFormats.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    // Two-call pattern for the supported present modes.
    uint32_t presentModeCount;
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
                                                   surface->mVkSurface, &presentModeCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    FatVector<VkPresentModeKHR, VK_PRESENT_MODE_RANGE_SIZE_KHR> presentModes(presentModeCount);
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
                                                   surface->mVkSurface, &presentModeCount,
                                                   presentModes.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    // We require identity-transform support as the fallback below.
    if (!SkToBool(caps.supportedTransforms & VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR)) {
        return false;
    }
    // Prefer the surface's current transform (so the display HW does the rotation)
    // unless it is INHERIT or unsupported, in which case fall back to identity.
    VkSurfaceTransformFlagBitsKHR transform;
    if (SkToBool(caps.supportedTransforms & caps.currentTransform) &&
        !SkToBool(caps.currentTransform & VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR)) {
        transform = caps.currentTransform;
    } else {
        transform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
    }

    VkExtent2D extent = caps.currentExtent;
    // clamp width; to handle currentExtent of -1 and protect us from broken hints
    if (extent.width < caps.minImageExtent.width) {
        extent.width = caps.minImageExtent.width;
    }
    SkASSERT(extent.width <= caps.maxImageExtent.width);
    // clamp height
    if (extent.height < caps.minImageExtent.height) {
        extent.height = caps.minImageExtent.height;
    }
    SkASSERT(extent.height <= caps.maxImageExtent.height);

    // For 90/270-degree pre-transforms the swapchain images are allocated in the
    // rotated orientation, so swap width and height.
    VkExtent2D swapExtent = extent;
    if (transform == VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR ||
        transform == VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR ||
        transform == VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR ||
        transform == VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR) {
        swapExtent.width = extent.height;
        swapExtent.height = extent.width;
    }

    surface->mWindowWidth = extent.width;
    surface->mWindowHeight = extent.height;

    // Triple-buffer by default, bounded by what the surface allows.
    uint32_t imageCount = std::max<uint32_t>(3, caps.minImageCount);
    if (caps.maxImageCount > 0 && imageCount > caps.maxImageCount) {
        // Application must settle for fewer images than desired:
        imageCount = caps.maxImageCount;
    }

    // Currently Skia requires the images to be color attchments and support all transfer
    // operations.
    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    SkASSERT((caps.supportedUsageFlags & usageFlags) == usageFlags);

    SkASSERT(caps.supportedCompositeAlpha &
             (VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR | VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR));
    VkCompositeAlphaFlagBitsKHR composite_alpha =
            (caps.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR)
                    ? VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR
                    : VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;

    // Pick the Vulkan format/color space that matches the Skia color type and
    // color mode configured on the surface.
    VkFormat surfaceFormat = VK_FORMAT_R8G8B8A8_UNORM;
    VkColorSpaceKHR colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
    if (surface->mColorType == SkColorType::kRGBA_F16_SkColorType) {
        surfaceFormat = VK_FORMAT_R16G16B16A16_SFLOAT;
    }

    if (surface->mColorMode == ColorMode::WideColorGamut) {
        // Map the surface's gamut to a Vulkan color space; only sRGB and
        // Display-P3 primaries are supported in wide-gamut mode.
        skcms_Matrix3x3 surfaceGamut;
        LOG_ALWAYS_FATAL_IF(!surface->mColorSpace->toXYZD50(&surfaceGamut),
                            "Could not get gamut matrix from color space");
        if (memcmp(&surfaceGamut, &SkNamedGamut::kSRGB, sizeof(surfaceGamut)) == 0) {
            colorSpace = VK_COLOR_SPACE_EXTENDED_SRGB_NONLINEAR_EXT;
        } else if (memcmp(&surfaceGamut, &SkNamedGamut::kDCIP3, sizeof(surfaceGamut)) == 0) {
            colorSpace = VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT;
        } else {
            LOG_ALWAYS_FATAL("Unreachable: unsupported wide color space.");
        }
    }

    // The chosen format/color-space pair must be one the surface actually supports.
    bool foundSurfaceFormat = false;
    for (uint32_t i = 0; i < surfaceFormatCount; ++i) {
        if (surfaceFormat == surfaceFormats[i].format
                && colorSpace == surfaceFormats[i].colorSpace) {
            foundSurfaceFormat = true;
            break;
        }
    }

    if (!foundSurfaceFormat) {
        return false;
    }

    // FIFO is always available and will match what we do on GL so just pick that here.
    VkPresentModeKHR mode = VK_PRESENT_MODE_FIFO_KHR;

    VkSwapchainCreateInfoKHR swapchainCreateInfo;
    memset(&swapchainCreateInfo, 0, sizeof(VkSwapchainCreateInfoKHR));
    swapchainCreateInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
    swapchainCreateInfo.surface = surface->mVkSurface;
    swapchainCreateInfo.minImageCount = imageCount;
    swapchainCreateInfo.imageFormat = surfaceFormat;
    swapchainCreateInfo.imageColorSpace = colorSpace;
    swapchainCreateInfo.imageExtent = swapExtent;
    swapchainCreateInfo.imageArrayLayers = 1;
    swapchainCreateInfo.imageUsage = usageFlags;

    // If graphics and present live on different queue families the images must be
    // shared concurrently between them; otherwise exclusive access is cheaper.
    uint32_t queueFamilies[] = {mGraphicsQueueIndex, mPresentQueueIndex};
    if (mGraphicsQueueIndex != mPresentQueueIndex) {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
        swapchainCreateInfo.queueFamilyIndexCount = 2;
        swapchainCreateInfo.pQueueFamilyIndices = queueFamilies;
    } else {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
        swapchainCreateInfo.queueFamilyIndexCount = 0;
        swapchainCreateInfo.pQueueFamilyIndices = nullptr;
    }

    swapchainCreateInfo.preTransform = transform;
    swapchainCreateInfo.compositeAlpha = composite_alpha;
    swapchainCreateInfo.presentMode = mode;
    swapchainCreateInfo.clipped = true;
    swapchainCreateInfo.oldSwapchain = surface->mSwapchain;

    res = mCreateSwapchainKHR(mDevice, &swapchainCreateInfo, nullptr, &surface->mSwapchain);
    if (VK_SUCCESS != res) {
        return false;
    }

    surface->mTransform = transform;

    // destroy the old swapchain
    if (swapchainCreateInfo.oldSwapchain != VK_NULL_HANDLE) {
        mDeviceWaitIdle(mDevice);

        destroyBuffers(surface);

        mDestroySwapchainKHR(mDevice, swapchainCreateInfo.oldSwapchain, nullptr);
    }

    createBuffers(surface, surfaceFormat, swapExtent);

    // The window content is not updated (frozen) until a buffer of the window size is received.
    // This prevents temporary stretching of the window after it is resized, but before the first
    // buffer with new size is enqueued.
    native_window_set_scaling_mode(surface->mNativeWindow, NATIVE_WINDOW_SCALING_MODE_FREEZE);

    return true;
}
899
Stan Iliev987a80c2018-12-04 10:07:21 -0500900VulkanSurface* VulkanManager::createSurface(ANativeWindow* window, ColorMode colorMode,
Peiyong Lin3bff1352018-12-11 07:56:07 -0800901 sk_sp<SkColorSpace> surfaceColorSpace,
Stan Iliev981afe72019-02-13 14:24:33 -0500902 SkColorType surfaceColorType,
903 GrContext* grContext) {
904 LOG_ALWAYS_FATAL_IF(!hasVkContext(), "Not initialized");
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500905 if (!window) {
906 return nullptr;
907 }
908
Peiyong Lin3bff1352018-12-11 07:56:07 -0800909 VulkanSurface* surface = new VulkanSurface(colorMode, window, surfaceColorSpace,
Stan Iliev981afe72019-02-13 14:24:33 -0500910 surfaceColorType, grContext);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500911
912 VkAndroidSurfaceCreateInfoKHR surfaceCreateInfo;
913 memset(&surfaceCreateInfo, 0, sizeof(VkAndroidSurfaceCreateInfoKHR));
914 surfaceCreateInfo.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
915 surfaceCreateInfo.pNext = nullptr;
916 surfaceCreateInfo.flags = 0;
917 surfaceCreateInfo.window = window;
918
Greg Daniel2ff202712018-06-14 11:50:10 -0400919 VkResult res = mCreateAndroidSurfaceKHR(mInstance, &surfaceCreateInfo, nullptr,
920 &surface->mVkSurface);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500921 if (VK_SUCCESS != res) {
922 delete surface;
923 return nullptr;
924 }
925
John Reck1bcacfd2017-11-03 10:12:19 -0700926 SkDEBUGCODE(VkBool32 supported; res = mGetPhysicalDeviceSurfaceSupportKHR(
Greg Daniel2ff202712018-06-14 11:50:10 -0400927 mPhysicalDevice, mPresentQueueIndex, surface->mVkSurface, &supported);
928 // All physical devices and queue families on Android must be capable of
929 // presentation with any native window.
930 SkASSERT(VK_SUCCESS == res && supported););
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500931
932 if (!createSwapchain(surface)) {
933 destroySurface(surface);
934 return nullptr;
935 }
936
937 return surface;
938}
939
940// Helper to know which src stage flags we need to set when transitioning to the present layout
Greg Daniel8a2a7542018-10-04 13:46:55 -0400941static VkPipelineStageFlags layoutToPipelineSrcStageFlags(const VkImageLayout layout) {
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500942 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
943 return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
944 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
945 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
946 return VK_PIPELINE_STAGE_TRANSFER_BIT;
Greg Daniel8a2a7542018-10-04 13:46:55 -0400947 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
948 return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
949 } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
950 VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout) {
951 return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
952 } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
953 return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500954 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
955 return VK_PIPELINE_STAGE_HOST_BIT;
956 }
957
958 SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
959 return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
960}
961
962// Helper to know which src access mask we need to set when transitioning to the present layout
963static VkAccessFlags layoutToSrcAccessMask(const VkImageLayout layout) {
964 VkAccessFlags flags = 0;
965 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
966 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
John Reck1bcacfd2017-11-03 10:12:19 -0700967 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT |
968 VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_HOST_WRITE_BIT |
969 VK_ACCESS_HOST_READ_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500970 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
971 flags = VK_ACCESS_HOST_WRITE_BIT;
972 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
973 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
974 } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
975 flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
976 } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
977 flags = VK_ACCESS_TRANSFER_WRITE_BIT;
978 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
979 flags = VK_ACCESS_TRANSFER_READ_BIT;
980 } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
981 flags = VK_ACCESS_SHADER_READ_BIT;
982 }
983 return flags;
984}
985
// Presents the current backbuffer: records and submits a barrier that moves the
// swapchain image to PRESENT_SRC layout (transferring queue-family ownership to
// the present queue if needed), then queues the image for presentation and
// updates the buffer-age bookkeeping used by getAge().
void VulkanManager::swapBuffers(VulkanSurface* surface) {
    if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
        ATRACE_NAME("Finishing GPU work");
        mDeviceWaitIdle(mDevice);
    }

    SkASSERT(surface->mBackbuffers);
    VulkanSurface::BackbufferInfo* backbuffer =
            surface->mBackbuffers + surface->mCurrentBackbufferIndex;

    SkSurface* skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface.get();
    // kFlushRead access flushes pending Skia rendering so the layout we read
    // below reflects the image's actual state.
    GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
            SkSurface::kFlushRead_BackendHandleAccess);
    SkASSERT(backendRT.isValid());

    GrVkImageInfo imageInfo;
    SkAssertResult(backendRT.getVkImageInfo(&imageInfo));

    // Check to make sure we never change the actually wrapped image
    SkASSERT(imageInfo.fImage == surface->mImages[backbuffer->mImageIndex]);

    // We need to transition the image to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR and make sure that all
    // previous work is complete for before presenting. So we first add the necessary barrier here.
    VkImageLayout layout = imageInfo.fImageLayout;
    VkPipelineStageFlags srcStageMask = layoutToPipelineSrcStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = layoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = 0;

    // The barrier also hands the image from the graphics queue family to the
    // present queue family (they may be the same index).
    VkImageMemoryBarrier imageMemoryBarrier = {
            VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
            NULL, // pNext
            srcAccessMask, // outputMask
            dstAccessMask, // inputMask
            layout, // oldLayout
            VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, // newLayout
            mGraphicsQueueIndex, // srcQueueFamilyIndex
            mPresentQueueIndex, // dstQueueFamilyIndex
            surface->mImages[backbuffer->mImageIndex], // image
            {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1} // subresourceRange
    };

    // Record the transition into the second per-backbuffer command buffer
    // (the first is used on the acquire side).
    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[1], 0);
    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[1], &info);
    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[1], srcStageMask, dstStageMask, 0, 0,
                        nullptr, 0, nullptr, 1, &imageMemoryBarrier);
    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[1]);

    surface->mImageInfos[backbuffer->mImageIndex].mImageLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 0;
    submitInfo.pWaitDstStageMask = 0;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[1];
    submitInfo.signalSemaphoreCount = 1;
    // When this command buffer finishes we will signal this semaphore so that we know it is now
    // safe to present the image to the screen.
    submitInfo.pSignalSemaphores = &backbuffer->mRenderSemaphore;

    // Attach second fence to submission here so we can track when the command buffer finishes.
    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[1]);

    // Submit present operation to present queue. We use a semaphore here to make sure all rendering
    // to the image is complete and that the layout has been change to present on the graphics
    // queue.
    const VkPresentInfoKHR presentInfo = {
            VK_STRUCTURE_TYPE_PRESENT_INFO_KHR, // sType
            NULL, // pNext
            1, // waitSemaphoreCount
            &backbuffer->mRenderSemaphore, // pWaitSemaphores
            1, // swapchainCount
            &surface->mSwapchain, // pSwapchains
            &backbuffer->mImageIndex, // pImageIndices
            NULL // pResults
    };

    mQueuePresentKHR(mPresentQueue, &presentInfo);

    surface->mBackbuffer.reset();
    // Record when this image was last presented; getAge() uses this to report
    // buffer age for partial-redraw optimizations.
    surface->mImageInfos[backbuffer->mImageIndex].mLastUsed = surface->mCurrentTime;
    surface->mImageInfos[backbuffer->mImageIndex].mInvalid = false;
    surface->mCurrentTime++;
}
1077
1078int VulkanManager::getAge(VulkanSurface* surface) {
Greg Daniel74ea2012017-11-10 11:32:58 -05001079 SkASSERT(surface->mBackbuffers);
John Reck1bcacfd2017-11-03 10:12:19 -07001080 VulkanSurface::BackbufferInfo* backbuffer =
1081 surface->mBackbuffers + surface->mCurrentBackbufferIndex;
1082 if (mSwapBehavior == SwapBehavior::Discard ||
1083 surface->mImageInfos[backbuffer->mImageIndex].mInvalid) {
Greg Danielcd558522016-11-17 13:31:40 -05001084 return 0;
1085 }
1086 uint16_t lastUsed = surface->mImageInfos[backbuffer->mImageIndex].mLastUsed;
1087 return surface->mCurrentTime - lastUsed;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001088}
1089
Greg Daniel26e0dca2018-09-18 10:33:19 -04001090bool VulkanManager::setupDummyCommandBuffer() {
1091 if (mDummyCB != VK_NULL_HANDLE) {
1092 return true;
1093 }
1094
1095 VkCommandBufferAllocateInfo commandBuffersInfo;
1096 memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
1097 commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
1098 commandBuffersInfo.pNext = nullptr;
1099 commandBuffersInfo.commandPool = mCommandPool;
1100 commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
1101 commandBuffersInfo.commandBufferCount = 1;
1102
1103 VkResult err = mAllocateCommandBuffers(mDevice, &commandBuffersInfo, &mDummyCB);
1104 if (err != VK_SUCCESS) {
1105 // It is probably unnecessary to set this back to VK_NULL_HANDLE, but we set it anyways to
1106 // make sure the driver didn't set a value and then return a failure.
1107 mDummyCB = VK_NULL_HANDLE;
1108 return false;
1109 }
1110
1111 VkCommandBufferBeginInfo beginInfo;
1112 memset(&beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
1113 beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
1114 beginInfo.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
1115
1116 mBeginCommandBuffer(mDummyCB, &beginInfo);
1117 mEndCommandBuffer(mDummyCB);
1118 return true;
1119}
1120
Stan Iliev564ca3e2018-09-04 22:00:00 +00001121status_t VulkanManager::fenceWait(sp<Fence>& fence) {
Greg Daniel26e0dca2018-09-18 10:33:19 -04001122 if (!hasVkContext()) {
1123 ALOGE("VulkanManager::fenceWait: VkDevice not initialized");
1124 return INVALID_OPERATION;
1125 }
1126
Stan Iliev7a081272018-10-26 17:54:18 -04001127 // Block GPU on the fence.
1128 int fenceFd = fence->dup();
1129 if (fenceFd == -1) {
1130 ALOGE("VulkanManager::fenceWait: error dup'ing fence fd: %d", errno);
1131 return -errno;
Stan Iliev564ca3e2018-09-04 22:00:00 +00001132 }
Stan Iliev7a081272018-10-26 17:54:18 -04001133
1134 VkSemaphoreCreateInfo semaphoreInfo;
1135 semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
1136 semaphoreInfo.pNext = nullptr;
1137 semaphoreInfo.flags = 0;
1138 VkSemaphore semaphore;
1139 VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
1140 if (VK_SUCCESS != err) {
1141 ALOGE("Failed to create import semaphore, err: %d", err);
1142 return UNKNOWN_ERROR;
1143 }
1144 VkImportSemaphoreFdInfoKHR importInfo;
1145 importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
1146 importInfo.pNext = nullptr;
1147 importInfo.semaphore = semaphore;
1148 importInfo.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
1149 importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
1150 importInfo.fd = fenceFd;
1151
1152 err = mImportSemaphoreFdKHR(mDevice, &importInfo);
1153 if (VK_SUCCESS != err) {
1154 ALOGE("Failed to import semaphore, err: %d", err);
1155 return UNKNOWN_ERROR;
1156 }
1157
1158 LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);
1159
1160 VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
1161
1162 VkSubmitInfo submitInfo;
1163 memset(&submitInfo, 0, sizeof(VkSubmitInfo));
1164 submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
1165 submitInfo.waitSemaphoreCount = 1;
1166 // Wait to make sure aquire semaphore set above has signaled.
1167 submitInfo.pWaitSemaphores = &semaphore;
1168 submitInfo.pWaitDstStageMask = &waitDstStageFlags;
1169 submitInfo.commandBufferCount = 1;
1170 submitInfo.pCommandBuffers = &mDummyCB;
1171 submitInfo.signalSemaphoreCount = 0;
1172
1173 mQueueSubmit(mGraphicsQueue, 1, &submitInfo, VK_NULL_HANDLE);
1174
1175 // On Android when we import a semaphore, it is imported using temporary permanence. That
1176 // means as soon as we queue the semaphore for a wait it reverts to its previous permanent
1177 // state before importing. This means it will now be in an idle state with no pending
1178 // signal or wait operations, so it is safe to immediately delete it.
1179 mDestroySemaphore(mDevice, semaphore, nullptr);
Stan Iliev564ca3e2018-09-04 22:00:00 +00001180 return OK;
1181}
1182
// Creates a release fence for the consumer: a new exportable VkSemaphore is
// queued for signaling behind all prior graphics-queue work (via the dummy
// command buffer) and then exported as a sync fd, which is returned to the
// caller wrapped in |nativeFence|.
//
// Returns OK on success, INVALID_OPERATION on any failure.
status_t VulkanManager::createReleaseFence(sp<Fence>& nativeFence) {
    if (!hasVkContext()) {
        ALOGE("VulkanManager::createReleaseFence: VkDevice not initialized");
        return INVALID_OPERATION;
    }

    // The semaphore must be created exportable as a SYNC_FD handle.
    VkExportSemaphoreCreateInfo exportInfo;
    exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
    exportInfo.pNext = nullptr;
    exportInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    VkSemaphoreCreateInfo semaphoreInfo;
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = &exportInfo;
    semaphoreInfo.flags = 0;
    VkSemaphore semaphore;
    VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
    if (VK_SUCCESS != err) {
        ALOGE("VulkanManager::createReleaseFence: Failed to create semaphore");
        return INVALID_OPERATION;
    }

    LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);

    // Submit the empty dummy command buffer so the semaphore signals once all
    // previously submitted graphics work has completed.
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 0;
    submitInfo.pWaitSemaphores = nullptr;
    submitInfo.pWaitDstStageMask = nullptr;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &mDummyCB;
    submitInfo.signalSemaphoreCount = 1;
    submitInfo.pSignalSemaphores = &semaphore;

    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, VK_NULL_HANDLE);

    VkSemaphoreGetFdInfoKHR getFdInfo;
    getFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
    getFdInfo.pNext = nullptr;
    getFdInfo.semaphore = semaphore;
    getFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    int fenceFd = 0;

    err = mGetSemaphoreFdKHR(mDevice, &getFdInfo, &fenceFd);
    if (VK_SUCCESS != err) {
        // NOTE(review): the semaphore leaks on this path. It cannot simply be
        // destroyed here because it was queued for signaling above and may still
        // have a pending operation — cleanup would need to wait for the
        // submission to complete first. Confirm and fix in a follow-up.
        ALOGE("VulkanManager::createReleaseFence: Failed to get semaphore Fd");
        return INVALID_OPERATION;
    }
    // The Fence takes ownership of fenceFd.
    nativeFence = new Fence(fenceFd);

    // Exporting a semaphore with copy transference via vkGetSemahporeFdKHR, has the same effect of
    // destroying the semaphore and creating a new one with the same handle, and the payloads
    // ownership is move to the Fd we created. Thus the semahpore is in a state that we can delete
    // it and we don't need to wait on the command buffer we submitted to finish.
    mDestroySemaphore(mDevice, semaphore, nullptr);

    return OK;
}
1243
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001244} /* namespace renderthread */
1245} /* namespace uirenderer */
1246} /* namespace android */