blob: 90397fddf6186b3bb889ec3690b67097f286c617 [file] [log] [blame]
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001/*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "VulkanManager.h"
18
Stan Iliev305e13a2018-11-13 11:14:48 -050019#include <gui/Surface.h>
20
Greg Danielcd558522016-11-17 13:31:40 -050021#include "Properties.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050022#include "RenderThread.h"
Greg Daniel45ec62b2017-01-04 14:27:00 -050023#include "renderstate/RenderState.h"
Ben Wagnereec27d52017-01-11 15:32:07 -050024#include "utils/FatVector.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050025
Greg Danielac2d2322017-07-12 11:30:15 -040026#include <GrBackendSurface.h>
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050027#include <GrContext.h>
28#include <GrTypes.h>
Greg Daniela227dbb2018-08-20 09:19:48 -040029#include <GrTypes.h>
30#include <vk/GrVkExtensions.h>
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050031#include <vk/GrVkTypes.h>
32
33namespace android {
34namespace uirenderer {
35namespace renderthread {
36
Bo Liu7b8c1eb2019-01-08 20:17:55 -080037static void free_features_extensions_structs(const VkPhysicalDeviceFeatures2& features) {
38 // All Vulkan structs that could be part of the features chain will start with the
39 // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
40 // so we can get access to the pNext for the next struct.
41 struct CommonVulkanHeader {
42 VkStructureType sType;
43 void* pNext;
44 };
45
46 void* pNext = features.pNext;
47 while (pNext) {
48 void* current = pNext;
49 pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
50 free(current);
51 }
52}
53
// Resolve a Vulkan entry point "vkF" and store it in the matching mF member:
//   GET_PROC      - global (instance-independent) functions, looked up with a null instance
//   GET_INST_PROC - instance-level functions, looked up through mInstance
//   GET_DEV_PROC  - device-level functions, looked up through mDevice
#define GET_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(VK_NULL_HANDLE, "vk" #F)
#define GET_INST_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(mInstance, "vk" #F)
#define GET_DEV_PROC(F) m##F = (PFN_vk##F)vkGetDeviceProcAddr(mDevice, "vk" #F)
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050057
John Reck1bcacfd2017-11-03 10:12:19 -070058VulkanManager::VulkanManager(RenderThread& thread) : mRenderThread(thread) {}
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050059
// Tears down everything initialize()/setupDevice() created, in reverse
// dependency order: Skia's GrContext first, then the command pool, the
// device, and finally the instance. Null-checks make this safe to call on a
// partially-initialized manager, and resetting every handle afterwards makes
// it idempotent.
void VulkanManager::destroy() {
    // Release the GrContext before destroying the Vulkan objects it wraps.
    mRenderThread.setGrContext(nullptr);

    // We don't need to explicitly free the command buffer since it automatically gets freed when we
    // delete the VkCommandPool below.
    mDummyCB = VK_NULL_HANDLE;

    if (VK_NULL_HANDLE != mCommandPool) {
        mDestroyCommandPool(mDevice, mCommandPool, nullptr);
        mCommandPool = VK_NULL_HANDLE;
    }

    if (mDevice != VK_NULL_HANDLE) {
        // Wait for all queues to drain before the device goes away.
        mDeviceWaitIdle(mDevice);
        mDestroyDevice(mDevice, nullptr);
    }

    if (mInstance != VK_NULL_HANDLE) {
        mDestroyInstance(mInstance, nullptr);
    }

    mGraphicsQueue = VK_NULL_HANDLE;
    mPresentQueue = VK_NULL_HANDLE;
    mDevice = VK_NULL_HANDLE;
    mPhysicalDevice = VK_NULL_HANDLE;
    mInstance = VK_NULL_HANDLE;
    mInstanceExtensions.clear();
    mDeviceExtensions.clear();
    // The feature structs chained off pNext were malloc'd in setupDevice.
    free_features_extensions_structs(mPhysicalDeviceFeatures2);
    mPhysicalDeviceFeatures2 = {};
}
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050091
Stan Iliev90276c82019-02-03 18:01:02 -050092void VulkanManager::setupDevice(GrVkExtensions& grExtensions, VkPhysicalDeviceFeatures2& features) {
Greg Daniel2ff202712018-06-14 11:50:10 -040093 VkResult err;
94
95 constexpr VkApplicationInfo app_info = {
96 VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
97 nullptr, // pNext
98 "android framework", // pApplicationName
99 0, // applicationVersion
100 "android framework", // pEngineName
101 0, // engineVerison
Greg Danieleaf310e2019-01-28 16:10:32 -0500102 mAPIVersion, // apiVersion
Greg Daniel2ff202712018-06-14 11:50:10 -0400103 };
104
Greg Daniel2ff202712018-06-14 11:50:10 -0400105 {
106 GET_PROC(EnumerateInstanceExtensionProperties);
107
108 uint32_t extensionCount = 0;
109 err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
Stan Iliev90276c82019-02-03 18:01:02 -0500110 LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
Greg Daniel2ff202712018-06-14 11:50:10 -0400111 std::unique_ptr<VkExtensionProperties[]> extensions(
112 new VkExtensionProperties[extensionCount]);
113 err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions.get());
Stan Iliev90276c82019-02-03 18:01:02 -0500114 LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
Greg Daniel2ff202712018-06-14 11:50:10 -0400115 bool hasKHRSurfaceExtension = false;
116 bool hasKHRAndroidSurfaceExtension = false;
117 for (uint32_t i = 0; i < extensionCount; ++i) {
Bo Liu7b8c1eb2019-01-08 20:17:55 -0800118 mInstanceExtensions.push_back(extensions[i].extensionName);
Greg Daniel2ff202712018-06-14 11:50:10 -0400119 if (!strcmp(extensions[i].extensionName, VK_KHR_SURFACE_EXTENSION_NAME)) {
120 hasKHRSurfaceExtension = true;
121 }
122 if (!strcmp(extensions[i].extensionName,VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
123 hasKHRAndroidSurfaceExtension = true;
124 }
125 }
Stan Iliev90276c82019-02-03 18:01:02 -0500126 LOG_ALWAYS_FATAL_IF(!hasKHRSurfaceExtension || !hasKHRAndroidSurfaceExtension);
Greg Daniel2ff202712018-06-14 11:50:10 -0400127 }
128
129 const VkInstanceCreateInfo instance_create = {
130 VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
131 nullptr, // pNext
132 0, // flags
133 &app_info, // pApplicationInfo
134 0, // enabledLayerNameCount
135 nullptr, // ppEnabledLayerNames
Bo Liu7b8c1eb2019-01-08 20:17:55 -0800136 (uint32_t) mInstanceExtensions.size(), // enabledExtensionNameCount
137 mInstanceExtensions.data(), // ppEnabledExtensionNames
Greg Daniel2ff202712018-06-14 11:50:10 -0400138 };
139
140 GET_PROC(CreateInstance);
141 err = mCreateInstance(&instance_create, nullptr, &mInstance);
Stan Iliev90276c82019-02-03 18:01:02 -0500142 LOG_ALWAYS_FATAL_IF(err < 0);
Greg Daniel2ff202712018-06-14 11:50:10 -0400143
144 GET_INST_PROC(DestroyInstance);
145 GET_INST_PROC(EnumeratePhysicalDevices);
Greg Daniel96259622018-10-01 14:42:56 -0400146 GET_INST_PROC(GetPhysicalDeviceProperties);
Greg Daniel2ff202712018-06-14 11:50:10 -0400147 GET_INST_PROC(GetPhysicalDeviceQueueFamilyProperties);
Greg Daniela227dbb2018-08-20 09:19:48 -0400148 GET_INST_PROC(GetPhysicalDeviceFeatures2);
Greg Daniel2ff202712018-06-14 11:50:10 -0400149 GET_INST_PROC(CreateDevice);
150 GET_INST_PROC(EnumerateDeviceExtensionProperties);
151 GET_INST_PROC(CreateAndroidSurfaceKHR);
152 GET_INST_PROC(DestroySurfaceKHR);
153 GET_INST_PROC(GetPhysicalDeviceSurfaceSupportKHR);
154 GET_INST_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
155 GET_INST_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
156 GET_INST_PROC(GetPhysicalDeviceSurfacePresentModesKHR);
157
158 uint32_t gpuCount;
Stan Iliev90276c82019-02-03 18:01:02 -0500159 LOG_ALWAYS_FATAL_IF(mEnumeratePhysicalDevices(mInstance, &gpuCount, nullptr));
160 LOG_ALWAYS_FATAL_IF(!gpuCount);
Greg Daniel2ff202712018-06-14 11:50:10 -0400161 // Just returning the first physical device instead of getting the whole array. Since there
162 // should only be one device on android.
163 gpuCount = 1;
164 err = mEnumeratePhysicalDevices(mInstance, &gpuCount, &mPhysicalDevice);
165 // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
Stan Iliev90276c82019-02-03 18:01:02 -0500166 LOG_ALWAYS_FATAL_IF(err && VK_INCOMPLETE != err);
Greg Daniel2ff202712018-06-14 11:50:10 -0400167
Greg Daniel96259622018-10-01 14:42:56 -0400168 VkPhysicalDeviceProperties physDeviceProperties;
169 mGetPhysicalDeviceProperties(mPhysicalDevice, &physDeviceProperties);
Stan Iliev90276c82019-02-03 18:01:02 -0500170 LOG_ALWAYS_FATAL_IF(physDeviceProperties.apiVersion < VK_MAKE_VERSION(1, 1, 0));
Greg Daniel96259622018-10-01 14:42:56 -0400171
Greg Daniel2ff202712018-06-14 11:50:10 -0400172 // query to get the initial queue props size
173 uint32_t queueCount;
174 mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, nullptr);
Stan Iliev90276c82019-02-03 18:01:02 -0500175 LOG_ALWAYS_FATAL_IF(!queueCount);
Greg Daniel2ff202712018-06-14 11:50:10 -0400176
177 // now get the actual queue props
178 std::unique_ptr<VkQueueFamilyProperties[]> queueProps(new VkQueueFamilyProperties[queueCount]);
179 mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, queueProps.get());
180
181 // iterate to find the graphics queue
182 mGraphicsQueueIndex = queueCount;
183 for (uint32_t i = 0; i < queueCount; i++) {
184 if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
185 mGraphicsQueueIndex = i;
186 break;
187 }
188 }
Stan Iliev90276c82019-02-03 18:01:02 -0500189 LOG_ALWAYS_FATAL_IF(mGraphicsQueueIndex == queueCount);
Greg Daniel2ff202712018-06-14 11:50:10 -0400190
191 // All physical devices and queue families on Android must be capable of
192 // presentation with any native window. So just use the first one.
193 mPresentQueueIndex = 0;
194
Greg Daniel2ff202712018-06-14 11:50:10 -0400195 {
196 uint32_t extensionCount = 0;
197 err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
198 nullptr);
Stan Iliev90276c82019-02-03 18:01:02 -0500199 LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
Greg Daniel2ff202712018-06-14 11:50:10 -0400200 std::unique_ptr<VkExtensionProperties[]> extensions(
201 new VkExtensionProperties[extensionCount]);
202 err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
203 extensions.get());
Stan Iliev90276c82019-02-03 18:01:02 -0500204 LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
Greg Daniel2ff202712018-06-14 11:50:10 -0400205 bool hasKHRSwapchainExtension = false;
206 for (uint32_t i = 0; i < extensionCount; ++i) {
Bo Liu7b8c1eb2019-01-08 20:17:55 -0800207 mDeviceExtensions.push_back(extensions[i].extensionName);
Greg Daniel2ff202712018-06-14 11:50:10 -0400208 if (!strcmp(extensions[i].extensionName, VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
209 hasKHRSwapchainExtension = true;
210 }
211 }
Stan Iliev90276c82019-02-03 18:01:02 -0500212 LOG_ALWAYS_FATAL_IF(!hasKHRSwapchainExtension);
Greg Daniel2ff202712018-06-14 11:50:10 -0400213 }
214
Greg Daniela227dbb2018-08-20 09:19:48 -0400215 auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
216 if (device != VK_NULL_HANDLE) {
217 return vkGetDeviceProcAddr(device, proc_name);
218 }
219 return vkGetInstanceProcAddr(instance, proc_name);
220 };
Bo Liu7b8c1eb2019-01-08 20:17:55 -0800221 grExtensions.init(getProc, mInstance, mPhysicalDevice, mInstanceExtensions.size(),
222 mInstanceExtensions.data(), mDeviceExtensions.size(), mDeviceExtensions.data());
Greg Daniela227dbb2018-08-20 09:19:48 -0400223
Stan Iliev90276c82019-02-03 18:01:02 -0500224 LOG_ALWAYS_FATAL_IF(!grExtensions.hasExtension(VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME, 1));
Greg Daniel26e0dca2018-09-18 10:33:19 -0400225
Greg Daniela227dbb2018-08-20 09:19:48 -0400226 memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
227 features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
228 features.pNext = nullptr;
229
230 // Setup all extension feature structs we may want to use.
231 void** tailPNext = &features.pNext;
232
233 if (grExtensions.hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
234 VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend;
235 blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) malloc(
236 sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
237 LOG_ALWAYS_FATAL_IF(!blend);
238 blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
239 blend->pNext = nullptr;
240 *tailPNext = blend;
241 tailPNext = &blend->pNext;
242 }
243
Greg Daniel05036172018-11-28 17:08:04 -0500244 VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature;
245 ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*) malloc(
246 sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
247 LOG_ALWAYS_FATAL_IF(!ycbcrFeature);
248 ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
249 ycbcrFeature->pNext = nullptr;
250 *tailPNext = ycbcrFeature;
251 tailPNext = &ycbcrFeature->pNext;
252
Greg Daniela227dbb2018-08-20 09:19:48 -0400253 // query to get the physical device features
254 mGetPhysicalDeviceFeatures2(mPhysicalDevice, &features);
Greg Daniel2ff202712018-06-14 11:50:10 -0400255 // this looks like it would slow things down,
256 // and we can't depend on it on all platforms
Greg Daniela227dbb2018-08-20 09:19:48 -0400257 features.features.robustBufferAccess = VK_FALSE;
Greg Daniel2ff202712018-06-14 11:50:10 -0400258
259 float queuePriorities[1] = { 0.0 };
260
261 const VkDeviceQueueCreateInfo queueInfo[2] = {
262 {
263 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
264 nullptr, // pNext
265 0, // VkDeviceQueueCreateFlags
266 mGraphicsQueueIndex, // queueFamilyIndex
267 1, // queueCount
268 queuePriorities, // pQueuePriorities
269 },
270 {
271 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
272 nullptr, // pNext
273 0, // VkDeviceQueueCreateFlags
274 mPresentQueueIndex, // queueFamilyIndex
275 1, // queueCount
276 queuePriorities, // pQueuePriorities
277 }
278 };
279 uint32_t queueInfoCount = (mPresentQueueIndex != mGraphicsQueueIndex) ? 2 : 1;
280
281 const VkDeviceCreateInfo deviceInfo = {
282 VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
Greg Daniela227dbb2018-08-20 09:19:48 -0400283 &features, // pNext
Greg Daniel2ff202712018-06-14 11:50:10 -0400284 0, // VkDeviceCreateFlags
285 queueInfoCount, // queueCreateInfoCount
286 queueInfo, // pQueueCreateInfos
287 0, // layerCount
288 nullptr, // ppEnabledLayerNames
Bo Liu7b8c1eb2019-01-08 20:17:55 -0800289 (uint32_t) mDeviceExtensions.size(), // extensionCount
290 mDeviceExtensions.data(), // ppEnabledExtensionNames
Greg Daniela227dbb2018-08-20 09:19:48 -0400291 nullptr, // ppEnabledFeatures
Greg Daniel2ff202712018-06-14 11:50:10 -0400292 };
293
Stan Iliev90276c82019-02-03 18:01:02 -0500294 LOG_ALWAYS_FATAL_IF(mCreateDevice(mPhysicalDevice, &deviceInfo, nullptr, &mDevice));
Greg Daniel2ff202712018-06-14 11:50:10 -0400295
296 GET_DEV_PROC(GetDeviceQueue);
297 GET_DEV_PROC(DeviceWaitIdle);
298 GET_DEV_PROC(DestroyDevice);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500299 GET_DEV_PROC(CreateSwapchainKHR);
300 GET_DEV_PROC(DestroySwapchainKHR);
301 GET_DEV_PROC(GetSwapchainImagesKHR);
302 GET_DEV_PROC(AcquireNextImageKHR);
303 GET_DEV_PROC(QueuePresentKHR);
304 GET_DEV_PROC(CreateCommandPool);
305 GET_DEV_PROC(DestroyCommandPool);
306 GET_DEV_PROC(AllocateCommandBuffers);
307 GET_DEV_PROC(FreeCommandBuffers);
308 GET_DEV_PROC(ResetCommandBuffer);
309 GET_DEV_PROC(BeginCommandBuffer);
310 GET_DEV_PROC(EndCommandBuffer);
311 GET_DEV_PROC(CmdPipelineBarrier);
312 GET_DEV_PROC(GetDeviceQueue);
313 GET_DEV_PROC(QueueSubmit);
314 GET_DEV_PROC(QueueWaitIdle);
315 GET_DEV_PROC(DeviceWaitIdle);
316 GET_DEV_PROC(CreateSemaphore);
317 GET_DEV_PROC(DestroySemaphore);
Greg Daniel26e0dca2018-09-18 10:33:19 -0400318 GET_DEV_PROC(ImportSemaphoreFdKHR);
319 GET_DEV_PROC(GetSemaphoreFdKHR);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500320 GET_DEV_PROC(CreateFence);
321 GET_DEV_PROC(DestroyFence);
322 GET_DEV_PROC(WaitForFences);
323 GET_DEV_PROC(ResetFences);
Greg Daniel2ff202712018-06-14 11:50:10 -0400324}
325
// One-time setup of the Vulkan device and Skia GrContext for this render
// thread. Idempotent: returns immediately if a VkDevice already exists.
// Failures are fatal (LOG_ALWAYS_FATAL_IF), so on return the manager is
// fully initialized.
void VulkanManager::initialize() {
    // Already initialized — setupDevice() populated mDevice on a prior call.
    if (mDevice != VK_NULL_HANDLE) {
        return;
    }

    // Require a Vulkan 1.1 loader before doing anything else.
    GET_PROC(EnumerateInstanceVersion);
    uint32_t instanceVersion;
    LOG_ALWAYS_FATAL_IF(mEnumerateInstanceVersion(&instanceVersion));
    LOG_ALWAYS_FATAL_IF(instanceVersion < VK_MAKE_VERSION(1, 1, 0));

    GrVkExtensions extensions;
    this->setupDevice(extensions, mPhysicalDeviceFeatures2);

    mGetDeviceQueue(mDevice, mGraphicsQueueIndex, 0, &mGraphicsQueue);

    // Skia resolves Vulkan entry points through this callback: device-level
    // lookups when a device handle is given, instance-level otherwise.
    auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return vkGetDeviceProcAddr(device, proc_name);
        }
        return vkGetInstanceProcAddr(instance, proc_name);
    };

    GrVkBackendContext backendContext;
    backendContext.fInstance = mInstance;
    backendContext.fPhysicalDevice = mPhysicalDevice;
    backendContext.fDevice = mDevice;
    backendContext.fQueue = mGraphicsQueue;
    backendContext.fGraphicsQueueIndex = mGraphicsQueueIndex;
    backendContext.fMaxAPIVersion = mAPIVersion;
    backendContext.fVkExtensions = &extensions;
    backendContext.fDeviceFeatures2 = &mPhysicalDeviceFeatures2;
    backendContext.fGetProc = std::move(getProc);

    // create the command pool for the command buffers
    if (VK_NULL_HANDLE == mCommandPool) {
        VkCommandPoolCreateInfo commandPoolInfo;
        memset(&commandPoolInfo, 0, sizeof(VkCommandPoolCreateInfo));
        commandPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
        // this needs to be on the render queue
        commandPoolInfo.queueFamilyIndex = mGraphicsQueueIndex;
        commandPoolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
        // NOTE(review): res exists only in debug builds; release builds rely
        // on the LOG_ALWAYS_FATAL_IF null-handle check below.
        SkDEBUGCODE(VkResult res =) mCreateCommandPool(mDevice, &commandPoolInfo, nullptr,
                &mCommandPool);
        SkASSERT(VK_SUCCESS == res);
    }
    LOG_ALWAYS_FATAL_IF(mCommandPool == VK_NULL_HANDLE);

    if (!setupDummyCommandBuffer()) {
        this->destroy();
        // Pass through will crash on next line.
    }
    LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);


    mGetDeviceQueue(mDevice, mPresentQueueIndex, 0, &mPresentQueue);

    GrContextOptions options;
    options.fDisableDistanceFieldPaths = true;
    // TODO: get a string describing the SPIR-V compiler version and use it here
    mRenderThread.cacheManager().configureContext(&options, nullptr, 0);
    sk_sp<GrContext> grContext(GrContext::MakeVulkan(backendContext, options));
    LOG_ALWAYS_FATAL_IF(!grContext.get());
    mRenderThread.setGrContext(grContext);

    if (Properties::enablePartialUpdates && Properties::useBufferAge) {
        mSwapBehavior = SwapBehavior::BufferAge;
    }
}
394
// Packages the initialized Vulkan handles plus the enabled instance/device
// extension lists and device features for external VkFunctor consumers.
// NOTE(review): the extension-name and feature pointers reference this
// manager's member storage — they are valid only while the manager stays
// alive and destroy() has not been called.
VkFunctorInitParams VulkanManager::getVkFunctorInitParams() const {
    return VkFunctorInitParams{
            .instance = mInstance,
            .physical_device = mPhysicalDevice,
            .device = mDevice,
            .queue = mGraphicsQueue,
            .graphics_queue_index = mGraphicsQueueIndex,
            .api_version = mAPIVersion,
            .enabled_instance_extension_names = mInstanceExtensions.data(),
            .enabled_instance_extension_names_length =
                    static_cast<uint32_t>(mInstanceExtensions.size()),
            .enabled_device_extension_names = mDeviceExtensions.data(),
            .enabled_device_extension_names_length =
                    static_cast<uint32_t>(mDeviceExtensions.size()),
            .device_features_2 = &mPhysicalDeviceFeatures2,
    };
}
412
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500413// Returns the next BackbufferInfo to use for the next draw. The function will make sure all
414// previous uses have finished before returning.
415VulkanSurface::BackbufferInfo* VulkanManager::getAvailableBackbuffer(VulkanSurface* surface) {
416 SkASSERT(surface->mBackbuffers);
417
418 ++surface->mCurrentBackbufferIndex;
419 if (surface->mCurrentBackbufferIndex > surface->mImageCount) {
420 surface->mCurrentBackbufferIndex = 0;
421 }
422
John Reck1bcacfd2017-11-03 10:12:19 -0700423 VulkanSurface::BackbufferInfo* backbuffer =
424 surface->mBackbuffers + surface->mCurrentBackbufferIndex;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500425
426 // Before we reuse a backbuffer, make sure its fences have all signaled so that we can safely
427 // reuse its commands buffers.
Greg Daniel2ff202712018-06-14 11:50:10 -0400428 VkResult res = mWaitForFences(mDevice, 2, backbuffer->mUsageFences, true, UINT64_MAX);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500429 if (res != VK_SUCCESS) {
430 return nullptr;
431 }
432
433 return backbuffer;
434}
435
Greg Danielc4076782019-01-08 16:01:18 -0500436static SkMatrix getPreTransformMatrix(int width, int height,
437 VkSurfaceTransformFlagBitsKHR transform) {
438 switch (transform) {
439 case VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR:
440 return SkMatrix::I();
441 case VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR:
442 return SkMatrix::MakeAll(0, -1, height, 1, 0, 0, 0, 0, 1);
443 case VK_SURFACE_TRANSFORM_ROTATE_180_BIT_KHR:
444 return SkMatrix::MakeAll(-1, 0, width, 0, -1, height, 0, 0, 1);
445 case VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR:
446 return SkMatrix::MakeAll(0, 1, 0, -1, 0, width, 0, 0, 1);
447 case VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_BIT_KHR:
448 return SkMatrix::MakeAll(-1, 0, width, 0, 1, 0, 0, 0, 1);
449 case VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR:
450 return SkMatrix::MakeAll(0, -1, height, -1, 0, width, 0, 0, 1);
451 case VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_180_BIT_KHR:
452 return SkMatrix::MakeAll(1, 0, 0, 0, -1, height, 0, 0, 1);
453 case VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR:
454 return SkMatrix::MakeAll(0, 1, 0, 1, 0, 0, 0, 0, 1);
455 default:
456 LOG_ALWAYS_FATAL("Unsupported pre transform of swapchain.");
457 }
458 return SkMatrix::I();
459}
460
461
// Acquires the next swapchain image for *surfaceOut and returns the SkSurface
// to draw into, or nullptr on unrecoverable failure. Side effects: may
// destroy and recreate *surfaceOut if the ANativeWindow was resized, and may
// recreate the swapchain on VK_ERROR_OUT_OF_DATE_KHR / VK_SUBOPTIMAL_KHR.
// Also records and submits a layout-transition barrier so the image is in
// COLOR_ATTACHMENT_OPTIMAL before rendering begins.
SkSurface* VulkanManager::getBackbufferSurface(VulkanSurface** surfaceOut) {
    // Recreate VulkanSurface, if ANativeWindow has been resized.
    VulkanSurface* surface = *surfaceOut;
    int windowWidth = 0, windowHeight = 0;
    ANativeWindow* window = surface->mNativeWindow;
    window->query(window, NATIVE_WINDOW_WIDTH, &windowWidth);
    window->query(window, NATIVE_WINDOW_HEIGHT, &windowHeight);
    if (windowWidth != surface->mWindowWidth || windowHeight != surface->mWindowHeight) {
        // Preserve the color configuration across the surface rebuild.
        ColorMode colorMode = surface->mColorMode;
        sk_sp<SkColorSpace> colorSpace = surface->mColorSpace;
        SkColorType colorType = surface->mColorType;
        destroySurface(surface);
        *surfaceOut = createSurface(window, colorMode, colorSpace, colorType);
        surface = *surfaceOut;
        if (!surface) {
            return nullptr;
        }
    }

    VulkanSurface::BackbufferInfo* backbuffer = getAvailableBackbuffer(surface);
    SkASSERT(backbuffer);

    VkResult res;

    // Un-signal both usage fences; they are re-signaled by this frame's submits.
    res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
    SkASSERT(VK_SUCCESS == res);

    // The acquire will signal the attached mAcquireSemaphore. We use this to know the image has
    // finished presenting and that it is safe to begin sending new commands to the returned image.
    res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
            backbuffer->mAcquireSemaphore, VK_NULL_HANDLE,
            &backbuffer->mImageIndex);

    if (VK_ERROR_SURFACE_LOST_KHR == res) {
        // need to figure out how to create a new vkSurface without the platformData*
        // maybe use attach somehow? but need a Window
        return nullptr;
    }
    if (VK_ERROR_OUT_OF_DATE_KHR == res || VK_SUBOPTIMAL_KHR == res) {
        // tear swapchain down and try again (one retry only)
        if (!createSwapchain(surface)) {
            return nullptr;
        }
        backbuffer = getAvailableBackbuffer(surface);
        res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
        SkASSERT(VK_SUCCESS == res);

        // acquire the image
        res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
                backbuffer->mAcquireSemaphore, VK_NULL_HANDLE,
                &backbuffer->mImageIndex);

        if (VK_SUCCESS != res) {
            return nullptr;
        }
    }

    // set up layout transfer from initial to color attachment
    VkImageLayout layout = surface->mImageInfos[backbuffer->mImageIndex].mImageLayout;
    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout || VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout);
    VkPipelineStageFlags srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkAccessFlags srcAccessMask = 0;
    VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
            VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;

    // The barrier also transfers queue-family ownership: present -> graphics.
    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
        NULL, // pNext
        srcAccessMask, // outputMask
        dstAccessMask, // inputMask
        layout, // oldLayout
        VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // newLayout
        mPresentQueueIndex, // srcQueueFamilyIndex
        mGraphicsQueueIndex, // dstQueueFamilyIndex
        surface->mImages[backbuffer->mImageIndex], // image
        {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1} // subresourceRange
    };
    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[0], 0);

    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[0], &info);

    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[0], srcStageMask, dstStageMask, 0, 0,
            nullptr, 0, nullptr, 1, &imageMemoryBarrier);

    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[0]);

    VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 1;
    // Wait to make sure acquire semaphore set above has signaled.
    submitInfo.pWaitSemaphores = &backbuffer->mAcquireSemaphore;
    submitInfo.pWaitDstStageMask = &waitDstStageFlags;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[0];
    submitInfo.signalSemaphoreCount = 0;

    // Attach first fence to submission here so we can track when the command buffer finishes.
    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[0]);

    // We need to notify Skia that we changed the layout of the wrapped VkImage
    sk_sp<SkSurface> skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface;
    GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
            SkSurface::kFlushRead_BackendHandleAccess);
    if (!backendRT.isValid()) {
        SkASSERT(backendRT.isValid());
        return nullptr;
    }
    backendRT.setVkImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);

    surface->mPreTransform = getPreTransformMatrix(surface->windowWidth(),
                                                   surface->windowHeight(),
                                                   surface->mTransform);

    surface->mBackbuffer = std::move(skSurface);
    return surface->mBackbuffer.get();
}
586
587void VulkanManager::destroyBuffers(VulkanSurface* surface) {
588 if (surface->mBackbuffers) {
589 for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400590 mWaitForFences(mDevice, 2, surface->mBackbuffers[i].mUsageFences, true, UINT64_MAX);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500591 surface->mBackbuffers[i].mImageIndex = -1;
Greg Daniel2ff202712018-06-14 11:50:10 -0400592 mDestroySemaphore(mDevice, surface->mBackbuffers[i].mAcquireSemaphore, nullptr);
593 mDestroySemaphore(mDevice, surface->mBackbuffers[i].mRenderSemaphore, nullptr);
594 mFreeCommandBuffers(mDevice, mCommandPool, 2,
595 surface->mBackbuffers[i].mTransitionCmdBuffers);
596 mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[0], 0);
597 mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[1], 0);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500598 }
599 }
600
601 delete[] surface->mBackbuffers;
602 surface->mBackbuffers = nullptr;
Greg Danielcd558522016-11-17 13:31:40 -0500603 delete[] surface->mImageInfos;
604 surface->mImageInfos = nullptr;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500605 delete[] surface->mImages;
606 surface->mImages = nullptr;
607}
608
609void VulkanManager::destroySurface(VulkanSurface* surface) {
610 // Make sure all submit commands have finished before starting to destroy objects.
611 if (VK_NULL_HANDLE != mPresentQueue) {
612 mQueueWaitIdle(mPresentQueue);
613 }
Greg Daniel2ff202712018-06-14 11:50:10 -0400614 mDeviceWaitIdle(mDevice);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500615
616 destroyBuffers(surface);
617
618 if (VK_NULL_HANDLE != surface->mSwapchain) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400619 mDestroySwapchainKHR(mDevice, surface->mSwapchain, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500620 surface->mSwapchain = VK_NULL_HANDLE;
621 }
622
623 if (VK_NULL_HANDLE != surface->mVkSurface) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400624 mDestroySurfaceKHR(mInstance, surface->mVkSurface, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500625 surface->mVkSurface = VK_NULL_HANDLE;
626 }
627 delete surface;
628}
629
// Queries the images owned by |surface|'s swapchain, wraps each in an SkSurface,
// and allocates the backbuffer bookkeeping (semaphores, usage fences, layout-
// transition command buffers) used during acquire/present. Called from
// createSwapchain() once the swapchain itself exists. |format| and |extent| must
// match what the swapchain was created with.
void VulkanManager::createBuffers(VulkanSurface* surface, VkFormat format, VkExtent2D extent) {
    // First call retrieves the image count, second fills the array.
    mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, nullptr);
    SkASSERT(surface->mImageCount);
    surface->mImages = new VkImage[surface->mImageCount];
    mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, surface->mImages);

    SkSurfaceProps props(0, kUnknown_SkPixelGeometry);

    // set up initial image layouts and create surfaces
    surface->mImageInfos = new VulkanSurface::ImageInfo[surface->mImageCount];
    for (uint32_t i = 0; i < surface->mImageCount; ++i) {
        GrVkImageInfo info;
        info.fImage = surface->mImages[i];
        // Swapchain images are allocated by the driver; Skia gets no allocation handle.
        info.fAlloc = GrVkAlloc();
        info.fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
        info.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
        info.fFormat = format;
        info.fLevelCount = 1;

        // sampleCnt=0 and stencilBits=0: the swapchain target is non-multisampled.
        GrBackendRenderTarget backendRT(extent.width, extent.height, 0, 0, info);

        VulkanSurface::ImageInfo& imageInfo = surface->mImageInfos[i];
        imageInfo.mSurface = SkSurface::MakeFromBackendRenderTarget(
                mRenderThread.getGrContext(), backendRT, kTopLeft_GrSurfaceOrigin,
                surface->mColorType, surface->mColorSpace, &props);
    }

    SkASSERT(mCommandPool != VK_NULL_HANDLE);

    // set up the backbuffers
    VkSemaphoreCreateInfo semaphoreInfo;
    memset(&semaphoreInfo, 0, sizeof(VkSemaphoreCreateInfo));
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    VkCommandBufferAllocateInfo commandBuffersInfo;
    memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
    commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    commandBuffersInfo.pNext = nullptr;
    commandBuffersInfo.commandPool = mCommandPool;
    commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    // Two command buffers per backbuffer: one for the acquire-side transition, one
    // for the present-side transition (see mTransitionCmdBuffers[0]/[1] usage).
    commandBuffersInfo.commandBufferCount = 2;
    VkFenceCreateInfo fenceInfo;
    memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
    fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    fenceInfo.pNext = nullptr;
    // Created signaled so the first wait in destroyBuffers/acquire does not block.
    fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;

    // we create one additional backbuffer structure here, because we want to
    // give the command buffers they contain a chance to finish before we cycle back
    surface->mBackbuffers = new VulkanSurface::BackbufferInfo[surface->mImageCount + 1];
    for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
        // res is only declared/checked in debug builds; release builds ignore results.
        SkDEBUGCODE(VkResult res);
        surface->mBackbuffers[i].mImageIndex = -1;
        SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
                                            &surface->mBackbuffers[i].mAcquireSemaphore);
        SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
                                            &surface->mBackbuffers[i].mRenderSemaphore);
        SkDEBUGCODE(res =) mAllocateCommandBuffers(mDevice, &commandBuffersInfo,
                                                   surface->mBackbuffers[i].mTransitionCmdBuffers);
        SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
                                        &surface->mBackbuffers[i].mUsageFences[0]);
        SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
                                        &surface->mBackbuffers[i].mUsageFences[1]);
        SkASSERT(VK_SUCCESS == res);
    }
    // Start at the extra slot; the first acquire will wrap to index 0.
    surface->mCurrentBackbufferIndex = surface->mImageCount;
}
698
// Creates (or recreates) the VkSwapchainKHR for |surface|, negotiating extent,
// pre-transform, format/color space, image count and composite alpha against the
// surface capabilities. On success also (re)creates the per-swapchain buffers via
// createBuffers() and records the chosen transform on the surface. Returns false
// on any query/creation failure, leaving |surface| without a (new) swapchain.
bool VulkanManager::createSwapchain(VulkanSurface* surface) {
    // check for capabilities
    VkSurfaceCapabilitiesKHR caps;
    VkResult res = mGetPhysicalDeviceSurfaceCapabilitiesKHR(mPhysicalDevice,
                                                            surface->mVkSurface, &caps);
    if (VK_SUCCESS != res) {
        return false;
    }

    uint32_t surfaceFormatCount;
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
                                              &surfaceFormatCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    FatVector<VkSurfaceFormatKHR, 4> surfaceFormats(surfaceFormatCount);
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
                                              &surfaceFormatCount, surfaceFormats.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    uint32_t presentModeCount;
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
                                                   surface->mVkSurface, &presentModeCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    FatVector<VkPresentModeKHR, VK_PRESENT_MODE_RANGE_SIZE_KHR> presentModes(presentModeCount);
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
                                                   surface->mVkSurface, &presentModeCount,
                                                   presentModes.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    // Identity must be supported since it is the fallback chosen below.
    if (!SkToBool(caps.supportedTransforms & VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR)) {
        return false;
    }
    // Prefer the surface's current transform (so HWUI pre-rotates and the
    // compositor does not have to), unless it is INHERIT or unsupported.
    VkSurfaceTransformFlagBitsKHR transform;
    if (SkToBool(caps.supportedTransforms & caps.currentTransform) &&
        !SkToBool(caps.currentTransform & VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR)) {
        transform = caps.currentTransform;
    } else {
        transform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
    }

    VkExtent2D extent = caps.currentExtent;
    // clamp width; to handle currentExtent of -1 and protect us from broken hints
    if (extent.width < caps.minImageExtent.width) {
        extent.width = caps.minImageExtent.width;
    }
    SkASSERT(extent.width <= caps.maxImageExtent.width);
    // clamp height
    if (extent.height < caps.minImageExtent.height) {
        extent.height = caps.minImageExtent.height;
    }
    SkASSERT(extent.height <= caps.maxImageExtent.height);

    // For 90/270-degree pre-transforms the swapchain images are created in the
    // rotated orientation, so width and height are swapped.
    VkExtent2D swapExtent = extent;
    if (transform == VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR ||
        transform == VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR ||
        transform == VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR ||
        transform == VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR) {
        swapExtent.width = extent.height;
        swapExtent.height = extent.width;
    }

    // Window dimensions keep the unrotated orientation.
    surface->mWindowWidth = extent.width;
    surface->mWindowHeight = extent.height;

    // Triple-buffer when allowed; never fewer than the surface minimum.
    uint32_t imageCount = std::max<uint32_t>(3, caps.minImageCount);
    if (caps.maxImageCount > 0 && imageCount > caps.maxImageCount) {
        // Application must settle for fewer images than desired:
        imageCount = caps.maxImageCount;
    }

    // Currently Skia requires the images to be color attachments and support all transfer
    // operations.
    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    SkASSERT((caps.supportedUsageFlags & usageFlags) == usageFlags);

    SkASSERT(caps.supportedCompositeAlpha &
             (VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR | VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR));
    VkCompositeAlphaFlagBitsKHR composite_alpha =
            (caps.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR)
                    ? VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR
                    : VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;

    // Default 8888; F16 color type upgrades the format.
    VkFormat surfaceFormat = VK_FORMAT_R8G8B8A8_UNORM;
    VkColorSpaceKHR colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
    if (surface->mColorType == SkColorType::kRGBA_F16_SkColorType) {
        surfaceFormat = VK_FORMAT_R16G16B16A16_SFLOAT;
    }

    // Wide-gamut surfaces pick the Vulkan color space matching the SkColorSpace
    // gamut; only sRGB-extended and Display-P3 are supported here.
    if (surface->mColorMode == ColorMode::WideColorGamut) {
        skcms_Matrix3x3 surfaceGamut;
        LOG_ALWAYS_FATAL_IF(!surface->mColorSpace->toXYZD50(&surfaceGamut),
                            "Could not get gamut matrix from color space");
        if (memcmp(&surfaceGamut, &SkNamedGamut::kSRGB, sizeof(surfaceGamut)) == 0) {
            colorSpace = VK_COLOR_SPACE_EXTENDED_SRGB_NONLINEAR_EXT;
        } else if (memcmp(&surfaceGamut, &SkNamedGamut::kDCIP3, sizeof(surfaceGamut)) == 0) {
            colorSpace = VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT;
        } else {
            LOG_ALWAYS_FATAL("Unreachable: unsupported wide color space.");
        }
    }

    // The chosen format/color-space pair must be one the surface actually reports.
    bool foundSurfaceFormat = false;
    for (uint32_t i = 0; i < surfaceFormatCount; ++i) {
        if (surfaceFormat == surfaceFormats[i].format
                && colorSpace == surfaceFormats[i].colorSpace) {
            foundSurfaceFormat = true;
            break;
        }
    }

    if (!foundSurfaceFormat) {
        return false;
    }

    // FIFO is always available and will match what we do on GL so just pick that here.
    VkPresentModeKHR mode = VK_PRESENT_MODE_FIFO_KHR;

    VkSwapchainCreateInfoKHR swapchainCreateInfo;
    memset(&swapchainCreateInfo, 0, sizeof(VkSwapchainCreateInfoKHR));
    swapchainCreateInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
    swapchainCreateInfo.surface = surface->mVkSurface;
    swapchainCreateInfo.minImageCount = imageCount;
    swapchainCreateInfo.imageFormat = surfaceFormat;
    swapchainCreateInfo.imageColorSpace = colorSpace;
    swapchainCreateInfo.imageExtent = swapExtent;
    swapchainCreateInfo.imageArrayLayers = 1;
    swapchainCreateInfo.imageUsage = usageFlags;

    // Concurrent sharing is only needed when graphics and present live on
    // different queue families.
    uint32_t queueFamilies[] = {mGraphicsQueueIndex, mPresentQueueIndex};
    if (mGraphicsQueueIndex != mPresentQueueIndex) {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
        swapchainCreateInfo.queueFamilyIndexCount = 2;
        swapchainCreateInfo.pQueueFamilyIndices = queueFamilies;
    } else {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
        swapchainCreateInfo.queueFamilyIndexCount = 0;
        swapchainCreateInfo.pQueueFamilyIndices = nullptr;
    }

    swapchainCreateInfo.preTransform = transform;
    swapchainCreateInfo.compositeAlpha = composite_alpha;
    swapchainCreateInfo.presentMode = mode;
    swapchainCreateInfo.clipped = true;
    // Passing the old swapchain lets the driver reuse resources across recreation.
    swapchainCreateInfo.oldSwapchain = surface->mSwapchain;

    res = mCreateSwapchainKHR(mDevice, &swapchainCreateInfo, nullptr, &surface->mSwapchain);
    if (VK_SUCCESS != res) {
        return false;
    }

    surface->mTransform = transform;

    // destroy the old swapchain
    if (swapchainCreateInfo.oldSwapchain != VK_NULL_HANDLE) {
        mDeviceWaitIdle(mDevice);

        destroyBuffers(surface);

        mDestroySwapchainKHR(mDevice, swapchainCreateInfo.oldSwapchain, nullptr);
    }

    createBuffers(surface, surfaceFormat, swapExtent);

    // The window content is not updated (frozen) until a buffer of the window size is received.
    // This prevents temporary stretching of the window after it is resized, but before the first
    // buffer with new size is enqueued.
    native_window_set_scaling_mode(surface->mNativeWindow, NATIVE_WINDOW_SCALING_MODE_FREEZE);

    return true;
}
880
// Creates a VulkanSurface for |window|: allocates the wrapper, creates the
// VkSurfaceKHR via VK_KHR_android_surface, then builds the swapchain. Returns
// nullptr after cleaning up on any failure. The caller owns the returned surface
// and must release it with destroySurface().
VulkanSurface* VulkanManager::createSurface(ANativeWindow* window, ColorMode colorMode,
                                            sk_sp<SkColorSpace> surfaceColorSpace,
                                            SkColorType surfaceColorType) {
    // Lazily bring up the Vulkan context before doing any surface work.
    initialize();

    if (!window) {
        return nullptr;
    }

    VulkanSurface* surface = new VulkanSurface(colorMode, window, surfaceColorSpace,
                                               surfaceColorType);

    VkAndroidSurfaceCreateInfoKHR surfaceCreateInfo;
    memset(&surfaceCreateInfo, 0, sizeof(VkAndroidSurfaceCreateInfoKHR));
    surfaceCreateInfo.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
    surfaceCreateInfo.pNext = nullptr;
    surfaceCreateInfo.flags = 0;
    surfaceCreateInfo.window = window;

    VkResult res = mCreateAndroidSurfaceKHR(mInstance, &surfaceCreateInfo, nullptr,
                                            &surface->mVkSurface);
    if (VK_SUCCESS != res) {
        // No VkSurfaceKHR was created, so plain delete is sufficient cleanup here.
        delete surface;
        return nullptr;
    }

    // Debug-only sanity check; release builds skip the query entirely.
    SkDEBUGCODE(VkBool32 supported; res = mGetPhysicalDeviceSurfaceSupportKHR(
            mPhysicalDevice, mPresentQueueIndex, surface->mVkSurface, &supported);
    // All physical devices and queue families on Android must be capable of
    // presentation with any native window.
    SkASSERT(VK_SUCCESS == res && supported););

    if (!createSwapchain(surface)) {
        // destroySurface also releases the VkSurfaceKHR created above.
        destroySurface(surface);
        return nullptr;
    }

    return surface;
}
920
921// Helper to know which src stage flags we need to set when transitioning to the present layout
Greg Daniel8a2a7542018-10-04 13:46:55 -0400922static VkPipelineStageFlags layoutToPipelineSrcStageFlags(const VkImageLayout layout) {
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500923 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
924 return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
925 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
926 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
927 return VK_PIPELINE_STAGE_TRANSFER_BIT;
Greg Daniel8a2a7542018-10-04 13:46:55 -0400928 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
929 return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
930 } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
931 VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout) {
932 return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
933 } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
934 return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500935 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
936 return VK_PIPELINE_STAGE_HOST_BIT;
937 }
938
939 SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
940 return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
941}
942
943// Helper to know which src access mask we need to set when transitioning to the present layout
944static VkAccessFlags layoutToSrcAccessMask(const VkImageLayout layout) {
945 VkAccessFlags flags = 0;
946 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
947 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
John Reck1bcacfd2017-11-03 10:12:19 -0700948 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT |
949 VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_HOST_WRITE_BIT |
950 VK_ACCESS_HOST_READ_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500951 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
952 flags = VK_ACCESS_HOST_WRITE_BIT;
953 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
954 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
955 } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
956 flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
957 } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
958 flags = VK_ACCESS_TRANSFER_WRITE_BIT;
959 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
960 flags = VK_ACCESS_TRANSFER_READ_BIT;
961 } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
962 flags = VK_ACCESS_SHADER_READ_BIT;
963 }
964 return flags;
965}
966
// Presents the current backbuffer: records and submits a barrier transitioning the
// swapchain image to PRESENT_SRC_KHR (transferring ownership from the graphics to
// the present queue family), then queues vkQueuePresentKHR waiting on that
// submission's semaphore. Also releases mBackbuffer and updates the per-image
// age bookkeeping read by getAge().
void VulkanManager::swapBuffers(VulkanSurface* surface) {
    // Debug property: serialize with the GPU before every present.
    if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
        ATRACE_NAME("Finishing GPU work");
        mDeviceWaitIdle(mDevice);
    }

    SkASSERT(surface->mBackbuffers);
    VulkanSurface::BackbufferInfo* backbuffer =
            surface->mBackbuffers + surface->mCurrentBackbufferIndex;

    // kFlushRead flushes Skia's pending work on the surface before we read the
    // backend state below.
    SkSurface* skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface.get();
    GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
            SkSurface::kFlushRead_BackendHandleAccess);
    SkASSERT(backendRT.isValid());

    GrVkImageInfo imageInfo;
    SkAssertResult(backendRT.getVkImageInfo(&imageInfo));

    // Check to make sure we never change the actually wrapped image
    SkASSERT(imageInfo.fImage == surface->mImages[backbuffer->mImageIndex]);

    // We need to transition the image to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR and make sure that all
    // previous work is complete before presenting. So we first add the necessary barrier here.
    VkImageLayout layout = imageInfo.fImageLayout;
    VkPipelineStageFlags srcStageMask = layoutToPipelineSrcStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = layoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = 0;

    VkImageMemoryBarrier imageMemoryBarrier = {
            VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,  // sType
            NULL,                                    // pNext
            srcAccessMask,                           // outputMask
            dstAccessMask,                           // inputMask
            layout,                                  // oldLayout
            VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,         // newLayout
            mGraphicsQueueIndex,                     // srcQueueFamilyIndex
            mPresentQueueIndex,                      // dstQueueFamilyIndex
            surface->mImages[backbuffer->mImageIndex],  // image
            {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}  // subresourceRange
    };

    // Slot [1] of the transition command buffers is reserved for the present-side
    // transition (slot [0] is the acquire side).
    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[1], 0);
    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[1], &info);
    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[1], srcStageMask, dstStageMask, 0, 0,
                        nullptr, 0, nullptr, 1, &imageMemoryBarrier);
    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[1]);

    surface->mImageInfos[backbuffer->mImageIndex].mImageLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 0;
    submitInfo.pWaitDstStageMask = 0;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[1];
    submitInfo.signalSemaphoreCount = 1;
    // When this command buffer finishes we will signal this semaphore so that we know it is now
    // safe to present the image to the screen.
    submitInfo.pSignalSemaphores = &backbuffer->mRenderSemaphore;

    // Attach second fence to submission here so we can track when the command buffer finishes.
    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[1]);

    // Submit present operation to present queue. We use a semaphore here to make sure all rendering
    // to the image is complete and that the layout has been change to present on the graphics
    // queue.
    const VkPresentInfoKHR presentInfo = {
            VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,  // sType
            NULL,                                // pNext
            1,                                   // waitSemaphoreCount
            &backbuffer->mRenderSemaphore,       // pWaitSemaphores
            1,                                   // swapchainCount
            &surface->mSwapchain,                // pSwapchains
            &backbuffer->mImageIndex,            // pImageIndices
            NULL                                 // pResults
    };

    mQueuePresentKHR(mPresentQueue, &presentInfo);

    // Drop our reference to the presented SkSurface and record its age.
    surface->mBackbuffer.reset();
    surface->mImageInfos[backbuffer->mImageIndex].mLastUsed = surface->mCurrentTime;
    surface->mImageInfos[backbuffer->mImageIndex].mInvalid = false;
    surface->mCurrentTime++;
}
1058
1059int VulkanManager::getAge(VulkanSurface* surface) {
Greg Daniel74ea2012017-11-10 11:32:58 -05001060 SkASSERT(surface->mBackbuffers);
John Reck1bcacfd2017-11-03 10:12:19 -07001061 VulkanSurface::BackbufferInfo* backbuffer =
1062 surface->mBackbuffers + surface->mCurrentBackbufferIndex;
1063 if (mSwapBehavior == SwapBehavior::Discard ||
1064 surface->mImageInfos[backbuffer->mImageIndex].mInvalid) {
Greg Danielcd558522016-11-17 13:31:40 -05001065 return 0;
1066 }
1067 uint16_t lastUsed = surface->mImageInfos[backbuffer->mImageIndex].mLastUsed;
1068 return surface->mCurrentTime - lastUsed;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001069}
1070
Greg Daniel26e0dca2018-09-18 10:33:19 -04001071bool VulkanManager::setupDummyCommandBuffer() {
1072 if (mDummyCB != VK_NULL_HANDLE) {
1073 return true;
1074 }
1075
1076 VkCommandBufferAllocateInfo commandBuffersInfo;
1077 memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
1078 commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
1079 commandBuffersInfo.pNext = nullptr;
1080 commandBuffersInfo.commandPool = mCommandPool;
1081 commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
1082 commandBuffersInfo.commandBufferCount = 1;
1083
1084 VkResult err = mAllocateCommandBuffers(mDevice, &commandBuffersInfo, &mDummyCB);
1085 if (err != VK_SUCCESS) {
1086 // It is probably unnecessary to set this back to VK_NULL_HANDLE, but we set it anyways to
1087 // make sure the driver didn't set a value and then return a failure.
1088 mDummyCB = VK_NULL_HANDLE;
1089 return false;
1090 }
1091
1092 VkCommandBufferBeginInfo beginInfo;
1093 memset(&beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
1094 beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
1095 beginInfo.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
1096
1097 mBeginCommandBuffer(mDummyCB, &beginInfo);
1098 mEndCommandBuffer(mDummyCB);
1099 return true;
1100}
1101
Stan Iliev564ca3e2018-09-04 22:00:00 +00001102status_t VulkanManager::fenceWait(sp<Fence>& fence) {
Greg Daniel26e0dca2018-09-18 10:33:19 -04001103 if (!hasVkContext()) {
1104 ALOGE("VulkanManager::fenceWait: VkDevice not initialized");
1105 return INVALID_OPERATION;
1106 }
1107
Stan Iliev7a081272018-10-26 17:54:18 -04001108 // Block GPU on the fence.
1109 int fenceFd = fence->dup();
1110 if (fenceFd == -1) {
1111 ALOGE("VulkanManager::fenceWait: error dup'ing fence fd: %d", errno);
1112 return -errno;
Stan Iliev564ca3e2018-09-04 22:00:00 +00001113 }
Stan Iliev7a081272018-10-26 17:54:18 -04001114
1115 VkSemaphoreCreateInfo semaphoreInfo;
1116 semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
1117 semaphoreInfo.pNext = nullptr;
1118 semaphoreInfo.flags = 0;
1119 VkSemaphore semaphore;
1120 VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
1121 if (VK_SUCCESS != err) {
1122 ALOGE("Failed to create import semaphore, err: %d", err);
1123 return UNKNOWN_ERROR;
1124 }
1125 VkImportSemaphoreFdInfoKHR importInfo;
1126 importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
1127 importInfo.pNext = nullptr;
1128 importInfo.semaphore = semaphore;
1129 importInfo.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
1130 importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
1131 importInfo.fd = fenceFd;
1132
1133 err = mImportSemaphoreFdKHR(mDevice, &importInfo);
1134 if (VK_SUCCESS != err) {
1135 ALOGE("Failed to import semaphore, err: %d", err);
1136 return UNKNOWN_ERROR;
1137 }
1138
1139 LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);
1140
1141 VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
1142
1143 VkSubmitInfo submitInfo;
1144 memset(&submitInfo, 0, sizeof(VkSubmitInfo));
1145 submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
1146 submitInfo.waitSemaphoreCount = 1;
1147 // Wait to make sure aquire semaphore set above has signaled.
1148 submitInfo.pWaitSemaphores = &semaphore;
1149 submitInfo.pWaitDstStageMask = &waitDstStageFlags;
1150 submitInfo.commandBufferCount = 1;
1151 submitInfo.pCommandBuffers = &mDummyCB;
1152 submitInfo.signalSemaphoreCount = 0;
1153
1154 mQueueSubmit(mGraphicsQueue, 1, &submitInfo, VK_NULL_HANDLE);
1155
1156 // On Android when we import a semaphore, it is imported using temporary permanence. That
1157 // means as soon as we queue the semaphore for a wait it reverts to its previous permanent
1158 // state before importing. This means it will now be in an idle state with no pending
1159 // signal or wait operations, so it is safe to immediately delete it.
1160 mDestroySemaphore(mDevice, semaphore, nullptr);
Stan Iliev564ca3e2018-09-04 22:00:00 +00001161 return OK;
1162}
1163
1164status_t VulkanManager::createReleaseFence(sp<Fence>& nativeFence) {
Greg Daniel26e0dca2018-09-18 10:33:19 -04001165 if (!hasVkContext()) {
1166 ALOGE("VulkanManager::createReleaseFence: VkDevice not initialized");
1167 return INVALID_OPERATION;
1168 }
1169
Greg Daniel26e0dca2018-09-18 10:33:19 -04001170 VkExportSemaphoreCreateInfo exportInfo;
1171 exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
1172 exportInfo.pNext = nullptr;
1173 exportInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
1174
1175 VkSemaphoreCreateInfo semaphoreInfo;
1176 semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
1177 semaphoreInfo.pNext = &exportInfo;
1178 semaphoreInfo.flags = 0;
1179 VkSemaphore semaphore;
1180 VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
1181 if (VK_SUCCESS != err) {
1182 ALOGE("VulkanManager::createReleaseFence: Failed to create semaphore");
1183 return INVALID_OPERATION;
1184 }
1185
1186 LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);
1187
1188 VkSubmitInfo submitInfo;
1189 memset(&submitInfo, 0, sizeof(VkSubmitInfo));
1190 submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
1191 submitInfo.waitSemaphoreCount = 0;
1192 submitInfo.pWaitSemaphores = nullptr;
1193 submitInfo.pWaitDstStageMask = nullptr;
1194 submitInfo.commandBufferCount = 1;
1195 submitInfo.pCommandBuffers = &mDummyCB;
1196 submitInfo.signalSemaphoreCount = 1;
1197 submitInfo.pSignalSemaphores = &semaphore;
1198
1199 mQueueSubmit(mGraphicsQueue, 1, &submitInfo, VK_NULL_HANDLE);
1200
1201 VkSemaphoreGetFdInfoKHR getFdInfo;
1202 getFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
1203 getFdInfo.pNext = nullptr;
1204 getFdInfo.semaphore = semaphore;
1205 getFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
1206
1207 int fenceFd = 0;
1208
1209 err = mGetSemaphoreFdKHR(mDevice, &getFdInfo, &fenceFd);
1210 if (VK_SUCCESS != err) {
1211 ALOGE("VulkanManager::createReleaseFence: Failed to get semaphore Fd");
1212 return INVALID_OPERATION;
1213 }
1214 nativeFence = new Fence(fenceFd);
1215
1216 // Exporting a semaphore with copy transference via vkGetSemahporeFdKHR, has the same effect of
1217 // destroying the semaphore and creating a new one with the same handle, and the payloads
1218 // ownership is move to the Fd we created. Thus the semahpore is in a state that we can delete
1219 // it and we don't need to wait on the command buffer we submitted to finish.
1220 mDestroySemaphore(mDevice, semaphore, nullptr);
1221
Stan Iliev564ca3e2018-09-04 22:00:00 +00001222 return OK;
1223}
1224
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001225} /* namespace renderthread */
1226} /* namespace uirenderer */
1227} /* namespace android */