blob: 5af660c8738a9d0c43294923a58788dda7ff82db [file] [log] [blame]
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001/*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "VulkanManager.h"
18
Stan Iliev305e13a2018-11-13 11:14:48 -050019#include <gui/Surface.h>
20
Greg Danielcd558522016-11-17 13:31:40 -050021#include "Properties.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050022#include "RenderThread.h"
Greg Daniel45ec62b2017-01-04 14:27:00 -050023#include "renderstate/RenderState.h"
Ben Wagnereec27d52017-01-11 15:32:07 -050024#include "utils/FatVector.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050025
Greg Danielac2d2322017-07-12 11:30:15 -040026#include <GrBackendSurface.h>
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050027#include <GrContext.h>
28#include <GrTypes.h>
Greg Daniela227dbb2018-08-20 09:19:48 -040029#include <GrTypes.h>
30#include <vk/GrVkExtensions.h>
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050031#include <vk/GrVkTypes.h>
32
33namespace android {
34namespace uirenderer {
35namespace renderthread {
36
Bo Liu7b8c1eb2019-01-08 20:17:55 -080037static void free_features_extensions_structs(const VkPhysicalDeviceFeatures2& features) {
38 // All Vulkan structs that could be part of the features chain will start with the
39 // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
40 // so we can get access to the pNext for the next struct.
41 struct CommonVulkanHeader {
42 VkStructureType sType;
43 void* pNext;
44 };
45
46 void* pNext = features.pNext;
47 while (pNext) {
48 void* current = pNext;
49 pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
50 free(current);
51 }
52}
53
// Helper macros that load Vulkan entry points into the matching m<Name> member
// function pointers:
//   GET_PROC      - global (pre-instance) functions, resolved with VK_NULL_HANDLE.
//   GET_INST_PROC - instance-level functions, resolved against mInstance.
//   GET_DEV_PROC  - device-level functions, resolved against mDevice.
#define GET_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(VK_NULL_HANDLE, "vk" #F)
#define GET_INST_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(mInstance, "vk" #F)
#define GET_DEV_PROC(F) m##F = (PFN_vk##F)vkGetDeviceProcAddr(mDevice, "vk" #F)
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050057
// Tears down every Vulkan object owned by the manager in dependency order
// (command pool, then device, then instance) and resets cached state so that
// initialize() can be called again afterwards.
void VulkanManager::destroy() {
    // We don't need to explicitly free the command buffer since it automatically gets freed when we
    // delete the VkCommandPool below.
    mDummyCB = VK_NULL_HANDLE;

    if (VK_NULL_HANDLE != mCommandPool) {
        mDestroyCommandPool(mDevice, mCommandPool, nullptr);
        mCommandPool = VK_NULL_HANDLE;
    }

    if (mDevice != VK_NULL_HANDLE) {
        // Drain all in-flight GPU work before the device goes away.
        mDeviceWaitIdle(mDevice);
        mDestroyDevice(mDevice, nullptr);
    }

    if (mInstance != VK_NULL_HANDLE) {
        mDestroyInstance(mInstance, nullptr);
    }

    mGraphicsQueue = VK_NULL_HANDLE;
    mPresentQueue = VK_NULL_HANDLE;
    mDevice = VK_NULL_HANDLE;
    mPhysicalDevice = VK_NULL_HANDLE;
    mInstance = VK_NULL_HANDLE;
    mInstanceExtensions.clear();
    mDeviceExtensions.clear();
    // The feature structs chained off mPhysicalDeviceFeatures2 were malloc'd in
    // setupDevice(); release them before clearing the struct itself.
    free_features_extensions_structs(mPhysicalDeviceFeatures2);
    mPhysicalDeviceFeatures2 = {};
}
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050087
// Creates the VkInstance and VkDevice used by HWUI and loads all function
// pointers the manager needs.
//   grExtensions - receives the set of enabled instance/device extensions
//                  (handed to Skia later via GrVkBackendContext).
//   features     - receives the physical-device feature chain; the chained
//                  extension structs are malloc'd here and must later be freed
//                  with free_features_extensions_structs() (done in destroy()).
// Every failure path is fatal (LOG_ALWAYS_FATAL*): HWUI cannot run without a
// working Vulkan device.
void VulkanManager::setupDevice(GrVkExtensions& grExtensions, VkPhysicalDeviceFeatures2& features) {
    VkResult err;

    constexpr VkApplicationInfo app_info = {
        VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
        nullptr, // pNext
        "android framework", // pApplicationName
        0, // applicationVersion
        "android framework", // pEngineName
        0, // engineVerison
        mAPIVersion, // apiVersion
    };

    {
        // Enumerate and enable every available instance extension.
        GET_PROC(EnumerateInstanceExtensionProperties);

        uint32_t extensionCount = 0;
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        std::unique_ptr<VkExtensionProperties[]> extensions(
                new VkExtensionProperties[extensionCount]);
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions.get());
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        bool hasKHRSurfaceExtension = false;
        bool hasKHRAndroidSurfaceExtension = false;
        for (uint32_t i = 0; i < extensionCount; ++i) {
            // NOTE(review): this stores a pointer into `extensions`, which is
            // freed when this scope ends, yet mInstanceExtensions is read later
            // (instance_create below, grExtensions.init, getVkFunctorInitParams).
            // Looks like a dangling-pointer hazard — fixing it requires keeping
            // the VkExtensionProperties array alive as a member; confirm against
            // the header/later AOSP revisions.
            mInstanceExtensions.push_back(extensions[i].extensionName);
            if (!strcmp(extensions[i].extensionName, VK_KHR_SURFACE_EXTENSION_NAME)) {
                hasKHRSurfaceExtension = true;
            }
            if (!strcmp(extensions[i].extensionName,VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
                hasKHRAndroidSurfaceExtension = true;
            }
        }
        // Both surface extensions are mandatory for rendering into an ANativeWindow.
        LOG_ALWAYS_FATAL_IF(!hasKHRSurfaceExtension || !hasKHRAndroidSurfaceExtension);
    }

    const VkInstanceCreateInfo instance_create = {
        VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
        nullptr, // pNext
        0, // flags
        &app_info, // pApplicationInfo
        0, // enabledLayerNameCount
        nullptr, // ppEnabledLayerNames
        (uint32_t) mInstanceExtensions.size(), // enabledExtensionNameCount
        mInstanceExtensions.data(), // ppEnabledExtensionNames
    };

    GET_PROC(CreateInstance);
    err = mCreateInstance(&instance_create, nullptr, &mInstance);
    LOG_ALWAYS_FATAL_IF(err < 0);

    // Instance-level entry points can only be loaded after mInstance exists.
    GET_INST_PROC(DestroyInstance);
    GET_INST_PROC(EnumeratePhysicalDevices);
    GET_INST_PROC(GetPhysicalDeviceProperties);
    GET_INST_PROC(GetPhysicalDeviceQueueFamilyProperties);
    GET_INST_PROC(GetPhysicalDeviceFeatures2);
    GET_INST_PROC(CreateDevice);
    GET_INST_PROC(EnumerateDeviceExtensionProperties);
    GET_INST_PROC(CreateAndroidSurfaceKHR);
    GET_INST_PROC(DestroySurfaceKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfaceSupportKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfacePresentModesKHR);

    uint32_t gpuCount;
    LOG_ALWAYS_FATAL_IF(mEnumeratePhysicalDevices(mInstance, &gpuCount, nullptr));
    LOG_ALWAYS_FATAL_IF(!gpuCount);
    // Just returning the first physical device instead of getting the whole array. Since there
    // should only be one device on android.
    gpuCount = 1;
    err = mEnumeratePhysicalDevices(mInstance, &gpuCount, &mPhysicalDevice);
    // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
    LOG_ALWAYS_FATAL_IF(err && VK_INCOMPLETE != err);

    // HWUI requires a Vulkan 1.1 capable physical device.
    VkPhysicalDeviceProperties physDeviceProperties;
    mGetPhysicalDeviceProperties(mPhysicalDevice, &physDeviceProperties);
    LOG_ALWAYS_FATAL_IF(physDeviceProperties.apiVersion < VK_MAKE_VERSION(1, 1, 0));

    // query to get the initial queue props size
    uint32_t queueCount;
    mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, nullptr);
    LOG_ALWAYS_FATAL_IF(!queueCount);

    // now get the actual queue props
    std::unique_ptr<VkQueueFamilyProperties[]> queueProps(new VkQueueFamilyProperties[queueCount]);
    mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, queueProps.get());

    // iterate to find the graphics queue
    mGraphicsQueueIndex = queueCount;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            mGraphicsQueueIndex = i;
            break;
        }
    }
    LOG_ALWAYS_FATAL_IF(mGraphicsQueueIndex == queueCount);

    // All physical devices and queue families on Android must be capable of
    // presentation with any native window. So just use the first one.
    mPresentQueueIndex = 0;

    {
        // Enumerate and enable every available device extension.
        uint32_t extensionCount = 0;
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                nullptr);
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        std::unique_ptr<VkExtensionProperties[]> extensions(
                new VkExtensionProperties[extensionCount]);
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                extensions.get());
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        bool hasKHRSwapchainExtension = false;
        for (uint32_t i = 0; i < extensionCount; ++i) {
            // NOTE(review): same dangling-pointer hazard as mInstanceExtensions
            // above — extensionName points into a buffer freed at scope end.
            mDeviceExtensions.push_back(extensions[i].extensionName);
            if (!strcmp(extensions[i].extensionName, VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
                hasKHRSwapchainExtension = true;
            }
        }
        LOG_ALWAYS_FATAL_IF(!hasKHRSwapchainExtension);
    }

    // Resolver used by Skia's GrVkExtensions: prefer device-level entry points
    // when a device is supplied, otherwise fall back to instance-level lookup.
    auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return vkGetDeviceProcAddr(device, proc_name);
        }
        return vkGetInstanceProcAddr(instance, proc_name);
    };
    grExtensions.init(getProc, mInstance, mPhysicalDevice, mInstanceExtensions.size(),
            mInstanceExtensions.data(), mDeviceExtensions.size(), mDeviceExtensions.data());

    // External semaphore FDs are required for fence/semaphore interop with HWC.
    LOG_ALWAYS_FATAL_IF(!grExtensions.hasExtension(VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME, 1));

    memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
    features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    features.pNext = nullptr;

    // Setup all extension feature structs we may want to use.
    // tailPNext always points at the pNext slot of the last struct in the chain.
    void** tailPNext = &features.pNext;

    if (grExtensions.hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
        VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend;
        // malloc'd (not new'd) so free_features_extensions_structs() can free()
        // the whole chain uniformly.
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) malloc(
                sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
        LOG_ALWAYS_FATAL_IF(!blend);
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
        blend->pNext = nullptr;
        *tailPNext = blend;
        tailPNext = &blend->pNext;
    }

    VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature;
    ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*) malloc(
            sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
    LOG_ALWAYS_FATAL_IF(!ycbcrFeature);
    ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
    ycbcrFeature->pNext = nullptr;
    *tailPNext = ycbcrFeature;
    tailPNext = &ycbcrFeature->pNext;

    // query to get the physical device features
    mGetPhysicalDeviceFeatures2(mPhysicalDevice, &features);
    // this looks like it would slow things down,
    // and we can't depend on it on all platforms
    features.features.robustBufferAccess = VK_FALSE;

    float queuePriorities[1] = { 0.0 };

    void* queueNextPtr = nullptr;

    VkDeviceQueueGlobalPriorityCreateInfoEXT queuePriorityCreateInfo;

    // If a non-default context priority was requested and the global-priority
    // extension is available, chain the priority info into the queue create info.
    if (Properties::contextPriority != 0
            && grExtensions.hasExtension(VK_EXT_GLOBAL_PRIORITY_EXTENSION_NAME, 2)) {
        memset(&queuePriorityCreateInfo, 0, sizeof(VkDeviceQueueGlobalPriorityCreateInfoEXT));
        queuePriorityCreateInfo.sType =
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT;
        queuePriorityCreateInfo.pNext = nullptr;
        // Properties::contextPriority carries EGL priority constants; map them
        // to the Vulkan equivalents.
        switch (Properties::contextPriority) {
            case EGL_CONTEXT_PRIORITY_LOW_IMG:
                queuePriorityCreateInfo.globalPriority = VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT;
                break;
            case EGL_CONTEXT_PRIORITY_MEDIUM_IMG:
                queuePriorityCreateInfo.globalPriority = VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT;
                break;
            case EGL_CONTEXT_PRIORITY_HIGH_IMG:
                queuePriorityCreateInfo.globalPriority = VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT;
                break;
            default:
                LOG_ALWAYS_FATAL("Unsupported context priority");
         }
        queueNextPtr = &queuePriorityCreateInfo;
    }

    const VkDeviceQueueCreateInfo queueInfo[2] = {
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            queueNextPtr, // pNext
            0, // VkDeviceQueueCreateFlags
            mGraphicsQueueIndex, // queueFamilyIndex
            1, // queueCount
            queuePriorities, // pQueuePriorities
        },
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            queueNextPtr, // pNext
            0, // VkDeviceQueueCreateFlags
            mPresentQueueIndex, // queueFamilyIndex
            1, // queueCount
            queuePriorities, // pQueuePriorities
        }
    };
    // Only request a second queue when present and graphics use distinct families.
    uint32_t queueInfoCount = (mPresentQueueIndex != mGraphicsQueueIndex) ? 2 : 1;

    const VkDeviceCreateInfo deviceInfo = {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
        &features, // pNext
        0, // VkDeviceCreateFlags
        queueInfoCount, // queueCreateInfoCount
        queueInfo, // pQueueCreateInfos
        0, // layerCount
        nullptr, // ppEnabledLayerNames
        (uint32_t) mDeviceExtensions.size(), // extensionCount
        mDeviceExtensions.data(), // ppEnabledExtensionNames
        nullptr, // ppEnabledFeatures
    };

    LOG_ALWAYS_FATAL_IF(mCreateDevice(mPhysicalDevice, &deviceInfo, nullptr, &mDevice));

    // Device-level entry points can only be loaded after mDevice exists.
    // (GetDeviceQueue and DeviceWaitIdle appear twice below — redundant but harmless.)
    GET_DEV_PROC(GetDeviceQueue);
    GET_DEV_PROC(DeviceWaitIdle);
    GET_DEV_PROC(DestroyDevice);
    GET_DEV_PROC(CreateSwapchainKHR);
    GET_DEV_PROC(DestroySwapchainKHR);
    GET_DEV_PROC(GetSwapchainImagesKHR);
    GET_DEV_PROC(AcquireNextImageKHR);
    GET_DEV_PROC(QueuePresentKHR);
    GET_DEV_PROC(CreateCommandPool);
    GET_DEV_PROC(DestroyCommandPool);
    GET_DEV_PROC(AllocateCommandBuffers);
    GET_DEV_PROC(FreeCommandBuffers);
    GET_DEV_PROC(ResetCommandBuffer);
    GET_DEV_PROC(BeginCommandBuffer);
    GET_DEV_PROC(EndCommandBuffer);
    GET_DEV_PROC(CmdPipelineBarrier);
    GET_DEV_PROC(GetDeviceQueue);
    GET_DEV_PROC(QueueSubmit);
    GET_DEV_PROC(QueueWaitIdle);
    GET_DEV_PROC(DeviceWaitIdle);
    GET_DEV_PROC(CreateSemaphore);
    GET_DEV_PROC(DestroySemaphore);
    GET_DEV_PROC(ImportSemaphoreFdKHR);
    GET_DEV_PROC(GetSemaphoreFdKHR);
    GET_DEV_PROC(CreateFence);
    GET_DEV_PROC(DestroyFence);
    GET_DEV_PROC(WaitForFences);
    GET_DEV_PROC(ResetFences);
}
347
// One-time initialization of the Vulkan backend: verifies a 1.1+ loader,
// creates instance/device via setupDevice(), fetches the graphics and present
// queues, and creates the command pool plus the dummy command buffer.
// Idempotent: returns immediately if the device already exists.
void VulkanManager::initialize() {
    if (mDevice != VK_NULL_HANDLE) {
        return;
    }

    // vkEnumerateInstanceVersion only exists on Vulkan 1.1+ loaders; HWUI
    // requires at least 1.1.
    GET_PROC(EnumerateInstanceVersion);
    uint32_t instanceVersion;
    LOG_ALWAYS_FATAL_IF(mEnumerateInstanceVersion(&instanceVersion));
    LOG_ALWAYS_FATAL_IF(instanceVersion < VK_MAKE_VERSION(1, 1, 0));

    this->setupDevice(mExtensions, mPhysicalDeviceFeatures2);

    mGetDeviceQueue(mDevice, mGraphicsQueueIndex, 0, &mGraphicsQueue);

    // create the command pool for the command buffers
    if (VK_NULL_HANDLE == mCommandPool) {
        VkCommandPoolCreateInfo commandPoolInfo;
        memset(&commandPoolInfo, 0, sizeof(VkCommandPoolCreateInfo));
        commandPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
        // this needs to be on the render queue
        commandPoolInfo.queueFamilyIndex = mGraphicsQueueIndex;
        commandPoolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
        // res is only materialized in debug builds, for the SkASSERT below.
        SkDEBUGCODE(VkResult res =) mCreateCommandPool(mDevice, &commandPoolInfo, nullptr,
                &mCommandPool);
        SkASSERT(VK_SUCCESS == res);
    }
    LOG_ALWAYS_FATAL_IF(mCommandPool == VK_NULL_HANDLE);

    if (!setupDummyCommandBuffer()) {
        this->destroy();
        // Pass through will crash on next line.
    }
    LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);

    mGetDeviceQueue(mDevice, mPresentQueueIndex, 0, &mPresentQueue);

    if (Properties::enablePartialUpdates && Properties::useBufferAge) {
        mSwapBehavior = SwapBehavior::BufferAge;
    }
}
388
Stan Iliev898123b2019-02-14 14:57:44 -0500389sk_sp<GrContext> VulkanManager::createContext(const GrContextOptions& options) {
Stan Iliev981afe72019-02-13 14:24:33 -0500390 auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
391 if (device != VK_NULL_HANDLE) {
392 return vkGetDeviceProcAddr(device, proc_name);
393 }
394 return vkGetInstanceProcAddr(instance, proc_name);
395 };
396
397 GrVkBackendContext backendContext;
398 backendContext.fInstance = mInstance;
399 backendContext.fPhysicalDevice = mPhysicalDevice;
400 backendContext.fDevice = mDevice;
401 backendContext.fQueue = mGraphicsQueue;
402 backendContext.fGraphicsQueueIndex = mGraphicsQueueIndex;
403 backendContext.fMaxAPIVersion = mAPIVersion;
404 backendContext.fVkExtensions = &mExtensions;
405 backendContext.fDeviceFeatures2 = &mPhysicalDeviceFeatures2;
406 backendContext.fGetProc = std::move(getProc);
407
408 return GrContext::MakeVulkan(backendContext, options);
409}
410
// Packages the manager's Vulkan handles and enabled extension lists into the
// struct consumed by WebView/VkFunctor drawing. The returned struct contains
// borrowed pointers (extension name arrays, &mPhysicalDeviceFeatures2) that
// remain valid only while this VulkanManager is alive and initialized.
VkFunctorInitParams VulkanManager::getVkFunctorInitParams() const {
    return VkFunctorInitParams{
            .instance = mInstance,
            .physical_device = mPhysicalDevice,
            .device = mDevice,
            .queue = mGraphicsQueue,
            .graphics_queue_index = mGraphicsQueueIndex,
            .api_version = mAPIVersion,
            .enabled_instance_extension_names = mInstanceExtensions.data(),
            .enabled_instance_extension_names_length =
                    static_cast<uint32_t>(mInstanceExtensions.size()),
            .enabled_device_extension_names = mDeviceExtensions.data(),
            .enabled_device_extension_names_length =
                    static_cast<uint32_t>(mDeviceExtensions.size()),
            .device_features_2 = &mPhysicalDeviceFeatures2,
    };
}
428
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500429// Returns the next BackbufferInfo to use for the next draw. The function will make sure all
430// previous uses have finished before returning.
431VulkanSurface::BackbufferInfo* VulkanManager::getAvailableBackbuffer(VulkanSurface* surface) {
432 SkASSERT(surface->mBackbuffers);
433
434 ++surface->mCurrentBackbufferIndex;
435 if (surface->mCurrentBackbufferIndex > surface->mImageCount) {
436 surface->mCurrentBackbufferIndex = 0;
437 }
438
John Reck1bcacfd2017-11-03 10:12:19 -0700439 VulkanSurface::BackbufferInfo* backbuffer =
440 surface->mBackbuffers + surface->mCurrentBackbufferIndex;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500441
442 // Before we reuse a backbuffer, make sure its fences have all signaled so that we can safely
443 // reuse its commands buffers.
Greg Daniel2ff202712018-06-14 11:50:10 -0400444 VkResult res = mWaitForFences(mDevice, 2, backbuffer->mUsageFences, true, UINT64_MAX);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500445 if (res != VK_SUCCESS) {
446 return nullptr;
447 }
448
449 return backbuffer;
450}
451
// Maps a swapchain pre-transform (the rotation/mirror the presentation engine
// applies) to the SkMatrix HWUI must pre-apply so content renders upright.
// width/height are the window dimensions used as translation offsets for the
// rotated/mirrored cases. Unknown transform bits are fatal.
static SkMatrix getPreTransformMatrix(int width, int height,
                                      VkSurfaceTransformFlagBitsKHR transform) {
    switch (transform) {
        case VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR:
            return SkMatrix::I();
        case VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR:
            return SkMatrix::MakeAll(0, -1, height, 1, 0, 0, 0, 0, 1);
        case VK_SURFACE_TRANSFORM_ROTATE_180_BIT_KHR:
            return SkMatrix::MakeAll(-1, 0, width, 0, -1, height, 0, 0, 1);
        case VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR:
            return SkMatrix::MakeAll(0, 1, 0, -1, 0, width, 0, 0, 1);
        case VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_BIT_KHR:
            return SkMatrix::MakeAll(-1, 0, width, 0, 1, 0, 0, 0, 1);
        case VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR:
            return SkMatrix::MakeAll(0, -1, height, -1, 0, width, 0, 0, 1);
        case VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_180_BIT_KHR:
            return SkMatrix::MakeAll(1, 0, 0, 0, -1, height, 0, 0, 1);
        case VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR:
            return SkMatrix::MakeAll(0, 1, 0, 1, 0, 0, 0, 0, 1);
        default:
            LOG_ALWAYS_FATAL("Unsupported pre transform of swapchain.");
    }
    // Unreachable; keeps compilers that don't model LOG_ALWAYS_FATAL happy.
    return SkMatrix::I();
}
476
477
// Acquires the next swapchain image and returns the SkSurface to render into,
// or nullptr on unrecoverable acquire failure. Handles window resize (surface
// recreation), out-of-date/suboptimal swapchains (swapchain recreation), and
// records/submits the present->color-attachment layout transition barrier.
// On success also updates *surfaceOut if the VulkanSurface was recreated.
SkSurface* VulkanManager::getBackbufferSurface(VulkanSurface** surfaceOut) {
    // Recreate VulkanSurface, if ANativeWindow has been resized.
    VulkanSurface* surface = *surfaceOut;
    int windowWidth = 0, windowHeight = 0;
    ANativeWindow* window = surface->mNativeWindow;
    window->query(window, NATIVE_WINDOW_WIDTH, &windowWidth);
    window->query(window, NATIVE_WINDOW_HEIGHT, &windowHeight);
    if (windowWidth != surface->mWindowWidth || windowHeight != surface->mWindowHeight) {
        // Capture the creation parameters before destroySurface deletes `surface`.
        ColorMode colorMode = surface->mColorMode;
        sk_sp<SkColorSpace> colorSpace = surface->mColorSpace;
        SkColorType colorType = surface->mColorType;
        GrContext* grContext = surface->mGrContext;
        destroySurface(surface);
        *surfaceOut = createSurface(window, colorMode, colorSpace, colorType, grContext);
        surface = *surfaceOut;
        if (!surface) {
            return nullptr;
        }
    }

    VulkanSurface::BackbufferInfo* backbuffer = getAvailableBackbuffer(surface);
    SkASSERT(backbuffer);

    VkResult res;

    // Fences were waited on in getAvailableBackbuffer; reset them for reuse.
    res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
    SkASSERT(VK_SUCCESS == res);

    // The acquire will signal the attached mAcquireSemaphore. We use this to know the image has
    // finished presenting and that it is safe to begin sending new commands to the returned image.
    res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
                               backbuffer->mAcquireSemaphore, VK_NULL_HANDLE,
                               &backbuffer->mImageIndex);

    if (VK_ERROR_SURFACE_LOST_KHR == res) {
        // need to figure out how to create a new vkSurface without the platformData*
        // maybe use attach somehow? but need a Window
        return nullptr;
    }
    if (VK_ERROR_OUT_OF_DATE_KHR == res || VK_SUBOPTIMAL_KHR == res) {
        // tear swapchain down and try again
        if (!createSwapchain(surface)) {
            return nullptr;
        }
        backbuffer = getAvailableBackbuffer(surface);
        res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
        SkASSERT(VK_SUCCESS == res);

        // acquire the image
        res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
                                   backbuffer->mAcquireSemaphore, VK_NULL_HANDLE,
                                   &backbuffer->mImageIndex);

        if (VK_SUCCESS != res) {
            return nullptr;
        }
    }

    // set up layout transfer from initial to color attachment
    VkImageLayout layout = surface->mImageInfos[backbuffer->mImageIndex].mImageLayout;
    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout || VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout);
    VkPipelineStageFlags srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkAccessFlags srcAccessMask = 0;
    VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
            VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;

    // Barrier also transfers queue-family ownership from the present queue back
    // to the graphics queue.
    VkImageMemoryBarrier imageMemoryBarrier = {
            VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,    // sType
            NULL,                                      // pNext
            srcAccessMask,                             // outputMask
            dstAccessMask,                             // inputMask
            layout,                                    // oldLayout
            VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,  // newLayout
            mPresentQueueIndex,                        // srcQueueFamilyIndex
            mGraphicsQueueIndex,                       // dstQueueFamilyIndex
            surface->mImages[backbuffer->mImageIndex], // image
            {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}    // subresourceRange
    };
    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[0], 0);

    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[0], &info);

    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[0], srcStageMask, dstStageMask, 0, 0,
                        nullptr, 0, nullptr, 1, &imageMemoryBarrier);

    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[0]);

    VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 1;
    // Wait to make sure aquire semaphore set above has signaled.
    submitInfo.pWaitSemaphores = &backbuffer->mAcquireSemaphore;
    submitInfo.pWaitDstStageMask = &waitDstStageFlags;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[0];
    submitInfo.signalSemaphoreCount = 0;

    // Attach first fence to submission here so we can track when the command buffer finishes.
    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[0]);

    // We need to notify Skia that we changed the layout of the wrapped VkImage
    sk_sp<SkSurface> skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface;
    GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
            SkSurface::kFlushRead_BackendHandleAccess);
    if (!backendRT.isValid()) {
        SkASSERT(backendRT.isValid());
        return nullptr;
    }
    backendRT.setVkImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);

    // Refresh the pre-transform matrix; the swapchain transform may have
    // changed if the swapchain was recreated above.
    surface->mPreTransform = getPreTransformMatrix(surface->windowWidth(),
                                                   surface->windowHeight(),
                                                   surface->mTransform);

    surface->mBackbuffer = std::move(skSurface);
    return surface->mBackbuffer.get();
}
603
604void VulkanManager::destroyBuffers(VulkanSurface* surface) {
605 if (surface->mBackbuffers) {
606 for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400607 mWaitForFences(mDevice, 2, surface->mBackbuffers[i].mUsageFences, true, UINT64_MAX);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500608 surface->mBackbuffers[i].mImageIndex = -1;
Greg Daniel2ff202712018-06-14 11:50:10 -0400609 mDestroySemaphore(mDevice, surface->mBackbuffers[i].mAcquireSemaphore, nullptr);
610 mDestroySemaphore(mDevice, surface->mBackbuffers[i].mRenderSemaphore, nullptr);
611 mFreeCommandBuffers(mDevice, mCommandPool, 2,
612 surface->mBackbuffers[i].mTransitionCmdBuffers);
613 mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[0], 0);
614 mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[1], 0);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500615 }
616 }
617
618 delete[] surface->mBackbuffers;
619 surface->mBackbuffers = nullptr;
Greg Danielcd558522016-11-17 13:31:40 -0500620 delete[] surface->mImageInfos;
621 surface->mImageInfos = nullptr;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500622 delete[] surface->mImages;
623 surface->mImages = nullptr;
624}
625
626void VulkanManager::destroySurface(VulkanSurface* surface) {
627 // Make sure all submit commands have finished before starting to destroy objects.
628 if (VK_NULL_HANDLE != mPresentQueue) {
629 mQueueWaitIdle(mPresentQueue);
630 }
Greg Daniel2ff202712018-06-14 11:50:10 -0400631 mDeviceWaitIdle(mDevice);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500632
633 destroyBuffers(surface);
634
635 if (VK_NULL_HANDLE != surface->mSwapchain) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400636 mDestroySwapchainKHR(mDevice, surface->mSwapchain, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500637 surface->mSwapchain = VK_NULL_HANDLE;
638 }
639
640 if (VK_NULL_HANDLE != surface->mVkSurface) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400641 mDestroySurfaceKHR(mInstance, surface->mVkSurface, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500642 surface->mVkSurface = VK_NULL_HANDLE;
643 }
644 delete surface;
645}
646
647void VulkanManager::createBuffers(VulkanSurface* surface, VkFormat format, VkExtent2D extent) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400648 mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500649 SkASSERT(surface->mImageCount);
650 surface->mImages = new VkImage[surface->mImageCount];
Greg Daniel2ff202712018-06-14 11:50:10 -0400651 mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, surface->mImages);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500652
653 SkSurfaceProps props(0, kUnknown_SkPixelGeometry);
654
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500655 // set up initial image layouts and create surfaces
Greg Danielcd558522016-11-17 13:31:40 -0500656 surface->mImageInfos = new VulkanSurface::ImageInfo[surface->mImageCount];
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500657 for (uint32_t i = 0; i < surface->mImageCount; ++i) {
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500658 GrVkImageInfo info;
659 info.fImage = surface->mImages[i];
Greg Danielc9a89452018-02-23 13:16:12 -0500660 info.fAlloc = GrVkAlloc();
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500661 info.fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
662 info.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
663 info.fFormat = format;
664 info.fLevelCount = 1;
665
Greg Danielac2d2322017-07-12 11:30:15 -0400666 GrBackendRenderTarget backendRT(extent.width, extent.height, 0, 0, info);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500667
Greg Danielcd558522016-11-17 13:31:40 -0500668 VulkanSurface::ImageInfo& imageInfo = surface->mImageInfos[i];
John Reck1bcacfd2017-11-03 10:12:19 -0700669 imageInfo.mSurface = SkSurface::MakeFromBackendRenderTarget(
Stan Iliev981afe72019-02-13 14:24:33 -0500670 surface->mGrContext, backendRT, kTopLeft_GrSurfaceOrigin,
Peiyong Lin3bff1352018-12-11 07:56:07 -0800671 surface->mColorType, surface->mColorSpace, &props);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500672 }
673
674 SkASSERT(mCommandPool != VK_NULL_HANDLE);
675
676 // set up the backbuffers
677 VkSemaphoreCreateInfo semaphoreInfo;
678 memset(&semaphoreInfo, 0, sizeof(VkSemaphoreCreateInfo));
679 semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
680 semaphoreInfo.pNext = nullptr;
681 semaphoreInfo.flags = 0;
682 VkCommandBufferAllocateInfo commandBuffersInfo;
683 memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
684 commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
685 commandBuffersInfo.pNext = nullptr;
686 commandBuffersInfo.commandPool = mCommandPool;
687 commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
688 commandBuffersInfo.commandBufferCount = 2;
689 VkFenceCreateInfo fenceInfo;
690 memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
691 fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
692 fenceInfo.pNext = nullptr;
693 fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;
694
695 // we create one additional backbuffer structure here, because we want to
696 // give the command buffers they contain a chance to finish before we cycle back
697 surface->mBackbuffers = new VulkanSurface::BackbufferInfo[surface->mImageCount + 1];
698 for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
699 SkDEBUGCODE(VkResult res);
700 surface->mBackbuffers[i].mImageIndex = -1;
Greg Daniel2ff202712018-06-14 11:50:10 -0400701 SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
John Reck1bcacfd2017-11-03 10:12:19 -0700702 &surface->mBackbuffers[i].mAcquireSemaphore);
Greg Daniel2ff202712018-06-14 11:50:10 -0400703 SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
John Reck1bcacfd2017-11-03 10:12:19 -0700704 &surface->mBackbuffers[i].mRenderSemaphore);
Greg Daniel2ff202712018-06-14 11:50:10 -0400705 SkDEBUGCODE(res =) mAllocateCommandBuffers(mDevice, &commandBuffersInfo,
John Reck1bcacfd2017-11-03 10:12:19 -0700706 surface->mBackbuffers[i].mTransitionCmdBuffers);
Greg Daniel2ff202712018-06-14 11:50:10 -0400707 SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
John Reck1bcacfd2017-11-03 10:12:19 -0700708 &surface->mBackbuffers[i].mUsageFences[0]);
Greg Daniel2ff202712018-06-14 11:50:10 -0400709 SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
John Reck1bcacfd2017-11-03 10:12:19 -0700710 &surface->mBackbuffers[i].mUsageFences[1]);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500711 SkASSERT(VK_SUCCESS == res);
712 }
713 surface->mCurrentBackbufferIndex = surface->mImageCount;
714}
715
716bool VulkanManager::createSwapchain(VulkanSurface* surface) {
717 // check for capabilities
718 VkSurfaceCapabilitiesKHR caps;
Greg Daniel2ff202712018-06-14 11:50:10 -0400719 VkResult res = mGetPhysicalDeviceSurfaceCapabilitiesKHR(mPhysicalDevice,
John Reck1bcacfd2017-11-03 10:12:19 -0700720 surface->mVkSurface, &caps);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500721 if (VK_SUCCESS != res) {
722 return false;
723 }
724
725 uint32_t surfaceFormatCount;
Greg Daniel2ff202712018-06-14 11:50:10 -0400726 res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
John Reck1bcacfd2017-11-03 10:12:19 -0700727 &surfaceFormatCount, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500728 if (VK_SUCCESS != res) {
729 return false;
730 }
731
Ben Wagnereec27d52017-01-11 15:32:07 -0500732 FatVector<VkSurfaceFormatKHR, 4> surfaceFormats(surfaceFormatCount);
Greg Daniel2ff202712018-06-14 11:50:10 -0400733 res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
John Reck1bcacfd2017-11-03 10:12:19 -0700734 &surfaceFormatCount, surfaceFormats.data());
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500735 if (VK_SUCCESS != res) {
736 return false;
737 }
738
739 uint32_t presentModeCount;
Greg Daniel2ff202712018-06-14 11:50:10 -0400740 res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
John Reck1bcacfd2017-11-03 10:12:19 -0700741 surface->mVkSurface, &presentModeCount, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500742 if (VK_SUCCESS != res) {
743 return false;
744 }
745
Ben Wagnereec27d52017-01-11 15:32:07 -0500746 FatVector<VkPresentModeKHR, VK_PRESENT_MODE_RANGE_SIZE_KHR> presentModes(presentModeCount);
Greg Daniel2ff202712018-06-14 11:50:10 -0400747 res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
John Reck1bcacfd2017-11-03 10:12:19 -0700748 surface->mVkSurface, &presentModeCount,
749 presentModes.data());
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500750 if (VK_SUCCESS != res) {
751 return false;
752 }
753
Greg Danielc4076782019-01-08 16:01:18 -0500754 if (!SkToBool(caps.supportedTransforms & VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR)) {
755 return false;
756 }
757 VkSurfaceTransformFlagBitsKHR transform;
758 if (SkToBool(caps.supportedTransforms & caps.currentTransform) &&
759 !SkToBool(caps.currentTransform & VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR)) {
760 transform = caps.currentTransform;
761 } else {
762 transform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
763 }
764
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500765 VkExtent2D extent = caps.currentExtent;
766 // clamp width; to handle currentExtent of -1 and protect us from broken hints
767 if (extent.width < caps.minImageExtent.width) {
768 extent.width = caps.minImageExtent.width;
769 }
770 SkASSERT(extent.width <= caps.maxImageExtent.width);
771 // clamp height
772 if (extent.height < caps.minImageExtent.height) {
773 extent.height = caps.minImageExtent.height;
774 }
775 SkASSERT(extent.height <= caps.maxImageExtent.height);
Greg Danielc4076782019-01-08 16:01:18 -0500776
777 VkExtent2D swapExtent = extent;
778 if (transform == VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR ||
779 transform == VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR ||
780 transform == VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR ||
781 transform == VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR) {
782 swapExtent.width = extent.height;
783 swapExtent.height = extent.width;
784 }
785
Stan Iliev305e13a2018-11-13 11:14:48 -0500786 surface->mWindowWidth = extent.width;
787 surface->mWindowHeight = extent.height;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500788
Greg Daniel4d5bf2a2018-12-04 12:17:28 -0500789 uint32_t imageCount = std::max<uint32_t>(3, caps.minImageCount);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500790 if (caps.maxImageCount > 0 && imageCount > caps.maxImageCount) {
791 // Application must settle for fewer images than desired:
792 imageCount = caps.maxImageCount;
793 }
794
795 // Currently Skia requires the images to be color attchments and support all transfer
796 // operations.
797 VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
798 VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
799 VK_IMAGE_USAGE_TRANSFER_DST_BIT;
800 SkASSERT((caps.supportedUsageFlags & usageFlags) == usageFlags);
Greg Danielc4076782019-01-08 16:01:18 -0500801
John Reck1bcacfd2017-11-03 10:12:19 -0700802 SkASSERT(caps.supportedCompositeAlpha &
803 (VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR | VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR));
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500804 VkCompositeAlphaFlagBitsKHR composite_alpha =
John Reck1bcacfd2017-11-03 10:12:19 -0700805 (caps.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR)
806 ? VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR
807 : VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500808
Stan Iliev79351f32018-09-19 14:23:49 -0400809 VkFormat surfaceFormat = VK_FORMAT_R8G8B8A8_UNORM;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500810 VkColorSpaceKHR colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
Peiyong Lin3bff1352018-12-11 07:56:07 -0800811 if (surface->mColorType == SkColorType::kRGBA_F16_SkColorType) {
Stan Iliev79351f32018-09-19 14:23:49 -0400812 surfaceFormat = VK_FORMAT_R16G16B16A16_SFLOAT;
Stan Iliev79351f32018-09-19 14:23:49 -0400813 }
Peiyong Lin3bff1352018-12-11 07:56:07 -0800814
815 if (surface->mColorMode == ColorMode::WideColorGamut) {
Brian Osmane0cf5972019-01-23 10:41:20 -0500816 skcms_Matrix3x3 surfaceGamut;
817 LOG_ALWAYS_FATAL_IF(!surface->mColorSpace->toXYZD50(&surfaceGamut),
818 "Could not get gamut matrix from color space");
819 if (memcmp(&surfaceGamut, &SkNamedGamut::kSRGB, sizeof(surfaceGamut)) == 0) {
Peiyong Lin3bff1352018-12-11 07:56:07 -0800820 colorSpace = VK_COLOR_SPACE_EXTENDED_SRGB_NONLINEAR_EXT;
Brian Osmane0cf5972019-01-23 10:41:20 -0500821 } else if (memcmp(&surfaceGamut, &SkNamedGamut::kDCIP3, sizeof(surfaceGamut)) == 0) {
Peiyong Lin3bff1352018-12-11 07:56:07 -0800822 colorSpace = VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT;
823 } else {
824 LOG_ALWAYS_FATAL("Unreachable: unsupported wide color space.");
825 }
826 }
827
Stan Iliev79351f32018-09-19 14:23:49 -0400828 bool foundSurfaceFormat = false;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500829 for (uint32_t i = 0; i < surfaceFormatCount; ++i) {
Stan Iliev79351f32018-09-19 14:23:49 -0400830 if (surfaceFormat == surfaceFormats[i].format
831 && colorSpace == surfaceFormats[i].colorSpace) {
832 foundSurfaceFormat = true;
833 break;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500834 }
835 }
836
Stan Iliev79351f32018-09-19 14:23:49 -0400837 if (!foundSurfaceFormat) {
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500838 return false;
839 }
840
Greg Daniel8a2a7542018-10-04 13:46:55 -0400841 // FIFO is always available and will match what we do on GL so just pick that here.
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500842 VkPresentModeKHR mode = VK_PRESENT_MODE_FIFO_KHR;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500843
844 VkSwapchainCreateInfoKHR swapchainCreateInfo;
845 memset(&swapchainCreateInfo, 0, sizeof(VkSwapchainCreateInfoKHR));
846 swapchainCreateInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
847 swapchainCreateInfo.surface = surface->mVkSurface;
848 swapchainCreateInfo.minImageCount = imageCount;
849 swapchainCreateInfo.imageFormat = surfaceFormat;
850 swapchainCreateInfo.imageColorSpace = colorSpace;
Greg Danielc4076782019-01-08 16:01:18 -0500851 swapchainCreateInfo.imageExtent = swapExtent;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500852 swapchainCreateInfo.imageArrayLayers = 1;
853 swapchainCreateInfo.imageUsage = usageFlags;
854
Greg Daniel2ff202712018-06-14 11:50:10 -0400855 uint32_t queueFamilies[] = {mGraphicsQueueIndex, mPresentQueueIndex};
856 if (mGraphicsQueueIndex != mPresentQueueIndex) {
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500857 swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
858 swapchainCreateInfo.queueFamilyIndexCount = 2;
859 swapchainCreateInfo.pQueueFamilyIndices = queueFamilies;
860 } else {
861 swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
862 swapchainCreateInfo.queueFamilyIndexCount = 0;
863 swapchainCreateInfo.pQueueFamilyIndices = nullptr;
864 }
865
Greg Danielc4076782019-01-08 16:01:18 -0500866 swapchainCreateInfo.preTransform = transform;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500867 swapchainCreateInfo.compositeAlpha = composite_alpha;
868 swapchainCreateInfo.presentMode = mode;
869 swapchainCreateInfo.clipped = true;
870 swapchainCreateInfo.oldSwapchain = surface->mSwapchain;
871
Greg Daniel2ff202712018-06-14 11:50:10 -0400872 res = mCreateSwapchainKHR(mDevice, &swapchainCreateInfo, nullptr, &surface->mSwapchain);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500873 if (VK_SUCCESS != res) {
874 return false;
875 }
876
Greg Danielc4076782019-01-08 16:01:18 -0500877 surface->mTransform = transform;
878
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500879 // destroy the old swapchain
880 if (swapchainCreateInfo.oldSwapchain != VK_NULL_HANDLE) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400881 mDeviceWaitIdle(mDevice);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500882
883 destroyBuffers(surface);
884
Greg Daniel2ff202712018-06-14 11:50:10 -0400885 mDestroySwapchainKHR(mDevice, swapchainCreateInfo.oldSwapchain, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500886 }
887
Greg Danielc4076782019-01-08 16:01:18 -0500888 createBuffers(surface, surfaceFormat, swapExtent);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500889
Stan Ilievbc462582018-12-10 13:13:41 -0500890 // The window content is not updated (frozen) until a buffer of the window size is received.
891 // This prevents temporary stretching of the window after it is resized, but before the first
892 // buffer with new size is enqueued.
893 native_window_set_scaling_mode(surface->mNativeWindow, NATIVE_WINDOW_SCALING_MODE_FREEZE);
894
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500895 return true;
896}
897
Stan Iliev987a80c2018-12-04 10:07:21 -0500898VulkanSurface* VulkanManager::createSurface(ANativeWindow* window, ColorMode colorMode,
Peiyong Lin3bff1352018-12-11 07:56:07 -0800899 sk_sp<SkColorSpace> surfaceColorSpace,
Stan Iliev981afe72019-02-13 14:24:33 -0500900 SkColorType surfaceColorType,
901 GrContext* grContext) {
902 LOG_ALWAYS_FATAL_IF(!hasVkContext(), "Not initialized");
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500903 if (!window) {
904 return nullptr;
905 }
906
Peiyong Lin3bff1352018-12-11 07:56:07 -0800907 VulkanSurface* surface = new VulkanSurface(colorMode, window, surfaceColorSpace,
Stan Iliev981afe72019-02-13 14:24:33 -0500908 surfaceColorType, grContext);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500909
910 VkAndroidSurfaceCreateInfoKHR surfaceCreateInfo;
911 memset(&surfaceCreateInfo, 0, sizeof(VkAndroidSurfaceCreateInfoKHR));
912 surfaceCreateInfo.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
913 surfaceCreateInfo.pNext = nullptr;
914 surfaceCreateInfo.flags = 0;
915 surfaceCreateInfo.window = window;
916
Greg Daniel2ff202712018-06-14 11:50:10 -0400917 VkResult res = mCreateAndroidSurfaceKHR(mInstance, &surfaceCreateInfo, nullptr,
918 &surface->mVkSurface);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500919 if (VK_SUCCESS != res) {
920 delete surface;
921 return nullptr;
922 }
923
John Reck1bcacfd2017-11-03 10:12:19 -0700924 SkDEBUGCODE(VkBool32 supported; res = mGetPhysicalDeviceSurfaceSupportKHR(
Greg Daniel2ff202712018-06-14 11:50:10 -0400925 mPhysicalDevice, mPresentQueueIndex, surface->mVkSurface, &supported);
926 // All physical devices and queue families on Android must be capable of
927 // presentation with any native window.
928 SkASSERT(VK_SUCCESS == res && supported););
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500929
930 if (!createSwapchain(surface)) {
931 destroySurface(surface);
932 return nullptr;
933 }
934
935 return surface;
936}
937
938// Helper to know which src stage flags we need to set when transitioning to the present layout
Greg Daniel8a2a7542018-10-04 13:46:55 -0400939static VkPipelineStageFlags layoutToPipelineSrcStageFlags(const VkImageLayout layout) {
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500940 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
941 return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
942 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
943 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
944 return VK_PIPELINE_STAGE_TRANSFER_BIT;
Greg Daniel8a2a7542018-10-04 13:46:55 -0400945 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
946 return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
947 } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
948 VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout) {
949 return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
950 } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
951 return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500952 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
953 return VK_PIPELINE_STAGE_HOST_BIT;
954 }
955
956 SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
957 return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
958}
959
960// Helper to know which src access mask we need to set when transitioning to the present layout
961static VkAccessFlags layoutToSrcAccessMask(const VkImageLayout layout) {
962 VkAccessFlags flags = 0;
963 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
964 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
John Reck1bcacfd2017-11-03 10:12:19 -0700965 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT |
966 VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_HOST_WRITE_BIT |
967 VK_ACCESS_HOST_READ_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500968 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
969 flags = VK_ACCESS_HOST_WRITE_BIT;
970 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
971 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
972 } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
973 flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
974 } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
975 flags = VK_ACCESS_TRANSFER_WRITE_BIT;
976 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
977 flags = VK_ACCESS_TRANSFER_READ_BIT;
978 } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
979 flags = VK_ACCESS_SHADER_READ_BIT;
980 }
981 return flags;
982}
983
984void VulkanManager::swapBuffers(VulkanSurface* surface) {
Greg Daniel4f708872017-02-03 10:23:39 -0500985 if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
986 ATRACE_NAME("Finishing GPU work");
Greg Daniel2ff202712018-06-14 11:50:10 -0400987 mDeviceWaitIdle(mDevice);
Greg Daniel4f708872017-02-03 10:23:39 -0500988 }
989
Greg Daniel74ea2012017-11-10 11:32:58 -0500990 SkASSERT(surface->mBackbuffers);
John Reck1bcacfd2017-11-03 10:12:19 -0700991 VulkanSurface::BackbufferInfo* backbuffer =
992 surface->mBackbuffers + surface->mCurrentBackbufferIndex;
Greg Daniel1834a8c2018-04-12 12:22:43 -0400993
Greg Danielcd558522016-11-17 13:31:40 -0500994 SkSurface* skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface.get();
Greg Daniel1834a8c2018-04-12 12:22:43 -0400995 GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
996 SkSurface::kFlushRead_BackendHandleAccess);
997 SkASSERT(backendRT.isValid());
998
999 GrVkImageInfo imageInfo;
1000 SkAssertResult(backendRT.getVkImageInfo(&imageInfo));
1001
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001002 // Check to make sure we never change the actually wrapped image
Greg Daniel1834a8c2018-04-12 12:22:43 -04001003 SkASSERT(imageInfo.fImage == surface->mImages[backbuffer->mImageIndex]);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001004
1005 // We need to transition the image to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR and make sure that all
1006 // previous work is complete for before presenting. So we first add the necessary barrier here.
Greg Daniel1834a8c2018-04-12 12:22:43 -04001007 VkImageLayout layout = imageInfo.fImageLayout;
Greg Daniel8a2a7542018-10-04 13:46:55 -04001008 VkPipelineStageFlags srcStageMask = layoutToPipelineSrcStageFlags(layout);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001009 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
1010 VkAccessFlags srcAccessMask = layoutToSrcAccessMask(layout);
Greg Daniel8a2a7542018-10-04 13:46:55 -04001011 VkAccessFlags dstAccessMask = 0;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001012
1013 VkImageMemoryBarrier imageMemoryBarrier = {
John Reck1bcacfd2017-11-03 10:12:19 -07001014 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
1015 NULL, // pNext
1016 srcAccessMask, // outputMask
1017 dstAccessMask, // inputMask
1018 layout, // oldLayout
1019 VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, // newLayout
Greg Daniel2ff202712018-06-14 11:50:10 -04001020 mGraphicsQueueIndex, // srcQueueFamilyIndex
John Reck1bcacfd2017-11-03 10:12:19 -07001021 mPresentQueueIndex, // dstQueueFamilyIndex
1022 surface->mImages[backbuffer->mImageIndex], // image
1023 {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1} // subresourceRange
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001024 };
1025
1026 mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[1], 0);
1027 VkCommandBufferBeginInfo info;
1028 memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
1029 info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
1030 info.flags = 0;
1031 mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[1], &info);
John Reck1bcacfd2017-11-03 10:12:19 -07001032 mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[1], srcStageMask, dstStageMask, 0, 0,
1033 nullptr, 0, nullptr, 1, &imageMemoryBarrier);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001034 mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[1]);
1035
Greg Danielcd558522016-11-17 13:31:40 -05001036 surface->mImageInfos[backbuffer->mImageIndex].mImageLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001037
1038 // insert the layout transfer into the queue and wait on the acquire
1039 VkSubmitInfo submitInfo;
1040 memset(&submitInfo, 0, sizeof(VkSubmitInfo));
1041 submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
1042 submitInfo.waitSemaphoreCount = 0;
1043 submitInfo.pWaitDstStageMask = 0;
1044 submitInfo.commandBufferCount = 1;
1045 submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[1];
1046 submitInfo.signalSemaphoreCount = 1;
1047 // When this command buffer finishes we will signal this semaphore so that we know it is now
1048 // safe to present the image to the screen.
1049 submitInfo.pSignalSemaphores = &backbuffer->mRenderSemaphore;
1050
1051 // Attach second fence to submission here so we can track when the command buffer finishes.
Greg Daniel2ff202712018-06-14 11:50:10 -04001052 mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[1]);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001053
1054 // Submit present operation to present queue. We use a semaphore here to make sure all rendering
1055 // to the image is complete and that the layout has been change to present on the graphics
1056 // queue.
John Reck1bcacfd2017-11-03 10:12:19 -07001057 const VkPresentInfoKHR presentInfo = {
1058 VK_STRUCTURE_TYPE_PRESENT_INFO_KHR, // sType
1059 NULL, // pNext
1060 1, // waitSemaphoreCount
1061 &backbuffer->mRenderSemaphore, // pWaitSemaphores
1062 1, // swapchainCount
1063 &surface->mSwapchain, // pSwapchains
1064 &backbuffer->mImageIndex, // pImageIndices
1065 NULL // pResults
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001066 };
1067
1068 mQueuePresentKHR(mPresentQueue, &presentInfo);
1069
1070 surface->mBackbuffer.reset();
Greg Danielcd558522016-11-17 13:31:40 -05001071 surface->mImageInfos[backbuffer->mImageIndex].mLastUsed = surface->mCurrentTime;
1072 surface->mImageInfos[backbuffer->mImageIndex].mInvalid = false;
1073 surface->mCurrentTime++;
1074}
1075
1076int VulkanManager::getAge(VulkanSurface* surface) {
Greg Daniel74ea2012017-11-10 11:32:58 -05001077 SkASSERT(surface->mBackbuffers);
John Reck1bcacfd2017-11-03 10:12:19 -07001078 VulkanSurface::BackbufferInfo* backbuffer =
1079 surface->mBackbuffers + surface->mCurrentBackbufferIndex;
1080 if (mSwapBehavior == SwapBehavior::Discard ||
1081 surface->mImageInfos[backbuffer->mImageIndex].mInvalid) {
Greg Danielcd558522016-11-17 13:31:40 -05001082 return 0;
1083 }
1084 uint16_t lastUsed = surface->mImageInfos[backbuffer->mImageIndex].mLastUsed;
1085 return surface->mCurrentTime - lastUsed;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001086}
1087
Greg Daniel26e0dca2018-09-18 10:33:19 -04001088bool VulkanManager::setupDummyCommandBuffer() {
1089 if (mDummyCB != VK_NULL_HANDLE) {
1090 return true;
1091 }
1092
1093 VkCommandBufferAllocateInfo commandBuffersInfo;
1094 memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
1095 commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
1096 commandBuffersInfo.pNext = nullptr;
1097 commandBuffersInfo.commandPool = mCommandPool;
1098 commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
1099 commandBuffersInfo.commandBufferCount = 1;
1100
1101 VkResult err = mAllocateCommandBuffers(mDevice, &commandBuffersInfo, &mDummyCB);
1102 if (err != VK_SUCCESS) {
1103 // It is probably unnecessary to set this back to VK_NULL_HANDLE, but we set it anyways to
1104 // make sure the driver didn't set a value and then return a failure.
1105 mDummyCB = VK_NULL_HANDLE;
1106 return false;
1107 }
1108
1109 VkCommandBufferBeginInfo beginInfo;
1110 memset(&beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
1111 beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
1112 beginInfo.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
1113
1114 mBeginCommandBuffer(mDummyCB, &beginInfo);
1115 mEndCommandBuffer(mDummyCB);
1116 return true;
1117}
1118
Stan Iliev564ca3e2018-09-04 22:00:00 +00001119status_t VulkanManager::fenceWait(sp<Fence>& fence) {
Greg Daniel26e0dca2018-09-18 10:33:19 -04001120 if (!hasVkContext()) {
1121 ALOGE("VulkanManager::fenceWait: VkDevice not initialized");
1122 return INVALID_OPERATION;
1123 }
1124
Stan Iliev7a081272018-10-26 17:54:18 -04001125 // Block GPU on the fence.
1126 int fenceFd = fence->dup();
1127 if (fenceFd == -1) {
1128 ALOGE("VulkanManager::fenceWait: error dup'ing fence fd: %d", errno);
1129 return -errno;
Stan Iliev564ca3e2018-09-04 22:00:00 +00001130 }
Stan Iliev7a081272018-10-26 17:54:18 -04001131
1132 VkSemaphoreCreateInfo semaphoreInfo;
1133 semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
1134 semaphoreInfo.pNext = nullptr;
1135 semaphoreInfo.flags = 0;
1136 VkSemaphore semaphore;
1137 VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
1138 if (VK_SUCCESS != err) {
1139 ALOGE("Failed to create import semaphore, err: %d", err);
1140 return UNKNOWN_ERROR;
1141 }
1142 VkImportSemaphoreFdInfoKHR importInfo;
1143 importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
1144 importInfo.pNext = nullptr;
1145 importInfo.semaphore = semaphore;
1146 importInfo.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
1147 importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
1148 importInfo.fd = fenceFd;
1149
1150 err = mImportSemaphoreFdKHR(mDevice, &importInfo);
1151 if (VK_SUCCESS != err) {
1152 ALOGE("Failed to import semaphore, err: %d", err);
1153 return UNKNOWN_ERROR;
1154 }
1155
1156 LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);
1157
1158 VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
1159
1160 VkSubmitInfo submitInfo;
1161 memset(&submitInfo, 0, sizeof(VkSubmitInfo));
1162 submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
1163 submitInfo.waitSemaphoreCount = 1;
1164 // Wait to make sure aquire semaphore set above has signaled.
1165 submitInfo.pWaitSemaphores = &semaphore;
1166 submitInfo.pWaitDstStageMask = &waitDstStageFlags;
1167 submitInfo.commandBufferCount = 1;
1168 submitInfo.pCommandBuffers = &mDummyCB;
1169 submitInfo.signalSemaphoreCount = 0;
1170
1171 mQueueSubmit(mGraphicsQueue, 1, &submitInfo, VK_NULL_HANDLE);
1172
1173 // On Android when we import a semaphore, it is imported using temporary permanence. That
1174 // means as soon as we queue the semaphore for a wait it reverts to its previous permanent
1175 // state before importing. This means it will now be in an idle state with no pending
1176 // signal or wait operations, so it is safe to immediately delete it.
1177 mDestroySemaphore(mDevice, semaphore, nullptr);
Stan Iliev564ca3e2018-09-04 22:00:00 +00001178 return OK;
1179}
1180
1181status_t VulkanManager::createReleaseFence(sp<Fence>& nativeFence) {
Greg Daniel26e0dca2018-09-18 10:33:19 -04001182 if (!hasVkContext()) {
1183 ALOGE("VulkanManager::createReleaseFence: VkDevice not initialized");
1184 return INVALID_OPERATION;
1185 }
1186
Greg Daniel26e0dca2018-09-18 10:33:19 -04001187 VkExportSemaphoreCreateInfo exportInfo;
1188 exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
1189 exportInfo.pNext = nullptr;
1190 exportInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
1191
1192 VkSemaphoreCreateInfo semaphoreInfo;
1193 semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
1194 semaphoreInfo.pNext = &exportInfo;
1195 semaphoreInfo.flags = 0;
1196 VkSemaphore semaphore;
1197 VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
1198 if (VK_SUCCESS != err) {
1199 ALOGE("VulkanManager::createReleaseFence: Failed to create semaphore");
1200 return INVALID_OPERATION;
1201 }
1202
1203 LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);
1204
1205 VkSubmitInfo submitInfo;
1206 memset(&submitInfo, 0, sizeof(VkSubmitInfo));
1207 submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
1208 submitInfo.waitSemaphoreCount = 0;
1209 submitInfo.pWaitSemaphores = nullptr;
1210 submitInfo.pWaitDstStageMask = nullptr;
1211 submitInfo.commandBufferCount = 1;
1212 submitInfo.pCommandBuffers = &mDummyCB;
1213 submitInfo.signalSemaphoreCount = 1;
1214 submitInfo.pSignalSemaphores = &semaphore;
1215
1216 mQueueSubmit(mGraphicsQueue, 1, &submitInfo, VK_NULL_HANDLE);
1217
1218 VkSemaphoreGetFdInfoKHR getFdInfo;
1219 getFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
1220 getFdInfo.pNext = nullptr;
1221 getFdInfo.semaphore = semaphore;
1222 getFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
1223
1224 int fenceFd = 0;
1225
1226 err = mGetSemaphoreFdKHR(mDevice, &getFdInfo, &fenceFd);
1227 if (VK_SUCCESS != err) {
1228 ALOGE("VulkanManager::createReleaseFence: Failed to get semaphore Fd");
1229 return INVALID_OPERATION;
1230 }
1231 nativeFence = new Fence(fenceFd);
1232
1233 // Exporting a semaphore with copy transference via vkGetSemahporeFdKHR, has the same effect of
1234 // destroying the semaphore and creating a new one with the same handle, and the payloads
1235 // ownership is move to the Fd we created. Thus the semahpore is in a state that we can delete
1236 // it and we don't need to wait on the command buffer we submitted to finish.
1237 mDestroySemaphore(mDevice, semaphore, nullptr);
1238
Stan Iliev564ca3e2018-09-04 22:00:00 +00001239 return OK;
1240}
1241
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001242} /* namespace renderthread */
1243} /* namespace uirenderer */
1244} /* namespace android */