/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "VulkanManager.h"

#include <gui/Surface.h>

#include "Properties.h"
#include "RenderThread.h"
#include "renderstate/RenderState.h"
#include "utils/FatVector.h"

#include <GrBackendSurface.h>
#include <GrContext.h>
#include <GrTypes.h>
#include <vk/GrVkExtensions.h>
#include <vk/GrVkTypes.h>

namespace android {
namespace uirenderer {
namespace renderthread {

static void free_features_extensions_structs(const VkPhysicalDeviceFeatures2& features) {
    // All Vulkan structs that could be part of the features chain will start with the
    // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
    // so we can get access to the pNext for the next struct.
    struct CommonVulkanHeader {
        VkStructureType sType;
        void* pNext;
    };

    void* pNext = features.pNext;
    while (pNext) {
        void* current = pNext;
        pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
        free(current);
    }
}

#define GET_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(VK_NULL_HANDLE, "vk" #F)
#define GET_INST_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(mInstance, "vk" #F)
#define GET_DEV_PROC(F) m##F = (PFN_vk##F)vkGetDeviceProcAddr(mDevice, "vk" #F)
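// For example, GET_DEV_PROC(QueueSubmit) expands (modulo string-literal pasting) to
//     mQueueSubmit = (PFN_vkQueueSubmit)vkGetDeviceProcAddr(mDevice, "vkQueueSubmit");
// so each Vulkan entry point we use is resolved once and cached in a member function pointer.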

void VulkanManager::destroy() {
    // We don't need to explicitly free the command buffer since it automatically gets freed
    // when we delete the VkCommandPool below.
    mDummyCB = VK_NULL_HANDLE;

    if (VK_NULL_HANDLE != mCommandPool) {
        mDestroyCommandPool(mDevice, mCommandPool, nullptr);
        mCommandPool = VK_NULL_HANDLE;
    }

    if (mDevice != VK_NULL_HANDLE) {
        mDeviceWaitIdle(mDevice);
        mDestroyDevice(mDevice, nullptr);
    }

    if (mInstance != VK_NULL_HANDLE) {
        mDestroyInstance(mInstance, nullptr);
    }

    mGraphicsQueue = VK_NULL_HANDLE;
    mPresentQueue = VK_NULL_HANDLE;
    mDevice = VK_NULL_HANDLE;
    mPhysicalDevice = VK_NULL_HANDLE;
    mInstance = VK_NULL_HANDLE;
    mInstanceExtensions.clear();
    mDeviceExtensions.clear();
    free_features_extensions_structs(mPhysicalDeviceFeatures2);
    mPhysicalDeviceFeatures2 = {};
}

void VulkanManager::setupDevice(GrVkExtensions& grExtensions, VkPhysicalDeviceFeatures2& features) {
    VkResult err;

    constexpr VkApplicationInfo app_info = {
            VK_STRUCTURE_TYPE_APPLICATION_INFO,  // sType
            nullptr,                             // pNext
            "android framework",                 // pApplicationName
            0,                                   // applicationVersion
            "android framework",                 // pEngineName
            0,                                   // engineVersion
            mAPIVersion,                         // apiVersion
    };

    {
        GET_PROC(EnumerateInstanceExtensionProperties);

        uint32_t extensionCount = 0;
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        std::unique_ptr<VkExtensionProperties[]> extensions(
                new VkExtensionProperties[extensionCount]);
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions.get());
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        bool hasKHRSurfaceExtension = false;
        bool hasKHRAndroidSurfaceExtension = false;
        for (uint32_t i = 0; i < extensionCount; ++i) {
            mInstanceExtensions.push_back(extensions[i].extensionName);
            if (!strcmp(extensions[i].extensionName, VK_KHR_SURFACE_EXTENSION_NAME)) {
                hasKHRSurfaceExtension = true;
            }
            if (!strcmp(extensions[i].extensionName, VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
                hasKHRAndroidSurfaceExtension = true;
            }
        }
        LOG_ALWAYS_FATAL_IF(!hasKHRSurfaceExtension || !hasKHRAndroidSurfaceExtension);
    }

    const VkInstanceCreateInfo instance_create = {
            VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,  // sType
            nullptr,                                 // pNext
            0,                                       // flags
            &app_info,                               // pApplicationInfo
            0,                                       // enabledLayerCount
            nullptr,                                 // ppEnabledLayerNames
            (uint32_t) mInstanceExtensions.size(),   // enabledExtensionCount
            mInstanceExtensions.data(),              // ppEnabledExtensionNames
    };

    GET_PROC(CreateInstance);
    err = mCreateInstance(&instance_create, nullptr, &mInstance);
    LOG_ALWAYS_FATAL_IF(err < 0);

    GET_INST_PROC(DestroyInstance);
    GET_INST_PROC(EnumeratePhysicalDevices);
    GET_INST_PROC(GetPhysicalDeviceProperties);
    GET_INST_PROC(GetPhysicalDeviceQueueFamilyProperties);
    GET_INST_PROC(GetPhysicalDeviceFeatures2);
    GET_INST_PROC(CreateDevice);
    GET_INST_PROC(EnumerateDeviceExtensionProperties);
    GET_INST_PROC(CreateAndroidSurfaceKHR);
    GET_INST_PROC(DestroySurfaceKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfaceSupportKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfacePresentModesKHR);

    uint32_t gpuCount;
    LOG_ALWAYS_FATAL_IF(mEnumeratePhysicalDevices(mInstance, &gpuCount, nullptr));
    LOG_ALWAYS_FATAL_IF(!gpuCount);
    // Just return the first physical device rather than the whole array, since there should
    // only be one device on Android.
    gpuCount = 1;
    err = mEnumeratePhysicalDevices(mInstance, &gpuCount, &mPhysicalDevice);
    // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
    LOG_ALWAYS_FATAL_IF(err && VK_INCOMPLETE != err);

    VkPhysicalDeviceProperties physDeviceProperties;
    mGetPhysicalDeviceProperties(mPhysicalDevice, &physDeviceProperties);
    LOG_ALWAYS_FATAL_IF(physDeviceProperties.apiVersion < VK_MAKE_VERSION(1, 1, 0));

    // query to get the initial queue props size
    uint32_t queueCount;
    mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, nullptr);
    LOG_ALWAYS_FATAL_IF(!queueCount);

    // now get the actual queue props
    std::unique_ptr<VkQueueFamilyProperties[]> queueProps(new VkQueueFamilyProperties[queueCount]);
    mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, queueProps.get());

    // iterate to find the graphics queue
    mGraphicsQueueIndex = queueCount;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            mGraphicsQueueIndex = i;
            break;
        }
    }
    LOG_ALWAYS_FATAL_IF(mGraphicsQueueIndex == queueCount);

    // All physical devices and queue families on Android must be capable of
    // presentation with any native window. So just use the first one.
    mPresentQueueIndex = 0;

    {
        uint32_t extensionCount = 0;
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                nullptr);
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        std::unique_ptr<VkExtensionProperties[]> extensions(
                new VkExtensionProperties[extensionCount]);
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                extensions.get());
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        bool hasKHRSwapchainExtension = false;
        for (uint32_t i = 0; i < extensionCount; ++i) {
            mDeviceExtensions.push_back(extensions[i].extensionName);
            if (!strcmp(extensions[i].extensionName, VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
                hasKHRSwapchainExtension = true;
            }
        }
        LOG_ALWAYS_FATAL_IF(!hasKHRSwapchainExtension);
    }

    auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return vkGetDeviceProcAddr(device, proc_name);
        }
        return vkGetInstanceProcAddr(instance, proc_name);
    };
    grExtensions.init(getProc, mInstance, mPhysicalDevice, mInstanceExtensions.size(),
            mInstanceExtensions.data(), mDeviceExtensions.size(), mDeviceExtensions.data());

    LOG_ALWAYS_FATAL_IF(!grExtensions.hasExtension(VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME, 1));

    memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
    features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    features.pNext = nullptr;

    // Setup all extension feature structs we may want to use.
    void** tailPNext = &features.pNext;
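    // tailPNext always points at the pNext field of the last struct appended to the chain,
    // so each feature struct below is linked in O(1); the whole heap-allocated chain is
    // released later by free_features_extensions_structs() in destroy().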

    if (grExtensions.hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
        VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend;
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) malloc(
                sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
        LOG_ALWAYS_FATAL_IF(!blend);
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
        blend->pNext = nullptr;
        *tailPNext = blend;
        tailPNext = &blend->pNext;
    }

    VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature;
    ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*) malloc(
            sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
    LOG_ALWAYS_FATAL_IF(!ycbcrFeature);
    ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
    ycbcrFeature->pNext = nullptr;
    *tailPNext = ycbcrFeature;
    tailPNext = &ycbcrFeature->pNext;

    // query to get the physical device features
    mGetPhysicalDeviceFeatures2(mPhysicalDevice, &features);
    // Disable robustBufferAccess: it looks like it would slow things down, and we can't
    // depend on it being supported on all platforms anyway.
    features.features.robustBufferAccess = VK_FALSE;

    float queuePriorities[1] = { 0.0 };

    const VkDeviceQueueCreateInfo queueInfo[2] = {
            {
                    VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,  // sType
                    nullptr,                                     // pNext
                    0,                                           // VkDeviceQueueCreateFlags
                    mGraphicsQueueIndex,                         // queueFamilyIndex
                    1,                                           // queueCount
                    queuePriorities,                             // pQueuePriorities
            },
            {
                    VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,  // sType
                    nullptr,                                     // pNext
                    0,                                           // VkDeviceQueueCreateFlags
                    mPresentQueueIndex,                          // queueFamilyIndex
                    1,                                           // queueCount
                    queuePriorities,                             // pQueuePriorities
            }
    };
    uint32_t queueInfoCount = (mPresentQueueIndex != mGraphicsQueueIndex) ? 2 : 1;
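    // vkCreateDevice requires each VkDeviceQueueCreateInfo to name a unique queue family,
    // so the second entry is only passed when the present and graphics families differ.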

    const VkDeviceCreateInfo deviceInfo = {
            VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,  // sType
            &features,                             // pNext
            0,                                     // VkDeviceCreateFlags
            queueInfoCount,                        // queueCreateInfoCount
            queueInfo,                             // pQueueCreateInfos
            0,                                     // enabledLayerCount
            nullptr,                               // ppEnabledLayerNames
            (uint32_t) mDeviceExtensions.size(),   // enabledExtensionCount
            mDeviceExtensions.data(),              // ppEnabledExtensionNames
            nullptr,                               // ppEnabledFeatures
    };

    LOG_ALWAYS_FATAL_IF(mCreateDevice(mPhysicalDevice, &deviceInfo, nullptr, &mDevice));

    GET_DEV_PROC(GetDeviceQueue);
    GET_DEV_PROC(DeviceWaitIdle);
    GET_DEV_PROC(DestroyDevice);
    GET_DEV_PROC(CreateSwapchainKHR);
    GET_DEV_PROC(DestroySwapchainKHR);
    GET_DEV_PROC(GetSwapchainImagesKHR);
    GET_DEV_PROC(AcquireNextImageKHR);
    GET_DEV_PROC(QueuePresentKHR);
    GET_DEV_PROC(CreateCommandPool);
    GET_DEV_PROC(DestroyCommandPool);
    GET_DEV_PROC(AllocateCommandBuffers);
    GET_DEV_PROC(FreeCommandBuffers);
    GET_DEV_PROC(ResetCommandBuffer);
    GET_DEV_PROC(BeginCommandBuffer);
    GET_DEV_PROC(EndCommandBuffer);
    GET_DEV_PROC(CmdPipelineBarrier);
    GET_DEV_PROC(QueueSubmit);
    GET_DEV_PROC(QueueWaitIdle);
    GET_DEV_PROC(CreateSemaphore);
    GET_DEV_PROC(DestroySemaphore);
    GET_DEV_PROC(ImportSemaphoreFdKHR);
    GET_DEV_PROC(GetSemaphoreFdKHR);
    GET_DEV_PROC(CreateFence);
    GET_DEV_PROC(DestroyFence);
    GET_DEV_PROC(WaitForFences);
    GET_DEV_PROC(ResetFences);
}

void VulkanManager::initialize() {
    if (mDevice != VK_NULL_HANDLE) {
        return;
    }

    GET_PROC(EnumerateInstanceVersion);
    uint32_t instanceVersion;
    LOG_ALWAYS_FATAL_IF(mEnumerateInstanceVersion(&instanceVersion));
    LOG_ALWAYS_FATAL_IF(instanceVersion < VK_MAKE_VERSION(1, 1, 0));
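    // Both the loader (checked here) and the physical device (checked in setupDevice) must
    // report at least Vulkan 1.1 before the rest of the backend is brought up.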

    this->setupDevice(mExtensions, mPhysicalDeviceFeatures2);

    mGetDeviceQueue(mDevice, mGraphicsQueueIndex, 0, &mGraphicsQueue);

    // create the command pool for the command buffers
    if (VK_NULL_HANDLE == mCommandPool) {
        VkCommandPoolCreateInfo commandPoolInfo;
        memset(&commandPoolInfo, 0, sizeof(VkCommandPoolCreateInfo));
        commandPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
        // this needs to be on the render queue
        commandPoolInfo.queueFamilyIndex = mGraphicsQueueIndex;
        commandPoolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
        SkDEBUGCODE(VkResult res =) mCreateCommandPool(mDevice, &commandPoolInfo, nullptr,
                &mCommandPool);
        SkASSERT(VK_SUCCESS == res);
    }
    LOG_ALWAYS_FATAL_IF(mCommandPool == VK_NULL_HANDLE);

    if (!setupDummyCommandBuffer()) {
        this->destroy();
        // Fall through; the LOG_ALWAYS_FATAL_IF on the next line will crash.
    }
    LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);

    mGetDeviceQueue(mDevice, mPresentQueueIndex, 0, &mPresentQueue);

    if (Properties::enablePartialUpdates && Properties::useBufferAge) {
        mSwapBehavior = SwapBehavior::BufferAge;
    }
}

sk_sp<GrContext> VulkanManager::createContext(const GrContextOptions& options) {
    auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return vkGetDeviceProcAddr(device, proc_name);
        }
        return vkGetInstanceProcAddr(instance, proc_name);
    };

    GrVkBackendContext backendContext;
    backendContext.fInstance = mInstance;
    backendContext.fPhysicalDevice = mPhysicalDevice;
    backendContext.fDevice = mDevice;
    backendContext.fQueue = mGraphicsQueue;
    backendContext.fGraphicsQueueIndex = mGraphicsQueueIndex;
    backendContext.fMaxAPIVersion = mAPIVersion;
    backendContext.fVkExtensions = &mExtensions;
    backendContext.fDeviceFeatures2 = &mPhysicalDeviceFeatures2;
    backendContext.fGetProc = std::move(getProc);

    return GrContext::MakeVulkan(backendContext, options);
}

VkFunctorInitParams VulkanManager::getVkFunctorInitParams() const {
    return VkFunctorInitParams{
            .instance = mInstance,
            .physical_device = mPhysicalDevice,
            .device = mDevice,
            .queue = mGraphicsQueue,
            .graphics_queue_index = mGraphicsQueueIndex,
            .api_version = mAPIVersion,
            .enabled_instance_extension_names = mInstanceExtensions.data(),
            .enabled_instance_extension_names_length =
                    static_cast<uint32_t>(mInstanceExtensions.size()),
            .enabled_device_extension_names = mDeviceExtensions.data(),
            .enabled_device_extension_names_length =
                    static_cast<uint32_t>(mDeviceExtensions.size()),
            .device_features_2 = &mPhysicalDeviceFeatures2,
    };
}

// Returns the next BackbufferInfo to use for the next draw. The function will make sure all
// previous uses have finished before returning.
VulkanSurface::BackbufferInfo* VulkanManager::getAvailableBackbuffer(VulkanSurface* surface) {
    SkASSERT(surface->mBackbuffers);

    ++surface->mCurrentBackbufferIndex;
    if (surface->mCurrentBackbufferIndex > surface->mImageCount) {
        surface->mCurrentBackbufferIndex = 0;
    }
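    // createBuffers() allocates mImageCount + 1 BackbufferInfo entries, so valid indices run
    // from 0 to mImageCount inclusive; the extra slot gives the previous frame's command
    // buffers a chance to finish before the ring wraps back around (see createBuffers()).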

    VulkanSurface::BackbufferInfo* backbuffer =
            surface->mBackbuffers + surface->mCurrentBackbufferIndex;

    // Before we reuse a backbuffer, make sure its fences have all signaled so that we can safely
    // reuse its command buffers.
    VkResult res = mWaitForFences(mDevice, 2, backbuffer->mUsageFences, true, UINT64_MAX);
    if (res != VK_SUCCESS) {
        return nullptr;
    }

    return backbuffer;
}

static SkMatrix getPreTransformMatrix(int width, int height,
        VkSurfaceTransformFlagBitsKHR transform) {
    switch (transform) {
        case VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR:
            return SkMatrix::I();
        case VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR:
            return SkMatrix::MakeAll(0, -1, height, 1, 0, 0, 0, 0, 1);
        case VK_SURFACE_TRANSFORM_ROTATE_180_BIT_KHR:
            return SkMatrix::MakeAll(-1, 0, width, 0, -1, height, 0, 0, 1);
        case VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR:
            return SkMatrix::MakeAll(0, 1, 0, -1, 0, width, 0, 0, 1);
        case VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_BIT_KHR:
            return SkMatrix::MakeAll(-1, 0, width, 0, 1, 0, 0, 0, 1);
        case VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR:
            return SkMatrix::MakeAll(0, -1, height, -1, 0, width, 0, 0, 1);
        case VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_180_BIT_KHR:
            return SkMatrix::MakeAll(1, 0, 0, 0, -1, height, 0, 0, 1);
        case VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR:
            return SkMatrix::MakeAll(0, 1, 0, 1, 0, 0, 0, 0, 1);
        default:
            LOG_ALWAYS_FATAL("Unsupported pre transform of swapchain.");
    }
    return SkMatrix::I();
}
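// As a quick sanity check on the matrices above: the ROTATE_90 entry maps (x, y) to
// (height - y, x), since SkMatrix::MakeAll takes row-major (scaleX, skewX, transX,
// skewY, scaleY, transY, pers0, pers1, pers2), giving x' = -y + height and y' = x.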

SkSurface* VulkanManager::getBackbufferSurface(VulkanSurface** surfaceOut) {
    // Recreate the VulkanSurface if the ANativeWindow has been resized.
    VulkanSurface* surface = *surfaceOut;
    int windowWidth = 0, windowHeight = 0;
    ANativeWindow* window = surface->mNativeWindow;
    window->query(window, NATIVE_WINDOW_WIDTH, &windowWidth);
    window->query(window, NATIVE_WINDOW_HEIGHT, &windowHeight);
    if (windowWidth != surface->mWindowWidth || windowHeight != surface->mWindowHeight) {
        ColorMode colorMode = surface->mColorMode;
        sk_sp<SkColorSpace> colorSpace = surface->mColorSpace;
        SkColorType colorType = surface->mColorType;
        GrContext* grContext = surface->mGrContext;
        destroySurface(surface);
        *surfaceOut = createSurface(window, colorMode, colorSpace, colorType, grContext);
        surface = *surfaceOut;
        if (!surface) {
            return nullptr;
        }
    }

    VulkanSurface::BackbufferInfo* backbuffer = getAvailableBackbuffer(surface);
    SkASSERT(backbuffer);

    VkResult res;

    res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
    SkASSERT(VK_SUCCESS == res);

    // The acquire will signal the attached mAcquireSemaphore. We use this to know the image has
    // finished presenting and that it is safe to begin sending new commands to the returned image.
    res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
            backbuffer->mAcquireSemaphore, VK_NULL_HANDLE,
            &backbuffer->mImageIndex);

    if (VK_ERROR_SURFACE_LOST_KHR == res) {
        // need to figure out how to create a new vkSurface without the platformData*;
        // maybe use attach somehow? But we would need a Window.
        return nullptr;
    }
    if (VK_ERROR_OUT_OF_DATE_KHR == res || VK_SUBOPTIMAL_KHR == res) {
        // tear the swapchain down and try again
        if (!createSwapchain(surface)) {
            return nullptr;
        }
        backbuffer = getAvailableBackbuffer(surface);
        res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
        SkASSERT(VK_SUCCESS == res);

        // acquire the image
        res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
                backbuffer->mAcquireSemaphore, VK_NULL_HANDLE,
                &backbuffer->mImageIndex);

        if (VK_SUCCESS != res) {
            return nullptr;
        }
    }

    // set up the layout transfer from the initial layout to color attachment
    VkImageLayout layout = surface->mImageInfos[backbuffer->mImageIndex].mImageLayout;
    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout || VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout);
    VkPipelineStageFlags srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkAccessFlags srcAccessMask = 0;
    VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
            VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;

    VkImageMemoryBarrier imageMemoryBarrier = {
            VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,     // sType
            NULL,                                       // pNext
            srcAccessMask,                              // srcAccessMask
            dstAccessMask,                              // dstAccessMask
            layout,                                     // oldLayout
            VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,   // newLayout
            mPresentQueueIndex,                         // srcQueueFamilyIndex
            mGraphicsQueueIndex,                        // dstQueueFamilyIndex
            surface->mImages[backbuffer->mImageIndex],  // image
            {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}     // subresourceRange
    };
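    // Besides transitioning the layout, this barrier transfers ownership of the image from
    // the present queue family back to the graphics queue family whenever the two differ.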
    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[0], 0);

    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[0], &info);

    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[0], srcStageMask, dstStageMask, 0, 0,
            nullptr, 0, nullptr, 1, &imageMemoryBarrier);

    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[0]);

    VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 1;
    // Wait to make sure the acquire semaphore set above has signaled.
    submitInfo.pWaitSemaphores = &backbuffer->mAcquireSemaphore;
    submitInfo.pWaitDstStageMask = &waitDstStageFlags;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[0];
    submitInfo.signalSemaphoreCount = 0;

    // Attach the first fence to the submission here so we can track when the command buffer
    // finishes.
    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[0]);

    // We need to notify Skia that we changed the layout of the wrapped VkImage
    sk_sp<SkSurface> skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface;
    GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
            SkSurface::kFlushRead_BackendHandleAccess);
    if (!backendRT.isValid()) {
        SkASSERT(backendRT.isValid());
        return nullptr;
    }
    backendRT.setVkImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);

    surface->mPreTransform = getPreTransformMatrix(surface->windowWidth(),
            surface->windowHeight(),
            surface->mTransform);

    surface->mBackbuffer = std::move(skSurface);
    return surface->mBackbuffer.get();
}

void VulkanManager::destroyBuffers(VulkanSurface* surface) {
    if (surface->mBackbuffers) {
        for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
            mWaitForFences(mDevice, 2, surface->mBackbuffers[i].mUsageFences, true, UINT64_MAX);
            surface->mBackbuffers[i].mImageIndex = -1;
            mDestroySemaphore(mDevice, surface->mBackbuffers[i].mAcquireSemaphore, nullptr);
            mDestroySemaphore(mDevice, surface->mBackbuffers[i].mRenderSemaphore, nullptr);
            mFreeCommandBuffers(mDevice, mCommandPool, 2,
                    surface->mBackbuffers[i].mTransitionCmdBuffers);
            mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[0], nullptr);
            mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[1], nullptr);
        }
    }

    delete[] surface->mBackbuffers;
    surface->mBackbuffers = nullptr;
    delete[] surface->mImageInfos;
    surface->mImageInfos = nullptr;
    delete[] surface->mImages;
    surface->mImages = nullptr;
}

void VulkanManager::destroySurface(VulkanSurface* surface) {
    // Make sure all submit commands have finished before starting to destroy objects.
    if (VK_NULL_HANDLE != mPresentQueue) {
        mQueueWaitIdle(mPresentQueue);
    }
    mDeviceWaitIdle(mDevice);

    destroyBuffers(surface);

    if (VK_NULL_HANDLE != surface->mSwapchain) {
        mDestroySwapchainKHR(mDevice, surface->mSwapchain, nullptr);
        surface->mSwapchain = VK_NULL_HANDLE;
    }

    if (VK_NULL_HANDLE != surface->mVkSurface) {
        mDestroySurfaceKHR(mInstance, surface->mVkSurface, nullptr);
        surface->mVkSurface = VK_NULL_HANDLE;
    }
    delete surface;
}

void VulkanManager::createBuffers(VulkanSurface* surface, VkFormat format, VkExtent2D extent) {
    mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, nullptr);
    SkASSERT(surface->mImageCount);
    surface->mImages = new VkImage[surface->mImageCount];
    mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, surface->mImages);

    SkSurfaceProps props(0, kUnknown_SkPixelGeometry);

    // set up initial image layouts and create surfaces
    surface->mImageInfos = new VulkanSurface::ImageInfo[surface->mImageCount];
    for (uint32_t i = 0; i < surface->mImageCount; ++i) {
        GrVkImageInfo info;
        info.fImage = surface->mImages[i];
        info.fAlloc = GrVkAlloc();
        info.fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
        info.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
        info.fFormat = format;
        info.fLevelCount = 1;

        GrBackendRenderTarget backendRT(extent.width, extent.height, 0, 0, info);

        VulkanSurface::ImageInfo& imageInfo = surface->mImageInfos[i];
        imageInfo.mSurface = SkSurface::MakeFromBackendRenderTarget(
                surface->mGrContext, backendRT, kTopLeft_GrSurfaceOrigin,
                surface->mColorType, surface->mColorSpace, &props);
    }

    SkASSERT(mCommandPool != VK_NULL_HANDLE);

    // set up the backbuffers
    VkSemaphoreCreateInfo semaphoreInfo;
    memset(&semaphoreInfo, 0, sizeof(VkSemaphoreCreateInfo));
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    VkCommandBufferAllocateInfo commandBuffersInfo;
    memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
    commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    commandBuffersInfo.pNext = nullptr;
    commandBuffersInfo.commandPool = mCommandPool;
    commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    commandBuffersInfo.commandBufferCount = 2;
    VkFenceCreateInfo fenceInfo;
    memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
    fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    fenceInfo.pNext = nullptr;
    fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;

    // we create one additional backbuffer structure here, because we want to
    // give the command buffers they contain a chance to finish before we cycle back
    surface->mBackbuffers = new VulkanSurface::BackbufferInfo[surface->mImageCount + 1];
    for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
        SkDEBUGCODE(VkResult res);
        surface->mBackbuffers[i].mImageIndex = -1;
        SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
                &surface->mBackbuffers[i].mAcquireSemaphore);
        SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
                &surface->mBackbuffers[i].mRenderSemaphore);
        SkDEBUGCODE(res =) mAllocateCommandBuffers(mDevice, &commandBuffersInfo,
                surface->mBackbuffers[i].mTransitionCmdBuffers);
        SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
                &surface->mBackbuffers[i].mUsageFences[0]);
        SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
                &surface->mBackbuffers[i].mUsageFences[1]);
        SkASSERT(VK_SUCCESS == res);
    }
    surface->mCurrentBackbufferIndex = surface->mImageCount;
}

bool VulkanManager::createSwapchain(VulkanSurface* surface) {
    // check for capabilities
    VkSurfaceCapabilitiesKHR caps;
    VkResult res = mGetPhysicalDeviceSurfaceCapabilitiesKHR(mPhysicalDevice,
            surface->mVkSurface, &caps);
    if (VK_SUCCESS != res) {
        return false;
    }

    uint32_t surfaceFormatCount;
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
            &surfaceFormatCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    FatVector<VkSurfaceFormatKHR, 4> surfaceFormats(surfaceFormatCount);
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
            &surfaceFormatCount, surfaceFormats.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    uint32_t presentModeCount;
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
            surface->mVkSurface, &presentModeCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    FatVector<VkPresentModeKHR, VK_PRESENT_MODE_RANGE_SIZE_KHR> presentModes(presentModeCount);
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
            surface->mVkSurface, &presentModeCount,
            presentModes.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    if (!SkToBool(caps.supportedTransforms & VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR)) {
        return false;
    }
    VkSurfaceTransformFlagBitsKHR transform;
    if (SkToBool(caps.supportedTransforms & caps.currentTransform) &&
            !SkToBool(caps.currentTransform & VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR)) {
        transform = caps.currentTransform;
    } else {
        transform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
    }

    VkExtent2D extent = caps.currentExtent;
    // clamp width, to handle a currentExtent of -1 and protect us from broken hints
    if (extent.width < caps.minImageExtent.width) {
        extent.width = caps.minImageExtent.width;
    }
    SkASSERT(extent.width <= caps.maxImageExtent.width);
    // clamp height
    if (extent.height < caps.minImageExtent.height) {
        extent.height = caps.minImageExtent.height;
    }
    SkASSERT(extent.height <= caps.maxImageExtent.height);

    VkExtent2D swapExtent = extent;
    if (transform == VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR ||
            transform == VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR ||
            transform == VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR ||
            transform == VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR) {
        swapExtent.width = extent.height;
        swapExtent.height = extent.width;
    }
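    // For 90/270-degree pre-transforms the swapchain images are allocated in the display's
    // native orientation (width/height swapped), while mWindowWidth/mWindowHeight below keep
    // the app-facing orientation used for the resize check in getBackbufferSurface().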

    surface->mWindowWidth = extent.width;
    surface->mWindowHeight = extent.height;

    uint32_t imageCount = std::max<uint32_t>(3, caps.minImageCount);
    if (caps.maxImageCount > 0 && imageCount > caps.maxImageCount) {
        // Application must settle for fewer images than desired:
        imageCount = caps.maxImageCount;
    }

    // Currently Skia requires the images to be color attachments and support all transfer
    // operations.
    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
            VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
            VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    SkASSERT((caps.supportedUsageFlags & usageFlags) == usageFlags);

    SkASSERT(caps.supportedCompositeAlpha &
            (VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR | VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR));
    VkCompositeAlphaFlagBitsKHR composite_alpha =
            (caps.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR)
                    ? VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR
                    : VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;

    VkFormat surfaceFormat = VK_FORMAT_R8G8B8A8_UNORM;
    VkColorSpaceKHR colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
    if (surface->mColorType == SkColorType::kRGBA_F16_SkColorType) {
        surfaceFormat = VK_FORMAT_R16G16B16A16_SFLOAT;
    }

    if (surface->mColorMode == ColorMode::WideColorGamut) {
        skcms_Matrix3x3 surfaceGamut;
        LOG_ALWAYS_FATAL_IF(!surface->mColorSpace->toXYZD50(&surfaceGamut),
                "Could not get gamut matrix from color space");
        if (memcmp(&surfaceGamut, &SkNamedGamut::kSRGB, sizeof(surfaceGamut)) == 0) {
            colorSpace = VK_COLOR_SPACE_EXTENDED_SRGB_NONLINEAR_EXT;
        } else if (memcmp(&surfaceGamut, &SkNamedGamut::kDCIP3, sizeof(surfaceGamut)) == 0) {
            colorSpace = VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT;
        } else {
            LOG_ALWAYS_FATAL("Unreachable: unsupported wide color space.");
        }
    }

    bool foundSurfaceFormat = false;
    for (uint32_t i = 0; i < surfaceFormatCount; ++i) {
        if (surfaceFormat == surfaceFormats[i].format
                && colorSpace == surfaceFormats[i].colorSpace) {
            foundSurfaceFormat = true;
            break;
        }
    }

    if (!foundSurfaceFormat) {
        return false;
    }

    // FIFO is always available and will match what we do on GL so just pick that here.
    VkPresentModeKHR mode = VK_PRESENT_MODE_FIFO_KHR;

    VkSwapchainCreateInfoKHR swapchainCreateInfo;
    memset(&swapchainCreateInfo, 0, sizeof(VkSwapchainCreateInfoKHR));
    swapchainCreateInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
    swapchainCreateInfo.surface = surface->mVkSurface;
    swapchainCreateInfo.minImageCount = imageCount;
    swapchainCreateInfo.imageFormat = surfaceFormat;
    swapchainCreateInfo.imageColorSpace = colorSpace;
    swapchainCreateInfo.imageExtent = swapExtent;
    swapchainCreateInfo.imageArrayLayers = 1;
    swapchainCreateInfo.imageUsage = usageFlags;

    uint32_t queueFamilies[] = {mGraphicsQueueIndex, mPresentQueueIndex};
    if (mGraphicsQueueIndex != mPresentQueueIndex) {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
        swapchainCreateInfo.queueFamilyIndexCount = 2;
        swapchainCreateInfo.pQueueFamilyIndices = queueFamilies;
    } else {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
        swapchainCreateInfo.queueFamilyIndexCount = 0;
        swapchainCreateInfo.pQueueFamilyIndices = nullptr;
    }

    swapchainCreateInfo.preTransform = transform;
    swapchainCreateInfo.compositeAlpha = composite_alpha;
    swapchainCreateInfo.presentMode = mode;
    swapchainCreateInfo.clipped = true;
    swapchainCreateInfo.oldSwapchain = surface->mSwapchain;

    res = mCreateSwapchainKHR(mDevice, &swapchainCreateInfo, nullptr, &surface->mSwapchain);
    if (VK_SUCCESS != res) {
        return false;
    }

    surface->mTransform = transform;

    // destroy the old swapchain
    if (swapchainCreateInfo.oldSwapchain != VK_NULL_HANDLE) {
        mDeviceWaitIdle(mDevice);

        destroyBuffers(surface);

        mDestroySwapchainKHR(mDevice, swapchainCreateInfo.oldSwapchain, nullptr);
    }

    createBuffers(surface, surfaceFormat, swapExtent);

    // The window content is not updated (frozen) until a buffer of the window size is received.
    // This prevents temporary stretching of the window after it is resized, but before the first
    // buffer with the new size is enqueued.
    native_window_set_scaling_mode(surface->mNativeWindow, NATIVE_WINDOW_SCALING_MODE_FREEZE);

    return true;
}

VulkanSurface* VulkanManager::createSurface(ANativeWindow* window, ColorMode colorMode,
        sk_sp<SkColorSpace> surfaceColorSpace,
        SkColorType surfaceColorType,
        GrContext* grContext) {
    LOG_ALWAYS_FATAL_IF(!hasVkContext(), "Not initialized");
    if (!window) {
        return nullptr;
    }

    VulkanSurface* surface = new VulkanSurface(colorMode, window, surfaceColorSpace,
            surfaceColorType, grContext);

    VkAndroidSurfaceCreateInfoKHR surfaceCreateInfo;
    memset(&surfaceCreateInfo, 0, sizeof(VkAndroidSurfaceCreateInfoKHR));
    surfaceCreateInfo.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
    surfaceCreateInfo.pNext = nullptr;
    surfaceCreateInfo.flags = 0;
    surfaceCreateInfo.window = window;

    VkResult res = mCreateAndroidSurfaceKHR(mInstance, &surfaceCreateInfo, nullptr,
            &surface->mVkSurface);
    if (VK_SUCCESS != res) {
        delete surface;
        return nullptr;
    }

    SkDEBUGCODE(VkBool32 supported; res = mGetPhysicalDeviceSurfaceSupportKHR(
            mPhysicalDevice, mPresentQueueIndex, surface->mVkSurface, &supported);
    // All physical devices and queue families on Android must be capable of
    // presentation with any native window.
    SkASSERT(VK_SUCCESS == res && supported););

    if (!createSwapchain(surface)) {
        destroySurface(surface);
        return nullptr;
    }

    return surface;
}

// Helper to know which src stage flags we need to set when transitioning to the present layout
static VkPipelineStageFlags layoutToPipelineSrcStageFlags(const VkImageLayout layout) {
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_TRANSFER_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
            VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
    } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        return VK_PIPELINE_STAGE_HOST_BIT;
    }

    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
    return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
}

// Helper to know which src access mask we need to set when transitioning to the present layout
static VkAccessFlags layoutToSrcAccessMask(const VkImageLayout layout) {
    VkAccessFlags flags = 0;
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT |
                VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_HOST_WRITE_BIT |
                VK_ACCESS_HOST_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        flags = VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        flags = VK_ACCESS_SHADER_READ_BIT;
    }
    return flags;
}

void VulkanManager::swapBuffers(VulkanSurface* surface) {
    if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
        ATRACE_NAME("Finishing GPU work");
        mDeviceWaitIdle(mDevice);
    }

    SkASSERT(surface->mBackbuffers);
    VulkanSurface::BackbufferInfo* backbuffer =
            surface->mBackbuffers + surface->mCurrentBackbufferIndex;

    SkSurface* skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface.get();
    GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
            SkSurface::kFlushRead_BackendHandleAccess);
    SkASSERT(backendRT.isValid());

    GrVkImageInfo imageInfo;
    SkAssertResult(backendRT.getVkImageInfo(&imageInfo));

    // Check to make sure we never change the actually wrapped image
    SkASSERT(imageInfo.fImage == surface->mImages[backbuffer->mImageIndex]);

    // We need to transition the image to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR and make sure that all
    // previous work is complete before presenting. So we first add the necessary barrier here.
    VkImageLayout layout = imageInfo.fImageLayout;
    VkPipelineStageFlags srcStageMask = layoutToPipelineSrcStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = layoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = 0;

    VkImageMemoryBarrier imageMemoryBarrier = {
            VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,     // sType
            NULL,                                       // pNext
            srcAccessMask,                              // srcAccessMask
            dstAccessMask,                              // dstAccessMask
            layout,                                     // oldLayout
            VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,            // newLayout
            mGraphicsQueueIndex,                        // srcQueueFamilyIndex
            mPresentQueueIndex,                         // dstQueueFamilyIndex
            surface->mImages[backbuffer->mImageIndex],  // image
            {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}     // subresourceRange
    };

    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[1], 0);
    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[1], &info);
    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[1], srcStageMask, dstStageMask, 0, 0,
            nullptr, 0, nullptr, 1, &imageMemoryBarrier);
    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[1]);

    surface->mImageInfos[backbuffer->mImageIndex].mImageLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

    // insert the layout transfer into the queue
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 0;
    submitInfo.pWaitDstStageMask = 0;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[1];
    submitInfo.signalSemaphoreCount = 1;
    // When this command buffer finishes we will signal this semaphore so that we know it is now
    // safe to present the image to the screen.
    submitInfo.pSignalSemaphores = &backbuffer->mRenderSemaphore;

    // Attach the second fence to the submission here so we can track when the command buffer
    // finishes.
    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[1]);

    // Submit the present operation to the present queue. We use a semaphore here to make sure all
    // rendering to the image is complete and that the layout has been changed to present on the
    // graphics queue.
    const VkPresentInfoKHR presentInfo = {
            VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,  // sType
            NULL,                                // pNext
            1,                                   // waitSemaphoreCount
            &backbuffer->mRenderSemaphore,       // pWaitSemaphores
            1,                                   // swapchainCount
            &surface->mSwapchain,                // pSwapchains
            &backbuffer->mImageIndex,            // pImageIndices
            NULL                                 // pResults
    };

    mQueuePresentKHR(mPresentQueue, &presentInfo);

    surface->mBackbuffer.reset();
    surface->mImageInfos[backbuffer->mImageIndex].mLastUsed = surface->mCurrentTime;
    surface->mImageInfos[backbuffer->mImageIndex].mInvalid = false;
    surface->mCurrentTime++;
}

int VulkanManager::getAge(VulkanSurface* surface) {
    SkASSERT(surface->mBackbuffers);
    VulkanSurface::BackbufferInfo* backbuffer =
            surface->mBackbuffers + surface->mCurrentBackbufferIndex;
    if (mSwapBehavior == SwapBehavior::Discard ||
        surface->mImageInfos[backbuffer->mImageIndex].mInvalid) {
        return 0;
    }
    uint16_t lastUsed = surface->mImageInfos[backbuffer->mImageIndex].mLastUsed;
    return surface->mCurrentTime - lastUsed;
}

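// Records an empty, reusable command buffer (begun with SIMULTANEOUS_USE so it may be pending in
// several submissions at once). fenceWait() and createReleaseFence() submit it when the only
// thing they need from vkQueueSubmit is the semaphore wait or signal that rides along with the
// submission.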
bool VulkanManager::setupDummyCommandBuffer() {
    if (mDummyCB != VK_NULL_HANDLE) {
        return true;
    }

    VkCommandBufferAllocateInfo commandBuffersInfo;
    memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
    commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    commandBuffersInfo.pNext = nullptr;
    commandBuffersInfo.commandPool = mCommandPool;
    commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    commandBuffersInfo.commandBufferCount = 1;

    VkResult err = mAllocateCommandBuffers(mDevice, &commandBuffersInfo, &mDummyCB);
    if (err != VK_SUCCESS) {
        // It is probably unnecessary to set this back to VK_NULL_HANDLE, but we set it anyway to
        // make sure the driver didn't set a value and then return a failure.
        mDummyCB = VK_NULL_HANDLE;
        return false;
    }

    VkCommandBufferBeginInfo beginInfo;
    memset(&beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
    beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    beginInfo.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;

    mBeginCommandBuffer(mDummyCB, &beginInfo);
    mEndCommandBuffer(mDummyCB);
    return true;
}

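// Makes the GPU wait on the given native fence: the fence's sync fd is imported as a temporary
// Vulkan semaphore, and a wait on that semaphore is queued on the graphics queue, so work
// submitted afterwards does not execute until the fence signals. The CPU never blocks.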
status_t VulkanManager::fenceWait(sp<Fence>& fence) {
    if (!hasVkContext()) {
        ALOGE("VulkanManager::fenceWait: VkDevice not initialized");
        return INVALID_OPERATION;
    }

    // Block the GPU on the fence.
    int fenceFd = fence->dup();
    if (fenceFd == -1) {
        ALOGE("VulkanManager::fenceWait: error dup'ing fence fd: %d", errno);
        return -errno;
    }

    VkSemaphoreCreateInfo semaphoreInfo;
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    VkSemaphore semaphore;
    VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
    if (VK_SUCCESS != err) {
        ALOGE("Failed to create import semaphore, err: %d", err);
        return UNKNOWN_ERROR;
    }
    VkImportSemaphoreFdInfoKHR importInfo;
    importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
    importInfo.pNext = nullptr;
    importInfo.semaphore = semaphore;
    importInfo.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
    importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
    importInfo.fd = fenceFd;

    err = mImportSemaphoreFdKHR(mDevice, &importInfo);
    if (VK_SUCCESS != err) {
        ALOGE("Failed to import semaphore, err: %d", err);
        return UNKNOWN_ERROR;
    }

    LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);

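    // Submitting the otherwise empty dummy command buffer is what attaches the semaphore wait to
    // the queue: every batch submitted to mGraphicsQueue after this one is ordered after the
    // imported fence has signaled.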
    VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;

    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 1;
    // Wait to make sure the semaphore we imported above has signaled.
    submitInfo.pWaitSemaphores = &semaphore;
    submitInfo.pWaitDstStageMask = &waitDstStageFlags;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &mDummyCB;
    submitInfo.signalSemaphoreCount = 0;

    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, VK_NULL_HANDLE);

    // On Android, when we import a semaphore, it is imported using temporary permanence. That
    // means that as soon as we queue the semaphore for a wait, it reverts to the permanent state
    // it had before the import. It will then be in an idle state with no pending signal or wait
    // operations, so it is safe to delete it immediately.
    mDestroySemaphore(mDevice, semaphore, nullptr);
    return OK;
}

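// Creates a Fence backed by a sync fd that signals once all work currently queued on the graphics
// queue has completed, by exporting a semaphore that is signaled from a dummy submission.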
status_t VulkanManager::createReleaseFence(sp<Fence>& nativeFence) {
    if (!hasVkContext()) {
        ALOGE("VulkanManager::createReleaseFence: VkDevice not initialized");
        return INVALID_OPERATION;
    }

    VkExportSemaphoreCreateInfo exportInfo;
    exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
    exportInfo.pNext = nullptr;
    exportInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    VkSemaphoreCreateInfo semaphoreInfo;
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = &exportInfo;
    semaphoreInfo.flags = 0;
    VkSemaphore semaphore;
    VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
    if (VK_SUCCESS != err) {
        ALOGE("VulkanManager::createReleaseFence: Failed to create semaphore");
        return INVALID_OPERATION;
    }

    LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);

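    // Submit the empty dummy command buffer purely for its signal operation: the semaphore (and
    // therefore the sync fd exported below) signals once this batch, and all batches earlier in
    // submission order on the graphics queue, have finished executing.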
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 0;
    submitInfo.pWaitSemaphores = nullptr;
    submitInfo.pWaitDstStageMask = nullptr;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &mDummyCB;
    submitInfo.signalSemaphoreCount = 1;
    submitInfo.pSignalSemaphores = &semaphore;

    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, VK_NULL_HANDLE);

    VkSemaphoreGetFdInfoKHR getFdInfo;
    getFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
    getFdInfo.pNext = nullptr;
    getFdInfo.semaphore = semaphore;
    getFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    int fenceFd = 0;

    err = mGetSemaphoreFdKHR(mDevice, &getFdInfo, &fenceFd);
    if (VK_SUCCESS != err) {
        ALOGE("VulkanManager::createReleaseFence: Failed to get semaphore Fd");
        return INVALID_OPERATION;
    }
    nativeFence = new Fence(fenceFd);

    // Exporting a semaphore with copy transference via vkGetSemaphoreFdKHR has the same effect as
    // destroying the semaphore and creating a new one with the same handle: ownership of the
    // payload moves to the fd we created. The semaphore is therefore in a state where we can
    // delete it, and we don't need to wait for the command buffer we submitted to finish.
    mDestroySemaphore(mDevice, semaphore, nullptr);

    return OK;
}

} /* namespace renderthread */
} /* namespace uirenderer */
} /* namespace android */