blob: 1e7520216d6644483b9db4e2242bb983e116d8dd [file] [log] [blame]
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001/*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "VulkanManager.h"
18
Stan Iliev305e13a2018-11-13 11:14:48 -050019#include <gui/Surface.h>
20
Greg Danielcd558522016-11-17 13:31:40 -050021#include "Properties.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050022#include "RenderThread.h"
Greg Daniel45ec62b2017-01-04 14:27:00 -050023#include "renderstate/RenderState.h"
Ben Wagnereec27d52017-01-11 15:32:07 -050024#include "utils/FatVector.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050025
Greg Danielac2d2322017-07-12 11:30:15 -040026#include <GrBackendSurface.h>
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050027#include <GrContext.h>
28#include <GrTypes.h>
Greg Daniela227dbb2018-08-20 09:19:48 -040029#include <GrTypes.h>
30#include <vk/GrVkExtensions.h>
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050031#include <vk/GrVkTypes.h>
32
33namespace android {
34namespace uirenderer {
35namespace renderthread {
36
Bo Liu7b8c1eb2019-01-08 20:17:55 -080037static void free_features_extensions_structs(const VkPhysicalDeviceFeatures2& features) {
38 // All Vulkan structs that could be part of the features chain will start with the
39 // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
40 // so we can get access to the pNext for the next struct.
41 struct CommonVulkanHeader {
42 VkStructureType sType;
43 void* pNext;
44 };
45
46 void* pNext = features.pNext;
47 while (pNext) {
48 void* current = pNext;
49 pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
50 free(current);
51 }
52}
53
// Helpers that resolve Vulkan entry points into the matching m<Name> member
// function pointers:
//   GET_PROC      - global (pre-instance) functions, looked up with a VK_NULL_HANDLE instance.
//   GET_INST_PROC - instance-level functions, resolved against mInstance.
//   GET_DEV_PROC  - device-level functions, resolved against mDevice.
#define GET_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(VK_NULL_HANDLE, "vk" #F)
#define GET_INST_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(mInstance, "vk" #F)
#define GET_DEV_PROC(F) m##F = (PFN_vk##F)vkGetDeviceProcAddr(mDevice, "vk" #F)
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050057
// Only records the owning RenderThread; all Vulkan objects are created later in initialize().
VulkanManager::VulkanManager(RenderThread& thread) : mRenderThread(thread) {}
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050059
60void VulkanManager::destroy() {
Greg Daniel45ec62b2017-01-04 14:27:00 -050061 mRenderThread.setGrContext(nullptr);
62
Greg Daniel26e0dca2018-09-18 10:33:19 -040063 // We don't need to explicitly free the command buffer since it automatically gets freed when we
64 // delete the VkCommandPool below.
65 mDummyCB = VK_NULL_HANDLE;
66
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050067 if (VK_NULL_HANDLE != mCommandPool) {
Greg Daniel2ff202712018-06-14 11:50:10 -040068 mDestroyCommandPool(mDevice, mCommandPool, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050069 mCommandPool = VK_NULL_HANDLE;
70 }
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050071
Greg Daniel2ff202712018-06-14 11:50:10 -040072 if (mDevice != VK_NULL_HANDLE) {
73 mDeviceWaitIdle(mDevice);
74 mDestroyDevice(mDevice, nullptr);
John Reck1bcacfd2017-11-03 10:12:19 -070075 }
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050076
Greg Daniel2ff202712018-06-14 11:50:10 -040077 if (mInstance != VK_NULL_HANDLE) {
78 mDestroyInstance(mInstance, nullptr);
79 }
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050080
Greg Daniel2ff202712018-06-14 11:50:10 -040081 mGraphicsQueue = VK_NULL_HANDLE;
82 mPresentQueue = VK_NULL_HANDLE;
83 mDevice = VK_NULL_HANDLE;
84 mPhysicalDevice = VK_NULL_HANDLE;
85 mInstance = VK_NULL_HANDLE;
Bo Liu7b8c1eb2019-01-08 20:17:55 -080086 mInstanceVersion = 0u;
87 mInstanceExtensions.clear();
88 mDeviceExtensions.clear();
89 free_features_extensions_structs(mPhysicalDeviceFeatures2);
90 mPhysicalDeviceFeatures2 = {};
Greg Daniel2ff202712018-06-14 11:50:10 -040091}
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050092
// Brings up the Vulkan instance and logical device for the render thread:
//   1. enumerates instance extensions and requires VK_KHR_surface + VK_KHR_android_surface,
//   2. creates a Vulkan 1.1 instance and picks the first physical device,
//   3. finds a graphics queue family (present uses family 0 — see comment below),
//   4. enumerates device extensions and requires VK_KHR_swapchain,
//   5. fills |grExtensions|, builds the malloc'd feature-struct chain on |features|
//      (freed later by free_features_extensions_structs via destroy()),
//   6. creates the VkDevice and resolves all device-level entry points.
// Returns false after calling this->destroy() on any failure.
bool VulkanManager::setupDevice(GrVkExtensions& grExtensions, VkPhysicalDeviceFeatures2& features) {
    VkResult err;

    constexpr VkApplicationInfo app_info = {
        VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
        nullptr, // pNext
        "android framework", // pApplicationName
        0, // applicationVersion
        "android framework", // pEngineName
        0, // engineVerison
        VK_MAKE_VERSION(1, 1, 0), // apiVersion
    };

    {
        GET_PROC(EnumerateInstanceExtensionProperties);

        uint32_t extensionCount = 0;
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
        if (VK_SUCCESS != err) {
            return false;
        }
        std::unique_ptr<VkExtensionProperties[]> extensions(
                new VkExtensionProperties[extensionCount]);
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions.get());
        if (VK_SUCCESS != err) {
            return false;
        }
        bool hasKHRSurfaceExtension = false;
        bool hasKHRAndroidSurfaceExtension = false;
        for (uint32_t i = 0; i < extensionCount; ++i) {
            // NOTE(review): this stores the extensionName char* pointing INTO the
            // scope-local |extensions| buffer, which is freed when this block ends.
            // mInstanceExtensions is later exposed via getVkFunctorInitParams(), so
            // these pointers look dangling — confirm, and consider keeping the
            // VkExtensionProperties storage alive in a member.
            mInstanceExtensions.push_back(extensions[i].extensionName);
            if (!strcmp(extensions[i].extensionName, VK_KHR_SURFACE_EXTENSION_NAME)) {
                hasKHRSurfaceExtension = true;
            }
            if (!strcmp(extensions[i].extensionName,VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
                hasKHRAndroidSurfaceExtension = true;
            }
        }
        // Both surface extensions are mandatory for rendering to an ANativeWindow.
        if (!hasKHRSurfaceExtension || !hasKHRAndroidSurfaceExtension) {
            this->destroy();
            return false;
        }
    }

    const VkInstanceCreateInfo instance_create = {
        VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
        nullptr, // pNext
        0, // flags
        &app_info, // pApplicationInfo
        0, // enabledLayerNameCount
        nullptr, // ppEnabledLayerNames
        (uint32_t) mInstanceExtensions.size(), // enabledExtensionNameCount
        mInstanceExtensions.data(), // ppEnabledExtensionNames
    };

    GET_PROC(CreateInstance);
    err = mCreateInstance(&instance_create, nullptr, &mInstance);
    // Only negative VkResults are errors here; positive success codes are accepted.
    if (err < 0) {
        this->destroy();
        return false;
    }

    // Instance-level entry points (valid now that mInstance exists).
    GET_INST_PROC(DestroyInstance);
    GET_INST_PROC(EnumeratePhysicalDevices);
    GET_INST_PROC(GetPhysicalDeviceProperties);
    GET_INST_PROC(GetPhysicalDeviceQueueFamilyProperties);
    GET_INST_PROC(GetPhysicalDeviceFeatures2);
    GET_INST_PROC(CreateDevice);
    GET_INST_PROC(EnumerateDeviceExtensionProperties);
    GET_INST_PROC(CreateAndroidSurfaceKHR);
    GET_INST_PROC(DestroySurfaceKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfaceSupportKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
    GET_INST_PROC(GetPhysicalDeviceSurfacePresentModesKHR);

    uint32_t gpuCount;
    err = mEnumeratePhysicalDevices(mInstance, &gpuCount, nullptr);
    if (err) {
        this->destroy();
        return false;
    }
    if (!gpuCount) {
        this->destroy();
        return false;
    }
    // Just returning the first physical device instead of getting the whole array. Since there
    // should only be one device on android.
    gpuCount = 1;
    err = mEnumeratePhysicalDevices(mInstance, &gpuCount, &mPhysicalDevice);
    // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
    if (err && VK_INCOMPLETE != err) {
        this->destroy();
        return false;
    }

    // The device must support Vulkan 1.1 to match the apiVersion requested above.
    VkPhysicalDeviceProperties physDeviceProperties;
    mGetPhysicalDeviceProperties(mPhysicalDevice, &physDeviceProperties);
    if (physDeviceProperties.apiVersion < VK_MAKE_VERSION(1, 1, 0)) {
        this->destroy();
        return false;
    }

    // query to get the initial queue props size
    uint32_t queueCount;
    mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, nullptr);
    if (!queueCount) {
        this->destroy();
        return false;
    }

    // now get the actual queue props
    std::unique_ptr<VkQueueFamilyProperties[]> queueProps(new VkQueueFamilyProperties[queueCount]);
    mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, queueProps.get());

    // iterate to find the graphics queue
    mGraphicsQueueIndex = queueCount;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            mGraphicsQueueIndex = i;
            break;
        }
    }
    // queueCount acts as a sentinel for "no graphics-capable family found".
    if (mGraphicsQueueIndex == queueCount) {
        this->destroy();
        return false;
    }

    // All physical devices and queue families on Android must be capable of
    // presentation with any native window. So just use the first one.
    mPresentQueueIndex = 0;

    {
        uint32_t extensionCount = 0;
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                nullptr);
        if (VK_SUCCESS != err) {
            this->destroy();
            return false;
        }
        std::unique_ptr<VkExtensionProperties[]> extensions(
                new VkExtensionProperties[extensionCount]);
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                extensions.get());
        if (VK_SUCCESS != err) {
            this->destroy();
            return false;
        }
        bool hasKHRSwapchainExtension = false;
        for (uint32_t i = 0; i < extensionCount; ++i) {
            // NOTE(review): same dangling-pointer concern as the instance-extension
            // loop above — extensionName points into the local |extensions| buffer.
            mDeviceExtensions.push_back(extensions[i].extensionName);
            if (!strcmp(extensions[i].extensionName, VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
                hasKHRSwapchainExtension = true;
            }
        }
        if (!hasKHRSwapchainExtension) {
            this->destroy();
            return false;
        }
    }

    // Generic proc loader handed to Skia: prefers device-level lookups when a
    // device is available, otherwise falls back to instance-level.
    auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return vkGetDeviceProcAddr(device, proc_name);
        }
        return vkGetInstanceProcAddr(instance, proc_name);
    };
    grExtensions.init(getProc, mInstance, mPhysicalDevice, mInstanceExtensions.size(),
            mInstanceExtensions.data(), mDeviceExtensions.size(), mDeviceExtensions.data());

    // External semaphore FDs are required for cross-process fence sharing.
    if (!grExtensions.hasExtension(VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME, 1)) {
        this->destroy();
        return false;
    }

    memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
    features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    features.pNext = nullptr;

    // Setup all extension feature structs we may want to use. Each node is
    // malloc'd and appended at *tailPNext; ownership passes to the caller's
    // |features| chain (freed in destroy() via free_features_extensions_structs).
    void** tailPNext = &features.pNext;

    if (grExtensions.hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
        VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend;
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) malloc(
                sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
        LOG_ALWAYS_FATAL_IF(!blend);
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
        blend->pNext = nullptr;
        *tailPNext = blend;
        tailPNext = &blend->pNext;
    }

    VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature;
    ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*) malloc(
            sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
    LOG_ALWAYS_FATAL_IF(!ycbcrFeature);
    ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
    ycbcrFeature->pNext = nullptr;
    *tailPNext = ycbcrFeature;
    tailPNext = &ycbcrFeature->pNext;

    // query to get the physical device features
    mGetPhysicalDeviceFeatures2(mPhysicalDevice, &features);
    // this looks like it would slow things down,
    // and we can't depend on it on all platforms
    features.features.robustBufferAccess = VK_FALSE;

    float queuePriorities[1] = { 0.0 };

    const VkDeviceQueueCreateInfo queueInfo[2] = {
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr, // pNext
            0, // VkDeviceQueueCreateFlags
            mGraphicsQueueIndex, // queueFamilyIndex
            1, // queueCount
            queuePriorities, // pQueuePriorities
        },
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr, // pNext
            0, // VkDeviceQueueCreateFlags
            mPresentQueueIndex, // queueFamilyIndex
            1, // queueCount
            queuePriorities, // pQueuePriorities
        }
    };
    // Only request the second queue-create entry when present and graphics live
    // in different families.
    uint32_t queueInfoCount = (mPresentQueueIndex != mGraphicsQueueIndex) ? 2 : 1;

    const VkDeviceCreateInfo deviceInfo = {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
        &features, // pNext (features enabled via the VkPhysicalDeviceFeatures2 chain)
        0, // VkDeviceCreateFlags
        queueInfoCount, // queueCreateInfoCount
        queueInfo, // pQueueCreateInfos
        0, // layerCount
        nullptr, // ppEnabledLayerNames
        (uint32_t) mDeviceExtensions.size(), // extensionCount
        mDeviceExtensions.data(), // ppEnabledExtensionNames
        nullptr, // ppEnabledFeatures (must be null when features are passed via pNext)
    };

    err = mCreateDevice(mPhysicalDevice, &deviceInfo, nullptr, &mDevice);
    if (err) {
        this->destroy();
        return false;
    }

    // Device-level entry points (valid now that mDevice exists).
    GET_DEV_PROC(GetDeviceQueue);
    GET_DEV_PROC(DeviceWaitIdle);
    GET_DEV_PROC(DestroyDevice);
    GET_DEV_PROC(CreateSwapchainKHR);
    GET_DEV_PROC(DestroySwapchainKHR);
    GET_DEV_PROC(GetSwapchainImagesKHR);
    GET_DEV_PROC(AcquireNextImageKHR);
    GET_DEV_PROC(QueuePresentKHR);
    GET_DEV_PROC(CreateCommandPool);
    GET_DEV_PROC(DestroyCommandPool);
    GET_DEV_PROC(AllocateCommandBuffers);
    GET_DEV_PROC(FreeCommandBuffers);
    GET_DEV_PROC(ResetCommandBuffer);
    GET_DEV_PROC(BeginCommandBuffer);
    GET_DEV_PROC(EndCommandBuffer);
    GET_DEV_PROC(CmdPipelineBarrier);
    GET_DEV_PROC(GetDeviceQueue);
    GET_DEV_PROC(QueueSubmit);
    GET_DEV_PROC(QueueWaitIdle);
    GET_DEV_PROC(DeviceWaitIdle);
    GET_DEV_PROC(CreateSemaphore);
    GET_DEV_PROC(DestroySemaphore);
    GET_DEV_PROC(ImportSemaphoreFdKHR);
    GET_DEV_PROC(GetSemaphoreFdKHR);
    GET_DEV_PROC(CreateFence);
    GET_DEV_PROC(DestroyFence);
    GET_DEV_PROC(WaitForFences);
    GET_DEV_PROC(ResetFences);

    return true;
}
373
// One-time (idempotent) setup of the Vulkan backend for this render thread:
// verifies a 1.1-capable loader, creates instance/device via setupDevice(),
// builds the GrVkBackendContext, creates the command pool and dummy command
// buffer, and hands Skia a GrContext. Fatal (LOG_ALWAYS_FATAL) on most
// failures since the renderer cannot proceed without them.
void VulkanManager::initialize() {
    // Already initialized — setupDevice() sets mDevice on success.
    if (mDevice != VK_NULL_HANDLE) {
        return;
    }

    GET_PROC(EnumerateInstanceVersion);
    // vkEnumerateInstanceVersion returns non-zero (an error VkResult) on failure.
    LOG_ALWAYS_FATAL_IF(mEnumerateInstanceVersion(&mInstanceVersion));
    LOG_ALWAYS_FATAL_IF(mInstanceVersion < VK_MAKE_VERSION(1, 1, 0));

    GrVkExtensions extensions;
    LOG_ALWAYS_FATAL_IF(!this->setupDevice(extensions, mPhysicalDeviceFeatures2));

    mGetDeviceQueue(mDevice, mGraphicsQueueIndex, 0, &mGraphicsQueue);

    // Proc loader for Skia: device-level lookup when a device is given,
    // instance-level otherwise.
    auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return vkGetDeviceProcAddr(device, proc_name);
        }
        return vkGetInstanceProcAddr(instance, proc_name);
    };

    // NOTE: backendContext holds a pointer to the stack-local |extensions|; it is
    // only used within the GrContext::MakeVulkan call below in this function.
    GrVkBackendContext backendContext;
    backendContext.fInstance = mInstance;
    backendContext.fPhysicalDevice = mPhysicalDevice;
    backendContext.fDevice = mDevice;
    backendContext.fQueue = mGraphicsQueue;
    backendContext.fGraphicsQueueIndex = mGraphicsQueueIndex;
    backendContext.fInstanceVersion = mInstanceVersion;
    backendContext.fVkExtensions = &extensions;
    backendContext.fDeviceFeatures2 = &mPhysicalDeviceFeatures2;
    backendContext.fGetProc = std::move(getProc);

    // create the command pool for the command buffers
    if (VK_NULL_HANDLE == mCommandPool) {
        VkCommandPoolCreateInfo commandPoolInfo;
        memset(&commandPoolInfo, 0, sizeof(VkCommandPoolCreateInfo));
        commandPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
        // this needs to be on the render queue
        commandPoolInfo.queueFamilyIndex = mGraphicsQueueIndex;
        commandPoolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
        // res only exists in debug builds; release builds rely on the
        // LOG_ALWAYS_FATAL_IF null-handle check below.
        SkDEBUGCODE(VkResult res =) mCreateCommandPool(mDevice, &commandPoolInfo, nullptr,
                &mCommandPool);
        SkASSERT(VK_SUCCESS == res);
    }
    LOG_ALWAYS_FATAL_IF(mCommandPool == VK_NULL_HANDLE);

    if (!setupDummyCommandBuffer()) {
        this->destroy();
        return;
    }
    LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);


    mGetDeviceQueue(mDevice, mPresentQueueIndex, 0, &mPresentQueue);

    GrContextOptions options;
    options.fDisableDistanceFieldPaths = true;
    // TODO: get a string describing the SPIR-V compiler version and use it here
    mRenderThread.cacheManager().configureContext(&options, nullptr, 0);
    sk_sp<GrContext> grContext(GrContext::MakeVulkan(backendContext, options));
    LOG_ALWAYS_FATAL_IF(!grContext.get());
    mRenderThread.setGrContext(grContext);

    // Partial-update support: only advertise buffer-age-based swaps when the
    // relevant properties are enabled.
    if (Properties::enablePartialUpdates && Properties::useBufferAge) {
        mSwapBehavior = SwapBehavior::BufferAge;
    }
}
441
Bo Liu7b8c1eb2019-01-08 20:17:55 -0800442VkFunctorInitParams VulkanManager::getVkFunctorInitParams() const {
443 return VkFunctorInitParams{
444 .instance = mInstance,
445 .physical_device = mPhysicalDevice,
446 .device = mDevice,
447 .queue = mGraphicsQueue,
448 .graphics_queue_index = mGraphicsQueueIndex,
449 .instance_version = mInstanceVersion,
450 .enabled_instance_extension_names = mInstanceExtensions.data(),
451 .enabled_instance_extension_names_length =
452 static_cast<uint32_t>(mInstanceExtensions.size()),
453 .enabled_device_extension_names = mDeviceExtensions.data(),
454 .enabled_device_extension_names_length =
455 static_cast<uint32_t>(mDeviceExtensions.size()),
456 .device_features_2 = &mPhysicalDeviceFeatures2,
457 };
458}
459
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500460// Returns the next BackbufferInfo to use for the next draw. The function will make sure all
461// previous uses have finished before returning.
462VulkanSurface::BackbufferInfo* VulkanManager::getAvailableBackbuffer(VulkanSurface* surface) {
463 SkASSERT(surface->mBackbuffers);
464
465 ++surface->mCurrentBackbufferIndex;
466 if (surface->mCurrentBackbufferIndex > surface->mImageCount) {
467 surface->mCurrentBackbufferIndex = 0;
468 }
469
John Reck1bcacfd2017-11-03 10:12:19 -0700470 VulkanSurface::BackbufferInfo* backbuffer =
471 surface->mBackbuffers + surface->mCurrentBackbufferIndex;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500472
473 // Before we reuse a backbuffer, make sure its fences have all signaled so that we can safely
474 // reuse its commands buffers.
Greg Daniel2ff202712018-06-14 11:50:10 -0400475 VkResult res = mWaitForFences(mDevice, 2, backbuffer->mUsageFences, true, UINT64_MAX);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500476 if (res != VK_SUCCESS) {
477 return nullptr;
478 }
479
480 return backbuffer;
481}
482
Greg Danielc4076782019-01-08 16:01:18 -0500483static SkMatrix getPreTransformMatrix(int width, int height,
484 VkSurfaceTransformFlagBitsKHR transform) {
485 switch (transform) {
486 case VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR:
487 return SkMatrix::I();
488 case VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR:
489 return SkMatrix::MakeAll(0, -1, height, 1, 0, 0, 0, 0, 1);
490 case VK_SURFACE_TRANSFORM_ROTATE_180_BIT_KHR:
491 return SkMatrix::MakeAll(-1, 0, width, 0, -1, height, 0, 0, 1);
492 case VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR:
493 return SkMatrix::MakeAll(0, 1, 0, -1, 0, width, 0, 0, 1);
494 case VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_BIT_KHR:
495 return SkMatrix::MakeAll(-1, 0, width, 0, 1, 0, 0, 0, 1);
496 case VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR:
497 return SkMatrix::MakeAll(0, -1, height, -1, 0, width, 0, 0, 1);
498 case VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_180_BIT_KHR:
499 return SkMatrix::MakeAll(1, 0, 0, 0, -1, height, 0, 0, 1);
500 case VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR:
501 return SkMatrix::MakeAll(0, 1, 0, 1, 0, 0, 0, 0, 1);
502 default:
503 LOG_ALWAYS_FATAL("Unsupported pre transform of swapchain.");
504 }
505 return SkMatrix::I();
506}
507
508
// Returns the SkSurface to draw the next frame into (owned by the surface's
// backbuffer), or nullptr on unrecoverable acquire failures. Handles window
// resize (recreating the VulkanSurface), swapchain acquire (including
// out-of-date/suboptimal recovery), and submits the initial layout transition
// to COLOR_ATTACHMENT_OPTIMAL fenced on mUsageFences[0].
// May replace *surfaceOut if the ANativeWindow was resized.
SkSurface* VulkanManager::getBackbufferSurface(VulkanSurface** surfaceOut) {
    // Recreate VulkanSurface, if ANativeWindow has been resized.
    VulkanSurface* surface = *surfaceOut;
    int windowWidth = 0, windowHeight = 0;
    ANativeWindow* window = surface->mNativeWindow;
    window->query(window, NATIVE_WINDOW_WIDTH, &windowWidth);
    window->query(window, NATIVE_WINDOW_HEIGHT, &windowHeight);
    if (windowWidth != surface->mWindowWidth || windowHeight != surface->mWindowHeight) {
        // Preserve the color configuration across the recreate.
        ColorMode colorMode = surface->mColorMode;
        sk_sp<SkColorSpace> colorSpace = surface->mColorSpace;
        SkColorType colorType = surface->mColorType;
        destroySurface(surface);
        *surfaceOut = createSurface(window, colorMode, colorSpace, colorType);
        surface = *surfaceOut;
    }

    VulkanSurface::BackbufferInfo* backbuffer = getAvailableBackbuffer(surface);
    SkASSERT(backbuffer);

    VkResult res;

    // Fences were signaled (checked in getAvailableBackbuffer); reset them for reuse.
    res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
    SkASSERT(VK_SUCCESS == res);

    // The acquire will signal the attached mAcquireSemaphore. We use this to know the image has
    // finished presenting and that it is safe to begin sending new commands to the returned image.
    res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
            backbuffer->mAcquireSemaphore, VK_NULL_HANDLE,
            &backbuffer->mImageIndex);

    if (VK_ERROR_SURFACE_LOST_KHR == res) {
        // need to figure out how to create a new vkSurface without the platformData*
        // maybe use attach somehow? but need a Window
        return nullptr;
    }
    if (VK_ERROR_OUT_OF_DATE_KHR == res || VK_SUBOPTIMAL_KHR == res) {
        // tear swapchain down and try again
        if (!createSwapchain(surface)) {
            return nullptr;
        }
        // The new swapchain invalidated the old backbuffer slot; pick a fresh one.
        backbuffer = getAvailableBackbuffer(surface);
        res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
        SkASSERT(VK_SUCCESS == res);

        // acquire the image
        res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
                backbuffer->mAcquireSemaphore, VK_NULL_HANDLE,
                &backbuffer->mImageIndex);

        // No second retry — give up if even the fresh swapchain cannot acquire.
        if (VK_SUCCESS != res) {
            return nullptr;
        }
    }

    // set up layout transfer from initial to color attachment
    VkImageLayout layout = surface->mImageInfos[backbuffer->mImageIndex].mImageLayout;
    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout || VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout);
    VkPipelineStageFlags srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkAccessFlags srcAccessMask = 0;
    VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
            VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;

    // Barrier also transfers queue-family ownership: present family -> graphics family.
    VkImageMemoryBarrier imageMemoryBarrier = {
            VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
            NULL, // pNext
            srcAccessMask, // outputMask
            dstAccessMask, // inputMask
            layout, // oldLayout
            VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // newLayout
            mPresentQueueIndex, // srcQueueFamilyIndex
            mGraphicsQueueIndex, // dstQueueFamilyIndex
            surface->mImages[backbuffer->mImageIndex], // image
            {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1} // subresourceRange
    };
    // Record the transition into the slot's first command buffer.
    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[0], 0);

    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[0], &info);

    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[0], srcStageMask, dstStageMask, 0, 0,
            nullptr, 0, nullptr, 1, &imageMemoryBarrier);

    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[0]);

    VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 1;
    // Wait to make sure aquire semaphore set above has signaled.
    submitInfo.pWaitSemaphores = &backbuffer->mAcquireSemaphore;
    submitInfo.pWaitDstStageMask = &waitDstStageFlags;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[0];
    submitInfo.signalSemaphoreCount = 0;

    // Attach first fence to submission here so we can track when the command buffer finishes.
    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[0]);

    // We need to notify Skia that we changed the layout of the wrapped VkImage
    sk_sp<SkSurface> skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface;
    GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
            SkSurface::kFlushRead_BackendHandleAccess);
    if (!backendRT.isValid()) {
        SkASSERT(backendRT.isValid());
        return nullptr;
    }
    backendRT.setVkImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);

    // Cache the matrix compensating for the display's pre-rotation.
    surface->mPreTransform = getPreTransformMatrix(surface->windowWidth(),
                                                   surface->windowHeight(),
                                                   surface->mTransform);

    surface->mBackbuffer = std::move(skSurface);
    return surface->mBackbuffer.get();
}
630
631void VulkanManager::destroyBuffers(VulkanSurface* surface) {
632 if (surface->mBackbuffers) {
633 for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400634 mWaitForFences(mDevice, 2, surface->mBackbuffers[i].mUsageFences, true, UINT64_MAX);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500635 surface->mBackbuffers[i].mImageIndex = -1;
Greg Daniel2ff202712018-06-14 11:50:10 -0400636 mDestroySemaphore(mDevice, surface->mBackbuffers[i].mAcquireSemaphore, nullptr);
637 mDestroySemaphore(mDevice, surface->mBackbuffers[i].mRenderSemaphore, nullptr);
638 mFreeCommandBuffers(mDevice, mCommandPool, 2,
639 surface->mBackbuffers[i].mTransitionCmdBuffers);
640 mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[0], 0);
641 mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[1], 0);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500642 }
643 }
644
645 delete[] surface->mBackbuffers;
646 surface->mBackbuffers = nullptr;
Greg Danielcd558522016-11-17 13:31:40 -0500647 delete[] surface->mImageInfos;
648 surface->mImageInfos = nullptr;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500649 delete[] surface->mImages;
650 surface->mImages = nullptr;
651}
652
653void VulkanManager::destroySurface(VulkanSurface* surface) {
654 // Make sure all submit commands have finished before starting to destroy objects.
655 if (VK_NULL_HANDLE != mPresentQueue) {
656 mQueueWaitIdle(mPresentQueue);
657 }
Greg Daniel2ff202712018-06-14 11:50:10 -0400658 mDeviceWaitIdle(mDevice);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500659
660 destroyBuffers(surface);
661
662 if (VK_NULL_HANDLE != surface->mSwapchain) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400663 mDestroySwapchainKHR(mDevice, surface->mSwapchain, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500664 surface->mSwapchain = VK_NULL_HANDLE;
665 }
666
667 if (VK_NULL_HANDLE != surface->mVkSurface) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400668 mDestroySurfaceKHR(mInstance, surface->mVkSurface, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500669 surface->mVkSurface = VK_NULL_HANDLE;
670 }
671 delete surface;
672}
673
// Queries the swapchain's images, wraps each one in an SkSurface render target,
// and allocates the per-backbuffer synchronization objects (semaphores, fences,
// transition command buffers) used by the acquire/present cycle.
// Preconditions: surface->mSwapchain is valid and mCommandPool has been created.
void VulkanManager::createBuffers(VulkanSurface* surface, VkFormat format, VkExtent2D extent) {
    // First call retrieves the image count, second call fills the array.
    mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, nullptr);
    SkASSERT(surface->mImageCount);
    surface->mImages = new VkImage[surface->mImageCount];
    mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, surface->mImages);

    SkSurfaceProps props(0, kUnknown_SkPixelGeometry);

    // set up initial image layouts and create surfaces
    surface->mImageInfos = new VulkanSurface::ImageInfo[surface->mImageCount];
    for (uint32_t i = 0; i < surface->mImageCount; ++i) {
        // Describe the swapchain-owned VkImage to Skia; no allocation is attached
        // because the swapchain owns the memory.
        GrVkImageInfo info;
        info.fImage = surface->mImages[i];
        info.fAlloc = GrVkAlloc();
        info.fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
        info.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
        info.fFormat = format;
        info.fLevelCount = 1;

        GrBackendRenderTarget backendRT(extent.width, extent.height, 0, 0, info);

        VulkanSurface::ImageInfo& imageInfo = surface->mImageInfos[i];
        imageInfo.mSurface = SkSurface::MakeFromBackendRenderTarget(
                mRenderThread.getGrContext(), backendRT, kTopLeft_GrSurfaceOrigin,
                surface->mColorType, surface->mColorSpace, &props);
    }

    SkASSERT(mCommandPool != VK_NULL_HANDLE);

    // set up the backbuffers
    VkSemaphoreCreateInfo semaphoreInfo;
    memset(&semaphoreInfo, 0, sizeof(VkSemaphoreCreateInfo));
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    VkCommandBufferAllocateInfo commandBuffersInfo;
    memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
    commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    commandBuffersInfo.pNext = nullptr;
    commandBuffersInfo.commandPool = mCommandPool;
    commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    commandBuffersInfo.commandBufferCount = 2;
    VkFenceCreateInfo fenceInfo;
    memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
    fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    fenceInfo.pNext = nullptr;
    // Fences start signaled so the first wait on a fresh backbuffer returns
    // immediately.
    fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;

    // we create one additional backbuffer structure here, because we want to
    // give the command buffers they contain a chance to finish before we cycle back
    surface->mBackbuffers = new VulkanSurface::BackbufferInfo[surface->mImageCount + 1];
    for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
        // NOTE: in debug builds only the result of the LAST create call is
        // checked by the SkASSERT below; earlier failures are overwritten.
        SkDEBUGCODE(VkResult res);
        surface->mBackbuffers[i].mImageIndex = -1;
        SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
                &surface->mBackbuffers[i].mAcquireSemaphore);
        SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
                &surface->mBackbuffers[i].mRenderSemaphore);
        SkDEBUGCODE(res =) mAllocateCommandBuffers(mDevice, &commandBuffersInfo,
                surface->mBackbuffers[i].mTransitionCmdBuffers);
        SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
                &surface->mBackbuffers[i].mUsageFences[0]);
        SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
                &surface->mBackbuffers[i].mUsageFences[1]);
        SkASSERT(VK_SUCCESS == res);
    }
    // Start one past the last image so the first acquire uses the spare slot.
    surface->mCurrentBackbufferIndex = surface->mImageCount;
}
742
// Creates (or recreates) the VkSwapchainKHR for the given surface, choosing
// extent, transform, format, color space, and image count from the surface
// capabilities and the surface's requested color mode/type. On success the old
// swapchain (if any) is destroyed, new buffers are created, and true is
// returned; on any query/creation failure the function returns false.
bool VulkanManager::createSwapchain(VulkanSurface* surface) {
    // check for capabilities
    VkSurfaceCapabilitiesKHR caps;
    VkResult res = mGetPhysicalDeviceSurfaceCapabilitiesKHR(mPhysicalDevice,
            surface->mVkSurface, &caps);
    if (VK_SUCCESS != res) {
        return false;
    }

    // Two-call pattern: size query, then fill.
    uint32_t surfaceFormatCount;
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
            &surfaceFormatCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    FatVector<VkSurfaceFormatKHR, 4> surfaceFormats(surfaceFormatCount);
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
            &surfaceFormatCount, surfaceFormats.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    uint32_t presentModeCount;
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
            surface->mVkSurface, &presentModeCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    FatVector<VkPresentModeKHR, VK_PRESENT_MODE_RANGE_SIZE_KHR> presentModes(presentModeCount);
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
            surface->mVkSurface, &presentModeCount,
            presentModes.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    // Identity transform is required as the fallback below.
    if (!SkToBool(caps.supportedTransforms & VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR)) {
        return false;
    }
    // Prefer the surface's current transform (pre-rotation) unless it is
    // unsupported or INHERIT; otherwise fall back to identity.
    VkSurfaceTransformFlagBitsKHR transform;
    if (SkToBool(caps.supportedTransforms & caps.currentTransform) &&
        !SkToBool(caps.currentTransform & VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR)) {
        transform = caps.currentTransform;
    } else {
        transform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
    }

    VkExtent2D extent = caps.currentExtent;
    // clamp width; to handle currentExtent of -1 and protect us from broken hints
    if (extent.width < caps.minImageExtent.width) {
        extent.width = caps.minImageExtent.width;
    }
    SkASSERT(extent.width <= caps.maxImageExtent.width);
    // clamp height
    if (extent.height < caps.minImageExtent.height) {
        extent.height = caps.minImageExtent.height;
    }
    SkASSERT(extent.height <= caps.maxImageExtent.height);

    // For 90/270-degree pre-rotation the swapchain images are created with
    // swapped dimensions relative to the window.
    VkExtent2D swapExtent = extent;
    if (transform == VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR ||
        transform == VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR ||
        transform == VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR ||
        transform == VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR) {
        swapExtent.width = extent.height;
        swapExtent.height = extent.width;
    }

    surface->mWindowWidth = extent.width;
    surface->mWindowHeight = extent.height;

    // Triple buffering, unless the device demands more or allows fewer.
    uint32_t imageCount = std::max<uint32_t>(3, caps.minImageCount);
    if (caps.maxImageCount > 0 && imageCount > caps.maxImageCount) {
        // Application must settle for fewer images than desired:
        imageCount = caps.maxImageCount;
    }

    // Currently Skia requires the images to be color attchments and support all transfer
    // operations.
    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    SkASSERT((caps.supportedUsageFlags & usageFlags) == usageFlags);

    SkASSERT(caps.supportedCompositeAlpha &
             (VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR | VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR));
    VkCompositeAlphaFlagBitsKHR composite_alpha =
            (caps.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR)
                    ? VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR
                    : VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;

    // Default 8888/sRGB; widen to F16 for F16 color type, and pick a wide-gamut
    // color space when the surface requests WideColorGamut mode.
    VkFormat surfaceFormat = VK_FORMAT_R8G8B8A8_UNORM;
    VkColorSpaceKHR colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
    if (surface->mColorType == SkColorType::kRGBA_F16_SkColorType) {
        surfaceFormat = VK_FORMAT_R16G16B16A16_SFLOAT;
    }

    if (surface->mColorMode == ColorMode::WideColorGamut) {
        // Match the SkColorSpace gamut against the known sRGB/DCI-P3 matrices.
        skcms_Matrix3x3 surfaceGamut;
        LOG_ALWAYS_FATAL_IF(!surface->mColorSpace->toXYZD50(&surfaceGamut),
                            "Could not get gamut matrix from color space");
        if (memcmp(&surfaceGamut, &SkNamedGamut::kSRGB, sizeof(surfaceGamut)) == 0) {
            colorSpace = VK_COLOR_SPACE_EXTENDED_SRGB_NONLINEAR_EXT;
        } else if (memcmp(&surfaceGamut, &SkNamedGamut::kDCIP3, sizeof(surfaceGamut)) == 0) {
            colorSpace = VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT;
        } else {
            LOG_ALWAYS_FATAL("Unreachable: unsupported wide color space.");
        }
    }

    // Verify the chosen format/color-space pair is actually supported.
    bool foundSurfaceFormat = false;
    for (uint32_t i = 0; i < surfaceFormatCount; ++i) {
        if (surfaceFormat == surfaceFormats[i].format
                && colorSpace == surfaceFormats[i].colorSpace) {
            foundSurfaceFormat = true;
            break;
        }
    }

    if (!foundSurfaceFormat) {
        return false;
    }

    // FIFO is always available and will match what we do on GL so just pick that here.
    VkPresentModeKHR mode = VK_PRESENT_MODE_FIFO_KHR;

    VkSwapchainCreateInfoKHR swapchainCreateInfo;
    memset(&swapchainCreateInfo, 0, sizeof(VkSwapchainCreateInfoKHR));
    swapchainCreateInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
    swapchainCreateInfo.surface = surface->mVkSurface;
    swapchainCreateInfo.minImageCount = imageCount;
    swapchainCreateInfo.imageFormat = surfaceFormat;
    swapchainCreateInfo.imageColorSpace = colorSpace;
    swapchainCreateInfo.imageExtent = swapExtent;
    swapchainCreateInfo.imageArrayLayers = 1;
    swapchainCreateInfo.imageUsage = usageFlags;

    // Concurrent sharing is only needed when graphics and present are on
    // different queue families.
    uint32_t queueFamilies[] = {mGraphicsQueueIndex, mPresentQueueIndex};
    if (mGraphicsQueueIndex != mPresentQueueIndex) {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
        swapchainCreateInfo.queueFamilyIndexCount = 2;
        swapchainCreateInfo.pQueueFamilyIndices = queueFamilies;
    } else {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
        swapchainCreateInfo.queueFamilyIndexCount = 0;
        swapchainCreateInfo.pQueueFamilyIndices = nullptr;
    }

    swapchainCreateInfo.preTransform = transform;
    swapchainCreateInfo.compositeAlpha = composite_alpha;
    swapchainCreateInfo.presentMode = mode;
    swapchainCreateInfo.clipped = true;
    // Passing the old swapchain lets the driver reuse resources during resize.
    swapchainCreateInfo.oldSwapchain = surface->mSwapchain;

    res = mCreateSwapchainKHR(mDevice, &swapchainCreateInfo, nullptr, &surface->mSwapchain);
    if (VK_SUCCESS != res) {
        return false;
    }

    surface->mTransform = transform;

    // destroy the old swapchain
    if (swapchainCreateInfo.oldSwapchain != VK_NULL_HANDLE) {
        mDeviceWaitIdle(mDevice);

        destroyBuffers(surface);

        mDestroySwapchainKHR(mDevice, swapchainCreateInfo.oldSwapchain, nullptr);
    }

    createBuffers(surface, surfaceFormat, swapExtent);

    // The window content is not updated (frozen) until a buffer of the window size is received.
    // This prevents temporary stretching of the window after it is resized, but before the first
    // buffer with new size is enqueued.
    native_window_set_scaling_mode(surface->mNativeWindow, NATIVE_WINDOW_SCALING_MODE_FREEZE);

    return true;
}
924
Stan Iliev987a80c2018-12-04 10:07:21 -0500925VulkanSurface* VulkanManager::createSurface(ANativeWindow* window, ColorMode colorMode,
Peiyong Lin3bff1352018-12-11 07:56:07 -0800926 sk_sp<SkColorSpace> surfaceColorSpace,
Peiyong Lin3bff1352018-12-11 07:56:07 -0800927 SkColorType surfaceColorType) {
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500928 initialize();
929
930 if (!window) {
931 return nullptr;
932 }
933
Peiyong Lin3bff1352018-12-11 07:56:07 -0800934 VulkanSurface* surface = new VulkanSurface(colorMode, window, surfaceColorSpace,
Brian Osmane0cf5972019-01-23 10:41:20 -0500935 surfaceColorType);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500936
937 VkAndroidSurfaceCreateInfoKHR surfaceCreateInfo;
938 memset(&surfaceCreateInfo, 0, sizeof(VkAndroidSurfaceCreateInfoKHR));
939 surfaceCreateInfo.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
940 surfaceCreateInfo.pNext = nullptr;
941 surfaceCreateInfo.flags = 0;
942 surfaceCreateInfo.window = window;
943
Greg Daniel2ff202712018-06-14 11:50:10 -0400944 VkResult res = mCreateAndroidSurfaceKHR(mInstance, &surfaceCreateInfo, nullptr,
945 &surface->mVkSurface);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500946 if (VK_SUCCESS != res) {
947 delete surface;
948 return nullptr;
949 }
950
John Reck1bcacfd2017-11-03 10:12:19 -0700951 SkDEBUGCODE(VkBool32 supported; res = mGetPhysicalDeviceSurfaceSupportKHR(
Greg Daniel2ff202712018-06-14 11:50:10 -0400952 mPhysicalDevice, mPresentQueueIndex, surface->mVkSurface, &supported);
953 // All physical devices and queue families on Android must be capable of
954 // presentation with any native window.
955 SkASSERT(VK_SUCCESS == res && supported););
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500956
957 if (!createSwapchain(surface)) {
958 destroySurface(surface);
959 return nullptr;
960 }
961
962 return surface;
963}
964
965// Helper to know which src stage flags we need to set when transitioning to the present layout
Greg Daniel8a2a7542018-10-04 13:46:55 -0400966static VkPipelineStageFlags layoutToPipelineSrcStageFlags(const VkImageLayout layout) {
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500967 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
968 return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
969 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
970 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
971 return VK_PIPELINE_STAGE_TRANSFER_BIT;
Greg Daniel8a2a7542018-10-04 13:46:55 -0400972 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
973 return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
974 } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
975 VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout) {
976 return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
977 } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
978 return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500979 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
980 return VK_PIPELINE_STAGE_HOST_BIT;
981 }
982
983 SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
984 return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
985}
986
987// Helper to know which src access mask we need to set when transitioning to the present layout
988static VkAccessFlags layoutToSrcAccessMask(const VkImageLayout layout) {
989 VkAccessFlags flags = 0;
990 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
991 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
John Reck1bcacfd2017-11-03 10:12:19 -0700992 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT |
993 VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_HOST_WRITE_BIT |
994 VK_ACCESS_HOST_READ_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500995 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
996 flags = VK_ACCESS_HOST_WRITE_BIT;
997 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
998 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
999 } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
1000 flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
1001 } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
1002 flags = VK_ACCESS_TRANSFER_WRITE_BIT;
1003 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
1004 flags = VK_ACCESS_TRANSFER_READ_BIT;
1005 } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
1006 flags = VK_ACCESS_SHADER_READ_BIT;
1007 }
1008 return flags;
1009}
1010
// Presents the current backbuffer: records and submits a barrier that
// transitions the swapchain image to PRESENT_SRC (transferring queue family
// ownership from graphics to present if they differ), then queues the present
// operation gated on the render semaphore.
void VulkanManager::swapBuffers(VulkanSurface* surface) {
    if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
        ATRACE_NAME("Finishing GPU work");
        mDeviceWaitIdle(mDevice);
    }

    SkASSERT(surface->mBackbuffers);
    VulkanSurface::BackbufferInfo* backbuffer =
            surface->mBackbuffers + surface->mCurrentBackbufferIndex;

    // Ask Skia for the backend render target so we can read the image's current
    // layout (Skia may have changed it while rendering).
    SkSurface* skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface.get();
    GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
            SkSurface::kFlushRead_BackendHandleAccess);
    SkASSERT(backendRT.isValid());

    GrVkImageInfo imageInfo;
    SkAssertResult(backendRT.getVkImageInfo(&imageInfo));

    // Check to make sure we never change the actually wrapped image
    SkASSERT(imageInfo.fImage == surface->mImages[backbuffer->mImageIndex]);

    // We need to transition the image to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR and make sure that all
    // previous work is complete for before presenting. So we first add the necessary barrier here.
    VkImageLayout layout = imageInfo.fImageLayout;
    VkPipelineStageFlags srcStageMask = layoutToPipelineSrcStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = layoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = 0;

    VkImageMemoryBarrier imageMemoryBarrier = {
            VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,  // sType
            NULL,  // pNext
            srcAccessMask,  // outputMask
            dstAccessMask,  // inputMask
            layout,  // oldLayout
            VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,  // newLayout
            mGraphicsQueueIndex,  // srcQueueFamilyIndex
            mPresentQueueIndex,  // dstQueueFamilyIndex
            surface->mImages[backbuffer->mImageIndex],  // image
            {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}  // subresourceRange
    };

    // Command buffer [1] is dedicated to the render->present transition
    // (buffer [0] is presumably used for the acquire-side transition —
    // recorded elsewhere; confirm against dequeueNextBuffer/getBackbufferSurface).
    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[1], 0);
    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[1], &info);
    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[1], srcStageMask, dstStageMask, 0, 0,
                        nullptr, 0, nullptr, 1, &imageMemoryBarrier);
    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[1]);

    surface->mImageInfos[backbuffer->mImageIndex].mImageLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 0;
    submitInfo.pWaitDstStageMask = 0;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[1];
    submitInfo.signalSemaphoreCount = 1;
    // When this command buffer finishes we will signal this semaphore so that we know it is now
    // safe to present the image to the screen.
    submitInfo.pSignalSemaphores = &backbuffer->mRenderSemaphore;

    // Attach second fence to submission here so we can track when the command buffer finishes.
    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[1]);

    // Submit present operation to present queue. We use a semaphore here to make sure all rendering
    // to the image is complete and that the layout has been change to present on the graphics
    // queue.
    const VkPresentInfoKHR presentInfo = {
            VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,  // sType
            NULL,  // pNext
            1,  // waitSemaphoreCount
            &backbuffer->mRenderSemaphore,  // pWaitSemaphores
            1,  // swapchainCount
            &surface->mSwapchain,  // pSwapchains
            &backbuffer->mImageIndex,  // pImageIndices
            NULL  // pResults
    };

    mQueuePresentKHR(mPresentQueue, &presentInfo);

    // Bookkeeping for buffer-age tracking (see getAge()).
    surface->mBackbuffer.reset();
    surface->mImageInfos[backbuffer->mImageIndex].mLastUsed = surface->mCurrentTime;
    surface->mImageInfos[backbuffer->mImageIndex].mInvalid = false;
    surface->mCurrentTime++;
}
1102
1103int VulkanManager::getAge(VulkanSurface* surface) {
Greg Daniel74ea2012017-11-10 11:32:58 -05001104 SkASSERT(surface->mBackbuffers);
John Reck1bcacfd2017-11-03 10:12:19 -07001105 VulkanSurface::BackbufferInfo* backbuffer =
1106 surface->mBackbuffers + surface->mCurrentBackbufferIndex;
1107 if (mSwapBehavior == SwapBehavior::Discard ||
1108 surface->mImageInfos[backbuffer->mImageIndex].mInvalid) {
Greg Danielcd558522016-11-17 13:31:40 -05001109 return 0;
1110 }
1111 uint16_t lastUsed = surface->mImageInfos[backbuffer->mImageIndex].mLastUsed;
1112 return surface->mCurrentTime - lastUsed;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001113}
1114
Greg Daniel26e0dca2018-09-18 10:33:19 -04001115bool VulkanManager::setupDummyCommandBuffer() {
1116 if (mDummyCB != VK_NULL_HANDLE) {
1117 return true;
1118 }
1119
1120 VkCommandBufferAllocateInfo commandBuffersInfo;
1121 memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
1122 commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
1123 commandBuffersInfo.pNext = nullptr;
1124 commandBuffersInfo.commandPool = mCommandPool;
1125 commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
1126 commandBuffersInfo.commandBufferCount = 1;
1127
1128 VkResult err = mAllocateCommandBuffers(mDevice, &commandBuffersInfo, &mDummyCB);
1129 if (err != VK_SUCCESS) {
1130 // It is probably unnecessary to set this back to VK_NULL_HANDLE, but we set it anyways to
1131 // make sure the driver didn't set a value and then return a failure.
1132 mDummyCB = VK_NULL_HANDLE;
1133 return false;
1134 }
1135
1136 VkCommandBufferBeginInfo beginInfo;
1137 memset(&beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
1138 beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
1139 beginInfo.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
1140
1141 mBeginCommandBuffer(mDummyCB, &beginInfo);
1142 mEndCommandBuffer(mDummyCB);
1143 return true;
1144}
1145
Stan Iliev564ca3e2018-09-04 22:00:00 +00001146status_t VulkanManager::fenceWait(sp<Fence>& fence) {
Greg Daniel26e0dca2018-09-18 10:33:19 -04001147 if (!hasVkContext()) {
1148 ALOGE("VulkanManager::fenceWait: VkDevice not initialized");
1149 return INVALID_OPERATION;
1150 }
1151
Stan Iliev7a081272018-10-26 17:54:18 -04001152 // Block GPU on the fence.
1153 int fenceFd = fence->dup();
1154 if (fenceFd == -1) {
1155 ALOGE("VulkanManager::fenceWait: error dup'ing fence fd: %d", errno);
1156 return -errno;
Stan Iliev564ca3e2018-09-04 22:00:00 +00001157 }
Stan Iliev7a081272018-10-26 17:54:18 -04001158
1159 VkSemaphoreCreateInfo semaphoreInfo;
1160 semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
1161 semaphoreInfo.pNext = nullptr;
1162 semaphoreInfo.flags = 0;
1163 VkSemaphore semaphore;
1164 VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
1165 if (VK_SUCCESS != err) {
1166 ALOGE("Failed to create import semaphore, err: %d", err);
1167 return UNKNOWN_ERROR;
1168 }
1169 VkImportSemaphoreFdInfoKHR importInfo;
1170 importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
1171 importInfo.pNext = nullptr;
1172 importInfo.semaphore = semaphore;
1173 importInfo.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
1174 importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
1175 importInfo.fd = fenceFd;
1176
1177 err = mImportSemaphoreFdKHR(mDevice, &importInfo);
1178 if (VK_SUCCESS != err) {
1179 ALOGE("Failed to import semaphore, err: %d", err);
1180 return UNKNOWN_ERROR;
1181 }
1182
1183 LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);
1184
1185 VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
1186
1187 VkSubmitInfo submitInfo;
1188 memset(&submitInfo, 0, sizeof(VkSubmitInfo));
1189 submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
1190 submitInfo.waitSemaphoreCount = 1;
1191 // Wait to make sure aquire semaphore set above has signaled.
1192 submitInfo.pWaitSemaphores = &semaphore;
1193 submitInfo.pWaitDstStageMask = &waitDstStageFlags;
1194 submitInfo.commandBufferCount = 1;
1195 submitInfo.pCommandBuffers = &mDummyCB;
1196 submitInfo.signalSemaphoreCount = 0;
1197
1198 mQueueSubmit(mGraphicsQueue, 1, &submitInfo, VK_NULL_HANDLE);
1199
1200 // On Android when we import a semaphore, it is imported using temporary permanence. That
1201 // means as soon as we queue the semaphore for a wait it reverts to its previous permanent
1202 // state before importing. This means it will now be in an idle state with no pending
1203 // signal or wait operations, so it is safe to immediately delete it.
1204 mDestroySemaphore(mDevice, semaphore, nullptr);
Stan Iliev564ca3e2018-09-04 22:00:00 +00001205 return OK;
1206}
1207
// Creates a native release fence signaled when all currently submitted GPU
// work completes: creates an exportable VkSemaphore, submits the dummy command
// buffer signaling it, then exports it as a sync-fd wrapped in a Fence.
// Returns OK on success; INVALID_OPERATION on initialization or Vulkan errors.
status_t VulkanManager::createReleaseFence(sp<Fence>& nativeFence) {
    if (!hasVkContext()) {
        ALOGE("VulkanManager::createReleaseFence: VkDevice not initialized");
        return INVALID_OPERATION;
    }

    // The semaphore must be created exportable as a sync fd.
    VkExportSemaphoreCreateInfo exportInfo;
    exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
    exportInfo.pNext = nullptr;
    exportInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    VkSemaphoreCreateInfo semaphoreInfo;
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = &exportInfo;
    semaphoreInfo.flags = 0;
    VkSemaphore semaphore;
    VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
    if (VK_SUCCESS != err) {
        ALOGE("VulkanManager::createReleaseFence: Failed to create semaphore");
        return INVALID_OPERATION;
    }

    LOG_ALWAYS_FATAL_IF(mDummyCB == VK_NULL_HANDLE);

    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 0;
    submitInfo.pWaitSemaphores = nullptr;
    submitInfo.pWaitDstStageMask = nullptr;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &mDummyCB;
    submitInfo.signalSemaphoreCount = 1;
    submitInfo.pSignalSemaphores = &semaphore;

    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, VK_NULL_HANDLE);

    VkSemaphoreGetFdInfoKHR getFdInfo;
    getFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
    getFdInfo.pNext = nullptr;
    getFdInfo.semaphore = semaphore;
    getFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    int fenceFd = 0;

    err = mGetSemaphoreFdKHR(mDevice, &getFdInfo, &fenceFd);
    if (VK_SUCCESS != err) {
        ALOGE("VulkanManager::createReleaseFence: Failed to get semaphore Fd");
        // NOTE(review): the semaphore is leaked on this path. It cannot simply
        // be destroyed here because the submit above leaves it with a pending
        // signal operation; a correct fix needs a queue/device wait first — TODO.
        return INVALID_OPERATION;
    }
    nativeFence = new Fence(fenceFd);

    // Exporting a semaphore with copy transference via vkGetSemahporeFdKHR, has the same effect of
    // destroying the semaphore and creating a new one with the same handle, and the payloads
    // ownership is move to the Fd we created. Thus the semahpore is in a state that we can delete
    // it and we don't need to wait on the command buffer we submitted to finish.
    mDestroySemaphore(mDevice, semaphore, nullptr);

    return OK;
}
1268
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001269} /* namespace renderthread */
1270} /* namespace uirenderer */
1271} /* namespace android */