blob: 1517f579a084e89c845e00de9ce5e211713bc4f1 [file] [log] [blame]
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001/*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "VulkanManager.h"
18
19#include "DeviceInfo.h"
Greg Danielcd558522016-11-17 13:31:40 -050020#include "Properties.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050021#include "RenderThread.h"
Greg Daniel45ec62b2017-01-04 14:27:00 -050022#include "renderstate/RenderState.h"
Ben Wagnereec27d52017-01-11 15:32:07 -050023#include "utils/FatVector.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050024
Greg Danielac2d2322017-07-12 11:30:15 -040025#include <GrBackendSurface.h>
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050026#include <GrContext.h>
27#include <GrTypes.h>
Greg Daniela227dbb2018-08-20 09:19:48 -040028#include <GrTypes.h>
29#include <vk/GrVkExtensions.h>
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050030#include <vk/GrVkTypes.h>
31
32namespace android {
33namespace uirenderer {
34namespace renderthread {
35
Greg Daniel2ff202712018-06-14 11:50:10 -040036#define GET_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(VK_NULL_HANDLE, "vk" #F)
37#define GET_INST_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(mInstance, "vk" #F)
38#define GET_DEV_PROC(F) m##F = (PFN_vk##F)vkGetDeviceProcAddr(mDevice, "vk" #F)
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050039
// One VulkanManager per RenderThread; all Vulkan state is created lazily in
// initialize(). The reference member must be bound in the init list.
VulkanManager::VulkanManager(RenderThread& thread) : mRenderThread(thread) {}
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050041
42void VulkanManager::destroy() {
Greg Daniel45ec62b2017-01-04 14:27:00 -050043 mRenderThread.renderState().onVkContextDestroyed();
44 mRenderThread.setGrContext(nullptr);
45
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050046 if (VK_NULL_HANDLE != mCommandPool) {
Greg Daniel2ff202712018-06-14 11:50:10 -040047 mDestroyCommandPool(mDevice, mCommandPool, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050048 mCommandPool = VK_NULL_HANDLE;
49 }
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050050
Greg Daniel2ff202712018-06-14 11:50:10 -040051 if (mDevice != VK_NULL_HANDLE) {
52 mDeviceWaitIdle(mDevice);
53 mDestroyDevice(mDevice, nullptr);
John Reck1bcacfd2017-11-03 10:12:19 -070054 }
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050055
Greg Daniel2ff202712018-06-14 11:50:10 -040056 if (mInstance != VK_NULL_HANDLE) {
57 mDestroyInstance(mInstance, nullptr);
58 }
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050059
Greg Daniel2ff202712018-06-14 11:50:10 -040060 mGraphicsQueue = VK_NULL_HANDLE;
61 mPresentQueue = VK_NULL_HANDLE;
62 mDevice = VK_NULL_HANDLE;
63 mPhysicalDevice = VK_NULL_HANDLE;
64 mInstance = VK_NULL_HANDLE;
65}
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050066
Greg Daniela227dbb2018-08-20 09:19:48 -040067bool VulkanManager::setupDevice(GrVkExtensions& grExtensions, VkPhysicalDeviceFeatures2& features) {
Greg Daniel2ff202712018-06-14 11:50:10 -040068 VkResult err;
69
70 constexpr VkApplicationInfo app_info = {
71 VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
72 nullptr, // pNext
73 "android framework", // pApplicationName
74 0, // applicationVersion
75 "android framework", // pEngineName
76 0, // engineVerison
77 VK_MAKE_VERSION(1, 0, 0), // apiVersion
78 };
79
80 std::vector<const char*> instanceExtensions;
81 {
82 GET_PROC(EnumerateInstanceExtensionProperties);
83
84 uint32_t extensionCount = 0;
85 err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
86 if (VK_SUCCESS != err) {
87 return false;
88 }
89 std::unique_ptr<VkExtensionProperties[]> extensions(
90 new VkExtensionProperties[extensionCount]);
91 err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions.get());
92 if (VK_SUCCESS != err) {
93 return false;
94 }
95 bool hasKHRSurfaceExtension = false;
96 bool hasKHRAndroidSurfaceExtension = false;
97 for (uint32_t i = 0; i < extensionCount; ++i) {
98 instanceExtensions.push_back(extensions[i].extensionName);
99 if (!strcmp(extensions[i].extensionName, VK_KHR_SURFACE_EXTENSION_NAME)) {
100 hasKHRSurfaceExtension = true;
101 }
102 if (!strcmp(extensions[i].extensionName,VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
103 hasKHRAndroidSurfaceExtension = true;
104 }
105 }
106 if (!hasKHRSurfaceExtension || !hasKHRAndroidSurfaceExtension) {
107 this->destroy();
108 return false;
109 }
110 }
111
112 const VkInstanceCreateInfo instance_create = {
113 VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
114 nullptr, // pNext
115 0, // flags
116 &app_info, // pApplicationInfo
117 0, // enabledLayerNameCount
118 nullptr, // ppEnabledLayerNames
119 (uint32_t) instanceExtensions.size(), // enabledExtensionNameCount
120 instanceExtensions.data(), // ppEnabledExtensionNames
121 };
122
123 GET_PROC(CreateInstance);
124 err = mCreateInstance(&instance_create, nullptr, &mInstance);
125 if (err < 0) {
126 this->destroy();
127 return false;
128 }
129
130 GET_INST_PROC(DestroyInstance);
131 GET_INST_PROC(EnumeratePhysicalDevices);
132 GET_INST_PROC(GetPhysicalDeviceQueueFamilyProperties);
Greg Daniela227dbb2018-08-20 09:19:48 -0400133 GET_INST_PROC(GetPhysicalDeviceFeatures2);
Greg Daniel2ff202712018-06-14 11:50:10 -0400134 GET_INST_PROC(CreateDevice);
135 GET_INST_PROC(EnumerateDeviceExtensionProperties);
136 GET_INST_PROC(CreateAndroidSurfaceKHR);
137 GET_INST_PROC(DestroySurfaceKHR);
138 GET_INST_PROC(GetPhysicalDeviceSurfaceSupportKHR);
139 GET_INST_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
140 GET_INST_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
141 GET_INST_PROC(GetPhysicalDeviceSurfacePresentModesKHR);
142
143 uint32_t gpuCount;
144 err = mEnumeratePhysicalDevices(mInstance, &gpuCount, nullptr);
145 if (err) {
146 this->destroy();
147 return false;
148 }
149 if (!gpuCount) {
150 this->destroy();
151 return false;
152 }
153 // Just returning the first physical device instead of getting the whole array. Since there
154 // should only be one device on android.
155 gpuCount = 1;
156 err = mEnumeratePhysicalDevices(mInstance, &gpuCount, &mPhysicalDevice);
157 // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
158 if (err && VK_INCOMPLETE != err) {
159 this->destroy();
160 return false;
161 }
162
163 // query to get the initial queue props size
164 uint32_t queueCount;
165 mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, nullptr);
166 if (!queueCount) {
167 this->destroy();
168 return false;
169 }
170
171 // now get the actual queue props
172 std::unique_ptr<VkQueueFamilyProperties[]> queueProps(new VkQueueFamilyProperties[queueCount]);
173 mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, queueProps.get());
174
175 // iterate to find the graphics queue
176 mGraphicsQueueIndex = queueCount;
177 for (uint32_t i = 0; i < queueCount; i++) {
178 if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
179 mGraphicsQueueIndex = i;
180 break;
181 }
182 }
183 if (mGraphicsQueueIndex == queueCount) {
184 this->destroy();
185 return false;
186 }
187
188 // All physical devices and queue families on Android must be capable of
189 // presentation with any native window. So just use the first one.
190 mPresentQueueIndex = 0;
191
192 std::vector<const char*> deviceExtensions;
193 {
194 uint32_t extensionCount = 0;
195 err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
196 nullptr);
197 if (VK_SUCCESS != err) {
198 this->destroy();
199 return false;
200 }
201 std::unique_ptr<VkExtensionProperties[]> extensions(
202 new VkExtensionProperties[extensionCount]);
203 err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
204 extensions.get());
205 if (VK_SUCCESS != err) {
206 this->destroy();
207 return false;
208 }
209 bool hasKHRSwapchainExtension = false;
210 for (uint32_t i = 0; i < extensionCount; ++i) {
211 deviceExtensions.push_back(extensions[i].extensionName);
212 if (!strcmp(extensions[i].extensionName, VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
213 hasKHRSwapchainExtension = true;
214 }
215 }
216 if (!hasKHRSwapchainExtension) {
217 this->destroy();
218 return false;
219 }
220 }
221
Greg Daniela227dbb2018-08-20 09:19:48 -0400222 auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
223 if (device != VK_NULL_HANDLE) {
224 return vkGetDeviceProcAddr(device, proc_name);
225 }
226 return vkGetInstanceProcAddr(instance, proc_name);
227 };
228 grExtensions.init(getProc, mInstance, mPhysicalDevice, instanceExtensions.size(),
229 instanceExtensions.data(), deviceExtensions.size(), deviceExtensions.data());
230
231 memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
232 features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
233 features.pNext = nullptr;
234
235 // Setup all extension feature structs we may want to use.
236 void** tailPNext = &features.pNext;
237
238 if (grExtensions.hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
239 VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend;
240 blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) malloc(
241 sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
242 LOG_ALWAYS_FATAL_IF(!blend);
243 blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
244 blend->pNext = nullptr;
245 *tailPNext = blend;
246 tailPNext = &blend->pNext;
247 }
248
249 // query to get the physical device features
250 mGetPhysicalDeviceFeatures2(mPhysicalDevice, &features);
Greg Daniel2ff202712018-06-14 11:50:10 -0400251 // this looks like it would slow things down,
252 // and we can't depend on it on all platforms
Greg Daniela227dbb2018-08-20 09:19:48 -0400253 features.features.robustBufferAccess = VK_FALSE;
Greg Daniel2ff202712018-06-14 11:50:10 -0400254
255 float queuePriorities[1] = { 0.0 };
256
257 const VkDeviceQueueCreateInfo queueInfo[2] = {
258 {
259 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
260 nullptr, // pNext
261 0, // VkDeviceQueueCreateFlags
262 mGraphicsQueueIndex, // queueFamilyIndex
263 1, // queueCount
264 queuePriorities, // pQueuePriorities
265 },
266 {
267 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
268 nullptr, // pNext
269 0, // VkDeviceQueueCreateFlags
270 mPresentQueueIndex, // queueFamilyIndex
271 1, // queueCount
272 queuePriorities, // pQueuePriorities
273 }
274 };
275 uint32_t queueInfoCount = (mPresentQueueIndex != mGraphicsQueueIndex) ? 2 : 1;
276
277 const VkDeviceCreateInfo deviceInfo = {
278 VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
Greg Daniela227dbb2018-08-20 09:19:48 -0400279 &features, // pNext
Greg Daniel2ff202712018-06-14 11:50:10 -0400280 0, // VkDeviceCreateFlags
281 queueInfoCount, // queueCreateInfoCount
282 queueInfo, // pQueueCreateInfos
283 0, // layerCount
284 nullptr, // ppEnabledLayerNames
285 (uint32_t) deviceExtensions.size(), // extensionCount
286 deviceExtensions.data(), // ppEnabledExtensionNames
Greg Daniela227dbb2018-08-20 09:19:48 -0400287 nullptr, // ppEnabledFeatures
Greg Daniel2ff202712018-06-14 11:50:10 -0400288 };
289
290 err = mCreateDevice(mPhysicalDevice, &deviceInfo, nullptr, &mDevice);
291 if (err) {
292 this->destroy();
293 return false;
294 }
295
296 GET_DEV_PROC(GetDeviceQueue);
297 GET_DEV_PROC(DeviceWaitIdle);
298 GET_DEV_PROC(DestroyDevice);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500299 GET_DEV_PROC(CreateSwapchainKHR);
300 GET_DEV_PROC(DestroySwapchainKHR);
301 GET_DEV_PROC(GetSwapchainImagesKHR);
302 GET_DEV_PROC(AcquireNextImageKHR);
303 GET_DEV_PROC(QueuePresentKHR);
304 GET_DEV_PROC(CreateCommandPool);
305 GET_DEV_PROC(DestroyCommandPool);
306 GET_DEV_PROC(AllocateCommandBuffers);
307 GET_DEV_PROC(FreeCommandBuffers);
308 GET_DEV_PROC(ResetCommandBuffer);
309 GET_DEV_PROC(BeginCommandBuffer);
310 GET_DEV_PROC(EndCommandBuffer);
311 GET_DEV_PROC(CmdPipelineBarrier);
312 GET_DEV_PROC(GetDeviceQueue);
313 GET_DEV_PROC(QueueSubmit);
314 GET_DEV_PROC(QueueWaitIdle);
315 GET_DEV_PROC(DeviceWaitIdle);
316 GET_DEV_PROC(CreateSemaphore);
317 GET_DEV_PROC(DestroySemaphore);
318 GET_DEV_PROC(CreateFence);
319 GET_DEV_PROC(DestroyFence);
320 GET_DEV_PROC(WaitForFences);
321 GET_DEV_PROC(ResetFences);
322
Greg Daniel2ff202712018-06-14 11:50:10 -0400323 return true;
324}
325
Greg Daniela227dbb2018-08-20 09:19:48 -0400326static void free_features_extensions_structs(const VkPhysicalDeviceFeatures2& features) {
327 // All Vulkan structs that could be part of the features chain will start with the
328 // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
329 // so we can get access to the pNext for the next struct.
330 struct CommonVulkanHeader {
331 VkStructureType sType;
332 void* pNext;
333 };
334
335 void* pNext = features.pNext;
336 while (pNext) {
337 void* current = pNext;
338 pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
339 free(current);
340 }
341}
342
// Lazily initializes the Vulkan context for this render thread: creates the
// instance/device via setupDevice(), builds Skia's GrContext on top of it,
// and creates the command pool used for swapchain layout transitions.
// Idempotent: returns immediately if the device already exists. Any failure
// is fatal (LOG_ALWAYS_FATAL_IF).
void VulkanManager::initialize() {
    if (mDevice != VK_NULL_HANDLE) {
        return;
    }

    // Vulkan 1.1 is required (vkEnumerateInstanceVersion itself only exists
    // on 1.1+ loaders).
    GET_PROC(EnumerateInstanceVersion);
    uint32_t instanceVersion = 0;
    LOG_ALWAYS_FATAL_IF(mEnumerateInstanceVersion(&instanceVersion));
    LOG_ALWAYS_FATAL_IF(instanceVersion < VK_MAKE_VERSION(1, 1, 0));

    // setupDevice fills these in; note that `features` may carry malloc'd
    // extension structs on its pNext chain, freed below after GrContext
    // creation.
    GrVkExtensions extensions;
    VkPhysicalDeviceFeatures2 features;
    LOG_ALWAYS_FATAL_IF(!this->setupDevice(extensions, features));

    mGetDeviceQueue(mDevice, mGraphicsQueueIndex, 0, &mGraphicsQueue);

    // Dispatch helper handed to Skia: prefer device-level entry points when a
    // device is supplied, fall back to instance-level otherwise.
    auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return vkGetDeviceProcAddr(device, proc_name);
        }
        return vkGetInstanceProcAddr(instance, proc_name);
    };

    // backendContext holds raw pointers to the stack locals `extensions` and
    // `features`; it must be consumed by GrContext::MakeVulkan before this
    // function returns.
    GrVkBackendContext backendContext;
    backendContext.fInstance = mInstance;
    backendContext.fPhysicalDevice = mPhysicalDevice;
    backendContext.fDevice = mDevice;
    backendContext.fQueue = mGraphicsQueue;
    backendContext.fGraphicsQueueIndex = mGraphicsQueueIndex;
    backendContext.fInstanceVersion = instanceVersion;
    backendContext.fVkExtensions = &extensions;
    backendContext.fDeviceFeatures2 = &features;
    backendContext.fGetProc = std::move(getProc);

    // create the command pool for the command buffers
    if (VK_NULL_HANDLE == mCommandPool) {
        VkCommandPoolCreateInfo commandPoolInfo;
        memset(&commandPoolInfo, 0, sizeof(VkCommandPoolCreateInfo));
        commandPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
        // this needs to be on the render queue
        commandPoolInfo.queueFamilyIndex = mGraphicsQueueIndex;
        commandPoolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
        SkDEBUGCODE(VkResult res =) mCreateCommandPool(mDevice, &commandPoolInfo, nullptr,
                &mCommandPool);
        SkASSERT(VK_SUCCESS == res);
    }

    mGetDeviceQueue(mDevice, mPresentQueueIndex, 0, &mPresentQueue);

    GrContextOptions options;
    options.fDisableDistanceFieldPaths = true;
    mRenderThread.cacheManager().configureContext(&options);
    sk_sp<GrContext> grContext(GrContext::MakeVulkan(backendContext, options));
    LOG_ALWAYS_FATAL_IF(!grContext.get());
    mRenderThread.setGrContext(grContext);

    // GrContext has copied what it needs; release the malloc'd feature
    // structs chained onto `features`.
    free_features_extensions_structs(features);

    DeviceInfo::initialize(mRenderThread.getGrContext()->maxRenderTargetSize());

    if (Properties::enablePartialUpdates && Properties::useBufferAge) {
        mSwapBehavior = SwapBehavior::BufferAge;
    }

    mRenderThread.renderState().onVkContextCreated();
}
409
410// Returns the next BackbufferInfo to use for the next draw. The function will make sure all
411// previous uses have finished before returning.
412VulkanSurface::BackbufferInfo* VulkanManager::getAvailableBackbuffer(VulkanSurface* surface) {
413 SkASSERT(surface->mBackbuffers);
414
415 ++surface->mCurrentBackbufferIndex;
416 if (surface->mCurrentBackbufferIndex > surface->mImageCount) {
417 surface->mCurrentBackbufferIndex = 0;
418 }
419
John Reck1bcacfd2017-11-03 10:12:19 -0700420 VulkanSurface::BackbufferInfo* backbuffer =
421 surface->mBackbuffers + surface->mCurrentBackbufferIndex;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500422
423 // Before we reuse a backbuffer, make sure its fences have all signaled so that we can safely
424 // reuse its commands buffers.
Greg Daniel2ff202712018-06-14 11:50:10 -0400425 VkResult res = mWaitForFences(mDevice, 2, backbuffer->mUsageFences, true, UINT64_MAX);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500426 if (res != VK_SUCCESS) {
427 return nullptr;
428 }
429
430 return backbuffer;
431}
432
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500433SkSurface* VulkanManager::getBackbufferSurface(VulkanSurface* surface) {
434 VulkanSurface::BackbufferInfo* backbuffer = getAvailableBackbuffer(surface);
435 SkASSERT(backbuffer);
436
437 VkResult res;
438
Greg Daniel2ff202712018-06-14 11:50:10 -0400439 res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500440 SkASSERT(VK_SUCCESS == res);
441
442 // The acquire will signal the attached mAcquireSemaphore. We use this to know the image has
443 // finished presenting and that it is safe to begin sending new commands to the returned image.
Greg Daniel2ff202712018-06-14 11:50:10 -0400444 res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
John Reck1bcacfd2017-11-03 10:12:19 -0700445 backbuffer->mAcquireSemaphore, VK_NULL_HANDLE,
446 &backbuffer->mImageIndex);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500447
448 if (VK_ERROR_SURFACE_LOST_KHR == res) {
449 // need to figure out how to create a new vkSurface without the platformData*
450 // maybe use attach somehow? but need a Window
451 return nullptr;
452 }
453 if (VK_ERROR_OUT_OF_DATE_KHR == res) {
454 // tear swapchain down and try again
455 if (!createSwapchain(surface)) {
456 return nullptr;
457 }
Greg Daniel45ec62b2017-01-04 14:27:00 -0500458 backbuffer = getAvailableBackbuffer(surface);
Greg Daniel2ff202712018-06-14 11:50:10 -0400459 res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
Greg Daniel45ec62b2017-01-04 14:27:00 -0500460 SkASSERT(VK_SUCCESS == res);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500461
462 // acquire the image
Greg Daniel2ff202712018-06-14 11:50:10 -0400463 res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
John Reck1bcacfd2017-11-03 10:12:19 -0700464 backbuffer->mAcquireSemaphore, VK_NULL_HANDLE,
465 &backbuffer->mImageIndex);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500466
467 if (VK_SUCCESS != res) {
468 return nullptr;
469 }
470 }
471
472 // set up layout transfer from initial to color attachment
Greg Danielcd558522016-11-17 13:31:40 -0500473 VkImageLayout layout = surface->mImageInfos[backbuffer->mImageIndex].mImageLayout;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500474 SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout || VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout);
John Reck1bcacfd2017-11-03 10:12:19 -0700475 VkPipelineStageFlags srcStageMask = (VK_IMAGE_LAYOUT_UNDEFINED == layout)
476 ? VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT
477 : VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500478 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
John Reck1bcacfd2017-11-03 10:12:19 -0700479 VkAccessFlags srcAccessMask =
480 (VK_IMAGE_LAYOUT_UNDEFINED == layout) ? 0 : VK_ACCESS_MEMORY_READ_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500481 VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
482
483 VkImageMemoryBarrier imageMemoryBarrier = {
John Reck1bcacfd2017-11-03 10:12:19 -0700484 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
485 NULL, // pNext
486 srcAccessMask, // outputMask
487 dstAccessMask, // inputMask
488 layout, // oldLayout
489 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // newLayout
490 mPresentQueueIndex, // srcQueueFamilyIndex
Greg Daniel2ff202712018-06-14 11:50:10 -0400491 mGraphicsQueueIndex, // dstQueueFamilyIndex
John Reck1bcacfd2017-11-03 10:12:19 -0700492 surface->mImages[backbuffer->mImageIndex], // image
493 {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1} // subresourceRange
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500494 };
495 mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[0], 0);
496
497 VkCommandBufferBeginInfo info;
498 memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
499 info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
500 info.flags = 0;
501 mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[0], &info);
502
John Reck1bcacfd2017-11-03 10:12:19 -0700503 mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[0], srcStageMask, dstStageMask, 0, 0,
504 nullptr, 0, nullptr, 1, &imageMemoryBarrier);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500505
506 mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[0]);
507
508 VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
509 // insert the layout transfer into the queue and wait on the acquire
510 VkSubmitInfo submitInfo;
511 memset(&submitInfo, 0, sizeof(VkSubmitInfo));
512 submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
513 submitInfo.waitSemaphoreCount = 1;
514 // Wait to make sure aquire semaphore set above has signaled.
515 submitInfo.pWaitSemaphores = &backbuffer->mAcquireSemaphore;
516 submitInfo.pWaitDstStageMask = &waitDstStageFlags;
517 submitInfo.commandBufferCount = 1;
518 submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[0];
519 submitInfo.signalSemaphoreCount = 0;
520
521 // Attach first fence to submission here so we can track when the command buffer finishes.
Greg Daniel2ff202712018-06-14 11:50:10 -0400522 mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[0]);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500523
524 // We need to notify Skia that we changed the layout of the wrapped VkImage
Greg Danielcd558522016-11-17 13:31:40 -0500525 sk_sp<SkSurface> skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface;
Greg Daniel1834a8c2018-04-12 12:22:43 -0400526 GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
527 SkSurface::kFlushRead_BackendHandleAccess);
528 if (!backendRT.isValid()) {
529 SkASSERT(backendRT.isValid());
530 return nullptr;
531 }
532 backendRT.setVkImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500533
534 surface->mBackbuffer = std::move(skSurface);
535 return surface->mBackbuffer.get();
536}
537
538void VulkanManager::destroyBuffers(VulkanSurface* surface) {
539 if (surface->mBackbuffers) {
540 for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400541 mWaitForFences(mDevice, 2, surface->mBackbuffers[i].mUsageFences, true, UINT64_MAX);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500542 surface->mBackbuffers[i].mImageIndex = -1;
Greg Daniel2ff202712018-06-14 11:50:10 -0400543 mDestroySemaphore(mDevice, surface->mBackbuffers[i].mAcquireSemaphore, nullptr);
544 mDestroySemaphore(mDevice, surface->mBackbuffers[i].mRenderSemaphore, nullptr);
545 mFreeCommandBuffers(mDevice, mCommandPool, 2,
546 surface->mBackbuffers[i].mTransitionCmdBuffers);
547 mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[0], 0);
548 mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[1], 0);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500549 }
550 }
551
552 delete[] surface->mBackbuffers;
553 surface->mBackbuffers = nullptr;
Greg Danielcd558522016-11-17 13:31:40 -0500554 delete[] surface->mImageInfos;
555 surface->mImageInfos = nullptr;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500556 delete[] surface->mImages;
557 surface->mImages = nullptr;
558}
559
560void VulkanManager::destroySurface(VulkanSurface* surface) {
561 // Make sure all submit commands have finished before starting to destroy objects.
562 if (VK_NULL_HANDLE != mPresentQueue) {
563 mQueueWaitIdle(mPresentQueue);
564 }
Greg Daniel2ff202712018-06-14 11:50:10 -0400565 mDeviceWaitIdle(mDevice);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500566
567 destroyBuffers(surface);
568
569 if (VK_NULL_HANDLE != surface->mSwapchain) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400570 mDestroySwapchainKHR(mDevice, surface->mSwapchain, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500571 surface->mSwapchain = VK_NULL_HANDLE;
572 }
573
574 if (VK_NULL_HANDLE != surface->mVkSurface) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400575 mDestroySurfaceKHR(mInstance, surface->mVkSurface, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500576 surface->mVkSurface = VK_NULL_HANDLE;
577 }
578 delete surface;
579}
580
// Builds the per-image resources for a freshly created swapchain: fetches
// the swapchain's VkImages, wraps each in an SkSurface, and allocates the
// ring of backbuffer slots (semaphores, fences, transition command buffers)
// used to cycle frames. Called from createSwapchain.
void VulkanManager::createBuffers(VulkanSurface* surface, VkFormat format, VkExtent2D extent) {
    // Standard two-call pattern: query the image count, then fetch the images.
    mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, nullptr);
    SkASSERT(surface->mImageCount);
    surface->mImages = new VkImage[surface->mImageCount];
    mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, surface->mImages);

    SkSurfaceProps props(0, kUnknown_SkPixelGeometry);

    // set up initial image layouts and create surfaces
    surface->mImageInfos = new VulkanSurface::ImageInfo[surface->mImageCount];
    for (uint32_t i = 0; i < surface->mImageCount; ++i) {
        // Describe the swapchain image to Skia; it starts in UNDEFINED layout
        // with no backing allocation of our own.
        GrVkImageInfo info;
        info.fImage = surface->mImages[i];
        info.fAlloc = GrVkAlloc();
        info.fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
        info.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
        info.fFormat = format;
        info.fLevelCount = 1;

        GrBackendRenderTarget backendRT(extent.width, extent.height, 0, 0, info);

        VulkanSurface::ImageInfo& imageInfo = surface->mImageInfos[i];
        imageInfo.mSurface = SkSurface::MakeFromBackendRenderTarget(
                mRenderThread.getGrContext(), backendRT, kTopLeft_GrSurfaceOrigin,
                kRGBA_8888_SkColorType, nullptr, &props);
    }

    SkASSERT(mCommandPool != VK_NULL_HANDLE);

    // set up the backbuffers
    VkSemaphoreCreateInfo semaphoreInfo;
    memset(&semaphoreInfo, 0, sizeof(VkSemaphoreCreateInfo));
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    // Two command buffers per slot (see mTransitionCmdBuffers usage).
    VkCommandBufferAllocateInfo commandBuffersInfo;
    memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
    commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    commandBuffersInfo.pNext = nullptr;
    commandBuffersInfo.commandPool = mCommandPool;
    commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    commandBuffersInfo.commandBufferCount = 2;
    // Fences start signaled so the first wait in getAvailableBackbuffer
    // returns immediately.
    VkFenceCreateInfo fenceInfo;
    memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
    fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    fenceInfo.pNext = nullptr;
    fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;

    // we create one additional backbuffer structure here, because we want to
    // give the command buffers they contain a chance to finish before we cycle back
    surface->mBackbuffers = new VulkanSurface::BackbufferInfo[surface->mImageCount + 1];
    for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
        // `res` only exists in debug builds; the SkASSERT below checks the
        // result of the last create call only.
        SkDEBUGCODE(VkResult res);
        surface->mBackbuffers[i].mImageIndex = -1;
        SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
                                            &surface->mBackbuffers[i].mAcquireSemaphore);
        SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
                                            &surface->mBackbuffers[i].mRenderSemaphore);
        SkDEBUGCODE(res =) mAllocateCommandBuffers(mDevice, &commandBuffersInfo,
                                                   surface->mBackbuffers[i].mTransitionCmdBuffers);
        SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
                                        &surface->mBackbuffers[i].mUsageFences[0]);
        SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
                                        &surface->mBackbuffers[i].mUsageFences[1]);
        SkASSERT(VK_SUCCESS == res);
    }
    // Start the ring at the extra slot so the first advance lands on slot 0.
    surface->mCurrentBackbufferIndex = surface->mImageCount;
}
649
// (Re)creates the swapchain for |surface| from the capabilities, formats, and
// present modes reported for its VkSurfaceKHR. On success the new swapchain is
// stored in surface->mSwapchain, the previous one (if any) is destroyed after a
// device-wide idle, and the per-image backbuffers are rebuilt via createBuffers().
// Returns false if any Vulkan query/creation fails or no acceptable surface
// format is available.
bool VulkanManager::createSwapchain(VulkanSurface* surface) {
    // check for capabilities
    VkSurfaceCapabilitiesKHR caps;
    VkResult res = mGetPhysicalDeviceSurfaceCapabilitiesKHR(mPhysicalDevice,
            surface->mVkSurface, &caps);
    if (VK_SUCCESS != res) {
        return false;
    }

    // Standard Vulkan two-call idiom: first call counts, second call (below)
    // fills the array.
    uint32_t surfaceFormatCount;
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
            &surfaceFormatCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    FatVector<VkSurfaceFormatKHR, 4> surfaceFormats(surfaceFormatCount);
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
            &surfaceFormatCount, surfaceFormats.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    // Same two-call idiom for the supported present modes.
    uint32_t presentModeCount;
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
            surface->mVkSurface, &presentModeCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    FatVector<VkPresentModeKHR, VK_PRESENT_MODE_RANGE_SIZE_KHR> presentModes(presentModeCount);
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
            surface->mVkSurface, &presentModeCount,
            presentModes.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    VkExtent2D extent = caps.currentExtent;
    // clamp width; to handle currentExtent of -1 and protect us from broken hints
    if (extent.width < caps.minImageExtent.width) {
        extent.width = caps.minImageExtent.width;
    }
    SkASSERT(extent.width <= caps.maxImageExtent.width);
    // clamp height
    if (extent.height < caps.minImageExtent.height) {
        extent.height = caps.minImageExtent.height;
    }
    SkASSERT(extent.height <= caps.maxImageExtent.height);

    // Ask for two images beyond the driver's minimum; clamped below because a
    // maxImageCount of 0 means "no limit".
    uint32_t imageCount = caps.minImageCount + 2;
    if (caps.maxImageCount > 0 && imageCount > caps.maxImageCount) {
        // Application must settle for fewer images than desired:
        imageCount = caps.maxImageCount;
    }

    // Currently Skia requires the images to be color attchments and support all transfer
    // operations.
    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    SkASSERT((caps.supportedUsageFlags & usageFlags) == usageFlags);
    SkASSERT(caps.supportedTransforms & caps.currentTransform);
    SkASSERT(caps.supportedCompositeAlpha &
             (VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR | VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR));
    // Prefer INHERIT (compositor decides) when supported, else OPAQUE.
    VkCompositeAlphaFlagBitsKHR composite_alpha =
            (caps.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR)
                    ? VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR
                    : VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;

    // Pick our surface format. For now, just make sure it matches our sRGB request:
    VkFormat surfaceFormat = VK_FORMAT_UNDEFINED;
    VkColorSpaceKHR colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;

    // sRGB images are only requested when the build enables linear blending.
    bool wantSRGB = false;
#ifdef ANDROID_ENABLE_LINEAR_BLENDING
    wantSRGB = true;
#endif
    for (uint32_t i = 0; i < surfaceFormatCount; ++i) {
        // We are assuming we can get either R8G8B8A8_UNORM or R8G8B8A8_SRGB
        VkFormat desiredFormat = wantSRGB ? VK_FORMAT_R8G8B8A8_SRGB : VK_FORMAT_R8G8B8A8_UNORM;
        if (desiredFormat == surfaceFormats[i].format) {
            surfaceFormat = surfaceFormats[i].format;
            colorSpace = surfaceFormats[i].colorSpace;
        }
    }

    // Fail rather than present with a format we did not ask for.
    if (VK_FORMAT_UNDEFINED == surfaceFormat) {
        return false;
    }

    // If mailbox mode is available, use it, as it is the lowest-latency non-
    // tearing mode. If not, fall back to FIFO which is always available.
    VkPresentModeKHR mode = VK_PRESENT_MODE_FIFO_KHR;
    for (uint32_t i = 0; i < presentModeCount; ++i) {
        // use mailbox
        if (VK_PRESENT_MODE_MAILBOX_KHR == presentModes[i]) {
            mode = presentModes[i];
            break;
        }
    }

    VkSwapchainCreateInfoKHR swapchainCreateInfo;
    memset(&swapchainCreateInfo, 0, sizeof(VkSwapchainCreateInfoKHR));
    swapchainCreateInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
    swapchainCreateInfo.surface = surface->mVkSurface;
    swapchainCreateInfo.minImageCount = imageCount;
    swapchainCreateInfo.imageFormat = surfaceFormat;
    swapchainCreateInfo.imageColorSpace = colorSpace;
    swapchainCreateInfo.imageExtent = extent;
    swapchainCreateInfo.imageArrayLayers = 1;
    swapchainCreateInfo.imageUsage = usageFlags;

    // When the graphics and present queue families differ, the images must be
    // shared concurrently between the two families; otherwise exclusive access
    // is cheaper.
    uint32_t queueFamilies[] = {mGraphicsQueueIndex, mPresentQueueIndex};
    if (mGraphicsQueueIndex != mPresentQueueIndex) {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
        swapchainCreateInfo.queueFamilyIndexCount = 2;
        swapchainCreateInfo.pQueueFamilyIndices = queueFamilies;
    } else {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
        swapchainCreateInfo.queueFamilyIndexCount = 0;
        swapchainCreateInfo.pQueueFamilyIndices = nullptr;
    }

    swapchainCreateInfo.preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
    swapchainCreateInfo.compositeAlpha = composite_alpha;
    swapchainCreateInfo.presentMode = mode;
    swapchainCreateInfo.clipped = true;
    // Handing the old swapchain to the driver lets it reuse resources; we
    // destroy it ourselves below once the device is idle.
    swapchainCreateInfo.oldSwapchain = surface->mSwapchain;

    res = mCreateSwapchainKHR(mDevice, &swapchainCreateInfo, nullptr, &surface->mSwapchain);
    if (VK_SUCCESS != res) {
        return false;
    }

    // destroy the old swapchain
    if (swapchainCreateInfo.oldSwapchain != VK_NULL_HANDLE) {
        // Wait until no in-flight work still references the old images before
        // tearing them down.
        mDeviceWaitIdle(mDevice);

        destroyBuffers(surface);

        mDestroySwapchainKHR(mDevice, swapchainCreateInfo.oldSwapchain, nullptr);
    }

    createBuffers(surface, surfaceFormat, extent);

    return true;
}
798
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500799VulkanSurface* VulkanManager::createSurface(ANativeWindow* window) {
800 initialize();
801
802 if (!window) {
803 return nullptr;
804 }
805
806 VulkanSurface* surface = new VulkanSurface();
807
808 VkAndroidSurfaceCreateInfoKHR surfaceCreateInfo;
809 memset(&surfaceCreateInfo, 0, sizeof(VkAndroidSurfaceCreateInfoKHR));
810 surfaceCreateInfo.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
811 surfaceCreateInfo.pNext = nullptr;
812 surfaceCreateInfo.flags = 0;
813 surfaceCreateInfo.window = window;
814
Greg Daniel2ff202712018-06-14 11:50:10 -0400815 VkResult res = mCreateAndroidSurfaceKHR(mInstance, &surfaceCreateInfo, nullptr,
816 &surface->mVkSurface);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500817 if (VK_SUCCESS != res) {
818 delete surface;
819 return nullptr;
820 }
821
John Reck1bcacfd2017-11-03 10:12:19 -0700822 SkDEBUGCODE(VkBool32 supported; res = mGetPhysicalDeviceSurfaceSupportKHR(
Greg Daniel2ff202712018-06-14 11:50:10 -0400823 mPhysicalDevice, mPresentQueueIndex, surface->mVkSurface, &supported);
824 // All physical devices and queue families on Android must be capable of
825 // presentation with any native window.
826 SkASSERT(VK_SUCCESS == res && supported););
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500827
828 if (!createSwapchain(surface)) {
829 destroySurface(surface);
830 return nullptr;
831 }
832
833 return surface;
834}
835
836// Helper to know which src stage flags we need to set when transitioning to the present layout
837static VkPipelineStageFlags layoutToPipelineStageFlags(const VkImageLayout layout) {
838 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
839 return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
840 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
841 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
842 return VK_PIPELINE_STAGE_TRANSFER_BIT;
843 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout ||
844 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
845 VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout ||
846 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
847 return VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
848 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
849 return VK_PIPELINE_STAGE_HOST_BIT;
850 }
851
852 SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
853 return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
854}
855
856// Helper to know which src access mask we need to set when transitioning to the present layout
857static VkAccessFlags layoutToSrcAccessMask(const VkImageLayout layout) {
858 VkAccessFlags flags = 0;
859 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
860 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
John Reck1bcacfd2017-11-03 10:12:19 -0700861 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT |
862 VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_HOST_WRITE_BIT |
863 VK_ACCESS_HOST_READ_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500864 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
865 flags = VK_ACCESS_HOST_WRITE_BIT;
866 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
867 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
868 } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
869 flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
870 } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
871 flags = VK_ACCESS_TRANSFER_WRITE_BIT;
872 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
873 flags = VK_ACCESS_TRANSFER_READ_BIT;
874 } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
875 flags = VK_ACCESS_SHADER_READ_BIT;
876 }
877 return flags;
878}
879
// Presents the current backbuffer: records and submits a barrier that
// transitions the image to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR (also transferring
// queue-family ownership from the graphics queue to the present queue), then
// queues the present. Finally updates the per-image bookkeeping (layout,
// last-used timestamp, validity) that getAge() reads.
void VulkanManager::swapBuffers(VulkanSurface* surface) {
    // Diagnostic mode (see Properties): block until the device is completely
    // idle before presenting.
    if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
        ATRACE_NAME("Finishing GPU work");
        mDeviceWaitIdle(mDevice);
    }

    SkASSERT(surface->mBackbuffers);
    VulkanSurface::BackbufferInfo* backbuffer =
            surface->mBackbuffers + surface->mCurrentBackbufferIndex;

    // Pull the VkImage info back out of the Skia surface so we can inspect the
    // layout Skia left the image in.
    SkSurface* skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface.get();
    GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
            SkSurface::kFlushRead_BackendHandleAccess);
    SkASSERT(backendRT.isValid());

    GrVkImageInfo imageInfo;
    SkAssertResult(backendRT.getVkImageInfo(&imageInfo));

    // Check to make sure we never change the actually wrapped image
    SkASSERT(imageInfo.fImage == surface->mImages[backbuffer->mImageIndex]);

    // We need to transition the image to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR and make sure that all
    // previous work is complete for before presenting. So we first add the necessary barrier here.
    VkImageLayout layout = imageInfo.fImageLayout;
    VkPipelineStageFlags srcStageMask = layoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = layoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;

    // src/dst queue family indices differ when a dedicated present queue is in
    // use, making this a queue-family ownership transfer as well as a layout
    // transition.
    VkImageMemoryBarrier imageMemoryBarrier = {
            VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,     // sType
            NULL,                                       // pNext
            srcAccessMask,                              // outputMask
            dstAccessMask,                              // inputMask
            layout,                                     // oldLayout
            VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,            // newLayout
            mGraphicsQueueIndex,                        // srcQueueFamilyIndex
            mPresentQueueIndex,                         // dstQueueFamilyIndex
            surface->mImages[backbuffer->mImageIndex],  // image
            {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}     // subresourceRange
    };

    // Record the barrier into the second per-backbuffer command buffer
    // (index [0] / fence [0] are presumably used on acquire — see
    // createBuffers/dequeueBuffer).
    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[1], 0);
    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[1], &info);
    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[1], srcStageMask, dstStageMask, 0, 0,
                        nullptr, 0, nullptr, 1, &imageMemoryBarrier);
    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[1]);

    surface->mImageInfos[backbuffer->mImageIndex].mImageLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 0;
    submitInfo.pWaitDstStageMask = 0;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[1];
    submitInfo.signalSemaphoreCount = 1;
    // When this command buffer finishes we will signal this semaphore so that we know it is now
    // safe to present the image to the screen.
    submitInfo.pSignalSemaphores = &backbuffer->mRenderSemaphore;

    // Attach second fence to submission here so we can track when the command buffer finishes.
    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[1]);

    // Submit present operation to present queue. We use a semaphore here to make sure all rendering
    // to the image is complete and that the layout has been change to present on the graphics
    // queue.
    const VkPresentInfoKHR presentInfo = {
            VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,  // sType
            NULL,                                // pNext
            1,                                   // waitSemaphoreCount
            &backbuffer->mRenderSemaphore,       // pWaitSemaphores
            1,                                   // swapchainCount
            &surface->mSwapchain,                // pSwapchains
            &backbuffer->mImageIndex,            // pImageIndices
            NULL                                 // pResults
    };

    mQueuePresentKHR(mPresentQueue, &presentInfo);

    // Bookkeeping: release the backbuffer ref and stamp this image as valid
    // and just-used so getAge() can report its age on the next frame.
    surface->mBackbuffer.reset();
    surface->mImageInfos[backbuffer->mImageIndex].mLastUsed = surface->mCurrentTime;
    surface->mImageInfos[backbuffer->mImageIndex].mInvalid = false;
    surface->mCurrentTime++;
}
971
972int VulkanManager::getAge(VulkanSurface* surface) {
Greg Daniel74ea2012017-11-10 11:32:58 -0500973 SkASSERT(surface->mBackbuffers);
John Reck1bcacfd2017-11-03 10:12:19 -0700974 VulkanSurface::BackbufferInfo* backbuffer =
975 surface->mBackbuffers + surface->mCurrentBackbufferIndex;
976 if (mSwapBehavior == SwapBehavior::Discard ||
977 surface->mImageInfos[backbuffer->mImageIndex].mInvalid) {
Greg Danielcd558522016-11-17 13:31:40 -0500978 return 0;
979 }
980 uint16_t lastUsed = surface->mImageInfos[backbuffer->mImageIndex].mLastUsed;
981 return surface->mCurrentTime - lastUsed;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500982}
983
984} /* namespace renderthread */
985} /* namespace uirenderer */
986} /* namespace android */