blob: 10edf73e3f2a44d4e73394b2e4f5e2bbfa20a671 [file] [log] [blame]
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001/*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "VulkanManager.h"
18
19#include "DeviceInfo.h"
Greg Danielcd558522016-11-17 13:31:40 -050020#include "Properties.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050021#include "RenderThread.h"
Greg Daniel45ec62b2017-01-04 14:27:00 -050022#include "renderstate/RenderState.h"
Ben Wagnereec27d52017-01-11 15:32:07 -050023#include "utils/FatVector.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050024
Greg Danielac2d2322017-07-12 11:30:15 -040025#include <GrBackendSurface.h>
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050026#include <GrContext.h>
27#include <GrTypes.h>
28#include <vk/GrVkTypes.h>
29
30namespace android {
31namespace uirenderer {
32namespace renderthread {
33
Greg Daniel2ff202712018-06-14 11:50:10 -040034#define GET_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(VK_NULL_HANDLE, "vk" #F)
35#define GET_INST_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(mInstance, "vk" #F)
36#define GET_DEV_PROC(F) m##F = (PFN_vk##F)vkGetDeviceProcAddr(mDevice, "vk" #F)
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050037
// A VulkanManager is bound to a single RenderThread; all Vulkan state it owns
// (instance, device, queues, command pool) is created and used on that thread.
VulkanManager::VulkanManager(RenderThread& thread) : mRenderThread(thread) {}
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050039
40void VulkanManager::destroy() {
Greg Daniel45ec62b2017-01-04 14:27:00 -050041 mRenderThread.renderState().onVkContextDestroyed();
42 mRenderThread.setGrContext(nullptr);
43
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050044 if (VK_NULL_HANDLE != mCommandPool) {
Greg Daniel2ff202712018-06-14 11:50:10 -040045 mDestroyCommandPool(mDevice, mCommandPool, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050046 mCommandPool = VK_NULL_HANDLE;
47 }
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050048
Greg Daniel2ff202712018-06-14 11:50:10 -040049 if (mDevice != VK_NULL_HANDLE) {
50 mDeviceWaitIdle(mDevice);
51 mDestroyDevice(mDevice, nullptr);
John Reck1bcacfd2017-11-03 10:12:19 -070052 }
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050053
Greg Daniel2ff202712018-06-14 11:50:10 -040054 if (mInstance != VK_NULL_HANDLE) {
55 mDestroyInstance(mInstance, nullptr);
56 }
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050057
Greg Daniel2ff202712018-06-14 11:50:10 -040058 mGraphicsQueue = VK_NULL_HANDLE;
59 mPresentQueue = VK_NULL_HANDLE;
60 mDevice = VK_NULL_HANDLE;
61 mPhysicalDevice = VK_NULL_HANDLE;
62 mInstance = VK_NULL_HANDLE;
63}
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050064
Greg Daniel2ff202712018-06-14 11:50:10 -040065bool VulkanManager::setupDevice(VkPhysicalDeviceFeatures& deviceFeatures) {
66 VkResult err;
67
68 constexpr VkApplicationInfo app_info = {
69 VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
70 nullptr, // pNext
71 "android framework", // pApplicationName
72 0, // applicationVersion
73 "android framework", // pEngineName
74 0, // engineVerison
75 VK_MAKE_VERSION(1, 0, 0), // apiVersion
76 };
77
78 std::vector<const char*> instanceExtensions;
79 {
80 GET_PROC(EnumerateInstanceExtensionProperties);
81
82 uint32_t extensionCount = 0;
83 err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
84 if (VK_SUCCESS != err) {
85 return false;
86 }
87 std::unique_ptr<VkExtensionProperties[]> extensions(
88 new VkExtensionProperties[extensionCount]);
89 err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions.get());
90 if (VK_SUCCESS != err) {
91 return false;
92 }
93 bool hasKHRSurfaceExtension = false;
94 bool hasKHRAndroidSurfaceExtension = false;
95 for (uint32_t i = 0; i < extensionCount; ++i) {
96 instanceExtensions.push_back(extensions[i].extensionName);
97 if (!strcmp(extensions[i].extensionName, VK_KHR_SURFACE_EXTENSION_NAME)) {
98 hasKHRSurfaceExtension = true;
99 }
100 if (!strcmp(extensions[i].extensionName,VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
101 hasKHRAndroidSurfaceExtension = true;
102 }
103 }
104 if (!hasKHRSurfaceExtension || !hasKHRAndroidSurfaceExtension) {
105 this->destroy();
106 return false;
107 }
108 }
109
110 const VkInstanceCreateInfo instance_create = {
111 VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
112 nullptr, // pNext
113 0, // flags
114 &app_info, // pApplicationInfo
115 0, // enabledLayerNameCount
116 nullptr, // ppEnabledLayerNames
117 (uint32_t) instanceExtensions.size(), // enabledExtensionNameCount
118 instanceExtensions.data(), // ppEnabledExtensionNames
119 };
120
121 GET_PROC(CreateInstance);
122 err = mCreateInstance(&instance_create, nullptr, &mInstance);
123 if (err < 0) {
124 this->destroy();
125 return false;
126 }
127
128 GET_INST_PROC(DestroyInstance);
129 GET_INST_PROC(EnumeratePhysicalDevices);
130 GET_INST_PROC(GetPhysicalDeviceQueueFamilyProperties);
131 GET_INST_PROC(GetPhysicalDeviceFeatures);
132 GET_INST_PROC(CreateDevice);
133 GET_INST_PROC(EnumerateDeviceExtensionProperties);
134 GET_INST_PROC(CreateAndroidSurfaceKHR);
135 GET_INST_PROC(DestroySurfaceKHR);
136 GET_INST_PROC(GetPhysicalDeviceSurfaceSupportKHR);
137 GET_INST_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
138 GET_INST_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
139 GET_INST_PROC(GetPhysicalDeviceSurfacePresentModesKHR);
140
141 uint32_t gpuCount;
142 err = mEnumeratePhysicalDevices(mInstance, &gpuCount, nullptr);
143 if (err) {
144 this->destroy();
145 return false;
146 }
147 if (!gpuCount) {
148 this->destroy();
149 return false;
150 }
151 // Just returning the first physical device instead of getting the whole array. Since there
152 // should only be one device on android.
153 gpuCount = 1;
154 err = mEnumeratePhysicalDevices(mInstance, &gpuCount, &mPhysicalDevice);
155 // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
156 if (err && VK_INCOMPLETE != err) {
157 this->destroy();
158 return false;
159 }
160
161 // query to get the initial queue props size
162 uint32_t queueCount;
163 mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, nullptr);
164 if (!queueCount) {
165 this->destroy();
166 return false;
167 }
168
169 // now get the actual queue props
170 std::unique_ptr<VkQueueFamilyProperties[]> queueProps(new VkQueueFamilyProperties[queueCount]);
171 mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, queueProps.get());
172
173 // iterate to find the graphics queue
174 mGraphicsQueueIndex = queueCount;
175 for (uint32_t i = 0; i < queueCount; i++) {
176 if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
177 mGraphicsQueueIndex = i;
178 break;
179 }
180 }
181 if (mGraphicsQueueIndex == queueCount) {
182 this->destroy();
183 return false;
184 }
185
186 // All physical devices and queue families on Android must be capable of
187 // presentation with any native window. So just use the first one.
188 mPresentQueueIndex = 0;
189
190 std::vector<const char*> deviceExtensions;
191 {
192 uint32_t extensionCount = 0;
193 err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
194 nullptr);
195 if (VK_SUCCESS != err) {
196 this->destroy();
197 return false;
198 }
199 std::unique_ptr<VkExtensionProperties[]> extensions(
200 new VkExtensionProperties[extensionCount]);
201 err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
202 extensions.get());
203 if (VK_SUCCESS != err) {
204 this->destroy();
205 return false;
206 }
207 bool hasKHRSwapchainExtension = false;
208 for (uint32_t i = 0; i < extensionCount; ++i) {
209 deviceExtensions.push_back(extensions[i].extensionName);
210 if (!strcmp(extensions[i].extensionName, VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
211 hasKHRSwapchainExtension = true;
212 }
213 }
214 if (!hasKHRSwapchainExtension) {
215 this->destroy();
216 return false;
217 }
218 }
219
220 // query to get the physical device properties
221 mGetPhysicalDeviceFeatures(mPhysicalDevice, &deviceFeatures);
222 // this looks like it would slow things down,
223 // and we can't depend on it on all platforms
224 deviceFeatures.robustBufferAccess = VK_FALSE;
225
226 float queuePriorities[1] = { 0.0 };
227
228 const VkDeviceQueueCreateInfo queueInfo[2] = {
229 {
230 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
231 nullptr, // pNext
232 0, // VkDeviceQueueCreateFlags
233 mGraphicsQueueIndex, // queueFamilyIndex
234 1, // queueCount
235 queuePriorities, // pQueuePriorities
236 },
237 {
238 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
239 nullptr, // pNext
240 0, // VkDeviceQueueCreateFlags
241 mPresentQueueIndex, // queueFamilyIndex
242 1, // queueCount
243 queuePriorities, // pQueuePriorities
244 }
245 };
246 uint32_t queueInfoCount = (mPresentQueueIndex != mGraphicsQueueIndex) ? 2 : 1;
247
248 const VkDeviceCreateInfo deviceInfo = {
249 VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
250 nullptr, // pNext
251 0, // VkDeviceCreateFlags
252 queueInfoCount, // queueCreateInfoCount
253 queueInfo, // pQueueCreateInfos
254 0, // layerCount
255 nullptr, // ppEnabledLayerNames
256 (uint32_t) deviceExtensions.size(), // extensionCount
257 deviceExtensions.data(), // ppEnabledExtensionNames
258 &deviceFeatures // ppEnabledFeatures
259 };
260
261 err = mCreateDevice(mPhysicalDevice, &deviceInfo, nullptr, &mDevice);
262 if (err) {
263 this->destroy();
264 return false;
265 }
266
267 GET_DEV_PROC(GetDeviceQueue);
268 GET_DEV_PROC(DeviceWaitIdle);
269 GET_DEV_PROC(DestroyDevice);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500270 GET_DEV_PROC(CreateSwapchainKHR);
271 GET_DEV_PROC(DestroySwapchainKHR);
272 GET_DEV_PROC(GetSwapchainImagesKHR);
273 GET_DEV_PROC(AcquireNextImageKHR);
274 GET_DEV_PROC(QueuePresentKHR);
275 GET_DEV_PROC(CreateCommandPool);
276 GET_DEV_PROC(DestroyCommandPool);
277 GET_DEV_PROC(AllocateCommandBuffers);
278 GET_DEV_PROC(FreeCommandBuffers);
279 GET_DEV_PROC(ResetCommandBuffer);
280 GET_DEV_PROC(BeginCommandBuffer);
281 GET_DEV_PROC(EndCommandBuffer);
282 GET_DEV_PROC(CmdPipelineBarrier);
283 GET_DEV_PROC(GetDeviceQueue);
284 GET_DEV_PROC(QueueSubmit);
285 GET_DEV_PROC(QueueWaitIdle);
286 GET_DEV_PROC(DeviceWaitIdle);
287 GET_DEV_PROC(CreateSemaphore);
288 GET_DEV_PROC(DestroySemaphore);
289 GET_DEV_PROC(CreateFence);
290 GET_DEV_PROC(DestroyFence);
291 GET_DEV_PROC(WaitForFences);
292 GET_DEV_PROC(ResetFences);
293
Greg Daniel2ff202712018-06-14 11:50:10 -0400294 return true;
295}
296
297void VulkanManager::initialize() {
298 if (mDevice != VK_NULL_HANDLE) {
299 return;
300 }
301
302 std::vector<const char*> instanceExtensions;
303 std::vector<const char*> deviceExtensions;
304 VkPhysicalDeviceFeatures deviceFeatures;
305 LOG_ALWAYS_FATAL_IF(!this->setupDevice(deviceFeatures));
306
307 mGetDeviceQueue(mDevice, mGraphicsQueueIndex, 0, &mGraphicsQueue);
308
309 uint32_t extensionFlags = kKHR_surface_GrVkExtensionFlag |
310 kKHR_android_surface_GrVkExtensionFlag |
311 kKHR_swapchain_GrVkExtensionFlag;
312
313 uint32_t featureFlags = 0;
314 if (deviceFeatures.geometryShader) {
315 featureFlags |= kGeometryShader_GrVkFeatureFlag;
316 }
317 if (deviceFeatures.dualSrcBlend) {
318 featureFlags |= kDualSrcBlend_GrVkFeatureFlag;
319 }
320 if (deviceFeatures.sampleRateShading) {
321 featureFlags |= kSampleRateShading_GrVkFeatureFlag;
322 }
323
324 auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
325 if (device != VK_NULL_HANDLE) {
326 return vkGetDeviceProcAddr(device, proc_name);
327 }
328 return vkGetInstanceProcAddr(instance, proc_name);
329 };
330 auto interface =
331 sk_make_sp<GrVkInterface>(getProc, mInstance, mDevice, extensionFlags);
332
333 GrVkBackendContext backendContext;
334 backendContext.fInstance = mInstance;
335 backendContext.fPhysicalDevice = mPhysicalDevice;
336 backendContext.fDevice = mDevice;
337 backendContext.fQueue = mGraphicsQueue;
338 backendContext.fGraphicsQueueIndex = mGraphicsQueueIndex;
339 backendContext.fMinAPIVersion = VK_MAKE_VERSION(1, 0, 0);
340 backendContext.fExtensions = extensionFlags;
341 backendContext.fFeatures = featureFlags;
342 backendContext.fInterface = std::move(interface);
343 backendContext.fOwnsInstanceAndDevice = false;
344
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500345 // create the command pool for the command buffers
346 if (VK_NULL_HANDLE == mCommandPool) {
347 VkCommandPoolCreateInfo commandPoolInfo;
348 memset(&commandPoolInfo, 0, sizeof(VkCommandPoolCreateInfo));
349 commandPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
350 // this needs to be on the render queue
Greg Daniel2ff202712018-06-14 11:50:10 -0400351 commandPoolInfo.queueFamilyIndex = mGraphicsQueueIndex;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500352 commandPoolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
Greg Daniel2ff202712018-06-14 11:50:10 -0400353 SkDEBUGCODE(VkResult res =) mCreateCommandPool(mDevice, &commandPoolInfo, nullptr,
354 &mCommandPool);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500355 SkASSERT(VK_SUCCESS == res);
356 }
357
Greg Daniel2ff202712018-06-14 11:50:10 -0400358 mGetDeviceQueue(mDevice, mPresentQueueIndex, 0, &mPresentQueue);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500359
Stan Ilievd495f432017-10-09 15:49:32 -0400360 GrContextOptions options;
361 options.fDisableDistanceFieldPaths = true;
362 mRenderThread.cacheManager().configureContext(&options);
Greg Daniel2ff202712018-06-14 11:50:10 -0400363 sk_sp<GrContext> grContext(GrContext::MakeVulkan(backendContext, options));
Greg Daniel660d6ec2017-12-08 11:44:27 -0500364 LOG_ALWAYS_FATAL_IF(!grContext.get());
365 mRenderThread.setGrContext(grContext);
Greg Daniel85e09072018-04-09 12:36:45 -0400366 DeviceInfo::initialize(mRenderThread.getGrContext()->maxRenderTargetSize());
Greg Danielcd558522016-11-17 13:31:40 -0500367
368 if (Properties::enablePartialUpdates && Properties::useBufferAge) {
369 mSwapBehavior = SwapBehavior::BufferAge;
370 }
Greg Daniel45ec62b2017-01-04 14:27:00 -0500371
372 mRenderThread.renderState().onVkContextCreated();
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500373}
374
375// Returns the next BackbufferInfo to use for the next draw. The function will make sure all
376// previous uses have finished before returning.
377VulkanSurface::BackbufferInfo* VulkanManager::getAvailableBackbuffer(VulkanSurface* surface) {
378 SkASSERT(surface->mBackbuffers);
379
380 ++surface->mCurrentBackbufferIndex;
381 if (surface->mCurrentBackbufferIndex > surface->mImageCount) {
382 surface->mCurrentBackbufferIndex = 0;
383 }
384
John Reck1bcacfd2017-11-03 10:12:19 -0700385 VulkanSurface::BackbufferInfo* backbuffer =
386 surface->mBackbuffers + surface->mCurrentBackbufferIndex;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500387
388 // Before we reuse a backbuffer, make sure its fences have all signaled so that we can safely
389 // reuse its commands buffers.
Greg Daniel2ff202712018-06-14 11:50:10 -0400390 VkResult res = mWaitForFences(mDevice, 2, backbuffer->mUsageFences, true, UINT64_MAX);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500391 if (res != VK_SUCCESS) {
392 return nullptr;
393 }
394
395 return backbuffer;
396}
397
// Acquires the next swapchain image for drawing and returns the SkSurface that
// wraps it (owned by the surface; caller must not delete). Returns nullptr if
// the VkSurface was lost, the swapchain could not be recreated, or acquisition
// failed. Side effects: records+submits a layout-transition command buffer on
// the graphics queue and stores the SkSurface in surface->mBackbuffer.
SkSurface* VulkanManager::getBackbufferSurface(VulkanSurface* surface) {
    VulkanSurface::BackbufferInfo* backbuffer = getAvailableBackbuffer(surface);
    SkASSERT(backbuffer);

    VkResult res;

    // Un-signal both usage fences; they are re-signaled by the submits that use
    // this backbuffer.
    res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
    SkASSERT(VK_SUCCESS == res);

    // The acquire will signal the attached mAcquireSemaphore. We use this to know the image has
    // finished presenting and that it is safe to begin sending new commands to the returned image.
    res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
                               backbuffer->mAcquireSemaphore, VK_NULL_HANDLE,
                               &backbuffer->mImageIndex);

    if (VK_ERROR_SURFACE_LOST_KHR == res) {
        // need to figure out how to create a new vkSurface without the platformData*
        // maybe use attach somehow? but need a Window
        return nullptr;
    }
    if (VK_ERROR_OUT_OF_DATE_KHR == res) {
        // tear swapchain down and try again
        if (!createSwapchain(surface)) {
            return nullptr;
        }
        // createSwapchain rebuilt the backbuffers, so fetch a fresh slot and
        // reset its fences before retrying the acquire.
        backbuffer = getAvailableBackbuffer(surface);
        res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
        SkASSERT(VK_SUCCESS == res);

        // acquire the image
        res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
                                   backbuffer->mAcquireSemaphore, VK_NULL_HANDLE,
                                   &backbuffer->mImageIndex);

        if (VK_SUCCESS != res) {
            return nullptr;
        }
    }

    // set up layout transfer from initial to color attachment
    VkImageLayout layout = surface->mImageInfos[backbuffer->mImageIndex].mImageLayout;
    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout || VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout);
    // Freshly-created images are UNDEFINED and need no source synchronization;
    // previously-presented images must wait on prior color-attachment output.
    VkPipelineStageFlags srcStageMask = (VK_IMAGE_LAYOUT_UNDEFINED == layout)
                                                ? VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT
                                                : VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkAccessFlags srcAccessMask =
            (VK_IMAGE_LAYOUT_UNDEFINED == layout) ? 0 : VK_ACCESS_MEMORY_READ_BIT;
    VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;

    // Barrier also transfers ownership from the present queue family back to
    // the graphics queue family.
    VkImageMemoryBarrier imageMemoryBarrier = {
            VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,    // sType
            NULL,                                      // pNext
            srcAccessMask,                             // outputMask
            dstAccessMask,                             // inputMask
            layout,                                    // oldLayout
            VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,  // newLayout
            mPresentQueueIndex,                        // srcQueueFamilyIndex
            mGraphicsQueueIndex,                       // dstQueueFamilyIndex
            surface->mImages[backbuffer->mImageIndex], // image
            {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}    // subresourceRange
    };
    // Command buffer [0] is dedicated to the acquire-side transition
    // (buffer [1] is presumably the present-side one — recorded elsewhere).
    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[0], 0);

    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[0], &info);

    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[0], srcStageMask, dstStageMask, 0, 0,
                        nullptr, 0, nullptr, 1, &imageMemoryBarrier);

    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[0]);

    VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 1;
    // Wait to make sure the acquire semaphore set above has signaled.
    submitInfo.pWaitSemaphores = &backbuffer->mAcquireSemaphore;
    submitInfo.pWaitDstStageMask = &waitDstStageFlags;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[0];
    submitInfo.signalSemaphoreCount = 0;

    // Attach first fence to submission here so we can track when the command buffer finishes.
    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[0]);

    // We need to notify Skia that we changed the layout of the wrapped VkImage
    sk_sp<SkSurface> skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface;
    GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
            SkSurface::kFlushRead_BackendHandleAccess);
    if (!backendRT.isValid()) {
        SkASSERT(backendRT.isValid());
        return nullptr;
    }
    backendRT.setVkImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);

    surface->mBackbuffer = std::move(skSurface);
    return surface->mBackbuffer.get();
}
502
503void VulkanManager::destroyBuffers(VulkanSurface* surface) {
504 if (surface->mBackbuffers) {
505 for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400506 mWaitForFences(mDevice, 2, surface->mBackbuffers[i].mUsageFences, true, UINT64_MAX);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500507 surface->mBackbuffers[i].mImageIndex = -1;
Greg Daniel2ff202712018-06-14 11:50:10 -0400508 mDestroySemaphore(mDevice, surface->mBackbuffers[i].mAcquireSemaphore, nullptr);
509 mDestroySemaphore(mDevice, surface->mBackbuffers[i].mRenderSemaphore, nullptr);
510 mFreeCommandBuffers(mDevice, mCommandPool, 2,
511 surface->mBackbuffers[i].mTransitionCmdBuffers);
512 mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[0], 0);
513 mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[1], 0);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500514 }
515 }
516
517 delete[] surface->mBackbuffers;
518 surface->mBackbuffers = nullptr;
Greg Danielcd558522016-11-17 13:31:40 -0500519 delete[] surface->mImageInfos;
520 surface->mImageInfos = nullptr;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500521 delete[] surface->mImages;
522 surface->mImages = nullptr;
523}
524
525void VulkanManager::destroySurface(VulkanSurface* surface) {
526 // Make sure all submit commands have finished before starting to destroy objects.
527 if (VK_NULL_HANDLE != mPresentQueue) {
528 mQueueWaitIdle(mPresentQueue);
529 }
Greg Daniel2ff202712018-06-14 11:50:10 -0400530 mDeviceWaitIdle(mDevice);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500531
532 destroyBuffers(surface);
533
534 if (VK_NULL_HANDLE != surface->mSwapchain) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400535 mDestroySwapchainKHR(mDevice, surface->mSwapchain, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500536 surface->mSwapchain = VK_NULL_HANDLE;
537 }
538
539 if (VK_NULL_HANDLE != surface->mVkSurface) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400540 mDestroySurfaceKHR(mInstance, surface->mVkSurface, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500541 surface->mVkSurface = VK_NULL_HANDLE;
542 }
543 delete surface;
544}
545
// Builds the per-image bookkeeping for a freshly-created swapchain: fetches the
// swapchain's VkImages, wraps each in an SkSurface render target of the given
// format/extent, and allocates the ring of BackbufferInfo structs (semaphores,
// transition command buffers, and usage fences) used to pace rendering.
// Requires mCommandPool to already exist (created in initialize()).
void VulkanManager::createBuffers(VulkanSurface* surface, VkFormat format, VkExtent2D extent) {
    // First call gets the image count, second fills the array.
    mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, nullptr);
    SkASSERT(surface->mImageCount);
    surface->mImages = new VkImage[surface->mImageCount];
    mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, surface->mImages);

    SkSurfaceProps props(0, kUnknown_SkPixelGeometry);

    // set up initial image layouts and create surfaces
    surface->mImageInfos = new VulkanSurface::ImageInfo[surface->mImageCount];
    for (uint32_t i = 0; i < surface->mImageCount; ++i) {
        // Describe the swapchain image to Skia; no allocation is attached
        // because the memory is owned by the swapchain.
        GrVkImageInfo info;
        info.fImage = surface->mImages[i];
        info.fAlloc = GrVkAlloc();
        info.fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
        info.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
        info.fFormat = format;
        info.fLevelCount = 1;

        GrBackendRenderTarget backendRT(extent.width, extent.height, 0, 0, info);

        VulkanSurface::ImageInfo& imageInfo = surface->mImageInfos[i];
        imageInfo.mSurface = SkSurface::MakeFromBackendRenderTarget(
                mRenderThread.getGrContext(), backendRT, kTopLeft_GrSurfaceOrigin,
                kRGBA_8888_SkColorType, nullptr, &props);
    }

    SkASSERT(mCommandPool != VK_NULL_HANDLE);

    // set up the backbuffers
    VkSemaphoreCreateInfo semaphoreInfo;
    memset(&semaphoreInfo, 0, sizeof(VkSemaphoreCreateInfo));
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    // Two command buffers per backbuffer: one per layout transition direction.
    VkCommandBufferAllocateInfo commandBuffersInfo;
    memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
    commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    commandBuffersInfo.pNext = nullptr;
    commandBuffersInfo.commandPool = mCommandPool;
    commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    commandBuffersInfo.commandBufferCount = 2;
    // Fences start signaled so the first wait in getAvailableBackbuffer passes.
    VkFenceCreateInfo fenceInfo;
    memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
    fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    fenceInfo.pNext = nullptr;
    fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;

    // we create one additional backbuffer structure here, because we want to
    // give the command buffers they contain a chance to finish before we cycle back
    surface->mBackbuffers = new VulkanSurface::BackbufferInfo[surface->mImageCount + 1];
    for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
        // res only exists in debug builds (SkDEBUGCODE); release builds ignore
        // the return values and rely on the debug-build SkASSERT below.
        SkDEBUGCODE(VkResult res);
        surface->mBackbuffers[i].mImageIndex = -1;
        SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
                &surface->mBackbuffers[i].mAcquireSemaphore);
        SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
                &surface->mBackbuffers[i].mRenderSemaphore);
        SkDEBUGCODE(res =) mAllocateCommandBuffers(mDevice, &commandBuffersInfo,
                surface->mBackbuffers[i].mTransitionCmdBuffers);
        SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
                &surface->mBackbuffers[i].mUsageFences[0]);
        SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
                &surface->mBackbuffers[i].mUsageFences[1]);
        SkASSERT(VK_SUCCESS == res);
    }
    // Start at the extra slot so the first getAvailableBackbuffer wraps to 0.
    surface->mCurrentBackbufferIndex = surface->mImageCount;
}
614
615bool VulkanManager::createSwapchain(VulkanSurface* surface) {
616 // check for capabilities
617 VkSurfaceCapabilitiesKHR caps;
Greg Daniel2ff202712018-06-14 11:50:10 -0400618 VkResult res = mGetPhysicalDeviceSurfaceCapabilitiesKHR(mPhysicalDevice,
John Reck1bcacfd2017-11-03 10:12:19 -0700619 surface->mVkSurface, &caps);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500620 if (VK_SUCCESS != res) {
621 return false;
622 }
623
624 uint32_t surfaceFormatCount;
Greg Daniel2ff202712018-06-14 11:50:10 -0400625 res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
John Reck1bcacfd2017-11-03 10:12:19 -0700626 &surfaceFormatCount, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500627 if (VK_SUCCESS != res) {
628 return false;
629 }
630
Ben Wagnereec27d52017-01-11 15:32:07 -0500631 FatVector<VkSurfaceFormatKHR, 4> surfaceFormats(surfaceFormatCount);
Greg Daniel2ff202712018-06-14 11:50:10 -0400632 res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
John Reck1bcacfd2017-11-03 10:12:19 -0700633 &surfaceFormatCount, surfaceFormats.data());
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500634 if (VK_SUCCESS != res) {
635 return false;
636 }
637
638 uint32_t presentModeCount;
Greg Daniel2ff202712018-06-14 11:50:10 -0400639 res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
John Reck1bcacfd2017-11-03 10:12:19 -0700640 surface->mVkSurface, &presentModeCount, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500641 if (VK_SUCCESS != res) {
642 return false;
643 }
644
Ben Wagnereec27d52017-01-11 15:32:07 -0500645 FatVector<VkPresentModeKHR, VK_PRESENT_MODE_RANGE_SIZE_KHR> presentModes(presentModeCount);
Greg Daniel2ff202712018-06-14 11:50:10 -0400646 res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
John Reck1bcacfd2017-11-03 10:12:19 -0700647 surface->mVkSurface, &presentModeCount,
648 presentModes.data());
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500649 if (VK_SUCCESS != res) {
650 return false;
651 }
652
653 VkExtent2D extent = caps.currentExtent;
654 // clamp width; to handle currentExtent of -1 and protect us from broken hints
655 if (extent.width < caps.minImageExtent.width) {
656 extent.width = caps.minImageExtent.width;
657 }
658 SkASSERT(extent.width <= caps.maxImageExtent.width);
659 // clamp height
660 if (extent.height < caps.minImageExtent.height) {
661 extent.height = caps.minImageExtent.height;
662 }
663 SkASSERT(extent.height <= caps.maxImageExtent.height);
664
665 uint32_t imageCount = caps.minImageCount + 2;
666 if (caps.maxImageCount > 0 && imageCount > caps.maxImageCount) {
667 // Application must settle for fewer images than desired:
668 imageCount = caps.maxImageCount;
669 }
670
671 // Currently Skia requires the images to be color attchments and support all transfer
672 // operations.
673 VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
674 VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
675 VK_IMAGE_USAGE_TRANSFER_DST_BIT;
676 SkASSERT((caps.supportedUsageFlags & usageFlags) == usageFlags);
677 SkASSERT(caps.supportedTransforms & caps.currentTransform);
John Reck1bcacfd2017-11-03 10:12:19 -0700678 SkASSERT(caps.supportedCompositeAlpha &
679 (VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR | VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR));
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500680 VkCompositeAlphaFlagBitsKHR composite_alpha =
John Reck1bcacfd2017-11-03 10:12:19 -0700681 (caps.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR)
682 ? VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR
683 : VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500684
685 // Pick our surface format. For now, just make sure it matches our sRGB request:
686 VkFormat surfaceFormat = VK_FORMAT_UNDEFINED;
687 VkColorSpaceKHR colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
688
689 bool wantSRGB = false;
690#ifdef ANDROID_ENABLE_LINEAR_BLENDING
691 wantSRGB = true;
692#endif
693 for (uint32_t i = 0; i < surfaceFormatCount; ++i) {
694 // We are assuming we can get either R8G8B8A8_UNORM or R8G8B8A8_SRGB
695 VkFormat desiredFormat = wantSRGB ? VK_FORMAT_R8G8B8A8_SRGB : VK_FORMAT_R8G8B8A8_UNORM;
696 if (desiredFormat == surfaceFormats[i].format) {
697 surfaceFormat = surfaceFormats[i].format;
698 colorSpace = surfaceFormats[i].colorSpace;
699 }
700 }
701
702 if (VK_FORMAT_UNDEFINED == surfaceFormat) {
703 return false;
704 }
705
706 // If mailbox mode is available, use it, as it is the lowest-latency non-
707 // tearing mode. If not, fall back to FIFO which is always available.
708 VkPresentModeKHR mode = VK_PRESENT_MODE_FIFO_KHR;
709 for (uint32_t i = 0; i < presentModeCount; ++i) {
710 // use mailbox
711 if (VK_PRESENT_MODE_MAILBOX_KHR == presentModes[i]) {
712 mode = presentModes[i];
713 break;
714 }
715 }
716
717 VkSwapchainCreateInfoKHR swapchainCreateInfo;
718 memset(&swapchainCreateInfo, 0, sizeof(VkSwapchainCreateInfoKHR));
719 swapchainCreateInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
720 swapchainCreateInfo.surface = surface->mVkSurface;
721 swapchainCreateInfo.minImageCount = imageCount;
722 swapchainCreateInfo.imageFormat = surfaceFormat;
723 swapchainCreateInfo.imageColorSpace = colorSpace;
724 swapchainCreateInfo.imageExtent = extent;
725 swapchainCreateInfo.imageArrayLayers = 1;
726 swapchainCreateInfo.imageUsage = usageFlags;
727
Greg Daniel2ff202712018-06-14 11:50:10 -0400728 uint32_t queueFamilies[] = {mGraphicsQueueIndex, mPresentQueueIndex};
729 if (mGraphicsQueueIndex != mPresentQueueIndex) {
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500730 swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
731 swapchainCreateInfo.queueFamilyIndexCount = 2;
732 swapchainCreateInfo.pQueueFamilyIndices = queueFamilies;
733 } else {
734 swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
735 swapchainCreateInfo.queueFamilyIndexCount = 0;
736 swapchainCreateInfo.pQueueFamilyIndices = nullptr;
737 }
738
739 swapchainCreateInfo.preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
740 swapchainCreateInfo.compositeAlpha = composite_alpha;
741 swapchainCreateInfo.presentMode = mode;
742 swapchainCreateInfo.clipped = true;
743 swapchainCreateInfo.oldSwapchain = surface->mSwapchain;
744
Greg Daniel2ff202712018-06-14 11:50:10 -0400745 res = mCreateSwapchainKHR(mDevice, &swapchainCreateInfo, nullptr, &surface->mSwapchain);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500746 if (VK_SUCCESS != res) {
747 return false;
748 }
749
750 // destroy the old swapchain
751 if (swapchainCreateInfo.oldSwapchain != VK_NULL_HANDLE) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400752 mDeviceWaitIdle(mDevice);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500753
754 destroyBuffers(surface);
755
Greg Daniel2ff202712018-06-14 11:50:10 -0400756 mDestroySwapchainKHR(mDevice, swapchainCreateInfo.oldSwapchain, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500757 }
758
759 createBuffers(surface, surfaceFormat, extent);
760
761 return true;
762}
763
// Creates a VulkanSurface wrapping the given native window: lazily initializes the
// Vulkan context, creates a VkSurfaceKHR for the window, and builds the initial
// swapchain. Returns nullptr (after cleaning up) if the window is null or if any
// Vulkan step fails. Ownership of the returned VulkanSurface passes to the caller
// (released via destroySurface).
VulkanSurface* VulkanManager::createSurface(ANativeWindow* window) {
    // Ensure instance/device/function pointers are ready before any Vulkan call.
    initialize();

    if (!window) {
        return nullptr;
    }

    VulkanSurface* surface = new VulkanSurface();

    // Zero-init so all optional fields start in a defined state.
    VkAndroidSurfaceCreateInfoKHR surfaceCreateInfo;
    memset(&surfaceCreateInfo, 0, sizeof(VkAndroidSurfaceCreateInfoKHR));
    surfaceCreateInfo.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
    surfaceCreateInfo.pNext = nullptr;
    surfaceCreateInfo.flags = 0;
    surfaceCreateInfo.window = window;

    VkResult res = mCreateAndroidSurfaceKHR(mInstance, &surfaceCreateInfo, nullptr,
                                            &surface->mVkSurface);
    if (VK_SUCCESS != res) {
        // mVkSurface was never created, so a plain delete is sufficient here.
        delete surface;
        return nullptr;
    }

    // Debug-only sanity check that the present queue can present to this surface.
    SkDEBUGCODE(VkBool32 supported; res = mGetPhysicalDeviceSurfaceSupportKHR(
                                            mPhysicalDevice, mPresentQueueIndex, surface->mVkSurface, &supported);
                // All physical devices and queue families on Android must be capable of
                // presentation with any native window.
                SkASSERT(VK_SUCCESS == res && supported););

    if (!createSwapchain(surface)) {
        // destroySurface tears down the VkSurfaceKHR and frees the wrapper.
        destroySurface(surface);
        return nullptr;
    }

    return surface;
}
800
801// Helper to know which src stage flags we need to set when transitioning to the present layout
802static VkPipelineStageFlags layoutToPipelineStageFlags(const VkImageLayout layout) {
803 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
804 return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
805 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
806 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
807 return VK_PIPELINE_STAGE_TRANSFER_BIT;
808 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout ||
809 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
810 VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout ||
811 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
812 return VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
813 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
814 return VK_PIPELINE_STAGE_HOST_BIT;
815 }
816
817 SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
818 return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
819}
820
821// Helper to know which src access mask we need to set when transitioning to the present layout
822static VkAccessFlags layoutToSrcAccessMask(const VkImageLayout layout) {
823 VkAccessFlags flags = 0;
824 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
825 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
John Reck1bcacfd2017-11-03 10:12:19 -0700826 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT |
827 VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_HOST_WRITE_BIT |
828 VK_ACCESS_HOST_READ_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500829 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
830 flags = VK_ACCESS_HOST_WRITE_BIT;
831 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
832 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
833 } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
834 flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
835 } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
836 flags = VK_ACCESS_TRANSFER_WRITE_BIT;
837 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
838 flags = VK_ACCESS_TRANSFER_READ_BIT;
839 } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
840 flags = VK_ACCESS_SHADER_READ_BIT;
841 }
842 return flags;
843}
844
// Presents the current backbuffer of `surface`: records and submits a queue-family
// release barrier that transitions the swapchain image to PRESENT_SRC_KHR, then
// presents it on the present queue, gated by a semaphore so presentation only
// happens after the transition (and all prior rendering) completes.
// Precondition: a backbuffer was previously acquired (surface->mBackbuffers set and
// mCurrentBackbufferIndex valid).
void VulkanManager::swapBuffers(VulkanSurface* surface) {
    // Debug/profiling property: serialize with the GPU before every swap.
    if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
        ATRACE_NAME("Finishing GPU work");
        mDeviceWaitIdle(mDevice);
    }

    SkASSERT(surface->mBackbuffers);
    VulkanSurface::BackbufferInfo* backbuffer =
            surface->mBackbuffers + surface->mCurrentBackbufferIndex;

    // Pull the VkImage info back out of the SkSurface; kFlushRead flushes pending
    // Skia work so the queried layout reflects what actually ran.
    SkSurface* skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface.get();
    GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
            SkSurface::kFlushRead_BackendHandleAccess);
    SkASSERT(backendRT.isValid());

    GrVkImageInfo imageInfo;
    SkAssertResult(backendRT.getVkImageInfo(&imageInfo));

    // Check to make sure we never change the actually wrapped image
    SkASSERT(imageInfo.fImage == surface->mImages[backbuffer->mImageIndex]);

    // We need to transition the image to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR and make sure that all
    // previous work is complete for before presenting. So we first add the necessary barrier here.
    VkImageLayout layout = imageInfo.fImageLayout;
    VkPipelineStageFlags srcStageMask = layoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = layoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;

    // The src/dst queue family indices also perform a queue-family ownership release
    // from the graphics queue to the present queue when the two differ.
    VkImageMemoryBarrier imageMemoryBarrier = {
            VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,    // sType
            NULL,                                      // pNext
            srcAccessMask,                             // outputMask
            dstAccessMask,                             // inputMask
            layout,                                    // oldLayout
            VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,           // newLayout
            mGraphicsQueueIndex,                       // srcQueueFamilyIndex
            mPresentQueueIndex,                        // dstQueueFamilyIndex
            surface->mImages[backbuffer->mImageIndex], // image
            {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}    // subresourceRange
    };

    // Command buffer [1] is dedicated to the render->present transition
    // (presumably [0] handles the acquire-side transition — see createBuffers).
    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[1], 0);
    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[1], &info);
    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[1], srcStageMask, dstStageMask, 0, 0,
                        nullptr, 0, nullptr, 1, &imageMemoryBarrier);
    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[1]);

    // Record the layout we just transitioned to, so the next acquire of this image
    // knows its starting state.
    surface->mImageInfos[backbuffer->mImageIndex].mImageLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 0;
    submitInfo.pWaitDstStageMask = 0;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[1];
    submitInfo.signalSemaphoreCount = 1;
    // When this command buffer finishes we will signal this semaphore so that we know it is now
    // safe to present the image to the screen.
    submitInfo.pSignalSemaphores = &backbuffer->mRenderSemaphore;

    // Attach second fence to submission here so we can track when the command buffer finishes.
    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[1]);

    // Submit present operation to present queue. We use a semaphore here to make sure all rendering
    // to the image is complete and that the layout has been change to present on the graphics
    // queue.
    const VkPresentInfoKHR presentInfo = {
            VK_STRUCTURE_TYPE_PRESENT_INFO_KHR, // sType
            NULL,                               // pNext
            1,                                  // waitSemaphoreCount
            &backbuffer->mRenderSemaphore,      // pWaitSemaphores
            1,                                  // swapchainCount
            &surface->mSwapchain,               // pSwapchains
            &backbuffer->mImageIndex,           // pImageIndices
            NULL                                // pResults
    };

    mQueuePresentKHR(mPresentQueue, &presentInfo);

    // Bookkeeping for buffer-age tracking (see getAge): stamp this image with the
    // current frame counter, mark it valid, and advance the clock.
    surface->mBackbuffer.reset();
    surface->mImageInfos[backbuffer->mImageIndex].mLastUsed = surface->mCurrentTime;
    surface->mImageInfos[backbuffer->mImageIndex].mInvalid = false;
    surface->mCurrentTime++;
}
936
937int VulkanManager::getAge(VulkanSurface* surface) {
Greg Daniel74ea2012017-11-10 11:32:58 -0500938 SkASSERT(surface->mBackbuffers);
John Reck1bcacfd2017-11-03 10:12:19 -0700939 VulkanSurface::BackbufferInfo* backbuffer =
940 surface->mBackbuffers + surface->mCurrentBackbufferIndex;
941 if (mSwapBehavior == SwapBehavior::Discard ||
942 surface->mImageInfos[backbuffer->mImageIndex].mInvalid) {
Greg Danielcd558522016-11-17 13:31:40 -0500943 return 0;
944 }
945 uint16_t lastUsed = surface->mImageInfos[backbuffer->mImageIndex].mLastUsed;
946 return surface->mCurrentTime - lastUsed;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500947}
948
949} /* namespace renderthread */
950} /* namespace uirenderer */
951} /* namespace android */