/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
17#include "VulkanManager.h"
18
19#include "DeviceInfo.h"
20#include "RenderThread.h"
21
22#include <GrContext.h>
23#include <GrTypes.h>
24#include <vk/GrVkTypes.h>
25
26namespace android {
27namespace uirenderer {
28namespace renderthread {
29
30#define GET_PROC(F) m ## F = (PFN_vk ## F) vkGetInstanceProcAddr(instance, "vk" #F)
31#define GET_DEV_PROC(F) m ## F = (PFN_vk ## F) vkGetDeviceProcAddr(device, "vk" #F)
32
// Construction is cheap and lazy: no Vulkan objects are created here.
// The actual context setup happens in initialize() (called from createSurface()).
VulkanManager::VulkanManager(RenderThread& thread) : mRenderThread(thread) {
}
35
36void VulkanManager::destroy() {
37 if (!hasVkContext()) return;
38
39 if (VK_NULL_HANDLE != mCommandPool) {
40 mDestroyCommandPool(mBackendContext->fDevice, mCommandPool, nullptr);
41 mCommandPool = VK_NULL_HANDLE;
42 }
43}
44
// One-time setup of the Vulkan context: creates the Skia Vulkan backend
// context, resolves every instance/device entry point this class calls,
// creates the command pool used for layout-transition command buffers, and
// installs a Vulkan-backed GrContext on the RenderThread. Idempotent.
void VulkanManager::initialize() {
    if (hasVkContext()) { return; }

    // On Android any queue family can present to a native window, so the
    // present-support callback unconditionally reports true.
    auto canPresent = [](VkInstance, VkPhysicalDevice, uint32_t) { return true; };

    // NOTE(review): Create() can return nullptr on failure; the dereferences
    // below assume it succeeded — confirm whether a failure path is needed.
    mBackendContext.reset(GrVkBackendContext::Create(&mPresentQueueIndex, canPresent));

    // Get all the addresses of needed vulkan functions
    VkInstance instance = mBackendContext->fInstance;
    VkDevice device = mBackendContext->fDevice;
    GET_PROC(CreateAndroidSurfaceKHR);
    GET_PROC(DestroySurfaceKHR);
    GET_PROC(GetPhysicalDeviceSurfaceSupportKHR);
    GET_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
    GET_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
    GET_PROC(GetPhysicalDeviceSurfacePresentModesKHR);
    GET_DEV_PROC(CreateSwapchainKHR);
    GET_DEV_PROC(DestroySwapchainKHR);
    GET_DEV_PROC(GetSwapchainImagesKHR);
    GET_DEV_PROC(AcquireNextImageKHR);
    GET_DEV_PROC(QueuePresentKHR);
    GET_DEV_PROC(CreateCommandPool);
    GET_DEV_PROC(DestroyCommandPool);
    GET_DEV_PROC(AllocateCommandBuffers);
    GET_DEV_PROC(FreeCommandBuffers);
    GET_DEV_PROC(ResetCommandBuffer);
    GET_DEV_PROC(BeginCommandBuffer);
    GET_DEV_PROC(EndCommandBuffer);
    GET_DEV_PROC(CmdPipelineBarrier);
    GET_DEV_PROC(GetDeviceQueue);
    GET_DEV_PROC(QueueSubmit);
    GET_DEV_PROC(QueueWaitIdle);
    GET_DEV_PROC(DeviceWaitIdle);
    GET_DEV_PROC(CreateSemaphore);
    GET_DEV_PROC(DestroySemaphore);
    GET_DEV_PROC(CreateFence);
    GET_DEV_PROC(DestroyFence);
    GET_DEV_PROC(WaitForFences);
    GET_DEV_PROC(ResetFences);

    // create the command pool for the command buffers
    if (VK_NULL_HANDLE == mCommandPool) {
        VkCommandPoolCreateInfo commandPoolInfo;
        memset(&commandPoolInfo, 0, sizeof(VkCommandPoolCreateInfo));
        commandPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
        // this needs to be on the render queue
        commandPoolInfo.queueFamilyIndex = mBackendContext->fGraphicsQueueIndex;
        // RESET_COMMAND_BUFFER lets us reset individual buffers (see
        // mResetCommandBuffer calls) instead of the whole pool.
        commandPoolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
        SkDEBUGCODE(VkResult res =) mCreateCommandPool(mBackendContext->fDevice,
                &commandPoolInfo, nullptr, &mCommandPool);
        SkASSERT(VK_SUCCESS == res);
    }

    mGetDeviceQueue(mBackendContext->fDevice, mPresentQueueIndex, 0, &mPresentQueue);

    mRenderThread.setGrContext(GrContext::Create(kVulkan_GrBackend,
            (GrBackendContext) mBackendContext.get()));
    DeviceInfo::initialize(mRenderThread.getGrContext()->caps()->maxRenderTargetSize());
}
104
105// Returns the next BackbufferInfo to use for the next draw. The function will make sure all
106// previous uses have finished before returning.
107VulkanSurface::BackbufferInfo* VulkanManager::getAvailableBackbuffer(VulkanSurface* surface) {
108 SkASSERT(surface->mBackbuffers);
109
110 ++surface->mCurrentBackbufferIndex;
111 if (surface->mCurrentBackbufferIndex > surface->mImageCount) {
112 surface->mCurrentBackbufferIndex = 0;
113 }
114
115 VulkanSurface::BackbufferInfo* backbuffer = surface->mBackbuffers +
116 surface->mCurrentBackbufferIndex;
117
118 // Before we reuse a backbuffer, make sure its fences have all signaled so that we can safely
119 // reuse its commands buffers.
120 VkResult res = mWaitForFences(mBackendContext->fDevice, 2, backbuffer->mUsageFences,
121 true, UINT64_MAX);
122 if (res != VK_SUCCESS) {
123 return nullptr;
124 }
125
126 return backbuffer;
127}
128
129
// Acquires the next swapchain image, records and submits a barrier moving it
// to COLOR_ATTACHMENT_OPTIMAL (and transferring queue-family ownership back
// from the present queue), then returns the SkSurface wrapping that image.
// Returns nullptr if the surface was lost or the swapchain could not be
// recreated.
SkSurface* VulkanManager::getBackbufferSurface(VulkanSurface* surface) {
    VulkanSurface::BackbufferInfo* backbuffer = getAvailableBackbuffer(surface);
    SkASSERT(backbuffer);

    VkResult res;

    // getAvailableBackbuffer already waited on both fences; reset them so this
    // frame's two submissions can be tracked.
    res = mResetFences(mBackendContext->fDevice, 2, backbuffer->mUsageFences);
    SkASSERT(VK_SUCCESS == res);

    // The acquire will signal the attached mAcquireSemaphore. We use this to know the image has
    // finished presenting and that it is safe to begin sending new commands to the returned image.
    res = mAcquireNextImageKHR(mBackendContext->fDevice, surface->mSwapchain, UINT64_MAX,
            backbuffer->mAcquireSemaphore, VK_NULL_HANDLE, &backbuffer->mImageIndex);

    if (VK_ERROR_SURFACE_LOST_KHR == res) {
        // need to figure out how to create a new vkSurface without the platformData*
        // maybe use attach somehow? but need a Window
        // NOTE(review): the fences were reset above and are never re-submitted
        // on this path — a later wait on this slot could block; confirm.
        return nullptr;
    }
    if (VK_ERROR_OUT_OF_DATE_KHR == res) {
        // tear swapchain down and try again
        if (!createSwapchain(surface)) {
            return nullptr;
        }

        // acquire the image
        res = mAcquireNextImageKHR(mBackendContext->fDevice, surface->mSwapchain, UINT64_MAX,
                backbuffer->mAcquireSemaphore, VK_NULL_HANDLE, &backbuffer->mImageIndex);

        if (VK_SUCCESS != res) {
            return nullptr;
        }
    }

    // set up layout transfer from initial to color attachment
    VkImageLayout layout = surface->mImageLayouts[backbuffer->mImageIndex];
    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout || VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout);
    // A never-used image (UNDEFINED) has nothing to wait on; a previously
    // presented image must order against the presentation engine's read.
    VkPipelineStageFlags srcStageMask = (VK_IMAGE_LAYOUT_UNDEFINED == layout) ?
                                        VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT :
                                        VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkAccessFlags srcAccessMask = (VK_IMAGE_LAYOUT_UNDEFINED == layout) ?
                                  0 : VK_ACCESS_MEMORY_READ_BIT;
    VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;

    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
        NULL, // pNext
        srcAccessMask, // outputMask
        dstAccessMask, // inputMask
        layout, // oldLayout
        VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // newLayout
        mPresentQueueIndex, // srcQueueFamilyIndex
        mBackendContext->fGraphicsQueueIndex, // dstQueueFamilyIndex
        surface->mImages[backbuffer->mImageIndex], // image
        { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 } // subresourceRange
    };
    // Command buffer [0] is reserved for this acquire-side transition
    // ([1] is used by swapBuffers for the present-side transition).
    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[0], 0);

    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[0], &info);

    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[0], srcStageMask, dstStageMask, 0,
            0, nullptr, 0, nullptr, 1, &imageMemoryBarrier);

    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[0]);

    VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 1;
    // Wait to make sure aquire semaphore set above has signaled.
    submitInfo.pWaitSemaphores = &backbuffer->mAcquireSemaphore;
    submitInfo.pWaitDstStageMask = &waitDstStageFlags;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[0];
    submitInfo.signalSemaphoreCount = 0;

    // Attach first fence to submission here so we can track when the command buffer finishes.
    mQueueSubmit(mBackendContext->fQueue, 1, &submitInfo, backbuffer->mUsageFences[0]);

    // We need to notify Skia that we changed the layout of the wrapped VkImage
    GrVkImageInfo* imageInfo;
    sk_sp<SkSurface> skSurface = surface->mSurfaces[backbuffer->mImageIndex];
    skSurface->getRenderTargetHandle((GrBackendObject*)&imageInfo,
            SkSurface::kFlushRead_BackendHandleAccess);
    imageInfo->updateImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);

    surface->mBackbuffer = std::move(skSurface);
    return surface->mBackbuffer.get();
}
226
227void VulkanManager::destroyBuffers(VulkanSurface* surface) {
228 if (surface->mBackbuffers) {
229 for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
230 mWaitForFences(mBackendContext->fDevice, 2, surface->mBackbuffers[i].mUsageFences, true,
231 UINT64_MAX);
232 surface->mBackbuffers[i].mImageIndex = -1;
233 mDestroySemaphore(mBackendContext->fDevice, surface->mBackbuffers[i].mAcquireSemaphore,
234 nullptr);
235 mDestroySemaphore(mBackendContext->fDevice, surface->mBackbuffers[i].mRenderSemaphore,
236 nullptr);
237 mFreeCommandBuffers(mBackendContext->fDevice, mCommandPool, 2,
238 surface->mBackbuffers[i].mTransitionCmdBuffers);
239 mDestroyFence(mBackendContext->fDevice, surface->mBackbuffers[i].mUsageFences[0], 0);
240 mDestroyFence(mBackendContext->fDevice, surface->mBackbuffers[i].mUsageFences[1], 0);
241 }
242 }
243
244 delete[] surface->mBackbuffers;
245 surface->mBackbuffers = nullptr;
246 delete[] surface->mSurfaces;
247 surface->mSurfaces = nullptr;
248 delete[] surface->mImageLayouts;
249 surface->mImageLayouts = nullptr;
250 delete[] surface->mImages;
251 surface->mImages = nullptr;
252}
253
254void VulkanManager::destroySurface(VulkanSurface* surface) {
255 // Make sure all submit commands have finished before starting to destroy objects.
256 if (VK_NULL_HANDLE != mPresentQueue) {
257 mQueueWaitIdle(mPresentQueue);
258 }
259 mDeviceWaitIdle(mBackendContext->fDevice);
260
261 destroyBuffers(surface);
262
263 if (VK_NULL_HANDLE != surface->mSwapchain) {
264 mDestroySwapchainKHR(mBackendContext->fDevice, surface->mSwapchain, nullptr);
265 surface->mSwapchain = VK_NULL_HANDLE;
266 }
267
268 if (VK_NULL_HANDLE != surface->mVkSurface) {
269 mDestroySurfaceKHR(mBackendContext->fInstance, surface->mVkSurface, nullptr);
270 surface->mVkSurface = VK_NULL_HANDLE;
271 }
272 delete surface;
273}
274
// Queries the swapchain's images, wraps each in an SkSurface, and allocates
// the per-backbuffer semaphores, command buffers, and fences used to track
// frame lifetimes.
void VulkanManager::createBuffers(VulkanSurface* surface, VkFormat format, VkExtent2D extent) {
    // Standard two-call pattern: first query the count, then fetch the images.
    mGetSwapchainImagesKHR(mBackendContext->fDevice, surface->mSwapchain, &surface->mImageCount,
            nullptr);
    SkASSERT(surface->mImageCount);
    surface->mImages = new VkImage[surface->mImageCount];
    mGetSwapchainImagesKHR(mBackendContext->fDevice, surface->mSwapchain,
            &surface->mImageCount, surface->mImages);

    SkSurfaceProps props(0, kUnknown_SkPixelGeometry);

    bool wantSRGB = VK_FORMAT_R8G8B8A8_SRGB == format;
    GrPixelConfig config = wantSRGB ? kSRGBA_8888_GrPixelConfig : kRGBA_8888_GrPixelConfig;

    // set up initial image layouts and create surfaces
    surface->mImageLayouts = new VkImageLayout[surface->mImageCount];
    surface->mSurfaces = new sk_sp<SkSurface>[surface->mImageCount];
    for (uint32_t i = 0; i < surface->mImageCount; ++i) {
        GrBackendRenderTargetDesc desc;
        GrVkImageInfo info;
        info.fImage = surface->mImages[i];
        // Swapchain images are owned by the presentation engine, not Skia, so
        // no backing allocation is provided.
        info.fAlloc = { VK_NULL_HANDLE, 0, 0, 0 };
        info.fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
        info.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
        info.fFormat = format;
        info.fLevelCount = 1;

        desc.fWidth = extent.width;
        desc.fHeight = extent.height;
        desc.fConfig = config;
        desc.fOrigin = kTopLeft_GrSurfaceOrigin;
        desc.fSampleCnt = 0;
        desc.fStencilBits = 0;
        desc.fRenderTargetHandle = (GrBackendObject) &info;

        surface->mSurfaces[i] = SkSurface::MakeFromBackendRenderTarget(mRenderThread.getGrContext(),
                desc, &props);
        surface->mImageLayouts[i] = VK_IMAGE_LAYOUT_UNDEFINED;
    }

    SkASSERT(mCommandPool != VK_NULL_HANDLE);

    // set up the backbuffers
    VkSemaphoreCreateInfo semaphoreInfo;
    memset(&semaphoreInfo, 0, sizeof(VkSemaphoreCreateInfo));
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    // Two command buffers per backbuffer: [0] acquire-side transition,
    // [1] present-side transition (see getBackbufferSurface / swapBuffers).
    VkCommandBufferAllocateInfo commandBuffersInfo;
    memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
    commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    commandBuffersInfo.pNext = nullptr;
    commandBuffersInfo.commandPool = mCommandPool;
    commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    commandBuffersInfo.commandBufferCount = 2;
    // Fences start signaled so the first wait in getAvailableBackbuffer
    // returns immediately.
    VkFenceCreateInfo fenceInfo;
    memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
    fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    fenceInfo.pNext = nullptr;
    fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;

    // we create one additional backbuffer structure here, because we want to
    // give the command buffers they contain a chance to finish before we cycle back
    surface->mBackbuffers = new VulkanSurface::BackbufferInfo[surface->mImageCount + 1];
    for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
        SkDEBUGCODE(VkResult res);
        surface->mBackbuffers[i].mImageIndex = -1;
        SkDEBUGCODE(res = ) mCreateSemaphore(mBackendContext->fDevice, &semaphoreInfo, nullptr,
                &surface->mBackbuffers[i].mAcquireSemaphore);
        SkDEBUGCODE(res = ) mCreateSemaphore(mBackendContext->fDevice, &semaphoreInfo, nullptr,
                &surface->mBackbuffers[i].mRenderSemaphore);
        SkDEBUGCODE(res = ) mAllocateCommandBuffers(mBackendContext->fDevice, &commandBuffersInfo,
                surface->mBackbuffers[i].mTransitionCmdBuffers);
        SkDEBUGCODE(res = ) mCreateFence(mBackendContext->fDevice, &fenceInfo, nullptr,
                &surface->mBackbuffers[i].mUsageFences[0]);
        SkDEBUGCODE(res = ) mCreateFence(mBackendContext->fDevice, &fenceInfo, nullptr,
                &surface->mBackbuffers[i].mUsageFences[1]);
        // NOTE(review): each call overwrites res, so only the last mCreateFence
        // is actually checked here, even in debug builds.
        SkASSERT(VK_SUCCESS == res);
    }
    // Start one past the last image so the first getAvailableBackbuffer wraps
    // to slot 0.
    surface->mCurrentBackbufferIndex = surface->mImageCount;
}
355
// (Re)creates the swapchain for |surface| from the current surface
// capabilities — choosing extent, image count, format/color space, sharing
// mode, and present mode — then rebuilds the per-image buffers. Any existing
// swapchain is passed as oldSwapchain and destroyed afterwards. Returns false
// on any Vulkan query/creation failure.
bool VulkanManager::createSwapchain(VulkanSurface* surface) {
    // check for capabilities
    VkSurfaceCapabilitiesKHR caps;
    VkResult res = mGetPhysicalDeviceSurfaceCapabilitiesKHR(mBackendContext->fPhysicalDevice,
            surface->mVkSurface, &caps);
    if (VK_SUCCESS != res) {
        return false;
    }

    // Two-call pattern for the supported surface formats.
    uint32_t surfaceFormatCount;
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mBackendContext->fPhysicalDevice, surface->mVkSurface,
            &surfaceFormatCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    SkAutoMalloc surfaceFormatAlloc(surfaceFormatCount * sizeof(VkSurfaceFormatKHR));
    VkSurfaceFormatKHR* surfaceFormats = (VkSurfaceFormatKHR*)surfaceFormatAlloc.get();
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mBackendContext->fPhysicalDevice, surface->mVkSurface,
            &surfaceFormatCount, surfaceFormats);
    if (VK_SUCCESS != res) {
        return false;
    }

    // Two-call pattern for the supported present modes.
    uint32_t presentModeCount;
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mBackendContext->fPhysicalDevice,
            surface->mVkSurface, &presentModeCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    SkAutoMalloc presentModeAlloc(presentModeCount * sizeof(VkPresentModeKHR));
    VkPresentModeKHR* presentModes = (VkPresentModeKHR*)presentModeAlloc.get();
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mBackendContext->fPhysicalDevice,
            surface->mVkSurface, &presentModeCount, presentModes);
    if (VK_SUCCESS != res) {
        return false;
    }

    VkExtent2D extent = caps.currentExtent;
    // clamp width; to handle currentExtent of -1 and protect us from broken hints
    if (extent.width < caps.minImageExtent.width) {
        extent.width = caps.minImageExtent.width;
    }
    SkASSERT(extent.width <= caps.maxImageExtent.width);
    // clamp height
    if (extent.height < caps.minImageExtent.height) {
        extent.height = caps.minImageExtent.height;
    }
    SkASSERT(extent.height <= caps.maxImageExtent.height);

    // Ask for two images beyond the minimum so acquire rarely blocks;
    // a maxImageCount of 0 means "no limit".
    uint32_t imageCount = caps.minImageCount + 2;
    if (caps.maxImageCount > 0 && imageCount > caps.maxImageCount) {
        // Application must settle for fewer images than desired:
        imageCount = caps.maxImageCount;
    }

    // Currently Skia requires the images to be color attchments and support all transfer
    // operations.
    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    SkASSERT((caps.supportedUsageFlags & usageFlags) == usageFlags);
    SkASSERT(caps.supportedTransforms & caps.currentTransform);
    SkASSERT(caps.supportedCompositeAlpha & (VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR |
                                             VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR));
    // Prefer INHERIT (compositor decides alpha handling), else OPAQUE.
    VkCompositeAlphaFlagBitsKHR composite_alpha =
            (caps.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR) ?
                    VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR :
                    VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;

    // Pick our surface format. For now, just make sure it matches our sRGB request:
    VkFormat surfaceFormat = VK_FORMAT_UNDEFINED;
    VkColorSpaceKHR colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;

    bool wantSRGB = false;
#ifdef ANDROID_ENABLE_LINEAR_BLENDING
    wantSRGB = true;
#endif
    for (uint32_t i = 0; i < surfaceFormatCount; ++i) {
        // We are assuming we can get either R8G8B8A8_UNORM or R8G8B8A8_SRGB
        VkFormat desiredFormat = wantSRGB ? VK_FORMAT_R8G8B8A8_SRGB : VK_FORMAT_R8G8B8A8_UNORM;
        if (desiredFormat == surfaceFormats[i].format) {
            surfaceFormat = surfaceFormats[i].format;
            colorSpace = surfaceFormats[i].colorSpace;
        }
    }

    if (VK_FORMAT_UNDEFINED == surfaceFormat) {
        return false;
    }

    // If mailbox mode is available, use it, as it is the lowest-latency non-
    // tearing mode. If not, fall back to FIFO which is always available.
    VkPresentModeKHR mode = VK_PRESENT_MODE_FIFO_KHR;
    for (uint32_t i = 0; i < presentModeCount; ++i) {
        // use mailbox
        if (VK_PRESENT_MODE_MAILBOX_KHR == presentModes[i]) {
            mode = presentModes[i];
            break;
        }
    }

    VkSwapchainCreateInfoKHR swapchainCreateInfo;
    memset(&swapchainCreateInfo, 0, sizeof(VkSwapchainCreateInfoKHR));
    swapchainCreateInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
    swapchainCreateInfo.surface = surface->mVkSurface;
    swapchainCreateInfo.minImageCount = imageCount;
    swapchainCreateInfo.imageFormat = surfaceFormat;
    swapchainCreateInfo.imageColorSpace = colorSpace;
    swapchainCreateInfo.imageExtent = extent;
    swapchainCreateInfo.imageArrayLayers = 1;
    swapchainCreateInfo.imageUsage = usageFlags;

    // Separate graphics/present queue families require CONCURRENT sharing;
    // otherwise EXCLUSIVE is cheaper.
    uint32_t queueFamilies[] = { mBackendContext->fGraphicsQueueIndex, mPresentQueueIndex };
    if (mBackendContext->fGraphicsQueueIndex != mPresentQueueIndex) {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
        swapchainCreateInfo.queueFamilyIndexCount = 2;
        swapchainCreateInfo.pQueueFamilyIndices = queueFamilies;
    } else {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
        swapchainCreateInfo.queueFamilyIndexCount = 0;
        swapchainCreateInfo.pQueueFamilyIndices = nullptr;
    }

    swapchainCreateInfo.preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
    swapchainCreateInfo.compositeAlpha = composite_alpha;
    swapchainCreateInfo.presentMode = mode;
    swapchainCreateInfo.clipped = true;
    // Passing the existing swapchain lets the driver reuse resources and keeps
    // presentation alive while the replacement is created.
    swapchainCreateInfo.oldSwapchain = surface->mSwapchain;

    res = mCreateSwapchainKHR(mBackendContext->fDevice, &swapchainCreateInfo, nullptr,
            &surface->mSwapchain);
    if (VK_SUCCESS != res) {
        return false;
    }

    // destroy the old swapchain
    if (swapchainCreateInfo.oldSwapchain != VK_NULL_HANDLE) {
        mDeviceWaitIdle(mBackendContext->fDevice);

        destroyBuffers(surface);

        mDestroySwapchainKHR(mBackendContext->fDevice, swapchainCreateInfo.oldSwapchain, nullptr);
    }

    createBuffers(surface, surfaceFormat, extent);

    return true;
}
506
507
508VulkanSurface* VulkanManager::createSurface(ANativeWindow* window) {
509 initialize();
510
511 if (!window) {
512 return nullptr;
513 }
514
515 VulkanSurface* surface = new VulkanSurface();
516
517 VkAndroidSurfaceCreateInfoKHR surfaceCreateInfo;
518 memset(&surfaceCreateInfo, 0, sizeof(VkAndroidSurfaceCreateInfoKHR));
519 surfaceCreateInfo.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
520 surfaceCreateInfo.pNext = nullptr;
521 surfaceCreateInfo.flags = 0;
522 surfaceCreateInfo.window = window;
523
524 VkResult res = mCreateAndroidSurfaceKHR(mBackendContext->fInstance, &surfaceCreateInfo,
525 nullptr, &surface->mVkSurface);
526 if (VK_SUCCESS != res) {
527 delete surface;
528 return nullptr;
529 }
530
531SkDEBUGCODE(
532 VkBool32 supported;
533 res = mGetPhysicalDeviceSurfaceSupportKHR(mBackendContext->fPhysicalDevice,
534 mPresentQueueIndex, surface->mVkSurface, &supported);
535 // All physical devices and queue families on Android must be capable of presentation with any
536 // native window.
537 SkASSERT(VK_SUCCESS == res && supported);
538);
539
540 if (!createSwapchain(surface)) {
541 destroySurface(surface);
542 return nullptr;
543 }
544
545 return surface;
546}
547
548// Helper to know which src stage flags we need to set when transitioning to the present layout
549static VkPipelineStageFlags layoutToPipelineStageFlags(const VkImageLayout layout) {
550 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
551 return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
552 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
553 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
554 return VK_PIPELINE_STAGE_TRANSFER_BIT;
555 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout ||
556 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
557 VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout ||
558 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
559 return VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
560 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
561 return VK_PIPELINE_STAGE_HOST_BIT;
562 }
563
564 SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
565 return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
566}
567
568// Helper to know which src access mask we need to set when transitioning to the present layout
569static VkAccessFlags layoutToSrcAccessMask(const VkImageLayout layout) {
570 VkAccessFlags flags = 0;
571 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
572 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
573 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
574 VK_ACCESS_TRANSFER_WRITE_BIT |
575 VK_ACCESS_TRANSFER_READ_BIT |
576 VK_ACCESS_SHADER_READ_BIT |
577 VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_HOST_READ_BIT;
578 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
579 flags = VK_ACCESS_HOST_WRITE_BIT;
580 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
581 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
582 } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
583 flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
584 } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
585 flags = VK_ACCESS_TRANSFER_WRITE_BIT;
586 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
587 flags = VK_ACCESS_TRANSFER_READ_BIT;
588 } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
589 flags = VK_ACCESS_SHADER_READ_BIT;
590 }
591 return flags;
592}
593
// Transitions the current backbuffer image to PRESENT_SRC_KHR (handing
// queue-family ownership from the graphics queue to the present queue) and
// queues it for presentation.
void VulkanManager::swapBuffers(VulkanSurface* surface) {
    VulkanSurface::BackbufferInfo* backbuffer = surface->mBackbuffers +
            surface->mCurrentBackbufferIndex;
    GrVkImageInfo* imageInfo;
    SkSurface* skSurface = surface->mSurfaces[backbuffer->mImageIndex].get();
    // kFlushRead flushes pending Skia rendering on this surface before we
    // inspect the backend image state.
    skSurface->getRenderTargetHandle((GrBackendObject*)&imageInfo,
            SkSurface::kFlushRead_BackendHandleAccess);
    // Check to make sure we never change the actually wrapped image
    SkASSERT(imageInfo->fImage == surface->mImages[backbuffer->mImageIndex]);

    // We need to transition the image to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR and make sure that all
    // previous work is complete for before presenting. So we first add the necessary barrier here.
    VkImageLayout layout = imageInfo->fImageLayout;
    VkPipelineStageFlags srcStageMask = layoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = layoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;

    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
        NULL, // pNext
        srcAccessMask, // outputMask
        dstAccessMask, // inputMask
        layout, // oldLayout
        VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, // newLayout
        mBackendContext->fGraphicsQueueIndex, // srcQueueFamilyIndex
        mPresentQueueIndex, // dstQueueFamilyIndex
        surface->mImages[backbuffer->mImageIndex], // image
        { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 } // subresourceRange
    };

    // Command buffer [1] is reserved for this present-side transition
    // ([0] was used by getBackbufferSurface for the acquire-side transition).
    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[1], 0);
    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[1], &info);
    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[1], srcStageMask, dstStageMask, 0,
            0, nullptr, 0, nullptr, 1, &imageMemoryBarrier);
    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[1]);

    surface->mImageLayouts[backbuffer->mImageIndex] = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 0;
    submitInfo.pWaitDstStageMask = 0;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[1];
    submitInfo.signalSemaphoreCount = 1;
    // When this command buffer finishes we will signal this semaphore so that we know it is now
    // safe to present the image to the screen.
    submitInfo.pSignalSemaphores = &backbuffer->mRenderSemaphore;

    // Attach second fence to submission here so we can track when the command buffer finishes.
    mQueueSubmit(mBackendContext->fQueue, 1, &submitInfo, backbuffer->mUsageFences[1]);

    // Submit present operation to present queue. We use a semaphore here to make sure all rendering
    // to the image is complete and that the layout has been change to present on the graphics
    // queue.
    const VkPresentInfoKHR presentInfo =
    {
        VK_STRUCTURE_TYPE_PRESENT_INFO_KHR, // sType
        NULL, // pNext
        1, // waitSemaphoreCount
        &backbuffer->mRenderSemaphore, // pWaitSemaphores
        1, // swapchainCount
        &surface->mSwapchain, // pSwapchains
        &backbuffer->mImageIndex, // pImageIndices
        NULL // pResults
    };

    mQueuePresentKHR(mPresentQueue, &presentInfo);

    // Drop our reference; the swapchain keeps the image alive while presenting.
    surface->mBackbuffer.reset();
}
672
673} /* namespace renderthread */
674} /* namespace uirenderer */
675} /* namespace android */