/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "VulkanManager.h"

#include "DeviceInfo.h"
#include "Properties.h"
#include "RenderThread.h"

#include <GrContext.h>
#include <GrTypes.h>
#include <vk/GrVkTypes.h>

namespace android {
namespace uirenderer {
namespace renderthread {

#define GET_PROC(F) m ## F = (PFN_vk ## F) vkGetInstanceProcAddr(instance, "vk" #F)
#define GET_DEV_PROC(F) m ## F = (PFN_vk ## F) vkGetDeviceProcAddr(device, "vk" #F)
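
// As an illustration, GET_DEV_PROC(CreateCommandPool) expands to
//     mCreateCommandPool = (PFN_vkCreateCommandPool)
//             vkGetDeviceProcAddr(device, "vkCreateCommandPool");
// i.e. each macro fills in the matching mFoo function pointer, and both macros
// expect a local `instance` or `device` variable to be in scope at the call site.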

VulkanManager::VulkanManager(RenderThread& thread) : mRenderThread(thread) {
}

void VulkanManager::destroy() {
    if (!hasVkContext()) return;

    if (VK_NULL_HANDLE != mCommandPool) {
        mDestroyCommandPool(mBackendContext->fDevice, mCommandPool, nullptr);
        mCommandPool = VK_NULL_HANDLE;
    }
}

void VulkanManager::initialize() {
    if (hasVkContext()) { return; }

    auto canPresent = [](VkInstance, VkPhysicalDevice, uint32_t) { return true; };

    mBackendContext.reset(GrVkBackendContext::Create(&mPresentQueueIndex, canPresent));
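
    // GrVkBackendContext::Create is the Skia helper that builds the VkInstance
    // and VkDevice and selects the graphics and present queue families. The
    // canPresent callback always returns true because on Android every queue
    // family must support presentation to a native window (see the matching
    // assert in createSurface()).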

    // Get the addresses of all the Vulkan functions we need.
    VkInstance instance = mBackendContext->fInstance;
    VkDevice device = mBackendContext->fDevice;
    GET_PROC(CreateAndroidSurfaceKHR);
    GET_PROC(DestroySurfaceKHR);
    GET_PROC(GetPhysicalDeviceSurfaceSupportKHR);
    GET_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
    GET_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
    GET_PROC(GetPhysicalDeviceSurfacePresentModesKHR);
    GET_DEV_PROC(CreateSwapchainKHR);
    GET_DEV_PROC(DestroySwapchainKHR);
    GET_DEV_PROC(GetSwapchainImagesKHR);
    GET_DEV_PROC(AcquireNextImageKHR);
    GET_DEV_PROC(QueuePresentKHR);
    GET_DEV_PROC(CreateCommandPool);
    GET_DEV_PROC(DestroyCommandPool);
    GET_DEV_PROC(AllocateCommandBuffers);
    GET_DEV_PROC(FreeCommandBuffers);
    GET_DEV_PROC(ResetCommandBuffer);
    GET_DEV_PROC(BeginCommandBuffer);
    GET_DEV_PROC(EndCommandBuffer);
    GET_DEV_PROC(CmdPipelineBarrier);
    GET_DEV_PROC(GetDeviceQueue);
    GET_DEV_PROC(QueueSubmit);
    GET_DEV_PROC(QueueWaitIdle);
    GET_DEV_PROC(DeviceWaitIdle);
    GET_DEV_PROC(CreateSemaphore);
    GET_DEV_PROC(DestroySemaphore);
    GET_DEV_PROC(CreateFence);
    GET_DEV_PROC(DestroyFence);
    GET_DEV_PROC(WaitForFences);
    GET_DEV_PROC(ResetFences);

    // create the command pool for the command buffers
    if (VK_NULL_HANDLE == mCommandPool) {
        VkCommandPoolCreateInfo commandPoolInfo;
        memset(&commandPoolInfo, 0, sizeof(VkCommandPoolCreateInfo));
        commandPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
        // the pool must target the queue family we render on (the graphics queue)
        commandPoolInfo.queueFamilyIndex = mBackendContext->fGraphicsQueueIndex;
        commandPoolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
        SkDEBUGCODE(VkResult res =) mCreateCommandPool(mBackendContext->fDevice,
                &commandPoolInfo, nullptr, &mCommandPool);
        SkASSERT(VK_SUCCESS == res);
    }
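
    // Note: the pool uses VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT so the
    // per-backbuffer transition command buffers can be individually reset with
    // vkResetCommandBuffer each frame instead of resetting the whole pool.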

    mGetDeviceQueue(mBackendContext->fDevice, mPresentQueueIndex, 0, &mPresentQueue);

    mRenderThread.setGrContext(GrContext::Create(kVulkan_GrBackend,
            (GrBackendContext) mBackendContext.get()));
    DeviceInfo::initialize(mRenderThread.getGrContext()->caps()->maxRenderTargetSize());

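    // Buffer-age swap behavior lets HWUI redraw only the damaged region each
    // frame; getAge() below reports how many frames old the current buffer's
    // contents are.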
    if (Properties::enablePartialUpdates && Properties::useBufferAge) {
        mSwapBehavior = SwapBehavior::BufferAge;
    }
}

// Returns the next BackbufferInfo to use for the next draw. The function will make sure all
// previous uses have finished before returning.
VulkanSurface::BackbufferInfo* VulkanManager::getAvailableBackbuffer(VulkanSurface* surface) {
    SkASSERT(surface->mBackbuffers);

    ++surface->mCurrentBackbufferIndex;
    if (surface->mCurrentBackbufferIndex > surface->mImageCount) {
        surface->mCurrentBackbufferIndex = 0;
    }
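    // There are mImageCount + 1 backbuffer slots, so valid indices run from 0
    // to mImageCount inclusive; e.g. a 3-image swapchain cycles through four
    // slots before any one is reused.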

    VulkanSurface::BackbufferInfo* backbuffer = surface->mBackbuffers +
            surface->mCurrentBackbufferIndex;

    // Before we reuse a backbuffer, make sure its fences have all signaled so that we can safely
    // reuse its command buffers.
    VkResult res = mWaitForFences(mBackendContext->fDevice, 2, backbuffer->mUsageFences,
            true, UINT64_MAX);
    if (res != VK_SUCCESS) {
        return nullptr;
    }

    return backbuffer;
}

SkSurface* VulkanManager::getBackbufferSurface(VulkanSurface* surface) {
    VulkanSurface::BackbufferInfo* backbuffer = getAvailableBackbuffer(surface);
    SkASSERT(backbuffer);

    VkResult res;

    res = mResetFences(mBackendContext->fDevice, 2, backbuffer->mUsageFences);
    SkASSERT(VK_SUCCESS == res);
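    // The two usage fences track this backbuffer's transition command buffers:
    // fence[0] guards the acquire-time transition submitted below, fence[1] the
    // present-time transition submitted in swapBuffers().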

    // The acquire will signal the attached mAcquireSemaphore. We use this to know the image has
    // finished presenting and that it is safe to begin sending new commands to the returned image.
    res = mAcquireNextImageKHR(mBackendContext->fDevice, surface->mSwapchain, UINT64_MAX,
            backbuffer->mAcquireSemaphore, VK_NULL_HANDLE, &backbuffer->mImageIndex);

    if (VK_ERROR_SURFACE_LOST_KHR == res) {
        // need to figure out how to create a new vkSurface without the platformData*
        // maybe use attach somehow? but need a Window
        return nullptr;
    }
    if (VK_ERROR_OUT_OF_DATE_KHR == res) {
        // tear swapchain down and try again
        if (!createSwapchain(surface)) {
            return nullptr;
        }

        // acquire the image
        res = mAcquireNextImageKHR(mBackendContext->fDevice, surface->mSwapchain, UINT64_MAX,
                backbuffer->mAcquireSemaphore, VK_NULL_HANDLE, &backbuffer->mImageIndex);

        if (VK_SUCCESS != res) {
            return nullptr;
        }
    }

    // set up layout transfer from initial to color attachment
    VkImageLayout layout = surface->mImageInfos[backbuffer->mImageIndex].mImageLayout;
    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout || VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout);
    VkPipelineStageFlags srcStageMask = (VK_IMAGE_LAYOUT_UNDEFINED == layout) ?
            VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT :
            VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkAccessFlags srcAccessMask = (VK_IMAGE_LAYOUT_UNDEFINED == layout) ?
            0 : VK_ACCESS_MEMORY_READ_BIT;
    VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;

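    // Besides the layout change, this barrier transfers ownership of the image
    // from the present queue family to the graphics queue family; on devices
    // where the two indices match, the ownership-transfer part is a no-op.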
    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,    // sType
        NULL,                                      // pNext
        srcAccessMask,                             // srcAccessMask
        dstAccessMask,                             // dstAccessMask
        layout,                                    // oldLayout
        VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,  // newLayout
        mPresentQueueIndex,                        // srcQueueFamilyIndex
        mBackendContext->fGraphicsQueueIndex,      // dstQueueFamilyIndex
        surface->mImages[backbuffer->mImageIndex], // image
        { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 }  // subresourceRange
    };
    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[0], 0);

    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[0], &info);

    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[0], srcStageMask, dstStageMask, 0,
            0, nullptr, 0, nullptr, 1, &imageMemoryBarrier);

    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[0]);

    VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 1;
    // Wait to make sure the acquire semaphore set above has signaled.
    submitInfo.pWaitSemaphores = &backbuffer->mAcquireSemaphore;
    submitInfo.pWaitDstStageMask = &waitDstStageFlags;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[0];
    submitInfo.signalSemaphoreCount = 0;

    // Attach first fence to submission here so we can track when the command buffer finishes.
    mQueueSubmit(mBackendContext->fQueue, 1, &submitInfo, backbuffer->mUsageFences[0]);

    // We need to notify Skia that we changed the layout of the wrapped VkImage.
    GrVkImageInfo* imageInfo;
    sk_sp<SkSurface> skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface;
    skSurface->getRenderTargetHandle((GrBackendObject*)&imageInfo,
            SkSurface::kFlushRead_BackendHandleAccess);
    imageInfo->updateImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);

    surface->mBackbuffer = std::move(skSurface);
    return surface->mBackbuffer.get();
}

void VulkanManager::destroyBuffers(VulkanSurface* surface) {
    if (surface->mBackbuffers) {
        for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
            mWaitForFences(mBackendContext->fDevice, 2, surface->mBackbuffers[i].mUsageFences,
                    true, UINT64_MAX);
            surface->mBackbuffers[i].mImageIndex = -1;
            mDestroySemaphore(mBackendContext->fDevice, surface->mBackbuffers[i].mAcquireSemaphore,
                    nullptr);
            mDestroySemaphore(mBackendContext->fDevice, surface->mBackbuffers[i].mRenderSemaphore,
                    nullptr);
            mFreeCommandBuffers(mBackendContext->fDevice, mCommandPool, 2,
                    surface->mBackbuffers[i].mTransitionCmdBuffers);
            mDestroyFence(mBackendContext->fDevice, surface->mBackbuffers[i].mUsageFences[0],
                    nullptr);
            mDestroyFence(mBackendContext->fDevice, surface->mBackbuffers[i].mUsageFences[1],
                    nullptr);
        }
    }

    delete[] surface->mBackbuffers;
    surface->mBackbuffers = nullptr;
    delete[] surface->mImageInfos;
    surface->mImageInfos = nullptr;
    delete[] surface->mImages;
    surface->mImages = nullptr;
}

void VulkanManager::destroySurface(VulkanSurface* surface) {
    // Make sure all submit commands have finished before starting to destroy objects.
    if (VK_NULL_HANDLE != mPresentQueue) {
        mQueueWaitIdle(mPresentQueue);
    }
    mDeviceWaitIdle(mBackendContext->fDevice);

    destroyBuffers(surface);

    if (VK_NULL_HANDLE != surface->mSwapchain) {
        mDestroySwapchainKHR(mBackendContext->fDevice, surface->mSwapchain, nullptr);
        surface->mSwapchain = VK_NULL_HANDLE;
    }

    if (VK_NULL_HANDLE != surface->mVkSurface) {
        mDestroySurfaceKHR(mBackendContext->fInstance, surface->mVkSurface, nullptr);
        surface->mVkSurface = VK_NULL_HANDLE;
    }
    delete surface;
}

void VulkanManager::createBuffers(VulkanSurface* surface, VkFormat format, VkExtent2D extent) {
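    // Standard two-call enumeration: the first call queries the image count,
    // the second fills in the image handles.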
    mGetSwapchainImagesKHR(mBackendContext->fDevice, surface->mSwapchain, &surface->mImageCount,
            nullptr);
    SkASSERT(surface->mImageCount);
    surface->mImages = new VkImage[surface->mImageCount];
    mGetSwapchainImagesKHR(mBackendContext->fDevice, surface->mSwapchain,
            &surface->mImageCount, surface->mImages);

    SkSurfaceProps props(0, kUnknown_SkPixelGeometry);

    bool wantSRGB = VK_FORMAT_R8G8B8A8_SRGB == format;
    GrPixelConfig config = wantSRGB ? kSRGBA_8888_GrPixelConfig : kRGBA_8888_GrPixelConfig;

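    // The swapchain owns the images and their memory, so each wrapped SkSurface
    // below is given a null GrVkAlloc (fAlloc) and Skia treats the render
    // target as borrowed rather than owned.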
    // set up initial image layouts and create surfaces
    surface->mImageInfos = new VulkanSurface::ImageInfo[surface->mImageCount];
    for (uint32_t i = 0; i < surface->mImageCount; ++i) {
        GrBackendRenderTargetDesc desc;
        GrVkImageInfo info;
        info.fImage = surface->mImages[i];
        info.fAlloc = { VK_NULL_HANDLE, 0, 0, 0 };
        info.fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
        info.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
        info.fFormat = format;
        info.fLevelCount = 1;

        desc.fWidth = extent.width;
        desc.fHeight = extent.height;
        desc.fConfig = config;
        desc.fOrigin = kTopLeft_GrSurfaceOrigin;
        desc.fSampleCnt = 0;
        desc.fStencilBits = 0;
        desc.fRenderTargetHandle = (GrBackendObject) &info;

        VulkanSurface::ImageInfo& imageInfo = surface->mImageInfos[i];
        imageInfo.mSurface = SkSurface::MakeFromBackendRenderTarget(mRenderThread.getGrContext(),
                desc, &props);
    }

    SkASSERT(mCommandPool != VK_NULL_HANDLE);

    // set up the backbuffers
    VkSemaphoreCreateInfo semaphoreInfo;
    memset(&semaphoreInfo, 0, sizeof(VkSemaphoreCreateInfo));
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    VkCommandBufferAllocateInfo commandBuffersInfo;
    memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
    commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    commandBuffersInfo.pNext = nullptr;
    commandBuffersInfo.commandPool = mCommandPool;
    commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    commandBuffersInfo.commandBufferCount = 2;
    VkFenceCreateInfo fenceInfo;
    memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
    fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    fenceInfo.pNext = nullptr;
    fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;
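    // The fences are created already signaled so the first wait in
    // getAvailableBackbuffer() succeeds before any work has been submitted.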

    // we create one additional backbuffer structure here, because we want to
    // give the command buffers they contain a chance to finish before we cycle back
    surface->mBackbuffers = new VulkanSurface::BackbufferInfo[surface->mImageCount + 1];
    for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
        SkDEBUGCODE(VkResult res);
        surface->mBackbuffers[i].mImageIndex = -1;
        SkDEBUGCODE(res = ) mCreateSemaphore(mBackendContext->fDevice, &semaphoreInfo, nullptr,
                &surface->mBackbuffers[i].mAcquireSemaphore);
        SkDEBUGCODE(res = ) mCreateSemaphore(mBackendContext->fDevice, &semaphoreInfo, nullptr,
                &surface->mBackbuffers[i].mRenderSemaphore);
        SkDEBUGCODE(res = ) mAllocateCommandBuffers(mBackendContext->fDevice, &commandBuffersInfo,
                surface->mBackbuffers[i].mTransitionCmdBuffers);
        SkDEBUGCODE(res = ) mCreateFence(mBackendContext->fDevice, &fenceInfo, nullptr,
                &surface->mBackbuffers[i].mUsageFences[0]);
        SkDEBUGCODE(res = ) mCreateFence(mBackendContext->fDevice, &fenceInfo, nullptr,
                &surface->mBackbuffers[i].mUsageFences[1]);
        SkASSERT(VK_SUCCESS == res);
    }
    surface->mCurrentBackbufferIndex = surface->mImageCount;
}

bool VulkanManager::createSwapchain(VulkanSurface* surface) {
    // check for capabilities
    VkSurfaceCapabilitiesKHR caps;
    VkResult res = mGetPhysicalDeviceSurfaceCapabilitiesKHR(mBackendContext->fPhysicalDevice,
            surface->mVkSurface, &caps);
    if (VK_SUCCESS != res) {
        return false;
    }

    uint32_t surfaceFormatCount;
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mBackendContext->fPhysicalDevice, surface->mVkSurface,
            &surfaceFormatCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    SkAutoMalloc surfaceFormatAlloc(surfaceFormatCount * sizeof(VkSurfaceFormatKHR));
    VkSurfaceFormatKHR* surfaceFormats = (VkSurfaceFormatKHR*)surfaceFormatAlloc.get();
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mBackendContext->fPhysicalDevice, surface->mVkSurface,
            &surfaceFormatCount, surfaceFormats);
    if (VK_SUCCESS != res) {
        return false;
    }

    uint32_t presentModeCount;
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mBackendContext->fPhysicalDevice,
            surface->mVkSurface, &presentModeCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    SkAutoMalloc presentModeAlloc(presentModeCount * sizeof(VkPresentModeKHR));
    VkPresentModeKHR* presentModes = (VkPresentModeKHR*)presentModeAlloc.get();
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mBackendContext->fPhysicalDevice,
            surface->mVkSurface, &presentModeCount, presentModes);
    if (VK_SUCCESS != res) {
        return false;
    }

    VkExtent2D extent = caps.currentExtent;
    // clamp width; this handles a currentExtent of 0xFFFFFFFF (i.e. -1, meaning
    // the surface size will be determined by the swapchain) and protects us
    // from broken hints
    if (extent.width < caps.minImageExtent.width) {
        extent.width = caps.minImageExtent.width;
    }
    SkASSERT(extent.width <= caps.maxImageExtent.width);
    // clamp height
    if (extent.height < caps.minImageExtent.height) {
        extent.height = caps.minImageExtent.height;
    }
    SkASSERT(extent.height <= caps.maxImageExtent.height);

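    // Ask for two images beyond the minimum so we can usually acquire the next
    // image without blocking on the presentation engine.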
    uint32_t imageCount = caps.minImageCount + 2;
    if (caps.maxImageCount > 0 && imageCount > caps.maxImageCount) {
        // Application must settle for fewer images than desired:
        imageCount = caps.maxImageCount;
    }

    // Currently Skia requires the images to be color attachments and to support
    // all transfer operations.
    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    SkASSERT((caps.supportedUsageFlags & usageFlags) == usageFlags);
    SkASSERT(caps.supportedTransforms & caps.currentTransform);
    SkASSERT(caps.supportedCompositeAlpha & (VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR |
                                             VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR));
    VkCompositeAlphaFlagBitsKHR composite_alpha =
            (caps.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR) ?
            VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR :
            VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;

    // Pick our surface format. For now, just make sure it matches our sRGB request:
    VkFormat surfaceFormat = VK_FORMAT_UNDEFINED;
    VkColorSpaceKHR colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;

    bool wantSRGB = false;
#ifdef ANDROID_ENABLE_LINEAR_BLENDING
    wantSRGB = true;
#endif
    for (uint32_t i = 0; i < surfaceFormatCount; ++i) {
        // We are assuming we can get either R8G8B8A8_UNORM or R8G8B8A8_SRGB
        VkFormat desiredFormat = wantSRGB ? VK_FORMAT_R8G8B8A8_SRGB : VK_FORMAT_R8G8B8A8_UNORM;
        if (desiredFormat == surfaceFormats[i].format) {
            surfaceFormat = surfaceFormats[i].format;
            colorSpace = surfaceFormats[i].colorSpace;
        }
    }

    if (VK_FORMAT_UNDEFINED == surfaceFormat) {
        return false;
    }

    // If mailbox mode is available, use it, as it is the lowest-latency non-
    // tearing mode. If not, fall back to FIFO which is always available.
    VkPresentModeKHR mode = VK_PRESENT_MODE_FIFO_KHR;
    for (uint32_t i = 0; i < presentModeCount; ++i) {
        // use mailbox
        if (VK_PRESENT_MODE_MAILBOX_KHR == presentModes[i]) {
            mode = presentModes[i];
            break;
        }
    }

    VkSwapchainCreateInfoKHR swapchainCreateInfo;
    memset(&swapchainCreateInfo, 0, sizeof(VkSwapchainCreateInfoKHR));
    swapchainCreateInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
    swapchainCreateInfo.surface = surface->mVkSurface;
    swapchainCreateInfo.minImageCount = imageCount;
    swapchainCreateInfo.imageFormat = surfaceFormat;
    swapchainCreateInfo.imageColorSpace = colorSpace;
    swapchainCreateInfo.imageExtent = extent;
    swapchainCreateInfo.imageArrayLayers = 1;
    swapchainCreateInfo.imageUsage = usageFlags;

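    // If the graphics and present queues come from different families, the
    // swapchain images must be shared between them (VK_SHARING_MODE_CONCURRENT);
    // otherwise exclusive ownership is cheaper.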
    uint32_t queueFamilies[] = { mBackendContext->fGraphicsQueueIndex, mPresentQueueIndex };
    if (mBackendContext->fGraphicsQueueIndex != mPresentQueueIndex) {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
        swapchainCreateInfo.queueFamilyIndexCount = 2;
        swapchainCreateInfo.pQueueFamilyIndices = queueFamilies;
    } else {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
        swapchainCreateInfo.queueFamilyIndexCount = 0;
        swapchainCreateInfo.pQueueFamilyIndices = nullptr;
    }

    swapchainCreateInfo.preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
    swapchainCreateInfo.compositeAlpha = composite_alpha;
    swapchainCreateInfo.presentMode = mode;
    swapchainCreateInfo.clipped = true;
    swapchainCreateInfo.oldSwapchain = surface->mSwapchain;

    res = mCreateSwapchainKHR(mBackendContext->fDevice, &swapchainCreateInfo, nullptr,
            &surface->mSwapchain);
    if (VK_SUCCESS != res) {
        return false;
    }

    // destroy the old swapchain
    if (swapchainCreateInfo.oldSwapchain != VK_NULL_HANDLE) {
        mDeviceWaitIdle(mBackendContext->fDevice);

        destroyBuffers(surface);

        mDestroySwapchainKHR(mBackendContext->fDevice, swapchainCreateInfo.oldSwapchain, nullptr);
    }

    createBuffers(surface, surfaceFormat, extent);

    return true;
}

VulkanSurface* VulkanManager::createSurface(ANativeWindow* window) {
    initialize();

    if (!window) {
        return nullptr;
    }

    VulkanSurface* surface = new VulkanSurface();

    VkAndroidSurfaceCreateInfoKHR surfaceCreateInfo;
    memset(&surfaceCreateInfo, 0, sizeof(VkAndroidSurfaceCreateInfoKHR));
    surfaceCreateInfo.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
    surfaceCreateInfo.pNext = nullptr;
    surfaceCreateInfo.flags = 0;
    surfaceCreateInfo.window = window;

    VkResult res = mCreateAndroidSurfaceKHR(mBackendContext->fInstance, &surfaceCreateInfo,
            nullptr, &surface->mVkSurface);
    if (VK_SUCCESS != res) {
        delete surface;
        return nullptr;
    }

    SkDEBUGCODE(
        VkBool32 supported;
        res = mGetPhysicalDeviceSurfaceSupportKHR(mBackendContext->fPhysicalDevice,
                mPresentQueueIndex, surface->mVkSurface, &supported);
        // All physical devices and queue families on Android must be capable of presentation
        // with any native window.
        SkASSERT(VK_SUCCESS == res && supported);
    );

    if (!createSwapchain(surface)) {
        destroySurface(surface);
        return nullptr;
    }

    return surface;
}

// Helper to know which src stage flags we need to set when transitioning to the present layout
static VkPipelineStageFlags layoutToPipelineStageFlags(const VkImageLayout layout) {
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_TRANSFER_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        return VK_PIPELINE_STAGE_HOST_BIT;
    }

    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
    return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
}

// Helper to know which src access mask we need to set when transitioning to the present layout
static VkAccessFlags layoutToSrcAccessMask(const VkImageLayout layout) {
    VkAccessFlags flags = 0;
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_TRANSFER_WRITE_BIT |
                VK_ACCESS_TRANSFER_READ_BIT |
                VK_ACCESS_SHADER_READ_BIT |
                VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_HOST_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        flags = VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        flags = VK_ACCESS_SHADER_READ_BIT;
    }
    return flags;
}

void VulkanManager::swapBuffers(VulkanSurface* surface) {
    VulkanSurface::BackbufferInfo* backbuffer = surface->mBackbuffers +
            surface->mCurrentBackbufferIndex;
    GrVkImageInfo* imageInfo;
    SkSurface* skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface.get();
    skSurface->getRenderTargetHandle((GrBackendObject*)&imageInfo,
            SkSurface::kFlushRead_BackendHandleAccess);
    // Check to make sure we never change the underlying wrapped image
    SkASSERT(imageInfo->fImage == surface->mImages[backbuffer->mImageIndex]);

    // We need to transition the image to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR and make sure that all
    // previous work is complete before presenting. So we first add the necessary barrier here.
    VkImageLayout layout = imageInfo->fImageLayout;
    VkPipelineStageFlags srcStageMask = layoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = layoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;

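    // This barrier hands ownership of the image from the graphics queue family
    // to the present queue family; when both indices are the same it reduces to
    // just the layout transition.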
    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,    // sType
        NULL,                                      // pNext
        srcAccessMask,                             // srcAccessMask
        dstAccessMask,                             // dstAccessMask
        layout,                                    // oldLayout
        VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,           // newLayout
        mBackendContext->fGraphicsQueueIndex,      // srcQueueFamilyIndex
        mPresentQueueIndex,                        // dstQueueFamilyIndex
        surface->mImages[backbuffer->mImageIndex], // image
        { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 }  // subresourceRange
    };

    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[1], 0);
    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[1], &info);
    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[1], srcStageMask, dstStageMask, 0,
            0, nullptr, 0, nullptr, 1, &imageMemoryBarrier);
    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[1]);

    surface->mImageInfos[backbuffer->mImageIndex].mImageLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

    // insert the layout transfer into the queue; no semaphore wait is needed
    // here because the pipeline barrier above orders this submission against
    // the rendering work already submitted on the same queue
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 0;
    submitInfo.pWaitDstStageMask = nullptr;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[1];
    submitInfo.signalSemaphoreCount = 1;
    // When this command buffer finishes we will signal this semaphore so that we know it is now
    // safe to present the image to the screen.
    submitInfo.pSignalSemaphores = &backbuffer->mRenderSemaphore;

    // Attach second fence to submission here so we can track when the command buffer finishes.
    mQueueSubmit(mBackendContext->fQueue, 1, &submitInfo, backbuffer->mUsageFences[1]);

    // Submit present operation to the present queue. We use a semaphore here to make sure all
    // rendering to the image is complete and that the layout has been changed to present on the
    // graphics queue.
    const VkPresentInfoKHR presentInfo =
    {
        VK_STRUCTURE_TYPE_PRESENT_INFO_KHR, // sType
        NULL, // pNext
        1, // waitSemaphoreCount
        &backbuffer->mRenderSemaphore, // pWaitSemaphores
        1, // swapchainCount
        &surface->mSwapchain, // pSwapchains
        &backbuffer->mImageIndex, // pImageIndices
        NULL // pResults
    };

    mQueuePresentKHR(mPresentQueue, &presentInfo);

    surface->mBackbuffer.reset();
    surface->mImageInfos[backbuffer->mImageIndex].mLastUsed = surface->mCurrentTime;
    surface->mImageInfos[backbuffer->mImageIndex].mInvalid = false;
    surface->mCurrentTime++;
}

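// Returns the age of the current backbuffer's contents in frames, assumed to
// follow the EGL_EXT_buffer_age convention: 0 means the contents are undefined
// and the caller must redraw the whole frame.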
int VulkanManager::getAge(VulkanSurface* surface) {
    VulkanSurface::BackbufferInfo* backbuffer = surface->mBackbuffers +
            surface->mCurrentBackbufferIndex;
    if (mSwapBehavior == SwapBehavior::Discard
            || surface->mImageInfos[backbuffer->mImageIndex].mInvalid) {
        return 0;
    }
    uint16_t lastUsed = surface->mImageInfos[backbuffer->mImageIndex].mLastUsed;
    return surface->mCurrentTime - lastUsed;
}

} /* namespace renderthread */
} /* namespace uirenderer */
} /* namespace android */