/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "VulkanManager.h"

#include "DeviceInfo.h"
#include "Properties.h"
#include "RenderThread.h"
#include "renderstate/RenderState.h"
#include "utils/FatVector.h"

#include <GrContext.h>
#include <GrTypes.h>
#include <vk/GrVkTypes.h>

namespace android {
namespace uirenderer {
namespace renderthread {

#define GET_PROC(F) m ## F = (PFN_vk ## F) vkGetInstanceProcAddr(instance, "vk" #F)
#define GET_DEV_PROC(F) m ## F = (PFN_vk ## F) vkGetDeviceProcAddr(device, "vk" #F)
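// For example, GET_PROC(DestroySurfaceKHR) expands (after string concatenation) to
//     mDestroySurfaceKHR = (PFN_vkDestroySurfaceKHR) vkGetInstanceProcAddr(instance, "vkDestroySurfaceKHR");
// caching each entry point in a member function pointer instead of resolving it on every call.
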
VulkanManager::VulkanManager(RenderThread& thread) : mRenderThread(thread) {
}

void VulkanManager::destroy() {
    if (!hasVkContext()) return;

    mRenderThread.renderState().onVkContextDestroyed();
    mRenderThread.setGrContext(nullptr);

    if (VK_NULL_HANDLE != mCommandPool) {
        mDestroyCommandPool(mBackendContext->fDevice, mCommandPool, nullptr);
        mCommandPool = VK_NULL_HANDLE;
    }
    mBackendContext.reset();
}

void VulkanManager::initialize() {
    if (hasVkContext()) { return; }

    auto canPresent = [](VkInstance, VkPhysicalDevice, uint32_t) { return true; };

    mBackendContext.reset(GrVkBackendContext::Create(&mPresentQueueIndex, canPresent));

    // Get the addresses of all the Vulkan functions we need.
    VkInstance instance = mBackendContext->fInstance;
    VkDevice device = mBackendContext->fDevice;
    GET_PROC(CreateAndroidSurfaceKHR);
    GET_PROC(DestroySurfaceKHR);
    GET_PROC(GetPhysicalDeviceSurfaceSupportKHR);
    GET_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
    GET_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
    GET_PROC(GetPhysicalDeviceSurfacePresentModesKHR);
    GET_DEV_PROC(CreateSwapchainKHR);
    GET_DEV_PROC(DestroySwapchainKHR);
    GET_DEV_PROC(GetSwapchainImagesKHR);
    GET_DEV_PROC(AcquireNextImageKHR);
    GET_DEV_PROC(QueuePresentKHR);
    GET_DEV_PROC(CreateCommandPool);
    GET_DEV_PROC(DestroyCommandPool);
    GET_DEV_PROC(AllocateCommandBuffers);
    GET_DEV_PROC(FreeCommandBuffers);
    GET_DEV_PROC(ResetCommandBuffer);
    GET_DEV_PROC(BeginCommandBuffer);
    GET_DEV_PROC(EndCommandBuffer);
    GET_DEV_PROC(CmdPipelineBarrier);
    GET_DEV_PROC(GetDeviceQueue);
    GET_DEV_PROC(QueueSubmit);
    GET_DEV_PROC(QueueWaitIdle);
    GET_DEV_PROC(DeviceWaitIdle);
    GET_DEV_PROC(CreateSemaphore);
    GET_DEV_PROC(DestroySemaphore);
    GET_DEV_PROC(CreateFence);
    GET_DEV_PROC(DestroyFence);
    GET_DEV_PROC(WaitForFences);
    GET_DEV_PROC(ResetFences);

    // Create the command pool for the command buffers.
    if (VK_NULL_HANDLE == mCommandPool) {
        VkCommandPoolCreateInfo commandPoolInfo;
        memset(&commandPoolInfo, 0, sizeof(VkCommandPoolCreateInfo));
        commandPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
        // This needs to be on the render queue's family.
        commandPoolInfo.queueFamilyIndex = mBackendContext->fGraphicsQueueIndex;
        commandPoolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
        SkDEBUGCODE(VkResult res =) mCreateCommandPool(mBackendContext->fDevice,
                &commandPoolInfo, nullptr, &mCommandPool);
        SkASSERT(VK_SUCCESS == res);
    }

    mGetDeviceQueue(mBackendContext->fDevice, mPresentQueueIndex, 0, &mPresentQueue);

    mRenderThread.setGrContext(GrContext::Create(kVulkan_GrBackend,
            (GrBackendContext) mBackendContext.get()));
    DeviceInfo::initialize(mRenderThread.getGrContext()->caps()->maxRenderTargetSize());

    if (Properties::enablePartialUpdates && Properties::useBufferAge) {
        mSwapBehavior = SwapBehavior::BufferAge;
    }

    mRenderThread.renderState().onVkContextCreated();
}

// Returns the next BackbufferInfo to use for the next draw. The function will make sure all
// previous uses have finished before returning.
VulkanSurface::BackbufferInfo* VulkanManager::getAvailableBackbuffer(VulkanSurface* surface) {
    SkASSERT(surface->mBackbuffers);

    ++surface->mCurrentBackbufferIndex;
    if (surface->mCurrentBackbufferIndex > surface->mImageCount) {
        surface->mCurrentBackbufferIndex = 0;
    }
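    // The backbuffer array holds mImageCount + 1 entries (see createBuffers()), so the valid
    // indices are 0 through mImageCount inclusive; with a 3-image swapchain, for example, the
    // index cycles 0, 1, 2, 3, 0, ...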

    VulkanSurface::BackbufferInfo* backbuffer = surface->mBackbuffers +
            surface->mCurrentBackbufferIndex;

    // Before we reuse a backbuffer, make sure its fences have all signaled so that we can safely
    // reuse its command buffers.
    VkResult res = mWaitForFences(mBackendContext->fDevice, 2, backbuffer->mUsageFences,
            true, UINT64_MAX);
    if (res != VK_SUCCESS) {
        return nullptr;
    }

    return backbuffer;
}

SkSurface* VulkanManager::getBackbufferSurface(VulkanSurface* surface) {
    VulkanSurface::BackbufferInfo* backbuffer = getAvailableBackbuffer(surface);
    SkASSERT(backbuffer);

    VkResult res;

    res = mResetFences(mBackendContext->fDevice, 2, backbuffer->mUsageFences);
    SkASSERT(VK_SUCCESS == res);

    // The acquire will signal the attached mAcquireSemaphore. We use this to know the image has
    // finished presenting and that it is safe to begin sending new commands to the returned image.
    res = mAcquireNextImageKHR(mBackendContext->fDevice, surface->mSwapchain, UINT64_MAX,
            backbuffer->mAcquireSemaphore, VK_NULL_HANDLE, &backbuffer->mImageIndex);

    if (VK_ERROR_SURFACE_LOST_KHR == res) {
        // TODO: figure out how to create a new vkSurface without the platformData*;
        // maybe use attach somehow? But we need a Window.
        return nullptr;
    }
    if (VK_ERROR_OUT_OF_DATE_KHR == res) {
        // tear the swapchain down and try again
        if (!createSwapchain(surface)) {
            return nullptr;
        }
        backbuffer = getAvailableBackbuffer(surface);
        res = mResetFences(mBackendContext->fDevice, 2, backbuffer->mUsageFences);
        SkASSERT(VK_SUCCESS == res);

        // acquire the image
        res = mAcquireNextImageKHR(mBackendContext->fDevice, surface->mSwapchain, UINT64_MAX,
                backbuffer->mAcquireSemaphore, VK_NULL_HANDLE, &backbuffer->mImageIndex);

        if (VK_SUCCESS != res) {
            return nullptr;
        }
    }

    // set up layout transfer from initial to color attachment
    VkImageLayout layout = surface->mImageInfos[backbuffer->mImageIndex].mImageLayout;
    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout || VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout);
    VkPipelineStageFlags srcStageMask = (VK_IMAGE_LAYOUT_UNDEFINED == layout) ?
                                        VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT :
                                        VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkAccessFlags srcAccessMask = (VK_IMAGE_LAYOUT_UNDEFINED == layout) ?
                                  0 : VK_ACCESS_MEMORY_READ_BIT;
    VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;

    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,    // sType
        NULL,                                      // pNext
        srcAccessMask,                             // srcAccessMask
        dstAccessMask,                             // dstAccessMask
        layout,                                    // oldLayout
        VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,  // newLayout
        mPresentQueueIndex,                        // srcQueueFamilyIndex
        mBackendContext->fGraphicsQueueIndex,      // dstQueueFamilyIndex
        surface->mImages[backbuffer->mImageIndex], // image
        { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 }  // subresourceRange
    };
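    // Note that srcQueueFamilyIndex/dstQueueFamilyIndex differ when presentation runs on a
    // separate queue family, so this barrier also transfers ownership of the image from the
    // present queue family back to the graphics queue family before rendering.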
    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[0], 0);

    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[0], &info);

    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[0], srcStageMask, dstStageMask, 0,
            0, nullptr, 0, nullptr, 1, &imageMemoryBarrier);

    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[0]);

    VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 1;
    // Wait to make sure the acquire semaphore set above has signaled.
    submitInfo.pWaitSemaphores = &backbuffer->mAcquireSemaphore;
    submitInfo.pWaitDstStageMask = &waitDstStageFlags;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[0];
    submitInfo.signalSemaphoreCount = 0;

    // Attach the first fence to this submission so we can track when the command buffer finishes.
    mQueueSubmit(mBackendContext->fQueue, 1, &submitInfo, backbuffer->mUsageFences[0]);

    // We need to notify Skia that we changed the layout of the wrapped VkImage.
    GrVkImageInfo* imageInfo;
    sk_sp<SkSurface> skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface;
    skSurface->getRenderTargetHandle((GrBackendObject*)&imageInfo,
            SkSurface::kFlushRead_BackendHandleAccess);
    imageInfo->updateImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);

    surface->mBackbuffer = std::move(skSurface);
    return surface->mBackbuffer.get();
}

void VulkanManager::destroyBuffers(VulkanSurface* surface) {
    if (surface->mBackbuffers) {
        for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
            mWaitForFences(mBackendContext->fDevice, 2, surface->mBackbuffers[i].mUsageFences,
                    true, UINT64_MAX);
            surface->mBackbuffers[i].mImageIndex = -1;
            mDestroySemaphore(mBackendContext->fDevice, surface->mBackbuffers[i].mAcquireSemaphore,
                    nullptr);
            mDestroySemaphore(mBackendContext->fDevice, surface->mBackbuffers[i].mRenderSemaphore,
                    nullptr);
            mFreeCommandBuffers(mBackendContext->fDevice, mCommandPool, 2,
                    surface->mBackbuffers[i].mTransitionCmdBuffers);
            mDestroyFence(mBackendContext->fDevice, surface->mBackbuffers[i].mUsageFences[0],
                    nullptr);
            mDestroyFence(mBackendContext->fDevice, surface->mBackbuffers[i].mUsageFences[1],
                    nullptr);
        }
    }

    delete[] surface->mBackbuffers;
    surface->mBackbuffers = nullptr;
    delete[] surface->mImageInfos;
    surface->mImageInfos = nullptr;
    delete[] surface->mImages;
    surface->mImages = nullptr;
}

void VulkanManager::destroySurface(VulkanSurface* surface) {
    // Make sure all submit commands have finished before starting to destroy objects.
    if (VK_NULL_HANDLE != mPresentQueue) {
        mQueueWaitIdle(mPresentQueue);
    }
    mDeviceWaitIdle(mBackendContext->fDevice);

    destroyBuffers(surface);

    if (VK_NULL_HANDLE != surface->mSwapchain) {
        mDestroySwapchainKHR(mBackendContext->fDevice, surface->mSwapchain, nullptr);
        surface->mSwapchain = VK_NULL_HANDLE;
    }

    if (VK_NULL_HANDLE != surface->mVkSurface) {
        mDestroySurfaceKHR(mBackendContext->fInstance, surface->mVkSurface, nullptr);
        surface->mVkSurface = VK_NULL_HANDLE;
    }
    delete surface;
}

void VulkanManager::createBuffers(VulkanSurface* surface, VkFormat format, VkExtent2D extent) {
    mGetSwapchainImagesKHR(mBackendContext->fDevice, surface->mSwapchain, &surface->mImageCount,
            nullptr);
    SkASSERT(surface->mImageCount);
    surface->mImages = new VkImage[surface->mImageCount];
    mGetSwapchainImagesKHR(mBackendContext->fDevice, surface->mSwapchain,
            &surface->mImageCount, surface->mImages);

    SkSurfaceProps props(0, kUnknown_SkPixelGeometry);

    bool wantSRGB = VK_FORMAT_R8G8B8A8_SRGB == format;
    GrPixelConfig config = wantSRGB ? kSRGBA_8888_GrPixelConfig : kRGBA_8888_GrPixelConfig;

    // set up initial image layouts and create surfaces
    surface->mImageInfos = new VulkanSurface::ImageInfo[surface->mImageCount];
    for (uint32_t i = 0; i < surface->mImageCount; ++i) {
        GrBackendRenderTargetDesc desc;
        GrVkImageInfo info;
        info.fImage = surface->mImages[i];
        info.fAlloc = { VK_NULL_HANDLE, 0, 0, 0 };
        info.fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
        info.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
        info.fFormat = format;
        info.fLevelCount = 1;

        desc.fWidth = extent.width;
        desc.fHeight = extent.height;
        desc.fConfig = config;
        desc.fOrigin = kTopLeft_GrSurfaceOrigin;
        desc.fSampleCnt = 0;
        desc.fStencilBits = 0;
        desc.fRenderTargetHandle = (GrBackendObject) &info;

        VulkanSurface::ImageInfo& imageInfo = surface->mImageInfos[i];
        imageInfo.mSurface = SkSurface::MakeFromBackendRenderTarget(mRenderThread.getGrContext(),
                desc, &props);
    }

    SkASSERT(mCommandPool != VK_NULL_HANDLE);

    // set up the backbuffers
    VkSemaphoreCreateInfo semaphoreInfo;
    memset(&semaphoreInfo, 0, sizeof(VkSemaphoreCreateInfo));
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    VkCommandBufferAllocateInfo commandBuffersInfo;
    memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
    commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    commandBuffersInfo.pNext = nullptr;
    commandBuffersInfo.commandPool = mCommandPool;
    commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    commandBuffersInfo.commandBufferCount = 2;
    VkFenceCreateInfo fenceInfo;
    memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
    fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    fenceInfo.pNext = nullptr;
    fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;
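    // The fences start out signaled so the very first wait in getAvailableBackbuffer() returns
    // immediately instead of blocking on a fence that was never submitted.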

    // we create one additional backbuffer structure here, because we want to
    // give the command buffers they contain a chance to finish before we cycle back
    surface->mBackbuffers = new VulkanSurface::BackbufferInfo[surface->mImageCount + 1];
    for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
        SkDEBUGCODE(VkResult res);
        surface->mBackbuffers[i].mImageIndex = -1;
        SkDEBUGCODE(res = ) mCreateSemaphore(mBackendContext->fDevice, &semaphoreInfo, nullptr,
                &surface->mBackbuffers[i].mAcquireSemaphore);
        SkDEBUGCODE(res = ) mCreateSemaphore(mBackendContext->fDevice, &semaphoreInfo, nullptr,
                &surface->mBackbuffers[i].mRenderSemaphore);
        SkDEBUGCODE(res = ) mAllocateCommandBuffers(mBackendContext->fDevice, &commandBuffersInfo,
                surface->mBackbuffers[i].mTransitionCmdBuffers);
        SkDEBUGCODE(res = ) mCreateFence(mBackendContext->fDevice, &fenceInfo, nullptr,
                &surface->mBackbuffers[i].mUsageFences[0]);
        SkDEBUGCODE(res = ) mCreateFence(mBackendContext->fDevice, &fenceInfo, nullptr,
                &surface->mBackbuffers[i].mUsageFences[1]);
        SkASSERT(VK_SUCCESS == res);
    }
    surface->mCurrentBackbufferIndex = surface->mImageCount;
}

bool VulkanManager::createSwapchain(VulkanSurface* surface) {
    // check for capabilities
    VkSurfaceCapabilitiesKHR caps;
    VkResult res = mGetPhysicalDeviceSurfaceCapabilitiesKHR(mBackendContext->fPhysicalDevice,
            surface->mVkSurface, &caps);
    if (VK_SUCCESS != res) {
        return false;
    }

    uint32_t surfaceFormatCount;
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mBackendContext->fPhysicalDevice, surface->mVkSurface,
            &surfaceFormatCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    FatVector<VkSurfaceFormatKHR, 4> surfaceFormats(surfaceFormatCount);
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mBackendContext->fPhysicalDevice, surface->mVkSurface,
            &surfaceFormatCount, surfaceFormats.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    uint32_t presentModeCount;
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mBackendContext->fPhysicalDevice,
            surface->mVkSurface, &presentModeCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    FatVector<VkPresentModeKHR, VK_PRESENT_MODE_RANGE_SIZE_KHR> presentModes(presentModeCount);
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mBackendContext->fPhysicalDevice,
            surface->mVkSurface, &presentModeCount, presentModes.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    VkExtent2D extent = caps.currentExtent;
    // clamp width; to handle currentExtent of -1 and protect us from broken hints
    if (extent.width < caps.minImageExtent.width) {
        extent.width = caps.minImageExtent.width;
    }
    SkASSERT(extent.width <= caps.maxImageExtent.width);
    // clamp height
    if (extent.height < caps.minImageExtent.height) {
        extent.height = caps.minImageExtent.height;
    }
    SkASSERT(extent.height <= caps.maxImageExtent.height);

    uint32_t imageCount = caps.minImageCount + 2;
    if (caps.maxImageCount > 0 && imageCount > caps.maxImageCount) {
        // Application must settle for fewer images than desired:
        imageCount = caps.maxImageCount;
    }
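    // Requesting minImageCount + 2 gives headroom for one image on screen, one queued for
    // presentation, and one being rendered, so acquires rarely block.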

    // Currently Skia requires the images to be color attachments and support all transfer
    // operations.
    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    SkASSERT((caps.supportedUsageFlags & usageFlags) == usageFlags);
    SkASSERT(caps.supportedTransforms & caps.currentTransform);
    SkASSERT(caps.supportedCompositeAlpha & (VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR |
                                             VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR));
    VkCompositeAlphaFlagBitsKHR composite_alpha =
            (caps.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR) ?
            VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR :
            VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;

    // Pick our surface format. For now, just make sure it matches our sRGB request:
    VkFormat surfaceFormat = VK_FORMAT_UNDEFINED;
    VkColorSpaceKHR colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;

    bool wantSRGB = false;
#ifdef ANDROID_ENABLE_LINEAR_BLENDING
    wantSRGB = true;
#endif
    for (uint32_t i = 0; i < surfaceFormatCount; ++i) {
        // We are assuming we can get either R8G8B8A8_UNORM or R8G8B8A8_SRGB
        VkFormat desiredFormat = wantSRGB ? VK_FORMAT_R8G8B8A8_SRGB : VK_FORMAT_R8G8B8A8_UNORM;
        if (desiredFormat == surfaceFormats[i].format) {
            surfaceFormat = surfaceFormats[i].format;
            colorSpace = surfaceFormats[i].colorSpace;
        }
    }

    if (VK_FORMAT_UNDEFINED == surfaceFormat) {
        return false;
    }

    // If mailbox mode is available, use it, as it is the lowest-latency non-
    // tearing mode. If not, fall back to FIFO, which is always available.
    VkPresentModeKHR mode = VK_PRESENT_MODE_FIFO_KHR;
    for (uint32_t i = 0; i < presentModeCount; ++i) {
        // use mailbox
        if (VK_PRESENT_MODE_MAILBOX_KHR == presentModes[i]) {
            mode = presentModes[i];
            break;
        }
    }

    VkSwapchainCreateInfoKHR swapchainCreateInfo;
    memset(&swapchainCreateInfo, 0, sizeof(VkSwapchainCreateInfoKHR));
    swapchainCreateInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
    swapchainCreateInfo.surface = surface->mVkSurface;
    swapchainCreateInfo.minImageCount = imageCount;
    swapchainCreateInfo.imageFormat = surfaceFormat;
    swapchainCreateInfo.imageColorSpace = colorSpace;
    swapchainCreateInfo.imageExtent = extent;
    swapchainCreateInfo.imageArrayLayers = 1;
    swapchainCreateInfo.imageUsage = usageFlags;

    uint32_t queueFamilies[] = { mBackendContext->fGraphicsQueueIndex, mPresentQueueIndex };
    if (mBackendContext->fGraphicsQueueIndex != mPresentQueueIndex) {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
        swapchainCreateInfo.queueFamilyIndexCount = 2;
        swapchainCreateInfo.pQueueFamilyIndices = queueFamilies;
    } else {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
        swapchainCreateInfo.queueFamilyIndexCount = 0;
        swapchainCreateInfo.pQueueFamilyIndices = nullptr;
    }
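    // VK_SHARING_MODE_CONCURRENT lets both queue families access the images without exclusive
    // ownership, at some performance cost; when graphics and present share a family,
    // EXCLUSIVE is the cheaper choice.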

    swapchainCreateInfo.preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
    swapchainCreateInfo.compositeAlpha = composite_alpha;
    swapchainCreateInfo.presentMode = mode;
    swapchainCreateInfo.clipped = true;
    swapchainCreateInfo.oldSwapchain = surface->mSwapchain;

    res = mCreateSwapchainKHR(mBackendContext->fDevice, &swapchainCreateInfo, nullptr,
            &surface->mSwapchain);
    if (VK_SUCCESS != res) {
        return false;
    }

    // destroy the old swapchain
    if (swapchainCreateInfo.oldSwapchain != VK_NULL_HANDLE) {
        mDeviceWaitIdle(mBackendContext->fDevice);

        destroyBuffers(surface);

        mDestroySwapchainKHR(mBackendContext->fDevice, swapchainCreateInfo.oldSwapchain, nullptr);
    }

    createBuffers(surface, surfaceFormat, extent);

    return true;
}

VulkanSurface* VulkanManager::createSurface(ANativeWindow* window) {
    initialize();

    if (!window) {
        return nullptr;
    }

    VulkanSurface* surface = new VulkanSurface();

    VkAndroidSurfaceCreateInfoKHR surfaceCreateInfo;
    memset(&surfaceCreateInfo, 0, sizeof(VkAndroidSurfaceCreateInfoKHR));
    surfaceCreateInfo.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
    surfaceCreateInfo.pNext = nullptr;
    surfaceCreateInfo.flags = 0;
    surfaceCreateInfo.window = window;

    VkResult res = mCreateAndroidSurfaceKHR(mBackendContext->fInstance, &surfaceCreateInfo,
            nullptr, &surface->mVkSurface);
    if (VK_SUCCESS != res) {
        delete surface;
        return nullptr;
    }

SkDEBUGCODE(
    VkBool32 supported;
    res = mGetPhysicalDeviceSurfaceSupportKHR(mBackendContext->fPhysicalDevice,
            mPresentQueueIndex, surface->mVkSurface, &supported);
    // All physical devices and queue families on Android must be capable of presentation with any
    // native window.
    SkASSERT(VK_SUCCESS == res && supported);
);

    if (!createSwapchain(surface)) {
        destroySurface(surface);
        return nullptr;
    }

    return surface;
}

// Helper to know which src stage flags we need to set when transitioning to the present layout
static VkPipelineStageFlags layoutToPipelineStageFlags(const VkImageLayout layout) {
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_TRANSFER_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        return VK_PIPELINE_STAGE_HOST_BIT;
    }

    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
    return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
}

// Helper to know which src access mask we need to set when transitioning to the present layout
static VkAccessFlags layoutToSrcAccessMask(const VkImageLayout layout) {
    VkAccessFlags flags = 0;
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_TRANSFER_WRITE_BIT |
                VK_ACCESS_TRANSFER_READ_BIT |
                VK_ACCESS_SHADER_READ_BIT |
                VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_HOST_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        flags = VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        flags = VK_ACCESS_SHADER_READ_BIT;
    }
    return flags;
}
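
// For example, after HWUI finishes drawing a frame the backbuffer is typically in
// VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, so the helpers above produce
// VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT and VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT: the present
// barrier in swapBuffers() then waits for all graphics work and makes the attachment writes
// visible before the image is handed to the present queue.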

void VulkanManager::swapBuffers(VulkanSurface* surface) {
    VulkanSurface::BackbufferInfo* backbuffer = surface->mBackbuffers +
            surface->mCurrentBackbufferIndex;
    GrVkImageInfo* imageInfo;
    SkSurface* skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface.get();
    skSurface->getRenderTargetHandle((GrBackendObject*)&imageInfo,
            SkSurface::kFlushRead_BackendHandleAccess);
    // Check to make sure we never change the actually wrapped image.
    SkASSERT(imageInfo->fImage == surface->mImages[backbuffer->mImageIndex]);

    // We need to transition the image to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR and make sure that all
    // previous work is complete before presenting, so we first add the necessary barrier here.
    VkImageLayout layout = imageInfo->fImageLayout;
    VkPipelineStageFlags srcStageMask = layoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = layoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;

    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,    // sType
        NULL,                                      // pNext
        srcAccessMask,                             // srcAccessMask
        dstAccessMask,                             // dstAccessMask
        layout,                                    // oldLayout
        VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,           // newLayout
        mBackendContext->fGraphicsQueueIndex,      // srcQueueFamilyIndex
        mPresentQueueIndex,                        // dstQueueFamilyIndex
        surface->mImages[backbuffer->mImageIndex], // image
        { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 }  // subresourceRange
    };

    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[1], 0);
    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[1], &info);
    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[1], srcStageMask, dstStageMask, 0,
            0, nullptr, 0, nullptr, 1, &imageMemoryBarrier);
    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[1]);

    surface->mImageInfos[backbuffer->mImageIndex].mImageLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

    // insert the layout transfer into the queue
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 0;
    submitInfo.pWaitDstStageMask = 0;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[1];
    submitInfo.signalSemaphoreCount = 1;
    // When this command buffer finishes we will signal this semaphore so that we know it is now
    // safe to present the image to the screen.
    submitInfo.pSignalSemaphores = &backbuffer->mRenderSemaphore;

    // Attach the second fence to this submission so we can track when the command buffer finishes.
    mQueueSubmit(mBackendContext->fQueue, 1, &submitInfo, backbuffer->mUsageFences[1]);
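    // Together with the first fence from getBackbufferSurface(), both entries of mUsageFences
    // are now in flight; getAvailableBackbuffer() waits on the pair before this backbuffer can
    // be recycled.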

    // Submit the present operation to the present queue. We use a semaphore here to make sure
    // all rendering to the image is complete and that the layout has been changed to present on
    // the graphics queue.
    const VkPresentInfoKHR presentInfo =
    {
        VK_STRUCTURE_TYPE_PRESENT_INFO_KHR, // sType
        NULL,                               // pNext
        1,                                  // waitSemaphoreCount
        &backbuffer->mRenderSemaphore,      // pWaitSemaphores
        1,                                  // swapchainCount
        &surface->mSwapchain,               // pSwapchains
        &backbuffer->mImageIndex,           // pImageIndices
        NULL                                // pResults
    };

    mQueuePresentKHR(mPresentQueue, &presentInfo);

    surface->mBackbuffer.reset();
    surface->mImageInfos[backbuffer->mImageIndex].mLastUsed = surface->mCurrentTime;
    surface->mImageInfos[backbuffer->mImageIndex].mInvalid = false;
    surface->mCurrentTime++;
}

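// Returns the buffer age of the backbuffer most recently handed out for this surface: an age of
// N means its contents were last rendered N frames ago (matching EGL_EXT_buffer_age semantics),
// while 0 means the contents are undefined and the caller must redraw everything.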
int VulkanManager::getAge(VulkanSurface* surface) {
    VulkanSurface::BackbufferInfo* backbuffer = surface->mBackbuffers +
            surface->mCurrentBackbufferIndex;
    if (mSwapBehavior == SwapBehavior::Discard
            || surface->mImageInfos[backbuffer->mImageIndex].mInvalid) {
        return 0;
    }
    uint16_t lastUsed = surface->mImageInfos[backbuffer->mImageIndex].mLastUsed;
    return surface->mCurrentTime - lastUsed;
}

} /* namespace renderthread */
} /* namespace uirenderer */
} /* namespace android */