blob: a745320ca8844ca96be8a3f30d2dee939f91fa1f [file] [log] [blame]
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001/*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "VulkanManager.h"
18
19#include "DeviceInfo.h"
Greg Danielcd558522016-11-17 13:31:40 -050020#include "Properties.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050021#include "RenderThread.h"
Greg Daniel45ec62b2017-01-04 14:27:00 -050022#include "renderstate/RenderState.h"
Ben Wagnereec27d52017-01-11 15:32:07 -050023#include "utils/FatVector.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050024
25#include <GrContext.h>
26#include <GrTypes.h>
27#include <vk/GrVkTypes.h>
28
29namespace android {
30namespace uirenderer {
31namespace renderthread {
32
33#define GET_PROC(F) m ## F = (PFN_vk ## F) vkGetInstanceProcAddr(instance, "vk" #F)
34#define GET_DEV_PROC(F) m ## F = (PFN_vk ## F) vkGetDeviceProcAddr(device, "vk" #F)
35
36VulkanManager::VulkanManager(RenderThread& thread) : mRenderThread(thread) {
37}
38
// Tears down the Vulkan context: notifies the RenderState listeners, drops the
// GrContext from the render thread, destroys the command pool, and finally
// releases the backend context. No-op if no context was ever created.
void VulkanManager::destroy() {
    if (!hasVkContext()) return;

    // Listeners must be told before the underlying device objects go away.
    mRenderThread.renderState().onVkContextDestroyed();
    mRenderThread.setGrContext(nullptr);

    if (VK_NULL_HANDLE != mCommandPool) {
        mDestroyCommandPool(mBackendContext->fDevice, mCommandPool, nullptr);
        mCommandPool = VK_NULL_HANDLE;
    }
    // Release the backend context last: the calls above still use its fDevice.
    mBackendContext.reset();
}
51
// Creates the Vulkan backend context, resolves all instance- and device-level
// entry points used by this class, creates the command pool for layout
// transition command buffers, fetches the present queue, and hands a freshly
// created GrContext to the render thread. Safe to call repeatedly: returns
// immediately once a context exists.
void VulkanManager::initialize() {
    if (hasVkContext()) { return; }

    // On Android any queue family is expected to be able to present to a
    // native window, so the canPresent callback reports support unconditionally.
    auto canPresent = [](VkInstance, VkPhysicalDevice, uint32_t) { return true; };

    // NOTE(review): the result of GrVkBackendContext::Create is dereferenced
    // below without a null check — assumes creation cannot fail here; confirm.
    mBackendContext.reset(GrVkBackendContext::Create(vkGetInstanceProcAddr, vkGetDeviceProcAddr,
            &mPresentQueueIndex, canPresent));

    // Get all the addresses of needed vulkan functions
    VkInstance instance = mBackendContext->fInstance;
    VkDevice device = mBackendContext->fDevice;
    GET_PROC(CreateAndroidSurfaceKHR);
    GET_PROC(DestroySurfaceKHR);
    GET_PROC(GetPhysicalDeviceSurfaceSupportKHR);
    GET_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
    GET_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
    GET_PROC(GetPhysicalDeviceSurfacePresentModesKHR);
    GET_DEV_PROC(CreateSwapchainKHR);
    GET_DEV_PROC(DestroySwapchainKHR);
    GET_DEV_PROC(GetSwapchainImagesKHR);
    GET_DEV_PROC(AcquireNextImageKHR);
    GET_DEV_PROC(QueuePresentKHR);
    GET_DEV_PROC(CreateCommandPool);
    GET_DEV_PROC(DestroyCommandPool);
    GET_DEV_PROC(AllocateCommandBuffers);
    GET_DEV_PROC(FreeCommandBuffers);
    GET_DEV_PROC(ResetCommandBuffer);
    GET_DEV_PROC(BeginCommandBuffer);
    GET_DEV_PROC(EndCommandBuffer);
    GET_DEV_PROC(CmdPipelineBarrier);
    GET_DEV_PROC(GetDeviceQueue);
    GET_DEV_PROC(QueueSubmit);
    GET_DEV_PROC(QueueWaitIdle);
    GET_DEV_PROC(DeviceWaitIdle);
    GET_DEV_PROC(CreateSemaphore);
    GET_DEV_PROC(DestroySemaphore);
    GET_DEV_PROC(CreateFence);
    GET_DEV_PROC(DestroyFence);
    GET_DEV_PROC(WaitForFences);
    GET_DEV_PROC(ResetFences);

    // create the command pool for the command buffers
    if (VK_NULL_HANDLE == mCommandPool) {
        VkCommandPoolCreateInfo commandPoolInfo;
        memset(&commandPoolInfo, 0, sizeof(VkCommandPoolCreateInfo));
        commandPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
        // this needs to be on the render queue
        commandPoolInfo.queueFamilyIndex = mBackendContext->fGraphicsQueueIndex;
        // Individual command buffers are reset each frame, so the pool must
        // allow per-buffer resets.
        commandPoolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
        SkDEBUGCODE(VkResult res =) mCreateCommandPool(mBackendContext->fDevice,
                &commandPoolInfo, nullptr, &mCommandPool);
        SkASSERT(VK_SUCCESS == res);
    }

    mGetDeviceQueue(mBackendContext->fDevice, mPresentQueueIndex, 0, &mPresentQueue);

    mRenderThread.setGrContext(GrContext::Create(kVulkan_GrBackend,
            (GrBackendContext) mBackendContext.get()));
    DeviceInfo::initialize(mRenderThread.getGrContext()->caps()->maxRenderTargetSize());

    if (Properties::enablePartialUpdates && Properties::useBufferAge) {
        mSwapBehavior = SwapBehavior::BufferAge;
    }

    mRenderThread.renderState().onVkContextCreated();
}
118
// Returns the next BackbufferInfo to use for the next draw. The function will make sure all
// previous uses have finished before returning. Returns nullptr if the fence
// wait fails (e.g. the device was lost).
VulkanSurface::BackbufferInfo* VulkanManager::getAvailableBackbuffer(VulkanSurface* surface) {
    SkASSERT(surface->mBackbuffers);

    // Advance through the ring of mImageCount + 1 slots (createBuffers
    // allocates one extra so in-flight command buffers can drain).
    ++surface->mCurrentBackbufferIndex;
    if (surface->mCurrentBackbufferIndex > surface->mImageCount) {
        surface->mCurrentBackbufferIndex = 0;
    }

    VulkanSurface::BackbufferInfo* backbuffer = surface->mBackbuffers +
            surface->mCurrentBackbufferIndex;

    // Before we reuse a backbuffer, make sure its fences have all signaled so that we can safely
    // reuse its command buffers.
    VkResult res = mWaitForFences(mBackendContext->fDevice, 2, backbuffer->mUsageFences,
            true, UINT64_MAX);
    if (res != VK_SUCCESS) {
        return nullptr;
    }

    return backbuffer;
}
142
143
144SkSurface* VulkanManager::getBackbufferSurface(VulkanSurface* surface) {
145 VulkanSurface::BackbufferInfo* backbuffer = getAvailableBackbuffer(surface);
146 SkASSERT(backbuffer);
147
148 VkResult res;
149
150 res = mResetFences(mBackendContext->fDevice, 2, backbuffer->mUsageFences);
151 SkASSERT(VK_SUCCESS == res);
152
153 // The acquire will signal the attached mAcquireSemaphore. We use this to know the image has
154 // finished presenting and that it is safe to begin sending new commands to the returned image.
155 res = mAcquireNextImageKHR(mBackendContext->fDevice, surface->mSwapchain, UINT64_MAX,
156 backbuffer->mAcquireSemaphore, VK_NULL_HANDLE, &backbuffer->mImageIndex);
157
158 if (VK_ERROR_SURFACE_LOST_KHR == res) {
159 // need to figure out how to create a new vkSurface without the platformData*
160 // maybe use attach somehow? but need a Window
161 return nullptr;
162 }
163 if (VK_ERROR_OUT_OF_DATE_KHR == res) {
164 // tear swapchain down and try again
165 if (!createSwapchain(surface)) {
166 return nullptr;
167 }
Greg Daniel45ec62b2017-01-04 14:27:00 -0500168 backbuffer = getAvailableBackbuffer(surface);
169 res = mResetFences(mBackendContext->fDevice, 2, backbuffer->mUsageFences);
170 SkASSERT(VK_SUCCESS == res);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500171
172 // acquire the image
173 res = mAcquireNextImageKHR(mBackendContext->fDevice, surface->mSwapchain, UINT64_MAX,
174 backbuffer->mAcquireSemaphore, VK_NULL_HANDLE, &backbuffer->mImageIndex);
175
176 if (VK_SUCCESS != res) {
177 return nullptr;
178 }
179 }
180
181 // set up layout transfer from initial to color attachment
Greg Danielcd558522016-11-17 13:31:40 -0500182 VkImageLayout layout = surface->mImageInfos[backbuffer->mImageIndex].mImageLayout;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500183 SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout || VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout);
184 VkPipelineStageFlags srcStageMask = (VK_IMAGE_LAYOUT_UNDEFINED == layout) ?
185 VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT :
186 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
187 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
188 VkAccessFlags srcAccessMask = (VK_IMAGE_LAYOUT_UNDEFINED == layout) ?
189 0 : VK_ACCESS_MEMORY_READ_BIT;
190 VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
191
192 VkImageMemoryBarrier imageMemoryBarrier = {
193 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
194 NULL, // pNext
195 srcAccessMask, // outputMask
196 dstAccessMask, // inputMask
197 layout, // oldLayout
198 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // newLayout
199 mPresentQueueIndex, // srcQueueFamilyIndex
200 mBackendContext->fGraphicsQueueIndex, // dstQueueFamilyIndex
201 surface->mImages[backbuffer->mImageIndex], // image
202 { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 } // subresourceRange
203 };
204 mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[0], 0);
205
206 VkCommandBufferBeginInfo info;
207 memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
208 info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
209 info.flags = 0;
210 mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[0], &info);
211
212 mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[0], srcStageMask, dstStageMask, 0,
213 0, nullptr, 0, nullptr, 1, &imageMemoryBarrier);
214
215 mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[0]);
216
217 VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
218 // insert the layout transfer into the queue and wait on the acquire
219 VkSubmitInfo submitInfo;
220 memset(&submitInfo, 0, sizeof(VkSubmitInfo));
221 submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
222 submitInfo.waitSemaphoreCount = 1;
223 // Wait to make sure aquire semaphore set above has signaled.
224 submitInfo.pWaitSemaphores = &backbuffer->mAcquireSemaphore;
225 submitInfo.pWaitDstStageMask = &waitDstStageFlags;
226 submitInfo.commandBufferCount = 1;
227 submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[0];
228 submitInfo.signalSemaphoreCount = 0;
229
230 // Attach first fence to submission here so we can track when the command buffer finishes.
231 mQueueSubmit(mBackendContext->fQueue, 1, &submitInfo, backbuffer->mUsageFences[0]);
232
233 // We need to notify Skia that we changed the layout of the wrapped VkImage
234 GrVkImageInfo* imageInfo;
Greg Danielcd558522016-11-17 13:31:40 -0500235 sk_sp<SkSurface> skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500236 skSurface->getRenderTargetHandle((GrBackendObject*)&imageInfo,
237 SkSurface::kFlushRead_BackendHandleAccess);
238 imageInfo->updateImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
239
240 surface->mBackbuffer = std::move(skSurface);
241 return surface->mBackbuffer.get();
242}
243
244void VulkanManager::destroyBuffers(VulkanSurface* surface) {
245 if (surface->mBackbuffers) {
246 for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
247 mWaitForFences(mBackendContext->fDevice, 2, surface->mBackbuffers[i].mUsageFences, true,
248 UINT64_MAX);
249 surface->mBackbuffers[i].mImageIndex = -1;
250 mDestroySemaphore(mBackendContext->fDevice, surface->mBackbuffers[i].mAcquireSemaphore,
251 nullptr);
252 mDestroySemaphore(mBackendContext->fDevice, surface->mBackbuffers[i].mRenderSemaphore,
253 nullptr);
254 mFreeCommandBuffers(mBackendContext->fDevice, mCommandPool, 2,
255 surface->mBackbuffers[i].mTransitionCmdBuffers);
256 mDestroyFence(mBackendContext->fDevice, surface->mBackbuffers[i].mUsageFences[0], 0);
257 mDestroyFence(mBackendContext->fDevice, surface->mBackbuffers[i].mUsageFences[1], 0);
258 }
259 }
260
261 delete[] surface->mBackbuffers;
262 surface->mBackbuffers = nullptr;
Greg Danielcd558522016-11-17 13:31:40 -0500263 delete[] surface->mImageInfos;
264 surface->mImageInfos = nullptr;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500265 delete[] surface->mImages;
266 surface->mImages = nullptr;
267}
268
// Tears down a VulkanSurface created by createSurface(): drains all pending
// GPU work first, then destroys the per-image buffers, the swapchain, and the
// VkSurfaceKHR, and finally frees the VulkanSurface object itself.
void VulkanManager::destroySurface(VulkanSurface* surface) {
    // Make sure all submit commands have finished before starting to destroy objects.
    if (VK_NULL_HANDLE != mPresentQueue) {
        mQueueWaitIdle(mPresentQueue);
    }
    mDeviceWaitIdle(mBackendContext->fDevice);

    destroyBuffers(surface);

    if (VK_NULL_HANDLE != surface->mSwapchain) {
        mDestroySwapchainKHR(mBackendContext->fDevice, surface->mSwapchain, nullptr);
        surface->mSwapchain = VK_NULL_HANDLE;
    }

    if (VK_NULL_HANDLE != surface->mVkSurface) {
        mDestroySurfaceKHR(mBackendContext->fInstance, surface->mVkSurface, nullptr);
        surface->mVkSurface = VK_NULL_HANDLE;
    }
    delete surface;
}
289
// Fetches the swapchain's images and wraps each one in an SkSurface that Skia
// can render into; also allocates the ring of BackbufferInfo structs (two
// semaphores, two fences, and two transition command buffers each) used to
// sequence acquire/present for every frame.
void VulkanManager::createBuffers(VulkanSurface* surface, VkFormat format, VkExtent2D extent) {
    // Standard Vulkan two-call pattern: query the count, then fetch the handles.
    mGetSwapchainImagesKHR(mBackendContext->fDevice, surface->mSwapchain, &surface->mImageCount,
            nullptr);
    SkASSERT(surface->mImageCount);
    surface->mImages = new VkImage[surface->mImageCount];
    mGetSwapchainImagesKHR(mBackendContext->fDevice, surface->mSwapchain,
            &surface->mImageCount, surface->mImages);

    SkSurfaceProps props(0, kUnknown_SkPixelGeometry);

    bool wantSRGB = VK_FORMAT_R8G8B8A8_SRGB == format;
    GrPixelConfig config = wantSRGB ? kSRGBA_8888_GrPixelConfig : kRGBA_8888_GrPixelConfig;

    // set up initial image layouts and create surfaces
    surface->mImageInfos = new VulkanSurface::ImageInfo[surface->mImageCount];
    for (uint32_t i = 0; i < surface->mImageCount; ++i) {
        GrBackendRenderTargetDesc desc;
        GrVkImageInfo info;
        info.fImage = surface->mImages[i];
        // Swapchain image memory is owned by the driver, so no allocation info.
        info.fAlloc = { VK_NULL_HANDLE, 0, 0, 0 };
        info.fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
        info.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
        info.fFormat = format;
        info.fLevelCount = 1;

        desc.fWidth = extent.width;
        desc.fHeight = extent.height;
        desc.fConfig = config;
        desc.fOrigin = kTopLeft_GrSurfaceOrigin;
        desc.fSampleCnt = 0;
        desc.fStencilBits = 0;
        desc.fRenderTargetHandle = (GrBackendObject) &info;

        VulkanSurface::ImageInfo& imageInfo = surface->mImageInfos[i];
        imageInfo.mSurface = SkSurface::MakeFromBackendRenderTarget(mRenderThread.getGrContext(),
                desc, &props);
    }

    SkASSERT(mCommandPool != VK_NULL_HANDLE);

    // set up the backbuffers
    VkSemaphoreCreateInfo semaphoreInfo;
    memset(&semaphoreInfo, 0, sizeof(VkSemaphoreCreateInfo));
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    VkCommandBufferAllocateInfo commandBuffersInfo;
    memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
    commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    commandBuffersInfo.pNext = nullptr;
    commandBuffersInfo.commandPool = mCommandPool;
    commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    // Two per backbuffer: [0] acquire-side transition, [1] present-side transition.
    commandBuffersInfo.commandBufferCount = 2;
    VkFenceCreateInfo fenceInfo;
    memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
    fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    fenceInfo.pNext = nullptr;
    // Start signaled so the first wait in getAvailableBackbuffer succeeds immediately.
    fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;

    // we create one additional backbuffer structure here, because we want to
    // give the command buffers they contain a chance to finish before we cycle back
    surface->mBackbuffers = new VulkanSurface::BackbufferInfo[surface->mImageCount + 1];
    for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
        // res only exists in debug builds; the SkASSERT below checks just the
        // last assignment.
        SkDEBUGCODE(VkResult res);
        surface->mBackbuffers[i].mImageIndex = -1;
        SkDEBUGCODE(res = ) mCreateSemaphore(mBackendContext->fDevice, &semaphoreInfo, nullptr,
                &surface->mBackbuffers[i].mAcquireSemaphore);
        SkDEBUGCODE(res = ) mCreateSemaphore(mBackendContext->fDevice, &semaphoreInfo, nullptr,
                &surface->mBackbuffers[i].mRenderSemaphore);
        SkDEBUGCODE(res = ) mAllocateCommandBuffers(mBackendContext->fDevice, &commandBuffersInfo,
                surface->mBackbuffers[i].mTransitionCmdBuffers);
        SkDEBUGCODE(res = ) mCreateFence(mBackendContext->fDevice, &fenceInfo, nullptr,
                &surface->mBackbuffers[i].mUsageFences[0]);
        SkDEBUGCODE(res = ) mCreateFence(mBackendContext->fDevice, &fenceInfo, nullptr,
                &surface->mBackbuffers[i].mUsageFences[1]);
        SkASSERT(VK_SUCCESS == res);
    }
    // Start at the extra slot so the first getAvailableBackbuffer wraps to 0.
    surface->mCurrentBackbufferIndex = surface->mImageCount;
}
369
// (Re)creates the swapchain for the given surface: queries capabilities,
// formats, and present modes; picks extent, image count, composite alpha,
// surface format, and present mode; then creates the new VkSwapchainKHR
// (chained to the old one, which is destroyed afterwards) and rebuilds the
// per-image buffers. Returns false if any query or the creation fails.
bool VulkanManager::createSwapchain(VulkanSurface* surface) {
    // check for capabilities
    VkSurfaceCapabilitiesKHR caps;
    VkResult res = mGetPhysicalDeviceSurfaceCapabilitiesKHR(mBackendContext->fPhysicalDevice,
            surface->mVkSurface, &caps);
    if (VK_SUCCESS != res) {
        return false;
    }

    uint32_t surfaceFormatCount;
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mBackendContext->fPhysicalDevice, surface->mVkSurface,
            &surfaceFormatCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    FatVector<VkSurfaceFormatKHR, 4> surfaceFormats(surfaceFormatCount);
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mBackendContext->fPhysicalDevice, surface->mVkSurface,
            &surfaceFormatCount, surfaceFormats.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    uint32_t presentModeCount;
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mBackendContext->fPhysicalDevice,
            surface->mVkSurface, &presentModeCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    FatVector<VkPresentModeKHR, VK_PRESENT_MODE_RANGE_SIZE_KHR> presentModes(presentModeCount);
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mBackendContext->fPhysicalDevice,
            surface->mVkSurface, &presentModeCount, presentModes.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    VkExtent2D extent = caps.currentExtent;
    // clamp width; to handle currentExtent of -1 and protect us from broken hints
    if (extent.width < caps.minImageExtent.width) {
        extent.width = caps.minImageExtent.width;
    }
    SkASSERT(extent.width <= caps.maxImageExtent.width);
    // clamp height
    if (extent.height < caps.minImageExtent.height) {
        extent.height = caps.minImageExtent.height;
    }
    SkASSERT(extent.height <= caps.maxImageExtent.height);

    // Request two images beyond the minimum for triple-buffering headroom.
    uint32_t imageCount = caps.minImageCount + 2;
    if (caps.maxImageCount > 0 && imageCount > caps.maxImageCount) {
        // Application must settle for fewer images than desired:
        imageCount = caps.maxImageCount;
    }

    // Currently Skia requires the images to be color attachments and support all transfer
    // operations.
    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    SkASSERT((caps.supportedUsageFlags & usageFlags) == usageFlags);
    SkASSERT(caps.supportedTransforms & caps.currentTransform);
    SkASSERT(caps.supportedCompositeAlpha & (VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR |
                                             VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR));
    VkCompositeAlphaFlagBitsKHR composite_alpha =
            (caps.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR) ?
                    VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR :
                    VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;

    // Pick our surface format. For now, just make sure it matches our sRGB request:
    VkFormat surfaceFormat = VK_FORMAT_UNDEFINED;
    VkColorSpaceKHR colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;

    bool wantSRGB = false;
#ifdef ANDROID_ENABLE_LINEAR_BLENDING
    wantSRGB = true;
#endif
    for (uint32_t i = 0; i < surfaceFormatCount; ++i) {
        // We are assuming we can get either R8G8B8A8_UNORM or R8G8B8A8_SRGB
        VkFormat desiredFormat = wantSRGB ? VK_FORMAT_R8G8B8A8_SRGB : VK_FORMAT_R8G8B8A8_UNORM;
        if (desiredFormat == surfaceFormats[i].format) {
            surfaceFormat = surfaceFormats[i].format;
            colorSpace = surfaceFormats[i].colorSpace;
        }
    }

    if (VK_FORMAT_UNDEFINED == surfaceFormat) {
        return false;
    }

    // If mailbox mode is available, use it, as it is the lowest-latency non-
    // tearing mode. If not, fall back to FIFO which is always available.
    VkPresentModeKHR mode = VK_PRESENT_MODE_FIFO_KHR;
    for (uint32_t i = 0; i < presentModeCount; ++i) {
        // use mailbox
        if (VK_PRESENT_MODE_MAILBOX_KHR == presentModes[i]) {
            mode = presentModes[i];
            break;
        }
    }

    VkSwapchainCreateInfoKHR swapchainCreateInfo;
    memset(&swapchainCreateInfo, 0, sizeof(VkSwapchainCreateInfoKHR));
    swapchainCreateInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
    swapchainCreateInfo.surface = surface->mVkSurface;
    swapchainCreateInfo.minImageCount = imageCount;
    swapchainCreateInfo.imageFormat = surfaceFormat;
    swapchainCreateInfo.imageColorSpace = colorSpace;
    swapchainCreateInfo.imageExtent = extent;
    swapchainCreateInfo.imageArrayLayers = 1;
    swapchainCreateInfo.imageUsage = usageFlags;

    // If graphics and present live on different queue families, the images
    // must be shared concurrently between them.
    uint32_t queueFamilies[] = { mBackendContext->fGraphicsQueueIndex, mPresentQueueIndex };
    if (mBackendContext->fGraphicsQueueIndex != mPresentQueueIndex) {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
        swapchainCreateInfo.queueFamilyIndexCount = 2;
        swapchainCreateInfo.pQueueFamilyIndices = queueFamilies;
    } else {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
        swapchainCreateInfo.queueFamilyIndexCount = 0;
        swapchainCreateInfo.pQueueFamilyIndices = nullptr;
    }

    swapchainCreateInfo.preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
    swapchainCreateInfo.compositeAlpha = composite_alpha;
    swapchainCreateInfo.presentMode = mode;
    swapchainCreateInfo.clipped = true;
    // Chain to the existing swapchain (if any) so the driver can recycle resources.
    swapchainCreateInfo.oldSwapchain = surface->mSwapchain;

    res = mCreateSwapchainKHR(mBackendContext->fDevice, &swapchainCreateInfo, nullptr,
            &surface->mSwapchain);
    if (VK_SUCCESS != res) {
        return false;
    }

    // destroy the old swapchain
    if (swapchainCreateInfo.oldSwapchain != VK_NULL_HANDLE) {
        // Old-swapchain images may still be in use; drain the device first.
        mDeviceWaitIdle(mBackendContext->fDevice);

        destroyBuffers(surface);

        mDestroySwapchainKHR(mBackendContext->fDevice, swapchainCreateInfo.oldSwapchain, nullptr);
    }

    createBuffers(surface, surfaceFormat, extent);

    return true;
}
518
519
// Creates a VulkanSurface bound to the given ANativeWindow: builds the
// VkSurfaceKHR and its swapchain. Lazily initializes the Vulkan context on
// first use. Returns nullptr on failure (including a null window); ownership
// of the returned VulkanSurface passes to the caller (release via
// destroySurface).
VulkanSurface* VulkanManager::createSurface(ANativeWindow* window) {
    initialize();

    if (!window) {
        return nullptr;
    }

    VulkanSurface* surface = new VulkanSurface();

    VkAndroidSurfaceCreateInfoKHR surfaceCreateInfo;
    memset(&surfaceCreateInfo, 0, sizeof(VkAndroidSurfaceCreateInfoKHR));
    surfaceCreateInfo.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
    surfaceCreateInfo.pNext = nullptr;
    surfaceCreateInfo.flags = 0;
    surfaceCreateInfo.window = window;

    VkResult res = mCreateAndroidSurfaceKHR(mBackendContext->fInstance, &surfaceCreateInfo,
            nullptr, &surface->mVkSurface);
    if (VK_SUCCESS != res) {
        delete surface;
        return nullptr;
    }

// Debug-only sanity check that the present queue can present to this surface.
SkDEBUGCODE(
    VkBool32 supported;
    res = mGetPhysicalDeviceSurfaceSupportKHR(mBackendContext->fPhysicalDevice,
            mPresentQueueIndex, surface->mVkSurface, &supported);
    // All physical devices and queue families on Android must be capable of presentation with any
    // native window.
    SkASSERT(VK_SUCCESS == res && supported);
);

    if (!createSwapchain(surface)) {
        destroySurface(surface);
        return nullptr;
    }

    return surface;
}
559
560// Helper to know which src stage flags we need to set when transitioning to the present layout
561static VkPipelineStageFlags layoutToPipelineStageFlags(const VkImageLayout layout) {
562 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
563 return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
564 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
565 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
566 return VK_PIPELINE_STAGE_TRANSFER_BIT;
567 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout ||
568 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
569 VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout ||
570 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
571 return VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
572 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
573 return VK_PIPELINE_STAGE_HOST_BIT;
574 }
575
576 SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
577 return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
578}
579
580// Helper to know which src access mask we need to set when transitioning to the present layout
581static VkAccessFlags layoutToSrcAccessMask(const VkImageLayout layout) {
582 VkAccessFlags flags = 0;
583 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
584 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
585 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
586 VK_ACCESS_TRANSFER_WRITE_BIT |
587 VK_ACCESS_TRANSFER_READ_BIT |
588 VK_ACCESS_SHADER_READ_BIT |
589 VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_HOST_READ_BIT;
590 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
591 flags = VK_ACCESS_HOST_WRITE_BIT;
592 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
593 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
594 } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
595 flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
596 } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
597 flags = VK_ACCESS_TRANSFER_WRITE_BIT;
598 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
599 flags = VK_ACCESS_TRANSFER_READ_BIT;
600 } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
601 flags = VK_ACCESS_SHADER_READ_BIT;
602 }
603 return flags;
604}
605
// Presents the current backbuffer: records and submits a barrier transitioning
// the image to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR on the graphics queue
// (signaling mRenderSemaphore), then queues the present on the present queue
// waiting on that semaphore, and finally updates bookkeeping for buffer age.
void VulkanManager::swapBuffers(VulkanSurface* surface) {
    if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
        ATRACE_NAME("Finishing GPU work");
        mDeviceWaitIdle(mBackendContext->fDevice);
    }

    VulkanSurface::BackbufferInfo* backbuffer = surface->mBackbuffers +
            surface->mCurrentBackbufferIndex;
    GrVkImageInfo* imageInfo;
    SkSurface* skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface.get();
    skSurface->getRenderTargetHandle((GrBackendObject*)&imageInfo,
            SkSurface::kFlushRead_BackendHandleAccess);
    // Check to make sure we never change the actually wrapped image
    SkASSERT(imageInfo->fImage == surface->mImages[backbuffer->mImageIndex]);

    // We need to transition the image to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR and make sure that all
    // previous work is complete before presenting. So we first add the necessary barrier here.
    VkImageLayout layout = imageInfo->fImageLayout;
    VkPipelineStageFlags srcStageMask = layoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = layoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;

    // This barrier also hands queue-family ownership from the graphics queue
    // to the present queue.
    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
        NULL, // pNext
        srcAccessMask, // outputMask
        dstAccessMask, // inputMask
        layout, // oldLayout
        VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, // newLayout
        mBackendContext->fGraphicsQueueIndex, // srcQueueFamilyIndex
        mPresentQueueIndex, // dstQueueFamilyIndex
        surface->mImages[backbuffer->mImageIndex], // image
        { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 } // subresourceRange
    };

    // Transition command buffer [1] is dedicated to the present-side barrier.
    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[1], 0);
    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[1], &info);
    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[1], srcStageMask, dstStageMask, 0,
            0, nullptr, 0, nullptr, 1, &imageMemoryBarrier);
    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[1]);

    surface->mImageInfos[backbuffer->mImageIndex].mImageLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 0;
    submitInfo.pWaitDstStageMask = 0;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[1];
    submitInfo.signalSemaphoreCount = 1;
    // When this command buffer finishes we will signal this semaphore so that we know it is now
    // safe to present the image to the screen.
    submitInfo.pSignalSemaphores = &backbuffer->mRenderSemaphore;

    // Attach second fence to submission here so we can track when the command buffer finishes.
    mQueueSubmit(mBackendContext->fQueue, 1, &submitInfo, backbuffer->mUsageFences[1]);

    // Submit present operation to present queue. We use a semaphore here to make sure all rendering
    // to the image is complete and that the layout has been changed to present on the graphics
    // queue.
    const VkPresentInfoKHR presentInfo =
    {
        VK_STRUCTURE_TYPE_PRESENT_INFO_KHR, // sType
        NULL, // pNext
        1, // waitSemaphoreCount
        &backbuffer->mRenderSemaphore, // pWaitSemaphores
        1, // swapchainCount
        &surface->mSwapchain, // pSwapchains
        &backbuffer->mImageIndex, // pImageIndices
        NULL // pResults
    };

    mQueuePresentKHR(mPresentQueue, &presentInfo);

    surface->mBackbuffer.reset();
    // Record when this image was last presented so getAge() can compute
    // buffer age for partial updates.
    surface->mImageInfos[backbuffer->mImageIndex].mLastUsed = surface->mCurrentTime;
    surface->mImageInfos[backbuffer->mImageIndex].mInvalid = false;
    surface->mCurrentTime++;
}
692
693int VulkanManager::getAge(VulkanSurface* surface) {
694 VulkanSurface::BackbufferInfo* backbuffer = surface->mBackbuffers +
695 surface->mCurrentBackbufferIndex;
696 if (mSwapBehavior == SwapBehavior::Discard
697 || surface->mImageInfos[backbuffer->mImageIndex].mInvalid) {
698 return 0;
699 }
700 uint16_t lastUsed = surface->mImageInfos[backbuffer->mImageIndex].mLastUsed;
701 return surface->mCurrentTime - lastUsed;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500702}
703
704} /* namespace renderthread */
705} /* namespace uirenderer */
706} /* namespace android */