blob: 2195143658d2a9828bf1d390ac19c243ad046888 [file] [log] [blame]
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001/*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "VulkanManager.h"
18
19#include "DeviceInfo.h"
Greg Danielcd558522016-11-17 13:31:40 -050020#include "Properties.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050021#include "RenderThread.h"
Greg Daniel45ec62b2017-01-04 14:27:00 -050022#include "renderstate/RenderState.h"
Ben Wagnereec27d52017-01-11 15:32:07 -050023#include "utils/FatVector.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050024
Greg Danielac2d2322017-07-12 11:30:15 -040025#include <GrBackendSurface.h>
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050026#include <GrContext.h>
27#include <GrTypes.h>
28#include <vk/GrVkTypes.h>
29
namespace android {
namespace uirenderer {
namespace renderthread {

// Resolves the instance-scope Vulkan entry point "vkF" via vkGetInstanceProcAddr
// and stores it in the matching member function pointer mF. Requires a local
// named `instance` to be in scope at the expansion site (see initialize()).
#define GET_PROC(F) m ## F = (PFN_vk ## F) vkGetInstanceProcAddr(instance, "vk" #F)
// Same, but resolves device-scope entry points via vkGetDeviceProcAddr.
// Requires a local named `device` to be in scope at the expansion site.
#define GET_DEV_PROC(F) m ## F = (PFN_vk ## F) vkGetDeviceProcAddr(device, "vk" #F)
36
37VulkanManager::VulkanManager(RenderThread& thread) : mRenderThread(thread) {
38}
39
40void VulkanManager::destroy() {
41 if (!hasVkContext()) return;
42
Greg Daniel45ec62b2017-01-04 14:27:00 -050043 mRenderThread.renderState().onVkContextDestroyed();
44 mRenderThread.setGrContext(nullptr);
45
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050046 if (VK_NULL_HANDLE != mCommandPool) {
47 mDestroyCommandPool(mBackendContext->fDevice, mCommandPool, nullptr);
48 mCommandPool = VK_NULL_HANDLE;
49 }
Greg Daniel45ec62b2017-01-04 14:27:00 -050050 mBackendContext.reset();
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050051}
52
// One-time setup of the Vulkan backend for this RenderThread: creates the
// GrVkBackendContext (instance, device, queues), resolves the Vulkan entry
// points this class uses, creates the command pool for layout-transition
// command buffers, fetches the present queue, and hands Skia a Vulkan-backed
// GrContext. No-op when a context already exists.
void VulkanManager::initialize() {
    if (hasVkContext()) { return; }

    // On Android every physical device / queue family must be able to present
    // to a native window, so the compatibility callback can always say yes.
    auto canPresent = [](VkInstance, VkPhysicalDevice, uint32_t) { return true; };

    mBackendContext.reset(GrVkBackendContext::Create(vkGetInstanceProcAddr, vkGetDeviceProcAddr,
            &mPresentQueueIndex, canPresent));
    // NOTE(review): Create() may return null on failure; the dereferences
    // below assume success — confirm this is guaranteed on shipping devices.

    // Get all the addresses of needed vulkan functions. The GET_PROC /
    // GET_DEV_PROC macros read the `instance` and `device` locals.
    VkInstance instance = mBackendContext->fInstance;
    VkDevice device = mBackendContext->fDevice;
    GET_PROC(CreateAndroidSurfaceKHR);
    GET_PROC(DestroySurfaceKHR);
    GET_PROC(GetPhysicalDeviceSurfaceSupportKHR);
    GET_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
    GET_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
    GET_PROC(GetPhysicalDeviceSurfacePresentModesKHR);
    GET_DEV_PROC(CreateSwapchainKHR);
    GET_DEV_PROC(DestroySwapchainKHR);
    GET_DEV_PROC(GetSwapchainImagesKHR);
    GET_DEV_PROC(AcquireNextImageKHR);
    GET_DEV_PROC(QueuePresentKHR);
    GET_DEV_PROC(CreateCommandPool);
    GET_DEV_PROC(DestroyCommandPool);
    GET_DEV_PROC(AllocateCommandBuffers);
    GET_DEV_PROC(FreeCommandBuffers);
    GET_DEV_PROC(ResetCommandBuffer);
    GET_DEV_PROC(BeginCommandBuffer);
    GET_DEV_PROC(EndCommandBuffer);
    GET_DEV_PROC(CmdPipelineBarrier);
    GET_DEV_PROC(GetDeviceQueue);
    GET_DEV_PROC(QueueSubmit);
    GET_DEV_PROC(QueueWaitIdle);
    GET_DEV_PROC(DeviceWaitIdle);
    GET_DEV_PROC(CreateSemaphore);
    GET_DEV_PROC(DestroySemaphore);
    GET_DEV_PROC(CreateFence);
    GET_DEV_PROC(DestroyFence);
    GET_DEV_PROC(WaitForFences);
    GET_DEV_PROC(ResetFences);

    // create the command pool for the command buffers
    if (VK_NULL_HANDLE == mCommandPool) {
        VkCommandPoolCreateInfo commandPoolInfo;
        memset(&commandPoolInfo, 0, sizeof(VkCommandPoolCreateInfo));
        commandPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
        // this needs to be on the render queue
        commandPoolInfo.queueFamilyIndex = mBackendContext->fGraphicsQueueIndex;
        // Each transition command buffer is reset and re-recorded every frame.
        commandPoolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
        SkDEBUGCODE(VkResult res =) mCreateCommandPool(mBackendContext->fDevice,
                &commandPoolInfo, nullptr, &mCommandPool);
        SkASSERT(VK_SUCCESS == res);
    }

    mGetDeviceQueue(mBackendContext->fDevice, mPresentQueueIndex, 0, &mPresentQueue);

    mRenderThread.setGrContext(GrContext::Create(kVulkan_GrBackend,
            (GrBackendContext) mBackendContext.get()));
    DeviceInfo::initialize(mRenderThread.getGrContext()->caps()->maxRenderTargetSize());

    if (Properties::enablePartialUpdates && Properties::useBufferAge) {
        mSwapBehavior = SwapBehavior::BufferAge;
    }

    mRenderThread.renderState().onVkContextCreated();
}
119
120// Returns the next BackbufferInfo to use for the next draw. The function will make sure all
121// previous uses have finished before returning.
122VulkanSurface::BackbufferInfo* VulkanManager::getAvailableBackbuffer(VulkanSurface* surface) {
123 SkASSERT(surface->mBackbuffers);
124
125 ++surface->mCurrentBackbufferIndex;
126 if (surface->mCurrentBackbufferIndex > surface->mImageCount) {
127 surface->mCurrentBackbufferIndex = 0;
128 }
129
130 VulkanSurface::BackbufferInfo* backbuffer = surface->mBackbuffers +
131 surface->mCurrentBackbufferIndex;
132
133 // Before we reuse a backbuffer, make sure its fences have all signaled so that we can safely
134 // reuse its commands buffers.
135 VkResult res = mWaitForFences(mBackendContext->fDevice, 2, backbuffer->mUsageFences,
136 true, UINT64_MAX);
137 if (res != VK_SUCCESS) {
138 return nullptr;
139 }
140
141 return backbuffer;
142}
143
144
145SkSurface* VulkanManager::getBackbufferSurface(VulkanSurface* surface) {
146 VulkanSurface::BackbufferInfo* backbuffer = getAvailableBackbuffer(surface);
147 SkASSERT(backbuffer);
148
149 VkResult res;
150
151 res = mResetFences(mBackendContext->fDevice, 2, backbuffer->mUsageFences);
152 SkASSERT(VK_SUCCESS == res);
153
154 // The acquire will signal the attached mAcquireSemaphore. We use this to know the image has
155 // finished presenting and that it is safe to begin sending new commands to the returned image.
156 res = mAcquireNextImageKHR(mBackendContext->fDevice, surface->mSwapchain, UINT64_MAX,
157 backbuffer->mAcquireSemaphore, VK_NULL_HANDLE, &backbuffer->mImageIndex);
158
159 if (VK_ERROR_SURFACE_LOST_KHR == res) {
160 // need to figure out how to create a new vkSurface without the platformData*
161 // maybe use attach somehow? but need a Window
162 return nullptr;
163 }
164 if (VK_ERROR_OUT_OF_DATE_KHR == res) {
165 // tear swapchain down and try again
166 if (!createSwapchain(surface)) {
167 return nullptr;
168 }
Greg Daniel45ec62b2017-01-04 14:27:00 -0500169 backbuffer = getAvailableBackbuffer(surface);
170 res = mResetFences(mBackendContext->fDevice, 2, backbuffer->mUsageFences);
171 SkASSERT(VK_SUCCESS == res);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500172
173 // acquire the image
174 res = mAcquireNextImageKHR(mBackendContext->fDevice, surface->mSwapchain, UINT64_MAX,
175 backbuffer->mAcquireSemaphore, VK_NULL_HANDLE, &backbuffer->mImageIndex);
176
177 if (VK_SUCCESS != res) {
178 return nullptr;
179 }
180 }
181
182 // set up layout transfer from initial to color attachment
Greg Danielcd558522016-11-17 13:31:40 -0500183 VkImageLayout layout = surface->mImageInfos[backbuffer->mImageIndex].mImageLayout;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500184 SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout || VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout);
185 VkPipelineStageFlags srcStageMask = (VK_IMAGE_LAYOUT_UNDEFINED == layout) ?
186 VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT :
187 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
188 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
189 VkAccessFlags srcAccessMask = (VK_IMAGE_LAYOUT_UNDEFINED == layout) ?
190 0 : VK_ACCESS_MEMORY_READ_BIT;
191 VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
192
193 VkImageMemoryBarrier imageMemoryBarrier = {
194 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
195 NULL, // pNext
196 srcAccessMask, // outputMask
197 dstAccessMask, // inputMask
198 layout, // oldLayout
199 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // newLayout
200 mPresentQueueIndex, // srcQueueFamilyIndex
201 mBackendContext->fGraphicsQueueIndex, // dstQueueFamilyIndex
202 surface->mImages[backbuffer->mImageIndex], // image
203 { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 } // subresourceRange
204 };
205 mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[0], 0);
206
207 VkCommandBufferBeginInfo info;
208 memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
209 info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
210 info.flags = 0;
211 mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[0], &info);
212
213 mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[0], srcStageMask, dstStageMask, 0,
214 0, nullptr, 0, nullptr, 1, &imageMemoryBarrier);
215
216 mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[0]);
217
218 VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
219 // insert the layout transfer into the queue and wait on the acquire
220 VkSubmitInfo submitInfo;
221 memset(&submitInfo, 0, sizeof(VkSubmitInfo));
222 submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
223 submitInfo.waitSemaphoreCount = 1;
224 // Wait to make sure aquire semaphore set above has signaled.
225 submitInfo.pWaitSemaphores = &backbuffer->mAcquireSemaphore;
226 submitInfo.pWaitDstStageMask = &waitDstStageFlags;
227 submitInfo.commandBufferCount = 1;
228 submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[0];
229 submitInfo.signalSemaphoreCount = 0;
230
231 // Attach first fence to submission here so we can track when the command buffer finishes.
232 mQueueSubmit(mBackendContext->fQueue, 1, &submitInfo, backbuffer->mUsageFences[0]);
233
234 // We need to notify Skia that we changed the layout of the wrapped VkImage
235 GrVkImageInfo* imageInfo;
Greg Danielcd558522016-11-17 13:31:40 -0500236 sk_sp<SkSurface> skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500237 skSurface->getRenderTargetHandle((GrBackendObject*)&imageInfo,
238 SkSurface::kFlushRead_BackendHandleAccess);
239 imageInfo->updateImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
240
241 surface->mBackbuffer = std::move(skSurface);
242 return surface->mBackbuffer.get();
243}
244
245void VulkanManager::destroyBuffers(VulkanSurface* surface) {
246 if (surface->mBackbuffers) {
247 for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
248 mWaitForFences(mBackendContext->fDevice, 2, surface->mBackbuffers[i].mUsageFences, true,
249 UINT64_MAX);
250 surface->mBackbuffers[i].mImageIndex = -1;
251 mDestroySemaphore(mBackendContext->fDevice, surface->mBackbuffers[i].mAcquireSemaphore,
252 nullptr);
253 mDestroySemaphore(mBackendContext->fDevice, surface->mBackbuffers[i].mRenderSemaphore,
254 nullptr);
255 mFreeCommandBuffers(mBackendContext->fDevice, mCommandPool, 2,
256 surface->mBackbuffers[i].mTransitionCmdBuffers);
257 mDestroyFence(mBackendContext->fDevice, surface->mBackbuffers[i].mUsageFences[0], 0);
258 mDestroyFence(mBackendContext->fDevice, surface->mBackbuffers[i].mUsageFences[1], 0);
259 }
260 }
261
262 delete[] surface->mBackbuffers;
263 surface->mBackbuffers = nullptr;
Greg Danielcd558522016-11-17 13:31:40 -0500264 delete[] surface->mImageInfos;
265 surface->mImageInfos = nullptr;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500266 delete[] surface->mImages;
267 surface->mImages = nullptr;
268}
269
270void VulkanManager::destroySurface(VulkanSurface* surface) {
271 // Make sure all submit commands have finished before starting to destroy objects.
272 if (VK_NULL_HANDLE != mPresentQueue) {
273 mQueueWaitIdle(mPresentQueue);
274 }
275 mDeviceWaitIdle(mBackendContext->fDevice);
276
277 destroyBuffers(surface);
278
279 if (VK_NULL_HANDLE != surface->mSwapchain) {
280 mDestroySwapchainKHR(mBackendContext->fDevice, surface->mSwapchain, nullptr);
281 surface->mSwapchain = VK_NULL_HANDLE;
282 }
283
284 if (VK_NULL_HANDLE != surface->mVkSurface) {
285 mDestroySurfaceKHR(mBackendContext->fInstance, surface->mVkSurface, nullptr);
286 surface->mVkSurface = VK_NULL_HANDLE;
287 }
288 delete surface;
289}
290
// Queries the swapchain's images and builds all per-image / per-backbuffer
// state: an SkSurface wrapping each VkImage (starting in UNDEFINED layout),
// plus mImageCount + 1 backbuffer slots each holding two semaphores, two
// transition command buffers, and two usage fences (created pre-signaled so
// the first wait in getAvailableBackbuffer succeeds immediately).
void VulkanManager::createBuffers(VulkanSurface* surface, VkFormat format, VkExtent2D extent) {
    // First call with a null array to get the count, then fetch the images.
    mGetSwapchainImagesKHR(mBackendContext->fDevice, surface->mSwapchain, &surface->mImageCount,
            nullptr);
    SkASSERT(surface->mImageCount);
    surface->mImages = new VkImage[surface->mImageCount];
    mGetSwapchainImagesKHR(mBackendContext->fDevice, surface->mSwapchain,
            &surface->mImageCount, surface->mImages);

    SkSurfaceProps props(0, kUnknown_SkPixelGeometry);

    // set up initial image layouts and create surfaces
    surface->mImageInfos = new VulkanSurface::ImageInfo[surface->mImageCount];
    for (uint32_t i = 0; i < surface->mImageCount; ++i) {
        // Wrap the swapchain-owned image (no backing allocation of our own).
        GrVkImageInfo info;
        info.fImage = surface->mImages[i];
        info.fAlloc = { VK_NULL_HANDLE, 0, 0, 0 };
        info.fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
        info.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
        info.fFormat = format;
        info.fLevelCount = 1;

        GrBackendRenderTarget backendRT(extent.width, extent.height, 0, 0, info);

        VulkanSurface::ImageInfo& imageInfo = surface->mImageInfos[i];
        imageInfo.mSurface = SkSurface::MakeFromBackendRenderTarget(mRenderThread.getGrContext(),
                backendRT, kTopLeft_GrSurfaceOrigin, nullptr, &props);
    }

    SkASSERT(mCommandPool != VK_NULL_HANDLE);

    // set up the backbuffers
    VkSemaphoreCreateInfo semaphoreInfo;
    memset(&semaphoreInfo, 0, sizeof(VkSemaphoreCreateInfo));
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    VkCommandBufferAllocateInfo commandBuffersInfo;
    memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
    commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    commandBuffersInfo.pNext = nullptr;
    commandBuffersInfo.commandPool = mCommandPool;
    commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    // Two command buffers per slot: [0] acquire-side transition, [1]
    // present-side transition.
    commandBuffersInfo.commandBufferCount = 2;
    VkFenceCreateInfo fenceInfo;
    memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
    fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    fenceInfo.pNext = nullptr;
    // Pre-signaled so the first fence wait on a fresh slot does not block.
    fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;

    // we create one additional backbuffer structure here, because we want to
    // give the command buffers they contain a chance to finish before we cycle back
    surface->mBackbuffers = new VulkanSurface::BackbufferInfo[surface->mImageCount + 1];
    for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
        SkDEBUGCODE(VkResult res);
        surface->mBackbuffers[i].mImageIndex = -1;
        SkDEBUGCODE(res = ) mCreateSemaphore(mBackendContext->fDevice, &semaphoreInfo, nullptr,
                &surface->mBackbuffers[i].mAcquireSemaphore);
        SkDEBUGCODE(res = ) mCreateSemaphore(mBackendContext->fDevice, &semaphoreInfo, nullptr,
                &surface->mBackbuffers[i].mRenderSemaphore);
        SkDEBUGCODE(res = ) mAllocateCommandBuffers(mBackendContext->fDevice, &commandBuffersInfo,
                surface->mBackbuffers[i].mTransitionCmdBuffers);
        SkDEBUGCODE(res = ) mCreateFence(mBackendContext->fDevice, &fenceInfo, nullptr,
                &surface->mBackbuffers[i].mUsageFences[0]);
        SkDEBUGCODE(res = ) mCreateFence(mBackendContext->fDevice, &fenceInfo, nullptr,
                &surface->mBackbuffers[i].mUsageFences[1]);
        // NOTE: only the last call's result is asserted in debug builds.
        SkASSERT(VK_SUCCESS == res);
    }
    // Start on the spare slot so the first getAvailableBackbuffer wraps to 0.
    surface->mCurrentBackbufferIndex = surface->mImageCount;
}
360
// (Re)creates the swapchain for the surface from the current surface
// capabilities, formats, and present modes. Returns false when any Vulkan
// query/creation call fails or no usable R8G8B8A8 format is found. An
// existing swapchain is passed as oldSwapchain and destroyed (together with
// its per-image buffers) after the new one is created.
bool VulkanManager::createSwapchain(VulkanSurface* surface) {
    // check for capabilities
    VkSurfaceCapabilitiesKHR caps;
    VkResult res = mGetPhysicalDeviceSurfaceCapabilitiesKHR(mBackendContext->fPhysicalDevice,
            surface->mVkSurface, &caps);
    if (VK_SUCCESS != res) {
        return false;
    }

    // Two-call pattern: count first, then the data.
    uint32_t surfaceFormatCount;
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mBackendContext->fPhysicalDevice, surface->mVkSurface,
            &surfaceFormatCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    FatVector<VkSurfaceFormatKHR, 4> surfaceFormats(surfaceFormatCount);
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mBackendContext->fPhysicalDevice, surface->mVkSurface,
            &surfaceFormatCount, surfaceFormats.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    uint32_t presentModeCount;
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mBackendContext->fPhysicalDevice,
            surface->mVkSurface, &presentModeCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    FatVector<VkPresentModeKHR, VK_PRESENT_MODE_RANGE_SIZE_KHR> presentModes(presentModeCount);
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mBackendContext->fPhysicalDevice,
            surface->mVkSurface, &presentModeCount, presentModes.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    VkExtent2D extent = caps.currentExtent;
    // clamp width; to handle currentExtent of -1 and protect us from broken hints
    if (extent.width < caps.minImageExtent.width) {
        extent.width = caps.minImageExtent.width;
    }
    SkASSERT(extent.width <= caps.maxImageExtent.width);
    // clamp height
    if (extent.height < caps.minImageExtent.height) {
        extent.height = caps.minImageExtent.height;
    }
    SkASSERT(extent.height <= caps.maxImageExtent.height);

    // Ask for two more images than the minimum so acquire rarely blocks.
    // A maxImageCount of 0 means "no limit".
    uint32_t imageCount = caps.minImageCount + 2;
    if (caps.maxImageCount > 0 && imageCount > caps.maxImageCount) {
        // Application must settle for fewer images than desired:
        imageCount = caps.maxImageCount;
    }

    // Currently Skia requires the images to be color attachments and support all transfer
    // operations.
    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    SkASSERT((caps.supportedUsageFlags & usageFlags) == usageFlags);
    SkASSERT(caps.supportedTransforms & caps.currentTransform);
    SkASSERT(caps.supportedCompositeAlpha & (VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR |
                                             VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR));
    VkCompositeAlphaFlagBitsKHR composite_alpha =
            (caps.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR) ?
            VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR :
            VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;

    // Pick our surface format. For now, just make sure it matches our sRGB request:
    VkFormat surfaceFormat = VK_FORMAT_UNDEFINED;
    VkColorSpaceKHR colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;

    bool wantSRGB = false;
#ifdef ANDROID_ENABLE_LINEAR_BLENDING
    wantSRGB = true;
#endif
    for (uint32_t i = 0; i < surfaceFormatCount; ++i) {
        // We are assuming we can get either R8G8B8A8_UNORM or R8G8B8A8_SRGB
        VkFormat desiredFormat = wantSRGB ? VK_FORMAT_R8G8B8A8_SRGB : VK_FORMAT_R8G8B8A8_UNORM;
        if (desiredFormat == surfaceFormats[i].format) {
            surfaceFormat = surfaceFormats[i].format;
            colorSpace = surfaceFormats[i].colorSpace;
        }
    }

    if (VK_FORMAT_UNDEFINED == surfaceFormat) {
        return false;
    }

    // If mailbox mode is available, use it, as it is the lowest-latency non-
    // tearing mode. If not, fall back to FIFO which is always available.
    VkPresentModeKHR mode = VK_PRESENT_MODE_FIFO_KHR;
    for (uint32_t i = 0; i < presentModeCount; ++i) {
        // use mailbox
        if (VK_PRESENT_MODE_MAILBOX_KHR == presentModes[i]) {
            mode = presentModes[i];
            break;
        }
    }

    VkSwapchainCreateInfoKHR swapchainCreateInfo;
    memset(&swapchainCreateInfo, 0, sizeof(VkSwapchainCreateInfoKHR));
    swapchainCreateInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
    swapchainCreateInfo.surface = surface->mVkSurface;
    swapchainCreateInfo.minImageCount = imageCount;
    swapchainCreateInfo.imageFormat = surfaceFormat;
    swapchainCreateInfo.imageColorSpace = colorSpace;
    swapchainCreateInfo.imageExtent = extent;
    swapchainCreateInfo.imageArrayLayers = 1;
    swapchainCreateInfo.imageUsage = usageFlags;

    // When graphics and present use different queue families the images must
    // be shared between the two; otherwise exclusive mode is cheaper.
    uint32_t queueFamilies[] = { mBackendContext->fGraphicsQueueIndex, mPresentQueueIndex };
    if (mBackendContext->fGraphicsQueueIndex != mPresentQueueIndex) {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
        swapchainCreateInfo.queueFamilyIndexCount = 2;
        swapchainCreateInfo.pQueueFamilyIndices = queueFamilies;
    } else {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
        swapchainCreateInfo.queueFamilyIndexCount = 0;
        swapchainCreateInfo.pQueueFamilyIndices = nullptr;
    }

    swapchainCreateInfo.preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
    swapchainCreateInfo.compositeAlpha = composite_alpha;
    swapchainCreateInfo.presentMode = mode;
    swapchainCreateInfo.clipped = true;
    // Pass the old swapchain (possibly VK_NULL_HANDLE) so the driver can
    // recycle resources.
    swapchainCreateInfo.oldSwapchain = surface->mSwapchain;

    res = mCreateSwapchainKHR(mBackendContext->fDevice, &swapchainCreateInfo, nullptr,
            &surface->mSwapchain);
    if (VK_SUCCESS != res) {
        return false;
    }

    // destroy the old swapchain
    if (swapchainCreateInfo.oldSwapchain != VK_NULL_HANDLE) {
        mDeviceWaitIdle(mBackendContext->fDevice);

        destroyBuffers(surface);

        mDestroySwapchainKHR(mBackendContext->fDevice, swapchainCreateInfo.oldSwapchain, nullptr);
    }

    createBuffers(surface, surfaceFormat, extent);

    return true;
}
509
510
511VulkanSurface* VulkanManager::createSurface(ANativeWindow* window) {
512 initialize();
513
514 if (!window) {
515 return nullptr;
516 }
517
518 VulkanSurface* surface = new VulkanSurface();
519
520 VkAndroidSurfaceCreateInfoKHR surfaceCreateInfo;
521 memset(&surfaceCreateInfo, 0, sizeof(VkAndroidSurfaceCreateInfoKHR));
522 surfaceCreateInfo.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
523 surfaceCreateInfo.pNext = nullptr;
524 surfaceCreateInfo.flags = 0;
525 surfaceCreateInfo.window = window;
526
527 VkResult res = mCreateAndroidSurfaceKHR(mBackendContext->fInstance, &surfaceCreateInfo,
528 nullptr, &surface->mVkSurface);
529 if (VK_SUCCESS != res) {
530 delete surface;
531 return nullptr;
532 }
533
534SkDEBUGCODE(
535 VkBool32 supported;
536 res = mGetPhysicalDeviceSurfaceSupportKHR(mBackendContext->fPhysicalDevice,
537 mPresentQueueIndex, surface->mVkSurface, &supported);
538 // All physical devices and queue families on Android must be capable of presentation with any
539 // native window.
540 SkASSERT(VK_SUCCESS == res && supported);
541);
542
543 if (!createSwapchain(surface)) {
544 destroySurface(surface);
545 return nullptr;
546 }
547
548 return surface;
549}
550
551// Helper to know which src stage flags we need to set when transitioning to the present layout
552static VkPipelineStageFlags layoutToPipelineStageFlags(const VkImageLayout layout) {
553 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
554 return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
555 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
556 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
557 return VK_PIPELINE_STAGE_TRANSFER_BIT;
558 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout ||
559 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
560 VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout ||
561 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
562 return VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
563 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
564 return VK_PIPELINE_STAGE_HOST_BIT;
565 }
566
567 SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
568 return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
569}
570
571// Helper to know which src access mask we need to set when transitioning to the present layout
572static VkAccessFlags layoutToSrcAccessMask(const VkImageLayout layout) {
573 VkAccessFlags flags = 0;
574 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
575 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
576 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
577 VK_ACCESS_TRANSFER_WRITE_BIT |
578 VK_ACCESS_TRANSFER_READ_BIT |
579 VK_ACCESS_SHADER_READ_BIT |
580 VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_HOST_READ_BIT;
581 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
582 flags = VK_ACCESS_HOST_WRITE_BIT;
583 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
584 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
585 } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
586 flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
587 } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
588 flags = VK_ACCESS_TRANSFER_WRITE_BIT;
589 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
590 flags = VK_ACCESS_TRANSFER_READ_BIT;
591 } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
592 flags = VK_ACCESS_SHADER_READ_BIT;
593 }
594 return flags;
595}
596
// Transitions the current backbuffer image to PRESENT_SRC layout (handing it
// from the graphics queue family to the present family), submits that
// transition ordered after all rendering, then queues the present. Also
// updates the last-used/invalid bookkeeping consumed by getAge().
void VulkanManager::swapBuffers(VulkanSurface* surface) {
    if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
        ATRACE_NAME("Finishing GPU work");
        mDeviceWaitIdle(mBackendContext->fDevice);
    }

    VulkanSurface::BackbufferInfo* backbuffer = surface->mBackbuffers +
            surface->mCurrentBackbufferIndex;
    GrVkImageInfo* imageInfo;
    SkSurface* skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface.get();
    // kFlushRead makes Skia flush pending work on the surface before we read
    // the backend image state.
    skSurface->getRenderTargetHandle((GrBackendObject*)&imageInfo,
            SkSurface::kFlushRead_BackendHandleAccess);
    // Check to make sure we never change the actually wrapped image
    SkASSERT(imageInfo->fImage == surface->mImages[backbuffer->mImageIndex]);

    // We need to transition the image to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR and make sure that all
    // previous work is complete for before presenting. So we first add the necessary barrier here.
    VkImageLayout layout = imageInfo->fImageLayout;
    VkPipelineStageFlags srcStageMask = layoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = layoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;

    // Queue-family ownership transfer: graphics -> present.
    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,     // sType
        NULL,                                       // pNext
        srcAccessMask,                              // outputMask
        dstAccessMask,                              // inputMask
        layout,                                     // oldLayout
        VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,            // newLayout
        mBackendContext->fGraphicsQueueIndex,       // srcQueueFamilyIndex
        mPresentQueueIndex,                         // dstQueueFamilyIndex
        surface->mImages[backbuffer->mImageIndex],  // image
        { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 }   // subresourceRange
    };

    // Record into the second per-slot command buffer; the first was used for
    // the acquire-side transition in getBackbufferSurface().
    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[1], 0);
    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[1], &info);
    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[1], srcStageMask, dstStageMask, 0,
            0, nullptr, 0, nullptr, 1, &imageMemoryBarrier);
    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[1]);

    surface->mImageInfos[backbuffer->mImageIndex].mImageLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 0;
    submitInfo.pWaitDstStageMask = 0;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[1];
    submitInfo.signalSemaphoreCount = 1;
    // When this command buffer finishes we will signal this semaphore so that we know it is now
    // safe to present the image to the screen.
    submitInfo.pSignalSemaphores = &backbuffer->mRenderSemaphore;

    // Attach second fence to submission here so we can track when the command buffer finishes.
    mQueueSubmit(mBackendContext->fQueue, 1, &submitInfo, backbuffer->mUsageFences[1]);

    // Submit present operation to present queue. We use a semaphore here to make sure all rendering
    // to the image is complete and that the layout has been change to present on the graphics
    // queue.
    const VkPresentInfoKHR presentInfo =
    {
        VK_STRUCTURE_TYPE_PRESENT_INFO_KHR, // sType
        NULL, // pNext
        1, // waitSemaphoreCount
        &backbuffer->mRenderSemaphore, // pWaitSemaphores
        1, // swapchainCount
        &surface->mSwapchain, // pSwapchains
        &backbuffer->mImageIndex, // pImageIndices
        NULL // pResults
    };

    mQueuePresentKHR(mPresentQueue, &presentInfo);

    // Drop our reference to the wrapped SkSurface; record when this image was
    // last presented (for buffer-age queries) and mark its contents valid.
    surface->mBackbuffer.reset();
    surface->mImageInfos[backbuffer->mImageIndex].mLastUsed = surface->mCurrentTime;
    surface->mImageInfos[backbuffer->mImageIndex].mInvalid = false;
    surface->mCurrentTime++;
}
683
684int VulkanManager::getAge(VulkanSurface* surface) {
685 VulkanSurface::BackbufferInfo* backbuffer = surface->mBackbuffers +
686 surface->mCurrentBackbufferIndex;
687 if (mSwapBehavior == SwapBehavior::Discard
688 || surface->mImageInfos[backbuffer->mImageIndex].mInvalid) {
689 return 0;
690 }
691 uint16_t lastUsed = surface->mImageInfos[backbuffer->mImageIndex].mLastUsed;
692 return surface->mCurrentTime - lastUsed;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500693}
694
695} /* namespace renderthread */
696} /* namespace uirenderer */
697} /* namespace android */