blob: 62f820a043ca2ff83559c399a003b35f2e6d8a83 [file] [log] [blame]
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001/*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "VulkanManager.h"
18
19#include "DeviceInfo.h"
Greg Danielcd558522016-11-17 13:31:40 -050020#include "Properties.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050021#include "RenderThread.h"
Greg Daniel45ec62b2017-01-04 14:27:00 -050022#include "renderstate/RenderState.h"
Ben Wagnereec27d52017-01-11 15:32:07 -050023#include "utils/FatVector.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050024
Greg Danielac2d2322017-07-12 11:30:15 -040025#include <GrBackendSurface.h>
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050026#include <GrContext.h>
27#include <GrTypes.h>
28#include <vk/GrVkTypes.h>
29
30namespace android {
31namespace uirenderer {
32namespace renderthread {
33
John Reck1bcacfd2017-11-03 10:12:19 -070034#define GET_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(instance, "vk" #F)
35#define GET_DEV_PROC(F) m##F = (PFN_vk##F)vkGetDeviceProcAddr(device, "vk" #F)
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050036
// Construction only records the owning RenderThread; all Vulkan/Skia setup is
// deferred to initialize().
VulkanManager::VulkanManager(RenderThread& thread) : mRenderThread(thread) {}
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050038
39void VulkanManager::destroy() {
40 if (!hasVkContext()) return;
41
Greg Daniel45ec62b2017-01-04 14:27:00 -050042 mRenderThread.renderState().onVkContextDestroyed();
43 mRenderThread.setGrContext(nullptr);
44
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050045 if (VK_NULL_HANDLE != mCommandPool) {
46 mDestroyCommandPool(mBackendContext->fDevice, mCommandPool, nullptr);
47 mCommandPool = VK_NULL_HANDLE;
48 }
Greg Daniel45ec62b2017-01-04 14:27:00 -050049 mBackendContext.reset();
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050050}
51
// Lazily brings up the Vulkan backend for this render thread: creates the
// GrVkBackendContext (instance + device), resolves every instance/device
// entry point this class calls, creates the shared command pool, fetches the
// present queue, and hands a Vulkan-backed GrContext to the RenderThread.
// Idempotent — returns immediately if a context already exists.
void VulkanManager::initialize() {
    if (hasVkContext()) {
        return;
    }

    // On Android every physical device / queue family can present to a native
    // window, so the canPresent callback can unconditionally say yes.
    auto canPresent = [](VkInstance, VkPhysicalDevice, uint32_t) { return true; };

    mBackendContext.reset(GrVkBackendContext::Create(vkGetInstanceProcAddr, vkGetDeviceProcAddr,
                                                     &mPresentQueueIndex, canPresent));
    LOG_ALWAYS_FATAL_IF(!mBackendContext.get());

    // Get all the addresses of needed vulkan functions
    VkInstance instance = mBackendContext->fInstance;
    VkDevice device = mBackendContext->fDevice;
    GET_PROC(CreateAndroidSurfaceKHR);
    GET_PROC(DestroySurfaceKHR);
    GET_PROC(GetPhysicalDeviceSurfaceSupportKHR);
    GET_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
    GET_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
    GET_PROC(GetPhysicalDeviceSurfacePresentModesKHR);
    GET_DEV_PROC(CreateSwapchainKHR);
    GET_DEV_PROC(DestroySwapchainKHR);
    GET_DEV_PROC(GetSwapchainImagesKHR);
    GET_DEV_PROC(AcquireNextImageKHR);
    GET_DEV_PROC(QueuePresentKHR);
    GET_DEV_PROC(CreateCommandPool);
    GET_DEV_PROC(DestroyCommandPool);
    GET_DEV_PROC(AllocateCommandBuffers);
    GET_DEV_PROC(FreeCommandBuffers);
    GET_DEV_PROC(ResetCommandBuffer);
    GET_DEV_PROC(BeginCommandBuffer);
    GET_DEV_PROC(EndCommandBuffer);
    GET_DEV_PROC(CmdPipelineBarrier);
    GET_DEV_PROC(GetDeviceQueue);
    GET_DEV_PROC(QueueSubmit);
    GET_DEV_PROC(QueueWaitIdle);
    GET_DEV_PROC(DeviceWaitIdle);
    GET_DEV_PROC(CreateSemaphore);
    GET_DEV_PROC(DestroySemaphore);
    GET_DEV_PROC(CreateFence);
    GET_DEV_PROC(DestroyFence);
    GET_DEV_PROC(WaitForFences);
    GET_DEV_PROC(ResetFences);

    // create the command pool for the command buffers
    if (VK_NULL_HANDLE == mCommandPool) {
        VkCommandPoolCreateInfo commandPoolInfo;
        memset(&commandPoolInfo, 0, sizeof(VkCommandPoolCreateInfo));
        commandPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
        // this needs to be on the render queue
        commandPoolInfo.queueFamilyIndex = mBackendContext->fGraphicsQueueIndex;
        // RESET_COMMAND_BUFFER lets us reset individual transition command
        // buffers each frame instead of resetting the whole pool.
        commandPoolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
        SkDEBUGCODE(VkResult res =) mCreateCommandPool(mBackendContext->fDevice, &commandPoolInfo,
                                                       nullptr, &mCommandPool);
        SkASSERT(VK_SUCCESS == res);
    }

    mGetDeviceQueue(mBackendContext->fDevice, mPresentQueueIndex, 0, &mPresentQueue);

    GrContextOptions options;
    options.fDisableDistanceFieldPaths = true;
    mRenderThread.cacheManager().configureContext(&options);
    sk_sp<GrContext> grContext(GrContext::MakeVulkan(mBackendContext, options));
    LOG_ALWAYS_FATAL_IF(!grContext.get());
    mRenderThread.setGrContext(grContext);
    DeviceInfo::initialize(mRenderThread.getGrContext()->maxRenderTargetSize());

    // Buffer-age-based partial updates only when both properties opt in.
    if (Properties::enablePartialUpdates && Properties::useBufferAge) {
        mSwapBehavior = SwapBehavior::BufferAge;
    }

    mRenderThread.renderState().onVkContextCreated();
}
125
126// Returns the next BackbufferInfo to use for the next draw. The function will make sure all
127// previous uses have finished before returning.
128VulkanSurface::BackbufferInfo* VulkanManager::getAvailableBackbuffer(VulkanSurface* surface) {
129 SkASSERT(surface->mBackbuffers);
130
131 ++surface->mCurrentBackbufferIndex;
132 if (surface->mCurrentBackbufferIndex > surface->mImageCount) {
133 surface->mCurrentBackbufferIndex = 0;
134 }
135
John Reck1bcacfd2017-11-03 10:12:19 -0700136 VulkanSurface::BackbufferInfo* backbuffer =
137 surface->mBackbuffers + surface->mCurrentBackbufferIndex;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500138
139 // Before we reuse a backbuffer, make sure its fences have all signaled so that we can safely
140 // reuse its commands buffers.
John Reck1bcacfd2017-11-03 10:12:19 -0700141 VkResult res =
142 mWaitForFences(mBackendContext->fDevice, 2, backbuffer->mUsageFences, true, UINT64_MAX);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500143 if (res != VK_SUCCESS) {
144 return nullptr;
145 }
146
147 return backbuffer;
148}
149
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500150SkSurface* VulkanManager::getBackbufferSurface(VulkanSurface* surface) {
151 VulkanSurface::BackbufferInfo* backbuffer = getAvailableBackbuffer(surface);
152 SkASSERT(backbuffer);
153
154 VkResult res;
155
156 res = mResetFences(mBackendContext->fDevice, 2, backbuffer->mUsageFences);
157 SkASSERT(VK_SUCCESS == res);
158
159 // The acquire will signal the attached mAcquireSemaphore. We use this to know the image has
160 // finished presenting and that it is safe to begin sending new commands to the returned image.
161 res = mAcquireNextImageKHR(mBackendContext->fDevice, surface->mSwapchain, UINT64_MAX,
John Reck1bcacfd2017-11-03 10:12:19 -0700162 backbuffer->mAcquireSemaphore, VK_NULL_HANDLE,
163 &backbuffer->mImageIndex);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500164
165 if (VK_ERROR_SURFACE_LOST_KHR == res) {
166 // need to figure out how to create a new vkSurface without the platformData*
167 // maybe use attach somehow? but need a Window
168 return nullptr;
169 }
170 if (VK_ERROR_OUT_OF_DATE_KHR == res) {
171 // tear swapchain down and try again
172 if (!createSwapchain(surface)) {
173 return nullptr;
174 }
Greg Daniel45ec62b2017-01-04 14:27:00 -0500175 backbuffer = getAvailableBackbuffer(surface);
176 res = mResetFences(mBackendContext->fDevice, 2, backbuffer->mUsageFences);
177 SkASSERT(VK_SUCCESS == res);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500178
179 // acquire the image
180 res = mAcquireNextImageKHR(mBackendContext->fDevice, surface->mSwapchain, UINT64_MAX,
John Reck1bcacfd2017-11-03 10:12:19 -0700181 backbuffer->mAcquireSemaphore, VK_NULL_HANDLE,
182 &backbuffer->mImageIndex);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500183
184 if (VK_SUCCESS != res) {
185 return nullptr;
186 }
187 }
188
189 // set up layout transfer from initial to color attachment
Greg Danielcd558522016-11-17 13:31:40 -0500190 VkImageLayout layout = surface->mImageInfos[backbuffer->mImageIndex].mImageLayout;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500191 SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout || VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout);
John Reck1bcacfd2017-11-03 10:12:19 -0700192 VkPipelineStageFlags srcStageMask = (VK_IMAGE_LAYOUT_UNDEFINED == layout)
193 ? VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT
194 : VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500195 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
John Reck1bcacfd2017-11-03 10:12:19 -0700196 VkAccessFlags srcAccessMask =
197 (VK_IMAGE_LAYOUT_UNDEFINED == layout) ? 0 : VK_ACCESS_MEMORY_READ_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500198 VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
199
200 VkImageMemoryBarrier imageMemoryBarrier = {
John Reck1bcacfd2017-11-03 10:12:19 -0700201 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
202 NULL, // pNext
203 srcAccessMask, // outputMask
204 dstAccessMask, // inputMask
205 layout, // oldLayout
206 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // newLayout
207 mPresentQueueIndex, // srcQueueFamilyIndex
208 mBackendContext->fGraphicsQueueIndex, // dstQueueFamilyIndex
209 surface->mImages[backbuffer->mImageIndex], // image
210 {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1} // subresourceRange
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500211 };
212 mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[0], 0);
213
214 VkCommandBufferBeginInfo info;
215 memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
216 info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
217 info.flags = 0;
218 mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[0], &info);
219
John Reck1bcacfd2017-11-03 10:12:19 -0700220 mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[0], srcStageMask, dstStageMask, 0, 0,
221 nullptr, 0, nullptr, 1, &imageMemoryBarrier);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500222
223 mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[0]);
224
225 VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
226 // insert the layout transfer into the queue and wait on the acquire
227 VkSubmitInfo submitInfo;
228 memset(&submitInfo, 0, sizeof(VkSubmitInfo));
229 submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
230 submitInfo.waitSemaphoreCount = 1;
231 // Wait to make sure aquire semaphore set above has signaled.
232 submitInfo.pWaitSemaphores = &backbuffer->mAcquireSemaphore;
233 submitInfo.pWaitDstStageMask = &waitDstStageFlags;
234 submitInfo.commandBufferCount = 1;
235 submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[0];
236 submitInfo.signalSemaphoreCount = 0;
237
238 // Attach first fence to submission here so we can track when the command buffer finishes.
239 mQueueSubmit(mBackendContext->fQueue, 1, &submitInfo, backbuffer->mUsageFences[0]);
240
241 // We need to notify Skia that we changed the layout of the wrapped VkImage
Greg Danielcd558522016-11-17 13:31:40 -0500242 sk_sp<SkSurface> skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface;
Greg Daniel1834a8c2018-04-12 12:22:43 -0400243 GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
244 SkSurface::kFlushRead_BackendHandleAccess);
245 if (!backendRT.isValid()) {
246 SkASSERT(backendRT.isValid());
247 return nullptr;
248 }
249 backendRT.setVkImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500250
251 surface->mBackbuffer = std::move(skSurface);
252 return surface->mBackbuffer.get();
253}
254
255void VulkanManager::destroyBuffers(VulkanSurface* surface) {
256 if (surface->mBackbuffers) {
257 for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
258 mWaitForFences(mBackendContext->fDevice, 2, surface->mBackbuffers[i].mUsageFences, true,
John Reck1bcacfd2017-11-03 10:12:19 -0700259 UINT64_MAX);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500260 surface->mBackbuffers[i].mImageIndex = -1;
261 mDestroySemaphore(mBackendContext->fDevice, surface->mBackbuffers[i].mAcquireSemaphore,
John Reck1bcacfd2017-11-03 10:12:19 -0700262 nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500263 mDestroySemaphore(mBackendContext->fDevice, surface->mBackbuffers[i].mRenderSemaphore,
John Reck1bcacfd2017-11-03 10:12:19 -0700264 nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500265 mFreeCommandBuffers(mBackendContext->fDevice, mCommandPool, 2,
John Reck1bcacfd2017-11-03 10:12:19 -0700266 surface->mBackbuffers[i].mTransitionCmdBuffers);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500267 mDestroyFence(mBackendContext->fDevice, surface->mBackbuffers[i].mUsageFences[0], 0);
268 mDestroyFence(mBackendContext->fDevice, surface->mBackbuffers[i].mUsageFences[1], 0);
269 }
270 }
271
272 delete[] surface->mBackbuffers;
273 surface->mBackbuffers = nullptr;
Greg Danielcd558522016-11-17 13:31:40 -0500274 delete[] surface->mImageInfos;
275 surface->mImageInfos = nullptr;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500276 delete[] surface->mImages;
277 surface->mImages = nullptr;
278}
279
280void VulkanManager::destroySurface(VulkanSurface* surface) {
281 // Make sure all submit commands have finished before starting to destroy objects.
282 if (VK_NULL_HANDLE != mPresentQueue) {
283 mQueueWaitIdle(mPresentQueue);
284 }
285 mDeviceWaitIdle(mBackendContext->fDevice);
286
287 destroyBuffers(surface);
288
289 if (VK_NULL_HANDLE != surface->mSwapchain) {
290 mDestroySwapchainKHR(mBackendContext->fDevice, surface->mSwapchain, nullptr);
291 surface->mSwapchain = VK_NULL_HANDLE;
292 }
293
294 if (VK_NULL_HANDLE != surface->mVkSurface) {
295 mDestroySurfaceKHR(mBackendContext->fInstance, surface->mVkSurface, nullptr);
296 surface->mVkSurface = VK_NULL_HANDLE;
297 }
298 delete surface;
299}
300
// Builds the per-swapchain-image state for a surface: queries the swapchain
// images, wraps each in an SkSurface (via a GrBackendRenderTarget), and
// allocates the ring of backbuffer slots (semaphores, transition command
// buffers, and pre-signaled usage fences). Called after every (re)creation of
// the swapchain.
void VulkanManager::createBuffers(VulkanSurface* surface, VkFormat format, VkExtent2D extent) {
    // Standard two-call pattern: first query the count, then fetch the images.
    mGetSwapchainImagesKHR(mBackendContext->fDevice, surface->mSwapchain, &surface->mImageCount,
                           nullptr);
    SkASSERT(surface->mImageCount);
    surface->mImages = new VkImage[surface->mImageCount];
    mGetSwapchainImagesKHR(mBackendContext->fDevice, surface->mSwapchain, &surface->mImageCount,
                           surface->mImages);

    SkSurfaceProps props(0, kUnknown_SkPixelGeometry);

    // set up initial image layouts and create surfaces
    surface->mImageInfos = new VulkanSurface::ImageInfo[surface->mImageCount];
    for (uint32_t i = 0; i < surface->mImageCount; ++i) {
        GrVkImageInfo info;
        info.fImage = surface->mImages[i];
        // Swapchain images are allocated by the driver, so there is no
        // GrVkAlloc to hand to Skia.
        info.fAlloc = GrVkAlloc();
        info.fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
        info.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
        info.fFormat = format;
        info.fLevelCount = 1;

        GrBackendRenderTarget backendRT(extent.width, extent.height, 0, 0, info);

        VulkanSurface::ImageInfo& imageInfo = surface->mImageInfos[i];
        imageInfo.mSurface = SkSurface::MakeFromBackendRenderTarget(
                mRenderThread.getGrContext(), backendRT, kTopLeft_GrSurfaceOrigin,
                kRGBA_8888_SkColorType, nullptr, &props);
    }

    SkASSERT(mCommandPool != VK_NULL_HANDLE);

    // set up the backbuffers
    VkSemaphoreCreateInfo semaphoreInfo;
    memset(&semaphoreInfo, 0, sizeof(VkSemaphoreCreateInfo));
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    VkCommandBufferAllocateInfo commandBuffersInfo;
    memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
    commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    commandBuffersInfo.pNext = nullptr;
    commandBuffersInfo.commandPool = mCommandPool;
    commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    // Two command buffers per slot: [0] transitions into color-attachment
    // layout on acquire, [1] transitions to present layout on swap.
    commandBuffersInfo.commandBufferCount = 2;
    VkFenceCreateInfo fenceInfo;
    memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
    fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    fenceInfo.pNext = nullptr;
    // Fences start signaled so the first wait in getAvailableBackbuffer()
    // returns immediately.
    fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;

    // we create one additional backbuffer structure here, because we want to
    // give the command buffers they contain a chance to finish before we cycle back
    surface->mBackbuffers = new VulkanSurface::BackbufferInfo[surface->mImageCount + 1];
    for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
        SkDEBUGCODE(VkResult res);
        surface->mBackbuffers[i].mImageIndex = -1;
        SkDEBUGCODE(res =) mCreateSemaphore(mBackendContext->fDevice, &semaphoreInfo, nullptr,
                                            &surface->mBackbuffers[i].mAcquireSemaphore);
        SkDEBUGCODE(res =) mCreateSemaphore(mBackendContext->fDevice, &semaphoreInfo, nullptr,
                                            &surface->mBackbuffers[i].mRenderSemaphore);
        SkDEBUGCODE(res =) mAllocateCommandBuffers(mBackendContext->fDevice, &commandBuffersInfo,
                                                   surface->mBackbuffers[i].mTransitionCmdBuffers);
        SkDEBUGCODE(res =) mCreateFence(mBackendContext->fDevice, &fenceInfo, nullptr,
                                        &surface->mBackbuffers[i].mUsageFences[0]);
        SkDEBUGCODE(res =) mCreateFence(mBackendContext->fDevice, &fenceInfo, nullptr,
                                        &surface->mBackbuffers[i].mUsageFences[1]);
        SkASSERT(VK_SUCCESS == res);
    }
    // Start at the extra slot so the first getAvailableBackbuffer() wraps to 0.
    surface->mCurrentBackbufferIndex = surface->mImageCount;
}
371
// (Re)creates the VkSwapchainKHR for a surface: queries the surface's
// capabilities, formats, and present modes; picks extent, image count, usage,
// composite alpha, format (UNORM or sRGB depending on linear-blending build
// flag), and present mode (MAILBOX preferred, FIFO fallback); then creates
// the new swapchain, destroys the old one if any, and rebuilds per-image
// buffers. Returns false on any Vulkan query/creation failure or if no
// acceptable surface format is found.
bool VulkanManager::createSwapchain(VulkanSurface* surface) {
    // check for capabilities
    VkSurfaceCapabilitiesKHR caps;
    VkResult res = mGetPhysicalDeviceSurfaceCapabilitiesKHR(mBackendContext->fPhysicalDevice,
                                                            surface->mVkSurface, &caps);
    if (VK_SUCCESS != res) {
        return false;
    }

    uint32_t surfaceFormatCount;
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mBackendContext->fPhysicalDevice, surface->mVkSurface,
                                              &surfaceFormatCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    FatVector<VkSurfaceFormatKHR, 4> surfaceFormats(surfaceFormatCount);
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mBackendContext->fPhysicalDevice, surface->mVkSurface,
                                              &surfaceFormatCount, surfaceFormats.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    uint32_t presentModeCount;
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mBackendContext->fPhysicalDevice,
                                                   surface->mVkSurface, &presentModeCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    FatVector<VkPresentModeKHR, VK_PRESENT_MODE_RANGE_SIZE_KHR> presentModes(presentModeCount);
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mBackendContext->fPhysicalDevice,
                                                   surface->mVkSurface, &presentModeCount,
                                                   presentModes.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    VkExtent2D extent = caps.currentExtent;
    // clamp width; to handle currentExtent of -1 and protect us from broken hints
    if (extent.width < caps.minImageExtent.width) {
        extent.width = caps.minImageExtent.width;
    }
    SkASSERT(extent.width <= caps.maxImageExtent.width);
    // clamp height
    if (extent.height < caps.minImageExtent.height) {
        extent.height = caps.minImageExtent.height;
    }
    SkASSERT(extent.height <= caps.maxImageExtent.height);

    // Ask for two images beyond the minimum (triple-buffering headroom).
    uint32_t imageCount = caps.minImageCount + 2;
    if (caps.maxImageCount > 0 && imageCount > caps.maxImageCount) {
        // Application must settle for fewer images than desired:
        imageCount = caps.maxImageCount;
    }

    // Currently Skia requires the images to be color attchments and support all transfer
    // operations.
    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    SkASSERT((caps.supportedUsageFlags & usageFlags) == usageFlags);
    SkASSERT(caps.supportedTransforms & caps.currentTransform);
    SkASSERT(caps.supportedCompositeAlpha &
             (VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR | VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR));
    VkCompositeAlphaFlagBitsKHR composite_alpha =
            (caps.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR)
                    ? VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR
                    : VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;

    // Pick our surface format. For now, just make sure it matches our sRGB request:
    VkFormat surfaceFormat = VK_FORMAT_UNDEFINED;
    VkColorSpaceKHR colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;

    bool wantSRGB = false;
#ifdef ANDROID_ENABLE_LINEAR_BLENDING
    wantSRGB = true;
#endif
    for (uint32_t i = 0; i < surfaceFormatCount; ++i) {
        // We are assuming we can get either R8G8B8A8_UNORM or R8G8B8A8_SRGB
        VkFormat desiredFormat = wantSRGB ? VK_FORMAT_R8G8B8A8_SRGB : VK_FORMAT_R8G8B8A8_UNORM;
        if (desiredFormat == surfaceFormats[i].format) {
            surfaceFormat = surfaceFormats[i].format;
            colorSpace = surfaceFormats[i].colorSpace;
        }
    }

    if (VK_FORMAT_UNDEFINED == surfaceFormat) {
        return false;
    }

    // If mailbox mode is available, use it, as it is the lowest-latency non-
    // tearing mode. If not, fall back to FIFO which is always available.
    VkPresentModeKHR mode = VK_PRESENT_MODE_FIFO_KHR;
    for (uint32_t i = 0; i < presentModeCount; ++i) {
        // use mailbox
        if (VK_PRESENT_MODE_MAILBOX_KHR == presentModes[i]) {
            mode = presentModes[i];
            break;
        }
    }

    VkSwapchainCreateInfoKHR swapchainCreateInfo;
    memset(&swapchainCreateInfo, 0, sizeof(VkSwapchainCreateInfoKHR));
    swapchainCreateInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
    swapchainCreateInfo.surface = surface->mVkSurface;
    swapchainCreateInfo.minImageCount = imageCount;
    swapchainCreateInfo.imageFormat = surfaceFormat;
    swapchainCreateInfo.imageColorSpace = colorSpace;
    swapchainCreateInfo.imageExtent = extent;
    swapchainCreateInfo.imageArrayLayers = 1;
    swapchainCreateInfo.imageUsage = usageFlags;

    // Concurrent sharing is only needed when graphics and present are on
    // different queue families.
    uint32_t queueFamilies[] = {mBackendContext->fGraphicsQueueIndex, mPresentQueueIndex};
    if (mBackendContext->fGraphicsQueueIndex != mPresentQueueIndex) {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
        swapchainCreateInfo.queueFamilyIndexCount = 2;
        swapchainCreateInfo.pQueueFamilyIndices = queueFamilies;
    } else {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
        swapchainCreateInfo.queueFamilyIndexCount = 0;
        swapchainCreateInfo.pQueueFamilyIndices = nullptr;
    }

    swapchainCreateInfo.preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
    swapchainCreateInfo.compositeAlpha = composite_alpha;
    swapchainCreateInfo.presentMode = mode;
    swapchainCreateInfo.clipped = true;
    // Passing the old swapchain lets the driver recycle resources on resize.
    swapchainCreateInfo.oldSwapchain = surface->mSwapchain;

    res = mCreateSwapchainKHR(mBackendContext->fDevice, &swapchainCreateInfo, nullptr,
                              &surface->mSwapchain);
    if (VK_SUCCESS != res) {
        return false;
    }

    // destroy the old swapchain
    if (swapchainCreateInfo.oldSwapchain != VK_NULL_HANDLE) {
        mDeviceWaitIdle(mBackendContext->fDevice);

        destroyBuffers(surface);

        mDestroySwapchainKHR(mBackendContext->fDevice, swapchainCreateInfo.oldSwapchain, nullptr);
    }

    createBuffers(surface, surfaceFormat, extent);

    return true;
}
521
// Creates a VulkanSurface for an ANativeWindow: ensures the Vulkan context is
// initialized, creates the VkSurfaceKHR via the Android surface extension,
// verifies (debug-only) that the present queue can present to it, and builds
// the initial swapchain. Returns nullptr (with no leak) if the window is null
// or any step fails; on success the caller owns the returned surface and must
// release it via destroySurface().
VulkanSurface* VulkanManager::createSurface(ANativeWindow* window) {
    initialize();

    if (!window) {
        return nullptr;
    }

    VulkanSurface* surface = new VulkanSurface();

    VkAndroidSurfaceCreateInfoKHR surfaceCreateInfo;
    memset(&surfaceCreateInfo, 0, sizeof(VkAndroidSurfaceCreateInfoKHR));
    surfaceCreateInfo.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
    surfaceCreateInfo.pNext = nullptr;
    surfaceCreateInfo.flags = 0;
    surfaceCreateInfo.window = window;

    VkResult res = mCreateAndroidSurfaceKHR(mBackendContext->fInstance, &surfaceCreateInfo, nullptr,
                                            &surface->mVkSurface);
    if (VK_SUCCESS != res) {
        delete surface;
        return nullptr;
    }

    // Debug-only sanity check that the chosen present queue supports this
    // surface; compiled out entirely in release builds.
    SkDEBUGCODE(VkBool32 supported; res = mGetPhysicalDeviceSurfaceSupportKHR(
                                            mBackendContext->fPhysicalDevice, mPresentQueueIndex,
                                            surface->mVkSurface, &supported);
                // All physical devices and queue families on Android must be capable of
                // presentation with any
                // native window.
                SkASSERT(VK_SUCCESS == res && supported););

    if (!createSwapchain(surface)) {
        destroySurface(surface);
        return nullptr;
    }

    return surface;
}
560
561// Helper to know which src stage flags we need to set when transitioning to the present layout
562static VkPipelineStageFlags layoutToPipelineStageFlags(const VkImageLayout layout) {
563 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
564 return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
565 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
566 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
567 return VK_PIPELINE_STAGE_TRANSFER_BIT;
568 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout ||
569 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
570 VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout ||
571 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
572 return VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
573 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
574 return VK_PIPELINE_STAGE_HOST_BIT;
575 }
576
577 SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
578 return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
579}
580
581// Helper to know which src access mask we need to set when transitioning to the present layout
582static VkAccessFlags layoutToSrcAccessMask(const VkImageLayout layout) {
583 VkAccessFlags flags = 0;
584 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
585 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
John Reck1bcacfd2017-11-03 10:12:19 -0700586 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT |
587 VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_HOST_WRITE_BIT |
588 VK_ACCESS_HOST_READ_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500589 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
590 flags = VK_ACCESS_HOST_WRITE_BIT;
591 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
592 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
593 } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
594 flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
595 } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
596 flags = VK_ACCESS_TRANSFER_WRITE_BIT;
597 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
598 flags = VK_ACCESS_TRANSFER_READ_BIT;
599 } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
600 flags = VK_ACCESS_SHADER_READ_BIT;
601 }
602 return flags;
603}
604
// Presents the current backbuffer: records and submits a barrier that
// transitions the image from its current layout to PRESENT_SRC_KHR (and
// transfers queue-family ownership from graphics to present), then queues the
// present operation gated on the render semaphore. Also updates the per-image
// bookkeeping used for buffer-age tracking.
void VulkanManager::swapBuffers(VulkanSurface* surface) {
    // Debug/profiling property: serialize the GPU before every swap.
    if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
        ATRACE_NAME("Finishing GPU work");
        mDeviceWaitIdle(mBackendContext->fDevice);
    }

    SkASSERT(surface->mBackbuffers);
    VulkanSurface::BackbufferInfo* backbuffer =
            surface->mBackbuffers + surface->mCurrentBackbufferIndex;

    SkSurface* skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface.get();
    GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
            SkSurface::kFlushRead_BackendHandleAccess);
    SkASSERT(backendRT.isValid());

    GrVkImageInfo imageInfo;
    SkAssertResult(backendRT.getVkImageInfo(&imageInfo));

    // Check to make sure we never change the actually wrapped image
    SkASSERT(imageInfo.fImage == surface->mImages[backbuffer->mImageIndex]);

    // We need to transition the image to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR and make sure that all
    // previous work is complete for before presenting. So we first add the necessary barrier here.
    // Skia may have left the image in any layout, so derive the source stage
    // and access masks from the layout it reports.
    VkImageLayout layout = imageInfo.fImageLayout;
    VkPipelineStageFlags srcStageMask = layoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = layoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;

    VkImageMemoryBarrier imageMemoryBarrier = {
            VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,    // sType
            NULL,                                      // pNext
            srcAccessMask,                             // outputMask
            dstAccessMask,                             // inputMask
            layout,                                    // oldLayout
            VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,           // newLayout
            mBackendContext->fGraphicsQueueIndex,      // srcQueueFamilyIndex
            mPresentQueueIndex,                        // dstQueueFamilyIndex
            surface->mImages[backbuffer->mImageIndex], // image
            {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}    // subresourceRange
    };

    // Command buffer [1] of the slot is dedicated to the present transition.
    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[1], 0);
    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[1], &info);
    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[1], srcStageMask, dstStageMask, 0, 0,
                        nullptr, 0, nullptr, 1, &imageMemoryBarrier);
    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[1]);

    // Record the layout so the next acquire knows what to transition from.
    surface->mImageInfos[backbuffer->mImageIndex].mImageLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 0;
    submitInfo.pWaitDstStageMask = 0;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[1];
    submitInfo.signalSemaphoreCount = 1;
    // When this command buffer finishes we will signal this semaphore so that we know it is now
    // safe to present the image to the screen.
    submitInfo.pSignalSemaphores = &backbuffer->mRenderSemaphore;

    // Attach second fence to submission here so we can track when the command buffer finishes.
    mQueueSubmit(mBackendContext->fQueue, 1, &submitInfo, backbuffer->mUsageFences[1]);

    // Submit present operation to present queue. We use a semaphore here to make sure all rendering
    // to the image is complete and that the layout has been change to present on the graphics
    // queue.
    const VkPresentInfoKHR presentInfo = {
            VK_STRUCTURE_TYPE_PRESENT_INFO_KHR, // sType
            NULL,                               // pNext
            1,                                  // waitSemaphoreCount
            &backbuffer->mRenderSemaphore,      // pWaitSemaphores
            1,                                  // swapchainCount
            &surface->mSwapchain,               // pSwapchains
            &backbuffer->mImageIndex,           // pImageIndices
            NULL                                // pResults
    };

    mQueuePresentKHR(mPresentQueue, &presentInfo);

    // Bookkeeping for buffer-age tracking (see getAge()).
    surface->mBackbuffer.reset();
    surface->mImageInfos[backbuffer->mImageIndex].mLastUsed = surface->mCurrentTime;
    surface->mImageInfos[backbuffer->mImageIndex].mInvalid = false;
    surface->mCurrentTime++;
}
696
697int VulkanManager::getAge(VulkanSurface* surface) {
Greg Daniel74ea2012017-11-10 11:32:58 -0500698 SkASSERT(surface->mBackbuffers);
John Reck1bcacfd2017-11-03 10:12:19 -0700699 VulkanSurface::BackbufferInfo* backbuffer =
700 surface->mBackbuffers + surface->mCurrentBackbufferIndex;
701 if (mSwapBehavior == SwapBehavior::Discard ||
702 surface->mImageInfos[backbuffer->mImageIndex].mInvalid) {
Greg Danielcd558522016-11-17 13:31:40 -0500703 return 0;
704 }
705 uint16_t lastUsed = surface->mImageInfos[backbuffer->mImageIndex].mLastUsed;
706 return surface->mCurrentTime - lastUsed;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500707}
708
709} /* namespace renderthread */
710} /* namespace uirenderer */
711} /* namespace android */