blob: 3272d697e2225935ce15b5af395564b646ee1555 [file] [log] [blame]
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001/*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "VulkanManager.h"
18
19#include "DeviceInfo.h"
Greg Danielcd558522016-11-17 13:31:40 -050020#include "Properties.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050021#include "RenderThread.h"
Greg Daniel45ec62b2017-01-04 14:27:00 -050022#include "renderstate/RenderState.h"
Ben Wagnereec27d52017-01-11 15:32:07 -050023#include "utils/FatVector.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050024
Greg Danielac2d2322017-07-12 11:30:15 -040025#include <GrBackendSurface.h>
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050026#include <GrContext.h>
27#include <GrTypes.h>
28#include <vk/GrVkTypes.h>
29
30namespace android {
31namespace uirenderer {
32namespace renderthread {
33
// Resolve a Vulkan entry point into the matching m-prefixed member function
// pointer, e.g. GET_PROC(CreateFence) sets mCreateFence from "vkCreateFence".
// GET_PROC uses the instance loader; GET_DEV_PROC uses the device loader and
// expects locals named `instance` / `device` to be in scope at the call site.
#define GET_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(instance, "vk" #F)
#define GET_DEV_PROC(F) m##F = (PFN_vk##F)vkGetDeviceProcAddr(device, "vk" #F)

// The manager is created idle; the Vulkan context itself is built lazily in
// initialize(). mRenderThread is a reference, so it must be bound here.
VulkanManager::VulkanManager(RenderThread& thread) : mRenderThread(thread) {}
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050038
39void VulkanManager::destroy() {
40 if (!hasVkContext()) return;
41
Greg Daniel45ec62b2017-01-04 14:27:00 -050042 mRenderThread.renderState().onVkContextDestroyed();
43 mRenderThread.setGrContext(nullptr);
44
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050045 if (VK_NULL_HANDLE != mCommandPool) {
46 mDestroyCommandPool(mBackendContext->fDevice, mCommandPool, nullptr);
47 mCommandPool = VK_NULL_HANDLE;
48 }
Greg Daniel45ec62b2017-01-04 14:27:00 -050049 mBackendContext.reset();
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050050}
51
// Lazily brings up the Vulkan backend: creates the GrVkBackendContext
// (instance + device) via Skia, resolves the extension entry points this
// class calls through member function pointers, creates the command pool
// used for swapchain layout-transition command buffers, fetches the present
// queue, and hands a fresh GrContext to the render thread. No-op if a
// context already exists.
void VulkanManager::initialize() {
    if (hasVkContext()) {
        return;
    }

    // Presentation support callback for device selection; on Android every
    // queue family is assumed presentable, so accept unconditionally.
    auto canPresent = [](VkInstance, VkPhysicalDevice, uint32_t) { return true; };

    mBackendContext.reset(GrVkBackendContext::Create(vkGetInstanceProcAddr, vkGetDeviceProcAddr,
                                                     &mPresentQueueIndex, canPresent));

    // Get all the addresses of needed vulkan functions. The `instance` and
    // `device` locals are consumed by the GET_PROC/GET_DEV_PROC macros.
    VkInstance instance = mBackendContext->fInstance;
    VkDevice device = mBackendContext->fDevice;
    GET_PROC(CreateAndroidSurfaceKHR);
    GET_PROC(DestroySurfaceKHR);
    GET_PROC(GetPhysicalDeviceSurfaceSupportKHR);
    GET_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
    GET_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
    GET_PROC(GetPhysicalDeviceSurfacePresentModesKHR);
    GET_DEV_PROC(CreateSwapchainKHR);
    GET_DEV_PROC(DestroySwapchainKHR);
    GET_DEV_PROC(GetSwapchainImagesKHR);
    GET_DEV_PROC(AcquireNextImageKHR);
    GET_DEV_PROC(QueuePresentKHR);
    GET_DEV_PROC(CreateCommandPool);
    GET_DEV_PROC(DestroyCommandPool);
    GET_DEV_PROC(AllocateCommandBuffers);
    GET_DEV_PROC(FreeCommandBuffers);
    GET_DEV_PROC(ResetCommandBuffer);
    GET_DEV_PROC(BeginCommandBuffer);
    GET_DEV_PROC(EndCommandBuffer);
    GET_DEV_PROC(CmdPipelineBarrier);
    GET_DEV_PROC(GetDeviceQueue);
    GET_DEV_PROC(QueueSubmit);
    GET_DEV_PROC(QueueWaitIdle);
    GET_DEV_PROC(DeviceWaitIdle);
    GET_DEV_PROC(CreateSemaphore);
    GET_DEV_PROC(DestroySemaphore);
    GET_DEV_PROC(CreateFence);
    GET_DEV_PROC(DestroyFence);
    GET_DEV_PROC(WaitForFences);
    GET_DEV_PROC(ResetFences);

    // create the command pool for the command buffers
    if (VK_NULL_HANDLE == mCommandPool) {
        VkCommandPoolCreateInfo commandPoolInfo;
        memset(&commandPoolInfo, 0, sizeof(VkCommandPoolCreateInfo));
        commandPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
        // this needs to be on the render queue
        commandPoolInfo.queueFamilyIndex = mBackendContext->fGraphicsQueueIndex;
        // RESET_COMMAND_BUFFER allows per-buffer vkResetCommandBuffer reuse.
        commandPoolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
        SkDEBUGCODE(VkResult res =) mCreateCommandPool(mBackendContext->fDevice, &commandPoolInfo,
                                                       nullptr, &mCommandPool);
        SkASSERT(VK_SUCCESS == res);
    }

    mGetDeviceQueue(mBackendContext->fDevice, mPresentQueueIndex, 0, &mPresentQueue);

    mRenderThread.setGrContext(
            GrContext::Create(kVulkan_GrBackend, (GrBackendContext)mBackendContext.get()));
    DeviceInfo::initialize(mRenderThread.getGrContext()->caps()->maxRenderTargetSize());

    // Buffer-age-based partial updates are opt-in via system properties.
    if (Properties::enablePartialUpdates && Properties::useBufferAge) {
        mSwapBehavior = SwapBehavior::BufferAge;
    }

    mRenderThread.renderState().onVkContextCreated();
}
120
121// Returns the next BackbufferInfo to use for the next draw. The function will make sure all
122// previous uses have finished before returning.
123VulkanSurface::BackbufferInfo* VulkanManager::getAvailableBackbuffer(VulkanSurface* surface) {
124 SkASSERT(surface->mBackbuffers);
125
126 ++surface->mCurrentBackbufferIndex;
127 if (surface->mCurrentBackbufferIndex > surface->mImageCount) {
128 surface->mCurrentBackbufferIndex = 0;
129 }
130
John Reck1bcacfd2017-11-03 10:12:19 -0700131 VulkanSurface::BackbufferInfo* backbuffer =
132 surface->mBackbuffers + surface->mCurrentBackbufferIndex;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500133
134 // Before we reuse a backbuffer, make sure its fences have all signaled so that we can safely
135 // reuse its commands buffers.
John Reck1bcacfd2017-11-03 10:12:19 -0700136 VkResult res =
137 mWaitForFences(mBackendContext->fDevice, 2, backbuffer->mUsageFences, true, UINT64_MAX);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500138 if (res != VK_SUCCESS) {
139 return nullptr;
140 }
141
142 return backbuffer;
143}
144
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500145SkSurface* VulkanManager::getBackbufferSurface(VulkanSurface* surface) {
146 VulkanSurface::BackbufferInfo* backbuffer = getAvailableBackbuffer(surface);
147 SkASSERT(backbuffer);
148
149 VkResult res;
150
151 res = mResetFences(mBackendContext->fDevice, 2, backbuffer->mUsageFences);
152 SkASSERT(VK_SUCCESS == res);
153
154 // The acquire will signal the attached mAcquireSemaphore. We use this to know the image has
155 // finished presenting and that it is safe to begin sending new commands to the returned image.
156 res = mAcquireNextImageKHR(mBackendContext->fDevice, surface->mSwapchain, UINT64_MAX,
John Reck1bcacfd2017-11-03 10:12:19 -0700157 backbuffer->mAcquireSemaphore, VK_NULL_HANDLE,
158 &backbuffer->mImageIndex);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500159
160 if (VK_ERROR_SURFACE_LOST_KHR == res) {
161 // need to figure out how to create a new vkSurface without the platformData*
162 // maybe use attach somehow? but need a Window
163 return nullptr;
164 }
165 if (VK_ERROR_OUT_OF_DATE_KHR == res) {
166 // tear swapchain down and try again
167 if (!createSwapchain(surface)) {
168 return nullptr;
169 }
Greg Daniel45ec62b2017-01-04 14:27:00 -0500170 backbuffer = getAvailableBackbuffer(surface);
171 res = mResetFences(mBackendContext->fDevice, 2, backbuffer->mUsageFences);
172 SkASSERT(VK_SUCCESS == res);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500173
174 // acquire the image
175 res = mAcquireNextImageKHR(mBackendContext->fDevice, surface->mSwapchain, UINT64_MAX,
John Reck1bcacfd2017-11-03 10:12:19 -0700176 backbuffer->mAcquireSemaphore, VK_NULL_HANDLE,
177 &backbuffer->mImageIndex);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500178
179 if (VK_SUCCESS != res) {
180 return nullptr;
181 }
182 }
183
184 // set up layout transfer from initial to color attachment
Greg Danielcd558522016-11-17 13:31:40 -0500185 VkImageLayout layout = surface->mImageInfos[backbuffer->mImageIndex].mImageLayout;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500186 SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout || VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout);
John Reck1bcacfd2017-11-03 10:12:19 -0700187 VkPipelineStageFlags srcStageMask = (VK_IMAGE_LAYOUT_UNDEFINED == layout)
188 ? VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT
189 : VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500190 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
John Reck1bcacfd2017-11-03 10:12:19 -0700191 VkAccessFlags srcAccessMask =
192 (VK_IMAGE_LAYOUT_UNDEFINED == layout) ? 0 : VK_ACCESS_MEMORY_READ_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500193 VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
194
195 VkImageMemoryBarrier imageMemoryBarrier = {
John Reck1bcacfd2017-11-03 10:12:19 -0700196 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
197 NULL, // pNext
198 srcAccessMask, // outputMask
199 dstAccessMask, // inputMask
200 layout, // oldLayout
201 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // newLayout
202 mPresentQueueIndex, // srcQueueFamilyIndex
203 mBackendContext->fGraphicsQueueIndex, // dstQueueFamilyIndex
204 surface->mImages[backbuffer->mImageIndex], // image
205 {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1} // subresourceRange
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500206 };
207 mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[0], 0);
208
209 VkCommandBufferBeginInfo info;
210 memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
211 info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
212 info.flags = 0;
213 mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[0], &info);
214
John Reck1bcacfd2017-11-03 10:12:19 -0700215 mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[0], srcStageMask, dstStageMask, 0, 0,
216 nullptr, 0, nullptr, 1, &imageMemoryBarrier);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500217
218 mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[0]);
219
220 VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
221 // insert the layout transfer into the queue and wait on the acquire
222 VkSubmitInfo submitInfo;
223 memset(&submitInfo, 0, sizeof(VkSubmitInfo));
224 submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
225 submitInfo.waitSemaphoreCount = 1;
226 // Wait to make sure aquire semaphore set above has signaled.
227 submitInfo.pWaitSemaphores = &backbuffer->mAcquireSemaphore;
228 submitInfo.pWaitDstStageMask = &waitDstStageFlags;
229 submitInfo.commandBufferCount = 1;
230 submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[0];
231 submitInfo.signalSemaphoreCount = 0;
232
233 // Attach first fence to submission here so we can track when the command buffer finishes.
234 mQueueSubmit(mBackendContext->fQueue, 1, &submitInfo, backbuffer->mUsageFences[0]);
235
236 // We need to notify Skia that we changed the layout of the wrapped VkImage
237 GrVkImageInfo* imageInfo;
Greg Danielcd558522016-11-17 13:31:40 -0500238 sk_sp<SkSurface> skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500239 skSurface->getRenderTargetHandle((GrBackendObject*)&imageInfo,
John Reck1bcacfd2017-11-03 10:12:19 -0700240 SkSurface::kFlushRead_BackendHandleAccess);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500241 imageInfo->updateImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
242
243 surface->mBackbuffer = std::move(skSurface);
244 return surface->mBackbuffer.get();
245}
246
247void VulkanManager::destroyBuffers(VulkanSurface* surface) {
248 if (surface->mBackbuffers) {
249 for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
250 mWaitForFences(mBackendContext->fDevice, 2, surface->mBackbuffers[i].mUsageFences, true,
John Reck1bcacfd2017-11-03 10:12:19 -0700251 UINT64_MAX);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500252 surface->mBackbuffers[i].mImageIndex = -1;
253 mDestroySemaphore(mBackendContext->fDevice, surface->mBackbuffers[i].mAcquireSemaphore,
John Reck1bcacfd2017-11-03 10:12:19 -0700254 nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500255 mDestroySemaphore(mBackendContext->fDevice, surface->mBackbuffers[i].mRenderSemaphore,
John Reck1bcacfd2017-11-03 10:12:19 -0700256 nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500257 mFreeCommandBuffers(mBackendContext->fDevice, mCommandPool, 2,
John Reck1bcacfd2017-11-03 10:12:19 -0700258 surface->mBackbuffers[i].mTransitionCmdBuffers);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500259 mDestroyFence(mBackendContext->fDevice, surface->mBackbuffers[i].mUsageFences[0], 0);
260 mDestroyFence(mBackendContext->fDevice, surface->mBackbuffers[i].mUsageFences[1], 0);
261 }
262 }
263
264 delete[] surface->mBackbuffers;
265 surface->mBackbuffers = nullptr;
Greg Danielcd558522016-11-17 13:31:40 -0500266 delete[] surface->mImageInfos;
267 surface->mImageInfos = nullptr;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500268 delete[] surface->mImages;
269 surface->mImages = nullptr;
270}
271
272void VulkanManager::destroySurface(VulkanSurface* surface) {
273 // Make sure all submit commands have finished before starting to destroy objects.
274 if (VK_NULL_HANDLE != mPresentQueue) {
275 mQueueWaitIdle(mPresentQueue);
276 }
277 mDeviceWaitIdle(mBackendContext->fDevice);
278
279 destroyBuffers(surface);
280
281 if (VK_NULL_HANDLE != surface->mSwapchain) {
282 mDestroySwapchainKHR(mBackendContext->fDevice, surface->mSwapchain, nullptr);
283 surface->mSwapchain = VK_NULL_HANDLE;
284 }
285
286 if (VK_NULL_HANDLE != surface->mVkSurface) {
287 mDestroySurfaceKHR(mBackendContext->fInstance, surface->mVkSurface, nullptr);
288 surface->mVkSurface = VK_NULL_HANDLE;
289 }
290 delete surface;
291}
292
// Builds the per-swapchain-image bookkeeping for a surface: fetches the
// swapchain images, wraps each in an SkSurface render target, and allocates
// mImageCount + 1 BackbufferInfo slots, each with two semaphores
// (acquire/render), two transition command buffers, and two fences created
// pre-signaled so the first wait on them succeeds immediately.
void VulkanManager::createBuffers(VulkanSurface* surface, VkFormat format, VkExtent2D extent) {
    // First call gets the count, second fills the image handles.
    mGetSwapchainImagesKHR(mBackendContext->fDevice, surface->mSwapchain, &surface->mImageCount,
                           nullptr);
    SkASSERT(surface->mImageCount);
    surface->mImages = new VkImage[surface->mImageCount];
    mGetSwapchainImagesKHR(mBackendContext->fDevice, surface->mSwapchain, &surface->mImageCount,
                           surface->mImages);

    SkSurfaceProps props(0, kUnknown_SkPixelGeometry);

    // set up initial image layouts and create surfaces
    surface->mImageInfos = new VulkanSurface::ImageInfo[surface->mImageCount];
    for (uint32_t i = 0; i < surface->mImageCount; ++i) {
        GrVkImageInfo info;
        info.fImage = surface->mImages[i];
        // Swapchain images are driver-owned; no GrVkAlloc backing.
        info.fAlloc = {VK_NULL_HANDLE, 0, 0, 0};
        info.fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
        info.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
        info.fFormat = format;
        info.fLevelCount = 1;

        GrBackendRenderTarget backendRT(extent.width, extent.height, 0, 0, info);

        VulkanSurface::ImageInfo& imageInfo = surface->mImageInfos[i];
        imageInfo.mSurface = SkSurface::MakeFromBackendRenderTarget(
                mRenderThread.getGrContext(), backendRT, kTopLeft_GrSurfaceOrigin, nullptr, &props);
    }

    SkASSERT(mCommandPool != VK_NULL_HANDLE);

    // set up the backbuffers
    VkSemaphoreCreateInfo semaphoreInfo;
    memset(&semaphoreInfo, 0, sizeof(VkSemaphoreCreateInfo));
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    VkCommandBufferAllocateInfo commandBuffersInfo;
    memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
    commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    commandBuffersInfo.pNext = nullptr;
    commandBuffersInfo.commandPool = mCommandPool;
    commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    commandBuffersInfo.commandBufferCount = 2;
    VkFenceCreateInfo fenceInfo;
    memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
    fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    fenceInfo.pNext = nullptr;
    // Created signaled so the first getAvailableBackbuffer() wait passes.
    fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;

    // we create one additional backbuffer structure here, because we want to
    // give the command buffers they contain a chance to finish before we cycle back
    surface->mBackbuffers = new VulkanSurface::BackbufferInfo[surface->mImageCount + 1];
    for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
        SkDEBUGCODE(VkResult res);
        surface->mBackbuffers[i].mImageIndex = -1;
        SkDEBUGCODE(res =) mCreateSemaphore(mBackendContext->fDevice, &semaphoreInfo, nullptr,
                                            &surface->mBackbuffers[i].mAcquireSemaphore);
        SkDEBUGCODE(res =) mCreateSemaphore(mBackendContext->fDevice, &semaphoreInfo, nullptr,
                                            &surface->mBackbuffers[i].mRenderSemaphore);
        SkDEBUGCODE(res =) mAllocateCommandBuffers(mBackendContext->fDevice, &commandBuffersInfo,
                                                   surface->mBackbuffers[i].mTransitionCmdBuffers);
        SkDEBUGCODE(res =) mCreateFence(mBackendContext->fDevice, &fenceInfo, nullptr,
                                        &surface->mBackbuffers[i].mUsageFences[0]);
        SkDEBUGCODE(res =) mCreateFence(mBackendContext->fDevice, &fenceInfo, nullptr,
                                        &surface->mBackbuffers[i].mUsageFences[1]);
        // NOTE: only the last call's result is asserted; earlier failures in
        // this chain would go unnoticed even in debug builds.
        SkASSERT(VK_SUCCESS == res);
    }
    // Start at the last slot so the first advance wraps to index 0.
    surface->mCurrentBackbufferIndex = surface->mImageCount;
}
362
// (Re)creates the swapchain for a surface: queries surface capabilities,
// formats, and present modes; clamps the extent; picks RGBA8 (sRGB when
// linear blending is enabled), mailbox-over-FIFO present mode, and a
// composite-alpha mode; then creates the swapchain (passing the old one for
// resource reuse), tears down the old one, and rebuilds the per-image
// buffers. Returns false on any Vulkan query/creation failure or if no
// acceptable surface format is found.
bool VulkanManager::createSwapchain(VulkanSurface* surface) {
    // check for capabilities
    VkSurfaceCapabilitiesKHR caps;
    VkResult res = mGetPhysicalDeviceSurfaceCapabilitiesKHR(mBackendContext->fPhysicalDevice,
                                                            surface->mVkSurface, &caps);
    if (VK_SUCCESS != res) {
        return false;
    }

    uint32_t surfaceFormatCount;
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mBackendContext->fPhysicalDevice, surface->mVkSurface,
                                              &surfaceFormatCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    FatVector<VkSurfaceFormatKHR, 4> surfaceFormats(surfaceFormatCount);
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mBackendContext->fPhysicalDevice, surface->mVkSurface,
                                              &surfaceFormatCount, surfaceFormats.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    uint32_t presentModeCount;
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mBackendContext->fPhysicalDevice,
                                                   surface->mVkSurface, &presentModeCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    FatVector<VkPresentModeKHR, VK_PRESENT_MODE_RANGE_SIZE_KHR> presentModes(presentModeCount);
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mBackendContext->fPhysicalDevice,
                                                   surface->mVkSurface, &presentModeCount,
                                                   presentModes.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    VkExtent2D extent = caps.currentExtent;
    // clamp width; to handle currentExtent of -1 and protect us from broken hints
    if (extent.width < caps.minImageExtent.width) {
        extent.width = caps.minImageExtent.width;
    }
    SkASSERT(extent.width <= caps.maxImageExtent.width);
    // clamp height
    if (extent.height < caps.minImageExtent.height) {
        extent.height = caps.minImageExtent.height;
    }
    SkASSERT(extent.height <= caps.maxImageExtent.height);

    // Ask for two images beyond the minimum (triple-buffering headroom),
    // but never exceed the device maximum (0 means "no limit").
    uint32_t imageCount = caps.minImageCount + 2;
    if (caps.maxImageCount > 0 && imageCount > caps.maxImageCount) {
        // Application must settle for fewer images than desired:
        imageCount = caps.maxImageCount;
    }

    // Currently Skia requires the images to be color attachments and support all transfer
    // operations.
    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    SkASSERT((caps.supportedUsageFlags & usageFlags) == usageFlags);
    SkASSERT(caps.supportedTransforms & caps.currentTransform);
    SkASSERT(caps.supportedCompositeAlpha &
             (VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR | VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR));
    VkCompositeAlphaFlagBitsKHR composite_alpha =
            (caps.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR)
                    ? VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR
                    : VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;

    // Pick our surface format. For now, just make sure it matches our sRGB request:
    VkFormat surfaceFormat = VK_FORMAT_UNDEFINED;
    VkColorSpaceKHR colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;

    bool wantSRGB = false;
#ifdef ANDROID_ENABLE_LINEAR_BLENDING
    wantSRGB = true;
#endif
    for (uint32_t i = 0; i < surfaceFormatCount; ++i) {
        // We are assuming we can get either R8G8B8A8_UNORM or R8G8B8A8_SRGB
        VkFormat desiredFormat = wantSRGB ? VK_FORMAT_R8G8B8A8_SRGB : VK_FORMAT_R8G8B8A8_UNORM;
        if (desiredFormat == surfaceFormats[i].format) {
            surfaceFormat = surfaceFormats[i].format;
            colorSpace = surfaceFormats[i].colorSpace;
        }
    }

    if (VK_FORMAT_UNDEFINED == surfaceFormat) {
        return false;
    }

    // If mailbox mode is available, use it, as it is the lowest-latency non-
    // tearing mode. If not, fall back to FIFO which is always available.
    VkPresentModeKHR mode = VK_PRESENT_MODE_FIFO_KHR;
    for (uint32_t i = 0; i < presentModeCount; ++i) {
        // use mailbox
        if (VK_PRESENT_MODE_MAILBOX_KHR == presentModes[i]) {
            mode = presentModes[i];
            break;
        }
    }

    VkSwapchainCreateInfoKHR swapchainCreateInfo;
    memset(&swapchainCreateInfo, 0, sizeof(VkSwapchainCreateInfoKHR));
    swapchainCreateInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
    swapchainCreateInfo.surface = surface->mVkSurface;
    swapchainCreateInfo.minImageCount = imageCount;
    swapchainCreateInfo.imageFormat = surfaceFormat;
    swapchainCreateInfo.imageColorSpace = colorSpace;
    swapchainCreateInfo.imageExtent = extent;
    swapchainCreateInfo.imageArrayLayers = 1;
    swapchainCreateInfo.imageUsage = usageFlags;

    // If graphics and present live on different queue families, the images
    // must be shared between them (CONCURRENT); otherwise EXCLUSIVE is cheaper.
    uint32_t queueFamilies[] = {mBackendContext->fGraphicsQueueIndex, mPresentQueueIndex};
    if (mBackendContext->fGraphicsQueueIndex != mPresentQueueIndex) {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
        swapchainCreateInfo.queueFamilyIndexCount = 2;
        swapchainCreateInfo.pQueueFamilyIndices = queueFamilies;
    } else {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
        swapchainCreateInfo.queueFamilyIndexCount = 0;
        swapchainCreateInfo.pQueueFamilyIndices = nullptr;
    }

    swapchainCreateInfo.preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
    swapchainCreateInfo.compositeAlpha = composite_alpha;
    swapchainCreateInfo.presentMode = mode;
    swapchainCreateInfo.clipped = true;
    // Passing the old swapchain lets the driver recycle its resources.
    swapchainCreateInfo.oldSwapchain = surface->mSwapchain;

    res = mCreateSwapchainKHR(mBackendContext->fDevice, &swapchainCreateInfo, nullptr,
                              &surface->mSwapchain);
    if (VK_SUCCESS != res) {
        return false;
    }

    // destroy the old swapchain
    if (swapchainCreateInfo.oldSwapchain != VK_NULL_HANDLE) {
        mDeviceWaitIdle(mBackendContext->fDevice);

        destroyBuffers(surface);

        mDestroySwapchainKHR(mBackendContext->fDevice, swapchainCreateInfo.oldSwapchain, nullptr);
    }

    createBuffers(surface, surfaceFormat, extent);

    return true;
}
512
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500513VulkanSurface* VulkanManager::createSurface(ANativeWindow* window) {
514 initialize();
515
516 if (!window) {
517 return nullptr;
518 }
519
520 VulkanSurface* surface = new VulkanSurface();
521
522 VkAndroidSurfaceCreateInfoKHR surfaceCreateInfo;
523 memset(&surfaceCreateInfo, 0, sizeof(VkAndroidSurfaceCreateInfoKHR));
524 surfaceCreateInfo.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
525 surfaceCreateInfo.pNext = nullptr;
526 surfaceCreateInfo.flags = 0;
527 surfaceCreateInfo.window = window;
528
John Reck1bcacfd2017-11-03 10:12:19 -0700529 VkResult res = mCreateAndroidSurfaceKHR(mBackendContext->fInstance, &surfaceCreateInfo, nullptr,
530 &surface->mVkSurface);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500531 if (VK_SUCCESS != res) {
532 delete surface;
533 return nullptr;
534 }
535
John Reck1bcacfd2017-11-03 10:12:19 -0700536 SkDEBUGCODE(VkBool32 supported; res = mGetPhysicalDeviceSurfaceSupportKHR(
537 mBackendContext->fPhysicalDevice, mPresentQueueIndex,
538 surface->mVkSurface, &supported);
539 // All physical devices and queue families on Android must be capable of
540 // presentation with any
541 // native window.
542 SkASSERT(VK_SUCCESS == res && supported););
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500543
544 if (!createSwapchain(surface)) {
545 destroySurface(surface);
546 return nullptr;
547 }
548
549 return surface;
550}
551
552// Helper to know which src stage flags we need to set when transitioning to the present layout
553static VkPipelineStageFlags layoutToPipelineStageFlags(const VkImageLayout layout) {
554 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
555 return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
556 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
557 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
558 return VK_PIPELINE_STAGE_TRANSFER_BIT;
559 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout ||
560 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
561 VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout ||
562 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
563 return VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
564 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
565 return VK_PIPELINE_STAGE_HOST_BIT;
566 }
567
568 SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
569 return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
570}
571
572// Helper to know which src access mask we need to set when transitioning to the present layout
573static VkAccessFlags layoutToSrcAccessMask(const VkImageLayout layout) {
574 VkAccessFlags flags = 0;
575 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
576 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
John Reck1bcacfd2017-11-03 10:12:19 -0700577 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT |
578 VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_HOST_WRITE_BIT |
579 VK_ACCESS_HOST_READ_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500580 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
581 flags = VK_ACCESS_HOST_WRITE_BIT;
582 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
583 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
584 } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
585 flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
586 } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
587 flags = VK_ACCESS_TRANSFER_WRITE_BIT;
588 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
589 flags = VK_ACCESS_TRANSFER_READ_BIT;
590 } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
591 flags = VK_ACCESS_SHADER_READ_BIT;
592 }
593 return flags;
594}
595
// Presents the current backbuffer: records and submits a barrier that
// transitions the image to PRESENT_SRC_KHR and transfers queue-family
// ownership (graphics -> present), guarded by the second usage fence and
// signaling the render semaphore, then queues the present operation waiting
// on that semaphore. Also updates the per-image age bookkeeping used by
// getAge().
void VulkanManager::swapBuffers(VulkanSurface* surface) {
    // Debug/profiling option: fully drain the GPU before presenting.
    if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
        ATRACE_NAME("Finishing GPU work");
        mDeviceWaitIdle(mBackendContext->fDevice);
    }

    VulkanSurface::BackbufferInfo* backbuffer =
            surface->mBackbuffers + surface->mCurrentBackbufferIndex;
    GrVkImageInfo* imageInfo;
    SkSurface* skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface.get();
    // Flush pending Skia work and fetch the image's current layout.
    skSurface->getRenderTargetHandle((GrBackendObject*)&imageInfo,
                                     SkSurface::kFlushRead_BackendHandleAccess);
    // Check to make sure we never change the actually wrapped image
    SkASSERT(imageInfo->fImage == surface->mImages[backbuffer->mImageIndex]);

    // We need to transition the image to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR and make sure that all
    // previous work is complete before presenting. So we first add the necessary barrier here.
    VkImageLayout layout = imageInfo->fImageLayout;
    VkPipelineStageFlags srcStageMask = layoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = layoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;

    // Mirror image of the acquire barrier: graphics queue -> present queue.
    VkImageMemoryBarrier imageMemoryBarrier = {
            VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,     // sType
            NULL,                                       // pNext
            srcAccessMask,                              // outputMask
            dstAccessMask,                              // inputMask
            layout,                                     // oldLayout
            VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,            // newLayout
            mBackendContext->fGraphicsQueueIndex,       // srcQueueFamilyIndex
            mPresentQueueIndex,                         // dstQueueFamilyIndex
            surface->mImages[backbuffer->mImageIndex],  // image
            {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}     // subresourceRange
    };

    // Command buffer [1] is reserved for the present-side transition.
    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[1], 0);
    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[1], &info);
    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[1], srcStageMask, dstStageMask, 0, 0,
                        nullptr, 0, nullptr, 1, &imageMemoryBarrier);
    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[1]);

    // Remember the layout so the next acquire picks the right barrier masks.
    surface->mImageInfos[backbuffer->mImageIndex].mImageLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 0;
    submitInfo.pWaitDstStageMask = 0;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[1];
    submitInfo.signalSemaphoreCount = 1;
    // When this command buffer finishes we will signal this semaphore so that we know it is now
    // safe to present the image to the screen.
    submitInfo.pSignalSemaphores = &backbuffer->mRenderSemaphore;

    // Attach second fence to submission here so we can track when the command buffer finishes.
    mQueueSubmit(mBackendContext->fQueue, 1, &submitInfo, backbuffer->mUsageFences[1]);

    // Submit present operation to present queue. We use a semaphore here to make sure all rendering
    // to the image is complete and that the layout has been change to present on the graphics
    // queue.
    const VkPresentInfoKHR presentInfo = {
            VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,  // sType
            NULL,                                // pNext
            1,                                   // waitSemaphoreCount
            &backbuffer->mRenderSemaphore,       // pWaitSemaphores
            1,                                   // swapchainCount
            &surface->mSwapchain,                // pSwapchains
            &backbuffer->mImageIndex,            // pImageIndices
            NULL                                 // pResults
    };

    mQueuePresentKHR(mPresentQueue, &presentInfo);

    surface->mBackbuffer.reset();
    // Age bookkeeping consumed by getAge() for buffer-age partial updates.
    surface->mImageInfos[backbuffer->mImageIndex].mLastUsed = surface->mCurrentTime;
    surface->mImageInfos[backbuffer->mImageIndex].mInvalid = false;
    surface->mCurrentTime++;
}
681
682int VulkanManager::getAge(VulkanSurface* surface) {
John Reck1bcacfd2017-11-03 10:12:19 -0700683 VulkanSurface::BackbufferInfo* backbuffer =
684 surface->mBackbuffers + surface->mCurrentBackbufferIndex;
685 if (mSwapBehavior == SwapBehavior::Discard ||
686 surface->mImageInfos[backbuffer->mImageIndex].mInvalid) {
Greg Danielcd558522016-11-17 13:31:40 -0500687 return 0;
688 }
689 uint16_t lastUsed = surface->mImageInfos[backbuffer->mImageIndex].mLastUsed;
690 return surface->mCurrentTime - lastUsed;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500691}
692
693} /* namespace renderthread */
694} /* namespace uirenderer */
695} /* namespace android */