blob: 038e13c513fd14b3e069c3b2bce80e2908c2aa49 [file] [log] [blame]
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001/*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "VulkanManager.h"
18
19#include "DeviceInfo.h"
Greg Danielcd558522016-11-17 13:31:40 -050020#include "Properties.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050021#include "RenderThread.h"
Greg Daniel45ec62b2017-01-04 14:27:00 -050022#include "renderstate/RenderState.h"
Ben Wagnereec27d52017-01-11 15:32:07 -050023#include "utils/FatVector.h"
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050024
Greg Danielac2d2322017-07-12 11:30:15 -040025#include <GrBackendSurface.h>
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050026#include <GrContext.h>
27#include <GrTypes.h>
Greg Daniela227dbb2018-08-20 09:19:48 -040028#include <GrTypes.h>
29#include <vk/GrVkExtensions.h>
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050030#include <vk/GrVkTypes.h>
31
32namespace android {
33namespace uirenderer {
34namespace renderthread {
35
// Helper macros that resolve Vulkan entry points into the matching m<Name>
// member function pointers.
//   GET_PROC:      global (pre-instance) functions, resolved with a null instance.
//   GET_INST_PROC: instance-level functions, resolved against mInstance.
//   GET_DEV_PROC:  device-level functions, resolved against mDevice.
#define GET_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(VK_NULL_HANDLE, "vk" #F)
#define GET_INST_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(mInstance, "vk" #F)
#define GET_DEV_PROC(F) m##F = (PFN_vk##F)vkGetDeviceProcAddr(mDevice, "vk" #F)
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050039
John Reck1bcacfd2017-11-03 10:12:19 -070040VulkanManager::VulkanManager(RenderThread& thread) : mRenderThread(thread) {}
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050041
// Tears down everything this manager owns, in reverse order of creation:
// the GrContext (via RenderState/RenderThread), then the command pool, the
// device, and finally the instance.  Safe to call on a partially initialized
// manager — every handle is checked against VK_NULL_HANDLE first.
void VulkanManager::destroy() {
    mRenderThread.renderState().onContextDestroyed();
    mRenderThread.setGrContext(nullptr);

    if (VK_NULL_HANDLE != mCommandPool) {
        mDestroyCommandPool(mDevice, mCommandPool, nullptr);
        mCommandPool = VK_NULL_HANDLE;
    }

    if (mDevice != VK_NULL_HANDLE) {
        // Drain all in-flight GPU work before destroying the device.
        mDeviceWaitIdle(mDevice);
        mDestroyDevice(mDevice, nullptr);
    }

    if (mInstance != VK_NULL_HANDLE) {
        mDestroyInstance(mInstance, nullptr);
    }

    // Reset cached handles so a subsequent setupDevice() starts from scratch.
    mGraphicsQueue = VK_NULL_HANDLE;
    mPresentQueue = VK_NULL_HANDLE;
    mDevice = VK_NULL_HANDLE;
    mPhysicalDevice = VK_NULL_HANDLE;
    mInstance = VK_NULL_HANDLE;
}
Derek Sollenberger0e3cba32016-11-09 11:58:36 -050066
Greg Daniela227dbb2018-08-20 09:19:48 -040067bool VulkanManager::setupDevice(GrVkExtensions& grExtensions, VkPhysicalDeviceFeatures2& features) {
Greg Daniel2ff202712018-06-14 11:50:10 -040068 VkResult err;
69
70 constexpr VkApplicationInfo app_info = {
71 VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
72 nullptr, // pNext
73 "android framework", // pApplicationName
74 0, // applicationVersion
75 "android framework", // pEngineName
76 0, // engineVerison
77 VK_MAKE_VERSION(1, 0, 0), // apiVersion
78 };
79
80 std::vector<const char*> instanceExtensions;
81 {
82 GET_PROC(EnumerateInstanceExtensionProperties);
83
84 uint32_t extensionCount = 0;
85 err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
86 if (VK_SUCCESS != err) {
87 return false;
88 }
89 std::unique_ptr<VkExtensionProperties[]> extensions(
90 new VkExtensionProperties[extensionCount]);
91 err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions.get());
92 if (VK_SUCCESS != err) {
93 return false;
94 }
95 bool hasKHRSurfaceExtension = false;
96 bool hasKHRAndroidSurfaceExtension = false;
97 for (uint32_t i = 0; i < extensionCount; ++i) {
98 instanceExtensions.push_back(extensions[i].extensionName);
99 if (!strcmp(extensions[i].extensionName, VK_KHR_SURFACE_EXTENSION_NAME)) {
100 hasKHRSurfaceExtension = true;
101 }
102 if (!strcmp(extensions[i].extensionName,VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
103 hasKHRAndroidSurfaceExtension = true;
104 }
105 }
106 if (!hasKHRSurfaceExtension || !hasKHRAndroidSurfaceExtension) {
107 this->destroy();
108 return false;
109 }
110 }
111
112 const VkInstanceCreateInfo instance_create = {
113 VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
114 nullptr, // pNext
115 0, // flags
116 &app_info, // pApplicationInfo
117 0, // enabledLayerNameCount
118 nullptr, // ppEnabledLayerNames
119 (uint32_t) instanceExtensions.size(), // enabledExtensionNameCount
120 instanceExtensions.data(), // ppEnabledExtensionNames
121 };
122
123 GET_PROC(CreateInstance);
124 err = mCreateInstance(&instance_create, nullptr, &mInstance);
125 if (err < 0) {
126 this->destroy();
127 return false;
128 }
129
130 GET_INST_PROC(DestroyInstance);
131 GET_INST_PROC(EnumeratePhysicalDevices);
132 GET_INST_PROC(GetPhysicalDeviceQueueFamilyProperties);
Greg Daniela227dbb2018-08-20 09:19:48 -0400133 GET_INST_PROC(GetPhysicalDeviceFeatures2);
Greg Daniel2ff202712018-06-14 11:50:10 -0400134 GET_INST_PROC(CreateDevice);
135 GET_INST_PROC(EnumerateDeviceExtensionProperties);
136 GET_INST_PROC(CreateAndroidSurfaceKHR);
137 GET_INST_PROC(DestroySurfaceKHR);
138 GET_INST_PROC(GetPhysicalDeviceSurfaceSupportKHR);
139 GET_INST_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
140 GET_INST_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
141 GET_INST_PROC(GetPhysicalDeviceSurfacePresentModesKHR);
142
143 uint32_t gpuCount;
144 err = mEnumeratePhysicalDevices(mInstance, &gpuCount, nullptr);
145 if (err) {
146 this->destroy();
147 return false;
148 }
149 if (!gpuCount) {
150 this->destroy();
151 return false;
152 }
153 // Just returning the first physical device instead of getting the whole array. Since there
154 // should only be one device on android.
155 gpuCount = 1;
156 err = mEnumeratePhysicalDevices(mInstance, &gpuCount, &mPhysicalDevice);
157 // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
158 if (err && VK_INCOMPLETE != err) {
159 this->destroy();
160 return false;
161 }
162
163 // query to get the initial queue props size
164 uint32_t queueCount;
165 mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, nullptr);
166 if (!queueCount) {
167 this->destroy();
168 return false;
169 }
170
171 // now get the actual queue props
172 std::unique_ptr<VkQueueFamilyProperties[]> queueProps(new VkQueueFamilyProperties[queueCount]);
173 mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, queueProps.get());
174
175 // iterate to find the graphics queue
176 mGraphicsQueueIndex = queueCount;
177 for (uint32_t i = 0; i < queueCount; i++) {
178 if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
179 mGraphicsQueueIndex = i;
180 break;
181 }
182 }
183 if (mGraphicsQueueIndex == queueCount) {
184 this->destroy();
185 return false;
186 }
187
188 // All physical devices and queue families on Android must be capable of
189 // presentation with any native window. So just use the first one.
190 mPresentQueueIndex = 0;
191
192 std::vector<const char*> deviceExtensions;
193 {
194 uint32_t extensionCount = 0;
195 err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
196 nullptr);
197 if (VK_SUCCESS != err) {
198 this->destroy();
199 return false;
200 }
201 std::unique_ptr<VkExtensionProperties[]> extensions(
202 new VkExtensionProperties[extensionCount]);
203 err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
204 extensions.get());
205 if (VK_SUCCESS != err) {
206 this->destroy();
207 return false;
208 }
209 bool hasKHRSwapchainExtension = false;
210 for (uint32_t i = 0; i < extensionCount; ++i) {
211 deviceExtensions.push_back(extensions[i].extensionName);
212 if (!strcmp(extensions[i].extensionName, VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
213 hasKHRSwapchainExtension = true;
214 }
215 }
216 if (!hasKHRSwapchainExtension) {
217 this->destroy();
218 return false;
219 }
220 }
221
Greg Daniela227dbb2018-08-20 09:19:48 -0400222 auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
223 if (device != VK_NULL_HANDLE) {
224 return vkGetDeviceProcAddr(device, proc_name);
225 }
226 return vkGetInstanceProcAddr(instance, proc_name);
227 };
228 grExtensions.init(getProc, mInstance, mPhysicalDevice, instanceExtensions.size(),
229 instanceExtensions.data(), deviceExtensions.size(), deviceExtensions.data());
230
231 memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
232 features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
233 features.pNext = nullptr;
234
235 // Setup all extension feature structs we may want to use.
236 void** tailPNext = &features.pNext;
237
238 if (grExtensions.hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
239 VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend;
240 blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) malloc(
241 sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
242 LOG_ALWAYS_FATAL_IF(!blend);
243 blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
244 blend->pNext = nullptr;
245 *tailPNext = blend;
246 tailPNext = &blend->pNext;
247 }
248
249 // query to get the physical device features
250 mGetPhysicalDeviceFeatures2(mPhysicalDevice, &features);
Greg Daniel2ff202712018-06-14 11:50:10 -0400251 // this looks like it would slow things down,
252 // and we can't depend on it on all platforms
Greg Daniela227dbb2018-08-20 09:19:48 -0400253 features.features.robustBufferAccess = VK_FALSE;
Greg Daniel2ff202712018-06-14 11:50:10 -0400254
255 float queuePriorities[1] = { 0.0 };
256
257 const VkDeviceQueueCreateInfo queueInfo[2] = {
258 {
259 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
260 nullptr, // pNext
261 0, // VkDeviceQueueCreateFlags
262 mGraphicsQueueIndex, // queueFamilyIndex
263 1, // queueCount
264 queuePriorities, // pQueuePriorities
265 },
266 {
267 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
268 nullptr, // pNext
269 0, // VkDeviceQueueCreateFlags
270 mPresentQueueIndex, // queueFamilyIndex
271 1, // queueCount
272 queuePriorities, // pQueuePriorities
273 }
274 };
275 uint32_t queueInfoCount = (mPresentQueueIndex != mGraphicsQueueIndex) ? 2 : 1;
276
277 const VkDeviceCreateInfo deviceInfo = {
278 VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
Greg Daniela227dbb2018-08-20 09:19:48 -0400279 &features, // pNext
Greg Daniel2ff202712018-06-14 11:50:10 -0400280 0, // VkDeviceCreateFlags
281 queueInfoCount, // queueCreateInfoCount
282 queueInfo, // pQueueCreateInfos
283 0, // layerCount
284 nullptr, // ppEnabledLayerNames
285 (uint32_t) deviceExtensions.size(), // extensionCount
286 deviceExtensions.data(), // ppEnabledExtensionNames
Greg Daniela227dbb2018-08-20 09:19:48 -0400287 nullptr, // ppEnabledFeatures
Greg Daniel2ff202712018-06-14 11:50:10 -0400288 };
289
290 err = mCreateDevice(mPhysicalDevice, &deviceInfo, nullptr, &mDevice);
291 if (err) {
292 this->destroy();
293 return false;
294 }
295
296 GET_DEV_PROC(GetDeviceQueue);
297 GET_DEV_PROC(DeviceWaitIdle);
298 GET_DEV_PROC(DestroyDevice);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500299 GET_DEV_PROC(CreateSwapchainKHR);
300 GET_DEV_PROC(DestroySwapchainKHR);
301 GET_DEV_PROC(GetSwapchainImagesKHR);
302 GET_DEV_PROC(AcquireNextImageKHR);
303 GET_DEV_PROC(QueuePresentKHR);
304 GET_DEV_PROC(CreateCommandPool);
305 GET_DEV_PROC(DestroyCommandPool);
306 GET_DEV_PROC(AllocateCommandBuffers);
307 GET_DEV_PROC(FreeCommandBuffers);
308 GET_DEV_PROC(ResetCommandBuffer);
309 GET_DEV_PROC(BeginCommandBuffer);
310 GET_DEV_PROC(EndCommandBuffer);
311 GET_DEV_PROC(CmdPipelineBarrier);
312 GET_DEV_PROC(GetDeviceQueue);
313 GET_DEV_PROC(QueueSubmit);
314 GET_DEV_PROC(QueueWaitIdle);
315 GET_DEV_PROC(DeviceWaitIdle);
316 GET_DEV_PROC(CreateSemaphore);
317 GET_DEV_PROC(DestroySemaphore);
318 GET_DEV_PROC(CreateFence);
319 GET_DEV_PROC(DestroyFence);
320 GET_DEV_PROC(WaitForFences);
321 GET_DEV_PROC(ResetFences);
322
Greg Daniel2ff202712018-06-14 11:50:10 -0400323 return true;
324}
325
Greg Daniela227dbb2018-08-20 09:19:48 -0400326static void free_features_extensions_structs(const VkPhysicalDeviceFeatures2& features) {
327 // All Vulkan structs that could be part of the features chain will start with the
328 // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
329 // so we can get access to the pNext for the next struct.
330 struct CommonVulkanHeader {
331 VkStructureType sType;
332 void* pNext;
333 };
334
335 void* pNext = features.pNext;
336 while (pNext) {
337 void* current = pNext;
338 pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
339 free(current);
340 }
341}
342
// One-time Vulkan bring-up for the render thread.  Verifies the loader
// supports Vulkan 1.1, creates the instance/device via setupDevice(), builds
// the Skia GrContext on top of them, and creates the command pool used for
// layout-transition command buffers.  Idempotent: returns immediately if the
// device already exists.  Any failure is fatal (LOG_ALWAYS_FATAL_IF).
void VulkanManager::initialize() {
    if (mDevice != VK_NULL_HANDLE) {
        return;
    }

    // Require a 1.1-capable loader before anything else.
    GET_PROC(EnumerateInstanceVersion);
    uint32_t instanceVersion = 0;
    LOG_ALWAYS_FATAL_IF(mEnumerateInstanceVersion(&instanceVersion));
    LOG_ALWAYS_FATAL_IF(instanceVersion < VK_MAKE_VERSION(1, 1, 0));

    GrVkExtensions extensions;
    VkPhysicalDeviceFeatures2 features;
    LOG_ALWAYS_FATAL_IF(!this->setupDevice(extensions, features));

    mGetDeviceQueue(mDevice, mGraphicsQueueIndex, 0, &mGraphicsQueue);

    // Skia resolves its own Vulkan entry points through this callback:
    // device-level when a device is supplied, instance-level otherwise.
    auto getProc = [] (const char* proc_name, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return vkGetDeviceProcAddr(device, proc_name);
        }
        return vkGetInstanceProcAddr(instance, proc_name);
    };

    // Hand Skia non-owning references to our Vulkan objects; extensions and
    // features only need to stay alive until MakeVulkan() returns.
    GrVkBackendContext backendContext;
    backendContext.fInstance = mInstance;
    backendContext.fPhysicalDevice = mPhysicalDevice;
    backendContext.fDevice = mDevice;
    backendContext.fQueue = mGraphicsQueue;
    backendContext.fGraphicsQueueIndex = mGraphicsQueueIndex;
    backendContext.fInstanceVersion = instanceVersion;
    backendContext.fVkExtensions = &extensions;
    backendContext.fDeviceFeatures2 = &features;
    backendContext.fGetProc = std::move(getProc);

    // create the command pool for the command buffers
    if (VK_NULL_HANDLE == mCommandPool) {
        VkCommandPoolCreateInfo commandPoolInfo;
        memset(&commandPoolInfo, 0, sizeof(VkCommandPoolCreateInfo));
        commandPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
        // this needs to be on the render queue
        commandPoolInfo.queueFamilyIndex = mGraphicsQueueIndex;
        commandPoolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
        SkDEBUGCODE(VkResult res =) mCreateCommandPool(mDevice, &commandPoolInfo, nullptr,
                                                       &mCommandPool);
        SkASSERT(VK_SUCCESS == res);
    }

    mGetDeviceQueue(mDevice, mPresentQueueIndex, 0, &mPresentQueue);

    GrContextOptions options;
    options.fDisableDistanceFieldPaths = true;
    // TODO: get a string describing the SPIR-V compiler version and use it here
    mRenderThread.cacheManager().configureContext(&options, nullptr, 0);
    sk_sp<GrContext> grContext(GrContext::MakeVulkan(backendContext, options));
    LOG_ALWAYS_FATAL_IF(!grContext.get());
    mRenderThread.setGrContext(grContext);

    // The GrContext has copied what it needs; release the malloc'd feature
    // structs built by setupDevice().
    free_features_extensions_structs(features);

    DeviceInfo::initialize(mRenderThread.getGrContext()->maxRenderTargetSize());

    if (Properties::enablePartialUpdates && Properties::useBufferAge) {
        mSwapBehavior = SwapBehavior::BufferAge;
    }

    mRenderThread.renderState().onContextCreated();
}
410
411// Returns the next BackbufferInfo to use for the next draw. The function will make sure all
412// previous uses have finished before returning.
413VulkanSurface::BackbufferInfo* VulkanManager::getAvailableBackbuffer(VulkanSurface* surface) {
414 SkASSERT(surface->mBackbuffers);
415
416 ++surface->mCurrentBackbufferIndex;
417 if (surface->mCurrentBackbufferIndex > surface->mImageCount) {
418 surface->mCurrentBackbufferIndex = 0;
419 }
420
John Reck1bcacfd2017-11-03 10:12:19 -0700421 VulkanSurface::BackbufferInfo* backbuffer =
422 surface->mBackbuffers + surface->mCurrentBackbufferIndex;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500423
424 // Before we reuse a backbuffer, make sure its fences have all signaled so that we can safely
425 // reuse its commands buffers.
Greg Daniel2ff202712018-06-14 11:50:10 -0400426 VkResult res = mWaitForFences(mDevice, 2, backbuffer->mUsageFences, true, UINT64_MAX);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500427 if (res != VK_SUCCESS) {
428 return nullptr;
429 }
430
431 return backbuffer;
432}
433
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500434SkSurface* VulkanManager::getBackbufferSurface(VulkanSurface* surface) {
435 VulkanSurface::BackbufferInfo* backbuffer = getAvailableBackbuffer(surface);
436 SkASSERT(backbuffer);
437
438 VkResult res;
439
Greg Daniel2ff202712018-06-14 11:50:10 -0400440 res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500441 SkASSERT(VK_SUCCESS == res);
442
443 // The acquire will signal the attached mAcquireSemaphore. We use this to know the image has
444 // finished presenting and that it is safe to begin sending new commands to the returned image.
Greg Daniel2ff202712018-06-14 11:50:10 -0400445 res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
John Reck1bcacfd2017-11-03 10:12:19 -0700446 backbuffer->mAcquireSemaphore, VK_NULL_HANDLE,
447 &backbuffer->mImageIndex);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500448
449 if (VK_ERROR_SURFACE_LOST_KHR == res) {
450 // need to figure out how to create a new vkSurface without the platformData*
451 // maybe use attach somehow? but need a Window
452 return nullptr;
453 }
454 if (VK_ERROR_OUT_OF_DATE_KHR == res) {
455 // tear swapchain down and try again
456 if (!createSwapchain(surface)) {
457 return nullptr;
458 }
Greg Daniel45ec62b2017-01-04 14:27:00 -0500459 backbuffer = getAvailableBackbuffer(surface);
Greg Daniel2ff202712018-06-14 11:50:10 -0400460 res = mResetFences(mDevice, 2, backbuffer->mUsageFences);
Greg Daniel45ec62b2017-01-04 14:27:00 -0500461 SkASSERT(VK_SUCCESS == res);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500462
463 // acquire the image
Greg Daniel2ff202712018-06-14 11:50:10 -0400464 res = mAcquireNextImageKHR(mDevice, surface->mSwapchain, UINT64_MAX,
John Reck1bcacfd2017-11-03 10:12:19 -0700465 backbuffer->mAcquireSemaphore, VK_NULL_HANDLE,
466 &backbuffer->mImageIndex);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500467
468 if (VK_SUCCESS != res) {
469 return nullptr;
470 }
471 }
472
473 // set up layout transfer from initial to color attachment
Greg Danielcd558522016-11-17 13:31:40 -0500474 VkImageLayout layout = surface->mImageInfos[backbuffer->mImageIndex].mImageLayout;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500475 SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout || VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout);
John Reck1bcacfd2017-11-03 10:12:19 -0700476 VkPipelineStageFlags srcStageMask = (VK_IMAGE_LAYOUT_UNDEFINED == layout)
477 ? VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT
478 : VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500479 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
John Reck1bcacfd2017-11-03 10:12:19 -0700480 VkAccessFlags srcAccessMask =
481 (VK_IMAGE_LAYOUT_UNDEFINED == layout) ? 0 : VK_ACCESS_MEMORY_READ_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500482 VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
483
484 VkImageMemoryBarrier imageMemoryBarrier = {
John Reck1bcacfd2017-11-03 10:12:19 -0700485 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
486 NULL, // pNext
487 srcAccessMask, // outputMask
488 dstAccessMask, // inputMask
489 layout, // oldLayout
490 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // newLayout
491 mPresentQueueIndex, // srcQueueFamilyIndex
Greg Daniel2ff202712018-06-14 11:50:10 -0400492 mGraphicsQueueIndex, // dstQueueFamilyIndex
John Reck1bcacfd2017-11-03 10:12:19 -0700493 surface->mImages[backbuffer->mImageIndex], // image
494 {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1} // subresourceRange
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500495 };
496 mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[0], 0);
497
498 VkCommandBufferBeginInfo info;
499 memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
500 info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
501 info.flags = 0;
502 mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[0], &info);
503
John Reck1bcacfd2017-11-03 10:12:19 -0700504 mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[0], srcStageMask, dstStageMask, 0, 0,
505 nullptr, 0, nullptr, 1, &imageMemoryBarrier);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500506
507 mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[0]);
508
509 VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
510 // insert the layout transfer into the queue and wait on the acquire
511 VkSubmitInfo submitInfo;
512 memset(&submitInfo, 0, sizeof(VkSubmitInfo));
513 submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
514 submitInfo.waitSemaphoreCount = 1;
515 // Wait to make sure aquire semaphore set above has signaled.
516 submitInfo.pWaitSemaphores = &backbuffer->mAcquireSemaphore;
517 submitInfo.pWaitDstStageMask = &waitDstStageFlags;
518 submitInfo.commandBufferCount = 1;
519 submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[0];
520 submitInfo.signalSemaphoreCount = 0;
521
522 // Attach first fence to submission here so we can track when the command buffer finishes.
Greg Daniel2ff202712018-06-14 11:50:10 -0400523 mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[0]);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500524
525 // We need to notify Skia that we changed the layout of the wrapped VkImage
Greg Danielcd558522016-11-17 13:31:40 -0500526 sk_sp<SkSurface> skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface;
Greg Daniel1834a8c2018-04-12 12:22:43 -0400527 GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
528 SkSurface::kFlushRead_BackendHandleAccess);
529 if (!backendRT.isValid()) {
530 SkASSERT(backendRT.isValid());
531 return nullptr;
532 }
533 backendRT.setVkImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500534
535 surface->mBackbuffer = std::move(skSurface);
536 return surface->mBackbuffer.get();
537}
538
539void VulkanManager::destroyBuffers(VulkanSurface* surface) {
540 if (surface->mBackbuffers) {
541 for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400542 mWaitForFences(mDevice, 2, surface->mBackbuffers[i].mUsageFences, true, UINT64_MAX);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500543 surface->mBackbuffers[i].mImageIndex = -1;
Greg Daniel2ff202712018-06-14 11:50:10 -0400544 mDestroySemaphore(mDevice, surface->mBackbuffers[i].mAcquireSemaphore, nullptr);
545 mDestroySemaphore(mDevice, surface->mBackbuffers[i].mRenderSemaphore, nullptr);
546 mFreeCommandBuffers(mDevice, mCommandPool, 2,
547 surface->mBackbuffers[i].mTransitionCmdBuffers);
548 mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[0], 0);
549 mDestroyFence(mDevice, surface->mBackbuffers[i].mUsageFences[1], 0);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500550 }
551 }
552
553 delete[] surface->mBackbuffers;
554 surface->mBackbuffers = nullptr;
Greg Danielcd558522016-11-17 13:31:40 -0500555 delete[] surface->mImageInfos;
556 surface->mImageInfos = nullptr;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500557 delete[] surface->mImages;
558 surface->mImages = nullptr;
559}
560
561void VulkanManager::destroySurface(VulkanSurface* surface) {
562 // Make sure all submit commands have finished before starting to destroy objects.
563 if (VK_NULL_HANDLE != mPresentQueue) {
564 mQueueWaitIdle(mPresentQueue);
565 }
Greg Daniel2ff202712018-06-14 11:50:10 -0400566 mDeviceWaitIdle(mDevice);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500567
568 destroyBuffers(surface);
569
570 if (VK_NULL_HANDLE != surface->mSwapchain) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400571 mDestroySwapchainKHR(mDevice, surface->mSwapchain, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500572 surface->mSwapchain = VK_NULL_HANDLE;
573 }
574
575 if (VK_NULL_HANDLE != surface->mVkSurface) {
Greg Daniel2ff202712018-06-14 11:50:10 -0400576 mDestroySurfaceKHR(mInstance, surface->mVkSurface, nullptr);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500577 surface->mVkSurface = VK_NULL_HANDLE;
578 }
579 delete surface;
580}
581
// Builds the per-surface buffer state after a swapchain (re)creation: fetches
// the swapchain images, wraps each in an SkSurface render target, and
// allocates mImageCount + 1 backbuffer slots (semaphores, two transition
// command buffers, and two pre-signaled usage fences each).
void VulkanManager::createBuffers(VulkanSurface* surface, VkFormat format, VkExtent2D extent) {
    // First call sizes mImageCount, second call fills the image array.
    mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, nullptr);
    SkASSERT(surface->mImageCount);
    surface->mImages = new VkImage[surface->mImageCount];
    mGetSwapchainImagesKHR(mDevice, surface->mSwapchain, &surface->mImageCount, surface->mImages);

    SkSurfaceProps props(0, kUnknown_SkPixelGeometry);

    // set up initial image layouts and create surfaces
    surface->mImageInfos = new VulkanSurface::ImageInfo[surface->mImageCount];
    for (uint32_t i = 0; i < surface->mImageCount; ++i) {
        // Describe the swapchain image to Skia; layout starts UNDEFINED and is
        // transitioned before each use in getBackbufferSurface().
        GrVkImageInfo info;
        info.fImage = surface->mImages[i];
        info.fAlloc = GrVkAlloc();  // swapchain-owned memory: no allocation to report
        info.fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
        info.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
        info.fFormat = format;
        info.fLevelCount = 1;

        GrBackendRenderTarget backendRT(extent.width, extent.height, 0, 0, info);

        VulkanSurface::ImageInfo& imageInfo = surface->mImageInfos[i];
        imageInfo.mSurface = SkSurface::MakeFromBackendRenderTarget(
                mRenderThread.getGrContext(), backendRT, kTopLeft_GrSurfaceOrigin,
                kRGBA_8888_SkColorType, nullptr, &props);
    }

    SkASSERT(mCommandPool != VK_NULL_HANDLE);

    // set up the backbuffers
    VkSemaphoreCreateInfo semaphoreInfo;
    memset(&semaphoreInfo, 0, sizeof(VkSemaphoreCreateInfo));
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    VkCommandBufferAllocateInfo commandBuffersInfo;
    memset(&commandBuffersInfo, 0, sizeof(VkCommandBufferAllocateInfo));
    commandBuffersInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    commandBuffersInfo.pNext = nullptr;
    commandBuffersInfo.commandPool = mCommandPool;
    commandBuffersInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    commandBuffersInfo.commandBufferCount = 2;
    VkFenceCreateInfo fenceInfo;
    memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
    fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    fenceInfo.pNext = nullptr;
    // Created signaled so the first wait in getAvailableBackbuffer() passes.
    fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;

    // we create one additional backbuffer structure here, because we want to
    // give the command buffers they contain a chance to finish before we cycle back
    surface->mBackbuffers = new VulkanSurface::BackbufferInfo[surface->mImageCount + 1];
    for (uint32_t i = 0; i < surface->mImageCount + 1; ++i) {
        // res only exists in debug builds; the SkASSERT below checks the last
        // create call (earlier results are overwritten).
        SkDEBUGCODE(VkResult res);
        surface->mBackbuffers[i].mImageIndex = -1;
        SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
                                            &surface->mBackbuffers[i].mAcquireSemaphore);
        SkDEBUGCODE(res =) mCreateSemaphore(mDevice, &semaphoreInfo, nullptr,
                                            &surface->mBackbuffers[i].mRenderSemaphore);
        SkDEBUGCODE(res =) mAllocateCommandBuffers(mDevice, &commandBuffersInfo,
                                                   surface->mBackbuffers[i].mTransitionCmdBuffers);
        SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
                                        &surface->mBackbuffers[i].mUsageFences[0]);
        SkDEBUGCODE(res =) mCreateFence(mDevice, &fenceInfo, nullptr,
                                        &surface->mBackbuffers[i].mUsageFences[1]);
        SkASSERT(VK_SUCCESS == res);
    }
    // Start at the extra slot so the first getAvailableBackbuffer() wraps to 0.
    surface->mCurrentBackbufferIndex = surface->mImageCount;
}
650
// Creates (or recreates) the VkSwapchainKHR for |surface|, then rebuilds the
// per-image buffers. Returns false if any Vulkan query or the swapchain
// creation itself fails; on success the old swapchain (if any) is destroyed
// after a device-wide idle. Callers re-invoke this on window resize.
bool VulkanManager::createSwapchain(VulkanSurface* surface) {
    // check for capabilities
    VkSurfaceCapabilitiesKHR caps;
    VkResult res = mGetPhysicalDeviceSurfaceCapabilitiesKHR(mPhysicalDevice,
            surface->mVkSurface, &caps);
    if (VK_SUCCESS != res) {
        return false;
    }

    // Query surface formats: first call gets the count, second fills the array.
    uint32_t surfaceFormatCount;
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
            &surfaceFormatCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    FatVector<VkSurfaceFormatKHR, 4> surfaceFormats(surfaceFormatCount);
    res = mGetPhysicalDeviceSurfaceFormatsKHR(mPhysicalDevice, surface->mVkSurface,
            &surfaceFormatCount, surfaceFormats.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    // Same two-call pattern for the supported present modes.
    uint32_t presentModeCount;
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
            surface->mVkSurface, &presentModeCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }

    FatVector<VkPresentModeKHR, VK_PRESENT_MODE_RANGE_SIZE_KHR> presentModes(presentModeCount);
    res = mGetPhysicalDeviceSurfacePresentModesKHR(mPhysicalDevice,
            surface->mVkSurface, &presentModeCount,
            presentModes.data());
    if (VK_SUCCESS != res) {
        return false;
    }

    VkExtent2D extent = caps.currentExtent;
    // clamp width; to handle currentExtent of -1 and protect us from broken hints
    if (extent.width < caps.minImageExtent.width) {
        extent.width = caps.minImageExtent.width;
    }
    SkASSERT(extent.width <= caps.maxImageExtent.width);
    // clamp height
    if (extent.height < caps.minImageExtent.height) {
        extent.height = caps.minImageExtent.height;
    }
    SkASSERT(extent.height <= caps.maxImageExtent.height);

    // Ask for two images beyond the driver's minimum so we can triple-buffer;
    // a maxImageCount of 0 means "no upper limit".
    uint32_t imageCount = caps.minImageCount + 2;
    if (caps.maxImageCount > 0 && imageCount > caps.maxImageCount) {
        // Application must settle for fewer images than desired:
        imageCount = caps.maxImageCount;
    }

    // Currently Skia requires the images to be color attchments and support all transfer
    // operations.
    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    SkASSERT((caps.supportedUsageFlags & usageFlags) == usageFlags);
    SkASSERT(caps.supportedTransforms & caps.currentTransform);
    SkASSERT(caps.supportedCompositeAlpha &
             (VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR | VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR));
    // Prefer INHERIT (compositor decides) when available, otherwise OPAQUE.
    VkCompositeAlphaFlagBitsKHR composite_alpha =
            (caps.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR)
                    ? VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR
                    : VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;

    // Pick our surface format. For now, just make sure it matches our sRGB request:
    VkFormat surfaceFormat = VK_FORMAT_UNDEFINED;
    VkColorSpaceKHR colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;

    bool wantSRGB = false;
#ifdef ANDROID_ENABLE_LINEAR_BLENDING
    wantSRGB = true;
#endif
    for (uint32_t i = 0; i < surfaceFormatCount; ++i) {
        // We are assuming we can get either R8G8B8A8_UNORM or R8G8B8A8_SRGB
        VkFormat desiredFormat = wantSRGB ? VK_FORMAT_R8G8B8A8_SRGB : VK_FORMAT_R8G8B8A8_UNORM;
        if (desiredFormat == surfaceFormats[i].format) {
            surfaceFormat = surfaceFormats[i].format;
            colorSpace = surfaceFormats[i].colorSpace;
        }
    }

    if (VK_FORMAT_UNDEFINED == surfaceFormat) {
        return false;
    }

    // If mailbox mode is available, use it, as it is the lowest-latency non-
    // tearing mode. If not, fall back to FIFO which is always available.
    VkPresentModeKHR mode = VK_PRESENT_MODE_FIFO_KHR;
    for (uint32_t i = 0; i < presentModeCount; ++i) {
        // use mailbox
        if (VK_PRESENT_MODE_MAILBOX_KHR == presentModes[i]) {
            mode = presentModes[i];
            break;
        }
    }

    VkSwapchainCreateInfoKHR swapchainCreateInfo;
    memset(&swapchainCreateInfo, 0, sizeof(VkSwapchainCreateInfoKHR));
    swapchainCreateInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
    swapchainCreateInfo.surface = surface->mVkSurface;
    swapchainCreateInfo.minImageCount = imageCount;
    swapchainCreateInfo.imageFormat = surfaceFormat;
    swapchainCreateInfo.imageColorSpace = colorSpace;
    swapchainCreateInfo.imageExtent = extent;
    swapchainCreateInfo.imageArrayLayers = 1;
    swapchainCreateInfo.imageUsage = usageFlags;

    // If graphics and present live on different queue families the images must
    // be shared concurrently between the two; otherwise exclusive is cheaper.
    uint32_t queueFamilies[] = {mGraphicsQueueIndex, mPresentQueueIndex};
    if (mGraphicsQueueIndex != mPresentQueueIndex) {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
        swapchainCreateInfo.queueFamilyIndexCount = 2;
        swapchainCreateInfo.pQueueFamilyIndices = queueFamilies;
    } else {
        swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
        swapchainCreateInfo.queueFamilyIndexCount = 0;
        swapchainCreateInfo.pQueueFamilyIndices = nullptr;
    }

    swapchainCreateInfo.preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
    swapchainCreateInfo.compositeAlpha = composite_alpha;
    swapchainCreateInfo.presentMode = mode;
    swapchainCreateInfo.clipped = true;
    // Passing the old swapchain lets the driver recycle its resources.
    swapchainCreateInfo.oldSwapchain = surface->mSwapchain;

    res = mCreateSwapchainKHR(mDevice, &swapchainCreateInfo, nullptr, &surface->mSwapchain);
    if (VK_SUCCESS != res) {
        return false;
    }

    // destroy the old swapchain
    if (swapchainCreateInfo.oldSwapchain != VK_NULL_HANDLE) {
        // Wait for all in-flight work before tearing down buffers that may
        // still be referenced by pending command buffers.
        mDeviceWaitIdle(mDevice);

        destroyBuffers(surface);

        mDestroySwapchainKHR(mDevice, swapchainCreateInfo.oldSwapchain, nullptr);
    }

    createBuffers(surface, surfaceFormat, extent);

    return true;
}
799
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500800VulkanSurface* VulkanManager::createSurface(ANativeWindow* window) {
801 initialize();
802
803 if (!window) {
804 return nullptr;
805 }
806
807 VulkanSurface* surface = new VulkanSurface();
808
809 VkAndroidSurfaceCreateInfoKHR surfaceCreateInfo;
810 memset(&surfaceCreateInfo, 0, sizeof(VkAndroidSurfaceCreateInfoKHR));
811 surfaceCreateInfo.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
812 surfaceCreateInfo.pNext = nullptr;
813 surfaceCreateInfo.flags = 0;
814 surfaceCreateInfo.window = window;
815
Greg Daniel2ff202712018-06-14 11:50:10 -0400816 VkResult res = mCreateAndroidSurfaceKHR(mInstance, &surfaceCreateInfo, nullptr,
817 &surface->mVkSurface);
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500818 if (VK_SUCCESS != res) {
819 delete surface;
820 return nullptr;
821 }
822
John Reck1bcacfd2017-11-03 10:12:19 -0700823 SkDEBUGCODE(VkBool32 supported; res = mGetPhysicalDeviceSurfaceSupportKHR(
Greg Daniel2ff202712018-06-14 11:50:10 -0400824 mPhysicalDevice, mPresentQueueIndex, surface->mVkSurface, &supported);
825 // All physical devices and queue families on Android must be capable of
826 // presentation with any native window.
827 SkASSERT(VK_SUCCESS == res && supported););
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500828
829 if (!createSwapchain(surface)) {
830 destroySurface(surface);
831 return nullptr;
832 }
833
834 return surface;
835}
836
837// Helper to know which src stage flags we need to set when transitioning to the present layout
838static VkPipelineStageFlags layoutToPipelineStageFlags(const VkImageLayout layout) {
839 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
840 return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
841 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
842 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
843 return VK_PIPELINE_STAGE_TRANSFER_BIT;
844 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout ||
845 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
846 VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout ||
847 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
848 return VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
849 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
850 return VK_PIPELINE_STAGE_HOST_BIT;
851 }
852
853 SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
854 return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
855}
856
857// Helper to know which src access mask we need to set when transitioning to the present layout
858static VkAccessFlags layoutToSrcAccessMask(const VkImageLayout layout) {
859 VkAccessFlags flags = 0;
860 if (VK_IMAGE_LAYOUT_GENERAL == layout) {
861 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
John Reck1bcacfd2017-11-03 10:12:19 -0700862 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT |
863 VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_HOST_WRITE_BIT |
864 VK_ACCESS_HOST_READ_BIT;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500865 } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
866 flags = VK_ACCESS_HOST_WRITE_BIT;
867 } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
868 flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
869 } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
870 flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
871 } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
872 flags = VK_ACCESS_TRANSFER_WRITE_BIT;
873 } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
874 flags = VK_ACCESS_TRANSFER_READ_BIT;
875 } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
876 flags = VK_ACCESS_SHADER_READ_BIT;
877 }
878 return flags;
879}
880
// Presents the current backbuffer of |surface|: records and submits a barrier
// that transitions the swapchain image to PRESENT_SRC (transferring queue
// family ownership from graphics to present), then queues the present. The
// submit signals mRenderSemaphore, which the present waits on, and attaches
// mUsageFences[1] so later frames can tell when this command buffer retired.
void VulkanManager::swapBuffers(VulkanSurface* surface) {
    if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
        // Debug property: stall the CPU until the GPU drains before presenting.
        ATRACE_NAME("Finishing GPU work");
        mDeviceWaitIdle(mDevice);
    }

    SkASSERT(surface->mBackbuffers);
    VulkanSurface::BackbufferInfo* backbuffer =
            surface->mBackbuffers + surface->mCurrentBackbufferIndex;

    // Pull the wrapped VkImage state (layout in particular) back out of Skia.
    SkSurface* skSurface = surface->mImageInfos[backbuffer->mImageIndex].mSurface.get();
    GrBackendRenderTarget backendRT = skSurface->getBackendRenderTarget(
            SkSurface::kFlushRead_BackendHandleAccess);
    SkASSERT(backendRT.isValid());

    GrVkImageInfo imageInfo;
    SkAssertResult(backendRT.getVkImageInfo(&imageInfo));

    // Check to make sure we never change the actually wrapped image
    SkASSERT(imageInfo.fImage == surface->mImages[backbuffer->mImageIndex]);

    // We need to transition the image to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR and make sure that all
    // previous work is complete for before presenting. So we first add the necessary barrier here.
    VkImageLayout layout = imageInfo.fImageLayout;
    VkPipelineStageFlags srcStageMask = layoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = layoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;

    // src/dst queue family indices differ when present runs on its own queue
    // family; the barrier then also performs the ownership transfer.
    VkImageMemoryBarrier imageMemoryBarrier = {
            VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,  // sType
            NULL,                                    // pNext
            srcAccessMask,                           // outputMask
            dstAccessMask,                           // inputMask
            layout,                                  // oldLayout
            VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,         // newLayout
            mGraphicsQueueIndex,                     // srcQueueFamilyIndex
            mPresentQueueIndex,                      // dstQueueFamilyIndex
            surface->mImages[backbuffer->mImageIndex],  // image
            {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}  // subresourceRange
    };

    // Record the barrier into the second (present-side) transition buffer.
    mResetCommandBuffer(backbuffer->mTransitionCmdBuffers[1], 0);
    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    mBeginCommandBuffer(backbuffer->mTransitionCmdBuffers[1], &info);
    mCmdPipelineBarrier(backbuffer->mTransitionCmdBuffers[1], srcStageMask, dstStageMask, 0, 0,
                        nullptr, 0, nullptr, 1, &imageMemoryBarrier);
    mEndCommandBuffer(backbuffer->mTransitionCmdBuffers[1]);

    // Track the new layout so the next acquire knows what to transition from.
    surface->mImageInfos[backbuffer->mImageIndex].mImageLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 0;
    submitInfo.pWaitDstStageMask = 0;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->mTransitionCmdBuffers[1];
    submitInfo.signalSemaphoreCount = 1;
    // When this command buffer finishes we will signal this semaphore so that we know it is now
    // safe to present the image to the screen.
    submitInfo.pSignalSemaphores = &backbuffer->mRenderSemaphore;

    // Attach second fence to submission here so we can track when the command buffer finishes.
    mQueueSubmit(mGraphicsQueue, 1, &submitInfo, backbuffer->mUsageFences[1]);

    // Submit present operation to present queue. We use a semaphore here to make sure all rendering
    // to the image is complete and that the layout has been change to present on the graphics
    // queue.
    const VkPresentInfoKHR presentInfo = {
            VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,  // sType
            NULL,                                // pNext
            1,                                   // waitSemaphoreCount
            &backbuffer->mRenderSemaphore,       // pWaitSemaphores
            1,                                   // swapchainCount
            &surface->mSwapchain,                // pSwapchains
            &backbuffer->mImageIndex,            // pImageIndices
            NULL                                 // pResults
    };

    mQueuePresentKHR(mPresentQueue, &presentInfo);

    // Bookkeeping for getAge(): stamp this image with the current frame time
    // and mark its contents valid for partial-redraw reuse.
    surface->mBackbuffer.reset();
    surface->mImageInfos[backbuffer->mImageIndex].mLastUsed = surface->mCurrentTime;
    surface->mImageInfos[backbuffer->mImageIndex].mInvalid = false;
    surface->mCurrentTime++;
}
972
973int VulkanManager::getAge(VulkanSurface* surface) {
Greg Daniel74ea2012017-11-10 11:32:58 -0500974 SkASSERT(surface->mBackbuffers);
John Reck1bcacfd2017-11-03 10:12:19 -0700975 VulkanSurface::BackbufferInfo* backbuffer =
976 surface->mBackbuffers + surface->mCurrentBackbufferIndex;
977 if (mSwapBehavior == SwapBehavior::Discard ||
978 surface->mImageInfos[backbuffer->mImageIndex].mInvalid) {
Greg Danielcd558522016-11-17 13:31:40 -0500979 return 0;
980 }
981 uint16_t lastUsed = surface->mImageInfos[backbuffer->mImageIndex].mLastUsed;
982 return surface->mCurrentTime - lastUsed;
Derek Sollenberger0e3cba32016-11-09 11:58:36 -0500983}
984
Stan Iliev564ca3e2018-09-04 22:00:00 +0000985status_t VulkanManager::fenceWait(sp<Fence>& fence) {
986 //TODO: Insert a wait on fence command into the Vulkan command buffer.
987 // Block CPU on the fence.
988 status_t err = fence->waitForever("VulkanManager::fenceWait");
989 if (err != NO_ERROR) {
990 ALOGE("VulkanManager::fenceWait: error waiting for fence: %d", err);
991 return err;
992 }
993 return OK;
994}
995
// Stub. Intended to produce a native fence in |nativeFence| that signals once
// the pending Vulkan commands for the frame have been flushed; currently it
// does nothing and always reports success.
status_t VulkanManager::createReleaseFence(sp<Fence>& nativeFence) {
    //TODO: Create a fence that is signaled, when all the pending Vulkan commands are flushed.
    return OK;
}
1000
Derek Sollenberger0e3cba32016-11-09 11:58:36 -05001001} /* namespace renderthread */
1002} /* namespace uirenderer */
1003} /* namespace android */