/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkGpu.h"

#include "GrContextOptions.h"
#include "GrGeometryProcessor.h"
#include "GrGpuResourceCacheAccess.h"
#include "GrMesh.h"
#include "GrPipeline.h"
#include "GrRenderTargetPriv.h"
#include "GrSurfacePriv.h"
#include "GrTexturePriv.h"

#include "GrVkCommandBuffer.h"
#include "GrVkImage.h"
#include "GrVkIndexBuffer.h"
#include "GrVkMemory.h"
#include "GrVkPipeline.h"
#include "GrVkProgram.h"
#include "GrVkProgramBuilder.h"
#include "GrVkProgramDesc.h"
#include "GrVkRenderPass.h"
#include "GrVkResourceProvider.h"
#include "GrVkTexture.h"
#include "GrVkTextureRenderTarget.h"
#include "GrVkTransferBuffer.h"
#include "GrVkVertexBuffer.h"

#include "SkConfig8888.h"

#include "vk/GrVkInterface.h"
#include "vk/GrVkTypes.h"

#define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
#define VK_CALL_RET(RET, X) GR_VK_CALL_RET(this->vkInterface(), RET, X)
#define VK_CALL_ERRCHECK(X) GR_VK_CALL_ERRCHECK(this->vkInterface(), X)
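// The VK_CALL* macros dispatch Vulkan entry points through the GrVkInterface owned by this
// GrVkGpu (this->vkInterface()). VK_CALL_RET captures the call's return value and
// VK_CALL_ERRCHECK additionally checks the returned VkResult.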

////////////////////////////////////////////////////////////////////////////////
// Stuff used to set up a GrVkGpu secretly for now.

#ifdef ENABLE_VK_LAYERS
VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(
    VkDebugReportFlagsEXT       flags,
    VkDebugReportObjectTypeEXT  objectType,
    uint64_t                    object,
    size_t                      location,
    int32_t                     messageCode,
    const char*                 pLayerPrefix,
    const char*                 pMessage,
    void*                       pUserData) {
    if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
        SkDebugf("Vulkan error [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    } else if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) {
        SkDebugf("Vulkan warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    } else if (flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) {
        SkDebugf("Vulkan perf warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    } else {
        SkDebugf("Vulkan info/debug [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    }
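    // Returning VK_FALSE tells the layer not to abort the Vulkan call that triggered this report.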
    return VK_FALSE;
}

const char* kEnabledLayerNames[] = {
    // elements of VK_LAYER_LUNARG_standard_validation
    "VK_LAYER_LUNARG_threading",
    "VK_LAYER_LUNARG_param_checker",
    "VK_LAYER_LUNARG_device_limits",
    "VK_LAYER_LUNARG_object_tracker",
    "VK_LAYER_LUNARG_image",
    "VK_LAYER_LUNARG_mem_tracker",
    "VK_LAYER_LUNARG_draw_state",
    "VK_LAYER_LUNARG_swapchain",
    "VK_LAYER_GOOGLE_unique_objects",
    // not included in standard_validation
    //"VK_LAYER_LUNARG_api_dump",
};
const char* kEnabledInstanceExtensionNames[] = {
    VK_EXT_DEBUG_REPORT_EXTENSION_NAME
};

bool verify_instance_layers() {
    // make sure we can actually use the extensions and layers above
    uint32_t extensionCount;
    VkResult res = vkEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
    res = vkEnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions);
    if (VK_SUCCESS != res) {
        return false;
    }
    int instanceExtensionsFound = 0;
    for (uint32_t j = 0; j < ARRAYSIZE(kEnabledInstanceExtensionNames); ++j) {
        for (uint32_t i = 0; i < extensionCount; ++i) {
            if (!strncmp(extensions[i].extensionName, kEnabledInstanceExtensionNames[j],
                         strlen(kEnabledInstanceExtensionNames[j]))) {
                ++instanceExtensionsFound;
                break;
            }
        }
    }
    delete[] extensions;

    uint32_t layerCount;
    res = vkEnumerateInstanceLayerProperties(&layerCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkLayerProperties* layers = new VkLayerProperties[layerCount];
    res = vkEnumerateInstanceLayerProperties(&layerCount, layers);
    if (VK_SUCCESS != res) {
        return false;
    }
    int instanceLayersFound = 0;
    for (uint32_t j = 0; j < ARRAYSIZE(kEnabledLayerNames); ++j) {
        for (uint32_t i = 0; i < layerCount; ++i) {
            if (!strncmp(layers[i].layerName, kEnabledLayerNames[j],
                         strlen(kEnabledLayerNames[j]))) {
                ++instanceLayersFound;
                break;
            }
        }
    }
    delete[] layers;

    return instanceExtensionsFound == ARRAYSIZE(kEnabledInstanceExtensionNames) &&
           instanceLayersFound == ARRAYSIZE(kEnabledLayerNames);
}

bool verify_device_layers(VkPhysicalDevice physDev) {
    uint32_t layerCount;
    VkResult res = vkEnumerateDeviceLayerProperties(physDev, &layerCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkLayerProperties* layers = new VkLayerProperties[layerCount];
    res = vkEnumerateDeviceLayerProperties(physDev, &layerCount, layers);
    if (VK_SUCCESS != res) {
        return false;
    }
    int deviceLayersFound = 0;
    for (uint32_t j = 0; j < ARRAYSIZE(kEnabledLayerNames); ++j) {
        for (uint32_t i = 0; i < layerCount; ++i) {
            if (!strncmp(layers[i].layerName, kEnabledLayerNames[j],
                         strlen(kEnabledLayerNames[j]))) {
                ++deviceLayersFound;
                break;
            }
        }
    }
    delete[] layers;

    return deviceLayersFound == ARRAYSIZE(kEnabledLayerNames);
}
#endif

// For now the VkGpuCreate is using the same signature as GL. This is mostly for ease of
// hiding this code from official skia. In the end the VkGpuCreate will not take a
// GrBackendContext and most likely would take an optional device and queues to use.
GrGpu* vk_gpu_create(GrBackendContext backendContext, const GrContextOptions& options,
                     GrContext* context) {
    // Below is Vulkan setup code that normally would be done by a client, but we do it here
    // for testing purposes.
    VkPhysicalDevice physDev;
    VkDevice device;
    VkInstance inst;
    VkResult err;

    const VkApplicationInfo app_info = {
        VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
        nullptr,                            // pNext
        "vktest",                           // pApplicationName
        0,                                  // applicationVersion
        "vktest",                           // pEngineName
        0,                                  // engineVersion
        kGrVkMinimumVersion,                // apiVersion
    };

    const char** enabledLayerNames = nullptr;
    int enabledLayerCount = 0;
    const char** enabledInstanceExtensionNames = nullptr;
    int enabledInstanceExtensionCount = 0;
#ifdef ENABLE_VK_LAYERS
    if (verify_instance_layers()) {
        enabledLayerNames = kEnabledLayerNames;
        enabledLayerCount = ARRAYSIZE(kEnabledLayerNames);
        enabledInstanceExtensionNames = kEnabledInstanceExtensionNames;
        enabledInstanceExtensionCount = ARRAYSIZE(kEnabledInstanceExtensionNames);
    }
#endif

    const VkInstanceCreateInfo instance_create = {
        VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
        nullptr,                                // pNext
        0,                                      // flags
        &app_info,                              // pApplicationInfo
        enabledLayerCount,                      // enabledLayerNameCount
        enabledLayerNames,                      // ppEnabledLayerNames
        enabledInstanceExtensionCount,          // enabledExtensionNameCount
        enabledInstanceExtensionNames,          // ppEnabledExtensionNames
    };

    err = vkCreateInstance(&instance_create, nullptr, &inst);
    if (err < 0) {
        SkDebugf("vkCreateInstance failed: %d\n", err);
        SkFAIL("failing");
    }

    uint32_t gpuCount;
    err = vkEnumeratePhysicalDevices(inst, &gpuCount, nullptr);
    if (err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        SkFAIL("failing");
    }
    SkASSERT(gpuCount > 0);
    // Just returning the first physical device instead of getting the whole array.
    gpuCount = 1;
    err = vkEnumeratePhysicalDevices(inst, &gpuCount, &physDev);
    if (err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        SkFAIL("failing");
    }

    // query to get the initial queue props size
    uint32_t queueCount;
    vkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr);
    SkASSERT(queueCount >= 1);

    SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties));
    // now get the actual queue props
    VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get();

    vkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps);

    // iterate to find the graphics queue
    uint32_t graphicsQueueIndex = -1;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphicsQueueIndex = i;
            break;
        }
    }
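    // If no graphics-capable family was found, graphicsQueueIndex is left at (uint32_t)-1 and
    // the assert below fires.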
    SkASSERT(graphicsQueueIndex < queueCount);

#ifdef ENABLE_VK_LAYERS
    // unlikely that the device will have different layers than the instance, but good to check
    if (!verify_device_layers(physDev)) {
        enabledLayerNames = nullptr;
        enabledLayerCount = 0;
    }
#endif

    float queuePriorities[1] = { 0.0 };
    const VkDeviceQueueCreateInfo queueInfo = {
        VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
        nullptr,                                    // pNext
        0,                                          // VkDeviceQueueCreateFlags
        graphicsQueueIndex,                         // queueFamilyIndex
        1,                                          // queueCount
        queuePriorities,                            // pQueuePriorities
    };
    const VkDeviceCreateInfo deviceInfo = {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
        nullptr,                              // pNext
        0,                                    // VkDeviceCreateFlags
        1,                                    // queueCreateInfoCount
        &queueInfo,                           // pQueueCreateInfos
        enabledLayerCount,                    // layerCount
        enabledLayerNames,                    // ppEnabledLayerNames
        0,                                    // extensionCount
        nullptr,                              // ppEnabledExtensionNames
        nullptr                               // ppEnabledFeatures
    };

    err = vkCreateDevice(physDev, &deviceInfo, nullptr, &device);
    if (err) {
        SkDebugf("CreateDevice failed: %d\n", err);
        SkFAIL("failing");
    }

    VkQueue queue;
    vkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);

    const VkCommandPoolCreateInfo cmdPoolInfo = {
        VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType
        nullptr,                                    // pNext
        0,                                          // CmdPoolCreateFlags
        graphicsQueueIndex,                         // queueFamilyIndex
    };

    VkCommandPool cmdPool;
    err = vkCreateCommandPool(device, &cmdPoolInfo, nullptr, &cmdPool);
    if (err) {
        SkDebugf("CreateCommandPool failed: %d\n", err);
        SkFAIL("failing");
    }

    return new GrVkGpu(context, options, physDev, device, queue, cmdPool, inst);
}

////////////////////////////////////////////////////////////////////////////////

GrVkGpu::GrVkGpu(GrContext* context, const GrContextOptions& options,
                 VkPhysicalDevice physDev, VkDevice device, VkQueue queue, VkCommandPool cmdPool,
                 VkInstance inst)
    : INHERITED(context)
    , fDevice(device)
    , fQueue(queue)
    , fCmdPool(cmdPool)
    , fResourceProvider(this)
    , fVkInstance(inst) {
    fInterface.reset(GrVkCreateInterface(fVkInstance));
    fCompiler = shaderc_compiler_initialize();

    fVkCaps.reset(new GrVkCaps(options, fInterface, physDev));
    fCaps.reset(SkRef(fVkCaps.get()));

    fResourceProvider.init();

    fCurrentCmdBuffer = fResourceProvider.createCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->begin(this);
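    // The gpu keeps a single primary command buffer open at all times; submitCommandBuffer()
    // ends it, hands it to fQueue, and immediately begins a fresh one.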
    VK_CALL(GetPhysicalDeviceMemoryProperties(physDev, &fPhysDevMemProps));

#ifdef ENABLE_VK_LAYERS
    if (fInterface->hasInstanceExtension(VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) {
        /* Setup callback creation information */
        VkDebugReportCallbackCreateInfoEXT callbackCreateInfo;
        callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
        callbackCreateInfo.pNext = nullptr;
        callbackCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT |
                                   VK_DEBUG_REPORT_WARNING_BIT_EXT |
                                   //VK_DEBUG_REPORT_INFORMATION_BIT_EXT |
                                   //VK_DEBUG_REPORT_DEBUG_BIT_EXT |
                                   VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
        callbackCreateInfo.pfnCallback = &DebugReportCallback;
        callbackCreateInfo.pUserData = nullptr;

        /* Register the callback */
        GR_VK_CALL_ERRCHECK(fInterface, CreateDebugReportCallbackEXT(inst, &callbackCreateInfo,
                                                                     nullptr, &fCallback));
    }
#endif
}

GrVkGpu::~GrVkGpu() {
    shaderc_compiler_release(fCompiler);
    fCurrentCmdBuffer->end(this);
    fCurrentCmdBuffer->unref(this);

    // wait for all commands to finish
    VkResult res = VK_CALL(QueueWaitIdle(fQueue));
    SkASSERT(res == VK_SUCCESS);

    // must call this just before we destroy the VkDevice
    fResourceProvider.destroyResources();

#ifdef ENABLE_VK_LAYERS
    VK_CALL(DestroyDebugReportCallbackEXT(fVkInstance, fCallback, nullptr));
#endif

    VK_CALL(DestroyCommandPool(fDevice, fCmdPool, nullptr));
    VK_CALL(DestroyDevice(fDevice, nullptr));
    VK_CALL(DestroyInstance(fVkInstance, nullptr));
}

///////////////////////////////////////////////////////////////////////////////

void GrVkGpu::submitCommandBuffer(SyncQueue sync) {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->end(this);

    fCurrentCmdBuffer->submitToQueue(this, fQueue, sync);
    fResourceProvider.checkCommandBuffers();

    // Release old command buffer and create a new one
    fCurrentCmdBuffer->unref(this);
    fCurrentCmdBuffer = fResourceProvider.createCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);

    fCurrentCmdBuffer->begin(this);
}

///////////////////////////////////////////////////////////////////////////////
GrVertexBuffer* GrVkGpu::onCreateVertexBuffer(size_t size, bool dynamic) {
    return GrVkVertexBuffer::Create(this, size, dynamic);
}

GrIndexBuffer* GrVkGpu::onCreateIndexBuffer(size_t size, bool dynamic) {
    return GrVkIndexBuffer::Create(this, size, dynamic);
}

GrTransferBuffer* GrVkGpu::onCreateTransferBuffer(size_t size, TransferType type) {
    GrVkBuffer::Type bufferType = kCpuToGpu_TransferType == type ? GrVkBuffer::kCopyRead_Type
                                                                 : GrVkBuffer::kCopyWrite_Type;
    return GrVkTransferBuffer::Create(this, size, bufferType);
}

////////////////////////////////////////////////////////////////////////////////
bool GrVkGpu::onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
                                   GrPixelConfig srcConfig, DrawPreference* drawPreference,
                                   WritePixelTempDrawInfo* tempDrawInfo) {
    if (kIndex_8_GrPixelConfig == srcConfig || GrPixelConfigIsCompressed(dstSurface->config())) {
        return false;
    }

    // Currently we don't handle draws, so if the caller wants/needs to do a draw we need to fail
    if (kNoDraw_DrawPreference != *drawPreference) {
        return false;
    }

    if (dstSurface->config() != srcConfig) {
        // TODO: This should fall back to drawing or copying to change config of dstSurface to
        // match that of srcConfig.
        return false;
    }

    return true;
}

bool GrVkGpu::onWritePixels(GrSurface* surface,
                            int left, int top, int width, int height,
                            GrPixelConfig config,
                            const SkTArray<GrMipLevel>& texels) {
    GrVkTexture* vkTex = static_cast<GrVkTexture*>(surface->asTexture());
    if (!vkTex) {
        return false;
    }

    // TODO: We're ignoring MIP levels here.
    if (texels.empty() || !texels.begin()->fPixels) {
        return false;
    }

    // We assume Vulkan doesn't do sRGB <-> linear conversions when reading and writing pixels.
    if (GrPixelConfigIsSRGB(surface->config()) != GrPixelConfigIsSRGB(config)) {
        return false;
    }

    bool success = false;
    if (GrPixelConfigIsCompressed(vkTex->desc().fConfig)) {
        // We check that config == desc.fConfig in GrGpu::getWritePixelsInfo()
        SkASSERT(config == vkTex->desc().fConfig);
        // TODO: add compressed texture support
        // delete the following two lines and uncomment the two after that when ready
        vkTex->unref();
        return false;
        //success = this->uploadCompressedTexData(vkTex->desc(), buffer, false, left, top, width,
        //                                        height);
    } else {
        bool linearTiling = vkTex->isLinearTiled();
        if (linearTiling && VK_IMAGE_LAYOUT_PREINITIALIZED != vkTex->currentLayout()) {
            // Need to change the layout to general in order to perform a host write
            VkImageLayout layout = vkTex->currentLayout();
            VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
            VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_HOST_BIT;
            VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
            VkAccessFlags dstAccessMask = VK_ACCESS_HOST_WRITE_BIT;
            vkTex->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_GENERAL,
                                  srcAccessMask,
                                  dstAccessMask,
                                  srcStageMask,
                                  dstStageMask,
                                  false);
        }
        success = this->uploadTexData(vkTex, left, top, width, height, config,
                                      texels.begin()->fPixels, texels.begin()->fRowBytes);
    }

    if (success) {
        vkTex->texturePriv().dirtyMipMaps(true);
        return true;
    }

    return false;
}

bool GrVkGpu::uploadTexData(GrVkTexture* tex,
                            int left, int top, int width, int height,
                            GrPixelConfig dataConfig,
                            const void* data,
                            size_t rowBytes) {
    SkASSERT(data);

    // If we're uploading compressed data then we should be using uploadCompressedTexData
    SkASSERT(!GrPixelConfigIsCompressed(dataConfig));

    bool linearTiling = tex->isLinearTiled();

    size_t bpp = GrBytesPerPixel(dataConfig);

    const GrSurfaceDesc& desc = tex->desc();

    if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
                                               &width, &height, &data, &rowBytes)) {
        return false;
    }
    size_t trimRowBytes = width * bpp;

    if (linearTiling) {
        SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == tex->currentLayout() ||
                 VK_IMAGE_LAYOUT_GENERAL == tex->currentLayout());
        const VkImageSubresource subres = {
            VK_IMAGE_ASPECT_COLOR_BIT,
            0,  // mipLevel
            0,  // arraySlice
        };
        VkSubresourceLayout layout;
        VkResult err;

        const GrVkInterface* interface = this->vkInterface();

        GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
                                                        tex->textureImage(),
                                                        &subres,
                                                        &layout));

        int texTop = kBottomLeft_GrSurfaceOrigin == desc.fOrigin ? tex->height() - top - height
                                                                 : top;
        VkDeviceSize offset = texTop*layout.rowPitch + left*bpp;
        VkDeviceSize size = height*layout.rowPitch;
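        // Map only the rows this upload touches; offset and size are computed from the
        // subresource's rowPitch rather than the caller's rowBytes.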
        void* mapPtr;
        err = GR_VK_CALL(interface, MapMemory(fDevice, tex->textureMemory(), offset, size, 0,
                                              &mapPtr));
        if (err) {
            return false;
        }

        if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
            // copy into buffer by rows
            const char* srcRow = reinterpret_cast<const char*>(data);
            char* dstRow = reinterpret_cast<char*>(mapPtr) + (height - 1)*layout.rowPitch;
            for (int y = 0; y < height; y++) {
                memcpy(dstRow, srcRow, trimRowBytes);
                srcRow += rowBytes;
                dstRow -= layout.rowPitch;
            }
        } else {
            // If there is no padding on the src (rowBytes) or dst (layout.rowPitch) we can memcpy
            if (trimRowBytes == rowBytes && trimRowBytes == layout.rowPitch) {
                memcpy(mapPtr, data, trimRowBytes * height);
            } else {
                SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), data, rowBytes,
                             trimRowBytes, height);
            }
        }

        GR_VK_CALL(interface, UnmapMemory(fDevice, tex->textureMemory()));
    } else {
        GrVkTransferBuffer* transferBuffer =
            GrVkTransferBuffer::Create(this, trimRowBytes * height, GrVkBuffer::kCopyRead_Type);

        void* mapPtr = transferBuffer->map();

        if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
            // copy into buffer by rows
            const char* srcRow = reinterpret_cast<const char*>(data);
            char* dstRow = reinterpret_cast<char*>(mapPtr) + (height - 1)*trimRowBytes;
            for (int y = 0; y < height; y++) {
                memcpy(dstRow, srcRow, trimRowBytes);
                srcRow += rowBytes;
                dstRow -= trimRowBytes;
            }
        } else {
            // If there is no padding on the src data rows, we can do a single memcpy
            if (trimRowBytes == rowBytes) {
                memcpy(mapPtr, data, trimRowBytes * height);
            } else {
                SkRectMemcpy(mapPtr, trimRowBytes, data, rowBytes, trimRowBytes, height);
            }
        }

        transferBuffer->unmap();

        // make sure the unmap has finished
        transferBuffer->addMemoryBarrier(this,
                                         VK_ACCESS_HOST_WRITE_BIT,
                                         VK_ACCESS_TRANSFER_READ_BIT,
                                         VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
                                         VK_PIPELINE_STAGE_TRANSFER_BIT,
                                         false);

        // Set up copy region
        bool flipY = kBottomLeft_GrSurfaceOrigin == tex->origin();
        VkOffset3D offset = {
            left,
            flipY ? tex->height() - top - height : top,
            0
        };

        VkBufferImageCopy region;
        memset(&region, 0, sizeof(VkBufferImageCopy));
        region.bufferOffset = 0;
        region.bufferRowLength = width;
        region.bufferImageHeight = height;
        region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
        region.imageOffset = offset;
        region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };
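        // bufferRowLength/bufferImageHeight of width/height describe the tightly packed
        // (trimRowBytes per row) staging data written above.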

        // Change layout of our target so it can be copied to
        VkImageLayout layout = tex->currentLayout();
        VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
        VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
        tex->setImageLayout(this,
                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                            srcAccessMask,
                            dstAccessMask,
                            srcStageMask,
                            dstStageMask,
                            false);

        // Copy the buffer to the image
        fCurrentCmdBuffer->copyBufferToImage(this,
                                             transferBuffer,
                                             tex,
                                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                             1,
                                             &region);

        // Submit the current command buffer to the Queue
        this->submitCommandBuffer(kSkip_SyncQueue);

        transferBuffer->unref();
    }

    return true;
}

////////////////////////////////////////////////////////////////////////////////
GrTexture* GrVkGpu::onCreateTexture(const GrSurfaceDesc& desc, GrGpuResource::LifeCycle lifeCycle,
                                    const SkTArray<GrMipLevel>& texels) {
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);

    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(desc.fConfig, &pixelFormat)) {
        return nullptr;
    }

    if (!fVkCaps->isConfigTexturable(desc.fConfig)) {
        return nullptr;
    }

    bool linearTiling = false;
    if (SkToBool(desc.fFlags & kZeroCopy_GrSurfaceFlag)) {
        if (fVkCaps->isConfigTexurableLinearly(desc.fConfig) &&
            (!renderTarget || fVkCaps->isConfigRenderableLinearly(desc.fConfig, false))) {
            linearTiling = true;
        } else {
            return nullptr;
        }
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    if (renderTarget) {
        usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    }

    // For now we will set the VK_IMAGE_USAGE_TRANSFER_DESTINATION_BIT and
    // VK_IMAGE_USAGE_TRANSFER_SOURCE_BIT on every texture since we do not know whether or not we
    // will be using this texture in some copy or not. Also this assumes, as is the current case,
    // that all render targets in vulkan are also textures. If we change this practice of setting
    // both bits, we must make sure to set the destination bit if we are uploading srcData to the
    // texture.
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VkFlags memProps = (!texels.empty() && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
                                                           VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

    // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is
    // requested, this ImageDesc describes the resolved texture. Therefore we always have samples
    // set to 1.
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = pixelFormat;
    imageDesc.fWidth = desc.fWidth;
    imageDesc.fHeight = desc.fHeight;
    imageDesc.fLevels = 1;
    imageDesc.fSamples = 1;
    imageDesc.fImageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = usageFlags;
    imageDesc.fMemProps = memProps;

    GrVkTexture* tex;
    if (renderTarget) {
        tex = GrVkTextureRenderTarget::CreateNewTextureRenderTarget(this, desc, lifeCycle,
                                                                    imageDesc);
#if 0
        // This clear can be included to fix warning described in https://bugs.skia.org/5045
        // Obviously we do not want to be clearing needlessly every time we create a render target.
        SkIRect rect = SkIRect::MakeWH(tex->width(), tex->height());
        this->clear(rect, GrColor_TRANSPARENT_BLACK, tex->asRenderTarget());
#endif
    } else {
        tex = GrVkTexture::CreateNewTexture(this, desc, lifeCycle, imageDesc);
    }

    if (!tex) {
        return nullptr;
    }

    // TODO: We're ignoring MIP levels here.
    if (!texels.empty()) {
        SkASSERT(texels.begin()->fPixels);
        if (!this->uploadTexData(tex, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
                                 texels.begin()->fPixels, texels.begin()->fRowBytes)) {
            tex->unref();
            return nullptr;
        }
    }

    return tex;
}

////////////////////////////////////////////////////////////////////////////////

static GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin) {
    // By default, all textures in Vk use TopLeft
    if (kDefault_GrSurfaceOrigin == origin) {
        return kTopLeft_GrSurfaceOrigin;
    } else {
        return origin;
    }
}

GrTexture* GrVkGpu::onWrapBackendTexture(const GrBackendTextureDesc& desc,
                                         GrWrapOwnership ownership) {
    VkFormat format;
    if (!GrPixelConfigToVkFormat(desc.fConfig, &format)) {
        return nullptr;
    }

    if (0 == desc.fTextureHandle) {
        return nullptr;
    }

    int maxSize = this->caps()->maxTextureSize();
    if (desc.fWidth > maxSize || desc.fHeight > maxSize) {
        return nullptr;
    }

    const GrVkTextureInfo* info = reinterpret_cast<const GrVkTextureInfo*>(desc.fTextureHandle);
    if (VK_NULL_HANDLE == info->fImage || VK_NULL_HANDLE == info->fAlloc) {
        return nullptr;
    }

    GrGpuResource::LifeCycle lifeCycle = (kAdopt_GrWrapOwnership == ownership)
                                                ? GrGpuResource::kAdopted_LifeCycle
                                                : GrGpuResource::kBorrowed_LifeCycle;

    GrSurfaceDesc surfDesc;
    // next line relies on GrBackendTextureDesc's flags matching GrTexture's
    surfDesc.fFlags = (GrSurfaceFlags)desc.fFlags;
    surfDesc.fWidth = desc.fWidth;
    surfDesc.fHeight = desc.fHeight;
    surfDesc.fConfig = desc.fConfig;
    surfDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount());
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag);
    // In GL, Chrome assumes all textures are BottomLeft
    // In VK, we don't have this restriction
    surfDesc.fOrigin = resolve_origin(desc.fOrigin);

    GrVkTexture* texture = nullptr;
    if (renderTarget) {
        texture = GrVkTextureRenderTarget::CreateWrappedTextureRenderTarget(this, surfDesc,
                                                                            lifeCycle, format,
                                                                            info);
    } else {
        texture = GrVkTexture::CreateWrappedTexture(this, surfDesc, lifeCycle, format,
                                                    info);
    }
    if (!texture) {
        return nullptr;
    }

    return texture;
}

GrRenderTarget* GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& wrapDesc,
                                                   GrWrapOwnership ownership) {

    const GrVkTextureInfo* info =
        reinterpret_cast<const GrVkTextureInfo*>(wrapDesc.fRenderTargetHandle);
    if (VK_NULL_HANDLE == info->fImage ||
        (VK_NULL_HANDLE == info->fAlloc && kAdopt_GrWrapOwnership == ownership)) {
        return nullptr;
    }

    GrGpuResource::LifeCycle lifeCycle = (kAdopt_GrWrapOwnership == ownership)
                                                ? GrGpuResource::kAdopted_LifeCycle
                                                : GrGpuResource::kBorrowed_LifeCycle;

    GrSurfaceDesc desc;
    desc.fConfig = wrapDesc.fConfig;
    desc.fFlags = kCheckAllocation_GrSurfaceFlag;
    desc.fWidth = wrapDesc.fWidth;
    desc.fHeight = wrapDesc.fHeight;
    desc.fSampleCnt = SkTMin(wrapDesc.fSampleCnt, this->caps()->maxSampleCount());

    desc.fOrigin = resolve_origin(wrapDesc.fOrigin);

    GrVkRenderTarget* tgt = GrVkRenderTarget::CreateWrappedRenderTarget(this, desc,
                                                                        lifeCycle,
                                                                        info);
    if (tgt && wrapDesc.fStencilBits) {
        if (!createStencilAttachmentForRenderTarget(tgt, desc.fWidth, desc.fHeight)) {
            tgt->unref();
            return nullptr;
        }
    }
    return tgt;
}

////////////////////////////////////////////////////////////////////////////////

void GrVkGpu::bindGeometry(const GrPrimitiveProcessor& primProc,
                           const GrNonInstancedMesh& mesh) {
    GrVkVertexBuffer* vbuf;
    vbuf = (GrVkVertexBuffer*)mesh.vertexBuffer();
    SkASSERT(vbuf);
    SkASSERT(!vbuf->isMapped());

    vbuf->addMemoryBarrier(this,
                           VK_ACCESS_HOST_WRITE_BIT,
                           VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
                           VK_PIPELINE_STAGE_HOST_BIT,
                           VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                           false);

    fCurrentCmdBuffer->bindVertexBuffer(this, vbuf);

    if (mesh.isIndexed()) {
        GrVkIndexBuffer* ibuf = (GrVkIndexBuffer*)mesh.indexBuffer();
        SkASSERT(ibuf);
        SkASSERT(!ibuf->isMapped());

        ibuf->addMemoryBarrier(this,
                               VK_ACCESS_HOST_WRITE_BIT,
                               VK_ACCESS_INDEX_READ_BIT,
                               VK_PIPELINE_STAGE_HOST_BIT,
                               VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                               false);

        fCurrentCmdBuffer->bindIndexBuffer(this, ibuf);
    }
}

////////////////////////////////////////////////////////////////////////////////

GrStencilAttachment* GrVkGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt,
                                                                     int width,
                                                                     int height) {
    SkASSERT(rt->asTexture());
    SkASSERT(width >= rt->width());
    SkASSERT(height >= rt->height());

    int samples = rt->numStencilSamples();

    SkASSERT(this->vkCaps().stencilFormats().count());
    const GrVkCaps::StencilFormat& sFmt = this->vkCaps().stencilFormats()[0];

    GrVkStencilAttachment* stencil(GrVkStencilAttachment::Create(this,
                                                                 GrGpuResource::kCached_LifeCycle,
                                                                 width,
                                                                 height,
                                                                 samples,
                                                                 sFmt));
    fStats.incStencilAttachmentCreates();
    return stencil;
}

////////////////////////////////////////////////////////////////////////////////

GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, int h,
                                                         GrPixelConfig config) {

    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
        return 0;
    }

    bool linearTiling = false;
    if (!fVkCaps->isConfigTexturable(config)) {
        return 0;
    }

    if (fVkCaps->isConfigTexurableLinearly(config)) {
        linearTiling = true;
    }

    // Currently this is not supported since it requires a copy which has not yet been implemented.
    if (srcData && !linearTiling) {
        return 0;
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VkFlags memProps = (srcData && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
                                                   VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

    VkImage image = VK_NULL_HANDLE;
    VkDeviceMemory alloc = VK_NULL_HANDLE;

    VkImageTiling imageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
    VkImageLayout initialLayout = (VK_IMAGE_TILING_LINEAR == imageTiling)
                                  ? VK_IMAGE_LAYOUT_PREINITIALIZED
                                  : VK_IMAGE_LAYOUT_UNDEFINED;

    // Create Image
    VkSampleCountFlagBits vkSamples;
    if (!GrSampleCountToVkSampleCount(1, &vkSamples)) {
        return 0;
    }

    const VkImageCreateInfo imageCreateInfo = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // sType
        NULL,                                // pNext
        0,                                   // VkImageCreateFlags
        VK_IMAGE_TYPE_2D,                    // VkImageType
        pixelFormat,                         // VkFormat
        { (uint32_t)w, (uint32_t)h, 1 },     // VkExtent3D
        1,                                   // mipLevels
        1,                                   // arrayLayers
        vkSamples,                           // samples
        imageTiling,                         // VkImageTiling
        usageFlags,                          // VkImageUsageFlags
        VK_SHARING_MODE_EXCLUSIVE,           // VkSharingMode
        0,                                   // queueFamilyCount
        0,                                   // pQueueFamilyIndices
        initialLayout                        // initialLayout
    };

    GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateImage(this->device(), &imageCreateInfo,
                                                         nullptr, &image));

    if (!GrVkMemory::AllocAndBindImageMemory(this, image, memProps, &alloc)) {
        VK_CALL(DestroyImage(this->device(), image, nullptr));
        return 0;
    }

    if (srcData) {
        if (linearTiling) {
            const VkImageSubresource subres = {
                VK_IMAGE_ASPECT_COLOR_BIT,
                0,  // mipLevel
                0,  // arraySlice
            };
            VkSubresourceLayout layout;
            VkResult err;

            VK_CALL(GetImageSubresourceLayout(fDevice, image, &subres, &layout));

            void* mapPtr;
            err = VK_CALL(MapMemory(fDevice, alloc, 0, layout.rowPitch * h, 0, &mapPtr));
            if (err) {
                VK_CALL(FreeMemory(this->device(), alloc, nullptr));
                VK_CALL(DestroyImage(this->device(), image, nullptr));
                return 0;
            }

            size_t bpp = GrBytesPerPixel(config);
            size_t rowCopyBytes = bpp * w;
            // If there is no padding on dst (layout.rowPitch) we can do a single memcopy.
            // This assumes the srcData comes in with no padding.
            if (rowCopyBytes == layout.rowPitch) {
                memcpy(mapPtr, srcData, rowCopyBytes * h);
            } else {
                SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), srcData, rowCopyBytes,
                             rowCopyBytes, h);
            }
            VK_CALL(UnmapMemory(fDevice, alloc));
        } else {
            // TODO: Add support for copying to optimal tiling
            SkASSERT(false);
        }
    }

    GrVkTextureInfo* info = new GrVkTextureInfo;
    info->fImage = image;
    info->fAlloc = alloc;
    info->fImageTiling = imageTiling;
    info->fImageLayout = initialLayout;

    return (GrBackendObject)info;
}

bool GrVkGpu::isTestingOnlyBackendTexture(GrBackendObject id) const {
    const GrVkTextureInfo* backend = reinterpret_cast<const GrVkTextureInfo*>(id);

    if (backend && backend->fImage && backend->fAlloc) {
        VkMemoryRequirements req;
        memset(&req, 0, sizeof(req));
        GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
                                                                   backend->fImage,
                                                                   &req));
        // TODO: find a better check
        // This will probably fail with a different driver
        return (req.size > 0) && (req.size <= 8192 * 8192);
    }

    return false;
}

void GrVkGpu::deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandon) {
    const GrVkTextureInfo* backend = reinterpret_cast<const GrVkTextureInfo*>(id);

    if (backend) {
        if (!abandon) {
            // something in the command buffer may still be using this, so force submit
            this->submitCommandBuffer(kForce_SyncQueue);

            VK_CALL(FreeMemory(this->device(), backend->fAlloc, nullptr));
            VK_CALL(DestroyImage(this->device(), backend->fImage, nullptr));
        }
        delete backend;
    }
}

////////////////////////////////////////////////////////////////////////////////

void GrVkGpu::addMemoryBarrier(VkPipelineStageFlags srcStageMask,
                               VkPipelineStageFlags dstStageMask,
                               bool byRegion,
                               VkMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                     VkPipelineStageFlags dstStageMask,
                                     bool byRegion,
                                     VkBufferMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kBufferMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::addImageMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                    VkPipelineStageFlags dstStageMask,
                                    bool byRegion,
                                    VkImageMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kImageMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::finishDrawTarget() {
    // Submit the current command buffer to the Queue
    this->submitCommandBuffer(kSkip_SyncQueue);
}

void GrVkGpu::clearStencil(GrRenderTarget* target) {
    if (nullptr == target) {
        return;
    }
    GrStencilAttachment* stencil = target->renderTargetPriv().getStencilAttachment();
    GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));

    VkImageLayout origDstLayout = vkStencil->currentLayout();

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    vkStencil->setImageLayout(this,
                              VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                              srcAccessMask,
                              dstAccessMask,
                              srcStageMask,
                              dstStageMask,
                              false);

    VkImageSubresourceRange subRange;
    memset(&subRange, 0, sizeof(VkImageSubresourceRange));
    subRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    subRange.baseMipLevel = 0;
    subRange.levelCount = 1;
    subRange.baseArrayLayer = 0;
    subRange.layerCount = 1;

    // TODO: I imagine that most times we want to clear a stencil it will be at the beginning of a
    // draw. Thus we should look into using the load op functions on the render pass to clear out
    // the stencil there.
    fCurrentCmdBuffer->clearDepthStencilImage(this, vkStencil, &vkStencilColor, 1, &subRange);
}

void GrVkGpu::onClearStencilClip(GrRenderTarget* target, const SkIRect& rect, bool insideClip) {
    SkASSERT(target);

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
    GrStencilAttachment* sb = target->renderTargetPriv().getStencilAttachment();
    GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)sb;

    // this should only be called internally when we know we have a
    // stencil buffer.
    SkASSERT(sb);
    int stencilBitCount = sb->bits();

    // The contract with the callers does not guarantee that we preserve all bits in the stencil
    // during this clear. Thus we will clear the entire stencil to the desired value.

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
    if (insideClip) {
        vkStencilColor.stencil = (1 << (stencilBitCount - 1));
    } else {
        vkStencilColor.stencil = 0;
    }

    VkImageLayout origDstLayout = vkStencil->currentLayout();
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    VkPipelineStageFlags srcStageMask =
        GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    vkStencil->setImageLayout(this,
                              VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                              srcAccessMask,
                              dstAccessMask,
                              srcStageMask,
                              dstStageMask,
                              false);

    VkClearRect clearRect;
    // Flip rect if necessary
    SkIRect vkRect = rect;

    if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) {
        vkRect.fTop = vkRT->height() - rect.fBottom;
        vkRect.fBottom = vkRT->height() - rect.fTop;
    }

    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };

    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
    SkASSERT(renderPass);
    fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

    uint32_t stencilIndex;
    SkAssertResult(renderPass->stencilAttachmentIndex(&stencilIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    attachment.colorAttachment = 0; // this value shouldn't matter
    attachment.clearValue.depthStencil = vkStencilColor;

    fCurrentCmdBuffer->clearAttachments(this, 1, &attachment, 1, &clearRect);
    fCurrentCmdBuffer->endRenderPass(this);

    return;
}

void GrVkGpu::onClear(GrRenderTarget* target, const SkIRect& rect, GrColor color) {
    // parent class should never let us get here with no RT
    SkASSERT(target);

    VkClearColorValue vkColor;
    GrColorToRGBAFloat(color, vkColor.float32);

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
    VkImageLayout origDstLayout = vkRT->currentLayout();

    if (rect.width() != target->width() || rect.height() != target->height()) {
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
        VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
        VkPipelineStageFlags srcStageMask =
            GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
        vkRT->setImageLayout(this,
                             VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                             srcAccessMask,
                             dstAccessMask,
                             srcStageMask,
                             dstStageMask,
                             false);

        VkClearRect clearRect;
        // Flip rect if necessary
        SkIRect vkRect = rect;
        if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) {
            vkRect.fTop = vkRT->height() - rect.fBottom;
            vkRect.fBottom = vkRT->height() - rect.fTop;
        }
        clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
        clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
        clearRect.baseArrayLayer = 0;
        clearRect.layerCount = 1;

        const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
        SkASSERT(renderPass);
        fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

        uint32_t colorIndex;
        SkAssertResult(renderPass->colorAttachmentIndex(&colorIndex));

        VkClearAttachment attachment;
        attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        attachment.colorAttachment = colorIndex;
        attachment.clearValue.color = vkColor;

        fCurrentCmdBuffer->clearAttachments(this, 1, &attachment, 1, &clearRect);
        fCurrentCmdBuffer->endRenderPass(this);
        return;
    }

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    vkRT->setImageLayout(this,
                         VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                         srcAccessMask,
                         dstAccessMask,
                         srcStageMask,
                         dstStageMask,
                         false);

    VkImageSubresourceRange subRange;
    memset(&subRange, 0, sizeof(VkImageSubresourceRange));
    subRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    subRange.baseMipLevel = 0;
    subRange.levelCount = 1;
    subRange.baseArrayLayer = 0;
    subRange.layerCount = 1;

    // In the future we may not actually be doing this type of clear at all. If we are inside a
    // render pass or doing a non full clear then we will use CmdClearColorAttachment. The more
    // common use case will be clearing an attachment at the start of a render pass, in which case
    // we will use the clear load ops.
    fCurrentCmdBuffer->clearColorImage(this,
                                       vkRT,
                                       &vkColor,
                                       1, &subRange);
}

inline bool can_copy_image(const GrSurface* dst,
                           const GrSurface* src,
                           const GrVkGpu* gpu) {
    if (src->asTexture() &&
        dst->asTexture() &&
        src->origin() == dst->origin() &&
        src->config() == dst->config()) {
        return true;
    }

    // How does msaa play into this? If a VkTexture is multisampled, are we copying the
    // multisampled or the resolved image here?

    return false;
}

void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst,
                                     GrSurface* src,
                                     const SkIRect& srcRect,
                                     const SkIPoint& dstPoint) {
    SkASSERT(can_copy_image(dst, src, this));

    // Insert memory barriers to switch src and dst to transfer_source and transfer_dst layouts
    GrVkTexture* dstTex = static_cast<GrVkTexture*>(dst->asTexture());
    GrVkTexture* srcTex = static_cast<GrVkTexture*>(src->asTexture());

    VkImageLayout origDstLayout = dstTex->currentLayout();
    VkImageLayout origSrcLayout = srcTex->currentLayout();

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    // These flags are for flushing/invalidating caches and for the dst image it doesn't matter if
    // the cache is flushed since it is only being written to.
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    dstTex->setImageLayout(this,
                           VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                           srcAccessMask,
                           dstAccessMask,
                           srcStageMask,
                           dstStageMask,
                           false);

    srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origSrcLayout);
    dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origSrcLayout);
    dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;

    srcTex->setImageLayout(this,
                           VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                           srcAccessMask,
                           dstAccessMask,
                           srcStageMask,
                           dstStageMask,
                           false);

    // Flip rect if necessary
    SkIRect srcVkRect = srcRect;
    int32_t dstY = dstPoint.fY;

    if (kBottomLeft_GrSurfaceOrigin == src->origin()) {
        SkASSERT(kBottomLeft_GrSurfaceOrigin == dst->origin());
        srcVkRect.fTop = src->height() - srcRect.fBottom;
        srcVkRect.fBottom = src->height() - srcRect.fTop;
        dstY = dst->height() - dstPoint.fY - srcVkRect.height();
    }

    VkImageCopy copyRegion;
    memset(&copyRegion, 0, sizeof(VkImageCopy));
    copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.srcOffset = { srcVkRect.fLeft, srcVkRect.fTop, 0 };
    copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.dstOffset = { dstPoint.fX, dstY, 0 };
    copyRegion.extent = { (uint32_t)srcVkRect.width(), (uint32_t)srcVkRect.height(), 1 };

    fCurrentCmdBuffer->copyImage(this,
                                 srcTex,
                                 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                 dstTex,
                                 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                 1,
                                 &copyRegion);
}

inline bool can_copy_as_draw(const GrSurface* dst,
                             const GrSurface* src,
                             const GrVkGpu* gpu) {
    return false;
}

void GrVkGpu::copySurfaceAsDraw(GrSurface* dst,
                                GrSurface* src,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint) {
    SkASSERT(false);
}

bool GrVkGpu::onCopySurface(GrSurface* dst,
                            GrSurface* src,
                            const SkIRect& srcRect,
                            const SkIPoint& dstPoint) {
    if (can_copy_image(dst, src, this)) {
        this->copySurfaceAsCopyImage(dst, src, srcRect, dstPoint);
        return true;
    }

    if (can_copy_as_draw(dst, src, this)) {
        this->copySurfaceAsDraw(dst, src, srcRect, dstPoint);
        return true;
    }

    return false;
}

void GrVkGpu::onGetMultisampleSpecs(GrRenderTarget* rt, const GrStencilSettings&,
                                    int* effectiveSampleCnt, SkAutoTDeleteArray<SkPoint>*) {
    // TODO: stub.
    SkASSERT(!this->caps()->sampleLocationsSupport());
    *effectiveSampleCnt = rt->desc().fSampleCnt;
}

bool GrVkGpu::onGetReadPixelsInfo(GrSurface* srcSurface, int width, int height, size_t rowBytes,
                                  GrPixelConfig readConfig, DrawPreference* drawPreference,
                                  ReadPixelTempDrawInfo* tempDrawInfo) {
    // Currently we don't handle draws, so if the caller wants/needs to do a draw we need to fail
    if (kNoDraw_DrawPreference != *drawPreference) {
        return false;
    }

    if (srcSurface->config() != readConfig) {
        // TODO: This should fall back to drawing or copying to change config of srcSurface to
        // match that of readConfig.
        return false;
    }

    return true;
}

bool GrVkGpu::onReadPixels(GrSurface* surface,
                           int left, int top, int width, int height,
                           GrPixelConfig config,
                           void* buffer,
                           size_t rowBytes) {
    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
        return false;
    }

    GrVkTexture* tgt = static_cast<GrVkTexture*>(surface->asTexture());
    if (!tgt) {
        return false;
    }

    // Change layout of our target so it can be used as copy
    VkImageLayout layout = tgt->currentLayout();
    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
    tgt->setImageLayout(this,
                        VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                        srcAccessMask,
                        dstAccessMask,
                        srcStageMask,
                        dstStageMask,
                        false);

    GrVkTransferBuffer* transferBuffer =
        reinterpret_cast<GrVkTransferBuffer*>(this->createTransferBuffer(rowBytes * height,
                                                                         kGpuToCpu_TransferType));

    bool flipY = kBottomLeft_GrSurfaceOrigin == surface->origin();
    VkOffset3D offset = {
        left,
        flipY ? surface->height() - top - height : top,
        0
    };

    // Copy the image to a buffer so we can map it to cpu memory
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = 0;
    region.bufferRowLength = 0; // Forces RowLength to be imageExtent.width
    region.bufferImageHeight = 0; // Forces height to be tightly packed. Only useful for 3d images.
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = offset;
    region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

    fCurrentCmdBuffer->copyImageToBuffer(this,
                                         tgt,
                                         VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                         transferBuffer,
                                         1,
                                         &region);

    // make sure the copy to buffer has finished
    transferBuffer->addMemoryBarrier(this,
                                     VK_ACCESS_TRANSFER_WRITE_BIT,
                                     VK_ACCESS_HOST_READ_BIT,
                                     VK_PIPELINE_STAGE_TRANSFER_BIT,
                                     VK_PIPELINE_STAGE_HOST_BIT,
                                     false);

    // We need to submit the current command buffer to the Queue and make sure it finishes before
    // we can copy the data out of the buffer.
    this->submitCommandBuffer(kForce_SyncQueue);

    void* mappedMemory = transferBuffer->map();

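    // Note: this copy assumes the caller's rowBytes matches the tightly packed row size implied
    // by the bufferRowLength = 0 copy above.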
    memcpy(buffer, mappedMemory, rowBytes*height);

    transferBuffer->unmap();
    transferBuffer->unref();

    if (flipY) {
        SkAutoSMalloc<32 * sizeof(GrColor)> scratch;
        size_t tightRowBytes = GrBytesPerPixel(config) * width;
        scratch.reset(tightRowBytes);
        void* tmpRow = scratch.get();
        // flip y in-place by rows
        const int halfY = height >> 1;
        char* top = reinterpret_cast<char*>(buffer);
        char* bottom = top + (height - 1) * rowBytes;
        for (int y = 0; y < halfY; y++) {
            memcpy(tmpRow, top, tightRowBytes);
            memcpy(top, bottom, tightRowBytes);
            memcpy(bottom, tmpRow, tightRowBytes);
            top += rowBytes;
            bottom -= rowBytes;
        }
    }

    return true;
}

bool GrVkGpu::prepareDrawState(const GrPipeline& pipeline,
                               const GrPrimitiveProcessor& primProc,
                               GrPrimitiveType primitiveType,
                               const GrVkRenderPass& renderPass,
                               GrVkProgram** program) {
    // Get GrVkProgramDesc
    GrVkProgramDesc desc;
    if (!GrVkProgramDescBuilder::Build(&desc, primProc, pipeline, *this->vkCaps().glslCaps())) {
        GrCapsDebugf(this->caps(), "Failed to build vk program descriptor!\n");
        return false;
    }

    *program = GrVkProgramBuilder::CreateProgram(this,
                                                 pipeline,
                                                 primProc,
                                                 primitiveType,
                                                 desc,
                                                 renderPass);
    if (!*program) {
        return false;
    }

    (*program)->setData(this, primProc, pipeline);

    (*program)->bind(this, fCurrentCmdBuffer);
    return true;
}

void GrVkGpu::onDraw(const GrPipeline& pipeline,
                     const GrPrimitiveProcessor& primProc,
                     const GrMesh* meshes,
                     int meshCount) {
    if (!meshCount) {
        return;
    }
    GrRenderTarget* rt = pipeline.getRenderTarget();
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
    const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
    SkASSERT(renderPass);

    GrVkProgram* program = nullptr;
    GrPrimitiveType primitiveType = meshes[0].primitiveType();
    if (!this->prepareDrawState(pipeline, primProc, primitiveType, *renderPass, &program)) {
        return;
    }

    // Change layout of our render target so it can be used as the color attachment
    VkImageLayout layout = vkRT->currentLayout();
    // Our color attachment is purely a destination and won't be read so don't need to flush or
    // invalidate any caches
    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    vkRT->setImageLayout(this,
                         VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                         srcAccessMask,
                         dstAccessMask,
                         srcStageMask,
                         dstStageMask,
                         false);

    // If we are using a stencil attachment we also need to update its layout
    if (!pipeline.getStencil().isDisabled()) {
        GrStencilAttachment* stencil = vkRT->renderTargetPriv().getStencilAttachment();
        GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
        VkImageLayout origDstLayout = vkStencil->currentLayout();
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
        VkAccessFlags dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
        VkPipelineStageFlags srcStageMask =
            GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
        vkStencil->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                                  srcAccessMask,
                                  dstAccessMask,
                                  srcStageMask,
                                  dstStageMask,
                                  false);
    }

    fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

    for (int i = 0; i < meshCount; ++i) {
        if (GrXferBarrierType barrierType = pipeline.xferBarrierType(*this->caps())) {
            this->xferBarrier(pipeline.getRenderTarget(), barrierType);
        }

        const GrMesh& mesh = meshes[i];
        GrMesh::Iterator iter;
        const GrNonInstancedMesh* nonIdxMesh = iter.init(mesh);
        do {
            if (nonIdxMesh->primitiveType() != primitiveType) {
                // Technically we don't have to call this here (since there is a safety check in
                // program:setData) but this will allow for quicker freeing of resources if the
                // program sits in a cache for a while.
                program->freeTempResources(this);
                // This free will go away once we setup a program cache, and then the cache will be
                // responsible for calling freeGpuResources.
                program->freeGPUResources(this);
                program->unref();
                SkDEBUGCODE(program = nullptr);
                primitiveType = nonIdxMesh->primitiveType();
                if (!this->prepareDrawState(pipeline, primProc, primitiveType, *renderPass,
                                            &program)) {
                    return;
                }
            }
            SkASSERT(program);
            this->bindGeometry(primProc, *nonIdxMesh);

            if (nonIdxMesh->isIndexed()) {
                fCurrentCmdBuffer->drawIndexed(this,
                                               nonIdxMesh->indexCount(),
                                               1,
                                               nonIdxMesh->startIndex(),
                                               nonIdxMesh->startVertex(),
                                               0);
            } else {
                fCurrentCmdBuffer->draw(this,
                                        nonIdxMesh->vertexCount(),
                                        1,
                                        nonIdxMesh->startVertex(),
                                        0);
            }

            fStats.incNumDraws();
        } while ((nonIdxMesh = iter.next()));
    }

    fCurrentCmdBuffer->endRenderPass(this);

    // Technically we don't have to call this here (since there is a safety check in
    // program:setData) but this will allow for quicker freeing of resources if the program sits
    // in a cache for a while.
    program->freeTempResources(this);
    // This free will go away once we setup a program cache, and then the cache will be responsible
    // for calling freeGpuResources.
    program->freeGPUResources(this);
    program->unref();

#if SWAP_PER_DRAW
    glFlush();
#if defined(SK_BUILD_FOR_MAC)
    aglSwapBuffers(aglGetCurrentContext());
    int set_a_break_pt_here = 9;
    aglSwapBuffers(aglGetCurrentContext());
#elif defined(SK_BUILD_FOR_WIN32)
    SwapBuf();
    int set_a_break_pt_here = 9;
    SwapBuf();
#endif
#endif
}