/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkGpu.h"

#include "GrContextOptions.h"
#include "GrGeometryProcessor.h"
#include "GrGpuResourceCacheAccess.h"
#include "GrMesh.h"
#include "GrPipeline.h"
#include "GrRenderTargetPriv.h"
#include "GrSurfacePriv.h"
#include "GrTexturePriv.h"

#include "GrVkCommandBuffer.h"
#include "GrVkImage.h"
#include "GrVkIndexBuffer.h"
#include "GrVkMemory.h"
#include "GrVkPipeline.h"
#include "GrVkProgram.h"
#include "GrVkProgramBuilder.h"
#include "GrVkProgramDesc.h"
#include "GrVkRenderPass.h"
#include "GrVkResourceProvider.h"
#include "GrVkTexture.h"
#include "GrVkTextureRenderTarget.h"
#include "GrVkTransferBuffer.h"
#include "GrVkVertexBuffer.h"

#include "SkConfig8888.h"

#include "vk/GrVkInterface.h"
#include "vk/GrVkTypes.h"

#define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
#define VK_CALL_RET(RET, X) GR_VK_CALL_RET(this->vkInterface(), RET, X)
#define VK_CALL_ERRCHECK(X) GR_VK_CALL_ERRCHECK(this->vkInterface(), X)

////////////////////////////////////////////////////////////////////////////////
// Stuff used to set up a GrVkGpu secretly for now.

#ifdef ENABLE_VK_LAYERS
VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(
    VkDebugReportFlagsEXT       flags,
    VkDebugReportObjectTypeEXT  objectType,
    uint64_t                    object,
    size_t                      location,
    int32_t                     messageCode,
    const char*                 pLayerPrefix,
    const char*                 pMessage,
    void*                       pUserData) {
    if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
        SkDebugf("Vulkan error [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    } else if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) {
        SkDebugf("Vulkan warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    } else if (flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) {
        SkDebugf("Vulkan perf warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    } else {
        SkDebugf("Vulkan info/debug [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    }
    return VK_FALSE;
}

const char* kEnabledLayerNames[] = {
    // elements of VK_LAYER_LUNARG_standard_validation
    "VK_LAYER_LUNARG_threading",
    "VK_LAYER_LUNARG_param_checker",
    "VK_LAYER_LUNARG_device_limits",
    "VK_LAYER_LUNARG_object_tracker",
    "VK_LAYER_LUNARG_image",
    "VK_LAYER_LUNARG_mem_tracker",
    "VK_LAYER_LUNARG_draw_state",
    "VK_LAYER_LUNARG_swapchain",
    "VK_LAYER_GOOGLE_unique_objects",
    // not included in standard_validation
    //"VK_LAYER_LUNARG_api_dump",
};
const char* kEnabledInstanceExtensionNames[] = {
    VK_EXT_DEBUG_REPORT_EXTENSION_NAME
};

bool verify_instance_layers() {
    // make sure we can actually use the extensions and layers above
    uint32_t extensionCount;
    VkResult res = vkEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
    res = vkEnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions);
    if (VK_SUCCESS != res) {
        return false;
    }
    int instanceExtensionsFound = 0;
    for (uint32_t j = 0; j < ARRAYSIZE(kEnabledInstanceExtensionNames); ++j) {
        for (uint32_t i = 0; i < extensionCount; ++i) {
            if (!strncmp(extensions[i].extensionName, kEnabledInstanceExtensionNames[j],
                         strlen(kEnabledInstanceExtensionNames[j]))) {
                ++instanceExtensionsFound;
                break;
            }
        }
    }
    delete[] extensions;

    uint32_t layerCount;
    res = vkEnumerateInstanceLayerProperties(&layerCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkLayerProperties* layers = new VkLayerProperties[layerCount];
    res = vkEnumerateInstanceLayerProperties(&layerCount, layers);
    if (VK_SUCCESS != res) {
        return false;
    }
    int instanceLayersFound = 0;
    for (uint32_t j = 0; j < ARRAYSIZE(kEnabledLayerNames); ++j) {
        for (uint32_t i = 0; i < layerCount; ++i) {
            if (!strncmp(layers[i].layerName, kEnabledLayerNames[j],
                         strlen(kEnabledLayerNames[j]))) {
                ++instanceLayersFound;
                break;
            }
        }
    }
    delete[] layers;

    return instanceExtensionsFound == ARRAYSIZE(kEnabledInstanceExtensionNames) &&
           instanceLayersFound == ARRAYSIZE(kEnabledLayerNames);
}

bool verify_device_layers(VkPhysicalDevice physDev) {
    uint32_t layerCount;
    VkResult res = vkEnumerateDeviceLayerProperties(physDev, &layerCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkLayerProperties* layers = new VkLayerProperties[layerCount];
    res = vkEnumerateDeviceLayerProperties(physDev, &layerCount, layers);
    if (VK_SUCCESS != res) {
        return false;
    }
    int deviceLayersFound = 0;
    for (uint32_t j = 0; j < ARRAYSIZE(kEnabledLayerNames); ++j) {
        for (uint32_t i = 0; i < layerCount; ++i) {
            if (!strncmp(layers[i].layerName, kEnabledLayerNames[j],
                         strlen(kEnabledLayerNames[j]))) {
                ++deviceLayersFound;
                break;
            }
        }
    }
    delete[] layers;

    return deviceLayersFound == ARRAYSIZE(kEnabledLayerNames);
}
#endif

// For now the VkGpuCreate is using the same signature as GL. This is mostly for ease of
// hiding this code from official Skia. In the end the VkGpuCreate will not take a
// GrBackendContext and most likely would take an optional device and queues to use.
GrGpu* vk_gpu_create(GrBackendContext backendContext, const GrContextOptions& options,
                     GrContext* context) {
    // Below is Vulkan setup code that normally would be done by a client, but we will do it here
    // for now for testing purposes.
    VkPhysicalDevice physDev;
    VkDevice device;
    VkInstance inst;
    VkResult err;

    const VkApplicationInfo app_info = {
        VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
        nullptr,                            // pNext
        "vktest",                           // pApplicationName
        0,                                  // applicationVersion
        "vktest",                           // pEngineName
        0,                                  // engineVersion
        kGrVkMinimumVersion,                // apiVersion
    };

    const char** enabledLayerNames = nullptr;
    int enabledLayerCount = 0;
    const char** enabledInstanceExtensionNames = nullptr;
    int enabledInstanceExtensionCount = 0;
#ifdef ENABLE_VK_LAYERS
    if (verify_instance_layers()) {
        enabledLayerNames = kEnabledLayerNames;
        enabledLayerCount = ARRAYSIZE(kEnabledLayerNames);
        enabledInstanceExtensionNames = kEnabledInstanceExtensionNames;
        enabledInstanceExtensionCount = ARRAYSIZE(kEnabledInstanceExtensionNames);
    }
#endif

    const VkInstanceCreateInfo instance_create = {
        VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
        nullptr,                                // pNext
        0,                                      // flags
        &app_info,                              // pApplicationInfo
        enabledLayerCount,                      // enabledLayerNameCount
        enabledLayerNames,                      // ppEnabledLayerNames
        enabledInstanceExtensionCount,          // enabledExtensionNameCount
        enabledInstanceExtensionNames,          // ppEnabledExtensionNames
    };

    err = vkCreateInstance(&instance_create, nullptr, &inst);
    if (err < 0) {
        SkDebugf("vkCreateInstance failed: %d\n", err);
        SkFAIL("failing");
    }

    uint32_t gpuCount;
    err = vkEnumeratePhysicalDevices(inst, &gpuCount, nullptr);
    if (err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        SkFAIL("failing");
    }
    SkASSERT(gpuCount > 0);
    // Just returning the first physical device instead of getting the whole array.
    gpuCount = 1;
    err = vkEnumeratePhysicalDevices(inst, &gpuCount, &physDev);
    if (err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        SkFAIL("failing");
    }

    // query to get the initial queue props size
    uint32_t queueCount;
    vkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr);
    SkASSERT(queueCount >= 1);

    SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties));
    // now get the actual queue props
    VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get();

    vkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps);

    // iterate to find the graphics queue
    uint32_t graphicsQueueIndex = -1;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphicsQueueIndex = i;
            break;
        }
    }
    SkASSERT(graphicsQueueIndex < queueCount);

#ifdef ENABLE_VK_LAYERS
    // unlikely that the device will have different layers than the instance, but good to check
    if (!verify_device_layers(physDev)) {
        enabledLayerNames = nullptr;
        enabledLayerCount = 0;
    }
#endif

    float queuePriorities[1] = { 0.0 };
    const VkDeviceQueueCreateInfo queueInfo = {
        VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
        nullptr,                                    // pNext
        0,                                          // VkDeviceQueueCreateFlags
        graphicsQueueIndex,                         // queueFamilyIndex
        1,                                          // queueCount
        queuePriorities,                            // pQueuePriorities
    };
    const VkDeviceCreateInfo deviceInfo = {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
        nullptr,                              // pNext
        0,                                    // VkDeviceCreateFlags
        1,                                    // queueCreateInfoCount
        &queueInfo,                           // pQueueCreateInfos
        enabledLayerCount,                    // layerCount
        enabledLayerNames,                    // ppEnabledLayerNames
        0,                                    // extensionCount
        nullptr,                              // ppEnabledExtensionNames
        nullptr                               // ppEnabledFeatures
    };

    err = vkCreateDevice(physDev, &deviceInfo, nullptr, &device);
    if (err) {
        SkDebugf("CreateDevice failed: %d\n", err);
        SkFAIL("failing");
    }

    VkQueue queue;
    vkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);

    const VkCommandPoolCreateInfo cmdPoolInfo = {
        VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType
        nullptr,                                    // pNext
        0,                                          // CmdPoolCreateFlags
        graphicsQueueIndex,                         // queueFamilyIndex
    };

    VkCommandPool cmdPool;
    err = vkCreateCommandPool(device, &cmdPoolInfo, nullptr, &cmdPool);
    if (err) {
        SkDebugf("CreateCommandPool failed: %d\n", err);
        SkFAIL("failing");
    }

    return new GrVkGpu(context, options, physDev, device, queue, cmdPool, inst);
}

////////////////////////////////////////////////////////////////////////////////

GrVkGpu::GrVkGpu(GrContext* context, const GrContextOptions& options,
                 VkPhysicalDevice physDev, VkDevice device, VkQueue queue, VkCommandPool cmdPool,
                 VkInstance inst)
    : INHERITED(context)
    , fDevice(device)
    , fQueue(queue)
    , fCmdPool(cmdPool)
    , fResourceProvider(this)
    , fVkInstance(inst) {
    fInterface.reset(GrVkCreateInterface(fVkInstance));
    fCompiler = shaderc_compiler_initialize();

    fVkCaps.reset(new GrVkCaps(options, fInterface, physDev));
    fCaps.reset(SkRef(fVkCaps.get()));

    fResourceProvider.init();

    fCurrentCmdBuffer = fResourceProvider.createCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->begin(this);
    VK_CALL(GetPhysicalDeviceMemoryProperties(physDev, &fPhysDevMemProps));

#ifdef ENABLE_VK_LAYERS
    if (fInterface->hasInstanceExtension(VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) {
        /* Setup callback creation information */
        VkDebugReportCallbackCreateInfoEXT callbackCreateInfo;
        callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
        callbackCreateInfo.pNext = nullptr;
        callbackCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT |
                                   VK_DEBUG_REPORT_WARNING_BIT_EXT |
                                   //VK_DEBUG_REPORT_INFORMATION_BIT_EXT |
                                   //VK_DEBUG_REPORT_DEBUG_BIT_EXT |
                                   VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
        callbackCreateInfo.pfnCallback = &DebugReportCallback;
        callbackCreateInfo.pUserData = nullptr;

        /* Register the callback */
        GR_VK_CALL_ERRCHECK(fInterface, CreateDebugReportCallbackEXT(inst, &callbackCreateInfo,
                                                                     nullptr, &fCallback));
    }
#endif
}

GrVkGpu::~GrVkGpu() {
    shaderc_compiler_release(fCompiler);
    fCurrentCmdBuffer->end(this);
    fCurrentCmdBuffer->unref(this);

    // wait for all commands to finish
    fResourceProvider.checkCommandBuffers();
    VkResult res = VK_CALL(QueueWaitIdle(fQueue));
    // VK_ERROR_DEVICE_LOST is acceptable when tearing down (see 4.2.4 in spec)
    SkASSERT(VK_SUCCESS == res || VK_ERROR_DEVICE_LOST == res);

    // must call this just before we destroy the VkDevice
    fResourceProvider.destroyResources();

#ifdef ENABLE_VK_LAYERS
    VK_CALL(DestroyDebugReportCallbackEXT(fVkInstance, fCallback, nullptr));
#endif

    VK_CALL(DestroyCommandPool(fDevice, fCmdPool, nullptr));
    VK_CALL(DestroyDevice(fDevice, nullptr));
    VK_CALL(DestroyInstance(fVkInstance, nullptr));
}

///////////////////////////////////////////////////////////////////////////////

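// Ends the current command buffer, submits it to fQueue, recycles any previously submitted
// command buffers whose work has completed, and begins recording into a fresh command buffer.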
void GrVkGpu::submitCommandBuffer(SyncQueue sync) {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->end(this);

    fCurrentCmdBuffer->submitToQueue(this, fQueue, sync);
    fResourceProvider.checkCommandBuffers();

    // Release old command buffer and create a new one
    fCurrentCmdBuffer->unref(this);
    fCurrentCmdBuffer = fResourceProvider.createCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);

    fCurrentCmdBuffer->begin(this);
}

///////////////////////////////////////////////////////////////////////////////
GrVertexBuffer* GrVkGpu::onCreateVertexBuffer(size_t size, bool dynamic) {
    return GrVkVertexBuffer::Create(this, size, dynamic);
}

GrIndexBuffer* GrVkGpu::onCreateIndexBuffer(size_t size, bool dynamic) {
    return GrVkIndexBuffer::Create(this, size, dynamic);
}

GrTransferBuffer* GrVkGpu::onCreateTransferBuffer(size_t size, TransferType type) {
    GrVkBuffer::Type bufferType = kCpuToGpu_TransferType == type ? GrVkBuffer::kCopyRead_Type
                                                                 : GrVkBuffer::kCopyWrite_Type;
    return GrVkTransferBuffer::Create(this, size, bufferType);
}

////////////////////////////////////////////////////////////////////////////////
bool GrVkGpu::onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
                                   GrPixelConfig srcConfig, DrawPreference* drawPreference,
                                   WritePixelTempDrawInfo* tempDrawInfo) {
    if (kIndex_8_GrPixelConfig == srcConfig || GrPixelConfigIsCompressed(dstSurface->config())) {
        return false;
    }

    // Currently we don't handle draws, so if the caller wants/needs to do a draw we need to fail
    if (kNoDraw_DrawPreference != *drawPreference) {
        return false;
    }

    if (dstSurface->config() != srcConfig) {
        // TODO: This should fall back to drawing or copying to change config of dstSurface to
        // match that of srcConfig.
        return false;
    }

    return true;
}

bool GrVkGpu::onWritePixels(GrSurface* surface,
                            int left, int top, int width, int height,
                            GrPixelConfig config,
                            const SkTArray<GrMipLevel>& texels) {
    GrVkTexture* vkTex = static_cast<GrVkTexture*>(surface->asTexture());
    if (!vkTex) {
        return false;
    }

    // TODO: We're ignoring MIP levels here.
    if (texels.empty() || !texels.begin()->fPixels) {
        return false;
    }

    // We assume Vulkan doesn't do sRGB <-> linear conversions when reading and writing pixels.
    if (GrPixelConfigIsSRGB(surface->config()) != GrPixelConfigIsSRGB(config)) {
        return false;
    }

    bool success = false;
    if (GrPixelConfigIsCompressed(vkTex->desc().fConfig)) {
        // We check that config == desc.fConfig in GrGpu::getWritePixelsInfo()
        SkASSERT(config == vkTex->desc().fConfig);
        // TODO: add compressed texture support
        // delete the following two lines and uncomment the two after that when ready
        vkTex->unref();
        return false;
        //success = this->uploadCompressedTexData(vkTex->desc(), buffer, false, left, top, width,
        //                                        height);
    } else {
        bool linearTiling = vkTex->isLinearTiled();
        if (linearTiling && VK_IMAGE_LAYOUT_PREINITIALIZED != vkTex->currentLayout()) {
            // Need to change the layout to general in order to perform a host write
            VkImageLayout layout = vkTex->currentLayout();
            VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
            VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_HOST_BIT;
            VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
            VkAccessFlags dstAccessMask = VK_ACCESS_HOST_WRITE_BIT;
            vkTex->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_GENERAL,
                                  srcAccessMask,
                                  dstAccessMask,
                                  srcStageMask,
                                  dstStageMask,
                                  false);
        }
        success = this->uploadTexData(vkTex, left, top, width, height, config,
                                      texels.begin()->fPixels, texels.begin()->fRowBytes);
    }

    if (success) {
        vkTex->texturePriv().dirtyMipMaps(true);
        return true;
    }

    return false;
}

bool GrVkGpu::uploadTexData(GrVkTexture* tex,
                            int left, int top, int width, int height,
                            GrPixelConfig dataConfig,
                            const void* data,
                            size_t rowBytes) {
    SkASSERT(data);

    // If we're uploading compressed data then we should be using uploadCompressedTexData
    SkASSERT(!GrPixelConfigIsCompressed(dataConfig));

    bool linearTiling = tex->isLinearTiled();

    size_t bpp = GrBytesPerPixel(dataConfig);

    const GrSurfaceDesc& desc = tex->desc();

    if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
                                               &width, &height, &data, &rowBytes)) {
        return false;
    }
    size_t trimRowBytes = width * bpp;

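    // Two upload paths: a linear-tiled image can be written directly through a host mapping of
    // its memory, while an optimal-tiled image is filled by staging the pixels in a transfer
    // buffer and recording a copyBufferToImage into the current command buffer.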
    if (linearTiling) {
        SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == tex->currentLayout() ||
                 VK_IMAGE_LAYOUT_GENERAL == tex->currentLayout());
        const VkImageSubresource subres = {
            VK_IMAGE_ASPECT_COLOR_BIT,
            0,  // mipLevel
            0,  // arraySlice
        };
        VkSubresourceLayout layout;
        VkResult err;

        const GrVkInterface* interface = this->vkInterface();

        GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
                                                        tex->textureImage(),
                                                        &subres,
                                                        &layout));

        int texTop = kBottomLeft_GrSurfaceOrigin == desc.fOrigin ? tex->height() - top - height
                                                                 : top;
        VkDeviceSize offset = texTop*layout.rowPitch + left*bpp;
        VkDeviceSize size = height*layout.rowPitch;
        void* mapPtr;
        err = GR_VK_CALL(interface, MapMemory(fDevice, tex->textureMemory(), offset, size, 0,
                                              &mapPtr));
        if (err) {
            return false;
        }

        if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
            // copy into buffer by rows
            const char* srcRow = reinterpret_cast<const char*>(data);
            char* dstRow = reinterpret_cast<char*>(mapPtr)+(height - 1)*layout.rowPitch;
            for (int y = 0; y < height; y++) {
                memcpy(dstRow, srcRow, trimRowBytes);
                srcRow += rowBytes;
                dstRow -= layout.rowPitch;
            }
        } else {
            // If there is no padding on the src (rowBytes) or dst (layout.rowPitch) we can memcpy
            if (trimRowBytes == rowBytes && trimRowBytes == layout.rowPitch) {
                memcpy(mapPtr, data, trimRowBytes * height);
            } else {
                SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), data, rowBytes,
                             trimRowBytes, height);
            }
        }

        GR_VK_CALL(interface, UnmapMemory(fDevice, tex->textureMemory()));
    } else {
        GrVkTransferBuffer* transferBuffer =
            GrVkTransferBuffer::Create(this, trimRowBytes * height, GrVkBuffer::kCopyRead_Type);

        void* mapPtr = transferBuffer->map();

        if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
            // copy into buffer by rows
            const char* srcRow = reinterpret_cast<const char*>(data);
            char* dstRow = reinterpret_cast<char*>(mapPtr)+(height - 1)*trimRowBytes;
            for (int y = 0; y < height; y++) {
                memcpy(dstRow, srcRow, trimRowBytes);
                srcRow += rowBytes;
                dstRow -= trimRowBytes;
            }
        } else {
            // If there is no padding on the src data rows, we can do a single memcpy
            if (trimRowBytes == rowBytes) {
                memcpy(mapPtr, data, trimRowBytes * height);
            } else {
                SkRectMemcpy(mapPtr, trimRowBytes, data, rowBytes, trimRowBytes, height);
            }
        }

        transferBuffer->unmap();

        // make sure the unmap has finished
        transferBuffer->addMemoryBarrier(this,
                                         VK_ACCESS_HOST_WRITE_BIT,
                                         VK_ACCESS_TRANSFER_READ_BIT,
                                         VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
                                         VK_PIPELINE_STAGE_TRANSFER_BIT,
                                         false);

        // Set up copy region
        bool flipY = kBottomLeft_GrSurfaceOrigin == tex->origin();
        VkOffset3D offset = {
            left,
            flipY ? tex->height() - top - height : top,
            0
        };

        VkBufferImageCopy region;
        memset(&region, 0, sizeof(VkBufferImageCopy));
        region.bufferOffset = 0;
        region.bufferRowLength = width;
        region.bufferImageHeight = height;
        region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
        region.imageOffset = offset;
        region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

        // Change layout of our target so it can be copied to
        VkImageLayout layout = tex->currentLayout();
        VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
        VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
        tex->setImageLayout(this,
                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                            srcAccessMask,
                            dstAccessMask,
                            srcStageMask,
                            dstStageMask,
                            false);

        // Copy the buffer to the image
        fCurrentCmdBuffer->copyBufferToImage(this,
                                             transferBuffer,
                                             tex,
                                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                             1,
                                             &region);

        // Submit the current command buffer to the Queue
        this->submitCommandBuffer(kSkip_SyncQueue);

        transferBuffer->unref();
    }

    return true;
}

////////////////////////////////////////////////////////////////////////////////
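// Creates a new texture (and render target when requested). kZeroCopy surfaces use linear tiling
// with host-visible memory so they can be written directly; everything else uses optimal tiling
// with device-local memory, and any provided texel data is uploaded via uploadTexData().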
GrTexture* GrVkGpu::onCreateTexture(const GrSurfaceDesc& desc, GrGpuResource::LifeCycle lifeCycle,
                                    const SkTArray<GrMipLevel>& texels) {
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);

    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(desc.fConfig, &pixelFormat)) {
        return nullptr;
    }

    if (!fVkCaps->isConfigTexturable(desc.fConfig)) {
        return nullptr;
    }

    bool linearTiling = false;
    if (SkToBool(desc.fFlags & kZeroCopy_GrSurfaceFlag)) {
        if (fVkCaps->isConfigTexurableLinearly(desc.fConfig) &&
            (!renderTarget || fVkCaps->isConfigRenderableLinearly(desc.fConfig, false))) {
            linearTiling = true;
        } else {
            return nullptr;
        }
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    if (renderTarget) {
        usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    }

    // For now we will set both VK_IMAGE_USAGE_TRANSFER_DST_BIT and VK_IMAGE_USAGE_TRANSFER_SRC_BIT
    // on every texture since we do not know whether or not we will be using this texture in some
    // copy or not. Also this assumes, as is the current case, that all render targets in vulkan
    // are also textures. If we change this practice of setting both bits, we must make sure to
    // set the destination bit if we are uploading srcData to the texture.
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VkFlags memProps = (!texels.empty() && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
                                                           VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

    // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is
    // requested, this ImageDesc describes the resolved texture. Therefore we always have samples
    // set to 1.
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = pixelFormat;
    imageDesc.fWidth = desc.fWidth;
    imageDesc.fHeight = desc.fHeight;
    imageDesc.fLevels = 1;
    imageDesc.fSamples = 1;
    imageDesc.fImageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = usageFlags;
    imageDesc.fMemProps = memProps;

    GrVkTexture* tex;
    if (renderTarget) {
        tex = GrVkTextureRenderTarget::CreateNewTextureRenderTarget(this, desc, lifeCycle,
                                                                    imageDesc);
#if 0
        // This clear can be included to fix warning described in https://bugs.skia.org/5045
        // Obviously we do not want to be clearing needlessly every time we create a render target.
        SkIRect rect = SkIRect::MakeWH(tex->width(), tex->height());
        this->clear(rect, GrColor_TRANSPARENT_BLACK, tex->asRenderTarget());
#endif
    } else {
        tex = GrVkTexture::CreateNewTexture(this, desc, lifeCycle, imageDesc);
    }

    if (!tex) {
        return nullptr;
    }

    // TODO: We're ignoring MIP levels here.
    if (!texels.empty()) {
        SkASSERT(texels.begin()->fPixels);
        if (!this->uploadTexData(tex, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
                                 texels.begin()->fPixels, texels.begin()->fRowBytes)) {
            tex->unref();
            return nullptr;
        }
    }

    return tex;
}

////////////////////////////////////////////////////////////////////////////////

static GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin) {
    // By default, all textures in Vk use TopLeft
    if (kDefault_GrSurfaceOrigin == origin) {
        return kTopLeft_GrSurfaceOrigin;
    } else {
        return origin;
    }
}

GrTexture* GrVkGpu::onWrapBackendTexture(const GrBackendTextureDesc& desc,
                                         GrWrapOwnership ownership) {
    VkFormat format;
    if (!GrPixelConfigToVkFormat(desc.fConfig, &format)) {
        return nullptr;
    }

    if (0 == desc.fTextureHandle) {
        return nullptr;
    }

    int maxSize = this->caps()->maxTextureSize();
    if (desc.fWidth > maxSize || desc.fHeight > maxSize) {
        return nullptr;
    }

    const GrVkTextureInfo* info = reinterpret_cast<const GrVkTextureInfo*>(desc.fTextureHandle);
    if (VK_NULL_HANDLE == info->fImage || VK_NULL_HANDLE == info->fAlloc) {
        return nullptr;
    }

    GrGpuResource::LifeCycle lifeCycle = (kAdopt_GrWrapOwnership == ownership)
                                                ? GrGpuResource::kAdopted_LifeCycle
                                                : GrGpuResource::kBorrowed_LifeCycle;

    GrSurfaceDesc surfDesc;
    // next line relies on GrBackendTextureDesc's flags matching GrTexture's
    surfDesc.fFlags = (GrSurfaceFlags)desc.fFlags;
    surfDesc.fWidth = desc.fWidth;
    surfDesc.fHeight = desc.fHeight;
    surfDesc.fConfig = desc.fConfig;
    surfDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount());
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag);
    // In GL, Chrome assumes all textures are BottomLeft
    // In VK, we don't have this restriction
    surfDesc.fOrigin = resolve_origin(desc.fOrigin);

    GrVkTexture* texture = nullptr;
    if (renderTarget) {
        texture = GrVkTextureRenderTarget::CreateWrappedTextureRenderTarget(this, surfDesc,
                                                                            lifeCycle, format,
                                                                            info);
    } else {
        texture = GrVkTexture::CreateWrappedTexture(this, surfDesc, lifeCycle, format,
                                                    info);
    }
    if (!texture) {
        return nullptr;
    }

    return texture;
}

GrRenderTarget* GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& wrapDesc,
                                                   GrWrapOwnership ownership) {

    const GrVkTextureInfo* info =
        reinterpret_cast<const GrVkTextureInfo*>(wrapDesc.fRenderTargetHandle);
    if (VK_NULL_HANDLE == info->fImage ||
        (VK_NULL_HANDLE == info->fAlloc && kAdopt_GrWrapOwnership == ownership)) {
        return nullptr;
    }

    GrGpuResource::LifeCycle lifeCycle = (kAdopt_GrWrapOwnership == ownership)
                                                ? GrGpuResource::kAdopted_LifeCycle
                                                : GrGpuResource::kBorrowed_LifeCycle;

    GrSurfaceDesc desc;
    desc.fConfig = wrapDesc.fConfig;
    desc.fFlags = kCheckAllocation_GrSurfaceFlag;
    desc.fWidth = wrapDesc.fWidth;
    desc.fHeight = wrapDesc.fHeight;
    desc.fSampleCnt = SkTMin(wrapDesc.fSampleCnt, this->caps()->maxSampleCount());

    desc.fOrigin = resolve_origin(wrapDesc.fOrigin);

    GrVkRenderTarget* tgt = GrVkRenderTarget::CreateWrappedRenderTarget(this, desc,
                                                                        lifeCycle,
                                                                        info);
    if (tgt && wrapDesc.fStencilBits) {
        if (!createStencilAttachmentForRenderTarget(tgt, desc.fWidth, desc.fHeight)) {
            tgt->unref();
            return nullptr;
        }
    }
    return tgt;
}

////////////////////////////////////////////////////////////////////////////////

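// Binds the vertex (and, for indexed meshes, index) buffers for a non-instanced mesh. The
// host-write to vertex-input barriers below make any CPU-side buffer updates visible to the draw.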
void GrVkGpu::bindGeometry(const GrPrimitiveProcessor& primProc,
                           const GrNonInstancedMesh& mesh) {
    GrVkVertexBuffer* vbuf;
    vbuf = (GrVkVertexBuffer*)mesh.vertexBuffer();
    SkASSERT(vbuf);
    SkASSERT(!vbuf->isMapped());

    vbuf->addMemoryBarrier(this,
                           VK_ACCESS_HOST_WRITE_BIT,
                           VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
                           VK_PIPELINE_STAGE_HOST_BIT,
                           VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                           false);

    fCurrentCmdBuffer->bindVertexBuffer(this, vbuf);

    if (mesh.isIndexed()) {
        GrVkIndexBuffer* ibuf = (GrVkIndexBuffer*)mesh.indexBuffer();
        SkASSERT(ibuf);
        SkASSERT(!ibuf->isMapped());

        ibuf->addMemoryBarrier(this,
                               VK_ACCESS_HOST_WRITE_BIT,
                               VK_ACCESS_INDEX_READ_BIT,
                               VK_PIPELINE_STAGE_HOST_BIT,
                               VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                               false);

        fCurrentCmdBuffer->bindIndexBuffer(this, ibuf);
    }
}

////////////////////////////////////////////////////////////////////////////////

GrStencilAttachment* GrVkGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt,
                                                                     int width,
                                                                     int height) {
    SkASSERT(rt->asTexture());
    SkASSERT(width >= rt->width());
    SkASSERT(height >= rt->height());

    int samples = rt->numStencilSamples();

    SkASSERT(this->vkCaps().stencilFormats().count());
    const GrVkCaps::StencilFormat& sFmt = this->vkCaps().stencilFormats()[0];

    GrVkStencilAttachment* stencil(GrVkStencilAttachment::Create(this,
                                                                 GrGpuResource::kCached_LifeCycle,
                                                                 width,
                                                                 height,
                                                                 samples,
                                                                 sFmt));
    fStats.incStencilAttachmentCreates();
    return stencil;
}

////////////////////////////////////////////////////////////////////////////////

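// Creates a bare VkImage plus backing memory outside the resource cache and returns a
// heap-allocated GrVkTextureInfo as the backend handle. Initial srcData is only supported for
// linear-tiled images, where it is written through a host mapping.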
GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, int h,
                                                         GrPixelConfig config) {

    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
        return 0;
    }

    bool linearTiling = false;
    if (!fVkCaps->isConfigTexturable(config)) {
        return 0;
    }

    if (fVkCaps->isConfigTexurableLinearly(config)) {
        linearTiling = true;
    }

    // Currently this is not supported since it requires a copy which has not yet been implemented.
    if (srcData && !linearTiling) {
        return 0;
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VkFlags memProps = (srcData && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
                                                   VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

    VkImage image = VK_NULL_HANDLE;
    VkDeviceMemory alloc = VK_NULL_HANDLE;

    VkImageTiling imageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
    VkImageLayout initialLayout = (VK_IMAGE_TILING_LINEAR == imageTiling)
                                  ? VK_IMAGE_LAYOUT_PREINITIALIZED
                                  : VK_IMAGE_LAYOUT_UNDEFINED;

    // Create Image
    VkSampleCountFlagBits vkSamples;
    if (!GrSampleCountToVkSampleCount(1, &vkSamples)) {
        return 0;
    }

    const VkImageCreateInfo imageCreateInfo = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // sType
        NULL,                                // pNext
        0,                                   // VkImageCreateFlags
        VK_IMAGE_TYPE_2D,                    // VkImageType
        pixelFormat,                         // VkFormat
        { w, h, 1 },                         // VkExtent3D
        1,                                   // mipLevels
        1,                                   // arrayLayers
        vkSamples,                           // samples
        imageTiling,                         // VkImageTiling
        usageFlags,                          // VkImageUsageFlags
        VK_SHARING_MODE_EXCLUSIVE,           // VkSharingMode
        0,                                   // queueFamilyCount
        0,                                   // pQueueFamilyIndices
        initialLayout                        // initialLayout
    };

    GR_VK_CALL_ERRCHECK(this->vkInterface(),
                        CreateImage(this->device(), &imageCreateInfo, nullptr, &image));

    if (!GrVkMemory::AllocAndBindImageMemory(this, image, memProps, &alloc)) {
        VK_CALL(DestroyImage(this->device(), image, nullptr));
        return 0;
    }

    if (srcData) {
        if (linearTiling) {
            const VkImageSubresource subres = {
                VK_IMAGE_ASPECT_COLOR_BIT,
                0,  // mipLevel
                0,  // arraySlice
            };
            VkSubresourceLayout layout;
            VkResult err;

            VK_CALL(GetImageSubresourceLayout(fDevice, image, &subres, &layout));

            void* mapPtr;
            err = VK_CALL(MapMemory(fDevice, alloc, 0, layout.rowPitch * h, 0, &mapPtr));
            if (err) {
                VK_CALL(FreeMemory(this->device(), alloc, nullptr));
                VK_CALL(DestroyImage(this->device(), image, nullptr));
                return 0;
            }

            size_t bpp = GrBytesPerPixel(config);
            size_t rowCopyBytes = bpp * w;
            // If there is no padding on dst (layout.rowPitch) we can do a single memcpy.
            // This assumes the srcData comes in with no padding.
            if (rowCopyBytes == layout.rowPitch) {
                memcpy(mapPtr, srcData, rowCopyBytes * h);
            } else {
                SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), srcData, rowCopyBytes,
                             rowCopyBytes, h);
            }
            VK_CALL(UnmapMemory(fDevice, alloc));
        } else {
            // TODO: Add support for copying to optimal tiling
            SkASSERT(false);
        }
    }

    GrVkTextureInfo* info = new GrVkTextureInfo;
    info->fImage = image;
    info->fAlloc = alloc;
    info->fImageTiling = imageTiling;
    info->fImageLayout = initialLayout;

    return (GrBackendObject)info;
}

bool GrVkGpu::isTestingOnlyBackendTexture(GrBackendObject id) const {
    const GrVkTextureInfo* backend = reinterpret_cast<const GrVkTextureInfo*>(id);

    if (backend && backend->fImage && backend->fAlloc) {
        VkMemoryRequirements req;
        memset(&req, 0, sizeof(req));
        GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
                                                                   backend->fImage,
                                                                   &req));
        // TODO: find a better check
        // This will probably fail with a different driver
        return (req.size > 0) && (req.size <= 8192 * 8192);
    }

    return false;
}

void GrVkGpu::deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandon) {
    const GrVkTextureInfo* backend = reinterpret_cast<const GrVkTextureInfo*>(id);

    if (backend) {
        if (!abandon) {
            // something in the command buffer may still be using this, so force submit
            this->submitCommandBuffer(kForce_SyncQueue);

            VK_CALL(FreeMemory(this->device(), backend->fAlloc, nullptr));
            VK_CALL(DestroyImage(this->device(), backend->fImage, nullptr));
        }
        delete backend;
    }
}

////////////////////////////////////////////////////////////////////////////////

void GrVkGpu::addMemoryBarrier(VkPipelineStageFlags srcStageMask,
                               VkPipelineStageFlags dstStageMask,
                               bool byRegion,
                               VkMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                     VkPipelineStageFlags dstStageMask,
                                     bool byRegion,
                                     VkBufferMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kBufferMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::addImageMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                    VkPipelineStageFlags dstStageMask,
                                    bool byRegion,
                                    VkImageMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kImageMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::finishDrawTarget() {
    // Submit the current command buffer to the Queue
    this->submitCommandBuffer(kSkip_SyncQueue);
}

void GrVkGpu::clearStencil(GrRenderTarget* target) {
    if (nullptr == target) {
        return;
    }
    GrStencilAttachment* stencil = target->renderTargetPriv().getStencilAttachment();
    GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));

    VkImageLayout origDstLayout = vkStencil->currentLayout();

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    vkStencil->setImageLayout(this,
                              VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                              srcAccessMask,
                              dstAccessMask,
                              srcStageMask,
                              dstStageMask,
                              false);

    VkImageSubresourceRange subRange;
    memset(&subRange, 0, sizeof(VkImageSubresourceRange));
    subRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    subRange.baseMipLevel = 0;
    subRange.levelCount = 1;
    subRange.baseArrayLayer = 0;
    subRange.layerCount = 1;

    // TODO: I imagine that most times we want to clear a stencil it will be at the beginning of a
    // draw. Thus we should look into using the load op functions on the render pass to clear out
    // the stencil there.
    fCurrentCmdBuffer->clearDepthStencilImage(this, vkStencil, &vkStencilColor, 1, &subRange);
}

void GrVkGpu::onClearStencilClip(GrRenderTarget* target, const SkIRect& rect, bool insideClip) {
    SkASSERT(target);

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
    GrStencilAttachment* sb = target->renderTargetPriv().getStencilAttachment();
    GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)sb;

    // this should only be called internally when we know we have a
    // stencil buffer.
    SkASSERT(sb);
    int stencilBitCount = sb->bits();

    // The contract with the callers does not guarantee that we preserve all bits in the stencil
    // during this clear. Thus we will clear the entire stencil to the desired value.

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
    if (insideClip) {
        vkStencilColor.stencil = (1 << (stencilBitCount - 1));
    } else {
        vkStencilColor.stencil = 0;
    }

    VkImageLayout origDstLayout = vkStencil->currentLayout();
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    VkPipelineStageFlags srcStageMask =
        GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    vkStencil->setImageLayout(this,
                              VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                              srcAccessMask,
                              dstAccessMask,
                              srcStageMask,
                              dstStageMask,
                              false);

    VkClearRect clearRect;
    // Flip rect if necessary
    SkIRect vkRect = rect;

    if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) {
        vkRect.fTop = vkRT->height() - rect.fBottom;
        vkRect.fBottom = vkRT->height() - rect.fTop;
    }

    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };

    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
    SkASSERT(renderPass);
    fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

    uint32_t stencilIndex;
    SkAssertResult(renderPass->stencilAttachmentIndex(&stencilIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    attachment.colorAttachment = 0; // this value shouldn't matter
    attachment.clearValue.depthStencil = vkStencilColor;

    fCurrentCmdBuffer->clearAttachments(this, 1, &attachment, 1, &clearRect);
    fCurrentCmdBuffer->endRenderPass(this);

    return;
}

void GrVkGpu::onClear(GrRenderTarget* target, const SkIRect& rect, GrColor color) {
    // parent class should never let us get here with no RT
    SkASSERT(target);

    VkClearColorValue vkColor;
    GrColorToRGBAFloat(color, vkColor.float32);

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
    VkImageLayout origDstLayout = vkRT->currentLayout();

    if (rect.width() != target->width() || rect.height() != target->height()) {
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
        VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
        VkPipelineStageFlags srcStageMask =
            GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
        vkRT->setImageLayout(this,
                             VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                             srcAccessMask,
                             dstAccessMask,
                             srcStageMask,
                             dstStageMask,
                             false);

        VkClearRect clearRect;
        // Flip rect if necessary
        SkIRect vkRect = rect;
        if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) {
            vkRect.fTop = vkRT->height() - rect.fBottom;
            vkRect.fBottom = vkRT->height() - rect.fTop;
        }
        clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
        clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
        clearRect.baseArrayLayer = 0;
        clearRect.layerCount = 1;

        const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
        SkASSERT(renderPass);
        fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

        uint32_t colorIndex;
        SkAssertResult(renderPass->colorAttachmentIndex(&colorIndex));

        VkClearAttachment attachment;
        attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        attachment.colorAttachment = colorIndex;
        attachment.clearValue.color = vkColor;

        fCurrentCmdBuffer->clearAttachments(this, 1, &attachment, 1, &clearRect);
        fCurrentCmdBuffer->endRenderPass(this);
        return;
    }

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    vkRT->setImageLayout(this,
                         VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                         srcAccessMask,
                         dstAccessMask,
                         srcStageMask,
                         dstStageMask,
                         false);

    VkImageSubresourceRange subRange;
    memset(&subRange, 0, sizeof(VkImageSubresourceRange));
    subRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    subRange.baseMipLevel = 0;
    subRange.levelCount = 1;
    subRange.baseArrayLayer = 0;
    subRange.layerCount = 1;

    // In the future we may not actually be doing this type of clear at all. If we are inside a
    // render pass or doing a non full clear then we will use CmdClearColorAttachment. The more
    // common use case will be clearing an attachment at the start of a render pass, in which case
    // we will use the clear load ops.
    fCurrentCmdBuffer->clearColorImage(this,
                                       vkRT,
                                       &vkColor,
                                       1, &subRange);
}

inline bool can_copy_image(const GrSurface* dst,
                           const GrSurface* src,
                           const GrVkGpu* gpu) {
    if (src->asTexture() &&
        dst->asTexture() &&
        src->origin() == dst->origin() &&
        src->config() == dst->config()) {
        return true;
    }

    // How does msaa play into this? If a VkTexture is multisampled, are we copying the
    // multisampled or the resolved image here?

    return false;
}

void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst,
                                     GrSurface* src,
                                     const SkIRect& srcRect,
                                     const SkIPoint& dstPoint) {
    SkASSERT(can_copy_image(dst, src, this));

    // Insert memory barriers to switch src and dst to transfer_source and transfer_dst layouts
    GrVkTexture* dstTex = static_cast<GrVkTexture*>(dst->asTexture());
    GrVkTexture* srcTex = static_cast<GrVkTexture*>(src->asTexture());

    VkImageLayout origDstLayout = dstTex->currentLayout();
    VkImageLayout origSrcLayout = srcTex->currentLayout();

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    // These flags are for flushing/invalidating caches and for the dst image it doesn't matter if
    // the cache is flushed since it is only being written to.
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    dstTex->setImageLayout(this,
                           VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                           srcAccessMask,
                           dstAccessMask,
                           srcStageMask,
                           dstStageMask,
                           false);

    srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origSrcLayout);
    dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origSrcLayout);
    dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;

    srcTex->setImageLayout(this,
                           VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                           srcAccessMask,
                           dstAccessMask,
                           srcStageMask,
                           dstStageMask,
                           false);

    // Flip rect if necessary
    SkIRect srcVkRect = srcRect;
    int32_t dstY = dstPoint.fY;

    if (kBottomLeft_GrSurfaceOrigin == src->origin()) {
        SkASSERT(kBottomLeft_GrSurfaceOrigin == dst->origin());
        srcVkRect.fTop = src->height() - srcRect.fBottom;
        srcVkRect.fBottom = src->height() - srcRect.fTop;
        dstY = dst->height() - dstPoint.fY - srcVkRect.height();
    }

    VkImageCopy copyRegion;
    memset(&copyRegion, 0, sizeof(VkImageCopy));
    copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.srcOffset = { srcVkRect.fLeft, srcVkRect.fTop, 0 };
    copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.dstOffset = { dstPoint.fX, dstY, 0 };
    // the copied region of a 2D image must have a depth of 1
    copyRegion.extent = { (uint32_t)srcVkRect.width(), (uint32_t)srcVkRect.height(), 1 };

    fCurrentCmdBuffer->copyImage(this,
                                 srcTex,
                                 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                 dstTex,
                                 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                 1,
                                 &copyRegion);
}

inline bool can_copy_as_draw(const GrSurface* dst,
                             const GrSurface* src,
                             const GrVkGpu* gpu) {
    return false;
}

void GrVkGpu::copySurfaceAsDraw(GrSurface* dst,
                                GrSurface* src,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint) {
    SkASSERT(false);
}

bool GrVkGpu::onCopySurface(GrSurface* dst,
                            GrSurface* src,
                            const SkIRect& srcRect,
                            const SkIPoint& dstPoint) {
    if (can_copy_image(dst, src, this)) {
        this->copySurfaceAsCopyImage(dst, src, srcRect, dstPoint);
        return true;
    }

    if (can_copy_as_draw(dst, src, this)) {
        this->copySurfaceAsDraw(dst, src, srcRect, dstPoint);
        return true;
    }

    return false;
}

void GrVkGpu::onGetMultisampleSpecs(GrRenderTarget* rt, const GrStencilSettings&,
                                    int* effectiveSampleCnt, SkAutoTDeleteArray<SkPoint>*) {
    // TODO: stub.
    SkASSERT(!this->caps()->sampleLocationsSupport());
    *effectiveSampleCnt = rt->desc().fSampleCnt;
}

bool GrVkGpu::onGetReadPixelsInfo(GrSurface* srcSurface, int width, int height, size_t rowBytes,
                                  GrPixelConfig readConfig, DrawPreference* drawPreference,
                                  ReadPixelTempDrawInfo* tempDrawInfo) {
    // Currently we don't handle draws, so if the caller wants/needs to do a draw we need to fail
    if (kNoDraw_DrawPreference != *drawPreference) {
        return false;
    }

    if (srcSurface->config() != readConfig) {
        // TODO: This should fall back to drawing or copying to change config of srcSurface to
        // match that of readConfig.
        return false;
    }

    return true;
}

bool GrVkGpu::onReadPixels(GrSurface* surface,
                           int left, int top, int width, int height,
                           GrPixelConfig config,
                           void* buffer,
                           size_t rowBytes) {
    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
        return false;
    }

    GrVkTexture* tgt = static_cast<GrVkTexture*>(surface->asTexture());
    if (!tgt) {
        return false;
    }

    // Change layout of our target so it can be used as copy
    VkImageLayout layout = tgt->currentLayout();
    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
    tgt->setImageLayout(this,
                        VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                        srcAccessMask,
                        dstAccessMask,
                        srcStageMask,
                        dstStageMask,
                        false);

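    // Read back by copying the image into a GpuToCpu transfer buffer, then force-submitting and
    // waiting on the queue so the buffer can be mapped and copied into the caller's pixels.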
    GrVkTransferBuffer* transferBuffer =
        reinterpret_cast<GrVkTransferBuffer*>(this->createTransferBuffer(rowBytes * height,
                                                                         kGpuToCpu_TransferType));

    bool flipY = kBottomLeft_GrSurfaceOrigin == surface->origin();
    VkOffset3D offset = {
        left,
        flipY ? surface->height() - top - height : top,
        0
    };

    // Copy the image to a buffer so we can map it to cpu memory
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = 0;
    region.bufferRowLength = 0; // Forces RowLength to be imageExtent.width
    region.bufferImageHeight = 0; // Forces height to be tightly packed. Only useful for 3d images.
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = offset;
    region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

    fCurrentCmdBuffer->copyImageToBuffer(this,
                                         tgt,
                                         VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                         transferBuffer,
                                         1,
                                         &region);

    // make sure the copy to buffer has finished
    transferBuffer->addMemoryBarrier(this,
                                     VK_ACCESS_TRANSFER_WRITE_BIT,
                                     VK_ACCESS_HOST_READ_BIT,
                                     VK_PIPELINE_STAGE_TRANSFER_BIT,
                                     VK_PIPELINE_STAGE_HOST_BIT,
                                     false);

    // We need to submit the current command buffer to the Queue and make sure it finishes before
    // we can copy the data out of the buffer.
    this->submitCommandBuffer(kForce_SyncQueue);

    void* mappedMemory = transferBuffer->map();

    memcpy(buffer, mappedMemory, rowBytes*height);

    transferBuffer->unmap();
    transferBuffer->unref();

    if (flipY) {
        SkAutoSMalloc<32 * sizeof(GrColor)> scratch;
        size_t tightRowBytes = GrBytesPerPixel(config) * width;
        scratch.reset(tightRowBytes);
        void* tmpRow = scratch.get();
        // flip y in-place by rows
        const int halfY = height >> 1;
        char* top = reinterpret_cast<char*>(buffer);
        char* bottom = top + (height - 1) * rowBytes;
        for (int y = 0; y < halfY; y++) {
            memcpy(tmpRow, top, tightRowBytes);
            memcpy(top, bottom, tightRowBytes);
            memcpy(bottom, tmpRow, tightRowBytes);
            top += rowBytes;
            bottom -= rowBytes;
        }
    }

    return true;
}

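// Builds and binds the GrVkProgram used for a draw: generates the program descriptor, creates
// pipeline/descriptor state compatible with the given render pass, uploads uniform and texture
// bindings via setData, and sets the command buffer's dynamic state.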
bool GrVkGpu::prepareDrawState(const GrPipeline& pipeline,
                               const GrPrimitiveProcessor& primProc,
                               GrPrimitiveType primitiveType,
                               const GrVkRenderPass& renderPass,
                               GrVkProgram** program) {
    // Get GrVkProgramDesc
    GrVkProgramDesc desc;
    if (!GrVkProgramDescBuilder::Build(&desc, primProc, pipeline, *this->vkCaps().glslCaps())) {
        GrCapsDebugf(this->caps(), "Failed to build vk program descriptor!\n");
        return false;
    }

    *program = GrVkProgramBuilder::CreateProgram(this,
                                                 pipeline,
                                                 primProc,
                                                 primitiveType,
                                                 desc,
                                                 renderPass);
    if (!*program) {
        return false;
    }

    (*program)->setData(this, primProc, pipeline);

    (*program)->bind(this, fCurrentCmdBuffer);

    GrVkPipeline::SetDynamicState(this, fCurrentCmdBuffer, pipeline);

    return true;
}

void GrVkGpu::onDraw(const GrPipeline& pipeline,
                     const GrPrimitiveProcessor& primProc,
                     const GrMesh* meshes,
                     int meshCount) {
    if (!meshCount) {
        return;
    }
    GrRenderTarget* rt = pipeline.getRenderTarget();
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
    const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
    SkASSERT(renderPass);

    fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

    GrVkProgram* program = nullptr;
    GrPrimitiveType primitiveType = meshes[0].primitiveType();
    if (!this->prepareDrawState(pipeline, primProc, primitiveType, *renderPass, &program)) {
        return;
    }

    // Change layout of our render target so it can be used as the color attachment
    VkImageLayout layout = vkRT->currentLayout();
    // Our color attachment is purely a destination and won't be read so don't need to flush or
    // invalidate any caches
    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    vkRT->setImageLayout(this,
                         VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                         srcAccessMask,
                         dstAccessMask,
                         srcStageMask,
                         dstStageMask,
                         false);

    // If we are using a stencil attachment we also need to update its layout
    if (!pipeline.getStencil().isDisabled()) {
        GrStencilAttachment* stencil = vkRT->renderTargetPriv().getStencilAttachment();
        GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
        VkImageLayout origDstLayout = vkStencil->currentLayout();
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
        VkAccessFlags dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
        VkPipelineStageFlags srcStageMask =
            GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
        vkStencil->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                                  srcAccessMask,
                                  dstAccessMask,
                                  srcStageMask,
                                  dstStageMask,
                                  false);
    }

    for (int i = 0; i < meshCount; ++i) {
        if (GrXferBarrierType barrierType = pipeline.xferBarrierType(*this->caps())) {
            this->xferBarrier(pipeline.getRenderTarget(), barrierType);
        }

        const GrMesh& mesh = meshes[i];
        GrMesh::Iterator iter;
        const GrNonInstancedMesh* nonIdxMesh = iter.init(mesh);
        do {
            if (nonIdxMesh->primitiveType() != primitiveType) {
                // Technically we don't have to call this here (since there is a safety check in
                // program:setData), but this will allow for quicker freeing of resources if the
                // program sits in a cache for a while.
                program->freeTempResources(this);
                // This free will go away once we setup a program cache, and then the cache will
                // be responsible for calling freeGPUResources.
                program->freeGPUResources(this);
                program->unref();
                SkDEBUGCODE(program = nullptr);
                primitiveType = nonIdxMesh->primitiveType();
                if (!this->prepareDrawState(pipeline, primProc, primitiveType, *renderPass,
                                            &program)) {
                    return;
                }
            }
            SkASSERT(program);
            this->bindGeometry(primProc, *nonIdxMesh);

            if (nonIdxMesh->isIndexed()) {
                fCurrentCmdBuffer->drawIndexed(this,
                                               nonIdxMesh->indexCount(),
                                               1,
                                               nonIdxMesh->startIndex(),
                                               nonIdxMesh->startVertex(),
                                               0);
            } else {
                fCurrentCmdBuffer->draw(this,
                                        nonIdxMesh->vertexCount(),
                                        1,
                                        nonIdxMesh->startVertex(),
                                        0);
            }

            fStats.incNumDraws();
        } while ((nonIdxMesh = iter.next()));
    }

    fCurrentCmdBuffer->endRenderPass(this);

    // Technically we don't have to call this here (since there is a safety check in
    // program:setData), but this will allow for quicker freeing of resources if the program sits
    // in a cache for a while.
    program->freeTempResources(this);
    // This free will go away once we setup a program cache, and then the cache will be
    // responsible for calling freeGPUResources.
    program->freeGPUResources(this);
    program->unref();

#if SWAP_PER_DRAW
    glFlush();
#if defined(SK_BUILD_FOR_MAC)
    aglSwapBuffers(aglGetCurrentContext());
    int set_a_break_pt_here = 9;
    aglSwapBuffers(aglGetCurrentContext());
#elif defined(SK_BUILD_FOR_WIN32)
    SwapBuf();
    int set_a_break_pt_here = 9;
    SwapBuf();
#endif
#endif
}