/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkGpu.h"

#include "GrContextOptions.h"
#include "GrGeometryProcessor.h"
#include "GrGpuResourceCacheAccess.h"
#include "GrMesh.h"
#include "GrPipeline.h"
#include "GrRenderTargetPriv.h"
#include "GrSurfacePriv.h"
#include "GrTexturePriv.h"

#include "GrVkCommandBuffer.h"
#include "GrVkImage.h"
#include "GrVkIndexBuffer.h"
#include "GrVkMemory.h"
#include "GrVkPipeline.h"
#include "GrVkProgram.h"
#include "GrVkProgramBuilder.h"
#include "GrVkProgramDesc.h"
#include "GrVkRenderPass.h"
#include "GrVkResourceProvider.h"
#include "GrVkTexture.h"
#include "GrVkTextureRenderTarget.h"
#include "GrVkTransferBuffer.h"
#include "GrVkVertexBuffer.h"

#include "SkConfig8888.h"

#include "vk/GrVkInterface.h"
#include "vk/GrVkTypes.h"

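// These macros route every Vulkan call through this->vkInterface(), the table of Vulkan
// function pointers loaded for this instance/device, rather than calling vk* entry points
// directly.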
#define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
#define VK_CALL_RET(RET, X) GR_VK_CALL_RET(this->vkInterface(), RET, X)
#define VK_CALL_ERRCHECK(X) GR_VK_CALL_ERRCHECK(this->vkInterface(), X)

////////////////////////////////////////////////////////////////////////////////
// Stuff used to set up a GrVkGpu secretly for now.

// For now VkGpuCreate uses the same signature as GL. This is mostly for ease of hiding this
// code from official Skia. In the end VkGpuCreate will not take a GrBackendContext and most
// likely would take an optional device and queues to use.
GrGpu* vk_gpu_create(GrBackendContext backendContext, const GrContextOptions& options,
                     GrContext* context) {
    // Below is Vulkan setup code that would normally be done by a client, but we do it here
    // for now for testing purposes.
    VkPhysicalDevice physDev;
    VkDevice device;
    VkInstance inst;
    VkResult err;

    const VkApplicationInfo app_info = {
        VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
        nullptr,                            // pNext
        "vktest",                           // pApplicationName
        0,                                  // applicationVersion
        "vktest",                           // pEngineName
        0,                                  // engineVersion
        VK_API_VERSION,                     // apiVersion
    };
    const VkInstanceCreateInfo instance_create = {
        VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
        nullptr,                                // pNext
        0,                                      // flags
        &app_info,                              // pApplicationInfo
        0,                                      // enabledLayerNameCount
        nullptr,                                // ppEnabledLayerNames
        0,                                      // enabledExtensionNameCount
        nullptr,                                // ppEnabledExtensionNames
    };
    err = vkCreateInstance(&instance_create, nullptr, &inst);
    if (err < 0) {
        SkDebugf("vkCreateInstance failed: %d\n", err);
        SkFAIL("failing");
    }

    uint32_t gpuCount;
    err = vkEnumeratePhysicalDevices(inst, &gpuCount, nullptr);
    if (err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        SkFAIL("failing");
    }
    SkASSERT(gpuCount > 0);
    // Just returning the first physical device instead of getting the whole array.
    gpuCount = 1;
    err = vkEnumeratePhysicalDevices(inst, &gpuCount, &physDev);
    if (err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        SkFAIL("failing");
    }

    // query to get the initial queue props size
    uint32_t queueCount;
    vkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr);
    SkASSERT(queueCount >= 1);

    SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties));
    // now get the actual queue props
    VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get();

    vkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps);

    // iterate to find the graphics queue
    uint32_t graphicsQueueIndex = -1;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphicsQueueIndex = i;
            break;
        }
    }
    SkASSERT(graphicsQueueIndex < queueCount);

    float queuePriorities[1] = { 0.0 };
    const VkDeviceQueueCreateInfo queueInfo = {
        VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
        nullptr,                                    // pNext
        0,                                          // VkDeviceQueueCreateFlags
        graphicsQueueIndex,                         // queueFamilyIndex (must match the family we
                                                    // fetch the queue from below)
        1,                                          // queueCount
        queuePriorities,                            // pQueuePriorities
    };
    const VkDeviceCreateInfo deviceInfo = {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
        nullptr,                              // pNext
        0,                                    // VkDeviceCreateFlags
        1,                                    // queueCreateInfoCount
        &queueInfo,                           // pQueueCreateInfos
        0,                                    // layerCount
        nullptr,                              // ppEnabledLayerNames
        0,                                    // extensionCount
        nullptr,                              // ppEnabledExtensionNames
        nullptr                               // ppEnabledFeatures
    };

    err = vkCreateDevice(physDev, &deviceInfo, nullptr, &device);
    if (err) {
        SkDebugf("CreateDevice failed: %d\n", err);
        SkFAIL("failing");
    }

    VkQueue queue;
    vkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);

    const VkCommandPoolCreateInfo cmdPoolInfo = {
        VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType
        nullptr,                                    // pNext
        0,                                          // CmdPoolCreateFlags
        graphicsQueueIndex,                         // queueFamilyIndex
    };

    VkCommandPool cmdPool;
    err = vkCreateCommandPool(device, &cmdPoolInfo, nullptr, &cmdPool);
    if (err) {
        SkDebugf("CreateCommandPool failed: %d\n", err);
        SkFAIL("failing");
    }

    return new GrVkGpu(context, options, physDev, device, queue, cmdPool, inst);
}

////////////////////////////////////////////////////////////////////////////////

GrVkGpu::GrVkGpu(GrContext* context, const GrContextOptions& options,
                 VkPhysicalDevice physDev, VkDevice device, VkQueue queue, VkCommandPool cmdPool,
                 VkInstance inst)
    : INHERITED(context)
    , fDevice(device)
    , fQueue(queue)
    , fCmdPool(cmdPool)
    , fResourceProvider(this)
    , fVkInstance(inst) {
    fInterface.reset(GrVkCreateInterface(fVkInstance));
    fCompiler = shaderc_compiler_initialize();

    fVkCaps.reset(new GrVkCaps(options, fInterface, physDev));
    fCaps.reset(SkRef(fVkCaps.get()));

    fResourceProvider.init();

    fCurrentCmdBuffer = fResourceProvider.createCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->begin(this);
    VK_CALL(GetPhysicalDeviceMemoryProperties(physDev, &fPhysDevMemProps));
}

GrVkGpu::~GrVkGpu() {
    shaderc_compiler_release(fCompiler);
    fCurrentCmdBuffer->end(this);
    fCurrentCmdBuffer->unref(this);

    // wait for all commands to finish
    VkResult res = VK_CALL(QueueWaitIdle(fQueue));
    SkASSERT(res == VK_SUCCESS);

    // must call this just before we destroy the VkDevice
    fResourceProvider.destroyResources();

    VK_CALL(DestroyCommandPool(fDevice, fCmdPool, nullptr));
    VK_CALL(DestroyDevice(fDevice, nullptr));
    VK_CALL(DestroyInstance(fVkInstance, nullptr));
}

///////////////////////////////////////////////////////////////////////////////

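// Ends the current command buffer, hands it to the queue (the 'sync' flag controls whether we
// wait for it to finish), then begins recording into a fresh command buffer so the gpu always
// has an open one.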
void GrVkGpu::submitCommandBuffer(SyncQueue sync) {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->end(this);

    fCurrentCmdBuffer->submitToQueue(this, fQueue, sync);
    fResourceProvider.checkCommandBuffers();

    // Release old command buffer and create a new one
    fCurrentCmdBuffer->unref(this);
    fCurrentCmdBuffer = fResourceProvider.createCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);

    fCurrentCmdBuffer->begin(this);
}

///////////////////////////////////////////////////////////////////////////////
GrVertexBuffer* GrVkGpu::onCreateVertexBuffer(size_t size, bool dynamic) {
    return GrVkVertexBuffer::Create(this, size, dynamic);
}

GrIndexBuffer* GrVkGpu::onCreateIndexBuffer(size_t size, bool dynamic) {
    return GrVkIndexBuffer::Create(this, size, dynamic);
}

GrTransferBuffer* GrVkGpu::onCreateTransferBuffer(size_t size, TransferType type) {
    // Compare against the passed-in type; the bare enum value is always truthy.
    GrVkBuffer::Type bufferType = kCpuToGpu_TransferType == type ? GrVkBuffer::kCopyRead_Type
                                                                 : GrVkBuffer::kCopyWrite_Type;
    return GrVkTransferBuffer::Create(this, size, bufferType);
}

////////////////////////////////////////////////////////////////////////////////
bool GrVkGpu::onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
                                   GrPixelConfig srcConfig, DrawPreference* drawPreference,
                                   WritePixelTempDrawInfo* tempDrawInfo) {
    if (kIndex_8_GrPixelConfig == srcConfig || GrPixelConfigIsCompressed(dstSurface->config())) {
        return false;
    }

    // Currently we don't handle draws, so if the caller wants/needs to do a draw we need to fail
    if (kNoDraw_DrawPreference != *drawPreference) {
        return false;
    }

    if (dstSurface->config() != srcConfig) {
        // TODO: This should fall back to drawing or copying to change config of dstSurface to
        // match that of srcConfig.
        return false;
    }

    return true;
}

bool GrVkGpu::onWritePixels(GrSurface* surface,
                            int left, int top, int width, int height,
                            GrPixelConfig config,
                            const SkTArray<GrMipLevel>& texels) {
    GrVkTexture* vkTex = static_cast<GrVkTexture*>(surface->asTexture());
    if (!vkTex) {
        return false;
    }

    // TODO: We're ignoring MIP levels here.
    if (texels.empty() || !texels.begin()->fPixels) {
        return false;
    }

    // We assume Vulkan doesn't do sRGB <-> linear conversions when reading and writing pixels.
    if (GrPixelConfigIsSRGB(surface->config()) != GrPixelConfigIsSRGB(config)) {
        return false;
    }

    bool success = false;
    if (GrPixelConfigIsCompressed(vkTex->desc().fConfig)) {
        // We check that config == desc.fConfig in GrGpu::getWritePixelsInfo()
        SkASSERT(config == vkTex->desc().fConfig);
        // TODO: add compressed texture support
        // delete the following two lines and uncomment the two after that when ready
        vkTex->unref();
        return false;
        //success = this->uploadCompressedTexData(vkTex->desc(), buffer, false, left, top, width,
        //                                        height);
    } else {
        bool linearTiling = vkTex->isLinearTiled();
        if (linearTiling && VK_IMAGE_LAYOUT_PREINITIALIZED != vkTex->currentLayout()) {
            // Need to change the layout to general in order to perform a host write
            VkImageLayout layout = vkTex->currentLayout();
            VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
            VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_HOST_BIT;
            VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
            VkAccessFlags dstAccessMask = VK_ACCESS_HOST_WRITE_BIT;
            vkTex->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_GENERAL,
                                  srcAccessMask,
                                  dstAccessMask,
                                  srcStageMask,
                                  dstStageMask,
                                  false);
        }
        success = this->uploadTexData(vkTex, left, top, width, height, config,
                                      texels.begin()->fPixels, texels.begin()->fRowBytes);
    }

    if (success) {
        vkTex->texturePriv().dirtyMipMaps(true);
        return true;
    }

    return false;
}

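// Writes the pixel data into 'tex'. Linear-tiled images are written directly through mapped
// memory; optimal-tiled images are staged through a GrVkTransferBuffer and copied with
// copyBufferToImage on the current command buffer.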
bool GrVkGpu::uploadTexData(GrVkTexture* tex,
                            int left, int top, int width, int height,
                            GrPixelConfig dataConfig,
                            const void* data,
                            size_t rowBytes) {
    SkASSERT(data);

    // If we're uploading compressed data then we should be using uploadCompressedTexData
    SkASSERT(!GrPixelConfigIsCompressed(dataConfig));

    bool linearTiling = tex->isLinearTiled();

    size_t bpp = GrBytesPerPixel(dataConfig);

    const GrSurfaceDesc& desc = tex->desc();

    if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
                                               &width, &height, &data, &rowBytes)) {
        return false;
    }
    size_t trimRowBytes = width * bpp;

    if (linearTiling) {
        SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == tex->currentLayout() ||
                 VK_IMAGE_LAYOUT_GENERAL == tex->currentLayout());
        const VkImageSubresource subres = {
            VK_IMAGE_ASPECT_COLOR_BIT,
            0, // mipLevel
            0, // arraySlice
        };
        VkSubresourceLayout layout;
        VkResult err;

        const GrVkInterface* interface = this->vkInterface();

        GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
                                                        tex->textureImage(),
                                                        &subres,
                                                        &layout));

        int texTop = kBottomLeft_GrSurfaceOrigin == desc.fOrigin ? tex->height() - top - height
                                                                 : top;
        VkDeviceSize offset = texTop*layout.rowPitch + left*bpp;
        VkDeviceSize size = height*layout.rowPitch;
        void* mapPtr;
        err = GR_VK_CALL(interface, MapMemory(fDevice, tex->textureMemory(), offset, size, 0,
                                              &mapPtr));
        if (err) {
            return false;
        }

        if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
            // copy into buffer by rows
            const char* srcRow = reinterpret_cast<const char*>(data);
            char* dstRow = reinterpret_cast<char*>(mapPtr) + (height - 1)*layout.rowPitch;
            for (int y = 0; y < height; y++) {
                memcpy(dstRow, srcRow, trimRowBytes);
                srcRow += rowBytes;
                dstRow -= layout.rowPitch;
            }
        } else {
            // If there is no padding on the src (rowBytes) or dst (layout.rowPitch) we can memcpy
            if (trimRowBytes == rowBytes && trimRowBytes == layout.rowPitch) {
                memcpy(mapPtr, data, trimRowBytes * height);
            } else {
                SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), data, rowBytes,
                             trimRowBytes, height);
            }
        }

        GR_VK_CALL(interface, UnmapMemory(fDevice, tex->textureMemory()));
    } else {
        GrVkTransferBuffer* transferBuffer =
            GrVkTransferBuffer::Create(this, trimRowBytes * height, GrVkBuffer::kCopyRead_Type);

        void* mapPtr = transferBuffer->map();

        if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
            // copy into buffer by rows
            const char* srcRow = reinterpret_cast<const char*>(data);
            char* dstRow = reinterpret_cast<char*>(mapPtr) + (height - 1)*trimRowBytes;
            for (int y = 0; y < height; y++) {
                memcpy(dstRow, srcRow, trimRowBytes);
                srcRow += rowBytes;
                dstRow -= trimRowBytes;
            }
        } else {
            // If there is no padding on the src data rows, we can do a single memcpy
            if (trimRowBytes == rowBytes) {
                memcpy(mapPtr, data, trimRowBytes * height);
            } else {
                SkRectMemcpy(mapPtr, trimRowBytes, data, rowBytes, trimRowBytes, height);
            }
        }

        transferBuffer->unmap();

        // make sure the unmap has finished
        transferBuffer->addMemoryBarrier(this,
                                         VK_ACCESS_HOST_WRITE_BIT,
                                         VK_ACCESS_TRANSFER_READ_BIT,
                                         VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
                                         VK_PIPELINE_STAGE_TRANSFER_BIT,
                                         false);

        // Set up copy region
        bool flipY = kBottomLeft_GrSurfaceOrigin == tex->origin();
        VkOffset3D offset = {
            left,
            flipY ? tex->height() - top - height : top,
            0
        };

        VkBufferImageCopy region;
        memset(&region, 0, sizeof(VkBufferImageCopy));
        region.bufferOffset = 0;
        region.bufferRowLength = width;
        region.bufferImageHeight = height;
        region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
        region.imageOffset = offset;
        region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

        // Change layout of our target so it can be copied to
        VkImageLayout layout = tex->currentLayout();
        VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
        VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
        tex->setImageLayout(this,
                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                            srcAccessMask,
                            dstAccessMask,
                            srcStageMask,
                            dstStageMask,
                            false);

        // Copy the buffer to the image
        fCurrentCmdBuffer->copyBufferToImage(this,
                                             transferBuffer,
                                             tex,
                                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                             1,
                                             &region);

        // Submit the current command buffer to the Queue
        this->submitCommandBuffer(kSkip_SyncQueue);

        transferBuffer->unref();
    }

    return true;
}

////////////////////////////////////////////////////////////////////////////////
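// Creates a new texture (optionally a render target). Linear tiling is only used for kZeroCopy
// surfaces whose config supports it; transfer src/dst usage is always set since we do not know
// up front whether the texture will take part in a copy.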
GrTexture* GrVkGpu::onCreateTexture(const GrSurfaceDesc& desc, GrGpuResource::LifeCycle lifeCycle,
                                    const SkTArray<GrMipLevel>& texels) {
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);

    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(desc.fConfig, &pixelFormat)) {
        return nullptr;
    }

    if (!fVkCaps->isConfigTexturable(desc.fConfig)) {
        return nullptr;
    }

    bool linearTiling = false;
    if (SkToBool(desc.fFlags & kZeroCopy_GrSurfaceFlag)) {
        if (fVkCaps->isConfigTexurableLinearly(desc.fConfig) &&
            (!renderTarget || fVkCaps->isConfigRenderableLinearly(desc.fConfig, false))) {
            linearTiling = true;
        } else {
            return nullptr;
        }
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    if (renderTarget) {
        usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    }

    // For now we will set VK_IMAGE_USAGE_TRANSFER_DST_BIT and VK_IMAGE_USAGE_TRANSFER_SRC_BIT
    // on every texture since we do not know whether or not we will be using this texture in some
    // copy or not. Also this assumes, as is the current case, that all render targets in Vulkan
    // are also textures. If we change this practice of setting both bits, we must make sure to
    // set the destination bit if we are uploading srcData to the texture.
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VkFlags memProps = (!texels.empty() && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
                                                           VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

    // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is
    // requested, this ImageDesc describes the resolved texture. Therefore we always have samples
    // set to 1.
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = pixelFormat;
    imageDesc.fWidth = desc.fWidth;
    imageDesc.fHeight = desc.fHeight;
    imageDesc.fLevels = 1;
    imageDesc.fSamples = 1;
    imageDesc.fImageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = usageFlags;
    imageDesc.fMemProps = memProps;

    GrVkTexture* tex;
    if (renderTarget) {
        tex = GrVkTextureRenderTarget::CreateNewTextureRenderTarget(this, desc, lifeCycle,
                                                                    imageDesc);
#if 0
        // This clear can be included to fix the warning described in https://bugs.skia.org/5045
        // Obviously we do not want to be clearing needlessly every time we create a render target.
        SkIRect rect = SkIRect::MakeWH(tex->width(), tex->height());
        this->clear(rect, GrColor_TRANSPARENT_BLACK, tex->asRenderTarget());
#endif
    } else {
        tex = GrVkTexture::CreateNewTexture(this, desc, lifeCycle, imageDesc);
    }

    if (!tex) {
        return nullptr;
    }

    // TODO: We're ignoring MIP levels here.
    if (!texels.empty()) {
        SkASSERT(texels.begin()->fPixels);
        if (!this->uploadTexData(tex, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
                                 texels.begin()->fPixels, texels.begin()->fRowBytes)) {
            tex->unref();
            return nullptr;
        }
    }

    return tex;
}

////////////////////////////////////////////////////////////////////////////////

static GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin) {
    // By default, all textures in Vk use TopLeft
    if (kDefault_GrSurfaceOrigin == origin) {
        return kTopLeft_GrSurfaceOrigin;
    } else {
        return origin;
    }
}

GrTexture* GrVkGpu::onWrapBackendTexture(const GrBackendTextureDesc& desc,
                                         GrWrapOwnership ownership) {
    VkFormat format;
    if (!GrPixelConfigToVkFormat(desc.fConfig, &format)) {
        return nullptr;
    }

    if (0 == desc.fTextureHandle) {
        return nullptr;
    }

    int maxSize = this->caps()->maxTextureSize();
    if (desc.fWidth > maxSize || desc.fHeight > maxSize) {
        return nullptr;
    }

    const GrVkTextureInfo* info = reinterpret_cast<const GrVkTextureInfo*>(desc.fTextureHandle);
    if (VK_NULL_HANDLE == info->fImage || VK_NULL_HANDLE == info->fAlloc) {
        return nullptr;
    }

    GrGpuResource::LifeCycle lifeCycle = (kAdopt_GrWrapOwnership == ownership)
                                                 ? GrGpuResource::kAdopted_LifeCycle
                                                 : GrGpuResource::kBorrowed_LifeCycle;

    GrSurfaceDesc surfDesc;
    // next line relies on GrBackendTextureDesc's flags matching GrTexture's
    surfDesc.fFlags = (GrSurfaceFlags)desc.fFlags;
    surfDesc.fWidth = desc.fWidth;
    surfDesc.fHeight = desc.fHeight;
    surfDesc.fConfig = desc.fConfig;
    surfDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount());
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag);
    // In GL, Chrome assumes all textures are BottomLeft
    // In VK, we don't have this restriction
    surfDesc.fOrigin = resolve_origin(desc.fOrigin);

    GrVkTexture* texture = nullptr;
    if (renderTarget) {
        texture = GrVkTextureRenderTarget::CreateWrappedTextureRenderTarget(this, surfDesc,
                                                                            lifeCycle, format,
                                                                            info);
    } else {
        texture = GrVkTexture::CreateWrappedTexture(this, surfDesc, lifeCycle, format,
                                                    info);
    }
    if (!texture) {
        return nullptr;
    }

    return texture;
}

GrRenderTarget* GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& wrapDesc,
                                                   GrWrapOwnership ownership) {
    const GrVkTextureInfo* info =
        reinterpret_cast<const GrVkTextureInfo*>(wrapDesc.fRenderTargetHandle);
    if (VK_NULL_HANDLE == info->fImage ||
        (VK_NULL_HANDLE == info->fAlloc && kAdopt_GrWrapOwnership == ownership)) {
        return nullptr;
    }

    GrGpuResource::LifeCycle lifeCycle = (kAdopt_GrWrapOwnership == ownership)
                                                 ? GrGpuResource::kAdopted_LifeCycle
                                                 : GrGpuResource::kBorrowed_LifeCycle;

    GrSurfaceDesc desc;
    desc.fConfig = wrapDesc.fConfig;
    desc.fFlags = kCheckAllocation_GrSurfaceFlag;
    desc.fWidth = wrapDesc.fWidth;
    desc.fHeight = wrapDesc.fHeight;
    desc.fSampleCnt = SkTMin(wrapDesc.fSampleCnt, this->caps()->maxSampleCount());

    desc.fOrigin = resolve_origin(wrapDesc.fOrigin);

    GrVkRenderTarget* tgt = GrVkRenderTarget::CreateWrappedRenderTarget(this, desc,
                                                                        lifeCycle,
                                                                        info);
    if (tgt && wrapDesc.fStencilBits) {
        if (!createStencilAttachmentForRenderTarget(tgt, desc.fWidth, desc.fHeight)) {
            tgt->unref();
            return nullptr;
        }
    }
    return tgt;
}

////////////////////////////////////////////////////////////////////////////////

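// Binds the mesh's vertex buffer (and index buffer, if the mesh is indexed) on the current
// command buffer. Host-write to device-read barriers are issued first so any CPU-side buffer
// updates are visible to the vertex-input stage.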
void GrVkGpu::bindGeometry(const GrPrimitiveProcessor& primProc,
                           const GrNonInstancedMesh& mesh) {
    GrVkVertexBuffer* vbuf = (GrVkVertexBuffer*)mesh.vertexBuffer();
    SkASSERT(vbuf);
    SkASSERT(!vbuf->isMapped());

    vbuf->addMemoryBarrier(this,
                           VK_ACCESS_HOST_WRITE_BIT,
                           VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
                           VK_PIPELINE_STAGE_HOST_BIT,
                           VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                           false);

    fCurrentCmdBuffer->bindVertexBuffer(this, vbuf);

    if (mesh.isIndexed()) {
        GrVkIndexBuffer* ibuf = (GrVkIndexBuffer*)mesh.indexBuffer();
        SkASSERT(ibuf);
        SkASSERT(!ibuf->isMapped());

        ibuf->addMemoryBarrier(this,
                               VK_ACCESS_HOST_WRITE_BIT,
                               VK_ACCESS_INDEX_READ_BIT,
                               VK_PIPELINE_STAGE_HOST_BIT,
                               VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                               false);

        fCurrentCmdBuffer->bindIndexBuffer(this, ibuf);
    }
}

////////////////////////////////////////////////////////////////////////////////

GrStencilAttachment* GrVkGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt,
                                                                     int width,
                                                                     int height) {
    SkASSERT(rt->asTexture());
    SkASSERT(width >= rt->width());
    SkASSERT(height >= rt->height());

    int samples = rt->numStencilSamples();

    SkASSERT(this->vkCaps().stencilFormats().count());
    const GrVkCaps::StencilFormat& sFmt = this->vkCaps().stencilFormats()[0];

    GrVkStencilAttachment* stencil(GrVkStencilAttachment::Create(this,
                                                                 GrGpuResource::kCached_LifeCycle,
                                                                 width,
                                                                 height,
                                                                 samples,
                                                                 sFmt));
    fStats.incStencilAttachmentCreates();
    return stencil;
}

////////////////////////////////////////////////////////////////////////////////

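// Test-only path: creates a bare VkImage plus backing memory, optionally filling linear-tiled
// images through a host mapping, and returns a heap-allocated GrVkTextureInfo as the opaque
// backend handle.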
GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, int h,
                                                         GrPixelConfig config) {
    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
        return 0;
    }

    bool linearTiling = false;
    if (!fVkCaps->isConfigTexturable(config)) {
        return 0;
    }

    if (fVkCaps->isConfigTexurableLinearly(config)) {
        linearTiling = true;
    }

    // Currently this is not supported since it requires a copy which has not yet been implemented.
    if (srcData && !linearTiling) {
        return 0;
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VkFlags memProps = (srcData && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
                                                   VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

    VkImage image = VK_NULL_HANDLE;
    VkDeviceMemory alloc = VK_NULL_HANDLE;

    VkImageTiling imageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
    VkImageLayout initialLayout = (VK_IMAGE_TILING_LINEAR == imageTiling)
                                  ? VK_IMAGE_LAYOUT_PREINITIALIZED
                                  : VK_IMAGE_LAYOUT_UNDEFINED;

    // Create Image
    VkSampleCountFlagBits vkSamples;
    if (!GrSampleCountToVkSampleCount(1, &vkSamples)) {
        return 0;
    }

    const VkImageCreateInfo imageCreateInfo = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // sType
        nullptr,                             // pNext
        0,                                   // VkImageCreateFlags
        VK_IMAGE_TYPE_2D,                    // VkImageType
        pixelFormat,                         // VkFormat
        { (uint32_t)w, (uint32_t)h, 1 },     // VkExtent3D (cast to avoid narrowing in list-init)
        1,                                   // mipLevels
        1,                                   // arrayLayers
        vkSamples,                           // samples
        imageTiling,                         // VkImageTiling
        usageFlags,                          // VkImageUsageFlags
        VK_SHARING_MODE_EXCLUSIVE,           // VkSharingMode
        0,                                   // queueFamilyCount
        nullptr,                             // pQueueFamilyIndices
        initialLayout                        // initialLayout
    };

    GR_VK_CALL_ERRCHECK(this->vkInterface(),
                        CreateImage(this->device(), &imageCreateInfo, nullptr, &image));

    if (!GrVkMemory::AllocAndBindImageMemory(this, image, memProps, &alloc)) {
        VK_CALL(DestroyImage(this->device(), image, nullptr));
        return 0;
    }

    if (srcData) {
        if (linearTiling) {
            const VkImageSubresource subres = {
                VK_IMAGE_ASPECT_COLOR_BIT,
                0, // mipLevel
                0, // arraySlice
            };
            VkSubresourceLayout layout;
            VkResult err;

            VK_CALL(GetImageSubresourceLayout(fDevice, image, &subres, &layout));

            void* mapPtr;
            err = VK_CALL(MapMemory(fDevice, alloc, 0, layout.rowPitch * h, 0, &mapPtr));
            if (err) {
                VK_CALL(FreeMemory(this->device(), alloc, nullptr));
                VK_CALL(DestroyImage(this->device(), image, nullptr));
                return 0;
            }

            size_t bpp = GrBytesPerPixel(config);
            size_t rowCopyBytes = bpp * w;
            // If there is no padding on dst (layout.rowPitch) we can do a single memcpy.
            // This assumes the srcData comes in with no padding.
            if (rowCopyBytes == layout.rowPitch) {
                memcpy(mapPtr, srcData, rowCopyBytes * h);
            } else {
                SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), srcData, rowCopyBytes,
                             rowCopyBytes, h);
            }
            VK_CALL(UnmapMemory(fDevice, alloc));
        } else {
            // TODO: Add support for copying to optimal tiling
            SkASSERT(false);
        }
    }

    GrVkTextureInfo* info = new GrVkTextureInfo;
    info->fImage = image;
    info->fAlloc = alloc;
    info->fImageTiling = imageTiling;
    info->fImageLayout = initialLayout;

    return (GrBackendObject)info;
}

bool GrVkGpu::isTestingOnlyBackendTexture(GrBackendObject id) const {
    const GrVkTextureInfo* backend = reinterpret_cast<const GrVkTextureInfo*>(id);

    if (backend && backend->fImage && backend->fAlloc) {
        VkMemoryRequirements req;
        memset(&req, 0, sizeof(req));
        GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
                                                                   backend->fImage,
                                                                   &req));
        // TODO: find a better check
        // This will probably fail with a different driver
        return (req.size > 0) && (req.size <= 8192 * 8192);
    }

    return false;
}

void GrVkGpu::deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandon) {
    const GrVkTextureInfo* backend = reinterpret_cast<const GrVkTextureInfo*>(id);

    if (backend) {
        if (!abandon) {
            // something in the command buffer may still be using this, so force submit
            this->submitCommandBuffer(kForce_SyncQueue);

            VK_CALL(FreeMemory(this->device(), backend->fAlloc, nullptr));
            VK_CALL(DestroyImage(this->device(), backend->fImage, nullptr));
        }
        delete backend;
    }
}

////////////////////////////////////////////////////////////////////////////////

void GrVkGpu::addMemoryBarrier(VkPipelineStageFlags srcStageMask,
                               VkPipelineStageFlags dstStageMask,
                               bool byRegion,
                               VkMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                     VkPipelineStageFlags dstStageMask,
                                     bool byRegion,
                                     VkBufferMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kBufferMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::addImageMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                    VkPipelineStageFlags dstStageMask,
                                    bool byRegion,
                                    VkImageMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kImageMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::finishDrawTarget() {
    // Submit the current command buffer to the Queue
    this->submitCommandBuffer(kSkip_SyncQueue);
}

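// Clears the entire stencil attachment of 'target' to zero with vkCmdClearDepthStencilImage,
// after transitioning the attachment to TRANSFER_DST_OPTIMAL.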
void GrVkGpu::clearStencil(GrRenderTarget* target) {
    if (nullptr == target) {
        return;
    }
    GrStencilAttachment* stencil = target->renderTargetPriv().getStencilAttachment();
    GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));

    VkImageLayout origDstLayout = vkStencil->currentLayout();

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    vkStencil->setImageLayout(this,
                              VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                              srcAccessMask,
                              dstAccessMask,
                              srcStageMask,
                              dstStageMask,
                              false);

    VkImageSubresourceRange subRange;
    memset(&subRange, 0, sizeof(VkImageSubresourceRange));
    subRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    subRange.baseMipLevel = 0;
    subRange.levelCount = 1;
    subRange.baseArrayLayer = 0;
    subRange.layerCount = 1;

    // TODO: I imagine that most times we want to clear a stencil it will be at the beginning of
    // a draw. Thus we should look into using the load op functions on the render pass to clear
    // out the stencil there.
    fCurrentCmdBuffer->clearDepthStencilImage(this, vkStencil, &vkStencilColor, 1, &subRange);
}

void GrVkGpu::onClearStencilClip(GrRenderTarget* target, const SkIRect& rect, bool insideClip) {
    SkASSERT(target);

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
    GrStencilAttachment* sb = target->renderTargetPriv().getStencilAttachment();
    GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)sb;

    // this should only be called internally when we know we have a
    // stencil buffer.
    SkASSERT(sb);
    int stencilBitCount = sb->bits();

    // The contract with the callers does not guarantee that we preserve all bits in the stencil
    // during this clear. Thus we will clear the entire stencil to the desired value.

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
    if (insideClip) {
        vkStencilColor.stencil = (1 << (stencilBitCount - 1));
    } else {
        vkStencilColor.stencil = 0;
    }

    VkImageLayout origDstLayout = vkStencil->currentLayout();
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    VkPipelineStageFlags srcStageMask =
        GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    vkStencil->setImageLayout(this,
                              VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                              srcAccessMask,
                              dstAccessMask,
                              srcStageMask,
                              dstStageMask,
                              false);

    VkClearRect clearRect;
    // Flip rect if necessary
    SkIRect vkRect = rect;

    if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) {
        vkRect.fTop = vkRT->height() - rect.fBottom;
        vkRect.fBottom = vkRT->height() - rect.fTop;
    }

    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };

    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
    SkASSERT(renderPass);
    fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

    uint32_t stencilIndex;
    SkAssertResult(renderPass->stencilAttachmentIndex(&stencilIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    attachment.colorAttachment = 0; // this value shouldn't matter
    attachment.clearValue.depthStencil = vkStencilColor;

    fCurrentCmdBuffer->clearAttachments(this, 1, &attachment, 1, &clearRect);
    fCurrentCmdBuffer->endRenderPass(this);
}

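// Partial clears are recorded as vkCmdClearAttachments inside the target's simple render pass;
// full-target clears transition the image to TRANSFER_DST_OPTIMAL and use vkCmdClearColorImage
// instead.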
void GrVkGpu::onClear(GrRenderTarget* target, const SkIRect& rect, GrColor color) {
    // parent class should never let us get here with no RT
    SkASSERT(target);

    VkClearColorValue vkColor;
    GrColorToRGBAFloat(color, vkColor.float32);

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
    VkImageLayout origDstLayout = vkRT->currentLayout();

    if (rect.width() != target->width() || rect.height() != target->height()) {
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
        VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
        VkPipelineStageFlags srcStageMask =
            GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
        vkRT->setImageLayout(this,
                             VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                             srcAccessMask,
                             dstAccessMask,
                             srcStageMask,
                             dstStageMask,
                             false);

        VkClearRect clearRect;
        // Flip rect if necessary
        SkIRect vkRect = rect;
        if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) {
            vkRect.fTop = vkRT->height() - rect.fBottom;
            vkRect.fBottom = vkRT->height() - rect.fTop;
        }
        clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
        clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
        clearRect.baseArrayLayer = 0;
        clearRect.layerCount = 1;

        const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
        SkASSERT(renderPass);
        fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

        uint32_t colorIndex;
        SkAssertResult(renderPass->colorAttachmentIndex(&colorIndex));

        VkClearAttachment attachment;
        attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        attachment.colorAttachment = colorIndex;
        attachment.clearValue.color = vkColor;

        fCurrentCmdBuffer->clearAttachments(this, 1, &attachment, 1, &clearRect);
        fCurrentCmdBuffer->endRenderPass(this);
        return;
    }

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    vkRT->setImageLayout(this,
                         VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                         srcAccessMask,
                         dstAccessMask,
                         srcStageMask,
                         dstStageMask,
                         false);

    VkImageSubresourceRange subRange;
    memset(&subRange, 0, sizeof(VkImageSubresourceRange));
    subRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    subRange.baseMipLevel = 0;
    subRange.levelCount = 1;
    subRange.baseArrayLayer = 0;
    subRange.layerCount = 1;

    // In the future we may not actually be doing this type of clear at all. If we are inside a
    // render pass or doing a non full clear then we will use CmdClearColorAttachment. The more
    // common use case will be clearing an attachment at the start of a render pass, in which case
    // we will use the clear load ops.
    fCurrentCmdBuffer->clearColorImage(this,
                                       vkRT,
                                       &vkColor,
                                       1, &subRange);
}

inline bool can_copy_image(const GrSurface* dst,
                           const GrSurface* src,
                           const GrVkGpu* gpu) {
    if (src->asTexture() &&
        dst->asTexture() &&
        src->origin() == dst->origin() &&
        src->config() == dst->config()) {
        return true;
    }

    // How does msaa play into this? If a VkTexture is multisampled, are we copying the
    // multisampled or the resolved image here?

    return false;
}

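// Copies with vkCmdCopyImage: both textures are transitioned to the proper TRANSFER layouts,
// the source rect is flipped for bottom-left origins, and a single VkImageCopy region is
// recorded on the current command buffer.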
void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst,
                                     GrSurface* src,
                                     const SkIRect& srcRect,
                                     const SkIPoint& dstPoint) {
    SkASSERT(can_copy_image(dst, src, this));

    // Insert memory barriers to switch src and dst to transfer_source and transfer_dst layouts
    GrVkTexture* dstTex = static_cast<GrVkTexture*>(dst->asTexture());
    GrVkTexture* srcTex = static_cast<GrVkTexture*>(src->asTexture());

    VkImageLayout origDstLayout = dstTex->currentLayout();
    VkImageLayout origSrcLayout = srcTex->currentLayout();

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    // These flags are for flushing/invalidating caches and for the dst image it doesn't matter
    // if the cache is flushed since it is only being written to.
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    dstTex->setImageLayout(this,
                           VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                           srcAccessMask,
                           dstAccessMask,
                           srcStageMask,
                           dstStageMask,
                           false);

    srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origSrcLayout);
    dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origSrcLayout);
    dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;

    srcTex->setImageLayout(this,
                           VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                           srcAccessMask,
                           dstAccessMask,
                           srcStageMask,
                           dstStageMask,
                           false);

    // Flip rect if necessary
    SkIRect srcVkRect = srcRect;
    int32_t dstY = dstPoint.fY;

    if (kBottomLeft_GrSurfaceOrigin == src->origin()) {
        SkASSERT(kBottomLeft_GrSurfaceOrigin == dst->origin());
        srcVkRect.fTop = src->height() - srcRect.fBottom;
        srcVkRect.fBottom = src->height() - srcRect.fTop;
        dstY = dst->height() - dstPoint.fY - srcVkRect.height();
    }

    VkImageCopy copyRegion;
    memset(&copyRegion, 0, sizeof(VkImageCopy));
    copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.srcOffset = { srcVkRect.fLeft, srcVkRect.fTop, 0 };
    copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.dstOffset = { dstPoint.fX, dstY, 0 };
    // The copy extent depth must be 1 for 2D images.
    copyRegion.extent = { (uint32_t)srcVkRect.width(), (uint32_t)srcVkRect.height(), 1 };

    fCurrentCmdBuffer->copyImage(this,
                                 srcTex,
                                 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                 dstTex,
                                 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                 1,
                                 &copyRegion);
}

inline bool can_copy_as_draw(const GrSurface* dst,
                             const GrSurface* src,
                             const GrVkGpu* gpu) {
    return false;
}

void GrVkGpu::copySurfaceAsDraw(GrSurface* dst,
                                GrSurface* src,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint) {
    SkASSERT(false);
}

bool GrVkGpu::onCopySurface(GrSurface* dst,
                            GrSurface* src,
                            const SkIRect& srcRect,
                            const SkIPoint& dstPoint) {
    if (can_copy_image(dst, src, this)) {
        this->copySurfaceAsCopyImage(dst, src, srcRect, dstPoint);
        return true;
    }

    if (can_copy_as_draw(dst, src, this)) {
        this->copySurfaceAsDraw(dst, src, srcRect, dstPoint);
        return true;
    }

    return false;
}

void GrVkGpu::onGetMultisampleSpecs(GrRenderTarget* rt, const GrStencilSettings&,
                                    int* effectiveSampleCnt, SkAutoTDeleteArray<SkPoint>*) {
    // TODO: stub.
    SkASSERT(!this->caps()->sampleLocationsSupport());
    *effectiveSampleCnt = rt->desc().fSampleCnt;
}

bool GrVkGpu::onGetReadPixelsInfo(GrSurface* srcSurface, int width, int height, size_t rowBytes,
                                  GrPixelConfig readConfig, DrawPreference* drawPreference,
                                  ReadPixelTempDrawInfo* tempDrawInfo) {
    // Currently we don't handle draws, so if the caller wants/needs to do a draw we need to fail
    if (kNoDraw_DrawPreference != *drawPreference) {
        return false;
    }

    if (srcSurface->config() != readConfig) {
        // TODO: This should fall back to drawing or copying to change config of srcSurface to
        // match that of readConfig.
        return false;
    }

    return true;
}

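// Readback path: the surface is transitioned to TRANSFER_SRC_OPTIMAL and copied into a
// transfer buffer, the queue is flushed with a forced sync, and the mapped result is memcpy'd
// out (flipping rows in place for bottom-left origins).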
bool GrVkGpu::onReadPixels(GrSurface* surface,
                           int left, int top, int width, int height,
                           GrPixelConfig config,
                           void* buffer,
                           size_t rowBytes) {
    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
        return false;
    }

    GrVkTexture* tgt = static_cast<GrVkTexture*>(surface->asTexture());
    if (!tgt) {
        return false;
    }

    // Change the layout of our target so it can be used as the copy source
    VkImageLayout layout = tgt->currentLayout();
    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
    tgt->setImageLayout(this,
                        VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                        srcAccessMask,
                        dstAccessMask,
                        srcStageMask,
                        dstStageMask,
                        false);

    GrVkTransferBuffer* transferBuffer =
        reinterpret_cast<GrVkTransferBuffer*>(this->createTransferBuffer(rowBytes * height,
                                                                         kGpuToCpu_TransferType));

    bool flipY = kBottomLeft_GrSurfaceOrigin == surface->origin();
    VkOffset3D offset = {
        left,
        flipY ? surface->height() - top - height : top,
        0
    };

    // Copy the image to a buffer so we can map it to cpu memory
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = 0;
    region.bufferRowLength = 0; // Forces RowLength to be imageExtent.width
    region.bufferImageHeight = 0; // Forces height to be tightly packed. Only useful for 3d images.
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = offset;
    region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

    fCurrentCmdBuffer->copyImageToBuffer(this,
                                         tgt,
                                         VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                         transferBuffer,
                                         1,
                                         &region);

    // make sure the copy to the buffer has finished
    transferBuffer->addMemoryBarrier(this,
                                     VK_ACCESS_TRANSFER_WRITE_BIT,
                                     VK_ACCESS_HOST_READ_BIT,
                                     VK_PIPELINE_STAGE_TRANSFER_BIT,
                                     VK_PIPELINE_STAGE_HOST_BIT,
                                     false);

    // We need to submit the current command buffer to the Queue and make sure it finishes before
    // we can copy the data out of the buffer.
    this->submitCommandBuffer(kForce_SyncQueue);

    void* mappedMemory = transferBuffer->map();

    memcpy(buffer, mappedMemory, rowBytes*height);

    transferBuffer->unmap();
    transferBuffer->unref();

    if (flipY) {
        SkAutoSMalloc<32 * sizeof(GrColor)> scratch;
        size_t tightRowBytes = GrBytesPerPixel(config) * width;
        scratch.reset(tightRowBytes);
        void* tmpRow = scratch.get();
        // flip y in-place by rows
        const int halfY = height >> 1;
        char* top = reinterpret_cast<char*>(buffer);
        char* bottom = top + (height - 1) * rowBytes;
        for (int y = 0; y < halfY; y++) {
            memcpy(tmpRow, top, tightRowBytes);
            memcpy(top, bottom, tightRowBytes);
            memcpy(bottom, tmpRow, tightRowBytes);
            top += rowBytes;
            bottom -= rowBytes;
        }
    }

    return true;
}

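// Builds the GrVkProgramDesc for this pipeline/processor pair, creates and binds the matching
// GrVkProgram for the given primitive type and render pass, and uploads its uniform data.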
bool GrVkGpu::prepareDrawState(const GrPipeline& pipeline,
                               const GrPrimitiveProcessor& primProc,
                               GrPrimitiveType primitiveType,
                               const GrVkRenderPass& renderPass,
                               GrVkProgram** program) {
    // Get GrVkProgramDesc
    GrVkProgramDesc desc;
    if (!GrVkProgramDescBuilder::Build(&desc, primProc, pipeline, *this->vkCaps().glslCaps())) {
        GrCapsDebugf(this->caps(), "Failed to build vk program descriptor!\n");
        return false;
    }

    *program = GrVkProgramBuilder::CreateProgram(this,
                                                 pipeline,
                                                 primProc,
                                                 primitiveType,
                                                 desc,
                                                 renderPass);
    // Check the program we just created, not the out-param pointer itself.
    if (!*program) {
        return false;
    }

    (*program)->setData(this, primProc, pipeline);

    (*program)->bind(this, fCurrentCmdBuffer);
    return true;
}

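// Draw entry point: prepares the draw state, transitions the color (and, if enabled, stencil)
// attachments, begins the target's simple render pass, and issues one draw per non-instanced
// mesh, rebuilding the program whenever the primitive type changes mid-stream.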
void GrVkGpu::onDraw(const GrPipeline& pipeline,
                     const GrPrimitiveProcessor& primProc,
                     const GrMesh* meshes,
                     int meshCount) {
    if (!meshCount) {
        return;
    }
    GrRenderTarget* rt = pipeline.getRenderTarget();
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
    const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
    SkASSERT(renderPass);

    GrVkProgram* program = nullptr;
    GrPrimitiveType primitiveType = meshes[0].primitiveType();
    if (!this->prepareDrawState(pipeline, primProc, primitiveType, *renderPass, &program)) {
        return;
    }

    // Change the layout of our render target so it can be used as the color attachment
    VkImageLayout layout = vkRT->currentLayout();
    // Our color attachment is purely a destination and won't be read so we don't need to flush
    // or invalidate any caches
    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    vkRT->setImageLayout(this,
                         VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                         srcAccessMask,
                         dstAccessMask,
                         srcStageMask,
                         dstStageMask,
                         false);

    // If we are using a stencil attachment we also need to update its layout
    if (!pipeline.getStencil().isDisabled()) {
        GrStencilAttachment* stencil = vkRT->renderTargetPriv().getStencilAttachment();
        GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
        VkImageLayout origDstLayout = vkStencil->currentLayout();
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
        VkAccessFlags dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
        VkPipelineStageFlags srcStageMask =
            GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
        vkStencil->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                                  srcAccessMask,
                                  dstAccessMask,
                                  srcStageMask,
                                  dstStageMask,
                                  false);
    }

    fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

    for (int i = 0; i < meshCount; ++i) {
        if (GrXferBarrierType barrierType = pipeline.xferBarrierType(*this->caps())) {
            this->xferBarrier(pipeline.getRenderTarget(), barrierType);
        }

        const GrMesh& mesh = meshes[i];
        GrMesh::Iterator iter;
        const GrNonInstancedMesh* nonIdxMesh = iter.init(mesh);
        do {
            if (nonIdxMesh->primitiveType() != primitiveType) {
                // Technically we don't have to call this here (since there is a safety check in
                // program:setData) but this will allow for quicker freeing of resources if the
                // program sits in a cache for a while.
                program->freeTempResources(this);
                // This free will go away once we set up a program cache, and then the cache will
                // be responsible for calling freeGpuResources.
                program->freeGPUResources(this);
                program->unref();
                SkDEBUGCODE(program = nullptr);
                primitiveType = nonIdxMesh->primitiveType();
                if (!this->prepareDrawState(pipeline, primProc, primitiveType, *renderPass,
                                            &program)) {
                    return;
                }
            }
            SkASSERT(program);
            this->bindGeometry(primProc, *nonIdxMesh);

            if (nonIdxMesh->isIndexed()) {
                fCurrentCmdBuffer->drawIndexed(this,
                                               nonIdxMesh->indexCount(),
                                               1,
                                               nonIdxMesh->startIndex(),
                                               nonIdxMesh->startVertex(),
                                               0);
            } else {
                fCurrentCmdBuffer->draw(this,
                                        nonIdxMesh->vertexCount(),
                                        1,
                                        nonIdxMesh->startVertex(),
                                        0);
            }

            fStats.incNumDraws();
        } while ((nonIdxMesh = iter.next()));
    }

    fCurrentCmdBuffer->endRenderPass(this);

    // Technically we don't have to call this here (since there is a safety check in
    // program:setData) but this will allow for quicker freeing of resources if the program sits
    // in a cache for a while.
    program->freeTempResources(this);
    // This free will go away once we set up a program cache, and then the cache will be
    // responsible for calling freeGpuResources.
    program->freeGPUResources(this);
    program->unref();

#if SWAP_PER_DRAW
    glFlush();
#if defined(SK_BUILD_FOR_MAC)
    aglSwapBuffers(aglGetCurrentContext());
    int set_a_break_pt_here = 9;
    aglSwapBuffers(aglGetCurrentContext());
#elif defined(SK_BUILD_FOR_WIN32)
    SwapBuf();
    int set_a_break_pt_here = 9;
    SwapBuf();
#endif
#endif
}