/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkGpu.h"

#include "GrContextOptions.h"
#include "GrGeometryProcessor.h"
#include "GrGpuResourceCacheAccess.h"
#include "GrPipeline.h"
#include "GrRenderTargetPriv.h"
#include "GrSurfacePriv.h"
#include "GrTexturePriv.h"
#include "GrVertices.h"

#include "GrVkCommandBuffer.h"
#include "GrVkImage.h"
#include "GrVkIndexBuffer.h"
#include "GrVkMemory.h"
#include "GrVkPipeline.h"
#include "GrVkProgram.h"
#include "GrVkProgramBuilder.h"
#include "GrVkProgramDesc.h"
#include "GrVkRenderPass.h"
#include "GrVkResourceProvider.h"
#include "GrVkTexture.h"
#include "GrVkTextureRenderTarget.h"
#include "GrVkTransferBuffer.h"
#include "GrVkVertexBuffer.h"

#include "SkConfig8888.h"

#include "vk/GrVkInterface.h"

#define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
#define VK_CALL_RET(RET, X) GR_VK_CALL_RET(this->vkInterface(), RET, X)
#define VK_CALL_ERRCHECK(X) GR_VK_CALL_ERRCHECK(this->vkInterface(), X)
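
// These wrappers route Vulkan calls through this GrVkGpu's GrVkInterface function
// table rather than calling the loader directly; for example
//     VK_CALL(QueueWaitIdle(fQueue));
// expands to GR_VK_CALL(this->vkInterface(), QueueWaitIdle(fQueue)).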

////////////////////////////////////////////////////////////////////////////////
// Stuff used to set up a GrVkGpu secretly for now.

// For now vk_gpu_create uses the same signature as GL. This is mostly for ease of
// hiding this code from official Skia. In the end vk_gpu_create will not take a GrBackendContext
// and most likely would take an optional device and queues to use.
GrGpu* vk_gpu_create(GrBackendContext backendContext, const GrContextOptions& options,
                     GrContext* context) {
    // Below is Vulkan setup code that normally would be done by a client, but we do it here for
    // now for testing purposes.
    VkPhysicalDevice physDev;
    VkDevice device;
    VkInstance inst;
    VkResult err;

    const VkApplicationInfo app_info = {
        VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
        nullptr,                            // pNext
        "vktest",                           // pApplicationName
        0,                                  // applicationVersion
        "vktest",                           // pEngineName
        0,                                  // engineVersion
        VK_API_VERSION,                     // apiVersion
    };
    const VkInstanceCreateInfo instance_create = {
        VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
        nullptr,                                // pNext
        0,                                      // flags
        &app_info,                              // pApplicationInfo
        0,                                      // enabledLayerNameCount
        nullptr,                                // ppEnabledLayerNames
        0,                                      // enabledExtensionNameCount
        nullptr,                                // ppEnabledExtensionNames
    };
    err = vkCreateInstance(&instance_create, nullptr, &inst);
    if (err < 0) {
        SkDebugf("vkCreateInstance failed: %d\n", err);
        SkFAIL("failing");
    }

    uint32_t gpuCount;
    err = vkEnumeratePhysicalDevices(inst, &gpuCount, nullptr);
    if (err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        SkFAIL("failing");
    }
    SkASSERT(gpuCount > 0);
    // Just returning the first physical device instead of getting the whole array.
    gpuCount = 1;
    err = vkEnumeratePhysicalDevices(inst, &gpuCount, &physDev);
    if (err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        SkFAIL("failing");
    }

    // query to get the initial queue props size
    uint32_t queueCount;
    vkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr);
    SkASSERT(queueCount >= 1);

    SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties));
    // now get the actual queue props
    VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get();

    vkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps);

    // iterate to find the graphics queue
    uint32_t graphicsQueueIndex = -1;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphicsQueueIndex = i;
            break;
        }
    }
    SkASSERT(graphicsQueueIndex < queueCount);

    float queuePriorities[1] = { 0.0 };
    const VkDeviceQueueCreateInfo queueInfo = {
        VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
        nullptr,                                    // pNext
        0,                                          // VkDeviceQueueCreateFlags
        graphicsQueueIndex,                         // queueFamilyIndex
        1,                                          // queueCount
        queuePriorities,                            // pQueuePriorities
    };
    const VkDeviceCreateInfo deviceInfo = {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
        nullptr,                              // pNext
        0,                                    // VkDeviceCreateFlags
        1,                                    // queueCreateInfoCount
        &queueInfo,                           // pQueueCreateInfos
        0,                                    // layerCount
        nullptr,                              // ppEnabledLayerNames
        0,                                    // extensionCount
        nullptr,                              // ppEnabledExtensionNames
        nullptr                               // ppEnabledFeatures
    };

    err = vkCreateDevice(physDev, &deviceInfo, nullptr, &device);
    if (err) {
        SkDebugf("CreateDevice failed: %d\n", err);
        SkFAIL("failing");
    }

    VkQueue queue;
    vkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);

    const VkCommandPoolCreateInfo cmdPoolInfo = {
        VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType
        nullptr,                                    // pNext
        0,                                          // CmdPoolCreateFlags
        graphicsQueueIndex,                         // queueFamilyIndex
    };

    VkCommandPool cmdPool;
    err = vkCreateCommandPool(device, &cmdPoolInfo, nullptr, &cmdPool);
    if (err) {
        SkDebugf("CreateCommandPool failed: %d\n", err);
        SkFAIL("failing");
    }

    return new GrVkGpu(context, options, physDev, device, queue, cmdPool, inst);
}
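
// The GrVkGpu created above takes ownership of the instance, device, and command
// pool; they are destroyed in ~GrVkGpu once the queue has drained.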

////////////////////////////////////////////////////////////////////////////////

GrVkGpu::GrVkGpu(GrContext* context, const GrContextOptions& options,
                 VkPhysicalDevice physDev, VkDevice device, VkQueue queue, VkCommandPool cmdPool,
                 VkInstance inst)
    : INHERITED(context)
    , fDevice(device)
    , fQueue(queue)
    , fCmdPool(cmdPool)
    , fResourceProvider(this)
    , fVkInstance(inst) {
    fInterface.reset(GrVkCreateInterface(fVkInstance));
    fCompiler = shaderc_compiler_initialize();

    fVkCaps.reset(new GrVkCaps(options, fInterface, physDev));
    fCaps.reset(SkRef(fVkCaps.get()));

    fResourceProvider.init();

    fCurrentCmdBuffer = fResourceProvider.createCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->begin(this);
    VK_CALL(GetPhysicalDeviceMemoryProperties(physDev, &fPhysDevMemProps));
}

GrVkGpu::~GrVkGpu() {
    shaderc_compiler_release(fCompiler);
    fCurrentCmdBuffer->end(this);
    fCurrentCmdBuffer->unref(this);

    // wait for all commands to finish
    VK_CALL(QueueWaitIdle(fQueue));

    // must call this just before we destroy the VkDevice
    fResourceProvider.destroyResources();

    VK_CALL(DestroyCommandPool(fDevice, fCmdPool, nullptr));
    VK_CALL(DestroyDevice(fDevice, nullptr));
    VK_CALL(DestroyInstance(fVkInstance, nullptr));
}

///////////////////////////////////////////////////////////////////////////////

void GrVkGpu::submitCommandBuffer(SyncQueue sync) {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->end(this);

    fCurrentCmdBuffer->submitToQueue(this, fQueue, sync);
    fResourceProvider.checkCommandBuffers();

    // Release old command buffer and create a new one
    fCurrentCmdBuffer->unref(this);
    fCurrentCmdBuffer = fResourceProvider.createCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);

    fCurrentCmdBuffer->begin(this);
}
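
// kForce_SyncQueue makes the submit wait for the queue to finish before returning
// (needed when the CPU is about to read results, as in onReadPixels below);
// kSkip_SyncQueue submits without waiting.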

///////////////////////////////////////////////////////////////////////////////
GrVertexBuffer* GrVkGpu::onCreateVertexBuffer(size_t size, bool dynamic) {
    return GrVkVertexBuffer::Create(this, size, dynamic);
}

GrIndexBuffer* GrVkGpu::onCreateIndexBuffer(size_t size, bool dynamic) {
    return GrVkIndexBuffer::Create(this, size, dynamic);
}

GrTransferBuffer* GrVkGpu::onCreateTransferBuffer(size_t size, TransferType type) {
    GrVkBuffer::Type bufferType = kCpuToGpu_TransferType == type ? GrVkBuffer::kCopyRead_Type
                                                                 : GrVkBuffer::kCopyWrite_Type;
    return GrVkTransferBuffer::Create(this, size, bufferType);
}
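
// kCopyRead buffers are the source of buffer-to-image copies (CPU -> GPU uploads);
// kCopyWrite buffers are the destination of image-to-buffer copies (GPU -> CPU
// read-backs), matching the uses in uploadTexData() and onReadPixels().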

////////////////////////////////////////////////////////////////////////////////
bool GrVkGpu::onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
                                   GrPixelConfig srcConfig, DrawPreference* drawPreference,
                                   WritePixelTempDrawInfo* tempDrawInfo) {
    if (kIndex_8_GrPixelConfig == srcConfig || GrPixelConfigIsCompressed(dstSurface->config())) {
        return false;
    }

    // Currently we don't handle draws, so if the caller wants/needs to do a draw we need to fail
    if (kNoDraw_DrawPreference != *drawPreference) {
        return false;
    }

    if (dstSurface->config() != srcConfig) {
        // TODO: This should fall back to drawing or copying to change config of dstSurface to
        // match that of srcConfig.
        return false;
    }

    return true;
}

bool GrVkGpu::onWritePixels(GrSurface* surface,
                            int left, int top, int width, int height,
                            GrPixelConfig config,
                            const SkTArray<GrMipLevel>& texels) {
    GrVkTexture* vkTex = static_cast<GrVkTexture*>(surface->asTexture());
    if (!vkTex) {
        return false;
    }

    // TODO: We're ignoring MIP levels here.
    if (texels.empty() || !texels.begin()->fPixels) {
        return false;
    }

    // We assume Vulkan doesn't do sRGB <-> linear conversions when reading and writing pixels.
    if (GrPixelConfigIsSRGB(surface->config()) != GrPixelConfigIsSRGB(config)) {
        return false;
    }

    bool success = false;
    if (GrPixelConfigIsCompressed(vkTex->desc().fConfig)) {
        // We check that config == desc.fConfig in GrGpu::getWritePixelsInfo()
        SkASSERT(config == vkTex->desc().fConfig);
        // TODO: add compressed texture support
        // delete the following two lines and uncomment the two after that when ready
        vkTex->unref();
        return false;
        //success = this->uploadCompressedTexData(vkTex->desc(), buffer, false, left, top, width,
        //                                        height);
    } else {
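        // The layout-transition pattern used throughout this file: derive the source
        // stage/access masks from the image's current layout with the GrVkMemory
        // helpers, choose destination masks for the upcoming operation, and let
        // setImageLayout() record the barrier and track the new layout.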
        bool linearTiling = vkTex->isLinearTiled();
        if (linearTiling && VK_IMAGE_LAYOUT_PREINITIALIZED != vkTex->currentLayout()) {
            // Need to change the layout to general in order to perform a host write
            VkImageLayout layout = vkTex->currentLayout();
            VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
            VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_HOST_BIT;
            VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
            VkAccessFlags dstAccessMask = VK_ACCESS_HOST_WRITE_BIT;
            vkTex->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_GENERAL,
                                  srcAccessMask,
                                  dstAccessMask,
                                  srcStageMask,
                                  dstStageMask,
                                  false);
        }
        success = this->uploadTexData(vkTex, left, top, width, height, config,
                                      texels.begin()->fPixels, texels.begin()->fRowBytes);
    }

    if (success) {
        vkTex->texturePriv().dirtyMipMaps(true);
        return true;
    }

    return false;
}

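// Two upload paths: for linearly tiled images the host maps the image memory and
// writes rows in place; for optimally tiled images the data is staged in a
// kCopyRead transfer buffer and copied with copyBufferToImage(), which requires
// the image to be in TRANSFER_DST_OPTIMAL layout first.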
bool GrVkGpu::uploadTexData(GrVkTexture* tex,
                            int left, int top, int width, int height,
                            GrPixelConfig dataConfig,
                            const void* data,
                            size_t rowBytes) {
    SkASSERT(data);

    // If we're uploading compressed data then we should be using uploadCompressedTexData
    SkASSERT(!GrPixelConfigIsCompressed(dataConfig));

    bool linearTiling = tex->isLinearTiled();

    size_t bpp = GrBytesPerPixel(dataConfig);

    const GrSurfaceDesc& desc = tex->desc();

    if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
                                               &width, &height, &data, &rowBytes)) {
        return false;
    }
    size_t trimRowBytes = width * bpp;

    if (linearTiling) {
        SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == tex->currentLayout() ||
                 VK_IMAGE_LAYOUT_GENERAL == tex->currentLayout());
        const VkImageSubresource subres = {
            VK_IMAGE_ASPECT_COLOR_BIT,
            0,  // mipLevel
            0,  // arraySlice
        };
        VkSubresourceLayout layout;
        VkResult err;

        const GrVkInterface* interface = this->vkInterface();

        GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
                                                        tex->textureImage(),
                                                        &subres,
                                                        &layout));

        int texTop = kBottomLeft_GrSurfaceOrigin == desc.fOrigin ? tex->height() - top - height
                                                                 : top;
        VkDeviceSize offset = texTop*layout.rowPitch + left*bpp;
        VkDeviceSize size = height*layout.rowPitch;
        void* mapPtr;
        err = GR_VK_CALL(interface, MapMemory(fDevice, tex->textureMemory(), offset, size, 0,
                                              &mapPtr));
        if (err) {
            return false;
        }

        if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
            // copy into buffer by rows
            const char* srcRow = reinterpret_cast<const char*>(data);
            char* dstRow = reinterpret_cast<char*>(mapPtr)+(height - 1)*layout.rowPitch;
            for (int y = 0; y < height; y++) {
                memcpy(dstRow, srcRow, trimRowBytes);
                srcRow += rowBytes;
                dstRow -= layout.rowPitch;
            }
        } else {
            // If there is no padding on the src (rowBytes) or dst (layout.rowPitch) we can memcpy
            if (trimRowBytes == rowBytes && trimRowBytes == layout.rowPitch) {
                memcpy(mapPtr, data, trimRowBytes * height);
            } else {
                SkRectMemcpy(mapPtr, layout.rowPitch, data, rowBytes, trimRowBytes, height);
            }
        }

        GR_VK_CALL(interface, UnmapMemory(fDevice, tex->textureMemory()));
    } else {
        GrVkTransferBuffer* transferBuffer =
            GrVkTransferBuffer::Create(this, trimRowBytes * height, GrVkBuffer::kCopyRead_Type);

        void* mapPtr = transferBuffer->map();

        if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
            // copy into buffer by rows
            const char* srcRow = reinterpret_cast<const char*>(data);
            char* dstRow = reinterpret_cast<char*>(mapPtr)+(height - 1)*trimRowBytes;
            for (int y = 0; y < height; y++) {
                memcpy(dstRow, srcRow, trimRowBytes);
                srcRow += rowBytes;
                dstRow -= trimRowBytes;
            }
        } else {
            // If there is no padding on the src data rows, we can do a single memcpy
            if (trimRowBytes == rowBytes) {
                memcpy(mapPtr, data, trimRowBytes * height);
            } else {
                SkRectMemcpy(mapPtr, trimRowBytes, data, rowBytes, trimRowBytes, height);
            }
        }

        transferBuffer->unmap();

        // make sure the unmap has finished
        transferBuffer->addMemoryBarrier(this,
                                         VK_ACCESS_HOST_WRITE_BIT,
                                         VK_ACCESS_TRANSFER_READ_BIT,
                                         VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
                                         VK_PIPELINE_STAGE_TRANSFER_BIT,
                                         false);

        // Set up copy region
        bool flipY = kBottomLeft_GrSurfaceOrigin == tex->origin();
        VkOffset3D offset = {
            left,
            flipY ? tex->height() - top - height : top,
            0
        };

        VkBufferImageCopy region;
        memset(&region, 0, sizeof(VkBufferImageCopy));
        region.bufferOffset = 0;
        region.bufferRowLength = width;
        region.bufferImageHeight = height;
        region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
        region.imageOffset = offset;
        region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

        // Change layout of our target so it can be copied to
        VkImageLayout layout = tex->currentLayout();
        VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
        VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
        tex->setImageLayout(this,
                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                            srcAccessMask,
                            dstAccessMask,
                            srcStageMask,
                            dstStageMask,
                            false);

        // Copy the buffer to the image
        fCurrentCmdBuffer->copyBufferToImage(this,
                                             transferBuffer,
                                             tex,
                                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                             1,
                                             &region);

        // Submit the current command buffer to the Queue
        this->submitCommandBuffer(kSkip_SyncQueue);

        transferBuffer->unref();
    }

    return true;
}

////////////////////////////////////////////////////////////////////////////////
GrTexture* GrVkGpu::onCreateTexture(const GrSurfaceDesc& desc, GrGpuResource::LifeCycle lifeCycle,
                                    const SkTArray<GrMipLevel>& texels) {
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);

    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(desc.fConfig, &pixelFormat)) {
        return nullptr;
    }

    if (!fVkCaps->isConfigTexturable(desc.fConfig)) {
        return nullptr;
    }

    bool linearTiling = false;
    if (SkToBool(desc.fFlags & kZeroCopy_GrSurfaceFlag)) {
        if (fVkCaps->isConfigTexurableLinearly(desc.fConfig) &&
            (!renderTarget || fVkCaps->isConfigRenderableLinearly(desc.fConfig, false))) {
            linearTiling = true;
        } else {
            return nullptr;
        }
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    if (renderTarget) {
        usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    }

    // For now we will set VK_IMAGE_USAGE_TRANSFER_DST_BIT and VK_IMAGE_USAGE_TRANSFER_SRC_BIT on
    // every texture since we do not know whether or not we will be using this texture in some
    // copy or not. Also this assumes, as is the current case, that all render targets in Vulkan
    // are also textures. If we change this practice of setting both bits, we must make sure to
    // set the destination bit if we are uploading srcData to the texture.
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VkFlags memProps = (!texels.empty() && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
                                                           VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

    // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is
    // requested, this ImageDesc describes the resolved texture. Therefore we always have samples
    // set to 1.
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = pixelFormat;
    imageDesc.fWidth = desc.fWidth;
    imageDesc.fHeight = desc.fHeight;
    imageDesc.fLevels = 1;
    imageDesc.fSamples = 1;
    imageDesc.fImageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = usageFlags;
    imageDesc.fMemProps = memProps;

    GrVkTexture* tex;
    if (renderTarget) {
        tex = GrVkTextureRenderTarget::CreateNewTextureRenderTarget(this, desc, lifeCycle,
                                                                    imageDesc);
#if 0
        // This clear can be included to fix warning described in https://bugs.skia.org/5045
        // Obviously we do not want to be clearing needlessly every time we create a render target.
        SkIRect rect = SkIRect::MakeWH(tex->width(), tex->height());
        this->clear(rect, GrColor_TRANSPARENT_BLACK, tex->asRenderTarget());
#endif
    } else {
        tex = GrVkTexture::CreateNewTexture(this, desc, lifeCycle, imageDesc);
    }

    if (!tex) {
        return nullptr;
    }

    // TODO: We're ignoring MIP levels here.
    if (!texels.empty()) {
        SkASSERT(texels.begin()->fPixels);
        if (!this->uploadTexData(tex, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
                                 texels.begin()->fPixels, texels.begin()->fRowBytes)) {
            tex->unref();
            return nullptr;
        }
    }

    return tex;
}

////////////////////////////////////////////////////////////////////////////////

static GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin) {
    // By default, all textures in Vk use TopLeft
    if (kDefault_GrSurfaceOrigin == origin) {
        return kTopLeft_GrSurfaceOrigin;
    } else {
        return origin;
    }
}

GrTexture* GrVkGpu::onWrapBackendTexture(const GrBackendTextureDesc& desc,
                                         GrWrapOwnership ownership) {
    VkFormat format;
    if (!GrPixelConfigToVkFormat(desc.fConfig, &format)) {
        return nullptr;
    }

    if (0 == desc.fTextureHandle) {
        return nullptr;
    }

    int maxSize = this->caps()->maxTextureSize();
    if (desc.fWidth > maxSize || desc.fHeight > maxSize) {
        return nullptr;
    }

    // TODO: determine what format Chrome will actually send us and turn it into a Resource
    GrVkImage::Resource* imageRsrc = reinterpret_cast<GrVkImage::Resource*>(desc.fTextureHandle);

    GrGpuResource::LifeCycle lifeCycle = (kAdopt_GrWrapOwnership == ownership)
                                                 ? GrGpuResource::kAdopted_LifeCycle
                                                 : GrGpuResource::kBorrowed_LifeCycle;

    GrSurfaceDesc surfDesc;
    // next line relies on GrBackendTextureDesc's flags matching GrTexture's
    surfDesc.fFlags = (GrSurfaceFlags)desc.fFlags;
    surfDesc.fWidth = desc.fWidth;
    surfDesc.fHeight = desc.fHeight;
    surfDesc.fConfig = desc.fConfig;
    surfDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount());
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag);
    // In GL, Chrome assumes all textures are BottomLeft
    // In VK, we don't have this restriction
    surfDesc.fOrigin = resolve_origin(desc.fOrigin);

    GrVkTexture* texture = nullptr;
    if (renderTarget) {
        texture = GrVkTextureRenderTarget::CreateWrappedTextureRenderTarget(this, surfDesc,
                                                                            lifeCycle, format,
                                                                            imageRsrc);
    } else {
        texture = GrVkTexture::CreateWrappedTexture(this, surfDesc, lifeCycle, format, imageRsrc);
    }
    if (!texture) {
        return nullptr;
    }

    return texture;
}

GrRenderTarget* GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& wrapDesc,
                                                   GrWrapOwnership ownership) {

    // TODO: determine what format Chrome will actually send us and turn it into a Resource
    GrVkImage::Resource* imageRsrc =
        reinterpret_cast<GrVkImage::Resource*>(wrapDesc.fRenderTargetHandle);

    GrGpuResource::LifeCycle lifeCycle = (kAdopt_GrWrapOwnership == ownership)
                                                 ? GrGpuResource::kAdopted_LifeCycle
                                                 : GrGpuResource::kBorrowed_LifeCycle;

    GrSurfaceDesc desc;
    desc.fConfig = wrapDesc.fConfig;
    desc.fFlags = kCheckAllocation_GrSurfaceFlag;
    desc.fWidth = wrapDesc.fWidth;
    desc.fHeight = wrapDesc.fHeight;
    desc.fSampleCnt = SkTMin(wrapDesc.fSampleCnt, this->caps()->maxSampleCount());

    desc.fOrigin = resolve_origin(wrapDesc.fOrigin);

    GrVkRenderTarget* tgt = GrVkRenderTarget::CreateWrappedRenderTarget(this, desc,
                                                                        lifeCycle, imageRsrc);
    if (tgt && wrapDesc.fStencilBits) {
        if (!this->createStencilAttachmentForRenderTarget(tgt, desc.fWidth, desc.fHeight)) {
            tgt->unref();
            return nullptr;
        }
    }
    return tgt;
}

////////////////////////////////////////////////////////////////////////////////

void GrVkGpu::bindGeometry(const GrPrimitiveProcessor& primProc,
                           const GrNonInstancedVertices& vertices) {
    GrVkVertexBuffer* vbuf;
    vbuf = (GrVkVertexBuffer*)vertices.vertexBuffer();
    SkASSERT(vbuf);
    SkASSERT(!vbuf->isMapped());

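    // Guard against prior CPU writes to the buffer still being in flight: make
    // host writes visible to vertex-attribute reads at the vertex-input stage.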
    vbuf->addMemoryBarrier(this,
                           VK_ACCESS_HOST_WRITE_BIT,
                           VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
                           VK_PIPELINE_STAGE_HOST_BIT,
                           VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                           false);

    fCurrentCmdBuffer->bindVertexBuffer(this, vbuf);

    if (vertices.isIndexed()) {
        GrVkIndexBuffer* ibuf = (GrVkIndexBuffer*)vertices.indexBuffer();
        SkASSERT(ibuf);
        SkASSERT(!ibuf->isMapped());

        ibuf->addMemoryBarrier(this,
                               VK_ACCESS_HOST_WRITE_BIT,
                               VK_ACCESS_INDEX_READ_BIT,
                               VK_PIPELINE_STAGE_HOST_BIT,
                               VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                               false);

        fCurrentCmdBuffer->bindIndexBuffer(this, ibuf);
    }
}

void GrVkGpu::buildProgramDesc(GrProgramDesc* desc,
                               const GrPrimitiveProcessor& primProc,
                               const GrPipeline& pipeline) const {
    if (!GrVkProgramDescBuilder::Build(desc, primProc, pipeline, *this->vkCaps().glslCaps())) {
        SkDEBUGFAIL("Failed to generate Vulkan program descriptor");
    }
}
////////////////////////////////////////////////////////////////////////////////

GrStencilAttachment* GrVkGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt,
                                                                     int width,
                                                                     int height) {
    SkASSERT(rt->asTexture());
    SkASSERT(width >= rt->width());
    SkASSERT(height >= rt->height());

    int samples = rt->numStencilSamples();

    SkASSERT(this->vkCaps().stencilFormats().count());
    const GrVkCaps::StencilFormat& sFmt = this->vkCaps().stencilFormats()[0];

    GrVkStencilAttachment* stencil(GrVkStencilAttachment::Create(this,
                                                                 GrGpuResource::kCached_LifeCycle,
                                                                 width,
                                                                 height,
                                                                 samples,
                                                                 sFmt));
    fStats.incStencilAttachmentCreates();
    return stencil;
}

////////////////////////////////////////////////////////////////////////////////

GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, int h,
                                                         GrPixelConfig config) {

    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
        return 0;
    }

    bool linearTiling = false;
    if (!fVkCaps->isConfigTexturable(config)) {
        return 0;
    }

    if (fVkCaps->isConfigTexurableLinearly(config)) {
        linearTiling = true;
    }

    // Currently this is not supported since it requires a copy which has not yet been implemented.
    if (srcData && !linearTiling) {
        return 0;
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VkFlags memProps = (srcData && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
                                                   VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

    // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is
    // requested, this ImageDesc describes the resolved texture. Therefore we always have samples
    // set to 1.
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = pixelFormat;
    imageDesc.fWidth = w;
    imageDesc.fHeight = h;
    imageDesc.fLevels = 1;
    imageDesc.fSamples = 1;
    imageDesc.fImageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = usageFlags;
    imageDesc.fMemProps = memProps;

    const GrVkImage::Resource* imageRsrc = GrVkImage::CreateResource(this, imageDesc);
    if (!imageRsrc) {
        return 0;
    }

    if (srcData) {
        if (linearTiling) {
            const VkImageSubresource subres = {
                VK_IMAGE_ASPECT_COLOR_BIT,
                0,  // mipLevel
                0,  // arraySlice
            };
            VkSubresourceLayout layout;
            VkResult err;

            const GrVkInterface* interface = this->vkInterface();

            GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
                                                            imageRsrc->fImage,
                                                            &subres,
                                                            &layout));

            void* mapPtr;
            err = GR_VK_CALL(interface, MapMemory(fDevice,
                                                  imageRsrc->fAlloc,
                                                  0,
                                                  layout.rowPitch * h,
                                                  0,
                                                  &mapPtr));
            if (err) {
                imageRsrc->unref(this);
                return 0;
            }

            size_t bpp = GrBytesPerPixel(config);
            size_t rowCopyBytes = bpp * w;
            // If there is no padding on dst (layout.rowPitch) we can do a single memcpy.
            // This assumes the srcData comes in with no padding.
            if (rowCopyBytes == layout.rowPitch) {
                memcpy(mapPtr, srcData, rowCopyBytes * h);
            } else {
                SkRectMemcpy(mapPtr, layout.rowPitch, srcData, rowCopyBytes, rowCopyBytes, h);
            }
            GR_VK_CALL(interface, UnmapMemory(fDevice, imageRsrc->fAlloc));
        } else {
            // TODO: Add support for copying to optimal tiling
            SkASSERT(false);
        }
    }

    return (GrBackendObject)imageRsrc;
}

bool GrVkGpu::isTestingOnlyBackendTexture(GrBackendObject id) const {
    GrVkImage::Resource* backend = reinterpret_cast<GrVkImage::Resource*>(id);

    if (backend && backend->fImage && backend->fAlloc) {
        VkMemoryRequirements req;
        memset(&req, 0, sizeof(req));
        GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
                                                                   backend->fImage,
                                                                   &req));
        // TODO: find a better check
        // This will probably fail with a different driver
        return (req.size > 0) && (req.size <= 8192 * 8192);
    }

    return false;
}

void GrVkGpu::deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandon) {
    GrVkImage::Resource* backend = reinterpret_cast<GrVkImage::Resource*>(id);

    if (backend) {
        if (!abandon) {
            backend->unref(this);
        } else {
            backend->unrefAndAbandon();
        }
    }
}

////////////////////////////////////////////////////////////////////////////////

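// The three pipelineBarrier() wrappers below differ only in their payload: a
// global memory barrier, a buffer-range barrier, or an image barrier (the flavor
// that also carries layout transitions). All record into the current command
// buffer.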
void GrVkGpu::addMemoryBarrier(VkPipelineStageFlags srcStageMask,
                               VkPipelineStageFlags dstStageMask,
                               bool byRegion,
                               VkMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                     VkPipelineStageFlags dstStageMask,
                                     bool byRegion,
                                     VkBufferMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kBufferMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::addImageMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                    VkPipelineStageFlags dstStageMask,
                                    bool byRegion,
                                    VkImageMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kImageMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::finishDrawTarget() {
    // Submit the current command buffer to the Queue
    this->submitCommandBuffer(kSkip_SyncQueue);
}

void GrVkGpu::clearStencil(GrRenderTarget* target) {
    if (nullptr == target) {
        return;
    }
    GrStencilAttachment* stencil = target->renderTargetPriv().getStencilAttachment();
    GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));

    VkImageLayout origDstLayout = vkStencil->currentLayout();

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    vkStencil->setImageLayout(this,
                              VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                              srcAccessMask,
                              dstAccessMask,
                              srcStageMask,
                              dstStageMask,
                              false);

    VkImageSubresourceRange subRange;
    memset(&subRange, 0, sizeof(VkImageSubresourceRange));
    subRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    subRange.baseMipLevel = 0;
    subRange.levelCount = 1;
    subRange.baseArrayLayer = 0;
    subRange.layerCount = 1;

    // TODO: I imagine that most times we want to clear a stencil it will be at the beginning of a
    // draw. Thus we should look into using the load op functions on the render pass to clear out
    // the stencil there.
    fCurrentCmdBuffer->clearDepthStencilImage(this, vkStencil, &vkStencilColor, 1, &subRange);
}

void GrVkGpu::onClearStencilClip(GrRenderTarget* target, const SkIRect& rect, bool insideClip) {
    SkASSERT(target);

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
    GrStencilAttachment* sb = target->renderTargetPriv().getStencilAttachment();
    GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)sb;

    // this should only be called internally when we know we have a
    // stencil buffer.
    SkASSERT(sb);
    int stencilBitCount = sb->bits();

    // The contract with the callers does not guarantee that we preserve all bits in the stencil
    // during this clear. Thus we will clear the entire stencil to the desired value.

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
    if (insideClip) {
        vkStencilColor.stencil = (1 << (stencilBitCount - 1));
    } else {
        vkStencilColor.stencil = 0;
    }

    VkImageLayout origDstLayout = vkStencil->currentLayout();
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    VkPipelineStageFlags srcStageMask =
        GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    vkStencil->setImageLayout(this,
                              VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                              srcAccessMask,
                              dstAccessMask,
                              srcStageMask,
                              dstStageMask,
                              false);

    VkClearRect clearRect;
    // Flip rect if necessary
    SkIRect vkRect = rect;

    if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) {
        vkRect.fTop = vkRT->height() - rect.fBottom;
        vkRect.fBottom = vkRT->height() - rect.fTop;
    }

    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };

    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
    SkASSERT(renderPass);
    fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

    uint32_t stencilIndex;
    SkAssertResult(renderPass->stencilAttachmentIndex(&stencilIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    attachment.colorAttachment = 0; // this value shouldn't matter
    attachment.clearValue.depthStencil = vkStencilColor;

    fCurrentCmdBuffer->clearAttachments(this, 1, &attachment, 1, &clearRect);
    fCurrentCmdBuffer->endRenderPass(this);
}

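// Partial clears below go through a render pass and clearAttachments(); a
// full-target clear instead transitions the image to TRANSFER_DST_OPTIMAL and
// uses clearColorImage().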
void GrVkGpu::onClear(GrRenderTarget* target, const SkIRect& rect, GrColor color) {
    // parent class should never let us get here with no RT
    SkASSERT(target);

    VkClearColorValue vkColor;
    GrColorToRGBAFloat(color, vkColor.float32);

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
    VkImageLayout origDstLayout = vkRT->currentLayout();

    if (rect.width() != target->width() || rect.height() != target->height()) {
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
        VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
        VkPipelineStageFlags srcStageMask =
            GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
        vkRT->setImageLayout(this,
                             VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                             srcAccessMask,
                             dstAccessMask,
                             srcStageMask,
                             dstStageMask,
                             false);

        VkClearRect clearRect;
        // Flip rect if necessary
        SkIRect vkRect = rect;
        if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) {
            vkRect.fTop = vkRT->height() - rect.fBottom;
            vkRect.fBottom = vkRT->height() - rect.fTop;
        }
        clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
        clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
        clearRect.baseArrayLayer = 0;
        clearRect.layerCount = 1;

        const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
        SkASSERT(renderPass);
        fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

        uint32_t colorIndex;
        SkAssertResult(renderPass->colorAttachmentIndex(&colorIndex));

        VkClearAttachment attachment;
        attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        attachment.colorAttachment = colorIndex;
        attachment.clearValue.color = vkColor;

        fCurrentCmdBuffer->clearAttachments(this, 1, &attachment, 1, &clearRect);
        fCurrentCmdBuffer->endRenderPass(this);
        return;
    }

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    vkRT->setImageLayout(this,
                         VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                         srcAccessMask,
                         dstAccessMask,
                         srcStageMask,
                         dstStageMask,
                         false);

    VkImageSubresourceRange subRange;
    memset(&subRange, 0, sizeof(VkImageSubresourceRange));
    subRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    subRange.baseMipLevel = 0;
    subRange.levelCount = 1;
    subRange.baseArrayLayer = 0;
    subRange.layerCount = 1;

    // In the future we may not actually be doing this type of clear at all. If we are inside a
    // render pass or doing a non full clear then we will use CmdClearColorAttachment. The more
    // common use case will be clearing an attachment at the start of a render pass, in which case
    // we will use the clear load ops.
    fCurrentCmdBuffer->clearColorImage(this,
                                       vkRT,
                                       &vkColor,
                                       1, &subRange);
}

inline bool can_copy_image(const GrSurface* dst,
                           const GrSurface* src,
                           const GrVkGpu* gpu) {
    if (src->asTexture() &&
        dst->asTexture() &&
        src->origin() == dst->origin() &&
        src->config() == dst->config()) {
        return true;
    }

    // How does msaa play into this? If a VkTexture is multisampled, are we copying the
    // multisampled or the resolved image here?

    return false;
}

void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst,
                                     GrSurface* src,
                                     const SkIRect& srcRect,
                                     const SkIPoint& dstPoint) {
    SkASSERT(can_copy_image(dst, src, this));

    // Insert memory barriers to switch src and dst to transfer_source and transfer_dst layouts
    GrVkTexture* dstTex = static_cast<GrVkTexture*>(dst->asTexture());
    GrVkTexture* srcTex = static_cast<GrVkTexture*>(src->asTexture());

    VkImageLayout origDstLayout = dstTex->currentLayout();
    VkImageLayout origSrcLayout = srcTex->currentLayout();

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    // These flags are for flushing/invalidating caches and for the dst image it doesn't matter if
    // the cache is flushed since it is only being written to.
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    dstTex->setImageLayout(this,
                           VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                           srcAccessMask,
                           dstAccessMask,
                           srcStageMask,
                           dstStageMask,
                           false);

    srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origSrcLayout);
    dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origSrcLayout);
    dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;

    srcTex->setImageLayout(this,
                           VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                           srcAccessMask,
                           dstAccessMask,
                           srcStageMask,
                           dstStageMask,
                           false);

    // Flip rect if necessary
    SkIRect srcVkRect = srcRect;
    int32_t dstY = dstPoint.fY;

    if (kBottomLeft_GrSurfaceOrigin == src->origin()) {
        SkASSERT(kBottomLeft_GrSurfaceOrigin == dst->origin());
        srcVkRect.fTop = src->height() - srcRect.fBottom;
        srcVkRect.fBottom = src->height() - srcRect.fTop;
        dstY = dst->height() - dstPoint.fY - srcVkRect.height();
    }

    VkImageCopy copyRegion;
    memset(&copyRegion, 0, sizeof(VkImageCopy));
    copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.srcOffset = { srcVkRect.fLeft, srcVkRect.fTop, 0 };
    copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.dstOffset = { dstPoint.fX, dstY, 0 };
    copyRegion.extent = { (uint32_t)srcVkRect.width(), (uint32_t)srcVkRect.height(), 1 };

    fCurrentCmdBuffer->copyImage(this,
                                 srcTex,
                                 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                 dstTex,
                                 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                 1,
                                 &copyRegion);
}

inline bool can_copy_as_draw(const GrSurface* dst,
                             const GrSurface* src,
                             const GrVkGpu* gpu) {
    return false;
}

void GrVkGpu::copySurfaceAsDraw(GrSurface* dst,
                                GrSurface* src,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint) {
    SkASSERT(false);
}

bool GrVkGpu::onCopySurface(GrSurface* dst,
                            GrSurface* src,
                            const SkIRect& srcRect,
                            const SkIPoint& dstPoint) {
    if (can_copy_image(dst, src, this)) {
        this->copySurfaceAsCopyImage(dst, src, srcRect, dstPoint);
        return true;
    }

    if (can_copy_as_draw(dst, src, this)) {
        this->copySurfaceAsDraw(dst, src, srcRect, dstPoint);
        return true;
    }

    return false;
}

void GrVkGpu::onGetMultisampleSpecs(GrRenderTarget* rt, const GrStencilSettings&,
                                    int* effectiveSampleCnt, SkAutoTDeleteArray<SkPoint>*) {
    // TODO: stub.
    SkASSERT(!this->caps()->sampleLocationsSupport());
    *effectiveSampleCnt = rt->desc().fSampleCnt;
}

bool GrVkGpu::onGetReadPixelsInfo(GrSurface* srcSurface, int width, int height, size_t rowBytes,
                                  GrPixelConfig readConfig, DrawPreference* drawPreference,
                                  ReadPixelTempDrawInfo* tempDrawInfo) {
    // Currently we don't handle draws, so if the caller wants/needs to do a draw we need to fail
    if (kNoDraw_DrawPreference != *drawPreference) {
        return false;
    }

    if (srcSurface->config() != readConfig) {
        // TODO: This should fall back to drawing or copying to change config of srcSurface to
        // match that of readConfig.
        return false;
    }

    return true;
}

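// Read-back path: copy the image into a host-mappable transfer buffer, force the
// queue to drain, then memcpy the rows out (flipping in place for bottom-left
// origin surfaces).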
bool GrVkGpu::onReadPixels(GrSurface* surface,
                           int left, int top, int width, int height,
                           GrPixelConfig config,
                           void* buffer,
                           size_t rowBytes) {
    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
        return false;
    }

    GrVkTexture* tgt = static_cast<GrVkTexture*>(surface->asTexture());
    if (!tgt) {
        return false;
    }

    // Change layout of our target so it can be used as copy
    VkImageLayout layout = tgt->currentLayout();
    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
    tgt->setImageLayout(this,
                        VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                        srcAccessMask,
                        dstAccessMask,
                        srcStageMask,
                        dstStageMask,
                        false);

    GrVkTransferBuffer* transferBuffer =
        reinterpret_cast<GrVkTransferBuffer*>(this->createTransferBuffer(rowBytes * height,
                                                                         kGpuToCpu_TransferType));

    bool flipY = kBottomLeft_GrSurfaceOrigin == surface->origin();
    VkOffset3D offset = {
        left,
        flipY ? surface->height() - top - height : top,
        0
    };

    // Copy the image to a buffer so we can map it to cpu memory
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = 0;
    region.bufferRowLength = 0; // Forces RowLength to be imageExtent.width
    region.bufferImageHeight = 0; // Forces height to be tightly packed. Only useful for 3d images.
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = offset;
    region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

    fCurrentCmdBuffer->copyImageToBuffer(this,
                                         tgt,
                                         VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                         transferBuffer,
                                         1,
                                         &region);

    // make sure the copy to buffer has finished
    transferBuffer->addMemoryBarrier(this,
                                     VK_ACCESS_TRANSFER_WRITE_BIT,
                                     VK_ACCESS_HOST_READ_BIT,
                                     VK_PIPELINE_STAGE_TRANSFER_BIT,
                                     VK_PIPELINE_STAGE_HOST_BIT,
                                     false);

    // We need to submit the current command buffer to the Queue and make sure it finishes before
    // we can copy the data out of the buffer.
    this->submitCommandBuffer(kForce_SyncQueue);

    void* mappedMemory = transferBuffer->map();

    memcpy(buffer, mappedMemory, rowBytes*height);

    transferBuffer->unmap();
    transferBuffer->unref();

    if (flipY) {
        SkAutoSMalloc<32 * sizeof(GrColor)> scratch;
        size_t tightRowBytes = GrBytesPerPixel(config) * width;
        scratch.reset(tightRowBytes);
        void* tmpRow = scratch.get();
        // flip y in-place by rows
        const int halfY = height >> 1;
        char* top = reinterpret_cast<char*>(buffer);
        char* bottom = top + (height - 1) * rowBytes;
        for (int y = 0; y < halfY; y++) {
            memcpy(tmpRow, top, tightRowBytes);
            memcpy(top, bottom, tightRowBytes);
            memcpy(bottom, tmpRow, tightRowBytes);
            top += rowBytes;
            bottom -= rowBytes;
        }
    }

    return true;
}

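// Draw flow: build a GrVkProgram for this pipeline/render pass, bind it and the
// geometry, transition the color (and, if enabled, stencil) attachments to their
// attachment layouts, then record the draw inside the render pass.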
void GrVkGpu::onDraw(const DrawArgs& args, const GrNonInstancedVertices& vertices) {
    GrRenderTarget* rt = args.fPipeline->getRenderTarget();
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
    const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
    SkASSERT(renderPass);

    GrVkProgram* program = GrVkProgramBuilder::CreateProgram(this, args,
                                                             vertices.primitiveType(),
                                                             *renderPass);

    if (!program) {
        return;
    }

    program->setData(this, *args.fPrimitiveProcessor, *args.fPipeline);

    fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

    program->bind(this, fCurrentCmdBuffer);

    this->bindGeometry(*args.fPrimitiveProcessor, vertices);

    // Change layout of our render target so it can be used as the color attachment
    VkImageLayout layout = vkRT->currentLayout();
    // Our color attachment is purely a destination and won't be read so don't need to flush or
    // invalidate any caches
    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    vkRT->setImageLayout(this,
                         VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                         srcAccessMask,
                         dstAccessMask,
                         srcStageMask,
                         dstStageMask,
                         false);

    // If we are using a stencil attachment we also need to update its layout
    if (!args.fPipeline->getStencil().isDisabled()) {
        GrStencilAttachment* stencil = vkRT->renderTargetPriv().getStencilAttachment();
        GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
        VkImageLayout origDstLayout = vkStencil->currentLayout();
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
        VkAccessFlags dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
        VkPipelineStageFlags srcStageMask =
            GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
        vkStencil->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                                  srcAccessMask,
                                  dstAccessMask,
                                  srcStageMask,
                                  dstStageMask,
                                  false);
    }

    if (vertices.isIndexed()) {
        fCurrentCmdBuffer->drawIndexed(this,
                                       vertices.indexCount(),
                                       1,
                                       vertices.startIndex(),
                                       vertices.startVertex(),
                                       0);
    } else {
        fCurrentCmdBuffer->draw(this, vertices.vertexCount(), 1, vertices.startVertex(), 0);
    }

    fCurrentCmdBuffer->endRenderPass(this);

    // Technically we don't have to call this here (since there is a safety check in
    // program::setData), but this will allow for quicker freeing of resources if the program sits
    // in a cache for a while.
    program->freeTempResources(this);
    // This free will go away once we set up a program cache, and then the cache will be
    // responsible for calling freeGPUResources.
    program->freeGPUResources(this);
    program->unref();

#if SWAP_PER_DRAW
    glFlush();
#if defined(SK_BUILD_FOR_MAC)
    aglSwapBuffers(aglGetCurrentContext());
    int set_a_break_pt_here = 9;
    aglSwapBuffers(aglGetCurrentContext());
#elif defined(SK_BUILD_FOR_WIN32)
    SwapBuf();
    int set_a_break_pt_here = 9;
    SwapBuf();
#endif
#endif
}