/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkGpu.h"

#include "GrContextOptions.h"
#include "GrGeometryProcessor.h"
#include "GrGpuResourceCacheAccess.h"
#include "GrPipeline.h"
#include "GrRenderTargetPriv.h"
#include "GrSurfacePriv.h"
#include "GrTexturePriv.h"
#include "GrVertices.h"

#include "GrVkCommandBuffer.h"
#include "GrVkImage.h"
#include "GrVkIndexBuffer.h"
#include "GrVkMemory.h"
#include "GrVkPipeline.h"
#include "GrVkProgram.h"
#include "GrVkProgramBuilder.h"
#include "GrVkProgramDesc.h"
#include "GrVkRenderPass.h"
#include "GrVkResourceProvider.h"
#include "GrVkTexture.h"
#include "GrVkTextureRenderTarget.h"
#include "GrVkTransferBuffer.h"
#include "GrVkVertexBuffer.h"

#include "SkConfig8888.h"

#include "vk/GrVkInterface.h"

#define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
#define VK_CALL_RET(RET, X) GR_VK_CALL_RET(this->vkInterface(), RET, X)
#define VK_CALL_ERRCHECK(X) GR_VK_CALL_ERRCHECK(this->vkInterface(), X)
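
// For example, VK_CALL(QueueWaitIdle(fQueue)) routes the call through the
// GrVkInterface function table as
// GR_VK_CALL(this->vkInterface(), QueueWaitIdle(fQueue)).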

////////////////////////////////////////////////////////////////////////////////
// Stuff used to set up a GrVkGpu secretly for now.

// For now VkGpuCreate uses the same signature as GL. This is mostly for ease of
// hiding this code from official Skia. In the end VkGpuCreate will not take a
// GrBackendContext and most likely would take an optional device and queues to use.
GrGpu* vk_gpu_create(GrBackendContext backendContext, const GrContextOptions& options,
                     GrContext* context) {
    // Below is Vulkan setup code that normally would be done by a client, but we do it here
    // for testing purposes.
    VkPhysicalDevice physDev;
    VkDevice device;
    VkInstance inst;
    VkResult err;

    const VkApplicationInfo app_info = {
        VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
        nullptr,                            // pNext
        "vktest",                           // pApplicationName
        0,                                  // applicationVersion
        "vktest",                           // pEngineName
        0,                                  // engineVersion
        VK_API_VERSION,                     // apiVersion
    };
    const VkInstanceCreateInfo instance_create = {
        VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
        nullptr,                                // pNext
        0,                                      // flags
        &app_info,                              // pApplicationInfo
        0,                                      // enabledLayerNameCount
        nullptr,                                // ppEnabledLayerNames
        0,                                      // enabledExtensionNameCount
        nullptr,                                // ppEnabledExtensionNames
    };
    err = vkCreateInstance(&instance_create, nullptr, &inst);
    if (err < 0) {
        SkDebugf("vkCreateInstance failed: %d\n", err);
        SkFAIL("failing");
    }

    uint32_t gpuCount;
    err = vkEnumeratePhysicalDevices(inst, &gpuCount, nullptr);
    if (err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        SkFAIL("failing");
    }
    SkASSERT(gpuCount > 0);
    // Just returning the first physical device instead of getting the whole array.
    gpuCount = 1;
    err = vkEnumeratePhysicalDevices(inst, &gpuCount, &physDev);
    if (err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        SkFAIL("failing");
    }

    // query to get the initial queue props size
    uint32_t queueCount;
    vkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr);
    SkASSERT(queueCount >= 1);

    SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties));
    // now get the actual queue props
    VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get();

    vkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps);

    // iterate to find the graphics queue
    uint32_t graphicsQueueIndex = -1;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphicsQueueIndex = i;
            break;
        }
    }
    SkASSERT(graphicsQueueIndex < queueCount);

    float queuePriorities[1] = { 0.0 };
    const VkDeviceQueueCreateInfo queueInfo = {
        VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
        nullptr,                                    // pNext
        0,                                          // VkDeviceQueueCreateFlags
        graphicsQueueIndex,                         // queueFamilyIndex
        1,                                          // queueCount
        queuePriorities,                            // pQueuePriorities
    };
    const VkDeviceCreateInfo deviceInfo = {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
        nullptr,                              // pNext
        0,                                    // VkDeviceCreateFlags
        1,                                    // queueCreateInfoCount
        &queueInfo,                           // pQueueCreateInfos
        0,                                    // layerCount
        nullptr,                              // ppEnabledLayerNames
        0,                                    // extensionCount
        nullptr,                              // ppEnabledExtensionNames
        nullptr                               // ppEnabledFeatures
    };

    err = vkCreateDevice(physDev, &deviceInfo, nullptr, &device);
    if (err) {
        SkDebugf("CreateDevice failed: %d\n", err);
        SkFAIL("failing");
    }

    VkQueue queue;
    vkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);

    const VkCommandPoolCreateInfo cmdPoolInfo = {
        VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType
        nullptr,                                    // pNext
        0,                                          // CmdPoolCreateFlags
        graphicsQueueIndex,                         // queueFamilyIndex
    };

    VkCommandPool cmdPool;
    err = vkCreateCommandPool(device, &cmdPoolInfo, nullptr, &cmdPool);
    if (err) {
        SkDebugf("CreateCommandPool failed: %d\n", err);
        SkFAIL("failing");
    }

    return new GrVkGpu(context, options, physDev, device, queue, cmdPool, inst);
}
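
// A minimal usage sketch (assuming a valid GrContext* named "context"); the
// GrBackendContext argument is unused by this setup path and only kept to
// mirror the GL-style signature:
//
//     GrContextOptions options;
//     GrGpu* gpu = vk_gpu_create(0 /* ignored */, options, context);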

////////////////////////////////////////////////////////////////////////////////

GrVkGpu::GrVkGpu(GrContext* context, const GrContextOptions& options,
                 VkPhysicalDevice physDev, VkDevice device, VkQueue queue, VkCommandPool cmdPool,
                 VkInstance inst)
    : INHERITED(context)
    , fDevice(device)
    , fQueue(queue)
    , fCmdPool(cmdPool)
    , fResourceProvider(this)
    , fVkInstance(inst) {
    fInterface.reset(GrVkCreateInterface(fVkInstance));
    fCompiler = shaderc_compiler_initialize();

    fVkCaps.reset(new GrVkCaps(options, fInterface, physDev));
    fCaps.reset(SkRef(fVkCaps.get()));

    fResourceProvider.init();

    fCurrentCmdBuffer = fResourceProvider.createCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->begin(this);
    VK_CALL(GetPhysicalDeviceMemoryProperties(physDev, &fPhysDevMemProps));
}

GrVkGpu::~GrVkGpu() {
    shaderc_compiler_release(fCompiler);
    fCurrentCmdBuffer->end(this);
    fCurrentCmdBuffer->unref(this);

    // wait for all commands to finish
    VK_CALL(QueueWaitIdle(fQueue));

    // must call this just before we destroy the VkDevice
    fResourceProvider.destroyResources();

    VK_CALL(DestroyCommandPool(fDevice, fCmdPool, nullptr));
    VK_CALL(DestroyDevice(fDevice, nullptr));
    VK_CALL(DestroyInstance(fVkInstance, nullptr));
}

///////////////////////////////////////////////////////////////////////////////
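
// Each submit retires the in-flight command buffer and immediately begins
// recording a fresh one, so callers can keep issuing commands without checking
// state. With kForce_SyncQueue the caller expects the submit to block until the
// queue has drained (see the read-back path in onReadPixels below).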
void GrVkGpu::submitCommandBuffer(SyncQueue sync) {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->end(this);

    fCurrentCmdBuffer->submitToQueue(this, fQueue, sync);
    fResourceProvider.checkCommandBuffers();

    // Release old command buffer and create a new one
    fCurrentCmdBuffer->unref(this);
    fCurrentCmdBuffer = fResourceProvider.createCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);

    fCurrentCmdBuffer->begin(this);
}

///////////////////////////////////////////////////////////////////////////////
GrVertexBuffer* GrVkGpu::onCreateVertexBuffer(size_t size, bool dynamic) {
    return GrVkVertexBuffer::Create(this, size, dynamic);
}

GrIndexBuffer* GrVkGpu::onCreateIndexBuffer(size_t size, bool dynamic) {
    return GrVkIndexBuffer::Create(this, size, dynamic);
}

GrTransferBuffer* GrVkGpu::onCreateTransferBuffer(size_t size, TransferType type) {
    GrVkBuffer::Type bufferType = kCpuToGpu_TransferType == type ? GrVkBuffer::kCopyRead_Type
                                                                 : GrVkBuffer::kCopyWrite_Type;
    return GrVkTransferBuffer::Create(this, size, bufferType);
}

////////////////////////////////////////////////////////////////////////////////
bool GrVkGpu::onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
                                   GrPixelConfig srcConfig, DrawPreference* drawPreference,
                                   WritePixelTempDrawInfo* tempDrawInfo) {
    if (kIndex_8_GrPixelConfig == srcConfig || GrPixelConfigIsCompressed(dstSurface->config())) {
        return false;
    }

    // Currently we don't handle draws, so if the caller wants/needs to do a draw we need to fail
    if (kNoDraw_DrawPreference != *drawPreference) {
        return false;
    }

    if (dstSurface->config() != srcConfig) {
        // TODO: This should fall back to drawing or copying to change config of dstSurface to
        // match that of srcConfig.
        return false;
    }

    return true;
}

bool GrVkGpu::onWritePixels(GrSurface* surface,
                            int left, int top, int width, int height,
                            GrPixelConfig config,
                            const SkTArray<GrMipLevel>& texels) {
    GrVkTexture* vkTex = static_cast<GrVkTexture*>(surface->asTexture());
    if (!vkTex) {
        return false;
    }

    // TODO: We're ignoring MIP levels here.
    if (texels.empty() || !texels.begin()->fPixels) {
        return false;
    }

    // We assume Vulkan doesn't do sRGB <-> linear conversions when reading and writing pixels.
    if (GrPixelConfigIsSRGB(surface->config()) != GrPixelConfigIsSRGB(config)) {
        return false;
    }

    bool success = false;
    if (GrPixelConfigIsCompressed(vkTex->desc().fConfig)) {
        // We check that config == desc.fConfig in GrGpu::getWritePixelsInfo()
        SkASSERT(config == vkTex->desc().fConfig);
        // TODO: add compressed texture support
        // delete the following two lines and uncomment the two after that when ready
        vkTex->unref();
        return false;
        //success = this->uploadCompressedTexData(vkTex->desc(), buffer, false, left, top, width,
        //                                        height);
    } else {
        bool linearTiling = vkTex->isLinearTiled();
        if (linearTiling && VK_IMAGE_LAYOUT_PREINITIALIZED != vkTex->currentLayout()) {
            // Need to change the layout to general in order to perform a host write
            VkImageLayout layout = vkTex->currentLayout();
            VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
            VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_HOST_BIT;
            VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
            VkAccessFlags dstAccessMask = VK_ACCESS_HOST_WRITE_BIT;
            vkTex->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_GENERAL,
                                  srcAccessMask,
                                  dstAccessMask,
                                  srcStageMask,
                                  dstStageMask,
                                  false);
        }
        success = this->uploadTexData(vkTex, left, top, width, height, config,
                                      texels.begin()->fPixels, texels.begin()->fRowBytes);
    }

    if (success) {
        vkTex->texturePriv().dirtyMipMaps(true);
        return true;
    }

    return false;
}
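
// Uploads |data| into |tex| via one of two paths: a linearly tiled image is
// mapped and written directly by the host, while an optimally tiled image is
// staged through a kCopyRead transfer buffer and copied with copyBufferToImage
// on the current command buffer.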
bool GrVkGpu::uploadTexData(GrVkTexture* tex,
                            int left, int top, int width, int height,
                            GrPixelConfig dataConfig,
                            const void* data,
                            size_t rowBytes) {
    SkASSERT(data);

    // If we're uploading compressed data then we should be using uploadCompressedTexData
    SkASSERT(!GrPixelConfigIsCompressed(dataConfig));

    bool linearTiling = tex->isLinearTiled();

    size_t bpp = GrBytesPerPixel(dataConfig);

    const GrSurfaceDesc& desc = tex->desc();

    if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
                                               &width, &height, &data, &rowBytes)) {
        return false;
    }
    size_t trimRowBytes = width * bpp;

    if (linearTiling) {
        SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == tex->currentLayout() ||
                 VK_IMAGE_LAYOUT_GENERAL == tex->currentLayout());
        const VkImageSubresource subres = {
            VK_IMAGE_ASPECT_COLOR_BIT,
            0, // mipLevel
            0, // arraySlice
        };
        VkSubresourceLayout layout;
        VkResult err;

        const GrVkInterface* interface = this->vkInterface();

        GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
                                                        tex->textureImage(),
                                                        &subres,
                                                        &layout));
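
        // Map only the window of rows we touch. For example, with a 4 bpp
        // config, rowPitch == 1024, and a 16x16 write at (left=8, top=4) on a
        // top-left-origin surface: offset = 4*1024 + 8*4 = 4128 bytes and
        // size = 16*1024 = 16384 bytes.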
        int texTop = kBottomLeft_GrSurfaceOrigin == desc.fOrigin ? tex->height() - top - height
                                                                 : top;
        VkDeviceSize offset = texTop*layout.rowPitch + left*bpp;
        VkDeviceSize size = height*layout.rowPitch;
        void* mapPtr;
        err = GR_VK_CALL(interface, MapMemory(fDevice, tex->textureMemory(), offset, size, 0,
                                              &mapPtr));
        if (err) {
            return false;
        }

        if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
            // copy into buffer by rows
            const char* srcRow = reinterpret_cast<const char*>(data);
            char* dstRow = reinterpret_cast<char*>(mapPtr)+(height - 1)*layout.rowPitch;
            for (int y = 0; y < height; y++) {
                memcpy(dstRow, srcRow, trimRowBytes);
                srcRow += rowBytes;
                dstRow -= layout.rowPitch;
            }
        } else {
            // If there is no padding on the src (rowBytes) or dst (layout.rowPitch) we can memcpy
            if (trimRowBytes == rowBytes && trimRowBytes == layout.rowPitch) {
                memcpy(mapPtr, data, trimRowBytes * height);
            } else {
                SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), data, rowBytes,
                             trimRowBytes, height);
            }
        }

        GR_VK_CALL(interface, UnmapMemory(fDevice, tex->textureMemory()));
    } else {
        GrVkTransferBuffer* transferBuffer =
            GrVkTransferBuffer::Create(this, trimRowBytes * height, GrVkBuffer::kCopyRead_Type);

        void* mapPtr = transferBuffer->map();

        if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
            // copy into buffer by rows
            const char* srcRow = reinterpret_cast<const char*>(data);
            char* dstRow = reinterpret_cast<char*>(mapPtr)+(height - 1)*trimRowBytes;
            for (int y = 0; y < height; y++) {
                memcpy(dstRow, srcRow, trimRowBytes);
                srcRow += rowBytes;
                dstRow -= trimRowBytes;
            }
        } else {
            // If there is no padding on the src data rows, we can do a single memcpy
            if (trimRowBytes == rowBytes) {
                memcpy(mapPtr, data, trimRowBytes * height);
            } else {
                SkRectMemcpy(mapPtr, trimRowBytes, data, rowBytes, trimRowBytes, height);
            }
        }

        transferBuffer->unmap();

        // make sure the unmap has finished
        transferBuffer->addMemoryBarrier(this,
                                         VK_ACCESS_HOST_WRITE_BIT,
                                         VK_ACCESS_TRANSFER_READ_BIT,
                                         VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
                                         VK_PIPELINE_STAGE_TRANSFER_BIT,
                                         false);

        // Set up copy region
        bool flipY = kBottomLeft_GrSurfaceOrigin == tex->origin();
        VkOffset3D offset = {
            left,
            flipY ? tex->height() - top - height : top,
            0
        };

        VkBufferImageCopy region;
        memset(&region, 0, sizeof(VkBufferImageCopy));
        region.bufferOffset = 0;
        region.bufferRowLength = width;
        region.bufferImageHeight = height;
        region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
        region.imageOffset = offset;
        region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

        // Change layout of our target so it can be copied to
        VkImageLayout layout = tex->currentLayout();
        VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
        VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
        tex->setImageLayout(this,
                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                            srcAccessMask,
                            dstAccessMask,
                            srcStageMask,
                            dstStageMask,
                            false);

        // Copy the buffer to the image
        fCurrentCmdBuffer->copyBufferToImage(this,
                                             transferBuffer,
                                             tex,
                                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                             1,
                                             &region);

        // Submit the current command buffer to the Queue
        this->submitCommandBuffer(kSkip_SyncQueue);

        transferBuffer->unref();
    }

    return true;
}

////////////////////////////////////////////////////////////////////////////////
GrTexture* GrVkGpu::onCreateTexture(const GrSurfaceDesc& desc, GrGpuResource::LifeCycle lifeCycle,
                                    const SkTArray<GrMipLevel>& texels) {
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);

    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(desc.fConfig, &pixelFormat)) {
        return nullptr;
    }

    if (!fVkCaps->isConfigTexturable(desc.fConfig)) {
        return nullptr;
    }

    bool linearTiling = false;
    if (SkToBool(desc.fFlags & kZeroCopy_GrSurfaceFlag)) {
        if (fVkCaps->isConfigTexurableLinearly(desc.fConfig) &&
            (!renderTarget || fVkCaps->isConfigRenderableLinearly(desc.fConfig, false))) {
            linearTiling = true;
        } else {
            return nullptr;
        }
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    if (renderTarget) {
        usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    }

    // For now we will set the VK_IMAGE_USAGE_TRANSFER_DESTINATION_BIT and
    // VK_IMAGE_USAGE_TRANSFER_SOURCE_BIT on every texture since we do not know whether or not we
    // will be using this texture in some copy or not. Also this assumes, as is the current case,
    // that all render targets in Vulkan are also textures. If we change this practice of setting
    // both bits, we must make sure to set the destination bit if we are uploading srcData to the
    // texture.
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VkFlags memProps = (!texels.empty() && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
                                                           VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
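
    // Host-visible memory is only needed when there is initial texel data to
    // write through a mapped pointer; otherwise device-local memory is preferred.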

    // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is
    // requested, this ImageDesc describes the resolved texture. Therefore we always have samples
    // set to 1.
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = pixelFormat;
    imageDesc.fWidth = desc.fWidth;
    imageDesc.fHeight = desc.fHeight;
    imageDesc.fLevels = 1;
    imageDesc.fSamples = 1;
    imageDesc.fImageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = usageFlags;
    imageDesc.fMemProps = memProps;

    GrVkTexture* tex;
    if (renderTarget) {
        tex = GrVkTextureRenderTarget::CreateNewTextureRenderTarget(this, desc, lifeCycle,
                                                                    imageDesc);
#if 0
        // This clear can be included to fix the warning described in https://bugs.skia.org/5045
        // Obviously we do not want to be clearing needlessly every time we create a render target.
        SkIRect rect = SkIRect::MakeWH(tex->width(), tex->height());
        this->clear(rect, GrColor_TRANSPARENT_BLACK, tex->asRenderTarget());
#endif
    } else {
        tex = GrVkTexture::CreateNewTexture(this, desc, lifeCycle, imageDesc);
    }

    if (!tex) {
        return nullptr;
    }

    // TODO: We're ignoring MIP levels here.
    if (!texels.empty()) {
        SkASSERT(texels.begin()->fPixels);
        if (!this->uploadTexData(tex, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
                                 texels.begin()->fPixels, texels.begin()->fRowBytes)) {
            tex->unref();
            return nullptr;
        }
    }

    return tex;
}

////////////////////////////////////////////////////////////////////////////////

static GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin) {
    // By default, all textures in Vk use TopLeft
    if (kDefault_GrSurfaceOrigin == origin) {
        return kTopLeft_GrSurfaceOrigin;
    } else {
        return origin;
    }
}

GrTexture* GrVkGpu::onWrapBackendTexture(const GrBackendTextureDesc& desc,
                                         GrWrapOwnership ownership) {
    VkFormat format;
    if (!GrPixelConfigToVkFormat(desc.fConfig, &format)) {
        return nullptr;
    }

    if (0 == desc.fTextureHandle) {
        return nullptr;
    }

    int maxSize = this->caps()->maxTextureSize();
    if (desc.fWidth > maxSize || desc.fHeight > maxSize) {
        return nullptr;
    }

    // TODO: determine what format Chrome will actually send us and turn it into a Resource
    GrVkImage::Resource* imageRsrc = reinterpret_cast<GrVkImage::Resource*>(desc.fTextureHandle);

    GrGpuResource::LifeCycle lifeCycle = (kAdopt_GrWrapOwnership == ownership)
                                                 ? GrGpuResource::kAdopted_LifeCycle
                                                 : GrGpuResource::kBorrowed_LifeCycle;

    GrSurfaceDesc surfDesc;
    // next line relies on GrBackendTextureDesc's flags matching GrTexture's
    surfDesc.fFlags = (GrSurfaceFlags)desc.fFlags;
    surfDesc.fWidth = desc.fWidth;
    surfDesc.fHeight = desc.fHeight;
    surfDesc.fConfig = desc.fConfig;
    surfDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount());
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag);
    // In GL, Chrome assumes all textures are BottomLeft
    // In VK, we don't have this restriction
    surfDesc.fOrigin = resolve_origin(desc.fOrigin);

    GrVkTexture* texture = nullptr;
    if (renderTarget) {
        texture = GrVkTextureRenderTarget::CreateWrappedTextureRenderTarget(this, surfDesc,
                                                                            lifeCycle, format,
                                                                            imageRsrc);
    } else {
        texture = GrVkTexture::CreateWrappedTexture(this, surfDesc, lifeCycle, format, imageRsrc);
    }
    if (!texture) {
        return nullptr;
    }

    return texture;
}

GrRenderTarget* GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& wrapDesc,
                                                   GrWrapOwnership ownership) {
    // TODO: determine what format Chrome will actually send us and turn it into a Resource
    GrVkImage::Resource* imageRsrc =
            reinterpret_cast<GrVkImage::Resource*>(wrapDesc.fRenderTargetHandle);

    GrGpuResource::LifeCycle lifeCycle = (kAdopt_GrWrapOwnership == ownership)
                                                 ? GrGpuResource::kAdopted_LifeCycle
                                                 : GrGpuResource::kBorrowed_LifeCycle;

    GrSurfaceDesc desc;
    desc.fConfig = wrapDesc.fConfig;
    desc.fFlags = kCheckAllocation_GrSurfaceFlag;
    desc.fWidth = wrapDesc.fWidth;
    desc.fHeight = wrapDesc.fHeight;
    desc.fSampleCnt = SkTMin(wrapDesc.fSampleCnt, this->caps()->maxSampleCount());

    desc.fOrigin = resolve_origin(wrapDesc.fOrigin);

    GrVkRenderTarget* tgt = GrVkRenderTarget::CreateWrappedRenderTarget(this, desc,
                                                                        lifeCycle, imageRsrc);
    if (tgt && wrapDesc.fStencilBits) {
        if (!this->createStencilAttachmentForRenderTarget(tgt, desc.fWidth, desc.fHeight)) {
            tgt->unref();
            return nullptr;
        }
    }
    return tgt;
}

////////////////////////////////////////////////////////////////////////////////
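
// Before binding, each buffer gets a host-write -> device-read barrier so any
// CPU-side updates are visible to the vertex input stage before the draw.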
void GrVkGpu::bindGeometry(const GrPrimitiveProcessor& primProc,
                           const GrNonInstancedVertices& vertices) {
    GrVkVertexBuffer* vbuf;
    vbuf = (GrVkVertexBuffer*)vertices.vertexBuffer();
    SkASSERT(vbuf);
    SkASSERT(!vbuf->isMapped());

    vbuf->addMemoryBarrier(this,
                           VK_ACCESS_HOST_WRITE_BIT,
                           VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
                           VK_PIPELINE_STAGE_HOST_BIT,
                           VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                           false);

    fCurrentCmdBuffer->bindVertexBuffer(this, vbuf);

    if (vertices.isIndexed()) {
        GrVkIndexBuffer* ibuf = (GrVkIndexBuffer*)vertices.indexBuffer();
        SkASSERT(ibuf);
        SkASSERT(!ibuf->isMapped());

        ibuf->addMemoryBarrier(this,
                               VK_ACCESS_HOST_WRITE_BIT,
                               VK_ACCESS_INDEX_READ_BIT,
                               VK_PIPELINE_STAGE_HOST_BIT,
                               VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                               false);

        fCurrentCmdBuffer->bindIndexBuffer(this, ibuf);
    }
}

void GrVkGpu::buildProgramDesc(GrProgramDesc* desc,
                               const GrPrimitiveProcessor& primProc,
                               const GrPipeline& pipeline) const {
    if (!GrVkProgramDescBuilder::Build(desc, primProc, pipeline, *this->vkCaps().glslCaps())) {
        SkDEBUGFAIL("Failed to generate Vulkan program descriptor");
    }
}

////////////////////////////////////////////////////////////////////////////////

GrStencilAttachment* GrVkGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt,
                                                                     int width,
                                                                     int height) {
    SkASSERT(rt->asTexture());
    SkASSERT(width >= rt->width());
    SkASSERT(height >= rt->height());

    int samples = rt->numStencilSamples();

    SkASSERT(this->vkCaps().stencilFormats().count());
    const GrVkCaps::StencilFormat& sFmt = this->vkCaps().stencilFormats()[0];

    GrVkStencilAttachment* stencil(GrVkStencilAttachment::Create(this,
                                                                 GrGpuResource::kCached_LifeCycle,
                                                                 width,
                                                                 height,
                                                                 samples,
                                                                 sFmt));
    fStats.incStencilAttachmentCreates();
    return stencil;
}

////////////////////////////////////////////////////////////////////////////////
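
// Testing-only path: the GrBackendObject handed back here is just the
// GrVkImage::Resource pointer cast to an integer; isTestingOnlyBackendTexture
// and deleteTestingOnlyBackendTexture below expect to get the same pointer back.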
GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, int h,
                                                         GrPixelConfig config) {
    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
        return 0;
    }

    bool linearTiling = false;
    if (!fVkCaps->isConfigTexturable(config)) {
        return 0;
    }

    if (fVkCaps->isConfigTexurableLinearly(config)) {
        linearTiling = true;
    }

    // Currently this is not supported since it requires a copy which has not yet been implemented.
    if (srcData && !linearTiling) {
        return 0;
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VkFlags memProps = (srcData && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
                                                   VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

    // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is
    // requested, this ImageDesc describes the resolved texture. Therefore we always have samples
    // set to 1.
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = pixelFormat;
    imageDesc.fWidth = w;
    imageDesc.fHeight = h;
    imageDesc.fLevels = 1;
    imageDesc.fSamples = 1;
    imageDesc.fImageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = usageFlags;
    imageDesc.fMemProps = memProps;

    const GrVkImage::Resource* imageRsrc = GrVkImage::CreateResource(this, imageDesc);
    if (!imageRsrc) {
        return 0;
    }

    if (srcData) {
        if (linearTiling) {
            const VkImageSubresource subres = {
                VK_IMAGE_ASPECT_COLOR_BIT,
                0, // mipLevel
                0, // arraySlice
            };
            VkSubresourceLayout layout;
            VkResult err;

            const GrVkInterface* interface = this->vkInterface();

            GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
                                                            imageRsrc->fImage,
                                                            &subres,
                                                            &layout));

            void* mapPtr;
            err = GR_VK_CALL(interface, MapMemory(fDevice,
                                                  imageRsrc->fAlloc,
                                                  0,
                                                  layout.rowPitch * h,
                                                  0,
                                                  &mapPtr));
            if (err) {
                imageRsrc->unref(this);
                return 0;
            }

            size_t bpp = GrBytesPerPixel(config);
            size_t rowCopyBytes = bpp * w;
            // If there is no padding on dst (layout.rowPitch) we can do a single memcpy.
            // This assumes the srcData comes in with no padding.
            if (rowCopyBytes == layout.rowPitch) {
                memcpy(mapPtr, srcData, rowCopyBytes * h);
            } else {
                SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), srcData, rowCopyBytes,
                             rowCopyBytes, h);
            }
            GR_VK_CALL(interface, UnmapMemory(fDevice, imageRsrc->fAlloc));
        } else {
            // TODO: Add support for copying to optimal tiling
            SkASSERT(false);
        }
    }

    return (GrBackendObject)imageRsrc;
}

bool GrVkGpu::isTestingOnlyBackendTexture(GrBackendObject id) const {
    GrVkImage::Resource* backend = reinterpret_cast<GrVkImage::Resource*>(id);

    if (backend && backend->fImage && backend->fAlloc) {
        VkMemoryRequirements req;
        memset(&req, 0, sizeof(req));
        GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
                                                                   backend->fImage,
                                                                   &req));
        // TODO: find a better check
        // This will probably fail with a different driver
        return (req.size > 0) && (req.size <= 8192 * 8192);
    }

    return false;
}

void GrVkGpu::deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandon) {
    GrVkImage::Resource* backend = reinterpret_cast<GrVkImage::Resource*>(id);

    if (backend) {
        if (!abandon) {
            backend->unref(this);
        } else {
            backend->unrefAndAbandon();
        }
    }
}

////////////////////////////////////////////////////////////////////////////////
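
// The three barrier helpers below differ only in the barrier struct they
// forward; all of them funnel into GrVkCommandBuffer::pipelineBarrier with the
// matching BarrierType tag.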
void GrVkGpu::addMemoryBarrier(VkPipelineStageFlags srcStageMask,
                               VkPipelineStageFlags dstStageMask,
                               bool byRegion,
                               VkMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                     VkPipelineStageFlags dstStageMask,
                                     bool byRegion,
                                     VkBufferMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kBufferMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::addImageMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                    VkPipelineStageFlags dstStageMask,
                                    bool byRegion,
                                    VkImageMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kImageMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::finishDrawTarget() {
    // Submit the current command buffer to the Queue
    this->submitCommandBuffer(kSkip_SyncQueue);
}

void GrVkGpu::clearStencil(GrRenderTarget* target) {
    if (nullptr == target) {
        return;
    }
    GrStencilAttachment* stencil = target->renderTargetPriv().getStencilAttachment();
    GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));

    VkImageLayout origDstLayout = vkStencil->currentLayout();

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    vkStencil->setImageLayout(this,
                              VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                              srcAccessMask,
                              dstAccessMask,
                              srcStageMask,
                              dstStageMask,
                              false);

    VkImageSubresourceRange subRange;
    memset(&subRange, 0, sizeof(VkImageSubresourceRange));
    subRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    subRange.baseMipLevel = 0;
    subRange.levelCount = 1;
    subRange.baseArrayLayer = 0;
    subRange.layerCount = 1;

    // TODO: I imagine that most times we want to clear a stencil it will be at the beginning of a
    // draw. Thus we should look into using the load op functions on the render pass to clear out
    // the stencil there.
    fCurrentCmdBuffer->clearDepthStencilImage(this, vkStencil, &vkStencilColor, 1, &subRange);
}

void GrVkGpu::onClearStencilClip(GrRenderTarget* target, const SkIRect& rect, bool insideClip) {
    SkASSERT(target);

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
    GrStencilAttachment* sb = target->renderTargetPriv().getStencilAttachment();
    GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)sb;

    // this should only be called internally when we know we have a
    // stencil buffer.
    SkASSERT(sb);
    int stencilBitCount = sb->bits();

    // The contract with the callers does not guarantee that we preserve all bits in the stencil
    // during this clear. Thus we will clear the entire stencil to the desired value.

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
    if (insideClip) {
        vkStencilColor.stencil = (1 << (stencilBitCount - 1));
    } else {
        vkStencilColor.stencil = 0;
    }

    VkImageLayout origDstLayout = vkStencil->currentLayout();
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    VkPipelineStageFlags srcStageMask =
            GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    vkStencil->setImageLayout(this,
                              VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                              srcAccessMask,
                              dstAccessMask,
                              srcStageMask,
                              dstStageMask,
                              false);

    VkClearRect clearRect;
    // Flip rect if necessary
    SkIRect vkRect = rect;

    if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) {
        vkRect.fTop = vkRT->height() - rect.fBottom;
        vkRect.fBottom = vkRT->height() - rect.fTop;
    }

    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };

    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
    SkASSERT(renderPass);
    fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

    uint32_t stencilIndex;
    SkAssertResult(renderPass->stencilAttachmentIndex(&stencilIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    attachment.colorAttachment = 0; // this value shouldn't matter
    attachment.clearValue.depthStencil = vkStencilColor;

    fCurrentCmdBuffer->clearAttachments(this, 1, &attachment, 1, &clearRect);
    fCurrentCmdBuffer->endRenderPass(this);
}

void GrVkGpu::onClear(GrRenderTarget* target, const SkIRect& rect, GrColor color) {
    // parent class should never let us get here with no RT
    SkASSERT(target);

    VkClearColorValue vkColor;
    GrColorToRGBAFloat(color, vkColor.float32);

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
    VkImageLayout origDstLayout = vkRT->currentLayout();
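
    // A partial clear is recorded inside a render pass via clearAttachments;
    // a clear of the full surface falls through to clearColorImage below.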
    if (rect.width() != target->width() || rect.height() != target->height()) {
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
        VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
        VkPipelineStageFlags srcStageMask =
                GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
        vkRT->setImageLayout(this,
                             VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                             srcAccessMask,
                             dstAccessMask,
                             srcStageMask,
                             dstStageMask,
                             false);

        VkClearRect clearRect;
        // Flip rect if necessary
        SkIRect vkRect = rect;
        if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) {
            vkRect.fTop = vkRT->height() - rect.fBottom;
            vkRect.fBottom = vkRT->height() - rect.fTop;
        }
        clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
        clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
        clearRect.baseArrayLayer = 0;
        clearRect.layerCount = 1;

        const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
        SkASSERT(renderPass);
        fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

        uint32_t colorIndex;
        SkAssertResult(renderPass->colorAttachmentIndex(&colorIndex));

        VkClearAttachment attachment;
        attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        attachment.colorAttachment = colorIndex;
        attachment.clearValue.color = vkColor;

        fCurrentCmdBuffer->clearAttachments(this, 1, &attachment, 1, &clearRect);
        fCurrentCmdBuffer->endRenderPass(this);
        return;
    }

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    vkRT->setImageLayout(this,
                         VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                         srcAccessMask,
                         dstAccessMask,
                         srcStageMask,
                         dstStageMask,
                         false);

    VkImageSubresourceRange subRange;
    memset(&subRange, 0, sizeof(VkImageSubresourceRange));
    subRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    subRange.baseMipLevel = 0;
    subRange.levelCount = 1;
    subRange.baseArrayLayer = 0;
    subRange.layerCount = 1;

    // In the future we may not actually be doing this type of clear at all. If we are inside a
    // render pass or doing a non full clear then we will use CmdClearColorAttachment. The more
    // common use case will be clearing an attachment at the start of a render pass, in which case
    // we will use the clear load ops.
    fCurrentCmdBuffer->clearColorImage(this,
                                       vkRT,
                                       &vkColor,
                                       1, &subRange);
}
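
// onCopySurface tries the strategies below in order; for now only the
// vkCmdCopyImage path is implemented and the draw fallback always reports false.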

inline bool can_copy_image(const GrSurface* dst,
                           const GrSurface* src,
                           const GrVkGpu* gpu) {
    if (src->asTexture() &&
        dst->asTexture() &&
        src->origin() == dst->origin() &&
        src->config() == dst->config()) {
        return true;
    }

    // How does msaa play into this? If a VkTexture is multisampled, are we copying the
    // multisampled or the resolved image here?

    return false;
}

void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst,
                                     GrSurface* src,
                                     const SkIRect& srcRect,
                                     const SkIPoint& dstPoint) {
    SkASSERT(can_copy_image(dst, src, this));

    // Insert memory barriers to switch src and dst to transfer_source and transfer_dst layouts
    GrVkTexture* dstTex = static_cast<GrVkTexture*>(dst->asTexture());
    GrVkTexture* srcTex = static_cast<GrVkTexture*>(src->asTexture());

    VkImageLayout origDstLayout = dstTex->currentLayout();
    VkImageLayout origSrcLayout = srcTex->currentLayout();

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    // These flags are for flushing/invalidating caches and for the dst image it doesn't matter if
    // the cache is flushed since it is only being written to.
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    dstTex->setImageLayout(this,
                           VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                           srcAccessMask,
                           dstAccessMask,
                           srcStageMask,
                           dstStageMask,
                           false);

    srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origSrcLayout);
    dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origSrcLayout);
    dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;

    srcTex->setImageLayout(this,
                           VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                           srcAccessMask,
                           dstAccessMask,
                           srcStageMask,
                           dstStageMask,
                           false);

    // Flip rect if necessary
    SkIRect srcVkRect = srcRect;
    int32_t dstY = dstPoint.fY;

    if (kBottomLeft_GrSurfaceOrigin == src->origin()) {
        SkASSERT(kBottomLeft_GrSurfaceOrigin == dst->origin());
        srcVkRect.fTop = src->height() - srcRect.fBottom;
        srcVkRect.fBottom = src->height() - srcRect.fTop;
        dstY = dst->height() - dstPoint.fY - srcVkRect.height();
    }

    VkImageCopy copyRegion;
    memset(&copyRegion, 0, sizeof(VkImageCopy));
    copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.srcOffset = { srcVkRect.fLeft, srcVkRect.fTop, 0 };
    copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.dstOffset = { dstPoint.fX, dstY, 0 };
    // extent depth must be 1 for copies between 2D images
    copyRegion.extent = { (uint32_t)srcVkRect.width(), (uint32_t)srcVkRect.height(), 1 };

    fCurrentCmdBuffer->copyImage(this,
                                 srcTex,
                                 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                 dstTex,
                                 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                 1,
                                 &copyRegion);
}

inline bool can_copy_as_draw(const GrSurface* dst,
                             const GrSurface* src,
                             const GrVkGpu* gpu) {
    return false;
}

void GrVkGpu::copySurfaceAsDraw(GrSurface* dst,
                                GrSurface* src,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint) {
    SkASSERT(false);
}

bool GrVkGpu::onCopySurface(GrSurface* dst,
                            GrSurface* src,
                            const SkIRect& srcRect,
                            const SkIPoint& dstPoint) {
    if (can_copy_image(dst, src, this)) {
        this->copySurfaceAsCopyImage(dst, src, srcRect, dstPoint);
        return true;
    }

    if (can_copy_as_draw(dst, src, this)) {
        this->copySurfaceAsDraw(dst, src, srcRect, dstPoint);
        return true;
    }

    return false;
}

void GrVkGpu::onGetMultisampleSpecs(GrRenderTarget* rt, const GrStencilSettings&,
                                    int* effectiveSampleCnt, SkAutoTDeleteArray<SkPoint>*) {
    // TODO: stub.
    SkASSERT(!this->caps()->sampleLocationsSupport());
    *effectiveSampleCnt = rt->desc().fSampleCnt;
}

bool GrVkGpu::onGetReadPixelsInfo(GrSurface* srcSurface, int width, int height, size_t rowBytes,
                                  GrPixelConfig readConfig, DrawPreference* drawPreference,
                                  ReadPixelTempDrawInfo* tempDrawInfo) {
    // Currently we don't handle draws, so if the caller wants/needs to do a draw we need to fail
    if (kNoDraw_DrawPreference != *drawPreference) {
        return false;
    }

    if (srcSurface->config() != readConfig) {
        // TODO: This should fall back to drawing or copying to change config of srcSurface to
        // match that of readConfig.
        return false;
    }

    return true;
}

bool GrVkGpu::onReadPixels(GrSurface* surface,
                           int left, int top, int width, int height,
                           GrPixelConfig config,
                           void* buffer,
                           size_t rowBytes) {
    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
        return false;
    }

    GrVkTexture* tgt = static_cast<GrVkTexture*>(surface->asTexture());
    if (!tgt) {
        return false;
    }

    // Change layout of our target so it can be used as the copy source
    VkImageLayout layout = tgt->currentLayout();
    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
    tgt->setImageLayout(this,
                        VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                        srcAccessMask,
                        dstAccessMask,
                        srcStageMask,
                        dstStageMask,
                        false);

    GrVkTransferBuffer* transferBuffer =
            reinterpret_cast<GrVkTransferBuffer*>(this->createTransferBuffer(rowBytes * height,
                                                                             kGpuToCpu_TransferType));

    bool flipY = kBottomLeft_GrSurfaceOrigin == surface->origin();
    VkOffset3D offset = {
        left,
        flipY ? surface->height() - top - height : top,
        0
    };

    // Copy the image to a buffer so we can map it to cpu memory
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = 0;
    region.bufferRowLength = 0;   // Forces RowLength to be imageExtent.width
    region.bufferImageHeight = 0; // Forces height to be tightly packed. Only useful for 3d images.
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = offset;
    region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

    fCurrentCmdBuffer->copyImageToBuffer(this,
                                         tgt,
                                         VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                         transferBuffer,
                                         1,
                                         &region);

    // make sure the copy to buffer has finished
    transferBuffer->addMemoryBarrier(this,
                                     VK_ACCESS_TRANSFER_WRITE_BIT,
                                     VK_ACCESS_HOST_READ_BIT,
                                     VK_PIPELINE_STAGE_TRANSFER_BIT,
                                     VK_PIPELINE_STAGE_HOST_BIT,
                                     false);

    // We need to submit the current command buffer to the Queue and make sure it finishes before
    // we can copy the data out of the buffer.
    this->submitCommandBuffer(kForce_SyncQueue);

    void* mappedMemory = transferBuffer->map();

    memcpy(buffer, mappedMemory, rowBytes*height);

    transferBuffer->unmap();
    transferBuffer->unref();
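
    // The buffer rows come back in Vulkan's top-to-bottom order; for a
    // bottom-left-origin surface, swap rows in place so the caller sees the
    // expected orientation.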
    if (flipY) {
        SkAutoSMalloc<32 * sizeof(GrColor)> scratch;
        size_t tightRowBytes = GrBytesPerPixel(config) * width;
        scratch.reset(tightRowBytes);
        void* tmpRow = scratch.get();
        // flip y in-place by rows
        const int halfY = height >> 1;
        char* top = reinterpret_cast<char*>(buffer);
        char* bottom = top + (height - 1) * rowBytes;
        for (int y = 0; y < halfY; y++) {
            memcpy(tmpRow, top, tightRowBytes);
            memcpy(top, bottom, tightRowBytes);
            memcpy(bottom, tmpRow, tightRowBytes);
            top += rowBytes;
            bottom -= rowBytes;
        }
    }

    return true;
}

void GrVkGpu::onDraw(const DrawArgs& args, const GrNonInstancedVertices& vertices) {
    GrRenderTarget* rt = args.fPipeline->getRenderTarget();
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
    const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
    SkASSERT(renderPass);

    GrVkProgram* program = GrVkProgramBuilder::CreateProgram(this, args,
                                                             vertices.primitiveType(),
                                                             *renderPass);

    if (!program) {
        return;
    }

    program->setData(this, *args.fPrimitiveProcessor, *args.fPipeline);

    fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

    program->bind(this, fCurrentCmdBuffer);

    this->bindGeometry(*args.fPrimitiveProcessor, vertices);

    // Change layout of our render target so it can be used as the color attachment
    VkImageLayout layout = vkRT->currentLayout();
    // Our color attachment is purely a destination and won't be read so don't need to flush or
    // invalidate any caches
    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    vkRT->setImageLayout(this,
                         VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                         srcAccessMask,
                         dstAccessMask,
                         srcStageMask,
                         dstStageMask,
                         false);

    // If we are using a stencil attachment we also need to update its layout
    if (!args.fPipeline->getStencil().isDisabled()) {
        GrStencilAttachment* stencil = vkRT->renderTargetPriv().getStencilAttachment();
        GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
        VkImageLayout origDstLayout = vkStencil->currentLayout();
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
        VkAccessFlags dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
        VkPipelineStageFlags srcStageMask =
                GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
        vkStencil->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                                  srcAccessMask,
                                  dstAccessMask,
                                  srcStageMask,
                                  dstStageMask,
                                  false);
    }

    if (vertices.isIndexed()) {
        fCurrentCmdBuffer->drawIndexed(this,
                                       vertices.indexCount(),
                                       1,
                                       vertices.startIndex(),
                                       vertices.startVertex(),
                                       0);
    } else {
        fCurrentCmdBuffer->draw(this, vertices.vertexCount(), 1, vertices.startVertex(), 0);
    }

    fCurrentCmdBuffer->endRenderPass(this);

    // Technically we don't have to call this here (since there is a safety check in
    // program::setData), but this will allow for quicker freeing of resources if the program sits
    // in a cache for a while.
    program->freeTempResources(this);
    // This free will go away once we set up a program cache, and then the cache will be
    // responsible for calling freeGPUResources.
    program->freeGPUResources(this);
    program->unref();

#if SWAP_PER_DRAW
    glFlush();
#if defined(SK_BUILD_FOR_MAC)
    aglSwapBuffers(aglGetCurrentContext());
    int set_a_break_pt_here = 9;
    aglSwapBuffers(aglGetCurrentContext());
#elif defined(SK_BUILD_FOR_WIN32)
    SwapBuf();
    int set_a_break_pt_here = 9;
    SwapBuf();
#endif
#endif
}