/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkGpu.h"

#include "GrContextOptions.h"
#include "GrGeometryProcessor.h"
#include "GrGpuResourceCacheAccess.h"
#include "GrPipeline.h"
#include "GrRenderTargetPriv.h"
#include "GrSurfacePriv.h"
#include "GrTexturePriv.h"
#include "GrVertices.h"

#include "GrVkCommandBuffer.h"
#include "GrVkImage.h"
#include "GrVkIndexBuffer.h"
#include "GrVkMemory.h"
#include "GrVkPipeline.h"
#include "GrVkProgram.h"
#include "GrVkProgramBuilder.h"
#include "GrVkProgramDesc.h"
#include "GrVkRenderPass.h"
#include "GrVkResourceProvider.h"
#include "GrVkTexture.h"
#include "GrVkTextureRenderTarget.h"
#include "GrVkTransferBuffer.h"
#include "GrVkVertexBuffer.h"

#include "SkConfig8888.h"

#include "vk/GrVkInterface.h"

#define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
#define VK_CALL_RET(RET, X) GR_VK_CALL_RET(this->vkInterface(), RET, X)
#define VK_CALL_ERRCHECK(X) GR_VK_CALL_ERRCHECK(this->vkInterface(), X)
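
// These macros simply forward to the GR_VK_CALL* wrappers with this GrVkGpu's GrVkInterface, so
// every Vulkan call in this file is dispatched through this->vkInterface().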

////////////////////////////////////////////////////////////////////////////////
// Stuff used to set up a GrVkGpu secretly for now.

// For now the VkGpuCreate is using the same signature as GL. This is mostly for ease of
// hiding this code from official Skia. In the end the VkGpuCreate will not take a
// GrBackendContext and most likely would take an optional device and queues to use.
GrGpu* vk_gpu_create(GrBackendContext backendContext, const GrContextOptions& options,
                     GrContext* context) {
    // Below is Vulkan setup code that normally would be done by a client, but which we do here
    // for now for testing purposes.
    VkPhysicalDevice physDev;
    VkDevice device;
    VkInstance inst;
    VkResult err;

    const VkApplicationInfo app_info = {
        VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
        nullptr,                            // pNext
        "vktest",                           // pApplicationName
        0,                                  // applicationVersion
        "vktest",                           // pEngineName
        0,                                  // engineVersion
        VK_API_VERSION,                     // apiVersion
    };
    const VkInstanceCreateInfo instance_create = {
        VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
        nullptr,                                // pNext
        0,                                      // flags
        &app_info,                              // pApplicationInfo
        0,                                      // enabledLayerNameCount
        nullptr,                                // ppEnabledLayerNames
        0,                                      // enabledExtensionNameCount
        nullptr,                                // ppEnabledExtensionNames
    };
    err = vkCreateInstance(&instance_create, nullptr, &inst);
    if (err < 0) {
        SkDebugf("vkCreateInstance failed: %d\n", err);
        SkFAIL("failing");
    }

    uint32_t gpuCount;
    err = vkEnumeratePhysicalDevices(inst, &gpuCount, nullptr);
    if (err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        SkFAIL("failing");
    }
    SkASSERT(gpuCount > 0);
    // Just returning the first physical device instead of getting the whole array.
    gpuCount = 1;
    err = vkEnumeratePhysicalDevices(inst, &gpuCount, &physDev);
    if (err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        SkFAIL("failing");
    }

    // query to get the initial queue props size
    uint32_t queueCount;
    vkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr);
    SkASSERT(queueCount >= 1);

    SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties));
    // now get the actual queue props
    VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get();

    vkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps);

    // iterate to find the graphics queue
    uint32_t graphicsQueueIndex = -1;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphicsQueueIndex = i;
            break;
        }
    }
    SkASSERT(graphicsQueueIndex < queueCount);

    float queuePriorities[1] = { 0.0f };
    const VkDeviceQueueCreateInfo queueInfo = {
        VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
        nullptr,                                    // pNext
        0,                                          // VkDeviceQueueCreateFlags
        graphicsQueueIndex,                         // queueFamilyIndex
        1,                                          // queueCount
        queuePriorities,                            // pQueuePriorities
    };
    const VkDeviceCreateInfo deviceInfo = {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
        nullptr,                              // pNext
        0,                                    // VkDeviceCreateFlags
        1,                                    // queueCreateInfoCount
        &queueInfo,                           // pQueueCreateInfos
        0,                                    // layerCount
        nullptr,                              // ppEnabledLayerNames
        0,                                    // extensionCount
        nullptr,                              // ppEnabledExtensionNames
        nullptr                               // ppEnabledFeatures
    };

    err = vkCreateDevice(physDev, &deviceInfo, nullptr, &device);
    if (err) {
        SkDebugf("CreateDevice failed: %d\n", err);
        SkFAIL("failing");
    }

    VkQueue queue;
    vkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);

    const VkCommandPoolCreateInfo cmdPoolInfo = {
        VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType
        nullptr,                                    // pNext
        0,                                          // CmdPoolCreateFlags
        graphicsQueueIndex,                         // queueFamilyIndex
    };

    VkCommandPool cmdPool;
    err = vkCreateCommandPool(device, &cmdPoolInfo, nullptr, &cmdPool);
    if (err) {
        SkDebugf("CreateCommandPool failed: %d\n", err);
        SkFAIL("failing");
    }

    return new GrVkGpu(context, options, physDev, device, queue, cmdPool, inst);
}
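
// A minimal sketch of how a test harness might stand the backend up with the function above
// (assumes a GrContext and GrContextOptions already exist; the GrBackendContext argument is
// unused by this setup path, so 0 is passed):
//
//     GrContextOptions options;
//     GrGpu* gpu = vk_gpu_create(0 /* backendContext, ignored */, options, context);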

////////////////////////////////////////////////////////////////////////////////

GrVkGpu::GrVkGpu(GrContext* context, const GrContextOptions& options,
                 VkPhysicalDevice physDev, VkDevice device, VkQueue queue, VkCommandPool cmdPool,
                 VkInstance inst)
    : INHERITED(context)
    , fDevice(device)
    , fQueue(queue)
    , fCmdPool(cmdPool)
    , fResourceProvider(this)
    , fVkInstance(inst) {
    fInterface.reset(GrVkCreateInterface(fVkInstance));
    fCompiler = shaderc_compiler_initialize();

    fVkCaps.reset(new GrVkCaps(options, fInterface, physDev));
    fCaps.reset(SkRef(fVkCaps.get()));

    fResourceProvider.init();

    fCurrentCmdBuffer = fResourceProvider.createCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->begin(this);
    VK_CALL(GetPhysicalDeviceMemoryProperties(physDev, &fPhysDevMemProps));
}

GrVkGpu::~GrVkGpu() {
    shaderc_compiler_release(fCompiler);
    fCurrentCmdBuffer->end(this);
    fCurrentCmdBuffer->unref(this);

    // wait for all commands to finish
    VK_CALL(QueueWaitIdle(fQueue));

    // must call this just before we destroy the VkDevice
    fResourceProvider.destroyResources();

    VK_CALL(DestroyCommandPool(fDevice, fCmdPool, nullptr));
    VK_CALL(DestroyDevice(fDevice, nullptr));
    VK_CALL(DestroyInstance(fVkInstance, nullptr));
}

///////////////////////////////////////////////////////////////////////////////

void GrVkGpu::submitCommandBuffer(SyncQueue sync) {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->end(this);

    fCurrentCmdBuffer->submitToQueue(this, fQueue, sync);
    fResourceProvider.checkCommandBuffers();

    // Release old command buffer and create a new one
    fCurrentCmdBuffer->unref(this);
    fCurrentCmdBuffer = fResourceProvider.createCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);

    fCurrentCmdBuffer->begin(this);
}
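
// Note on SyncQueue: callers pass kSkip_SyncQueue when they only need the work queued (e.g. after
// an upload or clear), and kForce_SyncQueue when they must read results back on the host, as
// onReadPixels does below before mapping the transfer buffer.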

///////////////////////////////////////////////////////////////////////////////
GrVertexBuffer* GrVkGpu::onCreateVertexBuffer(size_t size, bool dynamic) {
    return GrVkVertexBuffer::Create(this, size, dynamic);
}

GrIndexBuffer* GrVkGpu::onCreateIndexBuffer(size_t size, bool dynamic) {
    return GrVkIndexBuffer::Create(this, size, dynamic);
}

GrTransferBuffer* GrVkGpu::onCreateTransferBuffer(size_t size, TransferType type) {
    GrVkBuffer::Type bufferType = kCpuToGpu_TransferType == type ? GrVkBuffer::kCopyRead_Type
                                                                 : GrVkBuffer::kCopyWrite_Type;
    return GrVkTransferBuffer::Create(this, size, bufferType);
}

////////////////////////////////////////////////////////////////////////////////
bool GrVkGpu::onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
                                   GrPixelConfig srcConfig, DrawPreference* drawPreference,
                                   WritePixelTempDrawInfo* tempDrawInfo) {
    if (kIndex_8_GrPixelConfig == srcConfig || GrPixelConfigIsCompressed(dstSurface->config())) {
        return false;
    }

    // Currently we don't handle draws, so if the caller wants/needs to do a draw we need to fail
    if (kNoDraw_DrawPreference != *drawPreference) {
        return false;
    }

    if (dstSurface->config() != srcConfig) {
        // TODO: This should fall back to drawing or copying to change config of dstSurface to
        // match that of srcConfig.
        return false;
    }

    return true;
}

bool GrVkGpu::onWritePixels(GrSurface* surface,
                            int left, int top, int width, int height,
                            GrPixelConfig config,
                            const SkTArray<GrMipLevel>& texels) {
    GrVkTexture* vkTex = static_cast<GrVkTexture*>(surface->asTexture());
    if (!vkTex) {
        return false;
    }

    // TODO: We're ignoring MIP levels here.
    if (texels.empty() || !texels.begin()->fPixels) {
        return false;
    }

    // We assume Vulkan doesn't do sRGB <-> linear conversions when reading and writing pixels.
    if (GrPixelConfigIsSRGB(surface->config()) != GrPixelConfigIsSRGB(config)) {
        return false;
    }

    bool success = false;
    if (GrPixelConfigIsCompressed(vkTex->desc().fConfig)) {
        // We check that config == desc.fConfig in GrGpu::getWritePixelsInfo()
        SkASSERT(config == vkTex->desc().fConfig);
        // TODO: add compressed texture support
        // delete the following two lines and uncomment the two after that when ready
        vkTex->unref();
        return false;
        //success = this->uploadCompressedTexData(vkTex->desc(), buffer, false, left, top, width,
        //                                        height);
    } else {
        bool linearTiling = vkTex->isLinearTiled();
        if (linearTiling && VK_IMAGE_LAYOUT_PREINITIALIZED != vkTex->currentLayout()) {
            // Need to change the layout to general in order to perform a host write
            VkImageLayout layout = vkTex->currentLayout();
            VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
            VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_HOST_BIT;
            VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
            VkAccessFlags dstAccessMask = VK_ACCESS_HOST_WRITE_BIT;
            vkTex->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_GENERAL,
                                  srcAccessMask,
                                  dstAccessMask,
                                  srcStageMask,
                                  dstStageMask,
                                  false);
        }
        success = this->uploadTexData(vkTex, left, top, width, height, config,
                                      texels.begin()->fPixels, texels.begin()->fRowBytes);
    }

    if (success) {
        vkTex->texturePriv().dirtyMipMaps(true);
        return true;
    }

    return false;
}
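
// uploadTexData handles two paths: for linearly tiled images (in PREINITIALIZED or GENERAL
// layout) the pixels are written directly through mapped image memory; for optimally tiled
// images they are staged in a GrVkTransferBuffer and copied with copyBufferToImage, flipping
// bottom-left origins along the way.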
bool GrVkGpu::uploadTexData(GrVkTexture* tex,
                            int left, int top, int width, int height,
                            GrPixelConfig dataConfig,
                            const void* data,
                            size_t rowBytes) {
    SkASSERT(data);

    // If we're uploading compressed data then we should be using uploadCompressedTexData
    SkASSERT(!GrPixelConfigIsCompressed(dataConfig));

    bool linearTiling = tex->isLinearTiled();

    size_t bpp = GrBytesPerPixel(dataConfig);

    const GrSurfaceDesc& desc = tex->desc();

    if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
                                               &width, &height, &data, &rowBytes)) {
        return false;
    }
    size_t trimRowBytes = width * bpp;

    if (linearTiling) {
        SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == tex->currentLayout() ||
                 VK_IMAGE_LAYOUT_GENERAL == tex->currentLayout());
        const VkImageSubresource subres = {
            VK_IMAGE_ASPECT_COLOR_BIT,
            0,  // mipLevel
            0,  // arraySlice
        };
        VkSubresourceLayout layout;
        VkResult err;

        const GrVkInterface* interface = this->vkInterface();

        GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
                                                        tex->textureImage(),
                                                        &subres,
                                                        &layout));

        int texTop = kBottomLeft_GrSurfaceOrigin == desc.fOrigin ? tex->height() - top - height
                                                                 : top;
        VkDeviceSize offset = texTop*layout.rowPitch + left*bpp;
        VkDeviceSize size = height*layout.rowPitch;
        void* mapPtr;
        err = GR_VK_CALL(interface, MapMemory(fDevice, tex->textureMemory(), offset, size, 0,
                                              &mapPtr));
        if (err) {
            return false;
        }

        if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
            // copy into buffer by rows
            const char* srcRow = reinterpret_cast<const char*>(data);
            char* dstRow = reinterpret_cast<char*>(mapPtr)+(height - 1)*layout.rowPitch;
            for (int y = 0; y < height; y++) {
                memcpy(dstRow, srcRow, trimRowBytes);
                srcRow += rowBytes;
                dstRow -= layout.rowPitch;
            }
        } else {
            // If there is no padding on the src (rowBytes) or dst (layout.rowPitch) we can memcpy
            if (trimRowBytes == rowBytes && trimRowBytes == layout.rowPitch) {
                memcpy(mapPtr, data, trimRowBytes * height);
            } else {
                SkRectMemcpy(mapPtr, layout.rowPitch, data, rowBytes, trimRowBytes, height);
            }
        }

        GR_VK_CALL(interface, UnmapMemory(fDevice, tex->textureMemory()));
    } else {
        GrVkTransferBuffer* transferBuffer =
            GrVkTransferBuffer::Create(this, trimRowBytes * height, GrVkBuffer::kCopyRead_Type);

        void* mapPtr = transferBuffer->map();

        if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
            // copy into buffer by rows
            const char* srcRow = reinterpret_cast<const char*>(data);
            char* dstRow = reinterpret_cast<char*>(mapPtr)+(height - 1)*trimRowBytes;
            for (int y = 0; y < height; y++) {
                memcpy(dstRow, srcRow, trimRowBytes);
                srcRow += rowBytes;
                dstRow -= trimRowBytes;
            }
        } else {
            // If there is no padding on the src data rows, we can do a single memcpy
            if (trimRowBytes == rowBytes) {
                memcpy(mapPtr, data, trimRowBytes * height);
            } else {
                SkRectMemcpy(mapPtr, trimRowBytes, data, rowBytes, trimRowBytes, height);
            }
        }

        transferBuffer->unmap();

        // make sure the unmap has finished
        transferBuffer->addMemoryBarrier(this,
                                         VK_ACCESS_HOST_WRITE_BIT,
                                         VK_ACCESS_TRANSFER_READ_BIT,
                                         VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
                                         VK_PIPELINE_STAGE_TRANSFER_BIT,
                                         false);

        // Set up copy region
        bool flipY = kBottomLeft_GrSurfaceOrigin == tex->origin();
        VkOffset3D offset = {
            left,
            flipY ? tex->height() - top - height : top,
            0
        };

        VkBufferImageCopy region;
        memset(&region, 0, sizeof(VkBufferImageCopy));
        region.bufferOffset = 0;
        region.bufferRowLength = width;
        region.bufferImageHeight = height;
        region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
        region.imageOffset = offset;
        region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

        // Change layout of our target so it can be copied to
        VkImageLayout layout = tex->currentLayout();
        VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
        VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
        tex->setImageLayout(this,
                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                            srcAccessMask,
                            dstAccessMask,
                            srcStageMask,
                            dstStageMask,
                            false);

        // Copy the buffer to the image
        fCurrentCmdBuffer->copyBufferToImage(this,
                                             transferBuffer,
                                             tex,
                                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                             1,
                                             &region);

        // Submit the current command buffer to the Queue
        this->submitCommandBuffer(kSkip_SyncQueue);

        transferBuffer->unref();
    }

    return true;
}

////////////////////////////////////////////////////////////////////////////////
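// Note: a request for kZeroCopy_GrSurfaceFlag is honored by choosing linear tiling (and
// host-visible memory when initial texel data is supplied) so the client's data can be written
// without a staging copy; otherwise the image is created with optimal tiling in device-local
// memory.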
GrTexture* GrVkGpu::onCreateTexture(const GrSurfaceDesc& desc, GrGpuResource::LifeCycle lifeCycle,
                                    const SkTArray<GrMipLevel>& texels) {
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);

    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(desc.fConfig, &pixelFormat)) {
        return nullptr;
    }

    if (!fVkCaps->isConfigTexturable(desc.fConfig)) {
        return nullptr;
    }

    bool linearTiling = false;
    if (SkToBool(desc.fFlags & kZeroCopy_GrSurfaceFlag)) {
        if (fVkCaps->isConfigTexurableLinearly(desc.fConfig) &&
            (!renderTarget || fVkCaps->isConfigRenderableLinearly(desc.fConfig, false))) {
            linearTiling = true;
        } else {
            return nullptr;
        }
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    if (renderTarget) {
        usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    }

    // For now we will set the VK_IMAGE_USAGE_TRANSFER_DESTINATION_BIT and
    // VK_IMAGE_USAGE_TRANSFER_SOURCE_BIT on every texture since we do not know whether or not we
    // will be using this texture in some copy or not. Also this assumes, as is the current case,
    // that all render targets in vulkan are also textures. If we change this practice of setting
    // both bits, we must make sure to set the destination bit if we are uploading srcData to the
    // texture.
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VkFlags memProps = (!texels.empty() && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
                                                           VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

    // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is
    // requested, this ImageDesc describes the resolved texture. Therefore we always have samples
    // set to 1.
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = pixelFormat;
    imageDesc.fWidth = desc.fWidth;
    imageDesc.fHeight = desc.fHeight;
    imageDesc.fLevels = 1;
    imageDesc.fSamples = 1;
    imageDesc.fImageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = usageFlags;
    imageDesc.fMemProps = memProps;

    GrVkTexture* tex;
    if (renderTarget) {
        tex = GrVkTextureRenderTarget::CreateNewTextureRenderTarget(this, desc, lifeCycle,
                                                                    imageDesc);
#if 0
        // This clear can be included to fix warning described in https://bugs.skia.org/5045
        // Obviously we do not want to be clearing needlessly every time we create a render target.
        SkIRect rect = SkIRect::MakeWH(tex->width(), tex->height());
        this->clear(rect, GrColor_TRANSPARENT_BLACK, tex->asRenderTarget());
#endif
    } else {
        tex = GrVkTexture::CreateNewTexture(this, desc, lifeCycle, imageDesc);
    }

    if (!tex) {
        return nullptr;
    }

    // TODO: We're ignoring MIP levels here.
    if (!texels.empty()) {
        SkASSERT(texels.begin()->fPixels);
        if (!this->uploadTexData(tex, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
                                 texels.begin()->fPixels, texels.begin()->fRowBytes)) {
            tex->unref();
            return nullptr;
        }
    }

    return tex;
}

////////////////////////////////////////////////////////////////////////////////

static GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin) {
    // By default, all textures in Vk use TopLeft
    if (kDefault_GrSurfaceOrigin == origin) {
        return kTopLeft_GrSurfaceOrigin;
    } else {
        return origin;
    }
}

GrTexture* GrVkGpu::onWrapBackendTexture(const GrBackendTextureDesc& desc,
                                         GrWrapOwnership ownership) {
    VkFormat format;
    if (!GrPixelConfigToVkFormat(desc.fConfig, &format)) {
        return nullptr;
    }

    if (0 == desc.fTextureHandle) {
        return nullptr;
    }

    int maxSize = this->caps()->maxTextureSize();
    if (desc.fWidth > maxSize || desc.fHeight > maxSize) {
        return nullptr;
    }

    // TODO: determine what format Chrome will actually send us and turn it into a Resource
    GrVkImage::Resource* imageRsrc = reinterpret_cast<GrVkImage::Resource*>(desc.fTextureHandle);

    GrGpuResource::LifeCycle lifeCycle;
    switch (ownership) {
        case kAdopt_GrWrapOwnership:
            lifeCycle = GrGpuResource::kAdopted_LifeCycle;
            break;
        case kBorrow_GrWrapOwnership:
            lifeCycle = GrGpuResource::kBorrowed_LifeCycle;
            break;
    }

    GrSurfaceDesc surfDesc;
    // next line relies on GrBackendTextureDesc's flags matching GrTexture's
    surfDesc.fFlags = (GrSurfaceFlags)desc.fFlags;
    surfDesc.fWidth = desc.fWidth;
    surfDesc.fHeight = desc.fHeight;
    surfDesc.fConfig = desc.fConfig;
    surfDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount());
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag);
    // In GL, Chrome assumes all textures are BottomLeft
    // In VK, we don't have this restriction
    surfDesc.fOrigin = resolve_origin(desc.fOrigin);

    GrVkTexture* texture = nullptr;
    if (renderTarget) {
        texture = GrVkTextureRenderTarget::CreateWrappedTextureRenderTarget(this, surfDesc,
                                                                            lifeCycle, format,
                                                                            imageRsrc);
    } else {
        texture = GrVkTexture::CreateWrappedTexture(this, surfDesc, lifeCycle, format, imageRsrc);
    }
    if (!texture) {
        return nullptr;
    }

    return texture;
}
623
624GrRenderTarget* GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& wrapDesc,
625 GrWrapOwnership ownership) {
626
627 // TODO: determine what format Chrome will actually send us and turn it into a Resource
628 GrVkImage::Resource* imageRsrc =
629 reinterpret_cast<GrVkImage::Resource*>(wrapDesc.fRenderTargetHandle);
630
631 GrGpuResource::LifeCycle lifeCycle;
632 switch (ownership) {
633 case kAdopt_GrWrapOwnership:
634 lifeCycle = GrGpuResource::kAdopted_LifeCycle;
635 break;
636 case kBorrow_GrWrapOwnership:
637 lifeCycle = GrGpuResource::kBorrowed_LifeCycle;
638 break;
639 }
640
641 GrSurfaceDesc desc;
642 desc.fConfig = wrapDesc.fConfig;
643 desc.fFlags = kCheckAllocation_GrSurfaceFlag;
644 desc.fWidth = wrapDesc.fWidth;
645 desc.fHeight = wrapDesc.fHeight;
646 desc.fSampleCnt = SkTMin(wrapDesc.fSampleCnt, this->caps()->maxSampleCount());
647
648 desc.fOrigin = resolve_origin(wrapDesc.fOrigin);
649
650 GrVkRenderTarget* tgt = GrVkRenderTarget::CreateWrappedRenderTarget(this, desc,
651 lifeCycle, imageRsrc);
652 if (tgt && wrapDesc.fStencilBits) {
653 if (!createStencilAttachmentForRenderTarget(tgt, desc.fWidth, desc.fHeight)) {
654 tgt->unref();
655 return nullptr;
656 }
657 }
658 return tgt;
659}

////////////////////////////////////////////////////////////////////////////////

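// Before binding, we add host-write -> vertex-input barriers on the vertex (and index) buffers
// so that any CPU-side writes to their memory are visible before the vertex input stage reads
// them.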
void GrVkGpu::bindGeometry(const GrPrimitiveProcessor& primProc,
                           const GrNonInstancedVertices& vertices) {
    GrVkVertexBuffer* vbuf;
    vbuf = (GrVkVertexBuffer*)vertices.vertexBuffer();
    SkASSERT(vbuf);
    SkASSERT(!vbuf->isMapped());

    vbuf->addMemoryBarrier(this,
                           VK_ACCESS_HOST_WRITE_BIT,
                           VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
                           VK_PIPELINE_STAGE_HOST_BIT,
                           VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                           false);

    fCurrentCmdBuffer->bindVertexBuffer(this, vbuf);

    if (vertices.isIndexed()) {
        GrVkIndexBuffer* ibuf = (GrVkIndexBuffer*)vertices.indexBuffer();
        SkASSERT(ibuf);
        SkASSERT(!ibuf->isMapped());

        ibuf->addMemoryBarrier(this,
                               VK_ACCESS_HOST_WRITE_BIT,
                               VK_ACCESS_INDEX_READ_BIT,
                               VK_PIPELINE_STAGE_HOST_BIT,
                               VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                               false);

        fCurrentCmdBuffer->bindIndexBuffer(this, ibuf);
    }
}

void GrVkGpu::buildProgramDesc(GrProgramDesc* desc,
                               const GrPrimitiveProcessor& primProc,
                               const GrPipeline& pipeline) const {
    if (!GrVkProgramDescBuilder::Build(desc, primProc, pipeline, *this->vkCaps().glslCaps())) {
        SkDEBUGFAIL("Failed to generate Vk program descriptor");
    }
}

////////////////////////////////////////////////////////////////////////////////

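// For now the stencil attachment simply uses the first format reported by the caps; no attempt
// is made to pick the smallest format that satisfies the request.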
GrStencilAttachment* GrVkGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt,
                                                                     int width,
                                                                     int height) {
    SkASSERT(rt->asTexture());
    SkASSERT(width >= rt->width());
    SkASSERT(height >= rt->height());

    int samples = rt->numStencilSamples();

    SkASSERT(this->vkCaps().stencilFormats().count());
    const GrVkCaps::StencilFormat& sFmt = this->vkCaps().stencilFormats()[0];

    GrVkStencilAttachment* stencil(GrVkStencilAttachment::Create(this,
                                                                 GrGpuResource::kCached_LifeCycle,
                                                                 width,
                                                                 height,
                                                                 samples,
                                                                 sFmt));
    fStats.incStencilAttachmentCreates();
    return stencil;
}

////////////////////////////////////////////////////////////////////////////////

GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, int h,
                                                         GrPixelConfig config) {

    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
        return 0;
    }

    bool linearTiling = false;
    if (!fVkCaps->isConfigTexturable(config)) {
        return 0;
    }

    if (fVkCaps->isConfigTexurableLinearly(config)) {
        linearTiling = true;
    }

    // Currently this is not supported since it requires a copy which has not yet been implemented.
    if (srcData && !linearTiling) {
        return 0;
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VkFlags memProps = (srcData && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
                                                   VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

    // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is
    // requested, this ImageDesc describes the resolved texture. Therefore we always have samples
    // set to 1.
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = pixelFormat;
    imageDesc.fWidth = w;
    imageDesc.fHeight = h;
    imageDesc.fLevels = 1;
    imageDesc.fSamples = 1;
    imageDesc.fImageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = usageFlags;
    imageDesc.fMemProps = memProps;

    const GrVkImage::Resource* imageRsrc = GrVkImage::CreateResource(this, imageDesc);
    if (!imageRsrc) {
        return 0;
    }

    if (srcData) {
        if (linearTiling) {
            const VkImageSubresource subres = {
                VK_IMAGE_ASPECT_COLOR_BIT,
                0,  // mipLevel
                0,  // arraySlice
            };
            VkSubresourceLayout layout;
            VkResult err;

            const GrVkInterface* interface = this->vkInterface();

            GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
                                                            imageRsrc->fImage,
                                                            &subres,
                                                            &layout));

            void* mapPtr;
            err = GR_VK_CALL(interface, MapMemory(fDevice,
                                                  imageRsrc->fAlloc,
                                                  0,
                                                  layout.rowPitch * h,
                                                  0,
                                                  &mapPtr));
            if (err) {
                imageRsrc->unref(this);
                return 0;
            }

            size_t bpp = GrBytesPerPixel(config);
            size_t rowCopyBytes = bpp * w;
            // If there is no padding on dst (layout.rowPitch) we can do a single memcopy.
            // This assumes the srcData comes in with no padding.
            if (rowCopyBytes == layout.rowPitch) {
                memcpy(mapPtr, srcData, rowCopyBytes * h);
            } else {
                SkRectMemcpy(mapPtr, layout.rowPitch, srcData, rowCopyBytes, rowCopyBytes, h);
            }
            GR_VK_CALL(interface, UnmapMemory(fDevice, imageRsrc->fAlloc));
        } else {
            // TODO: Add support for copying to optimal tiling
            SkASSERT(false);
        }
    }

    return (GrBackendObject)imageRsrc;
}

bool GrVkGpu::isTestingOnlyBackendTexture(GrBackendObject id) const {
    GrVkImage::Resource* backend = reinterpret_cast<GrVkImage::Resource*>(id);

    if (backend && backend->fImage && backend->fAlloc) {
        VkMemoryRequirements req;
        memset(&req, 0, sizeof(req));
        GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
                                                                   backend->fImage,
                                                                   &req));
        // TODO: find a better check
        // This will probably fail with a different driver
        return (req.size > 0) && (req.size <= 8192 * 8192);
    }

    return false;
}

void GrVkGpu::deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandon) {
    GrVkImage::Resource* backend = reinterpret_cast<GrVkImage::Resource*>(id);

    if (backend) {
        if (!abandon) {
            backend->unref(this);
        } else {
            backend->unrefAndAbandon();
        }
    }
}

////////////////////////////////////////////////////////////////////////////////

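// The three helpers below all funnel into GrVkCommandBuffer::pipelineBarrier; they differ only
// in the barrier type recorded (global memory, buffer, or image barrier).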
void GrVkGpu::addMemoryBarrier(VkPipelineStageFlags srcStageMask,
                               VkPipelineStageFlags dstStageMask,
                               bool byRegion,
                               VkMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                     VkPipelineStageFlags dstStageMask,
                                     bool byRegion,
                                     VkBufferMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kBufferMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::addImageMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                    VkPipelineStageFlags dstStageMask,
                                    bool byRegion,
                                    VkImageMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kImageMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::finishDrawTarget() {
    // Submit the current command buffer to the Queue
    this->submitCommandBuffer(kSkip_SyncQueue);
}

void GrVkGpu::clearStencil(GrRenderTarget* target) {
    if (nullptr == target) {
        return;
    }
    GrStencilAttachment* stencil = target->renderTargetPriv().getStencilAttachment();
    GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));

    VkImageLayout origDstLayout = vkStencil->currentLayout();

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    vkStencil->setImageLayout(this,
                              VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                              srcAccessMask,
                              dstAccessMask,
                              srcStageMask,
                              dstStageMask,
                              false);

    VkImageSubresourceRange subRange;
    memset(&subRange, 0, sizeof(VkImageSubresourceRange));
    subRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    subRange.baseMipLevel = 0;
    subRange.levelCount = 1;
    subRange.baseArrayLayer = 0;
    subRange.layerCount = 1;

    // TODO: I imagine that most times we want to clear a stencil it will be at the beginning of a
    // draw. Thus we should look into using the load op functions on the render pass to clear out
    // the stencil there.
    fCurrentCmdBuffer->clearDepthStencilImage(this, vkStencil, &vkStencilColor, 1, &subRange);
}

void GrVkGpu::onClearStencilClip(GrRenderTarget* target, const SkIRect& rect, bool insideClip) {
    SkASSERT(target);

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
    GrStencilAttachment* sb = target->renderTargetPriv().getStencilAttachment();
    GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)sb;

    // this should only be called internally when we know we have a
    // stencil buffer.
    SkASSERT(sb);
    int stencilBitCount = sb->bits();

    // The contract with the callers does not guarantee that we preserve all bits in the stencil
    // during this clear. Thus we will clear the entire stencil to the desired value.

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
    if (insideClip) {
        vkStencilColor.stencil = (1 << (stencilBitCount - 1));
    } else {
        vkStencilColor.stencil = 0;
    }

    VkImageLayout origDstLayout = vkStencil->currentLayout();
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    VkPipelineStageFlags srcStageMask =
        GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    vkStencil->setImageLayout(this,
                              VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                              srcAccessMask,
                              dstAccessMask,
                              srcStageMask,
                              dstStageMask,
                              false);

    VkClearRect clearRect;
    // Flip rect if necessary
    SkIRect vkRect = rect;

    if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) {
        vkRect.fTop = vkRT->height() - rect.fBottom;
        vkRect.fBottom = vkRT->height() - rect.fTop;
    }

    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };

    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
    SkASSERT(renderPass);
    fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

    uint32_t stencilIndex;
    SkAssertResult(renderPass->stencilAttachmentIndex(&stencilIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    attachment.colorAttachment = 0; // this value shouldn't matter
    attachment.clearValue.depthStencil = vkStencilColor;

    fCurrentCmdBuffer->clearAttachments(this, 1, &attachment, 1, &clearRect);
    fCurrentCmdBuffer->endRenderPass(this);
}

void GrVkGpu::onClear(GrRenderTarget* target, const SkIRect& rect, GrColor color) {
    // parent class should never let us get here with no RT
    SkASSERT(target);

    VkClearColorValue vkColor;
    GrColorToRGBAFloat(color, vkColor.float32);

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
    VkImageLayout origDstLayout = vkRT->currentLayout();

    if (rect.width() != target->width() || rect.height() != target->height()) {
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
        VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
        VkPipelineStageFlags srcStageMask =
            GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
        vkRT->setImageLayout(this,
                             VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                             srcAccessMask,
                             dstAccessMask,
                             srcStageMask,
                             dstStageMask,
                             false);

        VkClearRect clearRect;
        // Flip rect if necessary
        SkIRect vkRect = rect;
        if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) {
            vkRect.fTop = vkRT->height() - rect.fBottom;
            vkRect.fBottom = vkRT->height() - rect.fTop;
        }
        clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
        clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
        clearRect.baseArrayLayer = 0;
        clearRect.layerCount = 1;

        const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
        SkASSERT(renderPass);
        fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

        uint32_t colorIndex;
        SkAssertResult(renderPass->colorAttachmentIndex(&colorIndex));

        VkClearAttachment attachment;
        attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        attachment.colorAttachment = colorIndex;
        attachment.clearValue.color = vkColor;

        fCurrentCmdBuffer->clearAttachments(this, 1, &attachment, 1, &clearRect);
        fCurrentCmdBuffer->endRenderPass(this);
        return;
    }

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    vkRT->setImageLayout(this,
                         VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                         srcAccessMask,
                         dstAccessMask,
                         srcStageMask,
                         dstStageMask,
                         false);

    VkImageSubresourceRange subRange;
    memset(&subRange, 0, sizeof(VkImageSubresourceRange));
    subRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    subRange.baseMipLevel = 0;
    subRange.levelCount = 1;
    subRange.baseArrayLayer = 0;
    subRange.layerCount = 1;

    // In the future we may not actually be doing this type of clear at all. If we are inside a
    // render pass or doing a non full clear then we will use CmdClearColorAttachment. The more
    // common use case will be clearing an attachment at the start of a render pass, in which case
    // we will use the clear load ops.
    fCurrentCmdBuffer->clearColorImage(this,
                                       vkRT,
                                       &vkColor,
                                       1, &subRange);
}

inline bool can_copy_image(const GrSurface* dst,
                           const GrSurface* src,
                           const GrVkGpu* gpu) {
    if (src->asTexture() &&
        dst->asTexture() &&
        src->origin() == dst->origin() &&
        src->config() == dst->config()) {
        return true;
    }

    // How does msaa play into this? If a VkTexture is multisampled, are we copying the
    // multisampled or the resolved image here?

    return false;
}

void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst,
                                     GrSurface* src,
                                     const SkIRect& srcRect,
                                     const SkIPoint& dstPoint) {
    SkASSERT(can_copy_image(dst, src, this));

    // Insert memory barriers to switch src and dst to transfer_source and transfer_dst layouts
    GrVkTexture* dstTex = static_cast<GrVkTexture*>(dst->asTexture());
    GrVkTexture* srcTex = static_cast<GrVkTexture*>(src->asTexture());

    VkImageLayout origDstLayout = dstTex->currentLayout();
    VkImageLayout origSrcLayout = srcTex->currentLayout();

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    // These flags are for flushing/invalidating caches and for the dst image it doesn't matter if
    // the cache is flushed since it is only being written to.
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    dstTex->setImageLayout(this,
                           VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                           srcAccessMask,
                           dstAccessMask,
                           srcStageMask,
                           dstStageMask,
                           false);

    srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origSrcLayout);
    dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origSrcLayout);
    dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;

    srcTex->setImageLayout(this,
                           VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                           srcAccessMask,
                           dstAccessMask,
                           srcStageMask,
                           dstStageMask,
                           false);

    // Flip rect if necessary
    SkIRect srcVkRect = srcRect;
    int32_t dstY = dstPoint.fY;

    if (kBottomLeft_GrSurfaceOrigin == src->origin()) {
        SkASSERT(kBottomLeft_GrSurfaceOrigin == dst->origin());
        srcVkRect.fTop = src->height() - srcRect.fBottom;
        srcVkRect.fBottom = src->height() - srcRect.fTop;
        dstY = dst->height() - dstPoint.fY - srcVkRect.height();
    }

    VkImageCopy copyRegion;
    memset(&copyRegion, 0, sizeof(VkImageCopy));
    copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.srcOffset = { srcVkRect.fLeft, srcVkRect.fTop, 0 };
    copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.dstOffset = { dstPoint.fX, dstY, 0 };
    // The extent depth must be 1 for a 2D image copy; a depth of 0 would copy nothing.
    copyRegion.extent = { (uint32_t)srcVkRect.width(), (uint32_t)srcVkRect.height(), 1 };

    fCurrentCmdBuffer->copyImage(this,
                                 srcTex,
                                 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                 dstTex,
                                 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                 1,
                                 &copyRegion);
}

inline bool can_copy_as_draw(const GrSurface* dst,
                             const GrSurface* src,
                             const GrVkGpu* gpu) {
    return false;
}

void GrVkGpu::copySurfaceAsDraw(GrSurface* dst,
                                GrSurface* src,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint) {
    SkASSERT(false);
}
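
// onCopySurface tries the vkCmdCopyImage path first; the copy-as-draw fallback is not yet
// implemented (can_copy_as_draw always returns false), so copies that don't qualify simply fail.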
bool GrVkGpu::onCopySurface(GrSurface* dst,
                            GrSurface* src,
                            const SkIRect& srcRect,
                            const SkIPoint& dstPoint) {
    if (can_copy_image(dst, src, this)) {
        this->copySurfaceAsCopyImage(dst, src, srcRect, dstPoint);
        return true;
    }

    if (can_copy_as_draw(dst, src, this)) {
        this->copySurfaceAsDraw(dst, src, srcRect, dstPoint);
        return true;
    }

    return false;
}

void GrVkGpu::onGetMultisampleSpecs(GrRenderTarget* rt, const GrStencilSettings&,
                                    int* effectiveSampleCnt, SkAutoTDeleteArray<SkPoint>*) {
    // TODO: stub.
    SkASSERT(!this->caps()->sampleLocationsSupport());
    *effectiveSampleCnt = rt->desc().fSampleCnt;
}

bool GrVkGpu::onGetReadPixelsInfo(GrSurface* srcSurface, int width, int height, size_t rowBytes,
                                  GrPixelConfig readConfig, DrawPreference* drawPreference,
                                  ReadPixelTempDrawInfo* tempDrawInfo) {
    // Currently we don't handle draws, so if the caller wants/needs to do a draw we need to fail
    if (kNoDraw_DrawPreference != *drawPreference) {
        return false;
    }

    if (srcSurface->config() != readConfig) {
        // TODO: This should fall back to drawing or copying to change config of srcSurface to
        // match that of readConfig.
        return false;
    }

    return true;
}
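
// onReadPixels copies the image into a transfer buffer, force-syncs the queue, then maps the
// buffer and copies out on the host. Note that the copy region below is tightly packed
// (bufferRowLength = 0), so the final memcpy assumes rowBytes matches the tight row pitch.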
bool GrVkGpu::onReadPixels(GrSurface* surface,
                           int left, int top, int width, int height,
                           GrPixelConfig config,
                           void* buffer,
                           size_t rowBytes) {
    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
        return false;
    }

    GrVkTexture* tgt = static_cast<GrVkTexture*>(surface->asTexture());
    if (!tgt) {
        return false;
    }

    // Change layout of our target so it can be used as copy
    VkImageLayout layout = tgt->currentLayout();
    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
    tgt->setImageLayout(this,
                        VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                        srcAccessMask,
                        dstAccessMask,
                        srcStageMask,
                        dstStageMask,
                        false);

    GrVkTransferBuffer* transferBuffer =
        reinterpret_cast<GrVkTransferBuffer*>(this->createTransferBuffer(rowBytes * height,
                                                                         kGpuToCpu_TransferType));

    bool flipY = kBottomLeft_GrSurfaceOrigin == surface->origin();
    VkOffset3D offset = {
        left,
        flipY ? surface->height() - top - height : top,
        0
    };

    // Copy the image to a buffer so we can map it to cpu memory
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = 0;
    region.bufferRowLength = 0; // Forces RowLength to be imageExtent.width
    region.bufferImageHeight = 0; // Forces height to be tightly packed. Only useful for 3d images.
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = offset;
    region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

    fCurrentCmdBuffer->copyImageToBuffer(this,
                                         tgt,
                                         VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                         transferBuffer,
                                         1,
                                         &region);

    // make sure the copy to buffer has finished
    transferBuffer->addMemoryBarrier(this,
                                     VK_ACCESS_TRANSFER_WRITE_BIT,
                                     VK_ACCESS_HOST_READ_BIT,
                                     VK_PIPELINE_STAGE_TRANSFER_BIT,
                                     VK_PIPELINE_STAGE_HOST_BIT,
                                     false);

    // We need to submit the current command buffer to the Queue and make sure it finishes before
    // we can copy the data out of the buffer.
    this->submitCommandBuffer(kForce_SyncQueue);

    void* mappedMemory = transferBuffer->map();

    memcpy(buffer, mappedMemory, rowBytes*height);

    transferBuffer->unmap();
    transferBuffer->unref();

    if (flipY) {
        SkAutoSMalloc<32 * sizeof(GrColor)> scratch;
        size_t tightRowBytes = GrBytesPerPixel(config) * width;
        scratch.reset(tightRowBytes);
        void* tmpRow = scratch.get();
        // flip y in-place by rows
        const int halfY = height >> 1;
        char* top = reinterpret_cast<char*>(buffer);
        char* bottom = top + (height - 1) * rowBytes;
        for (int y = 0; y < halfY; y++) {
            memcpy(tmpRow, top, tightRowBytes);
            memcpy(top, bottom, tightRowBytes);
            memcpy(bottom, tmpRow, tightRowBytes);
            top += rowBytes;
            bottom -= rowBytes;
        }
    }

    return true;
}
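
// onDraw builds (and will eventually fetch from a cache) a GrVkProgram for this draw, begins the
// render target's simple render pass, binds pipeline state and geometry, fixes up image layouts
// for the color (and, if enabled, stencil) attachments, and records the draw.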
void GrVkGpu::onDraw(const DrawArgs& args, const GrNonInstancedVertices& vertices) {
    GrRenderTarget* rt = args.fPipeline->getRenderTarget();
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
    const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
    SkASSERT(renderPass);

    GrVkProgram* program = GrVkProgramBuilder::CreateProgram(this, args,
                                                             vertices.primitiveType(),
                                                             *renderPass);

    if (!program) {
        return;
    }

    program->setData(this, *args.fPrimitiveProcessor, *args.fPipeline);

    fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

    program->bind(this, fCurrentCmdBuffer);

    this->bindGeometry(*args.fPrimitiveProcessor, vertices);

    // Change layout of our render target so it can be used as the color attachment
    VkImageLayout layout = vkRT->currentLayout();
    // Our color attachment is purely a destination and won't be read so don't need to flush or
    // invalidate any caches
    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    vkRT->setImageLayout(this,
                         VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                         srcAccessMask,
                         dstAccessMask,
                         srcStageMask,
                         dstStageMask,
                         false);

    // If we are using a stencil attachment we also need to update its layout
    if (!args.fPipeline->getStencil().isDisabled()) {
        GrStencilAttachment* stencil = vkRT->renderTargetPriv().getStencilAttachment();
        GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
        VkImageLayout origDstLayout = vkStencil->currentLayout();
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
        VkAccessFlags dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
        VkPipelineStageFlags srcStageMask =
            GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
        vkStencil->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                                  srcAccessMask,
                                  dstAccessMask,
                                  srcStageMask,
                                  dstStageMask,
                                  false);
    }

    if (vertices.isIndexed()) {
        fCurrentCmdBuffer->drawIndexed(this,
                                       vertices.indexCount(),
                                       1,
                                       vertices.startIndex(),
                                       vertices.startVertex(),
                                       0);
    } else {
        fCurrentCmdBuffer->draw(this, vertices.vertexCount(), 1, vertices.startVertex(), 0);
    }

    fCurrentCmdBuffer->endRenderPass(this);

    // Technically we don't have to call this here (since there is a safety check in
    // program::setData), but this will allow for quicker freeing of resources if the program
    // sits in a cache for a while.
    program->freeTempResources(this);
    // This free will go away once we set up a program cache, and then the cache will be
    // responsible for calling freeGPUResources.
    program->freeGPUResources(this);
    program->unref();

#if SWAP_PER_DRAW
    glFlush();
#if defined(SK_BUILD_FOR_MAC)
    aglSwapBuffers(aglGetCurrentContext());
    int set_a_break_pt_here = 9;
    aglSwapBuffers(aglGetCurrentContext());
#elif defined(SK_BUILD_FOR_WIN32)
    SwapBuf();
    int set_a_break_pt_here = 9;
    SwapBuf();
#endif
#endif
}