/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkGpu.h"

#include "GrContextOptions.h"
#include "GrGeometryProcessor.h"
#include "GrGpuResourceCacheAccess.h"
#include "GrMesh.h"
#include "GrPipeline.h"
#include "GrRenderTargetPriv.h"
#include "GrSurfacePriv.h"
#include "GrTexturePriv.h"

#include "GrVkCommandBuffer.h"
#include "GrVkImage.h"
#include "GrVkIndexBuffer.h"
#include "GrVkMemory.h"
#include "GrVkPipeline.h"
#include "GrVkProgram.h"
#include "GrVkProgramBuilder.h"
#include "GrVkProgramDesc.h"
#include "GrVkRenderPass.h"
#include "GrVkResourceProvider.h"
#include "GrVkTexture.h"
#include "GrVkTextureRenderTarget.h"
#include "GrVkTransferBuffer.h"
#include "GrVkVertexBuffer.h"

#include "SkConfig8888.h"

#include "vk/GrVkInterface.h"

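// These macros route Vulkan calls through this gpu's GrVkInterface function table
// rather than calling the Vulkan entry points directly.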
#define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
#define VK_CALL_RET(RET, X) GR_VK_CALL_RET(this->vkInterface(), RET, X)
#define VK_CALL_ERRCHECK(X) GR_VK_CALL_ERRCHECK(this->vkInterface(), X)

////////////////////////////////////////////////////////////////////////////////
// Stuff used to set up a GrVkGpu secretly for now.

// For now the VkGpuCreate is using the same signature as GL. This is mostly for ease of
// hiding this code from official skia. In the end the VkGpuCreate will not take a
// GrBackendContext and most likely would take an optional device and queues to use.
GrGpu* vk_gpu_create(GrBackendContext backendContext, const GrContextOptions& options,
                     GrContext* context) {
    // Below is Vulkan setup code that normally would be done by a client, but we do it here
    // for testing purposes.
    VkPhysicalDevice physDev;
    VkDevice device;
    VkInstance inst;
    VkResult err;

    const VkApplicationInfo app_info = {
        VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
        nullptr,                            // pNext
        "vktest",                           // pApplicationName
        0,                                  // applicationVersion
        "vktest",                           // pEngineName
        0,                                  // engineVersion
        VK_API_VERSION,                     // apiVersion
    };
    const VkInstanceCreateInfo instance_create = {
        VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
        nullptr,                                // pNext
        0,                                      // flags
        &app_info,                              // pApplicationInfo
        0,                                      // enabledLayerNameCount
        nullptr,                                // ppEnabledLayerNames
        0,                                      // enabledExtensionNameCount
        nullptr,                                // ppEnabledExtensionNames
    };
    err = vkCreateInstance(&instance_create, nullptr, &inst);
    if (err < 0) {
        SkDebugf("vkCreateInstance failed: %d\n", err);
        SkFAIL("failing");
    }

    uint32_t gpuCount;
    err = vkEnumeratePhysicalDevices(inst, &gpuCount, nullptr);
    if (err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        SkFAIL("failing");
    }
    SkASSERT(gpuCount > 0);
    // Just returning the first physical device instead of getting the whole array.
    gpuCount = 1;
    err = vkEnumeratePhysicalDevices(inst, &gpuCount, &physDev);
    if (err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        SkFAIL("failing");
    }

    // query to get the initial queue props size
    uint32_t queueCount;
    vkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr);
    SkASSERT(queueCount >= 1);

    SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties));
    // now get the actual queue props
    VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get();

    vkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps);

    // iterate to find the graphics queue
    uint32_t graphicsQueueIndex = -1;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphicsQueueIndex = i;
            break;
        }
    }
    SkASSERT(graphicsQueueIndex < queueCount);

    float queuePriorities[1] = { 0.0 };
    const VkDeviceQueueCreateInfo queueInfo = {
        VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
        nullptr,                                    // pNext
        0,                                          // VkDeviceQueueCreateFlags
        graphicsQueueIndex,                         // queueFamilyIndex
        1,                                          // queueCount
        queuePriorities,                            // pQueuePriorities
    };
    const VkDeviceCreateInfo deviceInfo = {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
        nullptr,                              // pNext
        0,                                    // VkDeviceCreateFlags
        1,                                    // queueCreateInfoCount
        &queueInfo,                           // pQueueCreateInfos
        0,                                    // layerCount
        nullptr,                              // ppEnabledLayerNames
        0,                                    // extensionCount
        nullptr,                              // ppEnabledExtensionNames
        nullptr                               // pEnabledFeatures
    };

    err = vkCreateDevice(physDev, &deviceInfo, nullptr, &device);
    if (err) {
        SkDebugf("CreateDevice failed: %d\n", err);
        SkFAIL("failing");
    }

    VkQueue queue;
    vkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);

    const VkCommandPoolCreateInfo cmdPoolInfo = {
        VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType
        nullptr,                                    // pNext
        0,                                          // CmdPoolCreateFlags
        graphicsQueueIndex,                         // queueFamilyIndex
    };

    VkCommandPool cmdPool;
    err = vkCreateCommandPool(device, &cmdPoolInfo, nullptr, &cmdPool);
    if (err) {
        SkDebugf("CreateCommandPool failed: %d\n", err);
        SkFAIL("failing");
    }

    return new GrVkGpu(context, options, physDev, device, queue, cmdPool, inst);
}
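
// A rough usage sketch (the client-side names here are illustrative only): a test
// harness would create the GrGpu and hand it to a GrContext, e.g.
//     GrContextOptions options;
//     GrGpu* gpu = vk_gpu_create(backendContext, options, context);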

////////////////////////////////////////////////////////////////////////////////

GrVkGpu::GrVkGpu(GrContext* context, const GrContextOptions& options,
                 VkPhysicalDevice physDev, VkDevice device, VkQueue queue, VkCommandPool cmdPool,
                 VkInstance inst)
    : INHERITED(context)
    , fDevice(device)
    , fQueue(queue)
    , fCmdPool(cmdPool)
    , fResourceProvider(this)
    , fVkInstance(inst) {
    fInterface.reset(GrVkCreateInterface(fVkInstance));
    fCompiler = shaderc_compiler_initialize();

    fVkCaps.reset(new GrVkCaps(options, fInterface, physDev));
    fCaps.reset(SkRef(fVkCaps.get()));

    fResourceProvider.init();

    fCurrentCmdBuffer = fResourceProvider.createCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->begin(this);
    VK_CALL(GetPhysicalDeviceMemoryProperties(physDev, &fPhysDevMemProps));
}

GrVkGpu::~GrVkGpu() {
    shaderc_compiler_release(fCompiler);
    fCurrentCmdBuffer->end(this);
    fCurrentCmdBuffer->unref(this);

    // wait for all commands to finish
    VK_CALL(QueueWaitIdle(fQueue));

    // must call this just before we destroy the VkDevice
    fResourceProvider.destroyResources();

    VK_CALL(DestroyCommandPool(fDevice, fCmdPool, nullptr));
    VK_CALL(DestroyDevice(fDevice, nullptr));
    VK_CALL(DestroyInstance(fVkInstance, nullptr));
}

///////////////////////////////////////////////////////////////////////////////

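// Ends the current command buffer and submits it to the queue, then starts recording a
// new one so there is always an open buffer. kForce_SyncQueue waits for the submitted
// work to finish; kSkip_SyncQueue does not.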
void GrVkGpu::submitCommandBuffer(SyncQueue sync) {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->end(this);

    fCurrentCmdBuffer->submitToQueue(this, fQueue, sync);
    fResourceProvider.checkCommandBuffers();

    // Release old command buffer and create a new one
    fCurrentCmdBuffer->unref(this);
    fCurrentCmdBuffer = fResourceProvider.createCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);

    fCurrentCmdBuffer->begin(this);
}

///////////////////////////////////////////////////////////////////////////////
GrVertexBuffer* GrVkGpu::onCreateVertexBuffer(size_t size, bool dynamic) {
    return GrVkVertexBuffer::Create(this, size, dynamic);
}

GrIndexBuffer* GrVkGpu::onCreateIndexBuffer(size_t size, bool dynamic) {
    return GrVkIndexBuffer::Create(this, size, dynamic);
}

GrTransferBuffer* GrVkGpu::onCreateTransferBuffer(size_t size, TransferType type) {
    GrVkBuffer::Type bufferType = kCpuToGpu_TransferType == type ? GrVkBuffer::kCopyRead_Type
                                                                 : GrVkBuffer::kCopyWrite_Type;
    return GrVkTransferBuffer::Create(this, size, bufferType);
}

////////////////////////////////////////////////////////////////////////////////
bool GrVkGpu::onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
                                   GrPixelConfig srcConfig, DrawPreference* drawPreference,
                                   WritePixelTempDrawInfo* tempDrawInfo) {
    if (kIndex_8_GrPixelConfig == srcConfig || GrPixelConfigIsCompressed(dstSurface->config())) {
        return false;
    }

    // Currently we don't handle draws, so if the caller wants/needs to do a draw we need to fail
    if (kNoDraw_DrawPreference != *drawPreference) {
        return false;
    }

    if (dstSurface->config() != srcConfig) {
        // TODO: This should fall back to drawing or copying to change config of dstSurface to
        // match that of srcConfig.
        return false;
    }

    return true;
}

bool GrVkGpu::onWritePixels(GrSurface* surface,
                            int left, int top, int width, int height,
                            GrPixelConfig config,
                            const SkTArray<GrMipLevel>& texels) {
    GrVkTexture* vkTex = static_cast<GrVkTexture*>(surface->asTexture());
    if (!vkTex) {
        return false;
    }

    // TODO: We're ignoring MIP levels here.
    if (texels.empty() || !texels.begin()->fPixels) {
        return false;
    }

    // We assume Vulkan doesn't do sRGB <-> linear conversions when reading and writing pixels.
    if (GrPixelConfigIsSRGB(surface->config()) != GrPixelConfigIsSRGB(config)) {
        return false;
    }

    bool success = false;
    if (GrPixelConfigIsCompressed(vkTex->desc().fConfig)) {
        // We check that config == desc.fConfig in GrGpu::getWritePixelsInfo()
        SkASSERT(config == vkTex->desc().fConfig);
        // TODO: add compressed texture support
        // delete the following two lines and uncomment the two after that when ready
        vkTex->unref();
        return false;
        //success = this->uploadCompressedTexData(vkTex->desc(), buffer, false, left, top, width,
        //                                        height);
    } else {
        bool linearTiling = vkTex->isLinearTiled();
        if (linearTiling && VK_IMAGE_LAYOUT_PREINITIALIZED != vkTex->currentLayout()) {
            // Need to change the layout to general in order to perform a host write
            VkImageLayout layout = vkTex->currentLayout();
            VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
            VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_HOST_BIT;
            VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
            VkAccessFlags dstAccessMask = VK_ACCESS_HOST_WRITE_BIT;
            vkTex->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_GENERAL,
                                  srcAccessMask,
                                  dstAccessMask,
                                  srcStageMask,
                                  dstStageMask,
                                  false);
        }
        success = this->uploadTexData(vkTex, left, top, width, height, config,
                                      texels.begin()->fPixels, texels.begin()->fRowBytes);
    }

    if (success) {
        vkTex->texturePriv().dirtyMipMaps(true);
        return true;
    }

    return false;
}

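// Writes the pixel data to the texture via one of two paths: linearly tiled images are
// mapped and written directly on the host, while optimally tiled images are staged
// through a transfer buffer and copied to the image on the gpu.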
bool GrVkGpu::uploadTexData(GrVkTexture* tex,
                            int left, int top, int width, int height,
                            GrPixelConfig dataConfig,
                            const void* data,
                            size_t rowBytes) {
    SkASSERT(data);

    // If we're uploading compressed data then we should be using uploadCompressedTexData
    SkASSERT(!GrPixelConfigIsCompressed(dataConfig));

    bool linearTiling = tex->isLinearTiled();

    size_t bpp = GrBytesPerPixel(dataConfig);

    const GrSurfaceDesc& desc = tex->desc();

    if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
                                               &width, &height, &data, &rowBytes)) {
        return false;
    }
    size_t trimRowBytes = width * bpp;

    if (linearTiling) {
        SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == tex->currentLayout() ||
                 VK_IMAGE_LAYOUT_GENERAL == tex->currentLayout());
        const VkImageSubresource subres = {
            VK_IMAGE_ASPECT_COLOR_BIT,
            0,  // mipLevel
            0,  // arraySlice
        };
        VkSubresourceLayout layout;
        VkResult err;

        const GrVkInterface* interface = this->vkInterface();

        GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
                                                        tex->textureImage(),
                                                        &subres,
                                                        &layout));

        int texTop = kBottomLeft_GrSurfaceOrigin == desc.fOrigin ? tex->height() - top - height
                                                                 : top;
        VkDeviceSize offset = texTop*layout.rowPitch + left*bpp;
        VkDeviceSize size = height*layout.rowPitch;
        void* mapPtr;
        err = GR_VK_CALL(interface, MapMemory(fDevice, tex->textureMemory(), offset, size, 0,
                                              &mapPtr));
        if (err) {
            return false;
        }

        if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
            // copy into buffer by rows
            const char* srcRow = reinterpret_cast<const char*>(data);
            char* dstRow = reinterpret_cast<char*>(mapPtr) + (height - 1)*layout.rowPitch;
            for (int y = 0; y < height; y++) {
                memcpy(dstRow, srcRow, trimRowBytes);
                srcRow += rowBytes;
                dstRow -= layout.rowPitch;
            }
        } else {
            // If there is no padding on the src (rowBytes) or dst (layout.rowPitch) we can memcpy
            if (trimRowBytes == rowBytes && trimRowBytes == layout.rowPitch) {
                memcpy(mapPtr, data, trimRowBytes * height);
            } else {
                SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), data, rowBytes,
                             trimRowBytes, height);
            }
        }

        GR_VK_CALL(interface, UnmapMemory(fDevice, tex->textureMemory()));
    } else {
        GrVkTransferBuffer* transferBuffer =
            GrVkTransferBuffer::Create(this, trimRowBytes * height, GrVkBuffer::kCopyRead_Type);

        void* mapPtr = transferBuffer->map();

        if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
            // copy into buffer by rows
            const char* srcRow = reinterpret_cast<const char*>(data);
            char* dstRow = reinterpret_cast<char*>(mapPtr) + (height - 1)*trimRowBytes;
            for (int y = 0; y < height; y++) {
                memcpy(dstRow, srcRow, trimRowBytes);
                srcRow += rowBytes;
                dstRow -= trimRowBytes;
            }
        } else {
            // If there is no padding on the src data rows, we can do a single memcpy
            if (trimRowBytes == rowBytes) {
                memcpy(mapPtr, data, trimRowBytes * height);
            } else {
                SkRectMemcpy(mapPtr, trimRowBytes, data, rowBytes, trimRowBytes, height);
            }
        }

        transferBuffer->unmap();

        // make sure the unmap has finished; the host-write source access must be paired
        // with the host pipeline stage (as in the matching barriers elsewhere in this file)
        transferBuffer->addMemoryBarrier(this,
                                         VK_ACCESS_HOST_WRITE_BIT,
                                         VK_ACCESS_TRANSFER_READ_BIT,
                                         VK_PIPELINE_STAGE_HOST_BIT,
                                         VK_PIPELINE_STAGE_TRANSFER_BIT,
                                         false);

        // Set up copy region
        bool flipY = kBottomLeft_GrSurfaceOrigin == tex->origin();
        VkOffset3D offset = {
            left,
            flipY ? tex->height() - top - height : top,
            0
        };

        VkBufferImageCopy region;
        memset(&region, 0, sizeof(VkBufferImageCopy));
        region.bufferOffset = 0;
        region.bufferRowLength = width;
        region.bufferImageHeight = height;
        region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
        region.imageOffset = offset;
        region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

        // Change layout of our target so it can be copied to
        VkImageLayout layout = tex->currentLayout();
        VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
        VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
        tex->setImageLayout(this,
                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                            srcAccessMask,
                            dstAccessMask,
                            srcStageMask,
                            dstStageMask,
                            false);

        // Copy the buffer to the image
        fCurrentCmdBuffer->copyBufferToImage(this,
                                             transferBuffer,
                                             tex,
                                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                             1,
                                             &region);

        // Submit the current command buffer to the Queue
        this->submitCommandBuffer(kSkip_SyncQueue);

        transferBuffer->unref();
    }

    return true;
}

////////////////////////////////////////////////////////////////////////////////
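// Creates a new texture (and render target, if requested), picking linear vs optimal
// tiling from the surface flags and caps, then uploads the base level of texel data if
// any was supplied.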
GrTexture* GrVkGpu::onCreateTexture(const GrSurfaceDesc& desc, GrGpuResource::LifeCycle lifeCycle,
                                    const SkTArray<GrMipLevel>& texels) {
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);

    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(desc.fConfig, &pixelFormat)) {
        return nullptr;
    }

    if (!fVkCaps->isConfigTexturable(desc.fConfig)) {
        return nullptr;
    }

    bool linearTiling = false;
    if (SkToBool(desc.fFlags & kZeroCopy_GrSurfaceFlag)) {
        if (fVkCaps->isConfigTexurableLinearly(desc.fConfig) &&
            (!renderTarget || fVkCaps->isConfigRenderableLinearly(desc.fConfig, false))) {
            linearTiling = true;
        } else {
            return nullptr;
        }
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    if (renderTarget) {
        usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    }

    // For now we will set the VK_IMAGE_USAGE_TRANSFER_DST_BIT and
    // VK_IMAGE_USAGE_TRANSFER_SRC_BIT on every texture since we do not know whether or not we
    // will be using this texture in some copy or not. Also this assumes, as is the current case,
    // that all render targets in vulkan are also textures. If we change this practice of setting
    // both bits, we must make sure to set the destination bit if we are uploading srcData to the
    // texture.
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VkFlags memProps = (!texels.empty() && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
                                                           VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

    // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is
    // requested, this ImageDesc describes the resolved texture. Therefore we always have samples
    // set to 1.
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = pixelFormat;
    imageDesc.fWidth = desc.fWidth;
    imageDesc.fHeight = desc.fHeight;
    imageDesc.fLevels = 1;
    imageDesc.fSamples = 1;
    imageDesc.fImageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = usageFlags;
    imageDesc.fMemProps = memProps;

    GrVkTexture* tex;
    if (renderTarget) {
        tex = GrVkTextureRenderTarget::CreateNewTextureRenderTarget(this, desc, lifeCycle,
                                                                    imageDesc);
#if 0
        // This clear can be included to fix the warning described in https://bugs.skia.org/5045
        // Obviously we do not want to be clearing needlessly every time we create a render target.
        SkIRect rect = SkIRect::MakeWH(tex->width(), tex->height());
        this->clear(rect, GrColor_TRANSPARENT_BLACK, tex->asRenderTarget());
#endif
    } else {
        tex = GrVkTexture::CreateNewTexture(this, desc, lifeCycle, imageDesc);
    }

    if (!tex) {
        return nullptr;
    }

    // TODO: We're ignoring MIP levels here.
    if (!texels.empty()) {
        SkASSERT(texels.begin()->fPixels);
        if (!this->uploadTexData(tex, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
                                 texels.begin()->fPixels, texels.begin()->fRowBytes)) {
            tex->unref();
            return nullptr;
        }
    }

    return tex;
}

////////////////////////////////////////////////////////////////////////////////

static GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin) {
    // By default, all textures in Vk use TopLeft
    if (kDefault_GrSurfaceOrigin == origin) {
        return kTopLeft_GrSurfaceOrigin;
    } else {
        return origin;
    }
}

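// Wraps a client-created VkImage, passed in through desc.fTextureHandle as a
// GrVkImage::Resource, in a GrVkTexture. The image is only adopted (destroyed with the
// texture) when kAdopt_GrWrapOwnership is specified.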
GrTexture* GrVkGpu::onWrapBackendTexture(const GrBackendTextureDesc& desc,
                                         GrWrapOwnership ownership) {
    VkFormat format;
    if (!GrPixelConfigToVkFormat(desc.fConfig, &format)) {
        return nullptr;
    }

    if (0 == desc.fTextureHandle) {
        return nullptr;
    }

    int maxSize = this->caps()->maxTextureSize();
    if (desc.fWidth > maxSize || desc.fHeight > maxSize) {
        return nullptr;
    }

    // TODO: determine what format Chrome will actually send us and turn it into a Resource
    GrVkImage::Resource* imageRsrc = reinterpret_cast<GrVkImage::Resource*>(desc.fTextureHandle);

    GrGpuResource::LifeCycle lifeCycle = (kAdopt_GrWrapOwnership == ownership)
                                                ? GrGpuResource::kAdopted_LifeCycle
                                                : GrGpuResource::kBorrowed_LifeCycle;

    GrSurfaceDesc surfDesc;
    // next line relies on GrBackendTextureDesc's flags matching GrTexture's
    surfDesc.fFlags = (GrSurfaceFlags)desc.fFlags;
    surfDesc.fWidth = desc.fWidth;
    surfDesc.fHeight = desc.fHeight;
    surfDesc.fConfig = desc.fConfig;
    surfDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount());
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag);
    // In GL, Chrome assumes all textures are BottomLeft
    // In VK, we don't have this restriction
    surfDesc.fOrigin = resolve_origin(desc.fOrigin);

    GrVkTexture* texture = nullptr;
    if (renderTarget) {
        texture = GrVkTextureRenderTarget::CreateWrappedTextureRenderTarget(this, surfDesc,
                                                                            lifeCycle, format,
                                                                            imageRsrc);
    } else {
        texture = GrVkTexture::CreateWrappedTexture(this, surfDesc, lifeCycle, format, imageRsrc);
    }
    if (!texture) {
        return nullptr;
    }

    return texture;
}

GrRenderTarget* GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& wrapDesc,
                                                   GrWrapOwnership ownership) {
    // TODO: determine what format Chrome will actually send us and turn it into a Resource
    GrVkImage::Resource* imageRsrc =
            reinterpret_cast<GrVkImage::Resource*>(wrapDesc.fRenderTargetHandle);

    GrGpuResource::LifeCycle lifeCycle = (kAdopt_GrWrapOwnership == ownership)
                                                ? GrGpuResource::kAdopted_LifeCycle
                                                : GrGpuResource::kBorrowed_LifeCycle;

    GrSurfaceDesc desc;
    desc.fConfig = wrapDesc.fConfig;
    desc.fFlags = kCheckAllocation_GrSurfaceFlag;
    desc.fWidth = wrapDesc.fWidth;
    desc.fHeight = wrapDesc.fHeight;
    desc.fSampleCnt = SkTMin(wrapDesc.fSampleCnt, this->caps()->maxSampleCount());

    desc.fOrigin = resolve_origin(wrapDesc.fOrigin);

    GrVkRenderTarget* tgt = GrVkRenderTarget::CreateWrappedRenderTarget(this, desc,
                                                                        lifeCycle, imageRsrc);
    if (tgt && wrapDesc.fStencilBits) {
        if (!this->createStencilAttachmentForRenderTarget(tgt, desc.fWidth, desc.fHeight)) {
            tgt->unref();
            return nullptr;
        }
    }
    return tgt;
}

////////////////////////////////////////////////////////////////////////////////

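// Binds the mesh's vertex buffer (and index buffer, for indexed meshes) to the current
// command buffer, inserting host-write to gpu-read barriers so any cpu-side buffer
// updates are visible before the draw.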
void GrVkGpu::bindGeometry(const GrPrimitiveProcessor& primProc,
                           const GrNonInstancedMesh& mesh) {
    GrVkVertexBuffer* vbuf;
    vbuf = (GrVkVertexBuffer*)mesh.vertexBuffer();
    SkASSERT(vbuf);
    SkASSERT(!vbuf->isMapped());

    vbuf->addMemoryBarrier(this,
                           VK_ACCESS_HOST_WRITE_BIT,
                           VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
                           VK_PIPELINE_STAGE_HOST_BIT,
                           VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                           false);

    fCurrentCmdBuffer->bindVertexBuffer(this, vbuf);

    if (mesh.isIndexed()) {
        GrVkIndexBuffer* ibuf = (GrVkIndexBuffer*)mesh.indexBuffer();
        SkASSERT(ibuf);
        SkASSERT(!ibuf->isMapped());

        ibuf->addMemoryBarrier(this,
                               VK_ACCESS_HOST_WRITE_BIT,
                               VK_ACCESS_INDEX_READ_BIT,
                               VK_PIPELINE_STAGE_HOST_BIT,
                               VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                               false);

        fCurrentCmdBuffer->bindIndexBuffer(this, ibuf);
    }
}

////////////////////////////////////////////////////////////////////////////////

GrStencilAttachment* GrVkGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt,
                                                                     int width,
                                                                     int height) {
    SkASSERT(rt->asTexture());
    SkASSERT(width >= rt->width());
    SkASSERT(height >= rt->height());

    int samples = rt->numStencilSamples();

    SkASSERT(this->vkCaps().stencilFormats().count());
    const GrVkCaps::StencilFormat& sFmt = this->vkCaps().stencilFormats()[0];

    GrVkStencilAttachment* stencil(GrVkStencilAttachment::Create(this,
                                                                 GrGpuResource::kCached_LifeCycle,
                                                                 width,
                                                                 height,
                                                                 samples,
                                                                 sFmt));
    fStats.incStencilAttachmentCreates();
    return stencil;
}

////////////////////////////////////////////////////////////////////////////////

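// Test-only creation of a backend texture. srcData can currently only be written into
// linearly tiled images; the copy needed for optimal tiling is still a TODO below.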
GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, int h,
                                                         GrPixelConfig config) {
    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
        return 0;
    }

    bool linearTiling = false;
    if (!fVkCaps->isConfigTexturable(config)) {
        return 0;
    }

    if (fVkCaps->isConfigTexurableLinearly(config)) {
        linearTiling = true;
    }

    // Currently this is not supported since it requires a copy which has not yet been implemented.
    if (srcData && !linearTiling) {
        return 0;
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VkFlags memProps = (srcData && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
                                                   VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

    // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is
    // requested, this ImageDesc describes the resolved texture. Therefore we always have samples
    // set to 1.
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = pixelFormat;
    imageDesc.fWidth = w;
    imageDesc.fHeight = h;
    imageDesc.fLevels = 1;
    imageDesc.fSamples = 1;
    imageDesc.fImageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = usageFlags;
    imageDesc.fMemProps = memProps;

    const GrVkImage::Resource* imageRsrc = GrVkImage::CreateResource(this, imageDesc);
    if (!imageRsrc) {
        return 0;
    }

    if (srcData) {
        if (linearTiling) {
            const VkImageSubresource subres = {
                VK_IMAGE_ASPECT_COLOR_BIT,
                0,  // mipLevel
                0,  // arraySlice
            };
            VkSubresourceLayout layout;
            VkResult err;

            const GrVkInterface* interface = this->vkInterface();

            GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
                                                            imageRsrc->fImage,
                                                            &subres,
                                                            &layout));

            void* mapPtr;
            err = GR_VK_CALL(interface, MapMemory(fDevice,
                                                  imageRsrc->fAlloc,
                                                  0,
                                                  layout.rowPitch * h,
                                                  0,
                                                  &mapPtr));
            if (err) {
                imageRsrc->unref(this);
                return 0;
            }

            size_t bpp = GrBytesPerPixel(config);
            size_t rowCopyBytes = bpp * w;
            // If there is no padding on dst (layout.rowPitch) we can do a single memcpy.
            // This assumes the srcData comes in with no padding.
            if (rowCopyBytes == layout.rowPitch) {
                memcpy(mapPtr, srcData, rowCopyBytes * h);
            } else {
                SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), srcData, rowCopyBytes,
                             rowCopyBytes, h);
            }
            GR_VK_CALL(interface, UnmapMemory(fDevice, imageRsrc->fAlloc));
        } else {
            // TODO: Add support for copying to optimal tiling
            SkASSERT(false);
        }
    }

    return (GrBackendObject)imageRsrc;
}

bool GrVkGpu::isTestingOnlyBackendTexture(GrBackendObject id) const {
    GrVkImage::Resource* backend = reinterpret_cast<GrVkImage::Resource*>(id);

    if (backend && backend->fImage && backend->fAlloc) {
        VkMemoryRequirements req;
        memset(&req, 0, sizeof(req));
        GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
                                                                   backend->fImage,
                                                                   &req));
        // TODO: find a better check
        // This will probably fail with a different driver
        return (req.size > 0) && (req.size <= 8192 * 8192);
    }

    return false;
}

void GrVkGpu::deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandon) {
    GrVkImage::Resource* backend = reinterpret_cast<GrVkImage::Resource*>(id);

    if (backend) {
        if (!abandon) {
            backend->unref(this);
        } else {
            backend->unrefAndAbandon();
        }
    }
}

////////////////////////////////////////////////////////////////////////////////

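// The addMemoryBarrier variants below record a pipeline barrier of the corresponding
// scope (global memory, buffer, or image) into the current command buffer.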
void GrVkGpu::addMemoryBarrier(VkPipelineStageFlags srcStageMask,
                               VkPipelineStageFlags dstStageMask,
                               bool byRegion,
                               VkMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                     VkPipelineStageFlags dstStageMask,
                                     bool byRegion,
                                     VkBufferMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kBufferMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::addImageMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                    VkPipelineStageFlags dstStageMask,
                                    bool byRegion,
                                    VkImageMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kImageMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::finishDrawTarget() {
    // Submit the current command buffer to the Queue
    this->submitCommandBuffer(kSkip_SyncQueue);
}

void GrVkGpu::clearStencil(GrRenderTarget* target) {
    if (nullptr == target) {
        return;
    }
    GrStencilAttachment* stencil = target->renderTargetPriv().getStencilAttachment();
    GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));

    VkImageLayout origDstLayout = vkStencil->currentLayout();

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    vkStencil->setImageLayout(this,
                              VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                              srcAccessMask,
                              dstAccessMask,
                              srcStageMask,
                              dstStageMask,
                              false);

    VkImageSubresourceRange subRange;
    memset(&subRange, 0, sizeof(VkImageSubresourceRange));
    subRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    subRange.baseMipLevel = 0;
    subRange.levelCount = 1;
    subRange.baseArrayLayer = 0;
    subRange.layerCount = 1;

    // TODO: I imagine that most times we want to clear a stencil it will be at the beginning of a
    // draw. Thus we should look into using the load op functions on the render pass to clear out
    // the stencil there.
    fCurrentCmdBuffer->clearDepthStencilImage(this, vkStencil, &vkStencilColor, 1, &subRange);
}

void GrVkGpu::onClearStencilClip(GrRenderTarget* target, const SkIRect& rect, bool insideClip) {
    SkASSERT(target);

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
    GrStencilAttachment* sb = target->renderTargetPriv().getStencilAttachment();
    GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)sb;

    // this should only be called internally when we know we have a
    // stencil buffer.
    SkASSERT(sb);
    int stencilBitCount = sb->bits();

    // The contract with the callers does not guarantee that we preserve all bits in the stencil
    // during this clear. Thus we will clear the entire stencil to the desired value.

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
    if (insideClip) {
        vkStencilColor.stencil = (1 << (stencilBitCount - 1));
    } else {
        vkStencilColor.stencil = 0;
    }

    VkImageLayout origDstLayout = vkStencil->currentLayout();
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    vkStencil->setImageLayout(this,
                              VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                              srcAccessMask,
                              dstAccessMask,
                              srcStageMask,
                              dstStageMask,
                              false);

    VkClearRect clearRect;
    // Flip rect if necessary
    SkIRect vkRect = rect;

    if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) {
        vkRect.fTop = vkRT->height() - rect.fBottom;
        vkRect.fBottom = vkRT->height() - rect.fTop;
    }

    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };

    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
    SkASSERT(renderPass);
    fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

    uint32_t stencilIndex;
    SkAssertResult(renderPass->stencilAttachmentIndex(&stencilIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    attachment.colorAttachment = 0;  // this value shouldn't matter
    attachment.clearValue.depthStencil = vkStencilColor;

    fCurrentCmdBuffer->clearAttachments(this, 1, &attachment, 1, &clearRect);
    fCurrentCmdBuffer->endRenderPass(this);

    return;
}

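// Clears the render target's color. A partial-surface clear is recorded as a
// clearAttachments call inside a render pass; a full-surface clear uses clearColorImage
// outside of one.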
void GrVkGpu::onClear(GrRenderTarget* target, const SkIRect& rect, GrColor color) {
    // parent class should never let us get here with no RT
    SkASSERT(target);

    VkClearColorValue vkColor;
    GrColorToRGBAFloat(color, vkColor.float32);

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
    VkImageLayout origDstLayout = vkRT->currentLayout();

    if (rect.width() != target->width() || rect.height() != target->height()) {
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
        VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
        VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
        vkRT->setImageLayout(this,
                             VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                             srcAccessMask,
                             dstAccessMask,
                             srcStageMask,
                             dstStageMask,
                             false);

        VkClearRect clearRect;
        // Flip rect if necessary
        SkIRect vkRect = rect;
        if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) {
            vkRect.fTop = vkRT->height() - rect.fBottom;
            vkRect.fBottom = vkRT->height() - rect.fTop;
        }
        clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
        clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
        clearRect.baseArrayLayer = 0;
        clearRect.layerCount = 1;

        const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
        SkASSERT(renderPass);
        fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

        uint32_t colorIndex;
        SkAssertResult(renderPass->colorAttachmentIndex(&colorIndex));

        VkClearAttachment attachment;
        attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        attachment.colorAttachment = colorIndex;
        attachment.clearValue.color = vkColor;

        fCurrentCmdBuffer->clearAttachments(this, 1, &attachment, 1, &clearRect);
        fCurrentCmdBuffer->endRenderPass(this);
        return;
    }

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    vkRT->setImageLayout(this,
                         VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                         srcAccessMask,
                         dstAccessMask,
                         srcStageMask,
                         dstStageMask,
                         false);

    VkImageSubresourceRange subRange;
    memset(&subRange, 0, sizeof(VkImageSubresourceRange));
    subRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    subRange.baseMipLevel = 0;
    subRange.levelCount = 1;
    subRange.baseArrayLayer = 0;
    subRange.layerCount = 1;

    // In the future we may not actually be doing this type of clear at all. If we are inside a
    // render pass or doing a non full clear then we will use CmdClearColorAttachment. The more
    // common use case will be clearing an attachment at the start of a render pass, in which case
    // we will use the clear load ops.
    fCurrentCmdBuffer->clearColorImage(this,
                                       vkRT,
                                       &vkColor,
                                       1, &subRange);
}

inline bool can_copy_image(const GrSurface* dst,
                           const GrSurface* src,
                           const GrVkGpu* gpu) {
    if (src->asTexture() &&
        dst->asTexture() &&
        src->origin() == dst->origin() &&
        src->config() == dst->config()) {
        return true;
    }

    // How does msaa play into this? If a VkTexture is multisampled, are we copying the
    // multisampled or the resolved image here?

    return false;
}

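// Performs the copy with a gpu image-to-image copy after transitioning src and dst into
// transfer layouts, flipping the copy rect when the surfaces use a bottom-left origin.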
void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst,
                                     GrSurface* src,
                                     const SkIRect& srcRect,
                                     const SkIPoint& dstPoint) {
    SkASSERT(can_copy_image(dst, src, this));

    // Insert memory barriers to switch src and dst to transfer_source and transfer_dst layouts
    GrVkTexture* dstTex = static_cast<GrVkTexture*>(dst->asTexture());
    GrVkTexture* srcTex = static_cast<GrVkTexture*>(src->asTexture());

    VkImageLayout origDstLayout = dstTex->currentLayout();
    VkImageLayout origSrcLayout = srcTex->currentLayout();

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    // These flags are for flushing/invalidating caches and for the dst image it doesn't matter if
    // the cache is flushed since it is only being written to.
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    dstTex->setImageLayout(this,
                           VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                           srcAccessMask,
                           dstAccessMask,
                           srcStageMask,
                           dstStageMask,
                           false);

    srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origSrcLayout);
    dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origSrcLayout);
    dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;

    srcTex->setImageLayout(this,
                           VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                           srcAccessMask,
                           dstAccessMask,
                           srcStageMask,
                           dstStageMask,
                           false);

    // Flip rect if necessary
    SkIRect srcVkRect = srcRect;
    int32_t dstY = dstPoint.fY;

    if (kBottomLeft_GrSurfaceOrigin == src->origin()) {
        SkASSERT(kBottomLeft_GrSurfaceOrigin == dst->origin());
        srcVkRect.fTop = src->height() - srcRect.fBottom;
        srcVkRect.fBottom = src->height() - srcRect.fTop;
        dstY = dst->height() - dstPoint.fY - srcVkRect.height();
    }

    VkImageCopy copyRegion;
    memset(&copyRegion, 0, sizeof(VkImageCopy));
    copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.srcOffset = { srcVkRect.fLeft, srcVkRect.fTop, 0 };
    copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.dstOffset = { dstPoint.fX, dstY, 0 };
    copyRegion.extent = { (uint32_t)srcVkRect.width(), (uint32_t)srcVkRect.height(), 1 };

    fCurrentCmdBuffer->copyImage(this,
                                 srcTex,
                                 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                 dstTex,
                                 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                 1,
                                 &copyRegion);
}

inline bool can_copy_as_draw(const GrSurface* dst,
                             const GrSurface* src,
                             const GrVkGpu* gpu) {
    return false;
}

void GrVkGpu::copySurfaceAsDraw(GrSurface* dst,
                                GrSurface* src,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint) {
    SkASSERT(false);
}

bool GrVkGpu::onCopySurface(GrSurface* dst,
                            GrSurface* src,
                            const SkIRect& srcRect,
                            const SkIPoint& dstPoint) {
    if (can_copy_image(dst, src, this)) {
        this->copySurfaceAsCopyImage(dst, src, srcRect, dstPoint);
        return true;
    }

    if (can_copy_as_draw(dst, src, this)) {
        this->copySurfaceAsDraw(dst, src, srcRect, dstPoint);
        return true;
    }

    return false;
}

void GrVkGpu::onGetMultisampleSpecs(GrRenderTarget* rt, const GrStencilSettings&,
                                    int* effectiveSampleCnt, SkAutoTDeleteArray<SkPoint>*) {
    // TODO: stub.
    SkASSERT(!this->caps()->sampleLocationsSupport());
    *effectiveSampleCnt = rt->desc().fSampleCnt;
}

bool GrVkGpu::onGetReadPixelsInfo(GrSurface* srcSurface, int width, int height, size_t rowBytes,
                                  GrPixelConfig readConfig, DrawPreference* drawPreference,
                                  ReadPixelTempDrawInfo* tempDrawInfo) {
    // Currently we don't handle draws, so if the caller wants/needs to do a draw we need to fail
    if (kNoDraw_DrawPreference != *drawPreference) {
        return false;
    }

    if (srcSurface->config() != readConfig) {
        // TODO: This should fall back to drawing or copying to change config of srcSurface to
        // match that of readConfig.
        return false;
    }

    return true;
}

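// Reads pixels back by copying the image region into a transfer buffer, submitting with
// a forced sync so the copy completes, then mapping the buffer and, for bottom-left
// origin surfaces, flipping the rows in place.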
bool GrVkGpu::onReadPixels(GrSurface* surface,
                           int left, int top, int width, int height,
                           GrPixelConfig config,
                           void* buffer,
                           size_t rowBytes) {
    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
        return false;
    }

    GrVkTexture* tgt = static_cast<GrVkTexture*>(surface->asTexture());
    if (!tgt) {
        return false;
    }

    // Change layout of our target so it can be used as copy
    VkImageLayout layout = tgt->currentLayout();
    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
    tgt->setImageLayout(this,
                        VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                        srcAccessMask,
                        dstAccessMask,
                        srcStageMask,
                        dstStageMask,
                        false);

    GrVkTransferBuffer* transferBuffer =
            reinterpret_cast<GrVkTransferBuffer*>(this->createTransferBuffer(rowBytes * height,
                                                                             kGpuToCpu_TransferType));

    bool flipY = kBottomLeft_GrSurfaceOrigin == surface->origin();
    VkOffset3D offset = {
        left,
        flipY ? surface->height() - top - height : top,
        0
    };

    // Copy the image to a buffer so we can map it to cpu memory
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = 0;
    region.bufferRowLength = 0;    // Forces RowLength to be imageExtent.width
    region.bufferImageHeight = 0;  // Forces height to be tightly packed. Only useful for 3d images.
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = offset;
    region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

    fCurrentCmdBuffer->copyImageToBuffer(this,
                                         tgt,
                                         VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                         transferBuffer,
                                         1,
                                         &region);

    // make sure the copy to buffer has finished
    transferBuffer->addMemoryBarrier(this,
                                     VK_ACCESS_TRANSFER_WRITE_BIT,
                                     VK_ACCESS_HOST_READ_BIT,
                                     VK_PIPELINE_STAGE_TRANSFER_BIT,
                                     VK_PIPELINE_STAGE_HOST_BIT,
                                     false);

    // We need to submit the current command buffer to the Queue and make sure it finishes before
    // we can copy the data out of the buffer.
    this->submitCommandBuffer(kForce_SyncQueue);

    void* mappedMemory = transferBuffer->map();

    memcpy(buffer, mappedMemory, rowBytes*height);

    transferBuffer->unmap();
    transferBuffer->unref();

    if (flipY) {
        SkAutoSMalloc<32 * sizeof(GrColor)> scratch;
        size_t tightRowBytes = GrBytesPerPixel(config) * width;
        scratch.reset(tightRowBytes);
        void* tmpRow = scratch.get();
        // flip y in-place by rows
        const int halfY = height >> 1;
        char* top = reinterpret_cast<char*>(buffer);
        char* bottom = top + (height - 1) * rowBytes;
        for (int y = 0; y < halfY; y++) {
            memcpy(tmpRow, top, tightRowBytes);
            memcpy(top, bottom, tightRowBytes);
            memcpy(bottom, tmpRow, tightRowBytes);
            top += rowBytes;
            bottom -= rowBytes;
        }
    }

    return true;
}

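// Builds the GrVkProgram matching this pipeline and primitive processor (there is no
// program cache yet; see onDraw below), uploads its uniform data, and binds it to the
// current command buffer.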
bool GrVkGpu::prepareDrawState(const GrPipeline& pipeline,
                               const GrPrimitiveProcessor& primProc,
                               GrPrimitiveType primitiveType,
                               const GrVkRenderPass& renderPass,
                               GrVkProgram** program) {
    // Get GrVkProgramDesc
    GrVkProgramDesc desc;
    if (!GrVkProgramDescBuilder::Build(&desc, primProc, pipeline, *this->vkCaps().glslCaps())) {
        GrCapsDebugf(this->caps(), "Failed to build vk program descriptor!\n");
        return false;
    }

    *program = GrVkProgramBuilder::CreateProgram(this,
                                                 pipeline,
                                                 primProc,
                                                 primitiveType,
                                                 desc,
                                                 renderPass);
    if (!*program) {
        return false;
    }

    (*program)->setData(this, primProc, pipeline);

    (*program)->bind(this, fCurrentCmdBuffer);
    return true;
}

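// Records draws for the given meshes: transitions the color (and, if used, stencil)
// attachments into attachment layouts, begins the render pass, and emits one draw per
// non-instanced sub-mesh, rebuilding the program whenever the primitive type changes.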
void GrVkGpu::onDraw(const GrPipeline& pipeline,
                     const GrPrimitiveProcessor& primProc,
                     const GrMesh* meshes,
                     int meshCount) {
    if (!meshCount) {
        return;
    }
    GrRenderTarget* rt = pipeline.getRenderTarget();
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
    const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
    SkASSERT(renderPass);

    GrVkProgram* program = nullptr;
    GrPrimitiveType primitiveType = meshes[0].primitiveType();
    if (!this->prepareDrawState(pipeline, primProc, primitiveType, *renderPass, &program)) {
        return;
    }

    // Change layout of our render target so it can be used as the color attachment
    VkImageLayout layout = vkRT->currentLayout();
    // Our color attachment is purely a destination and won't be read so don't need to flush or
    // invalidate any caches
    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    vkRT->setImageLayout(this,
                         VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                         srcAccessMask,
                         dstAccessMask,
                         srcStageMask,
                         dstStageMask,
                         false);

    // If we are using a stencil attachment we also need to update its layout
    if (!pipeline.getStencil().isDisabled()) {
        GrStencilAttachment* stencil = vkRT->renderTargetPriv().getStencilAttachment();
        GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
        VkImageLayout origDstLayout = vkStencil->currentLayout();
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
        VkAccessFlags dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
        VkPipelineStageFlags srcStageMask =
            GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
        vkStencil->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                                  srcAccessMask,
                                  dstAccessMask,
                                  srcStageMask,
                                  dstStageMask,
                                  false);
    }

    fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

    for (int i = 0; i < meshCount; ++i) {
        if (GrXferBarrierType barrierType = pipeline.xferBarrierType(*this->caps())) {
            this->xferBarrier(pipeline.getRenderTarget(), barrierType);
        }

        const GrMesh& mesh = meshes[i];
        GrMesh::Iterator iter;
        const GrNonInstancedMesh* nonIdxMesh = iter.init(mesh);
        do {
            if (nonIdxMesh->primitiveType() != primitiveType) {
                // Technically we don't have to call this here (since there is a safety check in
                // program:setData) but this will allow for quicker freeing of resources if the
                // program sits in a cache for a while.
                program->freeTempResources(this);
                // This free will go away once we setup a program cache, and then the cache will be
                // responsible for calling freeGpuResources.
                program->freeGPUResources(this);
                program->unref();
                SkDEBUGCODE(program = nullptr);
                primitiveType = nonIdxMesh->primitiveType();
                if (!this->prepareDrawState(pipeline, primProc, primitiveType, *renderPass,
                                            &program)) {
                    return;
                }
            }
            SkASSERT(program);
            this->bindGeometry(primProc, *nonIdxMesh);

            if (nonIdxMesh->isIndexed()) {
                fCurrentCmdBuffer->drawIndexed(this,
                                               nonIdxMesh->indexCount(),
                                               1,
                                               nonIdxMesh->startIndex(),
                                               nonIdxMesh->startVertex(),
                                               0);
            } else {
                fCurrentCmdBuffer->draw(this,
                                        nonIdxMesh->vertexCount(),
                                        1,
                                        nonIdxMesh->startVertex(),
                                        0);
            }

            fStats.incNumDraws();
        } while ((nonIdxMesh = iter.next()));
    }

    fCurrentCmdBuffer->endRenderPass(this);

    // Technically we don't have to call this here (since there is a safety check in
    // program:setData) but this will allow for quicker freeing of resources if the program sits
    // in a cache for a while.
    program->freeTempResources(this);
    // This free will go away once we setup a program cache, and then the cache will be responsible
    // for calling freeGpuResources.
    program->freeGPUResources(this);
    program->unref();

#if SWAP_PER_DRAW
    glFlush();
#if defined(SK_BUILD_FOR_MAC)
    aglSwapBuffers(aglGetCurrentContext());
    int set_a_break_pt_here = 9;
    aglSwapBuffers(aglGetCurrentContext());
#elif defined(SK_BUILD_FOR_WIN32)
    SwapBuf();
    int set_a_break_pt_here = 9;
    SwapBuf();
#endif
#endif
}