/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkGpu.h"

#include "GrContextOptions.h"
#include "GrGeometryProcessor.h"
#include "GrGpuResourceCacheAccess.h"
#include "GrPipeline.h"
#include "GrRenderTargetPriv.h"
#include "GrSurfacePriv.h"
#include "GrTexturePriv.h"
#include "GrVertices.h"

#include "GrVkCommandBuffer.h"
#include "GrVkImage.h"
#include "GrVkIndexBuffer.h"
#include "GrVkMemory.h"
#include "GrVkPipeline.h"
#include "GrVkProgram.h"
#include "GrVkProgramBuilder.h"
#include "GrVkProgramDesc.h"
#include "GrVkRenderPass.h"
#include "GrVkResourceProvider.h"
#include "GrVkTexture.h"
#include "GrVkTextureRenderTarget.h"
#include "GrVkTransferBuffer.h"
#include "GrVkVertexBuffer.h"

#include "SkConfig8888.h"

#include "vk/GrVkInterface.h"

#define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
#define VK_CALL_RET(RET, X) GR_VK_CALL_RET(this->vkInterface(), RET, X)
#define VK_CALL_ERRCHECK(X) GR_VK_CALL_ERRCHECK(this->vkInterface(), X)
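
// These macros route Vulkan calls through the GrVkInterface function table
// rather than the static entry points, capturing the VkResult where noted
// (VK_CALL_RET) or checking it for errors (VK_CALL_ERRCHECK).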

////////////////////////////////////////////////////////////////////////////////
// Stuff used to set up a GrVkGpu secretly for now.

// For now the VkGpuCreate is using the same signature as GL. This is mostly for ease of
// hiding this code from official Skia. In the end the VkGpuCreate will not take a GrBackendContext
// and most likely would take an optional device and queues to use.
GrGpu* vk_gpu_create(GrBackendContext backendContext, const GrContextOptions& options,
                     GrContext* context) {
    // Below is Vulkan setup code that normally would be done by a client, but we will do it here
    // for now for testing purposes.
    VkPhysicalDevice physDev;
    VkDevice device;
    VkInstance inst;
    VkResult err;

    const VkApplicationInfo app_info = {
        VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
        nullptr,                            // pNext
        "vktest",                           // pApplicationName
        0,                                  // applicationVersion
        "vktest",                           // pEngineName
        0,                                  // engineVersion
        VK_API_VERSION,                     // apiVersion
    };
    const VkInstanceCreateInfo instance_create = {
        VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
        nullptr,                                // pNext
        0,                                      // flags
        &app_info,                              // pApplicationInfo
        0,                                      // enabledLayerNameCount
        nullptr,                                // ppEnabledLayerNames
        0,                                      // enabledExtensionNameCount
        nullptr,                                // ppEnabledExtensionNames
    };
    err = vkCreateInstance(&instance_create, nullptr, &inst);
    if (err < 0) {
        SkDebugf("vkCreateInstance failed: %d\n", err);
        SkFAIL("failing");
    }

    uint32_t gpuCount;
    err = vkEnumeratePhysicalDevices(inst, &gpuCount, nullptr);
    if (err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        SkFAIL("failing");
    }
    SkASSERT(gpuCount > 0);
    // Just returning the first physical device instead of getting the whole array.
    gpuCount = 1;
    err = vkEnumeratePhysicalDevices(inst, &gpuCount, &physDev);
    if (err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        SkFAIL("failing");
    }

    // query to get the initial queue props size
    uint32_t queueCount;
    vkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr);
    SkASSERT(queueCount >= 1);

    SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties));
    // now get the actual queue props
    VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get();

    vkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps);

    // iterate to find the graphics queue
    uint32_t graphicsQueueIndex = -1;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphicsQueueIndex = i;
            break;
        }
    }
    SkASSERT(graphicsQueueIndex < queueCount);

    float queuePriorities[1] = { 0.0 };
    const VkDeviceQueueCreateInfo queueInfo = {
        VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
        nullptr,                                    // pNext
        0,                                          // VkDeviceQueueCreateFlags
        graphicsQueueIndex,                         // queueFamilyIndex
        1,                                          // queueCount
        queuePriorities,                            // pQueuePriorities
    };
    const VkDeviceCreateInfo deviceInfo = {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
        nullptr,                              // pNext
        0,                                    // VkDeviceCreateFlags
        1,                                    // queueCreateInfoCount
        &queueInfo,                           // pQueueCreateInfos
        0,                                    // layerCount
        nullptr,                              // ppEnabledLayerNames
        0,                                    // extensionCount
        nullptr,                              // ppEnabledExtensionNames
        nullptr                               // ppEnabledFeatures
    };

    err = vkCreateDevice(physDev, &deviceInfo, nullptr, &device);
    if (err) {
        SkDebugf("CreateDevice failed: %d\n", err);
        SkFAIL("failing");
    }

    VkQueue queue;
    vkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);

    const VkCommandPoolCreateInfo cmdPoolInfo = {
        VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType
        nullptr,                                    // pNext
        0,                                          // CmdPoolCreateFlags
        graphicsQueueIndex,                         // queueFamilyIndex
    };

    VkCommandPool cmdPool;
    err = vkCreateCommandPool(device, &cmdPoolInfo, nullptr, &cmdPool);
    if (err) {
        SkDebugf("CreateCommandPool failed: %d\n", err);
        SkFAIL("failing");
    }

    return new GrVkGpu(context, options, physDev, device, queue, cmdPool, inst);
}
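
// A minimal sketch of how a test harness might use this entry point; the
// GrBackendContext argument is ignored by the implementation above, so a
// placeholder value suffices:
//
//     GrGpu* gpu = vk_gpu_create(0, GrContextOptions(), context);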

////////////////////////////////////////////////////////////////////////////////

GrVkGpu::GrVkGpu(GrContext* context, const GrContextOptions& options,
                 VkPhysicalDevice physDev, VkDevice device, VkQueue queue, VkCommandPool cmdPool,
                 VkInstance inst)
    : INHERITED(context)
    , fDevice(device)
    , fQueue(queue)
    , fCmdPool(cmdPool)
    , fResourceProvider(this)
    , fVkInstance(inst) {
    fInterface.reset(GrVkCreateInterface(fVkInstance));
    fCompiler = shaderc_compiler_initialize();

    fVkCaps.reset(new GrVkCaps(options, fInterface, physDev));
    fCaps.reset(SkRef(fVkCaps.get()));

    fResourceProvider.init();

    fCurrentCmdBuffer = fResourceProvider.createCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->begin(this);
    VK_CALL(GetPhysicalDeviceMemoryProperties(physDev, &fPhysDevMemProps));
}

GrVkGpu::~GrVkGpu() {
    shaderc_compiler_release(fCompiler);
    fCurrentCmdBuffer->end(this);
    fCurrentCmdBuffer->unref(this);

    // wait for all commands to finish
    VK_CALL(QueueWaitIdle(fQueue));

    // must call this just before we destroy the VkDevice
    fResourceProvider.destroyResources();

    VK_CALL(DestroyCommandPool(fDevice, fCmdPool, nullptr));
    VK_CALL(DestroyDevice(fDevice, nullptr));
    VK_CALL(DestroyInstance(fVkInstance, nullptr));
}

///////////////////////////////////////////////////////////////////////////////

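// Ends the active command buffer, submits it to fQueue (waiting for the
// submission to complete when sync is kForce_SyncQueue), lets the resource
// provider reclaim any command buffers that have finished executing, and then
// begins a fresh command buffer for subsequent recording.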
void GrVkGpu::submitCommandBuffer(SyncQueue sync) {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->end(this);

    fCurrentCmdBuffer->submitToQueue(this, fQueue, sync);
    fResourceProvider.checkCommandBuffers();

    // Release old command buffer and create a new one
    fCurrentCmdBuffer->unref(this);
    fCurrentCmdBuffer = fResourceProvider.createCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);

    fCurrentCmdBuffer->begin(this);
}

///////////////////////////////////////////////////////////////////////////////
GrVertexBuffer* GrVkGpu::onCreateVertexBuffer(size_t size, bool dynamic) {
    return GrVkVertexBuffer::Create(this, size, dynamic);
}

GrIndexBuffer* GrVkGpu::onCreateIndexBuffer(size_t size, bool dynamic) {
    return GrVkIndexBuffer::Create(this, size, dynamic);
}

GrTransferBuffer* GrVkGpu::onCreateTransferBuffer(size_t size, TransferType type) {
    GrVkBuffer::Type bufferType = kCpuToGpu_TransferType == type ? GrVkBuffer::kCopyRead_Type
                                                                 : GrVkBuffer::kCopyWrite_Type;
    return GrVkTransferBuffer::Create(this, size, bufferType);
}

////////////////////////////////////////////////////////////////////////////////
bool GrVkGpu::onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
                                   GrPixelConfig srcConfig, DrawPreference* drawPreference,
                                   WritePixelTempDrawInfo* tempDrawInfo) {
    if (kIndex_8_GrPixelConfig == srcConfig || GrPixelConfigIsCompressed(dstSurface->config())) {
        return false;
    }

    // Currently we don't handle draws, so if the caller wants/needs to do a draw we need to fail
    if (kNoDraw_DrawPreference != *drawPreference) {
        return false;
    }

    if (dstSurface->config() != srcConfig) {
        // TODO: This should fall back to drawing or copying to change config of dstSurface to
        // match that of srcConfig.
        return false;
    }

    return true;
}

bool GrVkGpu::onWritePixels(GrSurface* surface,
                            int left, int top, int width, int height,
                            GrPixelConfig config,
                            const SkTArray<GrMipLevel>& texels) {
    GrVkTexture* vkTex = static_cast<GrVkTexture*>(surface->asTexture());
    if (!vkTex) {
        return false;
    }

    // TODO: We're ignoring MIP levels here.
    if (texels.empty() || !texels.begin()->fPixels) {
        return false;
    }

    // We assume Vulkan doesn't do sRGB <-> linear conversions when reading and writing pixels.
    if (GrPixelConfigIsSRGB(surface->config()) != GrPixelConfigIsSRGB(config)) {
        return false;
    }

    bool success = false;
    if (GrPixelConfigIsCompressed(vkTex->desc().fConfig)) {
        // We check that config == desc.fConfig in GrGpu::getWritePixelsInfo()
        SkASSERT(config == vkTex->desc().fConfig);
        // TODO: add compressed texture support
        // delete the following two lines and uncomment the two after that when ready
        vkTex->unref();
        return false;
        //success = this->uploadCompressedTexData(vkTex->desc(), buffer, false, left, top, width,
        //                                        height);
    } else {
        bool linearTiling = vkTex->isLinearTiled();
        if (linearTiling && VK_IMAGE_LAYOUT_PREINITIALIZED != vkTex->currentLayout()) {
            // Need to change the layout to general in order to perform a host write
            VkImageLayout layout = vkTex->currentLayout();
            VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
            VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_HOST_BIT;
            VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
            VkAccessFlags dstAccessMask = VK_ACCESS_HOST_WRITE_BIT;
            vkTex->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_GENERAL,
                                  srcAccessMask,
                                  dstAccessMask,
                                  srcStageMask,
                                  dstStageMask,
                                  false);
        }
        success = this->uploadTexData(vkTex, left, top, width, height, config,
                                      texels.begin()->fPixels, texels.begin()->fRowBytes);
    }

    if (success) {
        vkTex->texturePriv().dirtyMipMaps(true);
        return true;
    }

    return false;
}

bool GrVkGpu::uploadTexData(GrVkTexture* tex,
                            int left, int top, int width, int height,
                            GrPixelConfig dataConfig,
                            const void* data,
                            size_t rowBytes) {
    SkASSERT(data);

    // If we're uploading compressed data then we should be using uploadCompressedTexData
    SkASSERT(!GrPixelConfigIsCompressed(dataConfig));

    bool linearTiling = tex->isLinearTiled();

    size_t bpp = GrBytesPerPixel(dataConfig);

    const GrSurfaceDesc& desc = tex->desc();

    if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
                                               &width, &height, &data, &rowBytes)) {
        return false;
    }
    size_t trimRowBytes = width * bpp;

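    // Two upload paths: a linear-tiled image can be written directly through
    // vkMapMemory using the driver-reported rowPitch, while an optimal-tiled
    // image is staged through a host-visible transfer buffer and then copied
    // with vkCmdCopyBufferToImage.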
    if (linearTiling) {
        SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == tex->currentLayout() ||
                 VK_IMAGE_LAYOUT_GENERAL == tex->currentLayout());
        const VkImageSubresource subres = {
            VK_IMAGE_ASPECT_COLOR_BIT,
            0,  // mipLevel
            0,  // arraySlice
        };
        VkSubresourceLayout layout;
        VkResult err;

        const GrVkInterface* interface = this->vkInterface();

        GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
                                                        tex->textureImage(),
                                                        &subres,
                                                        &layout));

        int texTop = kBottomLeft_GrSurfaceOrigin == desc.fOrigin ? tex->height() - top - height
                                                                 : top;
        VkDeviceSize offset = texTop*layout.rowPitch + left*bpp;
        VkDeviceSize size = height*layout.rowPitch;
        void* mapPtr;
        err = GR_VK_CALL(interface, MapMemory(fDevice, tex->textureMemory(), offset, size, 0,
                                              &mapPtr));
        if (err) {
            return false;
        }

        if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
            // copy into buffer by rows
            const char* srcRow = reinterpret_cast<const char*>(data);
            char* dstRow = reinterpret_cast<char*>(mapPtr)+(height - 1)*layout.rowPitch;
            for (int y = 0; y < height; y++) {
                memcpy(dstRow, srcRow, trimRowBytes);
                srcRow += rowBytes;
                dstRow -= layout.rowPitch;
            }
        } else {
            // If there is no padding on the src (rowBytes) or dst (layout.rowPitch) we can memcpy
            if (trimRowBytes == rowBytes && trimRowBytes == layout.rowPitch) {
                memcpy(mapPtr, data, trimRowBytes * height);
            } else {
                SkRectMemcpy(mapPtr, layout.rowPitch, data, rowBytes, trimRowBytes, height);
            }
        }

        GR_VK_CALL(interface, UnmapMemory(fDevice, tex->textureMemory()));
    } else {
        GrVkTransferBuffer* transferBuffer =
            GrVkTransferBuffer::Create(this, trimRowBytes * height, GrVkBuffer::kCopyRead_Type);

        void* mapPtr = transferBuffer->map();

        if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
            // copy into buffer by rows
            const char* srcRow = reinterpret_cast<const char*>(data);
            char* dstRow = reinterpret_cast<char*>(mapPtr)+(height - 1)*trimRowBytes;
            for (int y = 0; y < height; y++) {
                memcpy(dstRow, srcRow, trimRowBytes);
                srcRow += rowBytes;
                dstRow -= trimRowBytes;
            }
        } else {
            // If there is no padding on the src data rows, we can do a single memcpy
            if (trimRowBytes == rowBytes) {
                memcpy(mapPtr, data, trimRowBytes * height);
            } else {
                SkRectMemcpy(mapPtr, trimRowBytes, data, rowBytes, trimRowBytes, height);
            }
        }

        transferBuffer->unmap();

        // make sure the unmap has finished
        transferBuffer->addMemoryBarrier(this,
                                         VK_ACCESS_HOST_WRITE_BIT,
                                         VK_ACCESS_TRANSFER_READ_BIT,
                                         VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
                                         VK_PIPELINE_STAGE_TRANSFER_BIT,
                                         false);

        // Set up copy region
        bool flipY = kBottomLeft_GrSurfaceOrigin == tex->origin();
        VkOffset3D offset = {
            left,
            flipY ? tex->height() - top - height : top,
            0
        };

        VkBufferImageCopy region;
        memset(&region, 0, sizeof(VkBufferImageCopy));
        region.bufferOffset = 0;
        region.bufferRowLength = width;
        region.bufferImageHeight = height;
        region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
        region.imageOffset = offset;
        region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

        // Change layout of our target so it can be copied to
        VkImageLayout layout = tex->currentLayout();
        VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
        VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
        tex->setImageLayout(this,
                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                            srcAccessMask,
                            dstAccessMask,
                            srcStageMask,
                            dstStageMask,
                            false);

        // Copy the buffer to the image
        fCurrentCmdBuffer->copyBufferToImage(this,
                                             transferBuffer,
                                             tex,
                                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                             1,
                                             &region);

        // Submit the current command buffer to the Queue
        this->submitCommandBuffer(kSkip_SyncQueue);

        transferBuffer->unref();
    }

    return true;
}

////////////////////////////////////////////////////////////////////////////////
GrTexture* GrVkGpu::onCreateTexture(const GrSurfaceDesc& desc, GrGpuResource::LifeCycle lifeCycle,
                                    const SkTArray<GrMipLevel>& texels) {
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);

    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(desc.fConfig, &pixelFormat)) {
        return nullptr;
    }

    if (!fVkCaps->isConfigTexturable(desc.fConfig)) {
        return nullptr;
    }

    bool linearTiling = false;
    if (SkToBool(desc.fFlags & kZeroCopy_GrSurfaceFlag)) {
        if (fVkCaps->isConfigTexurableLinearly(desc.fConfig) &&
            (!renderTarget || fVkCaps->isConfigRenderableLinearly(desc.fConfig, false))) {
            linearTiling = true;
        } else {
            return nullptr;
        }
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    if (renderTarget) {
        usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    }

    // For now we will set the VK_IMAGE_USAGE_TRANSFER_DST_BIT and
    // VK_IMAGE_USAGE_TRANSFER_SRC_BIT on every texture since we do not know whether or not we
    // will be using this texture in some copy or not. Also this assumes, as is the current case,
    // that all render targets in Vulkan are also textures. If we change this practice of setting
    // both bits, we must make sure to set the destination bit if we are uploading srcData to the
    // texture.
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VkFlags memProps = (!texels.empty() && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
                                                           VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

    // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is
    // requested, this ImageDesc describes the resolved texture. Therefore we always have samples
    // set to 1.
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = pixelFormat;
    imageDesc.fWidth = desc.fWidth;
    imageDesc.fHeight = desc.fHeight;
    imageDesc.fLevels = 1;
    imageDesc.fSamples = 1;
    imageDesc.fImageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = usageFlags;
    imageDesc.fMemProps = memProps;

    GrVkTexture* tex;
    if (renderTarget) {
        tex = GrVkTextureRenderTarget::CreateNewTextureRenderTarget(this, desc, lifeCycle,
                                                                    imageDesc);
#if 0
        // This clear can be included to fix the warning described in https://bugs.skia.org/5045
        // Obviously we do not want to be clearing needlessly every time we create a render target.
        SkIRect rect = SkIRect::MakeWH(tex->width(), tex->height());
        this->clear(rect, GrColor_TRANSPARENT_BLACK, tex->asRenderTarget());
#endif
    } else {
        tex = GrVkTexture::CreateNewTexture(this, desc, lifeCycle, imageDesc);
    }

    if (!tex) {
        return nullptr;
    }

    // TODO: We're ignoring MIP levels here.
    if (!texels.empty() && texels.begin()->fPixels) {
        if (!this->uploadTexData(tex, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
                                 texels.begin()->fPixels, texels.begin()->fRowBytes)) {
            tex->unref();
            return nullptr;
        }
    }

    return tex;
}

////////////////////////////////////////////////////////////////////////////////

static GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin) {
    // By default, all textures in Vk use TopLeft
    if (kDefault_GrSurfaceOrigin == origin) {
        return kTopLeft_GrSurfaceOrigin;
    } else {
        return origin;
    }
}

GrTexture* GrVkGpu::onWrapBackendTexture(const GrBackendTextureDesc& desc,
                                         GrWrapOwnership ownership) {
    VkFormat format;
    if (!GrPixelConfigToVkFormat(desc.fConfig, &format)) {
        return nullptr;
    }

    if (0 == desc.fTextureHandle) {
        return nullptr;
    }

    int maxSize = this->caps()->maxTextureSize();
    if (desc.fWidth > maxSize || desc.fHeight > maxSize) {
        return nullptr;
    }

    // TODO: determine what format Chrome will actually send us and turn it into a Resource
    GrVkImage::Resource* imageRsrc = reinterpret_cast<GrVkImage::Resource*>(desc.fTextureHandle);

    GrGpuResource::LifeCycle lifeCycle;
    switch (ownership) {
        case kAdopt_GrWrapOwnership:
            lifeCycle = GrGpuResource::kAdopted_LifeCycle;
            break;
        case kBorrow_GrWrapOwnership:
            lifeCycle = GrGpuResource::kBorrowed_LifeCycle;
            break;
    }

    GrSurfaceDesc surfDesc;
    // next line relies on GrBackendTextureDesc's flags matching GrTexture's
    surfDesc.fFlags = (GrSurfaceFlags)desc.fFlags;
    surfDesc.fWidth = desc.fWidth;
    surfDesc.fHeight = desc.fHeight;
    surfDesc.fConfig = desc.fConfig;
    surfDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount());
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag);
    // In GL, Chrome assumes all textures are BottomLeft
    // In VK, we don't have this restriction
    surfDesc.fOrigin = resolve_origin(desc.fOrigin);

    GrVkTexture* texture = nullptr;
    if (renderTarget) {
        texture = GrVkTextureRenderTarget::CreateWrappedTextureRenderTarget(this, surfDesc,
                                                                            lifeCycle, format,
                                                                            imageRsrc);
    } else {
        texture = GrVkTexture::CreateWrappedTexture(this, surfDesc, lifeCycle, format, imageRsrc);
    }
    if (!texture) {
        return nullptr;
    }

    return texture;
}

GrRenderTarget* GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& wrapDesc,
                                                   GrWrapOwnership ownership) {

    // TODO: determine what format Chrome will actually send us and turn it into a Resource
    GrVkImage::Resource* imageRsrc =
        reinterpret_cast<GrVkImage::Resource*>(wrapDesc.fRenderTargetHandle);

    GrGpuResource::LifeCycle lifeCycle;
    switch (ownership) {
        case kAdopt_GrWrapOwnership:
            lifeCycle = GrGpuResource::kAdopted_LifeCycle;
            break;
        case kBorrow_GrWrapOwnership:
            lifeCycle = GrGpuResource::kBorrowed_LifeCycle;
            break;
    }

    GrSurfaceDesc desc;
    desc.fConfig = wrapDesc.fConfig;
    desc.fFlags = kCheckAllocation_GrSurfaceFlag;
    desc.fWidth = wrapDesc.fWidth;
    desc.fHeight = wrapDesc.fHeight;
    desc.fSampleCnt = SkTMin(wrapDesc.fSampleCnt, this->caps()->maxSampleCount());

    desc.fOrigin = resolve_origin(wrapDesc.fOrigin);

    GrVkRenderTarget* tgt = GrVkRenderTarget::CreateWrappedRenderTarget(this, desc,
                                                                        lifeCycle, imageRsrc);
    if (tgt && wrapDesc.fStencilBits) {
        if (!this->createStencilAttachmentForRenderTarget(tgt, desc.fWidth, desc.fHeight)) {
            tgt->unref();
            return nullptr;
        }
    }
    return tgt;
}

////////////////////////////////////////////////////////////////////////////////

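// The vertex and index buffers bound below are filled by host writes, so we
// first add barriers that make those writes visible to reads in the
// vertex-input stage.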
void GrVkGpu::bindGeometry(const GrPrimitiveProcessor& primProc,
                           const GrNonInstancedVertices& vertices) {
    GrVkVertexBuffer* vbuf;
    vbuf = (GrVkVertexBuffer*)vertices.vertexBuffer();
    SkASSERT(vbuf);
    SkASSERT(!vbuf->isMapped());

    vbuf->addMemoryBarrier(this,
                           VK_ACCESS_HOST_WRITE_BIT,
                           VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
                           VK_PIPELINE_STAGE_HOST_BIT,
                           VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                           false);

    fCurrentCmdBuffer->bindVertexBuffer(this, vbuf);

    if (vertices.isIndexed()) {
        GrVkIndexBuffer* ibuf = (GrVkIndexBuffer*)vertices.indexBuffer();
        SkASSERT(ibuf);
        SkASSERT(!ibuf->isMapped());

        ibuf->addMemoryBarrier(this,
                               VK_ACCESS_HOST_WRITE_BIT,
                               VK_ACCESS_INDEX_READ_BIT,
                               VK_PIPELINE_STAGE_HOST_BIT,
                               VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                               false);

        fCurrentCmdBuffer->bindIndexBuffer(this, ibuf);
    }
}

void GrVkGpu::buildProgramDesc(GrProgramDesc* desc,
                               const GrPrimitiveProcessor& primProc,
                               const GrPipeline& pipeline) const {
    if (!GrVkProgramDescBuilder::Build(desc, primProc, pipeline, *this->vkCaps().glslCaps())) {
        SkDEBUGFAIL("Failed to generate Vk program descriptor");
    }
}

////////////////////////////////////////////////////////////////////////////////

GrStencilAttachment* GrVkGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt,
                                                                     int width,
                                                                     int height) {
    SkASSERT(rt->asTexture());
    SkASSERT(width >= rt->width());
    SkASSERT(height >= rt->height());

    int samples = rt->numStencilSamples();

    SkASSERT(this->vkCaps().stencilFormats().count());
    const GrVkCaps::StencilFormat& sFmt = this->vkCaps().stencilFormats()[0];

    GrVkStencilAttachment* stencil(GrVkStencilAttachment::Create(this,
                                                                 GrGpuResource::kCached_LifeCycle,
                                                                 width,
                                                                 height,
                                                                 samples,
                                                                 sFmt));
    fStats.incStencilAttachmentCreates();
    return stencil;
}

////////////////////////////////////////////////////////////////////////////////

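// Test-only path: creates a bare GrVkImage::Resource and hands it back as a
// GrBackendObject handle. Initial srcData is only supported for configs that
// can be linearly tiled; copying into an optimal-tiled image is not yet
// implemented here.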
GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, int h,
                                                         GrPixelConfig config) {

    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
        return 0;
    }

    bool linearTiling = false;
    if (!fVkCaps->isConfigTexturable(config)) {
        return 0;
    }

    if (fVkCaps->isConfigTexurableLinearly(config)) {
        linearTiling = true;
    }

    // Currently this is not supported since it requires a copy which has not yet been implemented.
    if (srcData && !linearTiling) {
        return 0;
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VkFlags memProps = (srcData && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
                                                   VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

    // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is
    // requested, this ImageDesc describes the resolved texture. Therefore we always have samples
    // set to 1.
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = pixelFormat;
    imageDesc.fWidth = w;
    imageDesc.fHeight = h;
    imageDesc.fLevels = 1;
    imageDesc.fSamples = 1;
    imageDesc.fImageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = usageFlags;
    imageDesc.fMemProps = memProps;

    const GrVkImage::Resource* imageRsrc = GrVkImage::CreateResource(this, imageDesc);
    if (!imageRsrc) {
        return 0;
    }

    if (srcData) {
        if (linearTiling) {
            const VkImageSubresource subres = {
                VK_IMAGE_ASPECT_COLOR_BIT,
                0,  // mipLevel
                0,  // arraySlice
            };
            VkSubresourceLayout layout;
            VkResult err;

            const GrVkInterface* interface = this->vkInterface();

            GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
                                                            imageRsrc->fImage,
                                                            &subres,
                                                            &layout));

            void* mapPtr;
            err = GR_VK_CALL(interface, MapMemory(fDevice,
                                                  imageRsrc->fAlloc,
                                                  0,
                                                  layout.rowPitch * h,
                                                  0,
                                                  &mapPtr));
            if (err) {
                imageRsrc->unref(this);
                return 0;
            }

            size_t bpp = GrBytesPerPixel(config);
            size_t rowCopyBytes = bpp * w;
            // If there is no padding on dst (layout.rowPitch) we can do a single memcpy.
            // This assumes the srcData comes in with no padding.
            if (rowCopyBytes == layout.rowPitch) {
                memcpy(mapPtr, srcData, rowCopyBytes * h);
            } else {
                SkRectMemcpy(mapPtr, layout.rowPitch, srcData, rowCopyBytes, rowCopyBytes, h);
            }
            GR_VK_CALL(interface, UnmapMemory(fDevice, imageRsrc->fAlloc));
        } else {
            // TODO: Add support for copying to optimal tiling
            SkASSERT(false);
        }
    }

    return (GrBackendObject)imageRsrc;
}

bool GrVkGpu::isTestingOnlyBackendTexture(GrBackendObject id) const {
    GrVkImage::Resource* backend = reinterpret_cast<GrVkImage::Resource*>(id);

    if (backend && backend->fImage && backend->fAlloc) {
        VkMemoryRequirements req;
        memset(&req, 0, sizeof(req));
        GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
                                                                   backend->fImage,
                                                                   &req));
        // TODO: find a better check
        // This will probably fail with a different driver
        return (req.size > 0) && (req.size <= 8192 * 8192);
    }

    return false;
}

void GrVkGpu::deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandon) {
    GrVkImage::Resource* backend = reinterpret_cast<GrVkImage::Resource*>(id);

    if (backend) {
        if (!abandon) {
            backend->unref(this);
        } else {
            backend->unrefAndAbandon();
        }
    }
}

////////////////////////////////////////////////////////////////////////////////

void GrVkGpu::addMemoryBarrier(VkPipelineStageFlags srcStageMask,
                               VkPipelineStageFlags dstStageMask,
                               bool byRegion,
                               VkMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                     VkPipelineStageFlags dstStageMask,
                                     bool byRegion,
                                     VkBufferMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kBufferMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::addImageMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                    VkPipelineStageFlags dstStageMask,
                                    bool byRegion,
                                    VkImageMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kImageMemory_BarrierType,
                                       barrier);
}
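
// The three helpers above differ only in which barrier struct they forward to
// GrVkCommandBuffer::pipelineBarrier, matching Vulkan's three barrier kinds:
// global memory, buffer-range, and image (with layout transition) barriers.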

void GrVkGpu::finishDrawTarget() {
    // Submit the current command buffer to the Queue
    this->submitCommandBuffer(kSkip_SyncQueue);
}

void GrVkGpu::clearStencil(GrRenderTarget* target) {
    if (nullptr == target) {
        return;
    }
    GrStencilAttachment* stencil = target->renderTargetPriv().getStencilAttachment();
    GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));

    VkImageLayout origDstLayout = vkStencil->currentLayout();

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    vkStencil->setImageLayout(this,
                              VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                              srcAccessMask,
                              dstAccessMask,
                              srcStageMask,
                              dstStageMask,
                              false);

    VkImageSubresourceRange subRange;
    memset(&subRange, 0, sizeof(VkImageSubresourceRange));
    subRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    subRange.baseMipLevel = 0;
    subRange.levelCount = 1;
    subRange.baseArrayLayer = 0;
    subRange.layerCount = 1;

    // TODO: I imagine that most times we want to clear a stencil it will be at the beginning of a
    // draw. Thus we should look into using the load op functions on the render pass to clear out
    // the stencil there.
    fCurrentCmdBuffer->clearDepthStencilImage(this, vkStencil, &vkStencilColor, 1, &subRange);
}

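// Writes the stencil clip bit (the attachment's top bit) within the given
// rect: the full stencil value is overwritten, so the other bits are not
// preserved. The clear is recorded with vkCmdClearAttachments inside the
// target's simple render pass.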
void GrVkGpu::onClearStencilClip(GrRenderTarget* target, const SkIRect& rect, bool insideClip) {
    SkASSERT(target);

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
    GrStencilAttachment* sb = target->renderTargetPriv().getStencilAttachment();
    GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)sb;

    // this should only be called internally when we know we have a
    // stencil buffer.
    SkASSERT(sb);
    int stencilBitCount = sb->bits();

    // The contract with the callers does not guarantee that we preserve all bits in the stencil
    // during this clear. Thus we will clear the entire stencil to the desired value.

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
    if (insideClip) {
        vkStencilColor.stencil = (1 << (stencilBitCount - 1));
    } else {
        vkStencilColor.stencil = 0;
    }

    VkImageLayout origDstLayout = vkStencil->currentLayout();
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    VkPipelineStageFlags srcStageMask =
        GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    vkStencil->setImageLayout(this,
                              VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                              srcAccessMask,
                              dstAccessMask,
                              srcStageMask,
                              dstStageMask,
                              false);

    VkClearRect clearRect;
    // Flip rect if necessary
    SkIRect vkRect = rect;

    if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) {
        vkRect.fTop = vkRT->height() - rect.fBottom;
        vkRect.fBottom = vkRT->height() - rect.fTop;
    }

    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };

    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
    SkASSERT(renderPass);
    fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

    uint32_t stencilIndex;
    SkAssertResult(renderPass->stencilAttachmentIndex(&stencilIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    attachment.colorAttachment = 0; // this value shouldn't matter
    attachment.clearValue.depthStencil = vkStencilColor;

    fCurrentCmdBuffer->clearAttachments(this, 1, &attachment, 1, &clearRect);
    fCurrentCmdBuffer->endRenderPass(this);

    return;
}

void GrVkGpu::onClear(GrRenderTarget* target, const SkIRect& rect, GrColor color) {
    // parent class should never let us get here with no RT
    SkASSERT(target);

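    // A clear covering only part of the target is recorded with
    // vkCmdClearAttachments inside a render pass; a full-surface clear falls
    // through to vkCmdClearColorImage outside of one.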
    VkClearColorValue vkColor;
    GrColorToRGBAFloat(color, vkColor.float32);

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
    VkImageLayout origDstLayout = vkRT->currentLayout();

    if (rect.width() != target->width() || rect.height() != target->height()) {
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
        VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
        VkPipelineStageFlags srcStageMask =
            GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
        vkRT->setImageLayout(this,
                             VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                             srcAccessMask,
                             dstAccessMask,
                             srcStageMask,
                             dstStageMask,
                             false);

        VkClearRect clearRect;
        // Flip rect if necessary
        SkIRect vkRect = rect;
        if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) {
            vkRect.fTop = vkRT->height() - rect.fBottom;
            vkRect.fBottom = vkRT->height() - rect.fTop;
        }
        clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
        clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
        clearRect.baseArrayLayer = 0;
        clearRect.layerCount = 1;

        const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
        SkASSERT(renderPass);
        fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

        uint32_t colorIndex;
        SkAssertResult(renderPass->colorAttachmentIndex(&colorIndex));

        VkClearAttachment attachment;
        attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        attachment.colorAttachment = colorIndex;
        attachment.clearValue.color = vkColor;

        fCurrentCmdBuffer->clearAttachments(this, 1, &attachment, 1, &clearRect);
        fCurrentCmdBuffer->endRenderPass(this);
        return;
    }

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    vkRT->setImageLayout(this,
                         VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                         srcAccessMask,
                         dstAccessMask,
                         srcStageMask,
                         dstStageMask,
                         false);

    VkImageSubresourceRange subRange;
    memset(&subRange, 0, sizeof(VkImageSubresourceRange));
    subRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    subRange.baseMipLevel = 0;
    subRange.levelCount = 1;
    subRange.baseArrayLayer = 0;
    subRange.layerCount = 1;

    // In the future we may not actually be doing this type of clear at all. If we are inside a
    // render pass or doing a non full clear then we will use CmdClearColorAttachment. The more
    // common use case will be clearing an attachment at the start of a render pass, in which case
    // we will use the clear load ops.
    fCurrentCmdBuffer->clearColorImage(this,
                                       vkRT,
                                       &vkColor,
                                       1, &subRange);
}

inline bool can_copy_image(const GrSurface* dst,
                           const GrSurface* src,
                           const GrVkGpu* gpu) {
    if (src->asTexture() &&
        dst->asTexture() &&
        src->origin() == dst->origin() &&
        src->config() == dst->config()) {
        return true;
    }

    // How does msaa play into this? If a VkTexture is multisampled, are we copying the
    // multisampled or the resolved image here?

    return false;
}

void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst,
                                     GrSurface* src,
                                     const SkIRect& srcRect,
                                     const SkIPoint& dstPoint) {
    SkASSERT(can_copy_image(dst, src, this));

    // Insert memory barriers to switch src and dst to transfer_source and transfer_dst layouts
    GrVkTexture* dstTex = static_cast<GrVkTexture*>(dst->asTexture());
    GrVkTexture* srcTex = static_cast<GrVkTexture*>(src->asTexture());

    VkImageLayout origDstLayout = dstTex->currentLayout();
    VkImageLayout origSrcLayout = srcTex->currentLayout();

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    // These flags are for flushing/invalidating caches and for the dst image it doesn't matter if
    // the cache is flushed since it is only being written to.
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    dstTex->setImageLayout(this,
                           VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                           srcAccessMask,
                           dstAccessMask,
                           srcStageMask,
                           dstStageMask,
                           false);

    srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origSrcLayout);
    dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origSrcLayout);
    dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;

    srcTex->setImageLayout(this,
                           VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                           srcAccessMask,
                           dstAccessMask,
                           srcStageMask,
                           dstStageMask,
                           false);

    // Flip rect if necessary
    SkIRect srcVkRect = srcRect;
    int32_t dstY = dstPoint.fY;

    if (kBottomLeft_GrSurfaceOrigin == src->origin()) {
        SkASSERT(kBottomLeft_GrSurfaceOrigin == dst->origin());
        srcVkRect.fTop = src->height() - srcRect.fBottom;
        srcVkRect.fBottom = src->height() - srcRect.fTop;
        dstY = dst->height() - dstPoint.fY - srcVkRect.height();
    }

    VkImageCopy copyRegion;
    memset(&copyRegion, 0, sizeof(VkImageCopy));
    copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.srcOffset = { srcVkRect.fLeft, srcVkRect.fTop, 0 };
    copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.dstOffset = { dstPoint.fX, dstY, 0 };
    // extent depth must be 1 for a copy between 2D images
    copyRegion.extent = { (uint32_t)srcVkRect.width(), (uint32_t)srcVkRect.height(), 1 };

    fCurrentCmdBuffer->copyImage(this,
                                 srcTex,
                                 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                 dstTex,
                                 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                 1,
                                 &copyRegion);
}

inline bool can_copy_as_draw(const GrSurface* dst,
                             const GrSurface* src,
                             const GrVkGpu* gpu) {
    return false;
}

void GrVkGpu::copySurfaceAsDraw(GrSurface* dst,
                                GrSurface* src,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint) {
    SkASSERT(false);
}

bool GrVkGpu::onCopySurface(GrSurface* dst,
                            GrSurface* src,
                            const SkIRect& srcRect,
                            const SkIPoint& dstPoint) {
    if (can_copy_image(dst, src, this)) {
        this->copySurfaceAsCopyImage(dst, src, srcRect, dstPoint);
        return true;
    }

    if (can_copy_as_draw(dst, src, this)) {
        this->copySurfaceAsDraw(dst, src, srcRect, dstPoint);
        return true;
    }

    return false;
}

void GrVkGpu::onGetMultisampleSpecs(GrRenderTarget* rt, const GrStencilSettings&,
                                    int* effectiveSampleCnt, SkAutoTDeleteArray<SkPoint>*) {
    // TODO: stub.
    SkASSERT(!this->caps()->sampleLocationsSupport());
    *effectiveSampleCnt = rt->desc().fSampleCnt;
}

bool GrVkGpu::onGetReadPixelsInfo(GrSurface* srcSurface, int width, int height, size_t rowBytes,
                                  GrPixelConfig readConfig, DrawPreference* drawPreference,
                                  ReadPixelTempDrawInfo* tempDrawInfo) {
    // Currently we don't handle draws, so if the caller wants/needs to do a draw we need to fail
    if (kNoDraw_DrawPreference != *drawPreference) {
        return false;
    }

    if (srcSurface->config() != readConfig) {
        // TODO: This should fall back to drawing or copying to change config of srcSurface to match
        // that of readConfig.
        return false;
    }

    return true;
}

bool GrVkGpu::onReadPixels(GrSurface* surface,
                           int left, int top, int width, int height,
                           GrPixelConfig config,
                           void* buffer,
                           size_t rowBytes) {
    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
        return false;
    }

    GrVkTexture* tgt = static_cast<GrVkTexture*>(surface->asTexture());
    if (!tgt) {
        return false;
    }

    // Change layout of our target so it can be used as the copy source
    VkImageLayout layout = tgt->currentLayout();
    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
    tgt->setImageLayout(this,
                        VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                        srcAccessMask,
                        dstAccessMask,
                        srcStageMask,
                        dstStageMask,
                        false);

    GrVkTransferBuffer* transferBuffer =
        reinterpret_cast<GrVkTransferBuffer*>(this->createTransferBuffer(rowBytes * height,
                                                                         kGpuToCpu_TransferType));

    bool flipY = kBottomLeft_GrSurfaceOrigin == surface->origin();
    VkOffset3D offset = {
        left,
        flipY ? surface->height() - top - height : top,
        0
    };

    // Copy the image to a buffer so we can map it to cpu memory
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = 0;
    region.bufferRowLength = 0;   // Forces RowLength to be imageExtent.width
    region.bufferImageHeight = 0; // Forces height to be tightly packed. Only useful for 3d images.
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = offset;
    region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

    fCurrentCmdBuffer->copyImageToBuffer(this,
                                         tgt,
                                         VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                         transferBuffer,
                                         1,
                                         &region);

    // make sure the copy to buffer has finished
    transferBuffer->addMemoryBarrier(this,
                                     VK_ACCESS_TRANSFER_WRITE_BIT,
                                     VK_ACCESS_HOST_READ_BIT,
                                     VK_PIPELINE_STAGE_TRANSFER_BIT,
                                     VK_PIPELINE_STAGE_HOST_BIT,
                                     false);

    // We need to submit the current command buffer to the Queue and make sure it finishes before
    // we can copy the data out of the buffer.
    this->submitCommandBuffer(kForce_SyncQueue);

    void* mappedMemory = transferBuffer->map();

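    // The copy above packs rows tightly (bufferRowLength = 0), so this single
    // memcpy is only correct when rowBytes equals the tightly packed row size;
    // callers are assumed to pass tight rowBytes here.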
    memcpy(buffer, mappedMemory, rowBytes*height);

    transferBuffer->unmap();
    transferBuffer->unref();

    if (flipY) {
        SkAutoSMalloc<32 * sizeof(GrColor)> scratch;
        size_t tightRowBytes = GrBytesPerPixel(config) * width;
        scratch.reset(tightRowBytes);
        void* tmpRow = scratch.get();
        // flip y in-place by rows
        const int halfY = height >> 1;
        char* top = reinterpret_cast<char*>(buffer);
        char* bottom = top + (height - 1) * rowBytes;
        for (int y = 0; y < halfY; y++) {
            memcpy(tmpRow, top, tightRowBytes);
            memcpy(top, bottom, tightRowBytes);
            memcpy(bottom, tmpRow, tightRowBytes);
            top += rowBytes;
            bottom -= rowBytes;
        }
    }

    return true;
}

void GrVkGpu::onDraw(const DrawArgs& args, const GrNonInstancedVertices& vertices) {
    GrRenderTarget* rt = args.fPipeline->getRenderTarget();
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
    const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
    SkASSERT(renderPass);

    GrVkProgram* program = GrVkProgramBuilder::CreateProgram(this, args,
                                                             vertices.primitiveType(),
                                                             *renderPass);

    if (!program) {
        return;
    }

    program->setData(this, *args.fPrimitiveProcessor, *args.fPipeline);

    fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

    program->bind(this, fCurrentCmdBuffer);

    this->bindGeometry(*args.fPrimitiveProcessor, vertices);

    // Change layout of our render target so it can be used as the color attachment
    VkImageLayout layout = vkRT->currentLayout();
    // Our color attachment is purely a destination and won't be read so don't need to flush or
    // invalidate any caches
    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    vkRT->setImageLayout(this,
                         VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                         srcAccessMask,
                         dstAccessMask,
                         srcStageMask,
                         dstStageMask,
                         false);

    // If we are using a stencil attachment we also need to update its layout
    if (!args.fPipeline->getStencil().isDisabled()) {
        GrStencilAttachment* stencil = vkRT->renderTargetPriv().getStencilAttachment();
        GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
        VkImageLayout origDstLayout = vkStencil->currentLayout();
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
        VkAccessFlags dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
        VkPipelineStageFlags srcStageMask =
            GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
        vkStencil->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                                  srcAccessMask,
                                  dstAccessMask,
                                  srcStageMask,
                                  dstStageMask,
                                  false);
    }

    if (vertices.isIndexed()) {
        fCurrentCmdBuffer->drawIndexed(this,
                                       vertices.indexCount(),
                                       1,
                                       vertices.startIndex(),
                                       vertices.startVertex(),
                                       0);
    } else {
        fCurrentCmdBuffer->draw(this, vertices.vertexCount(), 1, vertices.startVertex(), 0);
    }

    fCurrentCmdBuffer->endRenderPass(this);

    // Technically we don't have to call this here (since there is a safety check in
    // program::setData) but this will allow for quicker freeing of resources if the program sits
    // in a cache for a while.
    program->freeTempResources(this);
    // This free will go away once we set up a program cache, and then the cache will be
    // responsible for calling freeGPUResources.
    program->freeGPUResources(this);
    program->unref();

#if SWAP_PER_DRAW
    glFlush();
#if defined(SK_BUILD_FOR_MAC)
    aglSwapBuffers(aglGetCurrentContext());
    int set_a_break_pt_here = 9;
    aglSwapBuffers(aglGetCurrentContext());
#elif defined(SK_BUILD_FOR_WIN32)
    SwapBuf();
    int set_a_break_pt_here = 9;
    SwapBuf();
#endif
#endif
}