blob: 21e4ee358d57c3b04411452466f0c29e6a9afe15 [file] [log] [blame]
Greg Daniel164a9f02016-02-22 09:56:40 -05001/*
2 * Copyright 2015 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8#include "GrVkGpu.h"
9
10#include "GrContextOptions.h"
11#include "GrGeometryProcessor.h"
12#include "GrGpuResourceCacheAccess.h"
egdaniel0e1853c2016-03-17 11:35:45 -070013#include "GrMesh.h"
Greg Daniel164a9f02016-02-22 09:56:40 -050014#include "GrPipeline.h"
15#include "GrRenderTargetPriv.h"
16#include "GrSurfacePriv.h"
17#include "GrTexturePriv.h"
Greg Daniel164a9f02016-02-22 09:56:40 -050018
19#include "GrVkCommandBuffer.h"
20#include "GrVkImage.h"
21#include "GrVkIndexBuffer.h"
22#include "GrVkMemory.h"
23#include "GrVkPipeline.h"
egdaniel22281c12016-03-23 13:49:40 -070024#include "GrVkPipelineState.h"
Greg Daniel164a9f02016-02-22 09:56:40 -050025#include "GrVkRenderPass.h"
26#include "GrVkResourceProvider.h"
27#include "GrVkTexture.h"
28#include "GrVkTextureRenderTarget.h"
29#include "GrVkTransferBuffer.h"
30#include "GrVkVertexBuffer.h"
31
32#include "SkConfig8888.h"
33
34#include "vk/GrVkInterface.h"
jvanverthfd359ca2016-03-18 11:57:24 -070035#include "vk/GrVkTypes.h"
Greg Daniel164a9f02016-02-22 09:56:40 -050036
37#define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
38#define VK_CALL_RET(RET, X) GR_VK_CALL_RET(this->vkInterface(), RET, X)
39#define VK_CALL_ERRCHECK(X) GR_VK_CALL_ERRCHECK(this->vkInterface(), X)
40
jvanverthd2497f32016-03-18 12:39:05 -070041#ifdef ENABLE_VK_LAYERS
42VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(
43 VkDebugReportFlagsEXT flags,
44 VkDebugReportObjectTypeEXT objectType,
45 uint64_t object,
46 size_t location,
47 int32_t messageCode,
48 const char* pLayerPrefix,
49 const char* pMessage,
50 void* pUserData) {
51 if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
52 SkDebugf("Vulkan error [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
53 } else if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) {
54 SkDebugf("Vulkan warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
55 } else if (flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) {
56 SkDebugf("Vulkan perf warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
57 } else {
58 SkDebugf("Vulkan info/debug [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
59 }
60 return VK_FALSE;
61}
jvanverthd2497f32016-03-18 12:39:05 -070062#endif
63
jvanverth633b3562016-03-23 11:01:22 -070064GrGpu* GrVkGpu::Create(GrBackendContext backendContext, const GrContextOptions& options,
65 GrContext* context) {
66 SkAutoTUnref<const GrVkBackendContext> vkBackendContext(
67 reinterpret_cast<const GrVkBackendContext*>(backendContext));
68 if (!vkBackendContext) {
69 vkBackendContext.reset(GrVkBackendContext::Create());
70 if (!vkBackendContext) {
71 return nullptr;
Greg Daniel164a9f02016-02-22 09:56:40 -050072 }
jvanverth633b3562016-03-23 11:01:22 -070073 } else {
74 vkBackendContext->ref();
Greg Daniel164a9f02016-02-22 09:56:40 -050075 }
76
jvanverth633b3562016-03-23 11:01:22 -070077 return new GrVkGpu(context, options, vkBackendContext);
Greg Daniel164a9f02016-02-22 09:56:40 -050078}
79
80////////////////////////////////////////////////////////////////////////////////
81
// Constructs the Vulkan GPU backend. Caches the instance/device/queue handles
// from |backendCtx| and stores the context in fBackendContext. Then, in order:
// optionally registers the debug-report callback, initializes the shader
// compiler and caps, queries memory properties, creates the transient command
// pool, initializes the resource provider, and begins the first command
// buffer (left in the recording state for callers).
GrVkGpu::GrVkGpu(GrContext* context, const GrContextOptions& options,
                 const GrVkBackendContext* backendCtx)
    : INHERITED(context)
    , fVkInstance(backendCtx->fInstance)
    , fDevice(backendCtx->fDevice)
    , fQueue(backendCtx->fQueue)
    , fResourceProvider(this) {
    fBackendContext.reset(backendCtx);

#ifdef ENABLE_VK_LAYERS
    // Only hook up validation output when the debug-report extension is present.
    if (this->vkInterface()->hasInstanceExtension(VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) {
        /* Setup callback creation information */
        VkDebugReportCallbackCreateInfoEXT callbackCreateInfo;
        callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
        callbackCreateInfo.pNext = nullptr;
        // Errors, warnings, and perf warnings; info/debug spam is disabled.
        callbackCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT |
                                   VK_DEBUG_REPORT_WARNING_BIT_EXT |
                                   //VK_DEBUG_REPORT_INFORMATION_BIT_EXT |
                                   //VK_DEBUG_REPORT_DEBUG_BIT_EXT |
                                   VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
        callbackCreateInfo.pfnCallback = &DebugReportCallback;
        callbackCreateInfo.pUserData = nullptr;

        /* Register the callback */
        GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateDebugReportCallbackEXT(fVkInstance,
                            &callbackCreateInfo, nullptr, &fCallback));
    }
#endif

    // SPIR-V compiler used for shader translation; released in the destructor.
    fCompiler = shaderc_compiler_initialize();

    fVkCaps.reset(new GrVkCaps(options, this->vkInterface(), backendCtx->fPhysicalDevice));
    fCaps.reset(SkRef(fVkCaps.get()));

    VK_CALL(GetPhysicalDeviceMemoryProperties(backendCtx->fPhysicalDevice, &fPhysDevMemProps));

    // Transient pool: command buffers are short-lived and frequently reset.
    const VkCommandPoolCreateInfo cmdPoolInfo = {
        VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,  // sType
        nullptr,                                     // pNext
        VK_COMMAND_POOL_CREATE_TRANSIENT_BIT,        // CmdPoolCreateFlags
        backendCtx->fQueueFamilyIndex,               // queueFamilyIndex
    };
    GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateCommandPool(fDevice, &cmdPoolInfo, nullptr,
                                                               &fCmdPool));

    // must call this after creating the CommandPool
    fResourceProvider.init();
    fCurrentCmdBuffer = fResourceProvider.createCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->begin(this);
}
133
// Tears down in dependency order: close out and drop the active command
// buffer, drain the queue, destroy provider-owned resources (which must
// happen before the VkDevice goes away), then the command pool, the shader
// compiler, and finally the debug callback if layers are enabled.
GrVkGpu::~GrVkGpu() {
    fCurrentCmdBuffer->end(this);
    fCurrentCmdBuffer->unref(this);

    // wait for all commands to finish
    fResourceProvider.checkCommandBuffers();
    // |res| only exists in debug builds (SkDEBUGCODE); the SkASSERT below
    // compiles away in release builds, so this stays balanced.
    SkDEBUGCODE(VkResult res =) VK_CALL(QueueWaitIdle(fQueue));
    // VK_ERROR_DEVICE_LOST is acceptable when tearing down (see 4.2.4 in spec)
    SkASSERT(VK_SUCCESS == res || VK_ERROR_DEVICE_LOST == res);

    // must call this just before we destroy the VkDevice
    fResourceProvider.destroyResources();

    VK_CALL(DestroyCommandPool(fDevice, fCmdPool, nullptr));

    shaderc_compiler_release(fCompiler);

#ifdef ENABLE_VK_LAYERS
    VK_CALL(DestroyDebugReportCallbackEXT(fVkInstance, fCallback, nullptr));
#endif
}
155
156///////////////////////////////////////////////////////////////////////////////
157
// Ends the active command buffer, submits it to the queue (|sync| selects
// whether to wait for completion), and immediately starts recording a fresh
// buffer so callers can keep recording without checking state.
void GrVkGpu::submitCommandBuffer(SyncQueue sync) {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->end(this);

    fCurrentCmdBuffer->submitToQueue(this, fQueue, sync);
    // Let the resource provider reclaim any buffers that have finished.
    fResourceProvider.checkCommandBuffers();

    // Release old command buffer and create a new one
    fCurrentCmdBuffer->unref(this);
    fCurrentCmdBuffer = fResourceProvider.createCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);

    fCurrentCmdBuffer->begin(this);
}
172
173///////////////////////////////////////////////////////////////////////////////
174GrVertexBuffer* GrVkGpu::onCreateVertexBuffer(size_t size, bool dynamic) {
175 return GrVkVertexBuffer::Create(this, size, dynamic);
176}
177
178GrIndexBuffer* GrVkGpu::onCreateIndexBuffer(size_t size, bool dynamic) {
179 return GrVkIndexBuffer::Create(this, size, dynamic);
180}
181
182GrTransferBuffer* GrVkGpu::onCreateTransferBuffer(size_t size, TransferType type) {
183 GrVkBuffer::Type bufferType = kCpuToGpu_TransferType ? GrVkBuffer::kCopyRead_Type
184 : GrVkBuffer::kCopyWrite_Type;
185 return GrVkTransferBuffer::Create(this, size, bufferType);
186}
187
188////////////////////////////////////////////////////////////////////////////////
189bool GrVkGpu::onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
190 GrPixelConfig srcConfig, DrawPreference* drawPreference,
191 WritePixelTempDrawInfo* tempDrawInfo) {
192 if (kIndex_8_GrPixelConfig == srcConfig || GrPixelConfigIsCompressed(dstSurface->config())) {
193 return false;
194 }
195
196 // Currently we don't handle draws, so if the caller wants/needs to do a draw we need to fail
197 if (kNoDraw_DrawPreference != *drawPreference) {
198 return false;
199 }
200
201 if (dstSurface->config() != srcConfig) {
202 // TODO: This should fall back to drawing or copying to change config of dstSurface to
203 // match that of srcConfig.
204 return false;
205 }
206
207 return true;
208}
209
// Writes the base level of |texels| into |surface|'s texture at
// (left, top, width, height). Compressed textures are not yet supported.
// For linear-tiled textures not in PREINITIALIZED layout, transitions to
// GENERAL first so the host write in uploadTexData is legal.
// Returns true and dirties the texture's mipmaps on success.
bool GrVkGpu::onWritePixels(GrSurface* surface,
                            int left, int top, int width, int height,
                            GrPixelConfig config,
                            const SkTArray<GrMipLevel>& texels) {
    GrVkTexture* vkTex = static_cast<GrVkTexture*>(surface->asTexture());
    if (!vkTex) {
        return false;
    }

    // TODO: We're ignoring MIP levels here.
    if (texels.empty() || !texels.begin()->fPixels) {
        return false;
    }

    // We assume Vulkan doesn't do sRGB <-> linear conversions when reading and writing pixels.
    if (GrPixelConfigIsSRGB(surface->config()) != GrPixelConfigIsSRGB(config)) {
        return false;
    }

    bool success = false;
    if (GrPixelConfigIsCompressed(vkTex->desc().fConfig)) {
        // We check that config == desc.fConfig in GrGpu::getWritePixelsInfo()
        SkASSERT(config == vkTex->desc().fConfig);
        // TODO: add compressed texture support
        // delete the following two lines and uncomment the two after that when ready
        // NOTE(review): vkTex came from asTexture(), which is not shown to add
        // a ref in this file — confirm this unref is balanced and not a
        // use-after-free hazard for the surface's texture.
        vkTex->unref();
        return false;
        //success = this->uploadCompressedTexData(vkTex->desc(), buffer, false, left, top, width,
        //                                       height);
    } else {
        bool linearTiling = vkTex->isLinearTiled();
        if (linearTiling && VK_IMAGE_LAYOUT_PREINITIALIZED != vkTex->currentLayout()) {
            // Need to change the layout to general in order to perform a host write
            VkImageLayout layout = vkTex->currentLayout();
            VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
            VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_HOST_BIT;
            VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
            VkAccessFlags dstAccessMask = VK_ACCESS_HOST_WRITE_BIT;
            vkTex->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_GENERAL,
                                  srcAccessMask,
                                  dstAccessMask,
                                  srcStageMask,
                                  dstStageMask,
                                  false);
        }
        // Only the base mip level of the first entry in |texels| is uploaded.
        success = this->uploadTexData(vkTex, left, top, width, height, config,
                                      texels.begin()->fPixels, texels.begin()->fRowBytes);
    }

    if (success) {
        vkTex->texturePriv().dirtyMipMaps(true);
        return true;
    }

    return false;
}
267
// Uploads |data| (uncompressed, |dataConfig| pixels, |rowBytes| stride) into
// |tex| at the rect (left, top, width, height). Two paths:
//  - Linear tiling: map the image's memory and write rows directly on the
//    host (requires PREINITIALIZED or GENERAL layout; see onWritePixels).
//  - Optimal tiling: stage the rows tightly packed in a transfer buffer,
//    then record a buffer-to-image copy on the current command buffer.
// Rows are flipped vertically for bottom-left-origin textures.
// Returns false if the write rect can't be adjusted to fit or mapping fails.
bool GrVkGpu::uploadTexData(GrVkTexture* tex,
                            int left, int top, int width, int height,
                            GrPixelConfig dataConfig,
                            const void* data,
                            size_t rowBytes) {
    SkASSERT(data);

    // If we're uploading compressed data then we should be using uploadCompressedTexData
    SkASSERT(!GrPixelConfigIsCompressed(dataConfig));

    bool linearTiling = tex->isLinearTiled();

    size_t bpp = GrBytesPerPixel(dataConfig);

    const GrSurfaceDesc& desc = tex->desc();

    // Clamp the rect to the texture bounds; adjusts data/rowBytes to match.
    if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
                                               &width, &height, &data, &rowBytes)) {
        return false;
    }
    // Tightly-packed byte width of one row of the upload rect.
    size_t trimRowBytes = width * bpp;

    if (linearTiling) {
        SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == tex->currentLayout() ||
                 VK_IMAGE_LAYOUT_GENERAL == tex->currentLayout());
        const VkImageSubresource subres = {
            VK_IMAGE_ASPECT_COLOR_BIT,
            0,  // mipLevel
            0,  // arraySlice
        };
        VkSubresourceLayout layout;
        VkResult err;

        const GrVkInterface* interface = this->vkInterface();

        // Query the driver's row pitch for the base level so we can address
        // rows inside the mapped memory.
        GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
                                                        tex->textureImage(),
                                                        &subres,
                                                        &layout));

        // Bottom-left-origin surfaces store row 0 at the bottom, so the
        // destination top is flipped in y.
        int texTop = kBottomLeft_GrSurfaceOrigin == desc.fOrigin ? tex->height() - top - height
                                                                 : top;
        VkDeviceSize offset = texTop*layout.rowPitch + left*bpp;
        VkDeviceSize size = height*layout.rowPitch;
        void* mapPtr;
        err = GR_VK_CALL(interface, MapMemory(fDevice, tex->textureMemory(), offset, size, 0,
                                              &mapPtr));
        if (err) {
            return false;
        }

        if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
            // copy into buffer by rows, walking the destination bottom-up
            const char* srcRow = reinterpret_cast<const char*>(data);
            char* dstRow = reinterpret_cast<char*>(mapPtr)+(height - 1)*layout.rowPitch;
            for (int y = 0; y < height; y++) {
                memcpy(dstRow, srcRow, trimRowBytes);
                srcRow += rowBytes;
                dstRow -= layout.rowPitch;
            }
        } else {
            // If there is no padding on the src (rowBytes) or dst (layout.rowPitch) we can memcpy
            if (trimRowBytes == rowBytes && trimRowBytes == layout.rowPitch) {
                memcpy(mapPtr, data, trimRowBytes * height);
            } else {
                SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), data, rowBytes,
                             trimRowBytes, height);
            }
        }

        GR_VK_CALL(interface, UnmapMemory(fDevice, tex->textureMemory()));
    } else {
        // Optimal tiling: stage rows tightly packed in a host-visible
        // transfer buffer, then copy on the GPU.
        GrVkTransferBuffer* transferBuffer =
            GrVkTransferBuffer::Create(this, trimRowBytes * height, GrVkBuffer::kCopyRead_Type);

        void* mapPtr = transferBuffer->map();

        if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
            // copy into buffer by rows, pre-flipping so the GPU copy is direct
            const char* srcRow = reinterpret_cast<const char*>(data);
            char* dstRow = reinterpret_cast<char*>(mapPtr)+(height - 1)*trimRowBytes;
            for (int y = 0; y < height; y++) {
                memcpy(dstRow, srcRow, trimRowBytes);
                srcRow += rowBytes;
                dstRow -= trimRowBytes;
            }
        } else {
            // If there is no padding on the src data rows, we can do a single memcpy
            if (trimRowBytes == rowBytes) {
                memcpy(mapPtr, data, trimRowBytes * height);
            } else {
                SkRectMemcpy(mapPtr, trimRowBytes, data, rowBytes, trimRowBytes, height);
            }
        }

        transferBuffer->unmap();

        // make sure the unmap has finished
        transferBuffer->addMemoryBarrier(this,
                                         VK_ACCESS_HOST_WRITE_BIT,
                                         VK_ACCESS_TRANSFER_READ_BIT,
                                         VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
                                         VK_PIPELINE_STAGE_TRANSFER_BIT,
                                         false);

        // Set up copy region
        bool flipY = kBottomLeft_GrSurfaceOrigin == tex->origin();
        VkOffset3D offset = {
            left,
            flipY ? tex->height() - top - height : top,
            0
        };

        VkBufferImageCopy region;
        memset(&region, 0, sizeof(VkBufferImageCopy));
        region.bufferOffset = 0;
        region.bufferRowLength = width;
        region.bufferImageHeight = height;
        region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
        region.imageOffset = offset;
        region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

        // Change layout of our target so it can be copied to
        VkImageLayout layout = tex->currentLayout();
        VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
        VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
        tex->setImageLayout(this,
                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                            srcAccessMask,
                            dstAccessMask,
                            srcStageMask,
                            dstStageMask,
                            false);

        // Copy the buffer to the image
        fCurrentCmdBuffer->copyBufferToImage(this,
                                             transferBuffer,
                                             tex,
                                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                             1,
                                             &region);

        // Submit the current command buffer to the Queue
        this->submitCommandBuffer(kSkip_SyncQueue);

        transferBuffer->unref();
    }

    return true;
}
420
421////////////////////////////////////////////////////////////////////////////////
// Creates a new texture (and render target, when requested). Linear tiling is
// used only for kZeroCopy surfaces whose config the caps allow linearly;
// otherwise the image is optimal-tiled in device-local memory. If |texels|
// carries data, the base level is uploaded (additional MIP levels are
// currently ignored). Returns nullptr on unsupported configs or failure.
GrTexture* GrVkGpu::onCreateTexture(const GrSurfaceDesc& desc, GrGpuResource::LifeCycle lifeCycle,
                                    const SkTArray<GrMipLevel>& texels) {
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);

    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(desc.fConfig, &pixelFormat)) {
        return nullptr;
    }

    if (!fVkCaps->isConfigTexturable(desc.fConfig)) {
        return nullptr;
    }

    // Zero-copy requires linear tiling, which must be both texturable and
    // (for render targets) renderable in this config.
    bool linearTiling = false;
    if (SkToBool(desc.fFlags & kZeroCopy_GrSurfaceFlag)) {
        if (fVkCaps->isConfigTexurableLinearly(desc.fConfig) &&
            (!renderTarget || fVkCaps->isConfigRenderableLinearly(desc.fConfig, false))) {
            linearTiling = true;
        } else {
            return nullptr;
        }
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    if (renderTarget) {
        usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    }

    // For now we will set the VK_IMAGE_USAGE_TRANSFER_DESTINATION_BIT and
    // VK_IMAGE_USAGE_TRANSFER_SOURCE_BIT on every texture since we do not know whether or not we
    // will be using this texture in some copy or not. Also this assumes, as is the current case,
    // that all render targets in vulkan are also textures. If we change this practice of setting
    // both bits, we must make sure to set the destination bit if we are uploading srcData to the
    // texture.
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    // Host-visible memory only when we will write initial data through a map
    // (linear tiling); otherwise prefer device-local.
    VkFlags memProps = (!texels.empty() && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
                                                           VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

    // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is
    // requested, this ImageDesc describes the resolved texture. Therefore we always have samples
    // set to 1.
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = pixelFormat;
    imageDesc.fWidth = desc.fWidth;
    imageDesc.fHeight = desc.fHeight;
    imageDesc.fLevels = 1;
    imageDesc.fSamples = 1;
    imageDesc.fImageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = usageFlags;
    imageDesc.fMemProps = memProps;

    GrVkTexture* tex;
    if (renderTarget) {
        tex = GrVkTextureRenderTarget::CreateNewTextureRenderTarget(this, desc, lifeCycle,
                                                                    imageDesc);
    } else {
        tex = GrVkTexture::CreateNewTexture(this, desc, lifeCycle, imageDesc);
    }

    if (!tex) {
        return nullptr;
    }

    // TODO: We're ignoring MIP levels here.
    if (!texels.empty()) {
        SkASSERT(texels.begin()->fPixels);
        if (!this->uploadTexData(tex, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
                                 texels.begin()->fPixels, texels.begin()->fRowBytes)) {
            tex->unref();
            return nullptr;
        }
    }

    return tex;
}
499
500////////////////////////////////////////////////////////////////////////////////
501
502static GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin) {
503 // By default, all textures in Vk use TopLeft
504 if (kDefault_GrSurfaceOrigin == origin) {
505 return kTopLeft_GrSurfaceOrigin;
506 } else {
507 return origin;
508 }
509}
510
// Wraps a client-supplied Vulkan texture in a GrTexture. desc.fTextureHandle
// points at a GrVkTextureInfo whose image and allocation must be non-null.
// |ownership| selects whether the backend object is adopted (we destroy it)
// or merely borrowed. Returns nullptr for unsupported configs, null handles,
// or dimensions over the caps limit.
GrTexture* GrVkGpu::onWrapBackendTexture(const GrBackendTextureDesc& desc,
                                         GrWrapOwnership ownership) {
    VkFormat format;
    if (!GrPixelConfigToVkFormat(desc.fConfig, &format)) {
        return nullptr;
    }

    if (0 == desc.fTextureHandle) {
        return nullptr;
    }

    int maxSize = this->caps()->maxTextureSize();
    if (desc.fWidth > maxSize || desc.fHeight > maxSize) {
        return nullptr;
    }

    const GrVkTextureInfo* info = reinterpret_cast<const GrVkTextureInfo*>(desc.fTextureHandle);
    if (VK_NULL_HANDLE == info->fImage || VK_NULL_HANDLE == info->fAlloc) {
        return nullptr;
    }

    GrGpuResource::LifeCycle lifeCycle = (kAdopt_GrWrapOwnership == ownership)
                                         ? GrGpuResource::kAdopted_LifeCycle
                                         : GrGpuResource::kBorrowed_LifeCycle;

    GrSurfaceDesc surfDesc;
    // next line relies on GrBackendTextureDesc's flags matching GrTexture's
    surfDesc.fFlags = (GrSurfaceFlags)desc.fFlags;
    surfDesc.fWidth = desc.fWidth;
    surfDesc.fHeight = desc.fHeight;
    surfDesc.fConfig = desc.fConfig;
    surfDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount());
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag);
    // In GL, Chrome assumes all textures are BottomLeft
    // In VK, we don't have this restriction
    surfDesc.fOrigin = resolve_origin(desc.fOrigin);

    GrVkTexture* texture = nullptr;
    if (renderTarget) {
        texture = GrVkTextureRenderTarget::CreateWrappedTextureRenderTarget(this, surfDesc,
                                                                            lifeCycle, format,
                                                                            info);
    } else {
        texture = GrVkTexture::CreateWrappedTexture(this, surfDesc, lifeCycle, format,
                                                    info);
    }
    if (!texture) {
        return nullptr;
    }

    return texture;
}
563
// Wraps a client-supplied Vulkan image as a render target. Unlike texture
// wrapping, a null allocation is allowed when borrowing (we won't free it);
// it is only rejected when adopting. Optionally attaches a stencil buffer
// when wrapDesc.fStencilBits is set.
GrRenderTarget* GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& wrapDesc,
                                                   GrWrapOwnership ownership) {

    const GrVkTextureInfo* info =
        reinterpret_cast<const GrVkTextureInfo*>(wrapDesc.fRenderTargetHandle);
    if (VK_NULL_HANDLE == info->fImage ||
        (VK_NULL_HANDLE == info->fAlloc && kAdopt_GrWrapOwnership == ownership)) {
        return nullptr;
    }

    GrGpuResource::LifeCycle lifeCycle = (kAdopt_GrWrapOwnership == ownership)
                                         ? GrGpuResource::kAdopted_LifeCycle
                                         : GrGpuResource::kBorrowed_LifeCycle;

    GrSurfaceDesc desc;
    desc.fConfig = wrapDesc.fConfig;
    desc.fFlags = kCheckAllocation_GrSurfaceFlag;
    desc.fWidth = wrapDesc.fWidth;
    desc.fHeight = wrapDesc.fHeight;
    desc.fSampleCnt = SkTMin(wrapDesc.fSampleCnt, this->caps()->maxSampleCount());

    desc.fOrigin = resolve_origin(wrapDesc.fOrigin);

    GrVkRenderTarget* tgt = GrVkRenderTarget::CreateWrappedRenderTarget(this, desc,
                                                                        lifeCycle,
                                                                        info);
    if (tgt && wrapDesc.fStencilBits) {
        // Failing to create the stencil attachment fails the whole wrap.
        if (!createStencilAttachmentForRenderTarget(tgt, desc.fWidth, desc.fHeight)) {
            tgt->unref();
            return nullptr;
        }
    }
    return tgt;
}
598
599////////////////////////////////////////////////////////////////////////////////
600
// Binds the mesh's vertex buffer (and index buffer, for indexed meshes) to
// the current command buffer. Before each bind, inserts a host-write ->
// vertex-input memory barrier so CPU-side buffer updates are visible to the
// GPU's vertex/index fetch.
void GrVkGpu::bindGeometry(const GrPrimitiveProcessor& primProc,
                           const GrNonInstancedMesh& mesh) {
    GrVkVertexBuffer* vbuf;
    vbuf = (GrVkVertexBuffer*)mesh.vertexBuffer();
    SkASSERT(vbuf);
    SkASSERT(!vbuf->isMapped());

    // Flush prior host writes to the vertex data.
    vbuf->addMemoryBarrier(this,
                           VK_ACCESS_HOST_WRITE_BIT,
                           VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
                           VK_PIPELINE_STAGE_HOST_BIT,
                           VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                           false);

    fCurrentCmdBuffer->bindVertexBuffer(this, vbuf);

    if (mesh.isIndexed()) {
        GrVkIndexBuffer* ibuf = (GrVkIndexBuffer*)mesh.indexBuffer();
        SkASSERT(ibuf);
        SkASSERT(!ibuf->isMapped());

        // Same barrier for the index data.
        ibuf->addMemoryBarrier(this,
                               VK_ACCESS_HOST_WRITE_BIT,
                               VK_ACCESS_INDEX_READ_BIT,
                               VK_PIPELINE_STAGE_HOST_BIT,
                               VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                               false);

        fCurrentCmdBuffer->bindIndexBuffer(this, ibuf);
    }
}
632
Greg Daniel164a9f02016-02-22 09:56:40 -0500633////////////////////////////////////////////////////////////////////////////////
634
// Creates a stencil attachment sized at least |width| x |height| for |rt|,
// using the first (most preferred) stencil format reported by the Vulkan
// caps and the render target's stencil sample count.
GrStencilAttachment* GrVkGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt,
                                                                     int width,
                                                                     int height) {
    SkASSERT(rt->asTexture());
    SkASSERT(width >= rt->width());
    SkASSERT(height >= rt->height());

    int samples = rt->numStencilSamples();

    SkASSERT(this->vkCaps().stencilFormats().count());
    const GrVkCaps::StencilFormat& sFmt = this->vkCaps().stencilFormats()[0];

    GrVkStencilAttachment* stencil(GrVkStencilAttachment::Create(this,
                                                                 GrGpuResource::kCached_LifeCycle,
                                                                 width,
                                                                 height,
                                                                 samples,
                                                                 sFmt));
    fStats.incStencilAttachmentCreates();
    return stencil;
}
656
657////////////////////////////////////////////////////////////////////////////////
658
// Test-only: creates a raw VkImage (plus bound memory) and returns a
// heap-allocated GrVkTextureInfo describing it, cast to GrBackendObject.
// The info is released by deleteTestingOnlyBackendTexture below. Initial
// pixel data is only supported with linear tiling (written via a host map);
// srcData with optimal tiling returns 0. Returns 0 on any failure.
GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, int h,
                                                         GrPixelConfig config) {

    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
        return 0;
    }

    bool linearTiling = false;
    if (!fVkCaps->isConfigTexturable(config)) {
        return 0;
    }

    if (fVkCaps->isConfigTexurableLinearly(config)) {
        linearTiling = true;
    }

    // Currently this is not supported since it requires a copy which has not yet been implemented.
    if (srcData && !linearTiling) {
        return 0;
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    // Host-visible memory only when we will write srcData through a map.
    VkFlags memProps = (srcData && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
                                                   VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

    VkImage image = VK_NULL_HANDLE;
    VkDeviceMemory alloc = VK_NULL_HANDLE;

    VkImageTiling imageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
    // PREINITIALIZED preserves host-written content across the first layout
    // transition; only meaningful for linear tiling.
    VkImageLayout initialLayout = (VK_IMAGE_TILING_LINEAR == imageTiling)
                                  ? VK_IMAGE_LAYOUT_PREINITIALIZED
                                  : VK_IMAGE_LAYOUT_UNDEFINED;

    // Create Image
    VkSampleCountFlagBits vkSamples;
    if (!GrSampleCountToVkSampleCount(1, &vkSamples)) {
        return 0;
    }

    // NOTE(review): { w, h, 1 } narrows int -> uint32_t in the extent;
    // presumably w and h are non-negative here — confirm with callers.
    const VkImageCreateInfo imageCreateInfo = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,         // sType
        NULL,                                        // pNext
        0,                                           // VkImageCreateFlags
        VK_IMAGE_TYPE_2D,                            // VkImageType
        pixelFormat,                                 // VkFormat
        { w, h, 1 },                                 // VkExtent3D
        1,                                           // mipLevels
        1,                                           // arrayLayers
        vkSamples,                                   // samples
        imageTiling,                                 // VkImageTiling
        usageFlags,                                  // VkImageUsageFlags
        VK_SHARING_MODE_EXCLUSIVE,                   // VkSharingMode
        0,                                           // queueFamilyCount
        0,                                           // pQueueFamilyIndices
        initialLayout                                // initialLayout
    };

    GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateImage(this->device(), &imageCreateInfo, nullptr, &image));

    if (!GrVkMemory::AllocAndBindImageMemory(this, image, memProps, &alloc)) {
        VK_CALL(DestroyImage(this->device(), image, nullptr));
        return 0;
    }

    if (srcData) {
        if (linearTiling) {
            const VkImageSubresource subres = {
                VK_IMAGE_ASPECT_COLOR_BIT,
                0,  // mipLevel
                0,  // arraySlice
            };
            VkSubresourceLayout layout;
            VkResult err;

            VK_CALL(GetImageSubresourceLayout(fDevice, image, &subres, &layout));

            void* mapPtr;
            err = VK_CALL(MapMemory(fDevice, alloc, 0, layout.rowPitch * h, 0, &mapPtr));
            if (err) {
                // Clean up everything created so far before failing.
                VK_CALL(FreeMemory(this->device(), alloc, nullptr));
                VK_CALL(DestroyImage(this->device(), image, nullptr));
                return 0;
            }

            size_t bpp = GrBytesPerPixel(config);
            size_t rowCopyBytes = bpp * w;
            // If there is no padding on dst (layout.rowPitch) we can do a single memcopy.
            // This assumes the srcData comes in with no padding.
            if (rowCopyBytes == layout.rowPitch) {
                memcpy(mapPtr, srcData, rowCopyBytes * h);
            } else {
                SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), srcData, rowCopyBytes,
                             rowCopyBytes, h);
            }
            VK_CALL(UnmapMemory(fDevice, alloc));
        } else {
            // TODO: Add support for copying to optimal tiling
            SkASSERT(false);
        }
    }

    // Caller owns this; see deleteTestingOnlyBackendTexture.
    GrVkTextureInfo* info = new GrVkTextureInfo;
    info->fImage = image;
    info->fAlloc = alloc;
    info->fImageTiling = imageTiling;
    info->fImageLayout = initialLayout;

    return (GrBackendObject)info;
}
772
773bool GrVkGpu::isTestingOnlyBackendTexture(GrBackendObject id) const {
jvanverthfd359ca2016-03-18 11:57:24 -0700774 const GrVkTextureInfo* backend = reinterpret_cast<const GrVkTextureInfo*>(id);
Greg Daniel164a9f02016-02-22 09:56:40 -0500775
776 if (backend && backend->fImage && backend->fAlloc) {
777 VkMemoryRequirements req;
778 memset(&req, 0, sizeof(req));
779 GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
780 backend->fImage,
781 &req));
782 // TODO: find a better check
783 // This will probably fail with a different driver
784 return (req.size > 0) && (req.size <= 8192 * 8192);
785 }
786
787 return false;
788}
789
790void GrVkGpu::deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandon) {
jvanverthfd359ca2016-03-18 11:57:24 -0700791 const GrVkTextureInfo* backend = reinterpret_cast<const GrVkTextureInfo*>(id);
Greg Daniel164a9f02016-02-22 09:56:40 -0500792
793 if (backend) {
794 if (!abandon) {
jvanverthfd359ca2016-03-18 11:57:24 -0700795 // something in the command buffer may still be using this, so force submit
796 this->submitCommandBuffer(kForce_SyncQueue);
797
798 VK_CALL(FreeMemory(this->device(), backend->fAlloc, nullptr));
799 VK_CALL(DestroyImage(this->device(), backend->fImage, nullptr));
Greg Daniel164a9f02016-02-22 09:56:40 -0500800 }
jvanverthfd359ca2016-03-18 11:57:24 -0700801 delete backend;
Greg Daniel164a9f02016-02-22 09:56:40 -0500802 }
803}
804
805////////////////////////////////////////////////////////////////////////////////
806
807void GrVkGpu::addMemoryBarrier(VkPipelineStageFlags srcStageMask,
808 VkPipelineStageFlags dstStageMask,
809 bool byRegion,
810 VkMemoryBarrier* barrier) const {
811 SkASSERT(fCurrentCmdBuffer);
812 fCurrentCmdBuffer->pipelineBarrier(this,
813 srcStageMask,
814 dstStageMask,
815 byRegion,
816 GrVkCommandBuffer::kMemory_BarrierType,
817 barrier);
818}
819
820void GrVkGpu::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
821 VkPipelineStageFlags dstStageMask,
822 bool byRegion,
823 VkBufferMemoryBarrier* barrier) const {
824 SkASSERT(fCurrentCmdBuffer);
825 fCurrentCmdBuffer->pipelineBarrier(this,
826 srcStageMask,
827 dstStageMask,
828 byRegion,
829 GrVkCommandBuffer::kBufferMemory_BarrierType,
830 barrier);
831}
832
833void GrVkGpu::addImageMemoryBarrier(VkPipelineStageFlags srcStageMask,
834 VkPipelineStageFlags dstStageMask,
835 bool byRegion,
836 VkImageMemoryBarrier* barrier) const {
837 SkASSERT(fCurrentCmdBuffer);
838 fCurrentCmdBuffer->pipelineBarrier(this,
839 srcStageMask,
840 dstStageMask,
841 byRegion,
842 GrVkCommandBuffer::kImageMemory_BarrierType,
843 barrier);
844}
845
846void GrVkGpu::finishDrawTarget() {
847 // Submit the current command buffer to the Queue
848 this->submitCommandBuffer(kSkip_SyncQueue);
849}
850
egdaniel3d5d9ac2016-03-01 12:56:15 -0800851void GrVkGpu::clearStencil(GrRenderTarget* target) {
852 if (nullptr == target) {
853 return;
854 }
855 GrStencilAttachment* stencil = target->renderTargetPriv().getStencilAttachment();
856 GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
857
858
859 VkClearDepthStencilValue vkStencilColor;
860 memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
861
862 VkImageLayout origDstLayout = vkStencil->currentLayout();
863
864 VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
865 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
866
867 VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);;
868 VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
869
870 vkStencil->setImageLayout(this,
871 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
872 srcAccessMask,
873 dstAccessMask,
874 srcStageMask,
875 dstStageMask,
876 false);
877
878
879 VkImageSubresourceRange subRange;
880 memset(&subRange, 0, sizeof(VkImageSubresourceRange));
881 subRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
882 subRange.baseMipLevel = 0;
883 subRange.levelCount = 1;
884 subRange.baseArrayLayer = 0;
885 subRange.layerCount = 1;
886
887 // TODO: I imagine that most times we want to clear a stencil it will be at the beginning of a
888 // draw. Thus we should look into using the load op functions on the render pass to clear out
889 // the stencil there.
890 fCurrentCmdBuffer->clearDepthStencilImage(this, vkStencil, &vkStencilColor, 1, &subRange);
891}
892
893void GrVkGpu::onClearStencilClip(GrRenderTarget* target, const SkIRect& rect, bool insideClip) {
894 SkASSERT(target);
895
896 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
897 GrStencilAttachment* sb = target->renderTargetPriv().getStencilAttachment();
898 GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)sb;
899
900 // this should only be called internally when we know we have a
901 // stencil buffer.
902 SkASSERT(sb);
903 int stencilBitCount = sb->bits();
904
905 // The contract with the callers does not guarantee that we preserve all bits in the stencil
906 // during this clear. Thus we will clear the entire stencil to the desired value.
907
908 VkClearDepthStencilValue vkStencilColor;
909 memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
910 if (insideClip) {
911 vkStencilColor.stencil = (1 << (stencilBitCount - 1));
912 } else {
913 vkStencilColor.stencil = 0;
914 }
915
916 VkImageLayout origDstLayout = vkStencil->currentLayout();
917 VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
918 VkAccessFlags dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
919 VkPipelineStageFlags srcStageMask =
920 GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
921 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
922 vkStencil->setImageLayout(this,
923 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
924 srcAccessMask,
925 dstAccessMask,
926 srcStageMask,
927 dstStageMask,
928 false);
929
930 VkClearRect clearRect;
931 // Flip rect if necessary
932 SkIRect vkRect = rect;
933
934 if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) {
935 vkRect.fTop = vkRT->height() - rect.fBottom;
936 vkRect.fBottom = vkRT->height() - rect.fTop;
937 }
938
939 clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
940 clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
941
942 clearRect.baseArrayLayer = 0;
943 clearRect.layerCount = 1;
944
945 const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
946 SkASSERT(renderPass);
947 fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);
948
949 uint32_t stencilIndex;
950 SkAssertResult(renderPass->stencilAttachmentIndex(&stencilIndex));
951
952 VkClearAttachment attachment;
953 attachment.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
954 attachment.colorAttachment = 0; // this value shouldn't matter
955 attachment.clearValue.depthStencil = vkStencilColor;
956
957 fCurrentCmdBuffer->clearAttachments(this, 1, &attachment, 1, &clearRect);
958 fCurrentCmdBuffer->endRenderPass(this);
959
960 return;
961}
962
Greg Daniel164a9f02016-02-22 09:56:40 -0500963void GrVkGpu::onClear(GrRenderTarget* target, const SkIRect& rect, GrColor color) {
964 // parent class should never let us get here with no RT
965 SkASSERT(target);
966
967 VkClearColorValue vkColor;
968 GrColorToRGBAFloat(color, vkColor.float32);
969
970 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
971 VkImageLayout origDstLayout = vkRT->currentLayout();
972
973 if (rect.width() != target->width() || rect.height() != target->height()) {
974 VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
975 VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
976 VkPipelineStageFlags srcStageMask =
egdaniel3d5d9ac2016-03-01 12:56:15 -0800977 GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
Greg Daniel164a9f02016-02-22 09:56:40 -0500978 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
979 vkRT->setImageLayout(this,
980 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
981 srcAccessMask,
982 dstAccessMask,
983 srcStageMask,
984 dstStageMask,
985 false);
986
987 VkClearRect clearRect;
egdaniel3d5d9ac2016-03-01 12:56:15 -0800988 // Flip rect if necessary
989 SkIRect vkRect = rect;
990 if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) {
991 vkRect.fTop = vkRT->height() - rect.fBottom;
992 vkRect.fBottom = vkRT->height() - rect.fTop;
993 }
994 clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
995 clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
jvanverthe77875a2016-03-04 15:16:25 -0800996 clearRect.baseArrayLayer = 0;
997 clearRect.layerCount = 1;
Greg Daniel164a9f02016-02-22 09:56:40 -0500998
999 const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
1000 SkASSERT(renderPass);
1001 fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);
1002
1003 uint32_t colorIndex;
1004 SkAssertResult(renderPass->colorAttachmentIndex(&colorIndex));
1005
1006 VkClearAttachment attachment;
1007 attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1008 attachment.colorAttachment = colorIndex;
1009 attachment.clearValue.color = vkColor;
1010
1011 fCurrentCmdBuffer->clearAttachments(this, 1, &attachment, 1, &clearRect);
1012 fCurrentCmdBuffer->endRenderPass(this);
1013 return;
1014 }
1015
1016 VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
1017 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
1018
1019 VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);;
1020 VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
1021
1022 vkRT->setImageLayout(this,
1023 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1024 srcAccessMask,
1025 dstAccessMask,
1026 srcStageMask,
1027 dstStageMask,
1028 false);
1029
1030
1031 VkImageSubresourceRange subRange;
1032 memset(&subRange, 0, sizeof(VkImageSubresourceRange));
1033 subRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1034 subRange.baseMipLevel = 0;
1035 subRange.levelCount = 1;
1036 subRange.baseArrayLayer = 0;
1037 subRange.layerCount = 1;
1038
1039 // In the future we may not actually be doing this type of clear at all. If we are inside a
1040 // render pass or doing a non full clear then we will use CmdClearColorAttachment. The more
1041 // common use case will be clearing an attachment at the start of a render pass, in which case
1042 // we will use the clear load ops.
1043 fCurrentCmdBuffer->clearColorImage(this,
1044 vkRT,
1045 &vkColor,
1046 1, &subRange);
1047}
1048
1049inline bool can_copy_image(const GrSurface* dst,
1050 const GrSurface* src,
1051 const GrVkGpu* gpu) {
1052 if (src->asTexture() &&
1053 dst->asTexture() &&
1054 src->origin() == dst->origin() &&
1055 src->config() == dst->config()) {
1056 return true;
1057 }
1058
1059 // How does msaa play into this? If a VkTexture is multisampled, are we copying the multisampled
1060 // or the resolved image here?
1061
1062 return false;
1063}
1064
1065void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst,
1066 GrSurface* src,
1067 const SkIRect& srcRect,
1068 const SkIPoint& dstPoint) {
1069 SkASSERT(can_copy_image(dst, src, this));
1070
1071 // Insert memory barriers to switch src and dst to transfer_source and transfer_dst layouts
1072 GrVkTexture* dstTex = static_cast<GrVkTexture*>(dst->asTexture());
1073 GrVkTexture* srcTex = static_cast<GrVkTexture*>(src->asTexture());
1074
1075 VkImageLayout origDstLayout = dstTex->currentLayout();
1076 VkImageLayout origSrcLayout = srcTex->currentLayout();
1077
1078 VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
1079 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
1080
1081 // These flags are for flushing/invalidating caches and for the dst image it doesn't matter if
1082 // the cache is flushed since it is only being written to.
1083 VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);;
1084 VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
1085
1086 dstTex->setImageLayout(this,
1087 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1088 srcAccessMask,
1089 dstAccessMask,
1090 srcStageMask,
1091 dstStageMask,
1092 false);
1093
1094 srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origSrcLayout);
1095 dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
1096
1097 srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origSrcLayout);
1098 dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
1099
1100 srcTex->setImageLayout(this,
1101 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
1102 srcAccessMask,
1103 dstAccessMask,
1104 srcStageMask,
1105 dstStageMask,
1106 false);
1107
1108 // Flip rect if necessary
1109 SkIRect srcVkRect = srcRect;
1110 int32_t dstY = dstPoint.fY;
1111
1112 if (kBottomLeft_GrSurfaceOrigin == src->origin()) {
1113 SkASSERT(kBottomLeft_GrSurfaceOrigin == dst->origin());
1114 srcVkRect.fTop = src->height() - srcRect.fBottom;
1115 srcVkRect.fBottom = src->height() - srcRect.fTop;
1116 dstY = dst->height() - dstPoint.fY - srcVkRect.height();
1117 }
1118
1119 VkImageCopy copyRegion;
1120 memset(&copyRegion, 0, sizeof(VkImageCopy));
1121 copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
1122 copyRegion.srcOffset = { srcVkRect.fLeft, srcVkRect.fTop, 0 };
1123 copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
1124 copyRegion.dstOffset = { dstPoint.fX, dstY, 0 };
1125 copyRegion.extent = { (uint32_t)srcVkRect.width(), (uint32_t)srcVkRect.height(), 0 };
1126
1127 fCurrentCmdBuffer->copyImage(this,
1128 srcTex,
1129 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
1130 dstTex,
1131 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1132 1,
1133 &copyRegion);
1134}
1135
1136inline bool can_copy_as_draw(const GrSurface* dst,
1137 const GrSurface* src,
1138 const GrVkGpu* gpu) {
1139 return false;
1140}
1141
1142void GrVkGpu::copySurfaceAsDraw(GrSurface* dst,
1143 GrSurface* src,
1144 const SkIRect& srcRect,
1145 const SkIPoint& dstPoint) {
1146 SkASSERT(false);
1147}
1148
1149bool GrVkGpu::onCopySurface(GrSurface* dst,
1150 GrSurface* src,
1151 const SkIRect& srcRect,
1152 const SkIPoint& dstPoint) {
1153 if (can_copy_image(dst, src, this)) {
1154 this->copySurfaceAsCopyImage(dst, src, srcRect, dstPoint);
1155 return true;
1156 }
1157
1158 if (can_copy_as_draw(dst, src, this)) {
1159 this->copySurfaceAsDraw(dst, src, srcRect, dstPoint);
1160 return true;
1161 }
1162
1163 return false;
1164}
1165
cdalton28f45b92016-03-07 13:58:26 -08001166void GrVkGpu::onGetMultisampleSpecs(GrRenderTarget* rt, const GrStencilSettings&,
1167 int* effectiveSampleCnt, SkAutoTDeleteArray<SkPoint>*) {
1168 // TODO: stub.
1169 SkASSERT(!this->caps()->sampleLocationsSupport());
1170 *effectiveSampleCnt = rt->desc().fSampleCnt;
1171}
1172
Greg Daniel164a9f02016-02-22 09:56:40 -05001173bool GrVkGpu::onGetReadPixelsInfo(GrSurface* srcSurface, int width, int height, size_t rowBytes,
1174 GrPixelConfig readConfig, DrawPreference* drawPreference,
1175 ReadPixelTempDrawInfo* tempDrawInfo) {
1176 // Currently we don't handle draws, so if the caller wants/needs to do a draw we need to fail
1177 if (kNoDraw_DrawPreference != *drawPreference) {
1178 return false;
1179 }
1180
1181 if (srcSurface->config() != readConfig) {
1182 // TODO: This should fall back to drawing or copying to change config of srcSurface to match
1183 // that of readConfig.
1184 return false;
1185 }
1186
1187 return true;
1188}
1189
1190bool GrVkGpu::onReadPixels(GrSurface* surface,
1191 int left, int top, int width, int height,
1192 GrPixelConfig config,
1193 void* buffer,
1194 size_t rowBytes) {
1195 VkFormat pixelFormat;
1196 if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
1197 return false;
1198 }
1199
1200 GrVkTexture* tgt = static_cast<GrVkTexture*>(surface->asTexture());
1201 if (!tgt) {
1202 return false;
1203 }
1204
1205 // Change layout of our target so it can be used as copy
1206 VkImageLayout layout = tgt->currentLayout();
1207 VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
1208 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
1209 VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
1210 VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
1211 tgt->setImageLayout(this,
1212 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
1213 srcAccessMask,
1214 dstAccessMask,
1215 srcStageMask,
1216 dstStageMask,
1217 false);
1218
1219 GrVkTransferBuffer* transferBuffer =
1220 reinterpret_cast<GrVkTransferBuffer*>(this->createTransferBuffer(rowBytes * height,
1221 kGpuToCpu_TransferType));
1222
1223 bool flipY = kBottomLeft_GrSurfaceOrigin == surface->origin();
1224 VkOffset3D offset = {
1225 left,
1226 flipY ? surface->height() - top - height : top,
1227 0
1228 };
1229
1230 // Copy the image to a buffer so we can map it to cpu memory
1231 VkBufferImageCopy region;
1232 memset(&region, 0, sizeof(VkBufferImageCopy));
1233 region.bufferOffset = 0;
1234 region.bufferRowLength = 0; // Forces RowLength to be imageExtent.width
1235 region.bufferImageHeight = 0; // Forces height to be tightly packed. Only useful for 3d images.
1236 region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
1237 region.imageOffset = offset;
1238 region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };
1239
1240 fCurrentCmdBuffer->copyImageToBuffer(this,
1241 tgt,
1242 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
1243 transferBuffer,
1244 1,
1245 &region);
1246
1247 // make sure the copy to buffer has finished
1248 transferBuffer->addMemoryBarrier(this,
1249 VK_ACCESS_TRANSFER_WRITE_BIT,
1250 VK_ACCESS_HOST_READ_BIT,
1251 VK_PIPELINE_STAGE_TRANSFER_BIT,
1252 VK_PIPELINE_STAGE_HOST_BIT,
1253 false);
1254
1255 // We need to submit the current command buffer to the Queue and make sure it finishes before
1256 // we can copy the data out of the buffer.
1257 this->submitCommandBuffer(kForce_SyncQueue);
1258
1259 void* mappedMemory = transferBuffer->map();
1260
1261 memcpy(buffer, mappedMemory, rowBytes*height);
1262
1263 transferBuffer->unmap();
1264 transferBuffer->unref();
1265
1266 if (flipY) {
1267 SkAutoSMalloc<32 * sizeof(GrColor)> scratch;
1268 size_t tightRowBytes = GrBytesPerPixel(config) * width;
1269 scratch.reset(tightRowBytes);
1270 void* tmpRow = scratch.get();
1271 // flip y in-place by rows
1272 const int halfY = height >> 1;
1273 char* top = reinterpret_cast<char*>(buffer);
1274 char* bottom = top + (height - 1) * rowBytes;
1275 for (int y = 0; y < halfY; y++) {
1276 memcpy(tmpRow, top, tightRowBytes);
1277 memcpy(top, bottom, tightRowBytes);
1278 memcpy(bottom, tmpRow, tightRowBytes);
1279 top += rowBytes;
1280 bottom -= rowBytes;
1281 }
1282 }
1283
1284 return true;
1285}
egdaniel0e1853c2016-03-17 11:35:45 -07001286bool GrVkGpu::prepareDrawState(const GrPipeline& pipeline,
1287 const GrPrimitiveProcessor& primProc,
1288 GrPrimitiveType primitiveType,
1289 const GrVkRenderPass& renderPass,
egdaniel22281c12016-03-23 13:49:40 -07001290 GrVkPipelineState** pipelineState) {
1291 *pipelineState = fResourceProvider.findOrCreateCompatiblePipelineState(pipeline,
1292 primProc,
1293 primitiveType,
1294 renderPass);
1295 if (!pipelineState) {
egdaniel0e1853c2016-03-17 11:35:45 -07001296 return false;
1297 }
1298
egdaniel22281c12016-03-23 13:49:40 -07001299 (*pipelineState)->setData(this, primProc, pipeline);
egdaniel0e1853c2016-03-17 11:35:45 -07001300
egdaniel22281c12016-03-23 13:49:40 -07001301 (*pipelineState)->bind(this, fCurrentCmdBuffer);
egdaniel470d77a2016-03-18 12:50:27 -07001302
1303 GrVkPipeline::SetDynamicState(this, fCurrentCmdBuffer, pipeline);
1304
egdaniel0e1853c2016-03-17 11:35:45 -07001305 return true;
1306}
1307
1308void GrVkGpu::onDraw(const GrPipeline& pipeline,
1309 const GrPrimitiveProcessor& primProc,
1310 const GrMesh* meshes,
1311 int meshCount) {
1312 if (!meshCount) {
1313 return;
1314 }
1315 GrRenderTarget* rt = pipeline.getRenderTarget();
Greg Daniel164a9f02016-02-22 09:56:40 -05001316 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
1317 const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
1318 SkASSERT(renderPass);
1319
egdaniel470d77a2016-03-18 12:50:27 -07001320 fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);
1321
egdaniel22281c12016-03-23 13:49:40 -07001322 GrVkPipelineState* pipelineState = nullptr;
egdaniel0e1853c2016-03-17 11:35:45 -07001323 GrPrimitiveType primitiveType = meshes[0].primitiveType();
egdaniel22281c12016-03-23 13:49:40 -07001324 if (!this->prepareDrawState(pipeline, primProc, primitiveType, *renderPass, &pipelineState)) {
Greg Daniel164a9f02016-02-22 09:56:40 -05001325 return;
1326 }
1327
Greg Daniel164a9f02016-02-22 09:56:40 -05001328 // Change layout of our render target so it can be used as the color attachment
1329 VkImageLayout layout = vkRT->currentLayout();
1330 // Our color attachment is purely a destination and won't be read so don't need to flush or
1331 // invalidate any caches
1332 VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
1333 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
1334 VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
1335 VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
1336 vkRT->setImageLayout(this,
1337 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
1338 srcAccessMask,
1339 dstAccessMask,
1340 srcStageMask,
1341 dstStageMask,
1342 false);
1343
egdaniel3d5d9ac2016-03-01 12:56:15 -08001344 // If we are using a stencil attachment we also need to update its layout
egdaniel0e1853c2016-03-17 11:35:45 -07001345 if (!pipeline.getStencil().isDisabled()) {
egdaniel3d5d9ac2016-03-01 12:56:15 -08001346 GrStencilAttachment* stencil = vkRT->renderTargetPriv().getStencilAttachment();
1347 GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
1348 VkImageLayout origDstLayout = vkStencil->currentLayout();
1349 VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
1350 VkAccessFlags dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
egdaniel0e1853c2016-03-17 11:35:45 -07001351 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
egdaniel3d5d9ac2016-03-01 12:56:15 -08001352 VkPipelineStageFlags srcStageMask =
1353 GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
1354 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
1355 vkStencil->setImageLayout(this,
1356 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
1357 srcAccessMask,
1358 dstAccessMask,
1359 srcStageMask,
1360 dstStageMask,
1361 false);
1362 }
1363
egdaniel0e1853c2016-03-17 11:35:45 -07001364
1365 for (int i = 0; i < meshCount; ++i) {
1366 if (GrXferBarrierType barrierType = pipeline.xferBarrierType(*this->caps())) {
1367 this->xferBarrier(pipeline.getRenderTarget(), barrierType);
1368 }
1369
1370 const GrMesh& mesh = meshes[i];
1371 GrMesh::Iterator iter;
1372 const GrNonInstancedMesh* nonIdxMesh = iter.init(mesh);
1373 do {
1374 if (nonIdxMesh->primitiveType() != primitiveType) {
1375 // Technically we don't have to call this here (since there is a safety check in
egdaniel22281c12016-03-23 13:49:40 -07001376 // pipelineState:setData but this will allow for quicker freeing of resources if the
1377 // pipelineState sits in a cache for a while.
1378 pipelineState->freeTempResources(this);
1379 pipelineState->unref();
1380 SkDEBUGCODE(pipelineState = nullptr);
egdaniel0e1853c2016-03-17 11:35:45 -07001381 primitiveType = nonIdxMesh->primitiveType();
1382 if (!this->prepareDrawState(pipeline, primProc, primitiveType, *renderPass,
egdaniel22281c12016-03-23 13:49:40 -07001383 &pipelineState)) {
egdaniel0e1853c2016-03-17 11:35:45 -07001384 return;
1385 }
1386 }
egdaniel22281c12016-03-23 13:49:40 -07001387 SkASSERT(pipelineState);
egdaniel0e1853c2016-03-17 11:35:45 -07001388 this->bindGeometry(primProc, *nonIdxMesh);
1389
1390 if (nonIdxMesh->isIndexed()) {
1391 fCurrentCmdBuffer->drawIndexed(this,
1392 nonIdxMesh->indexCount(),
1393 1,
1394 nonIdxMesh->startIndex(),
1395 nonIdxMesh->startVertex(),
1396 0);
1397 } else {
1398 fCurrentCmdBuffer->draw(this,
1399 nonIdxMesh->vertexCount(),
1400 1,
1401 nonIdxMesh->startVertex(),
1402 0);
1403 }
1404
1405 fStats.incNumDraws();
1406 } while ((nonIdxMesh = iter.next()));
Greg Daniel164a9f02016-02-22 09:56:40 -05001407 }
1408
1409 fCurrentCmdBuffer->endRenderPass(this);
1410
egdaniel22281c12016-03-23 13:49:40 -07001411 // Technically we don't have to call this here (since there is a safety check in
1412 // pipelineState:setData but this will allow for quicker freeing of resources if the
1413 // pipelineState sits in a cache for a while.
1414 pipelineState->freeTempResources(this);
1415 pipelineState->unref();
Greg Daniel164a9f02016-02-22 09:56:40 -05001416
1417#if SWAP_PER_DRAW
1418 glFlush();
1419#if defined(SK_BUILD_FOR_MAC)
1420 aglSwapBuffers(aglGetCurrentContext());
1421 int set_a_break_pt_here = 9;
1422 aglSwapBuffers(aglGetCurrentContext());
1423#elif defined(SK_BUILD_FOR_WIN32)
1424 SwapBuf();
1425 int set_a_break_pt_here = 9;
1426 SwapBuf();
1427#endif
1428#endif
1429}
1430