/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkGpu.h"

#include "GrContextOptions.h"
#include "GrGeometryProcessor.h"
#include "GrGpuResourceCacheAccess.h"
#include "GrMesh.h"
#include "GrPipeline.h"
#include "GrRenderTargetPriv.h"
#include "GrSurfacePriv.h"
#include "GrTexturePriv.h"

#include "GrVkCommandBuffer.h"
#include "GrVkImage.h"
#include "GrVkIndexBuffer.h"
#include "GrVkMemory.h"
#include "GrVkPipeline.h"
#include "GrVkProgram.h"
#include "GrVkProgramBuilder.h"
#include "GrVkProgramDesc.h"
#include "GrVkRenderPass.h"
#include "GrVkResourceProvider.h"
#include "GrVkTexture.h"
#include "GrVkTextureRenderTarget.h"
#include "GrVkTransferBuffer.h"
#include "GrVkVertexBuffer.h"

#include "SkConfig8888.h"

#include "vk/GrVkInterface.h"
#include "vk/GrVkTypes.h"

#define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
#define VK_CALL_RET(RET, X) GR_VK_CALL_RET(this->vkInterface(), RET, X)
#define VK_CALL_ERRCHECK(X) GR_VK_CALL_ERRCHECK(this->vkInterface(), X)
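// These wrappers route Vulkan calls through this GrVkGpu's GrVkInterface function table; the
// ERRCHECK variant is also expected to check the returned VkResult (see GrVkInterface.h).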

#ifdef ENABLE_VK_LAYERS
VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(
    VkDebugReportFlagsEXT       flags,
    VkDebugReportObjectTypeEXT  objectType,
    uint64_t                    object,
    size_t                      location,
    int32_t                     messageCode,
    const char*                 pLayerPrefix,
    const char*                 pMessage,
    void*                       pUserData) {
    if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
        SkDebugf("Vulkan error [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    } else if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) {
        SkDebugf("Vulkan warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    } else if (flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) {
        SkDebugf("Vulkan perf warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    } else {
        SkDebugf("Vulkan info/debug [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    }
    return VK_FALSE;
}
#endif
65
jvanverth633b3562016-03-23 11:01:22 -070066GrGpu* GrVkGpu::Create(GrBackendContext backendContext, const GrContextOptions& options,
67 GrContext* context) {
68 SkAutoTUnref<const GrVkBackendContext> vkBackendContext(
69 reinterpret_cast<const GrVkBackendContext*>(backendContext));
70 if (!vkBackendContext) {
71 vkBackendContext.reset(GrVkBackendContext::Create());
72 if (!vkBackendContext) {
73 return nullptr;
Greg Daniel164a9f02016-02-22 09:56:40 -050074 }
jvanverth633b3562016-03-23 11:01:22 -070075 } else {
76 vkBackendContext->ref();
Greg Daniel164a9f02016-02-22 09:56:40 -050077 }
78
jvanverth633b3562016-03-23 11:01:22 -070079 return new GrVkGpu(context, options, vkBackendContext);
Greg Daniel164a9f02016-02-22 09:56:40 -050080}
81
82////////////////////////////////////////////////////////////////////////////////
83
jvanverth633b3562016-03-23 11:01:22 -070084GrVkGpu::GrVkGpu(GrContext* context, const GrContextOptions& options,
85 const GrVkBackendContext* backendCtx)
Greg Daniel164a9f02016-02-22 09:56:40 -050086 : INHERITED(context)
jvanverth633b3562016-03-23 11:01:22 -070087 , fVkInstance(backendCtx->fInstance)
88 , fDevice(backendCtx->fDevice)
89 , fQueue(backendCtx->fQueue)
90 , fResourceProvider(this) {
91 fBackendContext.reset(backendCtx);
Greg Daniel164a9f02016-02-22 09:56:40 -050092
jvanverthd2497f32016-03-18 12:39:05 -070093#ifdef ENABLE_VK_LAYERS
jvanverth633b3562016-03-23 11:01:22 -070094 if (this->vkInterface()->hasInstanceExtension(VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) {
jvanverthd2497f32016-03-18 12:39:05 -070095 /* Setup callback creation information */
96 VkDebugReportCallbackCreateInfoEXT callbackCreateInfo;
97 callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
98 callbackCreateInfo.pNext = nullptr;
99 callbackCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT |
100 VK_DEBUG_REPORT_WARNING_BIT_EXT |
101 //VK_DEBUG_REPORT_INFORMATION_BIT_EXT |
102 //VK_DEBUG_REPORT_DEBUG_BIT_EXT |
103 VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
104 callbackCreateInfo.pfnCallback = &DebugReportCallback;
105 callbackCreateInfo.pUserData = nullptr;
106
107 /* Register the callback */
jvanverth633b3562016-03-23 11:01:22 -0700108 GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateDebugReportCallbackEXT(fVkInstance,
109 &callbackCreateInfo, nullptr, &fCallback));
jvanverthd2497f32016-03-18 12:39:05 -0700110 }
111#endif
jvanverth633b3562016-03-23 11:01:22 -0700112
113 fCompiler = shaderc_compiler_initialize();
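    // The shaderc compiler instance is kept for the life of the GrVkGpu; program building
    // (see GrVkProgramBuilder) presumably uses it to compile shader source down to SPIR-V.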

    fVkCaps.reset(new GrVkCaps(options, this->vkInterface(), backendCtx->fPhysicalDevice));
    fCaps.reset(SkRef(fVkCaps.get()));

    VK_CALL(GetPhysicalDeviceMemoryProperties(backendCtx->fPhysicalDevice, &fPhysDevMemProps));

    const VkCommandPoolCreateInfo cmdPoolInfo = {
        VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,  // sType
        nullptr,                                     // pNext
        VK_COMMAND_POOL_CREATE_TRANSIENT_BIT,        // CmdPoolCreateFlags
        backendCtx->fQueueFamilyIndex,               // queueFamilyIndex
    };
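    // VK_COMMAND_POOL_CREATE_TRANSIENT_BIT hints that command buffers allocated from this pool
    // are short-lived; we recycle the primary command buffer after every submit.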
    GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateCommandPool(fDevice, &cmdPoolInfo, nullptr,
                                                               &fCmdPool));

    // must call this after creating the CommandPool
    fResourceProvider.init();
    fCurrentCmdBuffer = fResourceProvider.createCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->begin(this);
}

GrVkGpu::~GrVkGpu() {
    fCurrentCmdBuffer->end(this);
    fCurrentCmdBuffer->unref(this);

    // wait for all commands to finish
    fResourceProvider.checkCommandBuffers();
    SkDEBUGCODE(VkResult res =) VK_CALL(QueueWaitIdle(fQueue));
    // VK_ERROR_DEVICE_LOST is acceptable when tearing down (see 4.2.4 in spec)
    SkASSERT(VK_SUCCESS == res || VK_ERROR_DEVICE_LOST == res);

    // must call this just before we destroy the VkDevice
    fResourceProvider.destroyResources();

    VK_CALL(DestroyCommandPool(fDevice, fCmdPool, nullptr));

    shaderc_compiler_release(fCompiler);

#ifdef ENABLE_VK_LAYERS
    VK_CALL(DestroyDebugReportCallbackEXT(fVkInstance, fCallback, nullptr));
#endif
}

///////////////////////////////////////////////////////////////////////////////

void GrVkGpu::submitCommandBuffer(SyncQueue sync) {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->end(this);

    fCurrentCmdBuffer->submitToQueue(this, fQueue, sync);
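    // kForce_SyncQueue is expected to block here until the queue has finished this work
    // (readbacks such as onReadPixels rely on that); kSkip_SyncQueue just submits and returns.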
    fResourceProvider.checkCommandBuffers();

    // Release old command buffer and create a new one
    fCurrentCmdBuffer->unref(this);
    fCurrentCmdBuffer = fResourceProvider.createCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);

    fCurrentCmdBuffer->begin(this);
}

///////////////////////////////////////////////////////////////////////////////
GrVertexBuffer* GrVkGpu::onCreateVertexBuffer(size_t size, bool dynamic) {
    return GrVkVertexBuffer::Create(this, size, dynamic);
}

GrIndexBuffer* GrVkGpu::onCreateIndexBuffer(size_t size, bool dynamic) {
    return GrVkIndexBuffer::Create(this, size, dynamic);
}

GrTransferBuffer* GrVkGpu::onCreateTransferBuffer(size_t size, TransferType type) {
    // CPU-to-GPU transfers use a copy-read buffer; GPU-to-CPU transfers use a copy-write buffer.
    GrVkBuffer::Type bufferType = kCpuToGpu_TransferType == type ? GrVkBuffer::kCopyRead_Type
                                                                 : GrVkBuffer::kCopyWrite_Type;
    return GrVkTransferBuffer::Create(this, size, bufferType);
}

////////////////////////////////////////////////////////////////////////////////
bool GrVkGpu::onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
                                   GrPixelConfig srcConfig, DrawPreference* drawPreference,
                                   WritePixelTempDrawInfo* tempDrawInfo) {
    if (kIndex_8_GrPixelConfig == srcConfig || GrPixelConfigIsCompressed(dstSurface->config())) {
        return false;
    }

    // Currently we don't handle draws, so if the caller wants/needs to do a draw we need to fail
    if (kNoDraw_DrawPreference != *drawPreference) {
        return false;
    }

    if (dstSurface->config() != srcConfig) {
        // TODO: This should fall back to drawing or copying to change config of dstSurface to
        // match that of srcConfig.
        return false;
    }

    return true;
}

bool GrVkGpu::onWritePixels(GrSurface* surface,
                            int left, int top, int width, int height,
                            GrPixelConfig config,
                            const SkTArray<GrMipLevel>& texels) {
    GrVkTexture* vkTex = static_cast<GrVkTexture*>(surface->asTexture());
    if (!vkTex) {
        return false;
    }

    // TODO: We're ignoring MIP levels here.
    if (texels.empty() || !texels.begin()->fPixels) {
        return false;
    }

    // We assume Vulkan doesn't do sRGB <-> linear conversions when reading and writing pixels.
    if (GrPixelConfigIsSRGB(surface->config()) != GrPixelConfigIsSRGB(config)) {
        return false;
    }

    bool success = false;
    if (GrPixelConfigIsCompressed(vkTex->desc().fConfig)) {
        // We check that config == desc.fConfig in GrGpu::getWritePixelsInfo()
        SkASSERT(config == vkTex->desc().fConfig);
        // TODO: add compressed texture support
        // delete the following two lines and uncomment the two after that when ready
        vkTex->unref();
        return false;
        //success = this->uploadCompressedTexData(vkTex->desc(), buffer, false, left, top, width,
        //                                        height);
    } else {
        bool linearTiling = vkTex->isLinearTiled();
        if (linearTiling && VK_IMAGE_LAYOUT_PREINITIALIZED != vkTex->currentLayout()) {
            // Need to change the layout to general in order to perform a host write
            VkImageLayout layout = vkTex->currentLayout();
            VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
            VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_HOST_BIT;
            VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
            VkAccessFlags dstAccessMask = VK_ACCESS_HOST_WRITE_BIT;
            vkTex->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_GENERAL,
                                  srcAccessMask,
                                  dstAccessMask,
                                  srcStageMask,
                                  dstStageMask,
                                  false);
        }
        success = this->uploadTexData(vkTex, left, top, width, height, config,
                                      texels.begin()->fPixels, texels.begin()->fRowBytes);
    }

    if (success) {
        vkTex->texturePriv().dirtyMipMaps(true);
        return true;
    }

    return false;
}

bool GrVkGpu::uploadTexData(GrVkTexture* tex,
                            int left, int top, int width, int height,
                            GrPixelConfig dataConfig,
                            const void* data,
                            size_t rowBytes) {
    SkASSERT(data);

    // If we're uploading compressed data then we should be using uploadCompressedTexData
    SkASSERT(!GrPixelConfigIsCompressed(dataConfig));

    bool linearTiling = tex->isLinearTiled();

    size_t bpp = GrBytesPerPixel(dataConfig);

    const GrSurfaceDesc& desc = tex->desc();

    if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
                                               &width, &height, &data, &rowBytes)) {
        return false;
    }
    size_t trimRowBytes = width * bpp;
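    // From here the upload takes one of two paths: linearly tiled images are mapped and written
    // directly by the host (flipping rows for bottom-left origins), while optimally tiled images
    // are staged through a transfer buffer and copied with copyBufferToImage.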

    if (linearTiling) {
        SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == tex->currentLayout() ||
                 VK_IMAGE_LAYOUT_GENERAL == tex->currentLayout());
        const VkImageSubresource subres = {
            VK_IMAGE_ASPECT_COLOR_BIT,
            0,  // mipLevel
            0,  // arraySlice
        };
        VkSubresourceLayout layout;
        VkResult err;

        const GrVkInterface* interface = this->vkInterface();

        GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
                                                        tex->textureImage(),
                                                        &subres,
                                                        &layout));

        int texTop = kBottomLeft_GrSurfaceOrigin == desc.fOrigin ? tex->height() - top - height
                                                                 : top;
        VkDeviceSize offset = texTop*layout.rowPitch + left*bpp;
        VkDeviceSize size = height*layout.rowPitch;
        void* mapPtr;
        err = GR_VK_CALL(interface, MapMemory(fDevice, tex->textureMemory(), offset, size, 0,
                                              &mapPtr));
        if (err) {
            return false;
        }

        if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
            // copy into buffer by rows
            const char* srcRow = reinterpret_cast<const char*>(data);
            char* dstRow = reinterpret_cast<char*>(mapPtr) + (height - 1)*layout.rowPitch;
            for (int y = 0; y < height; y++) {
                memcpy(dstRow, srcRow, trimRowBytes);
                srcRow += rowBytes;
                dstRow -= layout.rowPitch;
            }
        } else {
            // If there is no padding on the src (rowBytes) or dst (layout.rowPitch) we can memcpy
            if (trimRowBytes == rowBytes && trimRowBytes == layout.rowPitch) {
                memcpy(mapPtr, data, trimRowBytes * height);
            } else {
                SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), data, rowBytes,
                             trimRowBytes, height);
            }
        }

        GR_VK_CALL(interface, UnmapMemory(fDevice, tex->textureMemory()));
    } else {
        GrVkTransferBuffer* transferBuffer =
            GrVkTransferBuffer::Create(this, trimRowBytes * height, GrVkBuffer::kCopyRead_Type);

        void* mapPtr = transferBuffer->map();

        if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
            // copy into buffer by rows
            const char* srcRow = reinterpret_cast<const char*>(data);
            char* dstRow = reinterpret_cast<char*>(mapPtr) + (height - 1)*trimRowBytes;
            for (int y = 0; y < height; y++) {
                memcpy(dstRow, srcRow, trimRowBytes);
                srcRow += rowBytes;
                dstRow -= trimRowBytes;
            }
        } else {
            // If there is no padding on the src data rows, we can do a single memcpy
            if (trimRowBytes == rowBytes) {
                memcpy(mapPtr, data, trimRowBytes * height);
            } else {
                SkRectMemcpy(mapPtr, trimRowBytes, data, rowBytes, trimRowBytes, height);
            }
        }

        transferBuffer->unmap();

        // make sure the unmap has finished
        transferBuffer->addMemoryBarrier(this,
                                         VK_ACCESS_HOST_WRITE_BIT,
                                         VK_ACCESS_TRANSFER_READ_BIT,
                                         VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
                                         VK_PIPELINE_STAGE_TRANSFER_BIT,
                                         false);

        // Set up copy region
        bool flipY = kBottomLeft_GrSurfaceOrigin == tex->origin();
        VkOffset3D offset = {
            left,
            flipY ? tex->height() - top - height : top,
            0
        };

        VkBufferImageCopy region;
        memset(&region, 0, sizeof(VkBufferImageCopy));
        region.bufferOffset = 0;
        region.bufferRowLength = width;
        region.bufferImageHeight = height;
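        // bufferRowLength and bufferImageHeight are measured in texels, not bytes; they describe
        // the tightly packed (trimRowBytes) staging buffer filled above.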
        region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
        region.imageOffset = offset;
        region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

        // Change layout of our target so it can be copied to
        VkImageLayout layout = tex->currentLayout();
        VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
        VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
        tex->setImageLayout(this,
                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                            srcAccessMask,
                            dstAccessMask,
                            srcStageMask,
                            dstStageMask,
                            false);

        // Copy the buffer to the image
        fCurrentCmdBuffer->copyBufferToImage(this,
                                             transferBuffer,
                                             tex,
                                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                             1,
                                             &region);

        // Submit the current command buffer to the Queue
        this->submitCommandBuffer(kSkip_SyncQueue);

        transferBuffer->unref();
    }

    return true;
}

////////////////////////////////////////////////////////////////////////////////
GrTexture* GrVkGpu::onCreateTexture(const GrSurfaceDesc& desc, GrGpuResource::LifeCycle lifeCycle,
                                    const SkTArray<GrMipLevel>& texels) {
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);

    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(desc.fConfig, &pixelFormat)) {
        return nullptr;
    }

    if (!fVkCaps->isConfigTexturable(desc.fConfig)) {
        return nullptr;
    }

    bool linearTiling = false;
    if (SkToBool(desc.fFlags & kZeroCopy_GrSurfaceFlag)) {
        if (fVkCaps->isConfigTexurableLinearly(desc.fConfig) &&
            (!renderTarget || fVkCaps->isConfigRenderableLinearly(desc.fConfig, false))) {
            linearTiling = true;
        } else {
            return nullptr;
        }
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    if (renderTarget) {
        usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    }

    // For now we will set the VK_IMAGE_USAGE_TRANSFER_DST_BIT and VK_IMAGE_USAGE_TRANSFER_SRC_BIT
    // on every texture since we do not know whether or not we will be using this texture in some
    // copy or not. Also this assumes, as is the current case, that all render targets in vulkan
    // are also textures. If we change this practice of setting both bits, we must make sure to
    // set the destination bit if we are uploading srcData to the texture.
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VkFlags memProps = (!texels.empty() && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
                                                           VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
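    // Host-visible memory is only needed when we are going to write the initial texels directly
    // into a linearly tiled image; otherwise the texture is device-local and filled through a
    // staging buffer in uploadTexData.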

    // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is
    // requested, this ImageDesc describes the resolved texture. Therefore we always have samples
    // set to 1.
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = pixelFormat;
    imageDesc.fWidth = desc.fWidth;
    imageDesc.fHeight = desc.fHeight;
    imageDesc.fLevels = 1;
    imageDesc.fSamples = 1;
    imageDesc.fImageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = usageFlags;
    imageDesc.fMemProps = memProps;

    GrVkTexture* tex;
    if (renderTarget) {
        tex = GrVkTextureRenderTarget::CreateNewTextureRenderTarget(this, desc, lifeCycle,
                                                                    imageDesc);
#if 0
        // This clear can be included to fix the warning described in https://bugs.skia.org/5045
        // Obviously we do not want to be clearing needlessly every time we create a render target.
        SkIRect rect = SkIRect::MakeWH(tex->width(), tex->height());
        this->clear(rect, GrColor_TRANSPARENT_BLACK, tex->asRenderTarget());
#endif
    } else {
        tex = GrVkTexture::CreateNewTexture(this, desc, lifeCycle, imageDesc);
    }

    if (!tex) {
        return nullptr;
    }

    // TODO: We're ignoring MIP levels here.
    if (!texels.empty()) {
        SkASSERT(texels.begin()->fPixels);
        if (!this->uploadTexData(tex, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
                                 texels.begin()->fPixels, texels.begin()->fRowBytes)) {
            tex->unref();
            return nullptr;
        }
    }

    return tex;
}

////////////////////////////////////////////////////////////////////////////////

static GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin) {
    // By default, all textures in Vk use TopLeft
    if (kDefault_GrSurfaceOrigin == origin) {
        return kTopLeft_GrSurfaceOrigin;
    } else {
        return origin;
    }
}

GrTexture* GrVkGpu::onWrapBackendTexture(const GrBackendTextureDesc& desc,
                                         GrWrapOwnership ownership) {
    VkFormat format;
    if (!GrPixelConfigToVkFormat(desc.fConfig, &format)) {
        return nullptr;
    }

    if (0 == desc.fTextureHandle) {
        return nullptr;
    }

    int maxSize = this->caps()->maxTextureSize();
    if (desc.fWidth > maxSize || desc.fHeight > maxSize) {
        return nullptr;
    }

    const GrVkTextureInfo* info = reinterpret_cast<const GrVkTextureInfo*>(desc.fTextureHandle);
    if (VK_NULL_HANDLE == info->fImage || VK_NULL_HANDLE == info->fAlloc) {
        return nullptr;
    }

    GrGpuResource::LifeCycle lifeCycle = (kAdopt_GrWrapOwnership == ownership)
                                                 ? GrGpuResource::kAdopted_LifeCycle
                                                 : GrGpuResource::kBorrowed_LifeCycle;

    GrSurfaceDesc surfDesc;
    // next line relies on GrBackendTextureDesc's flags matching GrTexture's
    surfDesc.fFlags = (GrSurfaceFlags)desc.fFlags;
    surfDesc.fWidth = desc.fWidth;
    surfDesc.fHeight = desc.fHeight;
    surfDesc.fConfig = desc.fConfig;
    surfDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount());
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag);
    // In GL, Chrome assumes all textures are BottomLeft
    // In VK, we don't have this restriction
    surfDesc.fOrigin = resolve_origin(desc.fOrigin);

    GrVkTexture* texture = nullptr;
    if (renderTarget) {
        texture = GrVkTextureRenderTarget::CreateWrappedTextureRenderTarget(this, surfDesc,
                                                                            lifeCycle, format,
                                                                            info);
    } else {
        texture = GrVkTexture::CreateWrappedTexture(this, surfDesc, lifeCycle, format,
                                                    info);
    }
    if (!texture) {
        return nullptr;
    }

    return texture;
}

GrRenderTarget* GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& wrapDesc,
                                                   GrWrapOwnership ownership) {

    const GrVkTextureInfo* info =
        reinterpret_cast<const GrVkTextureInfo*>(wrapDesc.fRenderTargetHandle);
    if (VK_NULL_HANDLE == info->fImage ||
        (VK_NULL_HANDLE == info->fAlloc && kAdopt_GrWrapOwnership == ownership)) {
        return nullptr;
    }

    GrGpuResource::LifeCycle lifeCycle = (kAdopt_GrWrapOwnership == ownership)
                                                 ? GrGpuResource::kAdopted_LifeCycle
                                                 : GrGpuResource::kBorrowed_LifeCycle;

    GrSurfaceDesc desc;
    desc.fConfig = wrapDesc.fConfig;
    desc.fFlags = kCheckAllocation_GrSurfaceFlag;
    desc.fWidth = wrapDesc.fWidth;
    desc.fHeight = wrapDesc.fHeight;
    desc.fSampleCnt = SkTMin(wrapDesc.fSampleCnt, this->caps()->maxSampleCount());

    desc.fOrigin = resolve_origin(wrapDesc.fOrigin);

    GrVkRenderTarget* tgt = GrVkRenderTarget::CreateWrappedRenderTarget(this, desc,
                                                                        lifeCycle,
                                                                        info);
    if (tgt && wrapDesc.fStencilBits) {
        if (!createStencilAttachmentForRenderTarget(tgt, desc.fWidth, desc.fHeight)) {
            tgt->unref();
            return nullptr;
        }
    }
    return tgt;
}

////////////////////////////////////////////////////////////////////////////////

void GrVkGpu::bindGeometry(const GrPrimitiveProcessor& primProc,
                           const GrNonInstancedMesh& mesh) {
    GrVkVertexBuffer* vbuf;
    vbuf = (GrVkVertexBuffer*)mesh.vertexBuffer();
    SkASSERT(vbuf);
    SkASSERT(!vbuf->isMapped());

    vbuf->addMemoryBarrier(this,
                           VK_ACCESS_HOST_WRITE_BIT,
                           VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
                           VK_PIPELINE_STAGE_HOST_BIT,
                           VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                           false);

    fCurrentCmdBuffer->bindVertexBuffer(this, vbuf);

    if (mesh.isIndexed()) {
        GrVkIndexBuffer* ibuf = (GrVkIndexBuffer*)mesh.indexBuffer();
        SkASSERT(ibuf);
        SkASSERT(!ibuf->isMapped());

        ibuf->addMemoryBarrier(this,
                               VK_ACCESS_HOST_WRITE_BIT,
                               VK_ACCESS_INDEX_READ_BIT,
                               VK_PIPELINE_STAGE_HOST_BIT,
                               VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                               false);

        fCurrentCmdBuffer->bindIndexBuffer(this, ibuf);
    }
}

////////////////////////////////////////////////////////////////////////////////

GrStencilAttachment* GrVkGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt,
                                                                     int width,
                                                                     int height) {
    SkASSERT(rt->asTexture());
    SkASSERT(width >= rt->width());
    SkASSERT(height >= rt->height());

    int samples = rt->numStencilSamples();

    SkASSERT(this->vkCaps().stencilFormats().count());
    const GrVkCaps::StencilFormat& sFmt = this->vkCaps().stencilFormats()[0];

    GrVkStencilAttachment* stencil(GrVkStencilAttachment::Create(this,
                                                                 GrGpuResource::kCached_LifeCycle,
                                                                 width,
                                                                 height,
                                                                 samples,
                                                                 sFmt));
    fStats.incStencilAttachmentCreates();
    return stencil;
}

////////////////////////////////////////////////////////////////////////////////

GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, int h,
                                                         GrPixelConfig config) {

    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
        return 0;
    }

    bool linearTiling = false;
    if (!fVkCaps->isConfigTexturable(config)) {
        return 0;
    }

    if (fVkCaps->isConfigTexurableLinearly(config)) {
        linearTiling = true;
    }

    // Currently this is not supported since it requires a copy which has not yet been implemented.
    if (srcData && !linearTiling) {
        return 0;
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VkFlags memProps = (srcData && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
                                                   VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

    VkImage image = VK_NULL_HANDLE;
    VkDeviceMemory alloc = VK_NULL_HANDLE;

    VkImageTiling imageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
    VkImageLayout initialLayout = (VK_IMAGE_TILING_LINEAR == imageTiling)
                                  ? VK_IMAGE_LAYOUT_PREINITIALIZED
                                  : VK_IMAGE_LAYOUT_UNDEFINED;
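    // PREINITIALIZED preserves data the host writes into the mapped image below, which only
    // applies to linear tiling; optimally tiled images start out UNDEFINED.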

    // Create Image
    VkSampleCountFlagBits vkSamples;
    if (!GrSampleCountToVkSampleCount(1, &vkSamples)) {
        return 0;
    }

    const VkImageCreateInfo imageCreateInfo = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,  // sType
        NULL,                                 // pNext
        0,                                    // VkImageCreateFlags
        VK_IMAGE_TYPE_2D,                     // VkImageType
        pixelFormat,                          // VkFormat
        { w, h, 1 },                          // VkExtent3D
        1,                                    // mipLevels
        1,                                    // arrayLayers
        vkSamples,                            // samples
        imageTiling,                          // VkImageTiling
        usageFlags,                           // VkImageUsageFlags
        VK_SHARING_MODE_EXCLUSIVE,            // VkSharingMode
        0,                                    // queueFamilyCount
        0,                                    // pQueueFamilyIndices
        initialLayout                         // initialLayout
    };

    GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateImage(this->device(), &imageCreateInfo,
                                                         nullptr, &image));

    if (!GrVkMemory::AllocAndBindImageMemory(this, image, memProps, &alloc)) {
        VK_CALL(DestroyImage(this->device(), image, nullptr));
        return 0;
    }

    if (srcData) {
        if (linearTiling) {
            const VkImageSubresource subres = {
                VK_IMAGE_ASPECT_COLOR_BIT,
                0,  // mipLevel
                0,  // arraySlice
            };
            VkSubresourceLayout layout;
            VkResult err;

            VK_CALL(GetImageSubresourceLayout(fDevice, image, &subres, &layout));

            void* mapPtr;
            err = VK_CALL(MapMemory(fDevice, alloc, 0, layout.rowPitch * h, 0, &mapPtr));
            if (err) {
                VK_CALL(FreeMemory(this->device(), alloc, nullptr));
                VK_CALL(DestroyImage(this->device(), image, nullptr));
                return 0;
            }

            size_t bpp = GrBytesPerPixel(config);
            size_t rowCopyBytes = bpp * w;
            // If there is no padding on dst (layout.rowPitch) we can do a single memcpy.
            // This assumes the srcData comes in with no padding.
            if (rowCopyBytes == layout.rowPitch) {
                memcpy(mapPtr, srcData, rowCopyBytes * h);
            } else {
                SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), srcData, rowCopyBytes,
                             rowCopyBytes, h);
            }
            VK_CALL(UnmapMemory(fDevice, alloc));
        } else {
            // TODO: Add support for copying to optimal tiling
            SkASSERT(false);
        }
    }

    GrVkTextureInfo* info = new GrVkTextureInfo;
    info->fImage = image;
    info->fAlloc = alloc;
    info->fImageTiling = imageTiling;
    info->fImageLayout = initialLayout;

    return (GrBackendObject)info;
}

bool GrVkGpu::isTestingOnlyBackendTexture(GrBackendObject id) const {
    const GrVkTextureInfo* backend = reinterpret_cast<const GrVkTextureInfo*>(id);

    if (backend && backend->fImage && backend->fAlloc) {
        VkMemoryRequirements req;
        memset(&req, 0, sizeof(req));
        GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
                                                                   backend->fImage,
                                                                   &req));
        // TODO: find a better check
        // This will probably fail with a different driver
        return (req.size > 0) && (req.size <= 8192 * 8192);
    }

    return false;
}

void GrVkGpu::deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandon) {
    const GrVkTextureInfo* backend = reinterpret_cast<const GrVkTextureInfo*>(id);

    if (backend) {
        if (!abandon) {
            // something in the command buffer may still be using this, so force submit
            this->submitCommandBuffer(kForce_SyncQueue);

            VK_CALL(FreeMemory(this->device(), backend->fAlloc, nullptr));
            VK_CALL(DestroyImage(this->device(), backend->fImage, nullptr));
        }
        delete backend;
    }
}

////////////////////////////////////////////////////////////////////////////////

void GrVkGpu::addMemoryBarrier(VkPipelineStageFlags srcStageMask,
                               VkPipelineStageFlags dstStageMask,
                               bool byRegion,
                               VkMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                     VkPipelineStageFlags dstStageMask,
                                     bool byRegion,
                                     VkBufferMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kBufferMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::addImageMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                    VkPipelineStageFlags dstStageMask,
                                    bool byRegion,
                                    VkImageMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kImageMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::finishDrawTarget() {
    // Submit the current command buffer to the Queue
    this->submitCommandBuffer(kSkip_SyncQueue);
}

void GrVkGpu::clearStencil(GrRenderTarget* target) {
    if (nullptr == target) {
        return;
    }
    GrStencilAttachment* stencil = target->renderTargetPriv().getStencilAttachment();
    GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));

    VkImageLayout origDstLayout = vkStencil->currentLayout();

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    vkStencil->setImageLayout(this,
                              VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                              srcAccessMask,
                              dstAccessMask,
                              srcStageMask,
                              dstStageMask,
                              false);

    VkImageSubresourceRange subRange;
    memset(&subRange, 0, sizeof(VkImageSubresourceRange));
    subRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    subRange.baseMipLevel = 0;
    subRange.levelCount = 1;
    subRange.baseArrayLayer = 0;
    subRange.layerCount = 1;

    // TODO: I imagine that most times we want to clear a stencil it will be at the beginning of a
    // draw. Thus we should look into using the load op functions on the render pass to clear out
    // the stencil there.
    fCurrentCmdBuffer->clearDepthStencilImage(this, vkStencil, &vkStencilColor, 1, &subRange);
}

void GrVkGpu::onClearStencilClip(GrRenderTarget* target, const SkIRect& rect, bool insideClip) {
    SkASSERT(target);

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
    GrStencilAttachment* sb = target->renderTargetPriv().getStencilAttachment();
    GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)sb;

    // this should only be called internally when we know we have a
    // stencil buffer.
    SkASSERT(sb);
    int stencilBitCount = sb->bits();

    // The contract with the callers does not guarantee that we preserve all bits in the stencil
    // during this clear. Thus we will clear the entire stencil to the desired value.

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
    if (insideClip) {
        vkStencilColor.stencil = (1 << (stencilBitCount - 1));
    } else {
        vkStencilColor.stencil = 0;
    }

    VkImageLayout origDstLayout = vkStencil->currentLayout();
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    VkPipelineStageFlags srcStageMask =
        GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    vkStencil->setImageLayout(this,
                              VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                              srcAccessMask,
                              dstAccessMask,
                              srcStageMask,
                              dstStageMask,
                              false);

    VkClearRect clearRect;
    // Flip rect if necessary
    SkIRect vkRect = rect;

    if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) {
        vkRect.fTop = vkRT->height() - rect.fBottom;
        vkRect.fBottom = vkRT->height() - rect.fTop;
    }

    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };

    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
    SkASSERT(renderPass);
    fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

    uint32_t stencilIndex;
    SkAssertResult(renderPass->stencilAttachmentIndex(&stencilIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    attachment.colorAttachment = 0; // this value shouldn't matter
    attachment.clearValue.depthStencil = vkStencilColor;

    fCurrentCmdBuffer->clearAttachments(this, 1, &attachment, 1, &clearRect);
    fCurrentCmdBuffer->endRenderPass(this);

    return;
}

void GrVkGpu::onClear(GrRenderTarget* target, const SkIRect& rect, GrColor color) {
    // parent class should never let us get here with no RT
    SkASSERT(target);

    VkClearColorValue vkColor;
    GrColorToRGBAFloat(color, vkColor.float32);

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
    VkImageLayout origDstLayout = vkRT->currentLayout();

    if (rect.width() != target->width() || rect.height() != target->height()) {
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
        VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
        VkPipelineStageFlags srcStageMask =
            GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
        vkRT->setImageLayout(this,
                             VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                             srcAccessMask,
                             dstAccessMask,
                             srcStageMask,
                             dstStageMask,
                             false);

        VkClearRect clearRect;
        // Flip rect if necessary
        SkIRect vkRect = rect;
        if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) {
            vkRect.fTop = vkRT->height() - rect.fBottom;
            vkRect.fBottom = vkRT->height() - rect.fTop;
        }
        clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
        clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
        clearRect.baseArrayLayer = 0;
        clearRect.layerCount = 1;

        const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
        SkASSERT(renderPass);
        fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

        uint32_t colorIndex;
        SkAssertResult(renderPass->colorAttachmentIndex(&colorIndex));

        VkClearAttachment attachment;
        attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        attachment.colorAttachment = colorIndex;
        attachment.clearValue.color = vkColor;

        fCurrentCmdBuffer->clearAttachments(this, 1, &attachment, 1, &clearRect);
        fCurrentCmdBuffer->endRenderPass(this);
        return;
    }

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    vkRT->setImageLayout(this,
                         VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                         srcAccessMask,
                         dstAccessMask,
                         srcStageMask,
                         dstStageMask,
                         false);

    VkImageSubresourceRange subRange;
    memset(&subRange, 0, sizeof(VkImageSubresourceRange));
    subRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    subRange.baseMipLevel = 0;
    subRange.levelCount = 1;
    subRange.baseArrayLayer = 0;
    subRange.layerCount = 1;

    // In the future we may not actually be doing this type of clear at all. If we are inside a
    // render pass or doing a non full clear then we will use CmdClearColorAttachment. The more
    // common use case will be clearing an attachment at the start of a render pass, in which case
    // we will use the clear load ops.
    fCurrentCmdBuffer->clearColorImage(this,
                                       vkRT,
                                       &vkColor,
                                       1, &subRange);
}

inline bool can_copy_image(const GrSurface* dst,
                           const GrSurface* src,
                           const GrVkGpu* gpu) {
    if (src->asTexture() &&
        dst->asTexture() &&
        src->origin() == dst->origin() &&
        src->config() == dst->config()) {
        return true;
    }

    // How does msaa play into this? If a VkTexture is multisampled, are we copying the
    // multisampled or the resolved image here?

    return false;
}
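// can_copy_image requires matching configs and origins because vkCmdCopyImage, used below,
// performs no format conversion and no coordinate flipping.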

void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst,
                                     GrSurface* src,
                                     const SkIRect& srcRect,
                                     const SkIPoint& dstPoint) {
    SkASSERT(can_copy_image(dst, src, this));

    // Insert memory barriers to switch src and dst to transfer_source and transfer_dst layouts
    GrVkTexture* dstTex = static_cast<GrVkTexture*>(dst->asTexture());
    GrVkTexture* srcTex = static_cast<GrVkTexture*>(src->asTexture());

    VkImageLayout origDstLayout = dstTex->currentLayout();
    VkImageLayout origSrcLayout = srcTex->currentLayout();

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    // These flags are for flushing/invalidating caches and for the dst image it doesn't matter if
    // the cache is flushed since it is only being written to.
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    dstTex->setImageLayout(this,
                           VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                           srcAccessMask,
                           dstAccessMask,
                           srcStageMask,
                           dstStageMask,
                           false);

    srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origSrcLayout);
    dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origSrcLayout);
    dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;

    srcTex->setImageLayout(this,
                           VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                           srcAccessMask,
                           dstAccessMask,
                           srcStageMask,
                           dstStageMask,
                           false);

    // Flip rect if necessary
    SkIRect srcVkRect = srcRect;
    int32_t dstY = dstPoint.fY;

    if (kBottomLeft_GrSurfaceOrigin == src->origin()) {
        SkASSERT(kBottomLeft_GrSurfaceOrigin == dst->origin());
        srcVkRect.fTop = src->height() - srcRect.fBottom;
        srcVkRect.fBottom = src->height() - srcRect.fTop;
        dstY = dst->height() - dstPoint.fY - srcVkRect.height();
    }

    VkImageCopy copyRegion;
    memset(&copyRegion, 0, sizeof(VkImageCopy));
    copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.srcOffset = { srcVkRect.fLeft, srcVkRect.fTop, 0 };
    copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.dstOffset = { dstPoint.fX, dstY, 0 };
    // The copy extent's depth must be 1 (not 0) for a 2D image copy.
    copyRegion.extent = { (uint32_t)srcVkRect.width(), (uint32_t)srcVkRect.height(), 1 };

    fCurrentCmdBuffer->copyImage(this,
                                 srcTex,
                                 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                 dstTex,
                                 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                 1,
                                 &copyRegion);
}

inline bool can_copy_as_draw(const GrSurface* dst,
                             const GrSurface* src,
                             const GrVkGpu* gpu) {
    return false;
}

void GrVkGpu::copySurfaceAsDraw(GrSurface* dst,
                                GrSurface* src,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint) {
    SkASSERT(false);
}

bool GrVkGpu::onCopySurface(GrSurface* dst,
                            GrSurface* src,
                            const SkIRect& srcRect,
                            const SkIPoint& dstPoint) {
    if (can_copy_image(dst, src, this)) {
        this->copySurfaceAsCopyImage(dst, src, srcRect, dstPoint);
        return true;
    }

    if (can_copy_as_draw(dst, src, this)) {
        this->copySurfaceAsDraw(dst, src, srcRect, dstPoint);
        return true;
    }

    return false;
}

void GrVkGpu::onGetMultisampleSpecs(GrRenderTarget* rt, const GrStencilSettings&,
                                    int* effectiveSampleCnt, SkAutoTDeleteArray<SkPoint>*) {
    // TODO: stub.
    SkASSERT(!this->caps()->sampleLocationsSupport());
    *effectiveSampleCnt = rt->desc().fSampleCnt;
}

bool GrVkGpu::onGetReadPixelsInfo(GrSurface* srcSurface, int width, int height, size_t rowBytes,
                                  GrPixelConfig readConfig, DrawPreference* drawPreference,
                                  ReadPixelTempDrawInfo* tempDrawInfo) {
    // Currently we don't handle draws, so if the caller wants/needs to do a draw we need to fail
    if (kNoDraw_DrawPreference != *drawPreference) {
        return false;
    }

    if (srcSurface->config() != readConfig) {
        // TODO: This should fall back to drawing or copying to change config of srcSurface to
        // match that of readConfig.
        return false;
    }

    return true;
}

bool GrVkGpu::onReadPixels(GrSurface* surface,
                           int left, int top, int width, int height,
                           GrPixelConfig config,
                           void* buffer,
                           size_t rowBytes) {
    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
        return false;
    }

    GrVkTexture* tgt = static_cast<GrVkTexture*>(surface->asTexture());
    if (!tgt) {
        return false;
    }

    // Change layout of our target so it can be used as copy
    VkImageLayout layout = tgt->currentLayout();
    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
    tgt->setImageLayout(this,
                        VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                        srcAccessMask,
                        dstAccessMask,
                        srcStageMask,
                        dstStageMask,
                        false);

    GrVkTransferBuffer* transferBuffer =
        reinterpret_cast<GrVkTransferBuffer*>(this->createTransferBuffer(rowBytes * height,
                                                                         kGpuToCpu_TransferType));

    bool flipY = kBottomLeft_GrSurfaceOrigin == surface->origin();
    VkOffset3D offset = {
        left,
        flipY ? surface->height() - top - height : top,
        0
    };

    // Copy the image to a buffer so we can map it to cpu memory
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = 0;
    region.bufferRowLength = 0; // Forces RowLength to be imageExtent.width
    region.bufferImageHeight = 0; // Forces height to be tightly packed. Only useful for 3d images.
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = offset;
    region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

    fCurrentCmdBuffer->copyImageToBuffer(this,
                                         tgt,
                                         VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                         transferBuffer,
                                         1,
                                         &region);

    // make sure the copy to buffer has finished
    transferBuffer->addMemoryBarrier(this,
                                     VK_ACCESS_TRANSFER_WRITE_BIT,
                                     VK_ACCESS_HOST_READ_BIT,
                                     VK_PIPELINE_STAGE_TRANSFER_BIT,
                                     VK_PIPELINE_STAGE_HOST_BIT,
                                     false);

    // We need to submit the current command buffer to the Queue and make sure it finishes before
    // we can copy the data out of the buffer.
    this->submitCommandBuffer(kForce_SyncQueue);

    void* mappedMemory = transferBuffer->map();

    memcpy(buffer, mappedMemory, rowBytes*height);

    transferBuffer->unmap();
    transferBuffer->unref();

    if (flipY) {
        SkAutoSMalloc<32 * sizeof(GrColor)> scratch;
        size_t tightRowBytes = GrBytesPerPixel(config) * width;
        scratch.reset(tightRowBytes);
        void* tmpRow = scratch.get();
        // flip y in-place by rows
        const int halfY = height >> 1;
        char* top = reinterpret_cast<char*>(buffer);
        char* bottom = top + (height - 1) * rowBytes;
        for (int y = 0; y < halfY; y++) {
            memcpy(tmpRow, top, tightRowBytes);
            memcpy(top, bottom, tightRowBytes);
            memcpy(bottom, tmpRow, tightRowBytes);
            top += rowBytes;
            bottom -= rowBytes;
        }
    }

    return true;
}

bool GrVkGpu::prepareDrawState(const GrPipeline& pipeline,
                               const GrPrimitiveProcessor& primProc,
                               GrPrimitiveType primitiveType,
                               const GrVkRenderPass& renderPass,
                               GrVkProgram** program) {
    // Get GrVkProgramDesc
    GrVkProgramDesc desc;
    if (!GrVkProgramDescBuilder::Build(&desc, primProc, pipeline, *this->vkCaps().glslCaps())) {
        GrCapsDebugf(this->caps(), "Failed to build vk program descriptor!\n");
        return false;
    }

    *program = GrVkProgramBuilder::CreateProgram(this,
                                                 pipeline,
                                                 primProc,
                                                 primitiveType,
                                                 desc,
                                                 renderPass);
    if (!*program) {
        return false;
    }

    (*program)->setData(this, primProc, pipeline);

    (*program)->bind(this, fCurrentCmdBuffer);

    GrVkPipeline::SetDynamicState(this, fCurrentCmdBuffer, pipeline);

    return true;
}

void GrVkGpu::onDraw(const GrPipeline& pipeline,
                     const GrPrimitiveProcessor& primProc,
                     const GrMesh* meshes,
                     int meshCount) {
    if (!meshCount) {
        return;
    }
    GrRenderTarget* rt = pipeline.getRenderTarget();
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
    const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
    SkASSERT(renderPass);

    fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

    GrVkProgram* program = nullptr;
    GrPrimitiveType primitiveType = meshes[0].primitiveType();
    if (!this->prepareDrawState(pipeline, primProc, primitiveType, *renderPass, &program)) {
        return;
    }

    // Change layout of our render target so it can be used as the color attachment
    VkImageLayout layout = vkRT->currentLayout();
    // Our color attachment is purely a destination and won't be read so don't need to flush or
    // invalidate any caches
    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    vkRT->setImageLayout(this,
                         VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                         srcAccessMask,
                         dstAccessMask,
                         srcStageMask,
                         dstStageMask,
                         false);

    // If we are using a stencil attachment we also need to update its layout
    if (!pipeline.getStencil().isDisabled()) {
        GrStencilAttachment* stencil = vkRT->renderTargetPriv().getStencilAttachment();
        GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
        VkImageLayout origDstLayout = vkStencil->currentLayout();
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
        VkAccessFlags dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
        VkPipelineStageFlags srcStageMask =
            GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
        vkStencil->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                                  srcAccessMask,
                                  dstAccessMask,
                                  srcStageMask,
                                  dstStageMask,
                                  false);
    }

    for (int i = 0; i < meshCount; ++i) {
        if (GrXferBarrierType barrierType = pipeline.xferBarrierType(*this->caps())) {
            this->xferBarrier(pipeline.getRenderTarget(), barrierType);
        }

        const GrMesh& mesh = meshes[i];
        GrMesh::Iterator iter;
        const GrNonInstancedMesh* nonIdxMesh = iter.init(mesh);
        do {
            if (nonIdxMesh->primitiveType() != primitiveType) {
                // Technically we don't have to call this here (since there is a safety check in
                // program::setData) but this will allow for quicker freeing of resources if the
                // program sits in a cache for a while.
                program->freeTempResources(this);
                // This free will go away once we set up a program cache, and then the cache will
                // be responsible for calling freeGPUResources.
                program->freeGPUResources(this);
                program->unref();
                SkDEBUGCODE(program = nullptr);
                primitiveType = nonIdxMesh->primitiveType();
                if (!this->prepareDrawState(pipeline, primProc, primitiveType, *renderPass,
                                            &program)) {
                    return;
                }
            }
            SkASSERT(program);
            this->bindGeometry(primProc, *nonIdxMesh);

            if (nonIdxMesh->isIndexed()) {
                fCurrentCmdBuffer->drawIndexed(this,
                                               nonIdxMesh->indexCount(),
                                               1,
                                               nonIdxMesh->startIndex(),
                                               nonIdxMesh->startVertex(),
                                               0);
            } else {
                fCurrentCmdBuffer->draw(this,
                                        nonIdxMesh->vertexCount(),
                                        1,
                                        nonIdxMesh->startVertex(),
                                        0);
            }

            fStats.incNumDraws();
        } while ((nonIdxMesh = iter.next()));
    }

    fCurrentCmdBuffer->endRenderPass(this);

    // Technically we don't have to call this here (since there is a safety check in
    // program::setData) but this will allow for quicker freeing of resources if the program sits
    // in a cache for a while.
    program->freeTempResources(this);
    // This free will go away once we set up a program cache, and then the cache will be
    // responsible for calling freeGPUResources.
    program->freeGPUResources(this);
    program->unref();

#if SWAP_PER_DRAW
    glFlush();
#if defined(SK_BUILD_FOR_MAC)
    aglSwapBuffers(aglGetCurrentContext());
    int set_a_break_pt_here = 9;
    aglSwapBuffers(aglGetCurrentContext());
#elif defined(SK_BUILD_FOR_WIN32)
    SwapBuf();
    int set_a_break_pt_here = 9;
    SwapBuf();
#endif
#endif
}