/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkGpu.h"

#include "GrContextOptions.h"
#include "GrGeometryProcessor.h"
#include "GrGpuResourceCacheAccess.h"
#include "GrMesh.h"
#include "GrPipeline.h"
#include "GrRenderTargetPriv.h"
#include "GrSurfacePriv.h"
#include "GrTexturePriv.h"

#include "GrVkCommandBuffer.h"
#include "GrVkImage.h"
#include "GrVkIndexBuffer.h"
#include "GrVkMemory.h"
#include "GrVkPipeline.h"
#include "GrVkPipelineState.h"
#include "GrVkRenderPass.h"
#include "GrVkResourceProvider.h"
#include "GrVkTexture.h"
#include "GrVkTextureRenderTarget.h"
#include "GrVkTransferBuffer.h"
#include "GrVkVertexBuffer.h"

#include "SkConfig8888.h"

#include "vk/GrVkInterface.h"
#include "vk/GrVkTypes.h"

#define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
#define VK_CALL_RET(RET, X) GR_VK_CALL_RET(this->vkInterface(), RET, X)
#define VK_CALL_ERRCHECK(X) GR_VK_CALL_ERRCHECK(this->vkInterface(), X)

#ifdef ENABLE_VK_LAYERS
VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(
    VkDebugReportFlagsEXT flags,
    VkDebugReportObjectTypeEXT objectType,
    uint64_t object,
    size_t location,
    int32_t messageCode,
    const char* pLayerPrefix,
    const char* pMessage,
    void* pUserData) {
    if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
        SkDebugf("Vulkan error [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    } else if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) {
        SkDebugf("Vulkan warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    } else if (flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) {
        SkDebugf("Vulkan perf warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    } else {
        SkDebugf("Vulkan info/debug [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    }
    return VK_FALSE;
}
#endif

GrGpu* GrVkGpu::Create(GrBackendContext backendContext, const GrContextOptions& options,
                       GrContext* context) {
    SkAutoTUnref<const GrVkBackendContext> vkBackendContext(
        reinterpret_cast<const GrVkBackendContext*>(backendContext));
    if (!vkBackendContext) {
        vkBackendContext.reset(GrVkBackendContext::Create());
        if (!vkBackendContext) {
            return nullptr;
        }
    } else {
        vkBackendContext->ref();
    }

    return new GrVkGpu(context, options, vkBackendContext);
}

////////////////////////////////////////////////////////////////////////////////

GrVkGpu::GrVkGpu(GrContext* context, const GrContextOptions& options,
                 const GrVkBackendContext* backendCtx)
    : INHERITED(context)
    , fVkInstance(backendCtx->fInstance)
    , fDevice(backendCtx->fDevice)
    , fQueue(backendCtx->fQueue)
    , fResourceProvider(this) {
    fBackendContext.reset(backendCtx);

#ifdef ENABLE_VK_LAYERS
    if (backendCtx->fExtensions & kEXT_debug_report_GrVkExtensionFlag) {
        // Setup callback creation information
        VkDebugReportCallbackCreateInfoEXT callbackCreateInfo;
        callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
        callbackCreateInfo.pNext = nullptr;
        callbackCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT |
                                   VK_DEBUG_REPORT_WARNING_BIT_EXT |
                                   //VK_DEBUG_REPORT_INFORMATION_BIT_EXT |
                                   //VK_DEBUG_REPORT_DEBUG_BIT_EXT |
                                   VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
        callbackCreateInfo.pfnCallback = &DebugReportCallback;
        callbackCreateInfo.pUserData = nullptr;

        // Register the callback
        GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateDebugReportCallbackEXT(fVkInstance,
                            &callbackCreateInfo, nullptr, &fCallback));
    }
#endif

    fCompiler = shaderc_compiler_initialize();

    fVkCaps.reset(new GrVkCaps(options, this->vkInterface(), backendCtx->fPhysicalDevice,
                               backendCtx->fFeatures));
    fCaps.reset(SkRef(fVkCaps.get()));

    VK_CALL(GetPhysicalDeviceMemoryProperties(backendCtx->fPhysicalDevice, &fPhysDevMemProps));

    const VkCommandPoolCreateInfo cmdPoolInfo = {
        VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,  // sType
        nullptr,                                     // pNext
        VK_COMMAND_POOL_CREATE_TRANSIENT_BIT,        // CmdPoolCreateFlags
        backendCtx->fQueueFamilyIndex,               // queueFamilyIndex
    };
    GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateCommandPool(fDevice, &cmdPoolInfo, nullptr,
                                                               &fCmdPool));

    // must call this after creating the CommandPool
    fResourceProvider.init();
    fCurrentCmdBuffer = fResourceProvider.createCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->begin(this);
}

GrVkGpu::~GrVkGpu() {
    fCurrentCmdBuffer->end(this);
    fCurrentCmdBuffer->unref(this);

    // wait for all commands to finish
    fResourceProvider.checkCommandBuffers();
    SkDEBUGCODE(VkResult res =) VK_CALL(QueueWaitIdle(fQueue));
    // VK_ERROR_DEVICE_LOST is acceptable when tearing down (see 4.2.4 in spec)
    SkASSERT(VK_SUCCESS == res || VK_ERROR_DEVICE_LOST == res);

    // must call this just before we destroy the VkDevice
    fResourceProvider.destroyResources();

    VK_CALL(DestroyCommandPool(fDevice, fCmdPool, nullptr));

    shaderc_compiler_release(fCompiler);

#ifdef ENABLE_VK_LAYERS
    VK_CALL(DestroyDebugReportCallbackEXT(fVkInstance, fCallback, nullptr));
#endif
}

///////////////////////////////////////////////////////////////////////////////

void GrVkGpu::submitCommandBuffer(SyncQueue sync) {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->end(this);

    fCurrentCmdBuffer->submitToQueue(this, fQueue, sync);
    fResourceProvider.checkCommandBuffers();

    // Release old command buffer and create a new one
    fCurrentCmdBuffer->unref(this);
    fCurrentCmdBuffer = fResourceProvider.createCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);

    fCurrentCmdBuffer->begin(this);
}

///////////////////////////////////////////////////////////////////////////////
GrVertexBuffer* GrVkGpu::onCreateVertexBuffer(size_t size, bool dynamic) {
    return GrVkVertexBuffer::Create(this, size, dynamic);
}

GrIndexBuffer* GrVkGpu::onCreateIndexBuffer(size_t size, bool dynamic) {
    return GrVkIndexBuffer::Create(this, size, dynamic);
}

GrTransferBuffer* GrVkGpu::onCreateTransferBuffer(size_t size, TransferType type) {
    GrVkBuffer::Type bufferType = kCpuToGpu_TransferType == type ? GrVkBuffer::kCopyRead_Type
                                                                 : GrVkBuffer::kCopyWrite_Type;
    return GrVkTransferBuffer::Create(this, size, bufferType);
}

////////////////////////////////////////////////////////////////////////////////
bool GrVkGpu::onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
                                   GrPixelConfig srcConfig, DrawPreference* drawPreference,
                                   WritePixelTempDrawInfo* tempDrawInfo) {
    if (kIndex_8_GrPixelConfig == srcConfig || GrPixelConfigIsCompressed(dstSurface->config())) {
        return false;
    }

    // Currently we don't handle draws, so if the caller wants/needs to do a draw we need to fail
    if (kNoDraw_DrawPreference != *drawPreference) {
        return false;
    }

    if (dstSurface->config() != srcConfig) {
        // TODO: This should fall back to drawing or copying to change config of dstSurface to
        // match that of srcConfig.
        return false;
    }

    return true;
}

bool GrVkGpu::onWritePixels(GrSurface* surface,
                            int left, int top, int width, int height,
                            GrPixelConfig config,
                            const SkTArray<GrMipLevel>& texels) {
    GrVkTexture* vkTex = static_cast<GrVkTexture*>(surface->asTexture());
    if (!vkTex) {
        return false;
    }

    // TODO: We're ignoring MIP levels here.
    if (texels.empty() || !texels.begin()->fPixels) {
        return false;
    }

    // We assume Vulkan doesn't do sRGB <-> linear conversions when reading and writing pixels.
    if (GrPixelConfigIsSRGB(surface->config()) != GrPixelConfigIsSRGB(config)) {
        return false;
    }

    bool success = false;
    if (GrPixelConfigIsCompressed(vkTex->desc().fConfig)) {
        // We check that config == desc.fConfig in GrGpu::getWritePixelsInfo()
        SkASSERT(config == vkTex->desc().fConfig);
        // TODO: add compressed texture support
        // delete the following two lines and uncomment the two after that when ready
        vkTex->unref();
        return false;
        //success = this->uploadCompressedTexData(vkTex->desc(), buffer, false, left, top, width,
        //                                        height);
    } else {
        bool linearTiling = vkTex->isLinearTiled();
        if (linearTiling && VK_IMAGE_LAYOUT_PREINITIALIZED != vkTex->currentLayout()) {
            // Need to change the layout to general in order to perform a host write
            VkImageLayout layout = vkTex->currentLayout();
            VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
            VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_HOST_BIT;
            VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
            VkAccessFlags dstAccessMask = VK_ACCESS_HOST_WRITE_BIT;
            vkTex->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_GENERAL,
                                  srcAccessMask,
                                  dstAccessMask,
                                  srcStageMask,
                                  dstStageMask,
                                  false);
        }
        success = this->uploadTexData(vkTex, left, top, width, height, config,
                                      texels.begin()->fPixels, texels.begin()->fRowBytes);
    }

    if (success) {
        vkTex->texturePriv().dirtyMipMaps(true);
        return true;
    }

    return false;
}

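// uploadTexData takes one of two paths: for linearly tiled images the backing memory is mapped
// with vkMapMemory and the rows are written directly on the host; for optimally tiled images the
// data is staged in a GrVkTransferBuffer and copied into the image with a buffer-to-image copy
// recorded on the current command buffer.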
bool GrVkGpu::uploadTexData(GrVkTexture* tex,
                            int left, int top, int width, int height,
                            GrPixelConfig dataConfig,
                            const void* data,
                            size_t rowBytes) {
    SkASSERT(data);

    // If we're uploading compressed data then we should be using uploadCompressedTexData
    SkASSERT(!GrPixelConfigIsCompressed(dataConfig));

    bool linearTiling = tex->isLinearTiled();

    size_t bpp = GrBytesPerPixel(dataConfig);

    const GrSurfaceDesc& desc = tex->desc();

    if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
                                               &width, &height, &data, &rowBytes)) {
        return false;
    }
    size_t trimRowBytes = width * bpp;

    if (linearTiling) {
        SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == tex->currentLayout() ||
                 VK_IMAGE_LAYOUT_GENERAL == tex->currentLayout());
        const VkImageSubresource subres = {
            VK_IMAGE_ASPECT_COLOR_BIT,
            0,  // mipLevel
            0,  // arraySlice
        };
        VkSubresourceLayout layout;
        VkResult err;

        const GrVkInterface* interface = this->vkInterface();

        GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
                                                        tex->textureImage(),
                                                        &subres,
                                                        &layout));

        int texTop = kBottomLeft_GrSurfaceOrigin == desc.fOrigin ? tex->height() - top - height
                                                                 : top;
        VkDeviceSize offset = texTop*layout.rowPitch + left*bpp;
        VkDeviceSize size = height*layout.rowPitch;
        void* mapPtr;
        err = GR_VK_CALL(interface, MapMemory(fDevice, tex->textureMemory(), offset, size, 0,
                                              &mapPtr));
        if (err) {
            return false;
        }

        if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
            // copy into buffer by rows
            const char* srcRow = reinterpret_cast<const char*>(data);
            char* dstRow = reinterpret_cast<char*>(mapPtr)+(height - 1)*layout.rowPitch;
            for (int y = 0; y < height; y++) {
                memcpy(dstRow, srcRow, trimRowBytes);
                srcRow += rowBytes;
                dstRow -= layout.rowPitch;
            }
        } else {
            // If there is no padding on the src (rowBytes) or dst (layout.rowPitch) we can memcpy
            if (trimRowBytes == rowBytes && trimRowBytes == layout.rowPitch) {
                memcpy(mapPtr, data, trimRowBytes * height);
            } else {
                SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), data, rowBytes,
                             trimRowBytes, height);
            }
        }

        GR_VK_CALL(interface, UnmapMemory(fDevice, tex->textureMemory()));
    } else {
        GrVkTransferBuffer* transferBuffer =
            GrVkTransferBuffer::Create(this, trimRowBytes * height, GrVkBuffer::kCopyRead_Type);

        void* mapPtr = transferBuffer->map();

        if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
            // copy into buffer by rows
            const char* srcRow = reinterpret_cast<const char*>(data);
            char* dstRow = reinterpret_cast<char*>(mapPtr)+(height - 1)*trimRowBytes;
            for (int y = 0; y < height; y++) {
                memcpy(dstRow, srcRow, trimRowBytes);
                srcRow += rowBytes;
                dstRow -= trimRowBytes;
            }
        } else {
            // If there is no padding on the src data rows, we can do a single memcpy
            if (trimRowBytes == rowBytes) {
                memcpy(mapPtr, data, trimRowBytes * height);
            } else {
                SkRectMemcpy(mapPtr, trimRowBytes, data, rowBytes, trimRowBytes, height);
            }
        }

        transferBuffer->unmap();

        // make sure the unmap has finished
        transferBuffer->addMemoryBarrier(this,
                                         VK_ACCESS_HOST_WRITE_BIT,
                                         VK_ACCESS_TRANSFER_READ_BIT,
                                         VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
                                         VK_PIPELINE_STAGE_TRANSFER_BIT,
                                         false);

        // Set up copy region
        bool flipY = kBottomLeft_GrSurfaceOrigin == tex->origin();
        VkOffset3D offset = {
            left,
            flipY ? tex->height() - top - height : top,
            0
        };

        VkBufferImageCopy region;
        memset(&region, 0, sizeof(VkBufferImageCopy));
        region.bufferOffset = 0;
        region.bufferRowLength = width;
        region.bufferImageHeight = height;
        region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
        region.imageOffset = offset;
        region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

        // Change layout of our target so it can be copied to
        VkImageLayout layout = tex->currentLayout();
        VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
        VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
        tex->setImageLayout(this,
                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                            srcAccessMask,
                            dstAccessMask,
                            srcStageMask,
                            dstStageMask,
                            false);

        // Copy the buffer to the image
        fCurrentCmdBuffer->copyBufferToImage(this,
                                             transferBuffer,
                                             tex,
                                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                             1,
                                             &region);

        // Submit the current command buffer to the Queue
        this->submitCommandBuffer(kSkip_SyncQueue);

        transferBuffer->unref();
    }

    return true;
}

////////////////////////////////////////////////////////////////////////////////
GrTexture* GrVkGpu::onCreateTexture(const GrSurfaceDesc& desc, GrGpuResource::LifeCycle lifeCycle,
                                    const SkTArray<GrMipLevel>& texels) {
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);

    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(desc.fConfig, &pixelFormat)) {
        return nullptr;
    }

    if (!fVkCaps->isConfigTexturable(desc.fConfig)) {
        return nullptr;
    }

    bool linearTiling = false;
    if (SkToBool(desc.fFlags & kZeroCopy_GrSurfaceFlag)) {
        if (fVkCaps->isConfigTexurableLinearly(desc.fConfig) &&
            (!renderTarget || fVkCaps->isConfigRenderableLinearly(desc.fConfig, false))) {
            linearTiling = true;
        } else {
            return nullptr;
        }
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    if (renderTarget) {
        usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    }

    // For now we will set VK_IMAGE_USAGE_TRANSFER_DST_BIT and VK_IMAGE_USAGE_TRANSFER_SRC_BIT on
    // every texture since we do not know whether or not we will be using this texture in some
    // copy or not. Also this assumes, as is the current case, that all render targets in Vulkan
    // are also textures. If we change this practice of setting both bits, we must make sure to
    // set the destination bit if we are uploading srcData to the texture.
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VkFlags memProps = (!texels.empty() && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
                                                           VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

    // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is
    // requested, this ImageDesc describes the resolved texture. Therefore we always set the
    // sample count to 1.
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = pixelFormat;
    imageDesc.fWidth = desc.fWidth;
    imageDesc.fHeight = desc.fHeight;
    imageDesc.fLevels = 1;
    imageDesc.fSamples = 1;
    imageDesc.fImageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = usageFlags;
    imageDesc.fMemProps = memProps;

    GrVkTexture* tex;
    if (renderTarget) {
        tex = GrVkTextureRenderTarget::CreateNewTextureRenderTarget(this, desc, lifeCycle,
                                                                    imageDesc);
    } else {
        tex = GrVkTexture::CreateNewTexture(this, desc, lifeCycle, imageDesc);
    }

    if (!tex) {
        return nullptr;
    }

    // TODO: We're ignoring MIP levels here.
    if (!texels.empty()) {
        SkASSERT(texels.begin()->fPixels);
        if (!this->uploadTexData(tex, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
                                 texels.begin()->fPixels, texels.begin()->fRowBytes)) {
            tex->unref();
            return nullptr;
        }
    }

    return tex;
}

////////////////////////////////////////////////////////////////////////////////

static GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin) {
    // By default, all textures in Vk use TopLeft
    if (kDefault_GrSurfaceOrigin == origin) {
        return kTopLeft_GrSurfaceOrigin;
    } else {
        return origin;
    }
}

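// The wrap entry points below adopt or borrow a VkImage created outside of Skia. The backend
// handle is interpreted as a pointer to a GrVkTextureInfo describing the image, its memory
// allocation, tiling, and current layout.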
GrTexture* GrVkGpu::onWrapBackendTexture(const GrBackendTextureDesc& desc,
                                         GrWrapOwnership ownership) {
    VkFormat format;
    if (!GrPixelConfigToVkFormat(desc.fConfig, &format)) {
        return nullptr;
    }

    if (0 == desc.fTextureHandle) {
        return nullptr;
    }

    int maxSize = this->caps()->maxTextureSize();
    if (desc.fWidth > maxSize || desc.fHeight > maxSize) {
        return nullptr;
    }

    const GrVkTextureInfo* info = reinterpret_cast<const GrVkTextureInfo*>(desc.fTextureHandle);
    if (VK_NULL_HANDLE == info->fImage || VK_NULL_HANDLE == info->fAlloc) {
        return nullptr;
    }

    GrGpuResource::LifeCycle lifeCycle = (kAdopt_GrWrapOwnership == ownership)
                                             ? GrGpuResource::kAdopted_LifeCycle
                                             : GrGpuResource::kBorrowed_LifeCycle;

    GrSurfaceDesc surfDesc;
    // next line relies on GrBackendTextureDesc's flags matching GrTexture's
    surfDesc.fFlags = (GrSurfaceFlags)desc.fFlags;
    surfDesc.fWidth = desc.fWidth;
    surfDesc.fHeight = desc.fHeight;
    surfDesc.fConfig = desc.fConfig;
    surfDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount());
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag);
    // In GL, Chrome assumes all textures are BottomLeft
    // In VK, we don't have this restriction
    surfDesc.fOrigin = resolve_origin(desc.fOrigin);

    GrVkTexture* texture = nullptr;
    if (renderTarget) {
        texture = GrVkTextureRenderTarget::CreateWrappedTextureRenderTarget(this, surfDesc,
                                                                            lifeCycle, format,
                                                                            info);
    } else {
        texture = GrVkTexture::CreateWrappedTexture(this, surfDesc, lifeCycle, format, info);
    }
    if (!texture) {
        return nullptr;
    }

    return texture;
}

GrRenderTarget* GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& wrapDesc,
                                                   GrWrapOwnership ownership) {

    const GrVkTextureInfo* info =
        reinterpret_cast<const GrVkTextureInfo*>(wrapDesc.fRenderTargetHandle);
    if (VK_NULL_HANDLE == info->fImage ||
        (VK_NULL_HANDLE == info->fAlloc && kAdopt_GrWrapOwnership == ownership)) {
        return nullptr;
    }

    GrGpuResource::LifeCycle lifeCycle = (kAdopt_GrWrapOwnership == ownership)
                                             ? GrGpuResource::kAdopted_LifeCycle
                                             : GrGpuResource::kBorrowed_LifeCycle;

    GrSurfaceDesc desc;
    desc.fConfig = wrapDesc.fConfig;
    desc.fFlags = kCheckAllocation_GrSurfaceFlag;
    desc.fWidth = wrapDesc.fWidth;
    desc.fHeight = wrapDesc.fHeight;
    desc.fSampleCnt = SkTMin(wrapDesc.fSampleCnt, this->caps()->maxSampleCount());

    desc.fOrigin = resolve_origin(wrapDesc.fOrigin);

    GrVkRenderTarget* tgt = GrVkRenderTarget::CreateWrappedRenderTarget(this, desc,
                                                                        lifeCycle,
                                                                        info);
    if (tgt && wrapDesc.fStencilBits) {
        if (!createStencilAttachmentForRenderTarget(tgt, desc.fWidth, desc.fHeight)) {
            tgt->unref();
            return nullptr;
        }
    }
    return tgt;
}

////////////////////////////////////////////////////////////////////////////////

void GrVkGpu::bindGeometry(const GrPrimitiveProcessor& primProc,
                           const GrNonInstancedMesh& mesh) {
    GrVkVertexBuffer* vbuf;
    vbuf = (GrVkVertexBuffer*)mesh.vertexBuffer();
    SkASSERT(vbuf);
    SkASSERT(!vbuf->isMapped());

    vbuf->addMemoryBarrier(this,
                           VK_ACCESS_HOST_WRITE_BIT,
                           VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
                           VK_PIPELINE_STAGE_HOST_BIT,
                           VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                           false);

    fCurrentCmdBuffer->bindVertexBuffer(this, vbuf);

    if (mesh.isIndexed()) {
        GrVkIndexBuffer* ibuf = (GrVkIndexBuffer*)mesh.indexBuffer();
        SkASSERT(ibuf);
        SkASSERT(!ibuf->isMapped());

        ibuf->addMemoryBarrier(this,
                               VK_ACCESS_HOST_WRITE_BIT,
                               VK_ACCESS_INDEX_READ_BIT,
                               VK_PIPELINE_STAGE_HOST_BIT,
                               VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                               false);

        fCurrentCmdBuffer->bindIndexBuffer(this, ibuf);
    }
}

////////////////////////////////////////////////////////////////////////////////

GrStencilAttachment* GrVkGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt,
                                                                     int width,
                                                                     int height) {
    SkASSERT(rt->asTexture());
    SkASSERT(width >= rt->width());
    SkASSERT(height >= rt->height());

    int samples = rt->numStencilSamples();

    SkASSERT(this->vkCaps().stencilFormats().count());
    const GrVkCaps::StencilFormat& sFmt = this->vkCaps().stencilFormats()[0];

    GrVkStencilAttachment* stencil(GrVkStencilAttachment::Create(this,
                                                                 GrGpuResource::kCached_LifeCycle,
                                                                 width,
                                                                 height,
                                                                 samples,
                                                                 sFmt));
    fStats.incStencilAttachmentCreates();
    return stencil;
}

////////////////////////////////////////////////////////////////////////////////

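// createTestingOnlyBackendTexture builds a raw VkImage and its backing memory outside the normal
// resource provider path, optionally filling it with srcData when the image is linearly tiled,
// and hands it back to the test harness as a heap-allocated GrVkTextureInfo.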
GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, int h,
                                                         GrPixelConfig config) {

    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
        return 0;
    }

    bool linearTiling = false;
    if (!fVkCaps->isConfigTexturable(config)) {
        return 0;
    }

    if (fVkCaps->isConfigTexurableLinearly(config)) {
        linearTiling = true;
    }

    // Currently this is not supported since it requires a copy which has not yet been implemented.
    if (srcData && !linearTiling) {
        return 0;
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VkFlags memProps = (srcData && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
                                                   VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

    VkImage image = VK_NULL_HANDLE;
    VkDeviceMemory alloc = VK_NULL_HANDLE;

    VkImageTiling imageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
    VkImageLayout initialLayout = (VK_IMAGE_TILING_LINEAR == imageTiling)
                                      ? VK_IMAGE_LAYOUT_PREINITIALIZED
                                      : VK_IMAGE_LAYOUT_UNDEFINED;

    // Create Image
    VkSampleCountFlagBits vkSamples;
    if (!GrSampleCountToVkSampleCount(1, &vkSamples)) {
        return 0;
    }

    const VkImageCreateInfo imageCreateInfo = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,  // sType
        NULL,                                 // pNext
        0,                                    // VkImageCreateFlags
        VK_IMAGE_TYPE_2D,                     // VkImageType
        pixelFormat,                          // VkFormat
        { w, h, 1 },                          // VkExtent3D
        1,                                    // mipLevels
        1,                                    // arrayLayers
        vkSamples,                            // samples
        imageTiling,                          // VkImageTiling
        usageFlags,                           // VkImageUsageFlags
        VK_SHARING_MODE_EXCLUSIVE,            // VkSharingMode
        0,                                    // queueFamilyCount
        0,                                    // pQueueFamilyIndices
        initialLayout                         // initialLayout
    };

    GR_VK_CALL_ERRCHECK(this->vkInterface(),
                        CreateImage(this->device(), &imageCreateInfo, nullptr, &image));

    if (!GrVkMemory::AllocAndBindImageMemory(this, image, memProps, &alloc)) {
        VK_CALL(DestroyImage(this->device(), image, nullptr));
        return 0;
    }

    if (srcData) {
        if (linearTiling) {
            const VkImageSubresource subres = {
                VK_IMAGE_ASPECT_COLOR_BIT,
                0,  // mipLevel
                0,  // arraySlice
            };
            VkSubresourceLayout layout;
            VkResult err;

            VK_CALL(GetImageSubresourceLayout(fDevice, image, &subres, &layout));

            void* mapPtr;
            err = VK_CALL(MapMemory(fDevice, alloc, 0, layout.rowPitch * h, 0, &mapPtr));
            if (err) {
                VK_CALL(FreeMemory(this->device(), alloc, nullptr));
                VK_CALL(DestroyImage(this->device(), image, nullptr));
                return 0;
            }

            size_t bpp = GrBytesPerPixel(config);
            size_t rowCopyBytes = bpp * w;
            // If there is no padding on dst (layout.rowPitch) we can do a single memcpy.
            // This assumes the srcData comes in with no padding.
            if (rowCopyBytes == layout.rowPitch) {
                memcpy(mapPtr, srcData, rowCopyBytes * h);
            } else {
                SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), srcData, rowCopyBytes,
                             rowCopyBytes, h);
            }
            VK_CALL(UnmapMemory(fDevice, alloc));
        } else {
            // TODO: Add support for copying to optimal tiling
            SkASSERT(false);
        }
    }

    GrVkTextureInfo* info = new GrVkTextureInfo;
    info->fImage = image;
    info->fAlloc = alloc;
    info->fImageTiling = imageTiling;
    info->fImageLayout = initialLayout;

    return (GrBackendObject)info;
}

bool GrVkGpu::isTestingOnlyBackendTexture(GrBackendObject id) const {
    const GrVkTextureInfo* backend = reinterpret_cast<const GrVkTextureInfo*>(id);

    if (backend && backend->fImage && backend->fAlloc) {
        VkMemoryRequirements req;
        memset(&req, 0, sizeof(req));
        GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
                                                                   backend->fImage,
                                                                   &req));
        // TODO: find a better check
        // This will probably fail with a different driver
        return (req.size > 0) && (req.size <= 8192 * 8192);
    }

    return false;
}

void GrVkGpu::deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandon) {
    const GrVkTextureInfo* backend = reinterpret_cast<const GrVkTextureInfo*>(id);

    if (backend) {
        if (!abandon) {
            // something in the command buffer may still be using this, so force submit
            this->submitCommandBuffer(kForce_SyncQueue);

            VK_CALL(FreeMemory(this->device(), backend->fAlloc, nullptr));
            VK_CALL(DestroyImage(this->device(), backend->fImage, nullptr));
        }
        delete backend;
    }
}

////////////////////////////////////////////////////////////////////////////////

void GrVkGpu::addMemoryBarrier(VkPipelineStageFlags srcStageMask,
                               VkPipelineStageFlags dstStageMask,
                               bool byRegion,
                               VkMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                     VkPipelineStageFlags dstStageMask,
                                     bool byRegion,
                                     VkBufferMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kBufferMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::addImageMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                    VkPipelineStageFlags dstStageMask,
                                    bool byRegion,
                                    VkImageMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kImageMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::finishDrawTarget() {
    // Submit the current command buffer to the Queue
    this->submitCommandBuffer(kSkip_SyncQueue);
}

void GrVkGpu::clearStencil(GrRenderTarget* target) {
    if (nullptr == target) {
        return;
    }
    GrStencilAttachment* stencil = target->renderTargetPriv().getStencilAttachment();
    GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));

    VkImageLayout origDstLayout = vkStencil->currentLayout();

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    vkStencil->setImageLayout(this,
                              VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                              srcAccessMask,
                              dstAccessMask,
                              srcStageMask,
                              dstStageMask,
                              false);

    VkImageSubresourceRange subRange;
    memset(&subRange, 0, sizeof(VkImageSubresourceRange));
    subRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    subRange.baseMipLevel = 0;
    subRange.levelCount = 1;
    subRange.baseArrayLayer = 0;
    subRange.layerCount = 1;

    // TODO: I imagine that most times we want to clear a stencil it will be at the beginning of a
    // draw. Thus we should look into using the load op functions on the render pass to clear out
    // the stencil there.
    fCurrentCmdBuffer->clearDepthStencilImage(this, vkStencil, &vkStencilColor, 1, &subRange);
}

void GrVkGpu::onClearStencilClip(GrRenderTarget* target, const SkIRect& rect, bool insideClip) {
    SkASSERT(target);

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
    GrStencilAttachment* sb = target->renderTargetPriv().getStencilAttachment();
    GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)sb;

    // this should only be called internally when we know we have a
    // stencil buffer.
    SkASSERT(sb);
    int stencilBitCount = sb->bits();

    // The contract with the callers does not guarantee that we preserve all bits in the stencil
    // during this clear. Thus we will clear the entire stencil to the desired value.

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
    if (insideClip) {
        vkStencilColor.stencil = (1 << (stencilBitCount - 1));
    } else {
        vkStencilColor.stencil = 0;
    }

    VkImageLayout origDstLayout = vkStencil->currentLayout();
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    VkPipelineStageFlags srcStageMask =
        GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    vkStencil->setImageLayout(this,
                              VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                              srcAccessMask,
                              dstAccessMask,
                              srcStageMask,
                              dstStageMask,
                              false);

    VkClearRect clearRect;
    // Flip rect if necessary
    SkIRect vkRect = rect;

    if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) {
        vkRect.fTop = vkRT->height() - rect.fBottom;
        vkRect.fBottom = vkRT->height() - rect.fTop;
    }

    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };

    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
    SkASSERT(renderPass);
    fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

    uint32_t stencilIndex;
    SkAssertResult(renderPass->stencilAttachmentIndex(&stencilIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    attachment.colorAttachment = 0; // this value shouldn't matter
    attachment.clearValue.depthStencil = vkStencilColor;

    fCurrentCmdBuffer->clearAttachments(this, 1, &attachment, 1, &clearRect);
    fCurrentCmdBuffer->endRenderPass(this);

    return;
}

void GrVkGpu::onClear(GrRenderTarget* target, const SkIRect& rect, GrColor color) {
    // parent class should never let us get here with no RT
    SkASSERT(target);

    VkClearColorValue vkColor;
    GrColorToRGBAFloat(color, vkColor.float32);

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
    VkImageLayout origDstLayout = vkRT->currentLayout();

    if (rect.width() != target->width() || rect.height() != target->height()) {
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
        VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
        VkPipelineStageFlags srcStageMask =
            GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
        vkRT->setImageLayout(this,
                             VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                             srcAccessMask,
                             dstAccessMask,
                             srcStageMask,
                             dstStageMask,
                             false);

        VkClearRect clearRect;
        // Flip rect if necessary
        SkIRect vkRect = rect;
        if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) {
            vkRect.fTop = vkRT->height() - rect.fBottom;
            vkRect.fBottom = vkRT->height() - rect.fTop;
        }
        clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
        clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
        clearRect.baseArrayLayer = 0;
        clearRect.layerCount = 1;

        const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
        SkASSERT(renderPass);
        fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

        uint32_t colorIndex;
        SkAssertResult(renderPass->colorAttachmentIndex(&colorIndex));

        VkClearAttachment attachment;
        attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        attachment.colorAttachment = colorIndex;
        attachment.clearValue.color = vkColor;

        fCurrentCmdBuffer->clearAttachments(this, 1, &attachment, 1, &clearRect);
        fCurrentCmdBuffer->endRenderPass(this);
        return;
    }

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    vkRT->setImageLayout(this,
                         VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                         srcAccessMask,
                         dstAccessMask,
                         srcStageMask,
                         dstStageMask,
                         false);

    VkImageSubresourceRange subRange;
    memset(&subRange, 0, sizeof(VkImageSubresourceRange));
    subRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    subRange.baseMipLevel = 0;
    subRange.levelCount = 1;
    subRange.baseArrayLayer = 0;
    subRange.layerCount = 1;

    // In the future we may not actually be doing this type of clear at all. If we are inside a
    // render pass or doing a non full clear then we will use CmdClearColorAttachment. The more
    // common use case will be clearing an attachment at the start of a render pass, in which case
    // we will use the clear load ops.
    fCurrentCmdBuffer->clearColorImage(this,
                                       vkRT,
                                       &vkColor,
                                       1, &subRange);
}

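// Surface-to-surface copies. can_copy_image gates the vkCmdCopyImage path, which requires both
// surfaces to be textures with matching origin and config; the copy-as-draw path is not yet
// implemented.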
inline bool can_copy_image(const GrSurface* dst,
                           const GrSurface* src,
                           const GrVkGpu* gpu) {
    if (src->asTexture() &&
        dst->asTexture() &&
        src->origin() == dst->origin() &&
        src->config() == dst->config()) {
        return true;
    }

    // How does msaa play into this? If a VkTexture is multisampled, are we copying the
    // multisampled or the resolved image here?

    return false;
}

void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst,
                                     GrSurface* src,
                                     const SkIRect& srcRect,
                                     const SkIPoint& dstPoint) {
    SkASSERT(can_copy_image(dst, src, this));

    // Insert memory barriers to switch src and dst to transfer_source and transfer_dst layouts
    GrVkTexture* dstTex = static_cast<GrVkTexture*>(dst->asTexture());
    GrVkTexture* srcTex = static_cast<GrVkTexture*>(src->asTexture());

    VkImageLayout origDstLayout = dstTex->currentLayout();
    VkImageLayout origSrcLayout = srcTex->currentLayout();

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    // These flags are for flushing/invalidating caches and for the dst image it doesn't matter if
    // the cache is flushed since it is only being written to.
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    dstTex->setImageLayout(this,
                           VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                           srcAccessMask,
                           dstAccessMask,
                           srcStageMask,
                           dstStageMask,
                           false);

    srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origSrcLayout);
    dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origSrcLayout);
    dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;

    srcTex->setImageLayout(this,
                           VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                           srcAccessMask,
                           dstAccessMask,
                           srcStageMask,
                           dstStageMask,
                           false);

    // Flip rect if necessary
    SkIRect srcVkRect = srcRect;
    int32_t dstY = dstPoint.fY;

    if (kBottomLeft_GrSurfaceOrigin == src->origin()) {
        SkASSERT(kBottomLeft_GrSurfaceOrigin == dst->origin());
        srcVkRect.fTop = src->height() - srcRect.fBottom;
        srcVkRect.fBottom = src->height() - srcRect.fTop;
        dstY = dst->height() - dstPoint.fY - srcVkRect.height();
    }

    VkImageCopy copyRegion;
    memset(&copyRegion, 0, sizeof(VkImageCopy));
    copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.srcOffset = { srcVkRect.fLeft, srcVkRect.fTop, 0 };
    copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.dstOffset = { dstPoint.fX, dstY, 0 };
    // The extent depth must be 1 for a copy between 2D images.
    copyRegion.extent = { (uint32_t)srcVkRect.width(), (uint32_t)srcVkRect.height(), 1 };

    fCurrentCmdBuffer->copyImage(this,
                                 srcTex,
                                 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                 dstTex,
                                 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                 1,
                                 &copyRegion);
}

inline bool can_copy_as_draw(const GrSurface* dst,
                             const GrSurface* src,
                             const GrVkGpu* gpu) {
    return false;
}

void GrVkGpu::copySurfaceAsDraw(GrSurface* dst,
                                GrSurface* src,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint) {
    SkASSERT(false);
}

bool GrVkGpu::onCopySurface(GrSurface* dst,
                            GrSurface* src,
                            const SkIRect& srcRect,
                            const SkIPoint& dstPoint) {
    if (can_copy_image(dst, src, this)) {
        this->copySurfaceAsCopyImage(dst, src, srcRect, dstPoint);
        return true;
    }

    if (can_copy_as_draw(dst, src, this)) {
        this->copySurfaceAsDraw(dst, src, srcRect, dstPoint);
        return true;
    }

    return false;
}

void GrVkGpu::onGetMultisampleSpecs(GrRenderTarget* rt, const GrStencilSettings&,
                                    int* effectiveSampleCnt, SkAutoTDeleteArray<SkPoint>*) {
    // TODO: stub.
    SkASSERT(!this->caps()->sampleLocationsSupport());
    *effectiveSampleCnt = rt->desc().fSampleCnt;
}

bool GrVkGpu::onGetReadPixelsInfo(GrSurface* srcSurface, int width, int height, size_t rowBytes,
                                  GrPixelConfig readConfig, DrawPreference* drawPreference,
                                  ReadPixelTempDrawInfo* tempDrawInfo) {
    // Currently we don't handle draws, so if the caller wants/needs to do a draw we need to fail
    if (kNoDraw_DrawPreference != *drawPreference) {
        return false;
    }

    if (srcSurface->config() != readConfig) {
        // TODO: This should fall back to drawing or copying to change config of srcSurface to
        // match that of readConfig.
        return false;
    }

    return true;
}

bool GrVkGpu::onReadPixels(GrSurface* surface,
                           int left, int top, int width, int height,
                           GrPixelConfig config,
                           void* buffer,
                           size_t rowBytes) {
    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
        return false;
    }

    GrVkTexture* tgt = static_cast<GrVkTexture*>(surface->asTexture());
    if (!tgt) {
        return false;
    }

    // Change layout of our target so it can be used as copy
    VkImageLayout layout = tgt->currentLayout();
    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
    tgt->setImageLayout(this,
                        VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                        srcAccessMask,
                        dstAccessMask,
                        srcStageMask,
                        dstStageMask,
                        false);

    GrVkTransferBuffer* transferBuffer =
        reinterpret_cast<GrVkTransferBuffer*>(this->createTransferBuffer(rowBytes * height,
                                                                         kGpuToCpu_TransferType));

    bool flipY = kBottomLeft_GrSurfaceOrigin == surface->origin();
    VkOffset3D offset = {
        left,
        flipY ? surface->height() - top - height : top,
        0
    };

    // Copy the image to a buffer so we can map it to cpu memory
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = 0;
    region.bufferRowLength = 0;   // Forces RowLength to be imageExtent.width
    region.bufferImageHeight = 0; // Forces height to be tightly packed. Only useful for 3d images.
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = offset;
    region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

    fCurrentCmdBuffer->copyImageToBuffer(this,
                                         tgt,
                                         VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                         transferBuffer,
                                         1,
                                         &region);

    // make sure the copy to buffer has finished
    transferBuffer->addMemoryBarrier(this,
                                     VK_ACCESS_TRANSFER_WRITE_BIT,
                                     VK_ACCESS_HOST_READ_BIT,
                                     VK_PIPELINE_STAGE_TRANSFER_BIT,
                                     VK_PIPELINE_STAGE_HOST_BIT,
                                     false);

    // We need to submit the current command buffer to the Queue and make sure it finishes before
    // we can copy the data out of the buffer.
    this->submitCommandBuffer(kForce_SyncQueue);

    void* mappedMemory = transferBuffer->map();

    memcpy(buffer, mappedMemory, rowBytes*height);

    transferBuffer->unmap();
    transferBuffer->unref();

    if (flipY) {
        SkAutoSMalloc<32 * sizeof(GrColor)> scratch;
        size_t tightRowBytes = GrBytesPerPixel(config) * width;
        scratch.reset(tightRowBytes);
        void* tmpRow = scratch.get();
        // flip y in-place by rows
        const int halfY = height >> 1;
        char* top = reinterpret_cast<char*>(buffer);
        char* bottom = top + (height - 1) * rowBytes;
        for (int y = 0; y < halfY; y++) {
            memcpy(tmpRow, top, tightRowBytes);
            memcpy(top, bottom, tightRowBytes);
            memcpy(bottom, tmpRow, tightRowBytes);
            top += rowBytes;
            bottom -= rowBytes;
        }
    }

    return true;
}
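
// prepareDrawState finds or creates a GrVkPipelineState compatible with the pipeline, primitive
// processor, primitive type, and render pass, calls setData for the primitive processor and
// pipeline, binds it on the current command buffer, and sets the pipeline's dynamic state.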
bool GrVkGpu::prepareDrawState(const GrPipeline& pipeline,
                               const GrPrimitiveProcessor& primProc,
                               GrPrimitiveType primitiveType,
                               const GrVkRenderPass& renderPass,
                               GrVkPipelineState** pipelineState) {
    *pipelineState = fResourceProvider.findOrCreateCompatiblePipelineState(pipeline,
                                                                           primProc,
                                                                           primitiveType,
                                                                           renderPass);
    if (!*pipelineState) {
        return false;
    }

    (*pipelineState)->setData(this, primProc, pipeline);

    (*pipelineState)->bind(this, fCurrentCmdBuffer);

    GrVkPipeline::SetDynamicState(this, fCurrentCmdBuffer, pipeline);

    return true;
}

void GrVkGpu::onDraw(const GrPipeline& pipeline,
                     const GrPrimitiveProcessor& primProc,
                     const GrMesh* meshes,
                     int meshCount) {
    if (!meshCount) {
        return;
    }
    GrRenderTarget* rt = pipeline.getRenderTarget();
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
    const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
    SkASSERT(renderPass);

    fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

    GrVkPipelineState* pipelineState = nullptr;
    GrPrimitiveType primitiveType = meshes[0].primitiveType();
    if (!this->prepareDrawState(pipeline, primProc, primitiveType, *renderPass, &pipelineState)) {
        return;
    }

    // Change layout of our render target so it can be used as the color attachment
    VkImageLayout layout = vkRT->currentLayout();
    // Our color attachment is purely a destination and won't be read so don't need to flush or
    // invalidate any caches
    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    vkRT->setImageLayout(this,
                         VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                         srcAccessMask,
                         dstAccessMask,
                         srcStageMask,
                         dstStageMask,
                         false);

    // If we are using a stencil attachment we also need to update its layout
    if (!pipeline.getStencil().isDisabled()) {
        GrStencilAttachment* stencil = vkRT->renderTargetPriv().getStencilAttachment();
        GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
        VkImageLayout origDstLayout = vkStencil->currentLayout();
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
        VkAccessFlags dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
        VkPipelineStageFlags srcStageMask =
            GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
        vkStencil->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                                  srcAccessMask,
                                  dstAccessMask,
                                  srcStageMask,
                                  dstStageMask,
                                  false);
    }

    for (int i = 0; i < meshCount; ++i) {
        if (GrXferBarrierType barrierType = pipeline.xferBarrierType(*this->caps())) {
            this->xferBarrier(pipeline.getRenderTarget(), barrierType);
        }

        const GrMesh& mesh = meshes[i];
        GrMesh::Iterator iter;
        const GrNonInstancedMesh* nonIdxMesh = iter.init(mesh);
        do {
            if (nonIdxMesh->primitiveType() != primitiveType) {
                // Technically we don't have to call this here (since there is a safety check in
                // pipelineState::setData), but this will allow for quicker freeing of resources
                // if the pipelineState sits in a cache for a while.
                pipelineState->freeTempResources(this);
                pipelineState->unref();
                SkDEBUGCODE(pipelineState = nullptr);
                primitiveType = nonIdxMesh->primitiveType();
                if (!this->prepareDrawState(pipeline, primProc, primitiveType, *renderPass,
                                            &pipelineState)) {
                    return;
                }
            }
            SkASSERT(pipelineState);
            this->bindGeometry(primProc, *nonIdxMesh);

            if (nonIdxMesh->isIndexed()) {
                fCurrentCmdBuffer->drawIndexed(this,
                                               nonIdxMesh->indexCount(),
                                               1,
                                               nonIdxMesh->startIndex(),
                                               nonIdxMesh->startVertex(),
                                               0);
            } else {
                fCurrentCmdBuffer->draw(this,
                                        nonIdxMesh->vertexCount(),
                                        1,
                                        nonIdxMesh->startVertex(),
                                        0);
            }

            fStats.incNumDraws();
        } while ((nonIdxMesh = iter.next()));
    }

    fCurrentCmdBuffer->endRenderPass(this);

    // Technically we don't have to call this here (since there is a safety check in
    // pipelineState::setData), but this will allow for quicker freeing of resources if the
    // pipelineState sits in a cache for a while.
    pipelineState->freeTempResources(this);
    pipelineState->unref();

#if SWAP_PER_DRAW
    glFlush();
#if defined(SK_BUILD_FOR_MAC)
    aglSwapBuffers(aglGetCurrentContext());
    int set_a_break_pt_here = 9;
    aglSwapBuffers(aglGetCurrentContext());
#elif defined(SK_BUILD_FOR_WIN32)
    SwapBuf();
    int set_a_break_pt_here = 9;
    SwapBuf();
#endif
#endif
}