/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkGpu.h"

#include "GrContextOptions.h"
#include "GrGeometryProcessor.h"
#include "GrGpuResourceCacheAccess.h"
#include "GrMesh.h"
#include "GrPipeline.h"
#include "GrRenderTargetPriv.h"
#include "GrSurfacePriv.h"
#include "GrTexturePriv.h"

#include "GrVkCommandBuffer.h"
#include "GrVkImage.h"
#include "GrVkIndexBuffer.h"
#include "GrVkMemory.h"
#include "GrVkPipeline.h"
#include "GrVkPipelineState.h"
#include "GrVkRenderPass.h"
#include "GrVkResourceProvider.h"
#include "GrVkTexture.h"
#include "GrVkTextureRenderTarget.h"
#include "GrVkTransferBuffer.h"
#include "GrVkVertexBuffer.h"

#include "SkConfig8888.h"

#include "vk/GrVkInterface.h"
#include "vk/GrVkTypes.h"

#define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
#define VK_CALL_RET(RET, X) GR_VK_CALL_RET(this->vkInterface(), RET, X)
#define VK_CALL_ERRCHECK(X) GR_VK_CALL_ERRCHECK(this->vkInterface(), X)

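// Usage sketch: a raw Vulkan entry point wrapped as
//     VK_CALL(DestroyImage(fDevice, image, nullptr));
// expands to GR_VK_CALL(this->vkInterface(), DestroyImage(...)), i.e. every
// call is dispatched through the GrVkInterface function table rather than
// through the static Vulkan loader.
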
#ifdef ENABLE_VK_LAYERS
VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(
    VkDebugReportFlagsEXT       flags,
    VkDebugReportObjectTypeEXT  objectType,
    uint64_t                    object,
    size_t                      location,
    int32_t                     messageCode,
    const char*                 pLayerPrefix,
    const char*                 pMessage,
    void*                       pUserData) {
    if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
        SkDebugf("Vulkan error [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    } else if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) {
        SkDebugf("Vulkan warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    } else if (flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) {
        SkDebugf("Vulkan perf warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    } else {
        SkDebugf("Vulkan info/debug [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    }
    return VK_FALSE;
}
#endif

GrGpu* GrVkGpu::Create(GrBackendContext backendContext, const GrContextOptions& options,
                       GrContext* context) {
    const GrVkBackendContext* vkBackendContext =
        reinterpret_cast<const GrVkBackendContext*>(backendContext);
    if (!vkBackendContext) {
        vkBackendContext = GrVkBackendContext::Create();
        if (!vkBackendContext) {
            return nullptr;
        }
    } else {
        vkBackendContext->ref();
    }

    return new GrVkGpu(context, options, vkBackendContext);
}

////////////////////////////////////////////////////////////////////////////////

GrVkGpu::GrVkGpu(GrContext* context, const GrContextOptions& options,
                 const GrVkBackendContext* backendCtx)
    : INHERITED(context)
    , fVkInstance(backendCtx->fInstance)
    , fDevice(backendCtx->fDevice)
    , fQueue(backendCtx->fQueue)
    , fResourceProvider(this) {
    fBackendContext.reset(backendCtx);

#ifdef ENABLE_VK_LAYERS
    if (backendCtx->fExtensions & kEXT_debug_report_GrVkExtensionFlag) {
        // Set up callback creation information
        VkDebugReportCallbackCreateInfoEXT callbackCreateInfo;
        callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
        callbackCreateInfo.pNext = nullptr;
        callbackCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT |
                                   VK_DEBUG_REPORT_WARNING_BIT_EXT |
                                   //VK_DEBUG_REPORT_INFORMATION_BIT_EXT |
                                   //VK_DEBUG_REPORT_DEBUG_BIT_EXT |
                                   VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
        callbackCreateInfo.pfnCallback = &DebugReportCallback;
        callbackCreateInfo.pUserData = nullptr;

        // Register the callback
        GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateDebugReportCallbackEXT(fVkInstance,
                            &callbackCreateInfo, nullptr, &fCallback));
    }
#endif

    fCompiler = shaderc_compiler_initialize();

    fVkCaps.reset(new GrVkCaps(options, this->vkInterface(), backendCtx->fPhysicalDevice,
                               backendCtx->fFeatures, backendCtx->fExtensions));
    fCaps.reset(SkRef(fVkCaps.get()));

    VK_CALL(GetPhysicalDeviceMemoryProperties(backendCtx->fPhysicalDevice, &fPhysDevMemProps));

    const VkCommandPoolCreateInfo cmdPoolInfo = {
        VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType
        nullptr,                                    // pNext
        VK_COMMAND_POOL_CREATE_TRANSIENT_BIT,       // CmdPoolCreateFlags
        backendCtx->fQueueFamilyIndex,              // queueFamilyIndex
    };
    GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateCommandPool(fDevice, &cmdPoolInfo, nullptr,
                                                               &fCmdPool));

    // must call this after creating the CommandPool
    fResourceProvider.init();
    fCurrentCmdBuffer = fResourceProvider.createCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->begin(this);
}

GrVkGpu::~GrVkGpu() {
    fCurrentCmdBuffer->end(this);
    fCurrentCmdBuffer->unref(this);

    // wait for all commands to finish
    fResourceProvider.checkCommandBuffers();
    SkDEBUGCODE(VkResult res =) VK_CALL(QueueWaitIdle(fQueue));
    // VK_ERROR_DEVICE_LOST is acceptable when tearing down (see 4.2.4 in spec)
    SkASSERT(VK_SUCCESS == res || VK_ERROR_DEVICE_LOST == res);

    // must call this just before we destroy the VkDevice
    fResourceProvider.destroyResources();

    VK_CALL(DestroyCommandPool(fDevice, fCmdPool, nullptr));

    shaderc_compiler_release(fCompiler);

#ifdef ENABLE_VK_LAYERS
    VK_CALL(DestroyDebugReportCallbackEXT(fVkInstance, fCallback, nullptr));
#endif
}

///////////////////////////////////////////////////////////////////////////////

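// Ends the in-flight command buffer, submits it to fQueue, and immediately
// begins recording into a fresh one from the resource provider. Callers pick
// kSkip_SyncQueue for fire-and-forget submits; onReadPixels (below) uses
// kForce_SyncQueue when it must wait for the GPU before mapping a buffer.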
void GrVkGpu::submitCommandBuffer(SyncQueue sync) {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->end(this);

    fCurrentCmdBuffer->submitToQueue(this, fQueue, sync);
    fResourceProvider.checkCommandBuffers();

    // Release old command buffer and create a new one
    fCurrentCmdBuffer->unref(this);
    fCurrentCmdBuffer = fResourceProvider.createCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);

    fCurrentCmdBuffer->begin(this);
}

///////////////////////////////////////////////////////////////////////////////
GrBuffer* GrVkGpu::onCreateBuffer(size_t size, GrBufferType type, GrAccessPattern accessPattern) {
    switch (type) {
        case kVertex_GrBufferType:
            SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
                     kStatic_GrAccessPattern == accessPattern);
            return GrVkVertexBuffer::Create(this, size, kDynamic_GrAccessPattern == accessPattern);
        case kIndex_GrBufferType:
            SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
                     kStatic_GrAccessPattern == accessPattern);
            return GrVkIndexBuffer::Create(this, size, kDynamic_GrAccessPattern == accessPattern);
        case kXferCpuToGpu_GrBufferType:
            SkASSERT(kStream_GrAccessPattern == accessPattern);
            return GrVkTransferBuffer::Create(this, size, GrVkBuffer::kCopyRead_Type);
        case kXferGpuToCpu_GrBufferType:
            SkASSERT(kStream_GrAccessPattern == accessPattern);
            return GrVkTransferBuffer::Create(this, size, GrVkBuffer::kCopyWrite_Type);
        default:
            SkFAIL("Unknown buffer type.");
            return nullptr;
    }
}

////////////////////////////////////////////////////////////////////////////////
bool GrVkGpu::onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
                                   GrPixelConfig srcConfig, DrawPreference* drawPreference,
                                   WritePixelTempDrawInfo* tempDrawInfo) {
    if (kIndex_8_GrPixelConfig == srcConfig || GrPixelConfigIsCompressed(dstSurface->config())) {
        return false;
    }

    // Currently we don't handle draws, so if the caller wants/needs to do a draw we need to fail
    if (kNoDraw_DrawPreference != *drawPreference) {
        return false;
    }

    if (dstSurface->config() != srcConfig) {
        // TODO: This should fall back to drawing or copying to change config of dstSurface to
        // match that of srcConfig.
        return false;
    }

    return true;
}
218
219bool GrVkGpu::onWritePixels(GrSurface* surface,
220 int left, int top, int width, int height,
bsalomona1e6b3b2016-03-02 10:58:23 -0800221 GrPixelConfig config,
222 const SkTArray<GrMipLevel>& texels) {
Greg Daniel164a9f02016-02-22 09:56:40 -0500223 GrVkTexture* vkTex = static_cast<GrVkTexture*>(surface->asTexture());
224 if (!vkTex) {
225 return false;
226 }
227
bsalomona1e6b3b2016-03-02 10:58:23 -0800228 // TODO: We're ignoring MIP levels here.
jvanverth03509ea2016-03-02 13:19:47 -0800229 if (texels.empty() || !texels.begin()->fPixels) {
230 return false;
231 }
bsalomona1e6b3b2016-03-02 10:58:23 -0800232
Greg Daniel164a9f02016-02-22 09:56:40 -0500233 // We assume Vulkan doesn't do sRGB <-> linear conversions when reading and writing pixels.
234 if (GrPixelConfigIsSRGB(surface->config()) != GrPixelConfigIsSRGB(config)) {
235 return false;
236 }
237
238 bool success = false;
239 if (GrPixelConfigIsCompressed(vkTex->desc().fConfig)) {
240 // We check that config == desc.fConfig in GrGpu::getWritePixelsInfo()
241 SkASSERT(config == vkTex->desc().fConfig);
242 // TODO: add compressed texture support
243 // delete the following two lines and uncomment the two after that when ready
244 vkTex->unref();
245 return false;
246 //success = this->uploadCompressedTexData(vkTex->desc(), buffer, false, left, top, width,
247 // height);
248 } else {
249 bool linearTiling = vkTex->isLinearTiled();
250 if (linearTiling && VK_IMAGE_LAYOUT_PREINITIALIZED != vkTex->currentLayout()) {
251 // Need to change the layout to general in order to perform a host write
252 VkImageLayout layout = vkTex->currentLayout();
253 VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
254 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_HOST_BIT;
255 VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
256 VkAccessFlags dstAccessMask = VK_ACCESS_HOST_WRITE_BIT;
257 vkTex->setImageLayout(this,
258 VK_IMAGE_LAYOUT_GENERAL,
259 srcAccessMask,
260 dstAccessMask,
261 srcStageMask,
262 dstStageMask,
263 false);
264 }
265 success = this->uploadTexData(vkTex, left, top, width, height, config,
bsalomona1e6b3b2016-03-02 10:58:23 -0800266 texels.begin()->fPixels, texels.begin()->fRowBytes);
Greg Daniel164a9f02016-02-22 09:56:40 -0500267 }
268
269 if (success) {
270 vkTex->texturePriv().dirtyMipMaps(true);
271 return true;
272 }
273
274 return false;
275}
276
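// Host data reaches a texture along one of two paths below: linearly tiled
// images are written directly through mapped image memory (honoring the
// driver-reported layout.rowPitch), while optimally tiled images are staged
// through a GrVkTransferBuffer and copied with copyBufferToImage.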
bool GrVkGpu::uploadTexData(GrVkTexture* tex,
                            int left, int top, int width, int height,
                            GrPixelConfig dataConfig,
                            const void* data,
                            size_t rowBytes) {
    SkASSERT(data);

    // If we're uploading compressed data then we should be using uploadCompressedTexData
    SkASSERT(!GrPixelConfigIsCompressed(dataConfig));

    bool linearTiling = tex->isLinearTiled();

    size_t bpp = GrBytesPerPixel(dataConfig);

    const GrSurfaceDesc& desc = tex->desc();

    if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
                                               &width, &height, &data, &rowBytes)) {
        return false;
    }
    size_t trimRowBytes = width * bpp;

    if (linearTiling) {
        SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == tex->currentLayout() ||
                 VK_IMAGE_LAYOUT_GENERAL == tex->currentLayout());
        const VkImageSubresource subres = {
            VK_IMAGE_ASPECT_COLOR_BIT,
            0,  // mipLevel
            0,  // arraySlice
        };
        VkSubresourceLayout layout;
        VkResult err;

        const GrVkInterface* interface = this->vkInterface();

        GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
                                                        tex->textureImage(),
                                                        &subres,
                                                        &layout));

        int texTop = kBottomLeft_GrSurfaceOrigin == desc.fOrigin ? tex->height() - top - height
                                                                 : top;
        VkDeviceSize offset = texTop*layout.rowPitch + left*bpp;
        VkDeviceSize size = height*layout.rowPitch;
        void* mapPtr;
        err = GR_VK_CALL(interface, MapMemory(fDevice, tex->textureMemory(), offset, size, 0,
                                              &mapPtr));
        if (err) {
            return false;
        }

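        // layout.rowPitch is the driver's actual row stride and may be larger
        // than the tight row size. As a sketch: for a 100 px wide RGBA_8888
        // (4 bpp) image, trimRowBytes is 400 bytes while a driver might pad
        // rowPitch to, say, 512; the row-by-row copies below bridge that gap.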
        if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
            // copy into buffer by rows
            const char* srcRow = reinterpret_cast<const char*>(data);
            char* dstRow = reinterpret_cast<char*>(mapPtr)+(height - 1)*layout.rowPitch;
            for (int y = 0; y < height; y++) {
                memcpy(dstRow, srcRow, trimRowBytes);
                srcRow += rowBytes;
                dstRow -= layout.rowPitch;
            }
        } else {
            // If there is no padding on the src (rowBytes) or dst (layout.rowPitch) we can memcpy
            if (trimRowBytes == rowBytes && trimRowBytes == layout.rowPitch) {
                memcpy(mapPtr, data, trimRowBytes * height);
            } else {
                SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), data, rowBytes,
                             trimRowBytes, height);
            }
        }

        GR_VK_CALL(interface, UnmapMemory(fDevice, tex->textureMemory()));
    } else {
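        // Optimally tiled images can't be written by the host directly, so the
        // upload is staged: map the transfer buffer, copy rows, unmap, fence
        // the host write with a HOST_WRITE -> TRANSFER_READ barrier, move the
        // image to TRANSFER_DST_OPTIMAL, and record a copyBufferToImage.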
        GrVkTransferBuffer* transferBuffer =
            GrVkTransferBuffer::Create(this, trimRowBytes * height, GrVkBuffer::kCopyRead_Type);

        void* mapPtr = transferBuffer->map();

        if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
            // copy into buffer by rows
            const char* srcRow = reinterpret_cast<const char*>(data);
            char* dstRow = reinterpret_cast<char*>(mapPtr)+(height - 1)*trimRowBytes;
            for (int y = 0; y < height; y++) {
                memcpy(dstRow, srcRow, trimRowBytes);
                srcRow += rowBytes;
                dstRow -= trimRowBytes;
            }
        } else {
            // If there is no padding on the src data rows, we can do a single memcpy
            if (trimRowBytes == rowBytes) {
                memcpy(mapPtr, data, trimRowBytes * height);
            } else {
                SkRectMemcpy(mapPtr, trimRowBytes, data, rowBytes, trimRowBytes, height);
            }
        }

        transferBuffer->unmap();

        // make sure the unmap has finished
        transferBuffer->addMemoryBarrier(this,
                                         VK_ACCESS_HOST_WRITE_BIT,
                                         VK_ACCESS_TRANSFER_READ_BIT,
                                         VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
                                         VK_PIPELINE_STAGE_TRANSFER_BIT,
                                         false);

        // Set up copy region
        bool flipY = kBottomLeft_GrSurfaceOrigin == tex->origin();
        VkOffset3D offset = {
            left,
            flipY ? tex->height() - top - height : top,
            0
        };

        VkBufferImageCopy region;
        memset(&region, 0, sizeof(VkBufferImageCopy));
        region.bufferOffset = 0;
        region.bufferRowLength = width;
        region.bufferImageHeight = height;
        region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
        region.imageOffset = offset;
        region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

        // Change layout of our target so it can be copied to
        VkImageLayout layout = tex->currentLayout();
        VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
        VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
        tex->setImageLayout(this,
                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                            srcAccessMask,
                            dstAccessMask,
                            srcStageMask,
                            dstStageMask,
                            false);

        // Copy the buffer to the image
        fCurrentCmdBuffer->copyBufferToImage(this,
                                             transferBuffer,
                                             tex,
                                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                             1,
                                             &region);

        // Submit the current command buffer to the Queue
        this->submitCommandBuffer(kSkip_SyncQueue);

        transferBuffer->unref();
    }

    return true;
}

////////////////////////////////////////////////////////////////////////////////
GrTexture* GrVkGpu::onCreateTexture(const GrSurfaceDesc& desc, GrGpuResource::LifeCycle lifeCycle,
                                    const SkTArray<GrMipLevel>& texels) {
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);

    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(desc.fConfig, &pixelFormat)) {
        return nullptr;
    }

    if (!fVkCaps->isConfigTexturable(desc.fConfig)) {
        return nullptr;
    }

    bool linearTiling = false;
    if (SkToBool(desc.fFlags & kZeroCopy_GrSurfaceFlag)) {
        if (fVkCaps->isConfigTexurableLinearly(desc.fConfig) &&
            (!renderTarget || fVkCaps->isConfigRenderableLinearly(desc.fConfig, false))) {
            linearTiling = true;
        } else {
            return nullptr;
        }
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    if (renderTarget) {
        usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    }

    // For now we will set VK_IMAGE_USAGE_TRANSFER_DST_BIT and VK_IMAGE_USAGE_TRANSFER_SRC_BIT on
    // every texture since we do not know whether or not we will be using this texture in some
    // copy or not. Also this assumes, as is the current case, that all render targets in vulkan
    // are also textures. If we change this practice of setting both bits, we must make sure to
    // set the destination bit if we are uploading srcData to the texture.
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VkFlags memProps = (!texels.empty() && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
                                                           VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

    // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is
    // requested, this ImageDesc describes the resolved texture. Therefore we always have samples
    // set to 1.
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = pixelFormat;
    imageDesc.fWidth = desc.fWidth;
    imageDesc.fHeight = desc.fHeight;
    imageDesc.fLevels = 1;
    imageDesc.fSamples = 1;
    imageDesc.fImageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = usageFlags;
    imageDesc.fMemProps = memProps;

    GrVkTexture* tex;
    if (renderTarget) {
        tex = GrVkTextureRenderTarget::CreateNewTextureRenderTarget(this, desc, lifeCycle,
                                                                    imageDesc);
    } else {
        tex = GrVkTexture::CreateNewTexture(this, desc, lifeCycle, imageDesc);
    }

    if (!tex) {
        return nullptr;
    }

    // TODO: We're ignoring MIP levels here.
    if (!texels.empty()) {
        SkASSERT(texels.begin()->fPixels);
        if (!this->uploadTexData(tex, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
                                 texels.begin()->fPixels, texels.begin()->fRowBytes)) {
            tex->unref();
            return nullptr;
        }
    }

    return tex;
}

////////////////////////////////////////////////////////////////////////////////

static GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin) {
    // By default, all textures in Vk use TopLeft
    if (kDefault_GrSurfaceOrigin == origin) {
        return kTopLeft_GrSurfaceOrigin;
    } else {
        return origin;
    }
}

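// Wrapping: the opaque desc.fTextureHandle is interpreted as a pointer to a
// client-provided GrVkTextureInfo (createTestingOnlyBackendTexture below
// produces one); borrowed vs. adopted ownership only changes the LifeCycle
// passed along, which governs when the VkImage and its memory are released.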
GrTexture* GrVkGpu::onWrapBackendTexture(const GrBackendTextureDesc& desc,
                                         GrWrapOwnership ownership) {
    VkFormat format;
    if (!GrPixelConfigToVkFormat(desc.fConfig, &format)) {
        return nullptr;
    }

    if (0 == desc.fTextureHandle) {
        return nullptr;
    }

    int maxSize = this->caps()->maxTextureSize();
    if (desc.fWidth > maxSize || desc.fHeight > maxSize) {
        return nullptr;
    }

    const GrVkTextureInfo* info = reinterpret_cast<const GrVkTextureInfo*>(desc.fTextureHandle);
    if (VK_NULL_HANDLE == info->fImage || VK_NULL_HANDLE == info->fAlloc) {
        return nullptr;
    }

    GrGpuResource::LifeCycle lifeCycle = (kAdopt_GrWrapOwnership == ownership)
                                         ? GrGpuResource::kAdopted_LifeCycle
                                         : GrGpuResource::kBorrowed_LifeCycle;

    GrSurfaceDesc surfDesc;
    // next line relies on GrBackendTextureDesc's flags matching GrTexture's
    surfDesc.fFlags = (GrSurfaceFlags)desc.fFlags;
    surfDesc.fWidth = desc.fWidth;
    surfDesc.fHeight = desc.fHeight;
    surfDesc.fConfig = desc.fConfig;
    surfDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount());
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag);
    // In GL, Chrome assumes all textures are BottomLeft
    // In VK, we don't have this restriction
    surfDesc.fOrigin = resolve_origin(desc.fOrigin);

    GrVkTexture* texture = nullptr;
    if (renderTarget) {
        texture = GrVkTextureRenderTarget::CreateWrappedTextureRenderTarget(this, surfDesc,
                                                                            lifeCycle, format,
                                                                            info);
    } else {
        texture = GrVkTexture::CreateWrappedTexture(this, surfDesc, lifeCycle, format,
                                                    info);
    }
    if (!texture) {
        return nullptr;
    }

    return texture;
}

GrRenderTarget* GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& wrapDesc,
                                                   GrWrapOwnership ownership) {
    const GrVkTextureInfo* info =
        reinterpret_cast<const GrVkTextureInfo*>(wrapDesc.fRenderTargetHandle);
    if (VK_NULL_HANDLE == info->fImage ||
        (VK_NULL_HANDLE == info->fAlloc && kAdopt_GrWrapOwnership == ownership)) {
        return nullptr;
    }

    GrGpuResource::LifeCycle lifeCycle = (kAdopt_GrWrapOwnership == ownership)
                                         ? GrGpuResource::kAdopted_LifeCycle
                                         : GrGpuResource::kBorrowed_LifeCycle;

    GrSurfaceDesc desc;
    desc.fConfig = wrapDesc.fConfig;
    desc.fFlags = kCheckAllocation_GrSurfaceFlag;
    desc.fWidth = wrapDesc.fWidth;
    desc.fHeight = wrapDesc.fHeight;
    desc.fSampleCnt = SkTMin(wrapDesc.fSampleCnt, this->caps()->maxSampleCount());

    desc.fOrigin = resolve_origin(wrapDesc.fOrigin);

    GrVkRenderTarget* tgt = GrVkRenderTarget::CreateWrappedRenderTarget(this, desc,
                                                                        lifeCycle,
                                                                        info);
    if (tgt && wrapDesc.fStencilBits) {
        if (!createStencilAttachmentForRenderTarget(tgt, desc.fWidth, desc.fHeight)) {
            tgt->unref();
            return nullptr;
        }
    }
    return tgt;
}

////////////////////////////////////////////////////////////////////////////////

void GrVkGpu::bindGeometry(const GrPrimitiveProcessor& primProc,
                           const GrNonInstancedMesh& mesh) {
    GrVkVertexBuffer* vbuf;
    vbuf = (GrVkVertexBuffer*)mesh.vertexBuffer();
    SkASSERT(vbuf);
    SkASSERT(!vbuf->isMapped());

    vbuf->addMemoryBarrier(this,
                           VK_ACCESS_HOST_WRITE_BIT,
                           VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
                           VK_PIPELINE_STAGE_HOST_BIT,
                           VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                           false);

    fCurrentCmdBuffer->bindVertexBuffer(this, vbuf);

    if (mesh.isIndexed()) {
        GrVkIndexBuffer* ibuf = (GrVkIndexBuffer*)mesh.indexBuffer();
        SkASSERT(ibuf);
        SkASSERT(!ibuf->isMapped());

        ibuf->addMemoryBarrier(this,
                               VK_ACCESS_HOST_WRITE_BIT,
                               VK_ACCESS_INDEX_READ_BIT,
                               VK_PIPELINE_STAGE_HOST_BIT,
                               VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                               false);

        fCurrentCmdBuffer->bindIndexBuffer(this, ibuf);
    }
}

////////////////////////////////////////////////////////////////////////////////

GrStencilAttachment* GrVkGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt,
                                                                     int width,
                                                                     int height) {
    SkASSERT(width >= rt->width());
    SkASSERT(height >= rt->height());

    int samples = rt->numStencilSamples();

    const GrVkCaps::StencilFormat& sFmt = this->vkCaps().preferedStencilFormat();

    GrVkStencilAttachment* stencil(GrVkStencilAttachment::Create(this,
                                                                 GrGpuResource::kCached_LifeCycle,
                                                                 width,
                                                                 height,
                                                                 samples,
                                                                 sFmt));
    fStats.incStencilAttachmentCreates();
    return stencil;
}

////////////////////////////////////////////////////////////////////////////////

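// The GrBackendObject returned below is really a pointer to a heap-allocated
// GrVkTextureInfo; deleteTestingOnlyBackendTexture() is what later releases
// the VkImage, its VkDeviceMemory, and the info struct itself.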
GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, int h,
                                                         GrPixelConfig config) {
    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
        return 0;
    }

    bool linearTiling = false;
    if (!fVkCaps->isConfigTexturable(config)) {
        return 0;
    }

    if (fVkCaps->isConfigTexurableLinearly(config)) {
        linearTiling = true;
    }

    // Currently this is not supported since it requires a copy which has not yet been implemented.
    if (srcData && !linearTiling) {
        return 0;
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VkFlags memProps = (srcData && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
                                                   VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

    VkImage image = VK_NULL_HANDLE;
    VkDeviceMemory alloc = VK_NULL_HANDLE;

    VkImageTiling imageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
    VkImageLayout initialLayout = (VK_IMAGE_TILING_LINEAR == imageTiling)
                                  ? VK_IMAGE_LAYOUT_PREINITIALIZED
                                  : VK_IMAGE_LAYOUT_UNDEFINED;

    // Create Image
    VkSampleCountFlagBits vkSamples;
    if (!GrSampleCountToVkSampleCount(1, &vkSamples)) {
        return 0;
    }

    const VkImageCreateInfo imageCreateInfo = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // sType
        NULL,                                // pNext
        0,                                   // VkImageCreateFlags
        VK_IMAGE_TYPE_2D,                    // VkImageType
        pixelFormat,                         // VkFormat
        { (uint32_t) w, (uint32_t) h, 1 },   // VkExtent3D
        1,                                   // mipLevels
        1,                                   // arrayLayers
        vkSamples,                           // samples
        imageTiling,                         // VkImageTiling
        usageFlags,                          // VkImageUsageFlags
        VK_SHARING_MODE_EXCLUSIVE,           // VkSharingMode
        0,                                   // queueFamilyCount
        0,                                   // pQueueFamilyIndices
        initialLayout                        // initialLayout
    };

    GR_VK_CALL_ERRCHECK(this->vkInterface(),
                        CreateImage(this->device(), &imageCreateInfo, nullptr, &image));

    if (!GrVkMemory::AllocAndBindImageMemory(this, image, memProps, &alloc)) {
        VK_CALL(DestroyImage(this->device(), image, nullptr));
        return 0;
    }

    if (srcData) {
        if (linearTiling) {
            const VkImageSubresource subres = {
                VK_IMAGE_ASPECT_COLOR_BIT,
                0,  // mipLevel
                0,  // arraySlice
            };
            VkSubresourceLayout layout;
            VkResult err;

            VK_CALL(GetImageSubresourceLayout(fDevice, image, &subres, &layout));

            void* mapPtr;
            err = VK_CALL(MapMemory(fDevice, alloc, 0, layout.rowPitch * h, 0, &mapPtr));
            if (err) {
                VK_CALL(FreeMemory(this->device(), alloc, nullptr));
                VK_CALL(DestroyImage(this->device(), image, nullptr));
                return 0;
            }

            size_t bpp = GrBytesPerPixel(config);
            size_t rowCopyBytes = bpp * w;
            // If there is no padding on dst (layout.rowPitch) we can do a single memcpy.
            // This assumes the srcData comes in with no padding.
            if (rowCopyBytes == layout.rowPitch) {
                memcpy(mapPtr, srcData, rowCopyBytes * h);
            } else {
                SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), srcData, rowCopyBytes,
                             rowCopyBytes, h);
            }
            VK_CALL(UnmapMemory(fDevice, alloc));
        } else {
            // TODO: Add support for copying to optimal tiling
            SkASSERT(false);
        }
    }

    GrVkTextureInfo* info = new GrVkTextureInfo;
    info->fImage = image;
    info->fAlloc = alloc;
    info->fImageTiling = imageTiling;
    info->fImageLayout = initialLayout;

    return (GrBackendObject)info;
}

bool GrVkGpu::isTestingOnlyBackendTexture(GrBackendObject id) const {
    const GrVkTextureInfo* backend = reinterpret_cast<const GrVkTextureInfo*>(id);

    if (backend && backend->fImage && backend->fAlloc) {
        VkMemoryRequirements req;
        memset(&req, 0, sizeof(req));
        GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
                                                                   backend->fImage,
                                                                   &req));
        // TODO: find a better check
        // This will probably fail with a different driver
        return (req.size > 0) && (req.size <= 8192 * 8192);
    }

    return false;
}

void GrVkGpu::deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandon) {
    const GrVkTextureInfo* backend = reinterpret_cast<const GrVkTextureInfo*>(id);

    if (backend) {
        if (!abandon) {
            // something in the command buffer may still be using this, so force submit
            this->submitCommandBuffer(kForce_SyncQueue);

            VK_CALL(FreeMemory(this->device(), backend->fAlloc, nullptr));
            VK_CALL(DestroyImage(this->device(), backend->fImage, nullptr));
        }
        delete backend;
    }
}

////////////////////////////////////////////////////////////////////////////////

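// The three helpers below all funnel into GrVkCommandBuffer::pipelineBarrier();
// they differ only in which Vulkan barrier struct (global memory, buffer
// memory, or image memory) gets recorded. Presumably the buffer/image objects'
// own addMemoryBarrier() calls (e.g. in bindGeometry above) route through
// these when they need a barrier on the current command buffer.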
void GrVkGpu::addMemoryBarrier(VkPipelineStageFlags srcStageMask,
                               VkPipelineStageFlags dstStageMask,
                               bool byRegion,
                               VkMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                     VkPipelineStageFlags dstStageMask,
                                     bool byRegion,
                                     VkBufferMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kBufferMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::addImageMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                    VkPipelineStageFlags dstStageMask,
                                    bool byRegion,
                                    VkImageMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kImageMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::finishDrawTarget() {
    // Submit the current command buffer to the Queue
    this->submitCommandBuffer(kSkip_SyncQueue);
}

void GrVkGpu::clearStencil(GrRenderTarget* target) {
    if (nullptr == target) {
        return;
    }
    GrStencilAttachment* stencil = target->renderTargetPriv().getStencilAttachment();
    GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));

    VkImageLayout origDstLayout = vkStencil->currentLayout();

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    vkStencil->setImageLayout(this,
                              VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                              srcAccessMask,
                              dstAccessMask,
                              srcStageMask,
                              dstStageMask,
                              false);

    VkImageSubresourceRange subRange;
    memset(&subRange, 0, sizeof(VkImageSubresourceRange));
    subRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    subRange.baseMipLevel = 0;
    subRange.levelCount = 1;
    subRange.baseArrayLayer = 0;
    subRange.layerCount = 1;

    // TODO: I imagine that most times we want to clear a stencil it will be at the beginning of a
    // draw. Thus we should look into using the load op functions on the render pass to clear out
    // the stencil there.
    fCurrentCmdBuffer->clearDepthStencilImage(this, vkStencil, &vkStencilColor, 1, &subRange);
}

void GrVkGpu::onClearStencilClip(GrRenderTarget* target, const SkIRect& rect, bool insideClip) {
    SkASSERT(target);

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
    GrStencilAttachment* sb = target->renderTargetPriv().getStencilAttachment();
    GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)sb;

    // this should only be called internally when we know we have a
    // stencil buffer.
    SkASSERT(sb);
    int stencilBitCount = sb->bits();

    // The contract with the callers does not guarantee that we preserve all bits in the stencil
    // during this clear. Thus we will clear the entire stencil to the desired value.

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
    if (insideClip) {
        vkStencilColor.stencil = (1 << (stencilBitCount - 1));
    } else {
        vkStencilColor.stencil = 0;
    }

    VkImageLayout origDstLayout = vkStencil->currentLayout();
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    VkPipelineStageFlags srcStageMask =
        GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    vkStencil->setImageLayout(this,
                              VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                              srcAccessMask,
                              dstAccessMask,
                              srcStageMask,
                              dstStageMask,
                              false);

    VkClearRect clearRect;
    // Flip rect if necessary
    SkIRect vkRect = rect;

    if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) {
        vkRect.fTop = vkRT->height() - rect.fBottom;
        vkRect.fBottom = vkRT->height() - rect.fTop;
    }

    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };

    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
    SkASSERT(renderPass);
    fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

    uint32_t stencilIndex;
    SkAssertResult(renderPass->stencilAttachmentIndex(&stencilIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    attachment.colorAttachment = 0; // this value shouldn't matter
    attachment.clearValue.depthStencil = vkStencilColor;

    fCurrentCmdBuffer->clearAttachments(this, 1, &attachment, 1, &clearRect);
    fCurrentCmdBuffer->endRenderPass(this);

    return;
}

void GrVkGpu::onClear(GrRenderTarget* target, const SkIRect& rect, GrColor color) {
    // parent class should never let us get here with no RT
    SkASSERT(target);

    VkClearColorValue vkColor;
    GrColorToRGBAFloat(color, vkColor.float32);

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
    VkImageLayout origDstLayout = vkRT->currentLayout();

    if (rect.width() != target->width() || rect.height() != target->height()) {
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
        VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
        VkPipelineStageFlags srcStageMask =
            GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
        vkRT->setImageLayout(this,
                             VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                             srcAccessMask,
                             dstAccessMask,
                             srcStageMask,
                             dstStageMask,
                             false);

        VkClearRect clearRect;
        // Flip rect if necessary
        SkIRect vkRect = rect;
        if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) {
            vkRect.fTop = vkRT->height() - rect.fBottom;
            vkRect.fBottom = vkRT->height() - rect.fTop;
        }
        clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
        clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
        clearRect.baseArrayLayer = 0;
        clearRect.layerCount = 1;

        const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
        SkASSERT(renderPass);
        fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

        uint32_t colorIndex;
        SkAssertResult(renderPass->colorAttachmentIndex(&colorIndex));

        VkClearAttachment attachment;
        attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        attachment.colorAttachment = colorIndex;
        attachment.clearValue.color = vkColor;

        fCurrentCmdBuffer->clearAttachments(this, 1, &attachment, 1, &clearRect);
        fCurrentCmdBuffer->endRenderPass(this);
        return;
    }

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    vkRT->setImageLayout(this,
                         VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                         srcAccessMask,
                         dstAccessMask,
                         srcStageMask,
                         dstStageMask,
                         false);

    VkImageSubresourceRange subRange;
    memset(&subRange, 0, sizeof(VkImageSubresourceRange));
    subRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    subRange.baseMipLevel = 0;
    subRange.levelCount = 1;
    subRange.baseArrayLayer = 0;
    subRange.layerCount = 1;

    // In the future we may not actually be doing this type of clear at all. If we are inside a
    // render pass or doing a non full clear then we will use CmdClearColorAttachment. The more
    // common use case will be clearing an attachment at the start of a render pass, in which case
    // we will use the clear load ops.
    fCurrentCmdBuffer->clearColorImage(this,
                                       vkRT,
                                       &vkColor,
                                       1, &subRange);
}

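// vkCmdCopyImage is a raw transfer between images whose texel block sizes
// match, so matching GrBytesPerPixel (e.g. any two 4 bpp configs) plus
// matching origins (no flip needed) is used as the compatibility check here.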
inline bool can_copy_image(const GrSurface* dst,
                           const GrSurface* src,
                           const GrVkGpu* gpu) {
    // Currently we don't support msaa
    if ((dst->asRenderTarget() && dst->asRenderTarget()->numColorSamples() > 1) ||
        (src->asRenderTarget() && src->asRenderTarget()->numColorSamples() > 1)) {
        return false;
    }

    // We require that all vulkan GrSurfaces have been created with transfer_dst and transfer_src
    // as image usage flags.
    if (src->origin() == dst->origin() &&
        GrBytesPerPixel(src->config()) == GrBytesPerPixel(dst->config())) {
        return true;
    }

    // How does msaa play into this? If a VkTexture is multisampled, are we copying the
    // multisampled or the resolved image here? If multisampled, Vulkan requires sample counts to
    // be the same.

    return false;
}

void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst,
                                     GrSurface* src,
                                     GrVkImage* dstImage,
                                     GrVkImage* srcImage,
                                     const SkIRect& srcRect,
                                     const SkIPoint& dstPoint) {
    SkASSERT(can_copy_image(dst, src, this));

    VkImageLayout origDstLayout = dstImage->currentLayout();
    VkImageLayout origSrcLayout = srcImage->currentLayout();

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    // These flags are for flushing/invalidating caches and for the dst image it doesn't matter if
    // the cache is flushed since it is only being written to.
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    dstImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             srcAccessMask,
                             dstAccessMask,
                             srcStageMask,
                             dstStageMask,
                             false);

    srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origSrcLayout);
    dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origSrcLayout);
    dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;

    srcImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                             srcAccessMask,
                             dstAccessMask,
                             srcStageMask,
                             dstStageMask,
                             false);

    // Flip rect if necessary
    SkIRect srcVkRect = srcRect;
    int32_t dstY = dstPoint.fY;

    if (kBottomLeft_GrSurfaceOrigin == src->origin()) {
        SkASSERT(kBottomLeft_GrSurfaceOrigin == dst->origin());
        srcVkRect.fTop = src->height() - srcRect.fBottom;
        srcVkRect.fBottom = src->height() - srcRect.fTop;
        dstY = dst->height() - dstPoint.fY - srcVkRect.height();
    }

    VkImageCopy copyRegion;
    memset(&copyRegion, 0, sizeof(VkImageCopy));
    copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.srcOffset = { srcVkRect.fLeft, srcVkRect.fTop, 0 };
    copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.dstOffset = { dstPoint.fX, dstY, 0 };
    // extent depth must be 1 for 2D images
    copyRegion.extent = { (uint32_t)srcVkRect.width(), (uint32_t)srcVkRect.height(), 1 };

    fCurrentCmdBuffer->copyImage(this,
                                 srcImage,
                                 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                 dstImage,
                                 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                 1,
                                 &copyRegion);
}

inline bool can_copy_as_blit(const GrSurface* dst,
                             const GrSurface* src,
                             const GrVkImage* dstImage,
                             const GrVkImage* srcImage,
                             const GrVkGpu* gpu) {
    // We require that all vulkan GrSurfaces have been created with transfer_dst and transfer_src
    // as image usage flags.
    const GrVkCaps& caps = gpu->vkCaps();
    if (!caps.configCanBeDstofBlit(dst->config(), dstImage->isLinearTiled()) ||
        !caps.configCanBeSrcofBlit(src->config(), srcImage->isLinearTiled())) {
        return false;
    }

    // We cannot blit images that are multisampled. Will need to figure out if we can blit the
    // resolved msaa though.
    if ((dst->asRenderTarget() && dst->asRenderTarget()->numColorSamples() > 1) ||
        (src->asRenderTarget() && src->asRenderTarget()->numColorSamples() > 1)) {
        return false;
    }

    return true;
}

void GrVkGpu::copySurfaceAsBlit(GrSurface* dst,
                                GrSurface* src,
                                GrVkImage* dstImage,
                                GrVkImage* srcImage,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint) {
    SkASSERT(can_copy_as_blit(dst, src, dstImage, srcImage, this));

    VkImageLayout origDstLayout = dstImage->currentLayout();
    VkImageLayout origSrcLayout = srcImage->currentLayout();

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    dstImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             srcAccessMask,
                             dstAccessMask,
                             srcStageMask,
                             dstStageMask,
                             false);

    srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origSrcLayout);
    dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origSrcLayout);
    dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;

    srcImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                             srcAccessMask,
                             dstAccessMask,
                             srcStageMask,
                             dstStageMask,
                             false);

    // Flip rect if necessary
    SkIRect srcVkRect;
    srcVkRect.fLeft = srcRect.fLeft;
    srcVkRect.fRight = srcRect.fRight;
    SkIRect dstRect;
    dstRect.fLeft = dstPoint.fX;
    dstRect.fRight = dstPoint.fX + srcRect.width();

    if (kBottomLeft_GrSurfaceOrigin == src->origin()) {
        srcVkRect.fTop = src->height() - srcRect.fBottom;
        srcVkRect.fBottom = src->height() - srcRect.fTop;
    } else {
        srcVkRect.fTop = srcRect.fTop;
        srcVkRect.fBottom = srcRect.fBottom;
    }

    if (kBottomLeft_GrSurfaceOrigin == dst->origin()) {
        dstRect.fTop = dst->height() - dstPoint.fY - srcVkRect.height();
    } else {
        dstRect.fTop = dstPoint.fY;
    }
    dstRect.fBottom = dstRect.fTop + srcVkRect.height();

    // If we have different origins, we need to flip the top and bottom of the dst rect so that we
    // get the correct orientation of the copied data.
    if (src->origin() != dst->origin()) {
        SkTSwap(dstRect.fTop, dstRect.fBottom);
    }

    VkImageBlit blitRegion;
    memset(&blitRegion, 0, sizeof(VkImageBlit));
    blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    // for 2D images the blit's z range must be [0, 1)
    blitRegion.srcOffsets[0] = { srcVkRect.fLeft, srcVkRect.fTop, 0 };
    blitRegion.srcOffsets[1] = { srcVkRect.fRight, srcVkRect.fBottom, 1 };
    blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    blitRegion.dstOffsets[0] = { dstRect.fLeft, dstRect.fTop, 0 };
    blitRegion.dstOffsets[1] = { dstRect.fRight, dstRect.fBottom, 1 };

    fCurrentCmdBuffer->blitImage(this,
                                 srcImage,
                                 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                 dstImage,
                                 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                 1,
                                 &blitRegion,
                                 VK_FILTER_NEAREST); // We never scale so any filter works here
}

inline bool can_copy_as_draw(const GrSurface* dst,
                             const GrSurface* src,
                             const GrVkGpu* gpu) {
    return false;
}

void GrVkGpu::copySurfaceAsDraw(GrSurface* dst,
                                GrSurface* src,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint) {
    SkASSERT(false);
}

bool GrVkGpu::onCopySurface(GrSurface* dst,
                            GrSurface* src,
                            const SkIRect& srcRect,
                            const SkIPoint& dstPoint) {
    GrVkImage* dstImage;
    GrVkImage* srcImage;
    if (dst->asTexture()) {
        dstImage = static_cast<GrVkTexture*>(dst->asTexture());
    } else {
        SkASSERT(dst->asRenderTarget());
        dstImage = static_cast<GrVkRenderTarget*>(dst->asRenderTarget());
    }
    if (src->asTexture()) {
        srcImage = static_cast<GrVkTexture*>(src->asTexture());
    } else {
        SkASSERT(src->asRenderTarget());
        srcImage = static_cast<GrVkRenderTarget*>(src->asRenderTarget());
    }

    if (can_copy_image(dst, src, this)) {
        this->copySurfaceAsCopyImage(dst, src, dstImage, srcImage, srcRect, dstPoint);
        return true;
    }

    if (can_copy_as_blit(dst, src, dstImage, srcImage, this)) {
        this->copySurfaceAsBlit(dst, src, dstImage, srcImage, srcRect, dstPoint);
        return true;
    }

    if (can_copy_as_draw(dst, src, this)) {
        this->copySurfaceAsDraw(dst, src, srcRect, dstPoint);
        return true;
    }

    return false;
}

bool GrVkGpu::initCopySurfaceDstDesc(const GrSurface* src, GrSurfaceDesc* desc) const {
    // Currently we don't support msaa
    if (src->asRenderTarget() && src->asRenderTarget()->numColorSamples() > 1) {
        return false;
    }

    // This will support copying the dst as CopyImage since all of our surfaces require transferSrc
    // and transferDst usage flags in Vulkan.
    desc->fOrigin = src->origin();
    desc->fConfig = src->config();
    desc->fFlags = kNone_GrSurfaceFlags;
    return true;
}

void GrVkGpu::onGetMultisampleSpecs(GrRenderTarget* rt, const GrStencilSettings&,
                                    int* effectiveSampleCnt, SkAutoTDeleteArray<SkPoint>*) {
    // TODO: stub.
    SkASSERT(!this->caps()->sampleLocationsSupport());
    *effectiveSampleCnt = rt->desc().fSampleCnt;
}

bool GrVkGpu::onGetReadPixelsInfo(GrSurface* srcSurface, int width, int height, size_t rowBytes,
                                  GrPixelConfig readConfig, DrawPreference* drawPreference,
                                  ReadPixelTempDrawInfo* tempDrawInfo) {
    // Currently we don't handle draws, so if the caller wants/needs to do a draw we need to fail
    if (kNoDraw_DrawPreference != *drawPreference) {
        return false;
    }

    if (srcSurface->config() != readConfig) {
        // TODO: This should fall back to drawing or copying to change config of srcSurface to
        // match that of readConfig.
        return false;
    }

    return true;
}

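// Readback sequence: transition the texture to TRANSFER_SRC_OPTIMAL, record a
// copyImageToBuffer into a transfer buffer, fence it with a TRANSFER_WRITE ->
// HOST_READ barrier, then force-submit and wait so the buffer can be mapped
// and memcpy'd into the caller's pixels (flipping rows in place if needed).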
bool GrVkGpu::onReadPixels(GrSurface* surface,
                           int left, int top, int width, int height,
                           GrPixelConfig config,
                           void* buffer,
                           size_t rowBytes) {
    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
        return false;
    }

    GrVkTexture* tgt = static_cast<GrVkTexture*>(surface->asTexture());
    if (!tgt) {
        return false;
    }

    // Change layout of our target so it can be used as copy
    VkImageLayout layout = tgt->currentLayout();
    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
    tgt->setImageLayout(this,
                        VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                        srcAccessMask,
                        dstAccessMask,
                        srcStageMask,
                        dstStageMask,
                        false);

    GrVkTransferBuffer* transferBuffer =
        static_cast<GrVkTransferBuffer*>(this->createBuffer(rowBytes * height,
                                                            kXferGpuToCpu_GrBufferType,
                                                            kStream_GrAccessPattern));

    bool flipY = kBottomLeft_GrSurfaceOrigin == surface->origin();
    VkOffset3D offset = {
        left,
        flipY ? surface->height() - top - height : top,
        0
    };

    // Copy the image to a buffer so we can map it to cpu memory
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = 0;
    region.bufferRowLength = 0; // Forces RowLength to be imageExtent.width
    region.bufferImageHeight = 0; // Forces height to be tightly packed. Only useful for 3d images.
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = offset;
    region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

    fCurrentCmdBuffer->copyImageToBuffer(this,
                                         tgt,
                                         VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                         transferBuffer,
                                         1,
                                         &region);

    // make sure the copy to buffer has finished
    transferBuffer->addMemoryBarrier(this,
                                     VK_ACCESS_TRANSFER_WRITE_BIT,
                                     VK_ACCESS_HOST_READ_BIT,
                                     VK_PIPELINE_STAGE_TRANSFER_BIT,
                                     VK_PIPELINE_STAGE_HOST_BIT,
                                     false);

    // We need to submit the current command buffer to the Queue and make sure it finishes before
    // we can copy the data out of the buffer.
    this->submitCommandBuffer(kForce_SyncQueue);

    void* mappedMemory = transferBuffer->map();

    memcpy(buffer, mappedMemory, rowBytes*height);

    transferBuffer->unmap();
    transferBuffer->unref();

    if (flipY) {
        SkAutoSMalloc<32 * sizeof(GrColor)> scratch;
        size_t tightRowBytes = GrBytesPerPixel(config) * width;
        scratch.reset(tightRowBytes);
        void* tmpRow = scratch.get();
        // flip y in-place by rows
        const int halfY = height >> 1;
        char* top = reinterpret_cast<char*>(buffer);
        char* bottom = top + (height - 1) * rowBytes;
        for (int y = 0; y < halfY; y++) {
            memcpy(tmpRow, top, tightRowBytes);
            memcpy(top, bottom, tightRowBytes);
            memcpy(bottom, tmpRow, tightRowBytes);
            top += rowBytes;
            bottom -= rowBytes;
        }
    }

    return true;
}
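
// Looks up (or builds) a cached GrVkPipelineState keyed on the pipeline,
// primitive processor, primitive type, and render pass, then calls setData()
// and binds it on the current command buffer along with dynamic state.
// onDraw() below re-invokes this whenever a mesh changes primitive type.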
sk_sp<GrVkPipelineState> GrVkGpu::prepareDrawState(const GrPipeline& pipeline,
                                                   const GrPrimitiveProcessor& primProc,
                                                   GrPrimitiveType primitiveType,
                                                   const GrVkRenderPass& renderPass) {
    sk_sp<GrVkPipelineState> pipelineState =
        fResourceProvider.findOrCreateCompatiblePipelineState(pipeline,
                                                              primProc,
                                                              primitiveType,
                                                              renderPass);
    if (!pipelineState) {
        return pipelineState;
    }

    pipelineState->setData(this, primProc, pipeline);

    pipelineState->bind(this, fCurrentCmdBuffer);

    GrVkPipeline::SetDynamicState(this, fCurrentCmdBuffer, pipeline);

    return pipelineState;
}

void GrVkGpu::onDraw(const GrPipeline& pipeline,
                     const GrPrimitiveProcessor& primProc,
                     const GrMesh* meshes,
                     int meshCount) {
    if (!meshCount) {
        return;
    }
    GrRenderTarget* rt = pipeline.getRenderTarget();
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
    const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
    SkASSERT(renderPass);

    fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

    GrPrimitiveType primitiveType = meshes[0].primitiveType();
    sk_sp<GrVkPipelineState> pipelineState = this->prepareDrawState(pipeline,
                                                                    primProc,
                                                                    primitiveType,
                                                                    *renderPass);
    if (!pipelineState) {
        return;
    }

    // Change layout of our render target so it can be used as the color attachment
    VkImageLayout layout = vkRT->currentLayout();
    // Our color attachment is purely a destination and won't be read so don't need to flush or
    // invalidate any caches
    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    vkRT->setImageLayout(this,
                         VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                         srcAccessMask,
                         dstAccessMask,
                         srcStageMask,
                         dstStageMask,
                         false);

    // If we are using a stencil attachment we also need to update its layout
    if (!pipeline.getStencil().isDisabled()) {
        GrStencilAttachment* stencil = vkRT->renderTargetPriv().getStencilAttachment();
        GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
        VkImageLayout origDstLayout = vkStencil->currentLayout();
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
        VkAccessFlags dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
        VkPipelineStageFlags srcStageMask =
            GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
        vkStencil->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                                  srcAccessMask,
                                  dstAccessMask,
                                  srcStageMask,
                                  dstStageMask,
                                  false);
    }

    for (int i = 0; i < meshCount; ++i) {
        if (GrXferBarrierType barrierType = pipeline.xferBarrierType(*this->caps())) {
            this->xferBarrier(pipeline.getRenderTarget(), barrierType);
        }

        const GrMesh& mesh = meshes[i];
        GrMesh::Iterator iter;
        const GrNonInstancedMesh* nonIdxMesh = iter.init(mesh);
        do {
            if (nonIdxMesh->primitiveType() != primitiveType) {
                // Technically we don't have to call this here (since there is a safety check in
                // pipelineState::setData), but this will allow for quicker freeing of resources
                // if the pipelineState sits in a cache for a while.
                pipelineState->freeTempResources(this);
                SkDEBUGCODE(pipelineState = nullptr);
                primitiveType = nonIdxMesh->primitiveType();
                pipelineState = this->prepareDrawState(pipeline,
                                                       primProc,
                                                       primitiveType,
                                                       *renderPass);
                if (!pipelineState) {
                    return;
                }
            }
            SkASSERT(pipelineState);
            this->bindGeometry(primProc, *nonIdxMesh);

            if (nonIdxMesh->isIndexed()) {
                fCurrentCmdBuffer->drawIndexed(this,
                                               nonIdxMesh->indexCount(),
                                               1,
                                               nonIdxMesh->startIndex(),
                                               nonIdxMesh->startVertex(),
                                               0);
            } else {
                fCurrentCmdBuffer->draw(this,
                                        nonIdxMesh->vertexCount(),
                                        1,
                                        nonIdxMesh->startVertex(),
                                        0);
            }

            fStats.incNumDraws();
        } while ((nonIdxMesh = iter.next()));
    }

    fCurrentCmdBuffer->endRenderPass(this);

    // Technically we don't have to call this here (since there is a safety check in
    // pipelineState::setData), but this will allow for quicker freeing of resources if the
    // pipelineState sits in a cache for a while.
    pipelineState->freeTempResources(this);

#if SWAP_PER_DRAW
    glFlush();
#if defined(SK_BUILD_FOR_MAC)
    aglSwapBuffers(aglGetCurrentContext());
    int set_a_break_pt_here = 9;
    aglSwapBuffers(aglGetCurrentContext());
#elif defined(SK_BUILD_FOR_WIN32)
    SwapBuf();
    int set_a_break_pt_here = 9;
    SwapBuf();
#endif
#endif
}