/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkGpu.h"

#include "GrContextOptions.h"
#include "GrGeometryProcessor.h"
#include "GrGpuResourceCacheAccess.h"
#include "GrMesh.h"
#include "GrPipeline.h"
#include "GrRenderTargetPriv.h"
#include "GrSurfacePriv.h"
#include "GrTexturePriv.h"

#include "GrVkCommandBuffer.h"
#include "GrVkImage.h"
#include "GrVkIndexBuffer.h"
#include "GrVkMemory.h"
#include "GrVkPipeline.h"
#include "GrVkPipelineState.h"
#include "GrVkRenderPass.h"
#include "GrVkResourceProvider.h"
#include "GrVkTexture.h"
#include "GrVkTextureRenderTarget.h"
#include "GrVkTransferBuffer.h"
#include "GrVkVertexBuffer.h"

#include "SkConfig8888.h"

#include "vk/GrVkInterface.h"
#include "vk/GrVkTypes.h"

#define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
#define VK_CALL_RET(RET, X) GR_VK_CALL_RET(this->vkInterface(), RET, X)
#define VK_CALL_ERRCHECK(X) GR_VK_CALL_ERRCHECK(this->vkInterface(), X)
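
// The VK_CALL* macros route every Vulkan call through the gpu's GrVkInterface
// function table rather than the loader's exported symbols; e.g.
// VK_CALL(QueueWaitIdle(fQueue)) expands (roughly) to a call through the
// interface's fQueueWaitIdle pointer. The exact expansion lives in
// GrVkInterface.h; this note is a sketch, not the normative definition.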

#ifdef ENABLE_VK_LAYERS
VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(
    VkDebugReportFlagsEXT flags,
    VkDebugReportObjectTypeEXT objectType,
    uint64_t object,
    size_t location,
    int32_t messageCode,
    const char* pLayerPrefix,
    const char* pMessage,
    void* pUserData) {
    if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
        SkDebugf("Vulkan error [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    } else if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) {
        SkDebugf("Vulkan warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    } else if (flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) {
        SkDebugf("Vulkan perf warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    } else {
        SkDebugf("Vulkan info/debug [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    }
    return VK_FALSE;
}
#endif

GrGpu* GrVkGpu::Create(GrBackendContext backendContext, const GrContextOptions& options,
                       GrContext* context) {
    const GrVkBackendContext* vkBackendContext =
        reinterpret_cast<const GrVkBackendContext*>(backendContext);
    if (!vkBackendContext) {
        vkBackendContext = GrVkBackendContext::Create();
        if (!vkBackendContext) {
            return nullptr;
        }
    } else {
        vkBackendContext->ref();
    }

    return new GrVkGpu(context, options, vkBackendContext);
}

////////////////////////////////////////////////////////////////////////////////

GrVkGpu::GrVkGpu(GrContext* context, const GrContextOptions& options,
                 const GrVkBackendContext* backendCtx)
    : INHERITED(context)
    , fVkInstance(backendCtx->fInstance)
    , fDevice(backendCtx->fDevice)
    , fQueue(backendCtx->fQueue)
    , fResourceProvider(this) {
    fBackendContext.reset(backendCtx);

#ifdef ENABLE_VK_LAYERS
    if (backendCtx->fExtensions & kEXT_debug_report_GrVkExtensionFlag) {
        // Set up the callback creation information
        VkDebugReportCallbackCreateInfoEXT callbackCreateInfo;
        callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
        callbackCreateInfo.pNext = nullptr;
        callbackCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT |
                                   VK_DEBUG_REPORT_WARNING_BIT_EXT |
                                   //VK_DEBUG_REPORT_INFORMATION_BIT_EXT |
                                   //VK_DEBUG_REPORT_DEBUG_BIT_EXT |
                                   VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
        callbackCreateInfo.pfnCallback = &DebugReportCallback;
        callbackCreateInfo.pUserData = nullptr;

        // Register the callback
        GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateDebugReportCallbackEXT(fVkInstance,
                            &callbackCreateInfo, nullptr, &fCallback));
    }
#endif

    fCompiler = shaderc_compiler_initialize();

    fVkCaps.reset(new GrVkCaps(options, this->vkInterface(), backendCtx->fPhysicalDevice,
                               backendCtx->fFeatures, backendCtx->fExtensions));
    fCaps.reset(SkRef(fVkCaps.get()));

    VK_CALL(GetPhysicalDeviceMemoryProperties(backendCtx->fPhysicalDevice, &fPhysDevMemProps));

    const VkCommandPoolCreateInfo cmdPoolInfo = {
        VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,  // sType
        nullptr,                                     // pNext
        VK_COMMAND_POOL_CREATE_TRANSIENT_BIT,        // VkCommandPoolCreateFlags
        backendCtx->fQueueFamilyIndex,               // queueFamilyIndex
    };
    GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateCommandPool(fDevice, &cmdPoolInfo, nullptr,
                                                               &fCmdPool));

    // must call this after creating the CommandPool
    fResourceProvider.init();
    fCurrentCmdBuffer = fResourceProvider.createCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->begin(this);
}

GrVkGpu::~GrVkGpu() {
    fCurrentCmdBuffer->end(this);
    fCurrentCmdBuffer->unref(this);

    // wait for all commands to finish
    fResourceProvider.checkCommandBuffers();
    SkDEBUGCODE(VkResult res =) VK_CALL(QueueWaitIdle(fQueue));
    // VK_ERROR_DEVICE_LOST is acceptable when tearing down (see 4.2.4 in spec)
    SkASSERT(VK_SUCCESS == res || VK_ERROR_DEVICE_LOST == res);

    // must call this just before we destroy the VkDevice
    fResourceProvider.destroyResources();

    VK_CALL(DestroyCommandPool(fDevice, fCmdPool, nullptr));

    shaderc_compiler_release(fCompiler);

#ifdef ENABLE_VK_LAYERS
    VK_CALL(DestroyDebugReportCallbackEXT(fVkInstance, fCallback, nullptr));
#endif
}

///////////////////////////////////////////////////////////////////////////////

void GrVkGpu::submitCommandBuffer(SyncQueue sync) {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->end(this);

    fCurrentCmdBuffer->submitToQueue(this, fQueue, sync);
    fResourceProvider.checkCommandBuffers();

    // Release old command buffer and create a new one
    fCurrentCmdBuffer->unref(this);
    fCurrentCmdBuffer = fResourceProvider.createCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);

    fCurrentCmdBuffer->begin(this);
}
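
// Note on the command buffer lifecycle above: the gpu keeps exactly one
// primary command buffer open for recording at all times. submitCommandBuffer
// ends and submits it, then immediately begins a fresh one, so callers can
// keep recording without checking state. The SyncQueue argument selects
// whether the submit blocks until the queue drains (kForce_SyncQueue, used by
// the read-back path below) or returns immediately (kSkip_SyncQueue).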

///////////////////////////////////////////////////////////////////////////////
GrBuffer* GrVkGpu::onCreateBuffer(size_t size, GrBufferType type, GrAccessPattern accessPattern,
                                  const void* data) {
    GrBuffer* buff;
    switch (type) {
        case kVertex_GrBufferType:
            SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
                     kStatic_GrAccessPattern == accessPattern);
            buff = GrVkVertexBuffer::Create(this, size, kDynamic_GrAccessPattern == accessPattern);
            break;
        case kIndex_GrBufferType:
            SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
                     kStatic_GrAccessPattern == accessPattern);
            buff = GrVkIndexBuffer::Create(this, size, kDynamic_GrAccessPattern == accessPattern);
            break;
        case kXferCpuToGpu_GrBufferType:
            SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
                     kStream_GrAccessPattern == accessPattern);
            buff = GrVkTransferBuffer::Create(this, size, GrVkBuffer::kCopyRead_Type);
            break;
        case kXferGpuToCpu_GrBufferType:
            SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
                     kStream_GrAccessPattern == accessPattern);
            buff = GrVkTransferBuffer::Create(this, size, GrVkBuffer::kCopyWrite_Type);
            break;
        default:
            SkFAIL("Unknown buffer type.");
            return nullptr;
    }
    if (data && buff) {
        buff->updateData(data, size);
    }
    return buff;
}
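
// How the buffer types above map onto transfer usage: kXferCpuToGpu buffers
// are created as GrVkBuffer::kCopyRead_Type (the buffer is the source of a
// buffer-to-image copy), while kXferGpuToCpu buffers are kCopyWrite_Type (the
// buffer is the destination of a read-back copy). Vertex and index buffers
// only carry the dynamic/static access hint.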

////////////////////////////////////////////////////////////////////////////////
bool GrVkGpu::onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
                                   GrPixelConfig srcConfig, DrawPreference* drawPreference,
                                   WritePixelTempDrawInfo* tempDrawInfo) {
    if (kIndex_8_GrPixelConfig == srcConfig || GrPixelConfigIsCompressed(dstSurface->config())) {
        return false;
    }

    // Currently we don't handle draws, so if the caller wants/needs to do a draw we need to fail
    if (kNoDraw_DrawPreference != *drawPreference) {
        return false;
    }

    if (dstSurface->config() != srcConfig) {
        // TODO: This should fall back to drawing or copying to change config of dstSurface to
        // match that of srcConfig.
        return false;
    }

    return true;
}

bool GrVkGpu::onWritePixels(GrSurface* surface,
                            int left, int top, int width, int height,
                            GrPixelConfig config,
                            const SkTArray<GrMipLevel>& texels) {
    GrVkTexture* vkTex = static_cast<GrVkTexture*>(surface->asTexture());
    if (!vkTex) {
        return false;
    }

    // TODO: We're ignoring MIP levels here.
    if (texels.empty() || !texels.begin()->fPixels) {
        return false;
    }

    // We assume Vulkan doesn't do sRGB <-> linear conversions when reading and writing pixels.
    if (GrPixelConfigIsSRGB(surface->config()) != GrPixelConfigIsSRGB(config)) {
        return false;
    }

    bool success = false;
    if (GrPixelConfigIsCompressed(vkTex->desc().fConfig)) {
        // We check that config == desc.fConfig in GrGpu::getWritePixelsInfo()
        SkASSERT(config == vkTex->desc().fConfig);
        // TODO: add compressed texture support
        // delete the following two lines and uncomment the two after that when ready
        vkTex->unref();
        return false;
        //success = this->uploadCompressedTexData(vkTex->desc(), buffer, false, left, top, width,
        //                                        height);
    } else {
        bool linearTiling = vkTex->isLinearTiled();
        if (linearTiling && VK_IMAGE_LAYOUT_PREINITIALIZED != vkTex->currentLayout()) {
            // Need to change the layout to general in order to perform a host write
            VkImageLayout layout = vkTex->currentLayout();
            VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
            VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_HOST_BIT;
            VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
            VkAccessFlags dstAccessMask = VK_ACCESS_HOST_WRITE_BIT;
            vkTex->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_GENERAL,
                                  srcAccessMask,
                                  dstAccessMask,
                                  srcStageMask,
                                  dstStageMask,
                                  false);
        }
        success = this->uploadTexData(vkTex, left, top, width, height, config,
                                      texels.begin()->fPixels, texels.begin()->fRowBytes);
    }

    if (success) {
        vkTex->texturePriv().dirtyMipMaps(true);
        return true;
    }

    return false;
}
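
// onWritePixels above takes one of two upload paths, both implemented in
// uploadTexData below: a linear-tiled image is transitioned to a host-writable
// layout so its memory can be mapped and written row by row, while an
// optimal-tiled image is filled by staging the pixels in a GrVkTransferBuffer
// and recording a buffer-to-image copy on the current command buffer.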
288
jvanverthb0ec9832016-04-20 05:54:01 -0700289
290bool GrVkGpu::onTransferPixels(GrTexture* texture,
291 int left, int top, int width, int height,
292 GrPixelConfig config, GrBuffer* transferBuffer,
293 size_t bufferOffset, size_t rowBytes) {
294 GrVkTexture* vkTex = static_cast<GrVkTexture*>(texture);
295 if (!vkTex) {
296 return false;
297 }
298 GrVkTransferBuffer* vkBuffer = static_cast<GrVkTransferBuffer*>(transferBuffer);
299 if (!vkBuffer) {
300 return false;
301 }
302
303 // We assume Vulkan doesn't do sRGB <-> linear conversions when reading and writing pixels.
304 if (GrPixelConfigIsSRGB(texture->config()) != GrPixelConfigIsSRGB(config)) {
305 return false;
306 }
307
308 // TODO: Handle y axis flip via copy to temp image, then blit to final
309 if (kBottomLeft_GrSurfaceOrigin == vkTex->origin()) {
310 return false;
311 }
312
313 bool success = false;
314 if (GrPixelConfigIsCompressed(vkTex->desc().fConfig)) {
315 // We check that config == desc.fConfig in GrGpu::getWritePixelsInfo()
316 SkASSERT(config == vkTex->desc().fConfig);
317 // TODO: add compressed texture support
318 // delete the following two lines and uncomment the two after that when ready
319 vkTex->unref();
320 return false;
321 //success = this->uploadCompressedTexData(vkTex->desc(), buffer, false, left, top, width,
322 // height);
323 } else {
324 // make sure the unmap has finished
325 vkBuffer->addMemoryBarrier(this,
326 VK_ACCESS_HOST_WRITE_BIT,
327 VK_ACCESS_TRANSFER_READ_BIT,
328 VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
329 VK_PIPELINE_STAGE_TRANSFER_BIT,
330 false);
331
332 // Set up copy region
333 size_t bpp = GrBytesPerPixel(config);
334
335 VkBufferImageCopy region;
336 memset(&region, 0, sizeof(VkBufferImageCopy));
337 region.bufferOffset = bufferOffset;
338 region.bufferRowLength = (uint32_t)(rowBytes/bpp);
339 region.bufferImageHeight = 0;
340 region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
341 region.imageOffset = { left, top, 0 };
342 region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };
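
        // Note: bufferRowLength and bufferImageHeight above are specified in
        // texels rather than bytes (hence rowBytes/bpp), and a value of zero
        // tells Vulkan the data is tightly packed according to imageExtent.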

        // Change layout of our target so it can be copied to
        VkImageLayout layout = vkTex->currentLayout();
        VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
        VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
        vkTex->setImageLayout(this,
                              VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                              srcAccessMask,
                              dstAccessMask,
                              srcStageMask,
                              dstStageMask,
                              false);

        // Copy the buffer to the image
        fCurrentCmdBuffer->copyBufferToImage(this,
                                             vkBuffer,
                                             vkTex,
                                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                             1,
                                             &region);

        // Submit the current command buffer to the Queue
        this->submitCommandBuffer(kSkip_SyncQueue);
        success = true;
    }

    if (success) {
        vkTex->texturePriv().dirtyMipMaps(true);
        return true;
    }

    return false;
}

bool GrVkGpu::uploadTexData(GrVkTexture* tex,
                            int left, int top, int width, int height,
                            GrPixelConfig dataConfig,
                            const void* data,
                            size_t rowBytes) {
    SkASSERT(data);

    // If we're uploading compressed data then we should be using uploadCompressedTexData
    SkASSERT(!GrPixelConfigIsCompressed(dataConfig));

    bool linearTiling = tex->isLinearTiled();

    size_t bpp = GrBytesPerPixel(dataConfig);

    const GrSurfaceDesc& desc = tex->desc();

    if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
                                               &width, &height, &data, &rowBytes)) {
        return false;
    }
    size_t trimRowBytes = width * bpp;
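
    // A worked example of the three row sizes in play (a sketch assuming a
    // 100-pixel-wide kRGBA_8888_GrPixelConfig upload): trimRowBytes is the
    // tight row size, 100 * 4 = 400 bytes; rowBytes is the caller's source
    // pitch, >= 400 if the source rows are padded; and layout.rowPitch below
    // is the driver's pitch for a linear image, which may be rounded up for
    // alignment (e.g. to 512). Rows are copied individually unless all the
    // relevant pitches match.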

    if (linearTiling) {
        SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == tex->currentLayout() ||
                 VK_IMAGE_LAYOUT_GENERAL == tex->currentLayout());
        const VkImageSubresource subres = {
            VK_IMAGE_ASPECT_COLOR_BIT,
            0,  // mipLevel
            0,  // arraySlice
        };
        VkSubresourceLayout layout;
        VkResult err;

        const GrVkInterface* interface = this->vkInterface();

        GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
                                                        tex->textureImage(),
                                                        &subres,
                                                        &layout));

        int texTop = kBottomLeft_GrSurfaceOrigin == desc.fOrigin ? tex->height() - top - height
                                                                 : top;
        VkDeviceSize offset = texTop*layout.rowPitch + left*bpp;
        VkDeviceSize size = height*layout.rowPitch;
        void* mapPtr;
        err = GR_VK_CALL(interface, MapMemory(fDevice, tex->textureMemory(), offset, size, 0,
                                              &mapPtr));
        if (err) {
            return false;
        }

        if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
            // copy into buffer by rows
            const char* srcRow = reinterpret_cast<const char*>(data);
            char* dstRow = reinterpret_cast<char*>(mapPtr) + (height - 1)*layout.rowPitch;
            for (int y = 0; y < height; y++) {
                memcpy(dstRow, srcRow, trimRowBytes);
                srcRow += rowBytes;
                dstRow -= layout.rowPitch;
            }
        } else {
            // If there is no padding on the src (rowBytes) or dst (layout.rowPitch) we can memcpy
            if (trimRowBytes == rowBytes && trimRowBytes == layout.rowPitch) {
                memcpy(mapPtr, data, trimRowBytes * height);
            } else {
                SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), data, rowBytes,
                             trimRowBytes, height);
            }
        }

        GR_VK_CALL(interface, UnmapMemory(fDevice, tex->textureMemory()));
    } else {
        GrVkTransferBuffer* transferBuffer =
            GrVkTransferBuffer::Create(this, trimRowBytes * height, GrVkBuffer::kCopyRead_Type);

        void* mapPtr = transferBuffer->map();

        if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
            // copy into buffer by rows
            const char* srcRow = reinterpret_cast<const char*>(data);
            char* dstRow = reinterpret_cast<char*>(mapPtr) + (height - 1)*trimRowBytes;
            for (int y = 0; y < height; y++) {
                memcpy(dstRow, srcRow, trimRowBytes);
                srcRow += rowBytes;
                dstRow -= trimRowBytes;
            }
        } else {
            // If there is no padding on the src data rows, we can do a single memcpy
            if (trimRowBytes == rowBytes) {
                memcpy(mapPtr, data, trimRowBytes * height);
            } else {
                SkRectMemcpy(mapPtr, trimRowBytes, data, rowBytes, trimRowBytes, height);
            }
        }

        transferBuffer->unmap();

        // make sure the unmap has finished
        transferBuffer->addMemoryBarrier(this,
                                         VK_ACCESS_HOST_WRITE_BIT,
                                         VK_ACCESS_TRANSFER_READ_BIT,
                                         VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
                                         VK_PIPELINE_STAGE_TRANSFER_BIT,
                                         false);

        // Set up copy region
        bool flipY = kBottomLeft_GrSurfaceOrigin == tex->origin();
        VkOffset3D offset = {
            left,
            flipY ? tex->height() - top - height : top,
            0
        };

        VkBufferImageCopy region;
        memset(&region, 0, sizeof(VkBufferImageCopy));
        region.bufferOffset = 0;
        region.bufferRowLength = width;
        region.bufferImageHeight = height;
        region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
        region.imageOffset = offset;
        region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

        // Change layout of our target so it can be copied to
        VkImageLayout layout = tex->currentLayout();
        VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
        VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
        tex->setImageLayout(this,
                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                            srcAccessMask,
                            dstAccessMask,
                            srcStageMask,
                            dstStageMask,
                            false);

        // Copy the buffer to the image
        fCurrentCmdBuffer->copyBufferToImage(this,
                                             transferBuffer,
                                             tex,
                                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                             1,
                                             &region);

        // Submit the current command buffer to the Queue
        this->submitCommandBuffer(kSkip_SyncQueue);

        transferBuffer->unref();
    }

    return true;
}

////////////////////////////////////////////////////////////////////////////////
GrTexture* GrVkGpu::onCreateTexture(const GrSurfaceDesc& desc, GrGpuResource::LifeCycle lifeCycle,
                                    const SkTArray<GrMipLevel>& texels) {
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);

    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(desc.fConfig, &pixelFormat)) {
        return nullptr;
    }

    if (!fVkCaps->isConfigTexturable(desc.fConfig)) {
        return nullptr;
    }

    bool linearTiling = false;
    if (SkToBool(desc.fFlags & kZeroCopy_GrSurfaceFlag)) {
        if (fVkCaps->isConfigTexurableLinearly(desc.fConfig) &&
            (!renderTarget || fVkCaps->isConfigRenderableLinearly(desc.fConfig, false))) {
            linearTiling = true;
        } else {
            return nullptr;
        }
    }
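
    // Tiling trade-off (general Vulkan behavior, not specific to this file):
    // VK_IMAGE_TILING_LINEAR images can be mapped and written by the host
    // directly, but fewer configs support them and sampling from them is
    // typically slower; everything else uses VK_IMAGE_TILING_OPTIMAL and is
    // filled via a staging transfer buffer in uploadTexData().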

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    if (renderTarget) {
        usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    }

    // For now we will set VK_IMAGE_USAGE_TRANSFER_DST_BIT and
    // VK_IMAGE_USAGE_TRANSFER_SRC_BIT on every texture since we do not know whether or not we
    // will be using this texture in some copy or not. Also this assumes, as is the current case,
    // that all render targets in Vulkan are also textures. If we change this practice of setting
    // both bits, we must make sure to set the destination bit if we are uploading srcData to the
    // texture.
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VkFlags memProps = (!texels.empty() && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
                                                           VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

    // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is
    // requested, this ImageDesc describes the resolved texture. Therefore we always have samples
    // set to 1.
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = pixelFormat;
    imageDesc.fWidth = desc.fWidth;
    imageDesc.fHeight = desc.fHeight;
    imageDesc.fLevels = 1;
    imageDesc.fSamples = 1;
    imageDesc.fImageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = usageFlags;
    imageDesc.fMemProps = memProps;

    GrVkTexture* tex;
    if (renderTarget) {
        tex = GrVkTextureRenderTarget::CreateNewTextureRenderTarget(this, desc, lifeCycle,
                                                                    imageDesc);
    } else {
        tex = GrVkTexture::CreateNewTexture(this, desc, lifeCycle, imageDesc);
    }

    if (!tex) {
        return nullptr;
    }

    // TODO: We're ignoring MIP levels here.
    if (!texels.empty()) {
        SkASSERT(texels.begin()->fPixels);
        if (!this->uploadTexData(tex, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
                                 texels.begin()->fPixels, texels.begin()->fRowBytes)) {
            tex->unref();
            return nullptr;
        }
    }

    return tex;
}

////////////////////////////////////////////////////////////////////////////////

static GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin) {
    // By default, all textures in Vk use TopLeft
    if (kDefault_GrSurfaceOrigin == origin) {
        return kTopLeft_GrSurfaceOrigin;
    } else {
        return origin;
    }
}

GrTexture* GrVkGpu::onWrapBackendTexture(const GrBackendTextureDesc& desc,
                                         GrWrapOwnership ownership) {
    VkFormat format;
    if (!GrPixelConfigToVkFormat(desc.fConfig, &format)) {
        return nullptr;
    }

    if (0 == desc.fTextureHandle) {
        return nullptr;
    }

    int maxSize = this->caps()->maxTextureSize();
    if (desc.fWidth > maxSize || desc.fHeight > maxSize) {
        return nullptr;
    }

    const GrVkTextureInfo* info = reinterpret_cast<const GrVkTextureInfo*>(desc.fTextureHandle);
    if (VK_NULL_HANDLE == info->fImage || VK_NULL_HANDLE == info->fAlloc) {
        return nullptr;
    }

    GrGpuResource::LifeCycle lifeCycle = (kAdopt_GrWrapOwnership == ownership)
                                                 ? GrGpuResource::kAdopted_LifeCycle
                                                 : GrGpuResource::kBorrowed_LifeCycle;
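
    // Ownership semantics: kAdopted_LifeCycle means Skia takes over the
    // wrapped VkImage and will destroy it and free its memory when the
    // texture is released; kBorrowed_LifeCycle leaves destruction to the
    // client, who must keep the backing objects alive while Skia uses them.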

    GrSurfaceDesc surfDesc;
    // next line relies on GrBackendTextureDesc's flags matching GrTexture's
    surfDesc.fFlags = (GrSurfaceFlags)desc.fFlags;
    surfDesc.fWidth = desc.fWidth;
    surfDesc.fHeight = desc.fHeight;
    surfDesc.fConfig = desc.fConfig;
    surfDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount());
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag);
    // In GL, Chrome assumes all textures are BottomLeft
    // In VK, we don't have this restriction
    surfDesc.fOrigin = resolve_origin(desc.fOrigin);

    GrVkTexture* texture = nullptr;
    if (renderTarget) {
        texture = GrVkTextureRenderTarget::CreateWrappedTextureRenderTarget(this, surfDesc,
                                                                            lifeCycle, format,
                                                                            info);
    } else {
        texture = GrVkTexture::CreateWrappedTexture(this, surfDesc, lifeCycle, format,
                                                    info);
    }
    if (!texture) {
        return nullptr;
    }

    return texture;
}

GrRenderTarget* GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& wrapDesc,
                                                   GrWrapOwnership ownership) {
    const GrVkTextureInfo* info =
        reinterpret_cast<const GrVkTextureInfo*>(wrapDesc.fRenderTargetHandle);
    if (VK_NULL_HANDLE == info->fImage ||
        (VK_NULL_HANDLE == info->fAlloc && kAdopt_GrWrapOwnership == ownership)) {
        return nullptr;
    }

    GrGpuResource::LifeCycle lifeCycle = (kAdopt_GrWrapOwnership == ownership)
                                                 ? GrGpuResource::kAdopted_LifeCycle
                                                 : GrGpuResource::kBorrowed_LifeCycle;

    GrSurfaceDesc desc;
    desc.fConfig = wrapDesc.fConfig;
    desc.fFlags = kCheckAllocation_GrSurfaceFlag;
    desc.fWidth = wrapDesc.fWidth;
    desc.fHeight = wrapDesc.fHeight;
    desc.fSampleCnt = SkTMin(wrapDesc.fSampleCnt, this->caps()->maxSampleCount());

    desc.fOrigin = resolve_origin(wrapDesc.fOrigin);

    GrVkRenderTarget* tgt = GrVkRenderTarget::CreateWrappedRenderTarget(this, desc,
                                                                        lifeCycle,
                                                                        info);
    if (tgt && wrapDesc.fStencilBits) {
        if (!createStencilAttachmentForRenderTarget(tgt, desc.fWidth, desc.fHeight)) {
            tgt->unref();
            return nullptr;
        }
    }
    return tgt;
}

////////////////////////////////////////////////////////////////////////////////

void GrVkGpu::bindGeometry(const GrPrimitiveProcessor& primProc,
                           const GrNonInstancedMesh& mesh) {
    GrVkVertexBuffer* vbuf;
    vbuf = (GrVkVertexBuffer*)mesh.vertexBuffer();
    SkASSERT(vbuf);
    SkASSERT(!vbuf->isMapped());

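    // The barriers below make prior host (CPU) writes to the vertex and index
    // buffers visible to the vertex-input stage before the GPU fetches from
    // them; otherwise a draw could read stale data.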
    vbuf->addMemoryBarrier(this,
                           VK_ACCESS_HOST_WRITE_BIT,
                           VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
                           VK_PIPELINE_STAGE_HOST_BIT,
                           VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                           false);

    fCurrentCmdBuffer->bindVertexBuffer(this, vbuf);

    if (mesh.isIndexed()) {
        GrVkIndexBuffer* ibuf = (GrVkIndexBuffer*)mesh.indexBuffer();
        SkASSERT(ibuf);
        SkASSERT(!ibuf->isMapped());

        ibuf->addMemoryBarrier(this,
                               VK_ACCESS_HOST_WRITE_BIT,
                               VK_ACCESS_INDEX_READ_BIT,
                               VK_PIPELINE_STAGE_HOST_BIT,
                               VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                               false);

        fCurrentCmdBuffer->bindIndexBuffer(this, ibuf);
    }
}

////////////////////////////////////////////////////////////////////////////////

GrStencilAttachment* GrVkGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt,
                                                                     int width,
                                                                     int height) {
    SkASSERT(width >= rt->width());
    SkASSERT(height >= rt->height());

    int samples = rt->numStencilSamples();

    const GrVkCaps::StencilFormat& sFmt = this->vkCaps().preferedStencilFormat();

    GrVkStencilAttachment* stencil(GrVkStencilAttachment::Create(this,
                                                                 GrGpuResource::kCached_LifeCycle,
                                                                 width,
                                                                 height,
                                                                 samples,
                                                                 sFmt));
    fStats.incStencilAttachmentCreates();
    return stencil;
}

////////////////////////////////////////////////////////////////////////////////

GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, int h,
                                                         GrPixelConfig config) {

    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
        return 0;
    }

    bool linearTiling = false;
    if (!fVkCaps->isConfigTexturable(config)) {
        return 0;
    }

    if (fVkCaps->isConfigTexurableLinearly(config)) {
        linearTiling = true;
    }

    // Currently this is not supported since it requires a copy which has not yet been implemented.
    if (srcData && !linearTiling) {
        return 0;
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VkFlags memProps = (srcData && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
                                                   VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

    VkImage image = VK_NULL_HANDLE;
    VkDeviceMemory alloc = VK_NULL_HANDLE;

    VkImageTiling imageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
    VkImageLayout initialLayout = (VK_IMAGE_TILING_LINEAR == imageTiling)
                                          ? VK_IMAGE_LAYOUT_PREINITIALIZED
                                          : VK_IMAGE_LAYOUT_UNDEFINED;

    // Create Image
    VkSampleCountFlagBits vkSamples;
    if (!GrSampleCountToVkSampleCount(1, &vkSamples)) {
        return 0;
    }

    const VkImageCreateInfo imageCreateInfo = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,  // sType
        NULL,                                 // pNext
        0,                                    // VkImageCreateFlags
        VK_IMAGE_TYPE_2D,                     // VkImageType
        pixelFormat,                          // VkFormat
        { (uint32_t) w, (uint32_t) h, 1 },    // VkExtent3D
        1,                                    // mipLevels
        1,                                    // arrayLayers
        vkSamples,                            // samples
        imageTiling,                          // VkImageTiling
        usageFlags,                           // VkImageUsageFlags
        VK_SHARING_MODE_EXCLUSIVE,            // VkSharingMode
        0,                                    // queueFamilyCount
        0,                                    // pQueueFamilyIndices
        initialLayout                         // initialLayout
    };

    GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateImage(this->device(), &imageCreateInfo,
                                                         nullptr, &image));

    if (!GrVkMemory::AllocAndBindImageMemory(this, image, memProps, &alloc)) {
        VK_CALL(DestroyImage(this->device(), image, nullptr));
        return 0;
    }

    if (srcData) {
        if (linearTiling) {
            const VkImageSubresource subres = {
                VK_IMAGE_ASPECT_COLOR_BIT,
                0,  // mipLevel
                0,  // arraySlice
            };
            VkSubresourceLayout layout;
            VkResult err;

            VK_CALL(GetImageSubresourceLayout(fDevice, image, &subres, &layout));

            void* mapPtr;
            err = VK_CALL(MapMemory(fDevice, alloc, 0, layout.rowPitch * h, 0, &mapPtr));
            if (err) {
                VK_CALL(FreeMemory(this->device(), alloc, nullptr));
                VK_CALL(DestroyImage(this->device(), image, nullptr));
                return 0;
            }

            size_t bpp = GrBytesPerPixel(config);
            size_t rowCopyBytes = bpp * w;
            // If there is no padding on dst (layout.rowPitch) we can do a single memcpy.
            // This assumes the srcData comes in with no padding.
            if (rowCopyBytes == layout.rowPitch) {
                memcpy(mapPtr, srcData, rowCopyBytes * h);
            } else {
                SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), srcData, rowCopyBytes,
                             rowCopyBytes, h);
            }
            VK_CALL(UnmapMemory(fDevice, alloc));
        } else {
            // TODO: Add support for copying to optimal tiling
            SkASSERT(false);
        }
    }

    GrVkTextureInfo* info = new GrVkTextureInfo;
    info->fImage = image;
    info->fAlloc = alloc;
    info->fImageTiling = imageTiling;
    info->fImageLayout = initialLayout;

    return (GrBackendObject)info;
}
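
// The GrBackendObject returned above is a pointer to a heap-allocated
// GrVkTextureInfo. isTestingOnlyBackendTexture() sanity-checks it, and
// deleteTestingOnlyBackendTexture() below destroys the VkImage, frees its
// memory, and deletes the struct.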

bool GrVkGpu::isTestingOnlyBackendTexture(GrBackendObject id) const {
    const GrVkTextureInfo* backend = reinterpret_cast<const GrVkTextureInfo*>(id);

    if (backend && backend->fImage && backend->fAlloc) {
        VkMemoryRequirements req;
        memset(&req, 0, sizeof(req));
        GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
                                                                   backend->fImage,
                                                                   &req));
        // TODO: find a better check
        // This will probably fail with a different driver
        return (req.size > 0) && (req.size <= 8192 * 8192);
    }

    return false;
}

void GrVkGpu::deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandon) {
    const GrVkTextureInfo* backend = reinterpret_cast<const GrVkTextureInfo*>(id);

    if (backend) {
        if (!abandon) {
            // something in the command buffer may still be using this, so force submit
            this->submitCommandBuffer(kForce_SyncQueue);

            VK_CALL(FreeMemory(this->device(), backend->fAlloc, nullptr));
            VK_CALL(DestroyImage(this->device(), backend->fImage, nullptr));
        }
        delete backend;
    }
}

////////////////////////////////////////////////////////////////////////////////

void GrVkGpu::addMemoryBarrier(VkPipelineStageFlags srcStageMask,
                               VkPipelineStageFlags dstStageMask,
                               bool byRegion,
                               VkMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                     VkPipelineStageFlags dstStageMask,
                                     bool byRegion,
                                     VkBufferMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kBufferMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::addImageMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                    VkPipelineStageFlags dstStageMask,
                                    bool byRegion,
                                    VkImageMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kImageMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::finishDrawTarget() {
    // Submit the current command buffer to the Queue
    this->submitCommandBuffer(kSkip_SyncQueue);
}

void GrVkGpu::clearStencil(GrRenderTarget* target) {
    if (nullptr == target) {
        return;
    }
    GrStencilAttachment* stencil = target->renderTargetPriv().getStencilAttachment();
    GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));

    VkImageLayout origDstLayout = vkStencil->currentLayout();

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    vkStencil->setImageLayout(this,
                              VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                              srcAccessMask,
                              dstAccessMask,
                              srcStageMask,
                              dstStageMask,
                              false);

    VkImageSubresourceRange subRange;
    memset(&subRange, 0, sizeof(VkImageSubresourceRange));
    subRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    subRange.baseMipLevel = 0;
    subRange.levelCount = 1;
    subRange.baseArrayLayer = 0;
    subRange.layerCount = 1;

    // TODO: I imagine that most times we want to clear a stencil it will be at the beginning of a
    // draw. Thus we should look into using the load op functions on the render pass to clear out
    // the stencil there.
    fCurrentCmdBuffer->clearDepthStencilImage(this, vkStencil, &vkStencilColor, 1, &subRange);
}

void GrVkGpu::onClearStencilClip(GrRenderTarget* target, const SkIRect& rect, bool insideClip) {
    SkASSERT(target);

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
    GrStencilAttachment* sb = target->renderTargetPriv().getStencilAttachment();
    GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)sb;

    // this should only be called internally when we know we have a
    // stencil buffer.
    SkASSERT(sb);
    int stencilBitCount = sb->bits();

    // The contract with the callers does not guarantee that we preserve all bits in the stencil
    // during this clear. Thus we will clear the entire stencil to the desired value.

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
    if (insideClip) {
        vkStencilColor.stencil = (1 << (stencilBitCount - 1));
    } else {
        vkStencilColor.stencil = 0;
    }
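
    // Skia reserves the high stencil bit for clip state, so "inside the clip"
    // sets just that bit (0x80 for an 8-bit stencil) while "outside" clears
    // every bit.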

    VkImageLayout origDstLayout = vkStencil->currentLayout();
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    VkPipelineStageFlags srcStageMask =
        GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    vkStencil->setImageLayout(this,
                              VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                              srcAccessMask,
                              dstAccessMask,
                              srcStageMask,
                              dstStageMask,
                              false);

    VkClearRect clearRect;
    // Flip rect if necessary
    SkIRect vkRect = rect;

    if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) {
        vkRect.fTop = vkRT->height() - rect.fBottom;
        vkRect.fBottom = vkRT->height() - rect.fTop;
    }

    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };

    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
    SkASSERT(renderPass);
    fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

    uint32_t stencilIndex;
    SkAssertResult(renderPass->stencilAttachmentIndex(&stencilIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    attachment.colorAttachment = 0;  // this value shouldn't matter
    attachment.clearValue.depthStencil = vkStencilColor;

    fCurrentCmdBuffer->clearAttachments(this, 1, &attachment, 1, &clearRect);
    fCurrentCmdBuffer->endRenderPass(this);
}

void GrVkGpu::onClear(GrRenderTarget* target, const SkIRect& rect, GrColor color) {
    // parent class should never let us get here with no RT
    SkASSERT(target);

    VkClearColorValue vkColor;
    GrColorToRGBAFloat(color, vkColor.float32);

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
    VkImageLayout origDstLayout = vkRT->currentLayout();

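    // Two clear strategies follow: a partial-surface clear must be done inside
    // a render pass with vkCmdClearAttachments, while a full-surface clear can
    // use vkCmdClearColorImage, which must be issued outside of a render pass.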
    if (rect.width() != target->width() || rect.height() != target->height()) {
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
        VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
        VkPipelineStageFlags srcStageMask =
            GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
        vkRT->setImageLayout(this,
                             VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                             srcAccessMask,
                             dstAccessMask,
                             srcStageMask,
                             dstStageMask,
                             false);

        VkClearRect clearRect;
        // Flip rect if necessary
        SkIRect vkRect = rect;
        if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) {
            vkRect.fTop = vkRT->height() - rect.fBottom;
            vkRect.fBottom = vkRT->height() - rect.fTop;
        }
        clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
        clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
        clearRect.baseArrayLayer = 0;
        clearRect.layerCount = 1;

        const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
        SkASSERT(renderPass);
        fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

        uint32_t colorIndex;
        SkAssertResult(renderPass->colorAttachmentIndex(&colorIndex));

        VkClearAttachment attachment;
        attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        attachment.colorAttachment = colorIndex;
        attachment.clearValue.color = vkColor;

        fCurrentCmdBuffer->clearAttachments(this, 1, &attachment, 1, &clearRect);
        fCurrentCmdBuffer->endRenderPass(this);
        return;
    }

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    vkRT->setImageLayout(this,
                         VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                         srcAccessMask,
                         dstAccessMask,
                         srcStageMask,
                         dstStageMask,
                         false);

    VkImageSubresourceRange subRange;
    memset(&subRange, 0, sizeof(VkImageSubresourceRange));
    subRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    subRange.baseMipLevel = 0;
    subRange.levelCount = 1;
    subRange.baseArrayLayer = 0;
    subRange.layerCount = 1;

    // In the future we may not actually be doing this type of clear at all. If we are inside a
    // render pass or doing a non full clear then we will use CmdClearColorAttachment. The more
    // common use case will be clearing an attachment at the start of a render pass, in which case
    // we will use the clear load ops.
    fCurrentCmdBuffer->clearColorImage(this,
                                       vkRT,
                                       &vkColor,
                                       1, &subRange);
}

inline bool can_copy_image(const GrSurface* dst,
                           const GrSurface* src,
                           const GrVkGpu* gpu) {
    // Currently we don't support msaa
    if ((dst->asRenderTarget() && dst->asRenderTarget()->numColorSamples() > 1) ||
        (src->asRenderTarget() && src->asRenderTarget()->numColorSamples() > 1)) {
        return false;
    }

    // We require that all vulkan GrSurfaces have been created with transfer_dst and transfer_src
    // as image usage flags.
    if (src->origin() == dst->origin() &&
        GrBytesPerPixel(src->config()) == GrBytesPerPixel(dst->config())) {
        return true;
    }

    // How does msaa play into this? If a VkTexture is multisampled, are we copying the
    // multisampled or the resolved image here? If multisampled, Vulkan requires the sample counts
    // to be the same.

    return false;
}

void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst,
                                     GrSurface* src,
                                     GrVkImage* dstImage,
                                     GrVkImage* srcImage,
                                     const SkIRect& srcRect,
                                     const SkIPoint& dstPoint) {
    SkASSERT(can_copy_image(dst, src, this));

    VkImageLayout origDstLayout = dstImage->currentLayout();
    VkImageLayout origSrcLayout = srcImage->currentLayout();

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    // These flags are for flushing/invalidating caches and for the dst image it doesn't matter if
    // the cache is flushed since it is only being written to.
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    dstImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             srcAccessMask,
                             dstAccessMask,
                             srcStageMask,
                             dstStageMask,
                             false);

    srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origSrcLayout);
    dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origSrcLayout);
    dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;

    srcImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                             srcAccessMask,
                             dstAccessMask,
                             srcStageMask,
                             dstStageMask,
                             false);

    // Flip rect if necessary
    SkIRect srcVkRect = srcRect;
    int32_t dstY = dstPoint.fY;

    if (kBottomLeft_GrSurfaceOrigin == src->origin()) {
        SkASSERT(kBottomLeft_GrSurfaceOrigin == dst->origin());
        srcVkRect.fTop = src->height() - srcRect.fBottom;
        srcVkRect.fBottom = src->height() - srcRect.fTop;
        dstY = dst->height() - dstPoint.fY - srcVkRect.height();
    }

    VkImageCopy copyRegion;
    memset(&copyRegion, 0, sizeof(VkImageCopy));
    copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.srcOffset = { srcVkRect.fLeft, srcVkRect.fTop, 0 };
    copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.dstOffset = { dstPoint.fX, dstY, 0 };
    copyRegion.extent = { (uint32_t)srcVkRect.width(), (uint32_t)srcVkRect.height(), 1 };

    fCurrentCmdBuffer->copyImage(this,
                                 srcImage,
                                 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                 dstImage,
                                 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                 1,
                                 &copyRegion);
}

inline bool can_copy_as_blit(const GrSurface* dst,
                             const GrSurface* src,
                             const GrVkImage* dstImage,
                             const GrVkImage* srcImage,
                             const GrVkGpu* gpu) {
    // We require that all vulkan GrSurfaces have been created with transfer_dst and transfer_src
    // as image usage flags.
    const GrVkCaps& caps = gpu->vkCaps();
    if (!caps.configCanBeDstofBlit(dst->config(), dstImage->isLinearTiled()) ||
        !caps.configCanBeSrcofBlit(src->config(), srcImage->isLinearTiled())) {
        return false;
    }

    // We cannot blit images that are multisampled. Will need to figure out if we can blit the
    // resolved msaa though.
    if ((dst->asRenderTarget() && dst->asRenderTarget()->numColorSamples() > 1) ||
        (src->asRenderTarget() && src->asRenderTarget()->numColorSamples() > 1)) {
        return false;
    }

    return true;
}

void GrVkGpu::copySurfaceAsBlit(GrSurface* dst,
                                GrSurface* src,
                                GrVkImage* dstImage,
                                GrVkImage* srcImage,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint) {
    SkASSERT(can_copy_as_blit(dst, src, dstImage, srcImage, this));

    VkImageLayout origDstLayout = dstImage->currentLayout();
    VkImageLayout origSrcLayout = srcImage->currentLayout();

    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    dstImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             srcAccessMask,
                             dstAccessMask,
                             srcStageMask,
                             dstStageMask,
                             false);

    srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origSrcLayout);
    dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;

    srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origSrcLayout);
    dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;

    srcImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                             srcAccessMask,
                             dstAccessMask,
                             srcStageMask,
                             dstStageMask,
                             false);

    // Flip rect if necessary
    SkIRect srcVkRect;
    srcVkRect.fLeft = srcRect.fLeft;
    srcVkRect.fRight = srcRect.fRight;
    SkIRect dstRect;
    dstRect.fLeft = dstPoint.fX;
    dstRect.fRight = dstPoint.fX + srcRect.width();

    if (kBottomLeft_GrSurfaceOrigin == src->origin()) {
        srcVkRect.fTop = src->height() - srcRect.fBottom;
        srcVkRect.fBottom = src->height() - srcRect.fTop;
    } else {
        srcVkRect.fTop = srcRect.fTop;
        srcVkRect.fBottom = srcRect.fBottom;
    }

    if (kBottomLeft_GrSurfaceOrigin == dst->origin()) {
        dstRect.fTop = dst->height() - dstPoint.fY - srcVkRect.height();
    } else {
        dstRect.fTop = dstPoint.fY;
    }
    dstRect.fBottom = dstRect.fTop + srcVkRect.height();

    // If we have different origins, we need to flip the top and bottom of the dst rect so that we
    // get the correct orientation of the copied data.
    if (src->origin() != dst->origin()) {
        SkTSwap(dstRect.fTop, dstRect.fBottom);
    }
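
    // vkCmdBlitImage treats the offsets as corners of a box, so a dst rect
    // with top and bottom swapped (as above) makes the blit write rows in
    // reverse order and performs the y-flip as part of the copy.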

    VkImageBlit blitRegion;
    memset(&blitRegion, 0, sizeof(VkImageBlit));
    blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    blitRegion.srcOffsets[0] = { srcVkRect.fLeft, srcVkRect.fTop, 0 };
    blitRegion.srcOffsets[1] = { srcVkRect.fRight, srcVkRect.fBottom, 1 };
    blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    blitRegion.dstOffsets[0] = { dstRect.fLeft, dstRect.fTop, 0 };
    blitRegion.dstOffsets[1] = { dstRect.fRight, dstRect.fBottom, 1 };

    fCurrentCmdBuffer->blitImage(this,
                                 srcImage,
                                 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                 dstImage,
                                 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                 1,
                                 &blitRegion,
                                 VK_FILTER_NEAREST);  // We never scale so any filter works here
}

inline bool can_copy_as_draw(const GrSurface* dst,
                             const GrSurface* src,
                             const GrVkGpu* gpu) {
    return false;
}

void GrVkGpu::copySurfaceAsDraw(GrSurface* dst,
                                GrSurface* src,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint) {
    SkASSERT(false);
}

bool GrVkGpu::onCopySurface(GrSurface* dst,
                            GrSurface* src,
                            const SkIRect& srcRect,
                            const SkIPoint& dstPoint) {
    GrVkImage* dstImage;
    GrVkImage* srcImage;
    if (dst->asTexture()) {
        dstImage = static_cast<GrVkTexture*>(dst->asTexture());
    } else {
        SkASSERT(dst->asRenderTarget());
        dstImage = static_cast<GrVkRenderTarget*>(dst->asRenderTarget());
    }
    if (src->asTexture()) {
        srcImage = static_cast<GrVkTexture*>(src->asTexture());
    } else {
        SkASSERT(src->asRenderTarget());
        srcImage = static_cast<GrVkRenderTarget*>(src->asRenderTarget());
    }

    if (can_copy_image(dst, src, this)) {
        this->copySurfaceAsCopyImage(dst, src, dstImage, srcImage, srcRect, dstPoint);
        return true;
    }

    if (can_copy_as_blit(dst, src, dstImage, srcImage, this)) {
        this->copySurfaceAsBlit(dst, src, dstImage, srcImage, srcRect, dstPoint);
        return true;
    }

    if (can_copy_as_draw(dst, src, this)) {
        this->copySurfaceAsDraw(dst, src, srcRect, dstPoint);
        return true;
    }

    return false;
}

bool GrVkGpu::initCopySurfaceDstDesc(const GrSurface* src, GrSurfaceDesc* desc) const {
    // Currently we don't support msaa
    if (src->asRenderTarget() && src->asRenderTarget()->numColorSamples() > 1) {
        return false;
    }

    // This will support copying the dst as CopyImage since all of our surfaces require transferSrc
    // and transferDst usage flags in Vulkan.
    desc->fOrigin = src->origin();
    desc->fConfig = src->config();
    desc->fFlags = kNone_GrSurfaceFlags;
    return true;
}

void GrVkGpu::onGetMultisampleSpecs(GrRenderTarget* rt, const GrStencilSettings&,
                                    int* effectiveSampleCnt, SkAutoTDeleteArray<SkPoint>*) {
    // TODO: stub.
    SkASSERT(!this->caps()->sampleLocationsSupport());
    *effectiveSampleCnt = rt->desc().fSampleCnt;
}
1428
Greg Daniel164a9f02016-02-22 09:56:40 -05001429bool GrVkGpu::onGetReadPixelsInfo(GrSurface* srcSurface, int width, int height, size_t rowBytes,
1430 GrPixelConfig readConfig, DrawPreference* drawPreference,
1431 ReadPixelTempDrawInfo* tempDrawInfo) {
1432 // Currently we don't handle draws, so if the caller wants/needs to do a draw we need to fail
1433 if (kNoDraw_DrawPreference != *drawPreference) {
1434 return false;
1435 }
1436
1437 if (srcSurface->config() != readConfig) {
1438 // TODO: This should fall back to drawing or copying to change config of srcSurface to match
1439 // that of readConfig.
1440 return false;
1441 }
1442
1443 return true;
1444}
1445
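// Readback follows the usual Vulkan staging pattern: transition the image to
// TRANSFER_SRC_OPTIMAL, vkCmdCopyImageToBuffer into a host-visible transfer buffer, barrier the
// buffer from TRANSFER_WRITE to HOST_READ, then submit with a forced queue sync before mapping.
// The kForce_SyncQueue wait makes this a full pipeline stall, so reads are expensive.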
bool GrVkGpu::onReadPixels(GrSurface* surface,
                           int left, int top, int width, int height,
                           GrPixelConfig config,
                           void* buffer,
                           size_t rowBytes) {
    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
        return false;
    }

    GrVkTexture* tgt = static_cast<GrVkTexture*>(surface->asTexture());
    if (!tgt) {
        return false;
    }

    // Change the layout of our target so it can be used as a copy source.
    VkImageLayout layout = tgt->currentLayout();
    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
    tgt->setImageLayout(this,
                        VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                        srcAccessMask,
                        dstAccessMask,
                        srcStageMask,
                        dstStageMask,
                        false);

    GrVkTransferBuffer* transferBuffer =
        static_cast<GrVkTransferBuffer*>(this->createBuffer(rowBytes * height,
                                                            kXferGpuToCpu_GrBufferType,
                                                            kStream_GrAccessPattern));
    if (!transferBuffer) {
        // createBuffer can fail; bail out rather than dereference a null transfer buffer.
        return false;
    }

    bool flipY = kBottomLeft_GrSurfaceOrigin == surface->origin();
    VkOffset3D offset = {
        left,
        flipY ? surface->height() - top - height : top,
        0
    };

    // Copy the image to a buffer so we can map it to cpu memory.
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = 0;
    region.bufferRowLength = 0; // Forces RowLength to be imageExtent.width, i.e. tightly packed
                                // rows; the memcpy below therefore assumes the caller's rowBytes
                                // matches the tightly packed row size.
    region.bufferImageHeight = 0; // Forces height to be tightly packed. Only useful for 3d images.
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = offset;
    region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

    fCurrentCmdBuffer->copyImageToBuffer(this,
                                         tgt,
                                         VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                         transferBuffer,
                                         1,
                                         &region);

    // Make sure the copy to the buffer has finished before the host reads it.
    transferBuffer->addMemoryBarrier(this,
                                     VK_ACCESS_TRANSFER_WRITE_BIT,
                                     VK_ACCESS_HOST_READ_BIT,
                                     VK_PIPELINE_STAGE_TRANSFER_BIT,
                                     VK_PIPELINE_STAGE_HOST_BIT,
                                     false);

    // We need to submit the current command buffer to the Queue and make sure it finishes before
    // we can copy the data out of the buffer.
    this->submitCommandBuffer(kForce_SyncQueue);

    void* mappedMemory = transferBuffer->map();

    memcpy(buffer, mappedMemory, rowBytes * height);

    transferBuffer->unmap();
    transferBuffer->unref();

    if (flipY) {
        SkAutoSMalloc<32 * sizeof(GrColor)> scratch;
        size_t tightRowBytes = GrBytesPerPixel(config) * width;
        scratch.reset(tightRowBytes);
        void* tmpRow = scratch.get();
        // Flip y in-place by rows.
        const int halfY = height >> 1;
        char* top = reinterpret_cast<char*>(buffer);
        char* bottom = top + (height - 1) * rowBytes;
        for (int y = 0; y < halfY; y++) {
            memcpy(tmpRow, top, tightRowBytes);
            memcpy(top, bottom, tightRowBytes);
            memcpy(bottom, tmpRow, tightRowBytes);
            top += rowBytes;
            bottom -= rowBytes;
        }
    }

    return true;
}
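
// Looks up (or builds) a cached GrVkPipelineState matching this pipeline, primitive processor,
// primitive type, and render pass; uploads its uniform/texture data; and binds it plus the
// dynamic state to the current command buffer. Returns null if no compatible pipeline could be
// created, in which case the caller must skip the draw.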
sk_sp<GrVkPipelineState> GrVkGpu::prepareDrawState(const GrPipeline& pipeline,
                                                   const GrPrimitiveProcessor& primProc,
                                                   GrPrimitiveType primitiveType,
                                                   const GrVkRenderPass& renderPass) {
    sk_sp<GrVkPipelineState> pipelineState =
        fResourceProvider.findOrCreateCompatiblePipelineState(pipeline,
                                                              primProc,
                                                              primitiveType,
                                                              renderPass);
    if (!pipelineState) {
        return pipelineState;
    }

    pipelineState->setData(this, primProc, pipeline);

    pipelineState->bind(this, fCurrentCmdBuffer);

    GrVkPipeline::SetDynamicState(this, fCurrentCmdBuffer, pipeline);

    return pipelineState;
}

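// Records one logical draw: begin the target's simple render pass, transition the color (and,
// if in use, stencil) attachments to their attachment layouts, then issue a draw per
// non-instanced mesh, rebuilding the pipeline state whenever the primitive type changes.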
void GrVkGpu::onDraw(const GrPipeline& pipeline,
                     const GrPrimitiveProcessor& primProc,
                     const GrMesh* meshes,
                     int meshCount) {
    if (!meshCount) {
        return;
    }
    GrRenderTarget* rt = pipeline.getRenderTarget();
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
    const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
    SkASSERT(renderPass);

    fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);

    GrPrimitiveType primitiveType = meshes[0].primitiveType();
    sk_sp<GrVkPipelineState> pipelineState = this->prepareDrawState(pipeline,
                                                                    primProc,
                                                                    primitiveType,
                                                                    *renderPass);
    if (!pipelineState) {
        // Note: we bail out with the render pass still begun; endRenderPass is never reached on
        // this path.
        return;
    }

    // Change the layout of our render target so it can be used as the color attachment.
    VkImageLayout layout = vkRT->currentLayout();
    // Our color attachment is purely a destination and won't be read, so we don't need to flush
    // or invalidate any caches.
    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
    VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    vkRT->setImageLayout(this,
                         VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                         srcAccessMask,
                         dstAccessMask,
                         srcStageMask,
                         dstStageMask,
                         false);

    // If we are using a stencil attachment we also need to update its layout.
    if (!pipeline.getStencil().isDisabled()) {
        GrStencilAttachment* stencil = vkRT->renderTargetPriv().getStencilAttachment();
        GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
        VkImageLayout origDstLayout = vkStencil->currentLayout();
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
        VkAccessFlags dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
        VkPipelineStageFlags srcStageMask =
            GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
        vkStencil->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                                  srcAccessMask,
                                  dstAccessMask,
                                  srcStageMask,
                                  dstStageMask,
                                  false);
    }

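    // Draw each mesh. A GrMesh may decompose into several non-instanced meshes; they share one
    // pipeline state unless the primitive type changes, which forces a new pipeline since Vulkan
    // bakes the topology into the pipeline object.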
    for (int i = 0; i < meshCount; ++i) {
        if (GrXferBarrierType barrierType = pipeline.xferBarrierType(*this->caps())) {
            this->xferBarrier(pipeline.getRenderTarget(), barrierType);
        }

        const GrMesh& mesh = meshes[i];
        GrMesh::Iterator iter;
        const GrNonInstancedMesh* nonIdxMesh = iter.init(mesh);
        do {
            if (nonIdxMesh->primitiveType() != primitiveType) {
                // Technically we don't have to call this here (since there is a safety check in
                // pipelineState::setData), but this will allow for quicker freeing of resources
                // if the pipelineState sits in a cache for a while.
                pipelineState->freeTempResources(this);
                SkDEBUGCODE(pipelineState = nullptr);
                primitiveType = nonIdxMesh->primitiveType();
                pipelineState = this->prepareDrawState(pipeline,
                                                       primProc,
                                                       primitiveType,
                                                       *renderPass);
                if (!pipelineState) {
                    return;
                }
            }
            SkASSERT(pipelineState);
            this->bindGeometry(primProc, *nonIdxMesh);

            if (nonIdxMesh->isIndexed()) {
                fCurrentCmdBuffer->drawIndexed(this,
                                               nonIdxMesh->indexCount(),
                                               1,
                                               nonIdxMesh->startIndex(),
                                               nonIdxMesh->startVertex(),
                                               0);
            } else {
                fCurrentCmdBuffer->draw(this,
                                        nonIdxMesh->vertexCount(),
                                        1,
                                        nonIdxMesh->startVertex(),
                                        0);
            }

            fStats.incNumDraws();
        } while ((nonIdxMesh = iter.next()));
    }

    fCurrentCmdBuffer->endRenderPass(this);

    // Technically we don't have to call this here (since there is a safety check in
    // pipelineState::setData), but this will allow for quicker freeing of resources if the
    // pipelineState sits in a cache for a while.
    pipelineState->freeTempResources(this);

#if SWAP_PER_DRAW
    // Debug-only swap scaffolding (GL calls); vestigial in the Vulkan backend.
    glFlush();
#if defined(SK_BUILD_FOR_MAC)
    aglSwapBuffers(aglGetCurrentContext());
    int set_a_break_pt_here = 9;
    aglSwapBuffers(aglGetCurrentContext());
#elif defined(SK_BUILD_FOR_WIN32)
    SwapBuf();
    int set_a_break_pt_here = 9;
    SwapBuf();
#endif
#endif
}