blob: ee5d324dc8930701607d7cc4f5b08a901822f9ba [file] [log] [blame]
/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7
8#include "GrVkGpu.h"
9
10#include "GrContextOptions.h"
11#include "GrGeometryProcessor.h"
12#include "GrGpuResourceCacheAccess.h"
egdaniel0e1853c2016-03-17 11:35:45 -070013#include "GrMesh.h"
Greg Daniel164a9f02016-02-22 09:56:40 -050014#include "GrPipeline.h"
15#include "GrRenderTargetPriv.h"
16#include "GrSurfacePriv.h"
17#include "GrTexturePriv.h"
Greg Daniel164a9f02016-02-22 09:56:40 -050018
19#include "GrVkCommandBuffer.h"
egdaniel066df7c2016-06-08 14:02:27 -070020#include "GrVkGpuCommandBuffer.h"
Greg Daniel164a9f02016-02-22 09:56:40 -050021#include "GrVkImage.h"
22#include "GrVkIndexBuffer.h"
23#include "GrVkMemory.h"
24#include "GrVkPipeline.h"
egdaniel22281c12016-03-23 13:49:40 -070025#include "GrVkPipelineState.h"
Greg Daniel164a9f02016-02-22 09:56:40 -050026#include "GrVkRenderPass.h"
27#include "GrVkResourceProvider.h"
28#include "GrVkTexture.h"
29#include "GrVkTextureRenderTarget.h"
30#include "GrVkTransferBuffer.h"
31#include "GrVkVertexBuffer.h"
32
33#include "SkConfig8888.h"
jvanverth900bd4a2016-04-29 13:53:12 -070034#include "SkMipMap.h"
Greg Daniel164a9f02016-02-22 09:56:40 -050035
36#include "vk/GrVkInterface.h"
jvanverthfd359ca2016-03-18 11:57:24 -070037#include "vk/GrVkTypes.h"
Greg Daniel164a9f02016-02-22 09:56:40 -050038
39#define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
40#define VK_CALL_RET(RET, X) GR_VK_CALL_RET(this->vkInterface(), RET, X)
41#define VK_CALL_ERRCHECK(X) GR_VK_CALL_ERRCHECK(this->vkInterface(), X)
42
#ifdef ENABLE_VK_LAYERS
// Callback registered with the VK_EXT_debug_report extension; routes validation-layer
// messages to SkDebugf with a severity prefix. Always returns VK_FALSE so the Vulkan
// call that triggered the report is not aborted.
VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(
    VkDebugReportFlagsEXT       flags,
    VkDebugReportObjectTypeEXT  objectType,
    uint64_t                    object,
    size_t                      location,
    int32_t                     messageCode,
    const char*                 pLayerPrefix,
    const char*                 pMessage,
    void*                       pUserData) {
    // Report the highest-severity bit that is set; fall through to info/debug otherwise.
    if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
        SkDebugf("Vulkan error [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        return VK_FALSE;
    }
    if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) {
        SkDebugf("Vulkan warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        return VK_FALSE;
    }
    if (flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) {
        SkDebugf("Vulkan perf warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        return VK_FALSE;
    }
    SkDebugf("Vulkan info/debug [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    return VK_FALSE;
}
#endif
65
jvanverth633b3562016-03-23 11:01:22 -070066GrGpu* GrVkGpu::Create(GrBackendContext backendContext, const GrContextOptions& options,
67 GrContext* context) {
bsalomondc0fcd42016-04-11 14:21:33 -070068 const GrVkBackendContext* vkBackendContext =
69 reinterpret_cast<const GrVkBackendContext*>(backendContext);
jvanverth633b3562016-03-23 11:01:22 -070070 if (!vkBackendContext) {
bsalomondc0fcd42016-04-11 14:21:33 -070071 vkBackendContext = GrVkBackendContext::Create();
jvanverth633b3562016-03-23 11:01:22 -070072 if (!vkBackendContext) {
73 return nullptr;
Greg Daniel164a9f02016-02-22 09:56:40 -050074 }
jvanverth633b3562016-03-23 11:01:22 -070075 } else {
76 vkBackendContext->ref();
Greg Daniel164a9f02016-02-22 09:56:40 -050077 }
78
jvanverth633b3562016-03-23 11:01:22 -070079 return new GrVkGpu(context, options, vkBackendContext);
Greg Daniel164a9f02016-02-22 09:56:40 -050080}
81
82////////////////////////////////////////////////////////////////////////////////
83
// Constructor: wires the GrVkGpu to the device/queue from the backend context, optionally
// registers the debug-report callback, initializes caps, creates the command pool and the
// first primary command buffer, and configures the suballocation heaps.
// Takes ownership of the ref on backendCtx that Create() added.
GrVkGpu::GrVkGpu(GrContext* context, const GrContextOptions& options,
                 const GrVkBackendContext* backendCtx)
    : INHERITED(context)
    , fDevice(backendCtx->fDevice)
    , fQueue(backendCtx->fQueue)
    , fResourceProvider(this) {
    fBackendContext.reset(backendCtx);

#ifdef ENABLE_VK_LAYERS
    fCallback = VK_NULL_HANDLE;
    // Only register the callback if the debug_report extension was actually enabled.
    if (backendCtx->fExtensions & kEXT_debug_report_GrVkExtensionFlag) {
        // Setup callback creation information
        VkDebugReportCallbackCreateInfoEXT callbackCreateInfo;
        callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
        callbackCreateInfo.pNext = nullptr;
        // Errors, warnings, and perf warnings only; info/debug spam is left disabled.
        callbackCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT |
                                   VK_DEBUG_REPORT_WARNING_BIT_EXT |
                                   //VK_DEBUG_REPORT_INFORMATION_BIT_EXT |
                                   //VK_DEBUG_REPORT_DEBUG_BIT_EXT |
                                   VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
        callbackCreateInfo.pfnCallback = &DebugReportCallback;
        callbackCreateInfo.pUserData = nullptr;

        // Register the callback
        GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateDebugReportCallbackEXT(
                            backendCtx->fInstance, &callbackCreateInfo, nullptr, &fCallback));
    }
#endif

    fCompiler = shaderc_compiler_initialize();

    fVkCaps.reset(new GrVkCaps(options, this->vkInterface(), backendCtx->fPhysicalDevice,
                               backendCtx->fFeatures, backendCtx->fExtensions));
    fCaps.reset(SkRef(fVkCaps.get()));

    // Cache the device's memory properties; used when picking memory types for allocations.
    VK_CALL(GetPhysicalDeviceMemoryProperties(backendCtx->fPhysicalDevice, &fPhysDevMemProps));

    const VkCommandPoolCreateInfo cmdPoolInfo = {
        VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,      // sType
        nullptr,                                         // pNext
        VK_COMMAND_POOL_CREATE_TRANSIENT_BIT,            // CmdPoolCreateFlags
        backendCtx->fGraphicsQueueIndex,                 // queueFamilyIndex
    };
    GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateCommandPool(fDevice, &cmdPoolInfo, nullptr,
                                                               &fCmdPool));

    // must call this after creating the CommandPool
    fResourceProvider.init();
    fCurrentCmdBuffer = fResourceProvider.createPrimaryCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);
    // The current command buffer is kept open (recording) between submits.
    fCurrentCmdBuffer->begin(this);

    // set up our heaps
    fHeaps[kLinearImage_Heap].reset(new GrVkHeap(this, GrVkHeap::kSubAlloc_Strategy, 16*1024*1024));
    // We want the OptimalImage_Heap to use a SubAlloc_strategy but it occasionally causes the
    // device to run out of memory. Most likely this is caused by fragmentation in the device heap
    // and we can't allocate more. Until we get a fix moving this to SingleAlloc.
    fHeaps[kOptimalImage_Heap].reset(new GrVkHeap(this, GrVkHeap::kSingleAlloc_Strategy, 64*1024*1024));
    fHeaps[kSmallOptimalImage_Heap].reset(new GrVkHeap(this, GrVkHeap::kSubAlloc_Strategy, 2*1024*1024));
    fHeaps[kVertexBuffer_Heap].reset(new GrVkHeap(this, GrVkHeap::kSingleAlloc_Strategy, 0));
    fHeaps[kIndexBuffer_Heap].reset(new GrVkHeap(this, GrVkHeap::kSingleAlloc_Strategy, 0));
    fHeaps[kUniformBuffer_Heap].reset(new GrVkHeap(this, GrVkHeap::kSubAlloc_Strategy, 64*1024));
    fHeaps[kCopyReadBuffer_Heap].reset(new GrVkHeap(this, GrVkHeap::kSingleAlloc_Strategy, 0));
    fHeaps[kCopyWriteBuffer_Heap].reset(new GrVkHeap(this, GrVkHeap::kSubAlloc_Strategy, 16*1024*1024));
}
149
// Destructor: closes the open command buffer, drains the queue, then tears down
// resources in the reverse order of construction (resources before command pool,
// pool before the debug callback / backend context).
GrVkGpu::~GrVkGpu() {
    fCurrentCmdBuffer->end(this);
    fCurrentCmdBuffer->unref(this);

    // wait for all commands to finish
    fResourceProvider.checkCommandBuffers();
    SkDEBUGCODE(VkResult res = ) VK_CALL(QueueWaitIdle(fQueue));

    // On windows, sometimes calls to QueueWaitIdle return before actually signalling the fences
    // on the command buffers even though they have completed. This causes an assert to fire when
    // destroying the command buffers. Currently this only seems to happen on windows, so we add a
    // sleep to make sure the fence signals.
#ifdef SK_DEBUG
#if defined(SK_BUILD_FOR_WIN)
    Sleep(10); // In milliseconds
#else
    // Uncomment if above bug happens on non windows build.
    // sleep(1);  // In seconds
#endif
#endif

    // VK_ERROR_DEVICE_LOST is acceptable when tearing down (see 4.2.4 in spec)
    SkASSERT(VK_SUCCESS == res || VK_ERROR_DEVICE_LOST == res);

    // must call this just before we destroy the VkDevice
    fResourceProvider.destroyResources();

    VK_CALL(DestroyCommandPool(fDevice, fCmdPool, nullptr));

    shaderc_compiler_release(fCompiler);

#ifdef ENABLE_VK_LAYERS
    if (fCallback) {
        VK_CALL(DestroyDebugReportCallbackEXT(fBackendContext->fInstance, fCallback, nullptr));
        fCallback = VK_NULL_HANDLE;
    }
#endif
}
188
189///////////////////////////////////////////////////////////////////////////////
190
egdaniel9cb63402016-06-23 08:37:05 -0700191GrGpuCommandBuffer* GrVkGpu::createCommandBuffer(
192 GrRenderTarget* target,
193 const GrGpuCommandBuffer::LoadAndStoreInfo& colorInfo,
194 const GrGpuCommandBuffer::LoadAndStoreInfo& stencilInfo) {
195 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
196 return new GrVkGpuCommandBuffer(this, vkRT, colorInfo, stencilInfo);
egdaniel066df7c2016-06-08 14:02:27 -0700197}
198
// Ends and submits the current primary command buffer to the queue, then immediately
// starts recording into a fresh one so fCurrentCmdBuffer is always open.
// 'sync' controls whether the submit blocks until the GPU finishes (kForce) or not (kSkip).
void GrVkGpu::submitCommandBuffer(SyncQueue sync) {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->end(this);

    fCurrentCmdBuffer->submitToQueue(this, fQueue, sync);
    // Recycle any command buffers whose fences have signaled.
    fResourceProvider.checkCommandBuffers();

    // Release old command buffer and create a new one
    fCurrentCmdBuffer->unref(this);
    fCurrentCmdBuffer = fResourceProvider.createPrimaryCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);

    fCurrentCmdBuffer->begin(this);
}
213
214///////////////////////////////////////////////////////////////////////////////
cdalton1bf3e712016-04-19 10:00:02 -0700215GrBuffer* GrVkGpu::onCreateBuffer(size_t size, GrBufferType type, GrAccessPattern accessPattern,
216 const void* data) {
217 GrBuffer* buff;
cdalton397536c2016-03-25 12:15:03 -0700218 switch (type) {
219 case kVertex_GrBufferType:
220 SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
221 kStatic_GrAccessPattern == accessPattern);
cdalton1bf3e712016-04-19 10:00:02 -0700222 buff = GrVkVertexBuffer::Create(this, size, kDynamic_GrAccessPattern == accessPattern);
egdaniele05bbbb2016-04-19 12:13:41 -0700223 break;
cdalton397536c2016-03-25 12:15:03 -0700224 case kIndex_GrBufferType:
225 SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
226 kStatic_GrAccessPattern == accessPattern);
cdalton1bf3e712016-04-19 10:00:02 -0700227 buff = GrVkIndexBuffer::Create(this, size, kDynamic_GrAccessPattern == accessPattern);
egdaniele05bbbb2016-04-19 12:13:41 -0700228 break;
cdalton397536c2016-03-25 12:15:03 -0700229 case kXferCpuToGpu_GrBufferType:
jvanverthc3d706f2016-04-20 10:33:27 -0700230 SkASSERT(kStream_GrAccessPattern == accessPattern);
cdalton1bf3e712016-04-19 10:00:02 -0700231 buff = GrVkTransferBuffer::Create(this, size, GrVkBuffer::kCopyRead_Type);
egdaniele05bbbb2016-04-19 12:13:41 -0700232 break;
cdalton397536c2016-03-25 12:15:03 -0700233 case kXferGpuToCpu_GrBufferType:
jvanverthc3d706f2016-04-20 10:33:27 -0700234 SkASSERT(kStream_GrAccessPattern == accessPattern);
cdalton1bf3e712016-04-19 10:00:02 -0700235 buff = GrVkTransferBuffer::Create(this, size, GrVkBuffer::kCopyWrite_Type);
egdaniele05bbbb2016-04-19 12:13:41 -0700236 break;
cdalton397536c2016-03-25 12:15:03 -0700237 default:
238 SkFAIL("Unknown buffer type.");
239 return nullptr;
240 }
cdalton1bf3e712016-04-19 10:00:02 -0700241 if (data && buff) {
242 buff->updateData(data, size);
243 }
244 return buff;
Greg Daniel164a9f02016-02-22 09:56:40 -0500245}
246
247////////////////////////////////////////////////////////////////////////////////
248bool GrVkGpu::onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
249 GrPixelConfig srcConfig, DrawPreference* drawPreference,
250 WritePixelTempDrawInfo* tempDrawInfo) {
251 if (kIndex_8_GrPixelConfig == srcConfig || GrPixelConfigIsCompressed(dstSurface->config())) {
252 return false;
253 }
254
255 // Currently we don't handle draws, so if the caller wants/needs to do a draw we need to fail
256 if (kNoDraw_DrawPreference != *drawPreference) {
257 return false;
258 }
259
260 if (dstSurface->config() != srcConfig) {
halcanary9d524f22016-03-29 09:03:52 -0700261 // TODO: This should fall back to drawing or copying to change config of dstSurface to
Greg Daniel164a9f02016-02-22 09:56:40 -0500262 // match that of srcConfig.
263 return false;
264 }
265
266 return true;
267}
268
// Uploads pixel data (base level plus optional mips) into the texture backing 'surface'.
// Linear-tiled textures take the host-write path (single level only); optimal-tiled
// textures go through a staging-buffer copy, reallocating the image first if the mip
// count changed. Returns false on any unsupported combination.
bool GrVkGpu::onWritePixels(GrSurface* surface,
                            int left, int top, int width, int height,
                            GrPixelConfig config,
                            const SkTArray<GrMipLevel>& texels) {
    GrVkTexture* vkTex = static_cast<GrVkTexture*>(surface->asTexture());
    if (!vkTex) {
        return false;
    }

    // Make sure we have at least the base level
    if (texels.empty() || !texels.begin()->fPixels) {
        return false;
    }

    // We assume Vulkan doesn't do sRGB <-> linear conversions when reading and writing pixels.
    if (GrPixelConfigIsSRGB(surface->config()) != GrPixelConfigIsSRGB(config)) {
        return false;
    }

    bool success = false;
    if (GrPixelConfigIsCompressed(vkTex->desc().fConfig)) {
        // We check that config == desc.fConfig in GrGpu::getWritePixelsInfo()
        SkASSERT(config == vkTex->desc().fConfig);
        // TODO: add compressed texture support
        // delete the following two lines and uncomment the two after that when ready
        // NOTE(review): asTexture() does not add a ref, so this unref looks suspect —
        // confirm ownership before this dead-end path is ever revived.
        vkTex->unref();
        return false;
        //success = this->uploadCompressedTexData(vkTex->desc(), buffer, false, left, top, width,
        //                                       height);
    } else {
        bool linearTiling = vkTex->isLinearTiled();
        if (linearTiling) {
            // Linear tiling supports only a single (base) level.
            if (texels.count() > 1) {
                SkDebugf("Can't upload mipmap data to linear tiled texture");
                return false;
            }
            if (VK_IMAGE_LAYOUT_PREINITIALIZED != vkTex->currentLayout()) {
                // Need to change the layout to general in order to perform a host write
                vkTex->setImageLayout(this,
                                      VK_IMAGE_LAYOUT_GENERAL,
                                      VK_ACCESS_HOST_WRITE_BIT,
                                      VK_PIPELINE_STAGE_HOST_BIT,
                                      false);
                // Force the layout transition to complete before the host write below.
                this->submitCommandBuffer(kForce_SyncQueue);
            }
            success = this->uploadTexDataLinear(vkTex, left, top, width, height, config,
                                                texels.begin()->fPixels, texels.begin()->fRowBytes);
        } else {
            int newMipLevels = texels.count();
            int currentMipLevels = vkTex->texturePriv().maxMipMapLevel() + 1;
            // If the incoming data has a different mip count, the image must be recreated.
            if (newMipLevels != currentMipLevels) {
                if (!vkTex->reallocForMipmap(this, newMipLevels)) {
                    return false;
                }
            }
            success = this->uploadTexDataOptimal(vkTex, left, top, width, height, config, texels);
        }
    }

    return success;
}
330
// Writes pixel data directly into a linear-tiled texture by mapping its device memory
// and copying row by row. The texture must be in PREINITIALIZED or GENERAL layout
// (host-writable). Handles bottom-left-origin surfaces by writing rows in reverse.
// Returns false if the write rect is invalid or the memory cannot be mapped.
bool GrVkGpu::uploadTexDataLinear(GrVkTexture* tex,
                                  int left, int top, int width, int height,
                                  GrPixelConfig dataConfig,
                                  const void* data,
                                  size_t rowBytes) {
    SkASSERT(data);
    SkASSERT(tex->isLinearTiled());

    // If we're uploading compressed data then we should be using uploadCompressedTexData
    SkASSERT(!GrPixelConfigIsCompressed(dataConfig));

    size_t bpp = GrBytesPerPixel(dataConfig);

    const GrSurfaceDesc& desc = tex->desc();

    // Clip the write rect to the texture and advance 'data' accordingly.
    if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
                                               &width, &height, &data, &rowBytes)) {
        return false;
    }
    size_t trimRowBytes = width * bpp;

    SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == tex->currentLayout() ||
             VK_IMAGE_LAYOUT_GENERAL == tex->currentLayout());
    const VkImageSubresource subres = {
        VK_IMAGE_ASPECT_COLOR_BIT,
        0,  // mipLevel
        0,  // arraySlice
    };
    VkSubresourceLayout layout;
    VkResult err;

    const GrVkInterface* interface = this->vkInterface();

    // Query the driver for the linear image's actual row pitch/offset.
    GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
                                                    tex->image(),
                                                    &subres,
                                                    &layout));

    // For bottom-left origin, the top of the write rect maps to a flipped y in the image.
    int texTop = kBottomLeft_GrSurfaceOrigin == desc.fOrigin ? tex->height() - top - height : top;
    const GrVkAlloc& alloc = tex->alloc();
    VkDeviceSize offset = alloc.fOffset + texTop*layout.rowPitch + left*bpp;
    VkDeviceSize size = height*layout.rowPitch;
    void* mapPtr;
    err = GR_VK_CALL(interface, MapMemory(fDevice, alloc.fMemory, offset, size, 0, &mapPtr));
    if (err) {
        return false;
    }

    if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
        // copy into buffer by rows, last source row first (y-flip)
        const char* srcRow = reinterpret_cast<const char*>(data);
        char* dstRow = reinterpret_cast<char*>(mapPtr)+(height - 1)*layout.rowPitch;
        for (int y = 0; y < height; y++) {
            memcpy(dstRow, srcRow, trimRowBytes);
            srcRow += rowBytes;
            dstRow -= layout.rowPitch;
        }
    } else {
        // If there is no padding on the src (rowBytes) or dst (layout.rowPitch) we can memcpy
        if (trimRowBytes == rowBytes && trimRowBytes == layout.rowPitch) {
            memcpy(mapPtr, data, trimRowBytes * height);
        } else {
            SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), data, rowBytes,
                         trimRowBytes, height);
        }
    }

    GR_VK_CALL(interface, UnmapMemory(fDevice, alloc.fMemory));

    return true;
}
402
403bool GrVkGpu::uploadTexDataOptimal(GrVkTexture* tex,
404 int left, int top, int width, int height,
405 GrPixelConfig dataConfig,
406 const SkTArray<GrMipLevel>& texels) {
407 SkASSERT(!tex->isLinearTiled());
408 // The assumption is either that we have no mipmaps, or that our rect is the entire texture
409 SkASSERT(1 == texels.count() ||
410 (0 == left && 0 == top && width == tex->width() && height == tex->height()));
411
412 // If we're uploading compressed data then we should be using uploadCompressedTexData
413 SkASSERT(!GrPixelConfigIsCompressed(dataConfig));
414
415 if (width == 0 || height == 0) {
416 return false;
417 }
418
419 const GrSurfaceDesc& desc = tex->desc();
420 SkASSERT(this->caps()->isConfigTexturable(desc.fConfig));
421 size_t bpp = GrBytesPerPixel(dataConfig);
422
423 // texels is const.
jvanverthc578b0632016-05-02 10:58:12 -0700424 // But we may need to adjust the fPixels ptr based on the copyRect, or fRowBytes.
425 // Because of this we need to make a non-const shallow copy of texels.
426 SkTArray<GrMipLevel> texelsShallowCopy(texels);
jvanverth900bd4a2016-04-29 13:53:12 -0700427
jvanverthc578b0632016-05-02 10:58:12 -0700428 for (int currentMipLevel = texelsShallowCopy.count() - 1; currentMipLevel >= 0;
429 currentMipLevel--) {
430 SkASSERT(texelsShallowCopy[currentMipLevel].fPixels);
Greg Daniel164a9f02016-02-22 09:56:40 -0500431 }
432
jvanverth900bd4a2016-04-29 13:53:12 -0700433 // Determine whether we need to flip when we copy into the buffer
jvanverthc578b0632016-05-02 10:58:12 -0700434 bool flipY = (kBottomLeft_GrSurfaceOrigin == desc.fOrigin && !texelsShallowCopy.empty());
jvanverth900bd4a2016-04-29 13:53:12 -0700435
jvanverthc578b0632016-05-02 10:58:12 -0700436 // adjust any params (left, top, currentWidth, currentHeight
jvanverth900bd4a2016-04-29 13:53:12 -0700437 // find the combined size of all the mip levels and the relative offset of
438 // each into the collective buffer
jvanverthc578b0632016-05-02 10:58:12 -0700439 // Do the first level separately because we may need to adjust width and height
440 // (for the non-mipped case).
441 if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
442 &width,
443 &height,
444 &texelsShallowCopy[0].fPixels,
445 &texelsShallowCopy[0].fRowBytes)) {
446 return false;
447 }
448 SkTArray<size_t> individualMipOffsets(texelsShallowCopy.count());
449 individualMipOffsets.push_back(0);
450 size_t combinedBufferSize = width * bpp * height;
451 int currentWidth = width;
452 int currentHeight = height;
453 for (int currentMipLevel = 1; currentMipLevel < texelsShallowCopy.count(); currentMipLevel++) {
454 currentWidth = SkTMax(1, currentWidth/2);
455 currentHeight = SkTMax(1, currentHeight/2);
456 if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
457 &currentWidth,
458 &currentHeight,
459 &texelsShallowCopy[currentMipLevel].fPixels,
460 &texelsShallowCopy[currentMipLevel].fRowBytes)) {
461 return false;
462 }
jvanverth900bd4a2016-04-29 13:53:12 -0700463 const size_t trimmedSize = currentWidth * bpp * currentHeight;
464 individualMipOffsets.push_back(combinedBufferSize);
465 combinedBufferSize += trimmedSize;
466 }
467
468 // allocate buffer to hold our mip data
469 GrVkTransferBuffer* transferBuffer =
470 GrVkTransferBuffer::Create(this, combinedBufferSize, GrVkBuffer::kCopyRead_Type);
471
472 char* buffer = (char*) transferBuffer->map();
jvanverthc578b0632016-05-02 10:58:12 -0700473 SkTArray<VkBufferImageCopy> regions(texelsShallowCopy.count());
jvanverth900bd4a2016-04-29 13:53:12 -0700474
jvanverthc578b0632016-05-02 10:58:12 -0700475 currentWidth = width;
476 currentHeight = height;
477 for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count(); currentMipLevel++) {
jvanverth900bd4a2016-04-29 13:53:12 -0700478 const size_t trimRowBytes = currentWidth * bpp;
jvanverthc578b0632016-05-02 10:58:12 -0700479 const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes;
jvanverth900bd4a2016-04-29 13:53:12 -0700480
481 // copy data into the buffer, skipping the trailing bytes
482 char* dst = buffer + individualMipOffsets[currentMipLevel];
jvanverthc578b0632016-05-02 10:58:12 -0700483 const char* src = (const char*)texelsShallowCopy[currentMipLevel].fPixels;
jvanverth900bd4a2016-04-29 13:53:12 -0700484 if (flipY) {
485 src += (currentHeight - 1) * rowBytes;
486 for (int y = 0; y < currentHeight; y++) {
487 memcpy(dst, src, trimRowBytes);
488 src -= rowBytes;
489 dst += trimRowBytes;
490 }
491 } else if (trimRowBytes == rowBytes) {
492 memcpy(dst, src, trimRowBytes * currentHeight);
493 } else {
494 SkRectMemcpy(dst, trimRowBytes, src, rowBytes, trimRowBytes, currentHeight);
495 }
496
497 VkBufferImageCopy& region = regions.push_back();
498 memset(&region, 0, sizeof(VkBufferImageCopy));
499 region.bufferOffset = individualMipOffsets[currentMipLevel];
500 region.bufferRowLength = currentWidth;
501 region.bufferImageHeight = currentHeight;
bsalomoncf942c42016-04-29 18:30:06 -0700502 region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(currentMipLevel), 0, 1 };
jvanverthc578b0632016-05-02 10:58:12 -0700503 region.imageOffset = { left, flipY ? tex->height() - top - currentHeight : top, 0 };
jvanverth900bd4a2016-04-29 13:53:12 -0700504 region.imageExtent = { (uint32_t)currentWidth, (uint32_t)currentHeight, 1 };
jvanverthc578b0632016-05-02 10:58:12 -0700505
506 currentWidth = SkTMax(1, currentWidth/2);
507 currentHeight = SkTMax(1, currentHeight/2);
jvanverth900bd4a2016-04-29 13:53:12 -0700508 }
509
510 transferBuffer->unmap();
511
512 // make sure the unmap has finished
513 transferBuffer->addMemoryBarrier(this,
514 VK_ACCESS_HOST_WRITE_BIT,
515 VK_ACCESS_TRANSFER_READ_BIT,
516 VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
517 VK_PIPELINE_STAGE_TRANSFER_BIT,
518 false);
519
520 // Change layout of our target so it can be copied to
jvanverth900bd4a2016-04-29 13:53:12 -0700521 tex->setImageLayout(this,
522 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
jvanverth50c46c72016-05-06 12:31:28 -0700523 VK_ACCESS_TRANSFER_WRITE_BIT,
524 VK_PIPELINE_STAGE_TRANSFER_BIT,
jvanverth900bd4a2016-04-29 13:53:12 -0700525 false);
526
527 // Copy the buffer to the image
528 fCurrentCmdBuffer->copyBufferToImage(this,
529 transferBuffer,
530 tex,
531 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
532 regions.count(),
533 regions.begin());
534
535 // Submit the current command buffer to the Queue
536 this->submitCommandBuffer(kSkip_SyncQueue);
537
538 transferBuffer->unref();
539
Greg Daniel164a9f02016-02-22 09:56:40 -0500540 return true;
541}
542
543////////////////////////////////////////////////////////////////////////////////
// Creates a new texture (optionally a render target) described by 'desc', choosing linear
// vs. optimal tiling, usage flags, and memory properties, then uploads any provided texel
// data through the matching upload path. Returns nullptr if the config is untexturable,
// unrenderable (when requested), or creation/upload fails.
GrTexture* GrVkGpu::onCreateTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
                                    const SkTArray<GrMipLevel>& texels) {
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);

    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(desc.fConfig, &pixelFormat)) {
        return nullptr;
    }

    if (!fVkCaps->isConfigTexturable(desc.fConfig)) {
        return nullptr;
    }

    if (renderTarget && !fVkCaps->isConfigRenderable(desc.fConfig, false)) {
        return nullptr;
    }

    // kZeroCopy requests a host-writable (linear-tiled) image.
    bool linearTiling = false;
    if (SkToBool(desc.fFlags & kZeroCopy_GrSurfaceFlag)) {
        // we can't have a linear texture with a mipmap
        if (texels.count() > 1) {
            SkDebugf("Trying to create linear tiled texture with mipmap");
            return nullptr;
        }
        if (fVkCaps->isConfigTexurableLinearly(desc.fConfig) &&
            (!renderTarget || fVkCaps->isConfigRenderableLinearly(desc.fConfig, false))) {
            linearTiling = true;
        } else {
            return nullptr;
        }
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    if (renderTarget) {
        usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    }

    // For now we will set the VK_IMAGE_USAGE_TRANSFER_DESTINATION_BIT and
    // VK_IMAGE_USAGE_TRANSFER_SOURCE_BIT on every texture since we do not know whether or not we
    // will be using this texture in some copy or not. Also this assumes, as is the current case,
    // that all render targets in vulkan are also textures. If we change this practice of setting
    // both bits, we must make sure to set the destination bit if we are uploading srcData to the
    // texture.
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    // Linear images that will be written by the host need host-visible memory;
    // everything else prefers device-local memory.
    VkFlags memProps = (!texels.empty() && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
                                                           VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

    // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is
    // requested, this ImageDesc describes the resolved texture. Therefore we always have samples set
    // to 1.
    int mipLevels = texels.empty() ? 1 : texels.count();
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = pixelFormat;
    imageDesc.fWidth = desc.fWidth;
    imageDesc.fHeight = desc.fHeight;
    imageDesc.fLevels = linearTiling ? 1 : mipLevels;
    imageDesc.fSamples = 1;
    imageDesc.fImageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = usageFlags;
    imageDesc.fMemProps = memProps;

    GrVkTexture* tex;
    if (renderTarget) {
        tex = GrVkTextureRenderTarget::CreateNewTextureRenderTarget(this, budgeted, desc,
                                                                    imageDesc);
    } else {
        tex = GrVkTexture::CreateNewTexture(this, budgeted, desc, imageDesc);
    }

    if (!tex) {
        return nullptr;
    }

    if (!texels.empty()) {
        SkASSERT(texels.begin()->fPixels);
        bool success;
        if (linearTiling) {
            success = this->uploadTexDataLinear(tex, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
                                                texels.begin()->fPixels, texels.begin()->fRowBytes);
        } else {
            success = this->uploadTexDataOptimal(tex, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
                                                 texels);
        }
        if (!success) {
            tex->unref();
            return nullptr;
        }
    }

    return tex;
}
637
638////////////////////////////////////////////////////////////////////////////////
639
640static GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin) {
641 // By default, all textures in Vk use TopLeft
642 if (kDefault_GrSurfaceOrigin == origin) {
643 return kTopLeft_GrSurfaceOrigin;
644 } else {
645 return origin;
646 }
647}
648
// Wraps a client-provided Vulkan image (handle passed as a GrVkImageInfo*) in a
// GrVkTexture, or a GrVkTextureRenderTarget when the render-target flag is set.
// Validates the handle, dimensions, and (in debug builds) that the image format matches
// the requested config. Returns nullptr on any invalid input.
GrTexture* GrVkGpu::onWrapBackendTexture(const GrBackendTextureDesc& desc,
                                         GrWrapOwnership ownership) {
    if (0 == desc.fTextureHandle) {
        return nullptr;
    }

    int maxSize = this->caps()->maxTextureSize();
    if (desc.fWidth > maxSize || desc.fHeight > maxSize) {
        return nullptr;
    }

    // The handle is a pointer to a GrVkImageInfo describing the client's image.
    const GrVkImageInfo* info = reinterpret_cast<const GrVkImageInfo*>(desc.fTextureHandle);
    if (VK_NULL_HANDLE == info->fImage || VK_NULL_HANDLE == info->fAlloc.fMemory) {
        return nullptr;
    }
#ifdef SK_DEBUG
    // Sanity-check that the client's image format matches the requested pixel config.
    VkFormat format;
    if (!GrPixelConfigToVkFormat(desc.fConfig, &format)) {
        return nullptr;
    }
    SkASSERT(format == info->fFormat);
#endif

    GrSurfaceDesc surfDesc;
    // next line relies on GrBackendTextureDesc's flags matching GrTexture's
    surfDesc.fFlags = (GrSurfaceFlags)desc.fFlags;
    surfDesc.fWidth = desc.fWidth;
    surfDesc.fHeight = desc.fHeight;
    surfDesc.fConfig = desc.fConfig;
    surfDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount());
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag);
    // In GL, Chrome assumes all textures are BottomLeft
    // In VK, we don't have this restriction
    surfDesc.fOrigin = resolve_origin(desc.fOrigin);

    GrVkTexture* texture = nullptr;
    if (renderTarget) {
        texture = GrVkTextureRenderTarget::CreateWrappedTextureRenderTarget(this, surfDesc,
                                                                            ownership, info);
    } else {
        texture = GrVkTexture::CreateWrappedTexture(this, surfDesc, ownership, info);
    }
    if (!texture) {
        return nullptr;
    }

    return texture;
}
697
// Wraps an externally created Vulkan image (GrVkImageInfo pointer passed via
// wrapDesc.fRenderTargetHandle) in a GrVkRenderTarget. If the caller asked for
// stencil bits, a stencil attachment is created as well; failure to create it
// releases the target and returns nullptr.
GrRenderTarget* GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& wrapDesc,
                                                   GrWrapOwnership ownership) {

    const GrVkImageInfo* info =
        reinterpret_cast<const GrVkImageInfo*>(wrapDesc.fRenderTargetHandle);
    // A null image is never valid; a null memory handle is only acceptable when
    // we are borrowing (we won't be asked to free memory we don't know about).
    if (VK_NULL_HANDLE == info->fImage ||
        (VK_NULL_HANDLE == info->fAlloc.fMemory && kAdopt_GrWrapOwnership == ownership)) {
        return nullptr;
    }

    GrSurfaceDesc desc;
    desc.fConfig = wrapDesc.fConfig;
    desc.fFlags = kCheckAllocation_GrSurfaceFlag;
    desc.fWidth = wrapDesc.fWidth;
    desc.fHeight = wrapDesc.fHeight;
    // Clamp to the device's supported sample count.
    desc.fSampleCnt = SkTMin(wrapDesc.fSampleCnt, this->caps()->maxSampleCount());

    desc.fOrigin = resolve_origin(wrapDesc.fOrigin);

    GrVkRenderTarget* tgt = GrVkRenderTarget::CreateWrappedRenderTarget(this, desc,
                                                                        ownership,
                                                                        info);
    if (tgt && wrapDesc.fStencilBits) {
        if (!createStencilAttachmentForRenderTarget(tgt, desc.fWidth, desc.fHeight)) {
            tgt->unref();
            return nullptr;
        }
    }
    return tgt;
}
728
// Generates a full mip chain for 'tex' on the GPU by repeatedly blitting each
// level into the next smaller one. The texture is reallocated with enough mip
// levels first (reallocForMipmap), then level 0 of the old image is blitted
// into level 0 of the new image, and successive levels are downsampled in-place
// with linear filtering, separated by transfer->transfer barriers.
void GrVkGpu::generateMipmap(GrVkTexture* tex) const {
    // don't do anything for linearly tiled textures (can't have mipmaps)
    if (tex->isLinearTiled()) {
        SkDebugf("Trying to create mipmap for linear tiled texture");
        return;
    }

    // We cannot generate mipmaps for images that are multisampled.
    // TODO: does it even make sense for rendertargets in general?
    if (tex->asRenderTarget() && tex->asRenderTarget()->numColorSamples() > 1) {
        return;
    }

    // determine if we can blit to and from this format
    const GrVkCaps& caps = this->vkCaps();
    if (!caps.configCanBeDstofBlit(tex->config(), false) ||
        !caps.configCanBeSrcofBlit(tex->config(), false)) {
        return;
    }

    // change the original image's layout so it can be the blit source
    tex->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                        VK_ACCESS_TRANSFER_READ_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, false);

    // grab handle to the original image resource; we must keep it alive (ref)
    // until the blit out of it has been recorded, since reallocForMipmap below
    // replaces the texture's backing image.
    const GrVkResource* oldResource = tex->resource();
    oldResource->ref();
    VkImage oldImage = tex->image();

    // SkMipMap doesn't include the base level in the level count so we have to add 1
    uint32_t levelCount = SkMipMap::ComputeLevelCount(tex->width(), tex->height()) + 1;
    if (!tex->reallocForMipmap(this, levelCount)) {
        oldResource->unref(this);
        return;
    }

    // change the new image's layout; GENERAL allows it to be both blit dst and
    // (for the per-level downsample loop) blit src without re-transitioning.
    tex->setImageLayout(this, VK_IMAGE_LAYOUT_GENERAL,
                        VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, false);

    // Blit original image into level 0 of the reallocated image.
    int width = tex->width();
    int height = tex->height();

    VkImageBlit blitRegion;
    memset(&blitRegion, 0, sizeof(VkImageBlit));
    blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    blitRegion.srcOffsets[0] = { 0, 0, 0 };
    blitRegion.srcOffsets[1] = { width, height, 1 };
    blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    blitRegion.dstOffsets[0] = { 0, 0, 0 };
    blitRegion.dstOffsets[1] = { width, height, 1 };

    fCurrentCmdBuffer->blitImage(this,
                                 oldResource,
                                 oldImage,
                                 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                 tex->resource(),
                                 tex->image(),
                                 VK_IMAGE_LAYOUT_GENERAL,
                                 1,
                                 &blitRegion,
                                 VK_FILTER_LINEAR);

    // setup memory barrier reused for each level: make level N-1's transfer
    // write visible before it is read as the source of level N's blit.
    SkASSERT(GrVkFormatToPixelConfig(tex->imageFormat(), nullptr));
    VkImageAspectFlags aspectFlags = VK_IMAGE_ASPECT_COLOR_BIT;
    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,          // sType
        NULL,                                            // pNext
        VK_ACCESS_TRANSFER_WRITE_BIT,                    // outputMask
        VK_ACCESS_TRANSFER_READ_BIT,                     // inputMask
        VK_IMAGE_LAYOUT_GENERAL,                         // oldLayout
        VK_IMAGE_LAYOUT_GENERAL,                         // newLayout
        VK_QUEUE_FAMILY_IGNORED,                         // srcQueueFamilyIndex
        VK_QUEUE_FAMILY_IGNORED,                         // dstQueueFamilyIndex
        tex->image(),                                    // image
        { aspectFlags, 0, 1, 0, 1 }                      // subresourceRange
    };

    // Blit the miplevels: level (mipLevel-1) -> level mipLevel, halving each axis.
    uint32_t mipLevel = 1;
    while (mipLevel < levelCount) {
        int prevWidth = width;
        int prevHeight = height;
        width = SkTMax(1, width / 2);
        height = SkTMax(1, height / 2);

        imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
        this->addImageMemoryBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                                    false, &imageMemoryBarrier);

        blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel - 1, 0, 1 };
        blitRegion.srcOffsets[0] = { 0, 0, 0 };
        blitRegion.srcOffsets[1] = { prevWidth, prevHeight, 1 };
        blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 0, 1 };
        blitRegion.dstOffsets[0] = { 0, 0, 0 };
        blitRegion.dstOffsets[1] = { width, height, 1 };
        fCurrentCmdBuffer->blitImage(this,
                                     *tex,
                                     *tex,
                                     1,
                                     &blitRegion,
                                     VK_FILTER_LINEAR);
        ++mipLevel;
    }

    // Drop our hold on the pre-realloc image resource.
    oldResource->unref(this);
}
838
Greg Daniel164a9f02016-02-22 09:56:40 -0500839////////////////////////////////////////////////////////////////////////////////
840
841GrStencilAttachment* GrVkGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt,
842 int width,
843 int height) {
Greg Daniel164a9f02016-02-22 09:56:40 -0500844 SkASSERT(width >= rt->width());
845 SkASSERT(height >= rt->height());
846
847 int samples = rt->numStencilSamples();
848
egdaniel8f1dcaa2016-04-01 10:10:45 -0700849 const GrVkCaps::StencilFormat& sFmt = this->vkCaps().preferedStencilFormat();
Greg Daniel164a9f02016-02-22 09:56:40 -0500850
851 GrVkStencilAttachment* stencil(GrVkStencilAttachment::Create(this,
Greg Daniel164a9f02016-02-22 09:56:40 -0500852 width,
853 height,
854 samples,
855 sFmt));
856 fStats.incStencilAttachmentCreates();
857 return stencil;
858}
859
860////////////////////////////////////////////////////////////////////////////////
861
// Test-only helper: creates a standalone VkImage (plus bound memory), optionally
// uploads srcData into it (linear tiling only), and returns a heap-allocated
// GrVkImageInfo describing it, cast to GrBackendObject. The caller owns the
// result and must release it via deleteTestingOnlyBackendTexture(). Returns 0
// on any failure, cleaning up whatever was created up to that point.
GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, int h,
                                                         GrPixelConfig config,
                                                         bool isRenderTarget) {

    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
        return 0;
    }

    bool linearTiling = false;
    if (!fVkCaps->isConfigTexturable(config)) {
        return 0;
    }

    if (isRenderTarget && !fVkCaps->isConfigRenderable(config, false)) {
        return 0;
    }

    // Prefer linear tiling when the config supports it for all requested uses;
    // otherwise fall back to optimal tiling.
    if (fVkCaps->isConfigTexurableLinearly(config) &&
        (!isRenderTarget || fVkCaps->isConfigRenderableLinearly(config, false))) {
        linearTiling = true;
    }

    // Currently this is not supported since it requires a copy which has not yet been implemented.
    if (srcData && !linearTiling) {
        return 0;
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    if (isRenderTarget) {
        usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    }

    VkImage image = VK_NULL_HANDLE;
    GrVkAlloc alloc = { VK_NULL_HANDLE, 0, 0 };

    VkImageTiling imageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
    // PREINITIALIZED keeps host-written contents valid for linear images;
    // optimal-tiled images start UNDEFINED.
    VkImageLayout initialLayout = (VK_IMAGE_TILING_LINEAR == imageTiling)
                                  ? VK_IMAGE_LAYOUT_PREINITIALIZED
                                  : VK_IMAGE_LAYOUT_UNDEFINED;

    // Create Image
    VkSampleCountFlagBits vkSamples;
    if (!GrSampleCountToVkSampleCount(1, &vkSamples)) {
        return 0;
    }

    const VkImageCreateInfo imageCreateInfo = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,         // sType
        NULL,                                        // pNext
        0,                                           // VkImageCreateFlags
        VK_IMAGE_TYPE_2D,                            // VkImageType
        pixelFormat,                                 // VkFormat
        { (uint32_t) w, (uint32_t) h, 1 },           // VkExtent3D
        1,                                           // mipLevels
        1,                                           // arrayLayers
        vkSamples,                                   // samples
        imageTiling,                                 // VkImageTiling
        usageFlags,                                  // VkImageUsageFlags
        VK_SHARING_MODE_EXCLUSIVE,                   // VkSharingMode
        0,                                           // queueFamilyCount
        0,                                           // pQueueFamilyIndices
        initialLayout                                // initialLayout
    };

    GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateImage(this->device(), &imageCreateInfo, nullptr, &image));

    if (!GrVkMemory::AllocAndBindImageMemory(this, image, linearTiling, &alloc)) {
        VK_CALL(DestroyImage(this->device(), image, nullptr));
        return 0;
    }

    if (srcData) {
        if (linearTiling) {
            const VkImageSubresource subres = {
                VK_IMAGE_ASPECT_COLOR_BIT,
                0,  // mipLevel
                0,  // arraySlice
            };
            VkSubresourceLayout layout;
            VkResult err;

            // Query the driver's row pitch for the linear image so we can
            // honor any per-row padding it requires.
            VK_CALL(GetImageSubresourceLayout(fDevice, image, &subres, &layout));

            void* mapPtr;
            err = VK_CALL(MapMemory(fDevice, alloc.fMemory, alloc.fOffset, layout.rowPitch * h,
                                    0, &mapPtr));
            if (err) {
                // Failure path: undo the allocation and image creation.
                GrVkMemory::FreeImageMemory(this, linearTiling, alloc);
                VK_CALL(DestroyImage(this->device(), image, nullptr));
                return 0;
            }

            size_t bpp = GrBytesPerPixel(config);
            size_t rowCopyBytes = bpp * w;
            // If there is no padding on dst (layout.rowPitch) we can do a single memcopy.
            // This assumes the srcData comes in with no padding.
            if (rowCopyBytes == layout.rowPitch) {
                memcpy(mapPtr, srcData, rowCopyBytes * h);
            } else {
                SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), srcData, rowCopyBytes,
                             rowCopyBytes, h);
            }
            VK_CALL(UnmapMemory(fDevice, alloc.fMemory));
        } else {
            // TODO: Add support for copying to optimal tiling
            SkASSERT(false);
        }
    }

    // Package the handles for the caller; ownership transfers with the pointer.
    GrVkImageInfo* info = new GrVkImageInfo;
    info->fImage = image;
    info->fAlloc = alloc;
    info->fImageTiling = imageTiling;
    info->fImageLayout = initialLayout;
    info->fFormat = pixelFormat;
    info->fLevelCount = 1;

    return (GrBackendObject)info;
}
984
985bool GrVkGpu::isTestingOnlyBackendTexture(GrBackendObject id) const {
egdanielb2df0c22016-05-13 11:30:37 -0700986 const GrVkImageInfo* backend = reinterpret_cast<const GrVkImageInfo*>(id);
Greg Daniel164a9f02016-02-22 09:56:40 -0500987
jvanverth1e305ba2016-06-01 09:39:15 -0700988 if (backend && backend->fImage && backend->fAlloc.fMemory) {
Greg Daniel164a9f02016-02-22 09:56:40 -0500989 VkMemoryRequirements req;
990 memset(&req, 0, sizeof(req));
991 GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
992 backend->fImage,
993 &req));
994 // TODO: find a better check
995 // This will probably fail with a different driver
996 return (req.size > 0) && (req.size <= 8192 * 8192);
997 }
998
999 return false;
1000}
1001
1002void GrVkGpu::deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandon) {
jvanverth6b6ffc42016-06-13 14:28:07 -07001003 GrVkImageInfo* backend = reinterpret_cast<GrVkImageInfo*>(id);
Greg Daniel164a9f02016-02-22 09:56:40 -05001004 if (backend) {
1005 if (!abandon) {
jvanverthfd359ca2016-03-18 11:57:24 -07001006 // something in the command buffer may still be using this, so force submit
1007 this->submitCommandBuffer(kForce_SyncQueue);
jvanverth6b6ffc42016-06-13 14:28:07 -07001008 GrVkImage::DestroyImageInfo(this, backend);
Greg Daniel164a9f02016-02-22 09:56:40 -05001009 }
jvanverthfd359ca2016-03-18 11:57:24 -07001010 delete backend;
Greg Daniel164a9f02016-02-22 09:56:40 -05001011 }
1012}
1013
1014////////////////////////////////////////////////////////////////////////////////
1015
1016void GrVkGpu::addMemoryBarrier(VkPipelineStageFlags srcStageMask,
1017 VkPipelineStageFlags dstStageMask,
1018 bool byRegion,
1019 VkMemoryBarrier* barrier) const {
1020 SkASSERT(fCurrentCmdBuffer);
1021 fCurrentCmdBuffer->pipelineBarrier(this,
1022 srcStageMask,
1023 dstStageMask,
1024 byRegion,
1025 GrVkCommandBuffer::kMemory_BarrierType,
1026 barrier);
1027}
1028
1029void GrVkGpu::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
1030 VkPipelineStageFlags dstStageMask,
1031 bool byRegion,
1032 VkBufferMemoryBarrier* barrier) const {
1033 SkASSERT(fCurrentCmdBuffer);
1034 fCurrentCmdBuffer->pipelineBarrier(this,
1035 srcStageMask,
1036 dstStageMask,
1037 byRegion,
1038 GrVkCommandBuffer::kBufferMemory_BarrierType,
1039 barrier);
1040}
1041
1042void GrVkGpu::addImageMemoryBarrier(VkPipelineStageFlags srcStageMask,
1043 VkPipelineStageFlags dstStageMask,
1044 bool byRegion,
1045 VkImageMemoryBarrier* barrier) const {
1046 SkASSERT(fCurrentCmdBuffer);
1047 fCurrentCmdBuffer->pipelineBarrier(this,
1048 srcStageMask,
1049 dstStageMask,
1050 byRegion,
1051 GrVkCommandBuffer::kImageMemory_BarrierType,
1052 barrier);
1053}
1054
// Flushes all work recorded so far by submitting the current command buffer.
void GrVkGpu::finishDrawTarget() {
    // Submit the current command buffer to the Queue
    this->submitCommandBuffer(kSkip_SyncQueue);
}
1059
egdaniel3d5d9ac2016-03-01 12:56:15 -08001060void GrVkGpu::clearStencil(GrRenderTarget* target) {
1061 if (nullptr == target) {
1062 return;
1063 }
1064 GrStencilAttachment* stencil = target->renderTargetPriv().getStencilAttachment();
1065 GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
1066
1067
1068 VkClearDepthStencilValue vkStencilColor;
1069 memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
1070
egdaniel3d5d9ac2016-03-01 12:56:15 -08001071 vkStencil->setImageLayout(this,
1072 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
jvanverth50c46c72016-05-06 12:31:28 -07001073 VK_ACCESS_TRANSFER_WRITE_BIT,
1074 VK_PIPELINE_STAGE_TRANSFER_BIT,
egdaniel3d5d9ac2016-03-01 12:56:15 -08001075 false);
1076
egdaniel3d5d9ac2016-03-01 12:56:15 -08001077 VkImageSubresourceRange subRange;
1078 memset(&subRange, 0, sizeof(VkImageSubresourceRange));
1079 subRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
1080 subRange.baseMipLevel = 0;
1081 subRange.levelCount = 1;
1082 subRange.baseArrayLayer = 0;
1083 subRange.layerCount = 1;
1084
1085 // TODO: I imagine that most times we want to clear a stencil it will be at the beginning of a
1086 // draw. Thus we should look into using the load op functions on the render pass to clear out
1087 // the stencil there.
1088 fCurrentCmdBuffer->clearDepthStencilImage(this, vkStencil, &vkStencilColor, 1, &subRange);
1089}
1090
Greg Daniel164a9f02016-02-22 09:56:40 -05001091inline bool can_copy_image(const GrSurface* dst,
1092 const GrSurface* src,
1093 const GrVkGpu* gpu) {
egdaniel17b89252016-04-05 07:23:38 -07001094 // Currently we don't support msaa
1095 if ((dst->asRenderTarget() && dst->asRenderTarget()->numColorSamples() > 1) ||
1096 (src->asRenderTarget() && src->asRenderTarget()->numColorSamples() > 1)) {
1097 return false;
1098 }
1099
1100 // We require that all vulkan GrSurfaces have been created with transfer_dst and transfer_src
1101 // as image usage flags.
1102 if (src->origin() == dst->origin() &&
1103 GrBytesPerPixel(src->config()) == GrBytesPerPixel(dst->config())) {
Greg Daniel164a9f02016-02-22 09:56:40 -05001104 return true;
1105 }
1106
1107 // How does msaa play into this? If a VkTexture is multisampled, are we copying the multisampled
egdaniel17b89252016-04-05 07:23:38 -07001108 // or the resolved image here? Im multisampled, Vulkan requires sample counts to be the same.
Greg Daniel164a9f02016-02-22 09:56:40 -05001109
1110 return false;
1111}
1112
// Copies 'srcRect' from src into dst at 'dstPoint' using vkCmdCopyImage.
// Both images are transitioned to the required transfer layouts first, and
// rectangles are flipped when the surfaces use a bottom-left origin.
void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst,
                                     GrSurface* src,
                                     GrVkImage* dstImage,
                                     GrVkImage* srcImage,
                                     const SkIRect& srcRect,
                                     const SkIPoint& dstPoint) {
    SkASSERT(can_copy_image(dst, src, this));

    // These flags are for flushing/invalidating caches and for the dst image it doesn't matter if
    // the cache is flushed since it is only being written to.
    dstImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    srcImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                             VK_ACCESS_TRANSFER_READ_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    // Flip rect if necessary: Vulkan image coordinates are top-left, so a
    // bottom-left-origin surface needs its Y coordinates mirrored.
    SkIRect srcVkRect = srcRect;
    int32_t dstY = dstPoint.fY;

    if (kBottomLeft_GrSurfaceOrigin == src->origin()) {
        SkASSERT(kBottomLeft_GrSurfaceOrigin == dst->origin());
        srcVkRect.fTop = src->height() - srcRect.fBottom;
        srcVkRect.fBottom = src->height() - srcRect.fTop;
        dstY = dst->height() - dstPoint.fY - srcVkRect.height();
    }

    VkImageCopy copyRegion;
    memset(&copyRegion, 0, sizeof(VkImageCopy));
    copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.srcOffset = { srcVkRect.fLeft, srcVkRect.fTop, 0 };
    copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.dstOffset = { dstPoint.fX, dstY, 0 };
    // The depth value of the extent is ignored according the vulkan spec for 2D images. However, on
    // at least the nexus 5X it seems to be checking it. Thus as a working around we must have the
    // depth value be 1.
    copyRegion.extent = { (uint32_t)srcVkRect.width(), (uint32_t)srcVkRect.height(), 1 };

    fCurrentCmdBuffer->copyImage(this,
                                 srcImage,
                                 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                 dstImage,
                                 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                 1,
                                 &copyRegion);

    // Notify tracking that dst's contents changed in this rect.
    SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
                                        srcRect.width(), srcRect.height());
    this->didWriteToSurface(dst, &dstRect);
}
1169
egdaniel17b89252016-04-05 07:23:38 -07001170inline bool can_copy_as_blit(const GrSurface* dst,
1171 const GrSurface* src,
1172 const GrVkImage* dstImage,
1173 const GrVkImage* srcImage,
1174 const GrVkGpu* gpu) {
1175 // We require that all vulkan GrSurfaces have been created with transfer_dst and transfer_src
1176 // as image usage flags.
1177 const GrVkCaps& caps = gpu->vkCaps();
1178 if (!caps.configCanBeDstofBlit(dst->config(), dstImage->isLinearTiled()) ||
1179 !caps.configCanBeSrcofBlit(src->config(), srcImage->isLinearTiled())) {
1180 return false;
1181 }
1182
1183 // We cannot blit images that are multisampled. Will need to figure out if we can blit the
1184 // resolved msaa though.
1185 if ((dst->asRenderTarget() && dst->asRenderTarget()->numColorSamples() > 1) ||
1186 (src->asRenderTarget() && src->asRenderTarget()->numColorSamples() > 1)) {
1187 return false;
1188 }
1189
1190 return true;
1191}
1192
// Copies 'srcRect' from src into dst at 'dstPoint' using vkCmdBlitImage with
// nearest filtering (no scaling occurs). Handles bottom-left-origin surfaces
// by mirroring Y, and flips top/bottom of the dst rect when origins differ.
void GrVkGpu::copySurfaceAsBlit(GrSurface* dst,
                                GrSurface* src,
                                GrVkImage* dstImage,
                                GrVkImage* srcImage,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint) {
    SkASSERT(can_copy_as_blit(dst, src, dstImage, srcImage, this));

    // Transition both images into the layouts the blit command requires.
    dstImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    srcImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                             VK_ACCESS_TRANSFER_READ_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    // Flip rect if necessary (Vulkan coordinates are top-left).
    SkIRect srcVkRect;
    srcVkRect.fLeft = srcRect.fLeft;
    srcVkRect.fRight = srcRect.fRight;
    SkIRect dstRect;
    dstRect.fLeft = dstPoint.fX;
    dstRect.fRight = dstPoint.fX + srcRect.width();

    if (kBottomLeft_GrSurfaceOrigin == src->origin()) {
        srcVkRect.fTop = src->height() - srcRect.fBottom;
        srcVkRect.fBottom = src->height() - srcRect.fTop;
    } else {
        srcVkRect.fTop = srcRect.fTop;
        srcVkRect.fBottom = srcRect.fBottom;
    }

    if (kBottomLeft_GrSurfaceOrigin == dst->origin()) {
        dstRect.fTop = dst->height() - dstPoint.fY - srcVkRect.height();
    } else {
        dstRect.fTop = dstPoint.fY;
    }
    dstRect.fBottom = dstRect.fTop + srcVkRect.height();

    // If we have different origins, we need to flip the top and bottom of the dst rect so that we
    // get the correct origintation of the copied data.
    if (src->origin() != dst->origin()) {
        SkTSwap(dstRect.fTop, dstRect.fBottom);
    }

    VkImageBlit blitRegion;
    memset(&blitRegion, 0, sizeof(VkImageBlit));
    blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    blitRegion.srcOffsets[0] = { srcVkRect.fLeft, srcVkRect.fTop, 0 };
    blitRegion.srcOffsets[1] = { srcVkRect.fRight, srcVkRect.fBottom, 0 };
    blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    blitRegion.dstOffsets[0] = { dstRect.fLeft, dstRect.fTop, 0 };
    blitRegion.dstOffsets[1] = { dstRect.fRight, dstRect.fBottom, 0 };

    fCurrentCmdBuffer->blitImage(this,
                                 *srcImage,
                                 *dstImage,
                                 1,
                                 &blitRegion,
                                 VK_FILTER_NEAREST); // We never scale so any filter works here

    // Notify tracking that dst's contents changed in this rect.
    this->didWriteToSurface(dst, &dstRect);
}
1260
// Draw-based surface copies are not implemented for Vulkan yet, so this
// strategy is never selected.
inline bool can_copy_as_draw(const GrSurface* dst,
                             const GrSurface* src,
                             const GrVkGpu* gpu) {
    return false;
}
1266
// Unimplemented: can_copy_as_draw() always returns false, so reaching this
// is a programming error.
void GrVkGpu::copySurfaceAsDraw(GrSurface* dst,
                                GrSurface* src,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint) {
    SkASSERT(false);
}
1273
1274bool GrVkGpu::onCopySurface(GrSurface* dst,
1275 GrSurface* src,
1276 const SkIRect& srcRect,
1277 const SkIPoint& dstPoint) {
egdaniel17b89252016-04-05 07:23:38 -07001278 GrVkImage* dstImage;
1279 GrVkImage* srcImage;
1280 if (dst->asTexture()) {
1281 dstImage = static_cast<GrVkTexture*>(dst->asTexture());
1282 } else {
1283 SkASSERT(dst->asRenderTarget());
1284 dstImage = static_cast<GrVkRenderTarget*>(dst->asRenderTarget());
1285 }
1286 if (src->asTexture()) {
1287 srcImage = static_cast<GrVkTexture*>(src->asTexture());
1288 } else {
1289 SkASSERT(src->asRenderTarget());
1290 srcImage = static_cast<GrVkRenderTarget*>(src->asRenderTarget());
1291 }
1292
Greg Daniel164a9f02016-02-22 09:56:40 -05001293 if (can_copy_image(dst, src, this)) {
egdaniel17b89252016-04-05 07:23:38 -07001294 this->copySurfaceAsCopyImage(dst, src, dstImage, srcImage, srcRect, dstPoint);
1295 return true;
1296 }
1297
1298 if (can_copy_as_blit(dst, src, dstImage, srcImage, this)) {
1299 this->copySurfaceAsBlit(dst, src, dstImage, srcImage, srcRect, dstPoint);
Greg Daniel164a9f02016-02-22 09:56:40 -05001300 return true;
1301 }
1302
1303 if (can_copy_as_draw(dst, src, this)) {
1304 this->copySurfaceAsDraw(dst, src, srcRect, dstPoint);
1305 return true;
1306 }
1307
1308 return false;
1309}
1310
egdaniel37798fb2016-04-12 07:31:49 -07001311bool GrVkGpu::initCopySurfaceDstDesc(const GrSurface* src, GrSurfaceDesc* desc) const {
1312 // Currently we don't support msaa
1313 if (src->asRenderTarget() && src->asRenderTarget()->numColorSamples() > 1) {
1314 return false;
1315 }
1316
1317 // This will support copying the dst as CopyImage since all of our surfaces require transferSrc
1318 // and transferDst usage flags in Vulkan.
1319 desc->fOrigin = src->origin();
1320 desc->fConfig = src->config();
1321 desc->fFlags = kNone_GrSurfaceFlags;
1322 return true;
1323}
1324
// Reports multisample information for 'rt'. Sample locations are not supported
// (the out array is left untouched); only the effective sample count is set.
void GrVkGpu::onGetMultisampleSpecs(GrRenderTarget* rt, const GrStencilSettings&,
                                    int* effectiveSampleCnt, SkAutoTDeleteArray<SkPoint>*) {
    // TODO: stub.
    SkASSERT(!this->caps()->sampleLocationsSupport());
    *effectiveSampleCnt = rt->desc().fSampleCnt;
}
1331
Greg Daniel164a9f02016-02-22 09:56:40 -05001332bool GrVkGpu::onGetReadPixelsInfo(GrSurface* srcSurface, int width, int height, size_t rowBytes,
1333 GrPixelConfig readConfig, DrawPreference* drawPreference,
1334 ReadPixelTempDrawInfo* tempDrawInfo) {
1335 // Currently we don't handle draws, so if the caller wants/needs to do a draw we need to fail
1336 if (kNoDraw_DrawPreference != *drawPreference) {
1337 return false;
1338 }
1339
1340 if (srcSurface->config() != readConfig) {
1341 // TODO: This should fall back to drawing or copying to change config of srcSurface to match
1342 // that of readConfig.
1343 return false;
1344 }
1345
1346 return true;
1347}
1348
1349bool GrVkGpu::onReadPixels(GrSurface* surface,
1350 int left, int top, int width, int height,
1351 GrPixelConfig config,
1352 void* buffer,
1353 size_t rowBytes) {
1354 VkFormat pixelFormat;
1355 if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
1356 return false;
1357 }
1358
1359 GrVkTexture* tgt = static_cast<GrVkTexture*>(surface->asTexture());
1360 if (!tgt) {
1361 return false;
1362 }
1363
1364 // Change layout of our target so it can be used as copy
Greg Daniel164a9f02016-02-22 09:56:40 -05001365 tgt->setImageLayout(this,
1366 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
jvanverth50c46c72016-05-06 12:31:28 -07001367 VK_ACCESS_TRANSFER_READ_BIT,
1368 VK_PIPELINE_STAGE_TRANSFER_BIT,
Greg Daniel164a9f02016-02-22 09:56:40 -05001369 false);
1370
halcanary9d524f22016-03-29 09:03:52 -07001371 GrVkTransferBuffer* transferBuffer =
cdaltone2e71c22016-04-07 18:13:29 -07001372 static_cast<GrVkTransferBuffer*>(this->createBuffer(rowBytes * height,
1373 kXferGpuToCpu_GrBufferType,
cdalton397536c2016-03-25 12:15:03 -07001374 kStream_GrAccessPattern));
Greg Daniel164a9f02016-02-22 09:56:40 -05001375
1376 bool flipY = kBottomLeft_GrSurfaceOrigin == surface->origin();
1377 VkOffset3D offset = {
1378 left,
1379 flipY ? surface->height() - top - height : top,
1380 0
1381 };
1382
1383 // Copy the image to a buffer so we can map it to cpu memory
1384 VkBufferImageCopy region;
1385 memset(&region, 0, sizeof(VkBufferImageCopy));
1386 region.bufferOffset = 0;
1387 region.bufferRowLength = 0; // Forces RowLength to be imageExtent.width
1388 region.bufferImageHeight = 0; // Forces height to be tightly packed. Only useful for 3d images.
1389 region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
1390 region.imageOffset = offset;
1391 region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };
1392
1393 fCurrentCmdBuffer->copyImageToBuffer(this,
1394 tgt,
1395 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
1396 transferBuffer,
1397 1,
1398 &region);
1399
1400 // make sure the copy to buffer has finished
1401 transferBuffer->addMemoryBarrier(this,
1402 VK_ACCESS_TRANSFER_WRITE_BIT,
1403 VK_ACCESS_HOST_READ_BIT,
1404 VK_PIPELINE_STAGE_TRANSFER_BIT,
1405 VK_PIPELINE_STAGE_HOST_BIT,
1406 false);
1407
1408 // We need to submit the current command buffer to the Queue and make sure it finishes before
1409 // we can copy the data out of the buffer.
1410 this->submitCommandBuffer(kForce_SyncQueue);
1411
1412 void* mappedMemory = transferBuffer->map();
1413
1414 memcpy(buffer, mappedMemory, rowBytes*height);
1415
1416 transferBuffer->unmap();
1417 transferBuffer->unref();
1418
1419 if (flipY) {
1420 SkAutoSMalloc<32 * sizeof(GrColor)> scratch;
1421 size_t tightRowBytes = GrBytesPerPixel(config) * width;
1422 scratch.reset(tightRowBytes);
1423 void* tmpRow = scratch.get();
1424 // flip y in-place by rows
1425 const int halfY = height >> 1;
1426 char* top = reinterpret_cast<char*>(buffer);
1427 char* bottom = top + (height - 1) * rowBytes;
1428 for (int y = 0; y < halfY; y++) {
1429 memcpy(tmpRow, top, tightRowBytes);
1430 memcpy(top, bottom, tightRowBytes);
1431 memcpy(bottom, tmpRow, tightRowBytes);
1432 top += rowBytes;
1433 bottom -= rowBytes;
1434 }
1435 }
1436
1437 return true;
1438}
egdaniel066df7c2016-06-08 14:02:27 -07001439
egdaniel9cb63402016-06-23 08:37:05 -07001440void GrVkGpu::submitSecondaryCommandBuffer(const GrVkSecondaryCommandBuffer* buffer,
1441 const GrVkRenderPass* renderPass,
1442 const VkClearValue* colorClear,
1443 GrVkRenderTarget* target,
1444 const SkIRect& bounds) {
1445 // Currently it is fine for us to always pass in 1 for the clear count even if no attachment
1446 // uses it. In the current state, we also only use the LOAD_OP_CLEAR for the color attachment
1447 // which is always at the first attachment.
1448 fCurrentCmdBuffer->beginRenderPass(this, renderPass, 1, colorClear, *target, bounds, true);
egdaniel066df7c2016-06-08 14:02:27 -07001449 fCurrentCmdBuffer->executeCommands(this, buffer);
Greg Daniel164a9f02016-02-22 09:56:40 -05001450 fCurrentCmdBuffer->endRenderPass(this);
Greg Daniel164a9f02016-02-22 09:56:40 -05001451}
egdaniel9cb63402016-06-23 08:37:05 -07001452