blob: 8716c63e6575c4c68123902fb739a2c1bbc80694 [file] [log] [blame]
Greg Daniel164a9f02016-02-22 09:56:40 -05001/*
2 * Copyright 2015 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8#include "GrVkGpu.h"
9
10#include "GrContextOptions.h"
11#include "GrGeometryProcessor.h"
12#include "GrGpuResourceCacheAccess.h"
egdaniel0e1853c2016-03-17 11:35:45 -070013#include "GrMesh.h"
Greg Daniel164a9f02016-02-22 09:56:40 -050014#include "GrPipeline.h"
15#include "GrRenderTargetPriv.h"
16#include "GrSurfacePriv.h"
17#include "GrTexturePriv.h"
Greg Daniel164a9f02016-02-22 09:56:40 -050018
19#include "GrVkCommandBuffer.h"
egdaniel066df7c2016-06-08 14:02:27 -070020#include "GrVkGpuCommandBuffer.h"
Greg Daniel164a9f02016-02-22 09:56:40 -050021#include "GrVkImage.h"
22#include "GrVkIndexBuffer.h"
23#include "GrVkMemory.h"
24#include "GrVkPipeline.h"
egdaniel22281c12016-03-23 13:49:40 -070025#include "GrVkPipelineState.h"
Greg Daniel164a9f02016-02-22 09:56:40 -050026#include "GrVkRenderPass.h"
27#include "GrVkResourceProvider.h"
28#include "GrVkTexture.h"
29#include "GrVkTextureRenderTarget.h"
30#include "GrVkTransferBuffer.h"
31#include "GrVkVertexBuffer.h"
32
33#include "SkConfig8888.h"
jvanverth900bd4a2016-04-29 13:53:12 -070034#include "SkMipMap.h"
Greg Daniel164a9f02016-02-22 09:56:40 -050035
36#include "vk/GrVkInterface.h"
jvanverthfd359ca2016-03-18 11:57:24 -070037#include "vk/GrVkTypes.h"
Greg Daniel164a9f02016-02-22 09:56:40 -050038
39#define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
40#define VK_CALL_RET(RET, X) GR_VK_CALL_RET(this->vkInterface(), RET, X)
41#define VK_CALL_ERRCHECK(X) GR_VK_CALL_ERRCHECK(this->vkInterface(), X)
42
jvanverthd2497f32016-03-18 12:39:05 -070043#ifdef ENABLE_VK_LAYERS
44VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(
45 VkDebugReportFlagsEXT flags,
46 VkDebugReportObjectTypeEXT objectType,
47 uint64_t object,
48 size_t location,
49 int32_t messageCode,
50 const char* pLayerPrefix,
51 const char* pMessage,
52 void* pUserData) {
53 if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
54 SkDebugf("Vulkan error [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
55 } else if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) {
56 SkDebugf("Vulkan warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
57 } else if (flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) {
58 SkDebugf("Vulkan perf warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
59 } else {
60 SkDebugf("Vulkan info/debug [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
61 }
62 return VK_FALSE;
63}
jvanverthd2497f32016-03-18 12:39:05 -070064#endif
65
jvanverth633b3562016-03-23 11:01:22 -070066GrGpu* GrVkGpu::Create(GrBackendContext backendContext, const GrContextOptions& options,
67 GrContext* context) {
bsalomondc0fcd42016-04-11 14:21:33 -070068 const GrVkBackendContext* vkBackendContext =
69 reinterpret_cast<const GrVkBackendContext*>(backendContext);
jvanverth633b3562016-03-23 11:01:22 -070070 if (!vkBackendContext) {
bsalomondc0fcd42016-04-11 14:21:33 -070071 vkBackendContext = GrVkBackendContext::Create();
jvanverth633b3562016-03-23 11:01:22 -070072 if (!vkBackendContext) {
73 return nullptr;
Greg Daniel164a9f02016-02-22 09:56:40 -050074 }
jvanverth633b3562016-03-23 11:01:22 -070075 } else {
76 vkBackendContext->ref();
Greg Daniel164a9f02016-02-22 09:56:40 -050077 }
78
jvanverth633b3562016-03-23 11:01:22 -070079 return new GrVkGpu(context, options, vkBackendContext);
Greg Daniel164a9f02016-02-22 09:56:40 -050080}
81
82////////////////////////////////////////////////////////////////////////////////
83
// Constructor. Takes over the ref on 'backendCtx' passed by Create() (reset
// into fBackendContext). Initialization order matters: debug callback (if the
// extension is present), shaderc compiler, caps, memory properties, command
// pool, resource provider + first primary command buffer, then the heaps.
GrVkGpu::GrVkGpu(GrContext* context, const GrContextOptions& options,
                 const GrVkBackendContext* backendCtx)
    : INHERITED(context)
    , fDevice(backendCtx->fDevice)
    , fQueue(backendCtx->fQueue)
    , fResourceProvider(this) {
    fBackendContext.reset(backendCtx);

#ifdef ENABLE_VK_LAYERS
    fCallback = VK_NULL_HANDLE;
    if (backendCtx->fExtensions & kEXT_debug_report_GrVkExtensionFlag) {
        // Setup callback creation information. Info/debug severities are
        // compiled out; re-enable by uncommenting the two flag lines below.
        VkDebugReportCallbackCreateInfoEXT callbackCreateInfo;
        callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
        callbackCreateInfo.pNext = nullptr;
        callbackCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT |
                                   VK_DEBUG_REPORT_WARNING_BIT_EXT |
                                   //VK_DEBUG_REPORT_INFORMATION_BIT_EXT |
                                   //VK_DEBUG_REPORT_DEBUG_BIT_EXT |
                                   VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
        callbackCreateInfo.pfnCallback = &DebugReportCallback;
        callbackCreateInfo.pUserData = nullptr;

        // Register the callback with the instance.
        GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateDebugReportCallbackEXT(
                            backendCtx->fInstance, &callbackCreateInfo, nullptr, &fCallback));
    }
#endif

    fCompiler = shaderc_compiler_initialize();

    fVkCaps.reset(new GrVkCaps(options, this->vkInterface(), backendCtx->fPhysicalDevice,
                               backendCtx->fFeatures, backendCtx->fExtensions));
    fCaps.reset(SkRef(fVkCaps.get()));

    VK_CALL(GetPhysicalDeviceMemoryProperties(backendCtx->fPhysicalDevice, &fPhysDevMemProps));

    // TRANSIENT: command buffers from this pool are short-lived and re-created
    // after each submit (see submitCommandBuffer()).
    const VkCommandPoolCreateInfo cmdPoolInfo = {
        VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType
        nullptr, // pNext
        VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, // CmdPoolCreateFlags
        backendCtx->fGraphicsQueueIndex, // queueFamilyIndex
    };
    GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateCommandPool(fDevice, &cmdPoolInfo, nullptr,
                                                               &fCmdPool));

    // must call this after creating the CommandPool
    fResourceProvider.init();
    fCurrentCmdBuffer = fResourceProvider.createPrimaryCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->begin(this);

    // set up our heaps
    fHeaps[kLinearImage_Heap].reset(new GrVkHeap(this, GrVkHeap::kSubAlloc_Strategy, 16*1024*1024));
    // We want the OptimalImage_Heap to use a SubAlloc_strategy but it occasionally causes the
    // device to run out of memory. Most likely this is caused by fragmentation in the device heap
    // and we can't allocate more. Until we get a fix moving this to SingleAlloc.
    fHeaps[kOptimalImage_Heap].reset(new GrVkHeap(this, GrVkHeap::kSingleAlloc_Strategy, 64*1024*1024));
    fHeaps[kSmallOptimalImage_Heap].reset(new GrVkHeap(this, GrVkHeap::kSubAlloc_Strategy, 2*1024*1024));
    fHeaps[kVertexBuffer_Heap].reset(new GrVkHeap(this, GrVkHeap::kSingleAlloc_Strategy, 0));
    fHeaps[kIndexBuffer_Heap].reset(new GrVkHeap(this, GrVkHeap::kSingleAlloc_Strategy, 0));
    fHeaps[kUniformBuffer_Heap].reset(new GrVkHeap(this, GrVkHeap::kSubAlloc_Strategy, 64*1024));
    fHeaps[kCopyReadBuffer_Heap].reset(new GrVkHeap(this, GrVkHeap::kSingleAlloc_Strategy, 0));
    fHeaps[kCopyWriteBuffer_Heap].reset(new GrVkHeap(this, GrVkHeap::kSubAlloc_Strategy, 16*1024*1024));
}
149
// Destructor. Tears down roughly in reverse order of construction: finish and
// drop the in-flight command buffer, drain the queue, destroy all tracked
// resources, then the command pool, the shaderc compiler, and (in layer
// builds) the debug-report callback. fBackendContext is released by its
// smart pointer after this body runs.
GrVkGpu::~GrVkGpu() {
    fCurrentCmdBuffer->end(this);
    fCurrentCmdBuffer->unref(this);

    // wait for all commands to finish
    fResourceProvider.checkCommandBuffers();
    SkDEBUGCODE(VkResult res = ) VK_CALL(QueueWaitIdle(fQueue));

    // On windows, sometimes calls to QueueWaitIdle return before actually signalling the fences
    // on the command buffers even though they have completed. This causes an assert to fire when
    // destroying the command buffers. Currently this only seems to happen on windows, so we add a
    // sleep to make sure the fence signals.
#ifdef SK_DEBUG
#if defined(SK_BUILD_FOR_WIN)
    Sleep(10); // In milliseconds
#else
    // Uncomment if above bug happens on non windows build.
    // sleep(1); // In seconds
#endif
#endif

    // VK_ERROR_DEVICE_LOST is acceptable when tearing down (see 4.2.4 in spec)
    SkASSERT(VK_SUCCESS == res || VK_ERROR_DEVICE_LOST == res);

    // must call this just before we destroy the VkDevice
    fResourceProvider.destroyResources();

    VK_CALL(DestroyCommandPool(fDevice, fCmdPool, nullptr));

    shaderc_compiler_release(fCompiler);

#ifdef ENABLE_VK_LAYERS
    if (fCallback) {
        VK_CALL(DestroyDebugReportCallbackEXT(fBackendContext->fInstance, fCallback, nullptr));
        fCallback = VK_NULL_HANDLE;
    }
#endif
}
188
189///////////////////////////////////////////////////////////////////////////////
190
egdaniel9cb63402016-06-23 08:37:05 -0700191GrGpuCommandBuffer* GrVkGpu::createCommandBuffer(
192 GrRenderTarget* target,
193 const GrGpuCommandBuffer::LoadAndStoreInfo& colorInfo,
194 const GrGpuCommandBuffer::LoadAndStoreInfo& stencilInfo) {
195 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
196 return new GrVkGpuCommandBuffer(this, vkRT, colorInfo, stencilInfo);
egdaniel066df7c2016-06-08 14:02:27 -0700197}
198
// Ends the current primary command buffer, submits it to the queue (blocking
// until completion when sync == kForce_SyncQueue), then replaces it with a
// fresh primary command buffer left in the recording state. The statement
// order here is load-bearing: end -> submit -> recycle -> begin.
void GrVkGpu::submitCommandBuffer(SyncQueue sync) {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->end(this);

    fCurrentCmdBuffer->submitToQueue(this, fQueue, sync);
    // Give the resource provider a chance to reclaim buffers whose fences
    // have signaled.
    fResourceProvider.checkCommandBuffers();

    // Release old command buffer and create a new one
    fCurrentCmdBuffer->unref(this);
    fCurrentCmdBuffer = fResourceProvider.createPrimaryCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);

    fCurrentCmdBuffer->begin(this);
}
213
214///////////////////////////////////////////////////////////////////////////////
cdalton1bf3e712016-04-19 10:00:02 -0700215GrBuffer* GrVkGpu::onCreateBuffer(size_t size, GrBufferType type, GrAccessPattern accessPattern,
216 const void* data) {
217 GrBuffer* buff;
cdalton397536c2016-03-25 12:15:03 -0700218 switch (type) {
219 case kVertex_GrBufferType:
220 SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
221 kStatic_GrAccessPattern == accessPattern);
cdalton1bf3e712016-04-19 10:00:02 -0700222 buff = GrVkVertexBuffer::Create(this, size, kDynamic_GrAccessPattern == accessPattern);
egdaniele05bbbb2016-04-19 12:13:41 -0700223 break;
cdalton397536c2016-03-25 12:15:03 -0700224 case kIndex_GrBufferType:
225 SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
226 kStatic_GrAccessPattern == accessPattern);
cdalton1bf3e712016-04-19 10:00:02 -0700227 buff = GrVkIndexBuffer::Create(this, size, kDynamic_GrAccessPattern == accessPattern);
egdaniele05bbbb2016-04-19 12:13:41 -0700228 break;
cdalton397536c2016-03-25 12:15:03 -0700229 case kXferCpuToGpu_GrBufferType:
jvanverthc3d706f2016-04-20 10:33:27 -0700230 SkASSERT(kStream_GrAccessPattern == accessPattern);
cdalton1bf3e712016-04-19 10:00:02 -0700231 buff = GrVkTransferBuffer::Create(this, size, GrVkBuffer::kCopyRead_Type);
egdaniele05bbbb2016-04-19 12:13:41 -0700232 break;
cdalton397536c2016-03-25 12:15:03 -0700233 case kXferGpuToCpu_GrBufferType:
jvanverthc3d706f2016-04-20 10:33:27 -0700234 SkASSERT(kStream_GrAccessPattern == accessPattern);
cdalton1bf3e712016-04-19 10:00:02 -0700235 buff = GrVkTransferBuffer::Create(this, size, GrVkBuffer::kCopyWrite_Type);
egdaniele05bbbb2016-04-19 12:13:41 -0700236 break;
cdalton397536c2016-03-25 12:15:03 -0700237 default:
238 SkFAIL("Unknown buffer type.");
239 return nullptr;
240 }
cdalton1bf3e712016-04-19 10:00:02 -0700241 if (data && buff) {
242 buff->updateData(data, size);
243 }
244 return buff;
Greg Daniel164a9f02016-02-22 09:56:40 -0500245}
246
247////////////////////////////////////////////////////////////////////////////////
248bool GrVkGpu::onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
249 GrPixelConfig srcConfig, DrawPreference* drawPreference,
250 WritePixelTempDrawInfo* tempDrawInfo) {
251 if (kIndex_8_GrPixelConfig == srcConfig || GrPixelConfigIsCompressed(dstSurface->config())) {
252 return false;
253 }
254
egdaniel4583ec52016-06-27 12:57:00 -0700255 GrRenderTarget* renderTarget = dstSurface->asRenderTarget();
256
257 // Start off assuming no swizzling
258 tempDrawInfo->fSwizzle = GrSwizzle::RGBA();
259 tempDrawInfo->fWriteConfig = srcConfig;
260
261 // These settings we will always want if a temp draw is performed. Initially set the config
262 // to srcConfig, though that may be modified if we decide to do a R/B swap
263 tempDrawInfo->fTempSurfaceDesc.fFlags = kNone_GrSurfaceFlags;
264 tempDrawInfo->fTempSurfaceDesc.fConfig = srcConfig;
265 tempDrawInfo->fTempSurfaceDesc.fWidth = width;
266 tempDrawInfo->fTempSurfaceDesc.fHeight = height;
267 tempDrawInfo->fTempSurfaceDesc.fSampleCnt = 0;
268 tempDrawInfo->fTempSurfaceDesc.fOrigin = kTopLeft_GrSurfaceOrigin;
269
egdanield66110f2016-06-28 13:38:26 -0700270 if (dstSurface->config() == srcConfig) {
271 return true;
272 }
273
egdaniel4583ec52016-06-27 12:57:00 -0700274 if (renderTarget && this->vkCaps().isConfigRenderable(renderTarget->config(), false)) {
275 ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
276
277 bool configsAreRBSwaps = GrPixelConfigSwapRAndB(srcConfig) == dstSurface->config();
278
279 if (!this->vkCaps().isConfigTexturable(srcConfig) && configsAreRBSwaps) {
280 if (!this->vkCaps().isConfigTexturable(dstSurface->config())) {
281 return false;
282 }
283 tempDrawInfo->fTempSurfaceDesc.fConfig = dstSurface->config();
284 tempDrawInfo->fSwizzle = GrSwizzle::BGRA();
285 tempDrawInfo->fWriteConfig = dstSurface->config();
286 }
287 return true;
Greg Daniel164a9f02016-02-22 09:56:40 -0500288 }
289
egdaniel4583ec52016-06-27 12:57:00 -0700290 return false;
Greg Daniel164a9f02016-02-22 09:56:40 -0500291}
292
// Writes the supplied mip levels into 'surface' (which must have a texture).
// Dispatches to the linear-tiling host-write path or the optimal-tiling
// staging-buffer path; compressed textures are not yet supported.
// Returns true on success.
bool GrVkGpu::onWritePixels(GrSurface* surface,
                            int left, int top, int width, int height,
                            GrPixelConfig config,
                            const SkTArray<GrMipLevel>& texels) {
    GrVkTexture* vkTex = static_cast<GrVkTexture*>(surface->asTexture());
    if (!vkTex) {
        return false;
    }

    // Make sure we have at least the base level
    if (texels.empty() || !texels.begin()->fPixels) {
        return false;
    }

    // We assume Vulkan doesn't do sRGB <-> linear conversions when reading and writing pixels.
    if (GrPixelConfigIsSRGB(surface->config()) != GrPixelConfigIsSRGB(config)) {
        return false;
    }

    bool success = false;
    if (GrPixelConfigIsCompressed(vkTex->desc().fConfig)) {
        // We check that config == desc.fConfig in GrGpu::getWritePixelsInfo()
        SkASSERT(config == vkTex->desc().fConfig);
        // TODO: add compressed texture support
        // delete the following two lines and uncomment the two after that when ready
        // NOTE(review): asTexture() above does not appear to add a ref, so this
        // unref looks unbalanced -- confirm ownership before shipping changes here.
        vkTex->unref();
        return false;
        //success = this->uploadCompressedTexData(vkTex->desc(), buffer, false, left, top, width,
        //                                       height);
    } else {
        bool linearTiling = vkTex->isLinearTiled();
        if (linearTiling) {
            // Linear tiling: host writes only, and only a single (base) level.
            if (texels.count() > 1) {
                SkDebugf("Can't upload mipmap data to linear tiled texture");
                return false;
            }
            if (VK_IMAGE_LAYOUT_PREINITIALIZED != vkTex->currentLayout()) {
                // Need to change the layout to general in order to perform a host write
                vkTex->setImageLayout(this,
                                      VK_IMAGE_LAYOUT_GENERAL,
                                      VK_ACCESS_HOST_WRITE_BIT,
                                      VK_PIPELINE_STAGE_HOST_BIT,
                                      false);
                // Flush the layout transition before touching memory on the host.
                this->submitCommandBuffer(kForce_SyncQueue);
            }
            success = this->uploadTexDataLinear(vkTex, left, top, width, height, config,
                                                texels.begin()->fPixels, texels.begin()->fRowBytes);
        } else {
            // Optimal tiling: reallocate if the incoming mip count differs
            // from the texture's current mip chain, then stage-and-copy.
            int newMipLevels = texels.count();
            int currentMipLevels = vkTex->texturePriv().maxMipMapLevel() + 1;
            if (newMipLevels != currentMipLevels) {
                if (!vkTex->reallocForMipmap(this, newMipLevels)) {
                    return false;
                }
            }
            success = this->uploadTexDataOptimal(vkTex, left, top, width, height, config, texels);
        }
    }

    return success;
}
354
jvanverth900bd4a2016-04-29 13:53:12 -0700355bool GrVkGpu::uploadTexDataLinear(GrVkTexture* tex,
356 int left, int top, int width, int height,
357 GrPixelConfig dataConfig,
358 const void* data,
359 size_t rowBytes) {
Greg Daniel164a9f02016-02-22 09:56:40 -0500360 SkASSERT(data);
jvanverth900bd4a2016-04-29 13:53:12 -0700361 SkASSERT(tex->isLinearTiled());
Greg Daniel164a9f02016-02-22 09:56:40 -0500362
363 // If we're uploading compressed data then we should be using uploadCompressedTexData
364 SkASSERT(!GrPixelConfigIsCompressed(dataConfig));
365
Greg Daniel164a9f02016-02-22 09:56:40 -0500366 size_t bpp = GrBytesPerPixel(dataConfig);
367
368 const GrSurfaceDesc& desc = tex->desc();
369
370 if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
371 &width, &height, &data, &rowBytes)) {
372 return false;
373 }
374 size_t trimRowBytes = width * bpp;
375
jvanverth900bd4a2016-04-29 13:53:12 -0700376 SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == tex->currentLayout() ||
377 VK_IMAGE_LAYOUT_GENERAL == tex->currentLayout());
378 const VkImageSubresource subres = {
379 VK_IMAGE_ASPECT_COLOR_BIT,
380 0, // mipLevel
381 0, // arraySlice
382 };
383 VkSubresourceLayout layout;
384 VkResult err;
Greg Daniel164a9f02016-02-22 09:56:40 -0500385
jvanverth900bd4a2016-04-29 13:53:12 -0700386 const GrVkInterface* interface = this->vkInterface();
Greg Daniel164a9f02016-02-22 09:56:40 -0500387
jvanverth900bd4a2016-04-29 13:53:12 -0700388 GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
egdanielb2df0c22016-05-13 11:30:37 -0700389 tex->image(),
jvanverth900bd4a2016-04-29 13:53:12 -0700390 &subres,
391 &layout));
Greg Daniel164a9f02016-02-22 09:56:40 -0500392
jvanverth900bd4a2016-04-29 13:53:12 -0700393 int texTop = kBottomLeft_GrSurfaceOrigin == desc.fOrigin ? tex->height() - top - height : top;
jvanverth1e305ba2016-06-01 09:39:15 -0700394 const GrVkAlloc& alloc = tex->alloc();
395 VkDeviceSize offset = alloc.fOffset + texTop*layout.rowPitch + left*bpp;
jvanverth900bd4a2016-04-29 13:53:12 -0700396 VkDeviceSize size = height*layout.rowPitch;
397 void* mapPtr;
jvanverth1e305ba2016-06-01 09:39:15 -0700398 err = GR_VK_CALL(interface, MapMemory(fDevice, alloc.fMemory, offset, size, 0, &mapPtr));
jvanverth900bd4a2016-04-29 13:53:12 -0700399 if (err) {
400 return false;
401 }
402
403 if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
404 // copy into buffer by rows
405 const char* srcRow = reinterpret_cast<const char*>(data);
406 char* dstRow = reinterpret_cast<char*>(mapPtr)+(height - 1)*layout.rowPitch;
407 for (int y = 0; y < height; y++) {
408 memcpy(dstRow, srcRow, trimRowBytes);
409 srcRow += rowBytes;
410 dstRow -= layout.rowPitch;
411 }
412 } else {
413 // If there is no padding on the src (rowBytes) or dst (layout.rowPitch) we can memcpy
414 if (trimRowBytes == rowBytes && trimRowBytes == layout.rowPitch) {
415 memcpy(mapPtr, data, trimRowBytes * height);
416 } else {
egdaniel88e8aef2016-06-27 14:34:55 -0700417 SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), data, rowBytes, trimRowBytes,
418 height);
jvanverth900bd4a2016-04-29 13:53:12 -0700419 }
420 }
421
jvanverth1e305ba2016-06-01 09:39:15 -0700422 GR_VK_CALL(interface, UnmapMemory(fDevice, alloc.fMemory));
jvanverth900bd4a2016-04-29 13:53:12 -0700423
424 return true;
425}
426
427bool GrVkGpu::uploadTexDataOptimal(GrVkTexture* tex,
jvanvertha584de92016-06-30 09:10:52 -0700428 int left, int top, int width, int height,
429 GrPixelConfig dataConfig,
430 const SkTArray<GrMipLevel>& texels) {
jvanverth900bd4a2016-04-29 13:53:12 -0700431 SkASSERT(!tex->isLinearTiled());
432 // The assumption is either that we have no mipmaps, or that our rect is the entire texture
433 SkASSERT(1 == texels.count() ||
434 (0 == left && 0 == top && width == tex->width() && height == tex->height()));
435
436 // If we're uploading compressed data then we should be using uploadCompressedTexData
437 SkASSERT(!GrPixelConfigIsCompressed(dataConfig));
438
439 if (width == 0 || height == 0) {
440 return false;
441 }
442
443 const GrSurfaceDesc& desc = tex->desc();
444 SkASSERT(this->caps()->isConfigTexturable(desc.fConfig));
445 size_t bpp = GrBytesPerPixel(dataConfig);
446
447 // texels is const.
jvanverthc578b0632016-05-02 10:58:12 -0700448 // But we may need to adjust the fPixels ptr based on the copyRect, or fRowBytes.
449 // Because of this we need to make a non-const shallow copy of texels.
450 SkTArray<GrMipLevel> texelsShallowCopy(texels);
jvanverth900bd4a2016-04-29 13:53:12 -0700451
jvanverthc578b0632016-05-02 10:58:12 -0700452 for (int currentMipLevel = texelsShallowCopy.count() - 1; currentMipLevel >= 0;
453 currentMipLevel--) {
454 SkASSERT(texelsShallowCopy[currentMipLevel].fPixels);
Greg Daniel164a9f02016-02-22 09:56:40 -0500455 }
456
jvanverth900bd4a2016-04-29 13:53:12 -0700457 // Determine whether we need to flip when we copy into the buffer
jvanverthc578b0632016-05-02 10:58:12 -0700458 bool flipY = (kBottomLeft_GrSurfaceOrigin == desc.fOrigin && !texelsShallowCopy.empty());
jvanverth900bd4a2016-04-29 13:53:12 -0700459
jvanverthc578b0632016-05-02 10:58:12 -0700460 // adjust any params (left, top, currentWidth, currentHeight
jvanverth900bd4a2016-04-29 13:53:12 -0700461 // find the combined size of all the mip levels and the relative offset of
462 // each into the collective buffer
jvanverthc578b0632016-05-02 10:58:12 -0700463 // Do the first level separately because we may need to adjust width and height
464 // (for the non-mipped case).
465 if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
466 &width,
467 &height,
468 &texelsShallowCopy[0].fPixels,
469 &texelsShallowCopy[0].fRowBytes)) {
470 return false;
471 }
472 SkTArray<size_t> individualMipOffsets(texelsShallowCopy.count());
473 individualMipOffsets.push_back(0);
474 size_t combinedBufferSize = width * bpp * height;
475 int currentWidth = width;
476 int currentHeight = height;
477 for (int currentMipLevel = 1; currentMipLevel < texelsShallowCopy.count(); currentMipLevel++) {
478 currentWidth = SkTMax(1, currentWidth/2);
479 currentHeight = SkTMax(1, currentHeight/2);
480 if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
481 &currentWidth,
482 &currentHeight,
483 &texelsShallowCopy[currentMipLevel].fPixels,
484 &texelsShallowCopy[currentMipLevel].fRowBytes)) {
485 return false;
486 }
jvanverth900bd4a2016-04-29 13:53:12 -0700487 const size_t trimmedSize = currentWidth * bpp * currentHeight;
488 individualMipOffsets.push_back(combinedBufferSize);
489 combinedBufferSize += trimmedSize;
490 }
491
492 // allocate buffer to hold our mip data
493 GrVkTransferBuffer* transferBuffer =
494 GrVkTransferBuffer::Create(this, combinedBufferSize, GrVkBuffer::kCopyRead_Type);
495
496 char* buffer = (char*) transferBuffer->map();
jvanverthc578b0632016-05-02 10:58:12 -0700497 SkTArray<VkBufferImageCopy> regions(texelsShallowCopy.count());
jvanverth900bd4a2016-04-29 13:53:12 -0700498
jvanverthc578b0632016-05-02 10:58:12 -0700499 currentWidth = width;
500 currentHeight = height;
501 for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count(); currentMipLevel++) {
jvanverth900bd4a2016-04-29 13:53:12 -0700502 const size_t trimRowBytes = currentWidth * bpp;
jvanverthc578b0632016-05-02 10:58:12 -0700503 const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes;
jvanverth900bd4a2016-04-29 13:53:12 -0700504
505 // copy data into the buffer, skipping the trailing bytes
506 char* dst = buffer + individualMipOffsets[currentMipLevel];
jvanverthc578b0632016-05-02 10:58:12 -0700507 const char* src = (const char*)texelsShallowCopy[currentMipLevel].fPixels;
jvanverth900bd4a2016-04-29 13:53:12 -0700508 if (flipY) {
509 src += (currentHeight - 1) * rowBytes;
510 for (int y = 0; y < currentHeight; y++) {
511 memcpy(dst, src, trimRowBytes);
512 src -= rowBytes;
513 dst += trimRowBytes;
514 }
515 } else if (trimRowBytes == rowBytes) {
516 memcpy(dst, src, trimRowBytes * currentHeight);
517 } else {
518 SkRectMemcpy(dst, trimRowBytes, src, rowBytes, trimRowBytes, currentHeight);
519 }
520
521 VkBufferImageCopy& region = regions.push_back();
522 memset(&region, 0, sizeof(VkBufferImageCopy));
523 region.bufferOffset = individualMipOffsets[currentMipLevel];
524 region.bufferRowLength = currentWidth;
525 region.bufferImageHeight = currentHeight;
bsalomoncf942c42016-04-29 18:30:06 -0700526 region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(currentMipLevel), 0, 1 };
jvanverthc578b0632016-05-02 10:58:12 -0700527 region.imageOffset = { left, flipY ? tex->height() - top - currentHeight : top, 0 };
jvanverth900bd4a2016-04-29 13:53:12 -0700528 region.imageExtent = { (uint32_t)currentWidth, (uint32_t)currentHeight, 1 };
egdaniel4583ec52016-06-27 12:57:00 -0700529
jvanverthc578b0632016-05-02 10:58:12 -0700530 currentWidth = SkTMax(1, currentWidth/2);
531 currentHeight = SkTMax(1, currentHeight/2);
jvanverth900bd4a2016-04-29 13:53:12 -0700532 }
533
534 transferBuffer->unmap();
535
536 // make sure the unmap has finished
537 transferBuffer->addMemoryBarrier(this,
538 VK_ACCESS_HOST_WRITE_BIT,
539 VK_ACCESS_TRANSFER_READ_BIT,
540 VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
541 VK_PIPELINE_STAGE_TRANSFER_BIT,
542 false);
543
544 // Change layout of our target so it can be copied to
jvanverth900bd4a2016-04-29 13:53:12 -0700545 tex->setImageLayout(this,
546 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
jvanverth50c46c72016-05-06 12:31:28 -0700547 VK_ACCESS_TRANSFER_WRITE_BIT,
548 VK_PIPELINE_STAGE_TRANSFER_BIT,
jvanverth900bd4a2016-04-29 13:53:12 -0700549 false);
550
551 // Copy the buffer to the image
552 fCurrentCmdBuffer->copyBufferToImage(this,
553 transferBuffer,
554 tex,
555 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
556 regions.count(),
557 regions.begin());
jvanverth900bd4a2016-04-29 13:53:12 -0700558 transferBuffer->unref();
559
Greg Daniel164a9f02016-02-22 09:56:40 -0500560 return true;
561}
562
563////////////////////////////////////////////////////////////////////////////////
// Creates a new texture (optionally a render target) described by 'desc',
// choosing linear vs optimal tiling, and uploads any provided mip levels.
// Returns nullptr if the config/tiling combination is unsupported, creation
// fails, or the initial data upload fails.
GrTexture* GrVkGpu::onCreateTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
                                    const SkTArray<GrMipLevel>& texels) {
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);

    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(desc.fConfig, &pixelFormat)) {
        return nullptr;
    }

    if (!fVkCaps->isConfigTexturable(desc.fConfig)) {
        return nullptr;
    }

    if (renderTarget && !fVkCaps->isConfigRenderable(desc.fConfig, false)) {
        return nullptr;
    }

    // kZeroCopy requests linear tiling so the client can write directly;
    // only honored if the caps allow it for this config.
    bool linearTiling = false;
    if (SkToBool(desc.fFlags & kZeroCopy_GrSurfaceFlag)) {
        // we can't have a linear texture with a mipmap
        if (texels.count() > 1) {
            SkDebugf("Trying to create linear tiled texture with mipmap");
            return nullptr;
        }
        if (fVkCaps->isConfigTexurableLinearly(desc.fConfig) &&
            (!renderTarget || fVkCaps->isConfigRenderableLinearly(desc.fConfig, false))) {
            linearTiling = true;
        } else {
            return nullptr;
        }
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    if (renderTarget) {
        usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    }

    // For now we will set the VK_IMAGE_USAGE_TRANSFER_DESTINATION_BIT and
    // VK_IMAGE_USAGE_TRANSFER_SOURCE_BIT on every texture since we do not know whether or not we
    // will be using this texture in some copy or not. Also this assumes, as is the current case,
    // that all render targets in vulkan are also textures. If we change this practice of setting
    // both bits, we must make sure to set the destination bit if we are uploading srcData to the
    // texture.
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    // Linear textures with initial data are written from the host, so they
    // need host-visible memory; everything else lives in device-local memory.
    VkFlags memProps = (!texels.empty() && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
                                                           VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

    // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is
    // requested, this ImageDesc describes the resolved texture. Therefore we always have samples set
    // to 1.
    int mipLevels = texels.empty() ? 1 : texels.count();
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = pixelFormat;
    imageDesc.fWidth = desc.fWidth;
    imageDesc.fHeight = desc.fHeight;
    imageDesc.fLevels = linearTiling ? 1 : mipLevels;
    imageDesc.fSamples = 1;
    imageDesc.fImageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = usageFlags;
    imageDesc.fMemProps = memProps;

    GrVkTexture* tex;
    if (renderTarget) {
        tex = GrVkTextureRenderTarget::CreateNewTextureRenderTarget(this, budgeted, desc,
                                                                    imageDesc);
    } else {
        tex = GrVkTexture::CreateNewTexture(this, budgeted, desc, imageDesc);
    }

    if (!tex) {
        return nullptr;
    }

    if (!texels.empty()) {
        SkASSERT(texels.begin()->fPixels);
        bool success;
        if (linearTiling) {
            success = this->uploadTexDataLinear(tex, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
                                                texels.begin()->fPixels, texels.begin()->fRowBytes);
        } else {
            success = this->uploadTexDataOptimal(tex, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
                                                 texels);
        }
        if (!success) {
            // Upload failed; drop our ref so the texture is destroyed.
            tex->unref();
            return nullptr;
        }
    }

    return tex;
}
657
658////////////////////////////////////////////////////////////////////////////////
659
jvanvertha584de92016-06-30 09:10:52 -0700660bool GrVkGpu::updateBuffer(GrVkBuffer* buffer, const void* src, size_t srcSizeInBytes) {
661
662 // Update the buffer
663 fCurrentCmdBuffer->updateBuffer(this, buffer, 0, srcSizeInBytes, src);
664
665 return true;
666}
667
668////////////////////////////////////////////////////////////////////////////////
669
Greg Daniel164a9f02016-02-22 09:56:40 -0500670static GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin) {
671 // By default, all textures in Vk use TopLeft
672 if (kDefault_GrSurfaceOrigin == origin) {
673 return kTopLeft_GrSurfaceOrigin;
674 } else {
675 return origin;
676 }
677}
678
// Wraps a client-created Vulkan image (passed as a GrVkImageInfo* in fTextureHandle) in a
// GrVkTexture, optionally with render-target support. Returns nullptr when the handle or
// its contents are invalid or the dimensions exceed the device's texture size limit.
GrTexture* GrVkGpu::onWrapBackendTexture(const GrBackendTextureDesc& desc,
                                         GrWrapOwnership ownership) {
    if (0 == desc.fTextureHandle) {
        return nullptr;
    }

    int maxSize = this->caps()->maxTextureSize();
    if (desc.fWidth > maxSize || desc.fHeight > maxSize) {
        return nullptr;
    }

    // The backend handle is really a pointer to a GrVkImageInfo describing the VkImage.
    const GrVkImageInfo* info = reinterpret_cast<const GrVkImageInfo*>(desc.fTextureHandle);
    if (VK_NULL_HANDLE == info->fImage || VK_NULL_HANDLE == info->fAlloc.fMemory) {
        return nullptr;
    }
#ifdef SK_DEBUG
    // Debug-only: verify the client's pixel config agrees with the VkImage's actual format.
    VkFormat format;
    if (!GrPixelConfigToVkFormat(desc.fConfig, &format)) {
        return nullptr;
    }
    SkASSERT(format == info->fFormat);
#endif

    GrSurfaceDesc surfDesc;
    // next line relies on GrBackendTextureDesc's flags matching GrTexture's
    surfDesc.fFlags = (GrSurfaceFlags)desc.fFlags;
    surfDesc.fWidth = desc.fWidth;
    surfDesc.fHeight = desc.fHeight;
    surfDesc.fConfig = desc.fConfig;
    // Clamp the requested sample count to what the device actually supports.
    surfDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount());
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag);
    // In GL, Chrome assumes all textures are BottomLeft
    // In VK, we don't have this restriction
    surfDesc.fOrigin = resolve_origin(desc.fOrigin);

    GrVkTexture* texture = nullptr;
    if (renderTarget) {
        texture = GrVkTextureRenderTarget::CreateWrappedTextureRenderTarget(this, surfDesc,
                                                                            ownership, info);
    } else {
        texture = GrVkTexture::CreateWrappedTexture(this, surfDesc, ownership, info);
    }
    if (!texture) {
        return nullptr;
    }

    return texture;
}
727
728GrRenderTarget* GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& wrapDesc,
729 GrWrapOwnership ownership) {
halcanary9d524f22016-03-29 09:03:52 -0700730
egdanielb2df0c22016-05-13 11:30:37 -0700731 const GrVkImageInfo* info =
732 reinterpret_cast<const GrVkImageInfo*>(wrapDesc.fRenderTargetHandle);
jvanverthfd359ca2016-03-18 11:57:24 -0700733 if (VK_NULL_HANDLE == info->fImage ||
jvanverth1e305ba2016-06-01 09:39:15 -0700734 (VK_NULL_HANDLE == info->fAlloc.fMemory && kAdopt_GrWrapOwnership == ownership)) {
jvanverthfd359ca2016-03-18 11:57:24 -0700735 return nullptr;
736 }
Greg Daniel164a9f02016-02-22 09:56:40 -0500737
Greg Daniel164a9f02016-02-22 09:56:40 -0500738 GrSurfaceDesc desc;
739 desc.fConfig = wrapDesc.fConfig;
740 desc.fFlags = kCheckAllocation_GrSurfaceFlag;
741 desc.fWidth = wrapDesc.fWidth;
742 desc.fHeight = wrapDesc.fHeight;
743 desc.fSampleCnt = SkTMin(wrapDesc.fSampleCnt, this->caps()->maxSampleCount());
744
745 desc.fOrigin = resolve_origin(wrapDesc.fOrigin);
746
747 GrVkRenderTarget* tgt = GrVkRenderTarget::CreateWrappedRenderTarget(this, desc,
kkinnunen2e6055b2016-04-22 01:48:29 -0700748 ownership,
jvanverthfd359ca2016-03-18 11:57:24 -0700749 info);
Greg Daniel164a9f02016-02-22 09:56:40 -0500750 if (tgt && wrapDesc.fStencilBits) {
751 if (!createStencilAttachmentForRenderTarget(tgt, desc.fWidth, desc.fHeight)) {
752 tgt->unref();
753 return nullptr;
754 }
755 }
756 return tgt;
757}
758
// Regenerates the full mip chain for 'tex': reallocates the texture with enough mip levels,
// blits the original image into the new base level, then blits each level into the next
// smaller one. Silently returns for linear-tiled, multisampled, or non-blittable formats.
void GrVkGpu::generateMipmap(GrVkTexture* tex) const {
    // don't do anything for linearly tiled textures (can't have mipmaps)
    if (tex->isLinearTiled()) {
        SkDebugf("Trying to create mipmap for linear tiled texture");
        return;
    }

    // We cannot generate mipmaps for images that are multisampled.
    // TODO: does it even make sense for rendertargets in general?
    if (tex->asRenderTarget() && tex->asRenderTarget()->numColorSamples() > 1) {
        return;
    }

    // determine if we can blit to and from this format
    const GrVkCaps& caps = this->vkCaps();
    if (!caps.configCanBeDstofBlit(tex->config(), false) ||
        !caps.configCanBeSrcofBlit(tex->config(), false)) {
        return;
    }

    // change the original image's layout so it can be the source of the first blit
    tex->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                        VK_ACCESS_TRANSFER_READ_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, false);

    // grab handle to the original image resource; keep it alive (ref) until the blit from
    // it has been recorded, since reallocForMipmap() below replaces the texture's image
    const GrVkResource* oldResource = tex->resource();
    oldResource->ref();
    VkImage oldImage = tex->image();

    // SkMipMap doesn't include the base level in the level count so we have to add 1
    uint32_t levelCount = SkMipMap::ComputeLevelCount(tex->width(), tex->height()) + 1;
    if (!tex->reallocForMipmap(this, levelCount)) {
        oldResource->unref(this);
        return;
    }

    // change the new image's layout
    tex->setImageLayout(this, VK_IMAGE_LAYOUT_GENERAL,
                        VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, false);

    // Blit original image into the base level of the reallocated image
    int width = tex->width();
    int height = tex->height();

    VkImageBlit blitRegion;
    memset(&blitRegion, 0, sizeof(VkImageBlit));
    blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    blitRegion.srcOffsets[0] = { 0, 0, 0 };
    blitRegion.srcOffsets[1] = { width, height, 1 };
    blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    blitRegion.dstOffsets[0] = { 0, 0, 0 };
    blitRegion.dstOffsets[1] = { width, height, 1 };

    fCurrentCmdBuffer->blitImage(this,
                                 oldResource,
                                 oldImage,
                                 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                 tex->resource(),
                                 tex->image(),
                                 VK_IMAGE_LAYOUT_GENERAL,
                                 1,
                                 &blitRegion,
                                 VK_FILTER_LINEAR);

    // setup memory barrier, reused for each level below: it makes the transfer writes to
    // level (mipLevel - 1) visible to the transfer reads of the following blit
    SkASSERT(GrVkFormatToPixelConfig(tex->imageFormat(), nullptr));
    VkImageAspectFlags aspectFlags = VK_IMAGE_ASPECT_COLOR_BIT;
    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
        NULL,                                   // pNext
        VK_ACCESS_TRANSFER_WRITE_BIT,           // outputMask
        VK_ACCESS_TRANSFER_READ_BIT,            // inputMask
        VK_IMAGE_LAYOUT_GENERAL,                // oldLayout
        VK_IMAGE_LAYOUT_GENERAL,                // newLayout
        VK_QUEUE_FAMILY_IGNORED,                // srcQueueFamilyIndex
        VK_QUEUE_FAMILY_IGNORED,                // dstQueueFamilyIndex
        tex->image(),                           // image
        { aspectFlags, 0, 1, 0, 1 }             // subresourceRange
    };

    // Blit the miplevels: each iteration downsamples level (mipLevel - 1) into mipLevel
    uint32_t mipLevel = 1;
    while (mipLevel < levelCount) {
        int prevWidth = width;
        int prevHeight = height;
        width = SkTMax(1, width / 2);
        height = SkTMax(1, height / 2);

        imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
        this->addImageMemoryBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                                    false, &imageMemoryBarrier);

        blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel - 1, 0, 1 };
        blitRegion.srcOffsets[0] = { 0, 0, 0 };
        blitRegion.srcOffsets[1] = { prevWidth, prevHeight, 1 };
        blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 0, 1 };
        blitRegion.dstOffsets[0] = { 0, 0, 0 };
        blitRegion.dstOffsets[1] = { width, height, 1 };
        fCurrentCmdBuffer->blitImage(this,
                                     *tex,
                                     *tex,
                                     1,
                                     &blitRegion,
                                     VK_FILTER_LINEAR);
        ++mipLevel;
    }

    // release our extra ref on the pre-realloc image resource
    oldResource->unref(this);
}
868
Greg Daniel164a9f02016-02-22 09:56:40 -0500869////////////////////////////////////////////////////////////////////////////////
870
871GrStencilAttachment* GrVkGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt,
872 int width,
873 int height) {
Greg Daniel164a9f02016-02-22 09:56:40 -0500874 SkASSERT(width >= rt->width());
875 SkASSERT(height >= rt->height());
876
877 int samples = rt->numStencilSamples();
878
egdaniel8f1dcaa2016-04-01 10:10:45 -0700879 const GrVkCaps::StencilFormat& sFmt = this->vkCaps().preferedStencilFormat();
Greg Daniel164a9f02016-02-22 09:56:40 -0500880
881 GrVkStencilAttachment* stencil(GrVkStencilAttachment::Create(this,
Greg Daniel164a9f02016-02-22 09:56:40 -0500882 width,
883 height,
884 samples,
885 sFmt));
886 fStats.incStencilAttachmentCreates();
887 return stencil;
888}
889
890////////////////////////////////////////////////////////////////////////////////
891
// Test-only entry point: creates a VkImage (optionally usable as a color attachment),
// uploads srcData when linear tiling is available, and returns a heap-allocated
// GrVkImageInfo cast to GrBackendObject. The caller must release it via
// deleteTestingOnlyBackendTexture(). Returns 0 on any failure.
GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, int h,
                                                         GrPixelConfig config,
                                                         bool isRenderTarget) {

    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
        return 0;
    }

    bool linearTiling = false;
    if (!fVkCaps->isConfigTexturable(config)) {
        return 0;
    }

    if (isRenderTarget && !fVkCaps->isConfigRenderable(config, false)) {
        return 0;
    }

    // Prefer linear tiling when the config supports it (and, for render targets, can be
    // rendered to linearly) so srcData can be uploaded through a simple memory map.
    if (fVkCaps->isConfigTexurableLinearly(config) &&
        (!isRenderTarget || fVkCaps->isConfigRenderableLinearly(config, false))) {
        linearTiling = true;
    }

    // Currently this is not supported since it requires a copy which has not yet been implemented.
    if (srcData && !linearTiling) {
        return 0;
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    if (isRenderTarget) {
        usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    }

    VkImage image = VK_NULL_HANDLE;
    GrVkAlloc alloc = { VK_NULL_HANDLE, 0, 0 };

    VkImageTiling imageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
    // Linear images may start PREINITIALIZED so that host writes made before the first
    // layout transition are preserved; optimal images must start UNDEFINED.
    VkImageLayout initialLayout = (VK_IMAGE_TILING_LINEAR == imageTiling)
                                  ? VK_IMAGE_LAYOUT_PREINITIALIZED
                                  : VK_IMAGE_LAYOUT_UNDEFINED;

    // Create Image
    VkSampleCountFlagBits vkSamples;
    if (!GrSampleCountToVkSampleCount(1, &vkSamples)) {
        return 0;
    }

    const VkImageCreateInfo imageCreateInfo = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // sType
        NULL, // pNext
        0, // VkImageCreateFlags
        VK_IMAGE_TYPE_2D, // VkImageType
        pixelFormat, // VkFormat
        { (uint32_t) w, (uint32_t) h, 1 }, // VkExtent3D
        1, // mipLevels
        1, // arrayLayers
        vkSamples, // samples
        imageTiling, // VkImageTiling
        usageFlags, // VkImageUsageFlags
        VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode
        0, // queueFamilyCount
        0, // pQueueFamilyIndices
        initialLayout // initialLayout
    };

    GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateImage(this->device(), &imageCreateInfo, nullptr, &image));

    if (!GrVkMemory::AllocAndBindImageMemory(this, image, linearTiling, &alloc)) {
        // Memory allocation failed; destroy the image before bailing.
        VK_CALL(DestroyImage(this->device(), image, nullptr));
        return 0;
    }

    if (srcData) {
        if (linearTiling) {
            const VkImageSubresource subres = {
                VK_IMAGE_ASPECT_COLOR_BIT,
                0, // mipLevel
                0, // arraySlice
            };
            VkSubresourceLayout layout;
            VkResult err;

            // Query the driver's row pitch for the linear image so we can respect padding.
            VK_CALL(GetImageSubresourceLayout(fDevice, image, &subres, &layout));

            void* mapPtr;
            err = VK_CALL(MapMemory(fDevice, alloc.fMemory, alloc.fOffset, layout.rowPitch * h,
                                    0, &mapPtr));
            if (err) {
                // Mapping failed; release both the memory and the image before bailing.
                GrVkMemory::FreeImageMemory(this, linearTiling, alloc);
                VK_CALL(DestroyImage(this->device(), image, nullptr));
                return 0;
            }

            size_t bpp = GrBytesPerPixel(config);
            size_t rowCopyBytes = bpp * w;
            // If there is no padding on dst (layout.rowPitch) we can do a single memcopy.
            // This assumes the srcData comes in with no padding.
            if (rowCopyBytes == layout.rowPitch) {
                memcpy(mapPtr, srcData, rowCopyBytes * h);
            } else {
                SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), srcData, rowCopyBytes,
                             rowCopyBytes, h);
            }
            VK_CALL(UnmapMemory(fDevice, alloc.fMemory));
        } else {
            // TODO: Add support for copying to optimal tiling
            SkASSERT(false);
        }
    }

    // Package everything into a heap-allocated info struct owned by the caller.
    GrVkImageInfo* info = new GrVkImageInfo;
    info->fImage = image;
    info->fAlloc = alloc;
    info->fImageTiling = imageTiling;
    info->fImageLayout = initialLayout;
    info->fFormat = pixelFormat;
    info->fLevelCount = 1;

    return (GrBackendObject)info;
}
1014
1015bool GrVkGpu::isTestingOnlyBackendTexture(GrBackendObject id) const {
egdanielb2df0c22016-05-13 11:30:37 -07001016 const GrVkImageInfo* backend = reinterpret_cast<const GrVkImageInfo*>(id);
Greg Daniel164a9f02016-02-22 09:56:40 -05001017
jvanverth1e305ba2016-06-01 09:39:15 -07001018 if (backend && backend->fImage && backend->fAlloc.fMemory) {
Greg Daniel164a9f02016-02-22 09:56:40 -05001019 VkMemoryRequirements req;
1020 memset(&req, 0, sizeof(req));
1021 GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
1022 backend->fImage,
1023 &req));
1024 // TODO: find a better check
1025 // This will probably fail with a different driver
1026 return (req.size > 0) && (req.size <= 8192 * 8192);
1027 }
1028
1029 return false;
1030}
1031
1032void GrVkGpu::deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandon) {
jvanverth6b6ffc42016-06-13 14:28:07 -07001033 GrVkImageInfo* backend = reinterpret_cast<GrVkImageInfo*>(id);
Greg Daniel164a9f02016-02-22 09:56:40 -05001034 if (backend) {
1035 if (!abandon) {
jvanverthfd359ca2016-03-18 11:57:24 -07001036 // something in the command buffer may still be using this, so force submit
1037 this->submitCommandBuffer(kForce_SyncQueue);
jvanverth6b6ffc42016-06-13 14:28:07 -07001038 GrVkImage::DestroyImageInfo(this, backend);
Greg Daniel164a9f02016-02-22 09:56:40 -05001039 }
jvanverthfd359ca2016-03-18 11:57:24 -07001040 delete backend;
Greg Daniel164a9f02016-02-22 09:56:40 -05001041 }
1042}
1043
1044////////////////////////////////////////////////////////////////////////////////
1045
1046void GrVkGpu::addMemoryBarrier(VkPipelineStageFlags srcStageMask,
1047 VkPipelineStageFlags dstStageMask,
1048 bool byRegion,
1049 VkMemoryBarrier* barrier) const {
1050 SkASSERT(fCurrentCmdBuffer);
1051 fCurrentCmdBuffer->pipelineBarrier(this,
1052 srcStageMask,
1053 dstStageMask,
1054 byRegion,
1055 GrVkCommandBuffer::kMemory_BarrierType,
1056 barrier);
1057}
1058
1059void GrVkGpu::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
1060 VkPipelineStageFlags dstStageMask,
1061 bool byRegion,
1062 VkBufferMemoryBarrier* barrier) const {
1063 SkASSERT(fCurrentCmdBuffer);
1064 fCurrentCmdBuffer->pipelineBarrier(this,
1065 srcStageMask,
1066 dstStageMask,
1067 byRegion,
1068 GrVkCommandBuffer::kBufferMemory_BarrierType,
1069 barrier);
1070}
1071
1072void GrVkGpu::addImageMemoryBarrier(VkPipelineStageFlags srcStageMask,
1073 VkPipelineStageFlags dstStageMask,
1074 bool byRegion,
1075 VkImageMemoryBarrier* barrier) const {
1076 SkASSERT(fCurrentCmdBuffer);
1077 fCurrentCmdBuffer->pipelineBarrier(this,
1078 srcStageMask,
1079 dstStageMask,
1080 byRegion,
1081 GrVkCommandBuffer::kImageMemory_BarrierType,
1082 barrier);
1083}
1084
1085void GrVkGpu::finishDrawTarget() {
1086 // Submit the current command buffer to the Queue
1087 this->submitCommandBuffer(kSkip_SyncQueue);
1088}
1089
egdaniel3d5d9ac2016-03-01 12:56:15 -08001090void GrVkGpu::clearStencil(GrRenderTarget* target) {
1091 if (nullptr == target) {
1092 return;
1093 }
1094 GrStencilAttachment* stencil = target->renderTargetPriv().getStencilAttachment();
1095 GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
1096
1097
1098 VkClearDepthStencilValue vkStencilColor;
1099 memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
1100
egdaniel3d5d9ac2016-03-01 12:56:15 -08001101 vkStencil->setImageLayout(this,
1102 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
jvanverth50c46c72016-05-06 12:31:28 -07001103 VK_ACCESS_TRANSFER_WRITE_BIT,
1104 VK_PIPELINE_STAGE_TRANSFER_BIT,
egdaniel3d5d9ac2016-03-01 12:56:15 -08001105 false);
1106
egdaniel3d5d9ac2016-03-01 12:56:15 -08001107 VkImageSubresourceRange subRange;
1108 memset(&subRange, 0, sizeof(VkImageSubresourceRange));
1109 subRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
1110 subRange.baseMipLevel = 0;
1111 subRange.levelCount = 1;
1112 subRange.baseArrayLayer = 0;
1113 subRange.layerCount = 1;
1114
1115 // TODO: I imagine that most times we want to clear a stencil it will be at the beginning of a
1116 // draw. Thus we should look into using the load op functions on the render pass to clear out
1117 // the stencil there.
1118 fCurrentCmdBuffer->clearDepthStencilImage(this, vkStencil, &vkStencilColor, 1, &subRange);
1119}
1120
Greg Daniel164a9f02016-02-22 09:56:40 -05001121inline bool can_copy_image(const GrSurface* dst,
1122 const GrSurface* src,
1123 const GrVkGpu* gpu) {
egdaniel17b89252016-04-05 07:23:38 -07001124 // Currently we don't support msaa
1125 if ((dst->asRenderTarget() && dst->asRenderTarget()->numColorSamples() > 1) ||
1126 (src->asRenderTarget() && src->asRenderTarget()->numColorSamples() > 1)) {
1127 return false;
1128 }
1129
1130 // We require that all vulkan GrSurfaces have been created with transfer_dst and transfer_src
1131 // as image usage flags.
1132 if (src->origin() == dst->origin() &&
1133 GrBytesPerPixel(src->config()) == GrBytesPerPixel(dst->config())) {
Greg Daniel164a9f02016-02-22 09:56:40 -05001134 return true;
1135 }
1136
1137 // How does msaa play into this? If a VkTexture is multisampled, are we copying the multisampled
egdaniel17b89252016-04-05 07:23:38 -07001138 // or the resolved image here? Im multisampled, Vulkan requires sample counts to be the same.
Greg Daniel164a9f02016-02-22 09:56:40 -05001139
1140 return false;
1141}
1142
// Copies srcRect of 'src' to dstPoint in 'dst' using vkCmdCopyImage. The caller must have
// validated the copy via can_copy_image(). Bottom-left-origin rects are flipped into
// Vulkan's top-left coordinate space before recording the copy.
void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst,
                                     GrSurface* src,
                                     GrVkImage* dstImage,
                                     GrVkImage* srcImage,
                                     const SkIRect& srcRect,
                                     const SkIPoint& dstPoint) {
    SkASSERT(can_copy_image(dst, src, this));

    // These flags are for flushing/invalidating caches and for the dst image it doesn't matter if
    // the cache is flushed since it is only being written to.
    dstImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    srcImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                             VK_ACCESS_TRANSFER_READ_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    // Flip rect if necessary
    SkIRect srcVkRect = srcRect;
    int32_t dstY = dstPoint.fY;

    if (kBottomLeft_GrSurfaceOrigin == src->origin()) {
        // can_copy_image() guarantees matching origins, so dst is bottom-left too.
        SkASSERT(kBottomLeft_GrSurfaceOrigin == dst->origin());
        srcVkRect.fTop = src->height() - srcRect.fBottom;
        srcVkRect.fBottom = src->height() - srcRect.fTop;
        dstY = dst->height() - dstPoint.fY - srcVkRect.height();
    }

    VkImageCopy copyRegion;
    memset(&copyRegion, 0, sizeof(VkImageCopy));
    copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.srcOffset = { srcVkRect.fLeft, srcVkRect.fTop, 0 };
    copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.dstOffset = { dstPoint.fX, dstY, 0 };
    // The depth value of the extent is ignored according the vulkan spec for 2D images. However, on
    // at least the nexus 5X it seems to be checking it. Thus as a working around we must have the
    // depth value be 1.
    copyRegion.extent = { (uint32_t)srcVkRect.width(), (uint32_t)srcVkRect.height(), 1 };

    fCurrentCmdBuffer->copyImage(this,
                                 srcImage,
                                 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                 dstImage,
                                 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                 1,
                                 &copyRegion);

    // Notify the dst surface (in its own origin space) that its contents changed.
    SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
                                        srcRect.width(), srcRect.height());
    this->didWriteToSurface(dst, &dstRect);
}
1199
egdaniel17b89252016-04-05 07:23:38 -07001200inline bool can_copy_as_blit(const GrSurface* dst,
1201 const GrSurface* src,
1202 const GrVkImage* dstImage,
1203 const GrVkImage* srcImage,
1204 const GrVkGpu* gpu) {
1205 // We require that all vulkan GrSurfaces have been created with transfer_dst and transfer_src
1206 // as image usage flags.
1207 const GrVkCaps& caps = gpu->vkCaps();
1208 if (!caps.configCanBeDstofBlit(dst->config(), dstImage->isLinearTiled()) ||
1209 !caps.configCanBeSrcofBlit(src->config(), srcImage->isLinearTiled())) {
1210 return false;
1211 }
1212
1213 // We cannot blit images that are multisampled. Will need to figure out if we can blit the
1214 // resolved msaa though.
1215 if ((dst->asRenderTarget() && dst->asRenderTarget()->numColorSamples() > 1) ||
1216 (src->asRenderTarget() && src->asRenderTarget()->numColorSamples() > 1)) {
1217 return false;
1218 }
1219
1220 return true;
1221}
1222
// Copies srcRect of 'src' to dstPoint in 'dst' using vkCmdBlitImage. Unlike vkCmdCopyImage,
// a blit can handle differing configs and origins; the caller must have validated the copy
// via can_copy_as_blit(). Src/dst rects are never scaled relative to each other.
void GrVkGpu::copySurfaceAsBlit(GrSurface* dst,
                                GrSurface* src,
                                GrVkImage* dstImage,
                                GrVkImage* srcImage,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint) {
    SkASSERT(can_copy_as_blit(dst, src, dstImage, srcImage, this));

    dstImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    srcImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                             VK_ACCESS_TRANSFER_READ_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    // Flip rect if necessary
    SkIRect srcVkRect;
    srcVkRect.fLeft = srcRect.fLeft;
    srcVkRect.fRight = srcRect.fRight;
    SkIRect dstRect;
    dstRect.fLeft = dstPoint.fX;
    dstRect.fRight = dstPoint.fX + srcRect.width();

    // Convert each rect's vertical extent into Vulkan's top-left space if its surface
    // uses a bottom-left origin.
    if (kBottomLeft_GrSurfaceOrigin == src->origin()) {
        srcVkRect.fTop = src->height() - srcRect.fBottom;
        srcVkRect.fBottom = src->height() - srcRect.fTop;
    } else {
        srcVkRect.fTop = srcRect.fTop;
        srcVkRect.fBottom = srcRect.fBottom;
    }

    if (kBottomLeft_GrSurfaceOrigin == dst->origin()) {
        dstRect.fTop = dst->height() - dstPoint.fY - srcVkRect.height();
    } else {
        dstRect.fTop = dstPoint.fY;
    }
    dstRect.fBottom = dstRect.fTop + srcVkRect.height();

    // If we have different origins, we need to flip the top and bottom of the dst rect so that we
    // get the correct origintation of the copied data.
    if (src->origin() != dst->origin()) {
        SkTSwap(dstRect.fTop, dstRect.fBottom);
    }

    VkImageBlit blitRegion;
    memset(&blitRegion, 0, sizeof(VkImageBlit));
    blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    blitRegion.srcOffsets[0] = { srcVkRect.fLeft, srcVkRect.fTop, 0 };
    blitRegion.srcOffsets[1] = { srcVkRect.fRight, srcVkRect.fBottom, 0 };
    blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    blitRegion.dstOffsets[0] = { dstRect.fLeft, dstRect.fTop, 0 };
    blitRegion.dstOffsets[1] = { dstRect.fRight, dstRect.fBottom, 0 };

    fCurrentCmdBuffer->blitImage(this,
                                 *srcImage,
                                 *dstImage,
                                 1,
                                 &blitRegion,
                                 VK_FILTER_NEAREST); // We never scale so any filter works here

    this->didWriteToSurface(dst, &dstRect);
}
1290
Greg Daniel164a9f02016-02-22 09:56:40 -05001291inline bool can_copy_as_draw(const GrSurface* dst,
1292 const GrSurface* src,
1293 const GrVkGpu* gpu) {
1294 return false;
1295}
1296
1297void GrVkGpu::copySurfaceAsDraw(GrSurface* dst,
1298 GrSurface* src,
1299 const SkIRect& srcRect,
1300 const SkIPoint& dstPoint) {
1301 SkASSERT(false);
1302}
1303
// Entry point for surface-to-surface copies. Tries the supported mechanisms in order:
// vkCmdCopyImage, vkCmdBlitImage, then a draw (not implemented). Returns false when no
// mechanism can perform the requested copy.
bool GrVkGpu::onCopySurface(GrSurface* dst,
                            GrSurface* src,
                            const SkIRect& srcRect,
                            const SkIPoint& dstPoint) {
    // Each surface is either a GrVkTexture or a GrVkRenderTarget, both of which are
    // GrVkImages; extract the image side of whichever type it actually is.
    GrVkImage* dstImage;
    GrVkImage* srcImage;
    if (dst->asTexture()) {
        dstImage = static_cast<GrVkTexture*>(dst->asTexture());
    } else {
        SkASSERT(dst->asRenderTarget());
        dstImage = static_cast<GrVkRenderTarget*>(dst->asRenderTarget());
    }
    if (src->asTexture()) {
        srcImage = static_cast<GrVkTexture*>(src->asTexture());
    } else {
        SkASSERT(src->asRenderTarget());
        srcImage = static_cast<GrVkRenderTarget*>(src->asRenderTarget());
    }

    if (can_copy_image(dst, src, this)) {
        this->copySurfaceAsCopyImage(dst, src, dstImage, srcImage, srcRect, dstPoint);
        return true;
    }

    if (can_copy_as_blit(dst, src, dstImage, srcImage, this)) {
        this->copySurfaceAsBlit(dst, src, dstImage, srcImage, srcRect, dstPoint);
        return true;
    }

    if (can_copy_as_draw(dst, src, this)) {
        this->copySurfaceAsDraw(dst, src, srcRect, dstPoint);
        return true;
    }

    return false;
}
1340
// Describes a surface that could serve as the destination of a copy from 'src'. Returns
// false when no such surface can be described (currently: src is multisampled).
bool GrVkGpu::initCopySurfaceDstDesc(const GrSurface* src, GrSurfaceDesc* desc) const {
    // Currently we don't support msaa
    if (src->asRenderTarget() && src->asRenderTarget()->numColorSamples() > 1) {
        return false;
    }

    // This will support copying the dst as CopyImage since all of our surfaces require transferSrc
    // and transferDst usage flags in Vulkan.
    desc->fOrigin = src->origin();
    desc->fConfig = src->config();
    desc->fFlags = kNone_GrSurfaceFlags;
    return true;
}
1354
// Reports the effective sample count for the render target. Programmable sample locations
// are not supported on this backend, so only the count is filled in.
void GrVkGpu::onGetMultisampleSpecs(GrRenderTarget* rt, const GrStencilSettings&,
                                    int* effectiveSampleCnt, SkAutoTDeleteArray<SkPoint>*) {
    // TODO: stub.
    SkASSERT(!this->caps()->sampleLocationsSupport());
    *effectiveSampleCnt = rt->desc().fSampleCnt;
}
1361
// Determines whether/how pixels of 'readConfig' can be read back from 'srcSurface',
// filling in the temp-draw surface description in case an intermediate draw is needed.
// Returns false when the read cannot be satisfied at all.
bool GrVkGpu::onGetReadPixelsInfo(GrSurface* srcSurface, int width, int height, size_t rowBytes,
                                  GrPixelConfig readConfig, DrawPreference* drawPreference,
                                  ReadPixelTempDrawInfo* tempDrawInfo) {
    // These settings we will always want if a temp draw is performed.
    tempDrawInfo->fTempSurfaceDesc.fFlags = kRenderTarget_GrSurfaceFlag;
    tempDrawInfo->fTempSurfaceDesc.fWidth = width;
    tempDrawInfo->fTempSurfaceDesc.fHeight = height;
    tempDrawInfo->fTempSurfaceDesc.fSampleCnt = 0;
    tempDrawInfo->fTempSurfaceDesc.fOrigin = kTopLeft_GrSurfaceOrigin; // no CPU y-flip for TL.
    tempDrawInfo->fUseExactScratch = false;

    // For now assume no swizzling, we may change that below.
    tempDrawInfo->fSwizzle = GrSwizzle::RGBA();

    // Depends on why we need/want a temp draw. Start off assuming no change, the surface we read
    // from will be srcConfig and we will read readConfig pixels from it.
    // Not that if we require a draw and return a non-renderable format for the temp surface the
    // base class will fail for us.
    tempDrawInfo->fTempSurfaceDesc.fConfig = srcSurface->config();
    tempDrawInfo->fReadConfig = readConfig;

    // Matching configs can be read directly with no conversion draw.
    if (srcSurface->config() == readConfig) {
        return true;
    }

    // Mismatched configs require a conversion draw into a readConfig render target, which
    // is only possible when readConfig is renderable on this device.
    if (this->vkCaps().isConfigRenderable(readConfig, false)) {
        ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
        tempDrawInfo->fTempSurfaceDesc.fConfig = readConfig;
        tempDrawInfo->fReadConfig = readConfig;
        return true;
    }

    return false;
}
1396
1397bool GrVkGpu::onReadPixels(GrSurface* surface,
1398 int left, int top, int width, int height,
1399 GrPixelConfig config,
1400 void* buffer,
1401 size_t rowBytes) {
1402 VkFormat pixelFormat;
1403 if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
1404 return false;
1405 }
1406
1407 GrVkTexture* tgt = static_cast<GrVkTexture*>(surface->asTexture());
1408 if (!tgt) {
1409 return false;
1410 }
1411
1412 // Change layout of our target so it can be used as copy
Greg Daniel164a9f02016-02-22 09:56:40 -05001413 tgt->setImageLayout(this,
1414 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
jvanverth50c46c72016-05-06 12:31:28 -07001415 VK_ACCESS_TRANSFER_READ_BIT,
1416 VK_PIPELINE_STAGE_TRANSFER_BIT,
Greg Daniel164a9f02016-02-22 09:56:40 -05001417 false);
1418
halcanary9d524f22016-03-29 09:03:52 -07001419 GrVkTransferBuffer* transferBuffer =
cdaltone2e71c22016-04-07 18:13:29 -07001420 static_cast<GrVkTransferBuffer*>(this->createBuffer(rowBytes * height,
1421 kXferGpuToCpu_GrBufferType,
cdalton397536c2016-03-25 12:15:03 -07001422 kStream_GrAccessPattern));
Greg Daniel164a9f02016-02-22 09:56:40 -05001423
1424 bool flipY = kBottomLeft_GrSurfaceOrigin == surface->origin();
1425 VkOffset3D offset = {
1426 left,
1427 flipY ? surface->height() - top - height : top,
1428 0
1429 };
1430
1431 // Copy the image to a buffer so we can map it to cpu memory
1432 VkBufferImageCopy region;
1433 memset(&region, 0, sizeof(VkBufferImageCopy));
1434 region.bufferOffset = 0;
egdaniel88e8aef2016-06-27 14:34:55 -07001435 region.bufferRowLength = 0; // Forces RowLength to be width. We handle the rowBytes below.
Greg Daniel164a9f02016-02-22 09:56:40 -05001436 region.bufferImageHeight = 0; // Forces height to be tightly packed. Only useful for 3d images.
1437 region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
1438 region.imageOffset = offset;
1439 region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };
1440
1441 fCurrentCmdBuffer->copyImageToBuffer(this,
1442 tgt,
1443 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
1444 transferBuffer,
1445 1,
1446 &region);
1447
1448 // make sure the copy to buffer has finished
1449 transferBuffer->addMemoryBarrier(this,
1450 VK_ACCESS_TRANSFER_WRITE_BIT,
1451 VK_ACCESS_HOST_READ_BIT,
1452 VK_PIPELINE_STAGE_TRANSFER_BIT,
1453 VK_PIPELINE_STAGE_HOST_BIT,
1454 false);
1455
1456 // We need to submit the current command buffer to the Queue and make sure it finishes before
1457 // we can copy the data out of the buffer.
1458 this->submitCommandBuffer(kForce_SyncQueue);
1459
1460 void* mappedMemory = transferBuffer->map();
1461
egdaniel88e8aef2016-06-27 14:34:55 -07001462 size_t tightRowBytes = GrBytesPerPixel(config) * width;
1463 if (flipY) {
1464 const char* srcRow = reinterpret_cast<const char*>(mappedMemory);
1465 char* dstRow = reinterpret_cast<char*>(buffer)+(height - 1) * rowBytes;
1466 for (int y = 0; y < height; y++) {
1467 memcpy(dstRow, srcRow, tightRowBytes);
1468 srcRow += tightRowBytes;
1469 dstRow -= rowBytes;
1470 }
1471 } else {
1472 if (tightRowBytes == rowBytes) {
1473 memcpy(buffer, mappedMemory, rowBytes*height);
1474 } else {
1475 SkRectMemcpy(buffer, rowBytes, mappedMemory, tightRowBytes, tightRowBytes, height);
1476 }
1477 }
Greg Daniel164a9f02016-02-22 09:56:40 -05001478
1479 transferBuffer->unmap();
1480 transferBuffer->unref();
1481
Greg Daniel164a9f02016-02-22 09:56:40 -05001482 return true;
1483}
egdaniel066df7c2016-06-08 14:02:27 -07001484
egdaniel9cb63402016-06-23 08:37:05 -07001485void GrVkGpu::submitSecondaryCommandBuffer(const GrVkSecondaryCommandBuffer* buffer,
1486 const GrVkRenderPass* renderPass,
1487 const VkClearValue* colorClear,
1488 GrVkRenderTarget* target,
1489 const SkIRect& bounds) {
egdaniele7d1b242016-07-01 08:06:45 -07001490 const SkIRect* pBounds = &bounds;
1491 SkIRect flippedBounds;
1492 if (kBottomLeft_GrSurfaceOrigin == target->origin()) {
1493 flippedBounds = bounds;
1494 flippedBounds.fTop = target->height() - bounds.fBottom;
1495 flippedBounds.fBottom = target->height() - bounds.fTop;
1496 pBounds = &flippedBounds;
1497 }
1498
egdaniel9cb63402016-06-23 08:37:05 -07001499 // Currently it is fine for us to always pass in 1 for the clear count even if no attachment
1500 // uses it. In the current state, we also only use the LOAD_OP_CLEAR for the color attachment
1501 // which is always at the first attachment.
egdaniele7d1b242016-07-01 08:06:45 -07001502 fCurrentCmdBuffer->beginRenderPass(this, renderPass, 1, colorClear, *target, *pBounds, true);
egdaniel066df7c2016-06-08 14:02:27 -07001503 fCurrentCmdBuffer->executeCommands(this, buffer);
Greg Daniel164a9f02016-02-22 09:56:40 -05001504 fCurrentCmdBuffer->endRenderPass(this);
Greg Daniel164a9f02016-02-22 09:56:40 -05001505}
egdaniel9cb63402016-06-23 08:37:05 -07001506