/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkGpu.h"

#include "GrContextOptions.h"
#include "GrGeometryProcessor.h"
#include "GrGpuResourceCacheAccess.h"
#include "GrMesh.h"
#include "GrPipeline.h"
#include "GrRenderTargetPriv.h"
#include "GrSurfacePriv.h"
#include "GrTexturePriv.h"

#include "GrVkCommandBuffer.h"
#include "GrVkGpuCommandBuffer.h"
#include "GrVkImage.h"
#include "GrVkIndexBuffer.h"
#include "GrVkMemory.h"
#include "GrVkPipeline.h"
#include "GrVkPipelineState.h"
#include "GrVkRenderPass.h"
#include "GrVkResourceProvider.h"
#include "GrVkTexture.h"
#include "GrVkTextureRenderTarget.h"
#include "GrVkTransferBuffer.h"
#include "GrVkVertexBuffer.h"

#include "SkConfig8888.h"
#include "SkMipMap.h"

#include "vk/GrVkInterface.h"
#include "vk/GrVkTypes.h"

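// Convenience macros that route Vulkan calls through this GrVkGpu's GrVkInterface function
// table; the ERRCHECK variant additionally checks the returned VkResult (see the underlying
// GR_VK_CALL* macro definitions).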
#define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
#define VK_CALL_RET(RET, X) GR_VK_CALL_RET(this->vkInterface(), RET, X)
#define VK_CALL_ERRCHECK(X) GR_VK_CALL_ERRCHECK(this->vkInterface(), X)

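// When the validation layers are enabled, this callback receives messages from the
// VK_EXT_debug_report extension and forwards them to SkDebugf, bucketed by severity. Returning
// VK_FALSE tells the layer not to abort the triggering call.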
#ifdef ENABLE_VK_LAYERS
VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(
    VkDebugReportFlagsEXT       flags,
    VkDebugReportObjectTypeEXT  objectType,
    uint64_t                    object,
    size_t                      location,
    int32_t                     messageCode,
    const char*                 pLayerPrefix,
    const char*                 pMessage,
    void*                       pUserData) {
    if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
        SkDebugf("Vulkan error [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    } else if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) {
        SkDebugf("Vulkan warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    } else if (flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) {
        SkDebugf("Vulkan perf warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    } else {
        SkDebugf("Vulkan info/debug [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    }
    return VK_FALSE;
}
#endif

GrGpu* GrVkGpu::Create(GrBackendContext backendContext, const GrContextOptions& options,
                       GrContext* context) {
    const GrVkBackendContext* vkBackendContext =
        reinterpret_cast<const GrVkBackendContext*>(backendContext);
    if (!vkBackendContext) {
        vkBackendContext = GrVkBackendContext::Create();
        if (!vkBackendContext) {
            return nullptr;
        }
    } else {
        vkBackendContext->ref();
    }

    return new GrVkGpu(context, options, vkBackendContext);
}

////////////////////////////////////////////////////////////////////////////////

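// The constructor adopts the externally created device and queue from the backend context,
// optionally registers the debug-report callback, queries caps and memory properties, creates
// the transient command pool plus the first primary command buffer, and sets up the
// suballocation heaps used for image and buffer memory.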
GrVkGpu::GrVkGpu(GrContext* context, const GrContextOptions& options,
                 const GrVkBackendContext* backendCtx)
    : INHERITED(context)
    , fDevice(backendCtx->fDevice)
    , fQueue(backendCtx->fQueue)
    , fResourceProvider(this) {
    fBackendContext.reset(backendCtx);

#ifdef ENABLE_VK_LAYERS
    fCallback = VK_NULL_HANDLE;
    if (backendCtx->fExtensions & kEXT_debug_report_GrVkExtensionFlag) {
        // Set up callback creation information
        VkDebugReportCallbackCreateInfoEXT callbackCreateInfo;
        callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
        callbackCreateInfo.pNext = nullptr;
        callbackCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT |
                                   VK_DEBUG_REPORT_WARNING_BIT_EXT |
                                   //VK_DEBUG_REPORT_INFORMATION_BIT_EXT |
                                   //VK_DEBUG_REPORT_DEBUG_BIT_EXT |
                                   VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
        callbackCreateInfo.pfnCallback = &DebugReportCallback;
        callbackCreateInfo.pUserData = nullptr;

        // Register the callback
        GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateDebugReportCallbackEXT(
                            backendCtx->fInstance, &callbackCreateInfo, nullptr, &fCallback));
    }
#endif

    fCompiler = shaderc_compiler_initialize();

    fVkCaps.reset(new GrVkCaps(options, this->vkInterface(), backendCtx->fPhysicalDevice,
                               backendCtx->fFeatures, backendCtx->fExtensions));
    fCaps.reset(SkRef(fVkCaps.get()));

    VK_CALL(GetPhysicalDeviceMemoryProperties(backendCtx->fPhysicalDevice, &fPhysDevMemProps));

    const VkCommandPoolCreateInfo cmdPoolInfo = {
        VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,  // sType
        nullptr,                                     // pNext
        VK_COMMAND_POOL_CREATE_TRANSIENT_BIT,        // CmdPoolCreateFlags
        backendCtx->fGraphicsQueueIndex,             // queueFamilyIndex
    };
    GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateCommandPool(fDevice, &cmdPoolInfo, nullptr,
                                                               &fCmdPool));

    // must call this after creating the CommandPool
    fResourceProvider.init();
    fCurrentCmdBuffer = fResourceProvider.createPrimaryCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->begin(this);

    // set up our heaps
    fHeaps[kLinearImage_Heap].reset(new GrVkHeap(this, GrVkHeap::kSubAlloc_Strategy, 16*1024*1024));
    // We want the OptimalImage_Heap to use a SubAlloc_Strategy, but that occasionally causes the
    // device to run out of memory. Most likely this is caused by fragmentation in the device heap,
    // which prevents further allocation. Until we have a fix, this heap uses SingleAlloc.
    fHeaps[kOptimalImage_Heap].reset(new GrVkHeap(this, GrVkHeap::kSingleAlloc_Strategy, 64*1024*1024));
    fHeaps[kSmallOptimalImage_Heap].reset(new GrVkHeap(this, GrVkHeap::kSubAlloc_Strategy, 2*1024*1024));
    fHeaps[kVertexBuffer_Heap].reset(new GrVkHeap(this, GrVkHeap::kSingleAlloc_Strategy, 0));
    fHeaps[kIndexBuffer_Heap].reset(new GrVkHeap(this, GrVkHeap::kSingleAlloc_Strategy, 0));
    fHeaps[kUniformBuffer_Heap].reset(new GrVkHeap(this, GrVkHeap::kSubAlloc_Strategy, 64*1024));
    fHeaps[kCopyReadBuffer_Heap].reset(new GrVkHeap(this, GrVkHeap::kSingleAlloc_Strategy, 0));
    fHeaps[kCopyWriteBuffer_Heap].reset(new GrVkHeap(this, GrVkHeap::kSubAlloc_Strategy, 16*1024*1024));
}

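// Teardown order matters here: the in-flight command buffer is ended and released, the queue is
// drained, and all cached resources are destroyed before the command pool (and ultimately the
// VkDevice held by the backend context) goes away.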
GrVkGpu::~GrVkGpu() {
    fCurrentCmdBuffer->end(this);
    fCurrentCmdBuffer->unref(this);

    // wait for all commands to finish
    fResourceProvider.checkCommandBuffers();
    SkDEBUGCODE(VkResult res = ) VK_CALL(QueueWaitIdle(fQueue));

    // On Windows, calls to QueueWaitIdle sometimes return before actually signalling the fences
    // on the command buffers, even though they have completed. This causes an assert to fire when
    // destroying the command buffers. Currently this only seems to happen on Windows, so we add a
    // sleep to make sure the fence signals.
#ifdef SK_DEBUG
#if defined(SK_BUILD_FOR_WIN)
    Sleep(10); // In milliseconds
#else
    // Uncomment if the above bug happens on a non-Windows build.
    // sleep(1); // In seconds
#endif
#endif

    // VK_ERROR_DEVICE_LOST is acceptable when tearing down (see 4.2.4 in spec)
    SkASSERT(VK_SUCCESS == res || VK_ERROR_DEVICE_LOST == res);

    // must call this just before we destroy the VkDevice
    fResourceProvider.destroyResources();

    VK_CALL(DestroyCommandPool(fDevice, fCmdPool, nullptr));

    shaderc_compiler_release(fCompiler);

#ifdef ENABLE_VK_LAYERS
    if (fCallback) {
        VK_CALL(DestroyDebugReportCallbackEXT(fBackendContext->fInstance, fCallback, nullptr));
        fCallback = VK_NULL_HANDLE;
    }
#endif
}

///////////////////////////////////////////////////////////////////////////////

GrGpuCommandBuffer* GrVkGpu::createCommandBuffer(
            GrRenderTarget* target,
            const GrGpuCommandBuffer::LoadAndStoreInfo& colorInfo,
            const GrGpuCommandBuffer::LoadAndStoreInfo& stencilInfo) {
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
    return new GrVkGpuCommandBuffer(this, vkRT, colorInfo, stencilInfo);
}

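// Ends and submits the current primary command buffer (blocking until completion when
// kForce_SyncQueue is passed), recycles any command buffers that have finished, and then starts
// recording into a fresh primary command buffer.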
void GrVkGpu::submitCommandBuffer(SyncQueue sync) {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->end(this);

    fCurrentCmdBuffer->submitToQueue(this, fQueue, sync);
    fResourceProvider.checkCommandBuffers();

    // Release old command buffer and create a new one
    fCurrentCmdBuffer->unref(this);
    fCurrentCmdBuffer = fResourceProvider.createPrimaryCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);

    fCurrentCmdBuffer->begin(this);
}

////////////////////////////////////////////////////////////////////////////////
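// Buffer factory: maps each GrBufferType onto the matching GrVk* buffer class, with transfer
// buffers distinguished by copy-read (CPU to GPU) vs. copy-write (GPU to CPU) usage.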
GrBuffer* GrVkGpu::onCreateBuffer(size_t size, GrBufferType type, GrAccessPattern accessPattern,
                                  const void* data) {
    GrBuffer* buff;
    switch (type) {
        case kVertex_GrBufferType:
            SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
                     kStatic_GrAccessPattern == accessPattern);
            buff = GrVkVertexBuffer::Create(this, size, kDynamic_GrAccessPattern == accessPattern);
            break;
        case kIndex_GrBufferType:
            SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
                     kStatic_GrAccessPattern == accessPattern);
            buff = GrVkIndexBuffer::Create(this, size, kDynamic_GrAccessPattern == accessPattern);
            break;
        case kXferCpuToGpu_GrBufferType:
            SkASSERT(kStream_GrAccessPattern == accessPattern);
            buff = GrVkTransferBuffer::Create(this, size, GrVkBuffer::kCopyRead_Type);
            break;
        case kXferGpuToCpu_GrBufferType:
            SkASSERT(kStream_GrAccessPattern == accessPattern);
            buff = GrVkTransferBuffer::Create(this, size, GrVkBuffer::kCopyWrite_Type);
            break;
        default:
            SkFAIL("Unknown buffer type.");
            return nullptr;
    }
    if (data && buff) {
        buff->updateData(data, size);
    }
    return buff;
}

////////////////////////////////////////////////////////////////////////////////
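// Decides how a writePixels call must be serviced: directly when the configs already match,
// or via a temp-surface draw when the destination is renderable, including an R/B channel swap
// when that is the only mismatch; otherwise the write is rejected.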
bool GrVkGpu::onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
                                   GrPixelConfig srcConfig, DrawPreference* drawPreference,
                                   WritePixelTempDrawInfo* tempDrawInfo) {
    if (kIndex_8_GrPixelConfig == srcConfig || GrPixelConfigIsCompressed(dstSurface->config())) {
        return false;
    }

    if (dstSurface->config() == srcConfig) {
        return true;
    }

    GrRenderTarget* renderTarget = dstSurface->asRenderTarget();

    // Start off assuming no swizzling
    tempDrawInfo->fSwizzle = GrSwizzle::RGBA();
    tempDrawInfo->fWriteConfig = srcConfig;

    // These settings we will always want if a temp draw is performed. Initially set the config
    // to srcConfig, though that may be modified if we decide to do an R/B swap.
    tempDrawInfo->fTempSurfaceDesc.fFlags = kNone_GrSurfaceFlags;
    tempDrawInfo->fTempSurfaceDesc.fConfig = srcConfig;
    tempDrawInfo->fTempSurfaceDesc.fWidth = width;
    tempDrawInfo->fTempSurfaceDesc.fHeight = height;
    tempDrawInfo->fTempSurfaceDesc.fSampleCnt = 0;
    tempDrawInfo->fTempSurfaceDesc.fOrigin = kTopLeft_GrSurfaceOrigin;

    if (renderTarget && this->vkCaps().isConfigRenderable(renderTarget->config(), false)) {
        ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);

        bool configsAreRBSwaps = GrPixelConfigSwapRAndB(srcConfig) == dstSurface->config();

        if (!this->vkCaps().isConfigTexturable(srcConfig) && configsAreRBSwaps) {
            if (!this->vkCaps().isConfigTexturable(dstSurface->config())) {
                return false;
            }
            tempDrawInfo->fTempSurfaceDesc.fConfig = dstSurface->config();
            tempDrawInfo->fSwizzle = GrSwizzle::BGRA();
            tempDrawInfo->fWriteConfig = dstSurface->config();
        }
        return true;
    }

    return false;
}

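// Writes pixel data into a texture. Linear-tiled images are written directly from the host
// (after transitioning to VK_IMAGE_LAYOUT_GENERAL if needed); optimal-tiled images go through a
// staging-buffer upload, reallocating the image first if the incoming mip count differs.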
bool GrVkGpu::onWritePixels(GrSurface* surface,
                            int left, int top, int width, int height,
                            GrPixelConfig config,
                            const SkTArray<GrMipLevel>& texels) {
    GrVkTexture* vkTex = static_cast<GrVkTexture*>(surface->asTexture());
    if (!vkTex) {
        return false;
    }

    // Make sure we have at least the base level
    if (texels.empty() || !texels.begin()->fPixels) {
        return false;
    }

    // We assume Vulkan doesn't do sRGB <-> linear conversions when reading and writing pixels.
    if (GrPixelConfigIsSRGB(surface->config()) != GrPixelConfigIsSRGB(config)) {
        return false;
    }

    bool success = false;
    if (GrPixelConfigIsCompressed(vkTex->desc().fConfig)) {
        // We check that config == desc.fConfig in GrGpu::getWritePixelsInfo()
        SkASSERT(config == vkTex->desc().fConfig);
        // TODO: add compressed texture support
        // delete the following two lines and uncomment the two after that when ready
        vkTex->unref();
        return false;
        //success = this->uploadCompressedTexData(vkTex->desc(), buffer, false, left, top, width,
        //                                        height);
    } else {
        bool linearTiling = vkTex->isLinearTiled();
        if (linearTiling) {
            if (texels.count() > 1) {
                SkDebugf("Can't upload mipmap data to linear tiled texture");
                return false;
            }
            if (VK_IMAGE_LAYOUT_PREINITIALIZED != vkTex->currentLayout()) {
                // Need to change the layout to general in order to perform a host write
                vkTex->setImageLayout(this,
                                      VK_IMAGE_LAYOUT_GENERAL,
                                      VK_ACCESS_HOST_WRITE_BIT,
                                      VK_PIPELINE_STAGE_HOST_BIT,
                                      false);
                this->submitCommandBuffer(kForce_SyncQueue);
            }
            success = this->uploadTexDataLinear(vkTex, left, top, width, height, config,
                                                texels.begin()->fPixels, texels.begin()->fRowBytes);
        } else {
            int newMipLevels = texels.count();
            int currentMipLevels = vkTex->texturePriv().maxMipMapLevel() + 1;
            if (newMipLevels != currentMipLevels) {
                if (!vkTex->reallocForMipmap(this, newMipLevels)) {
                    return false;
                }
            }
            success = this->uploadTexDataOptimal(vkTex, left, top, width, height, config, texels);
        }
    }

    return success;
}

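// Host path for linear-tiled images: queries the subresource layout, maps the image memory for
// the affected rows, and copies the data in, respecting the destination rowPitch and flipping
// row order for bottom-left-origin surfaces.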
bool GrVkGpu::uploadTexDataLinear(GrVkTexture* tex,
                                  int left, int top, int width, int height,
                                  GrPixelConfig dataConfig,
                                  const void* data,
                                  size_t rowBytes) {
    SkASSERT(data);
    SkASSERT(tex->isLinearTiled());

    // If we're uploading compressed data then we should be using uploadCompressedTexData
    SkASSERT(!GrPixelConfigIsCompressed(dataConfig));

    size_t bpp = GrBytesPerPixel(dataConfig);

    const GrSurfaceDesc& desc = tex->desc();

    if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
                                               &width, &height, &data, &rowBytes)) {
        return false;
    }
    size_t trimRowBytes = width * bpp;

    SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == tex->currentLayout() ||
             VK_IMAGE_LAYOUT_GENERAL == tex->currentLayout());
    const VkImageSubresource subres = {
        VK_IMAGE_ASPECT_COLOR_BIT,
        0,  // mipLevel
        0,  // arraySlice
    };
    VkSubresourceLayout layout;
    VkResult err;

    const GrVkInterface* interface = this->vkInterface();

    GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
                                                    tex->image(),
                                                    &subres,
                                                    &layout));

    int texTop = kBottomLeft_GrSurfaceOrigin == desc.fOrigin ? tex->height() - top - height : top;
    const GrVkAlloc& alloc = tex->alloc();
    VkDeviceSize offset = alloc.fOffset + texTop*layout.rowPitch + left*bpp;
    VkDeviceSize size = height*layout.rowPitch;
    void* mapPtr;
    err = GR_VK_CALL(interface, MapMemory(fDevice, alloc.fMemory, offset, size, 0, &mapPtr));
    if (err) {
        return false;
    }

    if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
        // copy into buffer by rows
        const char* srcRow = reinterpret_cast<const char*>(data);
        char* dstRow = reinterpret_cast<char*>(mapPtr)+(height - 1)*layout.rowPitch;
        for (int y = 0; y < height; y++) {
            memcpy(dstRow, srcRow, trimRowBytes);
            srcRow += rowBytes;
            dstRow -= layout.rowPitch;
        }
    } else {
        // If there is no padding on the src (rowBytes) or dst (layout.rowPitch) we can memcpy
        if (trimRowBytes == rowBytes && trimRowBytes == layout.rowPitch) {
            memcpy(mapPtr, data, trimRowBytes * height);
        } else {
            SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), data, rowBytes,
                         trimRowBytes, height);
        }
    }

    GR_VK_CALL(interface, UnmapMemory(fDevice, alloc.fMemory));

    return true;
}

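// Staging path for optimal-tiled images: packs every mip level contiguously into a single
// transfer buffer, records one VkBufferImageCopy region per level, and issues a single
// copyBufferToImage once the image is in TRANSFER_DST_OPTIMAL layout.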
bool GrVkGpu::uploadTexDataOptimal(GrVkTexture* tex,
                                   int left, int top, int width, int height,
                                   GrPixelConfig dataConfig,
                                   const SkTArray<GrMipLevel>& texels) {
    SkASSERT(!tex->isLinearTiled());
    // The assumption is either that we have no mipmaps, or that our rect is the entire texture
    SkASSERT(1 == texels.count() ||
             (0 == left && 0 == top && width == tex->width() && height == tex->height()));

    // If we're uploading compressed data then we should be using uploadCompressedTexData
    SkASSERT(!GrPixelConfigIsCompressed(dataConfig));

    if (width == 0 || height == 0) {
        return false;
    }

    const GrSurfaceDesc& desc = tex->desc();
    SkASSERT(this->caps()->isConfigTexturable(desc.fConfig));
    size_t bpp = GrBytesPerPixel(dataConfig);

    // texels is const, but we may need to adjust the fPixels pointer based on the copy rect or
    // fRowBytes. Because of this we make a non-const shallow copy of texels.
    SkTArray<GrMipLevel> texelsShallowCopy(texels);

    for (int currentMipLevel = texelsShallowCopy.count() - 1; currentMipLevel >= 0;
         currentMipLevel--) {
        SkASSERT(texelsShallowCopy[currentMipLevel].fPixels);
    }

    // Determine whether we need to flip when we copy into the buffer
    bool flipY = (kBottomLeft_GrSurfaceOrigin == desc.fOrigin && !texelsShallowCopy.empty());

    // Adjust any params (left, top, currentWidth, currentHeight), then find the combined size of
    // all the mip levels and the relative offset of each into the collective buffer.
    // Do the first level separately because we may need to adjust width and height
    // (for the non-mipped case).
    if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
                                               &width,
                                               &height,
                                               &texelsShallowCopy[0].fPixels,
                                               &texelsShallowCopy[0].fRowBytes)) {
        return false;
    }
    SkTArray<size_t> individualMipOffsets(texelsShallowCopy.count());
    individualMipOffsets.push_back(0);
    size_t combinedBufferSize = width * bpp * height;
    int currentWidth = width;
    int currentHeight = height;
    for (int currentMipLevel = 1; currentMipLevel < texelsShallowCopy.count(); currentMipLevel++) {
        currentWidth = SkTMax(1, currentWidth/2);
        currentHeight = SkTMax(1, currentHeight/2);
        if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
                                                   &currentWidth,
                                                   &currentHeight,
                                                   &texelsShallowCopy[currentMipLevel].fPixels,
                                                   &texelsShallowCopy[currentMipLevel].fRowBytes)) {
            return false;
        }
        const size_t trimmedSize = currentWidth * bpp * currentHeight;
        individualMipOffsets.push_back(combinedBufferSize);
        combinedBufferSize += trimmedSize;
    }

    // allocate buffer to hold our mip data
    GrVkTransferBuffer* transferBuffer =
        GrVkTransferBuffer::Create(this, combinedBufferSize, GrVkBuffer::kCopyRead_Type);

    char* buffer = (char*) transferBuffer->map();
    SkTArray<VkBufferImageCopy> regions(texelsShallowCopy.count());

    currentWidth = width;
    currentHeight = height;
    for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count(); currentMipLevel++) {
        const size_t trimRowBytes = currentWidth * bpp;
        const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes;

        // copy data into the buffer, skipping the trailing bytes
        char* dst = buffer + individualMipOffsets[currentMipLevel];
        const char* src = (const char*)texelsShallowCopy[currentMipLevel].fPixels;
        if (flipY) {
            src += (currentHeight - 1) * rowBytes;
            for (int y = 0; y < currentHeight; y++) {
                memcpy(dst, src, trimRowBytes);
                src -= rowBytes;
                dst += trimRowBytes;
            }
        } else if (trimRowBytes == rowBytes) {
            memcpy(dst, src, trimRowBytes * currentHeight);
        } else {
            SkRectMemcpy(dst, trimRowBytes, src, rowBytes, trimRowBytes, currentHeight);
        }

        VkBufferImageCopy& region = regions.push_back();
        memset(&region, 0, sizeof(VkBufferImageCopy));
        region.bufferOffset = individualMipOffsets[currentMipLevel];
        region.bufferRowLength = currentWidth;
        region.bufferImageHeight = currentHeight;
        region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(currentMipLevel), 0, 1 };
        region.imageOffset = { left, flipY ? tex->height() - top - currentHeight : top, 0 };
        region.imageExtent = { (uint32_t)currentWidth, (uint32_t)currentHeight, 1 };

        currentWidth = SkTMax(1, currentWidth/2);
        currentHeight = SkTMax(1, currentHeight/2);
    }

    transferBuffer->unmap();

    // make sure the unmap has finished
    transferBuffer->addMemoryBarrier(this,
                                     VK_ACCESS_HOST_WRITE_BIT,
                                     VK_ACCESS_TRANSFER_READ_BIT,
                                     VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
                                     VK_PIPELINE_STAGE_TRANSFER_BIT,
                                     false);

    // Change layout of our target so it can be copied to
    tex->setImageLayout(this,
                        VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                        VK_ACCESS_TRANSFER_WRITE_BIT,
                        VK_PIPELINE_STAGE_TRANSFER_BIT,
                        false);

    // Copy the buffer to the image
    fCurrentCmdBuffer->copyBufferToImage(this,
                                         transferBuffer,
                                         tex,
                                         VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                         regions.count(),
                                         regions.begin());

    // Submit the current command buffer to the Queue
    this->submitCommandBuffer(kSkip_SyncQueue);

    transferBuffer->unref();

    return true;
}

////////////////////////////////////////////////////////////////////////////////
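// Creates a new texture (and render target, when requested). Linear tiling is only used for the
// kZeroCopy path and cannot carry mipmaps; every image gets TRANSFER_SRC/DST usage so it can
// participate in copies later.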
GrTexture* GrVkGpu::onCreateTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
                                    const SkTArray<GrMipLevel>& texels) {
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);

    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(desc.fConfig, &pixelFormat)) {
        return nullptr;
    }

    if (!fVkCaps->isConfigTexturable(desc.fConfig)) {
        return nullptr;
    }

    if (renderTarget && !fVkCaps->isConfigRenderable(desc.fConfig, false)) {
        return nullptr;
    }

    bool linearTiling = false;
    if (SkToBool(desc.fFlags & kZeroCopy_GrSurfaceFlag)) {
        // we can't have a linear texture with a mipmap
        if (texels.count() > 1) {
            SkDebugf("Trying to create linear tiled texture with mipmap");
            return nullptr;
        }
        if (fVkCaps->isConfigTexurableLinearly(desc.fConfig) &&
            (!renderTarget || fVkCaps->isConfigRenderableLinearly(desc.fConfig, false))) {
            linearTiling = true;
        } else {
            return nullptr;
        }
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    if (renderTarget) {
        usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    }

    // For now we will set the VK_IMAGE_USAGE_TRANSFER_DESTINATION_BIT and
    // VK_IMAGE_USAGE_TRANSFER_SOURCE_BIT on every texture since we do not know whether or not we
    // will be using this texture in some copy or not. Also this assumes, as is the current case,
    // that all render targets in vulkan are also textures. If we change this practice of setting
    // both bits, we must make sure to set the destination bit if we are uploading srcData to the
    // texture.
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VkFlags memProps = (!texels.empty() && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
                                                           VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

    // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is
    // requested, this ImageDesc describes the resolved texture. Therefore we always have samples
    // set to 1.
    int mipLevels = texels.empty() ? 1 : texels.count();
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = pixelFormat;
    imageDesc.fWidth = desc.fWidth;
    imageDesc.fHeight = desc.fHeight;
    imageDesc.fLevels = linearTiling ? 1 : mipLevels;
    imageDesc.fSamples = 1;
    imageDesc.fImageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = usageFlags;
    imageDesc.fMemProps = memProps;

    GrVkTexture* tex;
    if (renderTarget) {
        tex = GrVkTextureRenderTarget::CreateNewTextureRenderTarget(this, budgeted, desc,
                                                                    imageDesc);
    } else {
        tex = GrVkTexture::CreateNewTexture(this, budgeted, desc, imageDesc);
    }

    if (!tex) {
        return nullptr;
    }

    if (!texels.empty()) {
        SkASSERT(texels.begin()->fPixels);
        bool success;
        if (linearTiling) {
            success = this->uploadTexDataLinear(tex, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
                                                texels.begin()->fPixels, texels.begin()->fRowBytes);
        } else {
            success = this->uploadTexDataOptimal(tex, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
                                                 texels);
        }
        if (!success) {
            tex->unref();
            return nullptr;
        }
    }

    return tex;
}

////////////////////////////////////////////////////////////////////////////////

static GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin) {
    // By default, all textures in Vk use TopLeft
    if (kDefault_GrSurfaceOrigin == origin) {
        return kTopLeft_GrSurfaceOrigin;
    } else {
        return origin;
    }
}

GrTexture* GrVkGpu::onWrapBackendTexture(const GrBackendTextureDesc& desc,
                                         GrWrapOwnership ownership) {
    if (0 == desc.fTextureHandle) {
        return nullptr;
    }

    int maxSize = this->caps()->maxTextureSize();
    if (desc.fWidth > maxSize || desc.fHeight > maxSize) {
        return nullptr;
    }

    const GrVkImageInfo* info = reinterpret_cast<const GrVkImageInfo*>(desc.fTextureHandle);
    if (VK_NULL_HANDLE == info->fImage || VK_NULL_HANDLE == info->fAlloc.fMemory) {
        return nullptr;
    }
#ifdef SK_DEBUG
    VkFormat format;
    if (!GrPixelConfigToVkFormat(desc.fConfig, &format)) {
        return nullptr;
    }
    SkASSERT(format == info->fFormat);
#endif

    GrSurfaceDesc surfDesc;
    // next line relies on GrBackendTextureDesc's flags matching GrTexture's
    surfDesc.fFlags = (GrSurfaceFlags)desc.fFlags;
    surfDesc.fWidth = desc.fWidth;
    surfDesc.fHeight = desc.fHeight;
    surfDesc.fConfig = desc.fConfig;
    surfDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount());
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag);
    // In GL, Chrome assumes all textures are BottomLeft
    // In VK, we don't have this restriction
    surfDesc.fOrigin = resolve_origin(desc.fOrigin);

    GrVkTexture* texture = nullptr;
    if (renderTarget) {
        texture = GrVkTextureRenderTarget::CreateWrappedTextureRenderTarget(this, surfDesc,
                                                                            ownership, info);
    } else {
        texture = GrVkTexture::CreateWrappedTexture(this, surfDesc, ownership, info);
    }
    if (!texture) {
        return nullptr;
    }

    return texture;
}

GrRenderTarget* GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& wrapDesc,
                                                   GrWrapOwnership ownership) {

    const GrVkImageInfo* info =
        reinterpret_cast<const GrVkImageInfo*>(wrapDesc.fRenderTargetHandle);
    if (VK_NULL_HANDLE == info->fImage ||
        (VK_NULL_HANDLE == info->fAlloc.fMemory && kAdopt_GrWrapOwnership == ownership)) {
        return nullptr;
    }

    GrSurfaceDesc desc;
    desc.fConfig = wrapDesc.fConfig;
    desc.fFlags = kCheckAllocation_GrSurfaceFlag;
    desc.fWidth = wrapDesc.fWidth;
    desc.fHeight = wrapDesc.fHeight;
    desc.fSampleCnt = SkTMin(wrapDesc.fSampleCnt, this->caps()->maxSampleCount());

    desc.fOrigin = resolve_origin(wrapDesc.fOrigin);

    GrVkRenderTarget* tgt = GrVkRenderTarget::CreateWrappedRenderTarget(this, desc,
                                                                        ownership,
                                                                        info);
    if (tgt && wrapDesc.fStencilBits) {
        if (!createStencilAttachmentForRenderTarget(tgt, desc.fWidth, desc.fHeight)) {
            tgt->unref();
            return nullptr;
        }
    }
    return tgt;
}

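// Regenerates the full mip chain for an optimal-tiled texture: the image is reallocated with
// the required level count, the base level is blitted over from the old image, and each
// subsequent level is blitted from the previous one with an image barrier between blits.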
void GrVkGpu::generateMipmap(GrVkTexture* tex) const {
    // don't do anything for linearly tiled textures (can't have mipmaps)
    if (tex->isLinearTiled()) {
        SkDebugf("Trying to create mipmap for linear tiled texture");
        return;
    }

    // We cannot generate mipmaps for images that are multisampled.
    // TODO: does it even make sense for rendertargets in general?
    if (tex->asRenderTarget() && tex->asRenderTarget()->numColorSamples() > 1) {
        return;
    }

    // determine if we can blit to and from this format
    const GrVkCaps& caps = this->vkCaps();
    if (!caps.configCanBeDstofBlit(tex->config(), false) ||
        !caps.configCanBeSrcofBlit(tex->config(), false)) {
        return;
    }

    // change the original image's layout
    tex->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                        VK_ACCESS_TRANSFER_READ_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, false);

    // grab handle to the original image resource
    const GrVkResource* oldResource = tex->resource();
    oldResource->ref();
    VkImage oldImage = tex->image();

    // SkMipMap doesn't include the base level in the level count so we have to add 1
    uint32_t levelCount = SkMipMap::ComputeLevelCount(tex->width(), tex->height()) + 1;
    if (!tex->reallocForMipmap(this, levelCount)) {
        oldResource->unref(this);
        return;
    }

    // change the new image's layout
    tex->setImageLayout(this, VK_IMAGE_LAYOUT_GENERAL,
                        VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, false);

    // Blit original image
    int width = tex->width();
    int height = tex->height();

    VkImageBlit blitRegion;
    memset(&blitRegion, 0, sizeof(VkImageBlit));
    blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    blitRegion.srcOffsets[0] = { 0, 0, 0 };
    blitRegion.srcOffsets[1] = { width, height, 1 };
    blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    blitRegion.dstOffsets[0] = { 0, 0, 0 };
    blitRegion.dstOffsets[1] = { width, height, 1 };

    fCurrentCmdBuffer->blitImage(this,
                                 oldResource,
                                 oldImage,
                                 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                 tex->resource(),
                                 tex->image(),
                                 VK_IMAGE_LAYOUT_GENERAL,
                                 1,
                                 &blitRegion,
                                 VK_FILTER_LINEAR);

    // setup memory barrier
    SkASSERT(GrVkFormatToPixelConfig(tex->imageFormat(), nullptr));
    VkImageAspectFlags aspectFlags = VK_IMAGE_ASPECT_COLOR_BIT;
    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,  // sType
        NULL,                                    // pNext
        VK_ACCESS_TRANSFER_WRITE_BIT,            // outputMask
        VK_ACCESS_TRANSFER_READ_BIT,             // inputMask
        VK_IMAGE_LAYOUT_GENERAL,                 // oldLayout
        VK_IMAGE_LAYOUT_GENERAL,                 // newLayout
        VK_QUEUE_FAMILY_IGNORED,                 // srcQueueFamilyIndex
        VK_QUEUE_FAMILY_IGNORED,                 // dstQueueFamilyIndex
        tex->image(),                            // image
        { aspectFlags, 0, 1, 0, 1 }              // subresourceRange
    };

    // Blit the miplevels
    uint32_t mipLevel = 1;
    while (mipLevel < levelCount) {
        int prevWidth = width;
        int prevHeight = height;
        width = SkTMax(1, width / 2);
        height = SkTMax(1, height / 2);

        imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
        this->addImageMemoryBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                                    false, &imageMemoryBarrier);

        blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel - 1, 0, 1 };
        blitRegion.srcOffsets[0] = { 0, 0, 0 };
        blitRegion.srcOffsets[1] = { prevWidth, prevHeight, 1 };
        blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 0, 1 };
        blitRegion.dstOffsets[0] = { 0, 0, 0 };
        blitRegion.dstOffsets[1] = { width, height, 1 };
        fCurrentCmdBuffer->blitImage(this,
                                     *tex,
                                     *tex,
                                     1,
                                     &blitRegion,
                                     VK_FILTER_LINEAR);
        ++mipLevel;
    }

    oldResource->unref(this);
}

////////////////////////////////////////////////////////////////////////////////

GrStencilAttachment* GrVkGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt,
                                                                     int width,
                                                                     int height) {
    SkASSERT(width >= rt->width());
    SkASSERT(height >= rt->height());

    int samples = rt->numStencilSamples();

    const GrVkCaps::StencilFormat& sFmt = this->vkCaps().preferedStencilFormat();

    GrVkStencilAttachment* stencil(GrVkStencilAttachment::Create(this,
                                                                 width,
                                                                 height,
                                                                 samples,
                                                                 sFmt));
    fStats.incStencilAttachmentCreates();
    return stencil;
}

////////////////////////////////////////////////////////////////////////////////

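// Test-only helper: creates a raw VkImage with bound memory outside the normal resource cache,
// optionally filling linear-tiled images by mapping their memory directly, and returns a
// heap-allocated GrVkImageInfo as the opaque backend handle.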
GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, int h,
                                                         GrPixelConfig config,
                                                         bool isRenderTarget) {

    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
        return 0;
    }

    bool linearTiling = false;
    if (!fVkCaps->isConfigTexturable(config)) {
        return 0;
    }

    if (isRenderTarget && !fVkCaps->isConfigRenderable(config, false)) {
        return 0;
    }

    if (fVkCaps->isConfigTexurableLinearly(config) &&
        (!isRenderTarget || fVkCaps->isConfigRenderableLinearly(config, false))) {
        linearTiling = true;
    }

    // Currently this is not supported since it requires a copy which has not yet been implemented.
    if (srcData && !linearTiling) {
        return 0;
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    if (isRenderTarget) {
        usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    }

    VkImage image = VK_NULL_HANDLE;
    GrVkAlloc alloc = { VK_NULL_HANDLE, 0, 0 };

    VkImageTiling imageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
    VkImageLayout initialLayout = (VK_IMAGE_TILING_LINEAR == imageTiling)
                                  ? VK_IMAGE_LAYOUT_PREINITIALIZED
                                  : VK_IMAGE_LAYOUT_UNDEFINED;

    // Create Image
    VkSampleCountFlagBits vkSamples;
    if (!GrSampleCountToVkSampleCount(1, &vkSamples)) {
        return 0;
    }

    const VkImageCreateInfo imageCreateInfo = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,  // sType
        NULL,                                 // pNext
        0,                                    // VkImageCreateFlags
        VK_IMAGE_TYPE_2D,                     // VkImageType
        pixelFormat,                          // VkFormat
        { (uint32_t) w, (uint32_t) h, 1 },    // VkExtent3D
        1,                                    // mipLevels
        1,                                    // arrayLayers
        vkSamples,                            // samples
        imageTiling,                          // VkImageTiling
        usageFlags,                           // VkImageUsageFlags
        VK_SHARING_MODE_EXCLUSIVE,            // VkSharingMode
        0,                                    // queueFamilyCount
        0,                                    // pQueueFamilyIndices
        initialLayout                         // initialLayout
    };

    GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateImage(this->device(), &imageCreateInfo, nullptr, &image));

    if (!GrVkMemory::AllocAndBindImageMemory(this, image, linearTiling, &alloc)) {
        VK_CALL(DestroyImage(this->device(), image, nullptr));
        return 0;
    }

    if (srcData) {
        if (linearTiling) {
            const VkImageSubresource subres = {
                VK_IMAGE_ASPECT_COLOR_BIT,
                0,  // mipLevel
                0,  // arraySlice
            };
            VkSubresourceLayout layout;
            VkResult err;

            VK_CALL(GetImageSubresourceLayout(fDevice, image, &subres, &layout));

            void* mapPtr;
            err = VK_CALL(MapMemory(fDevice, alloc.fMemory, alloc.fOffset, layout.rowPitch * h,
                                    0, &mapPtr));
            if (err) {
                GrVkMemory::FreeImageMemory(this, linearTiling, alloc);
                VK_CALL(DestroyImage(this->device(), image, nullptr));
                return 0;
            }

            size_t bpp = GrBytesPerPixel(config);
            size_t rowCopyBytes = bpp * w;
            // If there is no padding on dst (layout.rowPitch) we can do a single memcpy.
            // This assumes the srcData comes in with no padding.
            if (rowCopyBytes == layout.rowPitch) {
                memcpy(mapPtr, srcData, rowCopyBytes * h);
            } else {
                SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), srcData, rowCopyBytes,
                             rowCopyBytes, h);
            }
            VK_CALL(UnmapMemory(fDevice, alloc.fMemory));
        } else {
            // TODO: Add support for copying to optimal tiling
            SkASSERT(false);
        }
    }

    GrVkImageInfo* info = new GrVkImageInfo;
    info->fImage = image;
    info->fAlloc = alloc;
    info->fImageTiling = imageTiling;
    info->fImageLayout = initialLayout;
    info->fFormat = pixelFormat;
    info->fLevelCount = 1;

    return (GrBackendObject)info;
}

bool GrVkGpu::isTestingOnlyBackendTexture(GrBackendObject id) const {
    const GrVkImageInfo* backend = reinterpret_cast<const GrVkImageInfo*>(id);

    if (backend && backend->fImage && backend->fAlloc.fMemory) {
        VkMemoryRequirements req;
        memset(&req, 0, sizeof(req));
        GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
                                                                   backend->fImage,
                                                                   &req));
        // TODO: find a better check
        // This will probably fail with a different driver
        return (req.size > 0) && (req.size <= 8192 * 8192);
    }

    return false;
}

void GrVkGpu::deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandon) {
    GrVkImageInfo* backend = reinterpret_cast<GrVkImageInfo*>(id);
    if (backend) {
        if (!abandon) {
            // something in the command buffer may still be using this, so force submit
            this->submitCommandBuffer(kForce_SyncQueue);
            GrVkImage::DestroyImageInfo(this, backend);
        }
        delete backend;
    }
}

////////////////////////////////////////////////////////////////////////////////

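// The three helpers below record a pipeline barrier into the current command buffer for a
// global memory, buffer, or image barrier respectively; GrVkCommandBuffer handles the actual
// command encoding.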
void GrVkGpu::addMemoryBarrier(VkPipelineStageFlags srcStageMask,
                               VkPipelineStageFlags dstStageMask,
                               bool byRegion,
                               VkMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                     VkPipelineStageFlags dstStageMask,
                                     bool byRegion,
                                     VkBufferMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kBufferMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::addImageMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                    VkPipelineStageFlags dstStageMask,
                                    bool byRegion,
                                    VkImageMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kImageMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::finishDrawTarget() {
    // Submit the current command buffer to the Queue
    this->submitCommandBuffer(kSkip_SyncQueue);
}

void GrVkGpu::clearStencil(GrRenderTarget* target) {
    if (nullptr == target) {
        return;
    }
    GrStencilAttachment* stencil = target->renderTargetPriv().getStencilAttachment();
    GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));

    vkStencil->setImageLayout(this,
                              VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                              VK_ACCESS_TRANSFER_WRITE_BIT,
                              VK_PIPELINE_STAGE_TRANSFER_BIT,
                              false);

    VkImageSubresourceRange subRange;
    memset(&subRange, 0, sizeof(VkImageSubresourceRange));
    subRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    subRange.baseMipLevel = 0;
    subRange.levelCount = 1;
    subRange.baseArrayLayer = 0;
    subRange.layerCount = 1;

    // TODO: I imagine that most times we want to clear a stencil it will be at the beginning of a
    // draw. Thus we should look into using the load op functions on the render pass to clear out
    // the stencil there.
    fCurrentCmdBuffer->clearDepthStencilImage(this, vkStencil, &vkStencilColor, 1, &subRange);
}

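// The can_copy_* predicates below choose between the copy strategies: vkCmdCopyImage requires
// matching origins and bytes-per-pixel, vkCmdBlitImage requires blit support for both configs,
// and neither path currently handles multisampled surfaces.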
inline bool can_copy_image(const GrSurface* dst,
                           const GrSurface* src,
                           const GrVkGpu* gpu) {
    // Currently we don't support msaa
    if ((dst->asRenderTarget() && dst->asRenderTarget()->numColorSamples() > 1) ||
        (src->asRenderTarget() && src->asRenderTarget()->numColorSamples() > 1)) {
        return false;
    }

    // We require that all vulkan GrSurfaces have been created with transfer_dst and transfer_src
    // as image usage flags.
    if (src->origin() == dst->origin() &&
        GrBytesPerPixel(src->config()) == GrBytesPerPixel(dst->config())) {
        return true;
    }

    // How does msaa play into this? If a VkTexture is multisampled, are we copying the
    // multisampled or the resolved image here? If multisampled, Vulkan requires the sample counts
    // to be the same.

    return false;
}

void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst,
                                     GrSurface* src,
                                     GrVkImage* dstImage,
                                     GrVkImage* srcImage,
                                     const SkIRect& srcRect,
                                     const SkIPoint& dstPoint) {
    SkASSERT(can_copy_image(dst, src, this));

    // These flags are for flushing/invalidating caches; for the dst image it doesn't matter if
    // the cache is flushed since it is only being written to.
    dstImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    srcImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                             VK_ACCESS_TRANSFER_READ_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    // Flip rect if necessary
    SkIRect srcVkRect = srcRect;
    int32_t dstY = dstPoint.fY;

    if (kBottomLeft_GrSurfaceOrigin == src->origin()) {
        SkASSERT(kBottomLeft_GrSurfaceOrigin == dst->origin());
        srcVkRect.fTop = src->height() - srcRect.fBottom;
        srcVkRect.fBottom = src->height() - srcRect.fTop;
        dstY = dst->height() - dstPoint.fY - srcVkRect.height();
    }

    VkImageCopy copyRegion;
    memset(&copyRegion, 0, sizeof(VkImageCopy));
    copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.srcOffset = { srcVkRect.fLeft, srcVkRect.fTop, 0 };
    copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.dstOffset = { dstPoint.fX, dstY, 0 };
    // The depth value of the extent is ignored according to the Vulkan spec for 2D images.
    // However, at least the Nexus 5X seems to check it, so as a workaround we must set the
    // depth value to 1.
    copyRegion.extent = { (uint32_t)srcVkRect.width(), (uint32_t)srcVkRect.height(), 1 };

    fCurrentCmdBuffer->copyImage(this,
                                 srcImage,
                                 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                 dstImage,
                                 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                 1,
                                 &copyRegion);

    SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
                                        srcRect.width(), srcRect.height());
    this->didWriteToSurface(dst, &dstRect);
}
1193
inline bool can_copy_as_blit(const GrSurface* dst,
                             const GrSurface* src,
                             const GrVkImage* dstImage,
                             const GrVkImage* srcImage,
                             const GrVkGpu* gpu) {
    // We require that all vulkan GrSurfaces have been created with transfer_dst and transfer_src
    // as image usage flags.
    const GrVkCaps& caps = gpu->vkCaps();
    if (!caps.configCanBeDstofBlit(dst->config(), dstImage->isLinearTiled()) ||
        !caps.configCanBeSrcofBlit(src->config(), srcImage->isLinearTiled())) {
        return false;
    }

    // We cannot blit images that are multisampled. We still need to figure out whether we can
    // blit the resolved MSAA image instead.
    if ((dst->asRenderTarget() && dst->asRenderTarget()->numColorSamples() > 1) ||
        (src->asRenderTarget() && src->asRenderTarget()->numColorSamples() > 1)) {
        return false;
    }

    return true;
}

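// Copies with vkCmdBlitImage. Src and dst rects are always the same size, so no scaling occurs;
// the blit is used here because it can mirror the dst rect to handle mismatched surface origins.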
void GrVkGpu::copySurfaceAsBlit(GrSurface* dst,
                                GrSurface* src,
                                GrVkImage* dstImage,
                                GrVkImage* srcImage,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint) {
    SkASSERT(can_copy_as_blit(dst, src, dstImage, srcImage, this));

    dstImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    srcImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                             VK_ACCESS_TRANSFER_READ_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    // Flip rect if necessary
    SkIRect srcVkRect;
    srcVkRect.fLeft = srcRect.fLeft;
    srcVkRect.fRight = srcRect.fRight;
    SkIRect dstRect;
    dstRect.fLeft = dstPoint.fX;
    dstRect.fRight = dstPoint.fX + srcRect.width();

    if (kBottomLeft_GrSurfaceOrigin == src->origin()) {
        srcVkRect.fTop = src->height() - srcRect.fBottom;
        srcVkRect.fBottom = src->height() - srcRect.fTop;
    } else {
        srcVkRect.fTop = srcRect.fTop;
        srcVkRect.fBottom = srcRect.fBottom;
    }

    if (kBottomLeft_GrSurfaceOrigin == dst->origin()) {
        dstRect.fTop = dst->height() - dstPoint.fY - srcVkRect.height();
    } else {
        dstRect.fTop = dstPoint.fY;
    }
    dstRect.fBottom = dstRect.fTop + srcVkRect.height();

    // If we have different origins, we need to flip the top and bottom of the dst rect so that
    // we get the correct orientation of the copied data.
    if (src->origin() != dst->origin()) {
        SkTSwap(dstRect.fTop, dstRect.fBottom);
    }

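    // vkCmdBlitImage treats srcOffsets/dstOffsets as opposite corners of each region, so a
    // dstRect with fTop/fBottom swapped above yields a vertically mirrored blit.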
    VkImageBlit blitRegion;
    memset(&blitRegion, 0, sizeof(VkImageBlit));
    blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    blitRegion.srcOffsets[0] = { srcVkRect.fLeft, srcVkRect.fTop, 0 };
    blitRegion.srcOffsets[1] = { srcVkRect.fRight, srcVkRect.fBottom, 0 };
    blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    blitRegion.dstOffsets[0] = { dstRect.fLeft, dstRect.fTop, 0 };
    blitRegion.dstOffsets[1] = { dstRect.fRight, dstRect.fBottom, 0 };

    fCurrentCmdBuffer->blitImage(this,
                                 *srcImage,
                                 *dstImage,
                                 1,
                                 &blitRegion,
                                 VK_FILTER_NEAREST); // We never scale so any filter works here

    this->didWriteToSurface(dst, &dstRect);
}

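// Draw-based copies are not implemented yet: can_copy_as_draw() rejects everything, so
// copySurfaceAsDraw() should be unreachable.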
inline bool can_copy_as_draw(const GrSurface* dst,
                             const GrSurface* src,
                             const GrVkGpu* gpu) {
    return false;
}

void GrVkGpu::copySurfaceAsDraw(GrSurface* dst,
                                GrSurface* src,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint) {
    SkASSERT(false);
}

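// Tries each copy strategy in turn: a direct image copy, then a blit, then (eventually) a draw.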
bool GrVkGpu::onCopySurface(GrSurface* dst,
                            GrSurface* src,
                            const SkIRect& srcRect,
                            const SkIPoint& dstPoint) {
    GrVkImage* dstImage;
    GrVkImage* srcImage;
    if (dst->asTexture()) {
        dstImage = static_cast<GrVkTexture*>(dst->asTexture());
    } else {
        SkASSERT(dst->asRenderTarget());
        dstImage = static_cast<GrVkRenderTarget*>(dst->asRenderTarget());
    }
    if (src->asTexture()) {
        srcImage = static_cast<GrVkTexture*>(src->asTexture());
    } else {
        SkASSERT(src->asRenderTarget());
        srcImage = static_cast<GrVkRenderTarget*>(src->asRenderTarget());
    }

    if (can_copy_image(dst, src, this)) {
        this->copySurfaceAsCopyImage(dst, src, dstImage, srcImage, srcRect, dstPoint);
        return true;
    }

    if (can_copy_as_blit(dst, src, dstImage, srcImage, this)) {
        this->copySurfaceAsBlit(dst, src, dstImage, srcImage, srcRect, dstPoint);
        return true;
    }

    if (can_copy_as_draw(dst, src, this)) {
        this->copySurfaceAsDraw(dst, src, srcRect, dstPoint);
        return true;
    }

    return false;
}

bool GrVkGpu::initCopySurfaceDstDesc(const GrSurface* src, GrSurfaceDesc* desc) const {
    // Currently we don't support msaa
    if (src->asRenderTarget() && src->asRenderTarget()->numColorSamples() > 1) {
        return false;
    }

    // Copying into the dst will work via CopyImage, since all of our surfaces are created with
    // the transferSrc and transferDst usage flags in Vulkan.
    desc->fOrigin = src->origin();
    desc->fConfig = src->config();
    desc->fFlags = kNone_GrSurfaceFlags;
    return true;
}

void GrVkGpu::onGetMultisampleSpecs(GrRenderTarget* rt, const GrStencilSettings&,
                                    int* effectiveSampleCnt, SkAutoTDeleteArray<SkPoint>*) {
    // TODO: stub.
    SkASSERT(!this->caps()->sampleLocationsSupport());
    *effectiveSampleCnt = rt->desc().fSampleCnt;
}

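// If the surface's config already matches the requested read config we can read directly.
// Otherwise, if the read config is renderable, require a draw into a temp surface of that config
// so the actual readback is a straight copy.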
bool GrVkGpu::onGetReadPixelsInfo(GrSurface* srcSurface, int width, int height, size_t rowBytes,
                                  GrPixelConfig readConfig, DrawPreference* drawPreference,
                                  ReadPixelTempDrawInfo* tempDrawInfo) {
    if (srcSurface->config() == readConfig) {
        return true;
    }

    if (this->vkCaps().isConfigRenderable(readConfig, false)) {
        ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
        tempDrawInfo->fTempSurfaceDesc.fConfig = readConfig;
        tempDrawInfo->fReadConfig = readConfig;
        return true;
    }

    return false;
}

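// Reads pixels by copying the image into a GPU-to-CPU transfer buffer, force-submitting the
// command buffer so the copy completes, then mapping the buffer and memcpy'ing out. Bottom-left
// origin surfaces are flipped in place afterwards.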
bool GrVkGpu::onReadPixels(GrSurface* surface,
                           int left, int top, int width, int height,
                           GrPixelConfig config,
                           void* buffer,
                           size_t rowBytes) {
    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
        return false;
    }

    GrVkTexture* tgt = static_cast<GrVkTexture*>(surface->asTexture());
    if (!tgt) {
        return false;
    }

    // Change the layout of our target so it can be used as a copy source
    tgt->setImageLayout(this,
                        VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                        VK_ACCESS_TRANSFER_READ_BIT,
                        VK_PIPELINE_STAGE_TRANSFER_BIT,
                        false);

    GrVkTransferBuffer* transferBuffer =
        static_cast<GrVkTransferBuffer*>(this->createBuffer(rowBytes * height,
                                                            kXferGpuToCpu_GrBufferType,
                                                            kStream_GrAccessPattern));

    bool flipY = kBottomLeft_GrSurfaceOrigin == surface->origin();
    VkOffset3D offset = {
        left,
        flipY ? surface->height() - top - height : top,
        0
    };

    // Copy the image to a buffer so we can map it to cpu memory
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = 0;
    region.bufferRowLength = 0;  // Forces RowLength to be imageExtent.width
    region.bufferImageHeight = 0;  // Forces height to be tightly packed. Only useful for 3d images.
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = offset;
    region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

    fCurrentCmdBuffer->copyImageToBuffer(this,
                                         tgt,
                                         VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                         transferBuffer,
                                         1,
                                         &region);

    // make sure the copy to buffer has finished
    transferBuffer->addMemoryBarrier(this,
                                     VK_ACCESS_TRANSFER_WRITE_BIT,
                                     VK_ACCESS_HOST_READ_BIT,
                                     VK_PIPELINE_STAGE_TRANSFER_BIT,
                                     VK_PIPELINE_STAGE_HOST_BIT,
                                     false);

    // We need to submit the current command buffer to the Queue and make sure it finishes before
    // we can copy the data out of the buffer.
    this->submitCommandBuffer(kForce_SyncQueue);

    void* mappedMemory = transferBuffer->map();

    memcpy(buffer, mappedMemory, rowBytes * height);

    transferBuffer->unmap();
    transferBuffer->unref();

    if (flipY) {
        SkAutoSMalloc<32 * sizeof(GrColor)> scratch;
        size_t tightRowBytes = GrBytesPerPixel(config) * width;
        scratch.reset(tightRowBytes);
        void* tmpRow = scratch.get();
        // flip y in-place by rows
        const int halfY = height >> 1;
        char* top = reinterpret_cast<char*>(buffer);
        char* bottom = top + (height - 1) * rowBytes;
        for (int y = 0; y < halfY; y++) {
            memcpy(tmpRow, top, tightRowBytes);
            memcpy(top, bottom, tightRowBytes);
            memcpy(bottom, tmpRow, tightRowBytes);
            top += rowBytes;
            bottom -= rowBytes;
        }
    }

    return true;
}

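// Executes the secondary command buffer inside a render pass begun on the primary command buffer.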
void GrVkGpu::submitSecondaryCommandBuffer(const GrVkSecondaryCommandBuffer* buffer,
                                           const GrVkRenderPass* renderPass,
                                           const VkClearValue* colorClear,
                                           GrVkRenderTarget* target,
                                           const SkIRect& bounds) {
    // Currently it is fine for us to always pass in 1 for the clear count even if no attachment
    // uses it. In the current state, we also only use LOAD_OP_CLEAR for the color attachment,
    // which is always the first attachment.
    fCurrentCmdBuffer->beginRenderPass(this, renderPass, 1, colorClear, *target, bounds, true);
    fCurrentCmdBuffer->executeCommands(this, buffer);
    fCurrentCmdBuffer->endRenderPass(this);
}