blob: e05fb78057657f48197cdbcf65ab8f2a2708e9a2 [file] [log] [blame]
Robert Phillipsad248452020-06-30 09:27:52 -04001/*
2 * Copyright 2020 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8#ifndef GrDirectContext_DEFINED
9#define GrDirectContext_DEFINED
10
Adlai Holler2edf18d2020-10-14 13:02:09 -040011#include "include/gpu/GrRecordingContext.h"
Robert Phillipsad248452020-06-30 09:27:52 -040012
Adlai Holler6d0745b2020-10-13 13:29:00 -040013#include "include/gpu/GrBackendSurface.h"
14
15// We shouldn't need this but currently Android is relying on this being included transitively.
16#include "include/core/SkUnPreMultiply.h"
17
Adlai Holler53cf44c2020-10-13 17:40:21 -040018class GrAtlasManager;
Adlai Holler6d0745b2020-10-13 13:29:00 -040019class GrBackendSemaphore;
Adlai Holler53cf44c2020-10-13 17:40:21 -040020class GrClientMappedBufferManager;
Adlai Hollera0693042020-10-14 11:23:11 -040021class GrDirectContextPriv;
Adlai Holler6d0745b2020-10-13 13:29:00 -040022class GrContextThreadSafeProxy;
23struct GrD3DBackendContext;
24class GrFragmentProcessor;
Adlai Holler53cf44c2020-10-13 17:40:21 -040025class GrGpu;
Adlai Holler6d0745b2020-10-13 13:29:00 -040026struct GrGLInterface;
27struct GrMockOptions;
28class GrPath;
Adlai Holler53cf44c2020-10-13 17:40:21 -040029class GrResourceCache;
30class GrSmallPathAtlasMgr;
Adlai Holler6d0745b2020-10-13 13:29:00 -040031class GrRenderTargetContext;
Adlai Holler53cf44c2020-10-13 17:40:21 -040032class GrResourceProvider;
33class GrStrikeCache;
Adlai Holler6d0745b2020-10-13 13:29:00 -040034class GrSurfaceProxy;
35class GrSwizzle;
36class GrTextureProxy;
37struct GrVkBackendContext;
38
39class SkImage;
40class SkString;
41class SkSurfaceCharacterization;
42class SkSurfaceProps;
Adlai Holler53cf44c2020-10-13 17:40:21 -040043class SkTaskGroup;
Adlai Holler6d0745b2020-10-13 13:29:00 -040044class SkTraceMemoryDump;
Robert Phillipsad248452020-06-30 09:27:52 -040045
Adlai Holler2edf18d2020-10-14 13:02:09 -040046class SK_API GrDirectContext : public GrRecordingContext {
Robert Phillipsad248452020-06-30 09:27:52 -040047public:
Robert Phillipsf4f80112020-07-13 16:13:31 -040048#ifdef SK_GL
49 /**
50 * Creates a GrDirectContext for a backend context. If no GrGLInterface is provided then the
51 * result of GrGLMakeNativeInterface() is used if it succeeds.
52 */
53 static sk_sp<GrDirectContext> MakeGL(sk_sp<const GrGLInterface>, const GrContextOptions&);
54 static sk_sp<GrDirectContext> MakeGL(sk_sp<const GrGLInterface>);
55 static sk_sp<GrDirectContext> MakeGL(const GrContextOptions&);
56 static sk_sp<GrDirectContext> MakeGL();
57#endif
58
59#ifdef SK_VULKAN
60 /**
61 * The Vulkan context (VkQueue, VkDevice, VkInstance) must be kept alive until the returned
62 * GrDirectContext is destroyed. This also means that any objects created with this
63 * GrDirectContext (e.g. SkSurfaces, SkImages, etc.) must also be released as they may hold
64 * refs on the GrDirectContext. Once all these objects and the GrDirectContext are released,
65 * then it is safe to delete the vulkan objects.
66 */
67 static sk_sp<GrDirectContext> MakeVulkan(const GrVkBackendContext&, const GrContextOptions&);
68 static sk_sp<GrDirectContext> MakeVulkan(const GrVkBackendContext&);
69#endif
70
71#ifdef SK_METAL
72 /**
73 * Makes a GrDirectContext which uses Metal as the backend. The device parameter is an
74 * MTLDevice and queue is an MTLCommandQueue which should be used by the backend. These objects
75 * must have a ref on them which can be transferred to Ganesh which will release the ref
76 * when the GrDirectContext is destroyed.
77 */
78 static sk_sp<GrDirectContext> MakeMetal(void* device, void* queue, const GrContextOptions&);
79 static sk_sp<GrDirectContext> MakeMetal(void* device, void* queue);
80#endif
81
82#ifdef SK_DIRECT3D
83 /**
84 * Makes a GrDirectContext which uses Direct3D as the backend. The Direct3D context
85 * must be kept alive until the returned GrDirectContext is first destroyed or abandoned.
86 */
87 static sk_sp<GrDirectContext> MakeDirect3D(const GrD3DBackendContext&, const GrContextOptions&);
88 static sk_sp<GrDirectContext> MakeDirect3D(const GrD3DBackendContext&);
89#endif
90
91#ifdef SK_DAWN
92 static sk_sp<GrDirectContext> MakeDawn(const wgpu::Device&,
93 const GrContextOptions&);
94 static sk_sp<GrDirectContext> MakeDawn(const wgpu::Device&);
95#endif
96
97 static sk_sp<GrDirectContext> MakeMock(const GrMockOptions*, const GrContextOptions&);
98 static sk_sp<GrDirectContext> MakeMock(const GrMockOptions*);
Robert Phillipsad248452020-06-30 09:27:52 -040099
100 ~GrDirectContext() override;
101
Adlai Hollera7a40442020-10-09 09:49:42 -0400102 /**
103 * The context normally assumes that no outsider is setting state
104 * within the underlying 3D API's context/device/whatever. This call informs
105 * the context that the state was modified and it should resend. Shouldn't
106 * be called frequently for good performance.
107 * The flag bits, state, is dependent on which backend is used by the
108 * context, either GL or D3D (possible in future).
109 */
110 void resetContext(uint32_t state = kAll_GrBackendState);
111
112 /**
113 * If the backend is GrBackendApi::kOpenGL, then all texture unit/target combinations for which
114 * the context has modified the bound texture will have texture id 0 bound. This does not
115 * flush the context. Calling resetContext() does not change the set that will be bound
116 * to texture id 0 on the next call to resetGLTextureBindings(). After this is called
117 * all unit/target combinations are considered to have unmodified bindings until the context
118 * subsequently modifies them (meaning if this is called twice in a row with no intervening
119 * context usage then the second call is a no-op.)
120 */
121 void resetGLTextureBindings();
122
123 /**
124 * Abandons all GPU resources and assumes the underlying backend 3D API context is no longer
125 * usable. Call this if you have lost the associated GPU context, and thus internal texture,
126 * buffer, etc. references/IDs are now invalid. Calling this ensures that the destructors of the
Adlai Holler98dd0042020-10-13 10:04:00 -0400127 * context and any of its created resource objects will not make backend 3D API calls. Content
Adlai Hollera7a40442020-10-09 09:49:42 -0400128 * rendered but not previously flushed may be lost. After this function is called all subsequent
Adlai Holler98dd0042020-10-13 10:04:00 -0400129 * calls on the context will fail or be no-ops.
Adlai Hollera7a40442020-10-09 09:49:42 -0400130 *
131 * The typical use case for this function is that the underlying 3D context was lost and further
132 * API calls may crash.
133 *
134 * For Vulkan, even if the device becomes lost, the VkQueue, VkDevice, or VkInstance used to
135 * create the context must be kept alive even after abandoning the context. Those objects must
136 * live for the lifetime of the context object itself. The reason for this is so that
137 * we can continue to delete any outstanding GrBackendTextures/RenderTargets which must be
138 * cleaned up even in a device lost state.
139 */
Robert Phillipsad248452020-06-30 09:27:52 -0400140 void abandonContext() override;
141
Adlai Hollera7a40442020-10-09 09:49:42 -0400142 /**
143 * Returns true if the context was abandoned or if the backend specific context has
144 * gotten into an unrecoverable, lost state (e.g. in Vulkan backend if we've gotten a
Adlai Holler64e13832020-10-13 08:21:56 -0400145 * VK_ERROR_DEVICE_LOST). If the backend context is lost, this call will also abandon this
146 * context.
Adlai Hollera7a40442020-10-09 09:49:42 -0400147 */
148 bool abandoned() override;
149
Adlai Holler61a591c2020-10-12 12:38:33 -0400150 // TODO: Remove this from public after migrating Chrome.
151 sk_sp<GrContextThreadSafeProxy> threadSafeProxy();
152
153 /**
154 * Checks if the underlying 3D API reported an out-of-memory error. If this returns true it is
155 * reset and will return false until another out-of-memory error is reported by the 3D API. If
156 * the context is abandoned then this will report false.
157 *
158 * Currently this is implemented for:
159 *
160 * OpenGL [ES] - Note that client calls to glGetError() may swallow GL_OUT_OF_MEMORY errors and
161 * therefore hide the error from Skia. Also, it is not advised to use this in combination with
162 * enabling GrContextOptions::fSkipGLErrorChecks. That option may prevent the context from ever
163 * checking the GL context for OOM.
164 *
165 * Vulkan - Reports true if VK_ERROR_OUT_OF_HOST_MEMORY or VK_ERROR_OUT_OF_DEVICE_MEMORY has
166 * occurred.
167 */
168 bool oomed();
169
170 /**
171 * This is similar to abandonContext() however the underlying 3D context is not yet lost and
172 * the context will cleanup all allocated resources before returning. After returning it will
173 * assume that the underlying context may no longer be valid.
174 *
175 * The typical use case for this function is that the client is going to destroy the 3D context
176 * but can't guarantee that context will be destroyed first (perhaps because it may be ref'ed
177 * elsewhere by either the client or Skia objects).
178 *
179 * For Vulkan, even if the device becomes lost, the VkQueue, VkDevice, or VkInstance used to
180 * create the context must be alive before calling releaseResourcesAndAbandonContext.
181 */
182 void releaseResourcesAndAbandonContext();
Robert Phillipsad248452020-06-30 09:27:52 -0400183
Adlai Holler3a508e92020-10-12 13:58:01 -0400184 ///////////////////////////////////////////////////////////////////////////
185 // Resource Cache
186
187 /** DEPRECATED
188 * Return the current GPU resource cache limits.
189 *
190 * @param maxResources If non-null, will be set to -1.
191 * @param maxResourceBytes If non-null, returns maximum number of bytes of
192 * video memory that can be held in the cache.
193 */
194 void getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const;
195
196 /**
197 * Return the current GPU resource cache limit in bytes.
198 */
199 size_t getResourceCacheLimit() const;
200
201 /**
202 * Gets the current GPU resource cache usage.
203 *
204 * @param resourceCount If non-null, returns the number of resources that are held in the
205 * cache.
206 * @param maxResourceBytes If non-null, returns the total number of bytes of video memory held
207 * in the cache.
208 */
209 void getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const;
210
211 /**
212 * Gets the number of bytes in the cache consumed by purgeable (e.g. unlocked) resources.
213 */
214 size_t getResourceCachePurgeableBytes() const;
215
216 /** DEPRECATED
217 * Specify the GPU resource cache limits. If the current cache exceeds the maxResourceBytes
218 * limit, it will be purged (LRU) to keep the cache within the limit.
219 *
220 * @param maxResources Unused.
221 * @param maxResourceBytes The maximum number of bytes of video memory
222 * that can be held in the cache.
223 */
224 void setResourceCacheLimits(int maxResources, size_t maxResourceBytes);
225
226 /**
227 * Specify the GPU resource cache limit. If the cache currently exceeds this limit,
228 * it will be purged (LRU) to keep the cache within the limit.
229 *
230 * @param maxResourceBytes The maximum number of bytes of video memory
231 * that can be held in the cache.
232 */
233 void setResourceCacheLimit(size_t maxResourceBytes);
234
Adlai Holler4aa4c602020-10-12 13:58:52 -0400235 /**
236 * Frees GPU resources created by the context. Can be called to reduce GPU memory
237 * pressure.
238 */
239 void freeGpuResources();
240
241 /**
242 * Purge GPU resources that haven't been used in the past 'msNotUsed' milliseconds or are
243 * otherwise marked for deletion, regardless of whether the context is under budget.
244 */
245 void performDeferredCleanup(std::chrono::milliseconds msNotUsed);
246
247 // Temporary compatibility API for Android.
248 void purgeResourcesNotUsedInMs(std::chrono::milliseconds msNotUsed) {
249 this->performDeferredCleanup(msNotUsed);
250 }
251
252 /**
253 * Purge unlocked resources from the cache until the provided byte count has been reached
254 * or we have purged all unlocked resources. The default policy is to purge in LRU order, but
255 * can be overridden to prefer purging scratch resources (in LRU order) prior to purging other
256 * resource types.
257 *
258 * @param bytesToPurge the desired number of bytes to be purged.
259 * @param preferScratchResources If true scratch resources will be purged prior to other
260 * resource types.
261 */
262 void purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources);
263
264 /**
265 * This entry point is intended for instances where an app has been backgrounded or
266 * suspended.
267 * If 'scratchResourcesOnly' is true all unlocked scratch resources will be purged but the
268 * unlocked resources with persistent data will remain. If 'scratchResourcesOnly' is false
269 * then all unlocked resources will be purged.
270 * In either case, after the unlocked resources are purged a separate pass will be made to
271 * ensure that resource usage is under budget (i.e., even if 'scratchResourcesOnly' is true
272 * some resources with persistent data may be purged to be under budget).
273 *
274 * @param scratchResourcesOnly If true only unlocked scratch resources will be purged prior
275 * enforcing the budget requirements.
276 */
277 void purgeUnlockedResources(bool scratchResourcesOnly);
278
279 /**
280 * Gets the maximum supported texture size.
281 */
282 using GrRecordingContext::maxTextureSize;
283
284 /**
285 * Gets the maximum supported render target size.
286 */
287 using GrRecordingContext::maxRenderTargetSize;
288
289 /**
290 * Can a SkImage be created with the given color type.
291 */
292 using GrRecordingContext::colorTypeSupportedAsImage;
293
294 /**
295 * Can a SkSurface be created with the given color type. To check whether MSAA is supported
296 * use maxSurfaceSampleCountForColorType().
297 */
298 using GrRecordingContext::colorTypeSupportedAsSurface;
299
300 /**
301 * Gets the maximum supported sample count for a color type. 1 is returned if only non-MSAA
302 * rendering is supported for the color type. 0 is returned if rendering to this color type
303 * is not supported at all.
304 */
305 using GrRecordingContext::maxSurfaceSampleCountForColorType;
Robert Phillipsad248452020-06-30 09:27:52 -0400306
Adlai Holler3acc69a2020-10-13 08:20:51 -0400307 ///////////////////////////////////////////////////////////////////////////
308 // Misc.
309
310 /**
311 * Inserts a list of GPU semaphores that the current GPU-backed API must wait on before
312 * executing any more commands on the GPU. If this call returns false, then the GPU back-end
313 * will not wait on any passed in semaphores, and the client will still own the semaphores,
314 * regardless of the value of deleteSemaphoresAfterWait.
315 *
316 * If deleteSemaphoresAfterWait is false then Skia will not delete the semaphores. In this case
317 * it is the client's responsibility to not destroy or attempt to reuse the semaphores until it
318 * knows that Skia has finished waiting on them. This can be done by using finishedProcs on
319 * flush calls.
320 */
321 bool wait(int numSemaphores, const GrBackendSemaphore* waitSemaphores,
322 bool deleteSemaphoresAfterWait = true);
323
324 /**
325 * Call to ensure all drawing to the context has been flushed and submitted to the underlying 3D
326 * API. This is equivalent to calling GrContext::flush with a default GrFlushInfo followed by
327 * GrContext::submit(syncCpu).
328 */
329 void flushAndSubmit(bool syncCpu = false) {
330 this->flush(GrFlushInfo());
331 this->submit(syncCpu);
332 }
333
334 /**
335 * Call to ensure all drawing to the context has been flushed to underlying 3D API specific
336 * objects. A call to `submit` is always required to ensure work is actually sent to
337 * the gpu. Some specific API details:
338 * GL: Commands are actually sent to the driver, but glFlush is never called. Thus some
339 * sync objects from the flush will not be valid until a submission occurs.
340 *
341 * Vulkan/Metal/D3D/Dawn: Commands are recorded to the backend APIs corresponding command
342 * buffer or encoder objects. However, these objects are not sent to the gpu until a
343 * submission occurs.
344 *
345 * If the return is GrSemaphoresSubmitted::kYes, only initialized GrBackendSemaphores will be
346 * submitted to the gpu during the next submit call (it is possible Skia failed to create a
347 * subset of the semaphores). The client should not wait on these semaphores until after submit
348 * has been called, and must keep them alive until then. If this call returns
349 * GrSemaphoresSubmitted::kNo, the GPU backend will not submit any semaphores to be signaled on
350 * the GPU. Thus the client should not have the GPU wait on any of the semaphores passed in with
351 * the GrFlushInfo. Regardless of whether semaphores were submitted to the GPU or not, the
352 * client is still responsible for deleting any initialized semaphores.
353 * Regardless of semaphore submission the context will still be flushed. It should be
354 * emphasized that a return value of GrSemaphoresSubmitted::kNo does not mean the flush did not
355 * happen. It simply means there were no semaphores submitted to the GPU. A caller should only
356 * take this as a failure if they passed in semaphores to be submitted.
357 */
358 GrSemaphoresSubmitted flush(const GrFlushInfo& info);
359
360 void flush() { this->flush({}); }
361
362 /**
363 * Submit outstanding work to the gpu from all previously un-submitted flushes. The return
364 * value of the submit will indicate whether or not the submission to the GPU was successful.
365 *
366 * If the call returns true, all previously passed in semaphores in flush calls will have been
367 * submitted to the GPU and they can safely be waited on. The caller should wait on those
368 * semaphores or perform some other global synchronization before deleting the semaphores.
369 *
370 * If it returns false, then those same semaphores will not have been submitted and we will not
371 * try to submit them again. The caller is free to delete the semaphores at any time.
372 *
373 * If the syncCpu flag is true this function will return once the gpu has finished with all
374 * submitted work.
375 */
376 bool submit(bool syncCpu = false);
377
378 /**
379 * Checks whether any asynchronous work is complete and if so calls related callbacks.
380 */
381 void checkAsyncWorkCompletion();
382
383 /** Enumerates all cached GPU resources and dumps their memory to traceMemoryDump. */
384 // Chrome is using this!
385 void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;
386
387 bool supportsDistanceFieldText() const;
388
389 void storeVkPipelineCacheData();
390
391 // Returns the gpu memory size of the texture that backs the passed in SkImage. Returns 0 if
392 // the SkImage is not texture backed. For external format textures this will also return 0 as we
393 // cannot determine the correct size.
394 static size_t ComputeImageSize(sk_sp<SkImage> image, GrMipmapped, bool useNextPow2 = false);
395
396 /**
397 * Retrieve the default GrBackendFormat for a given SkColorType and renderability.
398 * It is guaranteed that this backend format will be the one used by the following
399 * SkColorType and SkSurfaceCharacterization-based createBackendTexture methods.
400 *
401 * The caller should check that the returned format is valid.
402 */
Adlai Holler2e0c70d2020-10-13 08:21:37 -0400403 using GrRecordingContext::defaultBackendFormat;
Adlai Holler98dd0042020-10-13 10:04:00 -0400404
405 /**
406 * The explicitly allocated backend texture API allows clients to use Skia to create backend
407 * objects outside of Skia proper (i.e., Skia's caching system will not know about them.)
408 *
409 * It is the client's responsibility to delete all these objects (using deleteBackendTexture)
410 * before deleting the context used to create them. If the backend is Vulkan, the textures must
411 * be deleted before abandoning the context as well. Additionally, clients should only delete
412 * these objects on the thread for which that context is active.
413 *
414 * The client is responsible for ensuring synchronization between different uses
415 * of the backend object (i.e., wrapping it in a surface, rendering to it, deleting the
416 * surface, rewrapping it in a image and drawing the image will require explicit
417 * synchronization on the client's part).
418 */
419
420 /**
421 * If possible, create an uninitialized backend texture. The client should ensure that the
422 * returned backend texture is valid.
423 * For the Vulkan backend the layout of the created VkImage will be:
424 * VK_IMAGE_LAYOUT_UNDEFINED.
425 */
426 GrBackendTexture createBackendTexture(int width, int height,
427 const GrBackendFormat&,
428 GrMipmapped,
429 GrRenderable,
430 GrProtected = GrProtected::kNo);
431
432 /**
433 * If possible, create an uninitialized backend texture. The client should ensure that the
434 * returned backend texture is valid.
435 * If successful, the created backend texture will be compatible with the provided
436 * SkColorType.
437 * For the Vulkan backend the layout of the created VkImage will be:
438 * VK_IMAGE_LAYOUT_UNDEFINED.
439 */
440 GrBackendTexture createBackendTexture(int width, int height,
441 SkColorType,
442 GrMipmapped,
443 GrRenderable,
444 GrProtected = GrProtected::kNo);
445
446 /**
447 * If possible, create a backend texture initialized to a particular color. The client should
448 * ensure that the returned backend texture is valid. The client can pass in a finishedProc
449 * to be notified when the data has been uploaded by the gpu and the texture can be deleted. The
450 * client is required to call `submit` to send the upload work to the gpu. The
451 * finishedProc will always get called even if we failed to create the GrBackendTexture.
452 * For the Vulkan backend the layout of the created VkImage will be:
453 * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
454 */
455 GrBackendTexture createBackendTexture(int width, int height,
456 const GrBackendFormat&,
457 const SkColor4f& color,
458 GrMipmapped,
459 GrRenderable,
460 GrProtected = GrProtected::kNo,
461 GrGpuFinishedProc finishedProc = nullptr,
462 GrGpuFinishedContext finishedContext = nullptr);
463
464 /**
465 * If possible, create a backend texture initialized to a particular color. The client should
466 * ensure that the returned backend texture is valid. The client can pass in a finishedProc
467 * to be notified when the data has been uploaded by the gpu and the texture can be deleted. The
468 * client is required to call `submit` to send the upload work to the gpu. The
469 * finishedProc will always get called even if we failed to create the GrBackendTexture.
470 * If successful, the created backend texture will be compatible with the provided
471 * SkColorType.
472 * For the Vulkan backend the layout of the created VkImage will be:
473 * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
474 */
475 GrBackendTexture createBackendTexture(int width, int height,
476 SkColorType,
477 const SkColor4f& color,
478 GrMipmapped,
479 GrRenderable,
480 GrProtected = GrProtected::kNo,
481 GrGpuFinishedProc finishedProc = nullptr,
482 GrGpuFinishedContext finishedContext = nullptr);
483
484 /**
485 * If possible, create a backend texture initialized with the provided pixmap data. The client
486 * should ensure that the returned backend texture is valid. The client can pass in a
487 * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
488 * deleted. The client is required to call `submit` to send the upload work to the gpu.
489 * The finishedProc will always get called even if we failed to create the GrBackendTexture.
490 * If successful, the created backend texture will be compatible with the provided
491 * pixmap(s). Compatible, in this case, means that the backend format will be the result
492 * of calling defaultBackendFormat on the base pixmap's colortype. The src data can be deleted
493 * when this call returns.
494 * If numLevels is 1 a non-mipMapped texture will result. If a mipMapped texture is desired
495 * the data for all the mipmap levels must be provided. In the mipmapped case all the
496 * colortypes of the provided pixmaps must be the same. Additionally, all the miplevels
497 * must be sized correctly (please see SkMipmap::ComputeLevelSize and ComputeLevelCount).
498 * Note: the pixmap's alphatypes and colorspaces are ignored.
499 * For the Vulkan backend the layout of the created VkImage will be:
500 * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
501 */
502 GrBackendTexture createBackendTexture(const SkPixmap srcData[], int numLevels,
503 GrRenderable, GrProtected,
504 GrGpuFinishedProc finishedProc = nullptr,
505 GrGpuFinishedContext finishedContext = nullptr);
506
507 // Helper version of above for a single level.
508 GrBackendTexture createBackendTexture(const SkPixmap& srcData,
509 GrRenderable renderable,
510 GrProtected isProtected,
511 GrGpuFinishedProc finishedProc = nullptr,
512 GrGpuFinishedContext finishedContext = nullptr) {
513 return this->createBackendTexture(&srcData, 1, renderable, isProtected, finishedProc,
514 finishedContext);
515 }
Adlai Holler2e0c70d2020-10-13 08:21:37 -0400516
517 /**
518 * If possible, updates a backend texture to be filled to a particular color. The client should
519 * check the return value to see if the update was successful. The client can pass in a
520 * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
521 * deleted. The client is required to call `submit` to send the upload work to the gpu.
522 * The finishedProc will always get called even if we failed to update the GrBackendTexture.
523 * For the Vulkan backend after a successful update the layout of the created VkImage will be:
524 * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
525 */
526 bool updateBackendTexture(const GrBackendTexture&,
527 const SkColor4f& color,
528 GrGpuFinishedProc finishedProc,
529 GrGpuFinishedContext finishedContext);
530
531 /**
532 * If possible, updates a backend texture to be filled to a particular color. The data in
533 * GrBackendTexture and passed in color is interpreted with respect to the passed in
534 * SkColorType. The client should check the return value to see if the update was successful.
535 * The client can pass in a finishedProc to be notified when the data has been uploaded by the
536 * gpu and the texture can be deleted. The client is required to call `submit` to send
537 * the upload work to the gpu. The finishedProc will always get called even if we failed to
538 * update the GrBackendTexture.
539 * For the Vulkan backend after a successful update the layout of the created VkImage will be:
540 * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
541 */
542 bool updateBackendTexture(const GrBackendTexture&,
543 SkColorType skColorType,
544 const SkColor4f& color,
545 GrGpuFinishedProc finishedProc,
546 GrGpuFinishedContext finishedContext);
547
548 /**
549 * If possible, updates a backend texture filled with the provided pixmap data. The client
550 * should check the return value to see if the update was successful. The client can pass in a
551 * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
552 * deleted. The client is required to call `submit` to send the upload work to the gpu.
553 * The finishedProc will always get called even if we failed to create the GrBackendTexture.
554 * The backend texture must be compatible with the provided pixmap(s). Compatible, in this case,
555 * means that the backend format is compatible with the base pixmap's colortype. The src data
556 * can be deleted when this call returns.
557 * If the backend texture is mip mapped, the data for all the mipmap levels must be provided.
558 * In the mipmapped case all the colortypes of the provided pixmaps must be the same.
559 * Additionally, all the miplevels must be sized correctly (please see
560 * SkMipmap::ComputeLevelSize and ComputeLevelCount).
561 * Note: the pixmap's alphatypes and colorspaces are ignored.
562 * For the Vulkan backend after a successful update the layout of the created VkImage will be:
563 * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
564 */
565 bool updateBackendTexture(const GrBackendTexture&,
566 const SkPixmap srcData[],
567 int numLevels,
568 GrGpuFinishedProc finishedProc,
569 GrGpuFinishedContext finishedContext);
570
571 /**
572 * Retrieve the GrBackendFormat for a given SkImage::CompressionType. This is
573 * guaranteed to match the backend format used by the following
574 * createCompressedBackendTexture methods that take a CompressionType.
575 * The caller should check that the returned format is valid.
576 */
577 using GrRecordingContext::compressedBackendFormat;
578
Adlai Holler64e13832020-10-13 08:21:56 -0400579 /**
580 * If possible, create a compressed backend texture initialized to a particular color. The
581 * client should ensure that the returned backend texture is valid. The client can pass in a
582 * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
583 * deleted. The client is required to call `submit` to send the upload work to the gpu.
584 * The finishedProc will always get called even if we failed to create the GrBackendTexture.
585 * For the Vulkan backend the layout of the created VkImage will be:
586 * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
587 */
588 GrBackendTexture createCompressedBackendTexture(int width, int height,
589 const GrBackendFormat&,
590 const SkColor4f& color,
591 GrMipmapped,
592 GrProtected = GrProtected::kNo,
593 GrGpuFinishedProc finishedProc = nullptr,
594 GrGpuFinishedContext finishedContext = nullptr);
595
596 GrBackendTexture createCompressedBackendTexture(int width, int height,
597 SkImage::CompressionType,
598 const SkColor4f& color,
599 GrMipmapped,
600 GrProtected = GrProtected::kNo,
601 GrGpuFinishedProc finishedProc = nullptr,
602 GrGpuFinishedContext finishedContext = nullptr);
603
604 /**
605 * If possible, create a backend texture initialized with the provided raw data. The client
606 * should ensure that the returned backend texture is valid. The client can pass in a
607 * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
608 * deleted. The client is required to call `submit` to send the upload work to the gpu.
609 * The finishedProc will always get called even if we failed to create the GrBackendTexture
610 * If numLevels is 1 a non-mipMapped texture will result. If a mipMapped texture is desired
611 * the data for all the mipmap levels must be provided. Additionally, all the miplevels
612 * must be sized correctly (please see SkMipmap::ComputeLevelSize and ComputeLevelCount).
613 * For the Vulkan backend the layout of the created VkImage will be:
614 * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
615 */
616 GrBackendTexture createCompressedBackendTexture(int width, int height,
617 const GrBackendFormat&,
618 const void* data, size_t dataSize,
619 GrMipmapped,
620 GrProtected = GrProtected::kNo,
621 GrGpuFinishedProc finishedProc = nullptr,
622 GrGpuFinishedContext finishedContext = nullptr);
623
624 GrBackendTexture createCompressedBackendTexture(int width, int height,
625 SkImage::CompressionType,
626 const void* data, size_t dataSize,
627 GrMipmapped,
628 GrProtected = GrProtected::kNo,
629 GrGpuFinishedProc finishedProc = nullptr,
630 GrGpuFinishedContext finishedContext = nullptr);
631
632 /**
633 * If possible, updates a backend texture filled with the provided color. If the texture is
634 * mipmapped, all levels of the mip chain will be updated to have the supplied color. The client
635 * should check the return value to see if the update was successful. The client can pass in a
636 * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
637 * deleted. The client is required to call `submit` to send the upload work to the gpu.
638 * The finishedProc will always get called even if we failed to create the GrBackendTexture.
639 * For the Vulkan backend after a successful update the layout of the created VkImage will be:
640 * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
641 */
642 bool updateCompressedBackendTexture(const GrBackendTexture&,
643 const SkColor4f& color,
644 GrGpuFinishedProc finishedProc,
645 GrGpuFinishedContext finishedContext);
646
647 /**
648 * If possible, updates a backend texture filled with the provided raw data. The client
649 * should check the return value to see if the update was successful. The client can pass in a
650 * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
651 * deleted. The client is required to call `submit` to send the upload work to the gpu.
652 * The finishedProc will always get called even if we failed to create the GrBackendTexture.
653 * If a mipMapped texture is passed in, the data for all the mipmap levels must be provided.
654 * Additionally, all the miplevels must be sized correctly (please see
655 * SkMipMap::ComputeLevelSize and ComputeLevelCount).
656 * For the Vulkan backend after a successful update the layout of the created VkImage will be:
657 * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
658 */
659 bool updateCompressedBackendTexture(const GrBackendTexture&,
660 const void* data,
661 size_t dataSize,
662 GrGpuFinishedProc finishedProc,
663 GrGpuFinishedContext finishedContext);
664
Adlai Holler6d0745b2020-10-13 13:29:00 -0400665 /**
666 * Updates the state of the GrBackendTexture/RenderTarget to have the passed in
667 * GrBackendSurfaceMutableState. All objects that wrap the backend surface (i.e. SkSurfaces and
668 * SkImages) will also be aware of this state change. This call does not submit the state change
669 * to the gpu, but requires the client to call `submit` to send it to the GPU. The work
670 * for this call is ordered linearly with all other calls that require GrContext::submit to be
671 * called (e.g updateBackendTexture and flush). If finishedProc is not null then it will be
672 * called with finishedContext after the state transition is known to have occurred on the GPU.
673 *
674 * See GrBackendSurfaceMutableState to see what state can be set via this call.
675 *
676 * If the backend API is Vulkan, the caller can set the GrBackendSurfaceMutableState's
677 * VkImageLayout to VK_IMAGE_LAYOUT_UNDEFINED or queueFamilyIndex to VK_QUEUE_FAMILY_IGNORED to
678 * tell Skia to not change those respective states.
679 *
680 * If previousState is not null and this returns true, then Skia will have filled in
681 * previousState to have the values of the state before this call.
682 */
683 bool setBackendTextureState(const GrBackendTexture&,
684 const GrBackendSurfaceMutableState&,
685 GrBackendSurfaceMutableState* previousState = nullptr,
686 GrGpuFinishedProc finishedProc = nullptr,
687 GrGpuFinishedContext finishedContext = nullptr);
688 bool setBackendRenderTargetState(const GrBackendRenderTarget&,
689 const GrBackendSurfaceMutableState&,
690 GrBackendSurfaceMutableState* previousState = nullptr,
691 GrGpuFinishedProc finishedProc = nullptr,
692 GrGpuFinishedContext finishedContext = nullptr);
693
    // Deletes the given backend texture. Note: unlike the create/update calls above, no
    // finishedProc is taken here. NOTE(review): ordering of the deletion relative to
    // flush/`submit` is not documented at this declaration — confirm in the implementation.
    void deleteBackendTexture(GrBackendTexture);
695
    // This interface allows clients to pre-compile shaders and populate the runtime program cache.
    // The key and data blobs should be the ones passed to the PersistentCache, in SkSL format.
    //
    // Steps to use this API:
    //
    // 1) Create a GrDirectContext as normal, but set fPersistentCache on GrContextOptions to
    //    something that will save the cached shader blobs. Set fShaderCacheStrategy to kSkSL. This
    //    will ensure that the blobs are SkSL, and are suitable for pre-compilation.
    //
    // 2) Run your application, and save all of the key/data pairs that are fed to the cache.
    //
    // 3) Switch over to shipping your application. Include the key/data pairs from above.
    //
    // 4) At startup (or any convenient time), call precompileShader for each key/data pair.
    //    This will compile the SkSL to create a GL program, and populate the runtime cache.
    //
    // This is only guaranteed to work if the context/device used in step #2 are created in the
    // same way as the one used in step #4, and the same GrContextOptions are specified.
    // Using cached shader blobs on a different device or driver is undefined.
    bool precompileShader(const SkData& key, const SkData& data);
714
#ifdef SK_ENABLE_DUMP_GPU
    /** Returns a string with detailed information about the context & GPU, in JSON format.
        Only declared (and available) when the build defines SK_ENABLE_DUMP_GPU. */
    SkString dump() const;
#endif
719
Adlai Holler53cf44c2020-10-13 17:40:21 -0400720 // Provides access to functions that aren't part of the public API.
Adlai Hollera0693042020-10-14 11:23:11 -0400721 GrDirectContextPriv priv();
722 const GrDirectContextPriv priv() const; // NOLINT(readability-const-return-type)
Adlai Holler53cf44c2020-10-13 17:40:21 -0400723
Robert Phillipsad248452020-06-30 09:27:52 -0400724protected:
Robert Phillipsf4f80112020-07-13 16:13:31 -0400725 GrDirectContext(GrBackendApi backend, const GrContextOptions& options);
726
Robert Phillipsad248452020-06-30 09:27:52 -0400727 bool init() override;
728
Adlai Holler53cf44c2020-10-13 17:40:21 -0400729 GrAtlasManager* onGetAtlasManager() { return fAtlasManager.get(); }
730 GrSmallPathAtlasMgr* onGetSmallPathAtlasMgr();
Robert Phillipsad248452020-06-30 09:27:52 -0400731
Robert Phillips44333c52020-06-30 13:28:00 -0400732 GrDirectContext* asDirectContext() override { return this; }
733
Robert Phillipsad248452020-06-30 09:27:52 -0400734private:
Adlai Holler53cf44c2020-10-13 17:40:21 -0400735 // fTaskGroup must appear before anything that uses it (e.g. fGpu), so that it is destroyed
736 // after all of its users. Clients of fTaskGroup will generally want to ensure that they call
737 // wait() on it as they are being destroyed, to avoid the possibility of pending tasks being
738 // invoked after objects they depend upon have already been destroyed.
739 std::unique_ptr<SkTaskGroup> fTaskGroup;
740 std::unique_ptr<GrStrikeCache> fStrikeCache;
741 sk_sp<GrGpu> fGpu;
742 std::unique_ptr<GrResourceCache> fResourceCache;
743 std::unique_ptr<GrResourceProvider> fResourceProvider;
744
745 bool fDidTestPMConversions;
746 // true if the PM/UPM conversion succeeded; false otherwise
747 bool fPMUPMConversionsRoundTrip;
748
749 GrContextOptions::PersistentCache* fPersistentCache;
750 GrContextOptions::ShaderErrorHandler* fShaderErrorHandler;
751
752 std::unique_ptr<GrClientMappedBufferManager> fMappedBufferManager;
Robert Phillips3262bc82020-08-10 12:11:58 -0400753 std::unique_ptr<GrAtlasManager> fAtlasManager;
Robert Phillipsad248452020-06-30 09:27:52 -0400754
Robert Phillips079455c2020-08-11 15:18:46 -0400755 std::unique_ptr<GrSmallPathAtlasMgr> fSmallPathAtlasMgr;
Robert Phillips5edf5102020-08-10 16:30:36 -0400756
Adlai Hollera0693042020-10-14 11:23:11 -0400757 friend class GrDirectContextPriv;
Adlai Holler53cf44c2020-10-13 17:40:21 -0400758
Adlai Holler2edf18d2020-10-14 13:02:09 -0400759 using INHERITED = GrRecordingContext;
Robert Phillipsad248452020-06-30 09:27:52 -0400760};
761
762
763#endif