/*
 * Copyright 2020 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrDirectContext_DEFINED
#define GrDirectContext_DEFINED

#include "include/gpu/GrRecordingContext.h"

#include "include/gpu/GrBackendSurface.h"

// We shouldn't need this, but currently Android relies on this being included transitively.
#include "include/core/SkUnPreMultiply.h"

class GrAtlasManager;
class GrBackendSemaphore;
class GrClientMappedBufferManager;
class GrDirectContextPriv;
class GrContextThreadSafeProxy;
struct GrD3DBackendContext;
class GrFragmentProcessor;
class GrGpu;
struct GrGLInterface;
struct GrMtlBackendContext;
struct GrMockOptions;
class GrPath;
class GrResourceCache;
class GrSmallPathAtlasMgr;
class GrSurfaceDrawContext;
class GrResourceProvider;
class GrStrikeCache;
class GrSurfaceProxy;
class GrSwizzle;
class GrTextureProxy;
struct GrVkBackendContext;

class SkImage;
class SkString;
class SkSurfaceCharacterization;
class SkSurfaceProps;
class SkTaskGroup;
class SkTraceMemoryDump;

class SK_API GrDirectContext : public GrRecordingContext {
public:
#ifdef SK_GL
    /**
     * Creates a GrDirectContext for a backend context. If no GrGLInterface is provided, the
     * result of GrGLMakeNativeInterface() is used if it succeeds.
     */
    static sk_sp<GrDirectContext> MakeGL(sk_sp<const GrGLInterface>, const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeGL(sk_sp<const GrGLInterface>);
    static sk_sp<GrDirectContext> MakeGL(const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeGL();
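
    // A minimal usage sketch (not part of the API): assumes the client has already created a
    // native GL context and made it current on this thread, so that GrGLMakeNativeInterface()
    // can succeed.
    //
    //   sk_sp<GrDirectContext> ctx = GrDirectContext::MakeGL();
    //   if (!ctx) {
    //       // No usable native interface could be created.
    //   }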
#endif

#ifdef SK_VULKAN
    /**
     * The Vulkan context (VkQueue, VkDevice, VkInstance) must be kept alive until the returned
     * GrDirectContext is destroyed. This also means that any objects created with this
     * GrDirectContext (e.g. SkSurfaces, SkImages, etc.) must also be released, as they may hold
     * refs on the GrDirectContext. Once all these objects and the GrDirectContext are released,
     * it is safe to delete the Vulkan objects.
     */
    static sk_sp<GrDirectContext> MakeVulkan(const GrVkBackendContext&, const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeVulkan(const GrVkBackendContext&);
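
    // A minimal usage sketch (not part of the API): assumes the client has created the
    // VkInstance/VkPhysicalDevice/VkDevice/VkQueue and has a loader callback to hand to Skia.
    // 'getProc' and the other right-hand-side values are hypothetical client variables;
    // consult GrVkBackendContext for the full set of fields that must be filled out.
    //
    //   GrVkBackendContext vkContext;
    //   vkContext.fInstance           = instance;
    //   vkContext.fPhysicalDevice     = physicalDevice;
    //   vkContext.fDevice             = device;
    //   vkContext.fQueue              = graphicsQueue;
    //   vkContext.fGraphicsQueueIndex = graphicsQueueIndex;
    //   vkContext.fGetProc            = getProc;
    //   sk_sp<GrDirectContext> ctx = GrDirectContext::MakeVulkan(vkContext);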
#endif

#ifdef SK_METAL
    /**
     * Makes a GrDirectContext which uses Metal as the backend. The GrMtlBackendContext contains
     * an MTLDevice and MTLCommandQueue which should be used by the backend. These objects must
     * have their own ref which will be released when the GrMtlBackendContext is destroyed.
     * Ganesh will take its own ref on the objects, which will be released when the
     * GrDirectContext is destroyed.
     */
    static sk_sp<GrDirectContext> MakeMetal(const GrMtlBackendContext&, const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeMetal(const GrMtlBackendContext&);
    /**
     * Deprecated.
     *
     * Makes a GrDirectContext which uses Metal as the backend. The device parameter is an
     * MTLDevice and queue is an MTLCommandQueue which should be used by the backend. These
     * objects must have a ref on them that can be transferred to Ganesh, which will release
     * the ref when the GrDirectContext is destroyed.
     */
    static sk_sp<GrDirectContext> MakeMetal(void* device, void* queue, const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeMetal(void* device, void* queue);
#endif

#ifdef SK_DIRECT3D
    /**
     * Makes a GrDirectContext which uses Direct3D as the backend. The Direct3D context
     * must be kept alive until the returned GrDirectContext is first destroyed or abandoned.
     */
    static sk_sp<GrDirectContext> MakeDirect3D(const GrD3DBackendContext&, const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeDirect3D(const GrD3DBackendContext&);
#endif

#ifdef SK_DAWN
    static sk_sp<GrDirectContext> MakeDawn(const wgpu::Device&,
                                           const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeDawn(const wgpu::Device&);
#endif

    static sk_sp<GrDirectContext> MakeMock(const GrMockOptions*, const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeMock(const GrMockOptions*);

    ~GrDirectContext() override;

    /**
     * The context normally assumes that no outsider is setting state within the underlying 3D
     * API's context/device/whatever. This call informs the context that the state was modified
     * and that it should resend that state. It shouldn't be called frequently, for good
     * performance. The flag bits in 'state' depend on which backend is used by the context,
     * either GL or D3D (possible in future).
     */
    void resetContext(uint32_t state = kAll_GrBackendState);

    /**
     * If the backend is GrBackendApi::kOpenGL, then all texture unit/target combinations for
     * which the context has modified the bound texture will have texture id 0 bound. This does
     * not flush the context. Calling resetContext() does not change the set that will be bound
     * to texture id 0 on the next call to resetGLTextureBindings(). After this is called all
     * unit/target combinations are considered to have unmodified bindings until the context
     * subsequently modifies them (meaning if this is called twice in a row with no intervening
     * context usage then the second call is a no-op).
     */
    void resetGLTextureBindings();

    /**
     * Abandons all GPU resources and assumes the underlying backend 3D API context is no longer
     * usable. Call this if you have lost the associated GPU context, and thus internal texture,
     * buffer, etc. references/IDs are now invalid. Calling this ensures that the destructors of
     * the context and any of its created resource objects will not make backend 3D API calls.
     * Content rendered but not previously flushed may be lost. After this function is called all
     * subsequent calls on the context will fail or be no-ops.
     *
     * The typical use case for this function is that the underlying 3D context was lost and
     * further API calls may crash.
     *
     * For Vulkan, even if the device becomes lost, the VkQueue, VkDevice, or VkInstance used to
     * create the context must be kept alive even after abandoning the context. Those objects
     * must live for the lifetime of the context object itself. The reason for this is so that
     * we can continue to delete any outstanding GrBackendTextures/RenderTargets which must be
     * cleaned up even in a device lost state.
     */
    void abandonContext() override;

    /**
     * Returns true if the context was abandoned or if the backend specific context has gotten
     * into an unrecoverable, lost state (e.g. in the Vulkan backend if we've gotten a
     * VK_ERROR_DEVICE_LOST). If the backend context is lost, this call will also abandon this
     * context.
     */
    bool abandoned() override;

    // TODO: Remove this from public after migrating Chrome.
    sk_sp<GrContextThreadSafeProxy> threadSafeProxy();

    /**
     * Checks if the underlying 3D API reported an out-of-memory error. If this returns true it
     * is reset and will return false until another out-of-memory error is reported by the 3D
     * API. If the context is abandoned then this will report false.
     *
     * Currently this is implemented for:
     *
     * OpenGL [ES] - Note that client calls to glGetError() may swallow GL_OUT_OF_MEMORY errors
     * and therefore hide the error from Skia. Also, it is not advised to use this in combination
     * with enabling GrContextOptions::fSkipGLErrorChecks. That option may prevent the context
     * from ever checking the GL context for OOM.
     *
     * Vulkan - Reports true if VK_ERROR_OUT_OF_HOST_MEMORY or VK_ERROR_OUT_OF_DEVICE_MEMORY has
     * occurred.
     */
    bool oomed();

    /**
     * This is similar to abandonContext(), however the underlying 3D context is not yet lost
     * and the context will clean up all allocated resources before returning. After returning
     * it will assume that the underlying context may no longer be valid.
     *
     * The typical use case for this function is that the client is going to destroy the 3D
     * context but can't guarantee that context will be destroyed first (perhaps because it may
     * be ref'ed elsewhere by either the client or Skia objects).
     *
     * For Vulkan, even if the device becomes lost, the VkQueue, VkDevice, or VkInstance used to
     * create the context must be alive before calling releaseResourcesAndAbandonContext.
     */
    void releaseResourcesAndAbandonContext();

    ///////////////////////////////////////////////////////////////////////////
    // Resource Cache

    /** DEPRECATED
     * Return the current GPU resource cache limits.
     *
     * @param maxResources     If non-null, will be set to -1.
     * @param maxResourceBytes If non-null, returns maximum number of bytes of
     *                         video memory that can be held in the cache.
     */
    void getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const;

    /**
     * Return the current GPU resource cache limit in bytes.
     */
    size_t getResourceCacheLimit() const;

    /**
     * Gets the current GPU resource cache usage.
     *
     * @param resourceCount If non-null, returns the number of resources that are held in the
     *                      cache.
     * @param resourceBytes If non-null, returns the total number of bytes of video memory held
     *                      in the cache.
     */
    void getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const;

    /**
     * Gets the number of bytes in the cache consumed by purgeable (e.g. unlocked) resources.
     */
    size_t getResourceCachePurgeableBytes() const;

    /** DEPRECATED
     * Specify the GPU resource cache limits. If the current cache exceeds the maxResourceBytes
     * limit, it will be purged (LRU) to keep the cache within the limit.
     *
     * @param maxResources     Unused.
     * @param maxResourceBytes The maximum number of bytes of video memory
     *                         that can be held in the cache.
     */
    void setResourceCacheLimits(int maxResources, size_t maxResourceBytes);

    /**
     * Specify the GPU resource cache limit. If the cache currently exceeds this limit,
     * it will be purged (LRU) to keep the cache within the limit.
     *
     * @param maxResourceBytes The maximum number of bytes of video memory
     *                         that can be held in the cache.
     */
    void setResourceCacheLimit(size_t maxResourceBytes);

    /**
     * Frees GPU resources created by the context. Can be called to reduce GPU memory pressure.
     */
    void freeGpuResources();

    /**
     * Purge GPU resources that haven't been used in the past 'msNotUsed' milliseconds or are
     * otherwise marked for deletion, regardless of whether the context is under budget.
     */
    void performDeferredCleanup(std::chrono::milliseconds msNotUsed);
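
    // A minimal usage sketch (not part of the API): purge resources idle for more than five
    // seconds, e.g. from a periodic cleanup task. std::chrono::seconds converts implicitly to
    // std::chrono::milliseconds.
    //
    //   ctx->performDeferredCleanup(std::chrono::seconds(5));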

    // Temporary compatibility API for Android.
    void purgeResourcesNotUsedInMs(std::chrono::milliseconds msNotUsed) {
        this->performDeferredCleanup(msNotUsed);
    }

    /**
     * Purge unlocked resources from the cache until the provided byte count has been reached,
     * or we have purged all unlocked resources. The default policy is to purge in LRU order,
     * but can be overridden to prefer purging scratch resources (in LRU order) prior to purging
     * other resource types.
     *
     * @param bytesToPurge           the desired number of bytes to be purged.
     * @param preferScratchResources If true scratch resources will be purged prior to other
     *                               resource types.
     */
    void purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources);

    /**
     * This entry point is intended for instances where an app has been backgrounded or
     * suspended.
     * If 'scratchResourcesOnly' is true all unlocked scratch resources will be purged but the
     * unlocked resources with persistent data will remain. If 'scratchResourcesOnly' is false
     * then all unlocked resources will be purged.
     * In either case, after the unlocked resources are purged a separate pass will be made to
     * ensure that resource usage is under budget (i.e., even if 'scratchResourcesOnly' is true
     * some resources with persistent data may be purged to be under budget).
     *
     * @param scratchResourcesOnly If true only unlocked scratch resources will be purged prior
     *                             to enforcing the budget requirements.
     */
    void purgeUnlockedResources(bool scratchResourcesOnly);

    /**
     * Gets the maximum supported texture size.
     */
    using GrRecordingContext::maxTextureSize;

    /**
     * Gets the maximum supported render target size.
     */
    using GrRecordingContext::maxRenderTargetSize;

    /**
     * Can an SkImage be created with the given color type.
     */
    using GrRecordingContext::colorTypeSupportedAsImage;

    /**
     * Can an SkSurface be created with the given color type. To check whether MSAA is supported
     * use maxSurfaceSampleCountForColorType().
     */
    using GrRecordingContext::colorTypeSupportedAsSurface;

    /**
     * Gets the maximum supported sample count for a color type. 1 is returned if only non-MSAA
     * rendering is supported for the color type. 0 is returned if rendering to this color type
     * is not supported at all.
     */
    using GrRecordingContext::maxSurfaceSampleCountForColorType;

    ///////////////////////////////////////////////////////////////////////////
    // Misc.

    /**
     * Inserts a list of GPU semaphores that the current GPU-backed API must wait on before
     * executing any more commands on the GPU. If this call returns false, then the GPU back-end
     * will not wait on any passed in semaphores, and the client will still own the semaphores,
     * regardless of the value of deleteSemaphoresAfterWait.
     *
     * If deleteSemaphoresAfterWait is false then Skia will not delete the semaphores. In this
     * case it is the client's responsibility to not destroy or attempt to reuse the semaphores
     * until it knows that Skia has finished waiting on them. This can be done by using
     * finishedProcs on flush calls.
     */
    bool wait(int numSemaphores, const GrBackendSemaphore* waitSemaphores,
              bool deleteSemaphoresAfterWait = true);

    /**
     * Call to ensure all drawing to the context has been flushed and submitted to the
     * underlying 3D API. This is equivalent to calling flush() with a default GrFlushInfo
     * followed by submit(syncCpu).
     */
    void flushAndSubmit(bool syncCpu = false) {
        this->flush(GrFlushInfo());
        this->submit(syncCpu);
    }

    /**
     * Call to ensure all drawing to the context has been flushed to underlying 3D API specific
     * objects. A call to `submit` is always required to ensure work is actually sent to
     * the gpu. Some specific API details:
     *     GL: Commands are actually sent to the driver, but glFlush is never called. Thus some
     *         sync objects from the flush will not be valid until a submission occurs.
     *
     *     Vulkan/Metal/D3D/Dawn: Commands are recorded to the backend API's corresponding
     *         command buffer or encoder objects. However, these objects are not sent to the gpu
     *         until a submission occurs.
     *
     * If the return is GrSemaphoresSubmitted::kYes, only initialized GrBackendSemaphores will
     * be submitted to the gpu during the next submit call (it is possible Skia failed to create
     * a subset of the semaphores). The client should not wait on these semaphores until after
     * submit has been called, and must keep them alive until then. If this call returns
     * GrSemaphoresSubmitted::kNo, the GPU backend will not submit any semaphores to be signaled
     * on the GPU. Thus the client should not have the GPU wait on any of the semaphores passed
     * in with the GrFlushInfo. Regardless of whether semaphores were submitted to the GPU or
     * not, the client is still responsible for deleting any initialized semaphores.
     * Regardless of semaphore submission the context will still be flushed. It should be
     * emphasized that a return value of GrSemaphoresSubmitted::kNo does not mean the flush did
     * not happen. It simply means there were no semaphores submitted to the GPU. A caller
     * should only take this as a failure if they passed in semaphores to be submitted.
     */
    GrSemaphoresSubmitted flush(const GrFlushInfo& info);

    void flush() { this->flush({}); }

    /**
     * Submit outstanding work to the gpu from all previously un-submitted flushes. The return
     * value of the submit will indicate whether or not the submission to the GPU was
     * successful.
     *
     * If the call returns true, all previously passed in semaphores in flush calls will have
     * been submitted to the GPU and they can safely be waited on. The caller should wait on
     * those semaphores or perform some other global synchronization before deleting the
     * semaphores.
     *
     * If it returns false, then those same semaphores will not have been submitted and we will
     * not try to submit them again. The caller is free to delete the semaphores at any time.
     *
     * If the syncCpu flag is true this function will return once the gpu has finished with all
     * submitted work.
     */
    bool submit(bool syncCpu = false);
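
    // A minimal usage sketch (not part of the API) of the flush/submit pairing:
    // 'onGpuFinished' and 'myContext' are hypothetical client values for the real
    // GrFlushInfo::fFinishedProc/fFinishedContext fields.
    //
    //   GrFlushInfo info;
    //   info.fFinishedProc    = onGpuFinished;  // invoked once the GPU work completes
    //   info.fFinishedContext = myContext;
    //   ctx->flush(info);
    //   ctx->submit();  // nothing is sent to the GPU until submit() is called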

    /**
     * Checks whether any asynchronous work is complete and if so calls related callbacks.
     */
    void checkAsyncWorkCompletion();

    /** Enumerates all cached GPU resources and dumps their memory to traceMemoryDump. */
    // Chrome is using this!
    void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;

    bool supportsDistanceFieldText() const;

    void storeVkPipelineCacheData();

    /**
     * Retrieve the default GrBackendFormat for a given SkColorType and renderability.
     * It is guaranteed that this backend format will be the one used by the following
     * SkColorType and SkSurfaceCharacterization-based createBackendTexture methods.
     *
     * The caller should check that the returned format is valid.
     */
    using GrRecordingContext::defaultBackendFormat;

    /**
     * The explicitly allocated backend texture API allows clients to use Skia to create backend
     * objects outside of Skia proper (i.e., Skia's caching system will not know about them).
     *
     * It is the client's responsibility to delete all these objects (using
     * deleteBackendTexture) before deleting the context used to create them. If the backend is
     * Vulkan, the textures must be deleted before abandoning the context as well. Additionally,
     * clients should only delete these objects on the thread for which that context is active.
     *
     * The client is responsible for ensuring synchronization between different uses of the
     * backend object (i.e., wrapping it in a surface, rendering to it, deleting the surface,
     * rewrapping it in an image and drawing the image will require explicit synchronization on
     * the client's part).
     */

    /**
     * If possible, create an uninitialized backend texture. The client should ensure that the
     * returned backend texture is valid.
     * For the Vulkan backend the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_UNDEFINED.
     */
    GrBackendTexture createBackendTexture(int width, int height,
                                          const GrBackendFormat&,
                                          GrMipmapped,
                                          GrRenderable,
                                          GrProtected = GrProtected::kNo);

    /**
     * If possible, create an uninitialized backend texture. The client should ensure that the
     * returned backend texture is valid.
     * If successful, the created backend texture will be compatible with the provided
     * SkColorType.
     * For the Vulkan backend the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_UNDEFINED.
     */
    GrBackendTexture createBackendTexture(int width, int height,
                                          SkColorType,
                                          GrMipmapped,
                                          GrRenderable,
                                          GrProtected = GrProtected::kNo);
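
    // A minimal usage sketch (not part of the API): create an uninitialized, renderable
    // texture compatible with kRGBA_8888_SkColorType, and delete it when the client is done
    // with it. Error handling is elided.
    //
    //   GrBackendTexture tex = ctx->createBackendTexture(
    //           256, 256, kRGBA_8888_SkColorType, GrMipmapped::kNo, GrRenderable::kYes);
    //   if (tex.isValid()) {
    //       // ... wrap in an SkSurface or SkImage, draw, flush, and submit ...
    //       ctx->deleteBackendTexture(tex);
    //   }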

    /**
     * If possible, create a backend texture initialized to a particular color. The client
     * should ensure that the returned backend texture is valid. The client can pass in a
     * finishedProc to be notified when the data has been uploaded by the gpu and the texture
     * can be deleted. The client is required to call `submit` to send the upload work to the
     * gpu. The finishedProc will always get called even if we failed to create the
     * GrBackendTexture.
     * For the Vulkan backend the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    GrBackendTexture createBackendTexture(int width, int height,
                                          const GrBackendFormat&,
                                          const SkColor4f& color,
                                          GrMipmapped,
                                          GrRenderable,
                                          GrProtected = GrProtected::kNo,
                                          GrGpuFinishedProc finishedProc = nullptr,
                                          GrGpuFinishedContext finishedContext = nullptr);

    /**
     * If possible, create a backend texture initialized to a particular color. The client
     * should ensure that the returned backend texture is valid. The client can pass in a
     * finishedProc to be notified when the data has been uploaded by the gpu and the texture
     * can be deleted. The client is required to call `submit` to send the upload work to the
     * gpu. The finishedProc will always get called even if we failed to create the
     * GrBackendTexture.
     * If successful, the created backend texture will be compatible with the provided
     * SkColorType.
     * For the Vulkan backend the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    GrBackendTexture createBackendTexture(int width, int height,
                                          SkColorType,
                                          const SkColor4f& color,
                                          GrMipmapped,
                                          GrRenderable,
                                          GrProtected = GrProtected::kNo,
                                          GrGpuFinishedProc finishedProc = nullptr,
                                          GrGpuFinishedContext finishedContext = nullptr);

    /**
     * If possible, create a backend texture initialized with the provided pixmap data. The
     * client should ensure that the returned backend texture is valid. The client can pass in
     * a finishedProc to be notified when the data has been uploaded by the gpu and the texture
     * can be deleted. The client is required to call `submit` to send the upload work to the
     * gpu. The finishedProc will always get called even if we failed to create the
     * GrBackendTexture.
     * If successful, the created backend texture will be compatible with the provided
     * pixmap(s). Compatible, in this case, means that the backend format will be the result
     * of calling defaultBackendFormat on the base pixmap's colortype. The src data can be
     * deleted when this call returns.
     * If numLevels is 1 a non-mipmapped texture will result. If a mipmapped texture is desired
     * the data for all the mipmap levels must be provided. In the mipmapped case all the
     * colortypes of the provided pixmaps must be the same. Additionally, all the miplevels
     * must be sized correctly (please see SkMipmap::ComputeLevelSize and ComputeLevelCount).
     * The GrSurfaceOrigin controls whether the pixmap data is vertically flipped in the
     * texture.
     * Note: the pixmap's alphatypes and colorspaces are ignored.
     * For the Vulkan backend the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    GrBackendTexture createBackendTexture(const SkPixmap srcData[],
                                          int numLevels,
                                          GrSurfaceOrigin,
                                          GrRenderable,
                                          GrProtected,
                                          GrGpuFinishedProc finishedProc = nullptr,
                                          GrGpuFinishedContext finishedContext = nullptr);

    /**
     * Convenience version of createBackendTexture() that takes just a base level pixmap.
     */
    GrBackendTexture createBackendTexture(const SkPixmap& srcData,
                                          GrSurfaceOrigin textureOrigin,
                                          GrRenderable renderable,
                                          GrProtected isProtected,
                                          GrGpuFinishedProc finishedProc = nullptr,
                                          GrGpuFinishedContext finishedContext = nullptr) {
        return this->createBackendTexture(&srcData, 1, textureOrigin, renderable, isProtected,
                                          finishedProc, finishedContext);
    }

    // Deprecated versions that do not take origin and assume top-left.
    GrBackendTexture createBackendTexture(const SkPixmap srcData[],
                                          int numLevels,
                                          GrRenderable renderable,
                                          GrProtected isProtected,
                                          GrGpuFinishedProc finishedProc = nullptr,
                                          GrGpuFinishedContext finishedContext = nullptr) {
        return this->createBackendTexture(srcData,
                                          numLevels,
                                          kTopLeft_GrSurfaceOrigin,
                                          renderable,
                                          isProtected,
                                          finishedProc,
                                          finishedContext);
    }
    GrBackendTexture createBackendTexture(const SkPixmap& srcData,
                                          GrRenderable renderable,
                                          GrProtected isProtected,
                                          GrGpuFinishedProc finishedProc = nullptr,
                                          GrGpuFinishedContext finishedContext = nullptr) {
        return this->createBackendTexture(&srcData,
                                          1,
                                          renderable,
                                          isProtected,
                                          finishedProc,
                                          finishedContext);
    }

    /**
     * If possible, updates a backend texture to be filled to a particular color. The client
     * should check the return value to see if the update was successful. The client can pass
     * in a finishedProc to be notified when the data has been uploaded by the gpu and the
     * texture can be deleted. The client is required to call `submit` to send the upload work
     * to the gpu. The finishedProc will always get called even if we failed to update the
     * GrBackendTexture.
     * For the Vulkan backend after a successful update the layout of the created VkImage will
     * be: VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    bool updateBackendTexture(const GrBackendTexture&,
                              const SkColor4f& color,
                              GrGpuFinishedProc finishedProc,
                              GrGpuFinishedContext finishedContext);

    /**
     * If possible, updates a backend texture to be filled to a particular color. The data in
     * GrBackendTexture and passed in color is interpreted with respect to the passed in
     * SkColorType. The client should check the return value to see if the update was
     * successful. The client can pass in a finishedProc to be notified when the data has been
     * uploaded by the gpu and the texture can be deleted. The client is required to call
     * `submit` to send the upload work to the gpu. The finishedProc will always get called
     * even if we failed to update the GrBackendTexture.
     * For the Vulkan backend after a successful update the layout of the created VkImage will
     * be: VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    bool updateBackendTexture(const GrBackendTexture&,
                              SkColorType skColorType,
                              const SkColor4f& color,
                              GrGpuFinishedProc finishedProc,
                              GrGpuFinishedContext finishedContext);

    /**
     * If possible, updates a backend texture filled with the provided pixmap data. The client
     * should check the return value to see if the update was successful. The client can pass
     * in a finishedProc to be notified when the data has been uploaded by the gpu and the
     * texture can be deleted. The client is required to call `submit` to send the upload work
     * to the gpu. The finishedProc will always get called even if we failed to update the
     * GrBackendTexture.
     * The backend texture must be compatible with the provided pixmap(s). Compatible, in this
     * case, means that the backend format is compatible with the base pixmap's colortype. The
     * src data can be deleted when this call returns.
     * If the backend texture is mipmapped, the data for all the mipmap levels must be
     * provided. In the mipmapped case all the colortypes of the provided pixmaps must be the
     * same. Additionally, all the miplevels must be sized correctly (please see
     * SkMipmap::ComputeLevelSize and ComputeLevelCount). The GrSurfaceOrigin controls whether
     * the pixmap data is vertically flipped in the texture.
     * Note: the pixmap's alphatypes and colorspaces are ignored.
     * For the Vulkan backend after a successful update the layout of the created VkImage will
     * be: VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    bool updateBackendTexture(const GrBackendTexture&,
                              const SkPixmap srcData[],
                              int numLevels,
                              GrSurfaceOrigin = kTopLeft_GrSurfaceOrigin,
                              GrGpuFinishedProc finishedProc = nullptr,
                              GrGpuFinishedContext finishedContext = nullptr);

    /**
     * Convenience version of updateBackendTexture that takes just a base level pixmap.
     */
    bool updateBackendTexture(const GrBackendTexture& texture,
                              const SkPixmap& srcData,
                              GrSurfaceOrigin textureOrigin = kTopLeft_GrSurfaceOrigin,
                              GrGpuFinishedProc finishedProc = nullptr,
                              GrGpuFinishedContext finishedContext = nullptr) {
        return this->updateBackendTexture(texture,
                                          &srcData,
                                          1,
                                          textureOrigin,
                                          finishedProc,
                                          finishedContext);
    }

    // Deprecated version that does not take origin and assumes top-left.
    bool updateBackendTexture(const GrBackendTexture& texture,
                              const SkPixmap srcData[],
                              int numLevels,
                              GrGpuFinishedProc finishedProc,
                              GrGpuFinishedContext finishedContext) {
        return this->updateBackendTexture(texture,
                                          srcData,
                                          numLevels,
                                          kTopLeft_GrSurfaceOrigin,
                                          finishedProc,
                                          finishedContext);
    }

    /**
     * Retrieve the GrBackendFormat for a given SkImage::CompressionType. This is
     * guaranteed to match the backend format used by the following
     * createCompressedBackendTexture methods that take a CompressionType.
     * The caller should check that the returned format is valid.
     */
    using GrRecordingContext::compressedBackendFormat;

    /**
     * If possible, create a compressed backend texture initialized to a particular color. The
     * client should ensure that the returned backend texture is valid. The client can pass in
     * a finishedProc to be notified when the data has been uploaded by the gpu and the texture
     * can be deleted. The client is required to call `submit` to send the upload work to the
     * gpu. The finishedProc will always get called even if we failed to create the
     * GrBackendTexture.
     * For the Vulkan backend the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    GrBackendTexture createCompressedBackendTexture(int width, int height,
                                                    const GrBackendFormat&,
                                                    const SkColor4f& color,
                                                    GrMipmapped,
                                                    GrProtected = GrProtected::kNo,
                                                    GrGpuFinishedProc finishedProc = nullptr,
                                                    GrGpuFinishedContext finishedContext = nullptr);

    GrBackendTexture createCompressedBackendTexture(int width, int height,
                                                    SkImage::CompressionType,
                                                    const SkColor4f& color,
                                                    GrMipmapped,
                                                    GrProtected = GrProtected::kNo,
                                                    GrGpuFinishedProc finishedProc = nullptr,
                                                    GrGpuFinishedContext finishedContext = nullptr);

    /**
     * If possible, create a backend texture initialized with the provided raw data. The client
     * should ensure that the returned backend texture is valid. The client can pass in a
     * finishedProc to be notified when the data has been uploaded by the gpu and the texture
     * can be deleted. The client is required to call `submit` to send the upload work to the
     * gpu. The finishedProc will always get called even if we failed to create the
     * GrBackendTexture.
     * If numLevels is 1 a non-mipmapped texture will result. If a mipmapped texture is desired
     * the data for all the mipmap levels must be provided. Additionally, all the miplevels
     * must be sized correctly (please see SkMipmap::ComputeLevelSize and ComputeLevelCount).
     * For the Vulkan backend the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    GrBackendTexture createCompressedBackendTexture(int width, int height,
                                                    const GrBackendFormat&,
                                                    const void* data, size_t dataSize,
                                                    GrMipmapped,
                                                    GrProtected = GrProtected::kNo,
                                                    GrGpuFinishedProc finishedProc = nullptr,
                                                    GrGpuFinishedContext finishedContext = nullptr);

    GrBackendTexture createCompressedBackendTexture(int width, int height,
                                                    SkImage::CompressionType,
                                                    const void* data, size_t dataSize,
                                                    GrMipmapped,
                                                    GrProtected = GrProtected::kNo,
                                                    GrGpuFinishedProc finishedProc = nullptr,
                                                    GrGpuFinishedContext finishedContext = nullptr);

    /**
     * If possible, updates a backend texture filled with the provided color. If the texture is
     * mipmapped, all levels of the mip chain will be updated to have the supplied color. The
     * client should check the return value to see if the update was successful. The client can
     * pass in a finishedProc to be notified when the data has been uploaded by the gpu and the
     * texture can be deleted. The client is required to call `submit` to send the upload work
     * to the gpu. The finishedProc will always get called even if we failed to update the
     * GrBackendTexture.
     * For the Vulkan backend after a successful update the layout of the created VkImage will
     * be: VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    bool updateCompressedBackendTexture(const GrBackendTexture&,
                                        const SkColor4f& color,
                                        GrGpuFinishedProc finishedProc,
                                        GrGpuFinishedContext finishedContext);

    /**
     * If possible, updates a backend texture filled with the provided raw data. The client
     * should check the return value to see if the update was successful. The client can pass
     * in a finishedProc to be notified when the data has been uploaded by the gpu and the
     * texture can be deleted. The client is required to call `submit` to send the upload work
     * to the gpu. The finishedProc will always get called even if we failed to update the
     * GrBackendTexture.
     * If a mipmapped texture is passed in, the data for all the mipmap levels must be
     * provided. Additionally, all the miplevels must be sized correctly (please see
     * SkMipmap::ComputeLevelSize and ComputeLevelCount).
     * For the Vulkan backend after a successful update the layout of the created VkImage will
     * be: VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    bool updateCompressedBackendTexture(const GrBackendTexture&,
                                        const void* data,
                                        size_t dataSize,
                                        GrGpuFinishedProc finishedProc,
                                        GrGpuFinishedContext finishedContext);

    /**
     * Updates the state of the GrBackendTexture/RenderTarget to have the passed in
     * GrBackendSurfaceMutableState. All objects that wrap the backend surface (i.e. SkSurfaces
     * and SkImages) will also be aware of this state change. This call does not submit the
     * state change to the gpu, but requires the client to call `submit` to send it to the GPU.
     * The work for this call is ordered linearly with all other calls that require
     * GrDirectContext::submit to be called (e.g. updateBackendTexture and flush). If
     * finishedProc is not null then it will be called with finishedContext after the state
     * transition is known to have occurred on the GPU.
     *
     * See GrBackendSurfaceMutableState to see what state can be set via this call.
     *
     * If the backend API is Vulkan, the caller can set the GrBackendSurfaceMutableState's
     * VkImageLayout to VK_IMAGE_LAYOUT_UNDEFINED or queueFamilyIndex to
     * VK_QUEUE_FAMILY_IGNORED to tell Skia to not change those respective states.
     *
     * If previousState is not null and this returns true, then Skia will have filled in
     * previousState to have the values of the state before this call.
     */
    bool setBackendTextureState(const GrBackendTexture&,
                                const GrBackendSurfaceMutableState&,
                                GrBackendSurfaceMutableState* previousState = nullptr,
                                GrGpuFinishedProc finishedProc = nullptr,
                                GrGpuFinishedContext finishedContext = nullptr);
    bool setBackendRenderTargetState(const GrBackendRenderTarget&,
                                     const GrBackendSurfaceMutableState&,
                                     GrBackendSurfaceMutableState* previousState = nullptr,
                                     GrGpuFinishedProc finishedProc = nullptr,
                                     GrGpuFinishedContext finishedContext = nullptr);
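
    // A minimal usage sketch (not part of the API) for the Vulkan backend: transition a
    // wrapped VkImage to VK_IMAGE_LAYOUT_GENERAL without changing its queue family. The
    // two-argument GrBackendSurfaceMutableState constructor shown is assumed from the
    // Vulkan-specific API; 'tex' is a hypothetical GrBackendTexture.
    //
    //   GrBackendSurfaceMutableState newState(VK_IMAGE_LAYOUT_GENERAL,
    //                                         VK_QUEUE_FAMILY_IGNORED);
    //   ctx->setBackendTextureState(tex, newState);
    //   ctx->submit();  // the transition executes on the GPU at submit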

    void deleteBackendTexture(GrBackendTexture);

    // This interface allows clients to pre-compile shaders and populate the runtime program
    // cache. The key and data blobs should be the ones passed to the PersistentCache, in SkSL
    // format.
    //
    // Steps to use this API:
    //
    // 1) Create a GrDirectContext as normal, but set fPersistentCache on GrContextOptions to
    //    something that will save the cached shader blobs. Set fShaderCacheStrategy to kSkSL.
    //    This will ensure that the blobs are SkSL, and are suitable for pre-compilation.
    // 2) Run your application, and save all of the key/data pairs that are fed to the cache.
    //
    // 3) Switch over to shipping your application. Include the key/data pairs from above.
    // 4) At startup (or any convenient time), call precompileShader for each key/data pair.
    //    This will compile the SkSL to create a GL program, and populate the runtime cache.
    //
    // This is only guaranteed to work if the context/device used in step #2 is created in the
    // same way as the one used in step #4, and the same GrContextOptions are specified.
    // Using cached shader blobs on a different device or driver is undefined.
    // A minimal sketch of step 1 is shown after this declaration.
    bool precompileShader(const SkData& key, const SkData& data);
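
    // A minimal sketch of step 1 above (not part of the API): 'MyShaderCache' is a
    // hypothetical client class implementing GrContextOptions::PersistentCache.
    //
    //   class MyShaderCache : public GrContextOptions::PersistentCache {
    //   public:
    //       sk_sp<SkData> load(const SkData& key) override { /* look up key */ return nullptr; }
    //       void store(const SkData& key, const SkData& data) override { /* save the pair */ }
    //   };
    //
    //   MyShaderCache cache;
    //   GrContextOptions options;
    //   options.fPersistentCache     = &cache;
    //   options.fShaderCacheStrategy = GrContextOptions::ShaderCacheStrategy::kSkSL;
    //   sk_sp<GrDirectContext> ctx = GrDirectContext::MakeGL(options);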

#ifdef SK_ENABLE_DUMP_GPU
    /** Returns a string with detailed information about the context & GPU, in JSON format. */
    SkString dump() const;
#endif

    class DirectContextID {
    public:
        static GrDirectContext::DirectContextID NextID();

        DirectContextID() : fID(SK_InvalidUniqueID) {}

        bool operator==(const DirectContextID& that) const { return fID == that.fID; }
        bool operator!=(const DirectContextID& that) const { return !(*this == that); }

        void makeInvalid() { fID = SK_InvalidUniqueID; }
        bool isValid() const { return fID != SK_InvalidUniqueID; }

    private:
        constexpr DirectContextID(uint32_t id) : fID(id) {}
        uint32_t fID;
    };

    DirectContextID directContextID() const { return fDirectContextID; }

    // Provides access to functions that aren't part of the public API.
    GrDirectContextPriv priv();
    const GrDirectContextPriv priv() const;  // NOLINT(readability-const-return-type)

protected:
    GrDirectContext(GrBackendApi backend, const GrContextOptions& options);

    bool init() override;

    GrAtlasManager* onGetAtlasManager() { return fAtlasManager.get(); }
    GrSmallPathAtlasMgr* onGetSmallPathAtlasMgr();

    GrDirectContext* asDirectContext() override { return this; }

private:
    // This call will make sure our work on the GPU is finished and will execute any outstanding
    // asynchronous work (e.g. calling finished procs, freeing resources, etc.) related to the
    // outstanding work on the gpu. The main use currently for this function is when tearing
    // down or abandoning the context.
    //
    // When we finish up work on the GPU it could trigger callbacks to the client. In the case
    // we are abandoning the context we don't want the client to be able to use the
    // GrDirectContext to issue more commands during the callback. Thus before calling this
    // function we set the GrDirectContext's state to be abandoned. However, we need to be able
    // to get past the abandoned check in the call to know that it is safe to execute this. The
    // shouldExecuteWhileAbandoned bool is used for this signal.
    void syncAllOutstandingGpuWork(bool shouldExecuteWhileAbandoned);

    const DirectContextID fDirectContextID;
    // fTaskGroup must appear before anything that uses it (e.g. fGpu), so that it is destroyed
    // after all of its users. Clients of fTaskGroup will generally want to ensure that they
    // call wait() on it as they are being destroyed, to avoid the possibility of pending tasks
    // being invoked after objects they depend upon have already been destroyed.
    std::unique_ptr<SkTaskGroup> fTaskGroup;
    std::unique_ptr<GrStrikeCache> fStrikeCache;
    sk_sp<GrGpu> fGpu;
    std::unique_ptr<GrResourceCache> fResourceCache;
    std::unique_ptr<GrResourceProvider> fResourceProvider;

    bool fDidTestPMConversions;
    // true if the PM/UPM conversion succeeded; false otherwise
    bool fPMUPMConversionsRoundTrip;

    GrContextOptions::PersistentCache* fPersistentCache;
    GrContextOptions::ShaderErrorHandler* fShaderErrorHandler;

    std::unique_ptr<GrClientMappedBufferManager> fMappedBufferManager;
    std::unique_ptr<GrAtlasManager> fAtlasManager;

    std::unique_ptr<GrSmallPathAtlasMgr> fSmallPathAtlasMgr;

    friend class GrDirectContextPriv;

    using INHERITED = GrRecordingContext;
};

#endif