/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrDrawingManager_DEFINED
#define GrDrawingManager_DEFINED

#include "include/core/SkSurface.h"
#include "include/private/SkTArray.h"
#include "include/private/SkTHash.h"
#include "src/gpu/GrBufferAllocPool.h"
#include "src/gpu/GrDeferredUpload.h"
#include "src/gpu/GrHashMapWithCache.h"
#include "src/gpu/GrPathRenderer.h"
#include "src/gpu/GrPathRendererChain.h"
#include "src/gpu/GrResourceCache.h"
#include "src/gpu/GrSurfaceProxy.h"

// Enabling this will print out which path renderers are being chosen
#define GR_PATH_RENDERER_SPEW 0

class GrCoverageCountingPathRenderer;
class GrDirectContext;
class GrGpuBuffer;
class GrOnFlushCallbackObject;
class GrOpFlushState;
class GrOpsTask;
class GrRecordingContext;
class GrRenderTargetContext;
class GrRenderTargetProxy;
class GrRenderTask;
class GrSemaphore;
class GrSoftwarePathRenderer;
class GrSurfaceContext;
class GrSurfaceProxyView;
class GrTextureResolveRenderTask;
class SkDeferredDisplayList;

class GrDrawingManager {
public:
    ~GrDrawingManager();

    void freeGpuResources();

    // A managed opsTask is controlled by the drawing manager (i.e., sorted & flushed with the
    // others). An unmanaged one is created and used by the onFlushCallback.
    sk_sp<GrOpsTask> newOpsTask(GrSurfaceProxyView, bool managedOpsTask);
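    //
    // Minimal caller sketch (hypothetical, not part of the API; assumes 'dm' is a valid
    // GrDrawingManager* and 'view' is a GrSurfaceProxyView for the render target):
    //
    //   sk_sp<GrOpsTask> opsTask = dm->newOpsTask(std::move(view), /*managedOpsTask=*/true);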

    // Create a render task that can resolve MSAA and/or regenerate mipmap levels on proxies. This
    // method will only add the new render task to the list. It is up to the caller to call
    // addProxy() on the returned object.
    GrTextureResolveRenderTask* newTextureResolveRenderTask(const GrCaps&);
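    //
    // Usage sketch (hypothetical; 'dm' and 'caps' are assumed valid). The caller is
    // responsible for registering each proxy to be resolved on the returned task:
    //
    //   GrTextureResolveRenderTask* resolveTask = dm->newTextureResolveRenderTask(caps);
    //   resolveTask->addProxy(/* proxy + resolve flags; see GrTextureResolveRenderTask */);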

    // Create a new render task that will cause the GPU to wait on semaphores before executing any
    // more RenderTasks that target the proxy. It is possible for this wait to also block
    // additional work (even to other proxies) that has already been recorded or will be recorded
    // later. The only guarantee is that future work to the passed-in proxy will wait on the
    // semaphores to be signaled.
    void newWaitRenderTask(sk_sp<GrSurfaceProxy> proxy,
                           std::unique_ptr<std::unique_ptr<GrSemaphore>[]>,
                           int numSemaphores);
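    //
    // Call sketch (hypothetical; 'kNumSemaphores' and the semaphore contents are
    // supplied by the caller, e.g. wrapped from backend semaphore handles):
    //
    //   std::unique_ptr<std::unique_ptr<GrSemaphore>[]> semaphores(
    //           new std::unique_ptr<GrSemaphore>[kNumSemaphores]);
    //   dm->newWaitRenderTask(std::move(proxy), std::move(semaphores), kNumSemaphores);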

    // Create a new render task which copies the pixels from the srcProxy into the dstBuffer. This
    // is used to support the asynchronous readback API. The srcRect is the region of the srcProxy
    // to be copied. The surfaceColorType says how we should interpret the data when reading back
    // from the source. The dstColorType describes how the data should be stored in the dstBuffer.
    // The dstOffset is the offset into the dstBuffer where we will start writing data.
    void newTransferFromRenderTask(sk_sp<GrSurfaceProxy> srcProxy, const SkIRect& srcRect,
                                   GrColorType surfaceColorType, GrColorType dstColorType,
                                   sk_sp<GrGpuBuffer> dstBuffer, size_t dstOffset);
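    //
    // Readback sketch (hypothetical; the proxy, buffer, and rect come from the caller;
    // GrColorType::kRGBA_8888 is just an illustrative choice):
    //
    //   dm->newTransferFromRenderTask(std::move(srcProxy), SkIRect::MakeWH(width, height),
    //                                 GrColorType::kRGBA_8888, GrColorType::kRGBA_8888,
    //                                 std::move(dstBuffer), /*dstOffset=*/0);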

    // Creates a new render task which copies a pixel rectangle from srcView into dstView. The src
    // pixels copied are specified by srcRect. They are copied to a rect of the same size in
    // dstView with top left at dstPoint. If the src rect is clipped by the src bounds then pixel
    // values in the dst rect corresponding to the clipped area are not overwritten. This method
    // is not guaranteed to succeed; it depends on the surface types, formats, etc., and on
    // backend-specific limitations.
    bool newCopyRenderTask(GrSurfaceProxyView srcView, const SkIRect& srcRect,
                           GrSurfaceProxyView dstView, const SkIPoint& dstPoint);
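    //
    // Copy sketch (hypothetical; views, rect, and point are supplied by the caller):
    //
    //   bool scheduled = dm->newCopyRenderTask(std::move(srcView), SkIRect::MakeWH(w, h),
    //                                          std::move(dstView), SkIPoint::Make(0, 0));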

    GrRecordingContext* getContext() { return fContext; }

    GrPathRenderer* getPathRenderer(const GrPathRenderer::CanDrawPathArgs& args,
                                    bool allowSW,
                                    GrPathRendererChain::DrawType drawType,
                                    GrPathRenderer::StencilSupport* stencilSupport = nullptr);

    GrPathRenderer* getSoftwarePathRenderer();
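    //
    // Selection sketch (hypothetical; 'canDrawArgs' is a filled-in
    // GrPathRenderer::CanDrawPathArgs):
    //
    //   GrPathRenderer* pr = dm->getPathRenderer(canDrawArgs, /*allowSW=*/true,
    //                                            GrPathRendererChain::DrawType::kColor);
    //   if (!pr) { /* no renderer can draw this path; caller must handle it */ }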

    // Returns a direct pointer to the coverage counting path renderer, or null if it is not
    // supported or not enabled.
    GrCoverageCountingPathRenderer* getCoverageCountingPathRenderer();

    void flushIfNecessary();

    static bool ProgramUnitTest(GrDirectContext*, int maxStages, int maxLevels);

    GrSemaphoresSubmitted flushSurfaces(GrSurfaceProxy* proxies[],
                                        int cnt,
                                        SkSurface::BackendSurfaceAccess access,
                                        const GrFlushInfo& info,
                                        const GrBackendSurfaceMutableState* newState);
    GrSemaphoresSubmitted flushSurface(GrSurfaceProxy* proxy,
                                       SkSurface::BackendSurfaceAccess access,
                                       const GrFlushInfo& info,
                                       const GrBackendSurfaceMutableState* newState) {
        return this->flushSurfaces(&proxy, 1, access, info, newState);
    }
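    //
    // Flush sketch (hypothetical; a default-constructed GrFlushInfo and no backend
    // surface state change):
    //
    //   GrFlushInfo flushInfo;
    //   GrSemaphoresSubmitted submitted = dm->flushSurface(
    //           proxy, SkSurface::BackendSurfaceAccess::kNoAccess, flushInfo, nullptr);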

    void addOnFlushCallbackObject(GrOnFlushCallbackObject*);

#if GR_TEST_UTILS
    void testingOnly_removeOnFlushCallbackObject(GrOnFlushCallbackObject*);
    GrPathRendererChain::Options testingOnly_getOptionsForPathRendererChain() {
        return fOptionsForPathRendererChain;
    }
#endif

    GrRenderTask* getLastRenderTask(const GrSurfaceProxy*) const;
    GrOpsTask* getLastOpsTask(const GrSurfaceProxy*) const;
    void setLastRenderTask(const GrSurfaceProxy*, GrRenderTask*);
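    //
    // Tracking sketch (hypothetical): after recording a task that targets 'proxy',
    //
    //   this->setLastRenderTask(proxy, task.get());
    //   SkASSERT(this->getLastRenderTask(proxy) == task.get());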

    void moveRenderTasksToDDL(SkDeferredDisplayList* ddl);
    void copyRenderTasksFromDDL(sk_sp<const SkDeferredDisplayList>, GrRenderTargetProxy* newDest);

private:
    // This class encapsulates maintenance and manipulation of the drawing manager's DAG of
    // renderTasks.
    class RenderTaskDAG {
    public:
        RenderTaskDAG(bool sortRenderTasks);
        ~RenderTaskDAG();

        // Currently, when explicitly allocating resources, this call will topologically sort the
        // GrRenderTasks.
        // MDB TODO: remove once incremental GrRenderTask sorting is enabled
        void prepForFlush();

        void closeAll(const GrCaps* caps);

        void gatherIDs(SkSTArray<8, uint32_t, true>* idArray) const;

        void reset();

        // This call forcibly removes GrRenderTasks from the DAG. It is problematic because it
        // just removes the GrRenderTasks but doesn't clean up any referring pointers (i.e.,
        // dependency pointers in the DAG). It works right now because it is only called after
        // the topological sort is complete (so the dangling pointers aren't used).
        void rawRemoveRenderTasks(int startIndex, int stopIndex);

        bool empty() const { return fRenderTasks.empty(); }
        int numRenderTasks() const { return fRenderTasks.count(); }

        bool isUsed(GrSurfaceProxy*) const;

        GrRenderTask* renderTask(int index) { return fRenderTasks[index].get(); }
        const GrRenderTask* renderTask(int index) const { return fRenderTasks[index].get(); }

        GrRenderTask* back() { return fRenderTasks.back().get(); }
        const GrRenderTask* back() const { return fRenderTasks.back().get(); }

        GrRenderTask* add(sk_sp<GrRenderTask>);
        GrRenderTask* addBeforeLast(sk_sp<GrRenderTask>);
        void add(const SkTArray<sk_sp<GrRenderTask>>&);

        void swap(SkTArray<sk_sp<GrRenderTask>>* renderTasks);

        bool sortingRenderTasks() const { return fSortRenderTasks; }

    private:
        SkTArray<sk_sp<GrRenderTask>> fRenderTasks;
        bool fSortRenderTasks;
    };

    GrDrawingManager(GrRecordingContext*,
                     const GrPathRendererChain::Options&,
                     bool sortRenderTasks,
                     bool reduceOpsTaskSplitting);

    bool wasAbandoned() const;

    // Closes the target's dependent render tasks (or, if not in sorting/opsTask-splitting-
    // reduction mode, closes fActiveOpsTask) in preparation for us opening a new opsTask that
    // will write to 'target'.
    void closeRenderTasksForNewRenderTask(GrSurfaceProxy* target);

    // Returns true if any GrRenderTasks were actually executed; false otherwise.
    bool executeRenderTasks(int startIndex, int stopIndex, GrOpFlushState*,
                            int* numRenderTasksExecuted);

    void removeRenderTasks(int startIndex, int stopIndex);

    bool flush(GrSurfaceProxy* proxies[],
               int numProxies,
               SkSurface::BackendSurfaceAccess access,
               const GrFlushInfo&,
               const GrBackendSurfaceMutableState* newState);

    bool submitToGpu(bool syncToCpu);

    SkDEBUGCODE(void validate() const);

    friend class GrContext;                 // access to: flush & cleanup
    friend class GrDirectContext;           // access to: flush & cleanup
    friend class GrDirectContextPriv;       // access to: flush
    friend class GrOnFlushResourceProvider; // this is just a shallow wrapper around this class
    friend class GrRecordingContext;        // access to: ctor
    friend class SkImage;                   // for access to: flush

    static const int kNumPixelGeometries = 5; // The different pixel geometries
    static const int kNumDFTOptions = 2;      // DFT or no DFT

    GrRecordingContext* fContext;
    GrPathRendererChain::Options fOptionsForPathRendererChain;

    // This cache is used by both the vertex and index pools. It reuses memory across multiple
    // flushes.
    sk_sp<GrBufferAllocPool::CpuBufferCache> fCpuBufferCache;

    RenderTaskDAG fDAG;
    GrOpsTask* fActiveOpsTask = nullptr;
    // These are the IDs of the render tasks currently being flushed (in internalFlush)
    SkSTArray<8, uint32_t, true> fFlushingRenderTaskIDs;
    // These are the new renderTasks generated by the onFlush CBs
    SkSTArray<4, sk_sp<GrRenderTask>> fOnFlushRenderTasks;

    std::unique_ptr<GrPathRendererChain> fPathRendererChain;
    sk_sp<GrSoftwarePathRenderer> fSoftwarePathRenderer;

    GrTokenTracker fTokenTracker;
    bool fFlushing;
    bool fReduceOpsTaskSplitting;

    SkTArray<GrOnFlushCallbackObject*> fOnFlushCBObjects;

    void addDDLTarget(GrSurfaceProxy* newTarget, GrRenderTargetProxy* ddlTarget) {
        fDDLTargets.set(newTarget->uniqueID().asUInt(), ddlTarget);
    }
    bool isDDLTarget(GrSurfaceProxy* newTarget) {
        return SkToBool(fDDLTargets.find(newTarget->uniqueID().asUInt()));
    }
    GrRenderTargetProxy* getDDLTarget(GrSurfaceProxy* newTarget) {
        auto entry = fDDLTargets.find(newTarget->uniqueID().asUInt());
        return entry ? *entry : nullptr;
    }
    void clearDDLTargets() { fDDLTargets.reset(); }

    // We play a trick with lazy proxies to retarget the base target of a DDL to the SkSurface
    // it is replayed on. 'fDDLTargets' stores this mapping from SkSurface unique proxy ID
    // to the DDL's lazy proxy.
    // Note: we do not expect a whole lot of these per flush
    SkTHashMap<uint32_t, GrRenderTargetProxy*> fDDLTargets;
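    //
    // Mapping sketch (hypothetical; illustrates the helpers above at DDL replay time):
    //
    //   this->addDDLTarget(surfaceProxy, ddlLazyTarget);
    //   GrRenderTargetProxy* ddlTarget = this->getDDLTarget(surfaceProxy);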

    struct SurfaceIDKeyTraits {
        static uint32_t GetInvalidKey() {
            return GrSurfaceProxy::UniqueID::InvalidID().asUInt();
        }
    };

    GrHashMapWithCache<uint32_t, GrRenderTask*, SurfaceIDKeyTraits, GrCheapHash> fLastRenderTasks;
};

#endif