/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrDrawingManager.h"

#include "include/core/SkDeferredDisplayList.h"
#include "include/gpu/GrBackendSemaphore.h"
#include "include/private/GrRecordingContext.h"
#include "src/core/SkDeferredDisplayListPriv.h"
#include "src/core/SkTTopoSort.h"
#include "src/gpu/GrAuditTrail.h"
#include "src/gpu/GrClientMappedBufferManager.h"
#include "src/gpu/GrContextPriv.h"
#include "src/gpu/GrCopyRenderTask.h"
#include "src/gpu/GrGpu.h"
#include "src/gpu/GrMemoryPool.h"
#include "src/gpu/GrOnFlushResourceProvider.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/GrRenderTargetContext.h"
#include "src/gpu/GrRenderTargetProxy.h"
#include "src/gpu/GrRenderTask.h"
#include "src/gpu/GrResourceAllocator.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/GrSoftwarePathRenderer.h"
#include "src/gpu/GrSurfaceContext.h"
#include "src/gpu/GrSurfaceProxyPriv.h"
#include "src/gpu/GrTexture.h"
#include "src/gpu/GrTexturePriv.h"
#include "src/gpu/GrTextureProxy.h"
#include "src/gpu/GrTextureProxyPriv.h"
#include "src/gpu/GrTextureResolveRenderTask.h"
#include "src/gpu/GrTracing.h"
#include "src/gpu/GrTransferFromRenderTask.h"
#include "src/gpu/GrUnrefDDLTask.h"
#include "src/gpu/GrWaitRenderTask.h"
#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"
#include "src/gpu/text/GrSDFTOptions.h"
#include "src/image/SkSurface_Gpu.h"

GrDrawingManager::RenderTaskDAG::RenderTaskDAG(bool sortRenderTasks)
        : fSortRenderTasks(sortRenderTasks) {}

GrDrawingManager::RenderTaskDAG::~RenderTaskDAG() {}

void GrDrawingManager::RenderTaskDAG::gatherIDs(SkSTArray<8, uint32_t, true>* idArray) const {
    idArray->reset(fRenderTasks.count());
    for (int i = 0; i < fRenderTasks.count(); ++i) {
        if (fRenderTasks[i]) {
            (*idArray)[i] = fRenderTasks[i]->uniqueID();
        }
    }
}

void GrDrawingManager::RenderTaskDAG::reset() {
    fRenderTasks.reset();
}

void GrDrawingManager::RenderTaskDAG::removeRenderTasks(int startIndex, int stopIndex) {
    for (int i = startIndex; i < stopIndex; ++i) {
        fRenderTasks[i] = nullptr;
    }
}

bool GrDrawingManager::RenderTaskDAG::isUsed(GrSurfaceProxy* proxy) const {
    for (const auto& task : fRenderTasks) {
        if (task && task->isUsed(proxy)) {
            return true;
        }
    }

    return false;
}

GrRenderTask* GrDrawingManager::RenderTaskDAG::add(sk_sp<GrRenderTask> renderTask) {
    if (renderTask) {
        return fRenderTasks.emplace_back(std::move(renderTask)).get();
    }
    return nullptr;
}

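// Adds 'renderTask' as the second-to-last entry in the DAG, keeping the current last task in
// place (used, e.g., when the still-open last task must depend on the newly added one).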
GrRenderTask* GrDrawingManager::RenderTaskDAG::addBeforeLast(sk_sp<GrRenderTask> renderTask) {
    SkASSERT(!fRenderTasks.empty());
    if (renderTask) {
        // Release 'fRenderTasks.back()' and grab the raw pointer, in case the SkTArray grows
        // and reallocates during emplace_back.
        fRenderTasks.emplace_back(fRenderTasks.back().release());
        return (fRenderTasks[fRenderTasks.count() - 2] = std::move(renderTask)).get();
    }
    return nullptr;
}

void GrDrawingManager::RenderTaskDAG::add(const SkTArray<sk_sp<GrRenderTask>>& renderTasks) {
#ifdef SK_DEBUG
    for (auto& renderTask : renderTasks) {
        SkASSERT(renderTask->unique());
    }
#endif

    fRenderTasks.push_back_n(renderTasks.count(), renderTasks.begin());
}

void GrDrawingManager::RenderTaskDAG::swap(SkTArray<sk_sp<GrRenderTask>>* renderTasks) {
    SkASSERT(renderTasks->empty());
    renderTasks->swap(fRenderTasks);
}

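// Topologically sorts the DAG (when sorting is enabled) so every task follows the tasks it
// depends on. The debug block below then checks that no two adjacent opsTasks share a target,
// which would indicate an unnecessary opsTask split.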
void GrDrawingManager::RenderTaskDAG::prepForFlush() {
    if (fSortRenderTasks) {
        SkDEBUGCODE(bool result =) SkTTopoSort<GrRenderTask, GrRenderTask::TopoSortTraits>(
                &fRenderTasks);
        SkASSERT(result);
    }

#ifdef SK_DEBUG
    // This block checks for any unnecessary splits in the opsTasks. If two sequential opsTasks
    // share the same backing GrSurfaceProxy it means the opsTask was artificially split.
    if (fRenderTasks.count()) {
        GrOpsTask* prevOpsTask = fRenderTasks[0]->asOpsTask();
        for (int i = 1; i < fRenderTasks.count(); ++i) {
            GrOpsTask* curOpsTask = fRenderTasks[i]->asOpsTask();

            if (prevOpsTask && curOpsTask) {
                SkASSERT(prevOpsTask->target(0).proxy() != curOpsTask->target(0).proxy());
            }

            prevOpsTask = curOpsTask;
        }
    }
#endif
}

void GrDrawingManager::RenderTaskDAG::closeAll(const GrCaps* caps) {
    for (int i = 0; i < fRenderTasks.count(); ++i) {
        if (fRenderTasks[i]) {
            fRenderTasks[i]->makeClosed(*caps);
        }
    }
}

void GrDrawingManager::RenderTaskDAG::cleanup(GrDrawingManager* drawingMgr, const GrCaps* caps) {
    for (int i = 0; i < fRenderTasks.count(); ++i) {
        if (!fRenderTasks[i]) {
            continue;
        }

        // no renderTask should receive a dependency
        fRenderTasks[i]->makeClosed(*caps);

        fRenderTasks[i]->disown(drawingMgr);

        // We shouldn't need to do this, but it turns out some clients still hold onto opsTasks
        // after a cleanup.
        // MDB TODO: is this still true?
        if (!fRenderTasks[i]->unique()) {
            // TODO: Eventually this should be guaranteed unique.
            // https://bugs.chromium.org/p/skia/issues/detail?id=7111
            fRenderTasks[i]->endFlush(drawingMgr);
        }
    }

    fRenderTasks.reset();
}

///////////////////////////////////////////////////////////////////////////////////////////////////
GrDrawingManager::GrDrawingManager(GrRecordingContext* context,
                                   const GrPathRendererChain::Options& optionsForPathRendererChain,
                                   bool sortRenderTasks,
                                   bool reduceOpsTaskSplitting)
        : fContext(context)
        , fOptionsForPathRendererChain(optionsForPathRendererChain)
        , fDAG(sortRenderTasks)
        , fPathRendererChain(nullptr)
        , fSoftwarePathRenderer(nullptr)
        , fFlushing(false)
        , fReduceOpsTaskSplitting(reduceOpsTaskSplitting) { }

void GrDrawingManager::cleanup() {
    fDAG.cleanup(this, fContext->priv().caps());

    fPathRendererChain = nullptr;
    fSoftwarePathRenderer = nullptr;

    fOnFlushCBObjects.reset();
}

GrDrawingManager::~GrDrawingManager() {
    this->cleanup();
}

bool GrDrawingManager::wasAbandoned() const {
    return fContext->priv().abandoned();
}

void GrDrawingManager::freeGpuResources() {
    for (int i = fOnFlushCBObjects.count() - 1; i >= 0; --i) {
        if (!fOnFlushCBObjects[i]->retainOnFreeGpuResources()) {
            // it's safe to just do this because we're iterating in reverse
            fOnFlushCBObjects.removeShuffle(i);
        }
    }

    // a path renderer may be holding onto resources
    fPathRendererChain = nullptr;
    fSoftwarePathRenderer = nullptr;
}

// MDB TODO: make use of the 'proxy' parameter.
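// Overall flush flow: close any open render tasks, topo-sort the DAG, let onFlush callback
// objects (e.g. atlases) add and prepare their own tasks, assign GPU resources via
// GrResourceAllocator, execute the tasks, then hand the flush info off to the GrGpu. Returns
// false if the flush was skipped (abandoned context, DDL recording, or nothing to do).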
bool GrDrawingManager::flush(
        GrSurfaceProxy* proxies[],
        int numProxies,
        SkSurface::BackendSurfaceAccess access,
        const GrFlushInfo& info,
        const GrBackendSurfaceMutableState* newState) {
    SkASSERT(numProxies >= 0);
    SkASSERT(!numProxies || proxies);
    GR_CREATE_TRACE_MARKER_CONTEXT("GrDrawingManager", "flush", fContext);

    if (fFlushing || this->wasAbandoned()) {
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        }
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        return false;
    }

    SkDEBUGCODE(this->validate());

    if (kNone_GrFlushFlags == info.fFlags && !info.fNumSemaphores && !info.fFinishedProc &&
        access == SkSurface::BackendSurfaceAccess::kNoAccess && !newState) {
        bool canSkip = numProxies > 0;
        for (int i = 0; i < numProxies && canSkip; ++i) {
            canSkip = !fDAG.isUsed(proxies[i]) && !this->isDDLTarget(proxies[i]);
        }
        if (canSkip) {
            if (info.fSubmittedProc) {
                info.fSubmittedProc(info.fSubmittedContext, true);
            }
            return false;
        }
    }

    auto direct = fContext->priv().asDirectContext();
    if (!direct) {
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        }
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        return false; // Can't flush while DDL recording
    }
    direct->priv().clientMappedBufferManager()->process();

    GrGpu* gpu = direct->priv().getGpu();
    // We have a non abandoned and direct GrContext. It must have a GrGpu.
    SkASSERT(gpu);

    fFlushing = true;

    auto resourceProvider = direct->priv().resourceProvider();
    auto resourceCache = direct->priv().getResourceCache();

    // Semi-usually the GrRenderTasks are already closed at this point, but sometimes Ganesh needs
    // to flush mid-draw. In that case, the SkGpuDevice's opsTasks won't be closed but need to be
    // flushed anyway. Closing such opsTasks here will mean new ones will be created to replace them
    // if the SkGpuDevice(s) write to them again.
    fDAG.closeAll(fContext->priv().caps());
    fActiveOpsTask = nullptr;

    fDAG.prepForFlush();
    if (!fCpuBufferCache) {
        // We cache more buffers when the backend is using client side arrays. Otherwise, we
        // expect each pool will use a CPU buffer as a staging buffer before uploading to a GPU
        // buffer object. Each pool only requires one staging buffer at a time.
        int maxCachedBuffers = fContext->priv().caps()->preferClientSideDynamicBuffers() ? 2 : 6;
        fCpuBufferCache = GrBufferAllocPool::CpuBufferCache::Make(maxCachedBuffers);
    }

    GrOpFlushState flushState(gpu, resourceProvider, &fTokenTracker, fCpuBufferCache);

    GrOnFlushResourceProvider onFlushProvider(this);

    // Prepare any onFlush op lists (e.g. atlases).
    if (!fOnFlushCBObjects.empty()) {
        fDAG.gatherIDs(&fFlushingRenderTaskIDs);

        for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
            onFlushCBObject->preFlush(&onFlushProvider, fFlushingRenderTaskIDs.begin(),
                                      fFlushingRenderTaskIDs.count());
        }
        for (const auto& onFlushRenderTask : fOnFlushRenderTasks) {
            onFlushRenderTask->makeClosed(*fContext->priv().caps());
#ifdef SK_DEBUG
            // OnFlush callbacks are invoked during flush, and are therefore expected to handle
            // resource allocation & usage on their own. (No deferred or lazy proxies!)
            onFlushRenderTask->visitTargetAndSrcProxies_debugOnly(
                    [](GrSurfaceProxy* p, GrMipMapped mipMapped) {
                SkASSERT(!p->asTextureProxy() || !p->asTextureProxy()->texPriv().isDeferred());
                SkASSERT(!p->isLazy());
                if (p->requiresManualMSAAResolve()) {
                    // The onFlush callback is responsible for ensuring MSAA gets resolved.
                    SkASSERT(p->asRenderTargetProxy() && !p->asRenderTargetProxy()->isMSAADirty());
                }
                if (GrMipMapped::kYes == mipMapped) {
                    // The onFlush callback is responsible for regenerating mips if needed.
                    SkASSERT(p->asTextureProxy() && !p->asTextureProxy()->mipMapsAreDirty());
                }
            });
#endif
            onFlushRenderTask->prepare(&flushState);
        }
    }

#if 0
    // Enable this to print out verbose GrOp information
    SkDEBUGCODE(SkDebugf("onFlush renderTasks:"));
    for (const auto& onFlushRenderTask : fOnFlushRenderTasks) {
        SkDEBUGCODE(onFlushRenderTask->dump();)
    }
    SkDEBUGCODE(SkDebugf("Normal renderTasks:"));
    for (int i = 0; i < fRenderTasks.count(); ++i) {
        SkDEBUGCODE(fRenderTasks[i]->dump();)
    }
#endif

    int startIndex, stopIndex;
    bool flushed = false;

    {
        GrResourceAllocator alloc(resourceProvider SkDEBUGCODE(, fDAG.numRenderTasks()));
        for (int i = 0; i < fDAG.numRenderTasks(); ++i) {
            if (fDAG.renderTask(i)) {
                fDAG.renderTask(i)->gatherProxyIntervals(&alloc);
            }
            alloc.markEndOfOpsTask(i);
        }
        alloc.determineRecyclability();

        GrResourceAllocator::AssignError error = GrResourceAllocator::AssignError::kNoError;
        int numRenderTasksExecuted = 0;
        while (alloc.assign(&startIndex, &stopIndex, &error)) {
            if (GrResourceAllocator::AssignError::kFailedProxyInstantiation == error) {
                for (int i = startIndex; i < stopIndex; ++i) {
                    GrRenderTask* renderTask = fDAG.renderTask(i);
                    if (!renderTask) {
                        continue;
                    }
                    if (!renderTask->isInstantiated()) {
                        // No need to call the renderTask's handleInternalAllocationFailure
                        // since we will already skip executing the renderTask since it is not
                        // instantiated.
                        continue;
                    }
                    renderTask->handleInternalAllocationFailure();
                }
            }

            if (this->executeRenderTasks(
                    startIndex, stopIndex, &flushState, &numRenderTasksExecuted)) {
                flushed = true;
            }
        }
    }

#ifdef SK_DEBUG
    for (int i = 0; i < fDAG.numRenderTasks(); ++i) {
        // If there are any remaining opsTasks at this point, make sure they will not survive the
        // flush. Otherwise we need to call endFlush() on them.
        // http://skbug.com/7111
        SkASSERT(!fDAG.renderTask(i) || fDAG.renderTask(i)->unique());
    }
#endif
    fLastRenderTasks.reset();
    fDAG.reset();
    this->clearDDLTargets();

#ifdef SK_DEBUG
    // In non-DDL mode this checks that all the flushed ops have been freed from the memory pool.
    // When we move to partial flushes this assert will no longer be valid.
    // In DDL mode this check is somewhat superfluous since the memory for most of the ops/opsTasks
    // will be stored in the DDL's GrOpMemoryPools.
    GrOpMemoryPool* opMemoryPool = fContext->priv().opMemoryPool();
    opMemoryPool->isEmpty();
#endif

    gpu->executeFlushInfo(proxies, numProxies, access, info, newState);

    // Give the cache a chance to purge resources that become purgeable due to flushing.
    if (flushed) {
        resourceCache->purgeAsNeeded();
        flushed = false;
    }
    for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
        onFlushCBObject->postFlush(fTokenTracker.nextTokenToFlush(), fFlushingRenderTaskIDs.begin(),
                                   fFlushingRenderTaskIDs.count());
        flushed = true;
    }
    if (flushed) {
        resourceCache->purgeAsNeeded();
    }
    fFlushingRenderTaskIDs.reset();
    fFlushing = false;

    return true;
}

bool GrDrawingManager::submitToGpu(bool syncToCpu) {
    if (fFlushing || this->wasAbandoned()) {
        return false;
    }

    auto direct = fContext->priv().asDirectContext();
    if (!direct) {
        return false; // Can't submit while DDL recording
    }
    GrGpu* gpu = direct->priv().getGpu();
    return gpu->submitToGpu(syncToCpu);
}

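// Prepares, then executes, the render tasks in [startIndex, stopIndex). onFlush tasks run first.
// To bound the resources held by pending command buffers, work is submitted to the GPU after
// every kMaxRenderTasksBeforeFlush executed tasks. Returns true if any normal (non-onFlush)
// render task actually executed.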
bool GrDrawingManager::executeRenderTasks(int startIndex, int stopIndex, GrOpFlushState* flushState,
                                          int* numRenderTasksExecuted) {
    SkASSERT(startIndex <= stopIndex && stopIndex <= fDAG.numRenderTasks());

#if GR_FLUSH_TIME_OP_SPEW
    SkDebugf("Flushing opsTask: %d to %d out of [%d, %d]\n",
             startIndex, stopIndex, 0, fDAG.numRenderTasks());
    for (int i = startIndex; i < stopIndex; ++i) {
        if (fDAG.renderTask(i)) {
            fDAG.renderTask(i)->dump(true);
        }
    }
#endif

    bool anyRenderTasksExecuted = false;

    for (int i = startIndex; i < stopIndex; ++i) {
        GrRenderTask* renderTask = fDAG.renderTask(i);
        if (!renderTask || !renderTask->isInstantiated()) {
            continue;
        }

        SkASSERT(renderTask->deferredProxiesAreInstantiated());

        renderTask->prepare(flushState);
    }

    // Upload all data to the GPU
    flushState->preExecuteDraws();

    // For Vulkan, if we have too many oplists to be flushed we end up allocating a lot of resources
    // for each command buffer associated with the oplists. If this gets too large we can cause the
    // devices to go OOM. In practice we usually only hit this case in our tests, but to be safe we
    // put a cap on the number of oplists we will execute before flushing to the GPU to relieve some
    // memory pressure.
    static constexpr int kMaxRenderTasksBeforeFlush = 100;

    // Execute the onFlush renderTasks first, if any.
    for (sk_sp<GrRenderTask>& onFlushRenderTask : fOnFlushRenderTasks) {
        if (!onFlushRenderTask->execute(flushState)) {
            SkDebugf("WARNING: onFlushRenderTask failed to execute.\n");
        }
        SkASSERT(onFlushRenderTask->unique());
        onFlushRenderTask->disown(this);
        onFlushRenderTask = nullptr;
        (*numRenderTasksExecuted)++;
        if (*numRenderTasksExecuted >= kMaxRenderTasksBeforeFlush) {
            flushState->gpu()->submitToGpu(false);
            *numRenderTasksExecuted = 0;
        }
    }
    fOnFlushRenderTasks.reset();

    // Execute the normal op lists.
    for (int i = startIndex; i < stopIndex; ++i) {
        GrRenderTask* renderTask = fDAG.renderTask(i);
        if (!renderTask || !renderTask->isInstantiated()) {
            continue;
        }

        if (renderTask->execute(flushState)) {
            anyRenderTasksExecuted = true;
        }
        (*numRenderTasksExecuted)++;
        if (*numRenderTasksExecuted >= kMaxRenderTasksBeforeFlush) {
            flushState->gpu()->submitToGpu(false);
            *numRenderTasksExecuted = 0;
        }
    }

    SkASSERT(!flushState->opsRenderPass());
    SkASSERT(fTokenTracker.nextDrawToken() == fTokenTracker.nextTokenToFlush());

    // We reset the flush state before the RenderTasks so that the last resources to be freed are
    // those that are written to in the RenderTasks. This helps to make sure the most recently used
    // resources are the last to be purged by the resource cache.
    flushState->reset();

    this->removeRenderTasks(startIndex, stopIndex);

    return anyRenderTasksExecuted;
}

void GrDrawingManager::removeRenderTasks(int startIndex, int stopIndex) {
    for (int i = startIndex; i < stopIndex; ++i) {
        GrRenderTask* task = fDAG.renderTask(i);
        if (!task) {
            continue;
        }
        if (!task->unique()) {
            // TODO: Eventually this should be guaranteed unique: http://skbug.com/7111
            task->endFlush(this);
        }
        task->disown(this);
    }
    fDAG.removeRenderTasks(startIndex, stopIndex);
}

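// Helper for flushSurfaces(): once the flush has run, make sure each flushed proxy's backing
// surface has its MSAA resolved and its mipmaps regenerated, since the client may read or steal
// the backing texture as soon as flushSurfaces() returns.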
static void resolve_and_mipmap(GrGpu* gpu, GrSurfaceProxy* proxy) {
    if (!proxy->isInstantiated()) {
        return;
    }

    // In the flushSurfaces case, we need to resolve MSAA immediately after flush. This is
    // because clients expect the flushed surface's backing texture to be fully resolved
    // upon return.
    if (proxy->requiresManualMSAAResolve()) {
        auto* rtProxy = proxy->asRenderTargetProxy();
        SkASSERT(rtProxy);
        if (rtProxy->isMSAADirty()) {
            SkASSERT(rtProxy->peekRenderTarget());
            gpu->resolveRenderTarget(rtProxy->peekRenderTarget(), rtProxy->msaaDirtyRect(),
                                     GrGpu::ForExternalIO::kYes);
            rtProxy->markMSAAResolved();
        }
    }
    // If, after a flush, any of the proxies of interest have dirty mipmaps, regenerate them in
    // case their backend textures are being stolen.
    // (This special case is exercised by the ReimportImageTextureWithMipLevels test.)
    // FIXME: It may be more ideal to plumb down a "we're going to steal the backends" flag.
    if (auto* textureProxy = proxy->asTextureProxy()) {
        if (textureProxy->mipMapsAreDirty()) {
            SkASSERT(textureProxy->peekTexture());
            gpu->regenerateMipMapLevels(textureProxy->peekTexture());
            textureProxy->markMipMapsClean();
        }
    }
}

GrSemaphoresSubmitted GrDrawingManager::flushSurfaces(
        GrSurfaceProxy* proxies[],
        int numProxies,
        SkSurface::BackendSurfaceAccess access,
        const GrFlushInfo& info,
        const GrBackendSurfaceMutableState* newState) {
    if (this->wasAbandoned()) {
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        }
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        return GrSemaphoresSubmitted::kNo;
    }
    SkDEBUGCODE(this->validate());
    SkASSERT(numProxies >= 0);
    SkASSERT(!numProxies || proxies);

    auto direct = fContext->priv().asDirectContext();
    if (!direct) {
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        }
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        return GrSemaphoresSubmitted::kNo; // Can't flush while DDL recording
    }

    GrGpu* gpu = direct->priv().getGpu();
    // We have a non abandoned and direct GrContext. It must have a GrGpu.
    SkASSERT(gpu);

    // TODO: It is important to upgrade the drawingmanager to just flushing the
    // portion of the DAG required by 'proxies' in order to restore some of the
    // semantics of this method.
    bool didFlush = this->flush(proxies, numProxies, access, info, newState);
    for (int i = 0; i < numProxies; ++i) {
        resolve_and_mipmap(gpu, proxies[i]);
    }

    SkDEBUGCODE(this->validate());

    if (!didFlush || (!direct->priv().caps()->semaphoreSupport() && info.fNumSemaphores)) {
        return GrSemaphoresSubmitted::kNo;
    }
    return GrSemaphoresSubmitted::kYes;
}

void GrDrawingManager::addOnFlushCallbackObject(GrOnFlushCallbackObject* onFlushCBObject) {
    fOnFlushCBObjects.push_back(onFlushCBObject);
}

#if GR_TEST_UTILS
void GrDrawingManager::testingOnly_removeOnFlushCallbackObject(GrOnFlushCallbackObject* cb) {
    int n = std::find(fOnFlushCBObjects.begin(), fOnFlushCBObjects.end(), cb) -
            fOnFlushCBObjects.begin();
    SkASSERT(n < fOnFlushCBObjects.count());
    fOnFlushCBObjects.removeShuffle(n);
}
#endif

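// fLastRenderTasks maps a proxy's unique ID to the most recent render task that targets that
// proxy; passing a null task removes the entry.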
void GrDrawingManager::setLastRenderTask(const GrSurfaceProxy* proxy, GrRenderTask* task) {
#ifdef SK_DEBUG
    if (GrRenderTask* prior = this->getLastRenderTask(proxy)) {
        SkASSERT(prior->isClosed());
    }
#endif
    uint32_t key = proxy->uniqueID().asUInt();
    if (task) {
        fLastRenderTasks.set(key, task);
    } else if (fLastRenderTasks.find(key)) {
        fLastRenderTasks.remove(key);
    }
}

GrRenderTask* GrDrawingManager::getLastRenderTask(const GrSurfaceProxy* proxy) const {
    auto entry = fLastRenderTasks.find(proxy->uniqueID().asUInt());
    return entry ? *entry : nullptr;
}

GrOpsTask* GrDrawingManager::getLastOpsTask(const GrSurfaceProxy* proxy) const {
    GrRenderTask* task = this->getLastRenderTask(proxy);
    return task ? task->asOpsTask() : nullptr;
}


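// Called when DDL recording ends: hands the recorded render tasks, arenas, program data, and any
// pending CCPR paths over to the SkDeferredDisplayList.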
void GrDrawingManager::moveRenderTasksToDDL(SkDeferredDisplayList* ddl) {
    SkDEBUGCODE(this->validate());

    // no renderTask should receive a new command after this
    fDAG.closeAll(fContext->priv().caps());
    fActiveOpsTask = nullptr;

    fDAG.swap(&ddl->fRenderTasks);
    SkASSERT(!fDAG.numRenderTasks());

    for (auto& renderTask : ddl->fRenderTasks) {
        renderTask->disown(this);
        renderTask->prePrepare(fContext);
    }

    ddl->fArenas = std::move(fContext->priv().detachArenas());

    fContext->priv().detachProgramData(&ddl->fProgramData);

    if (fPathRendererChain) {
        if (auto ccpr = fPathRendererChain->getCoverageCountingPathRenderer()) {
            ddl->fPendingPaths = ccpr->detachPendingPaths();
        }
    }

    SkDEBUGCODE(this->validate());
}

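// Replays a recorded DDL into this drawing manager: the new destination proxy inherits the DDL
// target's MSAA/mipmap dirtiness, the DDL's lazy proxy is pointed at 'newDest', and the DDL's
// render tasks are appended to the DAG (plus, when DDLs are reference-counted, a task that unrefs
// the DDL after the flush).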
#ifndef SK_DDL_IS_UNIQUE_POINTER
void GrDrawingManager::copyRenderTasksFromDDL(sk_sp<const SkDeferredDisplayList> ddl,
                                              GrRenderTargetProxy* newDest) {
#else
void GrDrawingManager::copyRenderTasksFromDDL(const SkDeferredDisplayList* ddl,
                                              GrRenderTargetProxy* newDest) {
#endif
    SkDEBUGCODE(this->validate());

    if (fActiveOpsTask) {
        // This is a temporary fix for the partial-MDB world. In that world we're not
        // reordering so ops that (in the single opsTask world) would've just glommed onto the
        // end of the single opsTask but referred to a far earlier RT need to appear in their
        // own opsTask.
        fActiveOpsTask->makeClosed(*fContext->priv().caps());
        fActiveOpsTask = nullptr;
    }

    // Propagate the DDL proxy's state information to the replaying DDL.
    if (ddl->priv().targetProxy()->isMSAADirty()) {
        newDest->markMSAADirty(ddl->priv().targetProxy()->msaaDirtyRect(),
                               ddl->characterization().origin());
    }
    GrTextureProxy* newTextureProxy = newDest->asTextureProxy();
    if (newTextureProxy && GrMipMapped::kYes == newTextureProxy->mipMapped()) {
        newTextureProxy->markMipMapsDirty();
    }

    this->addDDLTarget(newDest, ddl->priv().targetProxy());

    // Here we jam the proxy that backs the current replay SkSurface into the LazyProxyData.
    // The lazy proxy that references it (in the copied opsTasks) will steal its GrTexture.
    ddl->fLazyProxyData->fReplayDest = newDest;

    if (ddl->fPendingPaths.size()) {
        GrCoverageCountingPathRenderer* ccpr = this->getCoverageCountingPathRenderer();

        ccpr->mergePendingPaths(ddl->fPendingPaths);
    }

    fDAG.add(ddl->fRenderTasks);

#ifndef SK_DDL_IS_UNIQUE_POINTER
    // Add a task to unref the DDL after flush.
    GrRenderTask* unrefTask = fDAG.add(sk_make_sp<GrUnrefDDLTask>(std::move(ddl)));
    unrefTask->makeClosed(*fContext->priv().caps());
#endif

    SkDEBUGCODE(this->validate());
}

#ifdef SK_DEBUG
void GrDrawingManager::validate() const {
    if (fDAG.sortingRenderTasks() && fReduceOpsTaskSplitting) {
        SkASSERT(!fActiveOpsTask);
    } else {
        if (fActiveOpsTask) {
            SkASSERT(!fDAG.empty());
            SkASSERT(!fActiveOpsTask->isClosed());
            SkASSERT(fActiveOpsTask == fDAG.back());
        }

        for (int i = 0; i < fDAG.numRenderTasks(); ++i) {
            if (fActiveOpsTask != fDAG.renderTask(i)) {
                // The resolveTask associated with the activeTask remains open for as long as the
                // activeTask does.
                bool isActiveResolveTask =
                        fActiveOpsTask && fActiveOpsTask->fTextureResolveTask == fDAG.renderTask(i);
                SkASSERT(isActiveResolveTask || fDAG.renderTask(i)->isClosed());
            }
        }

        if (!fDAG.empty() && !fDAG.back()->isClosed()) {
            SkASSERT(fActiveOpsTask == fDAG.back());
        }
    }
}
#endif

void GrDrawingManager::closeRenderTasksForNewRenderTask(GrSurfaceProxy* target) {
    if (target && fDAG.sortingRenderTasks() && fReduceOpsTaskSplitting) {
        // In this case we need to close all the renderTasks that rely on the current contents of
        // 'target'. That is because we're going to update the content of the proxy so they need to
        // be split in case they use both the old and new content. (This is a bit of an overkill:
        // they really only need to be split if they ever reference proxy's contents again but that
        // is hard to predict/handle).
        if (GrRenderTask* lastRenderTask = this->getLastRenderTask(target)) {
            lastRenderTask->closeThoseWhoDependOnMe(*fContext->priv().caps());
        }
    } else if (fActiveOpsTask) {
        // This is a temporary fix for the partial-MDB world. In that world we're not
        // reordering so ops that (in the single opsTask world) would've just glommed onto the
        // end of the single opsTask but referred to a far earlier RT need to appear in their
        // own opsTask.
        fActiveOpsTask->makeClosed(*fContext->priv().caps());
        fActiveOpsTask = nullptr;
    }
}

sk_sp<GrOpsTask> GrDrawingManager::newOpsTask(GrSurfaceProxyView surfaceView,
                                              bool managedOpsTask) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    GrSurfaceProxy* proxy = surfaceView.proxy();
    this->closeRenderTasksForNewRenderTask(proxy);

    sk_sp<GrOpsTask> opsTask(new GrOpsTask(this, fContext->priv().arenas(),
                                           std::move(surfaceView),
                                           fContext->priv().auditTrail()));
    SkASSERT(this->getLastRenderTask(proxy) == opsTask.get());

    if (managedOpsTask) {
        fDAG.add(opsTask);

        if (!fDAG.sortingRenderTasks() || !fReduceOpsTaskSplitting) {
            fActiveOpsTask = opsTask.get();
        }
    }

    SkDEBUGCODE(this->validate());
    return opsTask;
}

GrTextureResolveRenderTask* GrDrawingManager::newTextureResolveRenderTask(const GrCaps& caps) {
    // Unlike in the "new opsTask" case, we do not want to close the active opsTask, nor (if we are
    // in sorting and opsTask reduction mode) the render tasks that depend on any proxy's current
    // state. This is because those opsTasks can still receive new ops and because if they refer to
    // the mipmapped version of 'proxy', they will then come to depend on the render task being
    // created here.
    //
    // Add the new textureResolveTask before the fActiveOpsTask (if not in
    // sorting/opsTask-splitting-reduction mode) because it will depend upon this resolve task.
    // NOTE: Putting it here will also reduce the amount of work required by the topological sort.
    return static_cast<GrTextureResolveRenderTask*>(fDAG.addBeforeLast(
            sk_make_sp<GrTextureResolveRenderTask>()));
}

void GrDrawingManager::newWaitRenderTask(sk_sp<GrSurfaceProxy> proxy,
                                         std::unique_ptr<std::unique_ptr<GrSemaphore>[]> semaphores,
                                         int numSemaphores) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    const GrCaps& caps = *fContext->priv().caps();

    sk_sp<GrWaitRenderTask> waitTask = sk_make_sp<GrWaitRenderTask>(GrSurfaceProxyView(proxy),
                                                                    std::move(semaphores),
                                                                    numSemaphores);
    if (fReduceOpsTaskSplitting) {
        GrRenderTask* lastTask = this->getLastRenderTask(proxy.get());
        if (lastTask && !lastTask->isClosed()) {
            // We directly make the currently open renderTask depend on waitTask instead of using
            // the proxy version of addDependency. The waitTask will never need to trigger any
            // resolves or mip map generation which is the main advantage of going through the proxy
            // version. Additionally we would've had to temporarily set the wait task as the
            // lastRenderTask on the proxy, add the dependency, and then reset the lastRenderTask to
            // lastTask. Additionally we add all dependencies of lastTask to waitTask so that the
            // waitTask doesn't get reordered before them and unnecessarily block those tasks.
            // Note: Any previous Ops already in lastTask will get blocked by the wait semaphore
            // even though they don't need to be for correctness.

            // Make sure we add the dependencies of lastTask to waitTask first or else we'll get a
            // circular self dependency of waitTask on waitTask.
            waitTask->addDependenciesFromOtherTask(lastTask);
            lastTask->addDependency(waitTask.get());
        } else {
            // If there is a last task we set the waitTask to depend on it so that it doesn't get
            // reordered in front of the lastTask causing the lastTask to be blocked by the
            // semaphore. Again we directly just go through adding the dependency to the task and
            // not the proxy since we don't need to worry about resolving anything.
            if (lastTask) {
                waitTask->addDependency(lastTask);
            }
            this->setLastRenderTask(proxy.get(), waitTask.get());
        }
        fDAG.add(waitTask);
    } else {
        if (fActiveOpsTask && (fActiveOpsTask->target(0).proxy() == proxy.get())) {
            SkASSERT(this->getLastRenderTask(proxy.get()) == fActiveOpsTask);
            fDAG.addBeforeLast(waitTask);
            // In this case we keep the current renderTask open but just insert the new waitTask
            // before it in the list. The waitTask will never need to trigger any resolves or mip
            // map generation which is the main advantage of going through the proxy version.
            // Additionally we would've had to temporarily set the wait task as the lastRenderTask
            // on the proxy, add the dependency, and then reset the lastRenderTask to
            // fActiveOpsTask. Additionally we make the waitTask depend on all of fActiveOpsTask
            // dependencies so that we don't unnecessarily reorder the waitTask before them.
            // Note: Any previous Ops already in fActiveOpsTask will get blocked by the wait
            // semaphore even though they don't need to be for correctness.

            // Make sure we add the dependencies of fActiveOpsTask to waitTask first or else we'll
            // get a circular self dependency of waitTask on waitTask.
            waitTask->addDependenciesFromOtherTask(fActiveOpsTask);
            fActiveOpsTask->addDependency(waitTask.get());
        } else {
            // In this case we just close the previous RenderTask and start and append the waitTask
            // to the DAG. Since it is the last task now we call setLastRenderTask on the proxy. If
            // there is a lastTask on the proxy we make waitTask depend on that task. This
            // dependency isn't strictly needed but it does keep the DAG from reordering the
            // waitTask earlier and blocking more tasks.
            if (GrRenderTask* lastTask = this->getLastRenderTask(proxy.get())) {
                waitTask->addDependency(lastTask);
            }
            this->setLastRenderTask(proxy.get(), waitTask.get());
            this->closeRenderTasksForNewRenderTask(proxy.get());
            fDAG.add(waitTask);
        }
    }
    waitTask->makeClosed(caps);

    SkDEBUGCODE(this->validate());
}

void GrDrawingManager::newTransferFromRenderTask(sk_sp<GrSurfaceProxy> srcProxy,
                                                 const SkIRect& srcRect,
                                                 GrColorType surfaceColorType,
                                                 GrColorType dstColorType,
                                                 sk_sp<GrGpuBuffer> dstBuffer,
                                                 size_t dstOffset) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);
    // This copies from srcProxy to dstBuffer so it doesn't have a real target.
    this->closeRenderTasksForNewRenderTask(nullptr);

    GrRenderTask* task = fDAG.add(sk_make_sp<GrTransferFromRenderTask>(
            srcProxy, srcRect, surfaceColorType, dstColorType,
            std::move(dstBuffer), dstOffset));

    const GrCaps& caps = *fContext->priv().caps();

    // We always say GrMipMapped::kNo here since we are always just copying from the base layer. We
    // don't need to make sure the whole mip map chain is valid.
    task->addDependency(this, srcProxy.get(), GrMipMapped::kNo,
                        GrTextureResolveManager(this), caps);
    task->makeClosed(caps);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
}

bool GrDrawingManager::newCopyRenderTask(GrSurfaceProxyView srcView,
                                         const SkIRect& srcRect,
                                         GrSurfaceProxyView dstView,
                                         const SkIPoint& dstPoint) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    this->closeRenderTasksForNewRenderTask(dstView.proxy());
    const GrCaps& caps = *fContext->priv().caps();

    GrSurfaceProxy* srcProxy = srcView.proxy();

    GrRenderTask* task =
            fDAG.add(GrCopyRenderTask::Make(this, std::move(srcView), srcRect, std::move(dstView),
                                            dstPoint, &caps));
    if (!task) {
        return false;
    }

    // We always say GrMipMapped::kNo here since we are always just copying from the base layer to
    // another base layer. We don't need to make sure the whole mip map chain is valid.
    task->addDependency(this, srcProxy, GrMipMapped::kNo, GrTextureResolveManager(this), caps);
    task->makeClosed(caps);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
    return true;
}

/*
 * This method finds a path renderer that can draw the specified path on
 * the provided target.
 * Due to its expense, the software path renderer has been split out so it
 * can be individually allowed/disallowed via the "allowSW" boolean.
 */
GrPathRenderer* GrDrawingManager::getPathRenderer(const GrPathRenderer::CanDrawPathArgs& args,
                                                  bool allowSW,
                                                  GrPathRendererChain::DrawType drawType,
                                                  GrPathRenderer::StencilSupport* stencilSupport) {

    if (!fPathRendererChain) {
        fPathRendererChain.reset(new GrPathRendererChain(fContext, fOptionsForPathRendererChain));
    }

    GrPathRenderer* pr = fPathRendererChain->getPathRenderer(args, drawType, stencilSupport);
    if (!pr && allowSW) {
        auto swPR = this->getSoftwarePathRenderer();
        if (GrPathRenderer::CanDrawPath::kNo != swPR->canDrawPath(args)) {
            pr = swPR;
        }
    }

#if GR_PATH_RENDERER_SPEW
    if (pr) {
        SkDebugf("getPathRenderer: %s\n", pr->name());
    }
#endif

    return pr;
}

GrPathRenderer* GrDrawingManager::getSoftwarePathRenderer() {
    if (!fSoftwarePathRenderer) {
        fSoftwarePathRenderer.reset(
                new GrSoftwarePathRenderer(fContext->priv().proxyProvider(),
                                           fOptionsForPathRendererChain.fAllowPathMaskCaching));
    }
    return fSoftwarePathRenderer.get();
}

GrCoverageCountingPathRenderer* GrDrawingManager::getCoverageCountingPathRenderer() {
    if (!fPathRendererChain) {
        fPathRendererChain.reset(new GrPathRendererChain(fContext, fOptionsForPathRendererChain));
    }
    return fPathRendererChain->getCoverageCountingPathRenderer();
}

void GrDrawingManager::flushIfNecessary() {
    auto direct = fContext->priv().asDirectContext();
    if (!direct) {
        return;
    }

    auto resourceCache = direct->priv().getResourceCache();
    if (resourceCache && resourceCache->requestsFlush()) {
        if (this->flush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess, GrFlushInfo(),
                        nullptr)) {
            this->submitToGpu(false);
        }
        resourceCache->purgeAsNeeded();
    }
}