/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrDrawingManager.h"

#include <memory>

#include "include/core/SkDeferredDisplayList.h"
#include "include/gpu/GrBackendSemaphore.h"
#include "include/gpu/GrDirectContext.h"
#include "include/gpu/GrRecordingContext.h"
#include "src/core/SkDeferredDisplayListPriv.h"
#include "src/core/SkTTopoSort.h"
#include "src/gpu/GrAuditTrail.h"
#include "src/gpu/GrClientMappedBufferManager.h"
#include "src/gpu/GrCopyRenderTask.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrGpu.h"
#include "src/gpu/GrMemoryPool.h"
#include "src/gpu/GrOnFlushResourceProvider.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/GrRenderTargetContext.h"
#include "src/gpu/GrRenderTargetProxy.h"
#include "src/gpu/GrRenderTask.h"
#include "src/gpu/GrResourceAllocator.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/GrSoftwarePathRenderer.h"
#include "src/gpu/GrSurfaceContext.h"
#include "src/gpu/GrSurfaceProxyPriv.h"
#include "src/gpu/GrTexture.h"
#include "src/gpu/GrTextureProxy.h"
#include "src/gpu/GrTextureProxyPriv.h"
#include "src/gpu/GrTextureResolveRenderTask.h"
#include "src/gpu/GrTracing.h"
#include "src/gpu/GrTransferFromRenderTask.h"
#include "src/gpu/GrUnrefDDLTask.h"
#include "src/gpu/GrWaitRenderTask.h"
#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"
#include "src/gpu/text/GrSDFTOptions.h"
#include "src/image/SkSurface_Gpu.h"

void GrDrawingManager::RenderTaskDAG::gatherIDs(SkSTArray<8, uint32_t, true>* idArray) const {
    idArray->reset(fRenderTasks.count());
    for (int i = 0; i < fRenderTasks.count(); ++i) {
        if (fRenderTasks[i]) {
            (*idArray)[i] = fRenderTasks[i]->uniqueID();
        }
    }
}

void GrDrawingManager::RenderTaskDAG::reset() {
    fRenderTasks.reset();
}

void GrDrawingManager::RenderTaskDAG::rawRemoveRenderTasks(int startIndex, int stopIndex) {
    for (int i = startIndex; i < stopIndex; ++i) {
        fRenderTasks[i] = nullptr;
    }
}

bool GrDrawingManager::RenderTaskDAG::isUsed(GrSurfaceProxy* proxy) const {
    for (const auto& task : fRenderTasks) {
        if (task && task->isUsed(proxy)) {
            return true;
        }
    }

    return false;
}

GrRenderTask* GrDrawingManager::RenderTaskDAG::add(sk_sp<GrRenderTask> renderTask) {
    if (renderTask) {
        return fRenderTasks.emplace_back(std::move(renderTask)).get();
    }
    return nullptr;
}

GrRenderTask* GrDrawingManager::RenderTaskDAG::addBeforeLast(sk_sp<GrRenderTask> renderTask) {
    SkASSERT(!fRenderTasks.empty());
    if (renderTask) {
        // Release 'fRenderTasks.back()' and grab the raw pointer, in case the SkTArray grows
        // and reallocates during emplace_back.
        fRenderTasks.emplace_back(fRenderTasks.back().release());
        return (fRenderTasks[fRenderTasks.count() - 2] = std::move(renderTask)).get();
    }
    return nullptr;
}
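
// For illustration: starting from tasks [A, B], addBeforeLast(C) re-appends B and then drops C
// into B's old slot, yielding [A, C, B]. (A, B, and C are hypothetical tasks, not real types.)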

void GrDrawingManager::RenderTaskDAG::add(const SkTArray<sk_sp<GrRenderTask>>& renderTasks) {
#ifdef SK_DEBUG
    for (auto& renderTask : renderTasks) {
        SkASSERT(renderTask->unique());
    }
#endif

    fRenderTasks.push_back_n(renderTasks.count(), renderTasks.begin());
}

void GrDrawingManager::RenderTaskDAG::swap(SkTArray<sk_sp<GrRenderTask>>* renderTasks) {
    SkASSERT(renderTasks->empty());
    renderTasks->swap(fRenderTasks);
}

void GrDrawingManager::RenderTaskDAG::prepForFlush() {
    if (!SkTTopoSort<GrRenderTask, GrRenderTask::TopoSortTraits>(&fRenderTasks)) {
        SkDEBUGFAIL("Render task topo sort failed.");
        return;
    }

#ifdef SK_DEBUG
    // This block checks for any unnecessary splits in the opsTasks. If two sequential opsTasks
    // share the same backing GrSurfaceProxy it means the opsTask was artificially split.
    if (fRenderTasks.count()) {
        GrOpsTask* prevOpsTask = fRenderTasks[0]->asOpsTask();
        for (int i = 1; i < fRenderTasks.count(); ++i) {
            GrOpsTask* curOpsTask = fRenderTasks[i]->asOpsTask();

            if (prevOpsTask && curOpsTask) {
                SkASSERT(prevOpsTask->target(0).proxy() != curOpsTask->target(0).proxy());
            }

            prevOpsTask = curOpsTask;
        }
    }
#endif
}

void GrDrawingManager::RenderTaskDAG::closeAll(const GrCaps* caps) {
    for (auto& task : fRenderTasks) {
        if (task) {
            task->makeClosed(*caps);
        }
    }
}
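
// A rough sketch of how GrDrawingManager::flush() (below) drives this DAG; the calls are real
// methods from this file, while the ordering annotations are illustrative:
//   fDAG.closeAll(caps);     // no task may receive further work
//   fDAG.prepForFlush();     // topologically sort the tasks by dependency
//   /* allocate resources, then executeRenderTasks() over the assigned intervals */
//   fDAG.reset();            // drop the now-flushed tasks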
139
Robert Phillips22310d62018-09-05 11:07:21 -0400140///////////////////////////////////////////////////////////////////////////////////////////////////
Robert Phillips69893702019-02-22 11:16:30 -0500141GrDrawingManager::GrDrawingManager(GrRecordingContext* context,
Robert Phillips22310d62018-09-05 11:07:21 -0400142 const GrPathRendererChain::Options& optionsForPathRendererChain,
Greg Danielf41b2bd2019-08-22 16:19:24 -0400143 bool reduceOpsTaskSplitting)
Robert Phillips22310d62018-09-05 11:07:21 -0400144 : fContext(context)
145 , fOptionsForPathRendererChain(optionsForPathRendererChain)
Robert Phillips22310d62018-09-05 11:07:21 -0400146 , fPathRendererChain(nullptr)
147 , fSoftwarePathRenderer(nullptr)
Robert Phillips6db27c22019-05-01 10:43:56 -0400148 , fFlushing(false)
Herb Derby082232b2020-06-10 15:08:18 -0400149 , fReduceOpsTaskSplitting(reduceOpsTaskSplitting) { }
Robert Phillips22310d62018-09-05 11:07:21 -0400150
robertphillips3dc6ae52015-10-20 09:54:32 -0700151GrDrawingManager::~GrDrawingManager() {
Adlai Holler96ead542020-06-26 08:50:14 -0400152 fDAG.closeAll(fContext->priv().caps());
153 this->removeRenderTasks(0, fDAG.numRenderTasks());
robertphillips3dc6ae52015-10-20 09:54:32 -0700154}
155
Robert Phillipsa9162df2019-02-11 14:12:03 -0500156bool GrDrawingManager::wasAbandoned() const {
Robert Phillips9eb00022020-06-30 15:30:12 -0400157 return fContext->abandoned();
robertphillips3dc6ae52015-10-20 09:54:32 -0700158}
159
robertphillips68737822015-10-29 12:12:21 -0700160void GrDrawingManager::freeGpuResources() {
Jim Van Verth106b5c42017-09-26 12:45:29 -0400161 for (int i = fOnFlushCBObjects.count() - 1; i >= 0; --i) {
162 if (!fOnFlushCBObjects[i]->retainOnFreeGpuResources()) {
163 // it's safe to just do this because we're iterating in reverse
164 fOnFlushCBObjects.removeShuffle(i);
165 }
166 }
167
robertphillips68737822015-10-29 12:12:21 -0700168 // a path renderer may be holding onto resources
robertphillips13391dd2015-10-30 05:15:11 -0700169 fPathRendererChain = nullptr;
Ben Wagner9ec70c62018-07-12 13:30:47 -0400170 fSoftwarePathRenderer = nullptr;
Robert Phillipse3302df2017-04-24 07:31:02 -0400171}
172
Robert Phillips7ee385e2017-03-30 08:02:11 -0400173// MDB TODO: make use of the 'proxy' parameter.
Greg Daniel9efe3862020-06-11 11:51:06 -0400174bool GrDrawingManager::flush(
175 GrSurfaceProxy* proxies[],
176 int numProxies,
177 SkSurface::BackendSurfaceAccess access,
178 const GrFlushInfo& info,
179 const GrBackendSurfaceMutableState* newState) {
Brian Salomonf9a1fdf2019-05-09 10:30:12 -0400180 SkASSERT(numProxies >= 0);
181 SkASSERT(!numProxies || proxies);
Brian Salomon57d2beab2018-09-10 09:35:41 -0400182 GR_CREATE_TRACE_MARKER_CONTEXT("GrDrawingManager", "flush", fContext);
Brian Salomondcbb9d92017-07-19 10:53:20 -0400183
robertphillips7761d612016-05-16 09:14:53 -0700184 if (fFlushing || this->wasAbandoned()) {
Greg Daniel55822f12020-05-26 11:26:45 -0400185 if (info.fSubmittedProc) {
186 info.fSubmittedProc(info.fSubmittedContext, false);
187 }
Greg Daniele6bfb7d2019-04-17 15:26:11 -0400188 if (info.fFinishedProc) {
189 info.fFinishedProc(info.fFinishedContext);
Greg Daniela3aa75a2019-04-12 14:24:55 -0400190 }
Greg Danielfe159622020-04-10 17:43:51 +0000191 return false;
joshualittb8918c42015-12-18 09:59:46 -0800192 }
Robert Phillips602df412019-04-08 11:10:39 -0400193
Robert Phillips38d64b02018-09-04 13:23:26 -0400194 SkDEBUGCODE(this->validate());
195
Greg Danielce9f0162020-06-30 13:42:46 -0400196 if (!info.fNumSemaphores && !info.fFinishedProc &&
197 access == SkSurface::BackendSurfaceAccess::kNoAccess && !newState) {
Brian Salomonf9a1fdf2019-05-09 10:30:12 -0400198 bool canSkip = numProxies > 0;
199 for (int i = 0; i < numProxies && canSkip; ++i) {
200 canSkip = !fDAG.isUsed(proxies[i]) && !this->isDDLTarget(proxies[i]);
201 }
202 if (canSkip) {
Greg Daniel55822f12020-05-26 11:26:45 -0400203 if (info.fSubmittedProc) {
204 info.fSubmittedProc(info.fSubmittedContext, true);
205 }
Greg Danielfe159622020-04-10 17:43:51 +0000206 return false;
Brian Salomonf9a1fdf2019-05-09 10:30:12 -0400207 }
Robert Phillips9313aa72019-04-09 18:41:27 -0400208 }
209
Robert Phillipsf8f45d92020-07-01 11:11:18 -0400210 auto direct = fContext->asDirectContext();
Adlai Holleraee25fd2020-11-06 12:34:56 -0500211 SkASSERT(direct);
Brian Salomon9241a6d2019-10-03 13:26:54 -0400212 direct->priv().clientMappedBufferManager()->process();
Robert Phillips6a6de562019-02-15 15:19:15 -0500213
214 GrGpu* gpu = direct->priv().getGpu();
Greg Daniel55822f12020-05-26 11:26:45 -0400215 // We have a non abandoned and direct GrContext. It must have a GrGpu.
216 SkASSERT(gpu);
Greg Daniela3aa75a2019-04-12 14:24:55 -0400217
joshualittb8918c42015-12-18 09:59:46 -0800218 fFlushing = true;
Robert Phillipseb35f4d2017-03-21 07:56:47 -0400219
    auto resourceProvider = direct->priv().resourceProvider();
    auto resourceCache = direct->priv().getResourceCache();

    // Usually the GrRenderTasks are already closed at this point, but sometimes Ganesh needs to
    // flush mid-draw. In that case, the SkGpuDevice's opsTasks won't be closed but need to be
    // flushed anyway. Closing such opsTasks here means new ones will be created to replace them
    // if the SkGpuDevice(s) write to them again.
    fDAG.closeAll(fContext->priv().caps());
    fActiveOpsTask = nullptr;

    fDAG.prepForFlush();
    if (!fCpuBufferCache) {
        // We cache more buffers when the backend is using client side arrays. Otherwise, we
        // expect each pool will use a CPU buffer as a staging buffer before uploading to a GPU
        // buffer object. Each pool only requires one staging buffer at a time.
        int maxCachedBuffers = fContext->priv().caps()->preferClientSideDynamicBuffers() ? 2 : 6;
        fCpuBufferCache = GrBufferAllocPool::CpuBufferCache::Make(maxCachedBuffers);
    }

    GrOpFlushState flushState(gpu, resourceProvider, &fTokenTracker, fCpuBufferCache);

    GrOnFlushResourceProvider onFlushProvider(this);

    // Prepare any onFlush op lists (e.g. atlases).
    if (!fOnFlushCBObjects.empty()) {
        fDAG.gatherIDs(&fFlushingRenderTaskIDs);

        for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
            onFlushCBObject->preFlush(&onFlushProvider, fFlushingRenderTaskIDs.begin(),
                                      fFlushingRenderTaskIDs.count());
        }
        for (const auto& onFlushRenderTask : fOnFlushRenderTasks) {
            onFlushRenderTask->makeClosed(*fContext->priv().caps());
#ifdef SK_DEBUG
            // OnFlush callbacks are invoked during flush, and are therefore expected to handle
            // resource allocation & usage on their own. (No deferred or lazy proxies!)
            onFlushRenderTask->visitTargetAndSrcProxies_debugOnly(
                    [](GrSurfaceProxy* p, GrMipmapped mipMapped) {
                SkASSERT(!p->asTextureProxy() || !p->asTextureProxy()->texPriv().isDeferred());
                SkASSERT(!p->isLazy());
                if (p->requiresManualMSAAResolve()) {
                    // The onFlush callback is responsible for ensuring MSAA gets resolved.
                    SkASSERT(p->asRenderTargetProxy() && !p->asRenderTargetProxy()->isMSAADirty());
                }
                if (GrMipmapped::kYes == mipMapped) {
                    // The onFlush callback is responsible for regenerating mips if needed.
                    SkASSERT(p->asTextureProxy() && !p->asTextureProxy()->mipmapsAreDirty());
                }
            });
#endif
            onFlushRenderTask->prepare(&flushState);
        }
    }

#if 0
    // Enable this to print out verbose GrOp information
    SkDEBUGCODE(SkDebugf("onFlush renderTasks (%d):\n", fOnFlushRenderTasks.count()));
    for (const auto& onFlushRenderTask : fOnFlushRenderTasks) {
        SkDEBUGCODE(onFlushRenderTask->dump(/* printDependencies */ true);)
    }
    SkDEBUGCODE(SkDebugf("Normal renderTasks (%d):\n", fDAG.numRenderTasks()));
    for (int i = 0; i < fDAG.numRenderTasks(); ++i) {
        SkDEBUGCODE(fDAG.renderTask(i)->dump(/* printDependencies */ true);)
    }
#endif

    int startIndex, stopIndex;
    bool flushed = false;

    {
        GrResourceAllocator alloc(resourceProvider SkDEBUGCODE(, fDAG.numRenderTasks()));
        for (int i = 0; i < fDAG.numRenderTasks(); ++i) {
            if (fDAG.renderTask(i)) {
                fDAG.renderTask(i)->gatherProxyIntervals(&alloc);
            }
            alloc.markEndOfOpsTask(i);
        }
        alloc.determineRecyclability();

        GrResourceAllocator::AssignError error = GrResourceAllocator::AssignError::kNoError;
        int numRenderTasksExecuted = 0;
        while (alloc.assign(&startIndex, &stopIndex, &error)) {
            if (GrResourceAllocator::AssignError::kFailedProxyInstantiation == error) {
                for (int i = startIndex; i < stopIndex; ++i) {
                    GrRenderTask* renderTask = fDAG.renderTask(i);
                    if (!renderTask) {
                        continue;
                    }
                    if (!renderTask->isInstantiated()) {
                        // No need to call the renderTask's handleInternalAllocationFailure;
                        // we will already skip executing the renderTask because it is not
                        // instantiated.
                        continue;
                    }
                    renderTask->handleInternalAllocationFailure();
                }
                this->removeRenderTasks(startIndex, stopIndex);
            }

            if (this->executeRenderTasks(
                    startIndex, stopIndex, &flushState, &numRenderTasksExecuted)) {
                flushed = true;
            }
        }
    }

#ifdef SK_DEBUG
    for (int i = 0; i < fDAG.numRenderTasks(); ++i) {
        // All render tasks should have been cleared out by now – we only reset the array below to
        // reclaim storage.
        SkASSERT(!fDAG.renderTask(i));
    }
#endif
    fLastRenderTasks.reset();
    fDAG.reset();
    this->clearDDLTargets();

#ifdef SK_DEBUG
    // In non-DDL mode this checks that all the flushed ops have been freed from the memory pool.
    // When we move to partial flushes this assert will no longer be valid.
    // In DDL mode this check is somewhat superfluous since the memory for most of the ops/opsTasks
    // will be stored in the DDL's GrOpMemoryPools.
    GrMemoryPool* opMemoryPool = fContext->priv().opMemoryPool();
    opMemoryPool->isEmpty();
#endif

    gpu->executeFlushInfo(proxies, numProxies, access, info, newState);

    // Give the cache a chance to purge resources that become purgeable due to flushing.
    if (flushed) {
        resourceCache->purgeAsNeeded();
        flushed = false;
    }
    for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
        onFlushCBObject->postFlush(fTokenTracker.nextTokenToFlush(), fFlushingRenderTaskIDs.begin(),
                                   fFlushingRenderTaskIDs.count());
        flushed = true;
    }
    if (flushed) {
        resourceCache->purgeAsNeeded();
    }
    fFlushingRenderTaskIDs.reset();
    fFlushing = false;

    return true;
}
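
// A minimal usage sketch (hypothetical caller; 'dm' is a GrDrawingManager and 'proxy' a valid
// GrSurfaceProxy, both assumed to exist elsewhere). flushIfNecessary() at the bottom of this
// file follows the same flush-then-submit pattern:
//   GrSurfaceProxy* targets[] = { proxy };
//   if (dm->flush(targets, 1, SkSurface::BackendSurfaceAccess::kNoAccess, GrFlushInfo(),
//                 /*newState=*/nullptr)) {
//       dm->submitToGpu(/*syncToCpu=*/false);
//   }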

bool GrDrawingManager::submitToGpu(bool syncToCpu) {
    if (fFlushing || this->wasAbandoned()) {
        return false;
    }

    auto direct = fContext->asDirectContext();
    if (!direct) {
        return false; // Can't submit while DDL recording
    }
    GrGpu* gpu = direct->priv().getGpu();
    return gpu->submitToGpu(syncToCpu);
}

bool GrDrawingManager::executeRenderTasks(int startIndex, int stopIndex, GrOpFlushState* flushState,
                                          int* numRenderTasksExecuted) {
    SkASSERT(startIndex <= stopIndex && stopIndex <= fDAG.numRenderTasks());

#if GR_FLUSH_TIME_OP_SPEW
    SkDebugf("Flushing opsTask: %d to %d out of [%d, %d]\n",
             startIndex, stopIndex, 0, fDAG.numRenderTasks());
    for (int i = startIndex; i < stopIndex; ++i) {
        if (fDAG.renderTask(i)) {
            fDAG.renderTask(i)->dump(true);
        }
    }
#endif

    bool anyRenderTasksExecuted = false;

    for (int i = startIndex; i < stopIndex; ++i) {
        GrRenderTask* renderTask = fDAG.renderTask(i);
        if (!renderTask || !renderTask->isInstantiated()) {
            continue;
        }

        SkASSERT(renderTask->deferredProxiesAreInstantiated());

        renderTask->prepare(flushState);
    }

    // Upload all data to the GPU
    flushState->preExecuteDraws();

    // For Vulkan, if we have too many render tasks to be flushed, we end up allocating a lot of
    // resources for each command buffer associated with the render tasks. If this gets too large
    // we can cause the devices to go OOM. In practice we usually only hit this case in our tests,
    // but to be safe we put a cap on the number of render tasks we will execute before flushing
    // to the GPU to relieve some memory pressure.
    static constexpr int kMaxRenderTasksBeforeFlush = 100;

    // Execute the onFlush renderTasks first, if any.
    for (sk_sp<GrRenderTask>& onFlushRenderTask : fOnFlushRenderTasks) {
        if (!onFlushRenderTask->execute(flushState)) {
            SkDebugf("WARNING: onFlushRenderTask failed to execute.\n");
        }
        SkASSERT(onFlushRenderTask->unique());
        onFlushRenderTask->disown(this);
        onFlushRenderTask = nullptr;
        (*numRenderTasksExecuted)++;
        if (*numRenderTasksExecuted >= kMaxRenderTasksBeforeFlush) {
            flushState->gpu()->submitToGpu(false);
            *numRenderTasksExecuted = 0;
        }
    }
    fOnFlushRenderTasks.reset();

    // Execute the normal renderTasks.
    for (int i = startIndex; i < stopIndex; ++i) {
        GrRenderTask* renderTask = fDAG.renderTask(i);
        if (!renderTask || !renderTask->isInstantiated()) {
            continue;
        }

        if (renderTask->execute(flushState)) {
            anyRenderTasksExecuted = true;
        }
        (*numRenderTasksExecuted)++;
        if (*numRenderTasksExecuted >= kMaxRenderTasksBeforeFlush) {
            flushState->gpu()->submitToGpu(false);
            *numRenderTasksExecuted = 0;
        }
    }

    SkASSERT(!flushState->opsRenderPass());
    SkASSERT(fTokenTracker.nextDrawToken() == fTokenTracker.nextTokenToFlush());

    // We reset the flush state before the RenderTasks so that the last resources to be freed are
    // those that are written to in the RenderTasks. This helps to make sure the most recently used
    // resources are the last to be purged by the resource cache.
    flushState->reset();

    this->removeRenderTasks(startIndex, stopIndex);

    return anyRenderTasksExecuted;
}

void GrDrawingManager::removeRenderTasks(int startIndex, int stopIndex) {
    for (int i = startIndex; i < stopIndex; ++i) {
        GrRenderTask* task = fDAG.renderTask(i);
        if (!task) {
            continue;
        }
        if (!task->unique()) {
            // TODO: Eventually this should be guaranteed unique: http://skbug.com/7111
            task->endFlush(this);
        }
        task->disown(this);
    }
    fDAG.rawRemoveRenderTasks(startIndex, stopIndex);
}

static void resolve_and_mipmap(GrGpu* gpu, GrSurfaceProxy* proxy) {
    if (!proxy->isInstantiated()) {
        return;
    }

    // In the flushSurfaces case, we need to resolve MSAA immediately after flush. This is
    // because clients expect the flushed surface's backing texture to be fully resolved
    // upon return.
    if (proxy->requiresManualMSAAResolve()) {
        auto* rtProxy = proxy->asRenderTargetProxy();
        SkASSERT(rtProxy);
        if (rtProxy->isMSAADirty()) {
            SkASSERT(rtProxy->peekRenderTarget());
            gpu->resolveRenderTarget(rtProxy->peekRenderTarget(), rtProxy->msaaDirtyRect());
            gpu->submitToGpu(false);
            rtProxy->markMSAAResolved();
        }
    }
    // If, after a flush, any of the proxies of interest have dirty mipmaps, regenerate them in
    // case their backend textures are being stolen.
    // (This special case is exercised by the ReimportImageTextureWithMipLevels test.)
    // FIXME: It may be more ideal to plumb down a "we're going to steal the backends" flag.
    if (auto* textureProxy = proxy->asTextureProxy()) {
        if (textureProxy->mipmapsAreDirty()) {
            SkASSERT(textureProxy->peekTexture());
            gpu->regenerateMipMapLevels(textureProxy->peekTexture());
            textureProxy->markMipmapsClean();
        }
    }
}

GrSemaphoresSubmitted GrDrawingManager::flushSurfaces(
        GrSurfaceProxy* proxies[],
        int numProxies,
        SkSurface::BackendSurfaceAccess access,
        const GrFlushInfo& info,
        const GrBackendSurfaceMutableState* newState) {
    if (this->wasAbandoned()) {
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        }
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        return GrSemaphoresSubmitted::kNo;
    }
    SkDEBUGCODE(this->validate());
    SkASSERT(numProxies >= 0);
    SkASSERT(!numProxies || proxies);

    auto direct = fContext->asDirectContext();
    SkASSERT(direct);
    GrGpu* gpu = direct->priv().getGpu();
    // We have a non abandoned and direct GrContext. It must have a GrGpu.
    SkASSERT(gpu);

    // TODO: It is important to upgrade the drawingmanager to just flushing the
    // portion of the DAG required by 'proxies' in order to restore some of the
    // semantics of this method.
    bool didFlush = this->flush(proxies, numProxies, access, info, newState);
    for (int i = 0; i < numProxies; ++i) {
        resolve_and_mipmap(gpu, proxies[i]);
    }

    SkDEBUGCODE(this->validate());

    if (!didFlush || (!direct->priv().caps()->semaphoreSupport() && info.fNumSemaphores)) {
        return GrSemaphoresSubmitted::kNo;
    }
    return GrSemaphoresSubmitted::kYes;
}

void GrDrawingManager::addOnFlushCallbackObject(GrOnFlushCallbackObject* onFlushCBObject) {
    fOnFlushCBObjects.push_back(onFlushCBObject);
}

#if GR_TEST_UTILS
void GrDrawingManager::testingOnly_removeOnFlushCallbackObject(GrOnFlushCallbackObject* cb) {
    int n = std::find(fOnFlushCBObjects.begin(), fOnFlushCBObjects.end(), cb) -
            fOnFlushCBObjects.begin();
    SkASSERT(n < fOnFlushCBObjects.count());
    fOnFlushCBObjects.removeShuffle(n);
}
#endif

void GrDrawingManager::setLastRenderTask(const GrSurfaceProxy* proxy, GrRenderTask* task) {
#ifdef SK_DEBUG
    if (GrRenderTask* prior = this->getLastRenderTask(proxy)) {
        SkASSERT(prior->isClosed());
    }
#endif
    uint32_t key = proxy->uniqueID().asUInt();
    if (task) {
        fLastRenderTasks.set(key, task);
    } else if (fLastRenderTasks.find(key)) {
        fLastRenderTasks.remove(key);
    }
}

GrRenderTask* GrDrawingManager::getLastRenderTask(const GrSurfaceProxy* proxy) const {
    auto entry = fLastRenderTasks.find(proxy->uniqueID().asUInt());
    return entry ? *entry : nullptr;
}

GrOpsTask* GrDrawingManager::getLastOpsTask(const GrSurfaceProxy* proxy) const {
    GrRenderTask* task = this->getLastRenderTask(proxy);
    return task ? task->asOpsTask() : nullptr;
}
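
// Illustrative bookkeeping sketch (hypothetical 'dm', 'proxy', and tasks; not code from this
// file): after recording a task that writes 'proxy',
//   dm->setLastRenderTask(proxy, writeTask);               // remember the most recent writer
// a later task that reads 'proxy' can then wait on that writer:
//   if (GrRenderTask* writer = dm->getLastRenderTask(proxy)) {
//       readTask->addDependency(writer);
//   }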

void GrDrawingManager::moveRenderTasksToDDL(SkDeferredDisplayList* ddl) {
    SkDEBUGCODE(this->validate());

    // no renderTask should receive a new command after this
    fDAG.closeAll(fContext->priv().caps());
    fActiveOpsTask = nullptr;

    fDAG.swap(&ddl->fRenderTasks);
    SkASSERT(!fDAG.numRenderTasks());

    for (auto& renderTask : ddl->fRenderTasks) {
        renderTask->disown(this);
        renderTask->prePrepare(fContext);
    }

    ddl->fArenas = std::move(fContext->priv().detachArenas());

    fContext->priv().detachProgramData(&ddl->fProgramData);

    if (fPathRendererChain) {
        if (auto ccpr = fPathRendererChain->getCoverageCountingPathRenderer()) {
            ddl->fPendingPaths = ccpr->detachPendingPaths();
        }
    }

    SkDEBUGCODE(this->validate());
}

void GrDrawingManager::copyRenderTasksFromDDL(sk_sp<const SkDeferredDisplayList> ddl,
                                              GrRenderTargetProxy* newDest) {
    SkDEBUGCODE(this->validate());

    if (fActiveOpsTask) {
        // This is a temporary fix for the partial-MDB world. In that world we're not
        // reordering so ops that (in the single opsTask world) would've just glommed onto the
        // end of the single opsTask but referred to a far earlier RT need to appear in their
        // own opsTask.
        fActiveOpsTask->makeClosed(*fContext->priv().caps());
        fActiveOpsTask = nullptr;
    }

    // Propagate the DDL proxy's state information to the replaying DDL.
    if (ddl->priv().targetProxy()->isMSAADirty()) {
        newDest->markMSAADirty(ddl->priv().targetProxy()->msaaDirtyRect(),
                               ddl->characterization().origin());
    }
    GrTextureProxy* newTextureProxy = newDest->asTextureProxy();
    if (newTextureProxy && GrMipmapped::kYes == newTextureProxy->mipmapped()) {
        newTextureProxy->markMipmapsDirty();
    }

    this->addDDLTarget(newDest, ddl->priv().targetProxy());

    // Here we jam the proxy that backs the current replay SkSurface into the LazyProxyData.
    // The lazy proxy that references it (in the copied opsTasks) will steal its GrTexture.
    ddl->fLazyProxyData->fReplayDest = newDest;

    if (ddl->fPendingPaths.size()) {
        GrCoverageCountingPathRenderer* ccpr = this->getCoverageCountingPathRenderer();

        ccpr->mergePendingPaths(ddl->fPendingPaths);
    }

    fDAG.add(ddl->fRenderTasks);

    // Add a task to unref the DDL after flush.
    GrRenderTask* unrefTask = fDAG.add(sk_make_sp<GrUnrefDDLTask>(std::move(ddl)));
    unrefTask->makeClosed(*fContext->priv().caps());

    SkDEBUGCODE(this->validate());
}

#ifdef SK_DEBUG
void GrDrawingManager::validate() const {
    if (fReduceOpsTaskSplitting) {
        SkASSERT(!fActiveOpsTask);
    } else {
        if (fActiveOpsTask) {
            SkASSERT(!fDAG.empty());
            SkASSERT(!fActiveOpsTask->isClosed());
            SkASSERT(fActiveOpsTask == fDAG.back());
        }

        for (int i = 0; i < fDAG.numRenderTasks(); ++i) {
            if (fActiveOpsTask != fDAG.renderTask(i)) {
                // The resolveTask associated with the activeTask remains open for as long as the
                // activeTask does.
                bool isActiveResolveTask =
                        fActiveOpsTask && fActiveOpsTask->fTextureResolveTask == fDAG.renderTask(i);
                SkASSERT(isActiveResolveTask || fDAG.renderTask(i)->isClosed());
            }
        }

        if (!fDAG.empty() && !fDAG.back()->isClosed()) {
            SkASSERT(fActiveOpsTask == fDAG.back());
        }
    }
}
#endif

void GrDrawingManager::closeRenderTasksForNewRenderTask(GrSurfaceProxy* target) {
    if (target && fReduceOpsTaskSplitting) {
        // In this case we need to close all the renderTasks that rely on the current contents of
        // 'target'. That is because we're going to update the contents of the proxy, so those
        // renderTasks need to be split in case they use both the old and new content. (This is a
        // bit of overkill: they really only need to be split if they ever reference the proxy's
        // contents again, but that is hard to predict/handle.)
        if (GrRenderTask* lastRenderTask = this->getLastRenderTask(target)) {
            lastRenderTask->closeThoseWhoDependOnMe(*fContext->priv().caps());
        }
    } else if (fActiveOpsTask) {
        // This is a temporary fix for the partial-MDB world. In that world we're not
        // reordering so ops that (in the single opsTask world) would've just glommed onto the
        // end of the single opsTask but referred to a far earlier RT need to appear in their
        // own opsTask.
        fActiveOpsTask->makeClosed(*fContext->priv().caps());
        fActiveOpsTask = nullptr;
    }
}

sk_sp<GrOpsTask> GrDrawingManager::newOpsTask(GrSurfaceProxyView surfaceView,
                                              bool managedOpsTask) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    GrSurfaceProxy* proxy = surfaceView.proxy();
    this->closeRenderTasksForNewRenderTask(proxy);

    sk_sp<GrOpsTask> opsTask(new GrOpsTask(this, fContext->priv().arenas(),
                                           std::move(surfaceView),
                                           fContext->priv().auditTrail()));
    SkASSERT(this->getLastRenderTask(proxy) == opsTask.get());

    if (managedOpsTask) {
        fDAG.add(opsTask);

        if (!fReduceOpsTaskSplitting) {
            fActiveOpsTask = opsTask.get();
        }
    }

    SkDEBUGCODE(this->validate());
    return opsTask;
}
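
// A minimal creation sketch (hypothetical caller; 'dm' and 'view' are assumed to exist, with
// 'view' a valid GrSurfaceProxyView of a render target):
//   sk_sp<GrOpsTask> opsTask = dm->newOpsTask(std::move(view), /*managedOpsTask=*/true);
// A managed opsTask is appended to fDAG and, when opsTask-splitting reduction is off, becomes
// fActiveOpsTask until a task targeting a different proxy is created.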

GrTextureResolveRenderTask* GrDrawingManager::newTextureResolveRenderTask(const GrCaps& caps) {
    // Unlike in the "new opsTask" case, we do not want to close the active opsTask, nor (if we are
    // in sorting and opsTask reduction mode) the render tasks that depend on any proxy's current
    // state. This is because those opsTasks can still receive new ops and because if they refer to
    // the mipmapped version of 'proxy', they will then come to depend on the render task being
    // created here.
    //
    // Add the new textureResolveTask before the fActiveOpsTask (if not in
    // sorting/opsTask-splitting-reduction mode) because it will depend upon this resolve task.
    // NOTE: Putting it here will also reduce the amount of work required by the topological sort.
    return static_cast<GrTextureResolveRenderTask*>(fDAG.addBeforeLast(
            sk_make_sp<GrTextureResolveRenderTask>()));
}

void GrDrawingManager::newWaitRenderTask(sk_sp<GrSurfaceProxy> proxy,
                                         std::unique_ptr<std::unique_ptr<GrSemaphore>[]> semaphores,
                                         int numSemaphores) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    const GrCaps& caps = *fContext->priv().caps();

    sk_sp<GrWaitRenderTask> waitTask = sk_make_sp<GrWaitRenderTask>(GrSurfaceProxyView(proxy),
                                                                    std::move(semaphores),
                                                                    numSemaphores);
    if (fReduceOpsTaskSplitting) {
        GrRenderTask* lastTask = this->getLastRenderTask(proxy.get());
        if (lastTask && !lastTask->isClosed()) {
            // We directly make the currently open renderTask depend on waitTask instead of using
            // the proxy version of addDependency. The waitTask will never need to trigger any
            // resolves or mip map generation which is the main advantage of going through the proxy
            // version. Additionally we would've had to temporarily set the wait task as the
            // lastRenderTask on the proxy, add the dependency, and then reset the lastRenderTask to
            // lastTask. Additionally we add all dependencies of lastTask to waitTask so that the
            // waitTask doesn't get reordered before them and unnecessarily block those tasks.
            // Note: Any previous Ops already in lastTask will get blocked by the wait semaphore
            // even though they don't need to be for correctness.

            // Make sure we add the dependencies of lastTask to waitTask first or else we'll get a
            // circular self dependency of waitTask on waitTask.
            waitTask->addDependenciesFromOtherTask(lastTask);
            lastTask->addDependency(waitTask.get());
        } else {
            // If there is a last task we set the waitTask to depend on it so that it doesn't get
            // reordered in front of the lastTask causing the lastTask to be blocked by the
            // semaphore. Again we directly just go through adding the dependency to the task and
            // not the proxy since we don't need to worry about resolving anything.
            if (lastTask) {
                waitTask->addDependency(lastTask);
            }
            this->setLastRenderTask(proxy.get(), waitTask.get());
        }
        fDAG.add(waitTask);
    } else {
        if (fActiveOpsTask && (fActiveOpsTask->target(0).proxy() == proxy.get())) {
            SkASSERT(this->getLastRenderTask(proxy.get()) == fActiveOpsTask);
            fDAG.addBeforeLast(waitTask);
            // In this case we keep the current renderTask open but just insert the new waitTask
            // before it in the list. The waitTask will never need to trigger any resolves or mip
            // map generation which is the main advantage of going through the proxy version.
            // Additionally we would've had to temporarily set the wait task as the lastRenderTask
            // on the proxy, add the dependency, and then reset the lastRenderTask to
            // fActiveOpsTask. Additionally we make the waitTask depend on all of fActiveOpsTask
            // dependencies so that we don't unnecessarily reorder the waitTask before them.
            // Note: Any previous Ops already in fActiveOpsTask will get blocked by the wait
            // semaphore even though they don't need to be for correctness.

            // Make sure we add the dependencies of fActiveOpsTask to waitTask first or else we'll
            // get a circular self dependency of waitTask on waitTask.
            waitTask->addDependenciesFromOtherTask(fActiveOpsTask);
            fActiveOpsTask->addDependency(waitTask.get());
        } else {
            // In this case we just close the previous RenderTask and start and append the waitTask
            // to the DAG. Since it is the last task now we call setLastRenderTask on the proxy. If
            // there is a lastTask on the proxy we make waitTask depend on that task. This
            // dependency isn't strictly needed but it does keep the DAG from reordering the
            // waitTask earlier and blocking more tasks.
            if (GrRenderTask* lastTask = this->getLastRenderTask(proxy.get())) {
                waitTask->addDependency(lastTask);
            }
            this->setLastRenderTask(proxy.get(), waitTask.get());
            this->closeRenderTasksForNewRenderTask(proxy.get());
            fDAG.add(waitTask);
        }
    }
    waitTask->makeClosed(caps);

    SkDEBUGCODE(this->validate());
}

void GrDrawingManager::newTransferFromRenderTask(sk_sp<GrSurfaceProxy> srcProxy,
                                                 const SkIRect& srcRect,
                                                 GrColorType surfaceColorType,
                                                 GrColorType dstColorType,
                                                 sk_sp<GrGpuBuffer> dstBuffer,
                                                 size_t dstOffset) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);
    // This copies from srcProxy to dstBuffer so it doesn't have a real target.
    this->closeRenderTasksForNewRenderTask(nullptr);

    GrRenderTask* task = fDAG.add(sk_make_sp<GrTransferFromRenderTask>(
            srcProxy, srcRect, surfaceColorType, dstColorType,
            std::move(dstBuffer), dstOffset));

    const GrCaps& caps = *fContext->priv().caps();

    // We always say GrMipmapped::kNo here since we are always just copying from the base layer. We
    // don't need to make sure the whole mip map chain is valid.
    task->addDependency(this, srcProxy.get(), GrMipmapped::kNo,
                        GrTextureResolveManager(this), caps);
    task->makeClosed(caps);

    // We have closed the previous active opsTask, but since a new opsTask isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
}

bool GrDrawingManager::newCopyRenderTask(GrSurfaceProxyView srcView,
                                         const SkIRect& srcRect,
                                         GrSurfaceProxyView dstView,
                                         const SkIPoint& dstPoint) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    this->closeRenderTasksForNewRenderTask(dstView.proxy());
    const GrCaps& caps = *fContext->priv().caps();

    GrSurfaceProxy* srcProxy = srcView.proxy();

    GrRenderTask* task =
            fDAG.add(GrCopyRenderTask::Make(this, std::move(srcView), srcRect, std::move(dstView),
                                            dstPoint, &caps));
    if (!task) {
        return false;
    }

    // We always say GrMipmapped::kNo here since we are always just copying from the base layer to
    // another base layer. We don't need to make sure the whole mip map chain is valid.
    task->addDependency(this, srcProxy, GrMipmapped::kNo, GrTextureResolveManager(this), caps);
    task->makeClosed(caps);

    // We have closed the previous active opsTask, but since a new opsTask isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
    return true;
}

/*
 * This method finds a path renderer that can draw the specified path on
 * the provided target.
 * Due to its expense, the software path renderer has been split out so it
 * can be individually allowed/disallowed via the "allowSW" boolean.
 */
GrPathRenderer* GrDrawingManager::getPathRenderer(const GrPathRenderer::CanDrawPathArgs& args,
                                                  bool allowSW,
                                                  GrPathRendererChain::DrawType drawType,
                                                  GrPathRenderer::StencilSupport* stencilSupport) {
    if (!fPathRendererChain) {
        fPathRendererChain =
                std::make_unique<GrPathRendererChain>(fContext, fOptionsForPathRendererChain);
    }

    GrPathRenderer* pr = fPathRendererChain->getPathRenderer(args, drawType, stencilSupport);
    if (!pr && allowSW) {
        auto swPR = this->getSoftwarePathRenderer();
        if (GrPathRenderer::CanDrawPath::kNo != swPR->canDrawPath(args)) {
            pr = swPR;
        }
    }

#if GR_PATH_RENDERER_SPEW
    if (pr) {
        SkDebugf("getPathRenderer: %s\n", pr->name());
    }
#endif

    return pr;
}

GrPathRenderer* GrDrawingManager::getSoftwarePathRenderer() {
    if (!fSoftwarePathRenderer) {
        fSoftwarePathRenderer.reset(
                new GrSoftwarePathRenderer(fContext->priv().proxyProvider(),
                                           fOptionsForPathRendererChain.fAllowPathMaskCaching));
    }
    return fSoftwarePathRenderer.get();
}

GrCoverageCountingPathRenderer* GrDrawingManager::getCoverageCountingPathRenderer() {
    if (!fPathRendererChain) {
        fPathRendererChain =
                std::make_unique<GrPathRendererChain>(fContext, fOptionsForPathRendererChain);
    }
    return fPathRendererChain->getCoverageCountingPathRenderer();
}

void GrDrawingManager::flushIfNecessary() {
    auto direct = fContext->asDirectContext();
    if (!direct) {
        return;
    }

    auto resourceCache = direct->priv().getResourceCache();
    if (resourceCache && resourceCache->requestsFlush()) {
        if (this->flush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess, GrFlushInfo(),
                        nullptr)) {
            this->submitToGpu(false);
        }
        resourceCache->purgeAsNeeded();
    }
}
947