blob: 93c5de8890b402cebdfa080f632ebb9a18c15487 [file] [log] [blame]
/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7
Brian Salomon53e4c3c2016-12-21 11:38:53 -05008#ifndef GrOp_DEFINED
9#define GrOp_DEFINED
joshualitt4d8da812015-01-28 12:53:54 -080010
#include "include/core/SkMatrix.h"
#include "include/core/SkRect.h"
#include "include/core/SkString.h"
#include "include/gpu/GrRecordingContext.h"
#include "src/gpu/GrGpuResource.h"
#include "src/gpu/GrMemoryPool.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/GrTracing.h"
#include "src/gpu/GrXferProcessor.h"

#include <atomic>
#include <cstddef>
#include <functional>
#include <memory>
#include <new>
joshualitt4d8da812015-01-28 12:53:54 -080022
// Forward declarations: these types are only used by pointer/reference in this header.
class GrAppliedClip;
class GrCaps;
class GrOpFlushState;
class GrOpsRenderPass;
class GrPaint;
joshualitt4d8da812015-01-28 12:53:54 -080028
/**
 * GrOp is the base class for all Ganesh deferred GPU operations. To facilitate reordering and to
 * minimize draw calls, Ganesh does not generate geometry inline with draw calls. Instead, it
 * captures the arguments to the draw and then generates the geometry when flushing. This gives GrOp
 * subclasses complete freedom to decide how/when to combine in order to produce fewer draw calls
 * and minimize state changes.
 *
 * Ops of the same subclass may be merged or chained using combineIfPossible. When two ops merge,
 * one takes on the union of the data and the other is left empty. The merged op becomes responsible
 * for drawing the data from both the original ops. When ops are chained each op maintains its own
 * data but they are linked in a list and the head op becomes responsible for executing the work for
 * the chain.
 *
 * It is required that chainability is transitive. Moreover, if op A is able to merge with B then
 * it must be the case that any op that can chain with A will either merge or chain with any op
 * that can chain to B.
 *
 * The bounds of the op must contain all the vertices in device space *irrespective* of the clip.
 * The bounds are used in determining which clip elements must be applied and thus the bounds cannot
 * in turn depend upon the clip.
 */
// Debug-only spew for op diagnostics; compiled out entirely when GR_OP_SPEW is 0.
#define GR_OP_SPEW 0
#if GR_OP_SPEW
    #define GrOP_SPEW(code) code
    #define GrOP_INFO(...) SkDebugf(__VA_ARGS__)
#else
    #define GrOP_SPEW(code)
    #define GrOP_INFO(...)
#endif

// Print out op information at flush time
#define GR_FLUSH_TIME_OP_SPEW 0

// A helper macro to generate a class static id
#define DEFINE_OP_CLASS_ID \
    static uint32_t ClassID() { \
        static uint32_t kClassID = GenOpClassID(); \
        return kClassID; \
    }
Brian Salomonf8334782017-01-03 09:42:58 -050069class GrOp : private SkNoncopyable {
joshualitt4d8da812015-01-28 12:53:54 -080070public:
Herb Derbyc9a24c92020-12-01 16:59:40 -050071 #if defined(GR_OP_ALLOCATE_USE_POOL)
Herb Derbyc76d4092020-10-07 16:46:15 -040072 struct DeleteFromPool {
73 DeleteFromPool() : fPool{nullptr} {}
Herb Derby2acd43d2020-10-27 13:38:32 -040074 DeleteFromPool(GrMemoryPool* pool) : fPool{pool} {}
Herb Derbyc76d4092020-10-07 16:46:15 -040075 void operator() (GrOp* op);
Herb Derby2acd43d2020-10-27 13:38:32 -040076 GrMemoryPool* fPool;
Herb Derbyc76d4092020-10-07 16:46:15 -040077 };
78 using Owner = std::unique_ptr<GrOp, DeleteFromPool>;
Herb Derbyc9a24c92020-12-01 16:59:40 -050079 #else
80 using Owner = std::unique_ptr<GrOp>;
Herb Derbyc76d4092020-10-07 16:46:15 -040081 #endif
82
83 template<typename Op, typename... Args>
84 static Owner Make(GrRecordingContext* context, Args&&... args) {
85 return MakeWithExtraMemory<Op>(context, 0, std::forward<Args>(args)...);
86 }
87
88 template<typename Op, typename... Args>
89 static Owner MakeWithProcessorSet(
90 GrRecordingContext* context, const SkPMColor4f& color,
91 GrPaint&& paint, Args&&... args);
92
Herb Derbyc9a24c92020-12-01 16:59:40 -050093 #if defined(GR_OP_ALLOCATE_USE_POOL)
Herb Derbyc76d4092020-10-07 16:46:15 -040094 template<typename Op, typename... Args>
95 static Owner MakeWithExtraMemory(
96 GrRecordingContext* context, size_t extraSize, Args&&... args) {
Herb Derby2acd43d2020-10-27 13:38:32 -040097 GrMemoryPool* pool = context->priv().opMemoryPool();
Herb Derbyc76d4092020-10-07 16:46:15 -040098 void* mem = pool->allocate(sizeof(Op) + extraSize);
99 GrOp* op = new (mem) Op(std::forward<Args>(args)...);
100 return Owner{op, pool};
101 }
Herb Derbyc9a24c92020-12-01 16:59:40 -0500102 #else
103 template<typename Op, typename... Args>
104 static Owner MakeWithExtraMemory(
105 GrRecordingContext* context, size_t extraSize, Args&&... args) {
106 void* bytes = ::operator new(sizeof(Op) + extraSize);
107 return Owner{new (bytes) Op(std::forward<Args>(args)...)};
108 }
Herb Derbyc76d4092020-10-07 16:46:15 -0400109 #endif
110
Brian Salomon588cec72018-11-14 13:56:37 -0500111 virtual ~GrOp() = default;
joshualitt4d8da812015-01-28 12:53:54 -0800112
113 virtual const char* name() const = 0;
joshualitt4d8da812015-01-28 12:53:54 -0800114
Brian Salomon7e67dca2020-07-21 09:27:25 -0400115 using VisitProxyFunc = std::function<void(GrSurfaceProxy*, GrMipmapped)>;
Robert Phillipsb493eeb2017-09-13 13:10:52 -0400116
Chris Dalton1706cbf2019-05-21 19:35:29 -0600117 virtual void visitProxies(const VisitProxyFunc&) const {
Robert Phillipsb493eeb2017-09-13 13:10:52 -0400118 // This default implementation assumes the op has no proxies
119 }
120
Brian Salomon7eae3e02018-08-07 14:02:38 +0000121 enum class CombineResult {
122 /**
123 * The op that combineIfPossible was called on now represents its own work plus that of
Brian Salomond25f5bc2018-08-08 11:25:17 -0400124 * the passed op. The passed op should be destroyed without being flushed. Currently it
125 * is not legal to merge an op passed to combineIfPossible() the passed op is already in a
126 * chain (though the op on which combineIfPossible() was called may be).
Brian Salomon7eae3e02018-08-07 14:02:38 +0000127 */
128 kMerged,
129 /**
Brian Salomond25f5bc2018-08-08 11:25:17 -0400130 * The caller *may* (but is not required) to chain these ops together. If they are chained
131 * then prepare() and execute() will be called on the head op but not the other ops in the
132 * chain. The head op will prepare and execute on behalf of all the ops in the chain.
133 */
134 kMayChain,
135 /**
Brian Salomon7eae3e02018-08-07 14:02:38 +0000136 * The ops cannot be combined.
137 */
138 kCannotCombine
139 };
140
Michael Ludwig28b0c5d2019-12-19 14:51:00 -0500141 // The arenas are the same as what was available when the op was created.
Herb Derbye25c3002020-10-27 15:57:27 -0400142 CombineResult combineIfPossible(GrOp* that, SkArenaAlloc* alloc, const GrCaps& caps);
joshualitt4d8da812015-01-28 12:53:54 -0800143
bsalomon88cf17d2016-07-08 06:40:56 -0700144 const SkRect& bounds() const {
145 SkASSERT(kUninitialized_BoundsFlag != fBoundsFlags);
146 return fBounds;
147 }
148
Brian Salomon9e50f7b2017-03-06 12:02:34 -0500149 void setClippedBounds(const SkRect& clippedBounds) {
150 fBounds = clippedBounds;
151 // The clipped bounds already incorporate any effect of the bounds flags.
152 fBoundsFlags = 0;
153 }
154
bsalomon88cf17d2016-07-08 06:40:56 -0700155 bool hasAABloat() const {
156 SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag);
157 return SkToBool(fBoundsFlags & kAABloat_BoundsFlag);
158 }
159
160 bool hasZeroArea() const {
161 SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag);
162 return SkToBool(fBoundsFlags & kZeroArea_BoundsFlag);
163 }
Herb Derbyc9a24c92020-12-01 16:59:40 -0500164
165 #if defined(GR_OP_ALLOCATE_USE_POOL)
166 #if defined(SK_DEBUG)
167 // All GrOp-derived classes should be allocated in and deleted from a GrMemoryPool
168 void* operator new(size_t size);
169 void operator delete(void* target);
170
171 void* operator new(size_t size, void* placement) {
172 return ::operator new(size, placement);
173 }
174 void operator delete(void* target, void* placement) {
175 ::operator delete(target, placement);
176 }
177 #endif
178 #else
Herb Derbyd6cfe722020-10-05 15:50:47 -0400179 // GrOps are allocated using ::operator new in the GrMemoryPool. Doing this style of memory
180 // allocation defeats the delete with size optimization.
181 void* operator new(size_t) { SK_ABORT("All GrOps are created by placement new."); }
182 void* operator new(size_t, void* p) { return p; }
183 void operator delete(void* p) { ::operator delete(p); }
Herb Derbyd6cfe722020-10-05 15:50:47 -0400184 #endif
joshualitt4d8da812015-01-28 12:53:54 -0800185
186 /**
Brian Salomon25a88092016-12-01 09:36:50 -0500187 * Helper for safely down-casting to a GrOp subclass
bsalomonabd30f52015-08-13 13:34:48 -0700188 */
reed1b55a962015-09-17 20:16:13 -0700189 template <typename T> const T& cast() const {
190 SkASSERT(T::ClassID() == this->classID());
191 return *static_cast<const T*>(this);
192 }
193
194 template <typename T> T* cast() {
195 SkASSERT(T::ClassID() == this->classID());
196 return static_cast<T*>(this);
197 }
joshualitt4d8da812015-01-28 12:53:54 -0800198
Brian Salomon25a88092016-12-01 09:36:50 -0500199 uint32_t classID() const { SkASSERT(kIllegalOpID != fClassID); return fClassID; }
joshualitt4d8da812015-01-28 12:53:54 -0800200
joshualitt08e65e72016-03-08 09:31:15 -0800201 // We lazily initialize the uniqueID because currently the only user is GrAuditTrail
halcanary9d524f22016-03-29 09:03:52 -0700202 uint32_t uniqueID() const {
Brian Salomon25a88092016-12-01 09:36:50 -0500203 if (kIllegalOpID == fUniqueID) {
204 fUniqueID = GenOpID();
joshualitt08e65e72016-03-08 09:31:15 -0800205 }
halcanary9d524f22016-03-29 09:03:52 -0700206 return fUniqueID;
joshualitt08e65e72016-03-08 09:31:15 -0800207 }
joshualittca1f07e2015-08-07 08:11:19 -0700208
Brian Salomonbde42852016-12-21 11:37:49 -0500209 /**
Robert Phillips7327c9d2019-10-08 16:32:56 -0400210 * This can optionally be called before 'prepare' (but after sorting). Each op that overrides
211 * onPrePrepare must be prepared to handle both cases (when onPrePrepare has been called
212 * ahead of time and when it has not been called).
213 */
Adlai Hollere2296f72020-11-19 13:41:26 -0500214 void prePrepare(GrRecordingContext* context, const GrSurfaceProxyView& dstView,
215 GrAppliedClip* clip, const GrXferProcessor::DstProxyView& dstProxyView,
Greg Daniel42dbca52020-11-20 10:22:43 -0500216 GrXferBarrierFlags renderPassXferBarriers, GrLoadOp colorLoadOp) {
Greg Daniel0a0ad5b2021-01-29 22:49:30 -0500217 TRACE_EVENT0("skia.gpu", name());
Greg Daniel42dbca52020-11-20 10:22:43 -0500218 this->onPrePrepare(context, dstView, clip, dstProxyView, renderPassXferBarriers,
219 colorLoadOp);
Robert Phillips61fc7992019-10-22 11:58:17 -0400220 }
Robert Phillips7327c9d2019-10-08 16:32:56 -0400221
222 /**
Brian Salomonbde42852016-12-21 11:37:49 -0500223 * Called prior to executing. The op should perform any resource creation or data transfers
224 * necessary before execute() is called.
225 */
Greg Daniel0a0ad5b2021-01-29 22:49:30 -0500226 void prepare(GrOpFlushState* state) {
227 TRACE_EVENT0("skia.gpu", name());
228 this->onPrepare(state);
229 }
bsalomon53469832015-08-18 09:20:09 -0700230
Brian Salomon25a88092016-12-01 09:36:50 -0500231 /** Issues the op's commands to GrGpu. */
Brian Salomon588cec72018-11-14 13:56:37 -0500232 void execute(GrOpFlushState* state, const SkRect& chainBounds) {
Brian Salomon5f394272019-07-02 14:07:49 -0400233 TRACE_EVENT0("skia.gpu", name());
Brian Salomon588cec72018-11-14 13:56:37 -0500234 this->onExecute(state, chainBounds);
Xiao Yu97126012018-04-25 18:11:44 -0700235 }
bsalomon53469832015-08-18 09:20:09 -0700236
Brian Salomon25a88092016-12-01 09:36:50 -0500237 /** Used for spewing information about ops when debugging. */
John Stiles8d9bf642020-08-12 15:07:45 -0400238#if GR_TEST_UTILS
John Stiles8dd1e222020-08-12 19:06:24 -0400239 virtual SkString dumpInfo() const final {
240 return SkStringPrintf("%s\nOpBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]",
241 this->onDumpInfo().c_str(), fBounds.fLeft, fBounds.fTop,
242 fBounds.fRight, fBounds.fBottom);
robertphillips44fbc792016-06-29 06:56:12 -0700243 }
Brian Osman9a390ac2018-11-12 09:47:48 -0500244#endif
bsalomon53469832015-08-18 09:20:09 -0700245
Brian Salomond25f5bc2018-08-08 11:25:17 -0400246 /**
247 * A helper for iterating over an op chain in a range for loop that also downcasts to a GrOp
248 * subclass. E.g.:
249 * for (MyOpSubClass& op : ChainRange<MyOpSubClass>(this)) {
250 * // ...
251 * }
252 */
Brian Salomon588cec72018-11-14 13:56:37 -0500253 template <typename OpSubclass = GrOp> class ChainRange {
Brian Salomond25f5bc2018-08-08 11:25:17 -0400254 private:
255 class Iter {
256 public:
Brian Salomon588cec72018-11-14 13:56:37 -0500257 explicit Iter(const OpSubclass* head) : fCurr(head) {}
258 inline Iter& operator++() {
259 return *this = Iter(static_cast<const OpSubclass*>(fCurr->nextInChain()));
260 }
261 const OpSubclass& operator*() const { return *fCurr; }
Brian Salomond25f5bc2018-08-08 11:25:17 -0400262 bool operator!=(const Iter& that) const { return fCurr != that.fCurr; }
263
264 private:
Brian Salomon588cec72018-11-14 13:56:37 -0500265 const OpSubclass* fCurr;
Brian Salomond25f5bc2018-08-08 11:25:17 -0400266 };
Brian Salomon588cec72018-11-14 13:56:37 -0500267 const OpSubclass* fHead;
Brian Salomond25f5bc2018-08-08 11:25:17 -0400268
269 public:
Brian Salomon588cec72018-11-14 13:56:37 -0500270 explicit ChainRange(const OpSubclass* head) : fHead(head) {}
Brian Salomond25f5bc2018-08-08 11:25:17 -0400271 Iter begin() { return Iter(fHead); }
272 Iter end() { return Iter(nullptr); }
273 };
274
Brian Salomon588cec72018-11-14 13:56:37 -0500275 /**
276 * Concatenates two op chains. This op must be a tail and the passed op must be a head. The ops
277 * must be of the same subclass.
278 */
Herb Derbyc76d4092020-10-07 16:46:15 -0400279 void chainConcat(GrOp::Owner);
Brian Salomond25f5bc2018-08-08 11:25:17 -0400280 /** Returns true if this is the head of a chain (including a length 1 chain). */
Brian Salomon588cec72018-11-14 13:56:37 -0500281 bool isChainHead() const { return !fPrevInChain; }
Brian Salomond25f5bc2018-08-08 11:25:17 -0400282 /** Returns true if this is the tail of a chain (including a length 1 chain). */
283 bool isChainTail() const { return !fNextInChain; }
Brian Salomond25f5bc2018-08-08 11:25:17 -0400284 /** The next op in the chain. */
Brian Salomon588cec72018-11-14 13:56:37 -0500285 GrOp* nextInChain() const { return fNextInChain.get(); }
286 /** The previous op in the chain. */
287 GrOp* prevInChain() const { return fPrevInChain; }
288 /**
289 * Cuts the chain after this op. The returned op is the op that was previously next in the
290 * chain or null if this was already a tail.
291 */
Herb Derbyc76d4092020-10-07 16:46:15 -0400292 GrOp::Owner cutChain();
Brian Salomon588cec72018-11-14 13:56:37 -0500293 SkDEBUGCODE(void validateChain(GrOp* expectedTail = nullptr) const);
Brian Salomond25f5bc2018-08-08 11:25:17 -0400294
Ethan Nicholas029b22c2018-10-18 16:49:56 -0400295#ifdef SK_DEBUG
296 virtual void validate() const {}
297#endif
298
joshualitt4d8da812015-01-28 12:53:54 -0800299protected:
Brian Salomond25f5bc2018-08-08 11:25:17 -0400300 GrOp(uint32_t classID);
301
bsalomon88cf17d2016-07-08 06:40:56 -0700302 /**
Brian Salomon25a88092016-12-01 09:36:50 -0500303 * Indicates that the op will produce geometry that extends beyond its bounds for the
bsalomon88cf17d2016-07-08 06:40:56 -0700304 * purpose of ensuring that the fragment shader runs on partially covered pixels for
305 * non-MSAA antialiasing.
306 */
Chris Dalton3b51df12017-11-27 14:33:06 -0700307 enum class HasAABloat : bool {
308 kNo = false,
309 kYes = true
bsalomon88cf17d2016-07-08 06:40:56 -0700310 };
311 /**
Greg Daniel5faf4742019-10-01 15:14:44 -0400312 * Indicates that the geometry being drawn in a hairline stroke. A point that is drawn in device
313 * space is also considered a hairline.
bsalomon88cf17d2016-07-08 06:40:56 -0700314 */
Greg Daniel5faf4742019-10-01 15:14:44 -0400315 enum class IsHairline : bool {
Chris Dalton3b51df12017-11-27 14:33:06 -0700316 kNo = false,
317 kYes = true
bsalomon88cf17d2016-07-08 06:40:56 -0700318 };
Robert Phillips65a88fa2017-08-08 08:36:22 -0400319
Greg Daniel5faf4742019-10-01 15:14:44 -0400320 void setBounds(const SkRect& newBounds, HasAABloat aabloat, IsHairline zeroArea) {
bsalomon88cf17d2016-07-08 06:40:56 -0700321 fBounds = newBounds;
322 this->setBoundsFlags(aabloat, zeroArea);
323 }
324 void setTransformedBounds(const SkRect& srcBounds, const SkMatrix& m,
Greg Daniel5faf4742019-10-01 15:14:44 -0400325 HasAABloat aabloat, IsHairline zeroArea) {
bsalomon88cf17d2016-07-08 06:40:56 -0700326 m.mapRect(&fBounds, srcBounds);
327 this->setBoundsFlags(aabloat, zeroArea);
328 }
Robert Phillips65a88fa2017-08-08 08:36:22 -0400329 void makeFullScreen(GrSurfaceProxy* proxy) {
Brian Salomon9f2b86c2019-10-22 10:37:46 -0400330 this->setBounds(proxy->getBoundsRect(), HasAABloat::kNo, IsHairline::kNo);
Robert Phillips65a88fa2017-08-08 08:36:22 -0400331 }
joshualitt99c7c072015-05-01 13:43:30 -0700332
Brian Salomonb41417f2018-10-24 08:58:48 -0400333 static uint32_t GenOpClassID() { return GenID(&gCurrOpClassID); }
334
335private:
Brian Salomon25a88092016-12-01 09:36:50 -0500336 void joinBounds(const GrOp& that) {
bsalomon88cf17d2016-07-08 06:40:56 -0700337 if (that.hasAABloat()) {
338 fBoundsFlags |= kAABloat_BoundsFlag;
339 }
340 if (that.hasZeroArea()) {
341 fBoundsFlags |= kZeroArea_BoundsFlag;
342 }
343 return fBounds.joinPossiblyEmptyRect(that.fBounds);
344 }
345
Herb Derbye25c3002020-10-27 15:57:27 -0400346 virtual CombineResult onCombineIfPossible(GrOp*, SkArenaAlloc*, const GrCaps&) {
Brian Salomon7eae3e02018-08-07 14:02:38 +0000347 return CombineResult::kCannotCombine;
348 }
bsalomonabd30f52015-08-13 13:34:48 -0700349
Robert Phillips8053c972019-11-21 10:44:53 -0500350 // TODO: the parameters to onPrePrepare mirror GrOpFlushState::OpArgs - fuse the two?
351 virtual void onPrePrepare(GrRecordingContext*,
Adlai Hollere2296f72020-11-19 13:41:26 -0500352 const GrSurfaceProxyView& writeView,
Robert Phillips8053c972019-11-21 10:44:53 -0500353 GrAppliedClip*,
Greg Danield358cbe2020-09-11 09:33:54 -0400354 const GrXferProcessor::DstProxyView&,
Greg Daniel42dbca52020-11-20 10:22:43 -0500355 GrXferBarrierFlags renderPassXferBarriers,
356 GrLoadOp colorLoadOp) = 0;
Brian Salomon742e31d2016-12-07 17:06:19 -0500357 virtual void onPrepare(GrOpFlushState*) = 0;
Brian Salomon588cec72018-11-14 13:56:37 -0500358 // If this op is chained then chainBounds is the union of the bounds of all ops in the chain.
359 // Otherwise, this op's bounds.
360 virtual void onExecute(GrOpFlushState*, const SkRect& chainBounds) = 0;
John Stilesaf366522020-08-13 09:57:34 -0400361#if GR_TEST_UTILS
362 virtual SkString onDumpInfo() const { return SkString(); }
363#endif
bsalomon53469832015-08-18 09:20:09 -0700364
Mike Klein0ec1c572018-12-04 11:52:51 -0500365 static uint32_t GenID(std::atomic<uint32_t>* idCounter) {
Adlai Holler4888cda2020-11-06 16:37:37 -0500366 uint32_t id = idCounter->fetch_add(1, std::memory_order_relaxed);
Mike Klein0ec1c572018-12-04 11:52:51 -0500367 if (id == 0) {
Ben Wagnerb4aab9a2017-08-16 10:53:04 -0400368 SK_ABORT("This should never wrap as it should only be called once for each GrOp "
Adlai Holler4888cda2020-11-06 16:37:37 -0500369 "subclass.");
bsalomonabd30f52015-08-13 13:34:48 -0700370 }
371 return id;
372 }
373
Greg Daniel5faf4742019-10-01 15:14:44 -0400374 void setBoundsFlags(HasAABloat aabloat, IsHairline zeroArea) {
bsalomon88cf17d2016-07-08 06:40:56 -0700375 fBoundsFlags = 0;
376 fBoundsFlags |= (HasAABloat::kYes == aabloat) ? kAABloat_BoundsFlag : 0;
Greg Daniel5faf4742019-10-01 15:14:44 -0400377 fBoundsFlags |= (IsHairline ::kYes == zeroArea) ? kZeroArea_BoundsFlag : 0;
bsalomon88cf17d2016-07-08 06:40:56 -0700378 }
379
bsalomonabd30f52015-08-13 13:34:48 -0700380 enum {
Brian Salomon25a88092016-12-01 09:36:50 -0500381 kIllegalOpID = 0,
bsalomonabd30f52015-08-13 13:34:48 -0700382 };
383
bsalomon88cf17d2016-07-08 06:40:56 -0700384 enum BoundsFlags {
385 kAABloat_BoundsFlag = 0x1,
386 kZeroArea_BoundsFlag = 0x2,
387 SkDEBUGCODE(kUninitialized_BoundsFlag = 0x4)
388 };
389
Adlai Holler4888cda2020-11-06 16:37:37 -0500390 Owner fNextInChain{nullptr};
Brian Salomon588cec72018-11-14 13:56:37 -0500391 GrOp* fPrevInChain = nullptr;
bsalomon88cf17d2016-07-08 06:40:56 -0700392 const uint16_t fClassID;
393 uint16_t fBoundsFlags;
394
Brian Salomon25a88092016-12-01 09:36:50 -0500395 static uint32_t GenOpID() { return GenID(&gCurrOpUniqueID); }
Brian Salomond25f5bc2018-08-08 11:25:17 -0400396 mutable uint32_t fUniqueID = SK_InvalidUniqueID;
bsalomon88cf17d2016-07-08 06:40:56 -0700397 SkRect fBounds;
398
Mike Klein0ec1c572018-12-04 11:52:51 -0500399 static std::atomic<uint32_t> gCurrOpUniqueID;
400 static std::atomic<uint32_t> gCurrOpClassID;
bsalomonabd30f52015-08-13 13:34:48 -0700401};
402
joshualitt4d8da812015-01-28 12:53:54 -0800403#endif