blob: bef01b77c48144b038d0890d53e3ccee67d6ef30 [file] [log] [blame]
/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7
8#ifndef GrBatch_DEFINED
9#define GrBatch_DEFINED
10
bungeman2c4bd072016-04-08 06:58:51 -070011#include "../private/SkAtomics.h"
joshualittdbe1e6f2015-07-16 08:12:45 -070012#include "GrNonAtomicRef.h"
bsalomon88cf17d2016-07-08 06:40:56 -070013#include "SkMatrix.h"
bsalomon16b99132015-08-13 14:55:50 -070014#include "SkRect.h"
bsalomon53469832015-08-18 09:20:09 -070015#include "SkString.h"
joshualitt4d8da812015-01-28 12:53:54 -080016
bungeman2c4bd072016-04-08 06:58:51 -070017#include <new>
18
bsalomon16b99132015-08-13 14:55:50 -070019class GrCaps;
egdaniel9cb63402016-06-23 08:37:05 -070020class GrGpuCommandBuffer;
bsalomon53469832015-08-18 09:20:09 -070021class GrBatchFlushState;
bsalomon6dea83f2015-12-03 12:58:06 -080022class GrRenderTarget;
joshualitt4d8da812015-01-28 12:53:54 -080023
/**
 * GrBatch is the base class for all Ganesh deferred geometry generators. To facilitate
 * reorderable batching, Ganesh does not generate geometry inline with draw calls. Instead, it
 * captures the arguments to the draw and then generates the geometry on demand. This gives GrBatch
 * subclasses complete freedom to decide how / what they can batch.
 *
 * Batches are created when GrContext processes a draw call. Batches of the same subclass may be
 * merged using combineIfPossible. When two batches merge, one takes on the union of the data
 * and the other is left empty. The merged batch becomes responsible for drawing the data from both
 * of the original batches.
 *
 * If there are any possible optimizations which might require knowing more about the full state of
 * the draw, i.e. whether or not the GrBatch is allowed to tweak alpha for coverage, then this
 * information will be communicated to the GrBatch prior to geometry generation.
 *
 * The bounds of the batch must contain all the vertices in device space *irrespective* of the clip.
 * The bounds are used in determining which clip elements must be applied and thus the bounds cannot
 * in turn depend upon the clip.
 */
// Debug-logging switch for batch processing. Flip GR_BATCH_SPEW to 1 to enable:
//  - GrBATCH_INFO(...): printf-style logging routed to SkDebugf.
//  - GrBATCH_SPEW(code): compiles `code` in only when spew is enabled.
// When disabled (the default), both macros expand to nothing, so there is zero
// runtime or code-size cost in normal builds.
#define GR_BATCH_SPEW 0
#if GR_BATCH_SPEW
    #define GrBATCH_INFO(...) SkDebugf(__VA_ARGS__)
    #define GrBATCH_SPEW(code) code
#else
    #define GrBATCH_SPEW(code)
    #define GrBATCH_INFO(...)
#endif
joshualitt4d8da812015-01-28 12:53:54 -080051
// A helper macro to generate a class static id.
// Intended for use inside the body of a GrBatch subclass: it defines a static
// ClassID() accessor whose value is generated once (on first call, via the
// function-local static) from GrBatch::GenBatchClassID(). The resulting id is
// what classID()/cast<T>() use to tell batch subclasses apart.
#define DEFINE_BATCH_CLASS_ID \
    static uint32_t ClassID() { \
        static uint32_t kClassID = GenBatchClassID(); \
        return kClassID; \
    }
58
class GrBatch : public GrNonAtomicRef<GrBatch> {
public:
    // classID must be a value produced by GenBatchClassID() (normally via the
    // DEFINE_BATCH_CLASS_ID macro in the subclass).
    GrBatch(uint32_t classID);
    virtual ~GrBatch();

    /** Human-readable name of the concrete batch type, used for debugging/spew. */
    virtual const char* name() const = 0;

    /**
     * Attempts to merge `that` into this batch. Only batches of the same concrete
     * class (same classID) are ever considered; the subclass-specific merge logic
     * lives in onCombineIfPossible. Returns true if the merge happened, in which
     * case this batch is responsible for drawing `that`'s data as well.
     */
    bool combineIfPossible(GrBatch* that, const GrCaps& caps) {
        if (this->classID() != that->classID()) {
            return false;
        }

        return this->onCombineIfPossible(that, caps);
    }

    /** Device-space bounds. Only valid after setBounds/setTransformedBounds has run
        (asserted via the debug-only kUninitialized_BoundsFlag). */
    const SkRect& bounds() const {
        SkASSERT(kUninitialized_BoundsFlag != fBoundsFlags);
        return fBounds;
    }

    /** True if the batch's geometry may extend beyond fBounds for non-MSAA AA bloat. */
    bool hasAABloat() const {
        SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag);
        return SkToBool(fBoundsFlags & kAABloat_BoundsFlag);
    }

    /** True if the batch's geometry has zero area (hairlines or points). */
    bool hasZeroArea() const {
        SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag);
        return SkToBool(fBoundsFlags & kZeroArea_BoundsFlag);
    }

    // Custom allocation for batches; defined out-of-line (in the .cpp).
    // NOTE(review): presumably these route through a pooled/arena allocator —
    // confirm against the implementation file.
    void* operator new(size_t size);
    void operator delete(void* target);

    // Placement forms simply forward to the global placement operators so that
    // placement-new of a GrBatch still works alongside the custom operators above.
    void* operator new(size_t size, void* placement) {
        return ::operator new(size, placement);
    }
    void operator delete(void* target, void* placement) {
        ::operator delete(target, placement);
    }

    /**
     * Helper for safely down-casting to a GrBatch subclass. Safety is checked in
     * debug builds by comparing T::ClassID() against this batch's classID().
     */
    template <typename T> const T& cast() const {
        SkASSERT(T::ClassID() == this->classID());
        return *static_cast<const T*>(this);
    }

    template <typename T> T* cast() {
        SkASSERT(T::ClassID() == this->classID());
        return static_cast<T*>(this);
    }

    uint32_t classID() const { SkASSERT(kIllegalBatchID != fClassID); return fClassID; }

    // We lazily initialize the uniqueID because currently the only user is GrAuditTrail.
    // mutable fUniqueID makes this const accessor self-populating on first call.
    uint32_t uniqueID() const {
        if (kIllegalBatchID == fUniqueID) {
            fUniqueID = GenBatchID();
        }
        return fUniqueID;
    }
    SkDEBUGCODE(bool isUsed() const { return fUsed; })

    /** Called prior to drawing. The batch should perform any resource creation necessary
        to quickly issue its draw when draw is called. */
    void prepare(GrBatchFlushState* state) { this->onPrepare(state); }

    /** Issues the batch's commands to GrGpu. */
    void draw(GrBatchFlushState* state, const SkRect& bounds) { this->onDraw(state, bounds); }

    /** Used to block batching across render target changes. Remove this once we store
        GrBatches for different RTs in different targets. */
    virtual uint32_t renderTargetUniqueID() const = 0;

    /** Used for spewing information about batches when debugging. */
    virtual SkString dumpInfo() const {
        SkString string;
        string.appendf("BatchBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n",
                       fBounds.fLeft, fBounds.fTop, fBounds.fRight, fBounds.fBottom);
        return string;
    }

    /** Can remove this when multi-draw-buffer lands. */
    virtual GrRenderTarget* renderTarget() const = 0;

protected:
    /**
     * Indicates that the batch will produce geometry that extends beyond its bounds for the
     * purpose of ensuring that the fragment shader runs on partially covered pixels for
     * non-MSAA antialiasing.
     */
    enum class HasAABloat {
        kYes,
        kNo
    };
    /**
     * Indicates that the geometry represented by the batch has zero area (i.e. it is hairline
     * or points).
     */
    enum class IsZeroArea {
        kYes,
        kNo
    };

    // Subclasses must set bounds (and the AA-bloat/zero-area flags) before the
    // batch is used; the accessors above assert this in debug builds.
    void setBounds(const SkRect& newBounds, HasAABloat aabloat, IsZeroArea zeroArea) {
        fBounds = newBounds;
        this->setBoundsFlags(aabloat, zeroArea);
    }
    // Sets bounds to srcBounds mapped through matrix m (device-space result).
    void setTransformedBounds(const SkRect& srcBounds, const SkMatrix& m,
                              HasAABloat aabloat, IsZeroArea zeroArea) {
        m.mapRect(&fBounds, srcBounds);
        this->setBoundsFlags(aabloat, zeroArea);
    }

    // Unions `that`'s bounds into ours and ORs in its flags — used when two
    // batches are merged via combineIfPossible.
    void joinBounds(const GrBatch& that) {
        if (that.hasAABloat()) {
            fBoundsFlags |= kAABloat_BoundsFlag;
        }
        if (that.hasZeroArea()) {
            fBoundsFlags |= kZeroArea_BoundsFlag;
        }
        // `return` of a void expression — kept as-is; equivalent to a plain call.
        return fBounds.joinPossiblyEmptyRect(that.fBounds);
    }

    // Copies both the rect and the flags wholesale from `that`.
    void replaceBounds(const GrBatch& that) {
        fBounds = that.fBounds;
        fBoundsFlags = that.fBoundsFlags;
    }

    static uint32_t GenBatchClassID() { return GenID(&gCurrBatchClassID); }

private:
    virtual bool onCombineIfPossible(GrBatch*, const GrCaps& caps) = 0;

    virtual void onPrepare(GrBatchFlushState*) = 0;
    virtual void onDraw(GrBatchFlushState*, const SkRect& bounds) = 0;

    // Thread-safe monotonically increasing id generator shared by the class-id
    // and unique-id counters. 0 is reserved as kIllegalBatchID.
    static uint32_t GenID(int32_t* idCounter) {
        // The atomic inc returns the old value not the incremented value. So we add
        // 1 to the returned value.
        uint32_t id = static_cast<uint32_t>(sk_atomic_inc(idCounter)) + 1;
        if (!id) {
            SkFAIL("This should never wrap as it should only be called once for each GrBatch "
                   "subclass.");
        }
        return id;
    }

    void setBoundsFlags(HasAABloat aabloat, IsZeroArea zeroArea) {
        fBoundsFlags = 0;
        fBoundsFlags |= (HasAABloat::kYes == aabloat) ? kAABloat_BoundsFlag : 0;
        fBoundsFlags |= (IsZeroArea::kYes == zeroArea) ? kZeroArea_BoundsFlag : 0;
    }

    enum {
        kIllegalBatchID = 0,
    };

    enum BoundsFlags {
        kAABloat_BoundsFlag = 0x1,
        kZeroArea_BoundsFlag = 0x2,
        SkDEBUGCODE(kUninitialized_BoundsFlag = 0x4)
    };

    SkDEBUGCODE(bool fUsed;)
    // NOTE(review): the ctor takes a uint32_t classID but it is stored in 16 bits;
    // this assumes generated class ids never exceed 0xFFFF — confirm the ctor asserts this.
    const uint16_t fClassID;
    uint16_t fBoundsFlags;

    static uint32_t GenBatchID() { return GenID(&gCurrBatchUniqueID); }
    mutable uint32_t fUniqueID;   // lazily assigned by uniqueID()
    SkRect fBounds;

    // Process-wide counters backing GenBatchID()/GenBatchClassID().
    static int32_t gCurrBatchUniqueID;
    static int32_t gCurrBatchClassID;
};
234
joshualitt4d8da812015-01-28 12:53:54 -0800235#endif