/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
| 7 | |
| 8 | #ifndef GrBatch_DEFINED |
| 9 | #define GrBatch_DEFINED |
| 10 | |
bungeman | 2c4bd07 | 2016-04-08 06:58:51 -0700 | [diff] [blame] | 11 | #include "../private/SkAtomics.h" |
joshualitt | dbe1e6f | 2015-07-16 08:12:45 -0700 | [diff] [blame] | 12 | #include "GrNonAtomicRef.h" |
bsalomon | 88cf17d | 2016-07-08 06:40:56 -0700 | [diff] [blame] | 13 | #include "SkMatrix.h" |
bsalomon | 16b9913 | 2015-08-13 14:55:50 -0700 | [diff] [blame] | 14 | #include "SkRect.h" |
bsalomon | 5346983 | 2015-08-18 09:20:09 -0700 | [diff] [blame] | 15 | #include "SkString.h" |
joshualitt | 4d8da81 | 2015-01-28 12:53:54 -0800 | [diff] [blame] | 16 | |
bungeman | 2c4bd07 | 2016-04-08 06:58:51 -0700 | [diff] [blame] | 17 | #include <new> |
| 18 | |
bsalomon | 16b9913 | 2015-08-13 14:55:50 -0700 | [diff] [blame] | 19 | class GrCaps; |
egdaniel | 9cb6340 | 2016-06-23 08:37:05 -0700 | [diff] [blame] | 20 | class GrGpuCommandBuffer; |
bsalomon | 5346983 | 2015-08-18 09:20:09 -0700 | [diff] [blame] | 21 | class GrBatchFlushState; |
bsalomon | 6dea83f | 2015-12-03 12:58:06 -0800 | [diff] [blame] | 22 | class GrRenderTarget; |
joshualitt | 4d8da81 | 2015-01-28 12:53:54 -0800 | [diff] [blame] | 23 | |
/**
 * GrBatch is the base class for all Ganesh deferred geometry generators. To facilitate
 * reorderable batching, Ganesh does not generate geometry inline with draw calls. Instead, it
 * captures the arguments to the draw and then generates the geometry on demand. This gives GrBatch
 * subclasses complete freedom to decide how / what they can batch.
 *
 * Batches are created when GrContext processes a draw call. Batches of the same subclass may be
 * merged using combineIfPossible. When two batches merge, one takes on the union of the data
 * and the other is left empty. The merged batch becomes responsible for drawing the data from both
 * the original batches.
 *
 * If there are any possible optimizations which might require knowing more about the full state of
 * the draw, i.e. whether or not the GrBatch is allowed to tweak alpha for coverage, then this
 * information will be communicated to the GrBatch prior to geometry generation.
 *
 * The bounds of the batch must contain all the vertices in device space *irrespective* of the clip.
 * The bounds are used in determining which clip elements must be applied and thus the bounds cannot
 * in turn depend upon the clip.
 */
// Compile-time switch for verbose batch debugging output. When GR_BATCH_SPEW is
// non-zero, GrBATCH_INFO(...) forwards to SkDebugf and GrBATCH_SPEW(code) emits
// 'code' inline; otherwise both expand to nothing, so spew statements have zero
// cost in normal builds.
#define GR_BATCH_SPEW 0
#if GR_BATCH_SPEW
#define GrBATCH_INFO(...) SkDebugf(__VA_ARGS__)
#define GrBATCH_SPEW(code) code
#else
#define GrBATCH_SPEW(code)
#define GrBATCH_INFO(...)
#endif
joshualitt | 4d8da81 | 2015-01-28 12:53:54 -0800 | [diff] [blame] | 51 | |
// A helper macro to generate a class static id.
// Paste DEFINE_BATCH_CLASS_ID into the body of each concrete GrBatch subclass to
// give it a process-unique ClassID(). The id is generated once, lazily, on first
// call via GrBatch::GenBatchClassID(). The subclass passes ClassID() to the
// GrBatch constructor; it is what combineIfPossible() compares and what cast<T>()
// asserts against. (No comments inside the macro itself: '//' would swallow the
// line-continuation backslashes.)
#define DEFINE_BATCH_CLASS_ID \
    static uint32_t ClassID() { \
        static uint32_t kClassID = GenBatchClassID(); \
        return kClassID; \
    }
| 58 | |
class GrBatch : public GrNonAtomicRef<GrBatch> {
public:
    // 'classID' identifies the concrete subclass; generate it with
    // DEFINE_BATCH_CLASS_ID / GenBatchClassID().
    GrBatch(uint32_t classID);
    virtual ~GrBatch();

    /** Human-readable subclass name, used for debugging/spew. */
    virtual const char* name() const = 0;

    /**
     * Attempts to fold 'that' into this batch. Batches of different subclasses
     * (different class ids) never combine; otherwise the decision is delegated
     * to the subclass via onCombineIfPossible(). On success the caller is
     * expected to treat this batch as responsible for both batches' data.
     */
    bool combineIfPossible(GrBatch* that, const GrCaps& caps) {
        if (this->classID() != that->classID()) {
            return false;
        }

        return this->onCombineIfPossible(that, caps);
    }

    /** Device-space bounds. Only valid after setBounds/setTransformedBounds. */
    const SkRect& bounds() const {
        SkASSERT(kUninitialized_BoundsFlag != fBoundsFlags);
        return fBounds;
    }

    /** True if the geometry extends past fBounds for non-MSAA AA (see HasAABloat). */
    bool hasAABloat() const {
        SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag);
        return SkToBool(fBoundsFlags & kAABloat_BoundsFlag);
    }

    /** True if the geometry is zero-area (hairlines or points; see IsZeroArea). */
    bool hasZeroArea() const {
        SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag);
        return SkToBool(fBoundsFlags & kZeroArea_BoundsFlag);
    }

    // Batches use a custom allocator (defined out-of-line) rather than the
    // global heap; presumably a pooled allocator — confirm in the .cpp.
    void* operator new(size_t size);
    void operator delete(void* target);

    // Placement forms simply forward to the global placement new/delete so the
    // custom allocator above doesn't hide them.
    void* operator new(size_t size, void* placement) {
        return ::operator new(size, placement);
    }
    void operator delete(void* target, void* placement) {
        ::operator delete(target, placement);
    }

    /**
     * Helper for safely down-casting to a GrBatch subclass. Debug builds assert
     * that the runtime class id matches T's.
     */
    template <typename T> const T& cast() const {
        SkASSERT(T::ClassID() == this->classID());
        return *static_cast<const T*>(this);
    }

    template <typename T> T* cast() {
        SkASSERT(T::ClassID() == this->classID());
        return static_cast<T*>(this);
    }

    uint32_t classID() const { SkASSERT(kIllegalBatchID != fClassID); return fClassID; }

    // We lazily initialize the uniqueID because currently the only user is GrAuditTrail
    uint32_t uniqueID() const {
        if (kIllegalBatchID == fUniqueID) {
            fUniqueID = GenBatchID();
        }
        return fUniqueID;
    }
    SkDEBUGCODE(bool isUsed() const { return fUsed; })

    /** Called prior to drawing. The batch should perform any resource creation necessary
        to quickly issue its draw when draw is called. */
    void prepare(GrBatchFlushState* state) { this->onPrepare(state); }

    /** Issues the batch's commands to GrGpu. 'bounds' is forwarded to onDraw(). */
    void draw(GrBatchFlushState* state, const SkRect& bounds) { this->onDraw(state, bounds); }

    /** Used to block batching across render target changes. Remove this once we store
        GrBatches for different RTs in different targets. */
    virtual uint32_t renderTargetUniqueID() const = 0;

    /** Used for spewing information about batches when debugging. Subclasses may
        override to append their own state; the default reports only the bounds. */
    virtual SkString dumpInfo() const {
        SkString string;
        string.appendf("BatchBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n",
                       fBounds.fLeft, fBounds.fTop, fBounds.fRight, fBounds.fBottom);
        return string;
    }

    /** Can remove this when multi-draw-buffer lands */
    virtual GrRenderTarget* renderTarget() const = 0;

protected:
    /**
     * Indicates that the batch will produce geometry that extends beyond its bounds for the
     * purpose of ensuring that the fragment shader runs on partially covered pixels for
     * non-MSAA antialiasing.
     */
    enum class HasAABloat {
        kYes,
        kNo
    };
    /**
     * Indicates that the geometry represented by the batch has zero area (i.e. it is hairline
     * or points).
     */
    enum class IsZeroArea {
        kYes,
        kNo
    };
    // Sets the device-space bounds directly and records the flags. One of the
    // two setters below must be called before bounds()/hasAABloat()/hasZeroArea().
    void setBounds(const SkRect& newBounds, HasAABloat aabloat, IsZeroArea zeroArea) {
        fBounds = newBounds;
        this->setBoundsFlags(aabloat, zeroArea);
    }
    // Sets the bounds to 'srcBounds' mapped through 'm' and records the flags.
    void setTransformedBounds(const SkRect& srcBounds, const SkMatrix& m,
                              HasAABloat aabloat, IsZeroArea zeroArea) {
        m.mapRect(&fBounds, srcBounds);
        this->setBoundsFlags(aabloat, zeroArea);
    }

    // Unions this batch's bounds/flags with 'that' (used when batches merge).
    // The flags become the OR of both batches' flags.
    void joinBounds(const GrBatch& that) {
        if (that.hasAABloat()) {
            fBoundsFlags |= kAABloat_BoundsFlag;
        }
        if (that.hasZeroArea()) {
            fBoundsFlags |= kZeroArea_BoundsFlag;
        }
        // NOTE(review): 'return' of a void expression — legal, but plain
        // statement form would be clearer.
        return fBounds.joinPossiblyEmptyRect(that.fBounds);
    }

    // Wholesale adopts 'that's bounds and flags (no union).
    void replaceBounds(const GrBatch& that) {
        fBounds = that.fBounds;
        fBoundsFlags = that.fBoundsFlags;
    }

    static uint32_t GenBatchClassID() { return GenID(&gCurrBatchClassID); }

private:
    virtual bool onCombineIfPossible(GrBatch*, const GrCaps& caps) = 0;

    virtual void onPrepare(GrBatchFlushState*) = 0;
    virtual void onDraw(GrBatchFlushState*, const SkRect& bounds) = 0;

    // Thread-safe monotonically-increasing id generator shared by class ids and
    // per-instance unique ids (distinct counters). Id 0 is reserved as illegal.
    static uint32_t GenID(int32_t* idCounter) {
        // The atomic inc returns the old value not the incremented value. So we add
        // 1 to the returned value.
        uint32_t id = static_cast<uint32_t>(sk_atomic_inc(idCounter)) + 1;
        if (!id) {
            SkFAIL("This should never wrap as it should only be called once for each GrBatch "
                   "subclass.");
        }
        return id;
    }

    void setBoundsFlags(HasAABloat aabloat, IsZeroArea zeroArea) {
        fBoundsFlags = 0;
        fBoundsFlags |= (HasAABloat::kYes == aabloat) ? kAABloat_BoundsFlag : 0;
        fBoundsFlags |= (IsZeroArea ::kYes == zeroArea) ? kZeroArea_BoundsFlag : 0;
    }

    enum {
        kIllegalBatchID = 0,
    };

    enum BoundsFlags {
        kAABloat_BoundsFlag = 0x1,
        kZeroArea_BoundsFlag = 0x2,
        // Debug-only sentinel so bounds()/flag accessors can assert that one of
        // the setBounds variants ran first.
        SkDEBUGCODE(kUninitialized_BoundsFlag = 0x4)
    };

    SkDEBUGCODE(bool fUsed;)
    // NOTE(review): class ids are generated as 32-bit values but stored in 16
    // bits — fine while fewer than 2^16 subclasses exist; confirm truncation is
    // intentional (classID() asserts the stored value is non-zero).
    const uint16_t fClassID;
    uint16_t fBoundsFlags;

    static uint32_t GenBatchID() { return GenID(&gCurrBatchUniqueID); }
    mutable uint32_t fUniqueID;  // lazily assigned by uniqueID()
    SkRect fBounds;

    static int32_t gCurrBatchUniqueID;
    static int32_t gCurrBatchClassID;
};
| 234 | |
joshualitt | 4d8da81 | 2015-01-28 12:53:54 -0800 | [diff] [blame] | 235 | #endif |