joshualitt | 4d8da81 | 2015-01-28 12:53:54 -0800 | [diff] [blame] | 1 | /* |
| 2 | * Copyright 2015 Google Inc. |
| 3 | * |
| 4 | * Use of this source code is governed by a BSD-style license that can be |
| 5 | * found in the LICENSE file. |
| 6 | */ |
| 7 | |
| 8 | #ifndef GrBatch_DEFINED |
| 9 | #define GrBatch_DEFINED |
| 10 | |
| 11 | #include <new> |
joshualitt | dbe1e6f | 2015-07-16 08:12:45 -0700 | [diff] [blame] | 12 | #include "GrNonAtomicRef.h" |
joshualitt | 4d8da81 | 2015-01-28 12:53:54 -0800 | [diff] [blame] | 13 | |
bsalomon | 16b9913 | 2015-08-13 14:55:50 -0700 | [diff] [blame] | 14 | #include "SkRect.h" |
bsalomon | 5346983 | 2015-08-18 09:20:09 -0700 | [diff] [blame] | 15 | #include "SkString.h" |
joshualitt | 4d8da81 | 2015-01-28 12:53:54 -0800 | [diff] [blame] | 16 | |
bsalomon | 16b9913 | 2015-08-13 14:55:50 -0700 | [diff] [blame] | 17 | class GrCaps; |
bsalomon | 5346983 | 2015-08-18 09:20:09 -0700 | [diff] [blame] | 18 | class GrBatchFlushState; |
bsalomon | 6dea83f | 2015-12-03 12:58:06 -0800 | [diff] [blame] | 19 | class GrRenderTarget; |
joshualitt | 4d8da81 | 2015-01-28 12:53:54 -0800 | [diff] [blame] | 20 | |
bsalomon | abd30f5 | 2015-08-13 13:34:48 -0700 | [diff] [blame] | 21 | /** |
joshualitt | 4d8da81 | 2015-01-28 12:53:54 -0800 | [diff] [blame] | 22 | * GrBatch is the base class for all Ganesh deferred geometry generators. To facilitate |
| 23 | * reorderable batching, Ganesh does not generate geometry inline with draw calls. Instead, it |
| 24 | * captures the arguments to the draw and then generates the geometry on demand. This gives GrBatch |
| 25 | * subclasses complete freedom to decide how / what they can batch. |
| 26 | * |
| 27 | * Batches are created when GrContext processes a draw call. Batches of the same subclass may be |
| 28 | * merged using combineIfPossible. When two batches merge, one takes on the union of the data |
| 29 | * and the other is left empty. The merged batch becomes responsible for drawing the data from both |
| 30 | * the original batches. |
| 31 | * |
| 32 | * If there are any possible optimizations which might require knowing more about the full state of |
| 33 | * the draw, ie whether or not the GrBatch is allowed to tweak alpha for coverage, then this |
| 34 | * information will be communicated to the GrBatch prior to geometry generation. |
bsalomon | db4758c | 2015-11-23 11:14:20 -0800 | [diff] [blame] | 35 | * |
| 36 | * The bounds of the batch must contain all the vertices in device space *irrespective* of the clip. |
| 37 | * The bounds are used in determining which clip elements must be applied and thus the bounds cannot |
| 38 | * in turn depend upon the clip. |
joshualitt | 4d8da81 | 2015-01-28 12:53:54 -0800 | [diff] [blame] | 39 | */ |
// Compile-time switch for verbose batch debugging. When GR_BATCH_SPEW is
// non-zero, GrBATCH_INFO logs printf-style via SkDebugf and GrBATCH_SPEW
// emits the wrapped code; when it is 0 both expand to nothing.
#define GR_BATCH_SPEW 0
#if GR_BATCH_SPEW
#define GrBATCH_INFO(...) SkDebugf(__VA_ARGS__)
#define GrBATCH_SPEW(code) code
#else
#define GrBATCH_SPEW(code)
#define GrBATCH_INFO(...)
#endif
joshualitt | 4d8da81 | 2015-01-28 12:53:54 -0800 | [diff] [blame] | 48 | |
// A helper macro to generate a class static id. Each GrBatch subclass places
// this in its declaration to get a unique, lazily-initialized ClassID(),
// which GrBatch uses for checked down-casts (cast<T>()) and to gate merging
// in combineIfPossible().
#define DEFINE_BATCH_CLASS_ID \
    static uint32_t ClassID() { \
        static uint32_t kClassID = GenBatchClassID(); \
        return kClassID; \
    }
| 55 | |
cdalton | 4833f39 | 2016-02-02 22:46:16 -0800 | [diff] [blame] | 56 | class GrBatch : public GrNonAtomicRef<GrBatch> { |
joshualitt | 4d8da81 | 2015-01-28 12:53:54 -0800 | [diff] [blame] | 57 | public: |
reed | 1b55a96 | 2015-09-17 20:16:13 -0700 | [diff] [blame] | 58 | GrBatch(uint32_t classID); |
cdalton | 4833f39 | 2016-02-02 22:46:16 -0800 | [diff] [blame] | 59 | virtual ~GrBatch(); |
joshualitt | 4d8da81 | 2015-01-28 12:53:54 -0800 | [diff] [blame] | 60 | |
| 61 | virtual const char* name() const = 0; |
joshualitt | 4d8da81 | 2015-01-28 12:53:54 -0800 | [diff] [blame] | 62 | |
bsalomon | cb02b38 | 2015-08-12 11:14:50 -0700 | [diff] [blame] | 63 | bool combineIfPossible(GrBatch* that, const GrCaps& caps) { |
joshualitt | 4d8da81 | 2015-01-28 12:53:54 -0800 | [diff] [blame] | 64 | if (this->classID() != that->classID()) { |
| 65 | return false; |
| 66 | } |
| 67 | |
bsalomon | cb02b38 | 2015-08-12 11:14:50 -0700 | [diff] [blame] | 68 | return this->onCombineIfPossible(that, caps); |
joshualitt | 4d8da81 | 2015-01-28 12:53:54 -0800 | [diff] [blame] | 69 | } |
| 70 | |
joshualitt | 99c7c07 | 2015-05-01 13:43:30 -0700 | [diff] [blame] | 71 | const SkRect& bounds() const { return fBounds; } |
| 72 | |
joshualitt | 4d8da81 | 2015-01-28 12:53:54 -0800 | [diff] [blame] | 73 | void* operator new(size_t size); |
| 74 | void operator delete(void* target); |
| 75 | |
| 76 | void* operator new(size_t size, void* placement) { |
| 77 | return ::operator new(size, placement); |
| 78 | } |
| 79 | void operator delete(void* target, void* placement) { |
| 80 | ::operator delete(target, placement); |
| 81 | } |
| 82 | |
| 83 | /** |
reed | 1b55a96 | 2015-09-17 20:16:13 -0700 | [diff] [blame] | 84 | * Helper for safely down-casting to a GrBatch subclass |
bsalomon | abd30f5 | 2015-08-13 13:34:48 -0700 | [diff] [blame] | 85 | */ |
reed | 1b55a96 | 2015-09-17 20:16:13 -0700 | [diff] [blame] | 86 | template <typename T> const T& cast() const { |
| 87 | SkASSERT(T::ClassID() == this->classID()); |
| 88 | return *static_cast<const T*>(this); |
| 89 | } |
| 90 | |
| 91 | template <typename T> T* cast() { |
| 92 | SkASSERT(T::ClassID() == this->classID()); |
| 93 | return static_cast<T*>(this); |
| 94 | } |
joshualitt | 4d8da81 | 2015-01-28 12:53:54 -0800 | [diff] [blame] | 95 | |
joshualitt | ca1f07e | 2015-08-07 08:11:19 -0700 | [diff] [blame] | 96 | uint32_t classID() const { SkASSERT(kIllegalBatchID != fClassID); return fClassID; } |
joshualitt | 4d8da81 | 2015-01-28 12:53:54 -0800 | [diff] [blame] | 97 | |
joshualitt | ca1f07e | 2015-08-07 08:11:19 -0700 | [diff] [blame] | 98 | #if GR_BATCH_SPEW |
| 99 | uint32_t uniqueID() const { return fUniqueID; } |
| 100 | #endif |
bsalomon | abd30f5 | 2015-08-13 13:34:48 -0700 | [diff] [blame] | 101 | SkDEBUGCODE(bool isUsed() const { return fUsed; }) |
joshualitt | ca1f07e | 2015-08-07 08:11:19 -0700 | [diff] [blame] | 102 | |
bsalomon | 5346983 | 2015-08-18 09:20:09 -0700 | [diff] [blame] | 103 | /** Called prior to drawing. The batch should perform any resource creation necessary to |
| 104 | to quickly issue its draw when draw is called. */ |
| 105 | void prepare(GrBatchFlushState* state) { this->onPrepare(state); } |
| 106 | |
| 107 | /** Issues the batches commands to GrGpu. */ |
| 108 | void draw(GrBatchFlushState* state) { this->onDraw(state); } |
| 109 | |
| 110 | /** Used to block batching across render target changes. Remove this once we store |
| 111 | GrBatches for different RTs in different targets. */ |
| 112 | virtual uint32_t renderTargetUniqueID() const = 0; |
| 113 | |
| 114 | /** Used for spewing information about batches when debugging. */ |
| 115 | virtual SkString dumpInfo() const = 0; |
| 116 | |
bsalomon | 6dea83f | 2015-12-03 12:58:06 -0800 | [diff] [blame] | 117 | /** Can remove this when multi-draw-buffer lands */ |
| 118 | virtual GrRenderTarget* renderTarget() const = 0; |
| 119 | |
joshualitt | 4d8da81 | 2015-01-28 12:53:54 -0800 | [diff] [blame] | 120 | protected: |
joshualitt | 99c7c07 | 2015-05-01 13:43:30 -0700 | [diff] [blame] | 121 | // NOTE, compute some bounds, even if extremely conservative. Do *NOT* setLargest on the bounds |
| 122 | // rect because we outset it for dst copy textures |
| 123 | void setBounds(const SkRect& newBounds) { fBounds = newBounds; } |
| 124 | |
| 125 | void joinBounds(const SkRect& otherBounds) { |
| 126 | return fBounds.joinPossiblyEmptyRect(otherBounds); |
| 127 | } |
| 128 | |
reed | 1b55a96 | 2015-09-17 20:16:13 -0700 | [diff] [blame] | 129 | static uint32_t GenBatchClassID() { return GenID(&gCurrBatchClassID); } |
| 130 | |
bsalomon | abd30f5 | 2015-08-13 13:34:48 -0700 | [diff] [blame] | 131 | SkRect fBounds; |
| 132 | |
| 133 | private: |
| 134 | virtual bool onCombineIfPossible(GrBatch*, const GrCaps& caps) = 0; |
| 135 | |
bsalomon | 5346983 | 2015-08-18 09:20:09 -0700 | [diff] [blame] | 136 | virtual void onPrepare(GrBatchFlushState*) = 0; |
| 137 | virtual void onDraw(GrBatchFlushState*) = 0; |
| 138 | |
bsalomon | abd30f5 | 2015-08-13 13:34:48 -0700 | [diff] [blame] | 139 | static uint32_t GenID(int32_t* idCounter) { |
reed | 1b55a96 | 2015-09-17 20:16:13 -0700 | [diff] [blame] | 140 | // The atomic inc returns the old value not the incremented value. So we add |
bsalomon | abd30f5 | 2015-08-13 13:34:48 -0700 | [diff] [blame] | 141 | // 1 to the returned value. |
| 142 | uint32_t id = static_cast<uint32_t>(sk_atomic_inc(idCounter)) + 1; |
| 143 | if (!id) { |
| 144 | SkFAIL("This should never wrap as it should only be called once for each GrBatch " |
| 145 | "subclass."); |
| 146 | } |
| 147 | return id; |
| 148 | } |
| 149 | |
| 150 | enum { |
| 151 | kIllegalBatchID = 0, |
| 152 | }; |
| 153 | |
bsalomon | abd30f5 | 2015-08-13 13:34:48 -0700 | [diff] [blame] | 154 | SkDEBUGCODE(bool fUsed;) |
reed | 1b55a96 | 2015-09-17 20:16:13 -0700 | [diff] [blame] | 155 | const uint32_t fClassID; |
bsalomon | abd30f5 | 2015-08-13 13:34:48 -0700 | [diff] [blame] | 156 | #if GR_BATCH_SPEW |
reed | 1b55a96 | 2015-09-17 20:16:13 -0700 | [diff] [blame] | 157 | static uint32_t GenBatchID() { return GenID(&gCurrBatchUniqueID); } |
| 158 | const uint32_t fUniqueID; |
| 159 | static int32_t gCurrBatchUniqueID; |
bsalomon | abd30f5 | 2015-08-13 13:34:48 -0700 | [diff] [blame] | 160 | #endif |
reed | 1b55a96 | 2015-09-17 20:16:13 -0700 | [diff] [blame] | 161 | static int32_t gCurrBatchClassID; |
bsalomon | abd30f5 | 2015-08-13 13:34:48 -0700 | [diff] [blame] | 162 | }; |
| 163 | |
joshualitt | 4d8da81 | 2015-01-28 12:53:54 -0800 | [diff] [blame] | 164 | #endif |