blob: b29c8a49b701ccccee3290233350f9ee1591877f [file] [log] [blame]
/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7
#ifndef GrBatch_DEFINED
#define GrBatch_DEFINED

#include <new>
#include "GrNonAtomicRef.h"

#include "SkRect.h"
#include "SkString.h"

class GrCaps;
class GrBatchFlushState;
class GrRenderTarget;
joshualitt4d8da812015-01-28 12:53:54 -080020
/**
 * GrBatch is the base class for all Ganesh deferred geometry generators. To facilitate
 * reorderable batching, Ganesh does not generate geometry inline with draw calls. Instead, it
 * captures the arguments to the draw and then generates the geometry on demand. This gives GrBatch
 * subclasses complete freedom to decide how / what they can batch.
 *
 * Batches are created when GrContext processes a draw call. Batches of the same subclass may be
 * merged using combineIfPossible. When two batches merge, one takes on the union of the data
 * and the other is left empty. The merged batch becomes responsible for drawing the data from both
 * the original batches.
 *
 * If there are any possible optimizations which might require knowing more about the full state of
 * the draw, i.e. whether or not the GrBatch is allowed to tweak alpha for coverage, then this
 * information will be communicated to the GrBatch prior to geometry generation.
 *
 * The bounds of the batch must contain all the vertices in device space *irrespective* of the clip.
 * The bounds are used in determining which clip elements must be applied and thus the bounds cannot
 * in turn depend upon the clip.
 */
// Compile-time switch for verbose batch debugging output. When enabled,
// GrBATCH_INFO forwards to SkDebugf and GrBATCH_SPEW emits the wrapped code;
// when disabled both compile away to nothing.
#define GR_BATCH_SPEW 0
#if GR_BATCH_SPEW
    #define GrBATCH_INFO(...) SkDebugf(__VA_ARGS__)
    #define GrBATCH_SPEW(code) code
#else
    #define GrBATCH_SPEW(code)
    #define GrBATCH_INFO(...)
#endif
joshualitt4d8da812015-01-28 12:53:54 -080048
// A helper macro to generate a class static id. Place inside a GrBatch
// subclass; the function-local static ensures each subclass gets one unique
// id, generated lazily on first call via GrBatch::GenBatchClassID().
#define DEFINE_BATCH_CLASS_ID \
    static uint32_t ClassID() { \
        static uint32_t kClassID = GenBatchClassID(); \
        return kClassID; \
    }
55
cdalton4833f392016-02-02 22:46:16 -080056class GrBatch : public GrNonAtomicRef<GrBatch> {
joshualitt4d8da812015-01-28 12:53:54 -080057public:
reed1b55a962015-09-17 20:16:13 -070058 GrBatch(uint32_t classID);
cdalton4833f392016-02-02 22:46:16 -080059 virtual ~GrBatch();
joshualitt4d8da812015-01-28 12:53:54 -080060
61 virtual const char* name() const = 0;
joshualitt4d8da812015-01-28 12:53:54 -080062
bsalomoncb02b382015-08-12 11:14:50 -070063 bool combineIfPossible(GrBatch* that, const GrCaps& caps) {
joshualitt4d8da812015-01-28 12:53:54 -080064 if (this->classID() != that->classID()) {
65 return false;
66 }
67
bsalomoncb02b382015-08-12 11:14:50 -070068 return this->onCombineIfPossible(that, caps);
joshualitt4d8da812015-01-28 12:53:54 -080069 }
70
joshualitt99c7c072015-05-01 13:43:30 -070071 const SkRect& bounds() const { return fBounds; }
72
joshualitt4d8da812015-01-28 12:53:54 -080073 void* operator new(size_t size);
74 void operator delete(void* target);
75
76 void* operator new(size_t size, void* placement) {
77 return ::operator new(size, placement);
78 }
79 void operator delete(void* target, void* placement) {
80 ::operator delete(target, placement);
81 }
82
83 /**
reed1b55a962015-09-17 20:16:13 -070084 * Helper for safely down-casting to a GrBatch subclass
bsalomonabd30f52015-08-13 13:34:48 -070085 */
reed1b55a962015-09-17 20:16:13 -070086 template <typename T> const T& cast() const {
87 SkASSERT(T::ClassID() == this->classID());
88 return *static_cast<const T*>(this);
89 }
90
91 template <typename T> T* cast() {
92 SkASSERT(T::ClassID() == this->classID());
93 return static_cast<T*>(this);
94 }
joshualitt4d8da812015-01-28 12:53:54 -080095
joshualittca1f07e2015-08-07 08:11:19 -070096 uint32_t classID() const { SkASSERT(kIllegalBatchID != fClassID); return fClassID; }
joshualitt4d8da812015-01-28 12:53:54 -080097
joshualittca1f07e2015-08-07 08:11:19 -070098#if GR_BATCH_SPEW
99 uint32_t uniqueID() const { return fUniqueID; }
100#endif
bsalomonabd30f52015-08-13 13:34:48 -0700101 SkDEBUGCODE(bool isUsed() const { return fUsed; })
joshualittca1f07e2015-08-07 08:11:19 -0700102
bsalomon53469832015-08-18 09:20:09 -0700103 /** Called prior to drawing. The batch should perform any resource creation necessary to
104 to quickly issue its draw when draw is called. */
105 void prepare(GrBatchFlushState* state) { this->onPrepare(state); }
106
107 /** Issues the batches commands to GrGpu. */
108 void draw(GrBatchFlushState* state) { this->onDraw(state); }
109
110 /** Used to block batching across render target changes. Remove this once we store
111 GrBatches for different RTs in different targets. */
112 virtual uint32_t renderTargetUniqueID() const = 0;
113
114 /** Used for spewing information about batches when debugging. */
115 virtual SkString dumpInfo() const = 0;
116
bsalomon6dea83f2015-12-03 12:58:06 -0800117 /** Can remove this when multi-draw-buffer lands */
118 virtual GrRenderTarget* renderTarget() const = 0;
119
joshualitt4d8da812015-01-28 12:53:54 -0800120protected:
joshualitt99c7c072015-05-01 13:43:30 -0700121 // NOTE, compute some bounds, even if extremely conservative. Do *NOT* setLargest on the bounds
122 // rect because we outset it for dst copy textures
123 void setBounds(const SkRect& newBounds) { fBounds = newBounds; }
124
125 void joinBounds(const SkRect& otherBounds) {
126 return fBounds.joinPossiblyEmptyRect(otherBounds);
127 }
128
reed1b55a962015-09-17 20:16:13 -0700129 static uint32_t GenBatchClassID() { return GenID(&gCurrBatchClassID); }
130
bsalomonabd30f52015-08-13 13:34:48 -0700131 SkRect fBounds;
132
133private:
134 virtual bool onCombineIfPossible(GrBatch*, const GrCaps& caps) = 0;
135
bsalomon53469832015-08-18 09:20:09 -0700136 virtual void onPrepare(GrBatchFlushState*) = 0;
137 virtual void onDraw(GrBatchFlushState*) = 0;
138
bsalomonabd30f52015-08-13 13:34:48 -0700139 static uint32_t GenID(int32_t* idCounter) {
reed1b55a962015-09-17 20:16:13 -0700140 // The atomic inc returns the old value not the incremented value. So we add
bsalomonabd30f52015-08-13 13:34:48 -0700141 // 1 to the returned value.
142 uint32_t id = static_cast<uint32_t>(sk_atomic_inc(idCounter)) + 1;
143 if (!id) {
144 SkFAIL("This should never wrap as it should only be called once for each GrBatch "
145 "subclass.");
146 }
147 return id;
148 }
149
150 enum {
151 kIllegalBatchID = 0,
152 };
153
bsalomonabd30f52015-08-13 13:34:48 -0700154 SkDEBUGCODE(bool fUsed;)
reed1b55a962015-09-17 20:16:13 -0700155 const uint32_t fClassID;
bsalomonabd30f52015-08-13 13:34:48 -0700156#if GR_BATCH_SPEW
reed1b55a962015-09-17 20:16:13 -0700157 static uint32_t GenBatchID() { return GenID(&gCurrBatchUniqueID); }
158 const uint32_t fUniqueID;
159 static int32_t gCurrBatchUniqueID;
bsalomonabd30f52015-08-13 13:34:48 -0700160#endif
reed1b55a962015-09-17 20:16:13 -0700161 static int32_t gCurrBatchClassID;
bsalomonabd30f52015-08-13 13:34:48 -0700162};

#endif