blob: 03e396a29c2fd31d33753c7631f514aaac7cd4a5 [file] [log] [blame]
/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7
8#ifndef GrBatch_DEFINED
9#define GrBatch_DEFINED
10
11#include <new>
joshualittdbe1e6f2015-07-16 08:12:45 -070012#include "GrNonAtomicRef.h"
joshualitt4d8da812015-01-28 12:53:54 -080013
bsalomon16b99132015-08-13 14:55:50 -070014#include "SkRect.h"
bsalomon53469832015-08-18 09:20:09 -070015#include "SkString.h"
joshualitt4d8da812015-01-28 12:53:54 -080016
bsalomon16b99132015-08-13 14:55:50 -070017class GrCaps;
bsalomon53469832015-08-18 09:20:09 -070018class GrBatchFlushState;
joshualitt4d8da812015-01-28 12:53:54 -080019
/**
 * GrBatch is the base class for all Ganesh deferred geometry generators. To facilitate
 * reorderable batching, Ganesh does not generate geometry inline with draw calls. Instead, it
 * captures the arguments to the draw and then generates the geometry on demand. This gives GrBatch
 * subclasses complete freedom to decide how / what they can batch.
 *
 * Batches are created when GrContext processes a draw call. Batches of the same subclass may be
 * merged using combineIfPossible. When two batches merge, one takes on the union of the data
 * and the other is left empty. The merged batch becomes responsible for drawing the data from
 * both of the original batches.
 *
 * If there are any possible optimizations which might require knowing more about the full state of
 * the draw, i.e. whether or not the GrBatch is allowed to tweak alpha for coverage, then this
 * information will be communicated to the GrBatch prior to geometry generation.
 *
 * The bounds of the batch must contain all the vertices in device space *irrespective* of the clip.
 * The bounds are used in determining which clip elements must be applied and thus the bounds cannot
 * in turn depend upon the clip.
 */
joshualittca1f07e2015-08-07 08:11:19 -070039#define GR_BATCH_SPEW 0
40#if GR_BATCH_SPEW
41 #define GrBATCH_INFO(...) SkDebugf(__VA_ARGS__)
42 #define GrBATCH_SPEW(code) code
43#else
44 #define GrBATCH_SPEW(code)
45 #define GrBATCH_INFO(...)
46#endif
joshualitt4d8da812015-01-28 12:53:54 -080047
reed1b55a962015-09-17 20:16:13 -070048// A helper macro to generate a class static id
49#define DEFINE_BATCH_CLASS_ID \
50 static uint32_t ClassID() { \
51 static uint32_t kClassID = GenBatchClassID(); \
52 return kClassID; \
53 }
54
joshualittdbe1e6f2015-07-16 08:12:45 -070055class GrBatch : public GrNonAtomicRef {
joshualitt4d8da812015-01-28 12:53:54 -080056public:
reed1b55a962015-09-17 20:16:13 -070057 GrBatch(uint32_t classID);
bsalomona387a112015-08-11 14:47:42 -070058 ~GrBatch() override;
joshualitt4d8da812015-01-28 12:53:54 -080059
60 virtual const char* name() const = 0;
joshualitt4d8da812015-01-28 12:53:54 -080061
bsalomoncb02b382015-08-12 11:14:50 -070062 bool combineIfPossible(GrBatch* that, const GrCaps& caps) {
joshualitt4d8da812015-01-28 12:53:54 -080063 if (this->classID() != that->classID()) {
64 return false;
65 }
66
bsalomoncb02b382015-08-12 11:14:50 -070067 return this->onCombineIfPossible(that, caps);
joshualitt4d8da812015-01-28 12:53:54 -080068 }
69
joshualitt99c7c072015-05-01 13:43:30 -070070 const SkRect& bounds() const { return fBounds; }
71
joshualitt4d8da812015-01-28 12:53:54 -080072 void* operator new(size_t size);
73 void operator delete(void* target);
74
75 void* operator new(size_t size, void* placement) {
76 return ::operator new(size, placement);
77 }
78 void operator delete(void* target, void* placement) {
79 ::operator delete(target, placement);
80 }
81
82 /**
reed1b55a962015-09-17 20:16:13 -070083 * Helper for safely down-casting to a GrBatch subclass
bsalomonabd30f52015-08-13 13:34:48 -070084 */
reed1b55a962015-09-17 20:16:13 -070085 template <typename T> const T& cast() const {
86 SkASSERT(T::ClassID() == this->classID());
87 return *static_cast<const T*>(this);
88 }
89
90 template <typename T> T* cast() {
91 SkASSERT(T::ClassID() == this->classID());
92 return static_cast<T*>(this);
93 }
joshualitt4d8da812015-01-28 12:53:54 -080094
joshualittca1f07e2015-08-07 08:11:19 -070095 uint32_t classID() const { SkASSERT(kIllegalBatchID != fClassID); return fClassID; }
joshualitt4d8da812015-01-28 12:53:54 -080096
joshualittca1f07e2015-08-07 08:11:19 -070097#if GR_BATCH_SPEW
98 uint32_t uniqueID() const { return fUniqueID; }
99#endif
bsalomonabd30f52015-08-13 13:34:48 -0700100 SkDEBUGCODE(bool isUsed() const { return fUsed; })
joshualittca1f07e2015-08-07 08:11:19 -0700101
bsalomon53469832015-08-18 09:20:09 -0700102 /** Called prior to drawing. The batch should perform any resource creation necessary to
103 to quickly issue its draw when draw is called. */
104 void prepare(GrBatchFlushState* state) { this->onPrepare(state); }
105
106 /** Issues the batches commands to GrGpu. */
107 void draw(GrBatchFlushState* state) { this->onDraw(state); }
108
109 /** Used to block batching across render target changes. Remove this once we store
110 GrBatches for different RTs in different targets. */
111 virtual uint32_t renderTargetUniqueID() const = 0;
112
113 /** Used for spewing information about batches when debugging. */
114 virtual SkString dumpInfo() const = 0;
115
joshualitt4d8da812015-01-28 12:53:54 -0800116protected:
joshualitt99c7c072015-05-01 13:43:30 -0700117 // NOTE, compute some bounds, even if extremely conservative. Do *NOT* setLargest on the bounds
118 // rect because we outset it for dst copy textures
119 void setBounds(const SkRect& newBounds) { fBounds = newBounds; }
120
121 void joinBounds(const SkRect& otherBounds) {
122 return fBounds.joinPossiblyEmptyRect(otherBounds);
123 }
124
reed1b55a962015-09-17 20:16:13 -0700125 static uint32_t GenBatchClassID() { return GenID(&gCurrBatchClassID); }
126
bsalomonabd30f52015-08-13 13:34:48 -0700127 SkRect fBounds;
128
129private:
130 virtual bool onCombineIfPossible(GrBatch*, const GrCaps& caps) = 0;
131
bsalomon53469832015-08-18 09:20:09 -0700132 virtual void onPrepare(GrBatchFlushState*) = 0;
133 virtual void onDraw(GrBatchFlushState*) = 0;
134
bsalomonabd30f52015-08-13 13:34:48 -0700135 static uint32_t GenID(int32_t* idCounter) {
reed1b55a962015-09-17 20:16:13 -0700136 // The atomic inc returns the old value not the incremented value. So we add
bsalomonabd30f52015-08-13 13:34:48 -0700137 // 1 to the returned value.
138 uint32_t id = static_cast<uint32_t>(sk_atomic_inc(idCounter)) + 1;
139 if (!id) {
140 SkFAIL("This should never wrap as it should only be called once for each GrBatch "
141 "subclass.");
142 }
143 return id;
144 }
145
146 enum {
147 kIllegalBatchID = 0,
148 };
149
bsalomonabd30f52015-08-13 13:34:48 -0700150 SkDEBUGCODE(bool fUsed;)
reed1b55a962015-09-17 20:16:13 -0700151 const uint32_t fClassID;
bsalomonabd30f52015-08-13 13:34:48 -0700152#if GR_BATCH_SPEW
reed1b55a962015-09-17 20:16:13 -0700153 static uint32_t GenBatchID() { return GenID(&gCurrBatchUniqueID); }
154 const uint32_t fUniqueID;
155 static int32_t gCurrBatchUniqueID;
bsalomonabd30f52015-08-13 13:34:48 -0700156#endif
reed1b55a962015-09-17 20:16:13 -0700157 static int32_t gCurrBatchClassID;
bsalomonabd30f52015-08-13 13:34:48 -0700158 typedef GrNonAtomicRef INHERITED;
159};
160
joshualitt4d8da812015-01-28 12:53:54 -0800161#endif