blob: 21dc3d21c88fd75f3f33afaa184f1be78ba206cf [file] [log] [blame]
joshualitt4d8da812015-01-28 12:53:54 -08001/*
2 * Copyright 2015 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8#ifndef GrBatch_DEFINED
9#define GrBatch_DEFINED
10
11#include <new>
bsalomonb5238a72015-05-05 07:49:49 -070012#include "GrBatchTarget.h"
joshualitt4d8da812015-01-28 12:53:54 -080013#include "GrGeometryProcessor.h"
bsalomoncb8979d2015-05-05 09:51:38 -070014#include "GrVertices.h"
mtklein1b249332015-07-07 12:21:21 -070015#include "SkAtomics.h"
joshualitt4d8da812015-01-28 12:53:54 -080016#include "SkRefCnt.h"
joshualitt4d8da812015-01-28 12:53:54 -080017#include "SkTypes.h"
18
joshualitt4d8da812015-01-28 12:53:54 -080019class GrGpu;
joshualitt4d8da812015-01-28 12:53:54 -080020class GrPipeline;
joshualitt4d8da812015-01-28 12:53:54 -080021
22struct GrInitInvariantOutput;
23
/*
 * GrBatch is the base class for all Ganesh deferred geometry generators. To facilitate
 * reorderable batching, Ganesh does not generate geometry inline with draw calls. Instead, it
 * captures the arguments to the draw and then generates the geometry on demand. This gives GrBatch
 * subclasses complete freedom to decide how / what they can batch.
 *
 * Batches are created when GrContext processes a draw call. Batches of the same subclass may be
 * merged using combineIfPossible. When two batches merge, one takes on the union of the data
 * and the other is left empty. The merged batch becomes responsible for drawing the data from both
 * the original batches.
 *
 * If there are any possible optimizations which might require knowing more about the full state of
 * the draw, i.e. whether or not the GrBatch is allowed to tweak alpha for coverage, then this
 * information will be communicated to the GrBatch prior to geometry generation.
 */
39
joshualitt4d8da812015-01-28 12:53:54 -080040class GrBatch : public SkRefCnt {
41public:
mtklein1b249332015-07-07 12:21:21 -070042
joshualitt5bf99f12015-03-13 11:47:42 -070043 GrBatch() : fClassID(kIllegalBatchClassID), fNumberOfDraws(0) { SkDEBUGCODE(fUsed = false;) }
joshualitt4d8da812015-01-28 12:53:54 -080044 virtual ~GrBatch() {}
45
46 virtual const char* name() const = 0;
47 virtual void getInvariantOutputColor(GrInitInvariantOutput* out) const = 0;
48 virtual void getInvariantOutputCoverage(GrInitInvariantOutput* out) const = 0;
49
50 /*
joshualittdb0f9512015-02-13 12:50:09 -080051 * initBatchTracker is a hook for the some additional overrides / optimization possibilities
52 * from the GrXferProcessor.
joshualitt4d8da812015-01-28 12:53:54 -080053 */
joshualitt4d8da812015-01-28 12:53:54 -080054 virtual void initBatchTracker(const GrPipelineInfo& init) = 0;
55
56 bool combineIfPossible(GrBatch* that) {
57 if (this->classID() != that->classID()) {
58 return false;
59 }
60
joshualitt99c7c072015-05-01 13:43:30 -070061 return this->onCombineIfPossible(that);
joshualitt4d8da812015-01-28 12:53:54 -080062 }
63
64 virtual bool onCombineIfPossible(GrBatch*) = 0;
65
66 virtual void generateGeometry(GrBatchTarget*, const GrPipeline*) = 0;
67
joshualitt99c7c072015-05-01 13:43:30 -070068 const SkRect& bounds() const { return fBounds; }
69
joshualitt7bc18b72015-02-03 16:41:41 -080070 // TODO this goes away when batches are everywhere
71 void setNumberOfDraws(int numberOfDraws) { fNumberOfDraws = numberOfDraws; }
72 int numberOfDraws() const { return fNumberOfDraws; }
73
joshualitt4d8da812015-01-28 12:53:54 -080074 void* operator new(size_t size);
75 void operator delete(void* target);
76
77 void* operator new(size_t size, void* placement) {
78 return ::operator new(size, placement);
79 }
80 void operator delete(void* target, void* placement) {
81 ::operator delete(target, placement);
82 }
83
84 /**
85 * Helper for down-casting to a GrBatch subclass
86 */
87 template <typename T> const T& cast() const { return *static_cast<const T*>(this); }
88 template <typename T> T* cast() { return static_cast<T*>(this); }
89
90 uint32_t classID() const { SkASSERT(kIllegalBatchClassID != fClassID); return fClassID; }
91
92 // TODO no GrPrimitiveProcessors yet read fragment position
93 bool willReadFragmentPosition() const { return false; }
94
95 SkDEBUGCODE(bool isUsed() const { return fUsed; })
96
97protected:
98 template <typename PROC_SUBCLASS> void initClassID() {
99 static uint32_t kClassID = GenClassID();
100 fClassID = kClassID;
101 }
102
103 uint32_t fClassID;
104
joshualitt99c7c072015-05-01 13:43:30 -0700105 // NOTE, compute some bounds, even if extremely conservative. Do *NOT* setLargest on the bounds
106 // rect because we outset it for dst copy textures
107 void setBounds(const SkRect& newBounds) { fBounds = newBounds; }
108
109 void joinBounds(const SkRect& otherBounds) {
110 return fBounds.joinPossiblyEmptyRect(otherBounds);
111 }
112
bsalomonb5238a72015-05-05 07:49:49 -0700113 /** Helper for rendering instances using an instanced index index buffer. This class creates the
114 space for the vertices and flushes the draws to the batch target.*/
115 class InstancedHelper {
116 public:
bsalomone64eb572015-05-07 11:35:55 -0700117 InstancedHelper() {}
bsalomonb5238a72015-05-05 07:49:49 -0700118 /** Returns the allocated storage for the vertices. The caller should populate the before
119 vertices before calling issueDraws(). */
120 void* init(GrBatchTarget* batchTarget, GrPrimitiveType, size_t vertexStride,
121 const GrIndexBuffer*, int verticesPerInstance, int indicesPerInstance,
122 int instancesToDraw);
123
124 /** Call after init() to issue draws to the batch target.*/
bsalomone64eb572015-05-07 11:35:55 -0700125 void issueDraw(GrBatchTarget* batchTarget) {
bsalomoncb8979d2015-05-05 09:51:38 -0700126 SkASSERT(fVertices.instanceCount());
bsalomone64eb572015-05-07 11:35:55 -0700127 batchTarget->draw(fVertices);
bsalomonb5238a72015-05-05 07:49:49 -0700128 }
129 private:
bsalomoncb8979d2015-05-05 09:51:38 -0700130 GrVertices fVertices;
bsalomonb5238a72015-05-05 07:49:49 -0700131 };
132
133 static const int kVerticesPerQuad = 4;
134 static const int kIndicesPerQuad = 6;
135
136 /** A specialization of InstanceHelper for quad rendering. */
137 class QuadHelper : private InstancedHelper {
138 public:
139 QuadHelper() : INHERITED() {}
140 /** Finds the cached quad index buffer and reserves vertex space. Returns NULL on failure
141 and on sucess a pointer to the vertex data that the caller should populate before
142 calling issueDraws(). */
143 void* init(GrBatchTarget* batchTarget, size_t vertexStride, int quadsToDraw);
144
bsalomone64eb572015-05-07 11:35:55 -0700145 using InstancedHelper::issueDraw;
bsalomonb5238a72015-05-05 07:49:49 -0700146
147 private:
148 typedef InstancedHelper INHERITED;
149 };
150
joshualitt99c7c072015-05-01 13:43:30 -0700151 SkRect fBounds;
152
joshualitt4d8da812015-01-28 12:53:54 -0800153private:
154 static uint32_t GenClassID() {
155 // fCurrProcessorClassID has been initialized to kIllegalProcessorClassID. The
156 // atomic inc returns the old value not the incremented value. So we add
157 // 1 to the returned value.
158 uint32_t id = static_cast<uint32_t>(sk_atomic_inc(&gCurrBatchClassID)) + 1;
159 if (!id) {
160 SkFAIL("This should never wrap as it should only be called once for each GrBatch "
161 "subclass.");
162 }
163 return id;
164 }
165
166 enum {
167 kIllegalBatchClassID = 0,
168 };
169 static int32_t gCurrBatchClassID;
170
171 SkDEBUGCODE(bool fUsed;)
172
joshualitt7bc18b72015-02-03 16:41:41 -0800173 int fNumberOfDraws;
174
joshualitt4d8da812015-01-28 12:53:54 -0800175 typedef SkRefCnt INHERITED;
176};
177
178#endif