/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GLInstancedRendering.h"

#include "GrResourceProvider.h"
#include "gl/GrGLGpu.h"
#include "instanced/InstanceProcessor.h"

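// Convenience macro: issues a GL call through this object's GrGLGpu GL interface.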
#define GL_CALL(X) GR_GL_CALL(this->glGpu()->glInterface(), X)

namespace gr_instanced {

class GLInstancedRendering::GLBatch : public InstancedRendering::Batch {
public:
    DEFINE_BATCH_CLASS_ID

    GLBatch(GLInstancedRendering* instRendering) : INHERITED(ClassID(), instRendering) {}
    int numGLCommands() const { return 1 + fNumChangesInGeometry; }

private:
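    // Assigned in onBeginFlush: where this batch's commands start in the draw-indirect buffer,
    // and the instance offset to apply manually when base-instance support must be emulated.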
    int fEmulatedBaseInstance;
    int fGLDrawCmdsIdx;

    friend class GLInstancedRendering;

    typedef Batch INHERITED;
};

GrCaps::InstancedSupport GLInstancedRendering::CheckSupport(const GrGLCaps& glCaps) {
    // This method is only intended to be used for initializing fInstancedSupport in the caps.
    SkASSERT(GrCaps::InstancedSupport::kNone == glCaps.instancedSupport());
    if (!glCaps.vertexArrayObjectSupport() || !glCaps.drawIndirectSupport()) {
        return GrCaps::InstancedSupport::kNone;
    }
    return InstanceProcessor::CheckSupport(*glCaps.glslCaps(), glCaps);
}

GLInstancedRendering::GLInstancedRendering(GrGLGpu* gpu)
    : INHERITED(gpu),
      fVertexArrayID(0),
      fGLDrawCmdsInfo(0),
      fInstanceAttribsBufferUniqueId(SK_InvalidUniqueID) {
    SkASSERT(GrCaps::InstancedSupport::kNone != this->gpu()->caps()->instancedSupport());
}

GLInstancedRendering::~GLInstancedRendering() {
    if (fVertexArrayID) {
        GL_CALL(DeleteVertexArrays(1, &fVertexArrayID));
        this->glGpu()->notifyVertexArrayDelete(fVertexArrayID);
    }
}

inline GrGLGpu* GLInstancedRendering::glGpu() const {
    return static_cast<GrGLGpu*>(this->gpu());
}

InstancedRendering::Batch* GLInstancedRendering::createBatch() {
    return new GLBatch(this);
}

void GLInstancedRendering::onBeginFlush(GrResourceProvider* rp) {
    // Count what there is to draw.
    BatchList::Iter iter;
    iter.init(this->trackedBatches(), BatchList::Iter::kHead_IterStart);
    int numGLInstances = 0;
    int numGLDrawCmds = 0;
    while (Batch* b = iter.get()) {
        GLBatch* batch = static_cast<GLBatch*>(b);
        iter.next();

        numGLInstances += batch->fNumDraws;
        numGLDrawCmds += batch->numGLCommands();
    }
    if (!numGLDrawCmds) {
        return;
    }
    SkASSERT(numGLInstances);

    // Lazily create a vertex array object.
    if (!fVertexArrayID) {
        GL_CALL(GenVertexArrays(1, &fVertexArrayID));
        if (!fVertexArrayID) {
            return;
        }
        this->glGpu()->bindVertexArray(fVertexArrayID);

        // Attach our index buffer to the vertex array.
        SkASSERT(!this->indexBuffer()->isCPUBacked());
        GL_CALL(BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER,
                           static_cast<const GrGLBuffer*>(this->indexBuffer())->bufferID()));

        // Set up the non-instanced attribs.
        this->glGpu()->bindBuffer(kVertex_GrBufferType, this->vertexBuffer());
        GL_CALL(EnableVertexAttribArray((int)Attrib::kShapeCoords));
        GL_CALL(VertexAttribPointer((int)Attrib::kShapeCoords, 2, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(ShapeVertex), (void*) offsetof(ShapeVertex, fX)));
        GL_CALL(EnableVertexAttribArray((int)Attrib::kVertexAttrs));
        GL_CALL(VertexAttribIPointer((int)Attrib::kVertexAttrs, 1, GR_GL_INT, sizeof(ShapeVertex),
                                     (void*) offsetof(ShapeVertex, fAttrs)));

        SkASSERT(SK_InvalidUniqueID == fInstanceAttribsBufferUniqueId);
    }

    // Create and map instance and draw-indirect buffers.
    SkASSERT(!fInstanceBuffer);
    fInstanceBuffer.reset(
        rp->createBuffer(sizeof(Instance) * numGLInstances, kVertex_GrBufferType,
                         kDynamic_GrAccessPattern,
                         GrResourceProvider::kNoPendingIO_Flag |
                         GrResourceProvider::kRequireGpuMemory_Flag));
    if (!fInstanceBuffer) {
        return;
    }

    SkASSERT(!fDrawIndirectBuffer);
    fDrawIndirectBuffer.reset(
        rp->createBuffer(sizeof(GrGLDrawElementsIndirectCommand) * numGLDrawCmds,
                         kDrawIndirect_GrBufferType, kDynamic_GrAccessPattern,
                         GrResourceProvider::kNoPendingIO_Flag |
                         GrResourceProvider::kRequireGpuMemory_Flag));
    if (!fDrawIndirectBuffer) {
        return;
    }

    Instance* glMappedInstances = static_cast<Instance*>(fInstanceBuffer->map());
    int glInstancesIdx = 0;

    auto* glMappedCmds = static_cast<GrGLDrawElementsIndirectCommand*>(fDrawIndirectBuffer->map());
    int glDrawCmdsIdx = 0;

    bool baseInstanceSupport = this->glGpu()->glCaps().baseInstanceSupport();

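    // Without base-instance support (or when logging), record per-command instance counts so
    // onDraw can walk the commands and rebind the instance attribs at the right offset.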
    if (GR_GL_LOG_INSTANCED_BATCHES || !baseInstanceSupport) {
        fGLDrawCmdsInfo.reset(numGLDrawCmds);
    }

    // Generate the instance and draw-indirect buffer contents based on the tracked batches.
    iter.init(this->trackedBatches(), BatchList::Iter::kHead_IterStart);
    while (Batch* b = iter.get()) {
        GLBatch* batch = static_cast<GLBatch*>(b);
        iter.next();

        batch->fEmulatedBaseInstance = baseInstanceSupport ? 0 : glInstancesIdx;
        batch->fGLDrawCmdsIdx = glDrawCmdsIdx;

        const Batch::Draw* draw = batch->fHeadDraw;
        SkASSERT(draw);
        do {
            int instanceCount = 0;
            IndexRange geometry = draw->fGeometry;
            SkASSERT(!geometry.isEmpty());

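            // Consecutive draws that share an index range collapse into one indirect command.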
            do {
                glMappedInstances[glInstancesIdx + instanceCount++] = draw->fInstance;
                draw = draw->fNext;
            } while (draw && draw->fGeometry == geometry);

            GrGLDrawElementsIndirectCommand& glCmd = glMappedCmds[glDrawCmdsIdx];
            glCmd.fCount = geometry.fCount;
            glCmd.fInstanceCount = instanceCount;
            glCmd.fFirstIndex = geometry.fStart;
            glCmd.fBaseVertex = 0;
            glCmd.fBaseInstance = baseInstanceSupport ? glInstancesIdx : 0;

            if (GR_GL_LOG_INSTANCED_BATCHES || !baseInstanceSupport) {
                fGLDrawCmdsInfo[glDrawCmdsIdx].fInstanceCount = instanceCount;
#if GR_GL_LOG_INSTANCED_BATCHES
                fGLDrawCmdsInfo[glDrawCmdsIdx].fGeometry = geometry;
#endif
            }

            glInstancesIdx += instanceCount;
            ++glDrawCmdsIdx;
        } while (draw);
    }

    SkASSERT(glDrawCmdsIdx == numGLDrawCmds);
    fDrawIndirectBuffer->unmap();

    SkASSERT(glInstancesIdx == numGLInstances);
    fInstanceBuffer->unmap();
}

void GLInstancedRendering::onDraw(const GrPipeline& pipeline, const InstanceProcessor& instProc,
                                  const Batch* baseBatch) {
    if (!fDrawIndirectBuffer) {
        return; // beginFlush was not successful.
    }
    if (!this->glGpu()->flushGLState(pipeline, instProc)) {
        return;
    }

    this->glGpu()->bindBuffer(kDrawIndirect_GrBufferType, fDrawIndirectBuffer.get());

    const GrGLCaps& glCaps = this->glGpu()->glCaps();
    const GLBatch* batch = static_cast<const GLBatch*>(baseBatch);
    int numCommands = batch->numGLCommands();

#if GR_GL_LOG_INSTANCED_BATCHES
    SkASSERT(fGLDrawCmdsInfo);
    SkDebugf("Instanced batch: [");
    for (int i = 0; i < numCommands; ++i) {
        int glCmdIdx = batch->fGLDrawCmdsIdx + i;
        SkDebugf("%s%i * %s", (i ? ", " : ""), fGLDrawCmdsInfo[glCmdIdx].fInstanceCount,
                 InstanceProcessor::GetNameOfIndexRange(fGLDrawCmdsInfo[glCmdIdx].fGeometry));
    }
    SkDebugf("]\n");
#else
    SkASSERT(SkToBool(fGLDrawCmdsInfo) == !glCaps.baseInstanceSupport());
#endif

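    // Multi-draw-indirect requires both multiDrawIndirectSupport and baseInstanceSupport (each
    // command carries its own base instance). Otherwise fall back to issuing the commands one at
    // a time, re-pointing the instance attribs whenever a base instance has to be emulated.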
    if (1 == numCommands || !glCaps.baseInstanceSupport() || !glCaps.multiDrawIndirectSupport()) {
        int emulatedBaseInstance = batch->fEmulatedBaseInstance;
        for (int i = 0; i < numCommands; ++i) {
            int glCmdIdx = batch->fGLDrawCmdsIdx + i;
            this->flushInstanceAttribs(emulatedBaseInstance);
            GL_CALL(DrawElementsIndirect(GR_GL_TRIANGLES, GR_GL_UNSIGNED_BYTE,
                                         (GrGLDrawElementsIndirectCommand*) nullptr + glCmdIdx));
            if (!glCaps.baseInstanceSupport()) {
                emulatedBaseInstance += fGLDrawCmdsInfo[glCmdIdx].fInstanceCount;
            }
        }
    } else {
        int glCmdsIdx = batch->fGLDrawCmdsIdx;
        this->flushInstanceAttribs(batch->fEmulatedBaseInstance);
        GL_CALL(MultiDrawElementsIndirect(GR_GL_TRIANGLES, GR_GL_UNSIGNED_BYTE,
                                          (GrGLDrawElementsIndirectCommand*) nullptr + glCmdsIdx,
                                          numCommands, 0));
    }
}

void GLInstancedRendering::flushInstanceAttribs(int baseInstance) {
    SkASSERT(fVertexArrayID);
    this->glGpu()->bindVertexArray(fVertexArrayID);

    SkASSERT(fInstanceBuffer);
    if (fInstanceAttribsBufferUniqueId != fInstanceBuffer->getUniqueID() ||
        fInstanceAttribsBaseInstance != baseInstance) {
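        // The attrib pointers are byte offsets into the bound instance buffer, computed via
        // pointer arithmetic from null so &offsetInBuffer->fField lands on the right member.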
        Instance* offsetInBuffer = (Instance*) nullptr + baseInstance;

        this->glGpu()->bindBuffer(kVertex_GrBufferType, fInstanceBuffer.get());

        // Info attrib.
        GL_CALL(EnableVertexAttribArray((int)Attrib::kInstanceInfo));
        GL_CALL(VertexAttribIPointer((int)Attrib::kInstanceInfo, 1, GR_GL_UNSIGNED_INT,
                                     sizeof(Instance), &offsetInBuffer->fInfo));
        GL_CALL(VertexAttribDivisor((int)Attrib::kInstanceInfo, 1));

        // Shape matrix attrib.
        GL_CALL(EnableVertexAttribArray((int)Attrib::kShapeMatrixX));
        GL_CALL(EnableVertexAttribArray((int)Attrib::kShapeMatrixY));
        GL_CALL(VertexAttribPointer((int)Attrib::kShapeMatrixX, 3, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(Instance), &offsetInBuffer->fShapeMatrix2x3[0]));
        GL_CALL(VertexAttribPointer((int)Attrib::kShapeMatrixY, 3, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(Instance), &offsetInBuffer->fShapeMatrix2x3[3]));
        GL_CALL(VertexAttribDivisor((int)Attrib::kShapeMatrixX, 1));
        GL_CALL(VertexAttribDivisor((int)Attrib::kShapeMatrixY, 1));

        // Color attrib.
        GL_CALL(EnableVertexAttribArray((int)Attrib::kColor));
        GL_CALL(VertexAttribPointer((int)Attrib::kColor, 4, GR_GL_UNSIGNED_BYTE, GR_GL_TRUE,
                                    sizeof(Instance), &offsetInBuffer->fColor));
        GL_CALL(VertexAttribDivisor((int)Attrib::kColor, 1));

        // Local rect attrib.
        GL_CALL(EnableVertexAttribArray((int)Attrib::kLocalRect));
        GL_CALL(VertexAttribPointer((int)Attrib::kLocalRect, 4, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(Instance), &offsetInBuffer->fLocalRect));
        GL_CALL(VertexAttribDivisor((int)Attrib::kLocalRect, 1));

        fInstanceAttribsBufferUniqueId = fInstanceBuffer->getUniqueID();
        fInstanceAttribsBaseInstance = baseInstance;
    }
}

void GLInstancedRendering::onEndFlush() {
    fInstanceBuffer.reset();
    fDrawIndirectBuffer.reset();
    fGLDrawCmdsInfo.reset(0);
}

void GLInstancedRendering::onResetGpuResources(ResetType resetType) {
    if (fVertexArrayID && ResetType::kDestroy == resetType) {
        GL_CALL(DeleteVertexArrays(1, &fVertexArrayID));
        this->glGpu()->notifyVertexArrayDelete(fVertexArrayID);
    }
    fVertexArrayID = 0;
    fInstanceBuffer.reset();
    fDrawIndirectBuffer.reset();
    fInstanceAttribsBufferUniqueId = SK_InvalidUniqueID;
}

}