/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GLInstancedRendering.h"

#include "GrResourceProvider.h"
#include "gl/GrGLGpu.h"
#include "instanced/InstanceProcessor.h"

#define GL_CALL(X) GR_GL_CALL(this->glGpu()->glInterface(), X)

namespace gr_instanced {

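// Batch implementation for the GL backend. During onBeginFlush() each batch records where its
// GL draw commands begin (fGLDrawCmdsIdx) and, when base-instance support is missing, which
// base instance it needs emulated, so onDraw() can issue the commands later.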
class GLInstancedRendering::GLBatch : public InstancedRendering::Batch {
public:
    DEFINE_BATCH_CLASS_ID

    GLBatch(GLInstancedRendering* instRendering) : INHERITED(ClassID(), instRendering) {}
    int numGLCommands() const { return 1 + fNumChangesInGeometry; }

private:
    int fEmulatedBaseInstance;
    int fGLDrawCmdsIdx;

    friend class GLInstancedRendering;

    typedef Batch INHERITED;
};

GrCaps::InstancedSupport GLInstancedRendering::CheckSupport(const GrGLCaps& glCaps) {
    // This method is only intended to be used for initializing fInstancedSupport in the caps.
    SkASSERT(GrCaps::InstancedSupport::kNone == glCaps.instancedSupport());
    if (!glCaps.vertexArrayObjectSupport() ||
        (!glCaps.drawIndirectSupport() && !glCaps.drawInstancedSupport())) {
        return GrCaps::InstancedSupport::kNone;
    }
    return InstanceProcessor::CheckSupport(*glCaps.glslCaps(), glCaps);
}

GLInstancedRendering::GLInstancedRendering(GrGLGpu* gpu)
    : INHERITED(gpu),
      fVertexArrayID(0),
      fGLDrawCmdsInfo(0),
      fInstanceAttribsBufferUniqueId(SK_InvalidUniqueID) {
    SkASSERT(GrCaps::InstancedSupport::kNone != this->gpu()->caps()->instancedSupport());
}

GLInstancedRendering::~GLInstancedRendering() {
    if (fVertexArrayID) {
        GL_CALL(DeleteVertexArrays(1, &fVertexArrayID));
        this->glGpu()->notifyVertexArrayDelete(fVertexArrayID);
    }
}

inline GrGLGpu* GLInstancedRendering::glGpu() const {
    return static_cast<GrGLGpu*>(this->gpu());
}

InstancedRendering::Batch* GLInstancedRendering::createBatch() {
    return new GLBatch(this);
}

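// Called at the start of a flush: counts what the tracked batches need to draw, lazily creates
// the VAO with the non-instanced attribs, then allocates and fills the instance buffer (and the
// draw-indirect buffer when the GL supports it).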
void GLInstancedRendering::onBeginFlush(GrResourceProvider* rp) {
    // Count what there is to draw.
    BatchList::Iter iter;
    iter.init(this->trackedBatches(), BatchList::Iter::kHead_IterStart);
    int numGLInstances = 0;
    int numGLDrawCmds = 0;
    while (Batch* b = iter.get()) {
        GLBatch* batch = static_cast<GLBatch*>(b);
        iter.next();

        numGLInstances += batch->fNumDraws;
        numGLDrawCmds += batch->numGLCommands();
    }
    if (!numGLDrawCmds) {
        return;
    }
    SkASSERT(numGLInstances);

    // Lazily create a vertex array object.
    if (!fVertexArrayID) {
        GL_CALL(GenVertexArrays(1, &fVertexArrayID));
        if (!fVertexArrayID) {
            return;
        }
        this->glGpu()->bindVertexArray(fVertexArrayID);

        // Attach our index buffer to the vertex array.
        SkASSERT(!this->indexBuffer()->isCPUBacked());
        GL_CALL(BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER,
                           static_cast<const GrGLBuffer*>(this->indexBuffer())->bufferID()));

        // Set up the non-instanced attribs.
        this->glGpu()->bindBuffer(kVertex_GrBufferType, this->vertexBuffer());
        GL_CALL(EnableVertexAttribArray((int)Attrib::kShapeCoords));
        GL_CALL(VertexAttribPointer((int)Attrib::kShapeCoords, 2, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(ShapeVertex), (void*) offsetof(ShapeVertex, fX)));
        GL_CALL(EnableVertexAttribArray((int)Attrib::kVertexAttrs));
        GL_CALL(VertexAttribIPointer((int)Attrib::kVertexAttrs, 1, GR_GL_INT, sizeof(ShapeVertex),
                                     (void*) offsetof(ShapeVertex, fAttrs)));

        SkASSERT(SK_InvalidUniqueID == fInstanceAttribsBufferUniqueId);
    }

    // Create and map instance and draw-indirect buffers.
    SkASSERT(!fInstanceBuffer);
    fInstanceBuffer.reset(
        rp->createBuffer(sizeof(Instance) * numGLInstances, kVertex_GrBufferType,
                         kDynamic_GrAccessPattern,
                         GrResourceProvider::kNoPendingIO_Flag |
                         GrResourceProvider::kRequireGpuMemory_Flag));
    if (!fInstanceBuffer) {
        return;
    }

    SkASSERT(!fDrawIndirectBuffer);
    if (this->glGpu()->glCaps().drawIndirectSupport()) {
        fDrawIndirectBuffer.reset(
            rp->createBuffer(sizeof(GrGLDrawElementsIndirectCommand) * numGLDrawCmds,
                             kDrawIndirect_GrBufferType, kDynamic_GrAccessPattern,
                             GrResourceProvider::kNoPendingIO_Flag |
                             GrResourceProvider::kRequireGpuMemory_Flag));
        if (!fDrawIndirectBuffer) {
            return;
        }
    }

    Instance* glMappedInstances = static_cast<Instance*>(fInstanceBuffer->map());
    SkASSERT(glMappedInstances);
    int glInstancesIdx = 0;

    GrGLDrawElementsIndirectCommand* glMappedCmds = nullptr;
    int glDrawCmdsIdx = 0;
    if (fDrawIndirectBuffer) {
        glMappedCmds = static_cast<GrGLDrawElementsIndirectCommand*>(fDrawIndirectBuffer->map());
        SkASSERT(glMappedCmds);
    }

    bool baseInstanceSupport = this->glGpu()->glCaps().baseInstanceSupport();
    SkASSERT(!baseInstanceSupport || fDrawIndirectBuffer);

    SkASSERT(!fGLDrawCmdsInfo);
    if (GR_GL_LOG_INSTANCED_BATCHES || !baseInstanceSupport) {
        fGLDrawCmdsInfo.reset(numGLDrawCmds);
    }

    // Generate the instance and draw-indirect buffer contents based on the tracked batches.
    iter.init(this->trackedBatches(), BatchList::Iter::kHead_IterStart);
    while (Batch* b = iter.get()) {
        GLBatch* batch = static_cast<GLBatch*>(b);
        iter.next();

        batch->fEmulatedBaseInstance = baseInstanceSupport ? 0 : glInstancesIdx;
        batch->fGLDrawCmdsIdx = glDrawCmdsIdx;

        const Batch::Draw* draw = batch->fHeadDraw;
        SkASSERT(draw);
        do {
            int instanceCount = 0;
            IndexRange geometry = draw->fGeometry;
            SkASSERT(!geometry.isEmpty());

            do {
                glMappedInstances[glInstancesIdx + instanceCount++] = draw->fInstance;
                draw = draw->fNext;
            } while (draw && draw->fGeometry == geometry);

            if (fDrawIndirectBuffer) {
                GrGLDrawElementsIndirectCommand& glCmd = glMappedCmds[glDrawCmdsIdx];
                glCmd.fCount = geometry.fCount;
                glCmd.fInstanceCount = instanceCount;
                glCmd.fFirstIndex = geometry.fStart;
                glCmd.fBaseVertex = 0;
                glCmd.fBaseInstance = baseInstanceSupport ? glInstancesIdx : 0;
            }

            if (GR_GL_LOG_INSTANCED_BATCHES || !baseInstanceSupport) {
                GLDrawCmdInfo& cmdInfo = fGLDrawCmdsInfo[glDrawCmdsIdx];
                cmdInfo.fGeometry = geometry;
                cmdInfo.fInstanceCount = instanceCount;
            }

            glInstancesIdx += instanceCount;
            ++glDrawCmdsIdx;
        } while (draw);
    }

    SkASSERT(glDrawCmdsIdx == numGLDrawCmds);
    if (fDrawIndirectBuffer) {
        fDrawIndirectBuffer->unmap();
    }

    SkASSERT(glInstancesIdx == numGLInstances);
    fInstanceBuffer->unmap();
}

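// Issues the GL draw calls for one batch: a single MultiDrawElementsIndirect when the caps allow
// it, otherwise one DrawElementsIndirect or DrawElementsInstanced per command, re-pointing the
// instance attribs to emulate a base instance where GL has no native support.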
void GLInstancedRendering::onDraw(const GrPipeline& pipeline, const InstanceProcessor& instProc,
                                  const Batch* baseBatch) {
    if (!fDrawIndirectBuffer && !fGLDrawCmdsInfo) {
        return; // beginFlush was not successful.
    }
    if (!this->glGpu()->flushGLState(pipeline, instProc)) {
        return;
    }

    if (fDrawIndirectBuffer) {
        this->glGpu()->bindBuffer(kDrawIndirect_GrBufferType, fDrawIndirectBuffer.get());
    }

    const GrGLCaps& glCaps = this->glGpu()->glCaps();
    const GLBatch* batch = static_cast<const GLBatch*>(baseBatch);
    int numCommands = batch->numGLCommands();

#if GR_GL_LOG_INSTANCED_BATCHES
    SkASSERT(fGLDrawCmdsInfo);
    SkDebugf("Instanced batch: [");
    for (int i = 0; i < numCommands; ++i) {
        int glCmdIdx = batch->fGLDrawCmdsIdx + i;
        SkDebugf("%s%i * %s", (i ? ", " : ""), fGLDrawCmdsInfo[glCmdIdx].fInstanceCount,
                 InstanceProcessor::GetNameOfIndexRange(fGLDrawCmdsInfo[glCmdIdx].fGeometry));
    }
    SkDebugf("]\n");
#else
    SkASSERT(SkToBool(fGLDrawCmdsInfo) == !glCaps.baseInstanceSupport());
#endif

    if (numCommands > 1 && glCaps.multiDrawIndirectSupport() && glCaps.baseInstanceSupport()) {
        SkASSERT(fDrawIndirectBuffer);
        int glCmdsIdx = batch->fGLDrawCmdsIdx;
        this->flushInstanceAttribs(batch->fEmulatedBaseInstance);
        GL_CALL(MultiDrawElementsIndirect(GR_GL_TRIANGLES, GR_GL_UNSIGNED_BYTE,
                                          (GrGLDrawElementsIndirectCommand*) nullptr + glCmdsIdx,
                                          numCommands, 0));
        return;
    }

    int emulatedBaseInstance = batch->fEmulatedBaseInstance;
    for (int i = 0; i < numCommands; ++i) {
        int glCmdIdx = batch->fGLDrawCmdsIdx + i;
        const GLDrawCmdInfo& cmdInfo = fGLDrawCmdsInfo[glCmdIdx];
        this->flushInstanceAttribs(emulatedBaseInstance);
        if (fDrawIndirectBuffer) {
            GL_CALL(DrawElementsIndirect(GR_GL_TRIANGLES, GR_GL_UNSIGNED_BYTE,
                                         (GrGLDrawElementsIndirectCommand*) nullptr + glCmdIdx));
        } else {
            GL_CALL(DrawElementsInstanced(GR_GL_TRIANGLES, cmdInfo.fGeometry.fCount,
                                          GR_GL_UNSIGNED_BYTE,
                                          (GrGLubyte*) nullptr + cmdInfo.fGeometry.fStart,
                                          cmdInfo.fInstanceCount));
        }
        if (!glCaps.baseInstanceSupport()) {
            emulatedBaseInstance += cmdInfo.fInstanceCount;
        }
    }
}

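// Binds the instance attribs, pointed at 'baseInstance' within the instance buffer; re-specifying
// the pointers like this is how a non-zero base instance is emulated. The GL calls are skipped
// when neither the buffer nor the base instance has changed since the last call.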
void GLInstancedRendering::flushInstanceAttribs(int baseInstance) {
    SkASSERT(fVertexArrayID);
    this->glGpu()->bindVertexArray(fVertexArrayID);

    SkASSERT(fInstanceBuffer);
    if (fInstanceAttribsBufferUniqueId != fInstanceBuffer->uniqueID() ||
        fInstanceAttribsBaseInstance != baseInstance) {
        Instance* offsetInBuffer = (Instance*) nullptr + baseInstance;

        this->glGpu()->bindBuffer(kVertex_GrBufferType, fInstanceBuffer.get());

        // Info attrib.
        GL_CALL(EnableVertexAttribArray((int)Attrib::kInstanceInfo));
        GL_CALL(VertexAttribIPointer((int)Attrib::kInstanceInfo, 1, GR_GL_UNSIGNED_INT,
                                     sizeof(Instance), &offsetInBuffer->fInfo));
        GL_CALL(VertexAttribDivisor((int)Attrib::kInstanceInfo, 1));

        // Shape matrix attrib.
        GL_CALL(EnableVertexAttribArray((int)Attrib::kShapeMatrixX));
        GL_CALL(EnableVertexAttribArray((int)Attrib::kShapeMatrixY));
        GL_CALL(VertexAttribPointer((int)Attrib::kShapeMatrixX, 3, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(Instance), &offsetInBuffer->fShapeMatrix2x3[0]));
        GL_CALL(VertexAttribPointer((int)Attrib::kShapeMatrixY, 3, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(Instance), &offsetInBuffer->fShapeMatrix2x3[3]));
        GL_CALL(VertexAttribDivisor((int)Attrib::kShapeMatrixX, 1));
        GL_CALL(VertexAttribDivisor((int)Attrib::kShapeMatrixY, 1));

        // Color attrib.
        GL_CALL(EnableVertexAttribArray((int)Attrib::kColor));
        GL_CALL(VertexAttribPointer((int)Attrib::kColor, 4, GR_GL_UNSIGNED_BYTE, GR_GL_TRUE,
                                    sizeof(Instance), &offsetInBuffer->fColor));
        GL_CALL(VertexAttribDivisor((int)Attrib::kColor, 1));

        // Local rect attrib.
        GL_CALL(EnableVertexAttribArray((int)Attrib::kLocalRect));
        GL_CALL(VertexAttribPointer((int)Attrib::kLocalRect, 4, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(Instance), &offsetInBuffer->fLocalRect));
        GL_CALL(VertexAttribDivisor((int)Attrib::kLocalRect, 1));

        fInstanceAttribsBufferUniqueId = fInstanceBuffer->uniqueID();
        fInstanceAttribsBaseInstance = baseInstance;
    }
}

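// Called when the flush completes: drops the per-flush buffers but keeps the VAO for reuse.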
void GLInstancedRendering::onEndFlush() {
    fInstanceBuffer.reset();
    fDrawIndirectBuffer.reset();
    fGLDrawCmdsInfo.reset(0);
}

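// Releases GL objects when the GPU's resources are reset. The VAO is deleted through GL only for
// ResetType::kDestroy; otherwise its ID is simply forgotten along with the buffers.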
void GLInstancedRendering::onResetGpuResources(ResetType resetType) {
    if (fVertexArrayID && ResetType::kDestroy == resetType) {
        GL_CALL(DeleteVertexArrays(1, &fVertexArrayID));
        this->glGpu()->notifyVertexArrayDelete(fVertexArrayID);
    }
    fVertexArrayID = 0;
    fInstanceBuffer.reset();
    fDrawIndirectBuffer.reset();
    fInstanceAttribsBufferUniqueId = SK_InvalidUniqueID;
}

}