// blob: 2962a865481b7d97bbdb07e051fd9afa14ec1d15
/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GLInstancedRendering.h"

#include "GrResourceProvider.h"
#include "gl/GrGLGpu.h"
#include "instanced/InstanceProcessor.h"

#define GL_CALL(X) GR_GL_CALL(this->glGpu()->glInterface(), X)

namespace gr_instanced {

18class GLInstancedRendering::GLBatch : public InstancedRendering::Batch {
19public:
20 DEFINE_BATCH_CLASS_ID
21
22 GLBatch(GLInstancedRendering* instRendering) : INHERITED(ClassID(), instRendering) {}
23 int numGLCommands() const { return 1 + fNumChangesInGeometry; }
24
25private:
26 int fEmulatedBaseInstance;
27 int fGLDrawCmdsIdx;
28
29 friend class GLInstancedRendering;
30
31 typedef Batch INHERITED;
32};
33
34GLInstancedRendering* GLInstancedRendering::CreateIfSupported(GrGLGpu* gpu) {
35#ifndef SK_BUILD_FOR_MAC
36 // Only whitelisting on Mac for now. Once we've been able to work through the various issues on
37 // other platforms we can enable more generally.
38 return nullptr;
39#endif
40 const GrGLCaps& glCaps = gpu->glCaps();
41 AntialiasMode lastSupportedAAMode;
42 if (!glCaps.vertexArrayObjectSupport() ||
43 !glCaps.drawIndirectSupport() ||
44 !InstanceProcessor::IsSupported(*glCaps.glslCaps(), glCaps, &lastSupportedAAMode)) {
45 return nullptr;
46 }
47 return new GLInstancedRendering(gpu, lastSupportedAAMode);
48}
49
50GLInstancedRendering::GLInstancedRendering(GrGLGpu* gpu, AntialiasMode lastSupportedAAMode)
51 : INHERITED(gpu, lastSupportedAAMode, gpu->glCaps().canDrawIndirectToFloat()),
52 fVertexArrayID(0),
53 fGLDrawCmdsInfo(0),
54 fInstanceAttribsBufferUniqueId(SK_InvalidUniqueID) {
55}
56
57GLInstancedRendering::~GLInstancedRendering() {
58 if (fVertexArrayID) {
59 GL_CALL(DeleteVertexArrays(1, &fVertexArrayID));
60 this->glGpu()->notifyVertexArrayDelete(fVertexArrayID);
61 }
62}
63
64inline GrGLGpu* GLInstancedRendering::glGpu() const {
65 return static_cast<GrGLGpu*>(this->gpu());
66}
67
68InstancedRendering::Batch* GLInstancedRendering::createBatch() {
69 return new GLBatch(this);
70}
71
72void GLInstancedRendering::onBeginFlush(GrResourceProvider* rp) {
73 // Count what there is to draw.
74 BatchList::Iter iter;
75 iter.init(this->trackedBatches(), BatchList::Iter::kHead_IterStart);
76 int numGLInstances = 0;
77 int numGLDrawCmds = 0;
78 while (Batch* b = iter.get()) {
79 GLBatch* batch = static_cast<GLBatch*>(b);
80 iter.next();
81
82 numGLInstances += batch->fNumDraws;
83 numGLDrawCmds += batch->numGLCommands();
84 }
85 if (!numGLDrawCmds) {
86 return;
87 }
88 SkASSERT(numGLInstances);
89
90 // Lazily create a vertex array object.
91 if (!fVertexArrayID) {
92 GL_CALL(GenVertexArrays(1, &fVertexArrayID));
93 if (!fVertexArrayID) {
94 return;
95 }
96 this->glGpu()->bindVertexArray(fVertexArrayID);
97
98 // Attach our index buffer to the vertex array.
csmartdalton485a1202016-07-13 10:16:32 -070099 SkASSERT(!this->indexBuffer()->isCPUBacked());
csmartdaltona7f29642016-07-07 08:49:11 -0700100 GL_CALL(BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER,
101 static_cast<const GrGLBuffer*>(this->indexBuffer())->bufferID()));
102
103 // Set up the non-instanced attribs.
csmartdalton485a1202016-07-13 10:16:32 -0700104 this->glGpu()->bindBuffer(kVertex_GrBufferType, this->vertexBuffer());
csmartdaltona7f29642016-07-07 08:49:11 -0700105 GL_CALL(EnableVertexAttribArray((int)Attrib::kShapeCoords));
106 GL_CALL(VertexAttribPointer((int)Attrib::kShapeCoords, 2, GR_GL_FLOAT, GR_GL_FALSE,
107 sizeof(ShapeVertex), (void*) offsetof(ShapeVertex, fX)));
108 GL_CALL(EnableVertexAttribArray((int)Attrib::kVertexAttrs));
109 GL_CALL(VertexAttribIPointer((int)Attrib::kVertexAttrs, 1, GR_GL_INT, sizeof(ShapeVertex),
110 (void*) offsetof(ShapeVertex, fAttrs)));
111
112 SkASSERT(SK_InvalidUniqueID == fInstanceAttribsBufferUniqueId);
113 }
114
115 // Create and map instance and draw-indirect buffers.
116 SkASSERT(!fInstanceBuffer);
csmartdalton485a1202016-07-13 10:16:32 -0700117 fInstanceBuffer.reset(
csmartdaltona7f29642016-07-07 08:49:11 -0700118 rp->createBuffer(sizeof(Instance) * numGLInstances, kVertex_GrBufferType,
csmartdalton485a1202016-07-13 10:16:32 -0700119 kDynamic_GrAccessPattern,
120 GrResourceProvider::kNoPendingIO_Flag |
121 GrResourceProvider::kRequireGpuMemory_Flag));
csmartdaltona7f29642016-07-07 08:49:11 -0700122 if (!fInstanceBuffer) {
123 return;
124 }
125
126 SkASSERT(!fDrawIndirectBuffer);
csmartdalton485a1202016-07-13 10:16:32 -0700127 fDrawIndirectBuffer.reset(
csmartdaltona7f29642016-07-07 08:49:11 -0700128 rp->createBuffer(sizeof(GrGLDrawElementsIndirectCommand) * numGLDrawCmds,
129 kDrawIndirect_GrBufferType, kDynamic_GrAccessPattern,
csmartdalton485a1202016-07-13 10:16:32 -0700130 GrResourceProvider::kNoPendingIO_Flag |
131 GrResourceProvider::kRequireGpuMemory_Flag));
csmartdaltona7f29642016-07-07 08:49:11 -0700132 if (!fDrawIndirectBuffer) {
133 return;
134 }
135
136 Instance* glMappedInstances = static_cast<Instance*>(fInstanceBuffer->map());
137 int glInstancesIdx = 0;
138
139 auto* glMappedCmds = static_cast<GrGLDrawElementsIndirectCommand*>(fDrawIndirectBuffer->map());
140 int glDrawCmdsIdx = 0;
141
142 bool baseInstanceSupport = this->glGpu()->glCaps().baseInstanceSupport();
143
144 if (GR_GL_LOG_INSTANCED_BATCHES || !baseInstanceSupport) {
145 fGLDrawCmdsInfo.reset(numGLDrawCmds);
146 }
147
148 // Generate the instance and draw-indirect buffer contents based on the tracked batches.
149 iter.init(this->trackedBatches(), BatchList::Iter::kHead_IterStart);
150 while (Batch* b = iter.get()) {
151 GLBatch* batch = static_cast<GLBatch*>(b);
152 iter.next();
153
154 batch->fEmulatedBaseInstance = baseInstanceSupport ? 0 : glInstancesIdx;
155 batch->fGLDrawCmdsIdx = glDrawCmdsIdx;
156
157 const Batch::Draw* draw = batch->fHeadDraw;
158 SkASSERT(draw);
159 do {
160 int instanceCount = 0;
161 IndexRange geometry = draw->fGeometry;
162 SkASSERT(!geometry.isEmpty());
163
164 do {
165 glMappedInstances[glInstancesIdx + instanceCount++] = draw->fInstance;
166 draw = draw->fNext;
167 } while (draw && draw->fGeometry == geometry);
168
169 GrGLDrawElementsIndirectCommand& glCmd = glMappedCmds[glDrawCmdsIdx];
170 glCmd.fCount = geometry.fCount;
171 glCmd.fInstanceCount = instanceCount;
172 glCmd.fFirstIndex = geometry.fStart;
173 glCmd.fBaseVertex = 0;
174 glCmd.fBaseInstance = baseInstanceSupport ? glInstancesIdx : 0;
175
176 if (GR_GL_LOG_INSTANCED_BATCHES || !baseInstanceSupport) {
177 fGLDrawCmdsInfo[glDrawCmdsIdx].fInstanceCount = instanceCount;
178#if GR_GL_LOG_INSTANCED_BATCHES
179 fGLDrawCmdsInfo[glDrawCmdsIdx].fGeometry = geometry;
180#endif
181 }
182
183 glInstancesIdx += instanceCount;
184 ++glDrawCmdsIdx;
185 } while (draw);
186 }
187
188 SkASSERT(glDrawCmdsIdx == numGLDrawCmds);
189 fDrawIndirectBuffer->unmap();
190
191 SkASSERT(glInstancesIdx == numGLInstances);
192 fInstanceBuffer->unmap();
193}
194
195void GLInstancedRendering::onDraw(const GrPipeline& pipeline, const InstanceProcessor& instProc,
196 const Batch* baseBatch) {
197 if (!fDrawIndirectBuffer) {
198 return; // beginFlush was not successful.
199 }
200 if (!this->glGpu()->flushGLState(pipeline, instProc)) {
201 return;
202 }
203
204 this->glGpu()->bindBuffer(kDrawIndirect_GrBufferType, fDrawIndirectBuffer.get());
205
206 const GrGLCaps& glCaps = this->glGpu()->glCaps();
207 const GLBatch* batch = static_cast<const GLBatch*>(baseBatch);
208 int numCommands = batch->numGLCommands();
209
210#if GR_GL_LOG_INSTANCED_BATCHES
211 SkASSERT(fGLDrawCmdsInfo);
212 SkDebugf("Instanced batch: [");
213 for (int i = 0; i < numCommands; ++i) {
214 int glCmdIdx = batch->fGLDrawCmdsIdx + i;
215 SkDebugf("%s%i * %s", (i ? ", " : ""), fGLDrawCmdsInfo[glCmdIdx].fInstanceCount,
216 InstanceProcessor::GetNameOfIndexRange(fGLDrawCmdsInfo[glCmdIdx].fGeometry));
217 }
218 SkDebugf("]\n");
219#else
220 SkASSERT(SkToBool(fGLDrawCmdsInfo) == !glCaps.baseInstanceSupport());
221#endif
222
223 if (1 == numCommands || !glCaps.baseInstanceSupport() || !glCaps.multiDrawIndirectSupport()) {
224 int emulatedBaseInstance = batch->fEmulatedBaseInstance;
225 for (int i = 0; i < numCommands; ++i) {
226 int glCmdIdx = batch->fGLDrawCmdsIdx + i;
227 this->flushInstanceAttribs(emulatedBaseInstance);
228 GL_CALL(DrawElementsIndirect(GR_GL_TRIANGLES, GR_GL_UNSIGNED_BYTE,
229 (GrGLDrawElementsIndirectCommand*) nullptr + glCmdIdx));
230 if (!glCaps.baseInstanceSupport()) {
231 emulatedBaseInstance += fGLDrawCmdsInfo[glCmdIdx].fInstanceCount;
232 }
233 }
234 } else {
235 int glCmdsIdx = batch->fGLDrawCmdsIdx;
236 this->flushInstanceAttribs(batch->fEmulatedBaseInstance);
237 GL_CALL(MultiDrawElementsIndirect(GR_GL_TRIANGLES, GR_GL_UNSIGNED_BYTE,
238 (GrGLDrawElementsIndirectCommand*) nullptr + glCmdsIdx,
239 numCommands, 0));
240 }
241}
242
243void GLInstancedRendering::flushInstanceAttribs(int baseInstance) {
244 SkASSERT(fVertexArrayID);
245 this->glGpu()->bindVertexArray(fVertexArrayID);
246
247 SkASSERT(fInstanceBuffer);
248 if (fInstanceAttribsBufferUniqueId != fInstanceBuffer->getUniqueID() ||
249 fInstanceAttribsBaseInstance != baseInstance) {
250 Instance* offsetInBuffer = (Instance*) nullptr + baseInstance;
251
252 this->glGpu()->bindBuffer(kVertex_GrBufferType, fInstanceBuffer.get());
253
254 // Info attrib.
255 GL_CALL(EnableVertexAttribArray((int)Attrib::kInstanceInfo));
256 GL_CALL(VertexAttribIPointer((int)Attrib::kInstanceInfo, 1, GR_GL_UNSIGNED_INT,
257 sizeof(Instance), &offsetInBuffer->fInfo));
258 GL_CALL(VertexAttribDivisor((int)Attrib::kInstanceInfo, 1));
259
260 // Shape matrix attrib.
261 GL_CALL(EnableVertexAttribArray((int)Attrib::kShapeMatrixX));
262 GL_CALL(EnableVertexAttribArray((int)Attrib::kShapeMatrixY));
263 GL_CALL(VertexAttribPointer((int)Attrib::kShapeMatrixX, 3, GR_GL_FLOAT, GR_GL_FALSE,
264 sizeof(Instance), &offsetInBuffer->fShapeMatrix2x3[0]));
265 GL_CALL(VertexAttribPointer((int)Attrib::kShapeMatrixY, 3, GR_GL_FLOAT, GR_GL_FALSE,
266 sizeof(Instance), &offsetInBuffer->fShapeMatrix2x3[3]));
267 GL_CALL(VertexAttribDivisor((int)Attrib::kShapeMatrixX, 1));
268 GL_CALL(VertexAttribDivisor((int)Attrib::kShapeMatrixY, 1));
269
270 // Color attrib.
271 GL_CALL(EnableVertexAttribArray((int)Attrib::kColor));
272 GL_CALL(VertexAttribPointer((int)Attrib::kColor, 4, GR_GL_UNSIGNED_BYTE, GR_GL_TRUE,
273 sizeof(Instance), &offsetInBuffer->fColor));
274 GL_CALL(VertexAttribDivisor((int)Attrib::kColor, 1));
275
276 // Local rect attrib.
277 GL_CALL(EnableVertexAttribArray((int)Attrib::kLocalRect));
278 GL_CALL(VertexAttribPointer((int)Attrib::kLocalRect, 4, GR_GL_FLOAT, GR_GL_FALSE,
279 sizeof(Instance), &offsetInBuffer->fLocalRect));
280 GL_CALL(VertexAttribDivisor((int)Attrib::kLocalRect, 1));
281
282 fInstanceAttribsBufferUniqueId = fInstanceBuffer->getUniqueID();
283 fInstanceAttribsBaseInstance = baseInstance;
284 }
285}
286
287void GLInstancedRendering::onEndFlush() {
288 fInstanceBuffer.reset();
289 fDrawIndirectBuffer.reset();
290 fGLDrawCmdsInfo.reset(0);
291}
292
293void GLInstancedRendering::onResetGpuResources(ResetType resetType) {
294 if (fVertexArrayID && ResetType::kDestroy == resetType) {
295 GL_CALL(DeleteVertexArrays(1, &fVertexArrayID));
296 this->glGpu()->notifyVertexArrayDelete(fVertexArrayID);
297 }
298 fVertexArrayID = 0;
299 fInstanceBuffer.reset();
300 fDrawIndirectBuffer.reset();
301 fInstanceAttribsBufferUniqueId = SK_InvalidUniqueID;
302}
303
}  // namespace gr_instanced