/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7
8#include "GrTargetCommands.h"
9
10#include "GrColor.h"
11#include "GrDefaultGeoProcFactory.h"
12#include "GrInOrderDrawBuffer.h"
13#include "GrTemplates.h"
14#include "SkPoint.h"
15
16void GrTargetCommands::closeBatch() {
17 if (fDrawBatch) {
18 fBatchTarget.resetNumberOfDraws();
19 fDrawBatch->execute(NULL, fPrevState);
20 fDrawBatch->fBatch->setNumberOfDraws(fBatchTarget.numberOfDraws());
21 fDrawBatch = NULL;
22 }
23}
24
25static bool path_fill_type_is_winding(const GrStencilSettings& pathStencilSettings) {
26 static const GrStencilSettings::Face pathFace = GrStencilSettings::kFront_Face;
27 bool isWinding = kInvert_StencilOp != pathStencilSettings.passOp(pathFace);
28 if (isWinding) {
29 // Double check that it is in fact winding.
30 SkASSERT(kIncClamp_StencilOp == pathStencilSettings.passOp(pathFace));
31 SkASSERT(kIncClamp_StencilOp == pathStencilSettings.failOp(pathFace));
32 SkASSERT(0x1 != pathStencilSettings.writeMask(pathFace));
33 SkASSERT(!pathStencilSettings.isTwoSided());
34 }
35 return isWinding;
36}
37
// Attempts to fold the instanced draw described by 'info' into the previously
// recorded Draw command instead of emitting a new one. Returns the number of
// instances that were concatenated (0 when concatenation is not possible).
// Note: the check order matters — cheap rejections (index buffer, command
// type) come before the field-by-field compatibility comparison.
int GrTargetCommands::concatInstancedDraw(GrInOrderDrawBuffer* iodb,
                                          const GrDrawTarget::DrawInfo& info) {
    SkASSERT(!fCmdBuffer.empty());
    SkASSERT(info.isInstanced());

    const GrIndexBuffer* ib;
    if (!iodb->canConcatToIndexBuffer(&ib)) {
        return 0;
    }

    // Check if there is a draw info that is compatible that uses the same VB from the pool and
    // the same IB
    if (Cmd::kDraw_Cmd != fCmdBuffer.back().type()) {
        return 0;
    }

    Draw* draw = static_cast<Draw*>(&fCmdBuffer.back());

    // Both draws must agree on instancing layout, primitive type, and the
    // exact vertex/index buffers.
    if (!draw->fInfo.isInstanced() ||
        draw->fInfo.primitiveType() != info.primitiveType() ||
        draw->fInfo.verticesPerInstance() != info.verticesPerInstance() ||
        draw->fInfo.indicesPerInstance() != info.indicesPerInstance() ||
        draw->fInfo.vertexBuffer() != info.vertexBuffer() ||
        draw->fInfo.indexBuffer() != ib) {
        return 0;
    }
    // The new draw's vertices must start exactly where the previous draw's end.
    if (draw->fInfo.startVertex() + draw->fInfo.vertexCount() != info.startVertex()) {
        return 0;
    }

    // how many instances can be concat'ed onto draw given the size of the index buffer
    int instancesToConcat = iodb->indexCountInCurrentSource() / info.indicesPerInstance();
    instancesToConcat -= draw->fInfo.instanceCount();
    instancesToConcat = SkTMin(instancesToConcat, info.instanceCount());

    draw->fInfo.adjustInstanceCount(instancesToConcat);

    // update last fGpuCmdMarkers to include any additional trace markers that have been added
    iodb->recordTraceMarkersIfNecessary(draw);
    return instancesToConcat;
}
79
80GrTargetCommands::Cmd* GrTargetCommands::recordDraw(
81 GrInOrderDrawBuffer* iodb,
82 const GrGeometryProcessor* gp,
83 const GrDrawTarget::DrawInfo& info,
84 const GrDrawTarget::PipelineInfo& pipelineInfo) {
85 SkASSERT(info.vertexBuffer() && (!info.isIndexed() || info.indexBuffer()));
86 this->closeBatch();
87
88 if (!this->setupPipelineAndShouldDraw(iodb, gp, pipelineInfo)) {
89 return NULL;
90 }
91
92 Draw* draw;
93 if (info.isInstanced()) {
94 int instancesConcated = this->concatInstancedDraw(iodb, info);
95 if (info.instanceCount() > instancesConcated) {
96 draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info));
97 draw->fInfo.adjustInstanceCount(-instancesConcated);
98 } else {
99 return NULL;
100 }
101 } else {
102 draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info));
103 }
104
105 return draw;
106}
107
108GrTargetCommands::Cmd* GrTargetCommands::recordDrawBatch(
109 GrInOrderDrawBuffer* iodb,
110 GrBatch* batch,
111 const GrDrawTarget::PipelineInfo& pipelineInfo) {
112 if (!this->setupPipelineAndShouldDraw(iodb, batch, pipelineInfo)) {
113 return NULL;
114 }
115
116 // Check if there is a Batch Draw we can batch with
117 if (Cmd::kDrawBatch_Cmd != fCmdBuffer.back().type() || !fDrawBatch) {
118 fDrawBatch = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch, &fBatchTarget));
119 return fDrawBatch;
120 }
121
122 SkASSERT(&fCmdBuffer.back() == fDrawBatch);
123 if (!fDrawBatch->fBatch->combineIfPossible(batch)) {
124 this->closeBatch();
125 fDrawBatch = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch, &fBatchTarget));
126 }
127
128 return fDrawBatch;
129}
130
131GrTargetCommands::Cmd* GrTargetCommands::recordStencilPath(
132 GrInOrderDrawBuffer* iodb,
133 const GrPipelineBuilder& pipelineBuilder,
134 const GrPathProcessor* pathProc,
135 const GrPath* path,
136 const GrScissorState& scissorState,
137 const GrStencilSettings& stencilSettings) {
138 this->closeBatch();
139
140 StencilPath* sp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, StencilPath,
141 (path, pipelineBuilder.getRenderTarget()));
142
143 sp->fScissor = scissorState;
144 sp->fUseHWAA = pipelineBuilder.isHWAntialias();
145 sp->fViewMatrix = pathProc->viewMatrix();
146 sp->fStencil = stencilSettings;
147 return sp;
148}
149
150GrTargetCommands::Cmd* GrTargetCommands::recordDrawPath(
151 GrInOrderDrawBuffer* iodb,
152 const GrPathProcessor* pathProc,
153 const GrPath* path,
154 const GrStencilSettings& stencilSettings,
155 const GrDrawTarget::PipelineInfo& pipelineInfo) {
156 this->closeBatch();
157
158 // TODO: Only compare the subset of GrPipelineBuilder relevant to path covering?
159 if (!this->setupPipelineAndShouldDraw(iodb, pathProc, pipelineInfo)) {
160 return NULL;
161 }
162 DrawPath* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPath, (path));
163 dp->fStencilSettings = stencilSettings;
164 return dp;
165}
166
// Records a command that draws 'count' paths selected out of 'pathRange' by
// 'indexValues' and positioned by 'transformValues'. When the previous command
// is a compatible DrawPaths whose index/transform data this call's data
// directly follows in storage, the two are merged and NULL is returned.
// Also returns NULL when the pipeline skips the draw.
GrTargetCommands::Cmd* GrTargetCommands::recordDrawPaths(
        GrInOrderDrawBuffer* iodb,
        const GrPathProcessor* pathProc,
        const GrPathRange* pathRange,
        const void* indexValues,
        GrDrawTarget::PathIndexType indexType,
        const float transformValues[],
        GrDrawTarget::PathTransformType transformType,
        int count,
        const GrStencilSettings& stencilSettings,
        const GrDrawTarget::PipelineInfo& pipelineInfo) {
    SkASSERT(pathRange);
    SkASSERT(indexValues);
    SkASSERT(transformValues);
    this->closeBatch();

    if (!this->setupPipelineAndShouldDraw(iodb, pathProc, pipelineInfo)) {
        return NULL;
    }

    // Copies of the caller's index/transform data owned by the draw buffer.
    char* savedIndices;
    float* savedTransforms;

    iodb->appendIndicesAndTransforms(indexValues, indexType,
                                     transformValues, transformType,
                                     count, &savedIndices, &savedTransforms);

    if (Cmd::kDrawPaths_Cmd == fCmdBuffer.back().type()) {
        // The previous command was also DrawPaths. Try to collapse this call into the one
        // before. Note that stenciling all the paths at once, then covering, may not be
        // equivalent to two separate draw calls if there is overlap. Blending won't work,
        // and the combined calls may also cancel each other's winding numbers in some
        // places. For now the winding numbers are only an issue if the fill is even/odd,
        // because DrawPaths is currently only used for glyphs, and glyphs in the same
        // font tend to all wind in the same direction.
        DrawPaths* previous = static_cast<DrawPaths*>(&fCmdBuffer.back());
        if (pathRange == previous->pathRange() &&
            indexType == previous->fIndexType &&
            transformType == previous->fTransformType &&
            stencilSettings == previous->fStencilSettings &&
            path_fill_type_is_winding(stencilSettings) &&
            !pipelineInfo.willBlendWithDst(pathProc)) {
            const int indexBytes = GrPathRange::PathIndexSizeInBytes(indexType);
            const int xformSize = GrPathRendering::PathTransformSize(transformType);
            // Merging is only valid when the new data lands immediately after
            // the previous command's data, so one contiguous run covers both.
            if (&previous->fIndices[previous->fCount*indexBytes] == savedIndices &&
                (0 == xformSize ||
                 &previous->fTransforms[previous->fCount*xformSize] == savedTransforms)) {
                // Fold this DrawPaths call into the one previous.
                previous->fCount += count;
                return NULL;
            }
        }
    }

    DrawPaths* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPaths, (pathRange));
    dp->fIndices = savedIndices;
    dp->fIndexType = indexType;
    dp->fTransforms = savedTransforms;
    dp->fTransformType = transformType;
    dp->fCount = count;
    dp->fStencilSettings = stencilSettings;
    return dp;
}
230
231GrTargetCommands::Cmd* GrTargetCommands::recordClear(GrInOrderDrawBuffer* iodb,
232 const SkIRect* rect,
233 GrColor color,
234 bool canIgnoreRect,
235 GrRenderTarget* renderTarget) {
236 SkASSERT(renderTarget);
237 this->closeBatch();
238
239 SkIRect r;
240 if (NULL == rect) {
241 // We could do something smart and remove previous draws and clears to
242 // the current render target. If we get that smart we have to make sure
243 // those draws aren't read before this clear (render-to-texture).
244 r.setLTRB(0, 0, renderTarget->width(), renderTarget->height());
245 rect = &r;
246 }
247 Clear* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Clear, (renderTarget));
248 GrColorIsPMAssert(color);
249 clr->fColor = color;
250 clr->fRect = *rect;
251 clr->fCanIgnoreRect = canIgnoreRect;
252 return clr;
253}
254
255GrTargetCommands::Cmd* GrTargetCommands::recordClearStencilClip(GrInOrderDrawBuffer* iodb,
256 const SkIRect& rect,
257 bool insideClip,
258 GrRenderTarget* renderTarget) {
259 SkASSERT(renderTarget);
260 this->closeBatch();
261
262 ClearStencilClip* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, ClearStencilClip, (renderTarget));
263 clr->fRect = rect;
264 clr->fInsideClip = insideClip;
265 return clr;
266}
267
268GrTargetCommands::Cmd* GrTargetCommands::recordDiscard(GrInOrderDrawBuffer* iodb,
269 GrRenderTarget* renderTarget) {
270 SkASSERT(renderTarget);
271 this->closeBatch();
272
273 Clear* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Clear, (renderTarget));
274 clr->fColor = GrColor_ILLEGAL;
275 return clr;
276}
277
278void GrTargetCommands::reset() {
279 fCmdBuffer.reset();
280 fPrevState = NULL;
281 fDrawBatch = NULL;
282}
283
284void GrTargetCommands::flush(GrInOrderDrawBuffer* iodb) {
285 if (fCmdBuffer.empty()) {
286 return;
287 }
288
289 // Updated every time we find a set state cmd to reflect the current state in the playback
290 // stream.
291 SetState* currentState = NULL;
292
293 // TODO this is temporary while batch is being rolled out
294 this->closeBatch();
295 iodb->getVertexAllocPool()->unmap();
296 iodb->getIndexAllocPool()->unmap();
297 fBatchTarget.preFlush();
298
299 currentState = NULL;
300 CmdBuffer::Iter iter(fCmdBuffer);
301
302 int currCmdMarker = 0;
303
304 GrGpu* gpu = iodb->getGpu();
305
306 int i = 0;
307 while (iter.next()) {
308 i++;
309 GrGpuTraceMarker newMarker("", -1);
310 SkString traceString;
311 if (iter->isTraced()) {
312 traceString = iodb->getCmdString(currCmdMarker);
313 newMarker.fMarker = traceString.c_str();
314 gpu->addGpuTraceMarker(&newMarker);
315 ++currCmdMarker;
316 }
317
318 // TODO temporary hack
319 if (Cmd::kDrawBatch_Cmd == iter->type()) {
320 DrawBatch* db = reinterpret_cast<DrawBatch*>(iter.get());
321 fBatchTarget.flushNext(db->fBatch->numberOfDraws());
robertphillipsd14101e2015-03-05 08:55:28 -0800322
323 if (iter->isTraced()) {
324 gpu->removeGpuTraceMarker(&newMarker);
325 }
robertphillips193ea932015-03-03 12:40:49 -0800326 continue;
327 }
328
329 if (Cmd::kSetState_Cmd == iter->type()) {
330 SetState* ss = reinterpret_cast<SetState*>(iter.get());
331
332 // TODO sometimes we have a prim proc, othertimes we have a GrBatch. Eventually we
333 // will only have GrBatch and we can delete this
334 if (ss->fPrimitiveProcessor) {
335 gpu->buildProgramDesc(&ss->fDesc, *ss->fPrimitiveProcessor,
336 *ss->getPipeline(),
337 ss->fBatchTracker);
338 }
339 currentState = ss;
340 } else {
341 iter->execute(gpu, currentState);
342 }
343
344 if (iter->isTraced()) {
345 gpu->removeGpuTraceMarker(&newMarker);
346 }
347 }
348
349 // TODO see copious notes about hack
350 fBatchTarget.postFlush();
351}
352
353void GrTargetCommands::Draw::execute(GrGpu* gpu, const SetState* state) {
354 SkASSERT(state);
355 DrawArgs args(state->fPrimitiveProcessor.get(), state->getPipeline(), &state->fDesc,
356 &state->fBatchTracker);
357 gpu->draw(args, fInfo);
358}
359
360void GrTargetCommands::StencilPath::execute(GrGpu* gpu, const SetState*) {
361 GrGpu::StencilPathState state;
362 state.fRenderTarget = fRenderTarget.get();
363 state.fScissor = &fScissor;
364 state.fStencil = &fStencil;
365 state.fUseHWAA = fUseHWAA;
366 state.fViewMatrix = &fViewMatrix;
367
368 gpu->stencilPath(this->path(), state);
369}
370
371void GrTargetCommands::DrawPath::execute(GrGpu* gpu, const SetState* state) {
372 SkASSERT(state);
373 DrawArgs args(state->fPrimitiveProcessor.get(), state->getPipeline(), &state->fDesc,
374 &state->fBatchTracker);
375 gpu->drawPath(args, this->path(), fStencilSettings);
376}
377
378void GrTargetCommands::DrawPaths::execute(GrGpu* gpu, const SetState* state) {
379 SkASSERT(state);
380 DrawArgs args(state->fPrimitiveProcessor.get(), state->getPipeline(), &state->fDesc,
381 &state->fBatchTracker);
382 gpu->drawPaths(args, this->pathRange(),
383 fIndices, fIndexType,
384 fTransforms, fTransformType,
385 fCount, fStencilSettings);
386}
387
// Lets the batch generate its geometry into the batch target using the
// pipeline from the preceding SetState; the GrGpu parameter is unused here.
void GrTargetCommands::DrawBatch::execute(GrGpu*, const SetState* state) {
    SkASSERT(state);
    fBatch->generateGeometry(fBatchTarget, state->getPipeline());
}
392
393void GrTargetCommands::SetState::execute(GrGpu*, const SetState*) {}
394
395void GrTargetCommands::Clear::execute(GrGpu* gpu, const SetState*) {
396 if (GrColor_ILLEGAL == fColor) {
397 gpu->discard(this->renderTarget());
398 } else {
399 gpu->clear(&fRect, fColor, fCanIgnoreRect, this->renderTarget());
400 }
401}
402
// Clears the stencil clip over the recorded rect of the recorded render target.
void GrTargetCommands::ClearStencilClip::execute(GrGpu* gpu, const SetState*) {
    gpu->clearStencilClip(fRect, fInsideClip, this->renderTarget());
}
406
// Copies the recorded source rect into the destination surface at the
// recorded destination point.
void GrTargetCommands::CopySurface::execute(GrGpu* gpu, const SetState*) {
    gpu->copySurface(this->dst(), this->src(), fSrcRect, fDstPoint);
}
410
411GrTargetCommands::Cmd* GrTargetCommands::recordCopySurface(GrInOrderDrawBuffer* iodb,
412 GrSurface* dst,
413 GrSurface* src,
414 const SkIRect& srcRect,
415 const SkIPoint& dstPoint) {
416 if (iodb->getGpu()->canCopySurface(dst, src, srcRect, dstPoint)) {
417 this->closeBatch();
418 CopySurface* cs = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, CopySurface, (dst, src));
419 cs->fSrcRect = srcRect;
420 cs->fDstPoint = dstPoint;
421 return cs;
422 }
423 return NULL;
424}
425
// Appends a SetState for 'primProc' + 'pipelineInfo' and returns whether the
// caller should record its draw at all. If the pipeline must be skipped the
// SetState is popped and false is returned. If the new state is equivalent to
// the previously recorded one it is popped again so the upcoming draw reuses
// fPrevState, avoiding a redundant state change at playback.
bool GrTargetCommands::setupPipelineAndShouldDraw(GrInOrderDrawBuffer* iodb,
                                                  const GrPrimitiveProcessor* primProc,
                                                  const GrDrawTarget::PipelineInfo& pipelineInfo) {
    SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, (primProc));
    iodb->setupPipeline(pipelineInfo, ss->pipelineLocation());

    // Nothing will be drawn with this pipeline; discard the state and bail.
    if (ss->getPipeline()->mustSkip()) {
        fCmdBuffer.pop_back();
        return false;
    }

    ss->fPrimitiveProcessor->initBatchTracker(&ss->fBatchTracker,
                                              ss->getPipeline()->getInitBatchTracker());

    // Deduplicate: if the previous state had a primitive processor and can be
    // made equal to this one (and the pipelines match), drop the new SetState.
    if (fPrevState && fPrevState->fPrimitiveProcessor.get() &&
        fPrevState->fPrimitiveProcessor->canMakeEqual(fPrevState->fBatchTracker,
                                                      *ss->fPrimitiveProcessor,
                                                      ss->fBatchTracker) &&
        fPrevState->getPipeline()->isEqual(*ss->getPipeline())) {
        fCmdBuffer.pop_back();
    } else {
        fPrevState = ss;
        iodb->recordTraceMarkersIfNecessary(ss);
    }
    return true;
}
452
// GrBatch variant of the above: the SetState carries no primitive processor
// (the batch supplies its own geometry). The dedup test therefore requires the
// previous state to also be processor-less with an equal pipeline. When a new
// state is kept, the currently open batch (if any) is closed first, since it
// was recorded against the old state.
bool GrTargetCommands::setupPipelineAndShouldDraw(GrInOrderDrawBuffer* iodb,
                                                  GrBatch* batch,
                                                  const GrDrawTarget::PipelineInfo& pipelineInfo) {
    SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, ());
    iodb->setupPipeline(pipelineInfo, ss->pipelineLocation());

    // Nothing will be drawn with this pipeline; discard the state and bail.
    if (ss->getPipeline()->mustSkip()) {
        fCmdBuffer.pop_back();
        return false;
    }

    batch->initBatchTracker(ss->getPipeline()->getInitBatchTracker());

    if (fPrevState && !fPrevState->fPrimitiveProcessor.get() &&
        fPrevState->getPipeline()->isEqual(*ss->getPipeline())) {
        fCmdBuffer.pop_back();
    } else {
        this->closeBatch();
        fPrevState = ss;
        iodb->recordTraceMarkersIfNecessary(ss);
    }
    return true;
}
476