/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7
8#include "GrTargetCommands.h"
9
10#include "GrColor.h"
11#include "GrDefaultGeoProcFactory.h"
12#include "GrInOrderDrawBuffer.h"
13#include "GrTemplates.h"
14#include "SkPoint.h"
15
16void GrTargetCommands::closeBatch() {
17 if (fDrawBatch) {
18 fBatchTarget.resetNumberOfDraws();
19 fDrawBatch->execute(NULL, fPrevState);
20 fDrawBatch->fBatch->setNumberOfDraws(fBatchTarget.numberOfDraws());
21 fDrawBatch = NULL;
22 }
23}
24
25static bool path_fill_type_is_winding(const GrStencilSettings& pathStencilSettings) {
26 static const GrStencilSettings::Face pathFace = GrStencilSettings::kFront_Face;
27 bool isWinding = kInvert_StencilOp != pathStencilSettings.passOp(pathFace);
28 if (isWinding) {
29 // Double check that it is in fact winding.
30 SkASSERT(kIncClamp_StencilOp == pathStencilSettings.passOp(pathFace));
31 SkASSERT(kIncClamp_StencilOp == pathStencilSettings.failOp(pathFace));
32 SkASSERT(0x1 != pathStencilSettings.writeMask(pathFace));
33 SkASSERT(!pathStencilSettings.isTwoSided());
34 }
35 return isWinding;
36}
37
38int GrTargetCommands::concatInstancedDraw(GrInOrderDrawBuffer* iodb,
39 const GrDrawTarget::DrawInfo& info) {
40 SkASSERT(!fCmdBuffer.empty());
41 SkASSERT(info.isInstanced());
42
43 const GrIndexBuffer* ib;
44 if (!iodb->canConcatToIndexBuffer(&ib)) {
45 return 0;
46 }
47
48 // Check if there is a draw info that is compatible that uses the same VB from the pool and
49 // the same IB
50 if (Cmd::kDraw_Cmd != fCmdBuffer.back().type()) {
51 return 0;
52 }
53
54 Draw* draw = static_cast<Draw*>(&fCmdBuffer.back());
55
56 if (!draw->fInfo.isInstanced() ||
57 draw->fInfo.primitiveType() != info.primitiveType() ||
58 draw->fInfo.verticesPerInstance() != info.verticesPerInstance() ||
59 draw->fInfo.indicesPerInstance() != info.indicesPerInstance() ||
60 draw->fInfo.vertexBuffer() != info.vertexBuffer() ||
61 draw->fInfo.indexBuffer() != ib) {
62 return 0;
63 }
64 if (draw->fInfo.startVertex() + draw->fInfo.vertexCount() != info.startVertex()) {
65 return 0;
66 }
67
68 // how many instances can be concat'ed onto draw given the size of the index buffer
69 int instancesToConcat = iodb->indexCountInCurrentSource() / info.indicesPerInstance();
70 instancesToConcat -= draw->fInfo.instanceCount();
71 instancesToConcat = SkTMin(instancesToConcat, info.instanceCount());
72
73 draw->fInfo.adjustInstanceCount(instancesToConcat);
74
75 // update last fGpuCmdMarkers to include any additional trace markers that have been added
76 iodb->recordTraceMarkersIfNecessary(draw);
77 return instancesToConcat;
78}
79
80GrTargetCommands::Cmd* GrTargetCommands::recordDraw(
81 GrInOrderDrawBuffer* iodb,
82 const GrGeometryProcessor* gp,
83 const GrDrawTarget::DrawInfo& info,
84 const GrDrawTarget::PipelineInfo& pipelineInfo) {
85 SkASSERT(info.vertexBuffer() && (!info.isIndexed() || info.indexBuffer()));
86 this->closeBatch();
87
88 if (!this->setupPipelineAndShouldDraw(iodb, gp, pipelineInfo)) {
89 return NULL;
90 }
91
92 Draw* draw;
93 if (info.isInstanced()) {
94 int instancesConcated = this->concatInstancedDraw(iodb, info);
95 if (info.instanceCount() > instancesConcated) {
96 draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info));
97 draw->fInfo.adjustInstanceCount(-instancesConcated);
98 } else {
99 return NULL;
100 }
101 } else {
102 draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info));
103 }
104
105 return draw;
106}
107
108GrTargetCommands::Cmd* GrTargetCommands::recordDrawBatch(
109 GrInOrderDrawBuffer* iodb,
110 GrBatch* batch,
111 const GrDrawTarget::PipelineInfo& pipelineInfo) {
112 if (!this->setupPipelineAndShouldDraw(iodb, batch, pipelineInfo)) {
113 return NULL;
114 }
115
116 // Check if there is a Batch Draw we can batch with
117 if (Cmd::kDrawBatch_Cmd != fCmdBuffer.back().type() || !fDrawBatch) {
118 fDrawBatch = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch, &fBatchTarget));
119 return fDrawBatch;
120 }
121
122 SkASSERT(&fCmdBuffer.back() == fDrawBatch);
123 if (!fDrawBatch->fBatch->combineIfPossible(batch)) {
124 this->closeBatch();
125 fDrawBatch = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch, &fBatchTarget));
126 }
127
128 return fDrawBatch;
129}
130
131GrTargetCommands::Cmd* GrTargetCommands::recordStencilPath(
132 GrInOrderDrawBuffer* iodb,
133 const GrPipelineBuilder& pipelineBuilder,
134 const GrPathProcessor* pathProc,
135 const GrPath* path,
136 const GrScissorState& scissorState,
137 const GrStencilSettings& stencilSettings) {
138 this->closeBatch();
139
140 StencilPath* sp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, StencilPath,
141 (path, pipelineBuilder.getRenderTarget()));
142
143 sp->fScissor = scissorState;
144 sp->fUseHWAA = pipelineBuilder.isHWAntialias();
145 sp->fViewMatrix = pathProc->viewMatrix();
146 sp->fStencil = stencilSettings;
147 return sp;
148}
149
150GrTargetCommands::Cmd* GrTargetCommands::recordDrawPath(
151 GrInOrderDrawBuffer* iodb,
152 const GrPathProcessor* pathProc,
153 const GrPath* path,
154 const GrStencilSettings& stencilSettings,
155 const GrDrawTarget::PipelineInfo& pipelineInfo) {
156 this->closeBatch();
157
158 // TODO: Only compare the subset of GrPipelineBuilder relevant to path covering?
159 if (!this->setupPipelineAndShouldDraw(iodb, pathProc, pipelineInfo)) {
160 return NULL;
161 }
162 DrawPath* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPath, (path));
163 dp->fStencilSettings = stencilSettings;
164 return dp;
165}
166
// Records a command that draws a range of paths. Consecutive DrawPaths
// commands are folded into one when their index/transform data are contiguous
// in the buffer and combining cannot change the rendered result.
GrTargetCommands::Cmd* GrTargetCommands::recordDrawPaths(
        GrInOrderDrawBuffer* iodb,
        const GrPathProcessor* pathProc,
        const GrPathRange* pathRange,
        const void* indexValues,
        GrDrawTarget::PathIndexType indexType,
        const float transformValues[],
        GrDrawTarget::PathTransformType transformType,
        int count,
        const GrStencilSettings& stencilSettings,
        const GrDrawTarget::PipelineInfo& pipelineInfo) {
    SkASSERT(pathRange);
    SkASSERT(indexValues);
    SkASSERT(transformValues);
    this->closeBatch();

    if (!this->setupPipelineAndShouldDraw(iodb, pathProc, pipelineInfo)) {
        return NULL;
    }

    // Copy the caller's index and transform data into the draw buffer so the
    // recorded command can outlive this call.
    char* savedIndices;
    float* savedTransforms;

    iodb->appendIndicesAndTransforms(indexValues, indexType,
                                     transformValues, transformType,
                                     count, &savedIndices, &savedTransforms);

    if (Cmd::kDrawPaths_Cmd == fCmdBuffer.back().type()) {
        // The previous command was also DrawPaths. Try to collapse this call into the one
        // before. Note that stenciling all the paths at once, then covering, may not be
        // equivalent to two separate draw calls if there is overlap. Blending won't work,
        // and the combined calls may also cancel each other's winding numbers in some
        // places. For now the winding numbers are only an issue if the fill is even/odd,
        // because DrawPaths is currently only used for glyphs, and glyphs in the same
        // font tend to all wind in the same direction.
        DrawPaths* previous = static_cast<DrawPaths*>(&fCmdBuffer.back());
        if (pathRange == previous->pathRange() &&
            indexType == previous->fIndexType &&
            transformType == previous->fTransformType &&
            stencilSettings == previous->fStencilSettings &&
            path_fill_type_is_winding(stencilSettings) &&
            !pipelineInfo.willBlendWithDst(pathProc)) {
            const int indexBytes = GrPathRange::PathIndexSizeInBytes(indexType);
            const int xformSize = GrPathRendering::PathTransformSize(transformType);
            // Only fold when the new data lands immediately after the previous
            // command's data in the buffer (xformSize of 0 means no transforms
            // to check).
            if (&previous->fIndices[previous->fCount*indexBytes] == savedIndices &&
                (0 == xformSize ||
                 &previous->fTransforms[previous->fCount*xformSize] == savedTransforms)) {
                // Fold this DrawPaths call into the one previous.
                previous->fCount += count;
                return NULL;
            }
        }
    }

    DrawPaths* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPaths, (pathRange));
    dp->fIndices = savedIndices;
    dp->fIndexType = indexType;
    dp->fTransforms = savedTransforms;
    dp->fTransformType = transformType;
    dp->fCount = count;
    dp->fStencilSettings = stencilSettings;
    return dp;
}
230
231GrTargetCommands::Cmd* GrTargetCommands::recordClear(GrInOrderDrawBuffer* iodb,
232 const SkIRect* rect,
233 GrColor color,
234 bool canIgnoreRect,
235 GrRenderTarget* renderTarget) {
236 SkASSERT(renderTarget);
237 this->closeBatch();
238
239 SkIRect r;
240 if (NULL == rect) {
241 // We could do something smart and remove previous draws and clears to
242 // the current render target. If we get that smart we have to make sure
243 // those draws aren't read before this clear (render-to-texture).
244 r.setLTRB(0, 0, renderTarget->width(), renderTarget->height());
245 rect = &r;
246 }
247 Clear* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Clear, (renderTarget));
248 GrColorIsPMAssert(color);
249 clr->fColor = color;
250 clr->fRect = *rect;
251 clr->fCanIgnoreRect = canIgnoreRect;
252 return clr;
253}
254
255GrTargetCommands::Cmd* GrTargetCommands::recordClearStencilClip(GrInOrderDrawBuffer* iodb,
256 const SkIRect& rect,
257 bool insideClip,
258 GrRenderTarget* renderTarget) {
259 SkASSERT(renderTarget);
260 this->closeBatch();
261
262 ClearStencilClip* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, ClearStencilClip, (renderTarget));
263 clr->fRect = rect;
264 clr->fInsideClip = insideClip;
265 return clr;
266}
267
268GrTargetCommands::Cmd* GrTargetCommands::recordDiscard(GrInOrderDrawBuffer* iodb,
269 GrRenderTarget* renderTarget) {
270 SkASSERT(renderTarget);
271 this->closeBatch();
272
273 Clear* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Clear, (renderTarget));
274 clr->fColor = GrColor_ILLEGAL;
275 return clr;
276}
277
278void GrTargetCommands::reset() {
279 fCmdBuffer.reset();
280 fPrevState = NULL;
281 fDrawBatch = NULL;
282}
283
284void GrTargetCommands::flush(GrInOrderDrawBuffer* iodb) {
285 if (fCmdBuffer.empty()) {
286 return;
287 }
288
289 // Updated every time we find a set state cmd to reflect the current state in the playback
290 // stream.
291 SetState* currentState = NULL;
292
293 // TODO this is temporary while batch is being rolled out
294 this->closeBatch();
295 iodb->getVertexAllocPool()->unmap();
296 iodb->getIndexAllocPool()->unmap();
297 fBatchTarget.preFlush();
298
299 currentState = NULL;
300 CmdBuffer::Iter iter(fCmdBuffer);
301
302 int currCmdMarker = 0;
303
304 GrGpu* gpu = iodb->getGpu();
305
306 int i = 0;
307 while (iter.next()) {
308 i++;
309 GrGpuTraceMarker newMarker("", -1);
310 SkString traceString;
311 if (iter->isTraced()) {
312 traceString = iodb->getCmdString(currCmdMarker);
313 newMarker.fMarker = traceString.c_str();
314 gpu->addGpuTraceMarker(&newMarker);
315 ++currCmdMarker;
316 }
317
318 // TODO temporary hack
319 if (Cmd::kDrawBatch_Cmd == iter->type()) {
320 DrawBatch* db = reinterpret_cast<DrawBatch*>(iter.get());
321 fBatchTarget.flushNext(db->fBatch->numberOfDraws());
322 continue;
323 }
324
325 if (Cmd::kSetState_Cmd == iter->type()) {
326 SetState* ss = reinterpret_cast<SetState*>(iter.get());
327
328 // TODO sometimes we have a prim proc, othertimes we have a GrBatch. Eventually we
329 // will only have GrBatch and we can delete this
330 if (ss->fPrimitiveProcessor) {
331 gpu->buildProgramDesc(&ss->fDesc, *ss->fPrimitiveProcessor,
332 *ss->getPipeline(),
333 ss->fBatchTracker);
334 }
335 currentState = ss;
336 } else {
337 iter->execute(gpu, currentState);
338 }
339
340 if (iter->isTraced()) {
341 gpu->removeGpuTraceMarker(&newMarker);
342 }
343 }
344
345 // TODO see copious notes about hack
346 fBatchTarget.postFlush();
347}
348
349void GrTargetCommands::Draw::execute(GrGpu* gpu, const SetState* state) {
350 SkASSERT(state);
351 DrawArgs args(state->fPrimitiveProcessor.get(), state->getPipeline(), &state->fDesc,
352 &state->fBatchTracker);
353 gpu->draw(args, fInfo);
354}
355
356void GrTargetCommands::StencilPath::execute(GrGpu* gpu, const SetState*) {
357 GrGpu::StencilPathState state;
358 state.fRenderTarget = fRenderTarget.get();
359 state.fScissor = &fScissor;
360 state.fStencil = &fStencil;
361 state.fUseHWAA = fUseHWAA;
362 state.fViewMatrix = &fViewMatrix;
363
364 gpu->stencilPath(this->path(), state);
365}
366
367void GrTargetCommands::DrawPath::execute(GrGpu* gpu, const SetState* state) {
368 SkASSERT(state);
369 DrawArgs args(state->fPrimitiveProcessor.get(), state->getPipeline(), &state->fDesc,
370 &state->fBatchTracker);
371 gpu->drawPath(args, this->path(), fStencilSettings);
372}
373
374void GrTargetCommands::DrawPaths::execute(GrGpu* gpu, const SetState* state) {
375 SkASSERT(state);
376 DrawArgs args(state->fPrimitiveProcessor.get(), state->getPipeline(), &state->fDesc,
377 &state->fBatchTracker);
378 gpu->drawPaths(args, this->pathRange(),
379 fIndices, fIndexType,
380 fTransforms, fTransformType,
381 fCount, fStencilSettings);
382}
383
384void GrTargetCommands::DrawBatch::execute(GrGpu*, const SetState* state) {
385 SkASSERT(state);
386 fBatch->generateGeometry(fBatchTarget, state->getPipeline());
387}
388
// SetState is consumed by flush() itself (it updates currentState and builds
// the program descriptor there), so executing the command is a no-op.
void GrTargetCommands::SetState::execute(GrGpu*, const SetState*) {}
390
391void GrTargetCommands::Clear::execute(GrGpu* gpu, const SetState*) {
392 if (GrColor_ILLEGAL == fColor) {
393 gpu->discard(this->renderTarget());
394 } else {
395 gpu->clear(&fRect, fColor, fCanIgnoreRect, this->renderTarget());
396 }
397}
398
// Clears the stencil clip bit inside (or outside) fRect; needs no pipeline state.
void GrTargetCommands::ClearStencilClip::execute(GrGpu* gpu, const SetState*) {
    gpu->clearStencilClip(fRect, fInsideClip, this->renderTarget());
}
402
// Copies fSrcRect of the source surface to fDstPoint on the destination;
// needs no pipeline state.
void GrTargetCommands::CopySurface::execute(GrGpu* gpu, const SetState*) {
    gpu->copySurface(this->dst(), this->src(), fSrcRect, fDstPoint);
}
406
407GrTargetCommands::Cmd* GrTargetCommands::recordCopySurface(GrInOrderDrawBuffer* iodb,
408 GrSurface* dst,
409 GrSurface* src,
410 const SkIRect& srcRect,
411 const SkIPoint& dstPoint) {
412 if (iodb->getGpu()->canCopySurface(dst, src, srcRect, dstPoint)) {
413 this->closeBatch();
414 CopySurface* cs = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, CopySurface, (dst, src));
415 cs->fSrcRect = srcRect;
416 cs->fDstPoint = dstPoint;
417 return cs;
418 }
419 return NULL;
420}
421
// Appends a SetState for primProc + pipelineInfo. Returns false (recording
// nothing) when the resulting pipeline must skip the draw. When the new state
// is equivalent to fPrevState, the redundant SetState is popped so playback
// keeps using the previous one.
bool GrTargetCommands::setupPipelineAndShouldDraw(GrInOrderDrawBuffer* iodb,
                                                  const GrPrimitiveProcessor* primProc,
                                                  const GrDrawTarget::PipelineInfo& pipelineInfo) {
    SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, (primProc));
    iodb->setupPipeline(pipelineInfo, ss->pipelineLocation());

    // A must-skip pipeline means the draw would have no effect; undo the append.
    if (ss->getPipeline()->mustSkip()) {
        fCmdBuffer.pop_back();
        return false;
    }

    ss->fPrimitiveProcessor->initBatchTracker(&ss->fBatchTracker,
                                              ss->getPipeline()->getInitBatchTracker());

    // Deduplicate: if the previous state's processor can be made equal to this
    // one and the pipelines compare equal, drop the new SetState.
    if (fPrevState && fPrevState->fPrimitiveProcessor.get() &&
        fPrevState->fPrimitiveProcessor->canMakeEqual(fPrevState->fBatchTracker,
                                                      *ss->fPrimitiveProcessor,
                                                      ss->fBatchTracker) &&
        fPrevState->getPipeline()->isEqual(*ss->getPipeline())) {
        fCmdBuffer.pop_back();
    } else {
        fPrevState = ss;
        iodb->recordTraceMarkersIfNecessary(ss);
    }
    return true;
}
448
// Batch flavor of setupPipelineAndShouldDraw: the SetState carries no
// primitive processor (the batch supplies its own geometry). Returns false
// when the pipeline must skip. The open batch is closed only when the state
// actually changes, so compatible batch draws can keep accumulating.
bool GrTargetCommands::setupPipelineAndShouldDraw(GrInOrderDrawBuffer* iodb,
                                                  GrBatch* batch,
                                                  const GrDrawTarget::PipelineInfo& pipelineInfo) {
    SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, ());
    iodb->setupPipeline(pipelineInfo, ss->pipelineLocation());

    // A must-skip pipeline means the draw would have no effect; undo the append.
    if (ss->getPipeline()->mustSkip()) {
        fCmdBuffer.pop_back();
        return false;
    }

    batch->initBatchTracker(ss->getPipeline()->getInitBatchTracker());

    // Reuse the previous state only if it was also batch-style (no primitive
    // processor) and the pipelines compare equal.
    if (fPrevState && !fPrevState->fPrimitiveProcessor.get() &&
        fPrevState->getPipeline()->isEqual(*ss->getPipeline())) {
        fCmdBuffer.pop_back();
    } else {
        this->closeBatch();
        fPrevState = ss;
        iodb->recordTraceMarkersIfNecessary(ss);
    }
    return true;
}
472