/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrCCStroker_DEFINED
#define GrCCStroker_DEFINED

#include "include/private/SkNx.h"
#include "src/gpu/GrAllocator.h"
#include "src/gpu/GrMesh.h"
#include "src/gpu/ccpr/GrCCStrokeGeometry.h"

class GrGpuBuffer;
class GrCCCoverageProcessor;
class GrOnFlushResourceProvider;
class GrOpFlushState;
class GrPipeline;
class GrPrimitiveProcessor;
class SkMatrix;
class SkPath;
class SkStrokeRec;

26/**
27 * This class parses stroked SkPaths into a GPU instance buffer, then issues calls to draw their
28 * coverage counts.
29 */
30class GrCCStroker {
31public:
32 GrCCStroker(int numPaths, int numSkPoints, int numSkVerbs)
33 : fGeometry(numSkPoints, numSkVerbs), fPathInfos(numPaths) {}
34
35 // Parses a device-space SkPath into the current batch, using the SkPath's original verbs with
36 // 'deviceSpacePts', and the SkStrokeRec's original settings with 'strokeDevWidth'. Accepts an
37 // optional post-device-space translate for placement in an atlas.
38 //
39 // Strokes intended as hairlines must have a strokeDevWidth of 1. Non-hairline strokes can only
40 // be drawn with rigid body transforms; affine transformation of the stroke lines themselves is
41 // not yet supported.
42 void parseDeviceSpaceStroke(const SkPath&, const SkPoint* deviceSpacePts, const SkStrokeRec&,
43 float strokeDevWidth, GrScissorTest,
44 const SkIRect& clippedDevIBounds,
45 const SkIVector& devToAtlasOffset);
46
47 using BatchID = int;
48
49 // Compiles the outstanding parsed paths into a batch, and returns an ID that can be used to
50 // draw their strokes in the future.
51 BatchID closeCurrentBatch();
52
53 // Builds an internal GPU buffer and prepares for calls to drawStrokes(). Caller must close the
54 // current batch before calling this method, and cannot parse new paths afer.
55 bool prepareToDraw(GrOnFlushResourceProvider*);
56
57 // Called after prepareToDraw(). Draws the given batch of path strokes.
Chris Dalton2c5e0112019-03-29 13:14:18 -050058 void drawStrokes(
59 GrOpFlushState*, GrCCCoverageProcessor*, BatchID, const SkIRect& drawBounds) const;
Chris Dalton09a7bb22018-08-31 19:53:15 +080060
61private:
62 static constexpr int kNumScissorModes = 2;
63 static constexpr BatchID kEmptyBatchID = -1;
64 using Verb = GrCCStrokeGeometry::Verb;
65 using InstanceTallies = GrCCStrokeGeometry::InstanceTallies;
66
67 // Every kBeginPath verb has a corresponding PathInfo entry.
68 struct PathInfo {
69 SkIVector fDevToAtlasOffset;
70 float fStrokeRadius;
71 GrScissorTest fScissorTest;
72 };
73
74 // Defines a sub-batch of stroke instances that have a scissor test and the same scissor rect.
75 // Start indices are deduced by looking at the previous ScissorSubBatch.
76 struct ScissorSubBatch {
77 ScissorSubBatch(GrTAllocator<InstanceTallies>* alloc, const InstanceTallies& startIndices,
78 const SkIRect& scissor)
79 : fEndInstances(&alloc->emplace_back(startIndices)), fScissor(scissor) {}
80 InstanceTallies* fEndInstances;
81 SkIRect fScissor;
82 };
83
84 // Defines a batch of stroke instances that can be drawn with drawStrokes(). Start indices are
85 // deduced by looking at the previous Batch in the list.
86 struct Batch {
87 Batch(GrTAllocator<InstanceTallies>* alloc, const InstanceTallies& startNonScissorIndices,
88 int startScissorSubBatch)
89 : fNonScissorEndInstances(&alloc->emplace_back(startNonScissorIndices))
90 , fEndScissorSubBatch(startScissorSubBatch) {}
91 InstanceTallies* fNonScissorEndInstances;
92 int fEndScissorSubBatch;
93 };
94
95 class InstanceBufferBuilder;
96
97 void appendStrokeMeshesToBuffers(int numSegmentsLog2, const Batch&,
98 const InstanceTallies* startIndices[2],
99 int startScissorSubBatch, const SkIRect& drawBounds) const;
100 void flushBufferedMeshesAsStrokes(const GrPrimitiveProcessor&, GrOpFlushState*, const
101 GrPipeline&, const SkIRect& drawBounds) const;
102
103 template<int GrCCStrokeGeometry::InstanceTallies::* InstanceType>
104 void drawConnectingGeometry(GrOpFlushState*, const GrPipeline&,
105 const GrCCCoverageProcessor&, const Batch&,
106 const InstanceTallies* startIndices[2], int startScissorSubBatch,
107 const SkIRect& drawBounds) const;
108
109 GrCCStrokeGeometry fGeometry;
110 SkSTArray<32, PathInfo> fPathInfos;
111 SkSTArray<32, Batch> fBatches;
112 SkSTArray<32, ScissorSubBatch> fScissorSubBatches;
113 int fMaxNumScissorSubBatches = 0;
114 bool fHasOpenBatch = false;
115
116 const InstanceTallies fZeroTallies = InstanceTallies();
117 GrSTAllocator<128, InstanceTallies> fTalliesAllocator;
118 const InstanceTallies* fInstanceCounts[kNumScissorModes] = {&fZeroTallies, &fZeroTallies};
119
Brian Salomondbf70722019-02-07 11:31:24 -0500120 sk_sp<GrGpuBuffer> fInstanceBuffer;
Chris Dalton09a7bb22018-08-31 19:53:15 +0800121 // The indices stored in batches are relative to these base instances.
122 InstanceTallies fBaseInstances[kNumScissorModes];
123
124 mutable SkSTArray<32, GrMesh> fMeshesBuffer;
125 mutable SkSTArray<32, SkIRect> fScissorsBuffer;
126};

#endif