/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrCCStroker_DEFINED
#define GrCCStroker_DEFINED

#include "GrAllocator.h"
#include "GrMesh.h"
#include "SkNx.h"
#include "ccpr/GrCCStrokeGeometry.h"

class GrGpuBuffer;
class GrCCCoverageProcessor;
class GrOnFlushResourceProvider;
class GrOpFlushState;
class GrPipeline;
class GrPrimitiveProcessor;
class SkMatrix;
class SkPath;
class SkStrokeRec;

/**
 * This class parses stroked SkPaths into a GPU instance buffer, then issues calls to draw their
 * coverage counts.
 */
class GrCCStroker {
public:
    GrCCStroker(int numPaths, int numSkPoints, int numSkVerbs)
            : fGeometry(numSkPoints, numSkVerbs), fPathInfos(numPaths) {}

    // Parses a device-space SkPath into the current batch, using the SkPath's original verbs with
    // 'deviceSpacePts', and the SkStrokeRec's original settings with 'strokeDevWidth'. Accepts an
    // optional post-device-space translate for placement in an atlas.
    //
    // Strokes intended as hairlines must have a strokeDevWidth of 1. Non-hairline strokes can only
    // be drawn with rigid body transforms; affine transformation of the stroke lines themselves is
    // not yet supported.
    void parseDeviceSpaceStroke(const SkPath&, const SkPoint* deviceSpacePts, const SkStrokeRec&,
                                float strokeDevWidth, GrScissorTest,
                                const SkIRect& clippedDevIBounds,
                                const SkIVector& devToAtlasOffset);
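
    // For example, a stroke meant to be drawn as a hairline might be parsed roughly as below
    // (a sketch only; 'devPath', 'devPts', and the other caller-side values are hypothetical):
    //
    //     stroker.parseDeviceSpaceStroke(devPath, devPts, hairlineStrokeRec,
    //                                    /*strokeDevWidth=*/1, GrScissorTest::kDisabled,
    //                                    clippedDevIBounds, devToAtlasOffset);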

    using BatchID = int;

    // Compiles the outstanding parsed paths into a batch, and returns an ID that can be used to
    // draw their strokes in the future.
    BatchID closeCurrentBatch();

    // Builds an internal GPU buffer and prepares for calls to drawStrokes(). Caller must close the
    // current batch before calling this method, and cannot parse new paths afterward.
    bool prepareToDraw(GrOnFlushResourceProvider*);

    // Called after prepareToDraw(). Draws the given batch of path strokes.
    void drawStrokes(GrOpFlushState*, BatchID, const SkIRect& drawBounds) const;
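
    // A sketch of the intended call sequence, pieced together from the comments above. The
    // caller-side names ('devPath', 'onFlushRP', 'flushState', etc.) are placeholders, not part
    // of this API:
    //
    //     GrCCStroker stroker(numPaths, numSkPoints, numSkVerbs);
    //     stroker.parseDeviceSpaceStroke(devPath, devPts, strokeRec, strokeDevWidth, scissorTest,
    //                                    clippedDevIBounds, devToAtlasOffset);  // Once per path.
    //     GrCCStroker::BatchID batchID = stroker.closeCurrentBatch();
    //     if (stroker.prepareToDraw(onFlushRP)) {
    //         stroker.drawStrokes(flushState, batchID, drawBounds);
    //     }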

private:
    static constexpr int kNumScissorModes = 2;
    static constexpr BatchID kEmptyBatchID = -1;
    using Verb = GrCCStrokeGeometry::Verb;
    using InstanceTallies = GrCCStrokeGeometry::InstanceTallies;

    // Every kBeginPath verb has a corresponding PathInfo entry.
    struct PathInfo {
        SkIVector fDevToAtlasOffset;
        float fStrokeRadius;
        GrScissorTest fScissorTest;
    };

    // Defines a sub-batch of stroke instances that have a scissor test and the same scissor rect.
    // Start indices are deduced by looking at the previous ScissorSubBatch.
    struct ScissorSubBatch {
        ScissorSubBatch(GrTAllocator<InstanceTallies>* alloc, const InstanceTallies& startIndices,
                        const SkIRect& scissor)
                : fEndInstances(&alloc->emplace_back(startIndices)), fScissor(scissor) {}
        InstanceTallies* fEndInstances;
        SkIRect fScissor;
    };

    // Defines a batch of stroke instances that can be drawn with drawStrokes(). Start indices are
    // deduced by looking at the previous Batch in the list.
    struct Batch {
        Batch(GrTAllocator<InstanceTallies>* alloc, const InstanceTallies& startNonScissorIndices,
              int startScissorSubBatch)
                : fNonScissorEndInstances(&alloc->emplace_back(startNonScissorIndices))
                , fEndScissorSubBatch(startScissorSubBatch) {}
        InstanceTallies* fNonScissorEndInstances;
        int fEndScissorSubBatch;
    };
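
    // Illustration only, based on the comments above (not a member of this class): the start
    // tallies for fBatches[i] would be deduced from fBatches[i - 1].fNonScissorEndInstances, with
    // the first batch starting at zero relative to fBaseInstances; scissored sub-batches chain the
    // same way through fEndScissorSubBatch and the previous ScissorSubBatch's fEndInstances.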

    class InstanceBufferBuilder;

    void appendStrokeMeshesToBuffers(int numSegmentsLog2, const Batch&,
                                     const InstanceTallies* startIndices[2],
                                     int startScissorSubBatch, const SkIRect& drawBounds) const;
    void flushBufferedMeshesAsStrokes(const GrPrimitiveProcessor&, GrOpFlushState*,
                                      const GrPipeline&, const SkIRect& drawBounds) const;

    template<int GrCCStrokeGeometry::InstanceTallies::* InstanceType>
    void drawConnectingGeometry(GrOpFlushState*, const GrPipeline&,
                                const GrCCCoverageProcessor&, const Batch&,
                                const InstanceTallies* startIndices[2], int startScissorSubBatch,
                                const SkIRect& drawBounds) const;

    GrCCStrokeGeometry fGeometry;
    SkSTArray<32, PathInfo> fPathInfos;
    SkSTArray<32, Batch> fBatches;
    SkSTArray<32, ScissorSubBatch> fScissorSubBatches;
    int fMaxNumScissorSubBatches = 0;
    bool fHasOpenBatch = false;

    const InstanceTallies fZeroTallies = InstanceTallies();
    GrSTAllocator<128, InstanceTallies> fTalliesAllocator;
    const InstanceTallies* fInstanceCounts[kNumScissorModes] = {&fZeroTallies, &fZeroTallies};

    sk_sp<GrGpuBuffer> fInstanceBuffer;
    // The indices stored in batches are relative to these base instances.
    InstanceTallies fBaseInstances[kNumScissorModes];

    mutable SkSTArray<32, GrMesh> fMeshesBuffer;
    mutable SkSTArray<32, SkIRect> fScissorsBuffer;
};

#endif