/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef gr_instanced_InstancedRendering_DEFINED
#define gr_instanced_InstancedRendering_DEFINED

#include "GrGpu.h"
#include "GrMemoryPool.h"
#include "SkTInternalLList.h"
#include "batches/GrDrawOp.h"
#include "instanced/InstancedRenderingTypes.h"
#include "../private/GrInstancedPipelineInfo.h"

class GrResourceProvider;

namespace gr_instanced {

class InstanceProcessor;

/**
 * This class serves as a centralized clearinghouse for instanced rendering. It accumulates data
 * for instanced draws into one location, and creates special batches that pull from this data.
 * The nature of instanced rendering allows these batches to combine well and render efficiently.
 *
 * During a flush, this class assembles the accumulated draw data into a single vertex and texel
 * buffer, and its subclass draws the batches using backend-specific instanced rendering APIs.
 *
 * This class is responsible for the CPU side of instanced rendering. Shaders are implemented by
 * InstanceProcessor.
 */
class InstancedRendering : public SkNoncopyable {
public:
    virtual ~InstancedRendering() { SkASSERT(State::kRecordingDraws == fState); }

    GrGpu* gpu() const { return fGpu.get(); }

    /**
     * These methods make a new record internally for an instanced draw, and return a batch that is
     * effectively just an index to that record. The returned batch is not self-contained, but
     * rather relies on this class to handle the rendering. The client must call beginFlush() on
     * this class before attempting to flush batches returned by it. It is invalid to record new
     * draws between beginFlush() and endFlush().
     */
    GrDrawOp* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
                                               GrAA, const GrInstancedPipelineInfo&, GrAAType*);

    GrDrawOp* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
                                               const SkRect& localRect, GrAA,
                                               const GrInstancedPipelineInfo&, GrAAType*);

    GrDrawOp* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
                                               const SkMatrix& localMatrix, GrAA,
                                               const GrInstancedPipelineInfo&, GrAAType*);

    GrDrawOp* SK_WARN_UNUSED_RESULT recordOval(const SkRect&, const SkMatrix&, GrColor,
                                               GrAA, const GrInstancedPipelineInfo&, GrAAType*);

    GrDrawOp* SK_WARN_UNUSED_RESULT recordRRect(const SkRRect&, const SkMatrix&, GrColor,
                                                GrAA, const GrInstancedPipelineInfo&, GrAAType*);

    GrDrawOp* SK_WARN_UNUSED_RESULT recordDRRect(const SkRRect& outer, const SkRRect& inner,
                                                 const SkMatrix&, GrColor, GrAA,
                                                 const GrInstancedPipelineInfo&, GrAAType*);

    /**
     * Compiles all recorded draws into GPU buffers and allows the client to begin flushing the
     * batches created by this class.
     */
    void beginFlush(GrResourceProvider*);

    /**
     * Called once the batches created previously by this class have all been released. Allows the
     * client to begin recording draws again.
     */
    void endFlush();
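
    /**
     * A minimal sketch of the record/flush lifecycle implied by the contract above. The names
     * used below (ir, rect, viewMatrix, color, pipelineInfo, resourceProvider) are hypothetical
     * and purely illustrative, and a null return is assumed to mean the draw could not be
     * recorded here:
     *
     *     GrAAType aaType;
     *     GrDrawOp* op = ir->recordRect(rect, viewMatrix, color, GrAA::kYes, pipelineInfo,
     *                                   &aaType);
     *     // ... hand the op off to whatever will eventually execute it ...
     *
     *     ir->beginFlush(resourceProvider);  // compiles the accumulated draws into GPU buffers
     *     // ... the recorded batches are flushed/executed here ...
     *     ir->endFlush();                    // all batches released; recording may resume
     */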

    enum class ResetType : bool {
        kDestroy,
        kAbandon
    };
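    // Illustrative note, not an exhaustive contract: kAbandon is assumed to cover the case where
    // the owning context has been abandoned and the underlying GPU objects must not be freed,
    // while kDestroy releases them normally.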

    /**
     * Resets all GPU resources, including those that are held long term. They will be lazily
     * reinitialized if the class begins to be used again.
     */
    void resetGpuResources(ResetType);

protected:
    class Batch : public GrDrawOp {
    public:
        SK_DECLARE_INTERNAL_LLIST_INTERFACE(Batch);

        ~Batch() override;
        const char* name() const override { return "Instanced Batch"; }

        SkString dumpInfo() const override {
            SkString string;
            string.printf("AA: %d, ShapeTypes: 0x%02x, IShapeTypes: 0x%02x, Persp %d, "
                          "NonSquare: %d, PLoad: %0.2f, Tracked: %d, NumDraws: %d, "
                          "GeomChanges: %d\n",
                          (int)fInfo.fAntialiasMode,
                          fInfo.fShapeTypes,
                          fInfo.fInnerShapeTypes,
                          fInfo.fHasPerspective,
                          fInfo.fNonSquare,
                          fPixelLoad,
                          fIsTracked,
                          fNumDraws,
                          fNumChangesInGeometry);
            string.append(DumpPipelineInfo(*this->pipeline()));
            string.append(INHERITED::dumpInfo());
            return string;
        }

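        // Each Batch tracks its draws as a singly-linked list of Draw records (fHeadDraw through
        // fTailDraw below). Each Draw pairs one Instance with the index range of the geometry it
        // references; the nodes themselves are pooled by the owning InstancedRendering (see
        // fDrawPool below).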
        struct Draw {
            Instance     fInstance;
            IndexRange   fGeometry;
            Draw*        fNext;
        };

        Draw& getSingleDraw() const { SkASSERT(fHeadDraw && !fHeadDraw->fNext); return *fHeadDraw; }
        Instance& getSingleInstance() const { return this->getSingleDraw().fInstance; }

        void appendRRectParams(const SkRRect&);
        void appendParamsTexel(const SkScalar* vals, int count);
        void appendParamsTexel(SkScalar x, SkScalar y, SkScalar z, SkScalar w);
        void appendParamsTexel(SkScalar x, SkScalar y, SkScalar z);

    protected:
        Batch(uint32_t classID, InstancedRendering* ir);

        void initBatchTracker(const GrXPOverridesForBatch&) override;
        bool onCombineIfPossible(GrOp* other, const GrCaps& caps) override;

        void computePipelineOptimizations(GrInitInvariantOutput* color,
                                          GrInitInvariantOutput* coverage,
                                          GrBatchToXPOverrides*) const override;

        void onPrepare(GrOpFlushState*) override {}
        void onDraw(GrOpFlushState*, const SkRect& bounds) override;

        InstancedRendering* const         fInstancedRendering;
        BatchInfo                         fInfo;
        SkScalar                          fPixelLoad;
        SkSTArray<5, ParamsTexel, true>   fParams;
        bool                              fIsTracked;
        int                               fNumDraws;
        int                               fNumChangesInGeometry;
        Draw*                             fHeadDraw;
        Draw*                             fTailDraw;

        typedef GrDrawOp INHERITED;

        friend class InstancedRendering;
    };

    typedef SkTInternalLList<Batch> BatchList;

    InstancedRendering(GrGpu* gpu);

    const BatchList& trackedBatches() const { return fTrackedBatches; }
    const GrBuffer* vertexBuffer() const { SkASSERT(fVertexBuffer); return fVertexBuffer.get(); }
    const GrBuffer* indexBuffer() const { SkASSERT(fIndexBuffer); return fIndexBuffer.get(); }

    virtual void onBeginFlush(GrResourceProvider*) = 0;
    virtual void onDraw(const GrPipeline&, const InstanceProcessor&, const Batch*) = 0;
    virtual void onEndFlush() = 0;
    virtual void onResetGpuResources(ResetType) = 0;

private:
    enum class State : bool {
        kRecordingDraws,
        kFlushing
    };

    Batch* SK_WARN_UNUSED_RESULT recordShape(ShapeType, const SkRect& bounds,
                                             const SkMatrix& viewMatrix, GrColor,
                                             const SkRect& localRect, GrAA aa,
                                             const GrInstancedPipelineInfo&, GrAAType*);

    bool selectAntialiasMode(const SkMatrix& viewMatrix, GrAA aa, const GrInstancedPipelineInfo&,
                             GrAAType*, AntialiasMode*);

    virtual Batch* createBatch() = 0;

    const sk_sp<GrGpu>                    fGpu;
    State                                 fState;
    GrObjectMemoryPool<Batch::Draw>       fDrawPool;
    SkSTArray<1024, ParamsTexel, true>    fParams;
    BatchList                             fTrackedBatches;
    sk_sp<const GrBuffer>                 fVertexBuffer;
    sk_sp<const GrBuffer>                 fIndexBuffer;
    sk_sp<GrBuffer>                       fParamsBuffer;
};
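
/**
 * A minimal sketch of what a backend subclass is expected to provide, based on the virtual hooks
 * declared above. The class name and the inline notes are hypothetical and purely illustrative;
 * the real backend implementations live in their own backend-specific files.
 *
 *     class HypotheticalInstancedRendering final : public InstancedRendering {
 *     public:
 *         HypotheticalInstancedRendering(GrGpu* gpu) : INHERITED(gpu) {}
 *
 *     private:
 *         void onBeginFlush(GrResourceProvider*) override;   // build/upload backend buffers
 *         void onDraw(const GrPipeline&, const InstanceProcessor&, const Batch*) override;
 *         void onEndFlush() override;                        // tear down per-flush state
 *         void onResetGpuResources(ResetType) override;
 *         Batch* createBatch() override;                     // return a backend-specific Batch
 *
 *         typedef InstancedRendering INHERITED;
 *     };
 */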

}

#endif