/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef gr_instanced_InstancedRendering_DEFINED
#define gr_instanced_InstancedRendering_DEFINED

#include "GrMemoryPool.h"
#include "SkTInternalLList.h"
#include "batches/GrDrawBatch.h"
#include "instanced/InstancedRenderingTypes.h"
#include "../private/GrInstancedPipelineInfo.h"

class GrResourceProvider;

namespace gr_instanced {

class InstanceProcessor;

/**
 * This class serves as a centralized clearinghouse for instanced rendering. It accumulates data
 * for instanced draws into one location, and creates special batches that pull from this data.
 * The nature of instanced rendering allows these batches to combine well and render efficiently.
 *
 * During a flush, this class assembles the accumulated draw data into a single vertex buffer and
 * a single texel buffer, and its subclass draws the batches using backend-specific instanced
 * rendering APIs.
 *
 * This class is responsible for the CPU side of instanced rendering. Shaders are implemented by
 * InstanceProcessor.
 */
class InstancedRendering : public SkNoncopyable {
public:
    virtual ~InstancedRendering() { SkASSERT(State::kRecordingDraws == fState); }

    GrGpu* gpu() const { return fGpu; }

    /**
     * These methods make a new record internally for an instanced draw, and return a batch that
     * is effectively just an index to that record. The returned batch is not self-contained, but
     * rather relies on this class to handle the rendering. The client must call beginFlush() on
     * this class before attempting to flush batches returned by it. It is invalid to record new
     * draws between beginFlush() and endFlush().
     */
    GrDrawBatch* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
                                                  bool antialias, const GrInstancedPipelineInfo&,
                                                  bool* useHWAA);

    GrDrawBatch* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
                                                  const SkRect& localRect, bool antialias,
                                                  const GrInstancedPipelineInfo&, bool* useHWAA);

    GrDrawBatch* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
                                                  const SkMatrix& localMatrix, bool antialias,
                                                  const GrInstancedPipelineInfo&, bool* useHWAA);

    GrDrawBatch* SK_WARN_UNUSED_RESULT recordOval(const SkRect&, const SkMatrix&, GrColor,
                                                  bool antialias, const GrInstancedPipelineInfo&,
                                                  bool* useHWAA);

    GrDrawBatch* SK_WARN_UNUSED_RESULT recordRRect(const SkRRect&, const SkMatrix&, GrColor,
                                                   bool antialias, const GrInstancedPipelineInfo&,
                                                   bool* useHWAA);

    GrDrawBatch* SK_WARN_UNUSED_RESULT recordDRRect(const SkRRect& outer, const SkRRect& inner,
                                                    const SkMatrix&, GrColor, bool antialias,
                                                    const GrInstancedPipelineInfo&, bool* useHWAA);

    /**
     * Compiles all recorded draws into GPU buffers and allows the client to begin flushing the
     * batches created by this class.
     */
    void beginFlush(GrResourceProvider*);

    /**
     * Called once the batches created previously by this class have all been released. Allows the
     * client to begin recording draws again.
     */
    void endFlush();
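
    /**
     * A minimal lifecycle sketch (hypothetical caller code; 'ir', 'rect', 'viewMatrix', 'color',
     * 'info', and 'resourceProvider' are placeholders, not part of this API):
     *
     *   bool useHWAA;
     *   GrDrawBatch* batch = ir->recordRect(rect, viewMatrix, color, true, info, &useHWAA);
     *   // ... record more draws, hand the returned batches to the draw target ...
     *   ir->beginFlush(resourceProvider);  // assembles the records into GPU buffers
     *   // ... the draw target flushes the batches, which call back into ir to render ...
     *   ir->endFlush();                    // all batches released; recording may resume
     */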

    enum class ResetType : bool {
        kDestroy,
        kAbandon
    };

    /**
     * Resets all GPU resources, including those that are held long term. They will be lazily
     * reinitialized if the class begins to be used again.
     */
    void resetGpuResources(ResetType);
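    // Illustrative only (the actual call sites live in the backend-specific owner): kDestroy
    // would be used when the GPU objects should be freed properly, e.g. at destruction, and
    // kAbandon when the context has been abandoned and the objects must be dropped without
    // being freed.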

protected:
    class Batch : public GrDrawBatch {
    public:
        SK_DECLARE_INTERNAL_LLIST_INTERFACE(Batch);

        ~Batch() override;
        const char* name() const override { return "Instanced Batch"; }
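
        // A single recorded draw. Each batch owns a singly-linked list of these (fHeadDraw
        // through fTailDraw), allocated out of the owning InstancedRendering's fDrawPool.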
        struct Draw {
            Instance   fInstance;
            IndexRange fGeometry;
            Draw*      fNext;
        };

        Draw& getSingleDraw() const { SkASSERT(fHeadDraw && !fHeadDraw->fNext); return *fHeadDraw; }
        Instance& getSingleInstance() const { return this->getSingleDraw().fInstance; }
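
        // These append per-instance shape parameters (e.g. rounded rect radii) beyond the basic
        // Instance data; the InstanceProcessor shaders read them back out of the params texel
        // buffer at flush time. (Descriptive note; see fParams below and
        // InstancedRendering::fParamsBuffer.)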
        void appendRRectParams(const SkRRect&);
        void appendParamsTexel(const SkScalar* vals, int count);
        void appendParamsTexel(SkScalar x, SkScalar y, SkScalar z, SkScalar w);
        void appendParamsTexel(SkScalar x, SkScalar y, SkScalar z);

    protected:
        Batch(uint32_t classID, InstancedRendering* ir);

        void initBatchTracker(const GrXPOverridesForBatch&) override;
        bool onCombineIfPossible(GrBatch* other, const GrCaps& caps) override;

        void computePipelineOptimizations(GrInitInvariantOutput* color,
                                          GrInitInvariantOutput* coverage,
                                          GrBatchToXPOverrides*) const override;

        void onPrepare(GrBatchFlushState*) override {}
        void onDraw(GrBatchFlushState*) override;

        InstancedRendering* const       fInstancedRendering;
        BatchInfo                       fInfo;
        SkScalar                        fPixelLoad;
        SkSTArray<5, ParamsTexel, true> fParams;
        bool                            fIsTracked;
        int                             fNumDraws;
        int                             fNumChangesInGeometry;
        Draw*                           fHeadDraw;
        Draw*                           fTailDraw;

        typedef GrDrawBatch INHERITED;

        friend class InstancedRendering;
    };

    typedef SkTInternalLList<Batch> BatchList;

    InstancedRendering(GrGpu* gpu);

    const BatchList& trackedBatches() const { return fTrackedBatches; }
    const GrBuffer* vertexBuffer() const { SkASSERT(fVertexBuffer); return fVertexBuffer; }
    const GrBuffer* indexBuffer() const { SkASSERT(fIndexBuffer); return fIndexBuffer; }
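
    // Subclasses implement the following hooks using their backend's instanced rendering API:
    // uploading the buffers assembled during beginFlush(), issuing the actual instanced draw
    // calls for each batch, and destroying or abandoning the GPU objects afterward.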
    virtual void onBeginFlush(GrResourceProvider*) = 0;
    virtual void onDraw(const GrPipeline&, const InstanceProcessor&, const Batch*) = 0;
    virtual void onEndFlush() = 0;
    virtual void onResetGpuResources(ResetType) = 0;
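
    // A rough sketch of what a backend subclass might override (illustrative only; the class
    // name and responsibilities noted here are assumptions, not part of this header):
    //
    //   class GLInstancedRendering final : public InstancedRendering {
    //       void onBeginFlush(GrResourceProvider*) override;   // upload params texel buffer
    //       void onDraw(const GrPipeline&, const InstanceProcessor&, const Batch*) override;
    //       void onEndFlush() override;                        // release per-flush resources
    //       void onResetGpuResources(ResetType) override;
    //       Batch* createBatch() override;
    //   };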

private:
    enum class State : bool {
        kRecordingDraws,
        kFlushing
    };

    Batch* SK_WARN_UNUSED_RESULT recordShape(ShapeType, const SkRect& bounds,
                                             const SkMatrix& viewMatrix, GrColor,
                                             const SkRect& localRect, bool antialias,
                                             const GrInstancedPipelineInfo&, bool* requireHWAA);

    bool selectAntialiasMode(const SkMatrix& viewMatrix, bool antialias,
                             const GrInstancedPipelineInfo&, bool* useHWAA, AntialiasMode*);

    virtual Batch* createBatch() = 0;

    const SkAutoTUnref<GrGpu>          fGpu;
    State                              fState;
    GrMemoryPool                       fDrawPool;
    SkSTArray<1024, ParamsTexel, true> fParams;
    BatchList                          fTrackedBatches;
    SkAutoTUnref<const GrBuffer>       fVertexBuffer;
    SkAutoTUnref<const GrBuffer>       fIndexBuffer;
    SkAutoTUnref<GrBuffer>             fParamsBuffer;
};

}  // namespace gr_instanced

#endif