/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef gr_instanced_InstancedRendering_DEFINED
#define gr_instanced_InstancedRendering_DEFINED

#include "GrGpu.h"
#include "GrMemoryPool.h"
#include "SkTInternalLList.h"
#include "batches/GrDrawOp.h"
#include "instanced/InstancedRenderingTypes.h"
#include "../private/GrInstancedPipelineInfo.h"

class GrResourceProvider;

namespace gr_instanced {

class InstanceProcessor;

/**
 * This class serves as a centralized clearinghouse for instanced rendering. It accumulates data
 * for instanced draws into one location, and creates special batches that pull from this data.
 * The nature of instanced rendering allows these batches to combine well and render efficiently.
 *
 * During a flush, this class assembles the accumulated draw data into a single vertex and texel
 * buffer, and its subclass draws the batches using backend-specific instanced rendering APIs.
 *
 * This class is responsible for the CPU side of instanced rendering. Shaders are implemented by
 * InstanceProcessor.
 */
class InstancedRendering : public SkNoncopyable {
public:
    virtual ~InstancedRendering() { SkASSERT(State::kRecordingDraws == fState); }

    GrGpu* gpu() const { return fGpu.get(); }

    /**
     * These methods make a new record internally for an instanced draw, and return a batch that
     * is effectively just an index to that record. The returned batch is not self-contained, but
     * rather relies on this class to handle the rendering. The client must call beginFlush() on
     * this class before attempting to flush batches returned by it. It is invalid to record new
     * draws between beginFlush() and endFlush().
     */
    GrDrawOp* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
                                               GrAA, const GrInstancedPipelineInfo&, GrAAType*);

    GrDrawOp* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
                                               const SkRect& localRect, GrAA,
                                               const GrInstancedPipelineInfo&, GrAAType*);

    GrDrawOp* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
                                               const SkMatrix& localMatrix, GrAA,
                                               const GrInstancedPipelineInfo&, GrAAType*);

    GrDrawOp* SK_WARN_UNUSED_RESULT recordOval(const SkRect&, const SkMatrix&, GrColor,
                                               GrAA, const GrInstancedPipelineInfo&, GrAAType*);

    GrDrawOp* SK_WARN_UNUSED_RESULT recordRRect(const SkRRect&, const SkMatrix&, GrColor,
                                                GrAA, const GrInstancedPipelineInfo&, GrAAType*);

    GrDrawOp* SK_WARN_UNUSED_RESULT recordDRRect(const SkRRect& outer, const SkRRect& inner,
                                                 const SkMatrix&, GrColor, GrAA,
                                                 const GrInstancedPipelineInfo&, GrAAType*);

    /**
     * Compiles all recorded draws into GPU buffers and allows the client to begin flushing the
     * batches created by this class.
     */
    void beginFlush(GrResourceProvider*);

    /**
     * Called once the batches created previously by this class have all been released. Allows the
     * client to begin recording draws again.
     */
    void endFlush();

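    /**
     * Rough usage sketch of the record/flush cycle described above. Illustrative only: "ir",
     * "resourceProvider", "rect", "viewMatrix", and "info" are assumed to exist in the caller,
     * and the mechanism that actually executes the returned op lives outside this class.
     *
     *     GrAAType aaType;
     *     if (GrDrawOp* op = ir->recordRect(rect, viewMatrix, GrColor_WHITE, GrAA::kYes,
     *                                       info, &aaType)) {
     *         // Hand the op off to whatever draw target will execute it during the flush.
     *     }
     *     ir->beginFlush(resourceProvider);  // Uploads draw data; no new draws may be recorded.
     *     // ... the recorded ops are executed and released by their owners ...
     *     ir->endFlush();                    // Recording may resume.
     */
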
    enum class ResetType : bool {
        kDestroy,
        kAbandon
    };

    /**
     * Resets all GPU resources, including those that are held long term. They will be lazily
     * reinitialized if the class begins to be used again.
     */
    void resetGpuResources(ResetType);

protected:
    class Batch : public GrDrawOp {
    public:
        SK_DECLARE_INTERNAL_LLIST_INTERFACE(Batch);

        ~Batch() override;
        const char* name() const override { return "Instanced Batch"; }

        SkString dumpInfo() const override {
            SkString string;
            string.printf("AA: %d, ShapeTypes: 0x%02x, IShapeTypes: 0x%02x, Persp %d, "
                          "NonSquare: %d, PLoad: %0.2f, Tracked: %d, NumDraws: %d, "
                          "GeomChanges: %d\n",
                          (int)fInfo.fAntialiasMode,
                          fInfo.fShapeTypes,
                          fInfo.fInnerShapeTypes,
                          fInfo.fHasPerspective,
                          fInfo.fNonSquare,
                          fPixelLoad,
                          fIsTracked,
                          fNumDraws,
                          fNumChangesInGeometry);
            string.append(DumpPipelineInfo(*this->pipeline()));
            string.append(INHERITED::dumpInfo());
            return string;
        }

        struct Draw {
            Instance     fInstance;
            IndexRange   fGeometry;
            Draw*        fNext;
        };

        Draw& getSingleDraw() const { SkASSERT(fHeadDraw && !fHeadDraw->fNext); return *fHeadDraw; }
        Instance& getSingleInstance() const { return this->getSingleDraw().fInstance; }

        void appendRRectParams(const SkRRect&);
        void appendParamsTexel(const SkScalar* vals, int count);
        void appendParamsTexel(SkScalar x, SkScalar y, SkScalar z, SkScalar w);
        void appendParamsTexel(SkScalar x, SkScalar y, SkScalar z);

    protected:
        Batch(uint32_t classID, InstancedRendering* ir);

        void initBatchTracker(const GrXPOverridesForBatch&) override;
        bool onCombineIfPossible(GrOp* other, const GrCaps& caps) override;

        void computePipelineOptimizations(GrInitInvariantOutput* color,
                                          GrInitInvariantOutput* coverage,
                                          GrBatchToXPOverrides*) const override;

        void onPrepare(GrOpFlushState*) override {}
        void onDraw(GrOpFlushState*, const SkRect& bounds) override;

        InstancedRendering* const         fInstancedRendering;
        BatchInfo                         fInfo;
        SkScalar                          fPixelLoad;
        SkSTArray<5, ParamsTexel, true>   fParams;
        bool                              fIsTracked;
        int                               fNumDraws;
        int                               fNumChangesInGeometry;
        Draw*                             fHeadDraw;
        Draw*                             fTailDraw;

        typedef GrDrawOp INHERITED;

        friend class InstancedRendering;
    };

    typedef SkTInternalLList<Batch> BatchList;

    InstancedRendering(GrGpu* gpu);

    const BatchList& trackedBatches() const { return fTrackedBatches; }
    const GrBuffer* vertexBuffer() const { SkASSERT(fVertexBuffer); return fVertexBuffer.get(); }
    const GrBuffer* indexBuffer() const { SkASSERT(fIndexBuffer); return fIndexBuffer.get(); }

    virtual void onBeginFlush(GrResourceProvider*) = 0;
    virtual void onDraw(const GrPipeline&, const InstanceProcessor&, const Batch*) = 0;
    virtual void onEndFlush() = 0;
    virtual void onResetGpuResources(ResetType) = 0;

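    /**
     * Minimal sketch of how a backend subclass might plug into the hooks above. The class name
     * and the comments on each hook are hypothetical; a real backend supplies its own buffer
     * upload and draw logic.
     *
     *     class MyBackendInstancedRendering : public InstancedRendering {
     *     public:
     *         MyBackendInstancedRendering(GrGpu* gpu) : InstancedRendering(gpu) {}
     *
     *     private:
     *         void onBeginFlush(GrResourceProvider*) override;  // build per-flush GPU buffers
     *         void onDraw(const GrPipeline&, const InstanceProcessor&, const Batch*) override;
     *         void onEndFlush() override;                       // release per-flush resources
     *         void onResetGpuResources(ResetType) override;
     *         Batch* createBatch() override;
     *     };
     */
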
private:
    enum class State : bool {
        kRecordingDraws,
        kFlushing
    };

    Batch* SK_WARN_UNUSED_RESULT recordShape(ShapeType, const SkRect& bounds,
                                             const SkMatrix& viewMatrix, GrColor,
                                             const SkRect& localRect, GrAA aa,
                                             const GrInstancedPipelineInfo&, GrAAType*);

    bool selectAntialiasMode(const SkMatrix& viewMatrix, GrAA aa, const GrInstancedPipelineInfo&,
                             GrAAType*, AntialiasMode*);

    virtual Batch* createBatch() = 0;

    const sk_sp<GrGpu>                   fGpu;
    State                                fState;
    GrObjectMemoryPool<Batch::Draw>      fDrawPool;
    SkSTArray<1024, ParamsTexel, true>   fParams;
    BatchList                            fTrackedBatches;
    sk_sp<const GrBuffer>                fVertexBuffer;
    sk_sp<const GrBuffer>                fIndexBuffer;
    sk_sp<GrBuffer>                      fParamsBuffer;
};

}

#endif