blob: 078e52cee3e83d6cb53d756aff7b1df57bc317e5 [file] [log] [blame]
/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef gr_instanced_InstancedRendering_DEFINED
#define gr_instanced_InstancedRendering_DEFINED

#include "GrGpu.h"
#include "GrMemoryPool.h"
#include "SkTInternalLList.h"
#include "batches/GrDrawOp.h"
#include "instanced/InstancedRenderingTypes.h"
#include "../private/GrInstancedPipelineInfo.h"

class GrResourceProvider;
namespace gr_instanced {

class InstanceProcessor;

/**
 * This class serves as a centralized clearinghouse for instanced rendering. It accumulates data for
 * instanced draws into one location, and creates special ops that pull from this data. The nature
 * of instanced rendering allows these ops to combine well and render efficiently.
 *
 * During a flush, this class assembles the accumulated draw data into a single vertex and texel
 * buffer, and its subclass draws the ops using backend-specific instanced rendering APIs.
 *
 * This class is responsible for the CPU side of instanced rendering. Shaders are implemented by
 * InstanceProcessor.
 */
35class InstancedRendering : public SkNoncopyable {
36public:
37 virtual ~InstancedRendering() { SkASSERT(State::kRecordingDraws == fState); }
38
Hal Canary144caf52016-11-07 17:57:18 -050039 GrGpu* gpu() const { return fGpu.get(); }
csmartdaltona7f29642016-07-07 08:49:11 -070040
41 /**
Brian Salomon99ad1642016-12-16 09:50:45 -050042 * These methods make a new record internally for an instanced draw, and return an op that is
43 * effectively just an index to that record. The returned op is not self-contained, but
csmartdaltona7f29642016-07-07 08:49:11 -070044 * rather relies on this class to handle the rendering. The client must call beginFlush() on
Brian Salomon99ad1642016-12-16 09:50:45 -050045 * this class before attempting to flush ops returned by it. It is invalid to record new
csmartdaltona7f29642016-07-07 08:49:11 -070046 * draws between beginFlush() and endFlush().
47 */
Brian Salomon99ad1642016-12-16 09:50:45 -050048 sk_sp<GrDrawOp> SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor, GrAA,
49 const GrInstancedPipelineInfo&, GrAAType*);
csmartdaltona7f29642016-07-07 08:49:11 -070050
Brian Salomon99ad1642016-12-16 09:50:45 -050051 sk_sp<GrDrawOp> SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
52 const SkRect& localRect, GrAA,
53 const GrInstancedPipelineInfo&, GrAAType*);
csmartdaltona7f29642016-07-07 08:49:11 -070054
Brian Salomon99ad1642016-12-16 09:50:45 -050055 sk_sp<GrDrawOp> SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
56 const SkMatrix& localMatrix, GrAA,
57 const GrInstancedPipelineInfo&, GrAAType*);
csmartdaltona7f29642016-07-07 08:49:11 -070058
Brian Salomon99ad1642016-12-16 09:50:45 -050059 sk_sp<GrDrawOp> SK_WARN_UNUSED_RESULT recordOval(const SkRect&, const SkMatrix&, GrColor, GrAA,
60 const GrInstancedPipelineInfo&, GrAAType*);
csmartdaltona7f29642016-07-07 08:49:11 -070061
Brian Salomon99ad1642016-12-16 09:50:45 -050062 sk_sp<GrDrawOp> SK_WARN_UNUSED_RESULT recordRRect(const SkRRect&, const SkMatrix&, GrColor,
63 GrAA, const GrInstancedPipelineInfo&,
64 GrAAType*);
csmartdaltona7f29642016-07-07 08:49:11 -070065
Brian Salomon99ad1642016-12-16 09:50:45 -050066 sk_sp<GrDrawOp> SK_WARN_UNUSED_RESULT recordDRRect(const SkRRect& outer, const SkRRect& inner,
67 const SkMatrix&, GrColor, GrAA,
68 const GrInstancedPipelineInfo&, GrAAType*);
csmartdaltona7f29642016-07-07 08:49:11 -070069
70 /**
71 * Compiles all recorded draws into GPU buffers and allows the client to begin flushing the
Brian Salomon99ad1642016-12-16 09:50:45 -050072 * ops created by this class.
csmartdaltona7f29642016-07-07 08:49:11 -070073 */
74 void beginFlush(GrResourceProvider*);
75
76 /**
Brian Salomon99ad1642016-12-16 09:50:45 -050077 * Called once the ops created previously by this class have all been released. Allows the
csmartdaltona7f29642016-07-07 08:49:11 -070078 * client to begin recording draws again.
79 */
80 void endFlush();
81
82 enum class ResetType : bool {
83 kDestroy,
84 kAbandon
85 };
86
87 /**
88 * Resets all GPU resources, including those that are held long term. They will be lazily
89 * reinitialized if the class begins to be used again.
90 */
91 void resetGpuResources(ResetType);
92
93protected:
Brian Salomon99ad1642016-12-16 09:50:45 -050094 class Op : public GrDrawOp {
csmartdaltona7f29642016-07-07 08:49:11 -070095 public:
Brian Salomon99ad1642016-12-16 09:50:45 -050096 SK_DECLARE_INTERNAL_LLIST_INTERFACE(Op);
csmartdaltona7f29642016-07-07 08:49:11 -070097
Brian Salomon99ad1642016-12-16 09:50:45 -050098 ~Op() override;
99 const char* name() const override { return "InstancedRendering::Op"; }
csmartdaltona7f29642016-07-07 08:49:11 -0700100
Brian Salomon7c3e7182016-12-01 09:35:30 -0500101 SkString dumpInfo() const override {
102 SkString string;
103 string.printf("AA: %d, ShapeTypes: 0x%02x, IShapeTypes: 0x%02x, Persp %d, "
104 "NonSquare: %d, PLoad: %0.2f, Tracked: %d, NumDraws: %d, "
105 "GeomChanges: %d\n",
106 (int)fInfo.fAntialiasMode,
107 fInfo.fShapeTypes,
108 fInfo.fInnerShapeTypes,
109 fInfo.fHasPerspective,
110 fInfo.fNonSquare,
111 fPixelLoad,
112 fIsTracked,
113 fNumDraws,
114 fNumChangesInGeometry);
115 string.append(DumpPipelineInfo(*this->pipeline()));
116 string.append(INHERITED::dumpInfo());
117 return string;
118 }
119
csmartdaltona7f29642016-07-07 08:49:11 -0700120 struct Draw {
121 Instance fInstance;
122 IndexRange fGeometry;
123 Draw* fNext;
124 };
125
126 Draw& getSingleDraw() const { SkASSERT(fHeadDraw && !fHeadDraw->fNext); return *fHeadDraw; }
127 Instance& getSingleInstance() const { return this->getSingleDraw().fInstance; }
128
129 void appendRRectParams(const SkRRect&);
130 void appendParamsTexel(const SkScalar* vals, int count);
131 void appendParamsTexel(SkScalar x, SkScalar y, SkScalar z, SkScalar w);
132 void appendParamsTexel(SkScalar x, SkScalar y, SkScalar z);
133
134 protected:
Brian Salomon99ad1642016-12-16 09:50:45 -0500135 Op(uint32_t classID, InstancedRendering* ir);
csmartdaltona7f29642016-07-07 08:49:11 -0700136
137 void initBatchTracker(const GrXPOverridesForBatch&) override;
Brian Salomon25a88092016-12-01 09:36:50 -0500138 bool onCombineIfPossible(GrOp* other, const GrCaps& caps) override;
csmartdaltona7f29642016-07-07 08:49:11 -0700139
140 void computePipelineOptimizations(GrInitInvariantOutput* color,
141 GrInitInvariantOutput* coverage,
142 GrBatchToXPOverrides*) const override;
143
Brian Salomon742e31d2016-12-07 17:06:19 -0500144 void onPrepare(GrOpFlushState*) override {}
145 void onDraw(GrOpFlushState*, const SkRect& bounds) override;
csmartdaltona7f29642016-07-07 08:49:11 -0700146
Brian Salomon99ad1642016-12-16 09:50:45 -0500147 InstancedRendering* const fInstancedRendering;
148 OpInfo fInfo;
149 SkScalar fPixelLoad;
150 SkSTArray<5, ParamsTexel, true> fParams;
151 bool fIsTracked;
152 int fNumDraws;
153 int fNumChangesInGeometry;
154 Draw* fHeadDraw;
155 Draw* fTailDraw;
csmartdaltona7f29642016-07-07 08:49:11 -0700156
Brian Salomon9afd3712016-12-01 10:59:09 -0500157 typedef GrDrawOp INHERITED;
csmartdaltona7f29642016-07-07 08:49:11 -0700158
159 friend class InstancedRendering;
160 };
161
Brian Salomon99ad1642016-12-16 09:50:45 -0500162 typedef SkTInternalLList<Op> OpList;
csmartdaltona7f29642016-07-07 08:49:11 -0700163
csmartdaltone0d36292016-07-29 08:14:20 -0700164 InstancedRendering(GrGpu* gpu);
csmartdaltona7f29642016-07-07 08:49:11 -0700165
Brian Salomon99ad1642016-12-16 09:50:45 -0500166 const OpList& trackedOps() const { return fTrackedOps; }
Hal Canary144caf52016-11-07 17:57:18 -0500167 const GrBuffer* vertexBuffer() const { SkASSERT(fVertexBuffer); return fVertexBuffer.get(); }
168 const GrBuffer* indexBuffer() const { SkASSERT(fIndexBuffer); return fIndexBuffer.get(); }
csmartdaltona7f29642016-07-07 08:49:11 -0700169
170 virtual void onBeginFlush(GrResourceProvider*) = 0;
Brian Salomon99ad1642016-12-16 09:50:45 -0500171 virtual void onDraw(const GrPipeline&, const InstanceProcessor&, const Op*) = 0;
csmartdaltona7f29642016-07-07 08:49:11 -0700172 virtual void onEndFlush() = 0;
173 virtual void onResetGpuResources(ResetType) = 0;
174
175private:
176 enum class State : bool {
177 kRecordingDraws,
178 kFlushing
179 };
180
Brian Salomon99ad1642016-12-16 09:50:45 -0500181 sk_sp<Op> SK_WARN_UNUSED_RESULT recordShape(ShapeType, const SkRect& bounds,
182 const SkMatrix& viewMatrix, GrColor,
183 const SkRect& localRect, GrAA aa,
184 const GrInstancedPipelineInfo&, GrAAType*);
csmartdaltona7f29642016-07-07 08:49:11 -0700185
Brian Salomon0e8fc8b2016-12-09 15:10:07 -0500186 bool selectAntialiasMode(const SkMatrix& viewMatrix, GrAA aa, const GrInstancedPipelineInfo&,
187 GrAAType*, AntialiasMode*);
csmartdaltona7f29642016-07-07 08:49:11 -0700188
Brian Salomon99ad1642016-12-16 09:50:45 -0500189 virtual sk_sp<Op> makeOp() = 0;
csmartdaltona7f29642016-07-07 08:49:11 -0700190
Brian Salomon99ad1642016-12-16 09:50:45 -0500191 const sk_sp<GrGpu> fGpu;
192 State fState;
193 GrObjectMemoryPool<Op::Draw> fDrawPool;
194 SkSTArray<1024, ParamsTexel, true> fParams;
195 OpList fTrackedOps;
196 sk_sp<const GrBuffer> fVertexBuffer;
197 sk_sp<const GrBuffer> fIndexBuffer;
198 sk_sp<GrBuffer> fParamsBuffer;
csmartdaltona7f29642016-07-07 08:49:11 -0700199};

}

#endif