blob: 1325370a28485aebd523528c72ac48c3284e5db1 [file] [log] [blame]
/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7
8#ifndef gr_instanced_InstancedRendering_DEFINED
9#define gr_instanced_InstancedRendering_DEFINED
10
Greg Daniel77b53f62016-10-18 11:48:51 -040011#include "GrGpu.h"
csmartdaltona7f29642016-07-07 08:49:11 -070012#include "GrMemoryPool.h"
13#include "SkTInternalLList.h"
14#include "batches/GrDrawBatch.h"
15#include "instanced/InstancedRenderingTypes.h"
16#include "../private/GrInstancedPipelineInfo.h"
17
18class GrResourceProvider;
19
20namespace gr_instanced {
21
22class InstanceProcessor;
23
/**
 * This class serves as a centralized clearinghouse for instanced rendering. It accumulates data for
 * instanced draws into one location, and creates special batches that pull from this data. The
 * nature of instanced rendering allows these batches to combine well and render efficiently.
 *
 * During a flush, this class assembles the accumulated draw data into a single vertex and texel
 * buffer, and its subclass draws the batches using backend-specific instanced rendering APIs.
 *
 * This class is responsible for the CPU side of instanced rendering. Shaders are implemented by
 * InstanceProcessor.
 *
 * Lifecycle: record*() calls are only legal while in State::kRecordingDraws; beginFlush()
 * transitions to flushing, and endFlush() returns to recording (see fState below).
 */
class InstancedRendering : public SkNoncopyable {
public:
    // Destruction mid-flush is a programming error: the destructor asserts that we are back in
    // the recording state (i.e. endFlush() was called after the last beginFlush()).
    virtual ~InstancedRendering() { SkASSERT(State::kRecordingDraws == fState); }

    /** Non-owning pointer to the GPU this renderer targets (a ref is retained in fGpu). */
    GrGpu* gpu() const { return fGpu.get(); }

    /**
     * These methods make a new record internally for an instanced draw, and return a batch that is
     * effectively just an index to that record. The returned batch is not self-contained, but
     * rather relies on this class to handle the rendering. The client must call beginFlush() on
     * this class before attempting to flush batches returned by it. It is invalid to record new
     * draws between beginFlush() and endFlush().
     *
     * On success, *useHWAA is set to whether hardware MSAA should be enabled for the returned
     * batch. A null return means this renderer cannot handle the requested draw.
     */
    GrDrawBatch* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
                                                  bool antialias, const GrInstancedPipelineInfo&,
                                                  bool* useHWAA);

    // Variant with an explicit local (texture-space) rect.
    GrDrawBatch* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
                                                  const SkRect& localRect, bool antialias,
                                                  const GrInstancedPipelineInfo&, bool* useHWAA);

    // Variant with an explicit local-coordinate matrix.
    GrDrawBatch* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
                                                  const SkMatrix& localMatrix, bool antialias,
                                                  const GrInstancedPipelineInfo&, bool* useHWAA);

    GrDrawBatch* SK_WARN_UNUSED_RESULT recordOval(const SkRect&, const SkMatrix&, GrColor,
                                                  bool antialias, const GrInstancedPipelineInfo&,
                                                  bool* useHWAA);

    GrDrawBatch* SK_WARN_UNUSED_RESULT recordRRect(const SkRRect&, const SkMatrix&, GrColor,
                                                   bool antialias, const GrInstancedPipelineInfo&,
                                                   bool* useHWAA);

    // Records a "double round rect" (outer rrect with an inner rrect hole).
    GrDrawBatch* SK_WARN_UNUSED_RESULT recordDRRect(const SkRRect& outer, const SkRRect& inner,
                                                    const SkMatrix&, GrColor, bool antialias,
                                                    const GrInstancedPipelineInfo&, bool* useHWAA);

    /**
     * Compiles all recorded draws into GPU buffers and allows the client to begin flushing the
     * batches created by this class.
     */
    void beginFlush(GrResourceProvider*);

    /**
     * Called once the batches created previously by this class have all been released. Allows the
     * client to begin recording draws again.
     */
    void endFlush();

    // Distinguishes an orderly teardown (kDestroy) from losing the GPU context (kAbandon),
    // which subclasses receive via onResetGpuResources().
    enum class ResetType : bool {
        kDestroy,
        kAbandon
    };

    /**
     * Resets all GPU resources, including those that are held long term. They will be lazily
     * reinitialized if the class begins to be used again.
     */
    void resetGpuResources(ResetType);

protected:
    /**
     * The batch type this clearinghouse hands out. A Batch is little more than a handle into the
     * draw data recorded by its owning InstancedRendering; the actual GPU work is delegated back
     * to InstancedRendering::onDraw() at flush time.
     */
    class Batch : public GrDrawBatch {
    public:
        SK_DECLARE_INTERNAL_LLIST_INTERFACE(Batch);

        ~Batch() override;
        const char* name() const override { return "Instanced Batch"; }

        // One recorded draw. Draws that have been combined into a single batch form a
        // singly-linked list threaded through fNext (fHeadDraw..fTailDraw below).
        struct Draw {
            Instance fInstance;
            IndexRange fGeometry;
            Draw* fNext;
        };

        // Only valid while the batch contains exactly one draw (asserted).
        Draw& getSingleDraw() const { SkASSERT(fHeadDraw && !fHeadDraw->fNext); return *fHeadDraw; }
        Instance& getSingleInstance() const { return this->getSingleDraw().fInstance; }

        // Append extra shape parameters into the shared params texel buffer (fParams).
        void appendRRectParams(const SkRRect&);
        void appendParamsTexel(const SkScalar* vals, int count);
        void appendParamsTexel(SkScalar x, SkScalar y, SkScalar z, SkScalar w);
        void appendParamsTexel(SkScalar x, SkScalar y, SkScalar z);

    protected:
        Batch(uint32_t classID, InstancedRendering* ir);

        void initBatchTracker(const GrXPOverridesForBatch&) override;
        bool onCombineIfPossible(GrBatch* other, const GrCaps& caps) override;

        void computePipelineOptimizations(GrInitInvariantOutput* color,
                                          GrInitInvariantOutput* coverage,
                                          GrBatchToXPOverrides*) const override;

        // All vertex/index/params data is uploaded centrally in beginFlush(), so the per-batch
        // prepare step has nothing to do.
        void onPrepare(GrBatchFlushState*) override {}
        void onDraw(GrBatchFlushState*, const SkRect& bounds) override;

        InstancedRendering* const fInstancedRendering;  // Owning clearinghouse; outlives the batch.
        BatchInfo fInfo;
        SkScalar fPixelLoad;       // NOTE(review): presumably an estimated fill-rate cost used when
                                   // combining batches -- confirm against the .cpp.
        SkSTArray<5, ParamsTexel, true> fParams;
        bool fIsTracked;           // True once this batch is in the owner's fTrackedBatches list.
        int fNumDraws;             // Length of the fHeadDraw linked list.
        int fNumChangesInGeometry;
        Draw* fHeadDraw;
        Draw* fTailDraw;

        typedef GrDrawBatch INHERITED;

        friend class InstancedRendering;
    };

    typedef SkTInternalLList<Batch> BatchList;

    InstancedRendering(GrGpu* gpu);

    const BatchList& trackedBatches() const { return fTrackedBatches; }
    // Buffer accessors assert the buffers exist, i.e. they are only valid during a flush.
    const GrBuffer* vertexBuffer() const { SkASSERT(fVertexBuffer); return fVertexBuffer.get(); }
    const GrBuffer* indexBuffer() const { SkASSERT(fIndexBuffer); return fIndexBuffer.get(); }

    // Backend-specific hooks (e.g. GL) mirroring the public flush/reset entry points.
    virtual void onBeginFlush(GrResourceProvider*) = 0;
    virtual void onDraw(const GrPipeline&, const InstanceProcessor&, const Batch*) = 0;
    virtual void onEndFlush() = 0;
    virtual void onResetGpuResources(ResetType) = 0;

private:
    // Two-phase lifecycle enforced by the destructor assert above.
    enum class State : bool {
        kRecordingDraws,
        kFlushing
    };

    // Common implementation behind the public record*() methods.
    Batch* SK_WARN_UNUSED_RESULT recordShape(ShapeType, const SkRect& bounds,
                                             const SkMatrix& viewMatrix, GrColor,
                                             const SkRect& localRect, bool antialias,
                                             const GrInstancedPipelineInfo&, bool* requireHWAA);

    bool selectAntialiasMode(const SkMatrix& viewMatrix, bool antialias,
                             const GrInstancedPipelineInfo&, bool* useHWAA, AntialiasMode*);

    // Factory for the backend-specific Batch subclass.
    virtual Batch* createBatch() = 0;

    const sk_sp<GrGpu> fGpu;
    State fState;
    GrMemoryPool fDrawPool;                      // Pool backing the per-batch Draw records.
    SkSTArray<1024, ParamsTexel, true> fParams;  // Accumulated params texel data for all batches.
    BatchList fTrackedBatches;
    sk_sp<const GrBuffer> fVertexBuffer;         // Built in beginFlush(); see accessors above.
    sk_sp<const GrBuffer> fIndexBuffer;
    sk_sp<GrBuffer> fParamsBuffer;
};
183
184}
185
186#endif