/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrFragmentProcessor_DEFINED
#define GrFragmentProcessor_DEFINED

#include "GrProcessor.h"
#include "GrProxyRef.h"

class GrCoordTransform;
class GrGLSLFragmentProcessor;
class GrPaint;
class GrPipeline;
class GrProcessorKeyBuilder;
class GrShaderCaps;
class GrSwizzle;

/** Provides custom fragment shader code. Fragment processors receive an input color (half4) and
    produce an output color. They may reference textures and uniforms. They may use
    GrCoordTransforms to receive a transformation of the local coordinates that map from local space
    to the fragment being processed.
 */
class GrFragmentProcessor : public GrProcessor {
public:
    class TextureSampler;

    /**
     * In many instances (e.g. SkShader::asFragmentProcessor() implementations) it is desirable to
     * only consider the input color's alpha. However, there is a competing desire to have reusable
     * GrFragmentProcessor subclasses that can be used in other scenarios where the entire input
     * color is considered. This function exists to filter the input color and pass it to a FP. It
     * does so by returning a parent FP that multiplies the passed in FP's output by the parent's
     * input alpha. The passed in FP will not receive an input color.
     */
    static std::unique_ptr<GrFragmentProcessor> MulChildByInputAlpha(
            std::unique_ptr<GrFragmentProcessor> child);
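    // A minimal usage sketch of MulChildByInputAlpha() (hypothetical; 'maskFP' names an
    // already-built fragment processor and is purely illustrative):
    //
    //     std::unique_ptr<GrFragmentProcessor> fp =
    //             GrFragmentProcessor::MulChildByInputAlpha(std::move(maskFP));
    //
    // The returned 'fp' evaluates maskFP with no input color and multiplies its output by the
    // alpha of the color 'fp' itself receives.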

    /**
     * Like MulChildByInputAlpha(), but reverses the sense of src and dst. In this case, return
     * the input modulated by the child's alpha. The passed in FP will not receive an input color.
     *
     * output = input * child.a
     */
    static std::unique_ptr<GrFragmentProcessor> MulInputByChildAlpha(
            std::unique_ptr<GrFragmentProcessor> child);

    /**
     * This assumes that the input color to the returned processor will be unpremul and that the
     * passed processor (which becomes the returned processor's child) produces a premul output.
     * The result of the returned processor is a premul of its input color modulated by the child
     * processor's premul output.
     */
    static std::unique_ptr<GrFragmentProcessor> MakeInputPremulAndMulByOutput(
            std::unique_ptr<GrFragmentProcessor>);

    /**
     * Returns a parent fragment processor that adopts the passed fragment processor as a child.
     * The parent will ignore its input color and instead feed the passed in color as input to the
     * child.
     */
    static std::unique_ptr<GrFragmentProcessor> OverrideInput(std::unique_ptr<GrFragmentProcessor>,
                                                              const SkPMColor4f&);
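    // A minimal usage sketch of OverrideInput() (hypothetical; 'childFP' is an already-built
    // fragment processor and is purely illustrative):
    //
    //     std::unique_ptr<GrFragmentProcessor> fp = GrFragmentProcessor::OverrideInput(
    //             std::move(childFP), SkPMColor4f{1, 1, 1, 1});
    //
    // Whatever color 'fp' receives, 'childFP' is evaluated with opaque white as its input.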

    /**
     * Returns a fragment processor that premuls the input before calling the passed in fragment
     * processor.
     */
    static std::unique_ptr<GrFragmentProcessor> PremulInput(std::unique_ptr<GrFragmentProcessor>);

    /**
     * Returns a fragment processor that calls the passed in fragment processor, and then swizzles
     * the output.
     */
    static std::unique_ptr<GrFragmentProcessor> SwizzleOutput(std::unique_ptr<GrFragmentProcessor>,
                                                              const GrSwizzle&);

    /**
     * Returns a fragment processor that runs the passed in array of fragment processors in a
     * series. The original input is passed to the first, the first's output is passed to the
     * second, etc. The output of the returned processor is the output of the last processor of the
     * series.
     *
     * The array elements will be moved.
     */
    static std::unique_ptr<GrFragmentProcessor> RunInSeries(std::unique_ptr<GrFragmentProcessor>*,
                                                            int cnt);
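    // A minimal usage sketch of RunInSeries() (hypothetical; 'first' and 'second' are
    // already-built fragment processors and are purely illustrative):
    //
    //     std::unique_ptr<GrFragmentProcessor> series[] = {std::move(first), std::move(second)};
    //     std::unique_ptr<GrFragmentProcessor> fp =
    //             GrFragmentProcessor::RunInSeries(series, SK_ARRAY_COUNT(series));
    //
    // 'fp' feeds its input to 'first' and outputs 'second' applied to 'first's output.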

    /**
     * Makes a copy of this fragment processor that draws equivalently to the original.
     * If the processor has child processors they are cloned as well.
     */
    virtual std::unique_ptr<GrFragmentProcessor> clone() const = 0;

    GrGLSLFragmentProcessor* createGLSLInstance() const;

    void getGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const {
        this->onGetGLSLProcessorKey(caps, b);
        for (int i = 0; i < fChildProcessors.count(); ++i) {
            fChildProcessors[i]->getGLSLProcessorKey(caps, b);
        }
    }

    int numTextureSamplers() const { return fTextureSamplerCnt; }
    const TextureSampler& textureSampler(int i) const;

    int numCoordTransforms() const { return fCoordTransforms.count(); }

    /** Returns the coordinate transformation at index. index must be valid according to
        numCoordTransforms(). */
    const GrCoordTransform& coordTransform(int index) const { return *fCoordTransforms[index]; }

    const SkTArray<const GrCoordTransform*, true>& coordTransforms() const {
        return fCoordTransforms;
    }

    int numChildProcessors() const { return fChildProcessors.count(); }

    const GrFragmentProcessor& childProcessor(int index) const { return *fChildProcessors[index]; }

    bool instantiate(GrResourceProvider*) const;

    void markPendingExecution() const;

    /** Do any of the coord transforms for this processor require local coords? */
    bool usesLocalCoords() const { return SkToBool(fFlags & kUsesLocalCoords_Flag); }

    /**
     * A GrDrawOp may premultiply its antialiasing coverage into its GrGeometryProcessor's color
     * output under the following scenario:
     *   * all the color fragment processors report true to this query,
     *   * all the coverage fragment processors report true to this query,
     *   * the blend mode arithmetic allows for it.
     * To be compatible a fragment processor's output must be a modulation of its input color or
     * alpha with a computed premultiplied color or alpha that is in the 0..1 range. The computed
     * color or alpha that is modulated against the input cannot depend on the input's alpha. The
     * computed value cannot depend on the input's color channels unless it unpremultiplies the
     * input color channels by the input alpha.
     */
    bool compatibleWithCoverageAsAlpha() const {
        return SkToBool(fFlags & kCompatibleWithCoverageAsAlpha_OptimizationFlag);
    }
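    // Illustrative (hypothetical) shader-style expressions for the rule above:
    //
    //     output = input * half4(0.5);  // compatible: modulates input by a constant in 0..1
    //     output = input * texColor;    // compatible: premul texColor is in range and does not
    //                                   // depend on the input
    //     output = input * input.a;     // NOT compatible: the modulating value depends on the
    //                                   // input's alpha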

    /**
     * If this is true then all opaque input colors to the processor produce opaque output colors.
     */
    bool preservesOpaqueInput() const {
        return SkToBool(fFlags & kPreservesOpaqueInput_OptimizationFlag);
    }

    /**
     * Tests whether given a constant input color the processor produces a constant output color
     * (for all fragments). If true, outputColor will contain the constant color produced for
     * inputColor.
     */
    bool hasConstantOutputForConstantInput(SkPMColor4f inputColor, SkPMColor4f* outputColor) const {
        if (fFlags & kConstantOutputForConstantInput_OptimizationFlag) {
            *outputColor = this->constantOutputForConstantInput(inputColor);
            return true;
        }
        return false;
    }
    bool hasConstantOutputForConstantInput() const {
        return SkToBool(fFlags & kConstantOutputForConstantInput_OptimizationFlag);
    }
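    // A minimal usage sketch (hypothetical; 'fp' is some fragment processor and 'white' an
    // arbitrary constant input, both purely illustrative):
    //
    //     SkPMColor4f white{1, 1, 1, 1}, out;
    //     if (fp.hasConstantOutputForConstantInput(white, &out)) {
    //         // Every fragment fp produces for the constant input 'white' equals 'out'.
    //     }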

    /** Returns true if this and other processor conservatively draw identically. It can only return
        true when the two processors are of the same subclass (i.e. they return the same object
        from getFactory()).

        A return value of true from isEqual() should not be used to test whether the processor would
        generate the same shader code. To test for identical code generation use
        getGLSLProcessorKey().
     */
    bool isEqual(const GrFragmentProcessor& that) const;

    /**
     * Pre-order traversal of a FP hierarchy, or of the forest of FPs in a GrPipeline. In the latter
     * case the tree rooted at each FP in the GrPipeline is visited successively.
     */
    class Iter : public SkNoncopyable {
    public:
        explicit Iter(const GrFragmentProcessor* fp) { fFPStack.push_back(fp); }
        explicit Iter(const GrPipeline& pipeline);
        explicit Iter(const GrPaint&);
        const GrFragmentProcessor* next();

    private:
        SkSTArray<4, const GrFragmentProcessor*, true> fFPStack;
    };
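    // A minimal traversal sketch (hypothetical; 'rootFP' is some fragment processor and is purely
    // illustrative):
    //
    //     GrFragmentProcessor::Iter iter(&rootFP);
    //     while (const GrFragmentProcessor* fp = iter.next()) {
    //         // Visits rootFP first, then its descendants in pre-order.
    //     }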

    /**
     * Iterates over all the Ts owned by a GrFragmentProcessor and its children or over all the Ts
     * owned by the forest of GrFragmentProcessors in a GrPipeline. FPs are visited in the same
     * order as Iter and each of an FP's Ts are visited in order.
     */
    template <typename T, int (GrFragmentProcessor::*COUNT)() const,
              const T& (GrFragmentProcessor::*GET)(int) const>
    class FPItemIter : public SkNoncopyable {
    public:
        explicit FPItemIter(const GrFragmentProcessor* fp)
                : fCurrFP(nullptr)
                , fCTIdx(0)
                , fFPIter(fp) {
            fCurrFP = fFPIter.next();
        }
        explicit FPItemIter(const GrPipeline& pipeline)
                : fCurrFP(nullptr)
                , fCTIdx(0)
                , fFPIter(pipeline) {
            fCurrFP = fFPIter.next();
        }

        const T* next() {
            if (!fCurrFP) {
                return nullptr;
            }
            while (fCTIdx == (fCurrFP->*COUNT)()) {
                fCTIdx = 0;
                fCurrFP = fFPIter.next();
                if (!fCurrFP) {
                    return nullptr;
                }
            }
            return &(fCurrFP->*GET)(fCTIdx++);
        }

    private:
        const GrFragmentProcessor* fCurrFP;
        int fCTIdx;
        GrFragmentProcessor::Iter fFPIter;
    };

    using CoordTransformIter = FPItemIter<GrCoordTransform,
                                          &GrFragmentProcessor::numCoordTransforms,
                                          &GrFragmentProcessor::coordTransform>;

    using TextureAccessIter = FPItemIter<TextureSampler,
                                         &GrFragmentProcessor::numTextureSamplers,
                                         &GrFragmentProcessor::textureSampler>;
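    // A minimal usage sketch of CoordTransformIter (hypothetical; 'pipeline' is some GrPipeline
    // and is purely illustrative):
    //
    //     GrFragmentProcessor::CoordTransformIter iter(pipeline);
    //     while (const GrCoordTransform* ct = iter.next()) {
    //         // Visits every coord transform of every FP in the pipeline, in Iter order.
    //     }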

    void visitProxies(const std::function<void(GrSurfaceProxy*)>& func);

protected:
    enum OptimizationFlags : uint32_t {
        kNone_OptimizationFlags,
        kCompatibleWithCoverageAsAlpha_OptimizationFlag = 0x1,
        kPreservesOpaqueInput_OptimizationFlag = 0x2,
        kConstantOutputForConstantInput_OptimizationFlag = 0x4,
        kAll_OptimizationFlags = kCompatibleWithCoverageAsAlpha_OptimizationFlag |
                                 kPreservesOpaqueInput_OptimizationFlag |
                                 kConstantOutputForConstantInput_OptimizationFlag
    };
    GR_DECL_BITFIELD_OPS_FRIENDS(OptimizationFlags)

    /**
     * Can be used as a helper to decide which fragment processor OptimizationFlags should be set.
     * This assumes that the subclass output color will be a modulation of the input color with a
     * value read from a texture of the passed config and that the texture contains premultiplied
     * color or alpha values that are in range.
     *
     * Since there are multiple ways in which a sampler may have its coordinates clamped or wrapped,
     * callers must determine on their own if the sampling uses a decal strategy in any way, in
     * which case the texture may become transparent regardless of the pixel config.
     */
    static OptimizationFlags ModulateForSamplerOptFlags(GrPixelConfig config, bool samplingDecal) {
        if (samplingDecal) {
            return kCompatibleWithCoverageAsAlpha_OptimizationFlag;
        } else {
            return ModulateForClampedSamplerOptFlags(config);
        }
    }

    // As above, but callers should somehow ensure or assert their sampler still uses clamping
    static OptimizationFlags ModulateForClampedSamplerOptFlags(GrPixelConfig config) {
        if (GrPixelConfigIsOpaque(config)) {
            return kCompatibleWithCoverageAsAlpha_OptimizationFlag |
                   kPreservesOpaqueInput_OptimizationFlag;
        } else {
            return kCompatibleWithCoverageAsAlpha_OptimizationFlag;
        }
    }
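    // A sketch of how a texture-modulating subclass constructor might pick its flags (the class,
    // 'proxy', and 'decal' names are hypothetical and purely illustrative):
    //
    //     GrMyTextureEffect(sk_sp<GrTextureProxy> proxy, bool decal)
    //             : INHERITED(kGrMyTextureEffect_ClassID,
    //                         ModulateForSamplerOptFlags(proxy->config(), decal)) {}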

    GrFragmentProcessor(ClassID classID, OptimizationFlags optimizationFlags)
            : INHERITED(classID)
            , fFlags(optimizationFlags) {
        SkASSERT((fFlags & ~kAll_OptimizationFlags) == 0);
    }

    OptimizationFlags optimizationFlags() const {
        return static_cast<OptimizationFlags>(kAll_OptimizationFlags & fFlags);
    }

    /**
     * This allows one subclass to access another subclass's implementation of
     * constantOutputForConstantInput. It must only be called when
     * hasConstantOutputForConstantInput() is known to be true.
     */
    static SkPMColor4f ConstantOutputForConstantInput(const GrFragmentProcessor& fp,
                                                      const SkPMColor4f& input) {
        SkASSERT(fp.hasConstantOutputForConstantInput());
        return fp.constantOutputForConstantInput(input);
    }

    /**
     * Fragment Processor subclasses call this from their constructor to register coordinate
     * transformations. Coord transforms provide a mechanism for a processor to receive coordinates
     * in their FS code. The matrix expresses a transformation from local space. For a given
     * fragment the matrix will be applied to the local coordinate that maps to the fragment.
     *
     * When the transformation has perspective, the transformed coordinates will have
     * 3 components. Otherwise they'll have 2.
     *
     * This must only be called from the constructor because GrProcessors are immutable. The
     * processor subclass manages the lifetime of the transformations (this function only stores a
     * pointer). The GrCoordTransform is typically a member field of the GrProcessor subclass.
     *
     * A processor subclass that has multiple methods of construction should always add its coord
     * transforms in a consistent order. The non-virtual implementation of isEqual() automatically
     * compares transforms and will assume they line up across the two processor instances.
     */
    void addCoordTransform(const GrCoordTransform*);
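    // A sketch of the registration pattern described above (hypothetical subclass;
    // 'fCoordTransform' is an illustrative GrCoordTransform member):
    //
    //     GrMyEffect(const SkMatrix& localMatrix, sk_sp<GrTextureProxy> proxy)
    //             : INHERITED(kGrMyEffect_ClassID, kNone_OptimizationFlags)
    //             , fCoordTransform(localMatrix, proxy.get()) {
    //         this->addCoordTransform(&fCoordTransform);
    //     }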

    /**
     * FragmentProcessor subclasses call this from their constructor to register any child
     * FragmentProcessors they have. This must be called AFTER all texture accesses and coord
     * transforms have been added.
     * This is for processors whose shader code will be composed of nested processors whose output
     * colors will be combined somehow to produce its output color. Registering these child
     * processors will allow the ProgramBuilder to automatically handle their transformed coords and
     * texture accesses and mangle their uniform and output color names.
     */
    int registerChildProcessor(std::unique_ptr<GrFragmentProcessor> child);
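    // A sketch of child registration (hypothetical subclass constructor; the names are purely
    // illustrative):
    //
    //     GrMyComposeEffect(std::unique_ptr<GrFragmentProcessor> child)
    //             : INHERITED(kGrMyComposeEffect_ClassID, kNone_OptimizationFlags) {
    //         fChildIndex = this->registerChildProcessor(std::move(child));
    //     }
    //
    // The stored index can later be passed to childProcessor() to retrieve the child.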

    void setTextureSamplerCnt(int cnt) {
        SkASSERT(cnt >= 0);
        fTextureSamplerCnt = cnt;
    }

    /**
     * Helper for implementing onTextureSampler(). E.g.:
     * return IthTextureSampler(i, fMyFirstSampler, fMySecondSampler, fMyThirdSampler);
     */
    template <typename... Args>
    static const TextureSampler& IthTextureSampler(int i, const TextureSampler& samp0,
                                                   const Args&... samps) {
        return (0 == i) ? samp0 : IthTextureSampler(i - 1, samps...);
    }
    inline static const TextureSampler& IthTextureSampler(int i);

private:
    virtual SkPMColor4f constantOutputForConstantInput(const SkPMColor4f& /* inputColor */) const {
        SK_ABORT("Subclass must override this if advertising this optimization.");
        return SK_PMColor4fTRANSPARENT;
    }

    /** Returns a new instance of the appropriate GLSL implementation class
        for the given GrFragmentProcessor; caller is responsible for deleting
        the object. */
    virtual GrGLSLFragmentProcessor* onCreateGLSLInstance() const = 0;

    /** Implemented using GrGLSLFragmentProcessor::GenKey as described in this class's comment. */
    virtual void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const = 0;

    /**
     * Subclass implements this to support isEqual(). It will only be called if it is known that
     * the two processors are of the same subclass (i.e. they return the same object from
     * getFactory()). The processor subclass should not compare its coord transforms as that will
     * be performed automatically in the non-virtual isEqual().
     */
    virtual bool onIsEqual(const GrFragmentProcessor&) const = 0;

    virtual const TextureSampler& onTextureSampler(int) const { return IthTextureSampler(0); }

    bool hasSameTransforms(const GrFragmentProcessor&) const;

    enum PrivateFlags {
        kFirstPrivateFlag = kAll_OptimizationFlags + 1,
        kUsesLocalCoords_Flag = kFirstPrivateFlag,
    };

    mutable uint32_t fFlags = 0;

    int fTextureSamplerCnt = 0;

    SkSTArray<4, const GrCoordTransform*, true> fCoordTransforms;

    SkSTArray<1, std::unique_ptr<GrFragmentProcessor>, true> fChildProcessors;

    typedef GrProcessor INHERITED;
};

/**
 * Used to represent a texture that is required by a GrFragmentProcessor. It holds a GrTextureProxy
 * along with an associated GrSamplerState. TextureSamplers don't perform any coord manipulation to
 * account for texture origin.
 */
class GrFragmentProcessor::TextureSampler {
public:
    TextureSampler() = default;

    /**
     * This copy constructor is used by GrFragmentProcessor::clone() implementations. The copy
     * always takes a new ref on the texture proxy as the new fragment processor will not yet be
     * in pending execution state.
     */
    explicit TextureSampler(const TextureSampler& that)
            : fProxyRef(sk_ref_sp(that.fProxyRef.get()), that.fProxyRef.ioType())
            , fSamplerState(that.fSamplerState) {}

    TextureSampler(sk_sp<GrTextureProxy>, const GrSamplerState&);

    explicit TextureSampler(sk_sp<GrTextureProxy>,
                            GrSamplerState::Filter = GrSamplerState::Filter::kNearest,
                            GrSamplerState::WrapMode wrapXAndY = GrSamplerState::WrapMode::kClamp);
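    // A construction sketch (hypothetical; 'proxy' is an illustrative sk_sp<GrTextureProxy>):
    //
    //     TextureSampler sampler(std::move(proxy), GrSamplerState::Filter::kBilerp,
    //                            GrSamplerState::WrapMode::kClamp);
    //
    // A fragment processor typically holds such samplers as members and exposes them via
    // onTextureSampler() and setTextureSamplerCnt().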

    TextureSampler& operator=(const TextureSampler&) = delete;

    void reset(sk_sp<GrTextureProxy>, const GrSamplerState&);
    void reset(sk_sp<GrTextureProxy>,
               GrSamplerState::Filter = GrSamplerState::Filter::kNearest,
               GrSamplerState::WrapMode wrapXAndY = GrSamplerState::WrapMode::kClamp);

    bool operator==(const TextureSampler& that) const {
        return this->proxy()->underlyingUniqueID() == that.proxy()->underlyingUniqueID() &&
               fSamplerState == that.fSamplerState;
    }

    bool operator!=(const TextureSampler& other) const { return !(*this == other); }

    // 'instantiate' should only ever be called at flush time.
    bool instantiate(GrResourceProvider* resourceProvider) const {
        return SkToBool(fProxyRef.get()->instantiate(resourceProvider));
    }

    // 'peekTexture' should only ever be called after a successful 'instantiate' call.
    GrTexture* peekTexture() const {
        SkASSERT(fProxyRef.get()->peekTexture());
        return fProxyRef.get()->peekTexture();
    }

    GrTextureProxy* proxy() const { return fProxyRef.get(); }
    const GrSamplerState& samplerState() const { return fSamplerState; }

    bool isInitialized() const { return SkToBool(fProxyRef.get()); }
    /**
     * For internal use by GrFragmentProcessor.
     */
    const GrTextureProxyRef* proxyRef() const { return &fProxyRef; }

private:
    GrTextureProxyRef fProxyRef;
    GrSamplerState fSamplerState;
};

//////////////////////////////////////////////////////////////////////////////

const GrFragmentProcessor::TextureSampler& GrFragmentProcessor::IthTextureSampler(int i) {
    SK_ABORT("Illegal texture sampler index");
    static const TextureSampler kBogus;
    return kBogus;
}

GR_MAKE_BITFIELD_OPS(GrFragmentProcessor::OptimizationFlags)

#endif